#!/usr/bin/env python
#-*- coding: utf-8 -*-
import sys
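# If a path is passed as the first command-line argument, prepend it to
# sys.path so the viennagrid.wrapper module under test is imported from that
# location; the argument is consumed here so unittest never sees it.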
if len(sys.argv) > 1:
sys.path.insert(0, sys.argv.pop(1))
import unittest
import viennagrid.wrapper
##################
# LINEAR DOMAINS #
##################
class TestLinearCartesian1D_Segmentation(unittest.TestCase):
def setUp(self):
self.vertices = [
viennagrid.wrapper.PointCartesian1D(1),
viennagrid.wrapper.PointCartesian1D(2),
viennagrid.wrapper.PointCartesian1D(3),
viennagrid.wrapper.PointCartesian1D(4),
viennagrid.wrapper.PointCartesian1D(5),
]
self.num_vertices = len(self.vertices)
self.domain = viennagrid.wrapper.LinearCartesian1D_Domain()
for point in self.vertices:
self.domain.make_vertex(point)
self.segmentation = viennagrid.wrapper.LinearCartesian1D_Segmentation(self.domain)
self.num_segments = 5
def test_make_segment(self):
"""Test method 'make_segment' and attribute 'segments' of class 'Segmentation'."""
self.assertEqual(len(self.segmentation.segments), 0)
self.assertEqual(self.segmentation.num_segments, 0)
for i in range(0, self.num_segments):
self.segmentation.make_segment()
self.assertEqual(len(self.segmentation.segments), i+1)
self.assertEqual(self.segmentation.num_segments, i+1)
self.assertEqual(len(self.segmentation.segments), self.num_segments)
self.assertEqual(self.segmentation.num_segments, self.num_segments)
def test_make_cell(self):
"""Test method 'make_cell' and attribute 'cells' of class 'Segment'."""
for segment in self.segmentation.segments:
self.assertEqual(len(segment.cells), 0)
self.assertEqual(segment.num_cells, 0)
v1 = self.domain.get_vertex(0)
v2 = self.domain.get_vertex(1)
segment.make_cell(v1, v2)
self.assertEqual(len(segment.cells), 1)
self.assertEqual(segment.num_cells, 1)
v1 = self.domain.get_vertex(1)
v2 = self.domain.get_vertex(2)
segment.make_cell(v1, v2)
self.assertEqual(len(segment.cells), 2)
self.assertEqual(segment.num_cells, 2)
class TestLinearCartesian2D_Segmentation(unittest.TestCase):
def setUp(self):
self.vertices = [
viennagrid.wrapper.PointCartesian2D(1, 2),
viennagrid.wrapper.PointCartesian2D(2, 3),
viennagrid.wrapper.PointCartesian2D(3, 4),
viennagrid.wrapper.PointCartesian2D(4, 5),
viennagrid.wrapper.PointCartesian2D(5, 6),
]
self.num_vertices = len(self.vertices)
self.domain = viennagrid.wrapper.LinearCartesian2D_Domain()
for point in self.vertices:
self.domain.make_vertex(point)
self.segmentation = viennagrid.wrapper.LinearCartesian2D_Segmentation(self.domain)
self.num_segments = 5
def test_make_segment(self):
"""Test method 'make_segment' and attribute 'segments' of class 'Segmentation'."""
self.assertEqual(len(self.segmentation.segments), 0)
self.assertEqual(self.segmentation.num_segments, 0)
for i in range(0, self.num_segments):
self.segmentation.make_segment()
self.assertEqual(len(self.segmentation.segments), i+1)
self.assertEqual(self.segmentation.num_segments, i+1)
self.assertEqual(len(self.segmentation.segments), self.num_segments)
self.assertEqual(self.segmentation.num_segments, self.num_segments)
def test_make_cell(self):
"""Test method 'make_cell' and attribute 'cells' of class 'Segment'."""
for segment in self.segmentation.segments:
self.assertEqual(len(segment.cells), 0)
self.assertEqual(segment.num_cells, 0)
v1 = self.domain.get_vertex(0)
v2 = self.domain.get_vertex(1)
segment.make_cell(v1, v2)
self.assertEqual(len(segment.cells), 1)
self.assertEqual(segment.num_cells, 1)
v1 = self.domain.get_vertex(1)
v2 = self.domain.get_vertex(2)
segment.make_cell(v1, v2)
self.assertEqual(len(segment.cells), 2)
self.assertEqual(segment.num_cells, 2)
class TestLinearCartesian3D_Segmentation(unittest.TestCase):
def setUp(self):
self.vertices = [
viennagrid.wrapper.PointCartesian3D(1, 2, 7),
viennagrid.wrapper.PointCartesian3D(2, 3, 7),
viennagrid.wrapper.PointCartesian3D(3, 4, 7),
viennagrid.wrapper.PointCartesian3D(4, 5, 7),
viennagrid.wrapper.PointCartesian3D(5, 6, 7),
]
self.num_vertices = len(self.vertices)
self.domain = viennagrid.wrapper.LinearCartesian3D_Domain()
for point in self.vertices:
self.domain.make_vertex(point)
self.segmentation = viennagrid.wrapper.LinearCartesian3D_Segmentation(self.domain)
self.num_segments = 5
def test_make_segment(self):
"""Test method 'make_segment' and attribute 'segments' of class 'Segmentation'."""
self.assertEqual(len(self.segmentation.segments), 0)
self.assertEqual(self.segmentation.num_segments, 0)
for i in range(0, self.num_segments):
self.segmentation.make_segment()
self.assertEqual(len(self.segmentation.segments), i+1)
self.assertEqual(self.segmentation.num_segments, i+1)
self.assertEqual(len(self.segmentation.segments), self.num_segments)
self.assertEqual(self.segmentation.num_segments, self.num_segments)
def test_make_cell(self):
"""Test method 'make_cell' and attribute 'cells' of class 'Segment'."""
for segment in self.segmentation.segments:
self.assertEqual(len(segment.cells), 0)
self.assertEqual(segment.num_cells, 0)
v1 = self.domain.get_vertex(0)
v2 = self.domain.get_vertex(1)
segment.make_cell(v1, v2)
self.assertEqual(len(segment.cells), 1)
self.assertEqual(segment.num_cells, 1)
v1 = self.domain.get_vertex(1)
v2 = self.domain.get_vertex(2)
segment.make_cell(v1, v2)
self.assertEqual(len(segment.cells), 2)
self.assertEqual(segment.num_cells, 2)
class TestLinearCylindrical3D_Segmentation(unittest.TestCase):
def setUp(self):
self.vertices = [
viennagrid.wrapper.PointCylindrical3D(1, 2, 7),
viennagrid.wrapper.PointCylindrical3D(2, 3, 7),
viennagrid.wrapper.PointCylindrical3D(3, 4, 7),
viennagrid.wrapper.PointCylindrical3D(4, 5, 7),
viennagrid.wrapper.PointCylindrical3D(5, 6, 7),
]
self.num_vertices = len(self.vertices)
self.domain = viennagrid.wrapper.LinearCylindrical3D_Domain()
for point in self.vertices:
self.domain.make_vertex(point)
self.segmentation = viennagrid.wrapper.LinearCylindrical3D_Segmentation(self.domain)
self.num_segments = 5
def test_make_segment(self):
"""Test method 'make_segment' and attribute 'segments' of class 'Segmentation'."""
self.assertEqual(len(self.segmentation.segments), 0)
self.assertEqual(self.segmentation.num_segments, 0)
for i in range(0, self.num_segments):
self.segmentation.make_segment()
self.assertEqual(len(self.segmentation.segments), i+1)
self.assertEqual(self.segmentation.num_segments, i+1)
self.assertEqual(len(self.segmentation.segments), self.num_segments)
self.assertEqual(self.segmentation.num_segments, self.num_segments)
def test_make_cell(self):
"""Test method 'make_cell' and attribute 'cells' of class 'Segment'."""
for segment in self.segmentation.segments:
self.assertEqual(len(segment.cells), 0)
self.assertEqual(segment.num_cells, 0)
v1 = self.domain.get_vertex(0)
v2 = self.domain.get_vertex(1)
segment.make_cell(v1, v2)
self.assertEqual(len(segment.cells), 1)
self.assertEqual(segment.num_cells, 1)
v1 = self.domain.get_vertex(1)
v2 = self.domain.get_vertex(2)
segment.make_cell(v1, v2)
self.assertEqual(len(segment.cells), 2)
self.assertEqual(segment.num_cells, 2)
class TestLinearPolar2D_Segmentation(unittest.TestCase):
def setUp(self):
self.vertices = [
viennagrid.wrapper.PointPolar2D(1, 2),
viennagrid.wrapper.PointPolar2D(2, 3),
viennagrid.wrapper.PointPolar2D(3, 4),
viennagrid.wrapper.PointPolar2D(4, 5),
viennagrid.wrapper.PointPolar2D(5, 6),
]
self.num_vertices = len(self.vertices)
self.domain = viennagrid.wrapper.LinearPolar2D_Domain()
for point in self.vertices:
self.domain.make_vertex(point)
self.segmentation = viennagrid.wrapper.LinearPolar2D_Segmentation(self.domain)
self.num_segments = 5
def test_make_segment(self):
"""Test method 'make_segment' and attribute 'segments' of class 'Segmentation'."""
self.assertEqual(len(self.segmentation.segments), 0)
self.assertEqual(self.segmentation.num_segments, 0)
for i in range(0, self.num_segments):
self.segmentation.make_segment()
self.assertEqual(len(self.segmentation.segments), i+1)
self.assertEqual(self.segmentation.num_segments, i+1)
self.assertEqual(len(self.segmentation.segments), self.num_segments)
self.assertEqual(self.segmentation.num_segments, self.num_segments)
def test_make_cell(self):
"""Test method 'make_cell' and attribute 'cells' of class 'Segment'."""
for segment in self.segmentation.segments:
self.assertEqual(len(segment.cells), 0)
self.assertEqual(segment.num_cells, 0)
v1 = self.domain.get_vertex(0)
v2 = self.domain.get_vertex(1)
segment.make_cell(v1, v2)
self.assertEqual(len(segment.cells), 1)
self.assertEqual(segment.num_cells, 1)
v1 = self.domain.get_vertex(1)
v2 = self.domain.get_vertex(2)
segment.make_cell(v1, v2)
self.assertEqual(len(segment.cells), 2)
self.assertEqual(segment.num_cells, 2)
class TestLinearSpherical3D_Segmentation(unittest.TestCase):
def setUp(self):
self.vertices = [
viennagrid.wrapper.PointSpherical3D(1, 2, 7),
viennagrid.wrapper.PointSpherical3D(2, 3, 7),
viennagrid.wrapper.PointSpherical3D(3, 4, 7),
viennagrid.wrapper.PointSpherical3D(4, 5, 7),
viennagrid.wrapper.PointSpherical3D(5, 6, 7),
]
self.num_vertices = len(self.vertices)
self.domain = viennagrid.wrapper.LinearSpherical3D_Domain()
for point in self.vertices:
self.domain.make_vertex(point)
self.segmentation = viennagrid.wrapper.LinearSpherical3D_Segmentation(self.domain)
self.num_segments = 5
def test_make_segment(self):
"""Test method 'make_segment' and attribute 'segments' of class 'Segmentation'."""
self.assertEqual(len(self.segmentation.segments), 0)
self.assertEqual(self.segmentation.num_segments, 0)
for i in range(0, self.num_segments):
self.segmentation.make_segment()
self.assertEqual(len(self.segmentation.segments), i+1)
self.assertEqual(self.segmentation.num_segments, i+1)
self.assertEqual(len(self.segmentation.segments), self.num_segments)
self.assertEqual(self.segmentation.num_segments, self.num_segments)
def test_make_cell(self):
"""Test method 'make_cell' and attribute 'cells' of class 'Segment'."""
for segment in self.segmentation.segments:
self.assertEqual(len(segment.cells), 0)
self.assertEqual(segment.num_cells, 0)
v1 = self.domain.get_vertex(0)
v2 = self.domain.get_vertex(1)
segment.make_cell(v1, v2)
self.assertEqual(len(segment.cells), 1)
self.assertEqual(segment.num_cells, 1)
v1 = self.domain.get_vertex(1)
v2 = self.domain.get_vertex(2)
segment.make_cell(v1, v2)
self.assertEqual(len(segment.cells), 2)
self.assertEqual(segment.num_cells, 2)
######################
# TRIANGULAR DOMAINS #
######################
class TestTriangularCartesian2D_Segmentation(unittest.TestCase):
def setUp(self):
self.vertices = [
viennagrid.wrapper.PointCartesian2D(1, 2),
viennagrid.wrapper.PointCartesian2D(2, 3),
viennagrid.wrapper.PointCartesian2D(3, 4),
viennagrid.wrapper.PointCartesian2D(4, 5),
viennagrid.wrapper.PointCartesian2D(5, 6),
]
self.num_vertices = len(self.vertices)
self.domain = viennagrid.wrapper.TriangularCartesian2D_Domain()
for point in self.vertices:
self.domain.make_vertex(point)
self.segmentation = viennagrid.wrapper.TriangularCartesian2D_Segmentation(self.domain)
self.num_segments = 5
def test_make_segment(self):
"""Test method 'make_segment' and attribute 'segments' of class 'Segmentation'."""
self.assertEqual(len(self.segmentation.segments), 0)
self.assertEqual(self.segmentation.num_segments, 0)
for i in range(0, self.num_segments):
self.segmentation.make_segment()
self.assertEqual(len(self.segmentation.segments), i+1)
self.assertEqual(self.segmentation.num_segments, i+1)
self.assertEqual(len(self.segmentation.segments), self.num_segments)
self.assertEqual(self.segmentation.num_segments, self.num_segments)
def test_make_cell(self):
"""Test method 'make_cell' and attribute 'cells' of class 'Segment'."""
for segment in self.segmentation.segments:
self.assertEqual(len(segment.cells), 0)
self.assertEqual(segment.num_cells, 0)
v1 = self.domain.get_vertex(0)
v2 = self.domain.get_vertex(1)
v3 = self.domain.get_vertex(2)
segment.make_cell(v1, v2, v3)
self.assertEqual(len(segment.cells), 1)
self.assertEqual(segment.num_cells, 1)
v1 = self.domain.get_vertex(1)
v2 = self.domain.get_vertex(2)
v3 = self.domain.get_vertex(3)
segment.make_cell(v1, v2, v3)
self.assertEqual(len(segment.cells), 2)
self.assertEqual(segment.num_cells, 2)
class TestTriangularCartesian3D_Segmentation(unittest.TestCase):
def setUp(self):
self.vertices = [
viennagrid.wrapper.PointCartesian3D(1, 2, 7),
viennagrid.wrapper.PointCartesian3D(2, 3, 7),
viennagrid.wrapper.PointCartesian3D(3, 4, 7),
viennagrid.wrapper.PointCartesian3D(4, 5, 7),
viennagrid.wrapper.PointCartesian3D(5, 6, 7),
]
self.num_vertices = len(self.vertices)
self.domain = viennagrid.wrapper.TriangularCartesian3D_Domain()
for point in self.vertices:
self.domain.make_vertex(point)
self.segmentation = viennagrid.wrapper.TriangularCartesian3D_Segmentation(self.domain)
self.num_segments = 5
def test_make_segment(self):
"""Test method 'make_segment' and attribute 'segments' of class 'Segmentation'."""
self.assertEqual(len(self.segmentation.segments), 0)
self.assertEqual(self.segmentation.num_segments, 0)
for i in range(0, self.num_segments):
self.segmentation.make_segment()
self.assertEqual(len(self.segmentation.segments), i+1)
self.assertEqual(self.segmentation.num_segments, i+1)
self.assertEqual(len(self.segmentation.segments), self.num_segments)
self.assertEqual(self.segmentation.num_segments, self.num_segments)
def test_make_cell(self):
"""Test method 'make_cell' and attribute 'cells' of class 'Segment'."""
for segment in self.segmentation.segments:
self.assertEqual(len(segment.cells), 0)
self.assertEqual(segment.num_cells, 0)
v1 = self.domain.get_vertex(0)
v2 = self.domain.get_vertex(1)
v3 = self.domain.get_vertex(2)
segment.make_cell(v1, v2, v3)
self.assertEqual(len(segment.cells), 1)
self.assertEqual(segment.num_cells, 1)
v1 = self.domain.get_vertex(1)
v2 = self.domain.get_vertex(2)
v3 = self.domain.get_vertex(3)
segment.make_cell(v1, v2, v3)
self.assertEqual(len(segment.cells), 2)
self.assertEqual(segment.num_cells, 2)
class TestTriangularCylindrical3D_Segmentation(unittest.TestCase):
def setUp(self):
self.vertices = [
viennagrid.wrapper.PointCylindrical3D(1, 2, 7),
viennagrid.wrapper.PointCylindrical3D(2, 3, 7),
viennagrid.wrapper.PointCylindrical3D(3, 4, 7),
viennagrid.wrapper.PointCylindrical3D(4, 5, 7),
viennagrid.wrapper.PointCylindrical3D(5, 6, 7),
]
self.num_vertices = len(self.vertices)
self.domain = viennagrid.wrapper.TriangularCylindrical3D_Domain()
for point in self.vertices:
self.domain.make_vertex(point)
self.segmentation = viennagrid.wrapper.TriangularCylindrical3D_Segmentation(self.domain)
self.num_segments = 5
def test_make_segment(self):
"""Test method 'make_segment' and attribute 'segments' of class 'Segmentation'."""
self.assertEqual(len(self.segmentation.segments), 0)
self.assertEqual(self.segmentation.num_segments, 0)
for i in range(0, self.num_segments):
self.segmentation.make_segment()
self.assertEqual(len(self.segmentation.segments), i+1)
self.assertEqual(self.segmentation.num_segments, i+1)
self.assertEqual(len(self.segmentation.segments), self.num_segments)
self.assertEqual(self.segmentation.num_segments, self.num_segments)
def test_make_cell(self):
"""Test method 'make_cell' and attribute 'cells' of class 'Segment'."""
for segment in self.segmentation.segments:
self.assertEqual(len(segment.cells), 0)
self.assertEqual(segment.num_cells, 0)
v1 = self.domain.get_vertex(0)
v2 = self.domain.get_vertex(1)
v3 = self.domain.get_vertex(2)
segment.make_cell(v1, v2, v3)
self.assertEqual(len(segment.cells), 1)
self.assertEqual(segment.num_cells, 1)
v1 = self.domain.get_vertex(1)
v2 = self.domain.get_vertex(2)
v3 = self.domain.get_vertex(3)
segment.make_cell(v1, v2, v3)
self.assertEqual(len(segment.cells), 2)
self.assertEqual(segment.num_cells, 2)
class TestTriangularPolar2D_Segmentation(unittest.TestCase):
def setUp(self):
self.vertices = [
viennagrid.wrapper.PointPolar2D(1, 2),
viennagrid.wrapper.PointPolar2D(2, 3),
viennagrid.wrapper.PointPolar2D(3, 4),
viennagrid.wrapper.PointPolar2D(4, 5),
viennagrid.wrapper.PointPolar2D(5, 6),
]
self.num_vertices = len(self.vertices)
self.domain = viennagrid.wrapper.TriangularPolar2D_Domain()
for point in self.vertices:
self.domain.make_vertex(point)
self.segmentation = viennagrid.wrapper.TriangularPolar2D_Segmentation(self.domain)
self.num_segments = 5
def test_make_segment(self):
"""Test method 'make_segment' and attribute 'segments' of class 'Segmentation'."""
self.assertEqual(len(self.segmentation.segments), 0)
self.assertEqual(self.segmentation.num_segments, 0)
for i in range(0, self.num_segments):
self.segmentation.make_segment()
self.assertEqual(len(self.segmentation.segments), i+1)
self.assertEqual(self.segmentation.num_segments, i+1)
self.assertEqual(len(self.segmentation.segments), self.num_segments)
self.assertEqual(self.segmentation.num_segments, self.num_segments)
def test_make_cell(self):
"""Test method 'make_cell' and attribute 'cells' of class 'Segment'."""
for segment in self.segmentation.segments:
self.assertEqual(len(segment.cells), 0)
self.assertEqual(segment.num_cells, 0)
v1 = self.domain.get_vertex(0)
v2 = self.domain.get_vertex(1)
v3 = self.domain.get_vertex(2)
segment.make_cell(v1, v2, v3)
self.assertEqual(len(segment.cells), 1)
self.assertEqual(segment.num_cells, 1)
v1 = self.domain.get_vertex(1)
v2 = self.domain.get_vertex(2)
v3 = self.domain.get_vertex(3)
segment.make_cell(v1, v2, v3)
self.assertEqual(len(segment.cells), 2)
self.assertEqual(segment.num_cells, 2)
class TestTriangularSpherical3D_Segmentation(unittest.TestCase):
def setUp(self):
self.vertices = [
viennagrid.wrapper.PointSpherical3D(1, 2, 7),
viennagrid.wrapper.PointSpherical3D(2, 3, 7),
viennagrid.wrapper.PointSpherical3D(3, 4, 7),
viennagrid.wrapper.PointSpherical3D(4, 5, 7),
viennagrid.wrapper.PointSpherical3D(5, 6, 7),
]
self.num_vertices = len(self.vertices)
self.domain = viennagrid.wrapper.TriangularSpherical3D_Domain()
for point in self.vertices:
self.domain.make_vertex(point)
self.segmentation = viennagrid.wrapper.TriangularSpherical3D_Segmentation(self.domain)
self.num_segments = 5
def test_make_segment(self):
"""Test method 'make_segment' and attribute 'segments' of class 'Segmentation'."""
self.assertEqual(len(self.segmentation.segments), 0)
self.assertEqual(self.segmentation.num_segments, 0)
for i in range(0, self.num_segments):
self.segmentation.make_segment()
self.assertEqual(len(self.segmentation.segments), i+1)
self.assertEqual(self.segmentation.num_segments, i+1)
self.assertEqual(len(self.segmentation.segments), self.num_segments)
self.assertEqual(self.segmentation.num_segments, self.num_segments)
def test_make_cell(self):
"""Test method 'make_cell' and attribute 'cells' of class 'Segment'."""
for segment in self.segmentation.segments:
self.assertEqual(len(segment.cells), 0)
self.assertEqual(segment.num_cells, 0)
v1 = self.domain.get_vertex(0)
v2 = self.domain.get_vertex(1)
v3 = self.domain.get_vertex(2)
segment.make_cell(v1, v2, v3)
self.assertEqual(len(segment.cells), 1)
self.assertEqual(segment.num_cells, 1)
v1 = self.domain.get_vertex(1)
v2 = self.domain.get_vertex(2)
v3 = self.domain.get_vertex(3)
segment.make_cell(v1, v2, v3)
self.assertEqual(len(segment.cells), 2)
self.assertEqual(segment.num_cells, 2)
#########################
# QUADRILATERAL DOMAINS #
#########################
class TestQuadrilateralCartesian2D_Segmentation(unittest.TestCase):
def setUp(self):
self.vertices = [
viennagrid.wrapper.PointCartesian2D(1, 2),
viennagrid.wrapper.PointCartesian2D(2, 3),
viennagrid.wrapper.PointCartesian2D(3, 4),
viennagrid.wrapper.PointCartesian2D(4, 5),
viennagrid.wrapper.PointCartesian2D(5, 6),
]
self.num_vertices = len(self.vertices)
self.domain = viennagrid.wrapper.QuadrilateralCartesian2D_Domain()
for point in self.vertices:
self.domain.make_vertex(point)
self.segmentation = viennagrid.wrapper.QuadrilateralCartesian2D_Segmentation(self.domain)
self.num_segments = 5
def test_make_segment(self):
"""Test method 'make_segment' and attribute 'segments' of class 'Segmentation'."""
self.assertEqual(len(self.segmentation.segments), 0)
self.assertEqual(self.segmentation.num_segments, 0)
for i in range(0, self.num_segments):
self.segmentation.make_segment()
self.assertEqual(len(self.segmentation.segments), i+1)
self.assertEqual(self.segmentation.num_segments, i+1)
self.assertEqual(len(self.segmentation.segments), self.num_segments)
self.assertEqual(self.segmentation.num_segments, self.num_segments)
def test_make_cell(self):
"""Test method 'make_cell' and attribute 'cells' of class 'Segment'."""
for segment in self.segmentation.segments:
self.assertEqual(len(segment.cells), 0)
self.assertEqual(segment.num_cells, 0)
v1 = self.domain.get_vertex(0)
v2 = self.domain.get_vertex(1)
v3 = self.domain.get_vertex(2)
v4 = self.domain.get_vertex(3)
segment.make_cell(v1, v2, v3, v4)
self.assertEqual(len(segment.cells), 1)
self.assertEqual(segment.num_cells, 1)
v1 = self.domain.get_vertex(1)
v2 = self.domain.get_vertex(2)
v3 = self.domain.get_vertex(3)
v4 = self.domain.get_vertex(4)
segment.make_cell(v1, v2, v3, v4)
self.assertEqual(len(segment.cells), 2)
self.assertEqual(segment.num_cells, 2)
class TestQuadrilateralCartesian3D_Segmentation(unittest.TestCase):
def setUp(self):
self.vertices = [
viennagrid.wrapper.PointCartesian3D(1, 2, 7),
viennagrid.wrapper.PointCartesian3D(2, 3, 7),
viennagrid.wrapper.PointCartesian3D(3, 4, 7),
viennagrid.wrapper.PointCartesian3D(4, 5, 7),
viennagrid.wrapper.PointCartesian3D(5, 6, 7),
]
self.num_vertices = len(self.vertices)
self.domain = viennagrid.wrapper.QuadrilateralCartesian3D_Domain()
for point in self.vertices:
self.domain.make_vertex(point)
self.segmentation = viennagrid.wrapper.QuadrilateralCartesian3D_Segmentation(self.domain)
self.num_segments = 5
def test_make_segment(self):
"""Test method 'make_segment' and attribute 'segments' of class 'Segmentation'."""
self.assertEqual(len(self.segmentation.segments), 0)
self.assertEqual(self.segmentation.num_segments, 0)
for i in range(0, self.num_segments):
self.segmentation.make_segment()
self.assertEqual(len(self.segmentation.segments), i+1)
self.assertEqual(self.segmentation.num_segments, i+1)
self.assertEqual(len(self.segmentation.segments), self.num_segments)
self.assertEqual(self.segmentation.num_segments, self.num_segments)
def test_make_cell(self):
"""Test method 'make_cell' and attribute 'cells' of class 'Segment'."""
for segment in self.segmentation.segments:
self.assertEqual(len(segment.cells), 0)
self.assertEqual(segment.num_cells, 0)
v1 = self.domain.get_vertex(0)
v2 = self.domain.get_vertex(1)
v3 = self.domain.get_vertex(2)
v4 = self.domain.get_vertex(3)
segment.make_cell(v1, v2, v3, v4)
self.assertEqual(len(segment.cells), 1)
self.assertEqual(segment.num_cells, 1)
v1 = self.domain.get_vertex(1)
v2 = self.domain.get_vertex(2)
v3 = self.domain.get_vertex(3)
v4 = self.domain.get_vertex(4)
segment.make_cell(v1, v2, v3, v4)
self.assertEqual(len(segment.cells), 2)
self.assertEqual(segment.num_cells, 2)
class TestQuadrilateralCylindrical3D_Segmentation(unittest.TestCase):
def setUp(self):
self.vertices = [
viennagrid.wrapper.PointCylindrical3D(1, 2, 7),
viennagrid.wrapper.PointCylindrical3D(2, 3, 7),
viennagrid.wrapper.PointCylindrical3D(3, 4, 7),
viennagrid.wrapper.PointCylindrical3D(4, 5, 7),
viennagrid.wrapper.PointCylindrical3D(5, 6, 7),
]
self.num_vertices = len(self.vertices)
self.domain = viennagrid.wrapper.QuadrilateralCylindrical3D_Domain()
for point in self.vertices:
self.domain.make_vertex(point)
self.segmentation = viennagrid.wrapper.QuadrilateralCylindrical3D_Segmentation(self.domain)
self.num_segments = 5
def test_make_segment(self):
"""Test method 'make_segment' and attribute 'segments' of class 'Segmentation'."""
self.assertEqual(len(self.segmentation.segments), 0)
self.assertEqual(self.segmentation.num_segments, 0)
for i in range(0, self.num_segments):
self.segmentation.make_segment()
self.assertEqual(len(self.segmentation.segments), i+1)
self.assertEqual(self.segmentation.num_segments, i+1)
self.assertEqual(len(self.segmentation.segments), self.num_segments)
self.assertEqual(self.segmentation.num_segments, self.num_segments)
def test_make_cell(self):
"""Test method 'make_cell' and attribute 'cells' of class 'Segment'."""
for segment in self.segmentation.segments:
self.assertEqual(len(segment.cells), 0)
self.assertEqual(segment.num_cells, 0)
v1 = self.domain.get_vertex(0)
v2 = self.domain.get_vertex(1)
v3 = self.domain.get_vertex(2)
v4 = self.domain.get_vertex(3)
segment.make_cell(v1, v2, v3, v4)
self.assertEqual(len(segment.cells), 1)
self.assertEqual(segment.num_cells, 1)
v1 = self.domain.get_vertex(1)
v2 = self.domain.get_vertex(2)
v3 = self.domain.get_vertex(3)
v4 = self.domain.get_vertex(4)
segment.make_cell(v1, v2, v3, v4)
self.assertEqual(len(segment.cells), 2)
self.assertEqual(segment.num_cells, 2)
class TestQuadrilateralPolar2D_Segmentation(unittest.TestCase):
def setUp(self):
self.vertices = [
viennagrid.wrapper.PointPolar2D(1, 2),
viennagrid.wrapper.PointPolar2D(2, 3),
viennagrid.wrapper.PointPolar2D(3, 4),
viennagrid.wrapper.PointPolar2D(4, 5),
viennagrid.wrapper.PointPolar2D(5, 6),
]
self.num_vertices = len(self.vertices)
self.domain = viennagrid.wrapper.QuadrilateralPolar2D_Domain()
for point in self.vertices:
self.domain.make_vertex(point)
self.segmentation = viennagrid.wrapper.QuadrilateralPolar2D_Segmentation(self.domain)
self.num_segments = 5
def test_make_segment(self):
"""Test method 'make_segment' and attribute 'segments' of class 'Segmentation'."""
self.assertEqual(len(self.segmentation.segments), 0)
self.assertEqual(self.segmentation.num_segments, 0)
for i in range(0, self.num_segments):
self.segmentation.make_segment()
self.assertEqual(len(self.segmentation.segments), i+1)
self.assertEqual(self.segmentation.num_segments, i+1)
self.assertEqual(len(self.segmentation.segments), self.num_segments)
self.assertEqual(self.segmentation.num_segments, self.num_segments)
def test_make_cell(self):
"""Test method 'make_cell' and attribute 'cells' of class 'Segment'."""
for segment in self.segmentation.segments:
self.assertEqual(len(segment.cells), 0)
self.assertEqual(segment.num_cells, 0)
v1 = self.domain.get_vertex(0)
v2 = self.domain.get_vertex(1)
v3 = self.domain.get_vertex(2)
v4 = self.domain.get_vertex(3)
segment.make_cell(v1, v2, v3, v4)
self.assertEqual(len(segment.cells), 1)
self.assertEqual(segment.num_cells, 1)
v1 = self.domain.get_vertex(1)
v2 = self.domain.get_vertex(2)
v3 = self.domain.get_vertex(3)
v4 = self.domain.get_vertex(4)
segment.make_cell(v1, v2, v3, v4)
self.assertEqual(len(segment.cells), 2)
self.assertEqual(segment.num_cells, 2)
class TestQuadrilateralSpherical3D_Segmentation(unittest.TestCase):
def setUp(self):
self.vertices = [
viennagrid.wrapper.PointSpherical3D(1, 2, 7),
viennagrid.wrapper.PointSpherical3D(2, 3, 7),
viennagrid.wrapper.PointSpherical3D(3, 4, 7),
viennagrid.wrapper.PointSpherical3D(4, 5, 7),
viennagrid.wrapper.PointSpherical3D(5, 6, 7),
]
self.num_vertices = len(self.vertices)
self.domain = viennagrid.wrapper.QuadrilateralSpherical3D_Domain()
for point in self.vertices:
self.domain.make_vertex(point)
self.segmentation = viennagrid.wrapper.QuadrilateralSpherical3D_Segmentation(self.domain)
self.num_segments = 5
def test_make_segment(self):
"""Test method 'make_segment' and attribute 'segments' of class 'Segmentation'."""
self.assertEqual(len(self.segmentation.segments), 0)
self.assertEqual(self.segmentation.num_segments, 0)
for i in range(0, self.num_segments):
self.segmentation.make_segment()
self.assertEqual(len(self.segmentation.segments), i+1)
self.assertEqual(self.segmentation.num_segments, i+1)
self.assertEqual(len(self.segmentation.segments), self.num_segments)
self.assertEqual(self.segmentation.num_segments, self.num_segments)
def test_make_cell(self):
"""Test method 'make_cell' and attribute 'cells' of class 'Segment'."""
for segment in self.segmentation.segments:
self.assertEqual(len(segment.cells), 0)
self.assertEqual(segment.num_cells, 0)
v1 = self.domain.get_vertex(0)
v2 = self.domain.get_vertex(1)
v3 = self.domain.get_vertex(2)
v4 = self.domain.get_vertex(3)
segment.make_cell(v1, v2, v3, v4)
self.assertEqual(len(segment.cells), 1)
self.assertEqual(segment.num_cells, 1)
v1 = self.domain.get_vertex(1)
v2 = self.domain.get_vertex(2)
v3 = self.domain.get_vertex(3)
v4 = self.domain.get_vertex(4)
segment.make_cell(v1, v2, v3, v4)
self.assertEqual(len(segment.cells), 2)
self.assertEqual(segment.num_cells, 2)
#######################
# TETRAHEDRAL DOMAINS #
#######################
class TestTetrahedralCartesian3D_Segmentation(unittest.TestCase):
def setUp(self):
self.vertices = [
viennagrid.wrapper.PointCartesian3D(1, 2, 7),
viennagrid.wrapper.PointCartesian3D(2, 3, 7),
viennagrid.wrapper.PointCartesian3D(3, 4, 7),
viennagrid.wrapper.PointCartesian3D(4, 5, 7),
viennagrid.wrapper.PointCartesian3D(5, 6, 7),
]
self.num_vertices = len(self.vertices)
self.domain = viennagrid.wrapper.TetrahedralCartesian3D_Domain()
for point in self.vertices:
self.domain.make_vertex(point)
self.segmentation = viennagrid.wrapper.TetrahedralCartesian3D_Segmentation(self.domain)
self.num_segments = 5
def test_make_segment(self):
"""Test method 'make_segment' and attribute 'segments' of class 'Segmentation'."""
self.assertEqual(len(self.segmentation.segments), 0)
self.assertEqual(self.segmentation.num_segments, 0)
for i in range(0, self.num_segments):
self.segmentation.make_segment()
self.assertEqual(len(self.segmentation.segments), i+1)
self.assertEqual(self.segmentation.num_segments, i+1)
self.assertEqual(len(self.segmentation.segments), self.num_segments)
self.assertEqual(self.segmentation.num_segments, self.num_segments)
def test_make_cell(self):
"""Test method 'make_cell' and attribute 'cells' of class 'Segment'."""
for segment in self.segmentation.segments:
self.assertEqual(len(segment.cells), 0)
self.assertEqual(segment.num_cells, 0)
v1 = self.domain.get_vertex(0)
v2 = self.domain.get_vertex(1)
v3 = self.domain.get_vertex(2)
v4 = self.domain.get_vertex(3)
segment.make_cell(v1, v2, v3, v4)
self.assertEqual(len(segment.cells), 1)
self.assertEqual(segment.num_cells, 1)
v1 = self.domain.get_vertex(1)
v2 = self.domain.get_vertex(2)
v3 = self.domain.get_vertex(3)
v4 = self.domain.get_vertex(4)
segment.make_cell(v1, v2, v3, v4)
self.assertEqual(len(segment.cells), 2)
self.assertEqual(segment.num_cells, 2)
class TestTetrahedralCylindrical3D_Segmentation(unittest.TestCase):
def setUp(self):
self.vertices = [
viennagrid.wrapper.PointCylindrical3D(1, 2, 7),
viennagrid.wrapper.PointCylindrical3D(2, 3, 7),
viennagrid.wrapper.PointCylindrical3D(3, 4, 7),
viennagrid.wrapper.PointCylindrical3D(4, 5, 7),
viennagrid.wrapper.PointCylindrical3D(5, 6, 7),
]
self.num_vertices = len(self.vertices)
self.domain = viennagrid.wrapper.TetrahedralCylindrical3D_Domain()
for point in self.vertices:
self.domain.make_vertex(point)
self.segmentation = viennagrid.wrapper.TetrahedralCylindrical3D_Segmentation(self.domain)
self.num_segments = 5
def test_make_segment(self):
"""Test method 'make_segment' and attribute 'segments' of class 'Segmentation'."""
self.assertEqual(len(self.segmentation.segments), 0)
self.assertEqual(self.segmentation.num_segments, 0)
for i in range(0, self.num_segments):
self.segmentation.make_segment()
self.assertEqual(len(self.segmentation.segments), i+1)
self.assertEqual(self.segmentation.num_segments, i+1)
self.assertEqual(len(self.segmentation.segments), self.num_segments)
self.assertEqual(self.segmentation.num_segments, self.num_segments)
def test_make_cell(self):
"""Test method 'make_cell' and attribute 'cells' of class 'Segment'."""
for segment in self.segmentation.segments:
self.assertEqual(len(segment.cells), 0)
self.assertEqual(segment.num_cells, 0)
v1 = self.domain.get_vertex(0)
v2 = self.domain.get_vertex(1)
v3 = self.domain.get_vertex(2)
v4 = self.domain.get_vertex(3)
segment.make_cell(v1, v2, v3, v4)
self.assertEqual(len(segment.cells), 1)
self.assertEqual(segment.num_cells, 1)
v1 = self.domain.get_vertex(1)
v2 = self.domain.get_vertex(2)
v3 = self.domain.get_vertex(3)
v4 = self.domain.get_vertex(4)
segment.make_cell(v1, v2, v3, v4)
self.assertEqual(len(segment.cells), 2)
self.assertEqual(segment.num_cells, 2)
class TestTetrahedralSpherical3D_Segmentation(unittest.TestCase):
def setUp(self):
self.vertices = [
viennagrid.wrapper.PointSpherical3D(1, 2, 7),
viennagrid.wrapper.PointSpherical3D(2, 3, 7),
viennagrid.wrapper.PointSpherical3D(3, 4, 7),
viennagrid.wrapper.PointSpherical3D(4, 5, 7),
viennagrid.wrapper.PointSpherical3D(5, 6, 7),
]
self.num_vertices = len(self.vertices)
self.domain = viennagrid.wrapper.TetrahedralSpherical3D_Domain()
for point in self.vertices:
self.domain.make_vertex(point)
self.segmentation = viennagrid.wrapper.TetrahedralSpherical3D_Segmentation(self.domain)
self.num_segments = 5
def test_make_segment(self):
"""Test method 'make_segment' and attribute 'segments' of class 'Segmentation'."""
self.assertEqual(len(self.segmentation.segments), 0)
self.assertEqual(self.segmentation.num_segments, 0)
for i in range(0, self.num_segments):
self.segmentation.make_segment()
self.assertEqual(len(self.segmentation.segments), i+1)
self.assertEqual(self.segmentation.num_segments, i+1)
self.assertEqual(len(self.segmentation.segments), self.num_segments)
self.assertEqual(self.segmentation.num_segments, self.num_segments)
def test_make_cell(self):
"""Test method 'make_cell' and attribute 'cells' of class 'Segment'."""
for segment in self.segmentation.segments:
self.assertEqual(len(segment.cells), 0)
self.assertEqual(segment.num_cells, 0)
v1 = self.domain.get_vertex(0)
v2 = self.domain.get_vertex(1)
v3 = self.domain.get_vertex(2)
v4 = self.domain.get_vertex(3)
segment.make_cell(v1, v2, v3, v4)
self.assertEqual(len(segment.cells), 1)
self.assertEqual(segment.num_cells, 1)
v1 = self.domain.get_vertex(1)
v2 = self.domain.get_vertex(2)
v3 = self.domain.get_vertex(3)
v4 = self.domain.get_vertex(4)
segment.make_cell(v1, v2, v3, v4)
self.assertEqual(len(segment.cells), 2)
self.assertEqual(segment.num_cells, 2)
if __name__ == '__main__':
unittest.main()
# -*- coding: utf-8 -*-
from twisted.enterprise import adbapi
from sqlite3 import Row
class Table(object):
modified = None
cursor = None
schema = None
joindata = None
fielddata = None
def __init__(self, *args, **kwargs):
self.modified = set()
self.attributes = [('id', None, int)] + self.attributes
self.__load(**kwargs)
if 'id' in kwargs:
self.id = int(kwargs['id'])
self.modified = set()
def __load(self, *args, **kwargs):
for attr, dflt, nrmlzr in self.attributes:
if attr in kwargs and kwargs[attr] != None:
setattr(self, attr, nrmlzr(kwargs[attr]))
else:
setattr(self, attr, dflt)
return self
def load(self, *args, **kwargs):
self.__load(*args, **kwargs).modified = set()
return self
def __setattr__(self, attr, value):
super(Table, self).__setattr__(attr, value)
self.modified.add(attr)
def getTable(self):
r = '''"%s"''' % (self.tablename)
if self.schema is not None:
r = '''"%s".%s''' % (self.schema, r)
return r
def getChanges(self):
return filter(lambda a: a[0] in self.modified, self.attributes)
def insert(self, ignore=False):
changes = self.getChanges()
attrbts = None
if len(changes) != 0 and len(changes) != len(self.attributes):
attrbts = '(' + ','.join([a[0] for a in changes]) + ')'
asksgn = ','.join(['?' for i in changes ])
values = [getattr(self, a[0]) for a in changes]
stmt = '''INSERT %s INTO %s%s VALUES (%s);''' % (
'OR IGNORE' if ignore else '', self.getTable(),
attrbts if attrbts is not None else '', asksgn)
return stmt, values
def update(self):
changes = self.getChanges()
asksgn = ','.join([a[0] + ' = ?' for a in changes])
values = [ getattr(self, a[0]) for a in changes ]
stmt = '''UPDATE %s SET %s WHERE id = %s;''' % (
self.getTable(), asksgn, self.id)
return stmt, values
def delete(self, *args, **kwargs):
attrbts = filter(lambda a: a[0] in kwargs, self.attributes)
values = map(lambda t: t[2](kwargs[t[0]]), attrbts)
assert len(attrbts) != 0, "At least one criterion is needed"
stmt = '''DELETE FROM %s WHERE %s''' % (self.getTable(),
' AND '.join([a[0] + ' = ?' for a in attrbts]))
return stmt, values
def persist(self):
stmt = None
if self.id is None:
return self.insert()
return self.update()
def select(self, *args, **kwargs):
if self.joindata != None:
fulltable = "%s %s" % (self.getTable(), self.joindata)
else:
fulltable = self.getTable()
if self.fielddata != None:
fields = self.fielddata
else:
fields = '*'
stmt = '''SELECT %s FROM %s WHERE %%s;''' % (fields, fulltable)
if 'raw' in kwargs and kwargs['raw'] == True:
return stmt % kwargs['conditions'], [ ]
attrbts = filter(lambda a: a[0] in kwargs, self.attributes)
if len(attrbts) != 0:
asksgn = ' AND '.join(
['%s = ?' % (a[0])
if not isinstance(kwargs[a[0]], tuple)
else '%s %s ?' % (a[0],kwargs[a[0]][0]) for a in attrbts])
else:
asksgn = '1 = 1'
if 'offset' in kwargs:
asksgn += ' LIMIT 1 OFFSET %s' % (kwargs['offset'])
return stmt % asksgn, [kwargs[a[0]]
if not isinstance(kwargs[a[0]], tuple)
else kwargs[a[0]][1] for a in attrbts]
def join(self, table, mine='', their='', kind='LEFT'):
self.joindata = '%s JOIN %s ON (%s = %s)' % (kind, table.getTable(),
"%s.%s" % (self.tablename, mine),
"%s.%s" % (table.tablename, their))
for f in table.attributes:
if f not in self.attributes:
self.attributes.append(f)
return self
def fields(self, fieldlist):
self.fielddata = ', '.join(fieldlist)
return self
class Package(Table):
tablename = 'packages'
attributes = [
('name', '', str), ('md5', '', str), ('size', 0, int) ]
class Header(Table):
tablename = 'headers'
attributes = [
('source', '', str), ('destination', '', str), ('package_id', None, int),
('length', 0, int), ('date', '', str), ('md5', '', str) ]
class Body(Table):
tablename = 'bodies'
attributes = [ ('header_id', 0, int), ('begin', 0, int),
('end', 0, int), ('content', '', str) ]
class Delivery(Table):
tablename = 'deliveries'
attributes = [ ('header_id', 0, int) ]
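# A brief usage sketch of the statement builders above (illustrative only;
# in this module the generated statements are run through
# twisted.enterprise.adbapi rather than executed directly):
#
#   Package().select(name='readme.txt', size=('>=', 1))
#   # -> ('SELECT * FROM "packages" WHERE name = ? AND size >= ?;', ['readme.txt', 1])
#
#   p = Package(name='readme.txt', md5='d41d8cd9...')
#   stmt, values = p.insert()   # INSERT statement with one '?' per changed attribute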
class TableFactory():
def __init__(self, trnsctnmngr, table, joins=[]):
self.trnsctnmngr = trnsctnmngr
self.table = table
self.joins = joins
def execute(self, q, a):
for r in self.trnsctnmngr.execute(q, a).fetchall():
data = self.table(r)
for t in self.joins:
data.join(t)
yield data.load(**r)
class DictRowledCPool(adbapi.ConnectionPool):
def namedTuple(self, cursor, row):
fields = { }
for i, col in enumerate(cursor.description):
if isinstance(row[i], str) or isinstance(row[i], unicode):
fields[col[0].encode('utf-8')] = row[i].encode('utf-8')
else:
fields[col[0].encode('utf-8')] = row[i]
return fields
def connect(self):
conn = adbapi.ConnectionPool.connect(self)
conn.row_factory = self.namedTuple
return conn
from twisted.internet.defer import Deferred, setDebugging
from twisted.internet.protocol import Protocol
from twisted.internet.address import IPv4Address
from twisted.python.failure import Failure
from twisted.web.client import Agent, FileBodyProducer
from twisted.web.http_headers import Headers
from twisted.web.resource import Resource
from twisted.web.server import Site, NOT_DONE_YET
from wind import AbstractWind, AbstractTCPWind, TCPWindFactory
from twisted.internet import reactor
from datetime import datetime
from hashlib import md5
from StringIO import StringIO
setDebugging(True)
datamodel = '''-- HTTP-DTN Data model
PRAGMA foreign_keys = ON;
CREATE TABLE IF NOT EXISTS packages (
id INTEGER PRIMARY KEY AUTOINCREMENT,
name VARCHAR(160),
md5 VARCHAR(32),
size INTEGER,
UNIQUE (name,md5)
);
CREATE UNIQUE INDEX IF NOT EXISTS packgesIDIndex ON packages(id);
CREATE TABLE IF NOT EXISTS headers (
id INTEGER PRIMARY KEY AUTOINCREMENT,
source VARCHAR(128),
destination VARCHAR(128),
package_id INTEGER REFERENCES packages(id) ON DELETE SET NULL ON UPDATE CASCADE,
length INTEGER,
date TEXT,
md5 VARCHAR(32),
UNIQUE (source,destination,date)
);
CREATE UNIQUE INDEX IF NOT EXISTS headersIDIndex ON headers(id);
CREATE TABLE IF NOT EXISTS bodies (
id INTEGER PRIMARY KEY AUTOINCREMENT,
header_id INTEGER REFERENCES headers(id),
begin INTEGER,
end INTEGER,
content BLOB
);
CREATE UNIQUE INDEX IF NOT EXISTS bodiesIDIndex ON bodies(id);
CREATE TABLE IF NOT EXISTS deliveries (
id INTEGER PRIMARY KEY AUTOINCREMENT,
header_id INTEGER REFERENCES headers(id),
UNIQUE (header_id)
);
CREATE UNIQUE INDEX IF NOT EXISTS deliveriesIDIndex ON deliveries(id);
'''
def headerExtractor(header, normalizers=[]):
r = { }
for t in header.getAllRawHeaders():
label = t[0].lower().replace('content-', '').replace('http-dtn-', '')
r[label] = t[1][0]
for n in normalizers:
if n[0] in r:
r[n[0]] = n[2](r[n[0]])
return r
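# Illustrative example: for raw headers such as
#   Content-Source: alice, Content-Length: 42, Date: Mon, 01 Jan 2018 00:00:00 GMT
# headerExtractor(response.headers, normalizers=[('length', 0, int)]) yields
#   {'source': 'alice', 'length': 42, 'date': 'Mon, 01 Jan 2018 00:00:00 GMT'}
# i.e. the 'content-' / 'http-dtn-' prefixes are stripped and the listed
# normalizer callables are applied to the matching values.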
TO_STRING_FMT = "%a, %d %b %Y %H:%M:%S GMT"
FM_STRING_FMT = "%a, %d %b %Y %H:%M:%S %Z"
def currentTimestring():
return datetime.utcnow().strftime(TO_STRING_FMT)
def datetimeToTimestring(date):
return date.strftime(TO_STRING_FMT)
def timestringToDatetime(date):
return datetime.strptime(date, FM_STRING_FMT)
def datetimeToTimestamp(date):
return int(date.strftime("%s"))
def timestampToDatetime(date):
return datetime.utcfromtimestamp(date)
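# Example round-trip for the helpers above:
#   datetimeToTimestring(datetime(2018, 1, 1))                  -> 'Mon, 01 Jan 2018 00:00:00 GMT'
#   timestringToDatetime('Mon, 01 Jan 2018 00:00:00 GMT').year  -> 2018
# Note that datetimeToTimestamp relies on strftime("%s"), a platform-specific
# extension that is not available on every C library.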
class HTTPDtnBodyConsumer(Protocol):
def __init__(self, rh, header):
self.header_id = header['id']
if 'range' in rh:
b,e = map(int, rh['range'].split(' to '))
if (e - b) != rh['length']:
rh['begin'], rh['end'] = b, e
else:
rh['begin'], rh['end'] = 0, rh['length']
else:
rh['begin'], rh['end'] = 0, rh['length']
rh['content'] = ''
self.rh = rh
def dataReceived(self, bytes):
self.rh['content'] += bytes
def connectionLost(self, reason):
b = Body(**self.rh)
b.header_id = self.header_id
def okprinter(_, *args, **kwargs):
if isinstance(_, Failure):
_.printTraceback()
# ' -> Body persisted'
return _
HTTPDtnPersister.persistencer.runQuery(
*b.insert(ignore=True)).addBoth(okprinter)
class RequestWrapper(object):
def __init__(self, request):
self.headers = request.requestHeaders
self.content = request.content.read()
self.length = len(self.content)
def deliverBody(self, consumer):
consumer.dataReceived(self.content)
consumer.connectionLost(None)
class HTTPDtnPersister(object):
persistencer = None
def __init__(self, *args, **kwargs):
HTTPDtnPersister.persistencer = DictRowledCPool(
'sqlite3', 'test.db', check_same_thread=False)
HTTPDtnPersister.persistencer.runInteraction(
lambda r: r.executescript(datamodel))
def getPersistencer(self):
return HTTPDtnPersister.persistencer
def returnObject(self, trnsctnmngr, (query, params), request):
f = TableFactory(trnsctnmngr, Body, [Header()])
for o in f.execute(query, params):
request.setHeader('content-source', o.source)
request.setHeader('content-destination', o.destination)
request.setHeader('content-md5', o.md5)
request.setHeader('date', o.date)
request.setHeader('content-length', str(o.length))
if o.begin != 0 or o.end != o.length:
request.setHeader(
'content-range', '%s to %s' % (o.begin, o.end))
request.write(o.content)
break
else:
request.setResponseCode(404)
request.finish()
def returnDelivery(self, trnsctnmngr, (query, params), request):
f = TableFactory(trnsctnmngr, Delivery, [Header()])
for d in f.execute(query, params):
request.setHeader('content-source', d.source)
request.setHeader('content-destination', d.destination)
request.setHeader('content-md5', d.md5)
request.setHeader('date', d.date)
break
else:
request.setResponseCode(404)
request.finish()
def processBodies(self, trnsctnmngr, response, returnPath):
h = headerExtractor(response.headers)
h['length'] = response.length
header = trnsctnmngr.execute(*Header().select(**h)).fetchone()
if header is None: # ' -> Header not found on our headers table'
trnsctnmngr.execute(*Header(**h).insert()).fetchone()
header = trnsctnmngr.execute(*Header().select(**h)).fetchone()
else: # ' -> Header already in DB'
alreadyDelivered = trnsctnmngr.execute(
*Delivery().select(header_id=header['id'])).fetchone()
if alreadyDelivered is not None: # ' -> Header already delivered'
# We have no way to notify the sender, so just return
if returnPath is None:
return True
# Otherwise, notify the sender over its return path
httpHeader = Headers({
'Content-source': [header['source'].encode('utf-8')],
'Content-destination':
[header['destination'].encode('utf-8')],
'Content-md5': [header['md5'].encode('utf-8')],
'Date': [header['date'].encode('utf-8')],
})
HTTPDTNWindClient(reactor).request(
'POST', returnPath + 'delivery', httpHeader
).addCallback(
lambda r: True)
return True
# ' -> Persisting body'
response.deliverBody(HTTPDtnBodyConsumer(h, header))
return True
def processDelivery(self, trnsctnmngr, response):
h = headerExtractor(response.headers)
header = trnsctnmngr.execute(*Header().select(**h)).fetchone()
if header is None: # ' -> Headers not found on our headers table'
trnsctnmngr.execute(*Header(**h).insert()).fetchone()
header = trnsctnmngr.execute(*Header().select(**h)).fetchone()
trnsctnmngr.execute(
*Delivery(header_id=header['id']).insert(True)
).fetchone()
else: # ' -> Header already in our DB'
trnsctnmngr.execute(
*Delivery(header_id=header['id']).insert(True)
).fetchone()
trnsctnmngr.execute(
*Body().delete(header_id=header['id'])).fetchone()
class HTTPDTNWindServer(Resource, AbstractWind, HTTPDtnPersister):
isLeaf = True
mandatoryHeadersFields = [
'source', 'destination', 'md5', 'length', 'date' ]
def __init__(self, *args, **kwargs):
Resource.__init__(self, *args, **kwargs)
HTTPDtnPersister.__init__(self)
def connectionMade(self):
Resource.connectionMade(self)
AbstractWind.connectionMade(self)
def render_GET(self, request):
queryData = headerExtractor(request.requestHeaders)
if request.path in ['/', '*']:
fields = [
'headers.' + n for n in [
'source', 'destination', 'length', 'date', 'md5']] + [
'bodies.' + n for n in ['begin', 'end', 'content']]
query = Body().join(
Header(), 'header_id', 'id'
).fields(
fields
).select(**queryData)
self.getPersistencer().runInteraction(
self.returnObject, query, request)
elif request.path in [ '/deliveries' ]:
fields = ['headers.' + n for n in [
'source', 'destination', 'length', 'date', 'md5']]
query = Delivery().join(
Header(), 'header_id', 'id'
).fields(
fields
).select(**queryData)
self.getPersistencer().runInteraction(
self.returnDelivery, query, request)
else:
request.setResponseCode(400)
return ''
return NOT_DONE_YET
def chainBodies(self, *args, **kwargs):
print ' -> Chain bodies vault'
return ''
def render_POST(self, request):
senderHeader = headerExtractor(request.requestHeaders)
#for f in self.mandatoryHeadersFields:
#if f not in senderHeader:
#request.setResponseCode(400)
#return ''
if request.path in ['/', '*']:
returnPath = (
'http://%(return-path)s:%(return-port)s/' % (senderHeader)
) if ('return-path' in senderHeader
and 'return-port' in senderHeader) else None
self.getPersistencer().runInteraction(
self.processBodies, RequestWrapper(request), returnPath)
elif request.path in ['/delivery']:
# The peer is trying to send us a delivery notification
self.getPersistencer().runInteraction(
self.processDelivery, RequestWrapper(request))
else: # The peer doesn't know what it's doing... is it really a peer?
request.setResponseCode(400)
return ''
class HTTPDTNWindClient(Agent, AbstractTCPWind, HTTPDtnPersister):
connected = False
''' -+- '''
def __init__(self, *args, **kwargs):
Agent.__init__(self, *args, **kwargs)
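# chainDeliveries/chainBodies below walk a peer's stored records one at a
# time: every 200 response is persisted and the next GET is issued with an
# incremented HTTP-DTN-Offset header; chainDeliveries switches over to
# chainBodies once the deliveries are exhausted, and chainBodies stops at the
# first non-200 reply.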
def chainBodies(self, response, baseURL, offset=0):
if response != None:
if response.code != 200:
return True
self.getPersistencer().runInteraction(
self.processBodies, response, baseURL)
self.request(
'GET', baseURL, Headers({'HTTP-DTN-Offset': [ offset ]})
).addCallback(
self.chainBodies, baseURL=baseURL, offset=offset+1)
return True
def chainDeliveries(self, response, baseURL, offset=0):
if response != None:
if response.code != 200:
self.chainBodies(None, baseURL)
return True
self.getPersistencer().runInteraction(
self.processDelivery, response)
self.request(
'GET', baseURL + 'deliveries',
Headers({'HTTP-DTN-Offset': [ offset ]})
).addCallback(
self.chainDeliveries, baseURL=baseURL, offset=offset+1)
return True
def chainRequests(self, host, port):
self.chainDeliveries(None, 'http://%s:%s/' % (host, port))
class HTTPDTNWind(AbstractWind):
raw = True
factory = None
maxAge = 300
def __init__(self, host, port, reactor):
self.host, self.port = host, port
self.last, self.line = datetime.now(), False
self.reactor = reactor
def __eq__(self, o):
return self.port == o.port and self.host == o.host
def howLong(self):
return (datetime.now() - self.last).total_seconds()
def worth(self):
return (self.howLong() < HTTPDTNWind.maxAge)
def setFactory(self, f):
self.factory = f
return self
def getPeer(self):
return IPv4Address('TCP', self.host, self.port)
def setLine(self, value):
self.last = datetime.now()
self.line = value
def setOnline(self, callbacked):
self.setLine(True)
return callbacked
def setOffline(self, callbacked):
self.setLine(False)
return callbacked
def online(self):
return self.line
def spread(self, headers, data):
for p in self.factory.knownHosts:
baseURL = 'http://%s:%s/' % (p.host, p.port)
# TODO: Spread the word
HTTPDTNWindClient(
self.reactor
).request(
'PUT', baseURL, headers, FileBodyProducer(StringIO(data))
).addCallbacks(
callback=p.setOnline,
errback=p.setOffline
).addErrback(
lambda r: True)
def doSend(self, message, data):
cmd5 = md5(data).hexdigest()
date = currentTimestring()
baseURL = 'http://%s:%s/' % (self.host, self.port)
headers = Headers({
'content-source': [message.getSource()],
'content-destination': [message.getDestination()],
'content-md5': [cmd5], 'content-length': [str(len(data))],
'date': [date],
})
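# Try a direct PUT while the peer is marked online or was seen recently
# (within maxAge seconds); if that fails, or the peer looks stale, fall back
# to spreading the message to every known host.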
if self.online() is True or self.worth():
return HTTPDTNWindClient(
self.reactor
).request(
'PUT', baseURL, headers, FileBodyProducer(StringIO(data))
).addCallbacks(
callback=self.setOnline,
errback=self.setOffline,
).addErrback(
self.spread, headers, data)
return self.spread(headers, data)
class HTTPDTNWindFactory(Site, TCPWindFactory):
knownHosts = set()
def __init__(self, overlay, rcr=None):
Site.__init__(self, HTTPDTNWindServer())
TCPWindFactory.__init__(self, overlay, rcr)
def listen(self, port, address=None):
interface = '' if address is None else address
# TODO: add the address parameter to the listener
return Site.listen(self, port)
def doConnect(self, (host, port)):
t = HTTPDTNWind(host, port, self.reactor)
t.setFactory(self).connectionMade()
self.knownHosts.add(t)
if __name__ == '__main__':
from twisted.internet import reactor
from sys import argv
shallServ = False
if 'client' in argv:
shallServ = True
reactor.callLater(
1, HTTPDTNWindClient(reactor).chainRequests, '143.54.12.57', 8080)
if 'server' in argv or shallServ:
HTTPDTNWindFactory(None).listen(8080)
if 'sql' in argv:
p = Package(name='asdsa', md5='asdsa')
p.name = 'pedrotestes'
q = Package(name='dsads', md5='asdsa', size=20)
print p.name, p.md5, p.size, p.modified, p.persist()
print q.name, q.md5, q.size, q.modified, q.persist()
print Package().select(name=10, size=('>=', 100))
print Package().select(raw=True, conditions='ASD = DSA')
print Delivery().join(
Header(), 'header_id', 'id'
).fields([
'header.source', 'header.destination'
]).select(
source='alderan', destination='dagobah')
reactor.run() if reduce(lambda a,b: a or b,
filter(lambda c: c in argv,
['server','client']), False) else None
from __future__ import unicode_literals
from __future__ import absolute_import, division, print_function
"""
This module is used to cache per-collection field information.
"""
__author__ = "Graham Klyne (GK@ACM.ORG)"
__copyright__ = "Copyright 2018, G. Klyne"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
import logging
log = logging.getLogger(__name__)
from annalist import layout
from annalist.exceptions import Annalist_Error
from annalist.identifiers import ANNAL, RDFS
from annalist.models.collectionentitycache import (
Cache_Error, CollectionEntityCacheObject, CollectionEntityCache
)
from annalist.models.closurecache import ClosureCache
from annalist.models.recordfield import RecordField
# ---------------------------------------------------------------------------
#
# Field-cache object class
#
# ---------------------------------------------------------------------------
class CollectionFieldCacheObject(CollectionEntityCacheObject):
"""
This class is a field definition cache for a specified collection.
It extends class CollectionEntityCacheObject with field-specific logic; notably
overriding method _load_entity with additional logic to maintain a superproperty
closure cache, and methods to access that cache.
"""
def __init__(self, coll_id, entity_cls=RecordField):
"""
Initialize a cache object for a specified collection.
coll_id Collection id with which the field cache is associated.
"""
super(CollectionFieldCacheObject, self).__init__(coll_id, entity_cls)
self._superproperty_closure = ClosureCache(coll_id, ANNAL.CURIE.superproperty_uri)
return
def _load_entity(self, coll, field_entity):
"""
Internal helper method loads field data to cache.
Also updates superproperty closure cache.
Returns True if new field was added.
"""
field_id = field_entity.get_id()
property_uri = field_entity.get_property_uri()
field_parent = field_entity.get_parent().get_id()
field_data = field_entity.get_save_values()
add_field = super(CollectionFieldCacheObject, self)._load_entity(
coll, field_entity, entity_uri=property_uri
)
if add_field:
# Add relations for superproperty references from the new property URI
for superproperty_obj in field_data.get(ANNAL.CURIE.superproperty_uri, []):
superproperty_uri = superproperty_obj["@id"]
self._superproperty_closure.add_rel(property_uri, superproperty_uri)
# Also add relations for references *to* the new property URI
for try_subproperty_obj in self.get_all_entities(coll):
sub_superp_objs = try_subproperty_obj.get(ANNAL.CURIE.superproperty_uri, [])
sub_superp_uris = (
[ sub_superp_obj["@id"] for sub_superp_obj in sub_superp_objs ]
)
if property_uri in sub_superp_uris:
subproperty_uri = try_subproperty_obj.get(ANNAL.CURIE.property_uri, None)
if subproperty_uri:
self._superproperty_closure.add_rel(subproperty_uri, property_uri)
return add_field
def _drop_entity(self, coll, field_id):
"""
Override method that drops an entity from the cache, to also remove references
from the superproperty closure cache.
Returns the field entity removed, or None if not found.
"""
field_entity = super(CollectionFieldCacheObject, self)._drop_entity(coll, field_id)
if field_entity:
property_uri = field_entity.get_property_uri()
self._superproperty_closure.remove_val(property_uri)
return field_entity
def get_superproperty_uris(self, property_uri):
"""
Returns all superproperty URIs for a specified property URI.
Returns all superproperty URIs, even those for which there
is no defined field entity.
"""
return self._superproperty_closure.fwd_closure(property_uri)
def get_subproperty_uris(self, property_uri):
"""
Returns all subproperty URIs for a specified property URI.
Returns all subproperty URIs, even those for which there
is no defined field entity.
"""
return self._superproperty_closure.rev_closure(property_uri)
def get_superproperty_fields(self, coll, property_uri):
"""
Returns all superproperties for a specified property URI.
This method returns only those superproperties that are defined as entities.
"""
self._load_entities(coll)
for st_uri in self.get_superproperty_uris(property_uri):
st = self.get_entity_from_uri(coll, st_uri)
if st:
yield st
return
def get_subproperty_fields(self, coll, property_uri):
"""
Returns all subproperties for a specified property URI.
This method returns only those subproperties that are defined as entities.
"""
self._load_entities(coll)
for st_uri in self.get_subproperty_uris(property_uri):
st = self.get_entity_from_uri(coll, st_uri)
if st:
yield st
return
def remove_cache(self):
"""
Close down and release all collection field cache data
"""
# log.debug("@@@@remove field cache %r"%(self.get_coll_id(),))
super(CollectionFieldCacheObject, self).remove_cache()
self._superproperty_closure.remove_cache()
self._superproperty_closure = None
return
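# Note on the closure bookkeeping in _load_entity above: if field F1 defines
# property URI ex:P, and field F2 defines ex:Q declaring ex:P as a
# superproperty, then once both fields are cached get_superproperty_uris("ex:Q")
# contains "ex:P" and get_subproperty_uris("ex:P") contains "ex:Q", regardless
# of the order in which the two fields were loaded (the second loop back-fills
# relations for previously cached fields that reference the new property URI).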
# ---------------------------------------------------------------------------
#
# Collection field-cache class
#
# ---------------------------------------------------------------------------
class CollectionFieldCache(CollectionEntityCache):
"""
This class manages field cache objects over multiple collections
"""
def __init__(self):
"""
Initialize.
Initializes a field cache with no per-collection data.
"""
super(CollectionFieldCache, self).__init__(CollectionFieldCacheObject, RecordField)
return
# Collection field cache allocation and access methods
def set_field(self, coll, field_entity):
"""
Save a new or updated field definition
"""
return self.set_entity(coll, field_entity)
def remove_field(self, coll, field_id):
"""
Remove field from collection field cache.
Returns the field entity removed if found, or None if not defined.
"""
return self.remove_entity(coll, field_id)
def get_field(self, coll, field_id):
"""
Retrieve a field description for a given field Id.
Returns a field object for the specified collection and field Id.
"""
return self.get_entity(coll, field_id)
def get_field_from_uri(self, coll, field_uri):
"""
Retrieve a field description for a given property URI.
Returns a field object for the specified collection and property URI.
"""
return self.get_entity_from_uri(coll, field_uri)
def get_all_field_ids(self, coll, altscope=None):
"""
Returns all fields currently available for a collection in the indicated scope.
Default scope is fields defined directly in the indicated collection.
"""
return self.get_all_entity_ids(coll, altscope=altscope)
def get_all_fields(self, coll, altscope=None):
"""
Returns all fields currently available for a collection in the indicated scope.
Default scope is fields defined directly in the indicated collection.
"""
return self.get_all_entities(coll, altscope=altscope)
def get_superproperty_fields(self, coll, field_uri):
"""
        Returns all superproperties for a specified property URI.
"""
field_cache = self._get_cache(coll)
return field_cache.get_superproperty_fields(coll, field_uri)
def get_subproperty_fields(self, coll, field_uri):
"""
        Returns all subproperties for a specified property URI.
"""
field_cache = self._get_cache(coll)
return field_cache.get_subproperty_fields(coll, field_uri)
def get_superproperty_uris(self, coll, field_uri):
"""
        Returns all superproperty URIs for a specified property URI.
"""
field_cache = self._get_cache(coll)
return field_cache.get_superproperty_uris(field_uri)
def get_subproperty_uris(self, coll, field_uri):
"""
        Returns all subproperty URIs for a specified property URI.
"""
field_cache = self._get_cache(coll)
return field_cache.get_subproperty_uris(field_uri)
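    # Hedged usage sketch (editorial, not part of the original API surface):
    # given a collection `coll` whose field definitions carry
    # annal:superproperty_uri relations, the cache can be queried for the
    # property hierarchy in both directions, e.g.
    #
    #   field_cache = CollectionFieldCache()
    #   field_cache.set_field(coll, field_entity)   # populate per-collection cache
    #   super_uris = field_cache.get_superproperty_uris(coll, "ex:subprop")
    #   for f in field_cache.get_subproperty_fields(coll, "ex:superprop"):
    #       pass  # only subproperties that exist as field entities are yielded
    #
    # `coll`, `field_entity` and the "ex:" URIs above are placeholders.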
# End.
|
|
#########
# Copyright (c) 2013 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
class Config(object):
def __init__(self):
self._db_address = 'localhost'
self._db_port = 9200
self._amqp_address = 'localhost'
self.amqp_username = 'guest'
self.amqp_password = 'guest'
self.amqp_ssl_enabled = False
self.amqp_ca_path = ''
self._file_server_root = None
self._file_server_base_uri = None
self._file_server_blueprints_folder = None
self._file_server_uploaded_blueprints_folder = None
self._file_server_snapshots_folder = None
self._file_server_resources_uri = None
self._rest_service_log_level = None
self._rest_service_log_path = None
self._rest_service_log_file_size_MB = None
self._rest_service_log_files_backup_count = None
self._test_mode = False
self._security_enabled = False
self._security_ssl = {'enabled': False}
self._security_admin_username = None
self._security_admin_password = None
self._security_auth_token_generator = None
self._security_audit_log_level = None
self._security_audit_log_file = None
self._security_audit_log_file_size_MB = None
self._security_audit_log_files_backup_count = None
self._security_userstore_driver = None
self._security_authentication_providers = []
self._security_authorization_provider = None
@property
def db_address(self):
return self._db_address
@db_address.setter
def db_address(self, value):
self._db_address = value
@property
def db_port(self):
return self._db_port
@db_port.setter
def db_port(self, value):
self._db_port = value
@property
def amqp_address(self):
return self._amqp_address
@amqp_address.setter
def amqp_address(self, value):
self._amqp_address = value
@property
def file_server_root(self):
return self._file_server_root
@file_server_root.setter
def file_server_root(self, value):
self._file_server_root = value
@property
def file_server_base_uri(self):
return self._file_server_base_uri
@file_server_base_uri.setter
def file_server_base_uri(self, value):
self._file_server_base_uri = value
@property
def file_server_blueprints_folder(self):
return self._file_server_blueprints_folder
@file_server_blueprints_folder.setter
def file_server_blueprints_folder(self, value):
self._file_server_blueprints_folder = value
@property
def file_server_uploaded_blueprints_folder(self):
return self._file_server_uploaded_blueprints_folder
@file_server_uploaded_blueprints_folder.setter
def file_server_uploaded_blueprints_folder(self, value):
self._file_server_uploaded_blueprints_folder = value
@property
def file_server_snapshots_folder(self):
return self._file_server_snapshots_folder
@file_server_snapshots_folder.setter
def file_server_snapshots_folder(self, value):
self._file_server_snapshots_folder = value
@property
def file_server_resources_uri(self):
return self._file_server_resources_uri
@file_server_resources_uri.setter
def file_server_resources_uri(self, value):
self._file_server_resources_uri = value
@property
def rest_service_log_path(self):
return self._rest_service_log_path
@rest_service_log_path.setter
def rest_service_log_path(self, value):
self._rest_service_log_path = value
@property
def rest_service_log_level(self):
return self._rest_service_log_level
@rest_service_log_level.setter
def rest_service_log_level(self, value):
self._rest_service_log_level = value
@property
def rest_service_log_file_size_MB(self):
return self._rest_service_log_file_size_MB
@rest_service_log_file_size_MB.setter
def rest_service_log_file_size_MB(self, value):
self._rest_service_log_file_size_MB = value
@property
def rest_service_log_files_backup_count(self):
return self._rest_service_log_files_backup_count
@rest_service_log_files_backup_count.setter
def rest_service_log_files_backup_count(self, value):
self._rest_service_log_files_backup_count = value
@property
def test_mode(self):
return self._test_mode
@test_mode.setter
def test_mode(self, value):
self._test_mode = value
@property
def security_enabled(self):
return self._security_enabled
@security_enabled.setter
def security_enabled(self, value):
self._security_enabled = value
@property
def security_ssl(self):
return self._security_ssl
@security_ssl.setter
def security_ssl(self, value):
self._security_ssl = value
@property
def security_admin_username(self):
return self._security_admin_username
@security_admin_username.setter
def security_admin_username(self, value):
self._security_admin_username = value
@property
def security_admin_password(self):
return self._security_admin_password
@security_admin_password.setter
def security_admin_password(self, value):
self._security_admin_password = value
@property
def security_authentication_providers(self):
return self._security_authentication_providers
@security_authentication_providers.setter
def security_authentication_providers(self, value):
self._security_authentication_providers = value
@property
def security_auth_token_generator(self):
return self._security_auth_token_generator
@security_auth_token_generator.setter
def security_auth_token_generator(self, value):
self._security_auth_token_generator = value
@property
def security_audit_log_level(self):
return self._security_audit_log_level
@security_audit_log_level.setter
def security_audit_log_level(self, value):
self._security_audit_log_level = value
@property
def file_server_uploaded_plugins_folder(self):
if not self._file_server_root:
return None
return os.path.join(self._file_server_root, 'plugins')
@property
def security_audit_log_file(self):
return self._security_audit_log_file
@security_audit_log_file.setter
def security_audit_log_file(self, value):
self._security_audit_log_file = value
@property
def security_audit_log_file_size_MB(self):
return self._security_audit_log_file_size_MB
@security_audit_log_file_size_MB.setter
def security_audit_log_file_size_MB(self, value):
self._security_audit_log_file_size_MB = value
@property
def security_audit_log_files_backup_count(self):
return self._security_audit_log_files_backup_count
@security_audit_log_files_backup_count.setter
def security_audit_log_files_backup_count(self, value):
self._security_audit_log_files_backup_count = value
@property
def security_userstore_driver(self):
return self._security_userstore_driver
@security_userstore_driver.setter
def security_userstore_driver(self, value):
self._security_userstore_driver = value
@property
def security_authorization_provider(self):
return self._security_authorization_provider
@security_authorization_provider.setter
def security_authorization_provider(self, value):
self._security_authorization_provider = value
_instance = Config()
def reset(configuration=None):
global _instance
if configuration is not None:
_instance = configuration
else:
_instance = Config()
def instance():
return _instance
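# Hedged usage sketch (editorial): the module keeps a process-wide Config
# singleton behind instance(); tests typically swap it out with reset(), e.g.
#
#   import config               # module name/path is an assumption
#   conf = config.instance()
#   conf.db_port = 9201
#   config.reset()              # restore a fresh default Config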
|
|
"""archvyrt provisioner base module"""
# stdlib
import logging
import os
import subprocess
# archvyrt
import archvyrt.tools as tools
LOG = logging.getLogger(__name__)
class Provisioner:
"""
Base provisioner for domain
"""
def __init__(self, domain):
"""
Initialize provisioner
"""
self._domain = domain
@property
def domain(self):
"""
        Libvirt domain that this provisioner is attached to
"""
return self._domain
@staticmethod
def _runcmd(cmds, output=False, **kwargs):
"""
Run a unix command
"""
# output shall be captured
if output:
LOG.debug('Run command: %s', ' '.join(cmds))
rval = subprocess.check_output(
cmds,
stderr=subprocess.STDOUT,
**kwargs
).decode()
# output does not matter, send it to /dev/null
else:
with open(os.devnull, 'w') as devnull:
LOG.debug('Run command: %s', ' '.join(cmds))
rval = subprocess.call(
cmds,
stdout=devnull,
stderr=devnull,
**kwargs
)
if rval != 0:
raise RuntimeError(
'Command %s failed, env: %s' % (' '.join(cmds),
kwargs.get('env', 'default'))
)
return rval
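    # Hedged usage sketch (editorial) of the two _runcmd modes, assuming the
    # listed binaries exist on the host:
    #
    #   Provisioner._runcmd(['sync'])                              # status only, output discarded
    #   kernel = Provisioner._runcmd(['uname', '-r'], output=True).strip()  # captured stdout (str)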
@staticmethod
def writefile(filename, lines, mode='w'):
"""
Write to a file
"""
LOG.debug('Write file %s', filename)
with open(filename, mode) as fobj:
fobj.write('%s\n' % '\n'.join(lines))
def cleanup(self):
"""
cleanup actions
"""
raise NotImplementedError
class LinuxProvisioner(Provisioner):
"""
Linux Base Provisioner
"""
def __init__(self, domain, target="/provision"):
"""
Initializes and runs the provisioner.
"""
super().__init__(domain)
self._target = target
self._uuid = {}
self._cleanup = []
self._prepare_disks()
self._install()
self._network_config()
self._locale_config()
self._fstab_config()
self._boot_config()
self._access_config()
@property
def target(self):
"""
        Temporary provisioning target, where the domain's disks are mounted
"""
return self._target
def run(self, *cmds, output=False, **kwargs):
"""
Runs a command, ensures proper environment
"""
env = kwargs.pop('env', os.environ.copy())
return self._runcmd(cmds, output, env=env, **kwargs)
def runchroot(self, *cmds, output=False, add_env=None, **kwargs):
"""
Runs a command in the guest
"""
env = kwargs.pop('env', os.environ.copy())
env['PATH'] = ":".join(("/usr/local/sbin",
"/usr/local/bin",
"/usr/sbin",
"/usr/bin",
"/sbin",
"/bin"))
if add_env is not None:
env.update(add_env)
chroot_cmds = (tools.ARCH_CHROOT,
self.target) + cmds
return self.run(*chroot_cmds, output=output, env=env, **kwargs)
def writetargetfile(self, filename, lines, mode='w'):
"""
Writes a file in the guest
"""
targetfilename = "%s%s" % (self.target, filename)
self.writefile(targetfilename, lines, mode)
def chmodtargetfile(self, filename, chmod):
"""
Change permission of file in the guest
"""
targetfilename = "%s%s" % (self.target, filename)
os.chmod(targetfilename, chmod)
def deletetargetfile(self, filename):
"""
Delete a file in the guest
"""
targetfilename = "%s%s" % (self.target, filename)
os.remove(targetfilename)
def cleanup(self):
"""
Cleanup actions, such as unmounting and disconnecting disks
"""
for cmd in reversed(self._cleanup):
self.run(*cmd)
def _prepare_disks(self):
"""
Format and mount disks
"""
LOG.info('Prepare disks')
for disk in self.domain.disks:
dev = '/dev/nbd%s' % disk.number
cur_part = 0
# "mount" qcow2 image file as block device
self.run(
tools.QEMU_NBD,
'-n',
'-c',
dev,
disk.path
)
self._cleanup.append([
tools.QEMU_NBD,
'-d',
dev,
])
# create empty partition table
self.run(
tools.SGDISK,
'-o',
dev
)
# On first disk, we create a bios boot partition
if disk.number == '0':
cur_part += 1
self.run(
tools.SGDISK,
'-n', '%d:2048:4095' % cur_part,
'-t', '%d:ef02' % cur_part,
dev
)
endsector = self.run(
tools.SGDISK,
'-E',
dev,
output=True).strip()
cur_part += 1
self.run(
tools.SGDISK,
'-n', '%d:4096:%s' % (cur_part, endsector),
dev
)
else:
# create single partition
cur_part += 1
self.run(
tools.SGDISK,
'-n', '%d' % cur_part,
dev
)
if disk.fstype == 'ext4':
# format ext4
self.run(
tools.MKFS_EXT4,
'%sp%d' % (dev, cur_part)
)
mountpoint = '/provision/%s' % disk.mountpoint.lstrip('/')
if disk.mountpoint == '/':
# set a filesystem label to aid grub configuration
self.run(
tools.TUNE2FS,
'-L',
'ROOTFS',
'%sp%d' % (dev, cur_part)
)
else:
# create mountpoint
os.makedirs(mountpoint)
self.run(
tools.MOUNT,
'%sp%d' % (dev, cur_part),
mountpoint
)
self._cleanup.append([
tools.UMOUNT,
mountpoint,
])
uuid = self.run(
tools.BLKID,
'-s',
'UUID',
'-o',
'value',
'%sp%d' % (dev, cur_part),
output=True
).strip()
self._uuid.setdefault('ext4', {})[disk.mountpoint] = uuid
elif disk.fstype == 'swap':
# set partition type to linux swap
self.run(
tools.SGDISK,
'-t',
'%d:8200' % cur_part,
dev
)
# format swap space
self.run(
tools.MKSWAP,
'-f',
'%sp%d' % (dev, cur_part)
)
self.run(
tools.SWAPON,
'%sp%d' % (dev, cur_part)
)
self._cleanup.append([
tools.SWAPOFF,
'%sp%d' % (dev, cur_part)
])
uuid = self.run(
tools.BLKID,
'-s',
'UUID',
'-o',
'value',
'%sp%d' % (dev, cur_part),
output=True
).strip()
self._uuid.setdefault('swap', []).append(uuid)
else:
raise RuntimeError('Unsupported fstype %s' % disk.fstype)
def _install(self):
"""
Linux base installation
"""
raise NotImplementedError
def _network_config(self):
"""
Domain network configuration
"""
raise NotImplementedError
def _locale_config(self):
"""
Domain locale/language settings
"""
raise NotImplementedError
def _fstab_config(self):
"""
Domain fstab configuration
"""
LOG.info('Write fstab configuration')
swap_lines = []
ext4_lines = []
for key, value in self._uuid.items():
fsckcount = 0
if key == 'swap':
for uuid in value:
swap_lines.append("UUID=%s none swap defaults 0 0" % uuid)
elif key == 'ext4':
for mountpoint, uuid in sorted(value.items()):
fsckcount += 1
ext4_lines.append(
"UUID=%s %s ext4 rw,relatime,data=ordered 0 %d" % (
uuid, mountpoint, fsckcount
)
)
self.writetargetfile('/etc/fstab', ext4_lines + swap_lines, 'a')
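        # Hedged example (editorial) of the lines appended to the guest's
        # /etc/fstab for one ext4 root disk plus one swap disk (UUIDs are
        # placeholders):
        #
        #   UUID=1111-2222 / ext4 rw,relatime,data=ordered 0 1
        #   UUID=3333-4444 none swap defaults 0 0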
def _boot_config(self):
"""
Domain bootloader, initrd configuration
"""
raise NotImplementedError
def _access_config(self):
"""
Domain access configuration such as sudo/ssh and local users
"""
raise NotImplementedError
|
|
"""A kernel manager for multiple kernels"""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
from __future__ import absolute_import
import os
import uuid
import zmq
from traitlets.config.configurable import LoggingConfigurable
from ipython_genutils.importstring import import_item
from traitlets import (
Instance, Dict, List, Unicode, Any, DottedObjectName
)
from ipython_genutils.py3compat import unicode_type
from .kernelspec import NATIVE_KERNEL_NAME, KernelSpecManager
class DuplicateKernelError(Exception):
pass
def kernel_method(f):
"""decorator for proxying MKM.method(kernel_id) to individual KMs by ID"""
def wrapped(self, kernel_id, *args, **kwargs):
# get the kernel
km = self.get_kernel(kernel_id)
method = getattr(km, f.__name__)
# call the kernel's method
r = method(*args, **kwargs)
# last thing, call anything defined in the actual class method
# such as logging messages
f(self, kernel_id, *args, **kwargs)
# return the method result
return r
return wrapped
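# Hedged sketch (editorial) of what the decorator yields: the per-kernel
# KernelManager method of the same name does the real work, then the body of
# the MultiKernelManager method runs afterwards (typically just logging), e.g.
#
#   @kernel_method
#   def interrupt_kernel(self, kernel_id):
#       self.log.info("Kernel interrupted: %s" % kernel_id)  # km.interrupt_kernel() already ran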
class MultiKernelManager(LoggingConfigurable):
"""A class for managing multiple kernels."""
default_kernel_name = Unicode(NATIVE_KERNEL_NAME, config=True,
help="The name of the default kernel to start"
)
kernel_spec_manager = Instance(KernelSpecManager, allow_none=True)
kernel_manager_class = DottedObjectName(
"jupyter_client.ioloop.IOLoopKernelManager", config=True,
help="""The kernel manager class. This is configurable to allow
subclassing of the KernelManager for customized behavior.
"""
)
def _kernel_manager_class_changed(self, name, old, new):
self.kernel_manager_factory = import_item(new)
kernel_manager_factory = Any(help="this is kernel_manager_class after import")
def _kernel_manager_factory_default(self):
return import_item(self.kernel_manager_class)
context = Instance('zmq.Context')
def _context_default(self):
return zmq.Context.instance()
connection_dir = Unicode('')
_kernels = Dict()
def list_kernel_ids(self):
"""Return a list of the kernel ids of the active kernels."""
# Create a copy so we can iterate over kernels in operations
# that delete keys.
return list(self._kernels.keys())
def __len__(self):
"""Return the number of running kernels."""
return len(self.list_kernel_ids())
def __contains__(self, kernel_id):
return kernel_id in self._kernels
def start_kernel(self, kernel_name=None, **kwargs):
"""Start a new kernel.
The caller can pick a kernel_id by passing one in as a keyword arg,
otherwise one will be picked using a uuid.
The kernel ID for the newly started kernel is returned.
"""
kernel_id = kwargs.pop('kernel_id', unicode_type(uuid.uuid4()))
if kernel_id in self:
raise DuplicateKernelError('Kernel already exists: %s' % kernel_id)
if kernel_name is None:
kernel_name = self.default_kernel_name
# kernel_manager_factory is the constructor for the KernelManager
# subclass we are using. It can be configured as any Configurable,
# including things like its transport and ip.
if self.kernel_spec_manager:
kwargs['kernel_spec_manager'] = self.kernel_spec_manager
km = self.kernel_manager_factory(connection_file=os.path.join(
self.connection_dir, "kernel-%s.json" % kernel_id),
parent=self, autorestart=True, log=self.log, kernel_name=kernel_name,
**kwargs
)
km.start_kernel(**kwargs)
self._kernels[kernel_id] = km
return kernel_id
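    # Hedged usage sketch (editorial), assuming a default kernelspec is installed:
    #
    #   mkm = MultiKernelManager()
    #   kid = mkm.start_kernel(kernel_name='python3')
    #   info = mkm.get_connection_info(kid)   # dict with ip and channel ports
    #   mkm.shutdown_kernel(kid)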
@kernel_method
def shutdown_kernel(self, kernel_id, now=False, restart=False):
"""Shutdown a kernel by its kernel uuid.
Parameters
==========
kernel_id : uuid
The id of the kernel to shutdown.
now : bool
            Should the kernel be shut down forcibly using a signal?
restart : bool
Will the kernel be restarted?
"""
self.log.info("Kernel shutdown: %s" % kernel_id)
self.remove_kernel(kernel_id)
@kernel_method
def request_shutdown(self, kernel_id, restart=False):
"""Ask a kernel to shut down by its kernel uuid"""
@kernel_method
def finish_shutdown(self, kernel_id, waittime=1, pollinterval=0.1):
"""Wait for a kernel to finish shutting down, and kill it if it doesn't
"""
self.log.info("Kernel shutdown: %s" % kernel_id)
@kernel_method
def cleanup(self, kernel_id, connection_file=True):
"""Clean up a kernel's resources"""
def remove_kernel(self, kernel_id):
"""remove a kernel from our mapping.
Mainly so that a kernel can be removed if it is already dead,
without having to call shutdown_kernel.
The kernel object is returned.
"""
return self._kernels.pop(kernel_id)
def shutdown_all(self, now=False):
"""Shutdown all kernels."""
kids = self.list_kernel_ids()
for kid in kids:
self.request_shutdown(kid)
for kid in kids:
self.finish_shutdown(kid)
self.cleanup(kid)
self.remove_kernel(kid)
@kernel_method
def interrupt_kernel(self, kernel_id):
"""Interrupt (SIGINT) the kernel by its uuid.
Parameters
==========
kernel_id : uuid
The id of the kernel to interrupt.
"""
self.log.info("Kernel interrupted: %s" % kernel_id)
@kernel_method
def signal_kernel(self, kernel_id, signum):
"""Sends a signal to the kernel by its uuid.
Note that since only SIGTERM is supported on Windows, this function
is only useful on Unix systems.
Parameters
==========
kernel_id : uuid
The id of the kernel to signal.
"""
self.log.info("Signaled Kernel %s with %s" % (kernel_id, signum))
@kernel_method
def restart_kernel(self, kernel_id, now=False):
"""Restart a kernel by its uuid, keeping the same ports.
Parameters
==========
kernel_id : uuid
            The id of the kernel to restart.
"""
self.log.info("Kernel restarted: %s" % kernel_id)
@kernel_method
def is_alive(self, kernel_id):
"""Is the kernel alive.
This calls KernelManager.is_alive() which calls Popen.poll on the
actual kernel subprocess.
Parameters
==========
kernel_id : uuid
The id of the kernel.
"""
def _check_kernel_id(self, kernel_id):
"""check that a kernel id is valid"""
if kernel_id not in self:
raise KeyError("Kernel with id not found: %s" % kernel_id)
def get_kernel(self, kernel_id):
"""Get the single KernelManager object for a kernel by its uuid.
Parameters
==========
kernel_id : uuid
The id of the kernel.
"""
self._check_kernel_id(kernel_id)
return self._kernels[kernel_id]
@kernel_method
def add_restart_callback(self, kernel_id, callback, event='restart'):
"""add a callback for the KernelRestarter"""
@kernel_method
def remove_restart_callback(self, kernel_id, callback, event='restart'):
"""remove a callback for the KernelRestarter"""
@kernel_method
def get_connection_info(self, kernel_id):
"""Return a dictionary of connection data for a kernel.
Parameters
==========
kernel_id : uuid
The id of the kernel.
Returns
=======
connection_dict : dict
A dict of the information needed to connect to a kernel.
This includes the ip address and the integer port
numbers of the different channels (stdin_port, iopub_port,
shell_port, hb_port).
"""
@kernel_method
def connect_iopub(self, kernel_id, identity=None):
"""Return a zmq Socket connected to the iopub channel.
Parameters
==========
kernel_id : uuid
The id of the kernel
identity : bytes (optional)
The zmq identity of the socket
Returns
=======
stream : zmq Socket or ZMQStream
"""
@kernel_method
def connect_shell(self, kernel_id, identity=None):
"""Return a zmq Socket connected to the shell channel.
Parameters
==========
kernel_id : uuid
The id of the kernel
identity : bytes (optional)
The zmq identity of the socket
Returns
=======
stream : zmq Socket or ZMQStream
"""
@kernel_method
def connect_stdin(self, kernel_id, identity=None):
"""Return a zmq Socket connected to the stdin channel.
Parameters
==========
kernel_id : uuid
The id of the kernel
identity : bytes (optional)
The zmq identity of the socket
Returns
=======
stream : zmq Socket or ZMQStream
"""
@kernel_method
def connect_hb(self, kernel_id, identity=None):
"""Return a zmq Socket connected to the hb channel.
Parameters
==========
kernel_id : uuid
The id of the kernel
identity : bytes (optional)
The zmq identity of the socket
Returns
=======
stream : zmq Socket or ZMQStream
"""
|
|
import os, glob, shutil, traceback
import PIL_Helper
TYPE, TITLE, COLOR, VALUE, FLAVOR = range(5)
DIRECTORY = "BaBOC"
PAGE_WIDTH = 3
PAGE_HEIGHT = 3
TOTAL_CARDS = PAGE_WIDTH*PAGE_HEIGHT
workspace_path = os.path.dirname("workspace")
card_set = os.path.dirname("deck.cards")
CardPath = "BaBOC/cards/"
ResourcePath = "BaBOC/resources/"
VassalTemplatesPath = DIRECTORY+"/vassal templates"
VassalWorkspacePath = DIRECTORY+"/vassal workspace"
VassalImagesPath = os.path.join(VassalWorkspacePath, "images")
VassalCard = [0]
bleed_w = 788
bleed_h = 1088
w_marg = 31
h_marg = 36
bleedrect=[(w_marg,h_marg),(bleed_w-w_marg,bleed_h-h_marg)]
textmaxwidth = 580
LineM=PIL_Helper.Image.open(ResourcePath+"line_M.png")
LineH=PIL_Helper.Image.open(ResourcePath+"line_H.png")
LineG=PIL_Helper.Image.open(ResourcePath+"line_G.png")
LineS=PIL_Helper.Image.open(ResourcePath+"line_S.png")
titlefont = ResourcePath+"ComicNeue-Regular.ttf"
titleboldfont = ResourcePath+"ComicNeue-Bold.ttf"
symbolfont = ResourcePath+"Eligible-Regular.ttf"
TitleFont = PIL_Helper.BuildFont(titleboldfont, 60)
SymbolFont = PIL_Helper.BuildFont(symbolfont, 150)
BigSymbolFont = PIL_Helper.BuildFont(symbolfont, 200)
ValueFont = PIL_Helper.BuildFont(symbolfont, 90)
RulesFont = PIL_Helper.BuildFont(titlefont, 50)
TypeFont = PIL_Helper.BuildFont(titleboldfont, 70)
GenreFont = PIL_Helper.BuildFont(titleboldfont,50)
FlavorFont = PIL_Helper.BuildFont("BaBOC/resources/KlinicSlabBookIt.otf", 40)
CopyFont = PIL_Helper.BuildFont("BaBOC/resources/Barth_Regular.ttf", 10)
TypeAnchor = (bleed_w/2+70, 50)
TitleAnchor = (80, 60)
FormTitleAnchor = (80, -60)
SymbolAnchor = (80, -100)
RulesAnchor = (bleed_w/2+70, 650)
OneLineAnchor = (bleed_w/2+70, 160)
TwoLineAnchor = (bleed_w/2+70, 220)
FlavorAnchor = (bleed_w/2+70, -30)
ColDict={
"G": (225,200,225),
"S": (225,255,225),
"H": (255,225,225),
"M": (225,225,255),
"+": (225,225,225),
"-": (225,225,225)
}
ColDictDark={
"G": (100,0,100),
"S": (25,150,25),
"H": (255,25,25),
"M": (25,25,255),
"+": (225,225,225),
"-": (125,125,125)
}
GenreDict={
"G": "Grimdark",
"S": "Sci-Fi",
"H": "Hardcore",
"M": "Magick"
}
RulesDict={
"FORM": "Counts as a Feature.\n+1 for every card matching your genre.",
"FEATURE": "Play this card to your play area. You may attach Modifiers to this card.",
"MODIFIER": "Play this card on your Form or any Features in your play area.",
"FORM MODIFIER": "Counts as a Modifier but can be played ONLY on your own Form.",
"SWITCH": "Change the sign of a card in your play area to {0}. Can be used as an Interrupt.",
"GENRE CHANGE": "Change the genre of any card in your play area (even your Form). Can be used as an Interrupt."
}
def BuildCard( linein):
tags = linein.strip('\n').replace(r'\n', '\n').split('`')
try:
im = PickCardFunc(tags[TYPE], tags)
MakeVassalCard(im)
except Exception as e:
im = MakeBlankCard()
print "Warning, Bad Card: {0}".format(tags)
traceback.print_exc()
return im
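# Hedged example (editorial) of the backtick-separated line BuildCard expects,
# following the TYPE`TITLE`COLOR`VALUE`FLAVOR layout (the title and flavor
# text below are invented):
#
#   FEATURE`Laser Eyes`S`2`Sees right through you.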
def PickCardFunc( card_type, tags):
if tags[TYPE] == "FORM":
return MakeFormCard(tags)
elif tags[TYPE] == "FEATURE":
return MakeFeatureCard(tags)
elif tags[TYPE] == "MODIFIER":
return MakeModifierCard(tags)
elif tags[TYPE] == "FORM MODIFIER":
return MakeFormModifierCard(tags)
elif tags[TYPE] == "SWITCH":
return MakeSwitchCard(tags)
elif tags[TYPE] == "GENRE CHANGE":
return MakeGenreChangeCard(tags)
elif tags[TYPE] == "BLANK":
return MakeBlankCard()
else:
raise Exception("No card of type {0}".format(tags[TYPE]))
def DrawSidebar( image, color):
PIL_Helper.DrawRect(image, 0, 0, 160, 1088, color)
def DrawLines(image,genres):
for c in genres:
if c=="G":
image.paste(LineG,(0,660),LineG)
if c=="S":
image.paste(LineS,(0,720),LineS)
if c=="M":
image.paste(LineM,(0,800),LineM)
if c=="H":
image.paste(LineH,(0,880),LineH)
def TypeText( image, text):
PIL_Helper.AddText(
image = image,
text = text,
font = TypeFont,
anchor = TypeAnchor
)
def GenreText( image, text, color):
PIL_Helper.AddText(
image = image,
text = text,
font = GenreFont,
fill = color,
anchor = OneLineAnchor,
valign = "top",
halign = "center",
)
def TitleText( image, text, color=(0, 0, 0)):
print text
PIL_Helper.AddText(
image = image,
text = text,
font = TitleFont,
fill = color,
anchor = TitleAnchor,
max_width = bleed_h-150,
leading_offset = 0,
rotate = 90
)
def ValueText( image, text):
PIL_Helper.AddText(
image = image,
text = text,
font = ValueFont,
anchor = (70,920)
)
def SymbolText( image, text):
font = BigSymbolFont if text == "-" else SymbolFont
PIL_Helper.AddText(
image = image,
text = text,
font = font,
anchor = SymbolAnchor,
valign = "center"
)
def RulesText( image, text):
PIL_Helper.AddText(
image = image,
text = text,
font = RulesFont,
anchor = RulesAnchor,
max_width = textmaxwidth,
leading_offset = 0
)
def FlavorText( image, text):
PIL_Helper.AddText(
image = image,
text = text,
font = FlavorFont,
anchor = FlavorAnchor,
max_width = textmaxwidth,
valign = "bottom"
)
def MakeBlankCard():
image = PIL_Helper.BlankImage(bleed_w, bleed_h)
print("Blank Card")
PIL_Helper.AddText(
image = image,
text = "This Card Intentionally Left Blank",
font = TitleFont,
fill = (200,200,200),
anchor = TypeAnchor,
max_width = textmaxwidth
)
return image
def MakeFormCard( tags):
image = PIL_Helper.BlankImage(bleed_w, bleed_h,
color=ColDict[tags[COLOR][0]]
)
TypeText(image, "Form")
GenreText(image,
GenreDict[tags[COLOR][0]],
ColDictDark[tags[COLOR][0]]
)
# Form sidebar type
TitleText(image, "Form")
# Form Title
PIL_Helper.AddText(
image = image,
text = tags[TITLE],
font = TitleFont,
fill = (0,0,0),
anchor = FormTitleAnchor,
max_width = bleed_h,
valign = "bottom",
rotate = 90
)
RulesText(image, RulesDict["FORM"])
if len(tags) > FLAVOR:
FlavorText(image, tags[FLAVOR])
#DrawLines(image,tags[COLOR])
return image
def MakeFeatureCard( tags):
image = PIL_Helper.BlankImage(bleed_w, bleed_h)
DrawSidebar(image, ColDict[tags[COLOR][0]])
DrawLines(image,tags[COLOR])
TypeText(image, "Feature")
GenreText(image,
GenreDict[tags[COLOR][0]],
ColDictDark[tags[COLOR][0]]
)
TitleText(image, tags[TITLE])
ValueText(image, tags[VALUE])
RulesText(image, RulesDict["FEATURE"])
if len(tags) > FLAVOR:
FlavorText(image, tags[FLAVOR])
return image
def MakeModifierCard( tags):
image = PIL_Helper.BlankImage(bleed_w, bleed_h)
DrawSidebar(image, ColDict[tags[COLOR][0]])
DrawLines(image,tags[COLOR])
TypeText(image, "Modifier")
GenreText(image,
GenreDict[tags[COLOR][0]],
ColDictDark[tags[COLOR][0]]
)
TitleText(image, tags[TITLE])
ValueText(image, tags[VALUE])
RulesText(image, RulesDict["MODIFIER"])
if len(tags) > FLAVOR:
FlavorText(image, tags[FLAVOR])
return image
def MakeFormModifierCard( tags):
image = PIL_Helper.BlankImage(bleed_w, bleed_h)
DrawSidebar(image, ColDict[tags[COLOR][0]])
DrawLines(image,tags[COLOR])
TypeText(image, "Form Modifier")
GenreText(image,
GenreDict[tags[COLOR][0]],
ColDictDark[tags[COLOR][0]]
)
TitleText(image, tags[TITLE])
ValueText(image, tags[VALUE])
RulesText(image, RulesDict["FORM MODIFIER"])
if len(tags) > FLAVOR:
FlavorText(image, tags[FLAVOR])
return image
def MakeSwitchCard( tags):
image = PIL_Helper.BlankImage(bleed_w, bleed_h)
DrawSidebar(image, ColDict[tags[COLOR][0]])
DrawLines(image,tags[COLOR])
TypeText(image, "Switch")
TitleText(image, tags[TITLE])
SymbolText(image, tags[COLOR][0])
RulesText(image, RulesDict["SWITCH"].format(tags[COLOR][0]))
if len(tags) > FLAVOR:
FlavorText(image, tags[FLAVOR])
return image
def MakeGenreChangeCard( tags):
image = PIL_Helper.BlankImage(bleed_w, bleed_h)
DrawLines(image,tags[COLOR])
TypeText(image, "Genre Change")
GenreText(image,
GenreDict[tags[COLOR][0]],
ColDictDark[tags[COLOR][0]]
)
TitleText(image, tags[TITLE],
color=ColDictDark[tags[COLOR][0]]
)
SymbolText(image, "<")
RulesText(image, RulesDict["GENRE CHANGE"])
if len(tags) > FLAVOR:
FlavorText(image, tags[FLAVOR])
return image
def BuildPage(card_list, page_num, page_width=PAGE_WIDTH, page_height=PAGE_HEIGHT):
PIL_Helper.BuildPage(card_list, page_width, page_height,r"page_{0:>03}.png".format(page_num))
def InitVassalModule(): pass
def MakeVassalCard(im):
VassalCard[0]+=1
#BuildCard(line).save(VassalImagesPath + "/" + str(VassalCard) + ".png")
    im.save(os.path.join(VassalImagesPath, str(VassalCard[0]) + ".png"))
def CompileVassalModule(): pass
if __name__ == "__main__":
print "Not a main module. Run GameGen.py"
|
|
# Class definition:
# SiteInformation
# This class is responsible for downloading, verifying and manipulating queuedata
# Note: not compatible with Singleton Design Pattern due to the subclassing
import os
import re
import commands
import urlparse
from pUtil import tolog, getExtension, replace, readpar, getDirectAccessDic
from pUtil import getExperiment as getExperimentObject
from PilotErrors import PilotErrors
class SiteInformation(object):
"""
Should this class ask the Experiment class which the current experiment is?
Not efficient if every readpar() calls some Experiment method unless Experiment is a singleton class as well
"""
# private data members
__experiment = "generic"
__instance = None # Boolean used by subclasses to become a Singleton
__error = PilotErrors() # PilotErrors object
__securityKeys = {}
def __init__(self):
""" Default initialization """
# e.g. self.__errorLabel = errorLabel
pass
def readpar(self, par, alt=False):
""" Read parameter variable from queuedata """
value = ""
fileName = self.getQueuedataFileName(alt=alt)
try:
fh = open(fileName)
except:
try:
# try without the path
fh = open(os.path.basename(fileName))
except Exception, e:
tolog("!!WARNING!!2999!! Could not read queuedata file: %s" % str(e))
fh = None
if fh:
queuedata = fh.read()
fh.close()
if queuedata != "":
value = self.getpar(par, queuedata, containsJson=fileName.endswith("json"))
# repair JSON issue
if value == None:
value = ""
return value
def getpar(self, par, s, containsJson=False):
""" Extract par from s """
parameter_value = ""
if containsJson:
# queuedata is a json string
from json import loads
pars = loads(s)
if pars.has_key(par):
parameter_value = pars[par]
if type(parameter_value) == unicode: # avoid problem with unicode for strings
parameter_value = parameter_value.encode('ascii')
else:
tolog("WARNING: Could not find parameter %s in queuedata" % (par))
parameter_value = ""
else:
# queuedata is a string on the form par1=value1|par2=value2|...
matches = re.findall("(^|\|)([^\|=]+)=",s)
for tmp,tmpPar in matches:
if tmpPar == par:
patt = "%s=(.*)" % par
idx = matches.index((tmp,tmpPar))
if idx+1 == len(matches):
patt += '$'
else:
patt += "\|%s=" % matches[idx+1][1]
mat = re.search(patt,s)
if mat:
parameter_value = mat.group(1)
else:
parameter_value = ""
return parameter_value
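        # Hedged example (editorial) of the non-JSON queuedata format parsed
        # above (par1=value1|par2=value2|...); the values are placeholders:
        #
        #   getpar('copytool', 'appdir=/sw|copytool=lcgcp|status=online')
        #   -> 'lcgcp'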
def getQueuedataFileName(self, useExtension=None, check=True, alt=False):
""" Define the queuedata filename """
# use a forced extension if necessary
if useExtension:
extension = useExtension
else:
extension = getExtension(alternative='dat')
# prepend alt. for alternative stage-out site queuedata
if alt:
extension = "alt." + extension
path = "%s/queuedata.%s" % (os.environ['PilotHomeDir'], extension)
# remove the json extension if the file cannot be found (complication due to wrapper)
if not os.path.exists(path) and check:
if extension == 'json':
_path = path.replace('.json', '.dat')
if os.path.exists(_path):
tolog("Updating queuedata file name to: %s" % (_path))
path = _path
else:
tolog("!!WARNING!! Queuedata paths do not exist: %s, %s" % (path, _path))
if extension == 'dat':
_path = path.replace('.dat', '.json')
if os.path.exists(_path):
tolog("Updating queuedata file name to: %s" % (_path))
path = _path
else:
tolog("!!WARNING!! Queuedata paths do not exist: %s, %s" % (path, _path))
return path
def replaceQueuedataField(self, field, value, verbose=True):
""" replace a given queuedata field with a new value """
# copytool = <whatever> -> lcgcp
# replaceQueuedataField("copytool", "lcgcp")
status = False
verbose = True
queuedata_filename = self.getQueuedataFileName()
if "json" in queuedata_filename.lower():
if self.replaceJSON(queuedata_filename, field, value):
if verbose:
tolog("Successfully changed %s to: %s" % (field, value))
status = True
else:
stext = field + "=" + self.readpar(field)
rtext = field + "=" + value
if replace(queuedata_filename, stext, rtext):
if verbose:
tolog("Successfully changed %s to: %s" % (field, value))
status = True
else:
tolog("!!WARNING!!1999!! Failed to change %s to: %s" % (field, value))
return status
def replaceJSON(self, queuedata_filename, field, value):
""" Replace/update queuedata field in JSON file """
status = False
from json import load, dump
try:
fp = open(queuedata_filename, "r")
except Exception, e:
tolog("!!WARNING!!4003!! Failed to open file: %s, %s" % (queuedata_filename, e))
else:
try:
dic = load(fp)
except Exception, e:
tolog("!!WARNING!!4004!! Failed to load dictionary: %s" % (e))
else:
fp.close()
if dic.has_key(field):
dic[field] = value
try:
fp = open(queuedata_filename, "w")
except Exception, e:
tolog("!!WARNING!!4005!! Failed to open file: %s, %s" % (queuedata_filename, e))
else:
try:
dump(dic, fp)
except Exception, e:
tolog("!!WARNING!!4005!! Failed to dump dictionary: %s" % (e))
else:
fp.close()
status = True
else:
tolog("!!WARNING!!4005!! No such field in queuedata dictionary: %s" % (field))
return status
def evaluateQueuedata(self):
""" Evaluate environmental variables if used and replace the value in the queuedata """
tolog("Evaluating queuedata")
# the following fields are allowed to contain environmental variables
fields = ["appdir", "copysetup", "copysetupin", "recoverdir", "wntmpdir", "sepath", "seprodpath", "lfcpath", "lfcprodpath"]
# process each field and evaluate the environment variables if present
for field in fields:
# grab the field value and split it since some fields can contain ^-separators
old_values = self.readpar(field)
new_values = []
try:
for value in old_values.split("^"):
pipe = ""
if value.startswith("$"):
# get rid of any |-signs (e.g. appdir containing nightlies bit)
if "|" in value:
pipe = value[value.find('|'):] # add this back later (e.g. pipe = "|nightlies")
value = value[:value.find('|')]
# evaluate the environmental variable
new_value = os.path.expandvars(value)
if new_value == "":
tolog("!!WARNING!!2999!! Environmental variable not set: %s" % (value))
value = new_value + pipe
new_values.append(value)
# rebuild the string (^-separated if necessary)
new_values_joined = '^'.join(new_values)
# replace the field value in the queuedata with the new value
if new_values_joined != old_values:
if self.replaceQueuedataField(field, new_values_joined, verbose=False):
tolog("Updated field %s in queuedata (replaced \'%s\' with \'%s\')" % (field, old_values, new_values_joined))
except:
# ignore None values
continue
def verifyQueuedata(self, queuename, filename, _i, _N, url):
""" Verify the consistency of the queuedata """
hasQueuedata = False
try:
f = open(filename, "r")
except Exception, e:
tolog("!!WARNING!!1999!! Open failed with %s" % (e))
else:
output = f.read()
f.close()
if not ('appdir' in output and 'copytool' in output):
if len(output) == 0:
tolog("!!WARNING!!1999!! curl command returned empty queuedata (wrong queuename %s?)" % (queuename))
else:
tolog("!!WARNING!!1999!! Attempt %d/%d: curl command did not return valid queuedata from config DB server %s" %\
(_i, _N, url))
output = output.replace('\n', '')
output = output.replace(' ', '')
tolog("!!WARNING!!1999!! Output begins with: %s" % (output[:64]))
try:
os.remove(filename)
except Exception, e:
tolog("!!WARNING!!1999!! Failed to remove file %s: %s" % (filename, e))
else:
# found valid queuedata info, break the for-loop
tolog("schedconfigDB returned: %s" % (output))
hasQueuedata = True
return hasQueuedata
def getQueuedata(self, queuename, forceDownload=False, alt=False, url=""):
""" Download the queuedata if not already downloaded """
        # Queuedata is the dump of all queue configuration data for a given site. This method downloads and stores queuedata in a JSON or pickle
# file called queuedata.[json|dat]. JSON format is preferable and is used for python versions >= 2.6.
#
        # Execute the following command for a queuedata example:
# curl --connect-timeout 20 --max-time 120 -sS "http://pandaserver.cern.ch:25085/cache/schedconfig/CERN-PROD-all-prod-CEs.pilot.json"
# Input:
# queuename = name of the PanDA queue (e.g. CERN-PROD-all-prod-CEs)
# forceDownload = False (default),
# alt = False (default), if alternative queuedata should be downloaded (if stage-out to an alternative SE, new queuedata is needed
# but it will not overwrite the old queuedata)
# Returns:
# error code (int), status for queuedata download (boolean)
if url == "":
exp = getExperimentObject(self.__experiment)
url = exp.getSchedconfigURL()
tolog("The schedconfig URL was not set by the wrapper - Will use default server url = %s (hardcoded)" % (url))
if not os.environ.has_key('PilotHomeDir'):
os.environ['PilotHomeDir'] = commands.getoutput('pwd')
hasQueuedata = False
# try the config servers one by one in case one of them is not responding
# in case the wrapper has already downloaded the queuedata, it might have a .dat extension
# otherwise, give it a .json extension if possible
filename_dat = self.getQueuedataFileName(useExtension='dat', check=False, alt=alt)
if os.path.exists(filename_dat):
filename = filename_dat
else:
filename = self.getQueuedataFileName(check=False, alt=alt)
if os.path.exists(filename) and not forceDownload:
tolog("Queuedata has already been downloaded by pilot wrapper script (will confirm validity)")
hasQueuedata = self.verifyQueuedata(queuename, filename, 1, 1, "(see batch log for url)")
if hasQueuedata:
tolog("Queuedata was successfully downloaded by pilot wrapper script")
else:
tolog("Queuedata was not downloaded successfully by pilot wrapper script, will try again")
if not hasQueuedata:
# loop over pandaserver round robin _N times until queuedata has been verified, or fail
ret = -1
if os.environ.has_key('X509_USER_PROXY'):
sslCert = os.environ['X509_USER_PROXY']
else:
sslCert = '/tmp/x509up_u%s' % str(os.getuid())
cmd = 'curl --connect-timeout 20 --max-time 120 --cacert %s -sS "%s:25085/cache/schedconfig/%s.all.%s" > %s' % \
(sslCert, url, queuename, getExtension(alternative='pilot'), filename)
_N = 3
for _i in range(_N):
tolog("Executing command: %s" % (cmd))
try:
# output will be empty since we pipe into a file
ret, output = commands.getstatusoutput(cmd)
except Exception, e:
tolog("!!WARNING!!1999!! Failed with curl command: %s" % str(e))
return -1, False
else:
if ret == 0:
# read back the queuedata to verify its validity
hasQueuedata = self.verifyQueuedata(queuename, filename, _i, _N, url)
if hasQueuedata:
break
else:
tolog("!!WARNING!!1999!! curl command exited with code %d" % (ret))
return 0, hasQueuedata
def postProcessQueuedata(self, queuename, pshttpurl, thisSite, _jobrec, force_devpilot):
""" Update queuedata fields if necessary """
if 'pandadev' in pshttpurl or force_devpilot or thisSite.sitename == "CERNVM":
ec = self.replaceQueuedataField("status", "online")
_status = self.readpar('status')
if _status != None and _status != "":
if _status.upper() == "OFFLINE":
tolog("Site %s is currently in %s mode - aborting pilot" % (thisSite.sitename, _status.lower()))
return -1, None, None
else:
tolog("Site %s is currently in %s mode" % (thisSite.sitename, _status.lower()))
# override pilot run options
temp_jobrec = self.readpar('retry')
if temp_jobrec.upper() == "TRUE":
tolog("Job recovery turned on")
_jobrec = True
elif temp_jobrec.upper() == "FALSE":
tolog("Job recovery turned off")
_jobrec = False
else:
tolog("Job recovery variable (retry) not set")
# evaluate the queuedata if needed
self.evaluateQueuedata()
# set pilot variables in case they have not been set by the pilot launcher
thisSite = self.setUnsetVars(thisSite)
return 0, thisSite, _jobrec
def extractQueuedataOverwrite(self, jobParameters):
""" Extract the queuedata overwrite key=value pairs from the job parameters """
# The dictionary will be used to overwrite existing queuedata values
# --overwriteQueuedata={key1=value1,key2=value2}
queuedataUpdateDictionary = {}
# define regexp pattern for the full overwrite command
pattern = re.compile(r'\ \-\-overwriteQueuedata\=\{.+}')
fullCommand = re.findall(pattern, jobParameters)
if fullCommand[0] != "":
# tolog("Extracted the full command from the job parameters: %s" % (fullCommand[0]))
# e.g. fullCommand[0] = '--overwriteQueuedata={key1=value1 key2=value2}'
# remove the overwriteQueuedata command from the job parameters
jobParameters = jobParameters.replace(fullCommand[0], "")
tolog("Removed the queuedata overwrite command from job parameters: %s" % (jobParameters))
# define regexp pattern for the full overwrite command
pattern = re.compile(r'\-\-overwriteQueuedata\=\{(.+)\}')
# extract the key value pairs string from the already extracted full command
keyValuePairs = re.findall(pattern, fullCommand[0])
# e.g. keyValuePairs[0] = 'key1=value1,key2=value2'
if keyValuePairs[0] != "":
# tolog("Extracted the key value pairs from the full command: %s" % (keyValuePairs[0]))
# remove any extra spaces if present
keyValuePairs[0] = keyValuePairs[0].replace(" ", "")
commaDictionary = {}
if "\'" in keyValuePairs[0] or '\"' in keyValuePairs[0]:
tolog("Detected quotation marks in the job parameters: %s" % (keyValuePairs[0]))
# e.g. key1=value1,key2=value2,key3='value3,value4'
# handle quoted key-values separately
                    # replace any single quotation marks with double quotation marks to simplify the regexp below
keyValuePairs[0] = keyValuePairs[0].replace("\'",'\"')
keyValuePairs[0] = keyValuePairs[0].replace('\\"','\"') # in case double backslashes are present
# extract all values containing commas
commaList = re.findall('"([^"]*)"', keyValuePairs[0])
# create a dictionary with key-values using format "key_%d" = value, where %d is the id of the found value
# e.g. { key_1: valueX,valueY,valueZ, key_2: valueA,valueB }
# replace the original comma-containing value with "key_%d", and replace it later
commaDictionary = {}
counter = 0
for commaValue in commaList:
counter += 1
key = 'key_%d' % (counter)
commaDictionary[key] = commaValue
keyValuePairs[0] = keyValuePairs[0].replace('\"'+commaValue+'\"', key)
tolog("keyValuePairs=%s" % (keyValuePairs[0]))
tolog("commaDictionary=%s" % str(commaDictionary))
# define the regexp pattern for the actual key=value pairs
# full backslash escape, see (adjusted for python):
# http://stackoverflow.com/questions/168171/regular-expression-for-parsing-name-value-pairs
pattern = re.compile( r'((?:\\.|[^=,]+)*)=("(?:\\.|[^"\\]+)*"|(?:\\.|[^,"\\]+)*)' )
# finally extract the key=value parameters
keyValueList = re.findall(pattern, keyValuePairs[0])
# e.g. keyValueList = [('key1', 'value1'), ('key2', 'value_2')]
# put the extracted pairs in a proper dictionary
if keyValueList != []:
tolog("Extracted the following key value pairs from job parameters: %s" % str(keyValueList))
for keyValueTuple in keyValueList:
key = keyValueTuple[0]
value = keyValueTuple[1]
if key != "":
# extract the value from the commaDictionary if it exists
if commaDictionary.has_key(value):
value = commaDictionary[value]
queuedataUpdateDictionary[key] = value
else:
tolog("!!WARNING!!1223!! Bad key detected in key value tuple: %s" % str(keyValueTuple))
else:
tolog("!!WARNING!!1223!! Failed to extract the key value pair list from: %s" % (keyValuePairs[0]))
else:
tolog("!!WARNING!!1223!! Failed to extract the key value pairs from: %s" % (keyValuePairs[0]))
else:
tolog("!!WARNING!!1223!! Failed to extract the full queuedata overwrite command from jobParameters=%s" % (jobParameters))
return jobParameters, queuedataUpdateDictionary
def updateQueuedataFromJobParameters(self, jobParameters):
""" Extract queuedata overwrite command from job parameters and update queuedata """
tolog("called updateQueuedataFromJobParameters with: %s" % (jobParameters))
transferType = ""
# extract and remove queuedata overwrite command from job parameters
if "--overwriteQueuedata" in jobParameters:
tolog("Encountered an --overwriteQueuedata command in the job parameters")
            # (jobParameters might be updated [the queuedata overwrite command is removed if present], so it needs to be returned)
jobParameters, queuedataUpdateDictionary = self.extractQueuedataOverwrite(jobParameters)
# update queuedata
if queuedataUpdateDictionary != {}:
tolog("Queuedata will be updated from job parameters")
for field in queuedataUpdateDictionary.keys():
if field.lower() == "transfertype":
# transferType is not a schedconfig field and must be handled separately
transferType = queuedataUpdateDictionary[field]
else:
ec = self.replaceQueuedataField(field, queuedataUpdateDictionary[field])
tolog("Updated %s in queuedata: %s (read back from file)" % (field, self.readpar(field)))
# disable FAX if set in schedconfig
if "--disableFAX" in jobParameters:
tolog("Encountered a --disableFAX command in the job parameters")
# remove string from jobParameters
jobParameters = jobParameters.replace(" --disableFAX", "")
# update queuedata if necessary
if readpar("allowfax").lower() == "true":
field = "allowfax"
ec = self.replaceQueuedataField(field, "False")
tolog("Updated %s in queuedata: %s (read back from file)" % (field, self.readpar(field)))
else:
tolog("No need to update queuedata for --disableFAX (allowfax is not set to True)")
return jobParameters, transferType
def setUnsetVars(self, thisSite):
""" Set pilot variables in case they have not been set by the pilot launcher """
# thisSite will be updated and returned
tolog('Setting unset pilot variables using queuedata')
if thisSite.appdir == "":
scappdir = self.readpar('appdir')
if os.environ.has_key("OSG_APP") and not os.environ.has_key("VO_ATLAS_SW_DIR"):
if scappdir == "":
scappdir = os.environ["OSG_APP"]
if scappdir == "":
scappdir = "/usatlas/projects/OSG"
tolog('!!WARNING!!4000!! appdir not set in queuedata or $OSG_APP: using default %s' % (scappdir))
else:
tolog('!!WARNING!!4000!! appdir not set in queuedata - using $OSG_APP: %s' % (scappdir))
tolog('appdir: %s' % (scappdir))
thisSite.appdir = scappdir
if thisSite.dq2url == "":
_dq2url = self.readpar('dq2url')
if _dq2url == "":
tolog('Note: dq2url not set')
else:
tolog('dq2url: %s' % (_dq2url))
thisSite.dq2url = _dq2url
if thisSite.wntmpdir == "":
_wntmpdir = self.readpar('wntmpdir')
if _wntmpdir == "":
_wntmpdir = thisSite.workdir
tolog('!!WARNING!!4000!! wntmpdir not set - using site workdir: %s' % (_wntmpdir))
tolog('wntmpdir: %s' % (_wntmpdir))
thisSite.wntmpdir = _wntmpdir
return thisSite
def isTier1(self, sitename):
""" Is the given site a Tier-1? """
return False
def isTier2(self, sitename):
""" Is the given site a Tier-2? """
return False
def isTier3(self):
""" Is the given site a Tier-3? """
# Note: defined by DB
return False
def updateCopysetup(self, jobParameters, field, _copysetup, transferType=None, useCT=None, directIn=None, useFileStager=None):
"""
Update copysetup in the presence of directIn and/or useFileStager in jobParameters
        Possible copysetup values are:
"setup^oldPrefix^newPrefix^useFileStager^directIn"
"setup^oldPrefix1,oldPrefix2^newPrefix1,newPrefix2^useFileStager^directIn"
"setup^useFileStager^directIn"
"setup"
"None"
"""
# get copysetup from queuedata
copysetup = _copysetup
tolog("updateCopysetup: copysetup=%s" % (copysetup))
if "^" in copysetup:
fields = copysetup.split("^")
n = len(fields)
# fields[0] = setup
# fields[1] = useFileStager
# fields[2] = directIn
# or
# fields[0] = setup
# fields[1] = oldPrefix or oldPrefix1, oldPrefix2
# fields[2] = newPrefix or newPrefix1, newPrefix2
# fields[3] = useFileStager
# fields[4] = directIn
if n == 3 or n == 5:
# update the relevant fields if necessary
if useCT:
tolog("Copy tool is enforced, turning off any set remote I/O or file stager options")
fields[n-1] = "False"
fields[n-2] = "False"
else:
# in case directIn or useFileStager were set by accessmode via jobParameters
if directIn or useFileStager:
if useFileStager and directIn:
fields[n-1] = "True" # directIn
fields[n-2] = "True" # useFileStager
elif directIn and not useFileStager:
fields[n-1] = "True" # directIn
fields[n-2] = "False" # useFileStager
if transferType == "direct":
fields[n-1] = "True" # directIn
fields[n-2] = "False" # make sure file stager is turned off
# in case directIn or useFileStager were set in jobParameters or with transferType
else:
if fields[n-1].lower() == "false" and ("--directIn" in jobParameters or transferType == "direct"):
fields[n-1] = "True" # directIn
fields[n-2] = "False" # useFileStager
if fields[n-2].lower() == "false" and "--useFileStager" in jobParameters:
fields[n-1] = "True" # directIn
fields[n-2] = "True" # useFileStager
if "," in copysetup:
tolog("Multiple old/new prefices, turning off any set remote I/O or file stager options")
fields[n-1] = "False"
fields[n-2] = "False"
copysetup = "^".join(fields)
else:
tolog("!!WARNING!!2990!! This site is not setup properly for using direct access/file stager: copysetup=%s" % (copysetup))
else:
if transferType == "direct" or directIn and not useFileStager:
copysetup += "^False^True"
elif useFileStager:
copysetup += "^True^True"
# undo remote I/O copysetup modification if requested
if transferType == "undodirect" and "^" in copysetup:
tolog("Requested re-modification of copysetup due to previous error")
fields = copysetup.split("^")
copysetup = fields[0]
copysetup += "^False^False"
# update copysetup if updated
if copysetup != _copysetup:
ec = self.replaceQueuedataField(field, copysetup)
tolog("Updated %s in queuedata: %s (read back from file)" % (field, self.readpar(field)))
else:
tolog("copysetup does not need to be updated")
def getAppdirs(self, appdir):
""" Create a list of all appdirs in appdir """
# appdir = '/cvmfs/atlas.cern.ch/repo/sw|nightlies^/cvmfs/atlas-nightlies.cern.ch/repo/sw/nightlies'
# -> ['/cvmfs/atlas.cern.ch/repo/sw', '/cvmfs/atlas-nightlies.cern.ch/repo/sw/nightlies']
appdirs = []
if "|" in appdir:
for a in appdir.split("|"):
# remove any processingType
if "^" in a:
a = a.split("^")[1]
appdirs.append(a)
else:
appdirs.append(appdir)
return appdirs
def extractAppdir(self, appdir, processingType, homePackage):
""" extract and (re-)confirm appdir from possibly encoded schedconfig.appdir """
# e.g. for CERN:
# processingType = unvalid
# schedconfig.appdir = /afs/cern.ch/atlas/software/releases|release^/afs/cern.ch/atlas/software/releases|unvalid^/afs/cern.ch/atlas/software/unvalidated/caches
# -> appdir = /afs/cern.ch/atlas/software/unvalidated/caches
# if processingType does not match anything, use the default first entry (/afs/cern.ch/atlas/software/releases)
# NOTE: this function can only be called after a job has been downloaded since processType is unknown until then
ec = 0
tolog("Extracting appdir (current value=%s)" % (appdir))
# override processingType for analysis jobs that use nightlies
if "rel_" in homePackage:
tolog("Temporarily modifying processingType from %s to nightlies" % (processingType))
processingType = "nightlies"
_appdir = appdir
if "|" in _appdir and "^" in _appdir:
# extract appdir by matching with processingType
appdir_split = _appdir.split("|")
appdir_default = appdir_split[0]
# loop over all possible appdirs
sub_appdir = ""
for i in range(1, len(appdir_split)):
# extract the processingType and sub appdir
sub_appdir_split = appdir_split[i].split("^")
if processingType == sub_appdir_split[0]:
# found match
sub_appdir = sub_appdir_split[1]
break
if sub_appdir == "":
_appdir = appdir_default
tolog("Using default appdir: %s (processingType = \'%s\')" % (_appdir, processingType))
else:
_appdir = sub_appdir
tolog("Matched processingType %s to appdir %s" % (processingType, _appdir))
else:
# check for empty appdir's on LCG
if _appdir == "":
if os.environ.has_key("VO_ATLAS_SW_DIR"):
_appdir = os.environ["VO_ATLAS_SW_DIR"]
tolog("Set site.appdir to %s" % (_appdir))
else:
tolog("Got plain appdir: %s" % (_appdir))
# should the software directory be verified? (at the beginning of the pilot)
if self.verifySoftwareDirectory():
# verify the existence of appdir
if os.path.exists(_appdir):
tolog("Software directory %s exists" % (_appdir))
# force queuedata update
_ec = self.replaceQueuedataField("appdir", _appdir)
del _ec
else:
if _appdir != "":
tolog("!!FAILED!!1999!! Software directory does not exist: %s" % (_appdir))
else:
tolog("!!FAILED!!1999!! Software directory (appdir) is not set")
ec = self.__error.ERR_NOSOFTWAREDIR
else:
tolog("WARNING: Software directory will not be verified")
return ec, _appdir
def verifySoftwareDirectory(self):
""" Should the software directory (schedconfig.appdir) be verified? """
return True
def getExperiment(self):
""" Return a string with the experiment name """
return self.__experiment
def allowAlternativeStageOut(self, flag=None):
""" Is alternative stage-out allowed? """
# E.g. if stage-out to primary SE (at Tier-2) fails repeatedly, is it allowed to attempt stage-out to secondary SE (at Tier-1)?
# Argument 'flag' can be used for special conditions
return False
def forceAlternativeStageOut(self, flag=None):
""" Force stage-out to use alternative SE """
# Argument 'flag' can be used for special conditions
# See allowAlternativeStageOut()
return False
def getSSLCertificate(self):
""" Return the path to the SSL certificate """
if os.environ.has_key('X509_USER_PROXY'):
sslCertificate = os.environ['X509_USER_PROXY']
else:
sslCertificate = '/tmp/x509up_u%s' % str(os.getuid())
return sslCertificate
def getSSLCertificatesDirectory(self):
""" Return the path to the SSL certificates directory """
sslCertificatesDirectory = ''
if os.environ.has_key('X509_CERT_DIR'):
sslCertificatesDirectory = os.environ['X509_CERT_DIR']
else:
_dir = '/etc/grid-security/certificates'
if os.path.exists(_dir):
sslCertificatesDirectory = _dir
else:
tolog("!!WARNING!!2999!! $X509_CERT_DIR is not set and default location %s does not exist" % (_dir))
return sslCertificatesDirectory
def getProperPaths(self, error, analyJob, token, prodSourceLabel, dsname, filename, **pdict):
""" Return proper paths for the storage element used during stage-out """
# Implement in sub-class
return ""
def getTier1Queue(self, cloud):
""" Download the queuedata for the Tier-1 in the corresponding cloud and get the queue name """
# Implement in sub-class
# This method is used during stage-out to alternative [Tier-1] site when primary stage-out on a Tier-2 fails
# See methods in ATLASSiteInformation
return None
def getCopySetup(self, stageIn=False):
"""Get the setup string from queuedata"""
copysetup = ""
if stageIn:
copysetup = readpar('copysetupin')
if copysetup == "":
copysetup = readpar('copysetup')
tolog("Using copysetup = %s" % (copysetup))
else:
tolog("Using copysetupin = %s" % (copysetup))
if copysetup != '':
# discard the directAccess info also stored in this variable
_count = copysetup.count('^')
if _count > 0:
# make sure the DB entry doesn't start with directAccess info
if _count == 2 or _count == 4 or _count == 5:
copysetup = copysetup.split('^')[0]
else:
tolog('!!WARNING!!2999!! Could not figure out copysetup: %s' % (copysetup))
tolog('!!WARNING!!2999!! Resetting copysetup to an empty string')
copysetup = ''
# Check copysetup actually exists!
if copysetup != '' and os.access(copysetup, os.R_OK) == False:
tolog('WARNING: copysetup %s is not readable - resetting to empty string' % (copysetup))
copysetup = ''
else:
tolog("copysetup is: %s (file access verified)" % (copysetup))
else:
tolog("No copysetup found in queuedata")
return copysetup
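# Illustrative note (hypothetical queuedata value, not from a real site): the
# copysetup field may carry direct-access settings appended with '^' separators,
# for example
#   copysetup = "/cvmfs/grid/setup.sh^srm://old-prefix^root://new-prefix^False^True"
# contains 4 '^' separators, so getCopySetup() keeps only the leading setup script path:
#   copysetup.split('^')[0]  ->  "/cvmfs/grid/setup.sh"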
def getCopyTool(self, stageIn=False):
"""
Selects the correct copy tool (SiteMover id) given a site name
'mode' is used to distinguish between different copy commands
"""
copytoolname = ''
if stageIn:
copytoolname = readpar('copytoolin')
if copytoolname == "":
# not set, use same copytool for stage-in as for stage-out
copytoolname = readpar('copytool')
if copytoolname.find('^') > -1:
copytoolname, pstage = copytoolname.split('^')
if copytoolname == '':
tolog("!!WARNING!!2999!! copytool not found (using default cp)")
copytoolname = 'cp'
copysetup = self.getCopySetup(stageIn)
return (copytoolname, copysetup)
def getCopyPrefix(self, stageIn=False):
"""Get Copy Prefix"""
copyprefix = ""
if stageIn:
copyprefix = readpar('copyprefixin')
if copyprefix == "":
copyprefix = readpar('copyprefix')
tolog("Using copyprefix = %s" % (copyprefix))
else:
tolog("Using copyprefixin = %s" % (copyprefix))
return copyprefix
def getCopyPrefixList(self, copyprefix):
""" extract from and to info from copyprefix """
pfrom = ""
pto = ""
if copyprefix != "":
if copyprefix.count("^") == 1:
pfrom, pto = copyprefix.split("^")
elif copyprefix.startswith("^") or copyprefix.count("^") > 1:
tolog("!!WARNING!!2988!! copyprefix has wrong format (not pfrom^pto): %s" % (copyprefix))
else:
pfrom = copyprefix
if pfrom == "":
pfrom = "dummy"
else:
if pfrom.endswith('/'):
pfrom = pfrom[:-1]
tolog("Cut away trailing / from %s (see copyprefix[in])" % (pfrom))
if pto == "":
pto = "dummy"
if "," in pfrom:
pfroms = pfrom.split(",")
else:
pfroms = [pfrom]
if "," in pto:
ptos = pto.split(",")
else:
ptos = [pto]
return pfroms, ptos
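# Example (using the copyprefix format shown in the getCopyPrefixPath() docstring below):
#   copyprefix = "srm://srm-eosatlas.cern.ch,srm://srm-atlas.cern.ch^root://eosatlas.cern.ch/,root://castoratlas-xrdssl/"
# getCopyPrefixList() returns
#   pfroms = ["srm://srm-eosatlas.cern.ch", "srm://srm-atlas.cern.ch"]
#   ptos   = ["root://eosatlas.cern.ch/", "root://castoratlas-xrdssl/"]
# while a plain value without '^' yields ptos = ["dummy"].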
def getCopyPrefixPath(self, path, stageIn=False):
"""convert path to copy prefix path """
# figure out which copyprefix to use (use the PFN to figure out where the file is and then use the appropriate copyprefix)
# e.g. copyprefix=srm://srm-eosatlas.cern.ch,srm://srm-atlas.cern.ch^root://eosatlas.cern.ch/,root://castoratlas-xrdssl/
# PFN=srm://srm-eosatlas.cern.ch/.. use copyprefix root://eosatlas.cern.ch/ to build the TURL src_loc_pfn
# full example:
# Using copyprefixin = srm://srm-eosatlas.cern.ch,srm://srm-atlas.cern.ch^root://eosatlas.cern.ch/,root://castoratlas-xrdssl/
# PFN=srm://srm-eosatlas.cern.ch/eos/atlas/atlasdatadisk/rucio/mc12_8TeV/8d/c0/EVNT.01212395._000004.pool.root.1
# TURL=root://eosatlas.cern.ch//eos/atlas/atlasdatadisk/rucio/mc12_8TeV/8d/c0/EVNT.01212395._000004.pool.root.1
copyprefix = self.getCopyPrefix(stageIn=stageIn)
if copyprefix == "":
errorLog = "Empty copyprefix, cannot continue"
tolog("!!WARNING!!1777!! %s" % (errorLog))
return path
# handle copyprefix lists
pfroms, ptos = self.getCopyPrefixList(copyprefix)
if len(pfroms) != len(ptos):
errorLog = "Copyprefix lists not of equal length: %s, %s" % (str(pfroms), str(ptos))
tolog("!!WARNING!!1777!! %s" % (errorLog))
return path
if "SFN" in path:
local_path = path.split('SFN=')[1]
else:
local_path = '/' + path.split('/', 3)[3] # 0:method, 2:host+port, 3:abs-path
ret_path = path
for (pfrom, pto) in map(None, pfroms, ptos):
if (pfrom != "" and pfrom != None and pfrom != "dummy") and (pto != "" and pto != None and pto != "dummy"):
if path[:len(pfrom)] == pfrom or path[:len(pto)] == pto:
ret_path = pto + local_path
ret_path = ret_path.replace('///','//')
break
return ret_path
def getCopyPrefixPathNew(self, path, stageIn=False):
"""convert path to copy prefix path """
# figure out which copyprefix to use (use the PFN to figure out where the file is and then use the appropriate copyprefix)
# e.g. copyprefix=srm://srm-eosatlas.cern.ch,srm://srm-atlas.cern.ch^root://eosatlas.cern.ch/,root://castoratlas-xrdssl/
# PFN=srm://srm-eosatlas.cern.ch/.. use copyprefix root://eosatlas.cern.ch/ to build the TURL src_loc_pfn
# full example:
# Using copyprefixin = srm://srm-eosatlas.cern.ch,srm://srm-atlas.cern.ch^root://eosatlas.cern.ch/,root://castoratlas-xrdssl/
# PFN=srm://srm-eosatlas.cern.ch/eos/atlas/atlasdatadisk/rucio/mc12_8TeV/8d/c0/EVNT.01212395._000004.pool.root.1
# TURL=root://eosatlas.cern.ch//eos/atlas/atlasdatadisk/rucio/mc12_8TeV/8d/c0/EVNT.01212395._000004.pool.root.1
copyprefix = self.getCopyPrefix(stageIn=stageIn)
if copyprefix == "":
errorLog = "Empty copyprefix, cannot continue"
tolog("!!WARNING!!1777!! %s" % (errorLog))
return path
# handle copyprefix lists
pfroms, ptos = self.getCopyPrefixList(copyprefix)
if len(pfroms) != len(ptos):
errorLog = "Copyprefix lists not of equal length: %s, %s" % (str(pfroms), str(ptos))
tolog("!!WARNING!!1777!! %s" % (errorLog))
return path
ret_path = path
for (pfrom, pto) in map(None, pfroms, ptos):
ret_path = re.sub(pfrom, pto, ret_path)
ret_path = ret_path.replace('///','//')
return ret_path
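# Sketch of how this "new" variant differs from getCopyPrefixPath() above: each
# pfrom entry is treated as a regular expression and rewritten with re.sub(), so
# a wildcard prefix (hypothetical example in the style of the __main__ test below)
#   pfrom = "srm://dcsrm.usatlas.bnl.gov.*/pnfs/"
#   pto   = "root://dcgftp.usatlas.bnl.gov:1096/pnfs"
# maps any matching SURL onto the corresponding TURL in a single substitution.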
def getCopyFileAccessInfo(self, stageIn=True):
""" return a tuple with all info about how the input files should be accessed """
# default values
oldPrefix = None
newPrefix = None
useFileStager = None
directIn = None
# move input files from local DDM area to workdir if needed using a copy tool (can be turned off below in case of remote I/O)
useCT = True
dInfo = None
if stageIn:
# remove all input root files for analysis job for xrootd sites
# (they will be read by pAthena directly from xrootd)
# create the direct access dictionary
dInfo = getDirectAccessDic(readpar('copysetupin'))
# if copysetupin did not contain direct access info, try the copysetup instead
if not dInfo:
dInfo = getDirectAccessDic(readpar('copysetup'))
# check if we should use the copytool
if dInfo:
if not dInfo['useCopyTool']:
useCT = False
oldPrefix = dInfo['oldPrefix']
newPrefix = dInfo['newPrefix']
useFileStager = dInfo['useFileStager']
directIn = dInfo['directIn']
if useCT:
tolog("Copy tool will be used for stage-in")
else:
if useFileStager:
tolog("File stager mode: Copy tool will not be used for stage-in of root files")
else:
tolog("Direct access mode: Copy tool will not be used for stage-in of root files")
if oldPrefix == "" and newPrefix == "":
tolog("Will attempt to create a TURL based PFC")
return useCT, oldPrefix, newPrefix, useFileStager, directIn
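# Rough usage sketch (illustrative; the exact '^'-separated layout of
# copysetup[in] is parsed by getDirectAccessDic(), not shown here):
#   useCT, oldPrefix, newPrefix, useFileStager, directIn = \
#       si.getCopyFileAccessInfo(stageIn=True)
#   if directIn and not useCT:
#       pass  # root files will be read remotely, no local copy is made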
def getDirectInAccessMode(self, prodDBlockToken, isRootFileName):
"""Get Direct Access mode"""
directIn = False
useFileStager = False
transfer_mode = None
useCT, oldPrefix, newPrefix, useFileStager, directIn = self.getCopyFileAccessInfo(stageIn=True)
if directIn:
if useCT:
directIn = False
tolog("Direct access mode is switched off (file will be transferred with the copy tool)")
#updateFileState(lfn, workDir, jobId, mode="transfer_mode", state="copy_to_scratch", type="input")
transfer_mode = "copy_to_scratch"
else:
# determine if the file is a root file according to its name
rootFile = isRootFileName
if prodDBlockToken == 'local' or not rootFile:
directIn = False
tolog("Direct access mode has been switched off for this file (will be transferred with the copy tool)")
#updateFileState(lfn, workDir, jobId, mode="transfer_mode", state="copy_to_scratch", type="input")
transfer_mode = "copy_to_scratch"
elif rootFile:
tolog("Found root file according to file name (will not be transferred in direct reading mode)")
if useFileStager:
#updateFileState(lfn, workDir, jobId, mode="transfer_mode", state="file_stager", type="input")
transfer_mode = "file_stager"
else:
#updateFileState(lfn, workDir, jobId, mode="transfer_mode", state="remote_io", type="input")
transfer_mode = "remote_io"
else:
tolog("Normal file transfer")
else:
tolog("not directIn")
return directIn, transfer_mode
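# Summary of the main outcomes above, as (directIn, transfer_mode):
#   copy tool enabled                  -> (False, "copy_to_scratch")
#   non-root file or token == 'local'  -> (False, "copy_to_scratch")
#   root file with file stager         -> (True,  "file_stager")
#   root file with direct access       -> (True,  "remote_io")
#   direct access not configured       -> (False, None)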
# Optional
def getFileSystemRootPath(self):
""" Return the root path of the local file system """
# Can e.g. be used to return "/cvmfs" or "/(some path)/cvmfs" in case the expected file system root path is not
# where it usually is (e.g. on an HPC). See example implementation in ATLASSiteInformation
# E.g. site movers that have setup paths on CVMFS use this method to locate the setup script. See e.g. objectstoreSiteMover
return ""
# Required if a local ROOT setup is necessary from e.g. a site mover (FAXSiteMover, objectstoreSiteMover, ..)
def getLocalROOTSetup(self):
""" Prepare the local ROOT setup script """
# See example implementation in ATLASExperiment
# See example usage in objectstoreSiteMover
return ""
# Required if a local EMI setup is necessary (used in GFAL2SiteMover)
def getLocalEMISetup(self):
""" Return the path for the local EMI setup """
return ""
# Required if use S3 objectstore
def getSecurityKey(self, privateKeyName, publicKeyName):
""" Return the key pair """
return {"publicKey": None, "privateKey": None}
# Required if use S3 objectstore
def setSecurityKey(self, privateKeyName, privateKey, publicKeyName, publicKey):
""" Return the key pair """
keyName=privateKeyName + "_" + publicKeyName
self.__securityKeys[keyName] = {"publicKey": publicKey, "privateKey": privateKey}
return {"publicKey": publicKey, "privateKey": privateKey}
if __name__ == "__main__":
from SiteInformation import SiteInformation
import os
os.environ['PilotHomeDir'] = os.getcwd()
s1 = SiteInformation()
print "copytool=",s1.readpar('copytool')
path = 'srm://srm-eosatlas.cern.ch/eos/atlas/atlasdatadisk/rucio/mc12_8TeV/8d/f4/NTUP_SMWZ.00836697._000601.root.1'
print path
ret = s1.getCopyPrefixPath(path, stageIn=True)
print "ret:" + ret
print
path = 'root://atlas-xrd-eos-rucio.cern.ch:1094//atlas/rucio/mc12_8TeV:NTUP_SMWZ.00836697._000601.root.1'
print path
ret = s1.getCopyPrefixPath(path, stageIn=True)
print "ret:" + ret
print
#bnl
s1.replaceQueuedataField("copyprefixin", "srm://dcsrm.usatlas.bnl.gov.*/pnfs/^root://dcgftp.usatlas.bnl.gov:1096/pnfs")
path = 'srm://dcsrm.usatlas.bnl.gov/pnfs/usatlas.bnl.gov/atlasuserdisk/rucio/panda/a7/bf/panda.0317011154.376400.lib._5118143.1962296626.lib.tgz'
print path
ret = s1.getCopyPrefixPath(path, stageIn=True)
print "ret:" + ret
print
path = 'root://dcxrd.usatlas.bnl.gov:1096///atlas/rucio/panda:panda.0317011154.376400.lib._5118143.1962296626.lib.tgz'
print path
ret = s1.getCopyPrefixPath(path, stageIn=True)
print "ret:" + ret
print
#EC2
s1.replaceQueuedataField("copyprefixin", "srm://aws01.racf.bnl.gov.*/mnt/atlasdatadisk,srm://aws01.racf.bnl.gov.*/mnt/atlasuserdisk,srm://aws01.racf.bnl.gov.*/mnt/atlasproddisk^s3://s3.amazonaws.com:80//s3-atlasdatadisk-racf,s3://s3.amazonaws.com:80//s3-atlasuserdisk-racf,s3://s3.amazonaws.com:80//s3-atlasproddisk-racf")
s1.replaceQueuedataField("copyprefix", "srm://aws01.racf.bnl.gov.*/mnt/atlasdatadisk,srm://aws01.racf.bnl.gov.*/mnt/atlasuserdisk,srm://aws01.racf.bnl.gov.*/mnt/atlasproddisk^s3://s3.amazonaws.com:80//s3-atlasdatadisk-racf,s3://s3.amazonaws.com:80//s3-atlasuserdisk-racf,s3://s3.amazonaws.com:80//s3-atlasproddisk-racf")
path = 'srm://aws01.racf.bnl.gov:8443/srm/managerv2?SFN=/mnt/atlasproddisk/rucio/panda/7b/c4/86c7b8a5-d955-41a5-9f0f-36d067b9931b_0.job.log.tgz'
print path
ret = s1.getCopyPrefixPathNew(path, stageIn=True)
print "ret:" + ret
print
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Code for backpropagation using the tape utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import operator
import threading
import six
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.eager import context
from tensorflow.python.eager import execute
from tensorflow.python.eager import imperative_grad
from tensorflow.python.eager import tape
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.util import nest
from tensorflow.python.util import tf_inspect
_op_attr_type_cache = {}
def op_attr_type(op_type, attr_name):
try:
return _op_attr_type_cache[(op_type, attr_name)]
except KeyError:
with errors.raise_exception_on_not_ok_status() as status:
h = context.context()._handle # pylint: disable=protected-access
attr_type = pywrap_tensorflow.TFE_OpNameGetAttrType(
h, op_type, attr_name, status)
_op_attr_type_cache[(op_type, attr_name)] = attr_type
return attr_type
def make_attr(attr_type, value):
if attr_type == pywrap_tensorflow.TF_ATTR_TYPE:
return dtypes.as_dtype(value)
elif attr_type == [pywrap_tensorflow.TF_ATTR_TYPE]:
return [dtypes.as_dtype(v) for v in value]
elif attr_type == pywrap_tensorflow.TF_ATTR_SHAPE:
return tensor_shape.as_shape(value).as_proto()
elif attr_type == [pywrap_tensorflow.TF_ATTR_SHAPE]:
return [tensor_shape.as_shape(v).as_proto() for v in value]
return value
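# Illustrative conversions performed by make_attr() (example values only):
#   make_attr(pywrap_tensorflow.TF_ATTR_TYPE, 1)        -> dtypes.as_dtype(1), e.g. tf.float32
#   make_attr(pywrap_tensorflow.TF_ATTR_SHAPE, [2, 3])  -> a TensorShapeProto for (2, 3)
# List-valued attr types are converted element-wise; anything else is returned unchanged.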
class _MockOp(object):
"""Pretends to be a tf.Operation for the gradient functions."""
def __init__(self, attrs, inputs, outputs, typ):
self.attrs = attrs
self.inputs = inputs
self.outputs = outputs
self.type = typ
def get_attr(self, attr):
typ = op_attr_type(self.type, attr)
for i in range(0, len(self.attrs), 2):
if self.attrs[i] == attr:
return make_attr(typ, self.attrs[i + 1])
raise KeyError(attr)
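# Example of the flat attrs layout consumed by _MockOp.get_attr() (hypothetical
# values): attrs alternates names and values, e.g.
#   attrs = ("transpose_a", False, "transpose_b", False)
# so get_attr("transpose_b") scans the tuple in steps of two and returns
# make_attr(op_attr_type(self.type, "transpose_b"), False).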
def _magic_gradient_function(op_name, attr_tuple, num_inputs,
inputs, outputs, out_grads):
"""Calls the gradient function of the op.
Args:
op_name: the name of the op to be differentiated.
attr_tuple: the attrs, as a tuple.
num_inputs: the number of inputs to the op.
inputs: inputs to the original operation.
outputs: outputs to the original operation.
out_grads: gradients of the operation wrt its outputs.
Returns:
The gradients with respect to the inputs of the function, as a list.
"""
mock_op = _MockOp(attr_tuple, inputs, outputs, op_name)
grad_fn = ops._gradient_registry.lookup(op_name) # pylint: disable=protected-access
if grad_fn is None:
return [None] * num_inputs
return grad_fn(mock_op, *out_grads)
_gradient_functions = {}
_gradient_functions_lock = threading.Lock()
_tracing = False
# TODO(apassos) replace this with a mechanism which can happen at the op
# gradient function registration site, to be less error-prone
# TODO(apassos) add ops other than those in nn_grad and math_grad
_ops_which_dont_need_outputs = set([
"MatMul",
"Conv2DBackpropInput",
"Conv2DBackpropFilter",
"Conv3D",
"Conv3DBackpropInputV2",
"AvgPool3D",
"AvgPool3DGrad",
"MaxPool3D",
"MaxPool3DGrad",
"MaxPool3DGradGrad",
"BiasAdd",
"BiasAddV1",
"BiasAddGrad",
"Relu6",
"Softplus",
"SoftplusGrad",
"Softsign",
"ReluGrad",
"Conv2D",
"DepthwiseConv2dNative",
"Dilation2D",
"AvgPool",
"AvgPoolGrad",
"BatchNormWithGlobalNormalization",
"L2Loss",
"Sum",
"Prod",
"SegmentSum",
"SegmentMean",
"SparseSegmentSum",
"SparseSegmentMean",
"SparseSegmentSqrtN",
"SegmentMin",
"SegmentMax",
"UnsortedSegmentSum",
"UnsortedSegmentMax",
"Abs",
"Neg",
"ReciprocalGrad",
"Square",
"Expm1",
"Log",
"Log1p",
"TanhGrad",
"SigmoidGrad",
"Sign",
"Sin",
"Cos",
"Tan",
"Add",
"Sub",
"Mul",
"Div",
"RealDiv",
"Maximum",
"Minimum",
"SquaredDifference",
"Select",
"SparseMatMul",
"BatchMatMul",
"Complex",
"Real",
"Imag",
"Angle",
"Conj",
"Cast",
"Cross",
"Cumsum",
"Cumprod",
"ReadVariableOp",
"VarHandleOp",
"Shape",
])
_ops_which_dont_need_inputs = set([
"Softmax",
"LogSoftmax",
"BiasAdd",
"Relu",
"Elu",
"Selu",
"SparseSoftmaxCrossEntropyWithLogits",
"Neg",
"Inv",
"Reciprocal",
"Sqrt",
"Exp",
"Tanh",
"Sigmoid",
"Real",
"Imag",
"Conj",
"ReadVariableOp",
"VarHandleOp",
"Shape",
])
# TODO(agarwal): use an automatic mechanism for handling None arguments to
# gradient functions.
# Some gradient functions can accept None arguments for gradients. The following
# maps the operation name to the indices at which the corresponding gradient
# function can accept None values.
# e.g. FusedBatchNorm outputs 5 values and hence receives 5 gradient values
# during backprop. However the gradient function uses only the first of those
# values and ignores the rest. The entry, "FusedBatchNorm": [1, 2, 3, 4],
# indicates that only the gradient corresponding to index 0 is used, and the
# gradient values at indices 1-4 are ignored (and hence can be None). The
# backprop algorithm can then leverage this by not constructing zeros to
# pass for those indices.
_grad_fn_accepts_none_for_indices = {
"SoftmaxCrossEntropyWithLogits": [1],
"FusedBatchNorm": [1, 2, 3, 4]
}
def _record_gradient(op_name, inputs, attrs, results, name):
"""Records gradients for a TensorFlow operation.
Args:
op_name: Name of the TensorFlow operation (see REGISTER_OP in C++ code) to
execute.
inputs: A flat list of Tensor object inputs to the operation.
attrs: A tuple with alternating string attr names and attr values for this
operation.
results: The results of the operation (as a flat list).
name: Customized name for the operation.
Returns:
A list of maybe-wrapped results. Either Tensors or TensorNodes.
Raises:
An exception on error.
"""
if not tape.could_possibly_record():
return
if op_name in _ops_which_dont_need_outputs:
op_outputs = None
else:
# TODO(apassos) this line creates a weak circular reference where the
# backprop function keeps an output alive which in turn keeps the tape entry
# alive which keeps the backprop function alive. Figure out how to break
# this up without breaking second derivatives of ops like Exp whose
# gradients depend only on the outputs.
op_outputs = results
if op_name in _ops_which_dont_need_inputs:
op_inputs = None
else:
op_inputs = inputs
num_inputs = len(inputs)
def grad_fn(*orig_outputs):
"""Generated gradient function."""
result = _magic_gradient_function(op_name, attrs, num_inputs,
op_inputs, op_outputs, orig_outputs)
if _tracing:
print("Gradient for", (name if name else op_name), "inputs", op_inputs,
"output_grads", orig_outputs, "gradients", result)
return nest.flatten(result)
tape.record_operation(op_name, results, inputs, grad_fn)
if _tracing:
print("Computed op", (name if name else op_name), "inputs", inputs,
"outputs", results)
execute.record_gradient = _record_gradient
def implicit_val_and_grad(f):
"""Returns a function which differentiates f with respect to variables.
The wrapped function returns the value and the gradient of f when called with
the same arguments. The gradient is with respect to all TFE variables which
have `variable.watch()` called on them by f.
This function is useful when the exact set of variables to differentiate with
is not known ahead of time.
Example:
```python
dense_layer = tf.layers.Dense(1)
def loss(x, y):
return tf.reduce_sum(tf.square(dense_layer(x) - y))
# Obtain the gradient function.
val_grad_fn = tfe.implicit_value_and_gradients(loss)
# Invoke the gradient function with concrete values of x and y.
x = tf.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
y = tf.constant([[10.0], [20.0]])
value, grads_and_vars = val_grad_fn(x, y)
print('Value of loss: %s' % value)
# Apply the gradients to Variables.
optimizer = tf.train.GradientDescentOptimizer(0.1)
optimizer.apply_gradients(grads_and_vars)
```
Args:
f: The function to be differentiated.
Returns:
A function which, when called, returns a tuple pair.
Its first element is the value to which the function evaluates.
Its second element is list of (gradient, variable) pairs.
"""
# TODO(cais): Remove calls to tf.constant() once the gradients functions
# accept lists and np.ndarrays.
def grad_fn(*args):
"""Computes the gradient of the wrapped function."""
tape.push_new_tape()
end_node = f(*args)
variables = tape.top_tape_watched_variables()
sources = [x.handle for x in variables]
if not sources:
raise ValueError("no trainable variables were accessed while the "
"function was being computed.")
grad = imperative_grad.imperative_grad(_default_vspace,
tape.pop_tape(),
nest.flatten(end_node),
sources)
return end_node, list(zip(grad, variables))
return grad_fn
def implicit_grad(f):
"""Returns a function which differentiates f with respect to variables.
The wrapped function returns the gradient of f when called with the same
arguments. The gradient is with respect to all TFE variables which have
`variable.watch()` called on them by f.
This function is useful when the exact set of variables to differentiate with
is not known ahead of time.
Example:
```python
dense_layer = tf.layers.Dense(1)
def loss(x, y):
return tf.reduce_sum(tf.square(dense_layer(x) - y))
# Obtain the gradient function.
grad_fn = tfe.implicit_gradients(loss)
# Invoke the gradient function with concrete values of x and y.
x = tf.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
y = tf.constant([[10.0], [20.0]])
grads_and_vars = grad_fn(x, y)
# Apply the gradients to Variables.
optimizer = tf.train.GradientDescentOptimizer(0.1)
optimizer.apply_gradients(grads_and_vars)
```
Args:
f: The function to be differentiated.
Returns:
A function which, when called, returns a list of (gradient, variable) pairs.
"""
# TODO(cais): Remove calls to tf.constant() once the gradients functions
# accept lists and np.ndarrays.
def grad_fn(*args, **kwds):
"""Computes the gradient of the wrapped function."""
return implicit_val_and_grad(f)(*args, **kwds)[1]
return grad_fn
def _get_arg_spec(f, params, param_args):
args = tf_inspect.getargspec(f).args
if params is None:
if not args:
return range(len(param_args))
return range(len(args))
elif all(isinstance(x, six.string_types) for x in params):
return [args.index(n) for n in params]
elif all(isinstance(x, int) for x in params):
return params
else:
raise ValueError(
"params must be all strings or all integers; got %s." % params)
def gradients_function(f, params=None):
"""Returns a function which differentiates f with respect to params.
Example:
```python
# f(x, y) = (x ^ 3) * y - x * (y ^ 2)
# Therefore, the 1st order derivatives are:
# df / dx = 3 * (x ^ 2) * y - y ^ 2
# df / dy = x ^ 3 - 2 * x * y
# The 2nd order derivative with respect to x is:
# d^2 f / (dx)^2 = 6 * x * y
def f(x, y):
return x * x * x * y - x * y * y
# Obtain a function that returns 1st order gradients.
grad_fn = tfe.gradients_function(f)
x = 2.0
y = 3.0
# Invoke the 1st order gradient function.
x_grad, y_grad = grad_fn(x, y)
assert x_grad.numpy() == 3 * (2 ** 2) * 3 - 3 ** 2
assert y_grad.numpy() == (2 ** 3) - 2 * 2 * 3
# Obtain a function that returns the 2nd order gradient with respect to x.
gradgrad_fn = tfe.gradients_function(lambda x, y: grad_fn(x, y)[0])
# Invoke the 2nd order gradient function.
x_gradgrad = gradgrad_fn(x, y)[0]
assert x_gradgrad.numpy() == 6 * 2 * 3
# To obtain a callable that returns the gradient(s) of `f` with respect to a
# subset of its inputs, use the `params` keyword argument with
# `gradients_function()`.
ygrad_fn = tfe.gradients_function(f, params=[1])
(y_grad,) = ygrad_fn(x, y)
assert y_grad.numpy() == (2 ** 3) - 2 * 2 * 3
```
Args:
f: function to be differentiated.
params: list of parameter names of f or list of integers indexing the
parameters with respect to which we'll differentiate. Passing None
differentiates with respect to all parameters.
Returns:
function which, when called, returns the gradient of f with respect to
all of `params`. The function takes an extra optional
keyword argument "dy". Setting it allows computation of vector jacobian
products for vectors other than the vector of ones.
Raises:
ValueError: if the params are not all strings or all integers.
"""
def decorated(*args, **kwds):
"""Computes the gradient of the decorated function."""
_, grad = val_and_grad_function(f, params=params)(*args, **kwds)
return grad
return decorated
def _ensure_unique_tensor_objects(parameter_positions, args):
"""Make each of the parameter_positions in args a unique ops.Tensor object.
Ensure that each parameter is treated independently.
For example:
def f(x, y): return x * y
g = gradients_function(f)
one = tf.constant(1.)
g(one, one) should return [1., 1.]
(even though the two arguments are the same Tensor object).
Args:
parameter_positions: List of indices into args defining the arguments to
differentiate against.
args: A list of arguments to the function to be differentiated.
Returns:
args, possibly edited in-place.
"""
s = set()
for (i, t) in enumerate(args):
if i in parameter_positions:
tid = ops.tensor_id(t)
if tid in s:
args[i] = args[i]._dup() # pylint: disable=protected-access
else:
s.add(tid)
return args
def val_and_grad_function(f, params=None):
"""Returns a function that computes f and is derivative w.r.t. params.
Example:
```python
# f(x, y) = (x ^ 3) * y - x * (y ^ 2)
# Therefore, the 1st order derivatives are:
# df / dx = 3 * (x ^ 2) * y - y ^ 2
# df / dy = x ^ 3 - 2 * x * y
def f(x, y):
return x * x * x * y - x * y * y
# Obtain a function that returns the function value and the 1st order
# gradients.
val_grads_fn = tfe.value_and_gradients_function(f)
x = 2.0
y = 3.0
# Invoke the value-and-gradients function.
f_val, (x_grad, y_grad) = val_grads_fn(x, y)
assert f_val.numpy() == (2 ** 3) * 3 - 2 * (3 ** 2)
assert x_grad.numpy() == 3 * (2 ** 2) * 3 - 3 ** 2
assert y_grad.numpy() == (2 ** 3) - 2 * 2 * 3
# To obtain a callable that returns the value of `f` and the gradient(s) of
# `f` with respect to a subset of its inputs, use the `params` keyword
# argument with `value_and_gradients_function()`.
val_ygrad_fn = tfe.value_and_gradients_function(f, params=[1])
f_val, (y_grad,) = val_ygrad_fn(x, y)
assert f_val.numpy() == (2 ** 3) * 3 - 2 * (3 ** 2)
assert y_grad.numpy() == (2 ** 3) - 2 * 2 * 3
```
Args:
f: function to be differentiated.
params: list of parameter names of f or list of integers indexing the
parameters with respect to which we'll differentiate. Passing `None`
differentiates with respect to all parameters.
Returns: function which, when called, returns the value of f and the gradient
of f with respect to all of `params`. The function takes an extra optional
keyword argument "dy". Setting it allows computation of vector jacobian
products for vectors other than the vector of ones.
Raises:
ValueError: if the params are not all strings or all integers.
"""
def decorated(*args, **kwds):
"""Computes the value and gradient of the decorated function."""
parameter_positions = _get_arg_spec(f, params, args)
dy = kwds.pop("dy", None)
if dy is not None:
dy = ops.convert_to_tensor(dy)
assert not kwds, "The gradient function can't take keyword arguments."
tape.push_new_tape()
sources = []
args = [
ops.convert_to_tensor(args[i]) if i in parameter_positions else args[i]
for i in range(len(args))
]
args = _ensure_unique_tensor_objects(parameter_positions, args)
for i in parameter_positions:
sources.append(args[i])
tape.watch(args[i])
result = f(*args)
return result, imperative_grad.imperative_grad(
_default_vspace, tape.pop_tape(), nest.flatten(result), sources,
output_gradients=nest.flatten(dy) if dy is not None else None)
return decorated
def make_vjp(f, params=None):
"""Returns a function that computes f and is vjp w.r.t. params.
The term "vjp" here is an abbreviation for vector-jacobian product.
Args:
f: the function to be differentiated.
params: the parameters (numbers or names) to differentiate with respect to.
A value of None will differentiate with respect to all parameters.
Returns:
A function, which when called, returns a tuple (value, vjp), where:
- value is the result of calling f.
- vjp is a function, which takes a vector as an argument and
returns the product of that vector with the Jacobian of f.
Providing no argument to vjp is equivalent to providing a
vector of ones.
For example,
```python
def f(x):
return x * x
wrapped_fn = tfe.make_vjp(f)
result, vjp = wrapped_fn(tf.constant(3.0))
# result is 9.0
vjp()  # the vjp function returns 6.0
```
"""
def decorated(*args, **kwds):
"""Computes the value and gradient of the decorated function."""
parameter_positions = _get_arg_spec(f, params, args)
assert not kwds, "The gradient function can't take keyword arguments."
tape.push_new_tape()
sources = []
args = [
ops.convert_to_tensor(args[i]) if i in parameter_positions else args[i]
for i in range(len(args))
]
args = _ensure_unique_tensor_objects(parameter_positions, args)
for i in parameter_positions:
sources.append(args[i])
tape.watch(args[i])
result = f(*args)
t = tape.pop_tape()
def vjp(dy=None):
return imperative_grad.imperative_grad(
_default_vspace, t, nest.flatten(result), sources,
output_gradients=nest.flatten(dy) if dy is not None else None)
return result, vjp
return decorated
def _aggregate_grads(gradients):
"""Aggregate gradients from multiple sources.
Args:
gradients: A list of 'Tensor' or 'IndexedSlices' gradients.
Returns:
If 'gradients' only has 'Tensor', returns an aggregated 'Tensor'.
Otherwise returns an aggregated 'IndexedSlices'.
"""
assert gradients, "No gradients to aggregate"
if len(gradients) == 1:
return gradients[0]
if all([isinstance(g, ops.Tensor) for g in gradients]):
return math_ops.add_n(gradients)
else:
assert all([isinstance(g, (ops.Tensor, ops.IndexedSlices))
for g in gradients])
indexed_slices_list = []
for grad in gradients:
# TODO(xpan): Support nested IndexedSlices and core IndexedSlices
if isinstance(grad, ops.Tensor):
indexed_slices = ops.IndexedSlices(
grad,
constant_op.constant(range(grad.shape[0])),
constant_op.constant(grad.shape.as_list()))
indexed_slices_list.append(indexed_slices)
else:
indexed_slices_list.append(grad)
# Dense shapes from all gradients should be the same.
dense_shape = indexed_slices_list[0].dense_shape
# For simplicity now, always cast to int64.
indices = array_ops.concat([math_ops.cast(x.indices, dtypes.int64)
for x in indexed_slices_list], 0)
values = array_ops.concat([x.values for x in indexed_slices_list], 0)
return ops.IndexedSlices(values, indices, dense_shape)
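# Behaviour sketch (illustrative): for two dense gradients t1, t2 of equal shape,
#   _aggregate_grads([t1, t2]) is equivalent to math_ops.add_n([t1, t2])
# while mixing a Tensor with an IndexedSlices gradient yields one IndexedSlices
# whose indices and values are the concatenation of both contributions.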
def _num_elements(grad):
"""The number of elements in the `grad` tensor."""
if isinstance(grad, ops.Tensor):
return functools.reduce(operator.mul, grad._shape_tuple(), 1) # pylint: disable=protected-access
if isinstance(grad, ops.IndexedSlices):
return functools.reduce(operator.mul, grad.values._shape_tuple(), 1) # pylint: disable=protected-access
raise ValueError("`grad` not a Tensor or IndexedSlices.")
_default_vspace = imperative_grad.VSpace(
num_elements_fn=_num_elements,
aggregate_fn=_aggregate_grads,
tensor_id=ops.tensor_id,
zeros=array_ops.zeros,
ones_like=array_ops.ones_like)
class GradientTape(object):
"""Records operations to use to compute gradients.
Operations are recorded if:
- they happen in code marked by this context manager
- at least one of their inputs is being watched
Outputs of recorded operations are watched. Variables are automatically
watched and tensors can be manually watched by calling the watch method on the
context manager.
Example usage:
```python
with tfe.GradientTape() as g:
x = tf.constant(3.0)
g.watch(x)
y = x * x
grad = g.gradient(y, [x])[0]
assert grad.numpy() == 6.0
```
It is possible to use GradientTapes to compute higher-order derivatives as
follows:
```python
with tfe.GradientTape() as g:
x = tf.constant(3.0)
g.watch(x)
y = x * x
with tfe.GradientTape() as gg:
gg.watch(y)
z = 2 * y
inner_grad = gg.gradient(z, [y])[0]
assert inner_grad.numpy() == 2
y = y + inner_grad
grad = g.gradient(y, [x])[0]
assert grad.numpy() == 6.0
```
"""
def __init__(self):
self._tape = None
def __enter__(self):
tape.push_new_tape()
return self
def __exit__(self, typ, value, traceback):
self._tape = tape.pop_tape()
def watch(self, tensor):
"""Ensures that `tensor` is being traced by this tape.
Args:
tensor: a Tensor or Variable, or a list of Tensors or Variables.
"""
for t in nest.flatten(tensor):
if isinstance(t, resource_variable_ops.ResourceVariable):
t = t.handle
tape.watch(t)
def gradient(self, target, sources):
"""Computes the gradient using information traced by the tape.
Args:
target: the tensor to be differentiated.
sources: a list of Tensors or Variables, the target will be
differentiated with respect to the sources.
Returns:
a list of Tensors (or IndexedSlices, or None), one for each element in
`sources`.
Raises:
RuntimeError: if called inside the context of the tape, or if called more
than once.
"""
if self._tape is None:
raise RuntimeError("GradientTape.gradient can only be called once, and "
"only when the context manager has exited.")
sources = [x.handle if isinstance(x, resource_variable_ops.ResourceVariable)
else x
for x in sources]
grad = imperative_grad.imperative_grad(
_default_vspace, self._tape, [target], sources)
self._tape = None
return grad
|
|
from streampy import Stream
from streamcollector import Collector
from compatibility import _comparer
import unittest
class CreationTest(unittest.TestCase):
def test_create_stream_without_params(self):
s = Stream()
self.assertEquals(0, s.size())
def test_create_stream_with_list_params(self):
s = Stream([])
self.assertEquals(0, s.size())
s = Stream([1])
self.assertEquals(1, s.size())
def test_create_stream_with_generator_params(self):
s = Stream.range(1000)
self.assertEquals(1000, s.size())
def test_create_stream_with_bad_type(self):
self.assertRaises(TypeError, Stream.__init__, Stream(), None)
def test_create_stream_with_more_than_one_param(self):
self.assertRaises(TypeError, Stream.__init__, Stream(), *([], []))
class SizeTest(unittest.TestCase):
def test_simple_size_1(self):
self.assertEquals(Stream.range(999).size(), 999)
def test_simple_size_2(self):
self.assertEquals(Stream.range(0).size(), 0)
def test_simple_size_3(self):
s = Stream("azertyuiop").filter(lambda x: x == 'a')
self.assertEquals(s.size(), 1)
class FilterTest(unittest.TestCase):
def test_simple_filter_1(self):
s = Stream.range(10).filter(lambda x: x % 2 == 0).list()
self.assertEquals(s, [0, 2, 4, 6, 8])
s = Stream(['this', 'is', 'a', 'pretty', 'cat']).filter(lambda x: x != 'cat').list()
self.assertEquals(s, ['this', 'is', 'a', 'pretty'])
def test_simple_filter_2(self):
s = Stream('a simple string').filter(lambda x: x == 'a').list()
self.assertEquals(s, ['a'])
def test_simple_filter_3(self):
s = Stream.range(100001).filter(lambda x: x > 50000)
self.assertEquals(50000, s.size())
class ExcludeTest(unittest.TestCase):
def test_simple_exclude_1(self):
s = Stream.range(10).exclude(lambda x: x % 2 == 0).list()
self.assertEquals(s, [1, 3, 5, 7, 9])
s = Stream(['this', 'is', 'a', 'pretty', 'cat']).exclude(lambda x: x == 'cat').list()
self.assertEquals(s, ['this', 'is', 'a', 'pretty'])
def test_simple_exclude_2(self):
s = ''.join(Stream('a simple string').exclude(lambda x: x == 'a').list())
self.assertEquals(s, ' simple string')
def test_simple_exclude_3(self):
s = Stream.range(100001).exclude(lambda x: x > 50000)
self.assertEquals(50001, s.size())
class MapTest(unittest.TestCase):
def test_simple_map_1(self):
def add(x, y):
return x + y
s = Stream.range(10000).map(lambda x: x * 2).reduce(add, initializer=0)
self.assertEquals(s, 99990000)
def test_simple_map_2(self):
def to_dict(obj):
return {str(obj): obj}
s = Stream.range(10000).map(to_dict).size()
self.assertEquals(s, 10000)
def test_simple_map_3(self):
def to_dict(obj):
return {str(obj): obj}
s = Stream([1, 2, 3]).map(to_dict).list()
self.assertEqual(s, [{'1': 1}, {'2': 2}, {'3': 3}])
class ChainTest(unittest.TestCase):
def test_simple_chain_1(self):
self.assertEqual(Stream([1]).chain([2]).size(), 2)
def test_simple_chain_2(self):
self.assertEqual(Stream([1]).chain([2]).chain(Stream([3, 4, 5])).size(), 5)
class SortTest(unittest.TestCase):
def test_simple_sort_1(self):
self.assertEquals(Stream([1, 3, 2, 5, 4, 6]).sort(cmp=lambda x, y: _comparer(x, y)).list(), [1, 2, 3, 4, 5, 6])
def test_simple_sort_2(self):
self.assertEquals(Stream([1, 3, 2, 5, 4, 6]).sort(cmp=lambda x, y: _comparer(y, x)).list(), [6, 5, 4, 3, 2, 1])
class LimitTest(unittest.TestCase):
def test_simple_limit_1(self):
self.assertEquals(Stream([1, 3, 2, 5, 4, 6]).limit(2).list(), [1, 3])
def test_simple_limit_2(self):
self.assertEquals(Stream([1, 3, 2, 5, 4, 6]).limit(0).list(), [])
def test_simple_limit_3(self):
self.assertEquals(Stream("yeah baby !").limit(4).list(), ['y', 'e', 'a', 'h'])
class AnyTest(unittest.TestCase):
def test_simple_any_1(self):
self.assertEquals(Stream([1, 3, 2, 5, 4, 6]).limit(2).any(lambda x: x == 1), True)
def test_simple_any_2(self):
self.assertEquals(Stream([1, 3, 2, 5, 4, 6]).limit(2).any(lambda x: x == 2), False)
def test_simple_any_3(self):
self.assertEquals(Stream("yeah baby !").any(lambda x: 'a' < x < 'c'), True)
class AllTest(unittest.TestCase):
def test_simple_all_1(self):
self.assertEquals(Stream([1, 3, 2, 5, 4, 6]).limit(2).all(lambda x: 1 <= x <= 6), True)
def test_simple_all_2(self):
self.assertEquals(Stream([3, 3, 2, 5, 4, 6]).limit(2).all(lambda x: x == 3), True)
def test_simple_all_3(self):
self.assertEquals(Stream('yeah baby !').all(lambda x: 'a' < x < 'c'), False)
class MinTest(unittest.TestCase):
def test_simple_all_1(self):
self.assertEquals(Stream([1, 3, 2, 5, 4, 6]).min(), 1)
def test_simple_all_2(self):
self.assertEquals(Stream([3, 3, 2, 5, 4, 6]).min(), 2)
class MaxTest(unittest.TestCase):
def test_simple_all_1(self):
self.assertEquals(Stream([1, 3, 2, 5, 4, 6]).max(), 6)
def test_simple_all_2(self):
self.assertEquals(Stream([4, 42, 2, 5, 4, 6]).max(), 42)
class RangeTest(unittest.TestCase):
def test_simple_range_1(self):
self.assertEquals(Stream.range(10).list(), list(range(10)))
def test_simple_range_2(self):
self.assertEquals(Stream.range(42000).list(), list(range(42000)))
class FirstTest(unittest.TestCase):
def test_simple_first_1(self):
self.assertEquals(Stream.range(10).first(), 0)
def test_simple_first_2(self):
self.assertEquals(Stream.range(10).first(predicate=lambda x: x > 1), 2)
class LastTest(unittest.TestCase):
def test_simple_last_1(self):
self.assertEquals(Stream.range(10).last(), 9)
def test_simple_last_2(self):
self.assertEquals(Stream.range(10).last(predicate=lambda x: x > 1), 9)
def test_simple_last_3(self):
self.assertEquals(Stream.range(423).last(predicate=lambda x: 42 < x < 46), 45)
class SkipTest(unittest.TestCase):
def test_simple_skip_1(self):
self.assertEquals(Stream.range(10).skip(9).list(), [9])
def test_simple_skip_2(self):
self.assertEquals(Stream.range(10).skip(15).list(), [])
class GetItemTest(unittest.TestCase):
def test_simple_getitem_1(self):
self.assertEquals(Stream.range(10)[0], 0)
def test_simple_getitem_2(self):
self.assertEquals(Stream.range(430)[50], 50)
def test_simple_getitem_3(self):
self.assertRaises(IndexError, Stream.__getitem__, Stream([]), 1)
class DistinctTest(unittest.TestCase):
def test_simple_distinct_1(self):
self.assertEquals(Stream.range(100).chain(Stream.range(100)).distinct().list(), Stream.range(100).list())
def test_simple_distinct_2(self):
self.assertEquals(Stream([1, 1, 1, 1, 1, 1.0, 2]).distinct().list(), [1, 2])
class SubstreamTest(unittest.TestCase):
def test_simple_substream_1(self):
self.assertEqual(Stream([1, 2, 3, 4]).substream(0, 1).list(), [1])
def test_simple_substream_2(self):
self.assertEqual(Stream([1, 2, 3, 4]).substream(1, 1).list(), [])
def test_simple_substream_3(self):
self.assertEqual(Stream([1, 2, 3, 4]).substream(1, 2).list(), [2])
def test_simple_substream_4(self):
s = Stream.range(100) \
.chain(Stream.range(100, 1000)) \
.substream(100, 200).list()
self.assertEqual(s, list(range(100, 200)))
class ChunkTest(unittest.TestCase):
def test_simple_chunk_1(self):
self.assertEqual(Stream.range(10).chunk(2).list(), [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9]])
def test_simple_chunk_2(self):
self.assertEqual(Stream.range(10).chunk(3).list(), [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]])
def test_simple_chunk_3(self):
self.assertEqual(Stream.range(1).chunk(2).list(), [[0]])
class FunctionalTest(unittest.TestCase):
def test_simple_1(self):
element = Stream \
.range(100000) \
.filter(lambda x: x % 2 == 0) \
.map(lambda x: str(x)) \
.map(lambda x: 'hey{0}'.format(x)) \
.first()
self.assertEquals(element, 'hey0')
def test_simple_2(self):
element = Stream \
.range(100000) \
.filter(lambda x: x % 2 == 0) \
.map(lambda x: str(x)) \
.map(lambda x: 'hey{0}'.format(x)) \
.limit(10) \
.last()
self.assertEquals(element, 'hey18')
def test_simple_3(self):
element = Stream.range(100000) \
.filter(lambda x: x % 2 == 0) \
.map(lambda x: str(x)) \
.map(lambda x: 'Hi{0}'.format(x)) \
.map(lambda x: x.upper()) \
.filter(lambda x: x.endswith('8')) \
.limit(10) \
.map(lambda x: x[2]) \
.skip(1) \
.map(int) \
.list()
self.assertEquals(element, list(range(1, 10)))
def test_simple_4(self):
element = Stream(['You', 'shall', 'not', 'pass']) \
.map(lambda x: x.upper()) \
.exclude(lambda x: x == 'NOT') \
.exclude(lambda x: x == 'PASS') \
.chain(["pass"]) \
.map(lambda x: x.upper()) \
.list()
self.assertEquals(element, ['YOU', 'SHALL', 'PASS'])
class CollectTest(unittest.TestCase):
def test_collect_simple_list(self):
lst = Stream.range(10) \
.map(lambda x: x * 2) \
.collect(Collector.list())
self.assertEqual(lst, [0, 2, 4, 6, 8, 10, 12, 14, 16, 18])
def test_collect_simple_group_by(self):
peoples = [
{'name': 'Camille', 'age': 24},
{'name': 'Laurent', 'age': 22},
{'name': 'Matthias', 'age': 21},
{'name': 'Bertrand', 'age': 25},
{'name': 'David', 'age': 22},
]
res = {
21: [{'name': 'Matthias', 'age': 21}],
22: [
{'name': 'Laurent', 'age': 22},
{'name': 'David', 'age': 22},
],
24: [{'name': 'Camille', 'age': 24}],
25: [{'name': 'Bertrand', 'age': 25}]
}
lst = Stream(peoples) \
.collect(Collector.group_by(lambda x: x['age']))
self.assertEqual(lst, res)
def test_collect_simple_count_by(self):
peoples = [
{'name': 'Camille', 'age': 24},
{'name': 'Laurent', 'age': 22},
{'name': 'Matthias', 'age': 21},
{'name': 'Bertrand', 'age': 25},
{'name': 'David', 'age': 22},
]
res = {
21: 1,
22: 2,
24: 1,
25: 1
}
lst = Stream(peoples) \
.collect(Collector.count_by(lambda x: x['age']))
self.assertEqual(lst, res)
|
|
import sys
import threading
import os
import ast
import imp
import inspect
import ctypes
import socket
import json
import re
import traceback
import ltipy
import time
import asyncore
import signal
import lttools
oldout = sys.stdout
olderr = sys.stderr
stop = False
local = True
threads = []
currentClient = 0
class Printer():
cur = ""
def write(self, text):
self.cur += text
if text == "\n":
self.flush()
def flush(self):
send(currentClient, "editor.eval.python.print", {"msg": self.cur, "file": name})
self.cur = ""
def readlines(self):
return None
def read(self):
return None
def asUnicode(s):
try:
return unicode(s)
except:
return str(s)
def ensureUtf(s):
try:
return s.encode('utf8', 'ignore')
except:
return str(s)
def findLoc(body, line, total):
for i in range(len(body)):
if body[i].lineno == line:
if i + 1 >= len(body):
return {"start": body[i].lineno, "end":total}
else:
return {"start": body[i].lineno, "end":body[i+1].lineno - 1}
elif body[i].lineno > line and line != 0:
return {"start": body[i-1].lineno, "end":body[i].lineno - 1}
elif body[i].lineno < line and i + 1 == len(body) and line <= total:
return {"start": body[i].lineno, "end":total}
return None
def toForm(lines, loc):
if loc:
end = loc["end"] - 1
start = loc["start"] - 1
if start == end:
return [{"start":start, "end":end}, "\n"*start + lines[start]]
else:
while (not re.search("\S", lines[end]) or re.search("^\s*#.*$", lines[end])) and end > start:
end -= 1
if start == end:
return [{"start":start, "end":end}, "\n"*(start) + lines[start]]
else:
return [{"start":start, "end":end}, "\n"*(start) + "\n".join(lines[start:end + 1])]
return []
def toModuleNameByPath(path):
cur = [os.path.splitext(os.path.basename(path))[0]]
p = os.path.dirname(path);
while os.path.exists(os.path.join(p, "__init__.py")):
cur.insert(0, os.path.basename(p))
p = os.path.dirname(p)
return ".".join(cur)
def toModule(path):
name = toModuleNameByPath(path)
if name in sys.modules:
exec("import sys", sys.modules[name].__dict__)
return sys.modules[name]
else:
parts = name.split(".")
for idx in range(len(parts)):
mname = ".".join(parts[:idx+1])
__import__(mname)
exec("import sys", sys.modules[name].__dict__)
return sys.modules[name]
def explodeCode(string):
lines = string.splitlines()
total = len(lines)
if total == 0:
return [None, None]
a = ast.parse(string)
forms = []
totalForms = len(a.body)
for i in range(totalForms):
start = a.body[i].lineno
if i >= totalForms - 1:
end = total
else:
end = a.body[i+1].lineno - 1
forms.append(toForm(lines, {"start": start, "end": end}))
return forms
def handlePos(string, pos):
lines = string.splitlines()
if len(lines) == 0:
return [None, None]
a = ast.parse(string)
loc = findLoc(a.body, pos["line"] + 1, len(lines))
if loc:
return toForm(lines, loc)
return [None, None]
def cleanCode(c):
return re.sub(r'(#.*coding.*)\n?', '\n', c)
def cleanTrace(t):
return t
def chunks(lst, n):
for i in range(0, len(lst), n):
yield lst[i:i+n]
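# Example (illustrative): chunks() is used by send() below to split an encoded
# JSON payload into 1024-byte pieces for the socket, e.g.
#   list(chunks("abcdefgh", 3)) -> ["abc", "def", "gh"]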
def send(client, command, info):
tosend = (json.dumps([client, command, info]) + "\n").encode('utf-8')
for chunk in chunks(tosend, 1024):
s.send(chunk);
def stopped():
global stop
return stop
def handleEval(data):
result = None
code = cleanCode(data[2]["code"])
if "meta" in data[2]:
loc = data[2]["meta"]
else:
loc = {"start":1, "end":1}
toExec = []
if "pos" in data[2] and data[2]["pos"]:
try:
toExec.append(handlePos(code, data[2]["pos"]))
except:
e = traceback.format_exc()
return send(data[0], "editor.eval.python.exception", {"ex":cleanTrace(e), "meta": {"start": data[2]["pos"]["line"], "end": data[2]["pos"]["line"]}})
else:
try:
exp = code
if "meta" in data[2]:
exp = "\n" * data[2]["meta"]["start"] + code
toExec = explodeCode(exp)
except:
e = traceback.format_exc()
return send(data[0], "editor.eval.python.exception", {"ex":cleanTrace(e), "meta": data[2]["meta"]})
if not code or len(toExec) == 0:
return send(data[0], "editor.eval.python.result", None)
try:
if not "path" in data[2]:
module = sys.modules["__main__"]
else:
module = toModule(data[2]["path"])
except:
e = traceback.format_exc()
return send(data[0], "editor.eval.python.exception", {"ex":cleanTrace(e), "meta":loc})
for form in toExec:
code = form[1]
loc = form[0]
isEval = False
try:
code= compile(ensureUtf(code), ensureUtf(data[2]["name"]), 'eval')
isEval = True
except:
try:
code= compile(ensureUtf(code), ensureUtf(data[2]["name"]), 'exec')
except:
e = traceback.format_exc()
send(data[0], "editor.eval.python.exception", {"ex": cleanTrace(e), "meta": loc})
continue
try:
if isEval:
result = eval(code, module.__dict__)
send(data[0], "editor.eval.python.result", {"meta": loc, "result": asUnicode(result)})
else:
exec(code, module.__dict__)
send(data[0], "editor.eval.python.success", {"meta": loc})
except Exception as exc:
e = traceback.format_exc()
try:
send(data[0], "editor.eval.python.exception", {"ex":cleanTrace(e), "meta":loc})
continue
except:
pass
def ipyEval(data):
result = None
code = cleanCode(data[2]["code"])
if "meta" in data[2]:
loc = data[2]["meta"]
else:
loc = {"start":1, "end":1}
toExec = []
if "pos" in data[2] and data[2]["pos"]:
try:
toExec.append(handlePos(code, data[2]["pos"]))
except:
e = traceback.format_exc()
return send(data[0], "editor.eval.python.exception", {"ex":cleanTrace(e), "meta": {"start": data[2]["pos"]["line"], "end": data[2]["pos"]["line"]}})
else:
try:
exp = code
if "meta" in data[2]:
exp = "\n" * data[2]["meta"]["start"] + code
toExec = explodeCode(exp)
except:
e = traceback.format_exc()
return send(data[0], "editor.eval.python.exception", {"ex":cleanTrace(e), "meta": data[2]["meta"]})
if not code:
return send(data[0], "editor.eval.python.result", None)
try:
ltipy.setNs(data[2]['path'])
except:
pass
if "path" in data[2]:
path = data[2]["path"]
else:
path = "untitled"
for form in toExec:
code = form[1]
loc = form[0]
isEval = False
try:
compile(ensureUtf(code), ensureUtf(data[2]["name"]), 'eval')
isEval = True
except:
try:
compile(ensureUtf(code), ensureUtf(data[2]["name"]), 'exec')
except:
e = traceback.format_exc()
send(data[0], "editor.eval.python.exception", {"ex": cleanTrace(e), "meta": loc})
continue
ltipy.request({"meta": loc, "name": data[2]["name"], "path": path, "code": code, "client": data[0], "evaltype": "expression" if isEval else "statement"})
def shutdown():
global threads, oldout, s, stop
stop = True
killThreads(threads)
s.close()
sys.stdout = oldout
ltipy.killIPy()
safePrint("Disconnected")
sys.exit()
def signal_handler(signal, frame):
shutdown()
signal.signal(signal.SIGINT, signal_handler)
def handle(data, threads):
global currentClient
currentClient = data[0]
if data[1] == 'client.close':
shutdown()
elif data[1] == 'client.cancel-all':
killThreads(threads)
elif data[1] == 'editor.eval.python':
if local:
t = ThreadWithExc(target=handleEval, args=(data,))
t.start()
return t
else:
ipyEval(data)
def _async_raise(tid, exctype):
'''Raises an exception in the threads with id tid'''
if not inspect.isclass(exctype):
raise TypeError("Only types can be raised (not instances)")
res = ctypes.pythonapi.PyThreadState_SetAsyncExc(ctypes.c_long(tid),
ctypes.py_object(exctype))
if res == 0:
raise ValueError("invalid thread id")
elif res != 1:
ctypes.pythonapi.PyThreadState_SetAsyncExc(ctypes.c_long(tid), None)
raise SystemError("PyThreadState_SetAsyncExc failed")
class ThreadWithExc(threading.Thread):
'''A thread class that supports raising exception in the thread from
another thread.
'''
def _get_my_tid(self):
if not self.isAlive():
raise threading.ThreadError("the thread is not active")
if hasattr(self, "_thread_id"):
return self._thread_id
for tid, tobj in threading._active.items():
if tobj is self:
self._thread_id = tid
return tid
raise AssertionError("could not determine the thread's id")
def raiseExc(self, exctype):
_async_raise( self._get_my_tid(), exctype )
def killThreads(threads):
for t in threads:
if t.isAlive():
t.raiseExc(Exception)
def removeFinished(threads):
return [t for t in threads if t.isAlive()]
def safePrint(s):
sys.stdout.write(s)
sys.stdout.flush()
def start(type):
sys.stdout.flush()
info["type"] = type
s.send((json.dumps(info)+ "\n").encode('utf-8'));
sys.stdout = Printer()
sys.stderr = Printer()
asyncore.loop()
def connected():
global local
local = False
t = threading.Thread(target=start, args=("ipython",))
t.start()
def disconnected():
global local
if not local:
return
local = True
start("python")
class Client(asyncore.dispatcher):
def __init__(self, host, port):
asyncore.dispatcher.__init__(self)
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.connect( (host, port) )
self.buffer = ""
self.cur = ""
def handle_connect(self):
pass
def handle_close(self):
shutdown()
def handle_read(self):
self.cur += self.recv(1024).decode('utf-8')
if self.cur[-1] == '\n':
global threads
t = handle(json.loads(self.cur[:-1]), threads)
if t:
threads.append(t)
self.cur = ""
def writable(self):
return (len(self.buffer) > 0)
def handle_write(self):
sent = self.send(self.buffer.encode('utf-8'))
self.buffer = self.buffer[sent:]
def sendoff(self, msg):
self.buffer += msg
if __name__ == "__main__":
curDir = os.getcwd()
sys.path.append(curDir)
name = os.path.basename(curDir)
try:
cid = int(sys.argv[2])
except:
cid = None
info = {'name':name, 'client-id':cid, 'dir':curDir, 'commands': ['editor.eval.python']}
s = Client('127.0.0.1', int(sys.argv[1]))
lttools.setRespond(send);
safePrint("Connected")
ltipy.startIPy({"respond": send,
"connected": connected,
"disconnected": disconnected})
#disconnected()
|
|
__author__ = 'cmantas'
from CassandraNode import CassandraNode as Node, get_script_text
from CassandraNode import get_all_nodes
from VM import Timer, get_all_vms
from time import sleep
from json import loads, dumps
from os import remove
from os.path import isfile
orchestrator = None
seeds = [] # the seed node(s) of the cassandra cluster !!! ONLY ONE IS SUPPORTED !!!
nodes = [] # the rest of the nodes of the Cassandra cluster
clients = [] # the clients of the cluster
stash = []
seed_name = "cassandra_seednode"
node_name = "cassandra_node_"
client_name = "cassandra_client_"
save_file = "files/saved_cluster.json"
def create_cluster(worker_count=0, client_count=0):
"""
Creates a Cassandra Cluster with a single Seed Node and 'worker_count' other nodes
:param worker_count: the number of the nodes to create-apart from the seednode
"""
#create the seed node
seeds.append(Node(seed_name, node_type="SEED", create=True))
#create the rest of the nodes
for i in range(worker_count):
name = node_name+str(len(nodes)+1)
nodes.append(Node(name, create=True))
for i in range(client_count):
name = client_name+str(len(clients)+1)
clients.append(Node(name, node_type="CLIENT", create=True))
#wait until everybody is ready
wait_everybody()
inject_hosts_files()
print "CLUSTER: Every node is ready for SSH"
save_cluster()
def wait_everybody():
for i in seeds + nodes + clients:
i.vm.wait_ready()
def bootstrap_cluster():
""" Runs the necessary boostrap commnands to each of the Seed Node and the other nodes """
print "CLUSTER: Running bootstrap scripts"
#bootstrap the seed node
seeds[0].bootstrap()
#bootstrap the rest of the nodes
for n in nodes+clients:
n.bootstrap(params={"seednode": seeds[0].vm.get_private_addr()})
print "CLUSTER: READY!!"
def resume_cluster():
"""
Re-Creates the cluster representation based on the VMs that already exist on the IaaS
:param worker_count the number of the nodes to include in the cluster
"""
find_orchestrator()
if not isfile(save_file):
print "CLUSTER: No existing created cluster"
return
saved_cluster = loads(open(save_file, 'r').read())
saved_nodes = saved_cluster['nodes']
nodes[:] = []
seeds[:] = []
in_seeds, in_nodes, in_clients = get_all_nodes(check_active=True)
# check that all saved nodes actually exist and exit if not
for n in saved_nodes:
if n not in [i.name for i in in_nodes]:
print "CLUSTER: ERROR, node %s does actually exist in the cloud, re-create the cluster" % n
remove(save_file)
exit(-1)
# iterate over a copy so that removing nodes does not skip list elements
for n in list(in_nodes):
if n.name not in saved_nodes: in_nodes.remove(n)
nodes.extend(in_nodes)
seeds.extend(in_seeds)
clients.extend(in_clients)
def save_cluster():
cluster = dict()
cluster["seeds"] = [s.name for s in seeds]
cluster["nodes"] = [n.name for n in nodes]
cluster["clients"] = [c.name for c in clients]
cluster['note'] = "only the nodes are actually used"
string = dumps(cluster, indent=3)
f = open(save_file, 'w+')
f.write(string)
f.close()
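# Sketch of the JSON written above (names depend on the current cluster):
# {
#    "seeds": ["cassandra_seednode"],
#    "nodes": ["cassandra_node_1", "cassandra_node_2"],
#    "clients": ["cassandra_client_1"],
#    "note": "only the nodes are actually used"
# }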
def kill_clients():
print "CLUSTER: Killing clients"
for c in clients: c.kill()
def kill_nodes():
print "CLUSTER: Killing cassandra nodes"
for n in seeds+nodes+stash:
n.kill()
def kill_all():
# kill 'em all
kill_clients()
kill_nodes()
def inject_hosts_files():
print "CLUSTER: Injectin host files"
hosts = dict()
for i in seeds+nodes + clients:
hosts[i.name] = i.vm.get_private_addr()
#add the host names to etc/hosts
orchestrator.inject_hostnames(hosts)
for i in seeds+nodes+clients:
i.vm.inject_hostnames(hosts)
seeds[0].vm.run_command("service ganglia-monitor restart")
orchestrator.run_command("service ganglia-monitor restart")
def find_orchestrator():
vms = get_all_vms()
for vm in vms:
if "orchestrator" in vm.name:
global orchestrator
orchestrator = vm
return
def add_node():
name = node_name+str(len(nodes)+1)
print "CLUSTER: Adding node %s" % name
if not len(stash) == 0:
new_guy = stash[0]
del stash[0]
else:
new_guy = Node(name, create=True)
nodes.append(new_guy)
new_guy.vm.wait_ready()
#inject host files to everybody
inject_hosts_files()
new_guy.bootstrap()
print "CLUSTER: Node %s is live " % (name)
save_cluster()
def remove_node():
dead_guy = nodes[-1]
print "CLUSTER: Removing node %s" % dead_guy
dead_guy.decommission()
stash[:] = [nodes.pop()] + stash
print "CLUSTER: Node %s is removed" % dead_guy
save_cluster()
def run_load_phase(record_count):
#first inject the hosts file
host_text = ""
for h in seeds+nodes: host_text += h.vm.get_private_addr()+"\n"
start = 0
step = record_count/len(clients)
for c in clients:
load_command = "echo '%s' > /opt/hosts;" % host_text
load_command += get_script_text("ycsb_load") % (str(record_count), str(step), str(start), c.name[-1:])
print "CLUSTER: running load phase on %s" % c.name
c.vm.run_command(load_command, silent=True)
start += step
def run_sinusoid(target_total, offset_total, period):
target = target_total / len(clients)
offset = offset_total / len(clients)
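    #the total target throughput and sinusoid offset are split evenly across the clients,
    #e.g. (illustrative) target_total=200 with 2 clients gives each client a target of 100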
#first inject the hosts file
host_text = ""
for h in seeds+nodes: host_text += h.vm.get_private_addr()+"\n"
start = 0
for c in clients:
load_command = "echo '%s' > /opt/hosts;" % host_text
load_command += get_script_text("ycsb_run_sin") % (target, offset, period, c.name[-1:])
print "CLUSTER: running workload on %s" % c.name
c.vm.run_command(load_command, silent=True)
def destroy_all():
for n in seeds+nodes+stash+clients:
n.vm.destroy()
remove(save_file)
def cluster_info():
"""
returns the available nodes and their addresses
:return:
"""
rv = orchestrator.name+ "\t:\t"+orchestrator.get_public_addr()+ "\t,\t"+ orchestrator.get_private_addr()+ "\n"
for n in seeds+nodes+clients:
rv += n.name+ "\t:\t"+n.vm.get_public_addr()+"\t,\t"+n.vm.get_private_addr()+ "\n"
return rv
#=============================== MAIN ==========================
#create_cluster(worker_count=1, client_count=2)
#resume active cluster
resume_cluster()
#kill all previous processes
# kill_all()
# #bootstrap cluster from scratch
# bootstrap_cluster()
# run_load_phase(100000)
# print "waiting 20 seconds for load phase to finish"
# sleep(20)
# run_sinusoid(target_total=200, offset_total=100, period=60)
# print "waiting to add node"
# sleep(30)
# add_node()
# print "waiting to remove node"
# sleep(30)
# remove_node()
#
# print "FINISED (%d seconds)" % timer.stop()
|
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r""""Run analyses on learnt metric/score function.
We load a learnt metric and test responses which are repeated
presentations of a short stimuli, and perform various analyses such as:
* Accuracy of triplet ordering.
* Precision recall analysis of triplet ordering.
* Evaluating clustering of responses generated due to same stimulus.
* Retrieval of nearest responses in training data and
using it to decode the stimulus corresponding to test responses.
The output of all the analyses is stored in a pickle file.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import pickle
import numpy as np
import tensorflow as tf
from absl import app
from absl import gfile
import sklearn
import sklearn.manifold as manifold
import retina.response_model.python.metric_learning.analyse_metric as analyse
import retina.response_model.python.metric_learning.config as config
import retina.response_model.python.metric_learning.data_util as du
from tensorflow.python.profiler import PrintModelAnalysis
FLAGS = tf.app.flags.FLAGS
def main(unused_argv=()):
# set random seed
np.random.seed(121)
print('random seed reset')
# Get details of stored model.
model_savepath, model_filename = config.get_filepaths()
# Load responses to two trials of long white noise.
data_wn = du.DataUtilsMetric(os.path.join(FLAGS.data_path, FLAGS.data_test))
# Quadratic score function.
with tf.Session() as sess:
# Define and restore/initialize the model.
tf.logging.info('Model : %s ' % FLAGS.model)
met = config.get_model(sess, model_savepath, model_filename,
data_wn, True)
print('IS_TRAINING = TRUE!!! ')
tf.logging.info('IS_TRAINING = TRUE!!! ')
PrintModelAnalysis(tf.get_default_graph())
# get triplets
# triplet A
outputs = data_wn.get_triplets(batch_size=FLAGS.batch_size_test,
time_window=FLAGS.time_window)
anchor_test, pos_test, neg_test, _, _, _ = outputs
triplet_a = (anchor_test, pos_test, neg_test)
# triplet B
outputs = data_wn.get_tripletsB(batch_size=FLAGS.batch_size_test,
time_window=FLAGS.time_window)
anchor_test, pos_test, neg_test, _, _, _ = outputs
triplet_b = (anchor_test, pos_test, neg_test)
triplets = [triplet_a, triplet_b]
triplet_labels = ['triplet A', 'triplet B']
analysis_results = {} # collect analysis results in a dictionary
# 1. Plot distances between positive and negative pairs.
# analyse.plot_pos_neg_distances(met, anchor_test, pos_test, neg_test)
# tf.logging.info('Distances plotted')
# 2. Accuracy of triplet orderings - fraction of triplets where
# distance with positive is smaller than distance with negative.
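    # For example (illustrative): if d(anchor, pos) < d(anchor, neg) holds for 870 of
    # 1000 test triplets, the reported accuracy is 0.87.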
triplet_dict = {}
for iitriplet, itriplet in enumerate(triplets):
dist_pos, dist_neg, accuracy = analyse.compute_distances(met, *itriplet)
dist_analysis = {'pos': dist_pos,
'neg': dist_neg,
'accuracy': accuracy}
triplet_dict.update({triplet_labels[iitriplet]: dist_analysis})
analysis_results.update({'distances': triplet_dict})
tf.logging.info('Accuracy computed')
# 3. Precision-Recall analysis : declare positive if s(x,y)<t and
# negative otherwise. Vary threshold t, and plot precision-recall and
# ROC curves.
triplet_dict = {}
for iitriplet, itriplet in enumerate(triplets):
output = analyse.precision_recall(met, *itriplet, toplot=False)
precision_log, recall_log, f1_log, fpr_log, tpr_log, pr_data = output
pr = {'precision': precision_log, 'recall': recall_log,
'pr_data': pr_data}
roc = {'TPR': tpr_log, 'FPR': fpr_log}
pr_results = {'PR': pr, 'F1': f1_log, 'ROC': roc}
triplet_dict.update({triplet_labels[iitriplet]: pr_results})
analysis_results.update({'PR_analysis': triplet_dict})
tf.logging.info('Precision Recall, F1 score and ROC curves computed')
# 4. Clustering analysis: How well clustered are responses for a stimulus?
# Get all trials for a few (1000) stimuli and compute
# distances between all pairs of points.
# See how many of responses generated by same stimulus are actually
# near to each other.
n_tests = 10
p_log = []
r_log = []
s_log = []
resp_log = []
dist_log = []
embedding_log = []
for itest in range(n_tests):
n_stims = 10 # previously 100
tf.logging.info('Number of random samples is : %d' % n_stims)
resp_fcn = data_wn.get_response_all_trials
resp_all_trials, stim_id = resp_fcn(n_stims, FLAGS.time_window,
random_seed=itest)
# TODO(bhaishahster) : Remove duplicates from resp_all_trials
distance_pairs = analyse.get_pairwise_distances(met, resp_all_trials)
k_log = [1, 2, 3, 4, 5, 10, 15, 20, 50, 75, 100, 200, 300, 400, 500]
precision_log = []
recall_log = []
for k in k_log:
precision, recall = analyse.topK_retrieval(distance_pairs, k, stim_id)
precision_log += [precision]
recall_log += [recall]
p_log += [precision_log]
r_log += [recall_log]
s_log += [stim_id]
resp_log += [resp_all_trials]
dist_log += [distance_pairs]
#tf.logging.info('Getting 2D t-SNE embedding')
#model = manifold.TSNE(n_components=2)
#tSNE_embedding = model.fit_transform(distance_pairs)
#embedding_log += [tSNE_embedding]
all_trials = {'distances': dist_log, 'K': k_log,
'precision': p_log,
'recall': r_log,
'probe_stim_idx': s_log, 'probes': resp_log,
'embedding': embedding_log}
analysis_results_clustering = {'all_trials': all_trials}
pickle_file_clustering = (os.path.join(model_savepath, model_filename)
+ '_' + FLAGS.data_test +
'_analysis_clustering.pkl')
pickle.dump(analysis_results_clustering, gfile.Open(pickle_file_clustering, 'w'))
tf.logging.info('Clustering analysis done.')
'''
    # sample few/all repeats of stimuli which are continuous.
repeats = data_wn.get_repeats()
n_samples_max = 10
samples = np.random.randint(0, repeats.shape[0],
np.minimum(n_samples_max, repeats.shape[0]))
n_start_times = 5
time_window = 15
resps_cont = np.zeros((n_start_times, n_samples_max,
time_window, repeats.shape[-1]))
from IPython import embed; embed()
for istart in range(n_start_times):
start_tm = np.random.randint(repeats.shape[1] - time_window)
resps_cont[istart, :, :, :] = repeats[samples, start_tm:
start_tm+time_window, :]
resps_cont_2d = np.reshape(resps_cont, [-1, resps_cont.shape[-1]])
resps_cont_3d = np.expand_dims(resps_cont_2d, 2)
distances_cont_resp = analyse.get_pairwise_distances(met, resps_cont_3d)
n_components = 2
model = manifold.TSNE(n_components=n_components)
ts = model.fit_transform(distances_cont_resp)
tts = np.reshape(ts, [n_start_times, n_samples_max,
time_window, n_components])
from IPython import embed; embed()
plt.figure()
for istart in [1]: # range(n_start_times):
for isample in range(n_samples_max):
pts = tts[istart, isample, :, :]
plt.plot(pts[:, 0], pts[:, 1])
plt.show()
'''
# 5. Store the parameters of the score function.
score_params = met.get_parameters()
analysis_results.update({'score_params': score_params})
tf.logging.info('Got interesting parameters of score')
    # 6. Retrieval analysis on training data.
# Retrieve the nearest responses in training data for a probe test response.
# Load training data.
# data_wn_train = du.DataUtilsMetric(os.path.join(FLAGS.data_path,
# FLAGS.data_train))
#
# out_data = data_wn_train.get_all_responses(FLAGS.time_window)
# train_all_resp, train_stim_time = out_data
#
    # # Get a few test stimuli. Here we use all repeats of a few stimuli.
# n_stims = 100
# resp_all_trials, stim_id = data_wn.get_response_all_trials(n_stims,
# FLAGS.time_window)
# k = 1000
# retrieved, retrieved_stim = analyse.topK_retrieval_probes(train_all_resp,
# train_stim_time,
# resp_all_trials,
# k, met)
# retrieval_dict = {'probe': resp_all_trials, 'probe_stim_idx': stim_id,
# 'retrieved': retrieved,
# 'retrieved_stim_idx': retrieved_stim}
# analysis_results.update({'retrieval': retrieval_dict})
# tf.logging.info('Retrieved nearest points in training data'
# ' for some probes in test data')
# TODO(bhaishahster) : Decode stimulus using retrieved responses.
# 7. Learn encoding model.
# Learn mapping from stimulus to response.
# from IPython import embed; embed()
'''
data_wn_train = du.DataUtilsMetric(os.path.join(FLAGS.data_path,
'example_long_wn_2rep_'
'ON_OFF_with_stim.mat'))
data_wn_test = du.DataUtilsMetric(os.path.join(FLAGS.data_path,
'example_wn_30reps_ON_'
'OFF_with_stimulus.mat'))
stimulus_test = data_wn_test.get_stimulus()
response_test = data_wn_test.get_repeats()
stimulus = data_wn_train.get_stimulus()
response = data_wn_train.get_repeats()
ttf = data_wn_train.ttf[::-1]
encoding_fcn = encoding_model.learn_encoding_model_ln
# Initialize ttf, RF using ttf and scale ttf to match firing rate
RF_np, ttf_np, model = encoding_fcn(sess, met, stimulus, response, ttf_in=ttf,
lr=0.1)
firing_rate_pred = sess.run(model.firing_rate,
feed_dict={model.stimulus: stimulus_test})
initialize_all = {'RF': RF_np, 'ttf': ttf,
'firing_rate_test': firing_rate_pred}
# Initialize ttf and do no other initializations
RF_np_noinit, ttf_np_noinit, model = encoding_fcn(sess,met, stimulus, response,
ttf_in=ttf,
initialize_RF_using_ttf=False,
scale_ttf=False, lr=0.1)
firing_rate_pred = sess.run(model.firing_rate,
feed_dict={model.stimulus: stimulus_test})
initialize_only_ttf = {'RF': RF_np_noinit, 'ttf': ttf_np_noinit,
'firing_rate_test': firing_rate_pred}
# Initialize ttf and do no other initializations
RF_np_noinit2, ttf_np_noinit2, model = encoding_fcn(sess, met, stimulus, response,
ttf_in=None,
initialize_RF_using_ttf=False,
scale_ttf=False, lr=0.1)
firing_rate_pred = sess.run(model.firing_rate,
feed_dict={model.stimulus: stimulus_test})
initialize_none = {'RF': RF_np_noinit2, 'ttf': ttf_np_noinit2,
'firing_rate_test': firing_rate_pred}
encoding_models = {'Init_all': initialize_all,
'Init_ttf': initialize_only_ttf,
'Init_none': initialize_none,
'responses_test': response_test}
analysis_results.update({'Encoding_models': encoding_models})
'''
# 8. Is similarity in images implicitly learnt in the metric ?
# Reconstruction done in colab notebook
'''
class StimulusMetric(object):
"""Compute MSE between stimuli."""
def get_distance(self, in1, in2):
return np.sqrt(np.sum(np.sum((in1 - in2)**2, 2), 1))
# TODO(bhaishahster) : Filtering by time is remaining!
stimuli_met = StimulusMetric()
stim_distance, resp_distance, times, responses = analyse.compare_stimulus_score_similarity(data_wn, stimuli_met,
met)
compare_stim_mse_resp_met = {'stimulus_mse': stim_distance,
'response_metric': resp_distance,
'times': times,
'response_pairs': responses}
analysis_results.update({'perception': compare_stim_mse_resp_met})
'''
# 9. Retrieve nearest responses from ALL possible response patterns
# Retrieve the nearest responses in training data for a probe test response.
'''
import itertools
lst = list(map(list, itertools.product([0, 1], repeat=data_wn.n_cells)))
all_resp = np.array(lst)
all_resp = np.expand_dims(all_resp, 2)
    # Get a few test stimuli. Here we use all repeats of a few stimuli.
n_stims = 100
probe_responses, stim_id = data_wn.get_response_all_trials(n_stims,
FLAGS.time_window)
distances_corpus = analyse.compute_all_distances(all_resp, probe_responses,
met)
retrieval_dict = {'probe': probe_responses, 'probe_stim_idx': stim_id,
'corpus': all_resp,
'distance_corpus': distances_corpus}
analysis_results.update({'retrieval_ALL_responses': retrieval_dict})
tf.logging.info('Distance of probe to ALL possible response patterns')
'''
# 10. Get embedding for all possible responses,
# only if there are less than 15 cells
if data_wn.n_cells < 15:
import itertools
lst = list(map(list, itertools.product([0, 1], repeat=data_wn.n_cells)))
all_resp = np.expand_dims(np.array(lst), 2) # use time_window of 1.
all_resp_embedding = met.get_embedding(all_resp)
analysis_results.update({'all_resp_embedding': all_resp_embedding})
# save analysis in a pickle file
# from IPython import embed; embed()
pickle_file = (os.path.join(model_savepath, model_filename) + '_' +
FLAGS.data_test +
'_analysis.pkl')
pickle.dump(analysis_results, gfile.Open(pickle_file, 'w'))
# pickle.dump(analysis_results, file_io.FileIO(pickle_file, 'w'))
tf.logging.info('File: ' + pickle_file)
tf.logging.info('Analysis results saved')
print('File: ' + pickle_file)
if __name__ == '__main__':
app.run(main)
|
|
# -*- coding: latin-1 -*-
# GUI Application automation and testing library
# Copyright (C) 2006-2018 Mark Mc Mahon and Contributors
# https://github.com/pywinauto/pywinauto/graphs/contributors
# http://pywinauto.readthedocs.io/en/latest/credits.html
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of pywinauto nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Module containing tests for keyboard module"""
from __future__ import unicode_literals
from __future__ import print_function
import sys
import os
import unittest
import subprocess
import time
sys.path.append(".")
from pywinauto.application import Application
if sys.platform == 'win32':
from pywinauto.keyboard import send_keys, parse_keys, KeySequenceError
from pywinauto.keyboard import KeyAction, VirtualKeyAction, PauseAction
from pywinauto.sysinfo import is_x64_Python, is_x64_OS
else:
from pywinauto import mouse
from pywinauto.linux.keyboard import send_keys, KeySequenceError, KeyAction
from pywinauto.linux import clipboard
def mfc_samples():
mfc_samples_folder = os.path.join(
os.path.dirname(__file__), r"..\..\apps\MFC_samples")
if is_x64_Python():
mfc_samples_folder = os.path.join(mfc_samples_folder, 'x64')
return mfc_samples_folder
def _notepad_exe():
if is_x64_Python() or not is_x64_OS():
return r"C:\Windows\System32\notepad.exe"
else:
return r"C:\Windows\SysWOW64\notepad.exe"
def _test_app():
test_folder = os.path.join(os.path.dirname
(os.path.dirname
(os.path.dirname
(os.path.abspath(__file__)))),
r"apps/SendKeysTester")
return os.path.join(test_folder, r"send_keys_test_app")
class SendKeysTests(unittest.TestCase):
"""Unit tests for the Sendkeys module"""
def setUp(self):
"""Start the application set some data and ensure the application is in the state we want it."""
self.app = Application()
if sys.platform == 'win32':
self.app.start(_notepad_exe())
self.dlg = self.app.UntitledNotepad
self.ctrl = self.dlg.Edit
else:
self.app.start(_test_app())
time.sleep(0.1)
mouse.click(coords=(300, 300))
time.sleep(0.1)
def tearDown(self):
"""Close the application after tests"""
if sys.platform == 'win32':
try:
self.dlg.close(0.1)
except Exception: # TimeoutError:
pass
try:
if self.app.Notepad["Do&n't Save"].exists():
self.app.Notepad["Do&n't Save"].click()
self.app.Notepad["Do&n't Save"].wait_not('visible')
except Exception: # TimeoutError:
pass
finally:
if self.dlg.exists(timeout=0.1):
self.app.kill()
else:
# call Popen.kill() on Linux since Application.kill() is not implemented yet
self.app.kill()
def receive_text(self):
"""Receive data from text field"""
received = ' '
if sys.platform == 'win32':
received = self.ctrl.text_block()
else:
time.sleep(0.2)
send_keys('^a')
time.sleep(0.2)
send_keys('^c')
send_keys('{RIGHT}')
received = clipboard.get_data()
return received
def __run_NormalCharacters_with_options(self, **args):
"""Make sure that sending any character in range """
#unused var: missed = []
for i in range(32, 127):
# skip characters that must be escaped
if chr(i) in '~!@#$%^&*()_+{}|:"<>? ':
continue
send_keys(chr(i), pause = .001, **args)
received = self.receive_text()[-1]
self.assertEqual(i, ord(received))
# Space tests
def testNormalWithSpaces(self):
"""Make sure that with spaces option works"""
self.__run_NormalCharacters_with_options(with_spaces = True)
def testNormalWithoutSpaces(self):
"""Make sure that with spaces option works"""
self.__run_NormalCharacters_with_options(with_spaces = False)
def testSpaceWithSpaces(self):
"""Make sure that with spaces option works"""
send_keys(" \t \t ", pause = .001, with_spaces = True)
received = self.receive_text()
self.assertEqual(" ", received)
def testSpaceWithoutSpaces(self):
"""Make sure that with spaces option works"""
send_keys(" \t \t ", pause = .001, with_spaces = False)
received = self.receive_text()
self.assertEqual("", received)
# Tab tests
def testNormalWithTabs(self):
"""Make sure that with spaces option works"""
self.__run_NormalCharacters_with_options(with_tabs = True)
def testNormalWithoutTabs(self):
"""Make sure that with spaces option works"""
self.__run_NormalCharacters_with_options(with_tabs = False)
def testTabWithTabs(self):
"""Make sure that with spaces option works"""
send_keys("\t \t \t", pause = .1, with_tabs = True)
received = self.receive_text()
self.assertEqual("\t\t\t", received)
def testTabWithoutTabs(self):
"""Make sure that with spaces option works"""
send_keys("\t a\t b\t", pause = .1, with_tabs = False)
received = self.receive_text()
self.assertEqual("ab", received)
def testTab(self):
"""Make sure that with spaces option works"""
send_keys("{TAB} {TAB} ", pause = .3)
received = self.receive_text()
self.assertEqual("\t\t", received)
# Newline tests
def testNormalWithNewlines(self):
"""Make sure that with spaces option works"""
self.__run_NormalCharacters_with_options(with_newlines = True)
def testNormalWithoutNewlines(self):
"""Make sure that with_newlines option works"""
self.__run_NormalCharacters_with_options(with_newlines = False)
def testNewlinesWithNewlines(self):
"""Make sure that with_newlines option works"""
send_keys("\t \t \t a~\tb\nc", pause = .5, with_newlines = True)
received = self.receive_text()
if sys.platform == 'win32':
self.assertEqual("a\r\nb\r\nc", received)
else:
self.assertEqual("a\nb\nc", received)
def testNewlinesWithoutNewlines(self):
""""Make sure that with_newlines option works"""
send_keys("\t \t \t\na", pause = .01, with_newlines = False)
received = self.receive_text()
self.assertEqual("a", received)
#def testANSIExtendedCharacters(self):
# "Make sure that sending any character in range "
# #self.cmd = Application()
# #self.cmd.start("cmd.exe", create_new_console=True, wait_for_idle=False)
# ActionLogger().log('Preferred encoding: ' + locale.getpreferredencoding())
#
# #os.system("chcp 850")
# matched = 0
# extended_chars = b"\x81\x82\x83\xa1\xe1\xff"
# for char in extended_chars:
# if six.PY3:
# c = str(char)
# else:
# c = char.decode(locale.getpreferredencoding()) #'cp850')
# send_keys(c, pause = .01)
# received = self.receive_text()[-1]
# if c == received:
# matched += 1
# else:
# print("expected %s, recieved %s"% (
# repr(c), repr(received)))
# self.assertEqual(matched, len(extended_chars))
def testCharsThatMustBeEscaped(self):
"""Make sure that escaping characters works"""
send_keys("{%}{^}{+}{(}{)}{{}{}}{~}")
received = self.receive_text()
self.assertEqual("%^+(){}~", received)
def testIncorrectCases(self):
"""Make sure that incorrect key sequences raise an exception"""
self.assertRaises(KeySequenceError, send_keys, "{ENTER")
self.assertRaises(KeySequenceError, send_keys, "ENTER)")
self.assertRaises(RuntimeError, send_keys, "%{Enterius}")
self.assertRaises(KeySequenceError, send_keys, "{PAUSE small}")
try:
send_keys("{ENTER five}")
except KeySequenceError as exc:
self.assertEqual("invalid repetition count five", str(exc))
try:
send_keys("ENTER}")
except KeySequenceError as exc:
self.assertEqual("`}` should be preceeded by `{`", str(exc))
def testKeyDescription(self):
"""Test KeyAction._"""
self.assertEqual("<X>", str(KeyAction("X")))
self.assertEqual("<Y down>", str(KeyAction("Y", up=False)))
self.assertEqual("<Y up>", str(KeyAction("Y", down=False)))
#self.assertEqual("<ENTER>", str(VirtualKeyAction(13))) # == "<VK_RETURN>" in Python 2.7 (TODO)
if sys.platform == 'win32':
self.assertEqual("<PAUSE 1.00>", str(PauseAction(1.0)))
def testRepetition(self):
"""Make sure that repeated action works"""
send_keys("{TAB 3}{PAUSE 0.5}{F 3}", pause = .3)
received = self.receive_text()
self.assertEqual("\t\t\tFFF", received)
def testShiftModifier(self):
"""Make sure that Shift modifier works"""
send_keys("+(a)")
received = self.receive_text()
self.assertEqual("A", received)
if sys.platform != 'win32':
def testAltModifier(self):
"""Make sure that alt modifier works"""
clipboard.set_data('abc')
# check alt via opening edit menu and paste text from clipboard
time.sleep(0.3)
send_keys('%(e)')
time.sleep(0.3)
send_keys('{ENTER}')
received = self.receive_text()
self.assertEqual('abc', received)
if sys.platform == 'win32':
class SendKeysModifiersTests(unittest.TestCase):
"""Unit tests for the Sendkeys module (modifiers)"""
def setUp(self):
"""Start the application and ensure it's in the state we want"""
self.app = Application().start(os.path.join(mfc_samples(), u"CtrlTest.exe"))
self.dlg = self.app.Control_Test_App
def tearDown(self):
"""Close the application after tests"""
try:
self.dlg.close(0.5)
except Exception:
pass
finally:
self.app.kill()
def testModifiersForFewChars(self):
"""Make sure that repeated action works"""
send_keys("%(SC)", pause = .3)
dlg = self.app.window(name='Using C++ Derived Class')
dlg.wait('ready')
dlg.Done.close_click()
dlg.wait_not('visible')
send_keys("%(H{LEFT}{UP}{ENTER})", pause = .3)
dlg = self.app.window(name='Sample Dialog with spin controls')
dlg.wait('ready')
dlg.Done.close_click()
dlg.wait_not('visible')
if sys.platform == 'win32':
class VkPacketTests(unittest.TestCase):
def testBasic(self):
keys = parse_keys('AAA', vk_packet=False)
self.assertEqual(3, len(keys))
for key in keys:
self.assertTrue(isinstance(key, VirtualKeyAction))
wVk, wScan, dwFlags = key._get_key_info()
self.assertEqual(ord('A'), wVk)
self.assertEqual(0, dwFlags)
def testRepeat(self):
keys = parse_keys('{A 3}', vk_packet=False)
self.assertEqual(3, len(keys))
for key in keys:
self.assertTrue(isinstance(key, VirtualKeyAction))
wVk, wScan, dwFlags = key._get_key_info()
self.assertEqual(ord('A'), wVk)
self.assertEqual(0, dwFlags)
def testSymbol(self):
key, = parse_keys('{=}', vk_packet=False)
self.assertTrue(isinstance(key, VirtualKeyAction))
wVk, wScan, dwFlags = key._get_key_info()
self.assertEqual(0xbb, wVk)
self.assertEqual(0, dwFlags)
def testNoVk(self):
key, = parse_keys('!', vk_packet=False)
self.assertTrue(isinstance(key, KeyAction))
wVk, wScan, dwFlags = key._get_key_info()
self.assertEqual(0, wVk)
#====================================================================
if __name__ == "__main__":
unittest.main()
#import doctest
#doctest.testmod()
|
|
##########################################################################
#
# Copyright (c) 2007-2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import math
import unittest
from IECore import *
class TestPointsPrimitive( unittest.TestCase ) :
def testPrimitiveVariable( self ) :
v = PrimitiveVariable( PrimitiveVariable.Interpolation.Constant, FloatData( 1 ) )
self.assertEqual( v.interpolation, PrimitiveVariable.Interpolation.Constant )
self.assertEqual( v.data, FloatData( 1 ) )
v.interpolation = PrimitiveVariable.Interpolation.Vertex
self.assertEqual( v.interpolation, PrimitiveVariable.Interpolation.Vertex )
v.data = IntVectorData( [ 1, 2, 3, 4 ] )
self.assertEqual( v.data, IntVectorData( [ 1, 2, 3, 4 ] ) )
def testPrimitive( self ) :
"""This test mainly tests the Primitive aspects of the PointPrimitive"""
p = PointsPrimitive( 10 )
self.assertEqual( p.numPoints, 10 )
self.assertEqual( p.variableSize( PrimitiveVariable.Interpolation.Constant ), 1 )
self.assertEqual( p.variableSize( PrimitiveVariable.Interpolation.Uniform ), 1 )
self.assertEqual( p.variableSize( PrimitiveVariable.Interpolation.Varying ), 10 )
self.assertEqual( p.variableSize( PrimitiveVariable.Interpolation.Vertex ), 10 )
self.assertEqual( p.variableSize( PrimitiveVariable.Interpolation.FaceVarying ), 10 )
self.assertEqual( p, p )
self.assertEqual( p, p.copy() )
# try adding a primvar
self.assertEqual( len( p ), 0 )
self.assert_( not "P" in p )
p["P"] = PrimitiveVariable( PrimitiveVariable.Interpolation.Vertex, V3fVectorData() )
self.assertEqual( p, p )
self.assertEqual( p, p.copy() )
self.assertEqual( len( p ), 1 )
self.assert_( "P" in p )
self.assertEqual( p["P"].data, V3fVectorData() )
# and removing it
self.assertEqual( p["P"].interpolation, PrimitiveVariable.Interpolation.Vertex )
del p["P"]
self.assertEqual( len( p ), 0 )
self.assert_( not "P" in p )
# and adding it and another
p["P"] = PrimitiveVariable( PrimitiveVariable.Interpolation.Vertex, V3fVectorData() )
self.assert_( not "N" in p )
p["N"] = PrimitiveVariable( PrimitiveVariable.Interpolation.Vertex, V3fVectorData() )
self.assert_( "N" in p )
self.assertEqual( len( p ), 2 )
self.assertEqual( p["N"].data, V3fVectorData() )
self.assertEqual( p["N"].interpolation, PrimitiveVariable.Interpolation.Vertex )
# and overwriting one with the other
p["N"] = p["P"]
self.assert_( p["N"].data.isSame( p["P"].data ) )
def testConstructors( self ) :
p = PointsPrimitive( 20 )
self.assertEqual( p.numPoints, 20 )
self.assertEqual( p.variableSize( PrimitiveVariable.Interpolation.Constant ), 1 )
self.assertEqual( p.variableSize( PrimitiveVariable.Interpolation.Uniform ), 1 )
self.assertEqual( p.variableSize( PrimitiveVariable.Interpolation.Varying ), 20 )
self.assertEqual( p.variableSize( PrimitiveVariable.Interpolation.Vertex ), 20 )
self.assertEqual( p.variableSize( PrimitiveVariable.Interpolation.FaceVarying ), 20 )
self.assertEqual( len( p ), 0 )
p = PointsPrimitive( V3fVectorData( [ V3f( 1 ) ] ) )
self.assertEqual( p.numPoints, 1 )
self.assertEqual( p.variableSize( PrimitiveVariable.Interpolation.Constant ), 1 )
self.assertEqual( p.variableSize( PrimitiveVariable.Interpolation.Uniform ), 1 )
self.assertEqual( p.variableSize( PrimitiveVariable.Interpolation.Varying ), 1 )
self.assertEqual( p.variableSize( PrimitiveVariable.Interpolation.Vertex ), 1 )
self.assertEqual( p.variableSize( PrimitiveVariable.Interpolation.FaceVarying ), 1 )
self.assertEqual( len( p ), 1 )
self.assert_( "P" in p )
self.assertEqual( p["P"].data, V3fVectorData( [ V3f( 1 ) ], GeometricData.Interpretation.Point ) )
self.assertEqual( p["P"].interpolation, PrimitiveVariable.Interpolation.Vertex )
p = PointsPrimitive( V3fVectorData( [ V3f( 1 ) ] ), FloatVectorData( [ 1 ] ) )
self.assertEqual( p.numPoints, 1 )
self.assertEqual( p.variableSize( PrimitiveVariable.Interpolation.Constant ), 1 )
self.assertEqual( p.variableSize( PrimitiveVariable.Interpolation.Uniform ), 1 )
self.assertEqual( p.variableSize( PrimitiveVariable.Interpolation.Varying ), 1 )
self.assertEqual( p.variableSize( PrimitiveVariable.Interpolation.Vertex ), 1 )
self.assertEqual( p.variableSize( PrimitiveVariable.Interpolation.FaceVarying ), 1 )
self.assertEqual( len( p ), 2 )
self.assert_( "P" in p )
self.assert_( "r" in p )
self.assertEqual( p["P"].data, V3fVectorData( [ V3f( 1 ) ], GeometricData.Interpretation.Point ) )
self.assertEqual( p["P"].interpolation, PrimitiveVariable.Interpolation.Vertex )
self.assertEqual( p["r"].data, FloatVectorData( [ 1 ] ) )
self.assertEqual( p["r"].interpolation, PrimitiveVariable.Interpolation.Vertex )
def testNumPointsAccess( self ) :
p = PointsPrimitive( 20 )
self.assertEqual( p.numPoints, 20 )
p.numPoints = 40
self.assertEqual( p.numPoints, 40 )
def testHash( self ) :
p = PointsPrimitive( 1 )
p2 = PointsPrimitive( 2 )
self.assertNotEqual( p.hash(), p2.hash() )
self.assertNotEqual( p.topologyHash(), p2.topologyHash() )
p3 = p2.copy()
self.assertEqual( p3.hash(), p2.hash() )
self.assertEqual( p3.topologyHash(), p2.topologyHash() )
p3["primVar"] = PrimitiveVariable( PrimitiveVariable.Interpolation.Constant, IntData( 10 ) )
self.assertNotEqual( p3.hash(), p2.hash() )
self.assertEqual( p3.topologyHash(), p2.topologyHash() )
def testBound( self ) :
p = PointsPrimitive( 2 )
self.assertEqual( p.bound(), Box3f() )
p["P"] = PrimitiveVariable(
PrimitiveVariable.Interpolation.Vertex,
V3fVectorData( [ V3f( 1, 2, 3 ), V3f( 12, 13, 14 ) ] )
)
# when no width is specified, it defaults to 1
self.assertEqual(
p.bound(),
Box3f(
V3f( 1, 2, 3 ) - V3f( 1 ) / 2.0,
V3f( 12, 13, 14 ) + V3f( 1 ) / 2.0
)
)
# constantwidth overrides the default
p["constantwidth"] = PrimitiveVariable( PrimitiveVariable.Interpolation.Constant, 2.0 )
self.assertEqual(
p.bound(),
Box3f(
V3f( 1, 2, 3 ) - V3f( 2 ) / 2.0,
V3f( 12, 13, 14 ) + V3f( 2 ) / 2.0
)
)
# vertex width works too, and multiplies with constantwidth
p["width"] = PrimitiveVariable( PrimitiveVariable.Interpolation.Vertex, FloatVectorData( [ 2, 4 ] ) )
self.assertEqual(
p.bound(),
Box3f(
V3f( 1, 2, 3 ) - V3f( 2 * 2 ) / 2.0,
V3f( 12, 13, 14 ) + V3f( 2 * 4 ) / 2.0
)
)
# aspect ratio should have no effect whatsoever if type is not "patch"
p["patchaspectratio"] = PrimitiveVariable( PrimitiveVariable.Interpolation.Constant, 2.0 )
del p["width"]
del p["constantwidth"]
self.assertEqual(
p.bound(),
Box3f(
V3f( 1, 2, 3 ) - V3f( 1 ) / 2.0,
V3f( 12, 13, 14 ) + V3f( 1 ) / 2.0
)
)
# but it should take effect when type is "patch"
p["type"] = PrimitiveVariable( PrimitiveVariable.Interpolation.Constant, "patch" )
diagonal = math.sqrt( 1 ** 2 + 0.5 ** 2 )
self.assertEqual(
p.bound(),
Box3f(
V3f( 1, 2, 3 ) - V3f( diagonal ) / 2.0,
V3f( 12, 13, 14 ) + V3f( diagonal ) / 2.0
)
)
# and "constantwidth" should still be taken into account for patches
p["constantwidth"] = PrimitiveVariable( PrimitiveVariable.Interpolation.Constant, 2.0 )
self.assertEqual(
p.bound(),
Box3f(
V3f( 1, 2, 3 ) - V3f( 2 * diagonal ) / 2.0,
V3f( 12, 13, 14 ) + V3f( 2 * diagonal ) / 2.0
)
)
# as should "width"
p["width"] = PrimitiveVariable( PrimitiveVariable.Interpolation.Vertex, FloatVectorData( [ 2, 4 ] ) )
self.assertEqual(
p.bound(),
Box3f(
V3f( 1, 2, 3 ) - V3f( 2 * 2 * diagonal ) / 2.0,
V3f( 12, 13, 14 ) + V3f( 2 * 4 * diagonal ) / 2.0
)
)
if __name__ == "__main__":
unittest.main()
|
|
## @file
# This file is used to define class objects of INF file [Guids] section.
# It will be consumed by InfParser.
#
# Copyright (c) 2011, Intel Corporation. All rights reserved.<BR>
#
# This program and the accompanying materials are licensed and made available
# under the terms and conditions of the BSD License which accompanies this
# distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
'''
InfGuidObject
'''
from Library.ParserValidate import IsValidCVariableName
from Library.CommentParsing import ParseComment
from Library.ExpressionValidate import IsValidFeatureFlagExp
from Library.Misc import Sdict
from Library import DataType as DT
import Logger.Log as Logger
from Logger import ToolError
from Logger import StringTable as ST
class InfGuidItemCommentContent():
def __init__(self):
#
# ## SOMETIMES_CONSUMES ## Variable:L"MemoryTypeInformation"
# TailString.
#
#
# SOMETIMES_CONSUMES
#
self.UsageItem = ''
#
# Variable
#
self.GuidTypeItem = ''
#
# MemoryTypeInformation
#
self.VariableNameItem = ''
#
# TailString
#
self.HelpStringItem = ''
def SetUsageItem(self, UsageItem):
self.UsageItem = UsageItem
def GetUsageItem(self):
return self.UsageItem
def SetGuidTypeItem(self, GuidTypeItem):
self.GuidTypeItem = GuidTypeItem
def GetGuidTypeItem(self):
return self.GuidTypeItem
def SetVariableNameItem(self, VariableNameItem):
self.VariableNameItem = VariableNameItem
def GetVariableNameItem(self):
return self.VariableNameItem
def SetHelpStringItem(self, HelpStringItem):
self.HelpStringItem = HelpStringItem
def GetHelpStringItem(self):
return self.HelpStringItem
class InfGuidItem():
def __init__(self):
self.Name = ''
self.FeatureFlagExp = ''
#
# A list contain instance of InfGuidItemCommentContent
#
self.CommentList = []
self.SupArchList = []
def SetName(self, Name):
self.Name = Name
def GetName(self):
return self.Name
def SetFeatureFlagExp(self, FeatureFlagExp):
self.FeatureFlagExp = FeatureFlagExp
def GetFeatureFlagExp(self):
return self.FeatureFlagExp
def SetCommentList(self, CommentList):
self.CommentList = CommentList
def GetCommentList(self):
return self.CommentList
def SetSupArchList(self, SupArchList):
self.SupArchList = SupArchList
def GetSupArchList(self):
return self.SupArchList
## ParseComment
#
# ParseComment
#
def ParseGuidComment(CommentsList, InfGuidItemObj):
#
# Get/Set Usage and HelpString
#
if CommentsList != None and len(CommentsList) != 0 :
CommentInsList = []
PreUsage = None
PreGuidType = None
PreHelpText = ''
BlockFlag = -1
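        # BlockFlag roughly tracks how the current run of comment lines gets flushed:
        #   -1 : no block in progress
        #   1/2: one or more generic (usage-less) comment lines collected so far
        #   3  : a generic block immediately followed by a usage comment (emit both)
        #   4  : emit a comment content instance for the current line only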
Count = 0
for CommentItem in CommentsList:
Count = Count + 1
CommentItemUsage, \
CommentItemGuidType, \
CommentItemVarString, \
CommentItemHelpText = \
ParseComment(CommentItem,
DT.ALL_USAGE_TOKENS,
DT.GUID_TYPE_TOKENS,
[],
True)
if CommentItemHelpText == None:
CommentItemHelpText = ''
if Count == len(CommentsList) and CommentItemUsage == CommentItemGuidType == DT.ITEM_UNDEFINED:
CommentItemHelpText = DT.END_OF_LINE
if Count == len(CommentsList):
if BlockFlag == 1 or BlockFlag == 2:
if CommentItemUsage == CommentItemGuidType == DT.ITEM_UNDEFINED:
BlockFlag = 4
else:
BlockFlag = 3
if BlockFlag == -1:
BlockFlag = 4
if BlockFlag == -1 or BlockFlag == 1 or BlockFlag == 2:
if CommentItemUsage == CommentItemGuidType == DT.ITEM_UNDEFINED:
if BlockFlag == -1:
BlockFlag = 1
elif BlockFlag == 1:
BlockFlag = 2
else:
if BlockFlag == 1 or BlockFlag == 2:
BlockFlag = 3
elif BlockFlag == -1:
BlockFlag = 4
#
            # Combine two comment lines if they are generic comments
#
if CommentItemUsage == CommentItemGuidType == PreUsage == PreGuidType == DT.ITEM_UNDEFINED:
CommentItemHelpText = PreHelpText + DT.END_OF_LINE + CommentItemHelpText
PreHelpText = CommentItemHelpText
if BlockFlag == 4:
CommentItemIns = InfGuidItemCommentContent()
CommentItemIns.SetUsageItem(CommentItemUsage)
CommentItemIns.SetGuidTypeItem(CommentItemGuidType)
CommentItemIns.SetVariableNameItem(CommentItemVarString)
CommentItemIns.SetHelpStringItem(CommentItemHelpText)
CommentInsList.append(CommentItemIns)
BlockFlag = -1
PreUsage = None
PreGuidType = None
PreHelpText = ''
elif BlockFlag == 3:
#
# Add previous help string
#
CommentItemIns = InfGuidItemCommentContent()
CommentItemIns.SetUsageItem(DT.ITEM_UNDEFINED)
CommentItemIns.SetGuidTypeItem(DT.ITEM_UNDEFINED)
if PreHelpText == '' or PreHelpText.endswith(DT.END_OF_LINE):
PreHelpText += DT.END_OF_LINE
CommentItemIns.SetHelpStringItem(PreHelpText)
CommentInsList.append(CommentItemIns)
#
# Add Current help string
#
CommentItemIns = InfGuidItemCommentContent()
CommentItemIns.SetUsageItem(CommentItemUsage)
CommentItemIns.SetGuidTypeItem(CommentItemGuidType)
CommentItemIns.SetVariableNameItem(CommentItemVarString)
CommentItemIns.SetHelpStringItem(CommentItemHelpText)
CommentInsList.append(CommentItemIns)
BlockFlag = -1
PreUsage = None
PreGuidType = None
PreHelpText = ''
else:
PreUsage = CommentItemUsage
PreGuidType = CommentItemGuidType
PreHelpText = CommentItemHelpText
InfGuidItemObj.SetCommentList(CommentInsList)
else:
#
# Still need to set the USAGE/GUIDTYPE to undefined.
#
CommentItemIns = InfGuidItemCommentContent()
CommentItemIns.SetUsageItem(DT.ITEM_UNDEFINED)
CommentItemIns.SetGuidTypeItem(DT.ITEM_UNDEFINED)
InfGuidItemObj.SetCommentList([CommentItemIns])
return InfGuidItemObj
## InfGuidObject
#
# InfGuidObject
#
class InfGuidObject():
def __init__(self):
self.Guids = Sdict()
#
        # Macros defined in this section should only be used in this section.
#
self.Macros = {}
def SetGuid(self, GuidList, Arch = None):
__SupportArchList = []
for ArchItem in Arch:
#
# Validate Arch
#
if (ArchItem == '' or ArchItem == None):
ArchItem = 'COMMON'
__SupportArchList.append(ArchItem)
for Item in GuidList:
#
            # Get the comment content of this GUID
#
CommentsList = None
if len(Item) == 3:
CommentsList = Item[1]
CurrentLineOfItem = Item[2]
Item = Item[0]
InfGuidItemObj = InfGuidItem()
if len(Item) >= 1 and len(Item) <= 2:
#
                # Only GuidName contained
#
if not IsValidCVariableName(Item[0]):
Logger.Error("InfParser",
ToolError.FORMAT_INVALID,
ST.ERR_INF_PARSER_INVALID_CNAME%(Item[0]),
File=CurrentLineOfItem[2],
Line=CurrentLineOfItem[1],
ExtraData=CurrentLineOfItem[0])
if (Item[0] != ''):
InfGuidItemObj.SetName(Item[0])
else:
Logger.Error("InfParser",
ToolError.FORMAT_INVALID,
ST.ERR_INF_PARSER_CNAME_MISSING,
File=CurrentLineOfItem[2],
Line=CurrentLineOfItem[1],
ExtraData=CurrentLineOfItem[0])
if len(Item) == 2:
#
# Contained CName and Feature Flag Express
# <statements> ::= <CName> ["|" <FeatureFlagExpress>]
# For GUID entry.
#
if Item[1].strip() == '':
Logger.Error("InfParser",
ToolError.FORMAT_INVALID,
ST.ERR_INF_PARSER_FEATURE_FLAG_EXP_MISSING,
File=CurrentLineOfItem[2],
Line=CurrentLineOfItem[1],
ExtraData=CurrentLineOfItem[0])
#
# Validate Feature Flag Express
#
FeatureFlagRtv = IsValidFeatureFlagExp(Item[1].strip())
if not FeatureFlagRtv[0]:
Logger.Error("InfParser",
ToolError.FORMAT_INVALID,
ST.ERR_INF_PARSER_FEATURE_FLAG_EXP_SYNTAX_INVLID%(FeatureFlagRtv[1]),
File=CurrentLineOfItem[2],
Line=CurrentLineOfItem[1],
ExtraData=CurrentLineOfItem[0])
InfGuidItemObj.SetFeatureFlagExp(Item[1])
if len(Item) != 1 and len(Item) != 2:
#
# Invalid format of GUID statement
#
Logger.Error("InfParser",
ToolError.FORMAT_INVALID,
ST.ERR_INF_PARSER_GUID_PPI_PROTOCOL_SECTION_CONTENT_ERROR,
File=CurrentLineOfItem[2],
Line=CurrentLineOfItem[1],
ExtraData=CurrentLineOfItem[0])
InfGuidItemObj = ParseGuidComment(CommentsList, InfGuidItemObj)
InfGuidItemObj.SetSupArchList(__SupportArchList)
#
# Determine GUID name duplicate. Follow below rule:
#
# A GUID must not be duplicated within a [Guids] section.
# A GUID may appear in multiple architectural [Guids]
# sections. A GUID listed in an architectural [Guids]
# section must not be listed in the common architectural
# [Guids] section.
#
# NOTE: This check will not report error now.
#
for Item in self.Guids:
if Item.GetName() == InfGuidItemObj.GetName():
ItemSupArchList = Item.GetSupArchList()
for ItemArch in ItemSupArchList:
for GuidItemObjArch in __SupportArchList:
if ItemArch == GuidItemObjArch:
#
# ST.ERR_INF_PARSER_ITEM_DUPLICATE
#
pass
if ItemArch.upper() == 'COMMON' or GuidItemObjArch.upper() == 'COMMON':
#
# ST.ERR_INF_PARSER_ITEM_DUPLICATE_COMMON
#
pass
if self.Guids.has_key((InfGuidItemObj)):
GuidList = self.Guids[InfGuidItemObj]
GuidList.append(InfGuidItemObj)
self.Guids[InfGuidItemObj] = GuidList
else:
GuidList = []
GuidList.append(InfGuidItemObj)
self.Guids[InfGuidItemObj] = GuidList
return True
def GetGuid(self):
return self.Guids
|
|
# coding=utf-8
import re
_RE_FIND_FIRST_CAP = re.compile('(.)([A-Z][a-z]+)')
_RE_SPAN_OF_CAPS = re.compile('([a-z0-9])([A-Z])')
def camelcase_to_underscore(name):
return _RE_SPAN_OF_CAPS.sub(r'\1_\2',
_RE_FIND_FIRST_CAP.sub(r'\1_\2', name)
).lower()
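# Illustrative behaviour (examples are not part of the original module): the first
# pass splits a capitalised word from the character before it, the second splits any
# lower/digit-to-upper boundary, and the result is lower-cased:
#   camelcase_to_underscore('CamelCaseName') -> 'camel_case_name'
#   camelcase_to_underscore('HTTPResponse2XX') -> 'http_response2_xx'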
class binary:
"""
Store the value in bits so we can convert between things easily
"""
value = None
def __init__(self, value=None, unit=None):
self.do(value=value, unit=unit)
@staticmethod
def convert(value=None, oldUnit=None, newUnit=None):
convertor = binary(value=value, unit=oldUnit)
return convertor.get(unit=newUnit)
def set(self, value, unit=None):
return self.do(value=value, unit=unit)
def get(self, unit=None):
return self.do(unit=unit)
def do(self, value=None, unit=None):
if not unit:
return self.bit(value=value)
if unit in ['bit', 'b']:
return self.bit(value=value)
if unit in ['kilobit', 'kbit', 'Kibit']:
return self.kilobit(value=value)
        if unit in ['megabit', 'Mbit', 'Mibit']:
return self.megabit(value=value)
if unit in ['gigabit', 'Gbit', 'Gibit']:
return self.gigabit(value=value)
if unit in ['terabit', 'Tbit', 'Tibit']:
return self.terabit(value=value)
if unit in ['petabit', 'Pbit', 'Pibit']:
return self.petabit(value=value)
if unit in ['exabit', 'Ebit', 'Eibit']:
return self.exabit(value=value)
if unit in ['zettabit', 'Zbit', 'Zibit']:
return self.zettabit(value=value)
if unit in ['yottabit', 'Ybit', 'Yibit']:
return self.yottabit(value=value)
if unit in ['byte', 'B']:
return self.byte(value=value)
if unit in ['kilobyte', 'kB', 'KiB']:
return self.kilobyte(value=value)
if unit in ['megabyte', 'MB', 'MiB', 'Mbyte']:
return self.megabyte(value=value)
if unit in ['gigabyte', 'GB', 'GiB']:
return self.gigabyte(value=value)
if unit in ['terabyte', 'TB', 'TiB']:
return self.terabyte(value=value)
if unit in ['petabyte', 'PB', 'PiB']:
return self.petabyte(value=value)
if unit in ['exabyte', 'EB', 'EiB']:
return self.exabyte(value=value)
if unit in ['zettabyte', 'ZB', 'ZiB']:
return self.zettabyte(value=value)
if unit in ['yottabyte', 'YB', 'YiB']:
return self.yottabyte(value=value)
raise NotImplementedError("unit %s" % unit)
def bit(self, value=None):
if value is None:
return self.value
else:
self.value = float(value)
def kilobit(self, value=None):
if value is None:
return self.bit() / 1024
else:
self.bit(value * 1024)
def megabit(self, value=None):
if value is None:
return self.kilobit() / 1024
else:
self.kilobit(value * 1024)
def gigabit(self, value=None):
if value is None:
return self.megabit() / 1024
else:
self.megabit(value * 1024)
def terabit(self, value=None):
if value is None:
return self.gigabit() / 1024
else:
self.gigabit(value * 1024)
def petabit(self, value=None):
if value is None:
return self.terabit() / 1024
else:
self.terabit(value * 1024)
def exabit(self, value=None):
if value is None:
return self.petabit() / 1024
else:
self.petabit(value * 1024)
def zettabit(self, value=None):
if value is None:
return self.exabit() / 1024
else:
self.exabit(value * 1024)
def yottabit(self, value=None):
if value is None:
return self.zettabit() / 1024
else:
self.zettabit(value * 1024)
def byte(self, value=None):
if value is None:
return self.value / 8
else:
self.value = float(value) * 8
def kilobyte(self, value=None):
if value is None:
return self.byte() / 1024
else:
self.byte(value * 1024)
def megabyte(self, value=None):
if value is None:
return self.kilobyte() / 1024
else:
self.kilobyte(value * 1024)
def gigabyte(self, value=None):
if value is None:
return self.megabyte() / 1024
else:
self.megabyte(value * 1024)
def terabyte(self, value=None):
if value is None:
return self.gigabyte() / 1024
else:
self.gigabyte(value * 1024)
def petabyte(self, value=None):
if value is None:
return self.terabyte() / 1024
else:
self.terabyte(value * 1024)
def exabyte(self, value=None):
if value is None:
return self.petabyte() / 1024
else:
self.petabyte(value * 1024)
def zettabyte(self, value=None):
if value is None:
return self.exabyte() / 1024
else:
self.exabyte(value * 1024)
def yottabyte(self, value=None):
if value is None:
return self.zettabyte() / 1024
else:
self.zettabyte(value * 1024)
class time:
"""
    Store the value in milliseconds so we can convert between things easily
"""
value = None
def __init__(self, value=None, unit=None):
self.do(value=value, unit=unit)
@staticmethod
def convert(value=None, oldUnit=None, newUnit=None):
convertor = time(value=value, unit=oldUnit)
return convertor.get(unit=newUnit)
def set(self, value, unit=None):
return self.do(value=value, unit=unit)
def get(self, unit=None):
return self.do(unit=unit)
def do(self, value=None, unit=None):
if not unit:
return self.millisecond(value=value)
else:
unit = unit.lower()
if unit in ['millisecond', 'milliseconds', 'ms']:
return self.millisecond(value=value)
if unit in ['second', 'seconds', 's']:
return self.second(value=value)
raise NotImplementedError("unit %s" % unit)
def millisecond(self, value=None):
if value is None:
return self.value
else:
self.value = float(value)
def second(self, value=None):
if value is None:
return self.millisecond() / 1000
else:
self.millisecond(value * 1000)
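# Minimal usage sketch (illustrative, not part of the original module): both classes
# normalise to a base unit (bits / milliseconds) and convert by repeated division.
if __name__ == '__main__':
    # 1 GiB expressed in MiB: 1024.0
    print(binary.convert(value=1, oldUnit='GiB', newUnit='MiB'))
    # 1500 ms expressed in seconds: 1.5
    print(time.convert(value=1500, oldUnit='ms', newUnit='s'))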
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Generate a sitemap for osf.io"""
import boto3
import datetime
import gzip
import os
import shutil
import urlparse
import xml
import django
django.setup()
import logging
import tempfile
from framework import sentry
from framework.celery_tasks import app as celery_app
from osf.models import OSFUser, AbstractNode, PreprintService, PreprintProvider
from scripts import utils as script_utils
from website import settings
from website.app import init_app
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
class Sitemap(object):
def __init__(self):
self.sitemap_count = 0
self.errors = 0
self.new_doc()
if not settings.SITEMAP_TO_S3:
self.sitemap_dir = os.path.join(settings.STATIC_FOLDER, 'sitemaps')
if not os.path.exists(self.sitemap_dir):
print('Creating sitemap directory at `{}`'.format(self.sitemap_dir))
os.makedirs(self.sitemap_dir)
else:
self.sitemap_dir = tempfile.mkdtemp()
assert settings.SITEMAP_AWS_BUCKET, 'SITEMAP_AWS_BUCKET must be set for sitemap files to be sent to S3'
assert settings.AWS_ACCESS_KEY_ID, 'AWS_ACCESS_KEY_ID must be set for sitemap files to be sent to S3'
assert settings.AWS_SECRET_ACCESS_KEY, 'AWS_SECRET_ACCESS_KEY must be set for sitemap files to be sent to S3'
self.s3 = boto3.resource(
's3',
aws_access_key_id=settings.AWS_ACCESS_KEY_ID,
aws_secret_access_key=settings.AWS_SECRET_ACCESS_KEY,
region_name='us-east-1'
)
def cleanup(self):
if settings.SITEMAP_TO_S3:
shutil.rmtree(self.sitemap_dir)
def new_doc(self):
"""Creates new sitemap document and resets the url_count."""
self.doc = xml.dom.minidom.Document()
self.urlset = self.doc.createElement('urlset')
self.urlset.setAttribute('xmlns', 'http://www.sitemaps.org/schemas/sitemap/0.9')
self.doc.appendChild(self.urlset)
self.url_count = 0
def add_tag(self, name, text):
"""Adds a tag to the current url"""
tag = self.doc.createElement(name)
self.url.appendChild(tag)
tag_text = self.doc.createTextNode(text)
tag.appendChild(tag_text)
def add_url(self, config):
"""Adds a url to the current urlset"""
if self.url_count >= settings.SITEMAP_URL_MAX:
self.write_doc()
self.new_doc()
self.url = self.doc.createElement('url')
self.urlset.appendChild(self.url)
for k, v in config.iteritems():
self.add_tag(k, v)
self.url_count += 1
def write_doc(self):
"""Writes and gzips each sitemap xml file"""
file_name = 'sitemap_{}.xml'.format(str(self.sitemap_count))
file_path = os.path.join(self.sitemap_dir, file_name)
zip_file_name = file_name + '.gz'
zip_file_path = file_path + '.gz'
print('Writing and gzipping `{}`: url_count = {}'.format(file_path, str(self.url_count)))
xml_str = self.doc.toprettyxml(indent=" ", encoding='utf-8')
with open(file_path, 'wb') as f:
f.write(xml_str)
# Write zipped file
with open(file_path, 'rb') as f_in, gzip.open(zip_file_path, 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
if settings.SITEMAP_TO_S3:
self.ship_to_s3(file_name, file_path)
self.ship_to_s3(zip_file_name, zip_file_path)
self.sitemap_count += 1
def ship_to_s3(self, name, path):
data = open(path, 'rb')
try:
self.s3.Bucket(settings.SITEMAP_AWS_BUCKET).put_object(Key='sitemaps/{}'.format(name), Body=data)
except Exception as e:
logger.info('Error sending data to s3 via boto3')
logger.exception(e)
sentry.log_message('ERROR: Sitemaps could not be uploaded to s3, see `generate_sitemap` logs')
data.close()
def write_sitemap_index(self):
"""Writes the index file for all of the sitemap files"""
doc = xml.dom.minidom.Document()
        sitemap_index = doc.createElement('sitemapindex')
sitemap_index.setAttribute('xmlns', 'http://www.sitemaps.org/schemas/sitemap/0.9')
doc.appendChild(sitemap_index)
for f in range(self.sitemap_count):
sitemap = doc.createElement('sitemap')
sitemap_index.appendChild(sitemap)
loc = doc.createElement('loc')
sitemap.appendChild(loc)
            loc_text = doc.createTextNode(urlparse.urljoin(settings.DOMAIN, 'sitemaps/sitemap_{}.xml'.format(str(f))))
loc.appendChild(loc_text)
datemod = doc.createElement('lastmod')
sitemap.appendChild(datemod)
            datemod_text = doc.createTextNode(datetime.datetime.now().strftime('%Y-%m-%d'))
datemod.appendChild(datemod_text)
print('Writing `sitemap_index.xml`')
file_name = 'sitemap_index.xml'
file_path = os.path.join(self.sitemap_dir, file_name)
xml_str = doc.toprettyxml(indent=" ", encoding='utf-8')
with open(file_path, 'wb') as f:
f.write(xml_str)
if settings.SITEMAP_TO_S3:
self.ship_to_s3(file_name, file_path)
def log_errors(self, obj, obj_id, error):
if not self.errors:
script_utils.add_file_logger(logger, __file__)
self.errors += 1
logger.info('Error on {}, {}:'.format(obj, obj_id))
logger.exception(error)
if self.errors <= 10:
sentry.log_message('Sitemap Error: {}'.format(error))
if self.errors == 1000:
sentry.log_message('ERROR: generate_sitemap stopped execution after reaching 1000 errors. See logs for details.')
raise Exception('Too many errors generating sitemap.')
def generate(self):
print('Generating Sitemap')
# Progress bar
progress = script_utils.Progress(precision=0)
# Static urls
progress.start(len(settings.SITEMAP_STATIC_URLS), 'STAT: ')
for config in settings.SITEMAP_STATIC_URLS:
config['loc'] = urlparse.urljoin(settings.DOMAIN, config['loc'])
self.add_url(config)
progress.increment()
progress.stop()
# User urls
objs = OSFUser.objects.filter(is_active=True).exclude(date_confirmed__isnull=True).values_list('guids___id', flat=True)
progress.start(objs.count(), 'USER: ')
for obj in objs:
try:
config = settings.SITEMAP_USER_CONFIG
config['loc'] = urlparse.urljoin(settings.DOMAIN, '/{}/'.format(obj))
self.add_url(config)
except Exception as e:
self.log_errors('USER', obj, e)
progress.increment()
progress.stop()
# AbstractNode urls (Nodes and Registrations, no Collections)
objs = (AbstractNode.objects
.filter(is_public=True, is_deleted=False, retraction_id__isnull=True)
.exclude(type__in=["osf.collection", "osf.quickfilesnode"])
.values('guids___id', 'modified'))
progress.start(objs.count(), 'NODE: ')
for obj in objs:
try:
config = settings.SITEMAP_NODE_CONFIG
config['loc'] = urlparse.urljoin(settings.DOMAIN, '/{}/'.format(obj['guids___id']))
config['lastmod'] = obj['modified'].strftime('%Y-%m-%d')
self.add_url(config)
except Exception as e:
self.log_errors('NODE', obj['guids___id'], e)
progress.increment()
progress.stop()
# Preprint urls
objs = (PreprintService.objects
.filter(node__isnull=False, node__is_deleted=False, node__is_public=True, is_published=True)
.select_related('node', 'provider', 'node__preprint_file'))
progress.start(objs.count() * 2, 'PREP: ')
osf = PreprintProvider.objects.get(_id='osf')
for obj in objs:
try:
preprint_date = obj.modified.strftime('%Y-%m-%d')
config = settings.SITEMAP_PREPRINT_CONFIG
preprint_url = obj.url
provider = obj.provider
domain = provider.domain if (provider.domain_redirect_enabled and provider.domain) else settings.DOMAIN
if provider == osf:
preprint_url = '/preprints/{}/'.format(obj._id)
config['loc'] = urlparse.urljoin(domain, preprint_url)
config['lastmod'] = preprint_date
self.add_url(config)
# Preprint file urls
try:
file_config = settings.SITEMAP_PREPRINT_FILE_CONFIG
file_config['loc'] = urlparse.urljoin(
obj.provider.domain or settings.DOMAIN,
os.path.join(
obj._id,
'download',
'?format=pdf'
)
)
file_config['lastmod'] = preprint_date
self.add_url(file_config)
except Exception as e:
self.log_errors(obj.primary_file, obj.primary_file._id, e)
except Exception as e:
self.log_errors(obj, obj._id, e)
progress.increment(2)
progress.stop()
# Final write
self.write_doc()
# Create index file
self.write_sitemap_index()
# TODO: once the sitemap is validated add a ping to google with sitemap index file location
# TODO: server side cursor query wrapper might be useful as the index gets larger.
# Sitemap indexable limit check
        if self.sitemap_count > settings.SITEMAP_INDEX_MAX * .90:  # fewer than 10% of the sitemap file limit remaining
sentry.log_message('WARNING: Max sitemaps nearly reached.')
print('Total url_count = {}'.format((self.sitemap_count - 1) * settings.SITEMAP_URL_MAX + self.url_count))
print('Total sitemap_count = {}'.format(str(self.sitemap_count)))
if self.errors:
sentry.log_message('WARNING: Generate sitemap encountered errors. See logs for details.')
print('Total errors = {}'.format(str(self.errors)))
else:
print('No errors')
@celery_app.task(name='scripts.generate_sitemap')
def main():
init_app(routes=False) # Sets the storage backends on all models
sitemap = Sitemap()
sitemap.generate()
sitemap.cleanup()
if __name__ == '__main__':
init_app(set_backends=True, routes=False)
main()
|
|
# Copyright (c) 2006-2007 Open Source Applications Foundation
# Copyright (c) 2008-2009 Mikeal Rogers <mikeal.rogers@gmail.com>
# Copyright (c) 2009 Domen Kozar <domen@dev.si>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
## Note: This file is very trim now because we've broken wsgi_fileserver, wsgi_proxy, wsgi_jsonrpc, and wsgi_xmlrpc
## into their own libraries, which are now distributed on their own and treated as dependencies
import httplib
import copy
import socket
import random
import os
import logging
import threading
import sys
from time import sleep
if not sys.version.startswith('2.4'):
from urlparse import urlparse
else:
# python 2.4
from windmill.tools.urlparse_25 import urlparse
logger = logging.getLogger(__name__)
import windmill
from windmill.server import proxy
from windmill.dep import wsgi_jsonrpc
from windmill.dep import wsgi_xmlrpc
from windmill.dep import wsgi_fileserver
import jsmin
START_DST_PORT = 32000
CURRENT_DST_PORT = [random.randint(32000, 34000)]
def reconstruct_url(environ):
# From WSGI spec, PEP 333
from urllib import quote
url = environ['wsgi.url_scheme']+'://'
if environ.get('HTTP_HOST'): url += environ['HTTP_HOST']
else:
url += environ['SERVER_NAME']
if environ['wsgi.url_scheme'] == 'https':
if environ['SERVER_PORT'] != '443':
url += ':' + environ['SERVER_PORT']
else:
if environ['SERVER_PORT'] != '80':
url += ':' + environ['SERVER_PORT']
url += environ.get('SCRIPT_NAME','')
url += environ.get('PATH_INFO','')
# Fix ;arg=value in url
if url.find('%3B') != -1:
url, arg = url.split('%3B', 1)
url = ';'.join([url, arg.replace('%3D', '=')])
# Stick query string back in
if environ.get('QUERY_STRING'):
url += '?' + environ['QUERY_STRING']
# Stick it in environ for convenience
environ['reconstructed_url'] = url
return url
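# Illustrative sketch (not part of the original module): for a minimal WSGI
# environ the reconstruction above behaves roughly as follows; the host name,
# path and query string below are made-up values used only for this example.
#
#   >>> environ = {'wsgi.url_scheme': 'http', 'HTTP_HOST': 'example.com',
#   ...            'SERVER_NAME': 'example.com', 'SERVER_PORT': '80',
#   ...            'SCRIPT_NAME': '', 'PATH_INFO': '/windmill-serv/index.html',
#   ...            'QUERY_STRING': 'a=1'}
#   >>> reconstruct_url(environ)
#   'http://example.com/windmill-serv/index.html?a=1'
#   >>> environ['reconstructed_url']
#   'http://example.com/windmill-serv/index.html?a=1'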
HTTPConnection = httplib.HTTPConnection
WindmillProxyApplication = proxy.WindmillProxyApplication
WindmillProxyApplication.ConnectionClass = HTTPConnection
add_namespace = None
class WindmillChooserApplication(object):
"""Application to handle choosing the proper application to handle each request"""
def __init__(self, apps, proxy):
self.namespaces = dict([ (arg.ns, arg) for arg in apps ])
self.proxy = proxy
def add_namespace(self, name, application):
"""Add an application to a specific url namespace in windmill"""
self.namespaces[name] = application
def handler(self, environ, start_response):
"""Windmill app chooser"""
sleep(.2)
reconstruct_url(environ)
for key in self.namespaces:
if environ['PATH_INFO'].find('/'+key+'/') != -1:
logger.debug('dispatching request %s to %s' % (environ['reconstructed_url'], key))
return self.namespaces[key](environ, start_response)
logger.debug('dispatching request %s to WindmillProxyApplication' % reconstruct_url(environ))
response = self.proxy(environ, start_response)
return response
def __call__(self, environ, start_response):
response = self.handler(environ, start_response)
for x in response:
yield x
class WindmillCompressor(object):
"""Full JavaScript Compression Library"""
js_file_list = [
('lib', 'firebug', 'pi.js',),
('lib', 'firebug', 'firebug-lite.js',),
('lib', 'json2.js',),
('lib', 'browserdetect.js',),
('wm', 'windmill.js',), # fleegix
('lib', 'getXPath.js',),
('lib', 'elementslib.js',),
('lib', 'js-xpath.js',),
('controller', 'controller.js',),
('controller', 'commands.js',),
('controller', 'asserts.js',),
('controller', 'waits.js',), # fleegix
('controller', 'flex.js',),
('wm', 'registry.js',),
('extensions', 'extensions.js',),
('wm', 'utils.js',), # fleegix
('wm', 'ide', 'ui.js',), # fleegix
('wm', 'ide', 'recorder.js',), # fleegix
('wm', 'ide', 'remote.js',), # fleegix
('wm', 'ide', 'dx.js',), # fleegix
('wm', 'ide', 'ax.js',), # fleegix
('wm', 'ide', 'results.js',),
('wm', 'xhr.js',), # fleegix
('wm', 'metrics.js',),
('wm', 'events.js',),
('wm', 'global.js',), # fleegix
('wm', 'jstest.js',), # fleegix
('wm', 'load.js',),
]
def __init__(self, js_path, enabled=True):
self.enabled = enabled
self.js_path = js_path
self.compressed_windmill = None
if enabled:
self._thread = threading.Thread(target=self.compress_file)
self._thread.start()
def compress_file(self):
compressed_windmill = ''
for filename in self.js_file_list:
compressed_windmill += jsmin.jsmin(open(os.path.join(self.js_path, *filename), 'r').read())
self.compressed_windmill = compressed_windmill
def __call__(self, environ, start_response):
if not self.enabled:
start_response('404 Not Found', [('Content-Type', 'text/plain',), ('Content-Length', '0',)])
return ['']
# if self.compressed_windmill is None:
# self.compressed_windmill = ''
# for filename in self.js_file_list:
# self.compressed_windmill += jsmin.jsmin(open(os.path.join(self.js_path, *filename), 'r').read())
while not self.compressed_windmill:
sleep(.15)
start_response('200 Ok', [('Content-Type', 'application/x-javascript',),
('Content-Length', str(len(self.compressed_windmill)),)])
return [self.compressed_windmill]
def make_windmill_server(http_port=None, js_path=None, compression_enabled=None):
if http_port is None:
http_port = windmill.settings['SERVER_HTTP_PORT']
if js_path is None:
js_path = windmill.settings['JS_PATH']
if compression_enabled is None:
compression_enabled = not windmill.settings['DISABLE_JS_COMPRESS']
# Start up all the convergence objects
import convergence
test_resolution_suite = convergence.TestResolutionSuite()
command_resolution_suite = convergence.CommandResolutionSuite()
queue = convergence.ControllerQueue(command_resolution_suite, test_resolution_suite)
xmlrpc_methods_instance = convergence.XMLRPCMethods(queue, test_resolution_suite, command_resolution_suite)
jsonrpc_methods_instance = convergence.JSONRPCMethods(queue, test_resolution_suite, command_resolution_suite)
# Start up all the wsgi applications
windmill_serv_app = wsgi_fileserver.WSGIFileServerApplication(root_path=js_path, mount_point='/windmill-serv/')
windmill_proxy_app = WindmillProxyApplication()
windmill_xmlrpc_app = wsgi_xmlrpc.WSGIXMLRPCApplication(instance=xmlrpc_methods_instance)
windmill_jsonrpc_app = wsgi_jsonrpc.WSGIJSONRPCApplication(instance=jsonrpc_methods_instance)
windmill_compressor_app = WindmillCompressor(os.path.join(js_path, 'js'), compression_enabled)
windmill_serv_app.ns = 'windmill-serv'
windmill_xmlrpc_app.ns = 'windmill-xmlrpc'
windmill_jsonrpc_app.ns = 'windmill-jsonrpc'
windmill_compressor_app.ns = 'windmill-compressor'
global add_namespace
import https
if windmill.has_ssl:
import certificate
cc = certificate.CertificateCreator()
else:
cc = None
httpd = https.WindmillHTTPServer(('0.0.0.0', http_port),
https.WindmillHTTPRequestHandler, cc,
apps=[windmill_serv_app, windmill_jsonrpc_app,
windmill_xmlrpc_app, windmill_compressor_app],
proxy=https.WindmillHTTPSProxyApplication())
add_namespace = httpd.add_namespace
# Attach some objects to httpd for convenience
httpd.controller_queue = queue
httpd.test_resolution_suite = test_resolution_suite
httpd.command_resolution_suite = command_resolution_suite
httpd.xmlrpc_methods_instance = xmlrpc_methods_instance
httpd.jsonrpc_methods_instance = jsonrpc_methods_instance
return httpd
|
|
#!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test BIP 9 soft forks.
Connect to a single node.
regtest lock-in with 108/144 block signalling
activation after a further 144 blocks
mine 2 blocks and save coinbases for later use
mine 141 blocks to transition from DEFINED to STARTED
mine 100 blocks signalling readiness and 44 not in order to fail to change state this period
mine 108 blocks signalling readiness and 36 blocks not signalling readiness (STARTED->LOCKED_IN)
mine a further 143 blocks (LOCKED_IN)
test that enforcement has not triggered (which triggers ACTIVE)
test that enforcement has triggered
"""
from test_framework.blockstore import BlockStore
from test_framework.test_framework import ComparisonTestFramework
from test_framework.util import *
from test_framework.mininode import CTransaction, NetworkThread
from test_framework.blocktools import create_coinbase, create_block
from test_framework.comptool import TestInstance, TestManager
from test_framework.script import CScript, OP_1NEGATE, OP_CHECKSEQUENCEVERIFY, OP_DROP
from io import BytesIO
import time
import itertools
class BIP9SoftForksTest(ComparisonTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 1
def setup_network(self):
self.nodes = start_nodes(self.num_nodes, self.options.tmpdir,
extra_args=[['-whitelist=127.0.0.1']],
binary=[self.options.testbinary])
def run_test(self):
self.test = TestManager(self, self.options.tmpdir)
self.test.add_all_connections(self.nodes)
NetworkThread().start() # Start up network handling in another thread
self.test.run()
def create_transaction(self, node, coinbase, to_address, amount):
from_txid = node.getblock(coinbase)['tx'][0]
inputs = [{ "txid" : from_txid, "vout" : 0}]
outputs = { to_address : amount }
rawtx = node.createrawtransaction(inputs, outputs)
tx = CTransaction()
f = BytesIO(hex_str_to_bytes(rawtx))
tx.deserialize(f)
tx.nVersion = 2
return tx
def sign_transaction(self, node, tx):
signresult = node.signrawtransaction(bytes_to_hex_str(tx.serialize()))
tx = CTransaction()
f = BytesIO(hex_str_to_bytes(signresult['hex']))
tx.deserialize(f)
return tx
def generate_blocks(self, number, version, test_blocks=None):
test_blocks = [] if test_blocks is None else test_blocks
for i in range(number):
block = create_block(self.tip, create_coinbase(self.height), self.last_block_time + 1)
block.nVersion = version
block.rehash()
block.solve()
test_blocks.append([block, True])
self.last_block_time += 1
self.tip = block.sha256
self.height += 1
return test_blocks
def get_bip9_status(self, key):
info = self.nodes[0].getblockchaininfo()
return info['bip9_softforks'][key]
def test_BIP(self, bipName, activated_version, invalidate, invalidatePostSignature, bitno):
assert_equal(self.get_bip9_status(bipName)['status'], 'defined')
assert_equal(self.get_bip9_status(bipName)['since'], 0)
# generate some coins for later
self.coinbase_blocks = self.nodes[0].generate(2)
self.height = 3 # height of the next block to build
self.tip = int("0x" + self.nodes[0].getbestblockhash(), 0)
self.nodeaddress = self.nodes[0].getnewaddress()
self.last_block_time = int(time.time())
assert_equal(self.get_bip9_status(bipName)['status'], 'defined')
assert_equal(self.get_bip9_status(bipName)['since'], 0)
tmpl = self.nodes[0].getblocktemplate({})
assert(bipName not in tmpl['rules'])
assert(bipName not in tmpl['vbavailable'])
assert_equal(tmpl['vbrequired'], 0)
assert_equal(tmpl['version'], 0x20000000)
# Test 1
# Advance from DEFINED to STARTED
test_blocks = self.generate_blocks(141, 4)
yield TestInstance(test_blocks, sync_every_block=False)
assert_equal(self.get_bip9_status(bipName)['status'], 'started')
assert_equal(self.get_bip9_status(bipName)['since'], 144)
tmpl = self.nodes[0].getblocktemplate({})
assert(bipName not in tmpl['rules'])
assert_equal(tmpl['vbavailable'][bipName], bitno)
assert_equal(tmpl['vbrequired'], 0)
assert(tmpl['version'] & activated_version)
# Test 2
# Fail to achieve LOCKED_IN: only 100 out of 144 blocks signal bit 1
# using a variety of bits to simulate multiple parallel softforks
test_blocks = self.generate_blocks(50, activated_version) # 0x20000001 (signalling ready)
test_blocks = self.generate_blocks(20, 4, test_blocks) # 0x00000004 (not signalling)
test_blocks = self.generate_blocks(50, activated_version, test_blocks) # 0x20000001 (signalling ready)
test_blocks = self.generate_blocks(24, 4, test_blocks) # 0x00000004 (not signalling)
yield TestInstance(test_blocks, sync_every_block=False)
assert_equal(self.get_bip9_status(bipName)['status'], 'started')
assert_equal(self.get_bip9_status(bipName)['since'], 144)
tmpl = self.nodes[0].getblocktemplate({})
assert(bipName not in tmpl['rules'])
assert_equal(tmpl['vbavailable'][bipName], bitno)
assert_equal(tmpl['vbrequired'], 0)
assert(tmpl['version'] & activated_version)
# Test 3
# 108 out of 144 signal bit 1 to achieve LOCKED_IN
# using a variety of bits to simulate multiple parallel softforks
test_blocks = self.generate_blocks(58, activated_version) # 0x20000001 (signalling ready)
test_blocks = self.generate_blocks(26, 4, test_blocks) # 0x00000004 (not signalling)
test_blocks = self.generate_blocks(50, activated_version, test_blocks) # 0x20000001 (signalling ready)
test_blocks = self.generate_blocks(10, 4, test_blocks) # 0x00000004 (not signalling)
yield TestInstance(test_blocks, sync_every_block=False)
assert_equal(self.get_bip9_status(bipName)['status'], 'locked_in')
assert_equal(self.get_bip9_status(bipName)['since'], 432)
tmpl = self.nodes[0].getblocktemplate({})
assert(bipName not in tmpl['rules'])
# Test 4
# 143 more blocks (waiting period-1)
test_blocks = self.generate_blocks(143, 4)
yield TestInstance(test_blocks, sync_every_block=False)
assert_equal(self.get_bip9_status(bipName)['status'], 'locked_in')
assert_equal(self.get_bip9_status(bipName)['since'], 432)
tmpl = self.nodes[0].getblocktemplate({})
assert(bipName not in tmpl['rules'])
# Test 5
# Check that the new rule is enforced
spendtx = self.create_transaction(self.nodes[0],
self.coinbase_blocks[0], self.nodeaddress, 1.0)
invalidate(spendtx)
spendtx = self.sign_transaction(self.nodes[0], spendtx)
spendtx.rehash()
invalidatePostSignature(spendtx)
spendtx.rehash()
block = create_block(self.tip, create_coinbase(self.height), self.last_block_time + 1)
block.nVersion = activated_version
block.vtx.append(spendtx)
block.hashMerkleRoot = block.calc_merkle_root()
block.rehash()
block.solve()
self.last_block_time += 1
self.tip = block.sha256
self.height += 1
yield TestInstance([[block, True]])
assert_equal(self.get_bip9_status(bipName)['status'], 'active')
assert_equal(self.get_bip9_status(bipName)['since'], 576)
tmpl = self.nodes[0].getblocktemplate({})
assert(bipName in tmpl['rules'])
assert(bipName not in tmpl['vbavailable'])
assert_equal(tmpl['vbrequired'], 0)
assert(not (tmpl['version'] & (1 << bitno)))
# Test 6
# Check that the new sequence lock rules are enforced
spendtx = self.create_transaction(self.nodes[0],
self.coinbase_blocks[1], self.nodeaddress, 1.0)
invalidate(spendtx)
spendtx = self.sign_transaction(self.nodes[0], spendtx)
spendtx.rehash()
invalidatePostSignature(spendtx)
spendtx.rehash()
block = create_block(self.tip, create_coinbase(self.height), self.last_block_time + 1)
block.nVersion = 5
block.vtx.append(spendtx)
block.hashMerkleRoot = block.calc_merkle_root()
block.rehash()
block.solve()
self.last_block_time += 1
yield TestInstance([[block, False]])
# Restart all
self.test.block_store.close()
stop_nodes(self.nodes)
shutil.rmtree(self.options.tmpdir)
self.setup_chain()
self.setup_network()
self.test.block_store = BlockStore(self.options.tmpdir)
self.test.clear_all_connections()
self.test.add_all_connections(self.nodes)
NetworkThread().start() # Start up network handling in another thread
def get_tests(self):
for test in itertools.chain(
self.test_BIP('csv', 0x20000001, self.sequence_lock_invalidate, self.donothing, 0),
self.test_BIP('csv', 0x20000001, self.mtp_invalidate, self.donothing, 0),
self.test_BIP('csv', 0x20000001, self.donothing, self.csv_invalidate, 0)
):
yield test
def donothing(self, tx):
return
def csv_invalidate(self, tx):
"""Modify the signature in vin 0 of the tx to fail CSV
Prepends -1 CSV DROP in the scriptSig itself.
"""
tx.vin[0].scriptSig = CScript([OP_1NEGATE, OP_CHECKSEQUENCEVERIFY, OP_DROP] +
list(CScript(tx.vin[0].scriptSig)))
def sequence_lock_invalidate(self, tx):
"""Modify the nSequence to make it fails once sequence lock rule is
activated (high timespan).
"""
tx.vin[0].nSequence = 0x00FFFFFF
tx.nLockTime = 0
def mtp_invalidate(self, tx):
"""Modify the nLockTime to make it fails once MTP rule is activated."""
# Disable Sequence lock, Activate nLockTime
tx.vin[0].nSequence = 0x90FFFFFF
tx.nLockTime = self.last_block_time
if __name__ == '__main__':
BIP9SoftForksTest().main()
|
|
# This file is part of Androguard.
#
# Copyright (c) 2012 Geoffroy Gueguen <geoffroy.gueguen@gmail.com>
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from collections import defaultdict, deque
from androguard.decompiler.dad.instruction import (Variable, ThisParam, Param)
from androguard.decompiler.dad.util import build_path, common_dom
from androguard.decompiler.dad.node import Node
logger = logging.getLogger('dad.control_flow')
class BasicReachDef(object):
def __init__(self, graph, params):
self.g = graph
self.A = defaultdict(set)
self.R = defaultdict(set)
self.DB = defaultdict(set)
self.defs = defaultdict(lambda: defaultdict(set))
self.def_to_loc = defaultdict(set)
# Deal with special entry node
entry = graph.entry
self.A[entry] = set(range(-1, -len(params) - 1, -1))
for loc, param in enumerate(params, 1):
self.defs[entry][param].add(-loc)
self.def_to_loc[param].add(-loc)
# Deal with the other nodes
for node in graph.rpo:
for i, ins in node.get_loc_with_ins():
kill = ins.get_lhs()
if kill is not None:
self.defs[node][kill].add(i)
self.def_to_loc[kill].add(i)
for defs, values in self.defs[node].iteritems():
self.DB[node].add(max(values))
def run(self):
nodes = self.g.rpo[:]
while nodes:
node = nodes.pop(0)
newR = set()
for pred in self.g.all_preds(node):
newR.update(self.A[pred])
if newR and newR != self.R[node]:
self.R[node] = newR
for suc in self.g.all_sucs(node):
if suc not in nodes:
nodes.append(suc)
killed_locs = set()
for reg in self.defs[node]:
killed_locs.update(self.def_to_loc[reg])
A = set()
for loc in self.R[node]:
if loc not in killed_locs:
A.add(loc)
newA = A.union(self.DB[node])
if newA != self.A[node]:
self.A[node] = newA
for suc in self.g.all_sucs(node):
if suc not in nodes:
nodes.append(suc)
def update_chain(graph, loc, du, ud):
'''
Update the DU chain of the instruction located at loc so that nothing
references it anymore and it can be removed.
When an instruction is found to be dead (i.e. it has no side effect, and the
register it defines is not used), we have to update the DU chain of all the
variables that may be used by the dead instruction.
'''
ins = graph.get_ins_from_loc(loc)
for var in ins.get_used_vars():
# We get the definition points of the current variable
for def_loc in set(ud[var, loc]):
# We remove the use of the variable at loc from the DU chain of
# the variable definition located at def_loc
du[var, def_loc].remove(loc)
ud[var, loc].remove(def_loc)
if not ud.get((var, loc)):
ud.pop((var, loc))
# If the DU chain of the defined variable is now empty, this means
# that we may have created a new dead instruction, so we check that
# the instruction has no side effect and we update the DU chain of
# the new dead instruction, and we delete it.
# We also make sure that def_loc is not < 0. This is the case when
# the current variable is a method parameter.
if def_loc >= 0 and not du[var, def_loc]:
du.pop((var, def_loc))
def_ins = graph.get_ins_from_loc(def_loc)
if def_ins.is_call():
def_ins.remove_defined_var()
elif def_ins.has_side_effect():
continue
else:
update_chain(graph, def_loc, du, ud)
graph.remove_ins(def_loc)
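# Illustrative sketch of update_chain() above (hypothetical locations): suppose
# the instruction at loc 7, "v2 = v1 + 3", turns out to be dead.  Removing it
# drops 7 from du[(v1, d)] for each definition d of v1 reaching loc 7; if some
# du[(v1, d)] becomes empty and the defining instruction has no side effect
# (and is not a call), that definition is now dead too and is removed
# recursively through the same function.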
def dead_code_elimination(graph, du, ud):
'''
Run a dead code elimination pass.
Each instruction is checked to see whether it is dead. If it is, we remove it
and update the DU & UD chains of its variables to check for further dead
instructions.
'''
for node in graph.rpo:
for i, ins in node.get_loc_with_ins()[:]:
reg = ins.get_lhs()
if reg is not None:
# If the definition is not used, we check that the instruction
# has no side effect. If there is one and this is a call, we
# remove only the unused defined variable; if it is not a call, this is
# something like an array access, so we do nothing.
# Otherwise (no side effect) we can remove the instruction from
# the node.
if (reg, i) not in du:
if ins.is_call():
ins.remove_defined_var()
elif ins.has_side_effect():
continue
else:
# We can delete the instruction. First update the DU
# chain of the variables used by the instruction to
# `let them know` that they are not used anymore by the
# deleted instruction.
# Then remove the instruction.
update_chain(graph, i, du, ud)
graph.remove_ins(i)
def clear_path_node(graph, reg, loc1, loc2):
for loc in xrange(loc1, loc2):
ins = graph.get_ins_from_loc(loc)
logger.debug(' treat loc: %d, ins: %s', loc, ins)
if ins is None:
continue
logger.debug(' LHS: %s, side_effect: %s', ins.get_lhs(),
ins.has_side_effect())
if ins.get_lhs() == reg or ins.has_side_effect():
return False
return True
def clear_path(graph, reg, loc1, loc2):
'''
Check that the path from loc1 to loc2 is clear.
We have to check that there is no side effect between the two location
points. We also have to check that the variable `reg` is not redefined
along one of the possible paths from loc1 to loc2.
'''
logger.debug('clear_path: reg(%s), loc1(%s), loc2(%s)', reg, loc1, loc2)
node1 = graph.get_node_from_loc(loc1)
node2 = graph.get_node_from_loc(loc2)
# If both instructions are in the same node, we only have to check that the
# path is clear inside the node
if node1 is node2:
return clear_path_node(graph, reg, loc1 + 1, loc2)
# If instructions are in different nodes, we also have to check the nodes
# in the path between the two locations.
if not clear_path_node(graph, reg, loc1 + 1, node1.ins_range[1]):
return False
path = build_path(graph, node1, node2)
for node in path:
locs = node.ins_range
end_loc = loc2 if (locs[0] <= loc2 <= locs[1]) else locs[1]
if not clear_path_node(graph, reg, locs[0], end_loc):
return False
return True
def register_propagation(graph, du, ud):
'''
Propagate the temporary registers between instructions and remove them if
necessary.
We process the nodes of the graph in reverse post order. For each
instruction in the node, we look at the variables that it uses. For each of
these variables we look where it is defined and if we can replace it with
its definition.
We have to be careful about the side effects some instructions may have.
To do the propagation, we use the computed DU and UD chains.
'''
change = True
while change:
change = False
for node in graph.rpo:
for i, ins in node.get_loc_with_ins()[:]:
logger.debug('Treating instruction %d: %s', i, ins)
logger.debug(' Used vars: %s', ins.get_used_vars())
for var in ins.get_used_vars():
# Get the list of locations this variable is defined at.
locs = ud[var, i]
logger.debug(' var %s defined in lines %s', var, locs)
# If the variable is uniquely defined for this instruction
# it may be eligible for propagation.
if len(locs) != 1:
continue
loc = locs[0]
# Method parameters are defined with a location < 0.
if loc < 0:
continue
orig_ins = graph.get_ins_from_loc(loc)
logger.debug(' -> %s', orig_ins)
logger.debug(' -> DU(%s, %s) = %s', var, loc,
du[var, loc])
# We defined some instructions as not propagable.
# Actually this is the case only for array creation
# (new foo[x])
if not orig_ins.is_propagable():
logger.debug(' %s not propagable...', orig_ins)
continue
if not orig_ins.get_rhs().is_const():
# We only try to propagate constants and definition
# points which are used at only one location.
if len(du[var, loc]) > 1:
logger.debug(' => variable has multiple uses'
' and is not const => skip')
continue
# We check that the propagation is safe for all the
# variables that are used in the instruction.
# The propagation is not safe if there is a side effect
# along the path from the definition of the variable
# to its use in the instruction, or if the variable may
# be redefined along this path.
safe = True
orig_ins_used_vars = orig_ins.get_used_vars()
logger.debug(' variables used by the original '
'instruction: %s', orig_ins_used_vars)
for var2 in orig_ins_used_vars:
# loc is the location of the defined variable
# i is the location of the current instruction
if not clear_path(graph, var2, loc, i):
safe = False
break
if not safe:
logger.debug('Propagation NOT SAFE')
continue
# We also check that the instruction itself is
# propagable. If the instruction has a side effect it
# cannot be propagated if there is another side effect
# along the path
if orig_ins.has_side_effect():
if not clear_path(graph, None, loc, i):
logger.debug(' %s has side effect and the '
'path is not clear !', orig_ins)
continue
logger.debug(' => Modification of the instruction!')
logger.debug(' - BEFORE: %s', ins)
ins.replace(var, orig_ins.get_rhs())
logger.debug(' -> AFTER: %s', ins)
logger.debug('\t UD(%s, %s) : %s', var, i, ud[var, i])
ud[var, i].remove(loc)
logger.debug('\t -> %s', ud[var, i])
if len(ud[var, i]) == 0:
ud.pop((var, i))
for var2 in orig_ins.get_used_vars():
# We update the UD chain of the variables we
# propagate. We also have to take the
# definition points of all the variables used
# by the instruction and update the DU chain
# with this information.
old_ud = ud.get((var2, loc))
logger.debug('\t ud(%s, %s) = %s', var2, loc, old_ud)
# If the instruction uses the same variable
# multiple times, the UD chain is None for the
# second and later occurrences because it has already been treated.
if old_ud is None:
continue
ud[var2, i].extend(old_ud)
logger.debug('\t - ud(%s, %s) = %s', var2, i,
ud[var2, i])
ud.pop((var2, loc))
for def_loc in old_ud:
du[var2, def_loc].remove(loc)
du[var2, def_loc].append(i)
new_du = du[var, loc]
logger.debug('\t new_du(%s, %s): %s', var, loc, new_du)
new_du.remove(i)
logger.debug('\t -> %s', new_du)
if not new_du:
logger.debug('\t REMOVING INS %d', loc)
du.pop((var, loc))
graph.remove_ins(loc)
change = True
class DummyNode(Node):
def __init__(self, name):
super(DummyNode, self).__init__(name)
def get_loc_with_ins(self):
return deque()
def __repr__(self):
return '%s-dumnode' % self.name
def __str__(self):
return '%s-dummynode' % self.name
def group_variables(lvars, DU, UD):
treated = defaultdict(list)
variables = defaultdict(list)
for var, loc in sorted(DU):
if var not in lvars:
continue
if loc in treated[var]:
continue
defs = [loc]
uses = set(DU[var, loc])
change = True
while change:
change = False
for use in uses:
ldefs = UD[var, use]
for ldef in ldefs:
if ldef not in defs:
defs.append(ldef)
change = True
for ldef in defs[1:]:
luses = set(DU[var, ldef])
for use in luses:
if use not in uses:
uses.add(use)
change = True
treated[var].extend(defs)
variables[var].append((defs, list(uses)))
return variables
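# Illustrative sketch (hypothetical locations): if variable v2 is defined at
# locations 4 and 9, with the definition at 4 only reaching the use at 6 and
# the definition at 9 only reaching the use at 11, group_variables() returns
#     {v2: [([4], [6]), ([9], [11])]}
# i.e. two independent def/use webs, which split_variables() below turns into
# two distinct versions of v2.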
def split_variables(graph, lvars, DU, UD):
variables = group_variables(lvars, DU, UD)
if lvars:
nb_vars = max(lvars) + 1
else:
nb_vars = 0
for var, versions in variables.iteritems():
nversions = len(versions)
if nversions == 1:
continue
orig_var = lvars.pop(var)
for i, (defs, uses) in enumerate(versions):
if min(defs) < 0: # Param
if orig_var.this:
new_version = ThisParam(var, orig_var.type)
else:
new_version = Param(var, orig_var.type)
lvars[var] = new_version
else:
new_version = Variable(nb_vars)
new_version.type = orig_var.type
lvars[nb_vars] = new_version # add new version to variables
nb_vars += 1
new_version.name = '%d_%d' % (var, i)
for loc in defs:
if loc < 0:
continue
ins = graph.get_ins_from_loc(loc)
ins.replace_lhs(new_version)
DU[(new_version.value(), loc)] = DU.pop((var, loc))
for loc in uses:
ins = graph.get_ins_from_loc(loc)
ins.replace_var(var, new_version)
UD[(new_version.value(), loc)] = UD.pop((var, loc))
def reach_def_analysis(graph, lparams):
# We insert two special nodes : entry & exit, to the graph.
# This is done to simplify the reaching definition analysis.
old_entry = graph.entry
old_exit = graph.exit
new_entry = DummyNode('entry')
graph.add_node(new_entry)
graph.add_edge(new_entry, old_entry)
graph.entry = new_entry
if old_exit:
new_exit = DummyNode('exit')
graph.add_node(new_exit)
graph.add_edge(old_exit, new_exit)
graph.rpo.append(new_exit)
analysis = BasicReachDef(graph, set(lparams))
analysis.run()
# The analysis is done, We can now remove the two special nodes.
graph.remove_node(new_entry)
if old_exit:
graph.remove_node(new_exit)
graph.entry = old_entry
return analysis
def build_def_use(graph, lparams):
'''
Builds the Def-Use and Use-Def (DU/UD) chains of the variables of the
method.
'''
analysis = reach_def_analysis(graph, lparams)
UD = defaultdict(list)
for node in graph.rpo:
for i, ins in node.get_loc_with_ins():
for var in ins.get_used_vars():
# var not in analysis.def_to_loc: test that the register
# exists. It is possible that it is not the case, when a
# variable is of a type which is stored on multiple registers
# e.g. a 'double' stored in v3 is also present in v4, so a call
# to foo(v3) will in fact call foo(v3, v4).
if var not in analysis.def_to_loc:
continue
ldefs = analysis.defs[node]
prior_def = -1
for v in ldefs.get(var, set()):
if prior_def < v < i:
prior_def = v
if prior_def >= 0:
UD[var, i].append(prior_def)
else:
intersect = analysis.def_to_loc[var].intersection(
analysis.R[node])
UD[var, i].extend(intersect)
DU = defaultdict(list)
for var_loc, defs_loc in UD.items():
var, loc = var_loc
for def_loc in defs_loc:
DU[var, def_loc].append(loc)
return UD, DU
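# Illustrative sketch (hypothetical straight-line code, not from a real method):
#     0: v0 = 1
#     1: v1 = v0 + 2
#     2: return v1
# build_def_use() would produce
#     UD = {(v0, 1): [0], (v1, 2): [1]}
#     DU = {(v0, 0): [1], (v1, 1): [2]}
# i.e. UD maps (variable, use location) to the locations that define it, and DU
# is the inverse map from (variable, definition location) to its uses.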
def place_declarations(graph, dvars, du, ud):
idom = graph.immediate_dominators()
for node in graph.post_order():
for loc, ins in node.get_loc_with_ins():
for var in ins.get_used_vars():
if (not isinstance(dvars[var], Variable) or
isinstance(dvars[var], Param)):
continue
var_defs_locs = ud[var, loc]
def_nodes = set()
for def_loc in var_defs_locs:
def_node = graph.get_node_from_loc(def_loc)
# TODO: place declarations in catch if needed
if def_node.in_catch:
continue
def_nodes.add(def_node)
if not def_nodes:
continue
common_dominator = def_nodes.pop()
for def_node in def_nodes:
common_dominator = common_dom(
idom, common_dominator, def_node)
if any(var in range(*common_dominator.ins_range)
for var in ud[var, loc]):
continue
common_dominator.add_variable_declaration(dvars[var])
|
|
"""
a series of tests which assert the behavior of moving objects between
collections and scalar attributes resulting in the expected state w.r.t.
backrefs, add/remove events, etc.
there's a particular focus on collections that have "uselist=False", since in
these cases the re-assignment of an attribute means the previous owner needs an
UPDATE in the database.
"""
from sqlalchemy import testing
from sqlalchemy import text
from sqlalchemy.orm import attributes
from sqlalchemy.orm import backref
from sqlalchemy.orm import relationship
from sqlalchemy.orm import Session
from sqlalchemy.testing import eq_
from sqlalchemy.testing import is_
from sqlalchemy.testing.fixtures import fixture_session
from test.orm import _fixtures
class O2MCollectionTest(_fixtures.FixtureTest):
run_inserts = None
@classmethod
def setup_mappers(cls):
Address, addresses, users, User = (
cls.classes.Address,
cls.tables.addresses,
cls.tables.users,
cls.classes.User,
)
cls.mapper_registry.map_imperatively(Address, addresses)
cls.mapper_registry.map_imperatively(
User,
users,
properties=dict(addresses=relationship(Address, backref="user")),
)
def test_collection_move_hitslazy(self):
User, Address = self.classes.User, self.classes.Address
sess = fixture_session()
a1 = Address(email_address="address1")
a2 = Address(email_address="address2")
a3 = Address(email_address="address3")
u1 = User(name="jack", addresses=[a1, a2, a3])
u2 = User(name="ed")
sess.add_all([u1, a1, a2, a3])
sess.commit()
# u1.addresses
def go():
u2.addresses.append(a1)
u2.addresses.append(a2)
u2.addresses.append(a3)
self.assert_sql_count(testing.db, go, 0)
def test_collection_move_preloaded(self):
User, Address = self.classes.User, self.classes.Address
sess = fixture_session()
a1 = Address(email_address="address1")
u1 = User(name="jack", addresses=[a1])
u2 = User(name="ed")
sess.add_all([u1, u2])
sess.commit() # everything is expired
# load u1.addresses collection
u1.addresses
u2.addresses.append(a1)
# backref fires
assert a1.user is u2
# a1 removed from u1.addresses as of [ticket:2789]
assert a1 not in u1.addresses
assert a1 in u2.addresses
def test_collection_move_notloaded(self):
User, Address = self.classes.User, self.classes.Address
sess = fixture_session()
a1 = Address(email_address="address1")
u1 = User(name="jack", addresses=[a1])
u2 = User(name="ed")
sess.add_all([u1, u2])
sess.commit() # everything is expired
u2.addresses.append(a1)
# backref fires
assert a1.user is u2
# u1.addresses wasn't loaded,
# so when it loads it's correct
assert a1 not in u1.addresses
assert a1 in u2.addresses
def test_collection_move_commitfirst(self):
User, Address = self.classes.User, self.classes.Address
sess = fixture_session()
a1 = Address(email_address="address1")
u1 = User(name="jack", addresses=[a1])
u2 = User(name="ed")
sess.add_all([u1, u2])
sess.commit() # everything is expired
# load u1.addresses collection
u1.addresses
u2.addresses.append(a1)
# backref fires
assert a1.user is u2
# everything expires, no changes in
# u1.addresses, so all is fine
sess.commit()
assert a1 not in u1.addresses
assert a1 in u2.addresses
def test_scalar_move_preloaded(self):
User, Address = self.classes.User, self.classes.Address
sess = fixture_session()
u1 = User(name="jack")
u2 = User(name="ed")
a1 = Address(email_address="a1")
a1.user = u1
sess.add_all([u1, u2, a1])
sess.commit()
# u1.addresses is loaded
u1.addresses
# direct set - the "old" is "fetched",
# but only from the local session - not the
# database, due to the PASSIVE_NO_FETCH flag.
# this is a more fine grained behavior introduced
# in 0.6
a1.user = u2
assert a1 not in u1.addresses
assert a1 in u2.addresses
def test_plain_load_passive(self):
"""test that many-to-one set doesn't load the old value."""
User, Address = self.classes.User, self.classes.Address
sess = fixture_session()
u1 = User(name="jack")
u2 = User(name="ed")
a1 = Address(email_address="a1")
a1.user = u1
sess.add_all([u1, u2, a1])
sess.commit()
# in this case, a lazyload would
# ordinarily occur except for the
# PASSIVE_NO_FETCH flag.
def go():
a1.user = u2
self.assert_sql_count(testing.db, go, 0)
assert a1 not in u1.addresses
assert a1 in u2.addresses
def test_set_none(self):
User, Address = self.classes.User, self.classes.Address
sess = fixture_session()
u1 = User(name="jack")
a1 = Address(email_address="a1")
a1.user = u1
sess.add_all([u1, a1])
sess.commit()
# works for None too
def go():
a1.user = None
self.assert_sql_count(testing.db, go, 0)
assert a1 not in u1.addresses
def test_scalar_move_notloaded(self):
User, Address = self.classes.User, self.classes.Address
sess = fixture_session()
u1 = User(name="jack")
u2 = User(name="ed")
a1 = Address(email_address="a1")
a1.user = u1
sess.add_all([u1, u2, a1])
sess.commit()
# direct set - the fetching of the
# "old" u1 here allows the backref
# to remove it from the addresses collection
a1.user = u2
assert a1 not in u1.addresses
assert a1 in u2.addresses
def test_scalar_move_commitfirst(self):
User, Address = self.classes.User, self.classes.Address
sess = fixture_session()
u1 = User(name="jack")
u2 = User(name="ed")
a1 = Address(email_address="a1")
a1.user = u1
sess.add_all([u1, u2, a1])
sess.commit()
# u1.addresses is loaded
u1.addresses
# direct set - the fetching of the
# "old" u1 here allows the backref
# to remove it from the addresses collection
a1.user = u2
sess.commit()
assert a1 not in u1.addresses
assert a1 in u2.addresses
def test_collection_assignment_mutates_previous_one(self):
User, Address = self.classes.User, self.classes.Address
u1 = User(name="jack")
u2 = User(name="ed")
a1 = Address(email_address="a1")
u1.addresses.append(a1)
is_(a1.user, u1)
u2.addresses = [a1]
eq_(u1.addresses, [])
is_(a1.user, u2)
def test_collection_assignment_mutates_previous_two(self):
User, Address = self.classes.User, self.classes.Address
u1 = User(name="jack")
a1 = Address(email_address="a1")
u1.addresses.append(a1)
is_(a1.user, u1)
u1.addresses = []
is_(a1.user, None)
def test_del_from_collection(self):
User, Address = self.classes.User, self.classes.Address
u1 = User(name="jack")
a1 = Address(email_address="a1")
u1.addresses.append(a1)
is_(a1.user, u1)
del u1.addresses[0]
is_(a1.user, None)
def test_del_from_scalar(self):
User, Address = self.classes.User, self.classes.Address
u1 = User(name="jack")
a1 = Address(email_address="a1")
u1.addresses.append(a1)
is_(a1.user, u1)
del a1.user
assert a1 not in u1.addresses
def test_tuple_assignment_w_reverse(self):
User, Address = self.classes.User, self.classes.Address
u1 = User()
a1 = Address(email_address="1")
a2 = Address(email_address="2")
a3 = Address(email_address="3")
u1.addresses.append(a1)
u1.addresses.append(a2)
u1.addresses.append(a3)
u1.addresses[1], u1.addresses[2] = u1.addresses[2], u1.addresses[1]
assert a3.user is u1
eq_(u1.addresses, [a1, a3, a2])
def test_straight_remove(self):
User, Address = self.classes.User, self.classes.Address
u1 = User()
a1 = Address(email_address="1")
a2 = Address(email_address="2")
a3 = Address(email_address="3")
u1.addresses.append(a1)
u1.addresses.append(a2)
u1.addresses.append(a3)
del u1.addresses[2]
assert a3.user is None
eq_(u1.addresses, [a1, a2])
def test_append_del(self):
User, Address = self.classes.User, self.classes.Address
u1 = User()
a1 = Address(email_address="1")
a2 = Address(email_address="2")
a3 = Address(email_address="3")
u1.addresses.append(a1)
u1.addresses.append(a2)
u1.addresses.append(a3)
u1.addresses.append(a2)
del u1.addresses[1]
assert a2.user is u1
eq_(u1.addresses, [a1, a3, a2])
def test_bulk_replace(self):
User, Address = self.classes.User, self.classes.Address
u1 = User()
a1 = Address(email_address="1")
a2 = Address(email_address="2")
a3 = Address(email_address="3")
u1.addresses.append(a1)
u1.addresses.append(a2)
u1.addresses.append(a3)
u1.addresses.append(a3)
assert a3.user is u1
u1.addresses = [a1, a2, a1]
assert a3.user is None
eq_(u1.addresses, [a1, a2, a1])
@testing.combinations(
(
"legacy_style",
True,
),
(
"new_style",
False,
),
argnames="name, _legacy_inactive_history_style",
id_="sa",
)
class O2OScalarBackrefMoveTest(_fixtures.FixtureTest):
run_inserts = None
@classmethod
def setup_mappers(cls):
Address, addresses, users, User = (
cls.classes.Address,
cls.tables.addresses,
cls.tables.users,
cls.classes.User,
)
cls.mapper_registry.map_imperatively(Address, addresses)
cls.mapper_registry.map_imperatively(
User,
users,
properties={
"address": relationship(
Address,
backref=backref("user"),
uselist=False,
_legacy_inactive_history_style=(
cls._legacy_inactive_history_style
),
)
},
)
def test_collection_move_preloaded(self):
User, Address = self.classes.User, self.classes.Address
sess = fixture_session()
a1 = Address(email_address="address1")
u1 = User(name="jack", address=a1)
u2 = User(name="ed")
sess.add_all([u1, u2])
sess.commit() # everything is expired
# load u1.address
u1.address
# reassign
u2.address = a1
assert u2.address is a1
# backref fires
assert a1.user is u2
# doesn't extend to the previous attribute though.
# flushing at this point means it's anyone's guess.
assert u1.address is a1
assert u2.address is a1
def test_scalar_move_preloaded(self):
User, Address = self.classes.User, self.classes.Address
sess = fixture_session()
a1 = Address(email_address="address1")
a2 = Address(email_address="address1")
u1 = User(name="jack", address=a1)
sess.add_all([u1, a1, a2])
sess.commit() # everything is expired
# load a1.user
a1.user
# reassign
a2.user = u1
# backref fires
assert u1.address is a2
# stays on both sides
assert a1.user is u1
assert a2.user is u1
def test_collection_move_notloaded(self):
User, Address = self.classes.User, self.classes.Address
sess = fixture_session()
a1 = Address(email_address="address1")
u1 = User(name="jack", address=a1)
u2 = User(name="ed")
sess.add_all([u1, u2])
sess.commit() # everything is expired
# reassign
u2.address = a1
assert u2.address is a1
# backref fires
assert a1.user is u2
# u1.address loads now after a flush
assert u1.address is None
assert u2.address is a1
def test_scalar_move_notloaded(self):
User, Address = self.classes.User, self.classes.Address
sess = fixture_session()
a1 = Address(email_address="address1")
a2 = Address(email_address="address1")
u1 = User(name="jack", address=a1)
sess.add_all([u1, a1, a2])
sess.commit() # everything is expired
# reassign
a2.user = u1
# backref fires
assert u1.address is a2
eq_(
a2._sa_instance_state.committed_state["user"],
attributes.PASSIVE_NO_RESULT,
)
if not self._legacy_inactive_history_style:
# autoflush during the a2.user
assert a1.user is None
assert a2.user is u1
else:
# stays on both sides
assert a1.user is u1
assert a2.user is u1
def test_collection_move_commitfirst(self):
User, Address = self.classes.User, self.classes.Address
sess = fixture_session()
a1 = Address(email_address="address1")
u1 = User(name="jack", address=a1)
u2 = User(name="ed")
sess.add_all([u1, u2])
sess.commit() # everything is expired
# load u1.address
u1.address
# reassign
u2.address = a1
assert u2.address is a1
# backref fires
assert a1.user is u2
# the commit cancels out u1.addresses
# being loaded, on next access it's fine.
sess.commit()
assert u1.address is None
assert u2.address is a1
def test_scalar_move_commitfirst(self):
User, Address = self.classes.User, self.classes.Address
sess = fixture_session()
a1 = Address(email_address="address1")
a2 = Address(email_address="address2")
u1 = User(name="jack", address=a1)
sess.add_all([u1, a1, a2])
sess.commit() # everything is expired
# load
assert a1.user is u1
# reassign
a2.user = u1
# backref fires
assert u1.address is a2
# didn't work this way though
assert a1.user is u1
# moves appropriately after commit
sess.commit()
assert u1.address is a2
assert a1.user is None
assert a2.user is u1
@testing.combinations(
(
"legacy_style",
True,
),
(
"new_style",
False,
),
argnames="name, _legacy_inactive_history_style",
id_="sa",
)
class O2OScalarMoveTest(_fixtures.FixtureTest):
run_inserts = None
@classmethod
def setup_mappers(cls):
Address, addresses, users, User = (
cls.classes.Address,
cls.tables.addresses,
cls.tables.users,
cls.classes.User,
)
cls.mapper_registry.map_imperatively(Address, addresses)
cls.mapper_registry.map_imperatively(
User,
users,
properties={
"address": relationship(
Address,
uselist=False,
_legacy_inactive_history_style=(
cls._legacy_inactive_history_style
),
)
},
)
def test_collection_move_commitfirst(self):
User, Address = self.classes.User, self.classes.Address
sess = fixture_session()
a1 = Address(email_address="address1")
u1 = User(name="jack", address=a1)
u2 = User(name="ed")
sess.add_all([u1, u2])
sess.commit() # everything is expired
# load u1.address
u1.address
# reassign
u2.address = a1
assert u2.address is a1
# the commit cancels out u1.addresses
# being loaded, on next access it's fine.
sess.commit()
assert u1.address is None
assert u2.address is a1
class O2OScalarOrphanTest(_fixtures.FixtureTest):
run_inserts = None
@classmethod
def setup_mappers(cls):
Address, addresses, users, User = (
cls.classes.Address,
cls.tables.addresses,
cls.tables.users,
cls.classes.User,
)
cls.mapper_registry.map_imperatively(Address, addresses)
cls.mapper_registry.map_imperatively(
User,
users,
properties={
"address": relationship(
Address,
uselist=False,
backref=backref(
"user",
single_parent=True,
cascade="all, delete-orphan",
),
)
},
)
def test_m2o_event(self):
User, Address = self.classes.User, self.classes.Address
sess = fixture_session()
a1 = Address(email_address="address1")
u1 = User(name="jack", address=a1)
sess.add(u1)
sess.commit()
sess.expunge(u1)
u2 = User(name="ed")
# the _SingleParent extension sets the backref get to "active" !
# u1 gets loaded and deleted
u2.address = a1
sess.commit()
assert sess.query(User).count() == 1
class M2MCollectionMoveTest(_fixtures.FixtureTest):
run_inserts = None
@classmethod
def setup_mappers(cls):
keywords, items, item_keywords, Keyword, Item = (
cls.tables.keywords,
cls.tables.items,
cls.tables.item_keywords,
cls.classes.Keyword,
cls.classes.Item,
)
cls.mapper_registry.map_imperatively(
Item,
items,
properties={
"keywords": relationship(
Keyword, secondary=item_keywords, backref="items"
)
},
)
cls.mapper_registry.map_imperatively(Keyword, keywords)
def test_add_remove_pending_backref(self):
"""test that pending doesn't add an item that's not a net add."""
Item, Keyword = (self.classes.Item, self.classes.Keyword)
session = fixture_session(autoflush=False)
i1 = Item(description="i1")
session.add(i1)
session.commit()
session.expire(i1, ["keywords"])
k1 = Keyword(name="k1")
k1.items.append(i1)
k1.items.remove(i1)
eq_(i1.keywords, [])
def test_remove_add_pending_backref(self):
"""test that pending doesn't remove an item that's not a net remove."""
Item, Keyword = (self.classes.Item, self.classes.Keyword)
session = fixture_session(autoflush=False)
k1 = Keyword(name="k1")
i1 = Item(description="i1", keywords=[k1])
session.add(i1)
session.commit()
session.expire(i1, ["keywords"])
k1.items.remove(i1)
k1.items.append(i1)
eq_(i1.keywords, [k1])
def test_pending_combines_with_flushed(self):
"""test the combination of unflushed pending + lazy loaded from DB."""
Item, Keyword = (self.classes.Item, self.classes.Keyword)
session = Session(testing.db, autoflush=False)
k1 = Keyword(name="k1")
k2 = Keyword(name="k2")
i1 = Item(description="i1", keywords=[k1])
session.add(i1)
session.add(k2)
session.commit()
k2.items.append(i1)
# the pending
# list is still here.
eq_(
set(
attributes.instance_state(i1)
._pending_mutations["keywords"]
.added_items
),
set([k2]),
)
# because autoflush is off, k2 is still
# coming in from pending
eq_(i1.keywords, [k1, k2])
# prove it didn't flush
eq_(session.scalar(text("select count(*) from item_keywords")), 1)
# the pending collection was removed
assert (
"keywords" not in attributes.instance_state(i1)._pending_mutations
)
def test_duplicate_adds(self):
Item, Keyword = (self.classes.Item, self.classes.Keyword)
session = Session(testing.db, autoflush=False)
k1 = Keyword(name="k1")
i1 = Item(description="i1", keywords=[k1])
session.add(i1)
session.commit()
k1.items.append(i1)
eq_(i1.keywords, [k1, k1])
session.expire(i1, ["keywords"])
k1.items.append(i1)
eq_(i1.keywords, [k1, k1])
session.expire(i1, ["keywords"])
k1.items.append(i1)
eq_(i1.keywords, [k1, k1])
eq_(k1.items, [i1, i1, i1, i1])
session.commit()
eq_(k1.items, [i1])
def test_bulk_replace(self):
Item, Keyword = (self.classes.Item, self.classes.Keyword)
k1 = Keyword(name="k1")
k2 = Keyword(name="k2")
k3 = Keyword(name="k3")
i1 = Item(description="i1", keywords=[k1, k2])
i2 = Item(description="i2", keywords=[k3])
i1.keywords = [k2, k3]
assert i1 in k3.items
assert i2 in k3.items
assert i1 not in k1.items
class M2MScalarMoveTest(_fixtures.FixtureTest):
run_inserts = None
@classmethod
def setup_mappers(cls):
keywords, items, item_keywords, Keyword, Item = (
cls.tables.keywords,
cls.tables.items,
cls.tables.item_keywords,
cls.classes.Keyword,
cls.classes.Item,
)
cls.mapper_registry.map_imperatively(
Item,
items,
properties={
"keyword": relationship(
Keyword,
secondary=item_keywords,
uselist=False,
backref=backref("item", uselist=False),
)
},
)
cls.mapper_registry.map_imperatively(Keyword, keywords)
def test_collection_move_preloaded(self):
Item, Keyword = self.classes.Item, self.classes.Keyword
sess = fixture_session()
k1 = Keyword(name="k1")
i1 = Item(description="i1", keyword=k1)
i2 = Item(description="i2")
sess.add_all([i1, i2, k1])
sess.commit() # everything is expired
# load i1.keyword
assert i1.keyword is k1
i2.keyword = k1
assert k1.item is i2
# nothing happens.
assert i1.keyword is k1
assert i2.keyword is k1
def test_collection_move_notloaded(self):
Item, Keyword = self.classes.Item, self.classes.Keyword
sess = fixture_session()
k1 = Keyword(name="k1")
i1 = Item(description="i1", keyword=k1)
i2 = Item(description="i2")
sess.add_all([i1, i2, k1])
sess.commit() # everything is expired
i2.keyword = k1
assert k1.item is i2
assert i1.keyword is None
assert i2.keyword is k1
def test_collection_move_commit(self):
Item, Keyword = self.classes.Item, self.classes.Keyword
sess = fixture_session()
k1 = Keyword(name="k1")
i1 = Item(description="i1", keyword=k1)
i2 = Item(description="i2")
sess.add_all([i1, i2, k1])
sess.commit() # everything is expired
# load i1.keyword
assert i1.keyword is k1
i2.keyword = k1
assert k1.item is i2
sess.commit()
assert i1.keyword is None
assert i2.keyword is k1
class O2MStaleBackrefTest(_fixtures.FixtureTest):
run_inserts = None
@classmethod
def setup_mappers(cls):
Address, addresses, users, User = (
cls.classes.Address,
cls.tables.addresses,
cls.tables.users,
cls.classes.User,
)
cls.mapper_registry.map_imperatively(Address, addresses)
cls.mapper_registry.map_imperatively(
User,
users,
properties=dict(addresses=relationship(Address, backref="user")),
)
def test_backref_pop_m2o(self):
User, Address = self.classes.User, self.classes.Address
u1 = User()
u2 = User()
a1 = Address()
u1.addresses.append(a1)
u2.addresses.append(a1)
# a1 removed from u1.addresses as of [ticket:2789]
assert a1 not in u1.addresses
assert a1.user is u2
assert a1 in u2.addresses
class M2MStaleBackrefTest(_fixtures.FixtureTest):
run_inserts = None
@classmethod
def setup_mappers(cls):
keywords, items, item_keywords, Keyword, Item = (
cls.tables.keywords,
cls.tables.items,
cls.tables.item_keywords,
cls.classes.Keyword,
cls.classes.Item,
)
cls.mapper_registry.map_imperatively(
Item,
items,
properties={
"keywords": relationship(
Keyword, secondary=item_keywords, backref="items"
)
},
)
cls.mapper_registry.map_imperatively(Keyword, keywords)
def test_backref_pop_m2m(self):
Keyword, Item = self.classes.Keyword, self.classes.Item
k1 = Keyword()
k2 = Keyword()
i1 = Item()
k1.items.append(i1)
k2.items.append(i1)
k2.items.append(i1)
i1.keywords = []
k2.items.remove(i1)
assert len(k2.items) == 0
|
|
import logging
import time
import Queue
import threading
import sys
from Tkinter import *
from pystates import StateMachine
if sys.platform=='linux2':
from neopixel import *
import math
import random
class NullClass:
def set(self,state):
pass
class QuickChange:
def __init__(self, handle_pixel):
self.set_next(self.color_wipe_blue)
self.curr_func = self.color_wipe_blue
self.wait_ms = 50
self.handle_pixel = handle_pixel
def main(self):
while True:
self.next_func()
self.curr_func = self.next_func
def set_next(self, next_func):
self.next_func = next_func
#print(next_func)
def set_strip(self, strip):
self.strip = strip
def int_color(self, color):
"""returns color to a truple of integers"""
return (int(bin(color)[2:].zfill(24)[:-16],2), int(bin(color)[2:].zfill(24)[-16:-8],2), int(bin(color)[2:].zfill(24)[-8:],2))
def Color(self,red, green, blue):
"""Convert the provided red, green, blue color to a 24-bit color value.
Each color component should be a value 0-255 where 0 is the lowest intensity
and 255 is the highest intensity.
"""
return (red << 16) | (green << 8) | blue
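# Illustrative sketch (not part of the original source), assuming qc is a
# QuickChange instance: Color() and int_color() are inverses for 24-bit values.
#   >>> qc.Color(255, 0, 0)
#   16711680                  # == 0xFF0000
#   >>> qc.int_color(0xFF0000)
#   (255, 0, 0)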
def wheel(self, pos):
"""Generate rainbow colors across 0-255 positions."""
if pos < 85:
return self.Color(pos * 3, 255 - pos * 3, 0)
elif pos < 170:
pos -= 85
return self.Color(255 - pos * 3, 0, pos * 3)
else:
pos -= 170
return self.Color(0, pos * 3, 255 - pos * 3)
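# Illustrative values for wheel() above, as (red, green, blue):
#   pos 0   -> (0, 255, 0)    green
#   pos 84  -> (252, 3, 0)    almost pure red
#   pos 85  -> (255, 0, 0)    red
#   pos 170 -> (0, 0, 255)    blue
#   pos 255 -> (0, 255, 0)    back to green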
#----------------------------------------------------------------------------------------------------
def color_wipe_to_handle_green(self):
"""Wipe color across display a pixel at a time."""
self.color_wipe_to_handle(self.Color(0,255,0))
def theatre_chase_white(self):
"""Movie theatre light style chaser animation."""
self.theatre_chase(self.Color(255,255,255))
def flash_colors_red_black(self):
"""Cycle between two colors"""
self.flash_colors(self.Color(255,0,0), self.Color(128,0,0))
def color_wipe_red(self):
"""Wipe color across display a pixel at a time."""
self.color_wipe(self.Color(255, 0, 0))
def color_wipe_green(self):
"""Wipe color across display a pixel at a time."""
self.color_wipe(self.Color(0, 255, 0))
def color_wipe_blue(self):
"""Wipe color across display a pixel at a time."""
self.color_wipe(self.Color(0, 0, 255))
def set_color_green(self):
"""Sets the color of all pixels."""
self.set_strip_color(self.Color(0,255,0))
def fade_green_to_red(self):
"""fade from one color to another"""
self.fade_time = self.stuck_open_timeout
self.fade(self.Color(0,255,0),self.Color(255,0,0))
def rainbow(self):
"""Draw rainbow that fades across all pixels at once."""
for j in range(256):
for i in range(self.strip.numPixels()):
self.strip.setPixelColor(i, self.wheel((i+j) & 255))
self.strip.show()
time.sleep(self.wait_ms/1000.0)
if self.next_func != self.curr_func:
break
def rainbow_cycle(self):
"""Draw rainbow that uniformly distributes itself across all pixels."""
for j in range(256*5):
for i in range(self.strip.numPixels()):
self.strip.setPixelColor(i, self.wheel(((i * 256 / self.strip.numPixels()) + j) & 255))
self.strip.show()
time.sleep(self.wait_ms/1000.0)
if self.next_func != self.curr_func:
break
def color_wipe_to_handle(self, color):
"""Wipe color across display a pixel at a time."""
handle = self.handle_pixel
pointing = 10
# first set every pixel to the requested color
for i in range(self.strip.numPixels()):
self.strip.setPixelColor(i, color)
if self.next_func != self.curr_func:
break
for i in range(pointing):
self.strip.setPixelColor(handle - i, self.Color(0,0,0))
self.strip.setPixelColor(handle + i, self.Color(0,0,0))
if self.next_func != self.curr_func:
break
self.strip.show()
for i in range(pointing -1, -1, -1):
self.strip.setPixelColor(handle - i, color)
self.strip.setPixelColor(handle + i, color)
self.strip.show()
time.sleep(self.wait_ms/1000.0)
if self.next_func != self.curr_func:
break
def flash_colors(self, color1, color2):
"""Cycle between two colors"""
iterations=10
for j in range(iterations):
for q in range(2):
for i in range(0, self.strip.numPixels(), 2):
if q:
self.strip.setPixelColor(i, color1)
else:
self.strip.setPixelColor(i, color2)
for i in range(1, self.strip.numPixels(), 2):
if q:
self.strip.setPixelColor(i, color2)
else:
self.strip.setPixelColor(i, color1)
self.strip.show()
time.sleep(self.wait_ms/1000.0)
if self.next_func != self.curr_func:
break
if self.next_func != self.curr_func:
break
def theatre_chase(self, color):
"""Movie theatre light style chaser animation."""
iterations=10
for j in range(iterations):
for q in range(3):
for i in range(0, self.strip.numPixels(), 3):
self.strip.setPixelColor(i+q, color)
self.strip.show()
time.sleep(self.wait_ms/1000.0)
if self.next_func != self.curr_func:
break
for i in range(0, self.strip.numPixels(), 3):
self.strip.setPixelColor(i+q, 0)
if self.next_func != self.curr_func:
break
def color_wipe(self, color):
"""Wipe color across display a pixel at a time."""
for i in range(self.strip.numPixels()):
self.strip.setPixelColor(i, color)
self.strip.show()
time.sleep(self.wait_ms/1000.0)
if self.next_func != self.curr_func:
break
def set_strip_color(self, color):
"""Sets the color of all pixels."""
for i in range(self.strip.numPixels()):
self.strip.setPixelColor(i, color)
self.strip.show()
if self.next_func != self.curr_func:
break
def fade(self, color1, color2):
"""fade from one color to another"""
def interval(index):
return abs((x[index]-y[index])/tot_frames) or 1
x = self.int_color(color1)
y = self.int_color(color2)
temp = []
time_0 = time.time()
self.set_strip_color(self.Color(*x))
frametime = time.time() - time_0
tot_frames = float((self.fade_time or 15)/(frametime + self.wait_ms/1000.0))
i = [interval(0), interval(1), interval(2)]
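# e.g. fading from green (0, 255, 0) to red (255, 0, 0) with tot_frames of
# roughly 150 steps the red channel up and the green channel down by about
# 255/150 (~1.7) per frame, while the unchanged blue channel stays at 0.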
#print("x:{0} |y:{1} |frametime:{2} |tot_frames:{3} |i:{4}".format(x,y,frametime,tot_frames,i))
#print(0,x)
for t in range(int(tot_frames)):
for j in range(3):
if x[j]>y[j]:
z = x[j] - i[j]
elif x[j]<y[j]:
z = x[j] + i[j]
elif x[j]==y[j]:
z = x[j]
if z >255 or z<0:
temp.append(y[j])
else:
temp.append(z)
x = temp
temp = []
self.set_strip_color(self.Color(*[int(round(n)) for n in x]))
if self.next_func != self.curr_func:
return
time.sleep(self.wait_ms/1000.0)#----------why is this necessary?--BK
self.set_strip_color(color2)
while True:
if self.next_func != self.curr_func:
return
time.sleep(self.wait_ms/1000.0)
class BlinkenLights(StateMachine):
def VALID_KEY(self):
self.state.set("VALID_KEY")
self.qc.set_next(self.qc.color_wipe_to_handle_green)
while True:
ev = yield
if ev['event'] == "DOOR_OPENED":
self.transition(self.DOOR_OPENED)
if ev['event'] == "DOOR_CLOSED":
self.transition(self.WAITING)
if ev['event'] == "MAIN_DOOR_CLOSED_LOCKED":
self.transition(self.WAITING)
def INVALID_KEY(self):
self.state.set("INVALID_KEY")
self.qc.set_next(self.qc.flash_colors_red_black)
while True:
ev = yield
if self.duration() > 2:
self.transition(self.WAITING)
def MAIN_DOOR_FORCED_OPEN(self):
self.state.set("MAIN_DOOR_FORCED_OPEN")
self.qc.set_next(self.qc.flash_colors_red_black)
while True:
ev = yield
if ev['event'] == "VALID_KEY":
self.transition(self.VALID_KEY)
def DOOR_OPENED(self):
self.state.set("DOOR_OPENED")
self.qc.set_next(self.qc.fade_green_to_red)
while True:
ev = yield
if ev['event'] == "DOOR_CLOSED":
self.transition(self.WAITING)
if ev['event'] == "MAIN_DOOR_STUCK_OPEN":
self.transition(self.MAIN_DOOR_STUCK_OPEN)
def MAIN_DOOR_STUCK_OPEN(self):
self.state.set("DOOR_OPENED")
self.qc.set_next(self.qc.flash_colors_red_black)
while True:
ev = yield
if ev['event'] == "DOOR_CLOSED":
self.transition(self.WAITING)
def WAITING(self):
self.state.set("WAITING")
self.qc.set_next(self.qc.rainbow_cycle)
while True:
ev = yield
if ev['event'] == "VALID_KEY":
self.transition(self.VALID_KEY)
if ev['event'] == "INVALID_KEY":
self.transition(self.INVALID_KEY)
if ev['event'] == "MAIN_DOOR_FORCED_OPEN":
self.transition(self.MAIN_DOOR_FORCED_OPEN)
"""if ev['event'] == "VALID_KEY":
self.transition(self.VALID_KEY)
if ev['event'] == "VALID_KEY":
self.transition(self.VALID_KEY)
if ev['event'] == "VALID_KEY":
self.transition(self.VALID_KEY)
if ev['event'] == "VALID_KEY":
self.transition(self.VALID_KEY)"""
def config_gui(self, root):
# Set up the GUI part
frame = LabelFrame(root, text="STATE", padx=5, pady=5)
frame.pack(fill=X)
self.state = StringVar()
self.state.set("[STATE]")
label = Label(frame, textvariable = self.state)
label.pack(side=LEFT)
self.info_frame = frame
frame2 = LabelFrame(root, text=self.name, padx=5, pady=5)
frame2.pack(fill=X)
self.wait_ms = 50
self.next_func = 1
self.curr_func = 1
self.strip = MockStrip(self.led_count, frame2)
def setup(self, out_queue, name, led_count, led_pin, led_freq_hz, led_dma, led_invert, led_brightness, handle_pixel, stuck_open_timeout):
self.log = logging.getLogger("BlinkenLights")
self.out_queue = out_queue
self.name = name
# the pixel closest to the handle
self.handle_pixel = int(handle_pixel)
self.led_count=int(led_count) # Number of LED pixels.
self.led_pin = int(led_pin) # GPIO pin connected to the pixels (must support PWM!).
self.led_freq_hz = int(led_freq_hz) # LED signal frequency in hertz (usually 800khz)
self.led_dma = int(led_dma) # DMA channel to use for generating signal (try 5)
self.led_brightness = int(led_brightness) # Set to 0 for darkest and 255 for brightest
self.led_invert = led_invert.lower() in ("yes", "true", "t", "1") # True to invert the signal (when using NPN transistor level shift)
self.stuck_open_timeout = int(stuck_open_timeout)
self.state = NullClass()
# Create NeoPixel object with appropriate configuration.
if sys.platform=='linux2':
self.strip = Adafruit_NeoPixel(self.led_count, self.led_pin, self.led_freq_hz, self.led_dma, self.led_invert, self.led_brightness)
#self.strip = Adafruit_NeoPixel(self.led_count, 18, 800000, 5, False, 255)
""" Perform initialization here, detect the current state and send that
to the super class start.
"""
def start(self):
# Intialize the library (must be called once before other functions).
self.strip.begin()
self.log.debug("start called")
self.qc = QuickChange(self.handle_pixel)
self.qc.set_strip(self.strip)
self.qc.stuck_open_timeout = self.stuck_open_timeout
self.thread = threading.Thread(target=self.qc.main)
self.thread.setDaemon(True)
self.thread.start()
self.log.debug("thread started")
super(BlinkenLights, self).start(self.WAITING)
class MockStrip:
def __init__(self, led_count, frame):
self.led_count = led_count
self.rand = random.Random()
self.pending = []
self.labels = []
for i in range(led_count):
self.pending.append("#000000")
for i in range(led_count):
lbl = Label(frame, text=str(i), width = 2)
self.labels.append(lbl)
lbl.pack(side=LEFT, expand = True, fill = X)
lbl.configure(bg="#000000")
self.show()
def show(self):
i=0
for label in self.labels:
#print "i is " + str(i)
#print "color is " + self.pending[i]
label.configure(bg=self.pending[i])
i=i+1
def begin(self):
pass
def setPixelColor(self, pixel, color):
self.pending[pixel]=self.tk_color(color)
def getPixelColor(self, pixel):
return self.pending[pixel]
def tk_color(self,color):
red=(color & 0xff0000) >> 16
green=(color & 0x00ff00) >> 8
blue=(color & 0x0000ff)
newcolor='#%02X%02X%02X' % (red,green,blue)
return newcolor
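        # Illustrative example: tk_color(0xFF8000) -> '#FF8000'
        # (red = 255, green = 128, blue = 0).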
def numPixels(self):
return self.led_count
def main():
out_queue = Queue.Queue()
logging.basicConfig(level=logging.DEBUG)
name = "BLINKENLIGHTS"
machine = BlinkenLights(name=name)
machine.setup(out_queue, name=name, led_count=16, led_pin=18, led_freq_hz=800000, led_dma=5, led_invert="False", led_brightness=255, handle_pixel = 8, stuck_open_timeout = 15)
machine.start()
machine.send_message({"event": "VALID_KEY"})
time.sleep(15)
if __name__=='__main__':
main()
"""Test AdaNet summary single graph implementation for TF 2.
Copyright 2019 The AdaNet Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import struct
from absl.testing import parameterized
from adanet import tf_compat
from adanet.core import testing_utils as tu
from adanet.core.summary import _ScopedSummaryV2
import tensorflow.compat.v2 as tf
# pylint: disable=g-direct-tensorflow-import
from tensorflow.python.eager import context
from tensorflow.python.framework import test_util
# pylint: enable=g-direct-tensorflow-import
def simple_value(summary_value):
"""Returns the scalar parsed from the summary proto tensor_value bytes."""
return struct.unpack("<f", summary_value.tensor.tensor_content)[0]
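  # "<f" asks struct for one little-endian 32-bit float; e.g. (illustrative)
  # struct.unpack("<f", struct.pack("<f", 3.0))[0] == 3.0. Here the four bytes
  # come from the serialized summary tensor's tensor_content.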
class ScopedSummaryV2Test(tu.AdanetTestCase):
def read_single_event_from_eventfile(self, summary):
dir_ = self.test_subdirectory
if summary.namespace:
dir_ = os.path.join(dir_, summary.namespace)
if summary.scope:
dir_ = os.path.join(dir_, summary.scope)
event_files = sorted(tf.io.gfile.glob(os.path.join(dir_, "*.v2")))
events = list(tf.compat.v1.train.summary_iterator(event_files[-1]))
# Expect a boilerplate event for the file_version, then the summary one.
self.assertGreaterEqual(len(events), 2)
return events[1:]
def write_summaries(self, summary):
summary_ops = []
writer = tf.summary.create_file_writer(summary.logdir)
with writer.as_default():
for summary_fn, tensor in summary.summary_tuples():
summary_ops.append(summary_fn(tensor, step=10))
writer_flush = writer.flush()
self.evaluate([tf.compat.v1.global_variables_initializer(), writer.init()])
self.evaluate(summary_ops)
self.evaluate(writer_flush)
@parameterized.named_parameters(
{
"testcase_name": "without_scope",
"scope": None,
}, {
"testcase_name": "with_scope",
"scope": "with_scope",
})
@tf_compat.skip_for_tf1
@test_util.run_in_graph_and_eager_modes
def test_scope(self, scope):
scoped_summary = _ScopedSummaryV2(self.test_subdirectory, scope=scope)
self.assertEqual(scope, scoped_summary.scope)
@parameterized.named_parameters(
{
"testcase_name": "without_scope",
"scope": None,
}, {
"testcase_name": "with_scope",
"scope": "with_scope",
}, {
"testcase_name": "skip_summary",
"scope": None,
"skip_summary": True,
})
@tf_compat.skip_for_tf1
@test_util.run_in_graph_and_eager_modes
def test_scalar_summary(self, scope, skip_summary=False):
with context.graph_mode():
scoped_summary = _ScopedSummaryV2(
self.test_subdirectory, scope=scope, skip_summary=skip_summary)
i = tf.constant(3)
with tf.name_scope("outer"):
scoped_summary.scalar("inner", i)
self.write_summaries(scoped_summary)
if skip_summary:
return
events = self.read_single_event_from_eventfile(scoped_summary)
values = events[0].summary.value
self.assertLen(values, 1)
self.assertEqual(values[0].tag, "outer/inner")
self.assertEqual(simple_value(values[0]), 3.0)
@parameterized.named_parameters(
{
"testcase_name": "without_scope",
"scope": None,
}, {
"testcase_name": "with_scope",
"scope": "with_scope",
})
@tf_compat.skip_for_tf1
@test_util.run_in_graph_and_eager_modes
def test_summarizing_variable(self, scope):
scoped_summary = _ScopedSummaryV2(self.test_subdirectory, scope=scope)
c = tf.constant(42.0)
v = tf.Variable(c)
scoped_summary.scalar("summary", v)
self.write_summaries(scoped_summary)
events = self.read_single_event_from_eventfile(scoped_summary)
values = events[0].summary.value
self.assertLen(values, 1)
value = values[0]
self.assertEqual(value.tag, "summary")
self.assertEqual(simple_value(value), 42.0)
@parameterized.named_parameters(
{
"testcase_name": "without_scope",
"scope": None,
}, {
"testcase_name": "with_scope",
"scope": "with_scope",
}, {
"testcase_name": "skip_summary",
"scope": None,
"skip_summary": True,
})
@tf_compat.skip_for_tf1
@test_util.run_in_graph_and_eager_modes
def test_image_summary(self, scope, skip_summary=False):
with context.graph_mode():
scoped_summary = _ScopedSummaryV2(
self.test_subdirectory, scope=scope, skip_summary=skip_summary)
i = tf.ones((5, 4, 4, 3))
with tf.name_scope("outer"):
scoped_summary.image("inner", i, max_outputs=3)
self.write_summaries(scoped_summary)
if skip_summary:
return
events = self.read_single_event_from_eventfile(scoped_summary)
values = events[0].summary.value
self.assertLen(values, 1)
self.assertEqual("outer/inner", values[0].tag)
@parameterized.named_parameters(
{
"testcase_name": "without_scope",
"scope": None,
}, {
"testcase_name": "with_scope",
"scope": "with_scope",
}, {
"testcase_name": "skip_summary",
"scope": None,
"skip_summary": True,
})
@tf_compat.skip_for_tf1
@test_util.run_in_graph_and_eager_modes
def test_histogram_summary(self, scope, skip_summary=False):
with context.graph_mode():
scoped_summary = _ScopedSummaryV2(
self.test_subdirectory, scope=scope, skip_summary=skip_summary)
i = tf.ones((5, 4, 4, 3))
with tf.name_scope("outer"):
scoped_summary.histogram("inner", i)
self.write_summaries(scoped_summary)
if skip_summary:
return
events = self.read_single_event_from_eventfile(scoped_summary)
values = events[0].summary.value
self.assertLen(values, 1)
self.assertEqual("outer/inner", values[0].tag)
@parameterized.named_parameters(
{
"testcase_name": "without_scope",
"scope": None,
}, {
"testcase_name": "with_scope",
"scope": "with_scope",
}, {
"testcase_name": "skip_summary",
"scope": None,
"skip_summary": True,
})
@tf_compat.skip_for_tf1
@test_util.run_in_graph_and_eager_modes
def test_audio_summary(self, scope, skip_summary=False):
with context.graph_mode():
scoped_summary = _ScopedSummaryV2(
self.test_subdirectory, scope=scope, skip_summary=skip_summary)
i = tf.ones((5, 3, 4))
with tf.name_scope("outer"):
scoped_summary.audio("inner", i, sample_rate=2, max_outputs=3)
self.write_summaries(scoped_summary)
if skip_summary:
return
events = self.read_single_event_from_eventfile(scoped_summary)
values = events[0].summary.value
    self.assertLen(values, 1)
self.assertEqual(values[0].tag, "outer/inner")
@parameterized.named_parameters(
{
"testcase_name": "without_scope",
"scope": None,
}, {
"testcase_name": "with_scope",
"scope": "with_scope",
})
@tf_compat.skip_for_tf1
@test_util.run_in_graph_and_eager_modes
def test_summary_name_conversion(self, scope):
scoped_summary = _ScopedSummaryV2(self.test_subdirectory, scope=scope)
c = tf.constant(3)
scoped_summary.scalar("name with spaces", c)
scoped_summary.scalar("name with many $#illegal^: characters!", c)
scoped_summary.scalar("/name/with/leading/slash", c)
self.write_summaries(scoped_summary)
events = self.read_single_event_from_eventfile(scoped_summary)
self.assertLen(events, 3)
tags = [event.summary.value[0].tag for event in events]
# Characters that were illegal in TF 1 are now valid in TF 2.
self.assertIn("name with spaces", tags)
self.assertIn("name with many $#illegal^: characters!", tags)
self.assertIn("name/with/leading/slash", tags)
@parameterized.named_parameters(
{
"testcase_name": "without_scope",
"scope": None,
}, {
"testcase_name": "with_scope",
"scope": "with_scope",
})
@tf_compat.skip_for_tf1
@test_util.run_in_graph_and_eager_modes
def test_current_scope(self, scope):
with context.graph_mode():
scoped_summary = _ScopedSummaryV2(self.test_subdirectory, scope=scope)
i = tf.constant(3)
with tf.compat.v1.variable_scope("outer1"):
with tf.compat.v1.variable_scope("outer2"):
with scoped_summary.current_scope():
with tf.compat.v1.variable_scope("inner1"):
scoped_summary.scalar("inner2/a/b/c", i)
self.write_summaries(scoped_summary)
events = self.read_single_event_from_eventfile(scoped_summary)
values = events[0].summary.value
self.assertLen(values, 1)
self.assertEqual(values[0].tag, "inner1/inner2/a/b/c")
self.assertEqual(simple_value(values[0]), 3.0)
@tf_compat.skip_for_tf1
@test_util.run_in_graph_and_eager_modes
def test_summary_args(self):
summary = _ScopedSummaryV2(self.test_subdirectory)
summary.scalar("scalar", 1, "family")
summary.image("image", 1, 3, "family")
summary.histogram("histogram", 1, "family")
summary.audio("audio", 1, 3, 3, "family")
self.assertLen(summary.summary_tuples(), 4)
@tf_compat.skip_for_tf1
@test_util.run_in_graph_and_eager_modes
def test_summary_kwargs(self):
summary = _ScopedSummaryV2(self.test_subdirectory)
summary.scalar(name="scalar", tensor=1, family="family")
summary.image(name="image", tensor=1, max_outputs=3, family="family")
summary.histogram(name="histogram", values=1, family="family")
summary.audio(
name="audio", tensor=1, sample_rate=3, max_outputs=3, family="family")
self.assertLen(summary.summary_tuples(), 4)
if __name__ == "__main__":
tf.enable_v2_behavior()
tf.test.main()
"""
SALTS XBMC Addon
Copyright (C) 2014 tknorris
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import json
import re
import time
import random
import hashlib
import scraper
import urlparse
import kodi
import log_utils # @UnusedImport
import dom_parser2
from salts_lib import scraper_utils
from salts_lib import jsunpack
from salts_lib.constants import VIDEO_TYPES
from salts_lib.constants import FORCE_NO_MATCH
from salts_lib.constants import XHR
from salts_lib.constants import QUALITIES
logger = log_utils.Logger.get_logger()
BASE_URL = 'http://iomovies.net'
VID_URL = '/api/get_episode/{data_i}/{data_e}'
class Scraper(scraper.Scraper):
base_url = BASE_URL
def __init__(self, timeout=scraper.DEFAULT_TIMEOUT):
self.timeout = timeout
self.base_url = kodi.get_setting('%s-base_url' % (self.get_name()))
@classmethod
def provides(cls):
return frozenset([VIDEO_TYPES.MOVIE])
@classmethod
def get_name(cls):
return 'IOMovies'
def get_sources(self, video):
sources = []
streams = {}
source_url = self.get_url(video)
if not source_url or source_url == FORCE_NO_MATCH: return sources
page_url = scraper_utils.urljoin(self.base_url, source_url)
html = self._http_get(page_url, cache_limit=8)
match = dom_parser2.parse_dom(html, 'a', {'title': re.compile('click to play', re.I)}, req='href')
if not match: return sources
page_url = scraper_utils.urljoin(self.base_url, match[0].attrs['href'])
headers = {'Referer': page_url}
html = self._http_get(page_url, headers=headers, cache_limit=.02)
for attrs, _content in dom_parser2.parse_dom(html, 'a', {'class': 'mw-episode-btn'}, req=['data-target-i', 'data-target-e', 'title']):
try:
if "wasn't alive" in attrs['title']: continue
vid_url = scraper_utils.urljoin(self.base_url, VID_URL)
vid_url = vid_url.format(data_i=attrs['data-target-i'], data_e=attrs['data-target-e'])
headers = {'Referer': page_url}
headers.update(XHR)
cookies = self.__get_cookies(html, attrs)
vid_html = self._http_get(vid_url, headers=headers, cookies=cookies, cache_limit=.02)
streams.update(self.__get_js_sources(vid_html, vid_url, cookies, page_url))
except scraper.ScrapeError as e:
logger.log('IOMovies Error (%s): %s in %s' % (e, vid_url, page_url))
for stream_url, values in streams.iteritems():
if values['direct']:
host = scraper_utils.get_direct_hostname(self, stream_url)
if host == 'gvideo':
quality = scraper_utils.gv_get_quality(stream_url)
else:
quality = scraper_utils.height_get_quality(values['label'])
stream_url += scraper_utils.append_headers({'User-Agent': scraper_utils.get_ua()})
else:
host = urlparse.urlparse(stream_url).hostname
quality = QUALITIES.HIGH
source = {'multi-part': False, 'url': stream_url, 'host': host, 'class': self, 'quality': quality, 'views': None, 'rating': None, 'direct': values['direct']}
sources.append(source)
return sources
def __get_js_sources(self, html, url, cookies, page_url, allow_framed=True):
streams = {}
js_data = scraper_utils.parse_json(html, url)
for key, stream_url in js_data.get('data', {}).get('sources', {}).iteritems():
if key == 'framed' and allow_framed:
streams.update(self.__get_framed_streams(url, cookies, stream_url, page_url))
elif key == 'OpenLoad':
direct = False
else:
direct = True
if not stream_url.startswith('http'): continue
streams[stream_url] = {'label': key, 'direct': direct}
return streams
def __get_framed_streams(self, vid_url, cookies, html, page_url):
streams = {}
iframe_url = dom_parser2.parse_dom(html, 'iframe', req='src')
if not iframe_url: raise scraper.ScrapeError('No Iframe in: %s' % (vid_url))
iframe_url = iframe_url[0].attrs['src']
html = self._http_get(iframe_url, headers={'Referer': page_url}, cache_limit=.02)
match = re.search('getScript\("([^"]+)', html)
if not match: raise scraper.ScrapeError('No Script in: %s' % (iframe_url))
script_url = match.group(1)
html = self._http_get(script_url, headers={'Referer': iframe_url}, cache_limit=.02)
match = re.search("responseJson\s*=\s*'([^']+)", html)
if not match: raise scraper.ScrapeError('No JSON in: %s' % (script_url))
js_data = scraper_utils.parse_json(match.group(1), script_url)
media = js_data.get('medias', {})
if media:
headers = {'Referer': page_url}
headers.update(XHR)
data = {'data': json.dumps({'medias': media, 'original': ''})}
vid_html = self._http_get(vid_url, data=data, headers=headers, cookies=cookies, cache_limit=.02)
streams.update(self.__get_js_sources(vid_html, vid_url, cookies, page_url, allow_framed=False))
return streams
def __get_cookies(self, html, attrs):
ts = int(time.time()) - random.randint(1, 60)
token = hashlib.md5(self.__get_slice(html) + attrs['data-target-e'] + attrs['data-target-i'] + str(ts)).hexdigest()
return {'timestamp': ts, 'token': token}
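        # In other words (as inferred from the code above, not from any site
        # documentation): token = md5(alphabet_slice + data-target-e +
        # data-target-i + str(timestamp)), with the timestamp backdated by a
        # random 1-60 seconds.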
def __get_slice(self, html):
alphabet = re.search("alphabet\s*=\s*'([^']+)", html)
if not alphabet: raise scraper.ScrapeError('No Alphabet Found')
alphabet = alphabet.group(1)
js_code = ''
for match in re.finditer('(eval\(function\(.*?)</script>', html, re.DOTALL):
js_data = jsunpack.unpack(match.group(1))
js_data = js_data.replace('\\', '')
js_code += js_data
if 'charCodeAt' in js_code:
s = self.__get_slice1(js_code, alphabet)
else:
s = self.__get_slice2(js_code, alphabet)
return s
def __get_slice1(self, js_code, alphabet):
values = {}
for var in re.finditer("var\s+([^=]+)='([^']+)'\.charCodeAt\((\d+)\)", js_code):
values[var.group(1)] = ord(var.group(2)[int(var.group(3))])
if not values: raise scraper.ScrapeError('No Vars in js_data')
match = re.search('slice\(([^,]+),([^)]+)\)', js_code)
if not match: raise scraper.ScrapeError('No Slice in js_data')
start, end = match.groups()
for key, value in values.iteritems():
start = start.replace(key, str(value))
end = end.replace(key, str(value))
try:
start = eval(start)
end = eval(end)
except Exception as e:
raise scraper.ScrapeError('Eval Failed (%s): |%s|%s|' % (e, start, end))
return alphabet[start: end]
def __get_slice2(self, js_code, alphabet):
s = ''
alpha_len = str(len(alphabet))
for match in re.finditer('slice\(([^,]+),([^)]+)\)', js_code):
start, end = match.groups()
start = start.replace('input.length', alpha_len)
end = end.replace('input.length', alpha_len)
try:
start = eval(start)
end = eval(end)
except Exception as e:
raise scraper.ScrapeError('Eval Failed (%s): |%s|%s|' % (e, start, end))
s += alphabet[start: end]
if not s: raise scraper.ScrapeError('No Slice from: %s' % (js_code))
return s
def search(self, video_type, title, year, season=''): # @UnusedVariable
results = []
search_url = scraper_utils.urljoin(self.base_url, '/search')
html = self._http_get(search_url, params={'q': title}, cache_limit=8)
for _attrs, item in dom_parser2.parse_dom(html, 'div', {'class': 'movie-item'}):
match = dom_parser2.parse_dom(item, 'a', {'itemprop': 'url'}, req='href')
if not match: continue
match_url, match_title_year = match[0].attrs['href'], match[0].content
match_title, match_year = scraper_utils.extra_year(match_title_year)
if not match_year:
try: match_year = dom_parser2.parse_dom(item, 'div', {'class': 'overlay-year'})[0].content
except: match_year = ''
if not year or not match_year or year == match_year:
result = {'title': scraper_utils.cleanse_title(match_title), 'year': match_year, 'url': scraper_utils.pathify_url(match_url)}
results.append(result)
return results
# ----------------------------------------------------------------------------
# Copyright (c) 2016-2017, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import unittest
import collections
import qiime2.core.type.grammar as grammar
class TestTypeBase(unittest.TestCase):
def setUp(self):
class Example(grammar._TypeBase):
__getitem__ = __or__ = __and__ = lambda s, x: x
def __eq__(self, other):
return False
self.Example = Example
def test_ne(self):
example = self.Example()
self.assertNotEqual(example, 42)
self.assertNotEqual(42, example)
def test_rmod(self):
example = self.Example()
with self.assertRaisesRegex(TypeError, 'right-hand'):
42 % example
def test_rand(self):
self.assertEqual(42 & self.Example(), 42)
def test_ror(self):
self.assertEqual(42 | self.Example(), 42)
def test_delattr(self):
example = self.Example()
with self.assertRaisesRegex(TypeError, 'immutable'):
del example.foo
def test_setitem(self):
example = self.Example()
with self.assertRaisesRegex(TypeError, 'immutable'):
example['foo'] = 1
def test_delitem(self):
example = self.Example()
with self.assertRaisesRegex(TypeError, 'immutable'):
del example['foo']
def test_getitem(self):
example = self.Example()
self.assertEqual(example[1], 1)
def test_freeze(self):
example = self.Example()
example.foo = 1
self.assertEqual(example.foo, 1)
example._freeze_()
self.assertEqual(example.foo, 1)
with self.assertRaisesRegex(TypeError, 'immutable'):
example.foo = 1
with self.assertRaisesRegex(TypeError, 'immutable'):
example.bar = 1
# These tests are not concerned with rewriting properties on the class,
# that behaviour is left unspecified to match Python.
class TestCompositeType(unittest.TestCase):
def test_immutable(self):
        # If this test fails, then the hierarchy has been rearranged and the
# properties tested for `_TypeBase` should be tested for
# this class.
# - Your Friendly Dead Man's Switch
self.assertIsInstance(grammar.CompositeType('Example', ('foo',)),
grammar._TypeBase)
def test_field_sanity(self):
with self.assertRaisesRegex(ValueError, 'empty'):
grammar.CompositeType('Example', ())
def test_mod(self):
with self.assertRaisesRegex(TypeError, 'predicate'):
grammar.CompositeType('Example', ('foo',)) % None
def test_or(self):
with self.assertRaisesRegex(TypeError, 'union'):
grammar.CompositeType('Example', ('foo',)) | None
def test_and(self):
with self.assertRaisesRegex(TypeError, 'intersect'):
grammar.CompositeType('Example', ('foo',)) & None
def test_repr(self):
self.assertEqual(repr(grammar.CompositeType('Example', ('foo',))),
'Example[{foo}]')
self.assertEqual(repr(grammar.CompositeType('Example', ('f', 'b'))),
'Example[{f}, {b}]')
def test_validate_field_w_typeexp(self):
Example = grammar.CompositeType('Example', ('foo',))
# Check that no error is raised:
Example._validate_field_('foo', grammar.TypeExpression('X'))
# Test passed if we reach this line.
def test_validate_field_w_comptype(self):
Example = grammar.CompositeType('Example', ('foo',))
with self.assertRaisesRegex(TypeError, 'Incomplete'):
Example._validate_field_('foo', Example)
def test_validate_field_w_nonsense(self):
Example = grammar.CompositeType('Example', ('foo',))
with self.assertRaisesRegex(TypeError, 'Ellipsis'):
Example._validate_field_('foo', Ellipsis)
def test_apply_fields(self):
X = grammar.TypeExpression('X')
Example = grammar.CompositeType('Example', ('foo',))
result = Example._apply_fields_((X,))
self.assertEqual(result.fields, (X,))
self.assertEqual(result.name, 'Example')
self.assertIsInstance(result, grammar.TypeExpression)
def test_iter_symbols(self):
Example = grammar.CompositeType('Example', ('foo',))
self.assertEqual(list(Example.iter_symbols()), ['Example'])
class TestCompositeTypeGetItem(unittest.TestCase):
def setUp(self):
self.local = {}
def test_wrong_length(self):
X = grammar.TypeExpression('X')
composite_type = grammar.CompositeType('C', ['foo', 'bar'])
with self.assertRaisesRegex(TypeError, '1'):
composite_type[X]
composite_type = grammar.CompositeType('C', ['foo'])
with self.assertRaisesRegex(TypeError, '2'):
composite_type[X, X]
def test_nested_expression(self):
X = grammar.TypeExpression('X')
C = grammar.CompositeType('C', ['foo', 'bar'])
self.assertEqual(repr(C[X, C[C[X, X], X]]), 'C[X, C[C[X, X], X]]')
def test_validate_field_called(self):
class MyCompositeType(grammar.CompositeType):
def _validate_field_(s, name, value):
self.local['name'] = name
self.local['value'] = value
my_type = MyCompositeType('MyType', ['foo'])
my_type[...]
self.assertEqual(self.local['name'], 'foo')
self.assertEqual(self.local['value'], ...)
def test_apply_fields_called(self):
class MyCompositeType(grammar.CompositeType):
def _validate_field_(*args):
pass # Let anything through
def _apply_fields_(s, fields):
self.local['fields'] = fields
return ...
my_type = MyCompositeType('MyType', ['foo'])
type_exp = my_type['!'] # '!' is not a `TypeExpression`
self.assertEqual(self.local['fields'], ('!',))
self.assertEqual(type_exp, ...)
class TestTypeExpression(unittest.TestCase):
def test_immutable(self):
        # If this test fails, then the hierarchy has been rearranged and the
# properties tested for `_TypeBase` should be tested for
# this class.
# - Your Friendly Dead Man's Switch
self.assertIsInstance(grammar.TypeExpression('X'),
grammar._TypeBase)
def test_hashable(self):
a = grammar.TypeExpression('X')
b = grammar.TypeExpression('Y', fields=(a,))
c = grammar.TypeExpression('Y', fields=(a,))
d = grammar.TypeExpression('Z', predicate=grammar.Predicate())
self.assertIsInstance(a, collections.Hashable)
# There really shouldn't be a collision between these:
self.assertNotEqual(hash(a), hash(d))
self.assertEqual(b, c)
self.assertEqual(hash(b), hash(c))
# TODO: Test dictionaries work well
def test_eq_nonsense(self):
X = grammar.TypeExpression('X')
self.assertIs(X.__eq__(42), NotImplemented)
self.assertFalse(X == 42)
def test_eq_different_instances(self):
X = grammar.TypeExpression('X')
X_ = grammar.TypeExpression('X')
self.assertIsNot(X, X_)
self.assertEqual(X, X_)
# TODO: Add more equality tests
def test_mod(self):
X = grammar.TypeExpression('X')
with self.assertRaisesRegex(TypeError, 'fields'):
X['scikit-bio/assets/.no.gif']
Y = grammar.TypeExpression('Y', fields=(X,))
with self.assertRaisesRegex(TypeError, 'fields'):
Y[';-)']
def test_repr(self):
# Subclass elements to demonstrate dispatch occurs correctly.
class Face1(grammar.TypeExpression):
def __repr__(self):
return "-_-"
class Exclaim(grammar.TypeExpression):
def __repr__(self):
return '!'
class Face2(grammar.Predicate):
def __repr__(self):
return '(o_o)'
self.assertEqual(
repr(grammar.TypeExpression('!')),
'!')
self.assertEqual(
repr(grammar.TypeExpression('!', fields=(Face1(''),))),
'![-_-]')
self.assertEqual(
repr(grammar.TypeExpression('!',
fields=(Face1(''), Exclaim('!')))),
'![-_-, !]')
self.assertEqual(
repr(grammar.TypeExpression('!',
fields=(Face1(''), Exclaim('!')),
predicate=Face2(True))),
'![-_-, !] % (o_o)')
self.assertEqual(
repr(grammar.TypeExpression('(o_-)',
predicate=Face2(True))),
'(o_-) % (o_o)')
def test_validate_union_w_nonsense(self):
X = grammar.TypeExpression('X')
with self.assertRaisesRegex(TypeError, 'expression'):
X._validate_union_(42)
def test_validate_union_w_composite_type(self):
X = grammar.TypeExpression('X')
with self.assertRaisesRegex(TypeError, 'incomplete'):
X._validate_union_(grammar.CompositeType('A', field_names=('X',)))
def test_validate_union_w_valid(self):
X = grammar.TypeExpression('X')
Y = grammar.TypeExpression('Y')
X._validate_union_(Y)
def test_validate_union_implements_handshake(self):
local = {}
X = grammar.TypeExpression('X')
class Example(grammar.TypeExpression):
def _validate_union_(self, other, handshake=False):
local['other'] = other
local['handshake'] = handshake
X._validate_union_(Example('Example'))
self.assertIs(local['other'], X)
self.assertTrue(local['handshake'])
def test_build_union(self):
X = grammar.TypeExpression('X')
Y = grammar.TypeExpression('Y')
union = X._build_union_((X, Y))
self.assertIsInstance(union, grammar.UnionTypeExpression)
self.assertEqual(union.members, frozenset({X, Y}))
def test_validate_intersection_w_nonsense(self):
X = grammar.TypeExpression('X')
with self.assertRaisesRegex(TypeError, 'expression'):
X._validate_intersection_(42)
def test_validate_intersection_w_composite_type(self):
X = grammar.TypeExpression('X')
with self.assertRaisesRegex(TypeError, 'incomplete'):
X._validate_intersection_(
grammar.CompositeType('A', field_names=('X',)))
def test_validate_intersection_w_valid(self):
X = grammar.TypeExpression('X')
Y = grammar.TypeExpression('Y')
X._validate_intersection_(Y)
def test_validate_intersection_implements_handshake(self):
local = {}
X = grammar.TypeExpression('X')
class Example(grammar.TypeExpression):
def _validate_intersection_(self, other, handshake=False):
local['other'] = other
local['handshake'] = handshake
X._validate_intersection_(Example('Example'))
self.assertIs(local['other'], X)
self.assertTrue(local['handshake'])
def test_build_intersection(self):
X = grammar.TypeExpression('X')
Y = grammar.TypeExpression('Y')
intersection = X._build_intersection_((X, Y))
self.assertIsInstance(intersection, grammar.IntersectionTypeExpression)
self.assertEqual(intersection.members, frozenset({X, Y}))
def test_validate_predicate_w_nonsense(self):
X = grammar.TypeExpression('X')
with self.assertRaisesRegex(TypeError, 'predicate'):
X._validate_predicate_(42)
def test_validate_predicate_w_valid(self):
predicate = grammar.Predicate(True)
X = grammar.TypeExpression('X')
X._validate_predicate_(predicate)
# Test passed.
def test_apply_predicate(self):
predicate = grammar.Predicate(True)
Y = grammar.TypeExpression('Y')
X = grammar.TypeExpression('X', fields=(Y,))
result = X._apply_predicate_(predicate)
self.assertIsInstance(result, grammar.TypeExpression)
self.assertEqual(result.fields, (Y,))
def test_is_subtype_wrong_name(self):
Y = grammar.TypeExpression('Y')
X = grammar.TypeExpression('X')
self.assertFalse(Y._is_subtype_(X))
self.assertFalse(X._is_subtype_(Y))
def test_is_subtype_diff_fields(self):
F1 = grammar.TypeExpression('F1')
F2 = grammar.TypeExpression('F2')
X = grammar.TypeExpression('X', fields=(F1,))
X_ = grammar.TypeExpression('X', fields=(F2,))
self.assertFalse(X_._is_subtype_(X))
self.assertFalse(X._is_subtype_(X_))
def test_is_subtype_diff_predicates(self):
class Pred(grammar.Predicate):
def __init__(self, value):
self.value = value
super().__init__(value)
def _is_subtype_(self, other):
return self.value <= other.value
P1 = Pred(1)
P2 = Pred(2)
X = grammar.TypeExpression('X', predicate=P1)
X_ = grammar.TypeExpression('X', predicate=P2)
self.assertFalse(X_._is_subtype_(X))
self.assertTrue(X._is_subtype_(X_))
def test_is_subtype_matches(self):
X = grammar.TypeExpression('X')
X_ = grammar.TypeExpression('X')
self.assertTrue(X._is_subtype_(X))
self.assertTrue(X_._is_subtype_(X))
self.assertTrue(X._is_subtype_(X_))
self.assertTrue(X_._is_subtype_(X_))
def test_is_subtype_matches_w_fields(self):
F1 = grammar.TypeExpression('F1')
F2 = grammar.TypeExpression('F2')
X = grammar.TypeExpression('X', fields=(F1,))
X_ = grammar.TypeExpression('X', fields=(F2,))
self.assertFalse(X_._is_subtype_(X))
self.assertFalse(X._is_subtype_(X_))
def test_is_subtype_matches_w_predicate(self):
class Pred(grammar.Predicate):
def __init__(self, value=0):
self.value = value
super().__init__(value)
def _is_subtype_(self, other):
return self.value <= other.value
P1 = Pred(1)
P1_ = Pred(1)
X = grammar.TypeExpression('X', predicate=P1)
X_ = grammar.TypeExpression('X', predicate=P1_)
self.assertTrue(X._is_subtype_(X))
self.assertTrue(X_._is_subtype_(X))
self.assertTrue(X._is_subtype_(X_))
self.assertTrue(X_._is_subtype_(X_))
class TestTypeExpressionMod(unittest.TestCase):
def setUp(self):
self.local = {}
def test_mod_w_existing_predicate(self):
X = grammar.TypeExpression('X', predicate=grammar.Predicate('Truthy'))
with self.assertRaisesRegex(TypeError, 'predicate'):
X % grammar.Predicate('Other')
def test_mod_w_falsy_predicate(self):
X = grammar.TypeExpression('X', predicate=grammar.Predicate())
predicate = grammar.Predicate("Truthy")
self.assertIs((X % predicate).predicate, predicate)
def test_mod_w_none(self):
X = grammar.TypeExpression('X')
self.assertEqual(X % None, X)
def test_validate_predicate_called(self):
class Example(grammar.TypeExpression):
def _validate_predicate_(s, predicate):
self.local['predicate'] = predicate
example = Example('Example')
example % 42
self.assertEqual(self.local['predicate'], 42)
def test_apply_predicate_called(self):
class Example(grammar.TypeExpression):
def _validate_predicate_(s, predicate):
pass # Let anything through
def _apply_predicate_(s, predicate):
self.local['predicate'] = predicate
return ...
example = Example('Example')
new_type_expr = example % 'Foo'
self.assertEqual(self.local['predicate'], 'Foo')
self.assertIs(new_type_expr, ...)
class TestTypeExpressionOr(unittest.TestCase):
def setUp(self):
self.local = {}
def test_identity(self):
X = grammar.TypeExpression('X')
X_ = grammar.TypeExpression('X')
self.assertIs(X | X_, X)
def test_several(self):
X = grammar.TypeExpression('X')
Y = grammar.TypeExpression('Y')
Z = grammar.TypeExpression('Z')
self.assertIsInstance(X | Y | Z, grammar.UnionTypeExpression)
self.assertEqual(X | Y | Z | X | Z, Y | Z | X)
def test_validate_union_called(self):
class Example(grammar.TypeExpression):
def _validate_union_(s, other, handshake):
self.local['other'] = other
self.local['handshake'] = handshake
example = Example('Example')
example | 42
self.assertEqual(self.local['other'], 42)
self.assertFalse(self.local['handshake'])
def test_build_union_called(self):
class Example(grammar.TypeExpression):
def _validate_union_(s, other, handshake):
pass # Let anything through
def _build_union_(s, members):
self.local['members'] = members
return ...
example = Example('Example')
new_type_expr = example | 42
self.assertEqual(self.local['members'], (example, 42))
self.assertIs(new_type_expr, ...)
class TestTypeExpressionAnd(unittest.TestCase):
def setUp(self):
self.local = {}
def test_identity(self):
X = grammar.TypeExpression('X')
X_ = grammar.TypeExpression('X')
self.assertIs(X & X_, X_)
def test_several(self):
X = grammar.TypeExpression('X')
Y = grammar.TypeExpression('Y')
Z = grammar.TypeExpression('Z')
self.assertIsInstance(X & Y & Z, grammar.IntersectionTypeExpression)
self.assertEqual(X & Y & Z & X & Z, Y & Z & X)
def test_validate_intersection_called(self):
class Example(grammar.TypeExpression):
def _validate_intersection_(s, other, handshake):
self.local['other'] = other
self.local['handshake'] = handshake
example = Example('Example')
example & 42
self.assertEqual(self.local['other'], 42)
self.assertFalse(self.local['handshake'])
def test_build_intersection_called(self):
class Example(grammar.TypeExpression):
def _validate_intersection_(s, other, handshake):
pass # Let anything through
def _build_intersection_(s, members):
self.local['members'] = members
return ...
example = Example('Example')
new_type_expr = example & 42
self.assertEqual(self.local['members'], (example, 42))
self.assertIs(new_type_expr, ...)
class TestTypeExpressionLE(unittest.TestCase):
def setUp(self):
self.local = {}
def test_is_subtype_called(self):
class Example(grammar.TypeExpression):
def _is_subtype_(s, other):
self.local['other'] = other
return self.local['return']
example = Example('Example')
other = Example('Other')
self.local['return'] = True
result = example <= other
self.assertEqual(self.local['other'], other)
self.assertTrue(result)
self.local['return'] = False
result = example <= other
self.assertEqual(self.local['other'], other)
self.assertFalse(result)
class TestTypeExpressionGE(unittest.TestCase):
def setUp(self):
self.local = {}
def test_is_subtype_called(self):
class Example(grammar.TypeExpression):
def _is_subtype_(s, other):
self.local['other'] = other
return self.local['return']
example = Example('Example')
other = Example('Other')
self.local['return'] = True
result = example >= other
self.assertEqual(self.local['other'], example)
self.assertTrue(result)
self.local['return'] = False
result = example >= other
self.assertEqual(self.local['other'], example)
self.assertFalse(result)
# TODO: test the following:
# - _SetOperationBase
# - UnionTypeExpression
# - IntersectionTypeExpression
# - MappingTypeExpression
# - Predicate
if __name__ == '__main__':
unittest.main()
from collections import OrderedDict
import sys
import json
import random
import h5py
import numpy as np
import tensorflow as tf
from opts import distributed
from opts.sgd_opt import SgdOpt
from opts.momentum_opt import MomentumOpt
from opts.adam_opt import AdamOpt
from opts.adamng_opt import AdamNGOpt
from opts.rnnprop_opt import RNNPropOpt
import util
import util.tf_utils as tf_utils
import util.paths as paths
import optimizees as optim
def pickle_test_results(filename, data):
    f = h5py.File(str(filename), 'w')
f.attrs['problem'] = data['problem']
f.attrs['mode'] = data['mode']
if data.get('compare_with') is not None:
f.attrs['compare_with'] = data['compare_with']
for opt_name, rets in data['results'].items():
grp = f.create_group(opt_name)
for i, ret in enumerate(rets):
for key, value in ret.items():
                dset = grp.create_dataset('{}/{}'.format(i, key), value.shape, dtype=value.dtype)
                dset[...] = value
f.close()
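# Note: despite its name, pickle_test_results() above writes an HDF5 file with
# h5py rather than a pickle. The layout implied by the code (paths shown are
# illustrative) is roughly:
#   /                        attrs: problem, mode, [compare_with]
#   /<opt_name>/<i>/<key>    one dataset per array in each returned dict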
def get_tests(test_problem, compare_with, with_rnnprop=False):
def make_opt(name, learning_rate):
#pylint: disable=missing-docstring
return {
'sgd': SgdOpt,
'momentum': MomentumOpt,
'adam': AdamOpt,
'adamng': AdamNGOpt,
'adam_reduce': lambda *args, **kwargs: AdamOpt(enable_reduce=True, patience_max=10, epsilon=1e-4, factor=0.5, *args, **kwargs)
}[name](lr=learning_rate, name='{}_lr_{}'.format(name, learning_rate))
#problems = {
# 'rosenbrock', 'quadratic', 'beale', 'booth', 'matyas', 'logreg',
# 'stoch_logreg', 'stoch_linear',
# 'digits_classifier', 'mnist_classifier', 'digits_classifier_2',
# 'digits_classifier_relu', 'digits_classifier_relu_2',
# 'conv_digits_classifier', 'conv_digits_classifier_2',
# 'digits_classifier_3', 'digits_classifier_relu_3',
#}
lrs = np.logspace(start=-1, stop=-4, num=4)
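    # With start=-1, stop=-4, num=4 this yields lrs = [0.1, 0.01, 0.001, 0.0001],
    # i.e. one baseline optimizer per decade of learning rate.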
tests = [make_opt(compare_with, lr) for lr in lrs]
if with_rnnprop:
tests.append(RNNPropOpt(eid=340))
return tests
#opts = {'sgd', 'momentum', 'adam', 'adamng'}
#tests = {}
#for problem in optim.problems:
# tests[problem] = {}
# for opt in opts:
# #if problem.startswith('mnist') or problem.startswith('digits'):
# # tests[problem][opt] = [make_opt(opt, 1e-3)]
# #else:
# tests[problem][opt] = [make_opt(opt, lr) for lr in lrs]
# if with_rnnprop:
# tests[problem][opt].append(RNNPropOpt(eid=1500))
#return tests[test_problem][compare_with]
def run_cv_testing(opt, flags):
results = OrderedDict()
random_state = np.random.get_state()
for eid in range(flags.start_eid, flags.eid + 1, flags.step):
np.random.set_state(random_state)
kwargs = util.get_kwargs(opt.test, flags)
kwargs['eid'] = eid
rets = opt.test(**kwargs)
name = '{name}_{eid}'.format(name=flags.name, eid=eid)
results[name] = rets
return results
def run_many_testing(opt, s_opts, flags):
results = OrderedDict()
random_state = np.random.get_state()
for optimizer in [opt] + s_opts:
np.random.set_state(random_state)
#if hasattr(flags, 'seed') and flags.seed is not None:
# tf.set_random_seed(flags.seed)
kwargs = util.get_kwargs(optimizer.test, flags)
#results[optimizer.name] = optimizer.test(include_x=True, **kwargs)
results[optimizer.name] = optimizer.test(include_x=False, **kwargs)
return results
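# Both runners above restore the saved NumPy RNG state before each evaluation,
# so every checkpoint (run_cv_testing) or baseline optimizer (run_many_testing)
# sees the same sampled problems.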
def setup_experiment(flags):
if flags.eid == 0:
raise ValueError("eid must be > 0")
model_path = paths.model_path(flags.name)
experiment_path = paths.experiment_path(flags.experiment_name)
print("Model path: ", model_path)
print("Experiment path: ", experiment_path)
paths.make_dirs(experiment_path)
for opt_name in flags.problems:
prefix = opt_name + "_" + flags.mode
if flags.mode == 'many':
prefix += "_" + flags.compare_with
if not flags.force and (experiment_path / (prefix + '_results.pkl')).exists():
print("You will overwrite existing results. Add -f/--force to force it.")
sys.exit(1)
with (experiment_path / 'config').open('w') as conf:
testing_options = {'eid', 'n_batches', 'n_steps', 'problems'}
config = {k: v for k, v in vars(flags).items() if k in testing_options}
config['model_path'] = str(model_path)
json.dump(config, conf, sort_keys=True, indent=4)
opt = util.load_opt(model_path)
return experiment_path, opt
@tf_utils.with_tf_graph
def testing(flags, opt, s_opts, optimizees):
for optimizee in optimizees.values():
optimizee.build()
opt.build(optimizees, inference_only=True, adam_only=flags.adam_only, n_bptt_steps=1, ema_step=flags.ema_step, ema_lr=flags.ema_lr)
for i, s_opt in enumerate(s_opts):
#s_opt.build(optimizees, inference_only=True, devices=tf_utils.get_devices(flags))
with tf.variable_scope('s_opt_{}'.format(i)):
s_opt.build(optimizees, inference_only=True, n_bptt_steps=1)
session = tf.get_default_session()
session.run(tf.global_variables_initializer())
for i, s_opt in enumerate(s_opts):
if hasattr(s_opt, 'eid'):
s_opt.restore(s_opt.eid)
if flags.mode == 'many':
results = run_many_testing(opt, s_opts, flags)
else:
results = run_cv_testing(opt, flags)
return results
def run_test(flags):
if not hasattr(flags, 'seed') or flags.seed is None:
flags.seed = random.getstate()
if flags.problems is None or flags.problems == 'all':
flags.problems = [
'rosenbrock', 'quadratic',
'beale', 'booth', 'matyas',
'logreg',
'stoch_logreg', 'stoch_linear'
]
for problem in flags.problems:
try:
assert problem in optim.problems
except Exception as e:
print('problem: ', problem)
raise
experiment_path, opt = setup_experiment(flags)
#opt = distributed.distribute(opt, tf_utils.get_devices(flags))
opt = distributed.distribute(opt, ['/cpu:0'])
optimizees = optim.get_optimizees(flags.problems,
clip_by_value=False,
random_scale=flags.enable_random_scaling,
noisy_grad=flags.noisy_grad)
for opt_name in flags.problems:
optimizee = optimizees[opt_name]
print("Running testing on: ", opt_name)
s_opts = get_tests(opt_name, flags.compare_with, with_rnnprop=flags.with_rnnprop)
results = testing(flags, opt, s_opts, {opt_name: optimizee})
data = {
'problem': opt_name,
'mode': flags.mode,
'results': results
}
prefix = opt_name + "_" + flags.mode
if flags.mode == 'many':
data['compare_with'] = flags.compare_with
prefix += "_" + flags.compare_with
util.dump_results(experiment_path, data, prefix=prefix)
from __future__ import unicode_literals
import datetime
from decimal import Decimal
import unittest
import warnings
from django import test
from django import forms
from django.core.exceptions import ValidationError
from django.db import connection, models, IntegrityError
from django.db.models.fields import (
AutoField, BigIntegerField, BinaryField, BooleanField, CharField,
CommaSeparatedIntegerField, DateField, DateTimeField, DecimalField,
EmailField, FilePathField, FloatField, IntegerField, IPAddressField,
GenericIPAddressField, NOT_PROVIDED, NullBooleanField, PositiveIntegerField,
PositiveSmallIntegerField, SlugField, SmallIntegerField, TextField,
TimeField, URLField)
from django.db.models.fields.files import FileField, ImageField
from django.utils import six
from django.utils.functional import lazy
from .models import (
Foo, Bar, Whiz, BigD, BigS, BigInt, Post, NullBooleanModel,
BooleanModel, PrimaryKeyCharModel, DataModel, Document, RenamedField,
VerboseNameField, FksToBooleans, FkToChar)
class BasicFieldTests(test.TestCase):
def test_show_hidden_initial(self):
"""
Regression test for #12913. Make sure fields with choices respect
show_hidden_initial as a kwarg to models.Field.formfield()
"""
choices = [(0, 0), (1, 1)]
model_field = models.Field(choices=choices)
form_field = model_field.formfield(show_hidden_initial=True)
self.assertTrue(form_field.show_hidden_initial)
form_field = model_field.formfield(show_hidden_initial=False)
self.assertFalse(form_field.show_hidden_initial)
def test_nullbooleanfield_blank(self):
"""
Regression test for #13071: NullBooleanField should not throw
a validation error when given a value of None.
"""
nullboolean = NullBooleanModel(nbfield=None)
try:
nullboolean.full_clean()
except ValidationError as e:
self.fail("NullBooleanField failed validation with value of None: %s" % e.messages)
def test_field_repr(self):
"""
Regression test for #5931: __repr__ of a field also displays its name
"""
f = Foo._meta.get_field('a')
self.assertEqual(repr(f), '<django.db.models.fields.CharField: a>')
f = models.fields.CharField()
self.assertEqual(repr(f), '<django.db.models.fields.CharField>')
def test_field_name(self):
"""
Regression test for #14695: explicitly defined field name overwritten
by model's attribute name.
"""
instance = RenamedField()
self.assertTrue(hasattr(instance, 'get_fieldname_display'))
self.assertFalse(hasattr(instance, 'get_modelname_display'))
def test_field_verbose_name(self):
m = VerboseNameField
for i in range(1, 23):
self.assertEqual(m._meta.get_field('field%d' % i).verbose_name,
'verbose field%d' % i)
self.assertEqual(m._meta.get_field('id').verbose_name, 'verbose pk')
def test_choices_form_class(self):
"""Can supply a custom choices form class. Regression for #20999."""
choices = [('a', 'a')]
field = models.CharField(choices=choices)
klass = forms.TypedMultipleChoiceField
self.assertIsInstance(field.formfield(choices_form_class=klass), klass)
def test_field_str(self):
from django.utils.encoding import force_str
f = Foo._meta.get_field('a')
self.assertEqual(force_str(f), "model_fields.Foo.a")
class DecimalFieldTests(test.TestCase):
def test_to_python(self):
f = models.DecimalField(max_digits=4, decimal_places=2)
self.assertEqual(f.to_python(3), Decimal("3"))
self.assertEqual(f.to_python("3.14"), Decimal("3.14"))
self.assertRaises(ValidationError, f.to_python, "abc")
def test_default(self):
f = models.DecimalField(default=Decimal("0.00"))
self.assertEqual(f.get_default(), Decimal("0.00"))
def test_format(self):
f = models.DecimalField(max_digits=5, decimal_places=1)
self.assertEqual(f._format(f.to_python(2)), '2.0')
self.assertEqual(f._format(f.to_python('2.6')), '2.6')
self.assertEqual(f._format(None), None)
def test_get_db_prep_lookup(self):
from django.db import connection
f = models.DecimalField(max_digits=5, decimal_places=1)
self.assertEqual(f.get_db_prep_lookup('exact', None, connection=connection), [None])
def test_filter_with_strings(self):
"""
We should be able to filter decimal fields using strings (#8023)
"""
Foo.objects.create(id=1, a='abc', d=Decimal("12.34"))
self.assertEqual(list(Foo.objects.filter(d='1.23')), [])
def test_save_without_float_conversion(self):
"""
Ensure decimals don't go through a corrupting float conversion during
save (#5079).
"""
bd = BigD(d="12.9")
bd.save()
bd = BigD.objects.get(pk=bd.pk)
self.assertEqual(bd.d, Decimal("12.9"))
def test_lookup_really_big_value(self):
"""
Ensure that really big values can be used in a filter statement, even
with older Python versions.
"""
# This should not crash. That counts as a win for our purposes.
Foo.objects.filter(d__gte=100000000000)
class ForeignKeyTests(test.TestCase):
def test_callable_default(self):
"""Test the use of a lazy callable for ForeignKey.default"""
a = Foo.objects.create(id=1, a='abc', d=Decimal("12.34"))
b = Bar.objects.create(b="bcd")
self.assertEqual(b.a, a)
@test.skipIfDBFeature('interprets_empty_strings_as_nulls')
def test_empty_string_fk(self):
"""
Test that foreign key values to empty strings don't get converted
to None (#19299)
"""
char_model_empty = PrimaryKeyCharModel.objects.create(string='')
fk_model_empty = FkToChar.objects.create(out=char_model_empty)
fk_model_empty = FkToChar.objects.select_related('out').get(id=fk_model_empty.pk)
self.assertEqual(fk_model_empty.out, char_model_empty)
class DateTimeFieldTests(unittest.TestCase):
def test_datetimefield_to_python_usecs(self):
"""DateTimeField.to_python should support usecs"""
f = models.DateTimeField()
self.assertEqual(f.to_python('2001-01-02 03:04:05.000006'),
datetime.datetime(2001, 1, 2, 3, 4, 5, 6))
self.assertEqual(f.to_python('2001-01-02 03:04:05.999999'),
datetime.datetime(2001, 1, 2, 3, 4, 5, 999999))
def test_timefield_to_python_usecs(self):
"""TimeField.to_python should support usecs"""
f = models.TimeField()
self.assertEqual(f.to_python('01:02:03.000004'),
datetime.time(1, 2, 3, 4))
self.assertEqual(f.to_python('01:02:03.999999'),
datetime.time(1, 2, 3, 999999))
class BooleanFieldTests(unittest.TestCase):
def _test_get_db_prep_lookup(self, f):
from django.db import connection
self.assertEqual(f.get_db_prep_lookup('exact', True, connection=connection), [True])
self.assertEqual(f.get_db_prep_lookup('exact', '1', connection=connection), [True])
self.assertEqual(f.get_db_prep_lookup('exact', 1, connection=connection), [True])
self.assertEqual(f.get_db_prep_lookup('exact', False, connection=connection), [False])
self.assertEqual(f.get_db_prep_lookup('exact', '0', connection=connection), [False])
self.assertEqual(f.get_db_prep_lookup('exact', 0, connection=connection), [False])
self.assertEqual(f.get_db_prep_lookup('exact', None, connection=connection), [None])
def _test_to_python(self, f):
self.assertTrue(f.to_python(1) is True)
self.assertTrue(f.to_python(0) is False)
def test_booleanfield_get_db_prep_lookup(self):
self._test_get_db_prep_lookup(models.BooleanField())
def test_nullbooleanfield_get_db_prep_lookup(self):
self._test_get_db_prep_lookup(models.NullBooleanField())
def test_booleanfield_to_python(self):
self._test_to_python(models.BooleanField())
def test_nullbooleanfield_to_python(self):
self._test_to_python(models.NullBooleanField())
def test_charfield_textfield_max_length_passed_to_formfield(self):
"""
Test that CharField and TextField pass their max_length attributes to
form fields created using their .formfield() method (#22206).
"""
cf1 = models.CharField()
cf2 = models.CharField(max_length=1234)
self.assertIsNone(cf1.formfield().max_length)
self.assertEqual(1234, cf2.formfield().max_length)
tf1 = models.TextField()
tf2 = models.TextField(max_length=2345)
self.assertIsNone(tf1.formfield().max_length)
self.assertEqual(2345, tf2.formfield().max_length)
def test_booleanfield_choices_blank(self):
"""
Test that BooleanField with choices and defaults doesn't generate a
formfield with the blank option (#9640, #10549).
"""
choices = [(1, 'Si'), (2, 'No')]
f = models.BooleanField(choices=choices, default=1, null=True)
self.assertEqual(f.formfield().choices, [('', '---------')] + choices)
f = models.BooleanField(choices=choices, default=1, null=False)
self.assertEqual(f.formfield().choices, choices)
def test_return_type(self):
b = BooleanModel()
b.bfield = True
b.save()
b2 = BooleanModel.objects.get(pk=b.pk)
self.assertIsInstance(b2.bfield, bool)
self.assertEqual(b2.bfield, True)
b3 = BooleanModel()
b3.bfield = False
b3.save()
b4 = BooleanModel.objects.get(pk=b3.pk)
self.assertIsInstance(b4.bfield, bool)
self.assertEqual(b4.bfield, False)
b = NullBooleanModel()
b.nbfield = True
b.save()
b2 = NullBooleanModel.objects.get(pk=b.pk)
self.assertIsInstance(b2.nbfield, bool)
self.assertEqual(b2.nbfield, True)
b3 = NullBooleanModel()
b3.nbfield = False
b3.save()
b4 = NullBooleanModel.objects.get(pk=b3.pk)
self.assertIsInstance(b4.nbfield, bool)
self.assertEqual(b4.nbfield, False)
# http://code.djangoproject.com/ticket/13293
# Verify that when an extra clause exists, the boolean
# conversions are applied with an offset
b5 = BooleanModel.objects.all().extra(
select={'string_col': 'string'})[0]
self.assertFalse(isinstance(b5.pk, bool))
def test_select_related(self):
"""
Test type of boolean fields when retrieved via select_related() (MySQL,
#15040)
"""
bmt = BooleanModel.objects.create(bfield=True)
bmf = BooleanModel.objects.create(bfield=False)
nbmt = NullBooleanModel.objects.create(nbfield=True)
nbmf = NullBooleanModel.objects.create(nbfield=False)
m1 = FksToBooleans.objects.create(bf=bmt, nbf=nbmt)
m2 = FksToBooleans.objects.create(bf=bmf, nbf=nbmf)
# Test select_related('fk_field_name')
ma = FksToBooleans.objects.select_related('bf').get(pk=m1.id)
        # verify types -- shouldn't be 0/1
self.assertIsInstance(ma.bf.bfield, bool)
self.assertIsInstance(ma.nbf.nbfield, bool)
# verify values
self.assertEqual(ma.bf.bfield, True)
self.assertEqual(ma.nbf.nbfield, True)
# Test select_related()
mb = FksToBooleans.objects.select_related().get(pk=m1.id)
mc = FksToBooleans.objects.select_related().get(pk=m2.id)
# verify types -- shouldn't be 0/1
self.assertIsInstance(mb.bf.bfield, bool)
self.assertIsInstance(mb.nbf.nbfield, bool)
self.assertIsInstance(mc.bf.bfield, bool)
self.assertIsInstance(mc.nbf.nbfield, bool)
# verify values
self.assertEqual(mb.bf.bfield, True)
self.assertEqual(mb.nbf.nbfield, True)
self.assertEqual(mc.bf.bfield, False)
self.assertEqual(mc.nbf.nbfield, False)
def test_null_default(self):
"""
Check that a BooleanField defaults to None -- which isn't
a valid value (#15124).
"""
# Patch the boolean field's default value. We give it a default
# value when defining the model to satisfy the check tests
# #20895.
boolean_field = BooleanModel._meta.get_field('bfield')
self.assertTrue(boolean_field.has_default())
old_default = boolean_field.default
try:
boolean_field.default = NOT_PROVIDED
# check patch was successful
self.assertFalse(boolean_field.has_default())
b = BooleanModel()
self.assertIsNone(b.bfield)
with self.assertRaises(IntegrityError):
b.save()
finally:
boolean_field.default = old_default
nb = NullBooleanModel()
self.assertIsNone(nb.nbfield)
nb.save() # no error
class ChoicesTests(test.TestCase):
def test_choices_and_field_display(self):
"""
Check that get_choices and get_flatchoices interact with
get_FIELD_display to return the expected values (#7913).
"""
self.assertEqual(Whiz(c=1).get_c_display(), 'First') # A nested value
self.assertEqual(Whiz(c=0).get_c_display(), 'Other') # A top level value
self.assertEqual(Whiz(c=9).get_c_display(), 9) # Invalid value
self.assertEqual(Whiz(c=None).get_c_display(), None) # Blank value
self.assertEqual(Whiz(c='').get_c_display(), '') # Empty value
class SlugFieldTests(test.TestCase):
def test_slugfield_max_length(self):
"""
Make sure SlugField honors max_length (#9706)
"""
bs = BigS.objects.create(s='slug' * 50)
bs = BigS.objects.get(pk=bs.pk)
self.assertEqual(bs.s, 'slug' * 50)
class ValidationTest(test.TestCase):
def test_charfield_raises_error_on_empty_string(self):
f = models.CharField()
self.assertRaises(ValidationError, f.clean, "", None)
def test_charfield_cleans_empty_string_when_blank_true(self):
f = models.CharField(blank=True)
self.assertEqual('', f.clean('', None))
def test_integerfield_cleans_valid_string(self):
f = models.IntegerField()
self.assertEqual(2, f.clean('2', None))
    def test_integerfield_raises_error_on_invalid_input(self):
f = models.IntegerField()
self.assertRaises(ValidationError, f.clean, "a", None)
def test_charfield_with_choices_cleans_valid_choice(self):
f = models.CharField(max_length=1,
choices=[('a', 'A'), ('b', 'B')])
self.assertEqual('a', f.clean('a', None))
def test_charfield_with_choices_raises_error_on_invalid_choice(self):
f = models.CharField(choices=[('a', 'A'), ('b', 'B')])
self.assertRaises(ValidationError, f.clean, "not a", None)
def test_charfield_get_choices_with_blank_defined(self):
f = models.CharField(choices=[('', '<><>'), ('a', 'A')])
self.assertEqual(f.get_choices(True), [('', '<><>'), ('a', 'A')])
def test_choices_validation_supports_named_groups(self):
f = models.IntegerField(
choices=(('group', ((10, 'A'), (20, 'B'))), (30, 'C')))
self.assertEqual(10, f.clean(10, None))
def test_nullable_integerfield_raises_error_with_blank_false(self):
f = models.IntegerField(null=True, blank=False)
self.assertRaises(ValidationError, f.clean, None, None)
def test_nullable_integerfield_cleans_none_on_null_and_blank_true(self):
f = models.IntegerField(null=True, blank=True)
self.assertEqual(None, f.clean(None, None))
def test_integerfield_raises_error_on_empty_input(self):
f = models.IntegerField(null=False)
self.assertRaises(ValidationError, f.clean, None, None)
self.assertRaises(ValidationError, f.clean, '', None)
def test_integerfield_validates_zero_against_choices(self):
f = models.IntegerField(choices=((1, 1),))
self.assertRaises(ValidationError, f.clean, '0', None)
def test_charfield_raises_error_on_empty_input(self):
f = models.CharField(null=False)
self.assertRaises(ValidationError, f.clean, None, None)
def test_datefield_cleans_date(self):
f = models.DateField()
self.assertEqual(datetime.date(2008, 10, 10), f.clean('2008-10-10', None))
def test_boolean_field_doesnt_accept_empty_input(self):
f = models.BooleanField()
self.assertRaises(ValidationError, f.clean, None, None)
class BigIntegerFieldTests(test.TestCase):
def test_limits(self):
# Ensure that values that are right at the limits can be saved
# and then retrieved without corruption.
maxval = 9223372036854775807
minval = -maxval - 1
BigInt.objects.create(value=maxval)
qs = BigInt.objects.filter(value__gte=maxval)
self.assertEqual(qs.count(), 1)
self.assertEqual(qs[0].value, maxval)
BigInt.objects.create(value=minval)
qs = BigInt.objects.filter(value__lte=minval)
self.assertEqual(qs.count(), 1)
self.assertEqual(qs[0].value, minval)
def test_types(self):
b = BigInt(value=0)
self.assertIsInstance(b.value, six.integer_types)
b.save()
self.assertIsInstance(b.value, six.integer_types)
b = BigInt.objects.all()[0]
self.assertIsInstance(b.value, six.integer_types)
def test_coercing(self):
BigInt.objects.create(value='10')
b = BigInt.objects.get(value='10')
self.assertEqual(b.value, 10)
class TypeCoercionTests(test.TestCase):
"""
Test that database lookups can accept the wrong types and convert
them with no error: especially on Postgres 8.3+ which does not do
automatic casting at the DB level. See #10015.
"""
def test_lookup_integer_in_charfield(self):
self.assertEqual(Post.objects.filter(title=9).count(), 0)
def test_lookup_integer_in_textfield(self):
self.assertEqual(Post.objects.filter(body=24).count(), 0)
class FileFieldTests(unittest.TestCase):
def test_clearable(self):
"""
Test that FileField.save_form_data will clear its instance attribute
value if passed False.
"""
d = Document(myfile='something.txt')
self.assertEqual(d.myfile, 'something.txt')
field = d._meta.get_field('myfile')
field.save_form_data(d, False)
self.assertEqual(d.myfile, '')
def test_unchanged(self):
"""
Test that FileField.save_form_data considers None to mean "no change"
rather than "clear".
"""
d = Document(myfile='something.txt')
self.assertEqual(d.myfile, 'something.txt')
field = d._meta.get_field('myfile')
field.save_form_data(d, None)
self.assertEqual(d.myfile, 'something.txt')
def test_changed(self):
"""
Test that FileField.save_form_data, if passed a truthy value, updates
its instance attribute.
"""
d = Document(myfile='something.txt')
self.assertEqual(d.myfile, 'something.txt')
field = d._meta.get_field('myfile')
field.save_form_data(d, 'else.txt')
self.assertEqual(d.myfile, 'else.txt')
def test_delete_when_file_unset(self):
"""
Calling delete on an unset FileField should not call the file deletion
process, but fail silently (#20660).
"""
d = Document()
try:
d.myfile.delete()
except OSError:
self.fail("Deleting an unset FileField should not raise OSError.")
class BinaryFieldTests(test.TestCase):
binary_data = b'\x00\x46\xFE'
def test_set_and_retrieve(self):
data_set = (self.binary_data, six.memoryview(self.binary_data))
for bdata in data_set:
dm = DataModel(data=bdata)
dm.save()
dm = DataModel.objects.get(pk=dm.pk)
self.assertEqual(bytes(dm.data), bytes(bdata))
# Resave (=update)
dm.save()
dm = DataModel.objects.get(pk=dm.pk)
self.assertEqual(bytes(dm.data), bytes(bdata))
# Test default value
self.assertEqual(bytes(dm.short_data), b'\x08')
if connection.vendor == 'mysql' and six.PY3:
# Existing MySQL DB-API drivers fail on binary data.
test_set_and_retrieve = unittest.expectedFailure(test_set_and_retrieve)
def test_max_length(self):
dm = DataModel(short_data=self.binary_data * 4)
self.assertRaises(ValidationError, dm.full_clean)
class GenericIPAddressFieldTests(test.TestCase):
def test_genericipaddressfield_formfield_protocol(self):
"""
Test that a GenericIPAddressField with a specified protocol does not
generate a formfield without a specified protocol. See #20740.
"""
model_field = models.GenericIPAddressField(protocol='IPv4')
form_field = model_field.formfield()
self.assertRaises(ValidationError, form_field.clean, '::1')
model_field = models.GenericIPAddressField(protocol='IPv6')
form_field = model_field.formfield()
self.assertRaises(ValidationError, form_field.clean, '127.0.0.1')
class PromiseTest(test.TestCase):
def test_AutoField(self):
lazy_func = lazy(lambda: 1, int)
self.assertIsInstance(
AutoField(primary_key=True).get_prep_value(lazy_func()),
int)
@unittest.skipIf(six.PY3, "Python 3 has no `long` type.")
def test_BigIntegerField(self):
lazy_func = lazy(lambda: long(9999999999999999999), long)
self.assertIsInstance(
BigIntegerField().get_prep_value(lazy_func()),
long)
def test_BinaryField(self):
lazy_func = lazy(lambda: b'', bytes)
self.assertIsInstance(
BinaryField().get_prep_value(lazy_func()),
bytes)
def test_BooleanField(self):
lazy_func = lazy(lambda: True, bool)
self.assertIsInstance(
BooleanField().get_prep_value(lazy_func()),
bool)
def test_CharField(self):
lazy_func = lazy(lambda: '', six.text_type)
self.assertIsInstance(
CharField().get_prep_value(lazy_func()),
six.text_type)
def test_CommaSeparatedIntegerField(self):
lazy_func = lazy(lambda: '1,2', six.text_type)
self.assertIsInstance(
CommaSeparatedIntegerField().get_prep_value(lazy_func()),
six.text_type)
def test_DateField(self):
lazy_func = lazy(lambda: datetime.date.today(), datetime.date)
self.assertIsInstance(
DateField().get_prep_value(lazy_func()),
datetime.date)
def test_DateTimeField(self):
lazy_func = lazy(lambda: datetime.datetime.now(), datetime.datetime)
self.assertIsInstance(
DateTimeField().get_prep_value(lazy_func()),
datetime.datetime)
def test_DecimalField(self):
lazy_func = lazy(lambda: Decimal('1.2'), Decimal)
self.assertIsInstance(
DecimalField().get_prep_value(lazy_func()),
Decimal)
def test_EmailField(self):
lazy_func = lazy(lambda: 'mailbox@domain.com', six.text_type)
self.assertIsInstance(
EmailField().get_prep_value(lazy_func()),
six.text_type)
def test_FileField(self):
lazy_func = lazy(lambda: 'filename.ext', six.text_type)
self.assertIsInstance(
FileField().get_prep_value(lazy_func()),
six.text_type)
def test_FilePathField(self):
lazy_func = lazy(lambda: 'tests.py', six.text_type)
self.assertIsInstance(
FilePathField().get_prep_value(lazy_func()),
six.text_type)
def test_FloatField(self):
lazy_func = lazy(lambda: 1.2, float)
self.assertIsInstance(
FloatField().get_prep_value(lazy_func()),
float)
def test_ImageField(self):
lazy_func = lazy(lambda: 'filename.ext', six.text_type)
self.assertIsInstance(
ImageField().get_prep_value(lazy_func()),
six.text_type)
def test_IntegerField(self):
lazy_func = lazy(lambda: 1, int)
self.assertIsInstance(
IntegerField().get_prep_value(lazy_func()),
int)
def test_IPAddressField(self):
lazy_func = lazy(lambda: '127.0.0.1', six.text_type)
with warnings.catch_warnings(record=True):
warnings.simplefilter("always")
self.assertIsInstance(
IPAddressField().get_prep_value(lazy_func()),
six.text_type)
def test_GenericIPAddressField(self):
lazy_func = lazy(lambda: '127.0.0.1', six.text_type)
self.assertIsInstance(
GenericIPAddressField().get_prep_value(lazy_func()),
six.text_type)
def test_NullBooleanField(self):
lazy_func = lazy(lambda: True, bool)
self.assertIsInstance(
NullBooleanField().get_prep_value(lazy_func()),
bool)
def test_PositiveIntegerField(self):
lazy_func = lazy(lambda: 1, int)
self.assertIsInstance(
PositiveIntegerField().get_prep_value(lazy_func()),
int)
def test_PositiveSmallIntegerField(self):
lazy_func = lazy(lambda: 1, int)
self.assertIsInstance(
PositiveSmallIntegerField().get_prep_value(lazy_func()),
int)
def test_SlugField(self):
lazy_func = lazy(lambda: 'slug', six.text_type)
self.assertIsInstance(
SlugField().get_prep_value(lazy_func()),
six.text_type)
def test_SmallIntegerField(self):
lazy_func = lazy(lambda: 1, int)
self.assertIsInstance(
SmallIntegerField().get_prep_value(lazy_func()),
int)
def test_TextField(self):
lazy_func = lazy(lambda: 'Abc', six.text_type)
self.assertIsInstance(
TextField().get_prep_value(lazy_func()),
six.text_type)
def test_TimeField(self):
lazy_func = lazy(lambda: datetime.datetime.now().time(), datetime.time)
self.assertIsInstance(
TimeField().get_prep_value(lazy_func()),
datetime.time)
def test_URLField(self):
lazy_func = lazy(lambda: 'http://domain.com', six.text_type)
self.assertIsInstance(
URLField().get_prep_value(lazy_func()),
six.text_type)
class CustomFieldTests(unittest.TestCase):
def test_14786(self):
"""
Regression test for #14786 -- Test that field values are not prepared
twice in get_db_prep_lookup().
"""
class NoopField(models.TextField):
def __init__(self, *args, **kwargs):
self.prep_value_count = 0
super(NoopField, self).__init__(*args, **kwargs)
def get_prep_value(self, value):
self.prep_value_count += 1
return super(NoopField, self).get_prep_value(value)
field = NoopField()
field.get_db_prep_lookup(
'exact', 'TEST', connection=connection, prepared=False
)
self.assertEqual(field.prep_value_count, 1)
|
|
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Helpers related to multiprocessing."""
import atexit
import builtins
import itertools
import logging
import multiprocessing
import multiprocessing.dummy
import os
import sys
import threading
import traceback
DISABLE_ASYNC = os.environ.get('SUPERSIZE_DISABLE_ASYNC') == '1'
if DISABLE_ASYNC:
logging.debug('Running in synchronous mode.')
_all_pools = None
_is_child_process = False
_silence_exceptions = False
# Used to pass parameters to forked processes without pickling.
_fork_params = None
_fork_kwargs = None
class _ImmediateResult(object):
def __init__(self, value):
self._value = value
def get(self):
return self._value
def wait(self):
pass
def ready(self):
return True
def successful(self):
return True
class _ExceptionWrapper(object):
"""Used to marshal exception messages back to main process."""
def __init__(self, msg, exception_type=None):
self.msg = msg
self.exception_type = exception_type
def MaybeThrow(self):
if self.exception_type:
raise getattr(builtins,
self.exception_type)('Originally caused by: ' + self.msg)
class _FuncWrapper(object):
"""Runs on the fork()'ed side to catch exceptions and spread *args."""
def __init__(self, func):
global _is_child_process
_is_child_process = True
self._func = func
def __call__(self, index, _=None):
try:
return self._func(*_fork_params[index], **_fork_kwargs)
except Exception as e:
# Only keep the exception type for builtin exception types or else risk
# further marshalling exceptions.
exception_type = None
if type(e).__name__ in dir(builtins):
exception_type = type(e).__name__
# multiprocessing is supposed to catch and return exceptions automatically
# but it doesn't seem to work properly :(.
return _ExceptionWrapper(traceback.format_exc(), exception_type)
except: # pylint: disable=bare-except
return _ExceptionWrapper(traceback.format_exc())
class _WrappedResult(object):
"""Allows for host-side logic to be run after child process has terminated.
* Unregisters associated pool from _all_pools.
* Raises exception caught by _FuncWrapper.
* Allows for custom unmarshalling of return value.
"""
def __init__(self, result, pool=None, decode_func=None):
self._result = result
self._pool = pool
self._decode_func = decode_func
def get(self):
self.wait()
value = self._result.get()
_CheckForException(value)
if not self._decode_func or not self._result.successful():
return value
return self._decode_func(value)
def wait(self):
self._result.wait()
if self._pool:
_all_pools.remove(self._pool)
self._pool = None
def ready(self):
return self._result.ready()
def successful(self):
return self._result.successful()
def _TerminatePools():
"""Calls .terminate() on all active process pools.
Not supposed to be necessary according to the docs, but seems to be required
when child process throws an exception or Ctrl-C is hit.
"""
global _silence_exceptions
_silence_exceptions = True
# Child processes cannot have pools, but atexit runs this function because
# it was registered before fork()ing.
if _is_child_process:
return
def close_pool(pool):
try:
pool.terminate()
except: # pylint: disable=bare-except
pass
for i, pool in enumerate(_all_pools):
# Without calling terminate() on a separate thread, the call can block
# forever.
thread = threading.Thread(
name='Pool-Terminate-{}'.format(i), target=close_pool, args=(pool, ))
thread.daemon = True
thread.start()
def _CheckForException(value):
if isinstance(value, _ExceptionWrapper):
global _silence_exceptions
if not _silence_exceptions:
value.MaybeThrow()
_silence_exceptions = True
logging.error('Subprocess raised an exception:\n%s', value.msg)
sys.exit(1)
def _MakeProcessPool(job_params, **job_kwargs):
global _all_pools
global _fork_params
global _fork_kwargs
assert _fork_params is None
assert _fork_kwargs is None
pool_size = min(len(job_params), multiprocessing.cpu_count())
_fork_params = job_params
_fork_kwargs = job_kwargs
ret = multiprocessing.Pool(pool_size)
_fork_params = None
_fork_kwargs = None
if _all_pools is None:
_all_pools = []
atexit.register(_TerminatePools)
_all_pools.append(ret)
return ret
def ForkAndCall(func, args, decode_func=None):
"""Runs |func| in a fork'ed process.
Returns:
A Result object (call .get() to get the return value)
"""
if DISABLE_ASYNC:
pool = None
result = _ImmediateResult(func(*args))
else:
pool = _MakeProcessPool([args]) # Omit |kwargs|.
result = pool.apply_async(_FuncWrapper(func), (0, ))
pool.close()
return _WrappedResult(result, pool=pool, decode_func=decode_func)
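# Illustrative usage sketch (not part of the original module): |func| must be
# picklable (i.e. defined at module scope), and the promise is resolved with
# .get(). The function name below is hypothetical.
#
#   promise = ForkAndCall(compute_sizes, ('input.so',))
#   sizes = promise.get()  # Blocks until the child process finishes.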
def BulkForkAndCall(func, arg_tuples, **kwargs):
"""Calls |func| in a fork'ed process for each set of args within |arg_tuples|.
Args:
kwargs: Common keyword arguments to be passed to |func|.
Yields the return values as they come in.
"""
arg_tuples = list(arg_tuples)
if not len(arg_tuples):
return
if DISABLE_ASYNC:
for args in arg_tuples:
yield func(*args, **kwargs)
return
pool = _MakeProcessPool(arg_tuples, **kwargs)
wrapped_func = _FuncWrapper(func)
try:
for result in pool.imap_unordered(wrapped_func, range(len(arg_tuples))):
_CheckForException(result)
yield result
finally:
pool.close()
pool.join()
_all_pools.remove(pool)
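# Illustrative usage sketch (not part of the original module): one result is
# yielded per argument tuple, in completion order. |parse_file| stands in for
# any picklable module-level function.
#
#   for parsed in BulkForkAndCall(parse_file, [(p,) for p in paths]):
#       handle(parsed)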
def CallOnThread(func, *args, **kwargs):
"""Calls |func| on a new thread and returns a promise for its return value."""
if DISABLE_ASYNC:
return _ImmediateResult(func(*args, **kwargs))
pool = multiprocessing.dummy.Pool(1)
result = pool.apply_async(func, args=args, kwds=kwargs)
pool.close()
return result
def EncodeDictOfLists(d, key_transform=None, value_transform=None):
"""Serializes a dict where values are lists of strings.
Does not support '' as keys, nor [''] as values.
"""
assert '' not in d
assert [''] not in iter(d.values())
keys = iter(d)
if key_transform:
keys = (key_transform(k) for k in keys)
keys = '\x01'.join(keys)
if value_transform:
values = '\x01'.join(
'\x02'.join(value_transform(y) for y in x) for x in d.values())
else:
values = '\x01'.join('\x02'.join(x) for x in d.values())
return keys, values
def JoinEncodedDictOfLists(encoded_values):
assert isinstance(encoded_values, list), 'Does not work with generators'
return ('\x01'.join(x[0] for x in encoded_values if x[0]),
'\x01'.join(x[1] for x in encoded_values if x[1]))
def DecodeDictOfLists(encoded_keys_and_values,
key_transform=None,
value_transform=None):
"""Deserializes a dict where values are lists of strings."""
encoded_keys, encoded_values = encoded_keys_and_values
if not encoded_keys:
return {}
keys = encoded_keys.split('\x01')
if key_transform:
keys = (key_transform(k) for k in keys)
encoded_lists = encoded_values.split('\x01')
ret = {}
for key, encoded_list in zip(keys, encoded_lists):
if not encoded_list:
values = []
else:
values = encoded_list.split('\x02')
if value_transform:
for i in range(len(values)):
values[i] = value_transform(values[i])
ret[key] = values
return ret
EMPTY_ENCODED_DICT = EncodeDictOfLists({})
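# Minimal round-trip sketch (illustrative, not part of the original module):
# EncodeDictOfLists() returns a (keys, values) pair of delimited strings that
# DecodeDictOfLists() turns back into the original dict.
#
#   encoded = EncodeDictOfLists({'a': ['1', '2'], 'b': ['3']})
#   assert DecodeDictOfLists(encoded) == {'a': ['1', '2'], 'b': ['3']}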
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Red Hat, Inc.
# All Rights Reserved.
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
gettext for openstack-common modules.
Usual usage in an openstack.common module:
from sarlacc.openstack.common.gettextutils import _
"""
import copy
import gettext
import logging.handlers
import os
import UserString
_localedir = os.environ.get('sarlacc'.upper() + '_LOCALEDIR')
_t = gettext.translation('sarlacc', localedir=_localedir, fallback=True)
def _(msg):
return _t.ugettext(msg)
def install(domain):
"""Install a _() function using the given translation domain.
Given a translation domain, install a _() function using gettext's
install() function.
The main difference from gettext.install() is that we allow
overriding the default localedir (e.g. /usr/share/locale) using
a translation-domain-specific environment variable (e.g.
NOVA_LOCALEDIR).
"""
gettext.install(domain,
localedir=os.environ.get(domain.upper() + '_LOCALEDIR'),
unicode=True)
"""
Lazy gettext functionality.
The following is an attempt to introduce a deferred way
to do translations on messages in OpenStack. We attempt to
override the standard _() function and % (format string) operation
to build Message objects that can later be translated when we have
more information. Also included is an example LogHandler that
translates Messages to an associated locale, effectively allowing
many logs, each with their own locale.
"""
def get_lazy_gettext(domain):
"""Assemble and return a lazy gettext function for a given domain.
Factory method for a project/module to get a lazy gettext function
for its own translation domain (i.e. nova, glance, cinder, etc.)
"""
def _lazy_gettext(msg):
"""Create and return a Message object.
Message encapsulates a string so that we can translate it later when
needed.
"""
return Message(msg, domain)
return _lazy_gettext
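# Illustrative usage sketch (not part of the original module): a project grabs
# a lazy translator for its own domain; translation and string formatting are
# deferred until the Message is rendered as a string.
#
#   _ = get_lazy_gettext('sarlacc')
#   msg = _('Deleted %(count)d objects') % {'count': 3}
#   unicode(msg)  # translation and parameter substitution happen here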
class Message(UserString.UserString, object):
"""Class used to encapsulate translatable messages."""
def __init__(self, msg, domain):
# _msg is the gettext msgid and should never change
self._msg = msg
self._left_extra_msg = ''
self._right_extra_msg = ''
self.params = None
self.locale = None
self.domain = domain
@property
def data(self):
# NOTE(mrodden): this should always resolve to a unicode string
# that best represents the state of the message currently
localedir = os.environ.get(self.domain.upper() + '_LOCALEDIR')
if self.locale:
lang = gettext.translation(self.domain,
localedir=localedir,
languages=[self.locale],
fallback=True)
else:
# use system locale for translations
lang = gettext.translation(self.domain,
localedir=localedir,
fallback=True)
full_msg = (self._left_extra_msg +
lang.ugettext(self._msg) +
self._right_extra_msg)
if self.params is not None:
full_msg = full_msg % self.params
return unicode(full_msg)
def _save_parameters(self, other):
# we check for None later to see if
# we actually have parameters to inject,
# so encapsulate if our parameter is actually None
if other is None:
self.params = (other, )
else:
self.params = copy.deepcopy(other)
return self
# overrides to be more string-like
def __unicode__(self):
return self.data
def __str__(self):
return self.data.encode('utf-8')
def __getstate__(self):
to_copy = ['_msg', '_right_extra_msg', '_left_extra_msg',
'domain', 'params', 'locale']
new_dict = self.__dict__.fromkeys(to_copy)
for attr in to_copy:
new_dict[attr] = copy.deepcopy(self.__dict__[attr])
return new_dict
def __setstate__(self, state):
for (k, v) in state.items():
setattr(self, k, v)
# operator overloads
def __add__(self, other):
copied = copy.deepcopy(self)
copied._right_extra_msg += other.__str__()
return copied
def __radd__(self, other):
copied = copy.deepcopy(self)
copied._left_extra_msg += other.__str__()
return copied
def __mod__(self, other):
# do a format string to catch and raise
# any possible KeyErrors from missing parameters
self.data % other
copied = copy.deepcopy(self)
return copied._save_parameters(other)
def __mul__(self, other):
return self.data * other
def __rmul__(self, other):
return other * self.data
def __getitem__(self, key):
return self.data[key]
def __getslice__(self, start, end):
return self.data.__getslice__(start, end)
def __getattribute__(self, name):
# NOTE(mrodden): handle lossy operations that we can't deal with yet
# These override the UserString implementation, since UserString
# uses our __class__ attribute to try and build a new message
# after running the inner data string through the operation.
# At that point, we have lost the gettext message id and can just
# safely resolve to a string instead.
ops = ['capitalize', 'center', 'decode', 'encode',
'expandtabs', 'ljust', 'lstrip', 'replace', 'rjust', 'rstrip',
'strip', 'swapcase', 'title', 'translate', 'upper', 'zfill']
if name in ops:
return getattr(self.data, name)
else:
return UserString.UserString.__getattribute__(self, name)
class LocaleHandler(logging.Handler):
"""Handler that can have a locale associated to translate Messages.
A quick example of how to utilize the Message class above.
LocaleHandler takes a locale and a target logging.Handler object
to forward LogRecord objects to after translating the internal Message.
"""
def __init__(self, locale, target):
"""Initialize a LocaleHandler
:param locale: locale to use for translating messages
:param target: logging.Handler object to forward
LogRecord objects to after translation
"""
logging.Handler.__init__(self)
self.locale = locale
self.target = target
def emit(self, record):
if isinstance(record.msg, Message):
# set the locale and resolve to a string
record.msg.locale = self.locale
self.target.emit(record)
|
|
import numpy as np
from .utils import expand_array
import os
import json
from pkg_resources import resource_stream, Requirement
DEFAULT_START_YEAR = 2013
class Parameters(object):
CUR_PATH = os.path.abspath(os.path.dirname(__file__))
PARAM_FILENAME = "params.json"
params_path = os.path.join(CUR_PATH, PARAM_FILENAME)
#Mapping of year to inflation rate
__rates = {2013:0.015, 2014:0.020, 2015:0.022, 2016:0.020, 2017:0.021,
2018:0.022, 2019:0.023, 2020:0.024, 2021:0.024, 2022:0.024,
2023:0.024, 2024:0.024}
@classmethod
def from_file(cls, file_name, **kwargs):
if file_name:
with open(file_name) as f:
params = json.loads(f.read())
else:
params = None
return cls(data=params, **kwargs)
def __init__(self, start_year=DEFAULT_START_YEAR, budget_years=12,
inflation_rate=None, inflation_rates=None, data=None,
**kwargs):
if inflation_rate and inflation_rates:
raise ValueError("Can only specify either one constant inflation"
" rate or a list of inflation rates")
self._inflation_rates = None
if inflation_rate:
self._inflation_rates = [inflation_rate] * budget_years
if inflation_rates:
assert len(inflation_rates) == budget_years
self._inflation_rates = [inflation_rates[start_year + i]
for i in range(0, budget_years)]
if not self._inflation_rates:
self._inflation_rates = [self.__rates[start_year + i]
for i in range(0, budget_years)]
self._current_year = start_year
self._start_year = start_year
self._budget_years = budget_years
if data:
self._vals = data
else:
self._vals = default_data(metadata=True)
# INITIALIZE
for name, data in self._vals.items():
cpi_inflated = data.get('cpi_inflated', False)
values = data['value']
setattr(self, name, expand_array(values,
inflate=cpi_inflated, inflation_rates=self._inflation_rates,
num_years=budget_years))
self.set_year(start_year)
def update(self, year_mods):
"""
Take a dictionary of year: {name:val} mods and set them on this Params object.
'year_mods' is a dictionary of year: mods where mods is a dict of key:value pairs
and key_cpi:Bool pairs. The key_cpi:Bool pairs indicate if the value for 'key'
should be inflated.
Parameters:
----------
year_mods: dict
"""
if not all(isinstance(k, int) for k in year_mods.keys()):
raise ValueError("Every key must be a year, e.g. 2011, 2012, etc.")
defaults = default_data(metadata=True)
for year, mods in year_mods.items():
num_years_to_expand = (self.start_year + self.budget_years) - year
for name, values in mods.items():
if name.endswith("_cpi"):
continue
if name in defaults:
default_cpi = defaults[name].get('cpi_inflated', False)
else:
default_cpi = False
cpi_inflated = mods.get(name + "_cpi", default_cpi)
if year == self.start_year and year == self.current_year:
nval = expand_array(values,
inflate=cpi_inflated,
inflation_rates=self._inflation_rates,
num_years=num_years_to_expand)
setattr(self, name, nval)
elif year <= self.current_year and year >= self.start_year:
# advance until the parameter is in line with the current
# year
num_years_to_skip = self.current_year - year
inf_rates = [self.__rates[year + i]
for i in range(0, num_years_to_expand)]
nval = expand_array(values,
inflate=cpi_inflated,
inflation_rates=inf_rates,
num_years=num_years_to_expand)
if self.current_year > self.start_year:
cur_val = getattr(self, name)
offset = self.current_year - self.start_year
cur_val[offset:] = nval[num_years_to_skip:]
else:
setattr(self, name, nval[num_years_to_skip:])
else: # year > current_year
msg = ("Can't specify a parameter for a year that is in the"
" future because we don't know how to fill in the "
" values for the years between {0} and {1}.")
raise ValueError(msg.format(self.current_year, year))
# Set up the '_X = [a, b,...]' variables as 'X = a'
self.set_year(self._current_year)
@property
def current_year(self):
return self._current_year
@property
def start_year(self):
return self._start_year
@property
def budget_years(self):
return self._budget_years
def increment_year(self):
self._current_year += 1
self.set_year(self._current_year)
def set_year(self, yr):
for name, vals in self._vals.items():
arr = getattr(self, name)
setattr(self, name[1:], arr[yr-self._start_year])
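# Illustrative usage sketch (not part of the original class). Parameter names
# come from params.json, so '_some_rate' below is hypothetical; note that
# set_year() exposes the current-year value without the leading underscore.
#
#   params = Parameters(start_year=2013, budget_years=12)
#   params.update({2013: {'_some_rate': [0.05]}})  # applies from 2013 onward
#   params.increment_year()                        # current_year -> 2014
#   params.some_rate                               # value for the current year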
def default_data(metadata=False, start_year=None):
""" Retreive of default parameters """
parampath = Parameters.params_path
if not os.path.exists(parampath):
path_in_egg = os.path.join("taxcalc", Parameters.PARAM_FILENAME)
buf = resource_stream(Requirement.parse("taxcalc"), path_in_egg)
_bytes = buf.read()
as_string = _bytes.decode("utf-8")
params = json.loads(as_string)
else:
with open(Parameters.params_path) as f:
params = json.load(f)
if start_year:
for k, v in params.items():
first_year = v.get('start_year', DEFAULT_START_YEAR)
assert isinstance(first_year, int)
if start_year < first_year:
msg = "Can't set a start year of {0}, because it is before {1}"
raise ValueError(msg.format(start_year, first_year))
#Set the new start year:
v['start_year'] = start_year
#Work with the values
vals = v['value']
last_year_for_data = first_year + len(vals) - 1
if last_year_for_data < start_year:
if v['row_label']:
v['row_label'] = ["2015"]
#Need to produce new values
new_val = vals[-1]
if v['cpi_inflated'] is True:
if isinstance(new_val, list):
for y in range(last_year_for_data, start_year):
new_val = [x * (1.0 + Parameters._Parameters__rates[y]) for x in new_val]
else:
for y in range(last_year_for_data, start_year):
new_val *= 1.0 + Parameters._Parameters__rates[y]
#Set the new values
v['value'] = [new_val]
else:
#Need to get rid of [first_year, ..., start_year-1] values
years_to_chop = start_year - first_year
if v['row_label']:
v['row_label'] = v['row_label'][years_to_chop:]
v['value'] = v['value'][years_to_chop:]
if metadata:
return params
else:
return {k: v['value'] for k, v in params.items()}
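# Illustrative usage sketch (not part of the original module; the keys of the
# returned dict come from params.json):
#
#   full = default_data(metadata=True)      # {name: {'value': [...], ...}, ...}
#   values = default_data(start_year=2015)  # {name: [values from 2015 on], ...}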
|
|
#!/usr/bin/env python
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This application demonstrates speech transcription using the
Google Cloud API.
Usage Examples:
python beta_snippets.py transcription \
gs://python-docs-samples-tests/video/googlework_tiny.mp4
python beta_snippets.py video-text-gcs \
gs://python-docs-samples-tests/video/googlework_tiny.mp4
python beta_snippets.py streaming-labels resources/cat.mp4
python beta_snippets.py streaming-shot-change resources/cat.mp4
python beta_snippets.py streaming-objects resources/cat.mp4
python beta_snippets.py streaming-explicit-content resources/cat.mp4
python beta_snippets.py streaming-annotation-storage resources/cat.mp4 \
gs://mybucket/myfolder
python beta_snippets.py streaming-automl-classification resources/cat.mp4 \
$PROJECT_ID $MODEL_ID
python beta_snippets.py streaming-automl-object-tracking resources/cat.mp4 \
$PROJECT_ID $MODEL_ID
python beta_snippets.py streaming-automl-action-recognition \
resources/cat.mp4 $PROJECT_ID $MODEL_ID
"""
import argparse
import io
def speech_transcription(input_uri, timeout=180):
# [START video_speech_transcription_gcs_beta]
"""Transcribe speech from a video stored on GCS."""
from google.cloud import videointelligence_v1p1beta1 as videointelligence
video_client = videointelligence.VideoIntelligenceServiceClient()
features = [videointelligence.Feature.SPEECH_TRANSCRIPTION]
config = videointelligence.SpeechTranscriptionConfig(
language_code="en-US", enable_automatic_punctuation=True
)
video_context = videointelligence.VideoContext(speech_transcription_config=config)
operation = video_client.annotate_video(
request={
"features": features,
"input_uri": input_uri,
"video_context": video_context,
}
)
print("\nProcessing video for speech transcription.")
result = operation.result(timeout)
# There is only one annotation_result since only
# one video is processed.
annotation_results = result.annotation_results[0]
for speech_transcription in annotation_results.speech_transcriptions:
# The number of alternatives for each transcription is limited by
# SpeechTranscriptionConfig.max_alternatives.
# Each alternative is a different possible transcription
# and has its own confidence score.
for alternative in speech_transcription.alternatives:
print("Alternative level information:")
print("Transcript: {}".format(alternative.transcript))
print("Confidence: {}\n".format(alternative.confidence))
print("Word level information:")
for word_info in alternative.words:
word = word_info.word
start_time = word_info.start_time
end_time = word_info.end_time
print(
"\t{}s - {}s: {}".format(
start_time.seconds + start_time.microseconds * 1e-6,
end_time.seconds + end_time.microseconds * 1e-6,
word,
)
)
# [END video_speech_transcription_gcs_beta]
def video_detect_text_gcs(input_uri):
# [START video_detect_text_gcs_beta]
"""Detect text in a video stored on GCS."""
from google.cloud import videointelligence_v1p2beta1 as videointelligence
video_client = videointelligence.VideoIntelligenceServiceClient()
features = [videointelligence.Feature.TEXT_DETECTION]
operation = video_client.annotate_video(
request={"features": features, "input_uri": input_uri}
)
print("\nProcessing video for text detection.")
result = operation.result(timeout=300)
# The first result is retrieved because a single video was processed.
annotation_result = result.annotation_results[0]
# Get only the first result
text_annotation = annotation_result.text_annotations[0]
print("\nText: {}".format(text_annotation.text))
# Get the first text segment
text_segment = text_annotation.segments[0]
start_time = text_segment.segment.start_time_offset
end_time = text_segment.segment.end_time_offset
print(
"start_time: {}, end_time: {}".format(
start_time.seconds + start_time.microseconds * 1e-6,
end_time.seconds + end_time.microseconds * 1e-6,
)
)
print("Confidence: {}".format(text_segment.confidence))
# Show the result for the first frame in this segment.
frame = text_segment.frames[0]
time_offset = frame.time_offset
print(
"Time offset for the first frame: {}".format(
time_offset.seconds + time_offset.microseconds * 1e-6
)
)
print("Rotated Bounding Box Vertices:")
for vertex in frame.rotated_bounding_box.vertices:
print("\tVertex.x: {}, Vertex.y: {}".format(vertex.x, vertex.y))
# [END video_detect_text_gcs_beta]
return annotation_result.text_annotations
def video_detect_text(path):
# [START video_detect_text_beta]
"""Detect text in a local video."""
from google.cloud import videointelligence_v1p2beta1 as videointelligence
video_client = videointelligence.VideoIntelligenceServiceClient()
features = [videointelligence.Feature.TEXT_DETECTION]
video_context = videointelligence.VideoContext()
with io.open(path, "rb") as file:
input_content = file.read()
operation = video_client.annotate_video(
request={
"features": features,
"input_content": input_content,
"video_context": video_context,
}
)
print("\nProcessing video for text detection.")
result = operation.result(timeout=300)
# The first result is retrieved because a single video was processed.
annotation_result = result.annotation_results[0]
# Get only the first result
text_annotation = annotation_result.text_annotations[0]
print("\nText: {}".format(text_annotation.text))
# Get the first text segment
text_segment = text_annotation.segments[0]
start_time = text_segment.segment.start_time_offset
end_time = text_segment.segment.end_time_offset
print(
"start_time: {}, end_time: {}".format(
start_time.seconds + start_time.microseconds * 1e-6,
end_time.seconds + end_time.microseconds * 1e-6,
)
)
print("Confidence: {}".format(text_segment.confidence))
# Show the result for the first frame in this segment.
frame = text_segment.frames[0]
time_offset = frame.time_offset
print(
"Time offset for the first frame: {}".format(
time_offset.seconds + time_offset.microseconds * 1e-6
)
)
print("Rotated Bounding Box Vertices:")
for vertex in frame.rotated_bounding_box.vertices:
print("\tVertex.x: {}, Vertex.y: {}".format(vertex.x, vertex.y))
# [END video_detect_text_beta]
return annotation_result.text_annotations
def detect_labels_streaming(path):
# [START video_streaming_label_detection_beta]
from google.cloud import videointelligence_v1p3beta1 as videointelligence
# path = 'path_to_file'
client = videointelligence.StreamingVideoIntelligenceServiceClient()
# Set streaming config.
config = videointelligence.StreamingVideoConfig(
feature=(videointelligence.StreamingFeature.STREAMING_LABEL_DETECTION)
)
# config_request should be the first in the stream of requests.
config_request = videointelligence.StreamingAnnotateVideoRequest(
video_config=config
)
# Set the chunk size to 5MB (recommended less than 10MB).
chunk_size = 5 * 1024 * 1024
# Load file content.
stream = []
with io.open(path, "rb") as video_file:
while True:
data = video_file.read(chunk_size)
if not data:
break
stream.append(data)
def stream_generator():
yield config_request
for chunk in stream:
yield videointelligence.StreamingAnnotateVideoRequest(input_content=chunk)
requests = stream_generator()
# streaming_annotate_video returns a generator.
# The default timeout is about 300 seconds.
# To process longer videos it should be set to
# larger than the length (in seconds) of the stream.
responses = client.streaming_annotate_video(requests, timeout=600)
# Each response corresponds to about 1 second of video.
for response in responses:
# Check for errors.
if response.error.message:
print(response.error.message)
break
label_annotations = response.annotation_results.label_annotations
# label_annotations could be empty
if not label_annotations:
continue
for annotation in label_annotations:
# Each annotation has one frame, which has a time offset.
frame = annotation.frames[0]
time_offset = (
frame.time_offset.seconds + frame.time_offset.microseconds / 1e6
)
description = annotation.entity.description
confidence = annotation.frames[0].confidence
# description is in Unicode
print(
u"{}s: {} (confidence: {})".format(time_offset, description, confidence)
)
# [END video_streaming_label_detection_beta]
def detect_shot_change_streaming(path):
# [START video_streaming_shot_change_detection_beta]
from google.cloud import videointelligence_v1p3beta1 as videointelligence
# path = 'path_to_file'
client = videointelligence.StreamingVideoIntelligenceServiceClient()
# Set streaming config.
config = videointelligence.StreamingVideoConfig(
feature=(videointelligence.StreamingFeature.STREAMING_SHOT_CHANGE_DETECTION)
)
# config_request should be the first in the stream of requests.
config_request = videointelligence.StreamingAnnotateVideoRequest(
video_config=config
)
# Set the chunk size to 5MB (recommended less than 10MB).
chunk_size = 5 * 1024 * 1024
# Load file content.
stream = []
with io.open(path, "rb") as video_file:
while True:
data = video_file.read(chunk_size)
if not data:
break
stream.append(data)
def stream_generator():
yield config_request
for chunk in stream:
yield videointelligence.StreamingAnnotateVideoRequest(input_content=chunk)
requests = stream_generator()
# streaming_annotate_video returns a generator.
# The default timeout is about 300 seconds.
# To process longer videos it should be set to
# larger than the length (in seconds) of the stream.
responses = client.streaming_annotate_video(requests, timeout=600)
# Each response corresponds to about 1 second of video.
for response in responses:
# Check for errors.
if response.error.message:
print(response.error.message)
break
for annotation in response.annotation_results.shot_annotations:
start = (
annotation.start_time_offset.seconds
+ annotation.start_time_offset.microseconds / 1e6
)
end = (
annotation.end_time_offset.seconds
+ annotation.end_time_offset.microseconds / 1e6
)
print("Shot: {}s to {}s".format(start, end))
# [END video_streaming_shot_change_detection_beta]
def track_objects_streaming(path):
# [START video_streaming_object_tracking_beta]
from google.cloud import videointelligence_v1p3beta1 as videointelligence
# path = 'path_to_file'
client = videointelligence.StreamingVideoIntelligenceServiceClient()
# Set streaming config.
config = videointelligence.StreamingVideoConfig(
feature=(videointelligence.StreamingFeature.STREAMING_OBJECT_TRACKING)
)
# config_request should be the first in the stream of requests.
config_request = videointelligence.StreamingAnnotateVideoRequest(
video_config=config
)
# Set the chunk size to 5MB (recommended less than 10MB).
chunk_size = 5 * 1024 * 1024
# Load file content.
stream = []
with io.open(path, "rb") as video_file:
while True:
data = video_file.read(chunk_size)
if not data:
break
stream.append(data)
def stream_generator():
yield config_request
for chunk in stream:
yield videointelligence.StreamingAnnotateVideoRequest(input_content=chunk)
requests = stream_generator()
# streaming_annotate_video returns a generator.
# The default timeout is about 300 seconds.
# To process longer videos it should be set to
# larger than the length (in seconds) of the stream.
responses = client.streaming_annotate_video(requests, timeout=900)
# Each response corresponds to about 1 second of video.
for response in responses:
# Check for errors.
if response.error.message:
print(response.error.message)
break
object_annotations = response.annotation_results.object_annotations
# object_annotations could be empty
if not object_annotations:
continue
for annotation in object_annotations:
# Each annotation has one frame, which has a time offset.
frame = annotation.frames[0]
time_offset = (
frame.time_offset.seconds + frame.time_offset.microseconds / 1e6
)
description = annotation.entity.description
confidence = annotation.confidence
# track_id tracks the same object in the video.
track_id = annotation.track_id
# description is in Unicode
print("{}s".format(time_offset))
print(u"\tEntity description: {}".format(description))
print("\tTrack Id: {}".format(track_id))
if annotation.entity.entity_id:
print("\tEntity id: {}".format(annotation.entity.entity_id))
print("\tConfidence: {}".format(confidence))
# Every annotation has only one frame
frame = annotation.frames[0]
box = frame.normalized_bounding_box
print("\tBounding box position:")
print("\tleft : {}".format(box.left))
print("\ttop : {}".format(box.top))
print("\tright : {}".format(box.right))
print("\tbottom: {}\n".format(box.bottom))
# [END video_streaming_object_tracking_beta]
def detect_explicit_content_streaming(path):
# [START video_streaming_explicit_content_detection_beta]
from google.cloud import videointelligence_v1p3beta1 as videointelligence
# path = 'path_to_file'
client = videointelligence.StreamingVideoIntelligenceServiceClient()
# Set streaming config.
config = videointelligence.StreamingVideoConfig(
feature=(
videointelligence.StreamingFeature.STREAMING_EXPLICIT_CONTENT_DETECTION
)
)
# config_request should be the first in the stream of requests.
config_request = videointelligence.StreamingAnnotateVideoRequest(
video_config=config
)
# Set the chunk size to 5MB (recommended less than 10MB).
chunk_size = 5 * 1024 * 1024
# Load file content.
stream = []
with io.open(path, "rb") as video_file:
while True:
data = video_file.read(chunk_size)
if not data:
break
stream.append(data)
def stream_generator():
yield config_request
for chunk in stream:
yield videointelligence.StreamingAnnotateVideoRequest(input_content=chunk)
requests = stream_generator()
# streaming_annotate_video returns a generator.
# The default timeout is about 300 seconds.
# To process longer videos it should be set to
# larger than the length (in seconds) of the stream.
responses = client.streaming_annotate_video(requests, timeout=900)
# Each response corresponds to about 1 second of video.
for response in responses:
# Check for errors.
if response.error.message:
print(response.error.message)
break
for frame in response.annotation_results.explicit_annotation.frames:
time_offset = (
frame.time_offset.seconds + frame.time_offset.microseconds / 1e6
)
pornography_likelihood = videointelligence.Likelihood(
frame.pornography_likelihood
)
print("Time: {}s".format(time_offset))
print("\tpornogaphy: {}".format(pornography_likelihood.name))
# [END video_streaming_explicit_content_detection_beta]
def annotation_to_storage_streaming(path, output_uri):
# [START video_streaming_annotation_to_storage_beta]
from google.cloud import videointelligence_v1p3beta1 as videointelligence
# path = 'path_to_file'
# output_uri = 'gs://path_to_output'
client = videointelligence.StreamingVideoIntelligenceServiceClient()
# Set streaming config specifying the output_uri.
# The output_uri is the prefix of the actual output files.
storage_config = videointelligence.StreamingStorageConfig(
enable_storage_annotation_result=True,
annotation_result_storage_directory=output_uri,
)
# Here we use label detection as an example.
# All features support output to GCS.
config = videointelligence.StreamingVideoConfig(
feature=(videointelligence.StreamingFeature.STREAMING_LABEL_DETECTION),
storage_config=storage_config,
)
# config_request should be the first in the stream of requests.
config_request = videointelligence.StreamingAnnotateVideoRequest(
video_config=config
)
# Set the chunk size to 5MB (recommended less than 10MB).
chunk_size = 5 * 1024 * 1024
# Load file content.
stream = []
with io.open(path, "rb") as video_file:
while True:
data = video_file.read(chunk_size)
if not data:
break
stream.append(data)
def stream_generator():
yield config_request
for chunk in stream:
yield videointelligence.StreamingAnnotateVideoRequest(input_content=chunk)
requests = stream_generator()
# streaming_annotate_video returns a generator.
# The default timeout is about 300 seconds.
# To process longer videos it should be set to
# larger than the length (in seconds) of the stream.
responses = client.streaming_annotate_video(requests, timeout=600)
for response in responses:
# Check for errors.
if response.error.message:
print(response.error.message)
break
print("Storage URI: {}".format(response.annotation_results_uri))
# [END video_streaming_annotation_to_storage_beta]
def streaming_automl_classification(path, project_id, model_id):
# [START video_streaming_automl_classification_beta]
import io
from google.cloud import videointelligence_v1p3beta1 as videointelligence
# path = 'path_to_file'
# project_id = 'gcp_project_id'
# model_id = 'automl_classification_model_id'
client = videointelligence.StreamingVideoIntelligenceServiceClient()
model_path = "projects/{}/locations/us-central1/models/{}".format(
project_id, model_id
)
# Here we use classification as an example.
automl_config = videointelligence.StreamingAutomlClassificationConfig(
model_name=model_path
)
video_config = videointelligence.StreamingVideoConfig(
feature=videointelligence.StreamingFeature.STREAMING_AUTOML_CLASSIFICATION,
automl_classification_config=automl_config,
)
# config_request should be the first in the stream of requests.
config_request = videointelligence.StreamingAnnotateVideoRequest(
video_config=video_config
)
# Set the chunk size to 5MB (recommended less than 10MB).
chunk_size = 5 * 1024 * 1024
# Load file content.
# Note: Input videos must have supported video codecs. See
# https://cloud.google.com/video-intelligence/docs/streaming/streaming#supported_video_codecs
# for more details.
stream = []
with io.open(path, "rb") as video_file:
while True:
data = video_file.read(chunk_size)
if not data:
break
stream.append(data)
def stream_generator():
yield config_request
for chunk in stream:
yield videointelligence.StreamingAnnotateVideoRequest(input_content=chunk)
requests = stream_generator()
# streaming_annotate_video returns a generator.
# The default timeout is about 300 seconds.
# To process longer videos it should be set to
# larger than the length (in seconds) of the stream.
responses = client.streaming_annotate_video(requests, timeout=600)
for response in responses:
# Check for errors.
if response.error.message:
print(response.error.message)
break
for label in response.annotation_results.label_annotations:
for frame in label.frames:
print(
"At {:3d}s segment, {:5.1%} {}".format(
frame.time_offset.seconds,
frame.confidence,
label.entity.entity_id,
)
)
# [END video_streaming_automl_classification_beta]
def streaming_automl_object_tracking(path, project_id, model_id):
# [START video_streaming_automl_object_tracking_beta]
import io
from google.cloud import videointelligence_v1p3beta1 as videointelligence
# path = 'path_to_file'
# project_id = 'project_id'
# model_id = 'automl_object_tracking_model_id'
client = videointelligence.StreamingVideoIntelligenceServiceClient()
model_path = "projects/{}/locations/us-central1/models/{}".format(
project_id, model_id
)
automl_config = videointelligence.StreamingAutomlObjectTrackingConfig(
model_name=model_path
)
video_config = videointelligence.StreamingVideoConfig(
feature=videointelligence.StreamingFeature.STREAMING_AUTOML_OBJECT_TRACKING,
automl_object_tracking_config=automl_config,
)
# config_request should be the first in the stream of requests.
config_request = videointelligence.StreamingAnnotateVideoRequest(
video_config=video_config
)
# Set the chunk size to 5MB (recommended less than 10MB).
chunk_size = 5 * 1024 * 1024
# Load file content.
# Note: Input videos must have supported video codecs. See
# https://cloud.google.com/video-intelligence/docs/streaming/streaming#supported_video_codecs
# for more details.
stream = []
with io.open(path, "rb") as video_file:
while True:
data = video_file.read(chunk_size)
if not data:
break
stream.append(data)
def stream_generator():
yield config_request
for chunk in stream:
yield videointelligence.StreamingAnnotateVideoRequest(input_content=chunk)
requests = stream_generator()
# streaming_annotate_video returns a generator.
# The default timeout is about 300 seconds.
# To process longer videos it should be set to
# larger than the length (in seconds) of the stream.
responses = client.streaming_annotate_video(requests, timeout=900)
# Each response corresponds to about 1 second of video.
for response in responses:
# Check for errors.
if response.error.message:
print(response.error.message)
break
object_annotations = response.annotation_results.object_annotations
# object_annotations could be empty
if not object_annotations:
continue
for annotation in object_annotations:
# Each annotation has one frame, which has a time offset.
frame = annotation.frames[0]
time_offset = (
frame.time_offset.seconds + frame.time_offset.microseconds / 1e6
)
description = annotation.entity.description
confidence = annotation.confidence
# track_id tracks the same object in the video.
track_id = annotation.track_id
# description is in Unicode
print("{}s".format(time_offset))
print(u"\tEntity description: {}".format(description))
print("\tTrack Id: {}".format(track_id))
if annotation.entity.entity_id:
print("\tEntity id: {}".format(annotation.entity.entity_id))
print("\tConfidence: {}".format(confidence))
# Every annotation has only one frame
frame = annotation.frames[0]
box = frame.normalized_bounding_box
print("\tBounding box position:")
print("\tleft : {}".format(box.left))
print("\ttop : {}".format(box.top))
print("\tright : {}".format(box.right))
print("\tbottom: {}\n".format(box.bottom))
# [END video_streaming_automl_object_tracking_beta]
def streaming_automl_action_recognition(path, project_id, model_id):
# [START video_streaming_automl_action_recognition_beta]
import io
from google.cloud import videointelligence_v1p3beta1 as videointelligence
# path = 'path_to_file'
# project_id = 'project_id'
# model_id = 'automl_action_recognition_model_id'
client = videointelligence.StreamingVideoIntelligenceServiceClient()
model_path = "projects/{}/locations/us-central1/models/{}".format(
project_id, model_id
)
automl_config = videointelligence.StreamingAutomlActionRecognitionConfig(
model_name=model_path
)
video_config = videointelligence.StreamingVideoConfig(
feature=videointelligence.StreamingFeature.STREAMING_AUTOML_ACTION_RECOGNITION,
automl_action_recognition_config=automl_config,
)
# config_request should be the first in the stream of requests.
config_request = videointelligence.StreamingAnnotateVideoRequest(
video_config=video_config
)
# Set the chunk size to 5MB (recommended less than 10MB).
chunk_size = 5 * 1024 * 1024
def stream_generator():
yield config_request
# Load file content.
# Note: Input videos must have supported video codecs. See
# https://cloud.google.com/video-intelligence/docs/streaming/streaming#supported_video_codecs
# for more details.
with io.open(path, "rb") as video_file:
while True:
data = video_file.read(chunk_size)
if not data:
break
yield videointelligence.StreamingAnnotateVideoRequest(
input_content=data
)
requests = stream_generator()
# streaming_annotate_video returns a generator.
# The default timeout is about 300 seconds.
# To process longer videos it should be set to
# larger than the length (in seconds) of the video.
responses = client.streaming_annotate_video(requests, timeout=900)
# Each response corresponds to about 1 second of video.
for response in responses:
# Check for errors.
if response.error.message:
print(response.error.message)
break
for label in response.annotation_results.label_annotations:
for frame in label.frames:
print(
"At {:3d}s segment, {:5.1%} {}".format(
frame.time_offset.seconds,
frame.confidence,
label.entity.entity_id,
)
)
# [END video_streaming_automl_action_recognition_beta]
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter
)
subparsers = parser.add_subparsers(dest="command")
speech_transcription_parser = subparsers.add_parser(
"transcription", help=speech_transcription.__doc__
)
speech_transcription_parser.add_argument("gcs_uri")
video_text_gcs_parser = subparsers.add_parser(
"video-text-gcs", help=video_detect_text_gcs.__doc__
)
video_text_gcs_parser.add_argument("gcs_uri")
video_text_parser = subparsers.add_parser(
"video-text", help=video_detect_text.__doc__
)
video_text_parser.add_argument("path")
video_streaming_labels_parser = subparsers.add_parser(
"streaming-labels", help=detect_labels_streaming.__doc__
)
video_streaming_labels_parser.add_argument("path")
video_streaming_shot_change_parser = subparsers.add_parser(
"streaming-shot-change", help=detect_shot_change_streaming.__doc__
)
video_streaming_shot_change_parser.add_argument("path")
video_streaming_objects_parser = subparsers.add_parser(
"streaming-objects", help=track_objects_streaming.__doc__
)
video_streaming_objects_parser.add_argument("path")
video_streaming_explicit_content_parser = subparsers.add_parser(
"streaming-explicit-content", help=detect_explicit_content_streaming.__doc__
)
video_streaming_explicit_content_parser.add_argument("path")
video_streaming_annotation_to_storage_parser = subparsers.add_parser(
"streaming-annotation-storage", help=annotation_to_storage_streaming.__doc__
)
video_streaming_annotation_to_storage_parser.add_argument("path")
video_streaming_annotation_to_storage_parser.add_argument("output_uri")
video_streaming_automl_classification_parser = subparsers.add_parser(
"streaming-automl-classification", help=streaming_automl_classification.__doc__
)
video_streaming_automl_classification_parser.add_argument("path")
video_streaming_automl_classification_parser.add_argument("project_id")
video_streaming_automl_classification_parser.add_argument("model_id")
video_streaming_automl_object_tracking_parser = subparsers.add_parser(
"streaming-automl-object-tracking",
help=streaming_automl_object_tracking.__doc__,
)
video_streaming_automl_object_tracking_parser.add_argument("path")
video_streaming_automl_object_tracking_parser.add_argument("project_id")
video_streaming_automl_object_tracking_parser.add_argument("model_id")
video_streaming_automl_action_recognition_parser = subparsers.add_parser(
"streaming-automl-action-recognition",
help=streaming_automl_action_recognition.__doc__,
)
video_streaming_automl_action_recognition_parser.add_argument("path")
video_streaming_automl_action_recognition_parser.add_argument("project_id")
video_streaming_automl_action_recognition_parser.add_argument("model_id")
args = parser.parse_args()
if args.command == "transcription":
speech_transcription(args.gcs_uri)
elif args.command == "video-text-gcs":
video_detect_text_gcs(args.gcs_uri)
elif args.command == "video-text":
video_detect_text(args.path)
elif args.command == "streaming-labels":
detect_labels_streaming(args.path)
elif args.command == "streaming-shot-change":
detect_shot_change_streaming(args.path)
elif args.command == "streaming-objects":
track_objects_streaming(args.path)
elif args.command == "streaming-explicit-content":
detect_explicit_content_streaming(args.path)
elif args.command == "streaming-annotation-storage":
annotation_to_storage_streaming(args.path, args.output_uri)
elif args.command == "streaming-automl-classification":
streaming_automl_classification(args.path, args.project_id, args.model_id)
elif args.command == "streaming-automl-object-tracking":
streaming_automl_object_tracking(args.path, args.project_id, args.model_id)
elif args.command == "streaming-automl-action-recognition":
streaming_automl_action_recognition(args.path, args.project_id, args.model_id)
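# For reference, a hedged sketch of how this command dispatcher is typically invoked from
# a shell. The script filename, bucket, and model identifiers below are assumptions for
# illustration only, not values taken from this file:
#
#   python beta_snippets.py transcription gs://my-bucket/my-video.mp4
#   python beta_snippets.py streaming-labels path/to/local_video.mp4
#   python beta_snippets.py streaming-automl-classification path/to/video.mp4 my-project my-model-id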
|
|
from test.support import run_unittest, check_warnings
import cgi
import os
import sys
import tempfile
import unittest
from io import StringIO, BytesIO
class HackedSysModule:
# The regression test will have real values in sys.argv, which
# will completely confuse the test of the cgi module
argv = []
stdin = sys.stdin
cgi.sys = HackedSysModule()
class ComparableException:
def __init__(self, err):
self.err = err
def __str__(self):
return str(self.err)
def __eq__(self, anExc):
if not isinstance(anExc, Exception):
return NotImplemented
return (self.err.__class__ == anExc.__class__ and
self.err.args == anExc.args)
def __getattr__(self, attr):
return getattr(self.err, attr)
def do_test(buf, method):
env = {}
if method == "GET":
fp = None
env['REQUEST_METHOD'] = 'GET'
env['QUERY_STRING'] = buf
elif method == "POST":
fp = BytesIO(buf.encode('latin-1')) # FieldStorage expects bytes
env['REQUEST_METHOD'] = 'POST'
env['CONTENT_TYPE'] = 'application/x-www-form-urlencoded'
env['CONTENT_LENGTH'] = str(len(buf))
else:
raise ValueError("unknown method: %s" % method)
try:
return cgi.parse(fp, env, strict_parsing=1)
except Exception as err:
return ComparableException(err)
parse_strict_test_cases = [
("", ValueError("bad query field: ''")),
("&", ValueError("bad query field: ''")),
("&&", ValueError("bad query field: ''")),
(";", ValueError("bad query field: ''")),
(";&;", ValueError("bad query field: ''")),
# Should the next few really be valid?
("=", {}),
("=&=", {}),
("=;=", {}),
# The rest seem to make sense
("=a", {'': ['a']}),
("&=a", ValueError("bad query field: ''")),
("=a&", ValueError("bad query field: ''")),
("=&a", ValueError("bad query field: 'a'")),
("b=a", {'b': ['a']}),
("b+=a", {'b ': ['a']}),
("a=b=a", {'a': ['b=a']}),
("a=+b=a", {'a': [' b=a']}),
("&b=a", ValueError("bad query field: ''")),
("b&=a", ValueError("bad query field: 'b'")),
("a=a+b&b=b+c", {'a': ['a b'], 'b': ['b c']}),
("a=a+b&a=b+a", {'a': ['a b', 'b a']}),
("x=1&y=2.0&z=2-3.%2b0", {'x': ['1'], 'y': ['2.0'], 'z': ['2-3.+0']}),
("x=1;y=2.0&z=2-3.%2b0", {'x': ['1'], 'y': ['2.0'], 'z': ['2-3.+0']}),
("x=1;y=2.0;z=2-3.%2b0", {'x': ['1'], 'y': ['2.0'], 'z': ['2-3.+0']}),
("Hbc5161168c542333633315dee1182227:key_store_seqid=400006&cuyer=r&view=bustomer&order_id=0bb2e248638833d48cb7fed300000f1b&expire=964546263&lobale=en-US&kid=130003.300038&ss=env",
{'Hbc5161168c542333633315dee1182227:key_store_seqid': ['400006'],
'cuyer': ['r'],
'expire': ['964546263'],
'kid': ['130003.300038'],
'lobale': ['en-US'],
'order_id': ['0bb2e248638833d48cb7fed300000f1b'],
'ss': ['env'],
'view': ['bustomer'],
}),
("group_id=5470&set=custom&_assigned_to=31392&_status=1&_category=100&SUBMIT=Browse",
{'SUBMIT': ['Browse'],
'_assigned_to': ['31392'],
'_category': ['100'],
'_status': ['1'],
'group_id': ['5470'],
'set': ['custom'],
})
]
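# A minimal worked example of one case from the table above, assuming the standard-library
# cgi module (the expected output mirrors the ('a=a+b&b=b+c', ...) entry):
#
#   >>> cgi.parse(None, {'REQUEST_METHOD': 'GET', 'QUERY_STRING': 'a=a+b&b=b+c'},
#   ...           strict_parsing=1)
#   {'a': ['a b'], 'b': ['b c']}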
def norm(seq):
return sorted(seq, key=repr)
def first_elts(list):
return [p[0] for p in list]
def first_second_elts(list):
return [(p[0], p[1][0]) for p in list]
def gen_result(data, environ):
encoding = 'latin-1'
fake_stdin = BytesIO(data.encode(encoding))
fake_stdin.seek(0)
form = cgi.FieldStorage(fp=fake_stdin, environ=environ, encoding=encoding)
result = {}
for k, v in dict(form).items():
result[k] = isinstance(v, list) and form.getlist(k) or v.value
return result
class CgiTests(unittest.TestCase):
def test_strict(self):
for orig, expect in parse_strict_test_cases:
# Test basic parsing
d = do_test(orig, "GET")
self.assertEqual(d, expect, "Error parsing %s method GET" % repr(orig))
d = do_test(orig, "POST")
self.assertEqual(d, expect, "Error parsing %s method POST" % repr(orig))
env = {'QUERY_STRING': orig}
fs = cgi.FieldStorage(environ=env)
if isinstance(expect, dict):
# test dict interface
self.assertEqual(len(expect), len(fs))
self.assertCountEqual(expect.keys(), fs.keys())
##self.assertEqual(norm(expect.values()), norm(fs.values()))
##self.assertEqual(norm(expect.items()), norm(fs.items()))
self.assertEqual(fs.getvalue("nonexistent field", "default"), "default")
# test individual fields
for key in expect.keys():
expect_val = expect[key]
self.assertIn(key, fs)
if len(expect_val) > 1:
self.assertEqual(fs.getvalue(key), expect_val)
else:
self.assertEqual(fs.getvalue(key), expect_val[0])
def test_log(self):
cgi.log("Testing")
cgi.logfp = StringIO()
cgi.initlog("%s", "Testing initlog 1")
cgi.log("%s", "Testing log 2")
self.assertEqual(cgi.logfp.getvalue(), "Testing initlog 1\nTesting log 2\n")
if os.path.exists("/dev/null"):
cgi.logfp = None
cgi.logfile = "/dev/null"
cgi.initlog("%s", "Testing log 3")
def log_cleanup():
"""Restore the global state of the log vars."""
cgi.logfile = ''
cgi.logfp.close()
cgi.logfp = None
cgi.log = cgi.initlog
self.addCleanup(log_cleanup)
cgi.log("Testing log 4")
def test_fieldstorage_readline(self):
# FieldStorage uses readline, which has the capacity to read all
# contents of the input file into memory; we use readline's size argument
# to prevent that for files that do not contain any newlines in
# non-GET/HEAD requests
class TestReadlineFile:
def __init__(self, file):
self.file = file
self.numcalls = 0
def readline(self, size=None):
self.numcalls += 1
if size:
return self.file.readline(size)
else:
return self.file.readline()
def __getattr__(self, name):
file = self.__dict__['file']
a = getattr(file, name)
if not isinstance(a, int):
setattr(self, name, a)
return a
f = TestReadlineFile(tempfile.TemporaryFile("wb+"))
self.addCleanup(f.close)
f.write(b'x' * 256 * 1024)
f.seek(0)
env = {'REQUEST_METHOD':'PUT'}
fs = cgi.FieldStorage(fp=f, environ=env)
self.addCleanup(fs.file.close)
# if we're not chunking properly, readline is only called twice
# (by read_binary); if we are chunking properly, it will be called 5 times
# as long as the chunksize is 1 << 16.
self.assertTrue(f.numcalls > 2)
f.close()
def test_fieldstorage_multipart(self):
#Test basic FieldStorage multipart parsing
env = {
'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': 'multipart/form-data; boundary={}'.format(BOUNDARY),
'CONTENT_LENGTH': '558'}
fp = BytesIO(POSTDATA.encode('latin-1'))
fs = cgi.FieldStorage(fp, environ=env, encoding="latin-1")
self.assertEqual(len(fs.list), 4)
expect = [{'name':'id', 'filename':None, 'value':'1234'},
{'name':'title', 'filename':None, 'value':''},
{'name':'file', 'filename':'test.txt', 'value':b'Testing 123.\n'},
{'name':'submit', 'filename':None, 'value':' Add '}]
for x in range(len(fs.list)):
for k, exp in expect[x].items():
got = getattr(fs.list[x], k)
self.assertEqual(got, exp)
def test_fieldstorage_multipart_non_ascii(self):
#Test basic FieldStorage multipart parsing
env = {'REQUEST_METHOD':'POST',
'CONTENT_TYPE': 'multipart/form-data; boundary={}'.format(BOUNDARY),
'CONTENT_LENGTH':'558'}
for encoding in ['iso-8859-1','utf-8']:
fp = BytesIO(POSTDATA_NON_ASCII.encode(encoding))
fs = cgi.FieldStorage(fp, environ=env,encoding=encoding)
self.assertEqual(len(fs.list), 1)
expect = [{'name':'id', 'filename':None, 'value':'\xe7\xf1\x80'}]
for x in range(len(fs.list)):
for k, exp in expect[x].items():
got = getattr(fs.list[x], k)
self.assertEqual(got, exp)
_qs_result = {
'key1': 'value1',
'key2': ['value2x', 'value2y'],
'key3': 'value3',
'key4': 'value4'
}
def testQSAndUrlEncode(self):
data = "key2=value2x&key3=value3&key4=value4"
environ = {
'CONTENT_LENGTH': str(len(data)),
'CONTENT_TYPE': 'application/x-www-form-urlencoded',
'QUERY_STRING': 'key1=value1&key2=value2y',
'REQUEST_METHOD': 'POST',
}
v = gen_result(data, environ)
self.assertEqual(self._qs_result, v)
def testQSAndFormData(self):
data = """---123
Content-Disposition: form-data; name="key2"
value2y
---123
Content-Disposition: form-data; name="key3"
value3
---123
Content-Disposition: form-data; name="key4"
value4
---123--
"""
environ = {
'CONTENT_LENGTH': str(len(data)),
'CONTENT_TYPE': 'multipart/form-data; boundary=-123',
'QUERY_STRING': 'key1=value1&key2=value2x',
'REQUEST_METHOD': 'POST',
}
v = gen_result(data, environ)
self.assertEqual(self._qs_result, v)
def testQSAndFormDataFile(self):
data = """---123
Content-Disposition: form-data; name="key2"
value2y
---123
Content-Disposition: form-data; name="key3"
value3
---123
Content-Disposition: form-data; name="key4"
value4
---123
Content-Disposition: form-data; name="upload"; filename="fake.txt"
Content-Type: text/plain
this is the content of the fake file
---123--
"""
environ = {
'CONTENT_LENGTH': str(len(data)),
'CONTENT_TYPE': 'multipart/form-data; boundary=-123',
'QUERY_STRING': 'key1=value1&key2=value2x',
'REQUEST_METHOD': 'POST',
}
result = self._qs_result.copy()
result.update({
'upload': b'this is the content of the fake file\n'
})
v = gen_result(data, environ)
self.assertEqual(result, v)
def test_deprecated_parse_qs(self):
# this func is moved to urllib.parse, this is just a sanity check
with check_warnings(('cgi.parse_qs is deprecated, use urllib.parse.'
'parse_qs instead', DeprecationWarning)):
self.assertEqual({'a': ['A1'], 'B': ['B3'], 'b': ['B2']},
cgi.parse_qs('a=A1&b=B2&B=B3'))
def test_deprecated_parse_qsl(self):
# this func is moved to urllib.parse, this is just a sanity check
with check_warnings(('cgi.parse_qsl is deprecated, use urllib.parse.'
'parse_qsl instead', DeprecationWarning)):
self.assertEqual([('a', 'A1'), ('b', 'B2'), ('B', 'B3')],
cgi.parse_qsl('a=A1&b=B2&B=B3'))
def test_parse_header(self):
self.assertEqual(
cgi.parse_header("text/plain"),
("text/plain", {}))
self.assertEqual(
cgi.parse_header("text/vnd.just.made.this.up ; "),
("text/vnd.just.made.this.up", {}))
self.assertEqual(
cgi.parse_header("text/plain;charset=us-ascii"),
("text/plain", {"charset": "us-ascii"}))
self.assertEqual(
cgi.parse_header('text/plain ; charset="us-ascii"'),
("text/plain", {"charset": "us-ascii"}))
self.assertEqual(
cgi.parse_header('text/plain ; charset="us-ascii"; another=opt'),
("text/plain", {"charset": "us-ascii", "another": "opt"}))
self.assertEqual(
cgi.parse_header('attachment; filename="silly.txt"'),
("attachment", {"filename": "silly.txt"}))
self.assertEqual(
cgi.parse_header('attachment; filename="strange;name"'),
("attachment", {"filename": "strange;name"}))
self.assertEqual(
cgi.parse_header('attachment; filename="strange;name";size=123;'),
("attachment", {"filename": "strange;name", "size": "123"}))
self.assertEqual(
cgi.parse_header('form-data; name="files"; filename="fo\\"o;bar"'),
("form-data", {"name": "files", "filename": 'fo"o;bar'}))
BOUNDARY = "---------------------------721837373350705526688164684"
POSTDATA = """-----------------------------721837373350705526688164684
Content-Disposition: form-data; name="id"
1234
-----------------------------721837373350705526688164684
Content-Disposition: form-data; name="title"
-----------------------------721837373350705526688164684
Content-Disposition: form-data; name="file"; filename="test.txt"
Content-Type: text/plain
Testing 123.
-----------------------------721837373350705526688164684
Content-Disposition: form-data; name="submit"
Add\x20
-----------------------------721837373350705526688164684--
"""
POSTDATA_NON_ASCII = """-----------------------------721837373350705526688164684
Content-Disposition: form-data; name="id"
\xe7\xf1\x80
-----------------------------721837373350705526688164684
"""
def test_main():
run_unittest(CgiTests)
if __name__ == '__main__':
test_main()
|
|
# -*- coding: utf-8 -*-
import os
import sys
import cv2
import numpy as np
from datetime import datetime
from keras.optimizers import SGD
from keras.layers import Input, merge, ZeroPadding2D
from keras.layers.core import Dense, Dropout, Activation
from keras.layers.convolutional import Convolution2D
from keras.layers.pooling import AveragePooling2D, GlobalAveragePooling2D, MaxPooling2D
from keras.layers.normalization import BatchNormalization
from keras.models import Model
import keras.backend as K
from keras.callbacks import ModelCheckpoint
from keras.utils import np_utils
from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img
from sklearn.metrics import log_loss
from scale_layer import Scale
SCENE_MODEL_SAVE_PATH = "/home/yan/Desktop/QlabChallengerRepo/ai_challenger_scene/densenet121"
def densenet121_model(img_rows, img_cols, color_type=1, nb_dense_block=4, growth_rate=32, nb_filter=64, reduction=0.5, dropout_rate=0.0, weight_decay=1e-4, num_classes=None):
'''
DenseNet 121 Model for Keras
Model Schema is based on
https://github.com/flyyufelix/DenseNet-Keras
ImageNet Pretrained Weights
Theano: https://drive.google.com/open?id=0Byy2AcGyEVxfMlRYb3YzV210VzQ
TensorFlow: https://drive.google.com/open?id=0Byy2AcGyEVxfSTA4SHJVOHNuTXc
# Arguments
nb_dense_block: number of dense blocks to add to end
growth_rate: number of filters to add per dense block
nb_filter: initial number of filters
reduction: reduction factor of transition blocks.
dropout_rate: dropout rate
weight_decay: weight decay factor
classes: optional number of classes to classify images
weights_path: path to pre-trained weights
# Returns
A Keras model instance.
'''
eps = 1.1e-5
# compute compression factor
compression = 1.0 - reduction
# Handle Dimension Ordering for different backends
global concat_axis
if K.image_dim_ordering() == 'tf':
concat_axis = 3
img_input = Input(shape=(img_rows, img_cols, color_type), name='data')
else:
concat_axis = 1
img_input = Input(shape=(color_type, img_rows, img_cols), name='data')
# From architecture for ImageNet (Table 1 in the paper)
nb_filter = 64
nb_layers = [6,12,24,16] # For DenseNet-121
# Initial convolution
x = ZeroPadding2D((3, 3), name='conv1_zeropadding')(img_input)
x = Convolution2D(nb_filter, 7, 7, subsample=(2, 2), name='conv1', bias=False)(x)
x = BatchNormalization(epsilon=eps, axis=concat_axis, name='conv1_bn')(x)
x = Scale(axis=concat_axis, name='conv1_scale')(x)
x = Activation('relu', name='relu1')(x)
x = ZeroPadding2D((1, 1), name='pool1_zeropadding')(x)
x = MaxPooling2D((3, 3), strides=(2, 2), name='pool1')(x)
# Add dense blocks
for block_idx in range(nb_dense_block - 1):
stage = block_idx+2
x, nb_filter = dense_block(x, stage, nb_layers[block_idx], nb_filter, growth_rate, dropout_rate=dropout_rate, weight_decay=weight_decay)
# Add transition_block
x = transition_block(x, stage, nb_filter, compression=compression, dropout_rate=dropout_rate, weight_decay=weight_decay)
nb_filter = int(nb_filter * compression)
final_stage = stage + 1
x, nb_filter = dense_block(x, final_stage, nb_layers[-1], nb_filter, growth_rate, dropout_rate=dropout_rate, weight_decay=weight_decay)
x = BatchNormalization(epsilon=eps, axis=concat_axis, name='conv'+str(final_stage)+'_blk_bn')(x)
x = Scale(axis=concat_axis, name='conv'+str(final_stage)+'_blk_scale')(x)
x = Activation('relu', name='relu'+str(final_stage)+'_blk')(x)
# Truncate and replace softmax layer for transfer learning
# Cannot use model.layers.pop() since model is not of Sequential() type
# The method below works since pre-trained weights are stored in layers but not in the model
x_newfc = GlobalAveragePooling2D(name='pool'+str(final_stage))(x)
x_newfc = Dense(num_classes, name='fc6')(x_newfc)
x_newfc = Activation('softmax', name='prob')(x_newfc)
model = Model(img_input, x_newfc)
if K.image_dim_ordering() == 'th':
# Use pre-trained weights for Theano backend
weights_path = 'densenet121/densenet121_weights_th.h5'
else:
# Use pre-trained weights for Tensorflow backend
weights_path = 'densenet121/DENSENET121_MODEL_WEIGHTS.01-0.74789.hdf5'
model.load_weights(weights_path, by_name=True)
# Learning rate is set to 1e-5 for fine-tuning
sgd = SGD(lr=1e-5, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(optimizer=sgd, loss='categorical_crossentropy', metrics=['accuracy'])
return model
def conv_block(x, stage, branch, nb_filter, dropout_rate=None, weight_decay=1e-4):
'''Apply BatchNorm, Relu, bottleneck 1x1 Conv2D, 3x3 Conv2D, and optional dropout
# Arguments
x: input tensor
stage: index for dense block
branch: layer index within each dense block
nb_filter: number of filters
dropout_rate: dropout rate
weight_decay: weight decay factor
'''
eps = 1.1e-5
conv_name_base = 'conv' + str(stage) + '_' + str(branch)
relu_name_base = 'relu' + str(stage) + '_' + str(branch)
# 1x1 Convolution (Bottleneck layer)
inter_channel = nb_filter * 4
x = BatchNormalization(epsilon=eps, axis=concat_axis, name=conv_name_base+'_x1_bn')(x)
x = Scale(axis=concat_axis, name=conv_name_base+'_x1_scale')(x)
x = Activation('relu', name=relu_name_base+'_x1')(x)
x = Convolution2D(inter_channel, 1, 1, name=conv_name_base+'_x1', bias=False)(x)
if dropout_rate:
x = Dropout(dropout_rate)(x)
# 3x3 Convolution
x = BatchNormalization(epsilon=eps, axis=concat_axis, name=conv_name_base+'_x2_bn')(x)
x = Scale(axis=concat_axis, name=conv_name_base+'_x2_scale')(x)
x = Activation('relu', name=relu_name_base+'_x2')(x)
x = ZeroPadding2D((1, 1), name=conv_name_base+'_x2_zeropadding')(x)
x = Convolution2D(nb_filter, 3, 3, name=conv_name_base+'_x2', bias=False)(x)
if dropout_rate:
x = Dropout(dropout_rate)(x)
return x
def transition_block(x, stage, nb_filter, compression=1.0, dropout_rate=None, weight_decay=1E-4):
''' Apply BatchNorm, 1x1 Convolution, averagePooling, optional compression, dropout
# Arguments
x: input tensor
stage: index for dense block
nb_filter: number of filters
compression: calculated as 1 - reduction. Reduces the number of feature maps in the transition block.
dropout_rate: dropout rate
weight_decay: weight decay factor
'''
eps = 1.1e-5
conv_name_base = 'conv' + str(stage) + '_blk'
relu_name_base = 'relu' + str(stage) + '_blk'
pool_name_base = 'pool' + str(stage)
x = BatchNormalization(epsilon=eps, axis=concat_axis, name=conv_name_base+'_bn')(x)
x = Scale(axis=concat_axis, name=conv_name_base+'_scale')(x)
x = Activation('relu', name=relu_name_base)(x)
x = Convolution2D(int(nb_filter * compression), 1, 1, name=conv_name_base, bias=False)(x)
if dropout_rate:
x = Dropout(dropout_rate)(x)
x = AveragePooling2D((2, 2), strides=(2, 2), name=pool_name_base)(x)
return x
def dense_block(x, stage, nb_layers, nb_filter, growth_rate, dropout_rate=None, weight_decay=1e-4, grow_nb_filters=True):
''' Build a dense_block where the output of each conv_block is fed to subsequent ones
# Arguments
x: input tensor
stage: index for dense block
nb_layers: the number of layers of conv_block to append to the model.
nb_filter: number of filters
growth_rate: growth rate
dropout_rate: dropout rate
weight_decay: weight decay factor
grow_nb_filters: flag to decide to allow number of filters to grow
'''
eps = 1.1e-5
concat_feat = x
for i in range(nb_layers):
branch = i+1
x = conv_block(concat_feat, stage, branch, growth_rate, dropout_rate, weight_decay)
concat_feat = merge([concat_feat, x], mode='concat', concat_axis=concat_axis, name='concat_'+str(stage)+'_'+str(branch))
if grow_nb_filters:
nb_filter += growth_rate
return concat_feat, nb_filter
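# Worked filter-count arithmetic for the defaults used above (growth_rate=32, reduction=0.5,
# nb_layers=[6, 12, 24, 16]), shown as a sanity check rather than executable code:
#   stage 2: 64  + 6*32  = 256  -> transition compresses to 128
#   stage 3: 128 + 12*32 = 512  -> transition compresses to 256
#   stage 4: 256 + 24*32 = 1024 -> transition compresses to 512
#   stage 5: 512 + 16*32 = 1024 (final block, no transition)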
if __name__ == '__main__':
img_rows, img_cols = 224, 224 # Resolution of inputs
channel = 3
num_classes = 80
batch_size = 8
nb_epoch = 15
nb_train_samples = 53880
nb_validation_samples = 7120
# Load our model
model = densenet121_model(img_rows=img_rows, img_cols=img_cols, color_type=channel, num_classes=num_classes,dropout_rate=0.2)
#classes
our_class = []
for i in range(num_classes):
our_class.append(str(i))
# data augmentation
train_datagen = ImageDataGenerator(
rotation_range=20,
width_shift_range=0.2,
height_shift_range=0.2,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True,
fill_mode='nearest')
test_datagen = ImageDataGenerator()
train_generator = train_datagen.flow_from_directory(
'/home/yan/Desktop/QlabChallengerRepo/dataset_224/data/train/',
target_size=(img_rows,img_cols),
batch_size=batch_size,
classes=our_class)
validation_generator = test_datagen.flow_from_directory(
'/home/yan/Desktop/QlabChallengerRepo/dataset_224/data/valid/',
target_size=(img_rows,img_cols),
batch_size=batch_size,
classes=our_class)
#print(train_generator.class_indices)
#print(validation_generator.class_indices)
# Callback
checkpointer = ModelCheckpoint(filepath='/home/yan/Desktop/QlabChallengerRepo/ai_challenger_scene/densenet121/DENSENET121_MODEL_WEIGHTS.{epoch:02d}-{val_acc:.5f}.hdf5',
monitor='val_acc',
verbose=1,
save_weights_only= True,
save_best_only=False)
# Start Fine-tuning
model.fit_generator(train_generator,
steps_per_epoch=nb_train_samples//batch_size,
epochs=nb_epoch,
shuffle=True,
verbose=1,
callbacks=[checkpointer],
validation_data=validation_generator,
validation_steps=nb_validation_samples//batch_size)
CURRENT_TIME = "DENSENET121_MODEL_WEIGHTS_"+datetime.now().strftime('%Y_%m_%d_%H_%M_%S')+".h5"
CURRENT_SCENE_MODEL_SAVE_PATH = os.path.join(SCENE_MODEL_SAVE_PATH, CURRENT_TIME)
model.save_weights(CURRENT_SCENE_MODEL_SAVE_PATH)
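# A hedged sketch of reloading the weights saved above for single-image inference. The
# image path and preprocessing below are assumptions, not taken from this script:
#   model = densenet121_model(img_rows=224, img_cols=224, color_type=3, num_classes=80)
#   model.load_weights(CURRENT_SCENE_MODEL_SAVE_PATH)
#   img = img_to_array(load_img('example.jpg', target_size=(224, 224)))
#   probs = model.predict(np.expand_dims(img, axis=0))  # shape: (1, 80)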
|
|
"""
file: components.py
author: Yoann Dupont
MIT License
Copyright (c) 2018 Yoann Dupont
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
try:
import Tkinter as tkinter
except ImportError:
import tkinter
try:
import ttk
except ImportError:
from tkinter import ttk
try:
from xml.etree import cElementTree as ET
except ImportError:
from xml.etree import ElementTree as ET
try:
from tkFont import Font
except ImportError:
from tkinter.font import Font
try:
import tkFileDialog
tkinter.filedialog = tkFileDialog
except ImportError:
import tkinter.filedialog
try:
import tkMessageBox
tkinter.messagebox = tkMessageBox
except ImportError:
import tkinter.messagebox
import os.path
import multiprocessing
import time
import codecs
import shutil
import logging
import sem
import sem.wapiti, sem.exporters.conll
import sem.importers
from sem.storage import Holder, Document, SEMCorpus
from sem.modules.tagger import load_master, main as tagger
from sem.modules import EnrichModule, WapitiLabelModule
from sem.logger import default_handler
from sem.storage import Annotation
from sem.storage.annotation import str2filter
from sem.storage.document import str2docfilter
from sem.misc import find_suggestions
class SemTkMasterSelector(ttk.Frame):
def __init__(self, root, resource_dir, lang="fr"):
ttk.Frame.__init__(self, root)
self.resource_dir = resource_dir
self._lang = None
langs = os.listdir(os.path.join(self.resource_dir, "master"))
if langs:
self._lang = (lang if lang in langs else langs[0])
self.items = (os.listdir(os.path.join(self.resource_dir, "master", self._lang)) if self._lang else [])
self.items.sort(key=lambda x: x.lower())
max_length = max([len(item) for item in self.items] + [0])
self.select_workflow_label = ttk.Label(root, text=u"select workflow:")
#strVar = tkinter.StringVar()
self.masters = tkinter.Listbox(root, width=max_length+1, height=len(self.items))#, textvariable=strVar)
for item in self.items:
self.masters.insert(tkinter.END, item)
def pack(self):
self.select_workflow_label.pack()
self.masters.pack()
def grid(self, row=0, column=0):
x = row
y = column
self.select_workflow_label.grid(row=x, column=y)
x += 1
self.masters.grid(row=x, column=y)
x += 1
return (x,y)
def workflow(self):
wf = self.masters.get(tkinter.ACTIVE)
return os.path.join(self.resource_dir, "master", self.lang(), wf) or None
def lang(self):
return self._lang
def set_lang(self, language):
self._lang = language
self.items = os.listdir(os.path.join(self.resource_dir, "master", self._lang))
self.items.sort(key=lambda x: x.lower())
max_length = max([len(item) for item in self.items] + [0])
self.masters["height"] = len(self.items)
self.masters.delete(0, tkinter.END)
for item in self.items:
self.masters.insert(tkinter.END, item)
class SemTkLangSelector(ttk.Frame):
def __init__(self, root, resource_dir):
ttk.Frame.__init__(self, root)
self.master_selector = None
self.resource_dir = resource_dir
self.items = os.listdir(os.path.join(self.resource_dir, "master"))
self.cur_lang = tkinter.StringVar()
self.select_lang_label = ttk.Label(root, text=u"select language:")
self.langs = ttk.Combobox(root, textvariable=self.cur_lang)
self.langs["values"] = self.items
self.langs.current(0)
for i, item in enumerate(self.items):
if item == "fr":
self.langs.current(i)
self.langs.bind("<<ComboboxSelected>>", self.select_lang)
def pack(self):
self.select_lang_label.pack()
self.langs.pack()
def grid(self, row=0, column=0):
x = row
y = column
self.select_lang_label.grid(row=x, column=y)
x += 1
self.langs.grid(row=x, column=y)
x += 1
return (x,y)
def lang(self):
return self.cur_lang.get()
def select_lang(self, event):
self.master_selector.set_lang(self.lang())
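# A minimal usage sketch for the two selectors above, assuming a Tk root window and a SEM
# resource directory laid out as resources/master/<lang>/<workflow> (the path below is an
# assumption). The wiring mirrors what SEMTkTrainInterface does further down:
#   root = tkinter.Tk()
#   masters = SemTkMasterSelector(root, "/path/to/resources")
#   langs = SemTkLangSelector(root, "/path/to/resources")
#   langs.master_selector = masters  # changing the language refreshes the workflow list
#   langs.pack()
#   masters.pack()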
class SemTkFileSelector(ttk.Frame):
def __init__(self, root, main_window, button_opt):
ttk.Frame.__init__(self, root)
self.root = root
self.main_window = main_window
self.current_files = None
self.button_opt = button_opt
# define options for opening or saving a file
self.file_opt = options = {}
options['defaultextension'] = '.txt'
options['filetypes'] = [('all files', '.*'), ('text files', '.txt')]
options['initialdir'] = os.path.expanduser("~")
options['parent'] = root
options['title'] = 'Select files to annotate.'
self.file_selector_button = ttk.Button(self.root, text=u"select file(s)", command=self.filenames)
self.label = ttk.Label(self.root, text=u"selected file(s):")
self.fa_search = tkinter.PhotoImage(file=os.path.join(self.main_window.resource_dir, "images", "fa_search_24_24.gif"))
self.file_selector_button.config(image=self.fa_search, compound=tkinter.LEFT)
self.scrollbar = ttk.Scrollbar(self.root)
self.selected_files = tkinter.Listbox(self.root, yscrollcommand=self.scrollbar.set)
self.scrollbar.config(command=self.selected_files.yview)
def pack(self):
self.file_selector_button.pack(**self.button_opt)
self.label.pack()
self.scrollbar.pack(side=tkinter.RIGHT, fill=tkinter.Y)
self.selected_files.pack(fill=tkinter.BOTH, expand=True)
def grid(self, row=0, column=0):
"""
TODO: to be tested
"""
x = row
y = column
self.file_selector_button.grid(row=x, column=y, **self.button_opt)
x += 1
self.label.grid(row=x, column=y)
x += 1
self.scrollbar.grid(row=x, column=y+1, sticky="ns")  # grid() has no fill/expand options; use sticky instead of pack
self.selected_files.grid(row=x, column=y, sticky="nsew")
x += 1
return (x,y)
def filenames(self, event=None):
self.current_files = list(tkinter.filedialog.askopenfilenames(**self.file_opt))
self.current_files.sort(key=lambda x:x.lower())
self.selected_files.delete(0, tkinter.END)
if self.current_files:
for current_file in self.current_files:
self.selected_files.insert(tkinter.END, os.path.basename(current_file))
self.file_opt['initialdir'] = os.path.dirname(self.current_files[0])
def files(self):
return self.current_files or []
class SemTkExportSelector(ttk.Frame):
def __init__(self, root):
ttk.Frame.__init__(self, root)
self.root = root
self.label = ttk.Label(self.root, text=u"select output format:")
self.export_formats = ["default"] + [exporter[:-3] for exporter in os.listdir(os.path.join(sem.SEM_HOME, "exporters")) if (exporter.endswith(".py") and not exporter.startswith("_") and not exporter == "exporter.py")]
self.export_combobox = ttk.Combobox(self.root)
self.export_combobox["values"] = self.export_formats
self.export_combobox.current(0)
def pack(self):
self.label.pack()
self.export_combobox.pack()
def grid(self, row=0, column=0):
x = row
y = column
self.label.grid(row=x, column=y)
x += 1
self.export_combobox.grid(row=x, column=y)
x += 1
return (x,y)
def export_format(self):
return self.export_combobox.get()
class SEMTkWapitiTrain(ttk.Frame):
def __init__(self, file_selector, master, annotation_name, annotation_level=None, document_filter=None, top=None, main_frame=None, text="Algorithm-specific variables", log_level="INFO"):
if top:
self.trainTop = top
else:
self.trainTop = tkinter.Toplevel()
self.file_selector = file_selector
self.master = master
self.annotation_name = annotation_name
self.annotation_level = annotation_level or tkinter.StringVar(self.trainTop, value="top level")
self.document_filter = document_filter or tkinter.StringVar(self.trainTop, value="all documents")
self.log_level = log_level
self.wapiti_train_logger = logging.getLogger("sem.wapiti_train")
self.wapiti_train_logger.addHandler(default_handler)
self.wapiti_train_logger.setLevel(self.log_level)
self.current_train = os.path.join(sem.SEM_DATA_DIR, "train")
if not os.path.exists(self.current_train):
os.makedirs(self.current_train)
self.main_frame = main_frame or self.trainTop
self.trainTop.focus_set()
self.CRF_algorithm_var = tkinter.StringVar(self.trainTop, value="rprop")
self.CRF_l1_var = tkinter.StringVar(self.trainTop, value="0.5")
self.CRF_l2_var = tkinter.StringVar(self.trainTop, value="0.0001")
self.CRF_nprocs_var = tkinter.StringVar(self.trainTop, value="1")
self.pattern_label_var = tkinter.StringVar(self.trainTop, value="")
self.compact_var = tkinter.IntVar()
self.compact_var.set(1)
algsFrame = ttk.LabelFrame(self.trainTop, text=text)
algsFrame.pack(fill="both", expand="yes")
crf_cur_row = 0
ttk.Button(algsFrame, text='pattern (optional)', command=self.select_file).grid(row=crf_cur_row, column=0, sticky=tkinter.W)
self.pattern_label = tkinter.Label(algsFrame, textvariable=self.pattern_label_var)
self.pattern_label.grid(row=crf_cur_row, column=1, sticky=tkinter.W)
crf_cur_row += 1
tkinter.Label(algsFrame, text='algorithm').grid(row=crf_cur_row, column=0, sticky=tkinter.W)
CRF_algorithmValue = ttk.Combobox(algsFrame, textvariable=self.CRF_algorithm_var)
CRF_algorithmValue["values"] = [u"rprop", u"l-bfgs", u"sgd-l1", u"bcd", u"rprop+", u"rprop-"]
CRF_algorithmValue.current(0)
CRF_algorithmValue.grid(row=crf_cur_row, column=1)
crf_cur_row += 1
tkinter.Label(algsFrame, text='l1').grid(row=crf_cur_row, column=0, sticky=tkinter.W)
CRF_algorithmValue = tkinter.Entry(algsFrame, textvariable=self.CRF_l1_var)
CRF_algorithmValue.grid(row=crf_cur_row, column=1)
crf_cur_row += 1
tkinter.Label(algsFrame, text='l2').grid(row=crf_cur_row, column=0, sticky=tkinter.W)
CRF_algorithmValue = tkinter.Entry(algsFrame, textvariable=self.CRF_l2_var)
CRF_algorithmValue.grid(row=crf_cur_row, column=1)
crf_cur_row += 1
tkinter.Label(algsFrame, text='number of processors').grid(row=crf_cur_row, column=0, sticky=tkinter.W)
CRF_nprocsValue = ttk.Combobox(algsFrame, textvariable=self.CRF_nprocs_var)
CRF_nprocsValue["values"] = list(range(1, multiprocessing.cpu_count()+1))
CRF_nprocsValue.current(0)
CRF_nprocsValue.grid(row=crf_cur_row, column=1)
crf_cur_row += 1
compact_btn = ttk.Checkbutton(algsFrame, text="compact model", variable=self.compact_var).grid(row=crf_cur_row, column=0, sticky=tkinter.W)
crf_cur_row += 1
CRF_trainButton = tkinter.Button(algsFrame, text="train", command=self.trainCRF)
CRF_trainButton.grid(row=crf_cur_row, column=0)
crf_cur_row += 1
def select_file(self, event=None):
options = {}
options['defaultextension'] = '.txt'
options['filetypes'] = [('all files', '.*'), ('text files', '.txt')]
options['initialdir'] = os.path.join(sem.SEM_DATA_DIR, "resources", "patterns")
options['parent'] = self.trainTop
options['title'] = 'Select pattern file.'
pattern = tkinter.filedialog.askopenfilename(**options)
self.pattern_label_var.set(pattern)
def algorithm(self):
return self.CRF_algorithm_var.get()
def l1(self):
return float(self.CRF_l1_var.get())
def l2(self):
return float(self.CRF_l2_var.get())
def nprocs(self):
return int(self.CRF_nprocs_var.get())
def pattern(self):
return self.pattern_label_var.get()
def compact(self):
return bool(self.compact_var.get())
def trainCRF(self, events=None):
exporter = sem.exporters.conll.Exporter()
alg = self.algorithm()
l1 = self.l1()
l2 = self.l2()
pattern = self.pattern()
nprocs = self.nprocs()
compact = self.compact()
masterfile = self.master.workflow()
export_format = "conll"
pipeline, workflow_options, exporter, couples = load_master(masterfile, force_format=export_format, pipeline_mode="train")
#pipeline.pipeline_mode = "train"
annotation_level = str2filter[self.annotation_level.get()]
document_filter = str2docfilter[self.document_filter.get()]
target_model = None
pipes = [pipe for pipe in pipeline]
for pipe in reversed(pipes):
if isinstance(pipe, EnrichModule):
pipe.mode = "train"
self.annotation_name = pipe.informations.aentries[-1].name
pipe.mode = "label"
break
elif isinstance(pipe, WapitiLabelModule):
self.annotation_name = pipe.field
target_model = pipe.model
break
out_dir = None
if target_model:
out_dir, name = os.path.split(target_model)
try:
os.makedirs(out_dir)
except OSError: # already exists
pass
except FileExistsError: # python3
pass
timestamp = time.strftime("%Y%m%d%H%M%S")
output_dir = os.path.join(self.current_train, timestamp)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
train_file = os.path.join(output_dir, "train.conll")
model_file = os.path.join(output_dir, "model.txt")
try:
files = self.file_selector.files()
except:
files = self.file_selector
fields = []
names = set()
with codecs.open(train_file, "w", "utf-8") as O:
for filename in files:
document = sem.importers.load(filename, encoding="utf-8", tagset_name=self.annotation_name)
args = Holder(**{"infiles":[document], "pipeline":pipeline, "options":workflow_options, "exporter":None, "couples":None})
if isinstance(document, SEMCorpus):
for doc in document:
if doc.name in names:
self.wapiti_train_logger.warn("document %s already found, skipping", doc.name)
continue
elif not document_filter(doc, self.annotation_name):
self.wapiti_train_logger.warn("document %s has no annotations, skipping", doc.name)
continue
args.infiles = [doc]
doc = tagger(args)[0]
if self.annotation_name is not None:
doc.add_to_corpus(self.annotation_name, filter=annotation_level)
doc.corpus.write(O)
if not fields:
fields = doc.corpus.fields[:-1]
else:
if document.name in names:
self.wapiti_train_logger.warn("document %s already found, skipping", document.name)
continue
elif not document_filter(document, self.annotation_name):
self.wapiti_train_logger.warn("document %s has no annotations, skipping", document.name)
continue
document = tagger(args)[0]
if self.annotation_name is not None:
document.add_to_corpus(self.annotation_name, filter=annotation_level)
document.corpus.write(O)
if not fields:
fields = document.corpus.fields[:-1]
pattern_file = os.path.join(output_dir, "pattern.txt")
if pattern:
shutil.copy(pattern, pattern_file)
else:
with codecs.open(os.path.join(output_dir, "pattern.txt"), "w", "utf-8") as O:
O.write("u\n\n")
for i, field in enumerate(fields):
for shift in range(-2,3):
O.write("u:{0} {1:+d}=%x[{1},{2}]\n".format(field, shift, i))
O.write(u"\n")
O.write("b\n")
with codecs.open(os.path.join(output_dir, "config.txt"), "w", "utf-8") as O:
O.write(u"algorithm\t{0}\n".format(alg))
O.write(u"l1\t{0}\n".format(l1))
O.write(u"l2\t{0}\n".format(l2))
O.write(u"number of processors\t{0}\n".format(nprocs))
O.write(u"compact\t{0}\n".format(compact))
sem.wapiti.train(train_file, pattern=pattern_file, output=model_file, algorithm=alg, rho1=l1, rho2=l2, nthreads=nprocs, compact=compact)
model_update_message = "\n\nNo candidate location found, model update has to be done manually"
if target_model:
if os.path.exists(target_model):
bname, ext = os.path.splitext(name)
backup_name = "{}.backup-{}{}".format(bname, timestamp, ext)
dest = os.path.join(out_dir, backup_name)
self.wapiti_train_logger.info('creating backup file before moving: %s', dest)
shutil.move(target_model, dest)
self.wapiti_train_logger.info('trained model moved to: %s', out_dir)
model_update_message = "\n\nTrained model moved to: {0}".format(out_dir)
shutil.copy(model_file, target_model)
self.wapiti_train_logger.info("files are located in: " + output_dir)
tkinter.messagebox.showinfo("training SEM", "Everything went ok! files are located in: {0}{1}".format(output_dir, model_update_message))
if self.main_frame:
self.main_frame.destroy()
else:
self.trainTop.destroy()
class SEMTkTrainInterface(ttk.Frame):
def __init__(self, documents, lang=None, master=None):
self.documents = documents
self._lang = lang
self._master = master
if not (master and lang):
self._master = None
self._lang = None
self.master_selector = None
self.lang_selector = None
trainTop = tkinter.Toplevel()
trainTop.focus_set()
varsFrame = ttk.LabelFrame(trainTop, text="Global variables")
document_filter_label = ttk.Label(varsFrame, text=u"document filter:")
document_filter_var = tkinter.StringVar(varsFrame, value=u"all documents")
document_filter = ttk.Combobox(varsFrame, textvariable=document_filter_var)
document_filter["values"] = sorted(str2docfilter.keys())
document_filter.current(sorted(str2docfilter.keys()).index(u"all documents"))
annotation_level_label = ttk.Label(varsFrame, text=u"annotation level:")
annotation_level_var = tkinter.StringVar(varsFrame, value="top level")
annotation_level = ttk.Combobox(varsFrame, textvariable=annotation_level_var)
annotation_level["values"] = sorted(str2filter.keys())
annotation_level.current(sorted(str2filter.keys()).index("top level"))
if not self._master:
self.master_selector = SemTkMasterSelector(varsFrame, os.path.join(sem.SEM_DATA_DIR, "resources"))
if not self._lang:
self.lang_selector = SemTkLangSelector(varsFrame, os.path.join(sem.SEM_DATA_DIR, "resources"))
self.lang_selector.master_selector = self.master_selector
algsFrame = ttk.LabelFrame(trainTop, text="Algorithm-specific variables")
notebook = ttk.Notebook(algsFrame)
frame1 = ttk.Frame(notebook)
frame2 = ttk.Frame(notebook)
notebook.add(frame1, text='CRF')
notebook.add(frame2, text='NN')
frame1.resource_dir = os.path.join(sem.SEM_DATA_DIR, "resources")
varsFrame.pack(fill="both", expand="yes")
vars_cur_row = 0
document_filter_label.grid(row=vars_cur_row, column=0)
vars_cur_row += 1
document_filter.grid(row=vars_cur_row, column=0)
vars_cur_row += 1
annotation_level_label.grid(row=vars_cur_row, column=0)
vars_cur_row += 1
annotation_level.grid(row=vars_cur_row, column=0)
vars_cur_row += 1
if self.lang_selector:
vars_cur_row, _ = self.lang_selector.grid(row=vars_cur_row, column=0)
if self.master_selector:
vars_cur_row, _ = self.master_selector.grid(row=vars_cur_row, column=0)
for _ in range(5):
ttk.Separator(trainTop,orient=tkinter.HORIZONTAL).pack()
algsFrame.pack(fill="both", expand="yes")
notebook.pack()
crf_cur_row = 0
crf_train = SEMTkWapitiTrain(self.documents, self.master, None, annotation_level=annotation_level_var, document_filter=document_filter_var, top=frame1, main_frame=trainTop, text="CRF-specific variables")
@property
def master(self):
return self._master or self.master_selector
@property
def lang(self):
return self._lang or self.lang_selector
class SearchFrame(ttk.Frame):
def __init__(self, text, regexp=False, nocase=False):
self.text = text
self.pattern = tkinter.StringVar()
self.regexp = tkinter.IntVar(value=int(regexp))
self.nocase = tkinter.IntVar(value=int(nocase))
self.prev_pattern = ""
self.prev_regexp = regexp
self.prev_nocase = nocase
self.findings = []
self.current = -1
self.text.tag_config("search", background='yellow', foreground="black")
bold_font = Font(self.text)
bold_font.configure(weight="bold")
self.text.tag_config("search_current", font=bold_font)
def clear_tags(self):
self.text.tag_remove("search", "1.0", "end")
self.text.tag_remove("search_current", "1.0", "end")
def clear(self):
self.clear_tags()
self.prev_pattern = ""
self.prev_regexp = self.regexp.get()
self.prev_nocase = self.nocase.get()
self.current = -1
del self.findings[:]
def find_in_text(self, event=None):
find_in_text_top = tkinter.Toplevel()
find_in_text_top.title("search")
find_in_text_top.focus_set()
matchesVar = tkinter.StringVar()
def nxt(event=None):
pattern = self.pattern.get()
regexp = self.regexp.get()
nocase = self.nocase.get()
if pattern != self.prev_pattern or regexp != self.prev_regexp or nocase != self.prev_nocase:
self.clear_tags()
self.prev_pattern = pattern
self.prev_regexp = regexp
self.prev_nocase = nocase
del self.findings[:]
start = 1.0
countVar = tkinter.StringVar()
pos = self.text.search(pattern, start, stopindex="end", count=countVar, regexp=self.regexp.get(), nocase=self.nocase.get())
while pos:
end = "{0} + {1}c".format(pos, countVar.get())
self.text.tag_add("search", pos, end)
self.findings.append((pos, end))
start = end
pos = self.text.search(pattern, start, stopindex="end", count=countVar, regexp=self.regexp.get(), nocase=self.nocase.get())
self.current = 1
elif self.findings:
prev = self.findings[self.current-1]
self.current = (self.current+1 if self.current < len(self.findings) else 1)
self.text.tag_remove("search_current", prev[0], prev[1])
if self.findings:
self.text.tag_add("search_current", self.findings[self.current-1][0], self.findings[self.current-1][1])
matchesVar.set("match {0} out of {1}".format(self.current, len(self.findings)))
self.text.mark_set("insert", self.findings[self.current-1][1])
self.text.see("insert")
else:
matchesVar.set("no matches found")
def cancel(event=None):
self.clear()
find_in_text_top.destroy()
label1 = tkinter.Label(find_in_text_top, text="search for:")
text = ttk.Entry(find_in_text_top, textvariable=self.pattern)
next_btn = ttk.Button(find_in_text_top, text="next", command=nxt)
cancel_btn = ttk.Button(find_in_text_top, text="cancel", command=cancel)
matches_found_lbl = tkinter.Label(find_in_text_top, textvariable=matchesVar)
regexp_btn = ttk.Checkbutton(find_in_text_top, text="regular expression", variable=self.regexp)
nocase_btn = ttk.Checkbutton(find_in_text_top, text="ignore case", variable=self.nocase)
label1.grid(row=0, column=0)
text.grid(row=0, column=1)
regexp_btn.grid(row=1, column=0)
nocase_btn.grid(row=1, column=1)
next_btn.grid(row=2, column=0)
cancel_btn.grid(row=2, column=1)
matches_found_lbl.grid(row=3, column=0, columnspan=2)
text.focus_set()
find_in_text_top.bind('<Return>', nxt)
find_in_text_top.bind('<Escape>', cancel)
find_in_text_top.protocol("WM_DELETE_WINDOW", cancel)
|
|
#stdlib imports
import errno, stat, time, subprocess, os
from pathlib import Path
#project imports
from . import elektra_util
from .elektra_util import *
startup_time = time.time()
#the following methods map 1:1 to the FUSE interface
#returns a map containing file attributes, i.e. the result of the stat command
def getattr(path, fh=None):
is_value_of_file = Path(path).name == elektra_util.dir_file_special_name
is_dir, is_file = key_type(path)
if is_value_of_file:
mode = stat.S_IFREG
#resolve the special (/.../@elektra.value) key to the real key (the parent)
path = str(Path(path).parent)
elif not is_dir and not is_file:
mode = errno.ENOENT
elif is_dir and is_file:
mode = stat.S_IFDIR
elif is_dir:
mode = stat.S_IFDIR
elif is_file and has_meta(path, "meta:/fuse/directory"):
mode = stat.S_IFDIR
elif is_file:
mode = stat.S_IFREG
if mode == stat.S_IFREG:
try:
filesize = size_of_file(path)
except KeyError: #key does not exist
mode = errno.ENOENT
try:
kdb_file_stat = _stat_kdb_file(path)
st_mode_only_permission_bits = kdb_file_stat.st_mode & 0o777
except (FileNotFoundError, PermissionError) as e:
# some keys, e.g. "system:/elektra" resolve to a file like "/etc/kdb/elektra.ecf", which does not exist,
# other requests result in an unauthorized access.
# for the filesystem to remain useful in these cases, dummy file attributes are used
st_mode_only_permission_bits = 0o000
kdb_file_stat = dict(
st_ctime = startup_time,
st_mtime = startup_time,
st_atime = startup_time
)
#common attributes for files and directories
if not _namespace_is_writable(path):
st_mode_only_permission_bits = st_mode_only_permission_bits & 0o7555 # retain all permission bits except write bits
key_stat = dict(
st_mode = mode | st_mode_only_permission_bits,
#TODO: using the real timestamps results in vim complaining on write: "The file has been changed since reading it!!!"
# => defaulting to a static timestamp for now
#st_ctime = kdb_file_stat.st_ctime,
#st_mtime = kdb_file_stat.st_mtime,
#st_atime = kdb_file_stat.st_atime,
st_ctime = startup_time,
st_mtime = startup_time,
st_atime = startup_time
)
if mode == stat.S_IFDIR:
key_stat["st_nlink"] = 2
return key_stat
elif mode == stat.S_IFREG:
key_stat["st_nlink"] = 1
key_stat["st_size"] = filesize
return key_stat
else:
raise OSError(mode)
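# Summary of the mapping implemented above (a restatement for readability, not new logic):
#   key missing                           -> OSError(errno.ENOENT)
#   key with children (directory key)     -> S_IFDIR
#   leaf key                              -> S_IFREG, st_size = size_of_file(path)
#   leaf key with meta:/fuse/directory    -> S_IFDIR
#   .../@elektra.value                    -> S_IFREG exposing the parent key's value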
# returns True iff the namespace of the given path is writable (i.e. not read only)
def _namespace_is_writable(os_path):
namespace = Path(os_path).parts[1]
return not namespace in ["cascading:", "proc:"]
# throws OSError(errno.EROFS) (read only file system) if the namespace of the given path is read only
def _ensure_namespace_is_writable(os_path):
if not _namespace_is_writable(os_path):
raise OSError(errno.EROFS)
#for the path of a given key, stats the backing file as reported by the command "kdb file"
#throws OSError when:
# -) `kdb file` does not return a path
# -) the returned path does not actually exist
def _stat_kdb_file(os_path):
resolved_file_path = get_kdb_file(os_path)
return os.stat(resolved_file_path)
#returns a list of files of a directory.
#On the root level, Elektra's namespaces are listed,
#on deeper levels, the key hierarchy is mirrored.
#".", ".." are always included.
def readdir(path, fh):
if path == "/":
return [".", "..", *elektra_namespaces]
dir_set, file_set = ls(path)
return ['.', '..', *dir_set, *file_set]
#returns a chunk of a file, i.e. a part of an Elektra key value
def read(path, size, offset, fh):
return file_contents(path)[offset:offset+size]
#updates a chunk of a file, i.e. a part of an Elektra key value
def write(path, data, offset, fh):
_ensure_namespace_is_writable(path)
try:
old_value = file_contents(path)
new_value = old_value[:offset] + data + old_value[offset + len(data):]
update_key_value(path, new_value)
return len(data)
except KeyError:
raise OSError(errno.ENOENT)
except kdb.KDBException:
raise OSError(errno.EROFS) #TODO differentiate between validation error, write only keys etc
#truncates a file, i.e. an Elektra key value, to the given length (discarding everything beyond it, or zero-padding if the length grows)
def truncate(path, length, fh=None):
_ensure_namespace_is_writable(path)
old_value = file_contents(path)
new_value = old_value[:length].ljust(length, '\x00'.encode()) #if length increased, fill new space with zeros
update_key_value(path, new_value)
#creates a file, i.e. a new Elektra key
def create(path, mode):
_ensure_namespace_is_writable(path)
if path.count('/') <= 1:
raise OSError(errno.EROFS) #cannot create key in top level directory (reserved for /user:, /system: ...)
create_key(path) #TODO: consider mode argument
#TODO: maybe consider possible error codes as in https://linux.die.net/man/2/
#creates a directory, i.e. a new Elektra key with the special meta key "meta:/fuse/directory"
def mkdir(path, mode):
_ensure_namespace_is_writable(path)
#TODO: think of a reasonable use for mode parameter
create(path, mode)
set_meta(path, "meta:/fuse/directory", "") # 'hack' to enable creation of empty folders (these would otherwise automatically become files)
#append 'meta:/' as Elektra requires this prefix to be present
def _ensure_meta_prefix(name):
return "meta:/" + name
#remove 'meta:/' if not already present
def _ensure_no_meta_prefix(name):
return name[len("meta:/"):] if name.startswith("meta:/") else name
#could use removeprefix, but that would require python 3.9+
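# For illustration (not part of the FUSE interface):
#   _ensure_meta_prefix("fuse/directory")            -> "meta:/fuse/directory"
#   _ensure_no_meta_prefix("meta:/fuse/directory")   -> "fuse/directory"
#   _ensure_no_meta_prefix("fuse/directory")         -> "fuse/directory"  (already unprefixed)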
#returns a map of extended file attributes, i.e. all meta keys of an Elektra key. The "meta:/" prefix is not included.
def listxattr(path):
try:
meta_map = get_meta_map(path)
return [elektra_util.xattr_kdb_file] + [_ensure_no_meta_prefix(keyname) for keyname in meta_map.keys()]
except KeyError:
return []
# if the key does not really exist (intermediate directories), return an empty list instead of an error
# so as not to confuse tools like xattr
#returns the value of an xattr key
def getxattr(path, name, position=0):
if name == elektra_util.xattr_kdb_file:
return get_kdb_file(path).encode()
name = _ensure_meta_prefix(name)
try:
return get_meta_map(path)[name].encode()
except KeyError:
raise OSError(errno.ENODATA)
#deletes an xattr key, i.e. the backing meta-key
def removexattr(path, name):
_ensure_namespace_is_writable(path)
if name == elektra_util.xattr_kdb_file:
raise OSError(errno.EROFS)
try:
meta_map = get_meta_map(path)
name = _ensure_meta_prefix(name)
del meta_map[name]
update_meta_map(path, meta_map)
except KeyError:
raise OSError(errno.ENODATA)
#updates the value of an xattr key, i.e. the backing meta-key
def setxattr(path, name, value, options, position=0):
_ensure_namespace_is_writable(path)
if name == elektra_util.xattr_kdb_file:
raise OSError(errno.EROFS)
#if key does not really exist (intermediate directories) key should be created (like kdb meta-set does)
try:
meta_map = get_meta_map(path)
except KeyError:
meta_map = dict()
create(path, 0)
name = _ensure_meta_prefix(name)
meta_map[name] = value.decode() #meta keys cannot contain binary data, decoding must succeed
update_meta_map(path, meta_map)
#deletes a file, i.e. the backing Elektra key
def unlink(path):
_ensure_namespace_is_writable(path)
#delete_key(path) keyset.cut behaved unexpectedly and deleted child keys => using kdb directly
returncode = subprocess.run(["kdb", "rm", os_path_to_elektra_path(path)]).returncode
if returncode != 0:
raise OSError(errno.EROFS) #TODO: differentiate between different error
#deletes a directory if not empty. (same semantics of unlink in that case)
def rmdir(path):
_ensure_namespace_is_writable(path)
if not is_directory_empty(path):
raise OSError(errno.ENOTEMPTY)
else:
unlink(path)
#renames a file, i.e. the backing Elektra-key
def rename(old_path, new_path):
_ensure_namespace_is_writable(old_path)
_ensure_namespace_is_writable(new_path)
if Path(old_path).name == elektra_util.dir_file_special_name:
#see https://github.com/ElektraInitiative/libelektra/issues/3648
returncode = subprocess.run(["kdb", "mv", os_path_to_elektra_path(old_path), os_path_to_elektra_path(new_path)]).returncode
else:
#clumsy to implement using the python api => using kdb directly
returncode = subprocess.run(["kdb", "mv", "-r", os_path_to_elektra_path(old_path), os_path_to_elektra_path(new_path)]).returncode
if returncode != 0:
raise OSError(errno.EROFS) #TODO: differentiate between different errors
# does nothing (besides checking for readonly namespaces) and reports success
# does not raise OSError(errno.EOPNOTSUPP), as that would block tools like 'cp -r'
def chmod(path, mode):
_ensure_namespace_is_writable(path)
#TODO: maybe this can be handled better?
return 0
def chown(path, uid, gid):
_ensure_namespace_is_writable(path)
return 0
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Dirichlet distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import special_math_ops
from tensorflow.python.ops.distributions import distribution
from tensorflow.python.ops.distributions import kullback_leibler
from tensorflow.python.ops.distributions import util as distribution_util
from tensorflow.python.util.tf_export import tf_export
__all__ = [
"Dirichlet",
]
_dirichlet_sample_note = """Note: `value` must be a non-negative tensor with
dtype `self.dtype` and be in the `(self.event_shape() - 1)`-simplex, i.e.,
`tf.reduce_sum(value, -1) = 1`. It must have a shape compatible with
`self.batch_shape() + self.event_shape()`."""
@tf_export("distributions.Dirichlet")
class Dirichlet(distribution.Distribution):
"""Dirichlet distribution.
The Dirichlet distribution is defined over the
[`(k-1)`-simplex](https://en.wikipedia.org/wiki/Simplex) using a positive,
length-`k` vector `concentration` (`k > 1`). The Dirichlet is identically the
Beta distribution when `k = 2`.
#### Mathematical Details
The Dirichlet is a distribution over the open `(k-1)`-simplex, i.e.,
```none
S^{k-1} = { (x_0, ..., x_{k-1}) in R^k : sum_j x_j = 1 and all_j x_j > 0 }.
```
The probability density function (pdf) is,
```none
pdf(x; alpha) = prod_j x_j**(alpha_j - 1) / Z
Z = prod_j Gamma(alpha_j) / Gamma(sum_j alpha_j)
```
where:
* `x in S^{k-1}`, i.e., the `(k-1)`-simplex,
* `concentration = alpha = [alpha_0, ..., alpha_{k-1}]`, `alpha_j > 0`,
* `Z` is the normalization constant aka the [multivariate beta function](
https://en.wikipedia.org/wiki/Beta_function#Multivariate_beta_function),
and,
* `Gamma` is the [gamma function](
https://en.wikipedia.org/wiki/Gamma_function).
The `concentration` represents mean total counts of class occurrence, i.e.,
```none
concentration = alpha = mean * total_concentration
```
where `mean` in `S^{k-1}` and `total_concentration` is a positive real number
representing a mean total count.
Distribution parameters are automatically broadcast in all functions; see
examples for details.
#### Examples
```python
# Create a single trivariate Dirichlet, with the 3rd class being three times
# more frequent than the first. I.e., batch_shape=[], event_shape=[3].
alpha = [1., 2, 3]
dist = Dirichlet(alpha)
dist.sample([4, 5]) # shape: [4, 5, 3]
# x has one sample, one batch, three classes:
x = [.2, .3, .5] # shape: [3]
dist.prob(x) # shape: []
# x has two samples from one batch:
x = [[.1, .4, .5],
[.2, .3, .5]]
dist.prob(x) # shape: [2]
# alpha will be broadcast to shape [5, 7, 3] to match x.
x = [[...]] # shape: [5, 7, 3]
dist.prob(x) # shape: [5, 7]
```
```python
# Create batch_shape=[2], event_shape=[3]:
alpha = [[1., 2, 3],
[4, 5, 6]] # shape: [2, 3]
dist = Dirichlet(alpha)
dist.sample([4, 5]) # shape: [4, 5, 2, 3]
x = [.2, .3, .5]
# x will be broadcast as [[.2, .3, .5],
# [.2, .3, .5]],
# thus matching batch_shape [2, 3].
dist.prob(x) # shape: [2]
```
"""
def __init__(self,
concentration,
validate_args=False,
allow_nan_stats=True,
name="Dirichlet"):
"""Initialize a batch of Dirichlet distributions.
Args:
concentration: Positive floating-point `Tensor` indicating mean number
of class occurrences; aka "alpha". Implies `self.dtype`, and
`self.batch_shape`, `self.event_shape`, i.e., if
`concentration.shape = [N1, N2, ..., Nm, k]` then
`batch_shape = [N1, N2, ..., Nm]` and
`event_shape = [k]`.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
"""
parameters = dict(locals())
with ops.name_scope(name, values=[concentration]) as name:
self._concentration = self._maybe_assert_valid_concentration(
ops.convert_to_tensor(concentration, name="concentration"),
validate_args)
self._total_concentration = math_ops.reduce_sum(self._concentration, -1)
super(Dirichlet, self).__init__(
dtype=self._concentration.dtype,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
reparameterization_type=distribution.NOT_REPARAMETERIZED,
parameters=parameters,
graph_parents=[self._concentration,
self._total_concentration],
name=name)
@property
def concentration(self):
"""Concentration parameter; expected counts for that coordinate."""
return self._concentration
@property
def total_concentration(self):
"""Sum of last dim of concentration parameter."""
return self._total_concentration
def _batch_shape_tensor(self):
return array_ops.shape(self.total_concentration)
def _batch_shape(self):
return self.total_concentration.get_shape()
def _event_shape_tensor(self):
return array_ops.shape(self.concentration)[-1:]
def _event_shape(self):
return self.concentration.get_shape().with_rank_at_least(1)[-1:]
def _sample_n(self, n, seed=None):
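    # Draw independent Gamma(concentration_j, 1) samples and normalize each
    # vector by its sum; the normalized vector is Dirichlet(concentration)
    # distributed.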
gamma_sample = random_ops.random_gamma(
shape=[n],
alpha=self.concentration,
dtype=self.dtype,
seed=seed)
return gamma_sample / math_ops.reduce_sum(gamma_sample, -1, keepdims=True)
@distribution_util.AppendDocstring(_dirichlet_sample_note)
def _log_prob(self, x):
return self._log_unnormalized_prob(x) - self._log_normalization()
@distribution_util.AppendDocstring(_dirichlet_sample_note)
def _prob(self, x):
return math_ops.exp(self._log_prob(x))
def _log_unnormalized_prob(self, x):
x = self._maybe_assert_valid_sample(x)
return math_ops.reduce_sum((self.concentration - 1.) * math_ops.log(x), -1)
def _log_normalization(self):
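    # log Z = log B(alpha) = sum_j lgamma(alpha_j) - lgamma(sum_j alpha_j),
    # which is exactly what `lbeta` computes over the last dimension.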
return special_math_ops.lbeta(self.concentration)
def _entropy(self):
k = math_ops.cast(self.event_shape_tensor()[0], self.dtype)
return (
self._log_normalization()
+ ((self.total_concentration - k)
* math_ops.digamma(self.total_concentration))
- math_ops.reduce_sum(
(self.concentration - 1.) * math_ops.digamma(self.concentration),
axis=-1))
def _mean(self):
return self.concentration / self.total_concentration[..., array_ops.newaxis]
def _covariance(self):
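    # With alpha0 = total_concentration, the Dirichlet covariance is
    #   Cov[X_i, X_j] = -alpha_i * alpha_j / (alpha0**2 * (alpha0 + 1)),  i != j
    #   Var[X_i]      = alpha_i * (alpha0 - alpha_i) / (alpha0**2 * (alpha0 + 1))
    # i.e. the negative outer product of the scaled mean, with the variances
    # placed on the diagonal, which is what is assembled below.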
x = self._variance_scale_term() * self._mean()
return array_ops.matrix_set_diag(
-math_ops.matmul(x[..., array_ops.newaxis],
x[..., array_ops.newaxis, :]), # outer prod
self._variance())
def _variance(self):
scale = self._variance_scale_term()
x = scale * self._mean()
return x * (scale - x)
def _variance_scale_term(self):
"""Helper to `_covariance` and `_variance` which computes a shared scale."""
return math_ops.rsqrt(1. + self.total_concentration[..., array_ops.newaxis])
@distribution_util.AppendDocstring(
"""Note: The mode is undefined when any `concentration <= 1`. If
`self.allow_nan_stats` is `True`, `NaN` is used for undefined modes. If
`self.allow_nan_stats` is `False` an exception is raised when one or more
modes are undefined.""")
def _mode(self):
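    # mode[j] = (alpha_j - 1) / (alpha0 - k), defined only when all alpha_j > 1.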
k = math_ops.cast(self.event_shape_tensor()[0], self.dtype)
mode = (self.concentration - 1.) / (
self.total_concentration[..., array_ops.newaxis] - k)
if self.allow_nan_stats:
nan = array_ops.fill(
array_ops.shape(mode),
np.array(np.nan, dtype=self.dtype.as_numpy_dtype()),
name="nan")
return array_ops.where(
math_ops.reduce_all(self.concentration > 1., axis=-1),
mode, nan)
return control_flow_ops.with_dependencies([
check_ops.assert_less(
array_ops.ones([], self.dtype),
self.concentration,
message="Mode undefined when any concentration <= 1"),
], mode)
def _maybe_assert_valid_concentration(self, concentration, validate_args):
"""Checks the validity of the concentration parameter."""
if not validate_args:
return concentration
return control_flow_ops.with_dependencies([
check_ops.assert_positive(
concentration,
message="Concentration parameter must be positive."),
check_ops.assert_rank_at_least(
concentration, 1,
message="Concentration parameter must have >=1 dimensions."),
check_ops.assert_less(
1, array_ops.shape(concentration)[-1],
message="Concentration parameter must have event_size >= 2."),
], concentration)
def _maybe_assert_valid_sample(self, x):
"""Checks the validity of a sample."""
if not self.validate_args:
return x
return control_flow_ops.with_dependencies([
check_ops.assert_positive(
x,
message="samples must be positive"),
distribution_util.assert_close(
array_ops.ones([], dtype=self.dtype),
math_ops.reduce_sum(x, -1),
message="sample last-dimension must sum to `1`"),
], x)
@kullback_leibler.RegisterKL(Dirichlet, Dirichlet)
def _kl_dirichlet_dirichlet(d1, d2, name=None):
"""Batchwise KL divergence KL(d1 || d2) with d1 and d2 Dirichlet.
Args:
d1: instance of a Dirichlet distribution object.
d2: instance of a Dirichlet distribution object.
name: (optional) Name to use for created operations.
default is "kl_dirichlet_dirichlet".
Returns:
Batchwise KL(d1 || d2)
"""
with ops.name_scope(name, "kl_dirichlet_dirichlet", values=[
d1.concentration, d2.concentration]):
# The KL between Dirichlet distributions can be derived as follows. We have
#
# Dir(x; a) = 1 / B(a) * prod_i[x[i]^(a[i] - 1)]
#
# where B(a) is the multivariate Beta function:
#
# B(a) = Gamma(a[1]) * ... * Gamma(a[n]) / Gamma(a[1] + ... + a[n])
#
# The KL is
#
# KL(Dir(x; a), Dir(x; b)) = E_Dir(x; a){log(Dir(x; a) / Dir(x; b))}
#
# so we'll need to know the log density of the Dirichlet. This is
#
# log(Dir(x; a)) = sum_i[(a[i] - 1) log(x[i])] - log B(a)
#
# The only term that matters for the expectations is the log(x[i]). To
# compute the expectation of this term over the Dirichlet density, we can
# use the following facts about the Dirichlet in exponential family form:
# 1. log(x[i]) is a sufficient statistic
# 2. expected sufficient statistics (of any exp family distribution) are
# equal to derivatives of the log normalizer with respect to
# corresponding natural parameters: E{T[i](x)} = dA/d(eta[i])
#
# To proceed, we can rewrite the Dirichlet density in exponential family
# form as follows:
#
# Dir(x; a) = exp{eta(a) . T(x) - A(a)}
#
# where '.' is the dot product of vectors eta and T, and A is a scalar:
#
# eta[i](a) = a[i] - 1
# T[i](x) = log(x[i])
# A(a) = log B(a)
#
# Now, we can use fact (2) above to write
#
# E_Dir(x; a)[log(x[i])]
# = dA(a) / da[i]
# = d/da[i] log B(a)
    #   = d/da[i] (sum_j lgamma(a[j]) - lgamma(sum_j a[j]))
    #   = digamma(a[i]) - digamma(sum_j a[j])
#
# Putting it all together, we have
#
# KL[Dir(x; a) || Dir(x; b)]
    #   = E_Dir(x; a){log(Dir(x; a) / Dir(x; b))}
    #   = E_Dir(x; a){sum_i[(a[i] - b[i]) log(x[i])]} - (lbeta(a) - lbeta(b))
    #   = sum_i[(a[i] - b[i]) * E_Dir(x; a){log(x[i])}] - lbeta(a) + lbeta(b)
    #   = sum_i[(a[i] - b[i]) * (digamma(a[i]) - digamma(sum_j a[j]))]
    #     - lbeta(a) + lbeta(b)
digamma_sum_d1 = math_ops.digamma(
math_ops.reduce_sum(d1.concentration, axis=-1, keepdims=True))
digamma_diff = math_ops.digamma(d1.concentration) - digamma_sum_d1
concentration_diff = d1.concentration - d2.concentration
return (math_ops.reduce_sum(concentration_diff * digamma_diff, axis=-1) -
special_math_ops.lbeta(d1.concentration) +
special_math_ops.lbeta(d2.concentration))
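# A minimal usage sketch (assuming the usual `kl_divergence` entry point from
# the same distributions package is in scope); it dispatches to the function
# registered above:
#
#   d1 = Dirichlet(concentration=[1., 2., 3.])
#   d2 = Dirichlet(concentration=[2., 2., 2.])
#   kl = kullback_leibler.kl_divergence(d1, d2)  # scalar Tensor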
|
|
from io import BytesIO
import PIL.Image
import PIL.ImageDraw
import wand.image
from . import utils
from .table import TableFinder
class COLORS(object):
RED = (255, 0, 0)
GREEN = (0, 255, 0)
BLUE = (0, 0, 255)
TRANSPARENT = (0, 0, 0, 0)
DEFAULT_FILL = COLORS.BLUE + (50,)
DEFAULT_STROKE = COLORS.RED + (200,)
DEFAULT_STROKE_WIDTH = 1
DEFAULT_RESOLUTION = 72
def get_page_image(stream, page_no, resolution):
# If we are working with a file object saved to disk
if hasattr(stream, "name"):
spec = dict(filename=f"{stream.name}[{page_no}]")
def postprocess(img):
return img
# If we instead are working with a BytesIO stream
else:
stream.seek(0)
spec = dict(file=stream)
def postprocess(img):
return wand.image.Image(image=img.sequence[page_no])
with wand.image.Image(resolution=resolution, **spec) as img_init:
img = postprocess(img_init)
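        # Flatten any transparency onto a white background so transparent
        # regions don't come out dark after the PNG/RGB conversion below.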
if img.alpha_channel:
img.background_color = wand.image.Color("white")
img.alpha_channel = "remove"
with img.convert("png") as png:
im = PIL.Image.open(BytesIO(png.make_blob()))
return im.convert("RGB")
class PageImage(object):
def __init__(self, page, original=None, resolution=DEFAULT_RESOLUTION):
self.page = page
if original is None:
self.original = get_page_image(
page.pdf.stream, page.page_number - 1, resolution
)
else:
self.original = original
if page.is_original:
self.root = page
cropped = False
else:
self.root = page.root_page
cropped = page.root_page.bbox != page.bbox
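        # Ratio of raster-image pixels to PDF points; used by _reproject below
        # to map page coordinates into image coordinates.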
self.scale = self.original.size[0] / self.root.width
if cropped:
cropbox = (
(page.bbox[0] - page.root_page.bbox[0]) * self.scale,
(page.bbox[1] - page.root_page.bbox[1]) * self.scale,
(page.bbox[2] - page.root_page.bbox[0]) * self.scale,
(page.bbox[3] - page.root_page.bbox[1]) * self.scale,
)
self.original = self.original.crop(map(int, cropbox))
self.reset()
def _reproject_bbox(self, bbox):
x0, top, x1, bottom = bbox
_x0, _top = self._reproject((x0, top))
_x1, _bottom = self._reproject((x1, bottom))
return (_x0, _top, _x1, _bottom)
def _reproject(self, coord):
"""
Given an (x0, top) tuple from the *root* coordinate system,
return an (x0, top) tuple in the *image* coordinate system.
"""
x0, top = coord
px0, ptop = self.page.bbox[:2]
rx0, rtop = self.root.bbox[:2]
_x0 = (x0 + rx0 - px0) * self.scale
_top = (top + rtop - ptop) * self.scale
return (_x0, _top)
def reset(self):
self.annotated = PIL.Image.new(self.original.mode, self.original.size)
self.annotated.paste(self.original)
self.draw = PIL.ImageDraw.Draw(self.annotated, "RGBA")
return self
def copy(self):
return self.__class__(self.page, self.original)
def draw_line(
self, points_or_obj, stroke=DEFAULT_STROKE, stroke_width=DEFAULT_STROKE_WIDTH
):
if isinstance(points_or_obj, (tuple, list)):
points = points_or_obj
elif type(points_or_obj) == dict and "points" in points_or_obj:
points = points_or_obj["points"]
else:
obj = points_or_obj
points = ((obj["x0"], obj["top"]), (obj["x1"], obj["bottom"]))
self.draw.line(
list(map(self._reproject, points)), fill=stroke, width=stroke_width
)
return self
def draw_lines(self, list_of_lines, **kwargs):
for x in utils.to_list(list_of_lines):
self.draw_line(x, **kwargs)
return self
def draw_vline(
self, location, stroke=DEFAULT_STROKE, stroke_width=DEFAULT_STROKE_WIDTH
):
points = (location, self.page.bbox[1], location, self.page.bbox[3])
self.draw.line(self._reproject_bbox(points), fill=stroke, width=stroke_width)
return self
def draw_vlines(self, locations, **kwargs):
for x in utils.to_list(locations):
self.draw_vline(x, **kwargs)
return self
def draw_hline(
self, location, stroke=DEFAULT_STROKE, stroke_width=DEFAULT_STROKE_WIDTH
):
points = (self.page.bbox[0], location, self.page.bbox[2], location)
self.draw.line(self._reproject_bbox(points), fill=stroke, width=stroke_width)
return self
def draw_hlines(self, locations, **kwargs):
for x in utils.to_list(locations):
self.draw_hline(x, **kwargs)
return self
def draw_rect(
self,
bbox_or_obj,
fill=DEFAULT_FILL,
stroke=DEFAULT_STROKE,
stroke_width=DEFAULT_STROKE_WIDTH,
):
if isinstance(bbox_or_obj, (tuple, list)):
bbox = bbox_or_obj
else:
obj = bbox_or_obj
bbox = (obj["x0"], obj["top"], obj["x1"], obj["bottom"])
x0, top, x1, bottom = bbox
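        # Inset the box by half the stroke width so the stroke segments drawn
        # below stay inside the requested bbox instead of straddling its edges.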
half = stroke_width / 2
x0 += half
top += half
x1 -= half
bottom -= half
self.draw.rectangle(
self._reproject_bbox((x0, top, x1, bottom)), fill, COLORS.TRANSPARENT
)
if stroke_width > 0:
segments = [
((x0, top), (x1, top)), # top
((x0, bottom), (x1, bottom)), # bottom
((x0, top), (x0, bottom)), # left
((x1, top), (x1, bottom)), # right
]
self.draw_lines(segments, stroke=stroke, stroke_width=stroke_width)
return self
def draw_rects(self, list_of_rects, **kwargs):
for x in utils.to_list(list_of_rects):
self.draw_rect(x, **kwargs)
return self
def draw_circle(
self, center_or_obj, radius=5, fill=DEFAULT_FILL, stroke=DEFAULT_STROKE
):
if isinstance(center_or_obj, (tuple, list)):
center = center_or_obj
else:
obj = center_or_obj
center = ((obj["x0"] + obj["x1"]) / 2, (obj["top"] + obj["bottom"]) / 2)
cx, cy = center
bbox = (cx - radius, cy - radius, cx + radius, cy + radius)
self.draw.ellipse(self._reproject_bbox(bbox), fill, stroke)
return self
def draw_circles(self, list_of_circles, **kwargs):
for x in utils.to_list(list_of_circles):
self.draw_circle(x, **kwargs)
return self
def save(self, *args, **kwargs):
return self.annotated.save(*args, **kwargs)
def debug_table(
self, table, fill=DEFAULT_FILL, stroke=DEFAULT_STROKE, stroke_width=1
):
"""
        Outline the cells of the given table.
"""
self.draw_rects(
table.cells, fill=fill, stroke=stroke, stroke_width=stroke_width
)
return self
def debug_tablefinder(self, tf={}):
if isinstance(tf, TableFinder):
pass
elif isinstance(tf, dict):
tf = self.page.debug_tablefinder(tf)
else:
raise ValueError(
"Argument must be instance of TableFinder"
"or a TableFinder settings dict."
)
for table in tf.tables:
self.debug_table(table)
self.draw_lines(tf.edges, stroke_width=1)
self.draw_circles(
tf.intersections.keys(),
fill=COLORS.TRANSPARENT,
stroke=COLORS.BLUE + (200,),
radius=3,
)
return self
def outline_words(
self,
stroke=DEFAULT_STROKE,
fill=DEFAULT_FILL,
stroke_width=DEFAULT_STROKE_WIDTH,
x_tolerance=utils.DEFAULT_X_TOLERANCE,
y_tolerance=utils.DEFAULT_Y_TOLERANCE,
):
words = self.page.extract_words(
x_tolerance=x_tolerance, y_tolerance=y_tolerance
)
self.draw_rects(words, stroke=stroke, fill=fill, stroke_width=stroke_width)
return self
def outline_chars(
self,
stroke=(255, 0, 0, 255),
fill=(255, 0, 0, int(255 / 4)),
stroke_width=DEFAULT_STROKE_WIDTH,
):
self.draw_rects(
self.page.chars, stroke=stroke, fill=fill, stroke_width=stroke_width
)
return self
def _repr_png_(self):
b = BytesIO()
self.annotated.save(b, "PNG")
return b.getvalue()
|
|
# coding: utf-8
"""
KubeVirt API
    This is the KubeVirt API, an add-on for Kubernetes.
OpenAPI spec version: 1.0.0
Contact: kubevirt-dev@googlegroups.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1VirtualMachineInstanceGuestOSInfo(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'id': 'str',
'kernel_release': 'str',
'kernel_version': 'str',
'machine': 'str',
'name': 'str',
'pretty_name': 'str',
'version': 'str',
'version_id': 'str'
}
attribute_map = {
'id': 'id',
'kernel_release': 'kernelRelease',
'kernel_version': 'kernelVersion',
'machine': 'machine',
'name': 'name',
'pretty_name': 'prettyName',
'version': 'version',
'version_id': 'versionId'
}
def __init__(self, id=None, kernel_release=None, kernel_version=None, machine=None, name=None, pretty_name=None, version=None, version_id=None):
"""
V1VirtualMachineInstanceGuestOSInfo - a model defined in Swagger
"""
self._id = None
self._kernel_release = None
self._kernel_version = None
self._machine = None
self._name = None
self._pretty_name = None
self._version = None
self._version_id = None
if id is not None:
self.id = id
if kernel_release is not None:
self.kernel_release = kernel_release
if kernel_version is not None:
self.kernel_version = kernel_version
if machine is not None:
self.machine = machine
if name is not None:
self.name = name
if pretty_name is not None:
self.pretty_name = pretty_name
if version is not None:
self.version = version
if version_id is not None:
self.version_id = version_id
@property
def id(self):
"""
Gets the id of this V1VirtualMachineInstanceGuestOSInfo.
Guest OS Id
:return: The id of this V1VirtualMachineInstanceGuestOSInfo.
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""
Sets the id of this V1VirtualMachineInstanceGuestOSInfo.
Guest OS Id
:param id: The id of this V1VirtualMachineInstanceGuestOSInfo.
:type: str
"""
self._id = id
@property
def kernel_release(self):
"""
Gets the kernel_release of this V1VirtualMachineInstanceGuestOSInfo.
Guest OS Kernel Release
:return: The kernel_release of this V1VirtualMachineInstanceGuestOSInfo.
:rtype: str
"""
return self._kernel_release
@kernel_release.setter
def kernel_release(self, kernel_release):
"""
Sets the kernel_release of this V1VirtualMachineInstanceGuestOSInfo.
Guest OS Kernel Release
:param kernel_release: The kernel_release of this V1VirtualMachineInstanceGuestOSInfo.
:type: str
"""
self._kernel_release = kernel_release
@property
def kernel_version(self):
"""
Gets the kernel_version of this V1VirtualMachineInstanceGuestOSInfo.
Kernel version of the Guest OS
:return: The kernel_version of this V1VirtualMachineInstanceGuestOSInfo.
:rtype: str
"""
return self._kernel_version
@kernel_version.setter
def kernel_version(self, kernel_version):
"""
Sets the kernel_version of this V1VirtualMachineInstanceGuestOSInfo.
Kernel version of the Guest OS
:param kernel_version: The kernel_version of this V1VirtualMachineInstanceGuestOSInfo.
:type: str
"""
self._kernel_version = kernel_version
@property
def machine(self):
"""
Gets the machine of this V1VirtualMachineInstanceGuestOSInfo.
Machine type of the Guest OS
:return: The machine of this V1VirtualMachineInstanceGuestOSInfo.
:rtype: str
"""
return self._machine
@machine.setter
def machine(self, machine):
"""
Sets the machine of this V1VirtualMachineInstanceGuestOSInfo.
Machine type of the Guest OS
:param machine: The machine of this V1VirtualMachineInstanceGuestOSInfo.
:type: str
"""
self._machine = machine
@property
def name(self):
"""
Gets the name of this V1VirtualMachineInstanceGuestOSInfo.
Name of the Guest OS
:return: The name of this V1VirtualMachineInstanceGuestOSInfo.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this V1VirtualMachineInstanceGuestOSInfo.
Name of the Guest OS
:param name: The name of this V1VirtualMachineInstanceGuestOSInfo.
:type: str
"""
self._name = name
@property
def pretty_name(self):
"""
Gets the pretty_name of this V1VirtualMachineInstanceGuestOSInfo.
Guest OS Pretty Name
:return: The pretty_name of this V1VirtualMachineInstanceGuestOSInfo.
:rtype: str
"""
return self._pretty_name
@pretty_name.setter
def pretty_name(self, pretty_name):
"""
Sets the pretty_name of this V1VirtualMachineInstanceGuestOSInfo.
Guest OS Pretty Name
:param pretty_name: The pretty_name of this V1VirtualMachineInstanceGuestOSInfo.
:type: str
"""
self._pretty_name = pretty_name
@property
def version(self):
"""
Gets the version of this V1VirtualMachineInstanceGuestOSInfo.
Guest OS Version
:return: The version of this V1VirtualMachineInstanceGuestOSInfo.
:rtype: str
"""
return self._version
@version.setter
def version(self, version):
"""
Sets the version of this V1VirtualMachineInstanceGuestOSInfo.
Guest OS Version
:param version: The version of this V1VirtualMachineInstanceGuestOSInfo.
:type: str
"""
self._version = version
@property
def version_id(self):
"""
Gets the version_id of this V1VirtualMachineInstanceGuestOSInfo.
Version ID of the Guest OS
:return: The version_id of this V1VirtualMachineInstanceGuestOSInfo.
:rtype: str
"""
return self._version_id
@version_id.setter
def version_id(self, version_id):
"""
Sets the version_id of this V1VirtualMachineInstanceGuestOSInfo.
Version ID of the Guest OS
:param version_id: The version_id of this V1VirtualMachineInstanceGuestOSInfo.
:type: str
"""
self._version_id = version_id
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, V1VirtualMachineInstanceGuestOSInfo):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
|
|
import collections
import dask.dataframe as dd
import numpy as np
import pandas as pd
import pandas.testing as tm
import pytest
import ibis
import ibis.expr.datatypes as dt
import ibis.expr.types as ir
from ibis.udf.vectorized import analytic, elementwise, reduction
from ..udf import nullable
# --------
# Fixtures
# --------
@pytest.fixture
def df(npartitions):
return dd.from_pandas(
pd.DataFrame(
{
'a': list('abc'),
'b': [1, 2, 3],
'c': [4.0, 5.0, 6.0],
'key': list('aab'),
}
),
npartitions=npartitions,
)
@pytest.fixture
def df2(npartitions):
# df with some randomness
return dd.from_pandas(
pd.DataFrame(
{
'a': np.arange(4, dtype=float).tolist()
+ np.random.rand(3).tolist(),
'b': np.arange(4, dtype=float).tolist()
+ np.random.rand(3).tolist(),
'c': np.arange(7, dtype=int).tolist(),
'd': list('aaaaddd'),
'key': list('ddeefff'),
}
),
npartitions=npartitions,
)
@pytest.fixture
def df_timestamp(npartitions):
df = pd.DataFrame(
{
'a': list(range(10)),
'b': list('wwwwwxxxxx'),
'c': list('yyyzzzyyzz'),
}
)
df["a"] = df.a.astype(pd.DatetimeTZDtype(tz='UTC'))
return dd.from_pandas(
df,
npartitions=npartitions,
)
@pytest.fixture
def con(df, df2, df_timestamp):
return ibis.dask.connect(
{'df': df, 'df2': df2, 'df_timestamp': df_timestamp}
)
@pytest.fixture
def t(con):
return con.table('df')
@pytest.fixture
def t2(con):
return con.table('df2')
@pytest.fixture
def t_timestamp(con):
return con.table('df_timestamp')
# -------------
# UDF Functions
# -------------
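# The decorators below wrap plain functions that operate on pandas Series:
# `elementwise` UDFs map values row-wise, `reduction` UDFs collapse a column
# (optionally per group), and `analytic` UDFs return a column with the same
# length as their input (e.g. a z-score).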
@elementwise(input_type=['string'], output_type='int64')
def my_string_length(series, **kwargs):
return series.str.len() * 2
@elementwise(input_type=[dt.double, dt.double], output_type=dt.double)
def my_add(series1, series2, **kwargs):
return series1 + series2
@reduction(['double'], 'double')
def my_mean(series):
return series.mean()
@reduction(
input_type=[dt.Timestamp(timezone="UTC")],
output_type=dt.Timestamp(timezone="UTC"),
)
def my_tz_min(series):
return series.min()
@elementwise(
input_type=[dt.Timestamp(timezone="UTC")],
output_type=dt.Timestamp(timezone="UTC"),
)
def my_tz_add_one(series):
return series + pd.Timedelta(1, unit="D")
@reduction(input_type=[dt.string], output_type=dt.int64)
def my_string_length_sum(series, **kwargs):
return (series.str.len() * 2).sum()
@reduction(input_type=[dt.double, dt.double], output_type=dt.double)
def my_corr(lhs, rhs, **kwargs):
return lhs.corr(rhs)
@elementwise([dt.double], dt.double)
def add_one(x):
return x + 1.0
@elementwise([dt.double], dt.double)
def times_two(x):
return x * 2.0
@analytic(input_type=['double'], output_type='double')
def zscore(series):
return (series - series.mean()) / series.std()
@reduction(
input_type=[dt.double],
output_type=dt.Array(dt.double),
)
def quantiles(series, *, quantiles):
return list(series.quantile(quantiles))
# -----
# Tests
# -----
def test_udf(t, df):
expr = my_string_length(t.a)
assert isinstance(expr, ir.ColumnExpr)
result = expr.execute()
expected = df.a.str.len().mul(2).compute()
tm.assert_series_equal(result, expected, check_names=False)
def test_multiple_argument_udf(con, t, df):
expr = my_add(t.b, t.c)
assert isinstance(expr, ir.ColumnExpr)
assert isinstance(expr, ir.NumericColumn)
assert isinstance(expr, ir.FloatingColumn)
result = expr.execute()
expected = (df.b + df.c).compute()
tm.assert_series_equal(result, expected)
def test_multiple_argument_udf_group_by(con, t, df):
expr = t.groupby(t.key).aggregate(my_add=my_add(t.b, t.c).sum())
assert isinstance(expr, ir.TableExpr)
assert isinstance(expr.my_add, ir.ColumnExpr)
assert isinstance(expr.my_add, ir.NumericColumn)
assert isinstance(expr.my_add, ir.FloatingColumn)
result = expr.execute()
expected = pd.DataFrame(
{'key': list('ab'), 'my_add': [sum([1.0 + 4.0, 2.0 + 5.0]), 3.0 + 6.0]}
)
tm.assert_frame_equal(result, expected)
def test_udaf(con, t, df):
expr = my_string_length_sum(t.a)
assert isinstance(expr, ir.ScalarExpr)
result = expr.execute()
expected = t.a.execute().str.len().mul(2).sum()
assert result == expected
def test_udaf_analytic_tzcol(con, t_timestamp, df_timestamp):
expr = my_tz_min(t_timestamp.a)
result = expr.execute()
expected = my_tz_min.func(df_timestamp.a.compute())
assert result == expected
def test_udaf_elementwise_tzcol(con, t_timestamp, df_timestamp):
expr = my_tz_add_one(t_timestamp.a)
result = expr.execute().reset_index(drop=True)
expected = my_tz_add_one.func(df_timestamp.a.compute())
tm.assert_series_equal(result, expected)
def test_udaf_analytic(con, t, df):
expr = zscore(t.c)
assert isinstance(expr, ir.ColumnExpr)
result = expr.execute()
def f(s):
return s.sub(s.mean()).div(s.std())
expected = (f(df.c)).compute()
tm.assert_series_equal(result, expected)
def test_udaf_analytic_groupby(con, t, df):
expr = zscore(t.c).over(ibis.window(group_by=t.key))
assert isinstance(expr, ir.ColumnExpr)
result = expr.execute()
def f(s):
return s.sub(s.mean()).div(s.std())
expected = df.groupby('key').c.transform(f).compute()
# We don't check names here because the udf is used "directly".
# We could potentially special case this and set the name directly
# if the udf is only being run on one column.
tm.assert_series_equal(result, expected, check_names=False)
def test_udaf_groupby(t2, df2):
expr = t2.groupby(t2.key).aggregate(my_corr=my_corr(t2.a, t2.b))
result = expr.execute().sort_values('key').reset_index(drop=True)
dfi = df2.set_index('key').compute()
expected = pd.DataFrame(
{
'key': list('def'),
'my_corr': [
dfi.loc[value, 'a'].corr(dfi.loc[value, 'b'])
for value in 'def'
],
}
)
tm.assert_frame_equal(result, expected)
def test_udaf_groupby_multikey(t2, df2):
expr = t2.groupby([t2.key, t2.d]).aggregate(my_corr=my_corr(t2.a, t2.b))
result = expr.execute().sort_values('key').reset_index(drop=True)
dfi = df2.set_index('key').compute()
expected = pd.DataFrame(
{
'key': list('def'),
'd': list('aad'),
'my_corr': [
dfi.loc[value, 'a'].corr(dfi.loc[value, 'b'])
for value in 'def'
],
}
)
tm.assert_frame_equal(result, expected)
def test_udaf_groupby_multikey_tzcol(t_timestamp, df_timestamp):
expr = t_timestamp.groupby([t_timestamp.b, t_timestamp.c]).aggregate(
my_min_time=my_tz_min(t_timestamp.a)
)
result = expr.execute().sort_values('b').reset_index(drop=True)
expected = (
df_timestamp.groupby(["b", "c"])
.min()
.reset_index()
.rename(columns={'a': "my_min_time"})
.compute()
)
tm.assert_frame_equal(result, expected)
def test_nullable():
t = ibis.table([('a', 'int64')])
assert nullable(t.a.type()) == (type(None),)
def test_nullable_non_nullable_field():
t = ibis.table([('a', dt.String(nullable=False))])
assert nullable(t.a.type()) == ()
def test_compose_udfs(t2, df2):
expr = times_two(add_one(t2.a))
result = expr.execute().reset_index(drop=True)
expected = df2.a.add(1.0).mul(2.0).compute()
tm.assert_series_equal(result, expected, check_names=False)
@pytest.mark.xfail(
raises=NotImplementedError, reason='TODO - windowing - #2553'
)
def test_udaf_window(t2, df2):
window = ibis.trailing_window(2, order_by='a', group_by='key')
expr = t2.mutate(rolled=my_mean(t2.b).over(window))
result = expr.execute().sort_values(['key', 'a'])
expected = df2.sort_values(['key', 'a']).assign(
rolled=lambda df: df.groupby('key')
.b.rolling(3, min_periods=1)
.mean()
.reset_index(level=0, drop=True)
)
tm.assert_frame_equal(result, expected)
@pytest.mark.xfail(
raises=NotImplementedError, reason='TODO - windowing - #2553'
)
def test_udaf_window_interval(npartitions):
df = pd.DataFrame(
collections.OrderedDict(
[
(
"time",
pd.date_range(
start='20190105', end='20190101', freq='-1D'
),
),
("key", [1, 2, 1, 2, 1]),
("value", np.arange(5)),
]
)
)
df = dd.from_pandas(df, npartitions=npartitions)
con = ibis.dask.connect({'df': df})
t = con.table('df')
window = ibis.trailing_range_window(
ibis.interval(days=2), order_by='time', group_by='key'
)
expr = t.mutate(rolled=my_mean(t.value).over(window))
result = expr.execute().sort_values(['time', 'key']).reset_index(drop=True)
expected = (
df.sort_values(['time', 'key'])
.set_index('time')
.assign(
rolled=lambda df: df.groupby('key')
.value.rolling('2D', closed='both')
.mean()
.reset_index(level=0, drop=True)
)
).reset_index(drop=False)
tm.assert_frame_equal(result, expected)
@pytest.mark.xfail(
raises=NotImplementedError, reason='TODO - windowing - #2553'
)
def test_multiple_argument_udaf_window(npartitions):
@reduction(['double', 'double'], 'double')
def my_wm(v, w):
return np.average(v, weights=w)
df = pd.DataFrame(
{
'a': np.arange(4, 0, dtype=float, step=-1).tolist()
+ np.random.rand(3).tolist(),
'b': np.arange(4, dtype=float).tolist()
+ np.random.rand(3).tolist(),
'c': np.arange(4, dtype=float).tolist()
+ np.random.rand(3).tolist(),
'd': np.repeat(1, 7),
'key': list('deefefd'),
}
)
df = dd.from_pandas(df, npartitions=npartitions)
con = ibis.dask.connect({'df': df})
t = con.table('df')
window = ibis.trailing_window(2, order_by='a', group_by='key')
window2 = ibis.trailing_window(1, order_by='b', group_by='key')
expr = t.mutate(
wm_b=my_wm(t.b, t.d).over(window),
wm_c=my_wm(t.c, t.d).over(window),
wm_c2=my_wm(t.c, t.d).over(window2),
)
result = expr.execute().sort_values(['key', 'a'])
expected = (
df.sort_values(['key', 'a'])
.assign(
wm_b=lambda df: df.groupby('key')
.b.rolling(3, min_periods=1)
.mean()
.reset_index(level=0, drop=True)
)
.assign(
wm_c=lambda df: df.groupby('key')
.c.rolling(3, min_periods=1)
.mean()
.reset_index(level=0, drop=True)
)
)
expected = expected.sort_values(['key', 'b']).assign(
wm_c2=lambda df: df.groupby('key')
.c.rolling(2, min_periods=1)
.mean()
.reset_index(level=0, drop=True)
)
expected = expected.sort_values(['key', 'a'])
tm.assert_frame_equal(result, expected)
@pytest.mark.xfail(
raises=NotImplementedError, reason='TODO - windowing - #2553'
)
def test_udaf_window_nan(npartitions):
df = pd.DataFrame(
{
'a': np.arange(10, dtype=float),
'b': [3.0, np.NaN] * 5,
'key': list('ddeefffggh'),
}
)
df = dd.from_pandas(df, npartitions=npartitions)
con = ibis.dask.connect({'df': df})
t = con.table('df')
window = ibis.trailing_window(2, order_by='a', group_by='key')
expr = t.mutate(rolled=my_mean(t.b).over(window))
result = expr.execute().sort_values(['key', 'a'])
expected = df.sort_values(['key', 'a']).assign(
rolled=lambda d: d.groupby('key')
.b.rolling(3, min_periods=1)
.apply(lambda x: x.mean(), raw=True)
.reset_index(level=0, drop=True)
)
tm.assert_frame_equal(result, expected)
@pytest.fixture(params=[[0.25, 0.75], [0.01, 0.99]])
def qs(request):
return request.param
def test_array_return_type_reduction(con, t, df, qs):
"""Tests reduction UDF returning an array."""
expr = quantiles(t.b, quantiles=qs)
result = expr.execute()
expected = df.b.quantile(qs).compute()
assert list(result) == expected.tolist()
def test_array_return_type_reduction_window(con, t, df, qs):
"""Tests reduction UDF returning an array, used over a window."""
expr = quantiles(t.b, quantiles=qs).over(ibis.window())
result = expr.execute()
expected_raw = df.b.quantile(qs).compute().tolist()
expected = pd.Series([expected_raw] * len(df))
tm.assert_series_equal(result, expected)
def test_array_return_type_reduction_group_by(con, t, df, qs):
"""Tests reduction UDF returning an array, used in a grouped agg."""
expr = t.groupby(t.key).aggregate(
quantiles_col=quantiles(t.b, quantiles=qs)
)
result = expr.execute()
df = df.compute() # Convert to Pandas
expected_col = df.groupby(df.key).b.agg(lambda s: s.quantile(qs).tolist())
expected = pd.DataFrame({'quantiles_col': expected_col}).reset_index()
tm.assert_frame_equal(result, expected)
def test_elementwise_udf_with_many_args(t2):
@elementwise(
input_type=[dt.double] * 16 + [dt.int32] * 8, output_type=dt.double
)
def my_udf(
c1,
c2,
c3,
c4,
c5,
c6,
c7,
c8,
c9,
c10,
c11,
c12,
c13,
c14,
c15,
c16,
c17,
c18,
c19,
c20,
c21,
c22,
c23,
c24,
):
return c1
expr = my_udf(*([t2.a] * 8 + [t2.b] * 8 + [t2.c] * 8))
result = expr.execute()
expected = t2.a.execute()
tm.assert_series_equal(result, expected, check_names=False)
# -----------------
# Test raised errors
# -----------------
def test_udaf_parameter_mismatch():
with pytest.raises(TypeError):
@reduction(input_type=[dt.double], output_type=dt.double)
def my_corr(lhs, rhs, **kwargs):
pass
def test_udf_parameter_mismatch():
with pytest.raises(TypeError):
@reduction(input_type=[], output_type=dt.double)
def my_corr2(lhs, **kwargs):
pass
def test_udf_error(t):
@elementwise(input_type=[dt.double], output_type=dt.double)
def error_udf(s):
raise ValueError('xxx')
with pytest.raises(ValueError):
error_udf(t.c).execute()
|
|
from conans.model.options import Options, PackageOptions, OptionsValues
from conans.model.requires import Requirements
from conans.model.build_info import DepsCppInfo
from conans import tools # @UnusedImport KEEP THIS! Needed for pyinstaller to copy to exe.
from conans.errors import ConanException
from conans.model.env_info import DepsEnvInfo
import os
def create_options(conanfile):
try:
package_options = PackageOptions(getattr(conanfile, "options", None))
options = Options(package_options)
default_options = getattr(conanfile, "default_options", None)
if default_options:
if isinstance(default_options, tuple):
default_values = OptionsValues.loads("\n".join(default_options))
elif isinstance(default_options, list):
default_values = OptionsValues.from_list(default_options)
elif isinstance(default_options, str):
default_values = OptionsValues.loads(default_options)
else:
raise ConanException("Please define your default_options as list or "
"multiline string")
options.values = default_values
return options
except Exception as e:
raise ConanException("Error while initializing options. %s" % str(e))
def create_requirements(conanfile):
try:
# Actual requirements of this package
if not hasattr(conanfile, "requires"):
return Requirements()
else:
if isinstance(conanfile.requires, tuple):
return Requirements(*conanfile.requires)
else:
return Requirements(conanfile.requires, )
except Exception as e:
raise ConanException("Error while initializing requirements. %s" % str(e))
def create_settings(conanfile, settings):
try:
defined_settings = getattr(conanfile, "settings", None)
if isinstance(defined_settings, str):
defined_settings = [defined_settings]
current = defined_settings or {}
settings.constraint(current)
return settings
except Exception as e:
raise ConanException("Error while initializing settings. %s" % str(e))
def create_exports(conanfile):
if not hasattr(conanfile, "exports"):
return None
else:
if isinstance(conanfile.exports, str):
return (conanfile.exports, )
return conanfile.exports
class ConanFile(object):
""" The base class for all conans
"""
name = None
version = None # Any str, can be "1.1" or whatever
    url = None  # The URL where this file is located, e.g. GitHub, to collaborate on the package
# The license of the PACKAGE, just a shortcut, does not replace or
# change the actual license of the source code
license = None
author = None # Main maintainer/responsible for the package, any format
build_policy = None
short_paths = False
def __init__(self, output, runner, settings, conanfile_directory):
'''
param settings: Settings
'''
# User defined generators
self.generators = self.generators if hasattr(self, "generators") else ["txt"]
if isinstance(self.generators, str):
self.generators = [self.generators]
# User defined options
self.options = create_options(self)
self.requires = create_requirements(self)
self.settings = create_settings(self, settings)
self.exports = create_exports(self)
# needed variables to pack the project
self.cpp_info = None # Will be initialized at processing time
self.deps_cpp_info = DepsCppInfo()
# environment variables declared in the package_info
self.env_info = None # Will be initialized at processing time
self.deps_env_info = DepsEnvInfo()
self.copy = None # initialized at runtime
        # an output stream (writeln, info, warn, error)
self.output = output
        # something that can run commands, like os.system
self._runner = runner
self._conanfile_directory = conanfile_directory
self.package_folder = None # Assigned at runtime
self._scope = None
def collect_libs(self, folder="lib"):
if not self.package_folder:
return []
lib_folder = os.path.join(self.package_folder, folder)
if not os.path.exists(lib_folder):
self.output.warn("Package folder doesn't exist, can't collect libraries")
return []
files = os.listdir(lib_folder)
result = []
for f in files:
name, ext = os.path.splitext(f)
if ext in (".so", ".lib", ".a", ".dylib"):
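                # Strip the conventional "lib" prefix (libfoo.a -> foo), except
                # for Windows .lib files, where the name is kept as-is.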
if ext != ".lib" and name.startswith("lib"):
name = name[3:]
result.append(name)
return result
@property
def scope(self):
return self._scope
@scope.setter
def scope(self, value):
self._scope = value
if value.dev:
self.requires.allow_dev = True
try:
if hasattr(self, "dev_requires"):
if isinstance(self.dev_requires, tuple):
self.requires.add_dev(*self.dev_requires)
else:
self.requires.add_dev(self.dev_requires, )
except Exception as e:
raise ConanException("Error while initializing dev_requirements. %s" % str(e))
@property
def conanfile_directory(self):
return self._conanfile_directory
@property
def build_policy_missing(self):
return self.build_policy == "missing"
@property
def build_policy_always(self):
return self.build_policy == "always"
def source(self):
pass
def requirements(self):
pass
def system_requirements(self):
""" this method can be overwritten to implement logic for system package
managers, as apt-get
You can define self.global_system_requirements = True, if you want the installation
to be for all packages (not depending on settings/options/requirements)
"""
def config_options(self):
""" modify options, probably conditioned to some settings. This call is executed
before config_settings. E.g.
if self.settings.os == "Windows":
del self.options.shared # shared/static not supported in win
"""
def configure(self):
""" modify settings, probably conditioned to some options. This call is executed
after config_options. E.g.
if self.options.header_only:
self.settings.clear()
This is also the place for conditional requirements
"""
def imports(self):
pass
def build(self):
self.output.warn("This conanfile has no build step")
def package(self):
self.output.warn("This conanfile has no package step")
def package_info(self):
""" define cpp_build_info, flags, etc
"""
def run(self, command, output=True, cwd=None):
""" runs such a command in the folder the Conan
is defined
"""
retcode = self._runner(command, output, cwd)
if retcode != 0:
raise ConanException("Error %d while executing %s" % (retcode, command))
def conan_info(self):
""" modify the conans info, typically to narrow values
eg.: conaninfo.package_references = []
"""
def test(self):
raise ConanException("You need to create a method 'test' in your test/conanfile.py")
def __repr__(self):
result = []
result.append("name: %s" % self.name)
result.append("version: %s" % self.version)
return '\n'.join(result)
|
|
from __future__ import unicode_literals
import warnings
from django.db import models
from django.db.utils import DatabaseError
from django.template import Context, Template
from django.test import TestCase, override_settings
from django.test.utils import isolate_apps
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.encoding import force_text
from .models import (
AbstractBase1, AbstractBase2, AbstractBase3, Child1, Child2, Child3,
Child4, Child5, Child6, Child7, RelatedModel, RelationModel,
)
class ManagersRegressionTests(TestCase):
def test_managers(self):
Child1.objects.create(name='fred', data='a1')
Child1.objects.create(name='barney', data='a2')
Child2.objects.create(name='fred', data='b1', value=1)
Child2.objects.create(name='barney', data='b2', value=42)
Child3.objects.create(name='fred', data='c1', comment='yes')
Child3.objects.create(name='barney', data='c2', comment='no')
Child4.objects.create(name='fred', data='d1')
Child4.objects.create(name='barney', data='d2')
Child5.objects.create(name='fred', comment='yes')
Child5.objects.create(name='barney', comment='no')
Child6.objects.create(name='fred', data='f1', value=42)
Child6.objects.create(name='barney', data='f2', value=42)
Child7.objects.create(name='fred')
Child7.objects.create(name='barney')
self.assertQuerysetEqual(Child1.manager1.all(), ["<Child1: a1>"])
self.assertQuerysetEqual(Child1.manager2.all(), ["<Child1: a2>"])
self.assertQuerysetEqual(Child1._default_manager.all(), ["<Child1: a1>"])
self.assertQuerysetEqual(Child2._default_manager.all(), ["<Child2: b1>"])
self.assertQuerysetEqual(Child2.restricted.all(), ["<Child2: b2>"])
self.assertQuerysetEqual(Child3._default_manager.all(), ["<Child3: c1>"])
self.assertQuerysetEqual(Child3.manager1.all(), ["<Child3: c1>"])
self.assertQuerysetEqual(Child3.manager2.all(), ["<Child3: c2>"])
# Since Child6 inherits from Child4, the corresponding rows from f1 and
# f2 also appear here. This is the expected result.
self.assertQuerysetEqual(Child4._default_manager.order_by('data'), [
"<Child4: d1>",
"<Child4: d2>",
"<Child4: f1>",
"<Child4: f2>",
])
self.assertQuerysetEqual(Child4.manager1.all(), ["<Child4: d1>", "<Child4: f1>"], ordered=False)
self.assertQuerysetEqual(Child5._default_manager.all(), ["<Child5: fred>"])
self.assertQuerysetEqual(Child6._default_manager.all(), ["<Child6: f1>", "<Child6: f2>"], ordered=False)
self.assertQuerysetEqual(
Child7._default_manager.order_by('name'),
["<Child7: barney>", "<Child7: fred>"]
)
def test_abstract_manager(self):
# Accessing the manager on an abstract model should
# raise an attribute error with an appropriate message.
        # This error message isn't ideal, but when the model is abstract,
        # a lot of the class instantiation logic isn't invoked; if the
        # manager is implied, then we don't get a hook to install the
        # error-raising manager.
msg = "type object 'AbstractBase3' has no attribute 'objects'"
with self.assertRaisesMessage(AttributeError, msg):
AbstractBase3.objects.all()
def test_custom_abstract_manager(self):
        # Accessing the manager on an abstract model with a custom
# manager should raise an attribute error with an appropriate
# message.
msg = "Manager isn't available; AbstractBase2 is abstract"
with self.assertRaisesMessage(AttributeError, msg):
AbstractBase2.restricted.all()
def test_explicit_abstract_manager(self):
# Accessing the manager on an abstract model with an explicit
# manager should raise an attribute error with an appropriate
# message.
msg = "Manager isn't available; AbstractBase1 is abstract"
with self.assertRaisesMessage(AttributeError, msg):
AbstractBase1.objects.all()
@override_settings(TEST_SWAPPABLE_MODEL='managers_regress.Parent')
@isolate_apps('managers_regress')
def test_swappable_manager(self):
class SwappableModel(models.Model):
class Meta:
swappable = 'TEST_SWAPPABLE_MODEL'
# Accessing the manager on a swappable model should
# raise an attribute error with a helpful message
msg = (
"Manager isn't available; 'managers_regress.SwappableModel' "
"has been swapped for 'managers_regress.Parent'"
)
with self.assertRaisesMessage(AttributeError, msg):
SwappableModel.objects.all()
@override_settings(TEST_SWAPPABLE_MODEL='managers_regress.Parent')
@isolate_apps('managers_regress')
def test_custom_swappable_manager(self):
class SwappableModel(models.Model):
stuff = models.Manager()
class Meta:
swappable = 'TEST_SWAPPABLE_MODEL'
# Accessing the manager on a swappable model with an
# explicit manager should raise an attribute error with a
# helpful message
msg = (
"Manager isn't available; 'managers_regress.SwappableModel' "
"has been swapped for 'managers_regress.Parent'"
)
with self.assertRaisesMessage(AttributeError, msg):
SwappableModel.stuff.all()
@override_settings(TEST_SWAPPABLE_MODEL='managers_regress.Parent')
@isolate_apps('managers_regress')
def test_explicit_swappable_manager(self):
class SwappableModel(models.Model):
objects = models.Manager()
class Meta:
swappable = 'TEST_SWAPPABLE_MODEL'
# Accessing the manager on a swappable model with an
# explicit manager should raise an attribute error with a
# helpful message
msg = (
"Manager isn't available; 'managers_regress.SwappableModel' "
"has been swapped for 'managers_regress.Parent'"
)
with self.assertRaisesMessage(AttributeError, msg):
SwappableModel.objects.all()
def test_regress_3871(self):
related = RelatedModel.objects.create()
relation = RelationModel()
relation.fk = related
relation.gfk = related
relation.save()
relation.m2m.add(related)
t = Template('{{ related.test_fk.all.0 }}{{ related.test_gfk.all.0 }}{{ related.test_m2m.all.0 }}')
self.assertEqual(
t.render(Context({'related': related})),
''.join([force_text(relation.pk)] * 3),
)
def test_field_can_be_called_exact(self):
# Make sure related managers core filters don't include an
# explicit `__exact` lookup that could be interpreted as a
# reference to a foreign `exact` field. refs #23940.
related = RelatedModel.objects.create(exact=False)
relation = related.test_fk.create()
self.assertEqual(related.test_fk.get(), relation)
@isolate_apps('managers_regress')
class TestManagerInheritance(TestCase):
def test_implicit_inheritance(self):
class CustomManager(models.Manager):
pass
class AbstractModel(models.Model):
custom_manager = CustomManager()
class Meta:
abstract = True
class PlainModel(models.Model):
custom_manager = CustomManager()
self.assertIsInstance(PlainModel._base_manager, models.Manager)
self.assertIsInstance(PlainModel._default_manager, CustomManager)
class ModelWithAbstractParent(AbstractModel):
class Meta:
manager_inheritance_from_future = True
self.assertIsInstance(ModelWithAbstractParent._base_manager, models.Manager)
self.assertIsInstance(ModelWithAbstractParent._default_manager, CustomManager)
class ProxyModel(PlainModel):
class Meta:
manager_inheritance_from_future = True
proxy = True
self.assertIsInstance(ProxyModel._base_manager, models.Manager)
self.assertIsInstance(ProxyModel._default_manager, CustomManager)
class MTIModel(PlainModel):
class Meta:
manager_inheritance_from_future = True
self.assertIsInstance(MTIModel._base_manager, models.Manager)
self.assertIsInstance(MTIModel._default_manager, CustomManager)
def test_default_manager_inheritance(self):
class CustomManager(models.Manager):
pass
class AbstractModel(models.Model):
another_manager = models.Manager()
custom_manager = CustomManager()
class Meta:
default_manager_name = 'custom_manager'
abstract = True
class PlainModel(models.Model):
another_manager = models.Manager()
custom_manager = CustomManager()
class Meta:
default_manager_name = 'custom_manager'
self.assertIsInstance(PlainModel._default_manager, CustomManager)
class ModelWithAbstractParent(AbstractModel):
class Meta:
manager_inheritance_from_future = True
self.assertIsInstance(ModelWithAbstractParent._default_manager, CustomManager)
class ProxyModel(PlainModel):
class Meta:
manager_inheritance_from_future = True
proxy = True
self.assertIsInstance(ProxyModel._default_manager, CustomManager)
class MTIModel(PlainModel):
class Meta:
manager_inheritance_from_future = True
self.assertIsInstance(MTIModel._default_manager, CustomManager)
def test_base_manager_inheritance(self):
class CustomManager(models.Manager):
pass
class AbstractModel(models.Model):
another_manager = models.Manager()
custom_manager = CustomManager()
class Meta:
base_manager_name = 'custom_manager'
abstract = True
class PlainModel(models.Model):
another_manager = models.Manager()
custom_manager = CustomManager()
class Meta:
base_manager_name = 'custom_manager'
self.assertIsInstance(PlainModel._base_manager, CustomManager)
class ModelWithAbstractParent(AbstractModel):
class Meta:
manager_inheritance_from_future = True
self.assertIsInstance(ModelWithAbstractParent._base_manager, CustomManager)
class ProxyModel(PlainModel):
class Meta:
manager_inheritance_from_future = True
proxy = True
self.assertIsInstance(ProxyModel._base_manager, CustomManager)
class MTIModel(PlainModel):
class Meta:
manager_inheritance_from_future = True
self.assertIsInstance(MTIModel._base_manager, CustomManager)
def test_manager_no_duplicates(self):
class CustomManager(models.Manager):
pass
class AbstractModel(models.Model):
custom_manager = models.Manager()
class Meta:
abstract = True
class TestModel(AbstractModel):
custom_manager = CustomManager()
self.assertEqual(TestModel._meta.managers, (TestModel.custom_manager,))
self.assertEqual(TestModel._meta.managers_map, {'custom_manager': TestModel.custom_manager})
@isolate_apps('managers_regress')
class TestManagerDeprecations(TestCase):
def test_use_for_related_fields_on_geomanager(self):
from django.contrib.gis.db.models import GeoManager
class MyModel(models.Model):
objects = GeoManager()
# Shouldn't issue any warnings, since GeoManager itself will be
# deprecated at the same time as use_for_related_fields, there
# is no point annoying users with this deprecation.
with warnings.catch_warnings(record=True) as warns:
warnings.simplefilter('always', RemovedInDjango20Warning)
MyModel._base_manager
self.assertEqual(len(warns), 0)
def test_use_for_related_fields_for_base_manager(self):
class MyManager(models.Manager):
use_for_related_fields = True
class MyModel(models.Model):
objects = MyManager()
with warnings.catch_warnings(record=True) as warns:
warnings.simplefilter('always', RemovedInDjango20Warning)
MyModel._base_manager
self.assertEqual(len(warns), 1)
self.assertEqual(
str(warns[0].message),
"use_for_related_fields is deprecated, "
"instead set Meta.base_manager_name on "
"'managers_regress.MyModel'.",
)
# With the new base_manager_name API there shouldn't be any warnings.
class MyModel2(models.Model):
objects = MyManager()
class Meta:
base_manager_name = 'objects'
with warnings.catch_warnings(record=True) as warns:
warnings.simplefilter('always', RemovedInDjango20Warning)
MyModel2._base_manager
self.assertEqual(len(warns), 0)
def test_use_for_related_fields_for_many_to_one(self):
# Common objects
class MyManagerQuerySet(models.QuerySet):
pass
class MyLegacyManagerQuerySet(models.QuerySet):
pass
class MyManager(models.Manager):
def get_queryset(self):
return MyManagerQuerySet(model=self.model, using=self._db, hints=self._hints)
class MyLegacyManager(models.Manager):
use_for_related_fields = True
def get_queryset(self):
return MyLegacyManagerQuerySet(model=self.model, using=self._db, hints=self._hints)
# With legacy config there should be a deprecation warning
class MyRelModel(models.Model):
objects = MyLegacyManager()
class MyModel(models.Model):
fk = models.ForeignKey(MyRelModel, on_delete=models.DO_NOTHING)
with warnings.catch_warnings(record=True) as warns:
warnings.simplefilter('always', RemovedInDjango20Warning)
try:
MyModel(fk_id=42).fk
except DatabaseError:
pass
self.assertEqual(len(warns), 1)
self.assertEqual(
str(warns[0].message),
"use_for_related_fields is deprecated, "
"instead set Meta.base_manager_name on "
"'managers_regress.MyRelModel'.",
)
# With the new base_manager_name API there shouldn't be any warnings.
class MyRelModel2(models.Model):
objects = MyManager()
class Meta:
base_manager_name = 'objects'
class MyModel2(models.Model):
fk = models.ForeignKey(MyRelModel2, on_delete=models.DO_NOTHING)
with warnings.catch_warnings(record=True) as warns:
warnings.simplefilter('always', RemovedInDjango20Warning)
try:
MyModel2(fk_id=42).fk
except DatabaseError:
pass
self.assertEqual(len(warns), 0)
# When mixing base_manager_name and use_for_related_fields, there
# should be warnings.
class MyRelModel3(models.Model):
my_base_manager = MyManager()
my_default_manager = MyLegacyManager()
class Meta:
base_manager_name = 'my_base_manager'
default_manager_name = 'my_default_manager'
class MyModel3(models.Model):
fk = models.ForeignKey(MyRelModel3, on_delete=models.DO_NOTHING)
with warnings.catch_warnings(record=True) as warns:
warnings.simplefilter('always', RemovedInDjango20Warning)
try:
MyModel3(fk_id=42).fk
except DatabaseError:
pass
self.assertEqual(len(warns), 1)
self.assertEqual(
str(warns[0].message),
"use_for_related_fields is deprecated, "
"instead set Meta.base_manager_name on "
"'managers_regress.MyRelModel3'.",
)
with warnings.catch_warnings(record=True):
warnings.simplefilter('always', RemovedInDjango20Warning)
self.assertIsInstance(MyModel3.fk.get_queryset(), MyLegacyManagerQuerySet)
def test_use_for_related_fields_for_one_to_one(self):
# Common objects
class MyManagerQuerySet(models.QuerySet):
pass
class MyLegacyManagerQuerySet(models.QuerySet):
pass
class MyManager(models.Manager):
def get_queryset(self):
return MyManagerQuerySet(model=self.model, using=self._db, hints=self._hints)
class MyLegacyManager(models.Manager):
use_for_related_fields = True
def get_queryset(self):
return MyLegacyManagerQuerySet(model=self.model, using=self._db, hints=self._hints)
# With legacy config there should be a deprecation warning
class MyRelModel(models.Model):
objects = MyLegacyManager()
class MyModel(models.Model):
o2o = models.OneToOneField(MyRelModel, on_delete=models.DO_NOTHING)
objects = MyLegacyManager()
with warnings.catch_warnings(record=True) as warns:
warnings.simplefilter('always', RemovedInDjango20Warning)
try:
MyModel(o2o_id=42).o2o
except DatabaseError:
pass
self.assertEqual(len(warns), 1)
self.assertEqual(
str(warns[0].message),
"use_for_related_fields is deprecated, "
"instead set Meta.base_manager_name on "
"'managers_regress.MyRelModel'.",
)
with warnings.catch_warnings(record=True) as warns:
warnings.simplefilter('always', RemovedInDjango20Warning)
try:
MyRelModel(pk=42).mymodel
except DatabaseError:
pass
self.assertEqual(len(warns), 1)
self.assertEqual(
str(warns[0].message),
"use_for_related_fields is deprecated, "
"instead set Meta.base_manager_name on "
"'managers_regress.MyModel'.",
)
# With the new base_manager_name API there shouldn't be any warnings.
class MyRelModel2(models.Model):
objects = MyManager()
class Meta:
base_manager_name = 'objects'
class MyModel2(models.Model):
o2o = models.OneToOneField(MyRelModel2, on_delete=models.DO_NOTHING)
objects = MyManager()
class Meta:
base_manager_name = 'objects'
with warnings.catch_warnings(record=True) as warns:
warnings.simplefilter('always', RemovedInDjango20Warning)
try:
MyModel2(o2o_id=42).o2o
except DatabaseError:
pass
try:
MyRelModel2(pk=42).mymodel2
except DatabaseError:
pass
self.assertEqual(len(warns), 0)
# When mixing base_manager_name and use_for_related_fields, there
# should be warnings.
class MyRelModel3(models.Model):
my_base_manager = MyManager()
my_default_manager = MyLegacyManager()
class Meta:
base_manager_name = 'my_base_manager'
default_manager_name = 'my_default_manager'
class MyModel3(models.Model):
o2o = models.OneToOneField(MyRelModel3, on_delete=models.DO_NOTHING)
with warnings.catch_warnings(record=True) as warns:
warnings.simplefilter('always', RemovedInDjango20Warning)
try:
MyModel3(o2o_id=42).o2o
except DatabaseError:
pass
self.assertEqual(len(warns), 1)
self.assertEqual(
str(warns[0].message),
"use_for_related_fields is deprecated, "
"instead set Meta.base_manager_name on "
"'managers_regress.MyRelModel3'.",
)
with warnings.catch_warnings(record=True):
warnings.simplefilter('always', RemovedInDjango20Warning)
self.assertIsInstance(MyModel3.o2o.get_queryset(), MyLegacyManagerQuerySet)
def test_legacy_objects_is_created(self):
class ConcreteParentWithoutManager(models.Model):
pass
class ConcreteParentWithManager(models.Model):
default = models.Manager()
class AbstractParent(models.Model):
default = models.Manager()
class Meta:
abstract = True
# Shouldn't complain since the inherited manager
# is basically the same that would have been created.
with warnings.catch_warnings(record=True) as warns:
warnings.simplefilter('always', RemovedInDjango20Warning)
class MyModel(ConcreteParentWithoutManager):
pass
self.assertEqual(len(warns), 0)
# Should create 'objects' (set as default) and warn that
# it will no longer be the case in the future.
with warnings.catch_warnings(record=True) as warns:
warnings.simplefilter('always', RemovedInDjango20Warning)
class MyModel2(ConcreteParentWithManager):
pass
self.assertEqual(len(warns), 1)
self.assertEqual(
str(warns[0].message),
"Managers from concrete parents will soon qualify as default "
"managers. As a result, the 'objects' manager won't be created "
"(or recreated) automatically anymore on "
"'managers_regress.MyModel2' and 'default' declared on "
"'managers_regress.ConcreteParentWithManager' will be promoted "
"to default manager. You can declare explicitly "
"`objects = models.Manager()` on 'MyModel2' to keep things the "
"way they are or you can switch to the new behavior right away "
"by setting `Meta.manager_inheritance_from_future` to `True`.",
)
self.assertIs(MyModel2.objects, MyModel2._default_manager)
# When there is a local manager we shouldn't get any warning
# and 'objects' shouldn't be created.
class MyModel3(ConcreteParentWithManager):
default = models.Manager()
self.assertIs(MyModel3.default, MyModel3._default_manager)
self.assertIsNone(getattr(MyModel3, 'objects', None))
# When there is an inherited manager we shouldn't get any warning
# and 'objects' shouldn't be created.
class MyModel4(AbstractParent, ConcreteParentWithManager):
pass
self.assertIs(MyModel4.default, MyModel4._default_manager)
self.assertIsNone(getattr(MyModel4, 'objects', None))
# With `manager_inheritance_from_future = True` 'objects'
# shouldn't be created.
class MyModel5(ConcreteParentWithManager):
class Meta:
manager_inheritance_from_future = True
self.assertIs(MyModel5.default, MyModel5._default_manager)
self.assertIsNone(getattr(MyModel5, 'objects', None))
def test_legacy_default_manager_promotion(self):
class ConcreteParent(models.Model):
concrete = models.Manager()
class AbstractParent(models.Model):
abstract = models.Manager()
class Meta:
abstract = True
with warnings.catch_warnings(record=True) as warns:
warnings.simplefilter('always', RemovedInDjango20Warning)
class MyModel(ConcreteParent, AbstractParent):
pass
self.assertEqual(len(warns), 1)
self.assertEqual(
str(warns[0].message),
"Managers from concrete parents will soon qualify as default "
"managers if they appear before any other managers in the "
"MRO. As a result, 'abstract' declared on "
"'managers_regress.AbstractParent' will no longer be the "
"default manager for 'managers_regress.MyModel' in favor of "
"'concrete' declared on 'managers_regress.ConcreteParent'. "
"You can redeclare 'abstract' on 'MyModel' to keep things the "
"way they are or you can switch to the new behavior right "
"away by setting `Meta.manager_inheritance_from_future` to "
"`True`.",
)
self.assertIs(MyModel.abstract, MyModel._default_manager)
class MyModel2(ConcreteParent, AbstractParent):
abstract = models.Manager()
self.assertIs(MyModel2.abstract, MyModel2._default_manager)
class MyModel3(ConcreteParent, AbstractParent):
class Meta:
manager_inheritance_from_future = True
self.assertIs(MyModel3.concrete, MyModel3._default_manager)
|
|
#!/usr/bin/python
import json
import boto3
import sys
import time
import timeit
from datetime import datetime, timedelta
# Pretty print a JSON object - for debugging
def pretty(d, indent=0):
    for key, value in d.items():
        print('\t' * indent + str(key))
        if isinstance(value, dict):
            pretty(value, indent + 1)
        else:
            print('\t' * (indent + 1) + str(value))
def get_instance_info(emr, cluster_id):
info = emr.list_instance_groups(ClusterId=cluster_id)
master = []
worker = []
    for instance in info['InstanceGroups']:
        if instance['InstanceGroupType'] == "MASTER":
            array = master
        elif instance['InstanceGroupType'] == "CORE":
            array = worker
        else:
            # Skip instance group types we don't track (e.g. TASK)
            continue
        array.append(instance['Status']['State'])
        array.append(instance['RequestedInstanceCount'])
        array.append(instance['RunningInstanceCount'])
return master, worker
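# Illustrative only (assumed shape of the boto3 response): for a cluster with
# one master and three running core nodes, get_instance_info might return
#   (['RUNNING', 1, 1], ['RUNNING', 3, 3])
# i.e. [state, requested count, running count] for each instance group.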
if len(sys.argv) != 2:
print("usage: mrgeo-emr.py <config file>")
sys.exit(-1)
with open(sys.argv[1]) as configFile:
config = json.load(configFile)
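# Illustrative only: the config file is expected to be JSON; the key names
# below are the ones this script reads, but the values are made-up examples.
# {
#   "EmrVersion": "emr-4.7.2",
#   "zone": "us-west-2a",
#   "zones": [{"zone": "us-west-2a", "subnetId": "subnet-12345678"}],
#   "MasterType": "m3.xlarge",
#   "WorkerType": "m3.xlarge",
#   "WorkerNodes": 4,
#   "Spot": 1,
#   "InstallAccumulo": 0,
#   "Ec2KeyName": "my-key",
#   "LogUri": "s3://my-bucket/logs/",
#   "ClusterName": "",
#   "ClusterPrefix": "mrgeo-",
#   "GDALBootstrap": "s3://my-bucket/bootstrap/gdal.sh",
#   "OpenCVBootstrap": "s3://my-bucket/bootstrap/opencv.sh",
#   "InstallMrGeoStep": "s3://my-bucket/steps/install-mrgeo.sh",
#   "tags": [{"Key": "Name", "Value": "mrgeo-cluster"}],
#   "localConfiguration": {}
# }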
emr_version = config["EmrVersion"]
use_zone = config["zone"]
zones = config["zones"]
machine = "Linux/UNIX"
master_type = config["MasterType"]
worker_type = config["WorkerType"]
worker_nodes = int(config["WorkerNodes"])
use_spot = int(config["Spot"])
install_accumulo = int(config["InstallAccumulo"])
key_name = config["Ec2KeyName"]
log_uri = config['LogUri']
start_time = datetime.today()
cluster_name = config["ClusterName"]
if len(cluster_name) <= 0:
    cluster_name = (config["ClusterPrefix"] + start_time.strftime('%Y-%m-%d-%I:%M:%S') +
                    time.tzname[1] + "-" + worker_type + "-" + str(worker_nodes))
emr = boto3.client("emr")
# Define bootstrap steps - these execute on every node in the cluster
install_gdal = {
'Name': 'Install GDAL',
'ScriptBootstrapAction': {
'Path': config["GDALBootstrap"],
'Args': []
}
}
install_opencv = {
'Name': 'Install OpenCV',
'ScriptBootstrapAction': {
'Path': config["OpenCVBootstrap"],
'Args': []
}
}
accumulo_bootstrap = {
'Name': 'Install Accumulo',
'ScriptBootstrapAction': {
'Path': 's3://elasticmapreduce.bootstrapactions/accumulo/1.6.1/install-accumulo_mj',
'Args': []
}
}
# Define setup steps - these execute only on the name node
install_mrgeo_step = {
'Name': 'Install MrGeo',
'ActionOnFailure': 'TERMINATE_CLUSTER',
'HadoopJarStep': {
'Jar': 's3://us-west-2.elasticmapreduce/libs/script-runner/script-runner.jar',
'Args': [config["InstallMrGeoStep"]]
}
}
bootstrap_actions = [install_gdal, install_opencv]
if (install_accumulo == 1):
bootstrap_actions.append(accumulo_bootstrap)
ec2 = boto3.client('ec2')
# Get the current spot price
if (use_spot == 1):
use_zone = None
spot_price = 9999
print("Checking spot prices")
for tryZone in zones:
zone_name = tryZone["zone"]
curr_time = datetime.utcnow()
back_minute = timedelta(seconds=-59)
minute_ago = curr_time + back_minute
# The following call returns a list of one element that looks like:
# [SpotPriceHistory(m3.xlarge):0.043500]
price_result = ec2.describe_spot_price_history(StartTime=minute_ago.isoformat(),
EndTime=curr_time.isoformat(),
InstanceTypes=[worker_type],
AvailabilityZone=zone_name,
ProductDescriptions=[machine])
if len(price_result['SpotPriceHistory']) > 0:
zone_spot_price = float(price_result['SpotPriceHistory'][0]['SpotPrice'])
print " " + zone_name + ": " + "{0:.3f}".format(zone_spot_price)
if (zone_spot_price < spot_price):
spot_price = zone_spot_price
use_zone = zone_name
bid_price = spot_price * 2
    if use_zone is not None:
        print("Using zone " + use_zone + " bidding: " + "{0:.3f}".format(bid_price))
    else:
        print("No spot pricing available. Try a different instance type.")
        sys.exit(-1)
instance_groups = []
instance_groups.append({
'InstanceCount': 1,
'InstanceRole': 'MASTER',
'InstanceType': master_type,
'Market': 'ON_DEMAND',
'Name': 'Main node'
})
if (use_spot == 1):
instance_groups.append({
'InstanceCount': worker_nodes,
'InstanceRole': 'CORE',
'InstanceType': worker_type,
'Market': 'SPOT',
'BidPrice': '{0:.3f}'.format(bid_price),
'Name': 'Worker nodes'
})
else:
instance_groups.append({
'InstanceCount': worker_nodes,
'InstanceRole': 'CORE',
'InstanceType': worker_type,
'Market': 'ON_DEMAND',
'Name': 'Worker nodes'
})
subnet_id = ""
for z in zones:
if (z["zone"] == use_zone):
subnet_id = z["subnetId"]
job = {}
job['Name'] = cluster_name
job['LogUri'] = log_uri
job['ReleaseLabel'] = emr_version
job['Instances'] = {
'InstanceGroups': instance_groups,
'Ec2KeyName': key_name,
'KeepJobFlowAliveWhenNoSteps': True,
'TerminationProtected': False,
'Ec2SubnetId': subnet_id
}
job['Steps'] = [install_mrgeo_step]
job['BootstrapActions'] = bootstrap_actions
job['Applications'] = [
# {
# "Name": "Hadoop"
# }
# ,
{
"Name": "Spark"
}
]
job['Configurations'] = []
configs = job['Configurations']
localConfigs = config['localConfiguration']
yarnsite = {"Classification": "yarn-site",
"Properties": {
"yarn.nodemanager.pmem-check-enabled": "false",
"yarn.nodemanager.vmem-check-enabled": "false",
"yarn.scheduler.minimum-allocation-mb": "1024",
"yarn.nodemanager.aux-services": "spark_shuffle",
"yarn.nodemanager.aux-services.spark_shuffle.class": "org.apache.spark.network.yarn.YarnShuffleService"
},
"Configurations": []
}
yarnsiteprops = yarnsite['Properties']
if "yarn-site" in localConfigs:
# print(localConfigs['yarn-site'])
    for k, v in localConfigs['yarn-site'].items():
yarnsiteprops[k] = v
configs.append(yarnsite)
mapredsite = {"Classification": "mapred-site",
"Properties": {
},
"Configurations": []
}
mapredsiteprops = mapredsite['Properties']
if "mapred-site" in localConfigs:
    for k, v in localConfigs['mapred-site'].items():
mapredsiteprops[k] = v
configs.append(mapredsite)
# no core-site here, check the local config section
if "core-site" in localConfigs:
coresite = {"Classification": "core-site",
"Properties": {
"fs.s3n.multipart.uploads.enabled": "false",
"fs.s3.buckets.create.enabled": "false"
},
"Configurations": []
}
coresiteprops = coresite['Properties']
    for k, v in localConfigs['core-site'].items():
coresiteprops[k] = v
configs.append(coresite)
sparkdefaults = {"Classification": "spark-defaults",
"Properties": {
"spark.yarn.jar": "/usr/lib/spark/lib/spark-assembly.jar",
"spark.network.timeout": "600s",
"spark.driver.maxResultSize": "0",
"spark.buffer.pageSize":"4m"
},
"Configurations": []
}
sparkdefaultsprops = sparkdefaults['Properties']
if "spark-defaults" in localConfigs:
    for k, v in localConfigs['spark-defaults'].items():
sparkdefaultsprops[k] = v
configs.append(sparkdefaults)
hadoopenv = {"Classification": "hadoop-env",
"Properties": {},
"Configurations": [{
"Classification": "export",
"Configurations": [],
"Properties": {
"JAVA_HOME": "/usr/lib/jvm/java-1.8.0"
}
}
]
}
configs.append(hadoopenv)
sparkenv = {"Classification": "spark-env",
"Properties": {},
"Configurations": [{
"Classification": "export",
"Configurations": [],
"Properties": {
"JAVA_HOME": "/usr/lib/jvm/java-1.8.0"
}
}
]
}
configs.append(sparkenv)
job['JobFlowRole'] = 'EMR_EC2_DefaultRole'
job['ServiceRole'] = 'EMR_DefaultRole'
job['VisibleToAllUsers'] = True
job['Tags'] = config["tags"]
response = emr.run_job_flow(**job)
cluster_id = response['JobFlowId']
cluster_info = emr.describe_cluster(ClusterId=cluster_id)
status = cluster_info['Cluster']['Status']
curr_state = "CREATION"
state_start_time = timeit.default_timer()
print(cluster_name + ' (' + cluster_id + ') - ' + start_time.strftime('%I:%M:%S %p'))
master_count = -1
worker_count = -1
while (status['State'] != 'TERMINATED' and status['State'] != 'TERMINATED_WITH_ERRORS'):
cluster_info = emr.describe_cluster(ClusterId=cluster_id)
master_info, worker_info = get_instance_info(emr, cluster_id)
status = cluster_info['Cluster']['Status']
if status['State'] != curr_state:
curr_time = timeit.default_timer()
print(' ' + status['State'] + ' - ' + datetime.today().strftime(
'%I:%M:%S %p') + ' ' + curr_state + ' took {:.0f}'.format(curr_time - state_start_time) + ' sec')
curr_state = status['State']
state_start_time = curr_time
if master_count != master_info[2] or worker_count != worker_info[2]:
print(' Master node ' + str(master_info[2]) + '/' + str(master_info[1]) + ' Worker nodes ' + str(
worker_info[2]) + '/' + str(worker_info[1]))
master_count = master_info[2]
worker_count = worker_info[2]
if status['State'] != 'WAITING':
time.sleep(5) # Wait before checking status again
else:
time.sleep(30)
end_time = datetime.today()
total_sec = (end_time - start_time).total_seconds()
hours, remainder = divmod(total_sec, 3600)
minutes, seconds = divmod(remainder, 60)
print(cluster_name + ' ran {:.0f}:{:02.0f}:{:02.0f}'.format(hours, minutes, seconds))
|
|
from datetime import datetime
import re
import traceback
import warnings
import numpy as np
import pandas as pd
from collections import defaultdict
from pandas.tslib import OutOfBoundsDatetime
from .core import indexing, ops, utils
from .core.formatting import format_timestamp, first_n_items
from .core.variable import as_variable, Variable
from .core.pycompat import (iteritems, bytes_type, unicode_type, OrderedDict,
PY3)
# standard calendars recognized by netcdftime
_STANDARD_CALENDARS = set(['standard', 'gregorian', 'proleptic_gregorian'])
def mask_and_scale(array, fill_value=None, scale_factor=None, add_offset=None,
dtype=float):
"""Scale and mask array values according to CF conventions for packed and
    missing values.
First, values equal to the fill_value are replaced by NaN. Then, new values
are given by the formula:
original_values * scale_factor + add_offset
Parameters
----------
array : array-like
Original array of values to wrap
fill_value : number, optional
All values equal to fill_value in the original array are replaced
by NaN.
scale_factor : number, optional
Multiply entries in the original array by this number.
add_offset : number, optional
After applying scale_factor, add this number to entries in the
original array.
Returns
-------
scaled : np.ndarray
Array of masked and scaled values.
References
----------
http://www.unidata.ucar.edu/software/netcdf/docs/BestPractices.html
"""
# by default, cast to float to ensure NaN is meaningful
values = np.array(array, dtype=dtype, copy=True)
if fill_value is not None and not pd.isnull(fill_value):
if values.ndim > 0:
values[values == fill_value] = np.nan
elif values == fill_value:
values = np.array(np.nan)
if scale_factor is not None:
values *= scale_factor
if add_offset is not None:
values += add_offset
return values
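# A minimal sketch of the unpacking formula above (values are illustrative):
#   mask_and_scale(np.array([-99, 0, 100]), fill_value=-99,
#                  scale_factor=0.01, add_offset=1)
#   -> array([nan, 1., 2.])   (repr formatting approximate)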
def _netcdf_to_numpy_timeunit(units):
units = units.lower()
if not units.endswith('s'):
units = '%ss' % units
return {'microseconds': 'us', 'milliseconds': 'ms', 'seconds': 's',
'minutes': 'm', 'hours': 'h', 'days': 'D'}[units]
def _unpack_netcdf_time_units(units):
# CF datetime units follow the format: "UNIT since DATE"
# this parses out the unit and date allowing for extraneous
# whitespace.
matches = re.match('(.+) since (.+)', units)
if not matches:
raise ValueError('invalid time units: %s' % units)
delta_units, ref_date = [s.strip() for s in matches.groups()]
return delta_units, ref_date
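# For example, _unpack_netcdf_time_units('days since 1950-01-01') returns
# ('days', '1950-01-01').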
def _decode_datetime_with_netcdf4(num_dates, units, calendar):
import netCDF4 as nc4
dates = np.asarray(nc4.num2date(num_dates, units, calendar))
if (dates[np.nanargmin(num_dates)].year < 1678
or dates[np.nanargmax(num_dates)].year >= 2262):
warnings.warn('Unable to decode time axis into full '
'numpy.datetime64 objects, continuing using dummy '
'netCDF4.datetime objects instead, reason: dates out'
' of range', RuntimeWarning, stacklevel=3)
else:
try:
dates = nctime_to_nptime(dates)
except ValueError as e:
warnings.warn('Unable to decode time axis into full '
'numpy.datetime64 objects, continuing using '
'dummy netCDF4.datetime objects instead, reason:'
'{0}'.format(e), RuntimeWarning, stacklevel=3)
return dates
def decode_cf_datetime(num_dates, units, calendar=None):
"""Given an array of numeric dates in netCDF format, convert it into a
numpy array of date time objects.
For standard (Gregorian) calendars, this function uses vectorized
operations, which makes it much faster than netCDF4.num2date. In such a
case, the returned array will be of type np.datetime64.
See also
--------
netCDF4.num2date
"""
num_dates = np.asarray(num_dates, dtype=float)
flat_num_dates = num_dates.ravel()
if calendar is None:
calendar = 'standard'
delta, ref_date = _unpack_netcdf_time_units(units)
try:
if calendar not in _STANDARD_CALENDARS:
raise OutOfBoundsDatetime
delta = _netcdf_to_numpy_timeunit(delta)
ref_date = pd.Timestamp(ref_date)
dates = (pd.to_timedelta(flat_num_dates, delta) + ref_date).values
# ValueError is raised by pd.Timestamp for non-ISO timestamp strings,
# in which case we fall back to using netCDF4
except (OutOfBoundsDatetime, ValueError, OverflowError):
dates = _decode_datetime_with_netcdf4(flat_num_dates, units, calendar)
return dates.reshape(num_dates.shape)
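# Example of the vectorized (standard calendar) path:
#   decode_cf_datetime([0, 1, 2], 'days since 2000-01-01')
#   -> a datetime64[ns] array for 2000-01-01, 2000-01-02 and 2000-01-03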
def _asarray_or_scalar(x):
x = np.asarray(x)
if x.ndim > 0:
return x
else:
return x.item()
def decode_cf_timedelta(num_timedeltas, units):
"""Given an array of numeric timedeltas in netCDF format, convert it into a
numpy timedelta64[ns] array.
"""
num_timedeltas = _asarray_or_scalar(num_timedeltas)
units = _netcdf_to_numpy_timeunit(units)
result = pd.to_timedelta(num_timedeltas, unit=units, box=False)
# NaT is returned unboxed with wrong units; this should be fixed in pandas
if result.dtype != 'timedelta64[ns]':
result = result.astype('timedelta64[ns]')
return result
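# Example: decode_cf_timedelta([1, 2], 'hours') yields a timedelta64[ns]
# array equivalent to [1 hour, 2 hours].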
TIME_UNITS = frozenset(['days', 'hours', 'minutes', 'seconds',
'milliseconds', 'microseconds'])
def _infer_time_units_from_diff(unique_timedeltas):
for time_unit, delta in [('days', 86400), ('hours', 3600),
('minutes', 60), ('seconds', 1)]:
unit_delta = np.timedelta64(10 ** 9 * delta, 'ns')
diffs = unique_timedeltas / unit_delta
if np.all(diffs == diffs.astype(int)):
return time_unit
return 'seconds'
def infer_datetime_units(dates):
"""Given an array of datetimes, returns a CF compatible time-unit string of
the form "{time_unit} since {date[0]}", where `time_unit` is 'days',
'hours', 'minutes' or 'seconds' (the first one that can evenly divide all
unique time deltas in `dates`)
"""
dates = pd.to_datetime(np.asarray(dates).ravel(), box=False)
unique_timedeltas = np.unique(np.diff(dates[pd.notnull(dates)]))
units = _infer_time_units_from_diff(unique_timedeltas)
return '%s since %s' % (units, pd.Timestamp(dates[0]))
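# Example: for daily dates starting at 2000-01-01, this returns
# 'days since 2000-01-01 00:00:00'.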
def infer_timedelta_units(deltas):
"""Given an array of timedeltas, returns a CF compatible time-unit from
    {'days', 'hours', 'minutes', 'seconds'} (the first one that can evenly
divide all unique time deltas in `deltas`)
"""
deltas = pd.to_timedelta(np.asarray(deltas).ravel(), box=False)
unique_timedeltas = np.unique(deltas[pd.notnull(deltas)])
units = _infer_time_units_from_diff(unique_timedeltas)
return units
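# Example: for deltas of 2 and 4 hours, this returns 'hours'.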
def nctime_to_nptime(times):
"""Given an array of netCDF4.datetime objects, return an array of
numpy.datetime64 objects of the same size"""
times = np.asarray(times)
new = np.empty(times.shape, dtype='M8[ns]')
for i, t in np.ndenumerate(times):
dt = datetime(t.year, t.month, t.day, t.hour, t.minute, t.second)
new[i] = np.datetime64(dt)
return new
def _cleanup_netcdf_time_units(units):
delta, ref_date = _unpack_netcdf_time_units(units)
try:
units = '%s since %s' % (delta, format_timestamp(ref_date))
except OutOfBoundsDatetime:
# don't worry about reifying the units if they're out of bounds
pass
return units
def _encode_datetime_with_netcdf4(dates, units, calendar):
"""Fallback method for encoding dates using netCDF4-python.
This method is more flexible than xray's parsing using datetime64[ns]
arrays but also slower because it loops over each element.
"""
import netCDF4 as nc4
if np.issubdtype(dates.dtype, np.datetime64):
# numpy's broken datetime conversion only works for us precision
dates = dates.astype('M8[us]').astype(datetime)
def encode_datetime(d):
return np.nan if d is None else nc4.date2num(d, units, calendar)
return np.vectorize(encode_datetime)(dates)
def encode_cf_datetime(dates, units=None, calendar=None):
"""Given an array of datetime objects, returns the tuple `(num, units,
    calendar)` suitable for a CF compliant time variable.
Unlike `date2num`, this function can handle datetime64 arrays.
See also
--------
netCDF4.date2num
"""
dates = np.asarray(dates)
if units is None:
units = infer_datetime_units(dates)
else:
units = _cleanup_netcdf_time_units(units)
if calendar is None:
calendar = 'proleptic_gregorian'
delta, ref_date = _unpack_netcdf_time_units(units)
try:
if calendar not in _STANDARD_CALENDARS or dates.dtype.kind == 'O':
# parse with netCDF4 instead
raise OutOfBoundsDatetime
assert dates.dtype == 'datetime64[ns]'
delta_units = _netcdf_to_numpy_timeunit(delta)
time_delta = np.timedelta64(1, delta_units).astype('timedelta64[ns]')
ref_date = np.datetime64(pd.Timestamp(ref_date))
num = (dates - ref_date) / time_delta
except (OutOfBoundsDatetime, ValueError, OverflowError):
num = _encode_datetime_with_netcdf4(dates, units, calendar)
return (num, units, calendar)
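# Example of the fast datetime64 path (values illustrative, repr approximate):
#   encode_cf_datetime(pd.to_datetime(['2000-01-01', '2000-01-02']))
#   -> (array([0., 1.]), 'days since 2000-01-01 00:00:00', 'proleptic_gregorian')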
def encode_cf_timedelta(timedeltas, units=None):
if units is None:
units = infer_timedelta_units(timedeltas)
np_unit = _netcdf_to_numpy_timeunit(units)
num = 1.0 * timedeltas / np.timedelta64(1, np_unit)
num = np.where(pd.isnull(timedeltas), np.nan, num)
int_num = np.asarray(num, dtype=np.int64)
if (num == int_num).all():
num = int_num
return (num, units)
class MaskedAndScaledArray(utils.NDArrayMixin):
"""Wrapper around array-like objects to create a new indexable object where
    values, when accessed, are automatically scaled and masked according to
CF conventions for packed and missing data values.
New values are given by the formula:
original_values * scale_factor + add_offset
Values can only be accessed via `__getitem__`:
>>> x = MaskedAndScaledArray(np.array([-99, -1, 0, 1, 2]), -99, 0.01, 1)
>>> x
MaskedAndScaledArray(array([-99, -1, 0, 1, 2]), fill_value=-99,
scale_factor=0.01, add_offset=1)
>>> x[:]
    array([ nan, 0.99, 1. , 1.01, 1.02])
References
----------
http://www.unidata.ucar.edu/software/netcdf/docs/BestPractices.html
"""
def __init__(self, array, fill_value=None, scale_factor=None,
add_offset=None, dtype=float):
"""
Parameters
----------
array : array-like
Original array of values to wrap
fill_value : number, optional
All values equal to fill_value in the original array are replaced
by NaN.
scale_factor : number, optional
Multiply entries in the original array by this number.
add_offset : number, optional
After applying scale_factor, add this number to entries in the
original array.
"""
self.array = array
self.fill_value = fill_value
self.scale_factor = scale_factor
self.add_offset = add_offset
self._dtype = dtype
@property
def dtype(self):
return np.dtype(self._dtype)
def __getitem__(self, key):
return mask_and_scale(self.array[key], self.fill_value,
self.scale_factor, self.add_offset, self._dtype)
def __repr__(self):
return ("%s(%r, fill_value=%r, scale_factor=%r, add_offset=%r, "
"dtype=%r)" %
(type(self).__name__, self.array, self.fill_value,
self.scale_factor, self.add_offset, self._dtype))
class DecodedCFDatetimeArray(utils.NDArrayMixin):
"""Wrapper around array-like objects to create a new indexable object where
values, when accessed, are automatically converted into datetime objects
using decode_cf_datetime.
"""
def __init__(self, array, units, calendar=None):
self.array = array
self.units = units
self.calendar = calendar
# Verify at least one date can be decoded successfully.
# Otherwise, tracebacks end up swallowed by Dataset.__repr__ when users
# try to view their lazily decoded array.
example_value = first_n_items(array, 1) or 0
try:
result = decode_cf_datetime(example_value, units, calendar)
except Exception:
calendar_msg = ('the default calendar' if calendar is None
else 'calendar %r' % calendar)
msg = ('unable to decode time units %r with %s. Try '
'opening your dataset with decode_times=False.'
% (units, calendar_msg))
if not PY3:
msg += ' Full traceback:\n' + traceback.format_exc()
raise ValueError(msg)
else:
self._dtype = getattr(result, 'dtype', np.dtype('object'))
@property
def dtype(self):
return self._dtype
def __getitem__(self, key):
return decode_cf_datetime(self.array[key], units=self.units,
calendar=self.calendar)
class DecodedCFTimedeltaArray(utils.NDArrayMixin):
"""Wrapper around array-like objects to create a new indexable object where
values, when accessed, are automatically converted into timedelta objects
using decode_cf_timedelta.
"""
def __init__(self, array, units):
self.array = array
self.units = units
@property
def dtype(self):
return np.dtype('timedelta64[ns]')
def __getitem__(self, key):
return decode_cf_timedelta(self.array[key], units=self.units)
class CharToStringArray(utils.NDArrayMixin):
"""Wrapper around array-like objects to create a new indexable object where
values, when accessed, are automatically concatenated along the last
dimension.
>>> CharToStringArray(np.array(['a', 'b', 'c']))[:]
array('abc',
dtype='|S3')
"""
def __init__(self, array):
"""
Parameters
----------
array : array-like
Original array of values to wrap.
"""
self.array = array
@property
def dtype(self):
return np.dtype('S' + str(self.array.shape[-1]))
@property
def shape(self):
return self.array.shape[:-1]
def __str__(self):
if self.ndim == 0:
# always return a unicode str if it's a single item for py3 compat
return self[...].item().decode('utf-8')
else:
return repr(self)
def __repr__(self):
return '%s(%r)' % (type(self).__name__, self.array)
def __getitem__(self, key):
if self.array.ndim == 0:
values = self.array[key]
else:
# require slicing the last dimension completely
key = indexing.expanded_indexer(key, self.array.ndim)
if key[-1] != slice(None):
raise IndexError('too many indices')
values = char_to_string(self.array[key])
return values
class NativeEndiannessArray(utils.NDArrayMixin):
"""Decode arrays on the fly from non-native to native endianness
This is useful for decoding arrays from netCDF3 files (which are all
big endian) into native endianness, so they can be used with Cython
functions, such as those found in bottleneck and pandas.
>>> x = np.arange(5, dtype='>i2')
>>> x.dtype
dtype('>i2')
    >>> NativeEndiannessArray(x).dtype
    dtype('int16')
    >>> NativeEndiannessArray(x)[:].dtype
dtype('int16')
"""
def __init__(self, array):
self.array = array
@property
def dtype(self):
return np.dtype(self.array.dtype.kind + str(self.array.dtype.itemsize))
def __getitem__(self, key):
return np.asarray(self.array[key], dtype=self.dtype)
def string_to_char(arr):
"""Like netCDF4.stringtochar, but faster and more flexible.
"""
# ensure the array is contiguous
arr = np.array(arr, copy=False, order='C')
kind = arr.dtype.kind
if kind not in ['U', 'S']:
raise ValueError('argument must be a string')
return arr.reshape(arr.shape + (1,)).view(kind + '1')
def char_to_string(arr):
"""Like netCDF4.chartostring, but faster and more flexible.
"""
# based on: http://stackoverflow.com/a/10984878/809705
arr = np.array(arr, copy=False, order='C')
kind = arr.dtype.kind
if kind not in ['U', 'S']:
raise ValueError('argument must be a string')
return arr.view(kind + str(arr.shape[-1]))[..., 0]
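# Example round-trip (illustrative): string_to_char(np.array(['abc'], dtype='S3'))
# has shape (1, 3) and dtype 'S1'; char_to_string of that result recovers the
# original 'abc' array with dtype 'S3'.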
def safe_setitem(dest, key, value):
if key in dest:
raise ValueError('Failed hard to prevent overwriting key %r' % key)
dest[key] = value
def pop_to(source, dest, key, default=None):
"""
A convenience function which pops a key k from source to dest.
None values are not passed on. If k already exists in dest an
error is raised.
"""
value = source.pop(key, None)
if value is not None:
safe_setitem(dest, key, value)
return value
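# Example: pop_to(attributes, encoding, '_FillValue') removes '_FillValue' from
# attributes, stores it in encoding, and returns its value; it raises ValueError
# if encoding already contains that key, and skips None values entirely.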
def _var_as_tuple(var):
return var.dims, var.data, var.attrs.copy(), var.encoding.copy()
def maybe_encode_datetime(var):
if np.issubdtype(var.dtype, np.datetime64):
dims, data, attrs, encoding = _var_as_tuple(var)
(data, units, calendar) = encode_cf_datetime(
data, encoding.pop('units', None), encoding.pop('calendar', None))
safe_setitem(attrs, 'units', units)
safe_setitem(attrs, 'calendar', calendar)
var = Variable(dims, data, attrs, encoding)
return var
def maybe_encode_timedelta(var):
if np.issubdtype(var.dtype, np.timedelta64):
dims, data, attrs, encoding = _var_as_tuple(var)
data, units = encode_cf_timedelta(
data, encoding.pop('units', None))
safe_setitem(attrs, 'units', units)
var = Variable(dims, data, attrs, encoding)
return var
def maybe_encode_offset_and_scale(var, needs_copy=True):
if any(k in var.encoding for k in ['add_offset', 'scale_factor']):
dims, data, attrs, encoding = _var_as_tuple(var)
data = data.astype(dtype=float, copy=needs_copy)
needs_copy = False
if 'add_offset' in encoding:
data -= pop_to(encoding, attrs, 'add_offset')
if 'scale_factor' in encoding:
data /= pop_to(encoding, attrs, 'scale_factor')
var = Variable(dims, data, attrs, encoding)
return var, needs_copy
def maybe_encode_fill_value(var, needs_copy=True):
# replace NaN with the fill value
if '_FillValue' in var.encoding:
dims, data, attrs, encoding = _var_as_tuple(var)
fill_value = pop_to(encoding, attrs, '_FillValue')
if not pd.isnull(fill_value):
data = ops.fillna(data, fill_value)
needs_copy = False
var = Variable(dims, data, attrs, encoding)
return var, needs_copy
def maybe_encode_dtype(var):
if 'dtype' in var.encoding:
dims, data, attrs, encoding = _var_as_tuple(var)
dtype = np.dtype(encoding.pop('dtype'))
if dtype != var.dtype and dtype.kind != 'O':
if np.issubdtype(dtype, int):
data = ops.around(data)
if dtype == 'S1' and data.dtype != 'S1':
data = string_to_char(np.asarray(data, 'S'))
dims = dims + ('string%s' % data.shape[-1],)
data = data.astype(dtype=dtype)
var = Variable(dims, data, attrs, encoding)
return var
def _infer_dtype(array):
"""Given an object array with no missing values, infer its dtype from its
first element
"""
if array.size == 0:
dtype = np.dtype(float)
else:
dtype = np.array(array[(0,) * array.ndim]).dtype
if dtype.kind in ['S', 'U']:
# don't just use inferred dtype to avoid truncating arrays to
# the length of their first element
dtype = np.dtype(dtype.kind)
elif dtype.kind == 'O':
raise ValueError('unable to infer dtype; xray cannot '
'serialize arbitrary Python objects')
return dtype
def ensure_dtype_not_object(var):
# TODO: move this from conventions to backends? (it's not CF related)
if var.dtype.kind == 'O':
dims, data, attrs, encoding = _var_as_tuple(var)
missing = pd.isnull(data)
if missing.any():
# nb. this will fail for dask.array data
non_missing_values = data[~missing]
inferred_dtype = _infer_dtype(non_missing_values)
if inferred_dtype.kind in ['S', 'U']:
# There is no safe bit-pattern for NA in typical binary string
                # formats, so we can't set a fill_value. Unfortunately, this
# means we won't be able to restore string arrays with missing
# values.
fill_value = ''
else:
# insist on using float for numeric values
if not np.issubdtype(inferred_dtype, float):
inferred_dtype = np.dtype(float)
fill_value = np.nan
data = np.array(data, dtype=inferred_dtype, copy=True)
data[missing] = fill_value
else:
data = data.astype(dtype=_infer_dtype(data))
var = Variable(dims, data, attrs, encoding)
return var
def encode_cf_variable(var, needs_copy=True):
"""
    Converts a Variable into a Variable which follows some
of the CF conventions:
- Nans are masked using _FillValue (or the deprecated missing_value)
- Rescaling via: scale_factor and add_offset
- datetimes are converted to the CF 'units since time' format
- dtype encodings are enforced.
Parameters
----------
var : xray.Variable
A variable holding un-encoded data.
Returns
-------
out : xray.Variable
A variable which has been encoded as described above.
"""
var = maybe_encode_datetime(var)
var = maybe_encode_timedelta(var)
var, needs_copy = maybe_encode_offset_and_scale(var, needs_copy)
var, needs_copy = maybe_encode_fill_value(var, needs_copy)
var = maybe_encode_dtype(var)
var = ensure_dtype_not_object(var)
return var
def decode_cf_variable(var, concat_characters=True, mask_and_scale=True,
decode_times=True, decode_endianness=True):
"""
Decodes a variable which may hold CF encoded information.
This includes variables that have been masked and scaled, which
hold CF style time variables (this is almost always the case if
the dataset has been serialized) and which have strings encoded
as character arrays.
Parameters
----------
var : Variable
A variable holding potentially CF encoded information.
concat_characters : bool
Should character arrays be concatenated to strings, for
example: ['h', 'e', 'l', 'l', 'o'] -> 'hello'
mask_and_scale: bool
Lazily scale (using scale_factor and add_offset) and mask
(using _FillValue).
decode_times : bool
Decode cf times ('hours since 2000-01-01') to np.datetime64.
decode_endianness : bool
Decode arrays from non-native to native endianness.
Returns
-------
out : Variable
A variable holding the decoded equivalent of var
"""
# use _data instead of data so as not to trigger loading data
var = as_variable(var)
data = var._data
dimensions = var.dims
attributes = var.attrs.copy()
encoding = var.encoding.copy()
original_dtype = data.dtype
if concat_characters:
if data.dtype.kind == 'S' and data.dtype.itemsize == 1:
dimensions = dimensions[:-1]
data = CharToStringArray(data)
if mask_and_scale:
if 'missing_value' in attributes:
# missing_value is deprecated, but we still want to support it as
# an alias for _FillValue.
if ('_FillValue' in attributes
and not utils.equivalent(attributes['_FillValue'],
attributes['missing_value'])):
raise ValueError("Discovered conflicting _FillValue "
"and missing_value. Considering "
"opening the offending dataset using "
"decode_cf=False, corrected the attributes",
"and decoding explicitly using "
"xray.conventions.decode_cf(ds)")
attributes['_FillValue'] = attributes.pop('missing_value')
fill_value = pop_to(attributes, encoding, '_FillValue')
scale_factor = pop_to(attributes, encoding, 'scale_factor')
add_offset = pop_to(attributes, encoding, 'add_offset')
if ((fill_value is not None and not pd.isnull(fill_value))
or scale_factor is not None or add_offset is not None):
if isinstance(fill_value, (bytes_type, unicode_type)):
dtype = object
else:
dtype = float
data = MaskedAndScaledArray(data, fill_value, scale_factor,
add_offset, dtype)
if decode_times and 'units' in attributes:
if 'since' in attributes['units']:
# datetime
units = pop_to(attributes, encoding, 'units')
calendar = pop_to(attributes, encoding, 'calendar')
data = DecodedCFDatetimeArray(data, units, calendar)
elif attributes['units'] in TIME_UNITS:
# timedelta
units = pop_to(attributes, encoding, 'units')
data = DecodedCFTimedeltaArray(data, units)
if decode_endianness and not data.dtype.isnative:
# do this last, so it's only done if we didn't already unmask/scale
data = NativeEndiannessArray(data)
original_dtype = data.dtype
if 'dtype' in encoding:
if original_dtype != encoding['dtype']:
warnings.warn("CF decoding is overwriting dtype")
else:
encoding['dtype'] = original_dtype
return Variable(dimensions, indexing.LazilyIndexedArray(data),
attributes, encoding=encoding)
def decode_cf_variables(variables, attributes, concat_characters=True,
mask_and_scale=True, decode_times=True,
decode_coords=True):
"""
    Decode several CF encoded variables.
See: decode_cf_variable
"""
dimensions_used_by = defaultdict(list)
for v in variables.values():
for d in v.dims:
dimensions_used_by[d].append(v)
def stackable(dim):
# figure out if a dimension can be concatenated over
if dim in variables:
return False
for v in dimensions_used_by[dim]:
if v.dtype.kind != 'S' or dim != v.dims[-1]:
return False
return True
coord_names = set()
new_vars = OrderedDict()
for k, v in iteritems(variables):
concat = (concat_characters and v.dtype.kind == 'S' and v.ndim > 0 and
stackable(v.dims[-1]))
new_vars[k] = decode_cf_variable(
v, concat_characters=concat, mask_and_scale=mask_and_scale,
decode_times=decode_times)
if decode_coords:
var_attrs = new_vars[k].attrs
if 'coordinates' in var_attrs:
var_coord_names = var_attrs['coordinates'].split()
if all(k in variables for k in var_coord_names):
coord_names.update(var_coord_names)
del var_attrs['coordinates']
if decode_coords and 'coordinates' in attributes:
attributes = OrderedDict(attributes)
coord_names.update(attributes.pop('coordinates').split())
return new_vars, attributes, coord_names
def decode_cf(obj, concat_characters=True, mask_and_scale=True,
decode_times=True, decode_coords=True):
"""Decode the given Dataset or Datastore according to CF conventions into
a new Dataset.
Parameters
----------
obj : Dataset or DataStore
Object to decode.
concat_characters : bool, optional
Should character arrays be concatenated to strings, for
example: ['h', 'e', 'l', 'l', 'o'] -> 'hello'
mask_and_scale: bool, optional
Lazily scale (using scale_factor and add_offset) and mask
(using _FillValue).
decode_times : bool, optional
Decode cf times (e.g., integers since 'hours since 2000-01-01') to
np.datetime64.
decode_coords : bool, optional
Use the 'coordinates' attribute on variable (or the dataset itself) to
identify coordinates.
Returns
-------
decoded : Dataset
"""
from .core.dataset import Dataset
from .backends.common import AbstractDataStore
if isinstance(obj, Dataset):
vars = obj._variables
attrs = obj.attrs
extra_coords = set(obj.coords)
file_obj = obj._file_obj
elif isinstance(obj, AbstractDataStore):
vars, attrs = obj.load()
extra_coords = set()
file_obj = obj
else:
raise TypeError('can only decode Dataset or DataStore objects')
vars, attrs, coord_names = decode_cf_variables(
vars, attrs, concat_characters, mask_and_scale, decode_times,
decode_coords)
ds = Dataset(vars, attrs=attrs)
ds = ds.set_coords(coord_names.union(extra_coords))
ds._file_obj = file_obj
return ds
def cf_decoder(variables, attributes,
concat_characters=True, mask_and_scale=True,
decode_times=True):
"""
Decode a set of CF encoded variables and attributes.
    See also: decode_cf_variable
Parameters
----------
variables : dict
A dictionary mapping from variable name to xray.Variable
attributes : dict
A dictionary mapping from attribute name to value
concat_characters : bool
Should character arrays be concatenated to strings, for
example: ['h', 'e', 'l', 'l', 'o'] -> 'hello'
mask_and_scale: bool
Lazily scale (using scale_factor and add_offset) and mask
(using _FillValue).
decode_times : bool
Decode cf times ('hours since 2000-01-01') to np.datetime64.
Returns
-------
decoded_variables : dict
A dictionary mapping from variable name to xray.Variable objects.
decoded_attributes : dict
A dictionary mapping from attribute name to values.
"""
variables, attributes, _ = decode_cf_variables(
variables, attributes, concat_characters, mask_and_scale, decode_times)
return variables, attributes
def _encode_coordinates(variables, attributes, non_dim_coord_names):
# calculate global and variable specific coordinates
non_dim_coord_names = set(non_dim_coord_names)
global_coordinates = non_dim_coord_names.copy()
variable_coordinates = defaultdict(set)
for coord_name in non_dim_coord_names:
target_dims = variables[coord_name].dims
for k, v in variables.items():
if (k not in non_dim_coord_names and k not in v.dims
and any(d in target_dims for d in v.dims)):
variable_coordinates[k].add(coord_name)
global_coordinates.discard(coord_name)
variables = OrderedDict((k, v.copy(deep=False))
for k, v in variables.items())
# These coordinates are saved according to CF conventions
for var_name, coord_names in variable_coordinates.items():
attrs = variables[var_name].attrs
if 'coordinates' in attrs:
raise ValueError('cannot serialize coordinates because variable '
"%s already has an attribute 'coordinates'"
% var_name)
attrs['coordinates'] = ' '.join(map(str, coord_names))
# These coordinates are not associated with any particular variables, so we
# save them under a global 'coordinates' attribute so xray can roundtrip
# the dataset faithfully. Because this serialization goes beyond CF
# conventions, only do it if necessary.
# Reference discussion:
# http://mailman.cgd.ucar.edu/pipermail/cf-metadata/2014/057771.html
if global_coordinates:
attributes = OrderedDict(attributes)
if 'coordinates' in attributes:
raise ValueError('cannot serialize coordinates because the global '
"attribute 'coordinates' already exists")
attributes['coordinates'] = ' '.join(map(str, global_coordinates))
return variables, attributes
def encode_dataset_coordinates(dataset):
"""Encode coordinates on the given dataset object into variable specific
and global attributes.
When possible, this is done according to CF conventions.
Parameters
----------
dataset : Dataset
Object to encode.
Returns
-------
variables : dict
attrs : dict
"""
non_dim_coord_names = set(dataset.coords) - set(dataset.dims)
return _encode_coordinates(dataset._variables, dataset.attrs,
non_dim_coord_names=non_dim_coord_names)
def cf_encoder(variables, attributes):
"""
    A function which takes dicts of variables and attributes
and encodes them to conform to CF conventions as much
as possible. This includes masking, scaling, character
array handling, and CF-time encoding.
Decode a set of CF encoded variables and attributes.
    See also: decode_cf_variable
Parameters
----------
variables : dict
A dictionary mapping from variable name to xray.Variable
attributes : dict
A dictionary mapping from attribute name to value
Returns
-------
encoded_variables : dict
A dictionary mapping from variable name to xray.Variable,
encoded_attributes : dict
A dictionary mapping from attribute name to value
See also: encode_cf_variable
"""
new_vars = OrderedDict((k, encode_cf_variable(v))
for k, v in iteritems(variables))
return new_vars, attributes
|
|
"""Local Media Source Implementation."""
from __future__ import annotations
import logging
import mimetypes
from pathlib import Path
import shutil
from aiohttp import web
from aiohttp.web_request import FileField
import voluptuous as vol
from homeassistant.components import http, websocket_api
from homeassistant.components.media_player.const import MEDIA_CLASS_DIRECTORY
from homeassistant.components.media_player.errors import BrowseError
from homeassistant.core import HomeAssistant, callback
from homeassistant.exceptions import Unauthorized
from homeassistant.util import raise_if_invalid_filename, raise_if_invalid_path
from .const import DOMAIN, MEDIA_CLASS_MAP, MEDIA_MIME_TYPES
from .error import Unresolvable
from .models import BrowseMediaSource, MediaSource, MediaSourceItem, PlayMedia
MAX_UPLOAD_SIZE = 1024 * 1024 * 10
LOGGER = logging.getLogger(__name__)
@callback
def async_setup(hass: HomeAssistant) -> None:
"""Set up local media source."""
source = LocalSource(hass)
hass.data[DOMAIN][DOMAIN] = source
hass.http.register_view(LocalMediaView(hass, source))
hass.http.register_view(UploadMediaView(hass, source))
websocket_api.async_register_command(hass, websocket_remove_media)
class LocalSource(MediaSource):
"""Provide local directories as media sources."""
name: str = "Local Media"
def __init__(self, hass: HomeAssistant) -> None:
"""Initialize local source."""
super().__init__(DOMAIN)
self.hass = hass
@callback
def async_full_path(self, source_dir_id: str, location: str) -> Path:
"""Return full path."""
return Path(self.hass.config.media_dirs[source_dir_id], location)
@callback
def async_parse_identifier(self, item: MediaSourceItem) -> tuple[str, str]:
"""Parse identifier."""
if item.domain != DOMAIN:
raise Unresolvable("Unknown domain.")
source_dir_id, _, location = item.identifier.partition("/")
if source_dir_id not in self.hass.config.media_dirs:
raise Unresolvable("Unknown source directory.")
try:
raise_if_invalid_path(location)
except ValueError as err:
raise Unresolvable("Invalid path.") from err
return source_dir_id, location
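    # Illustrative: an item whose identifier is "local/recordings/front_door.mp4"
    # parses to ("local", "recordings/front_door.mp4"), assuming a media dir
    # named "local" is configured in hass.config.media_dirs.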
async def async_resolve_media(self, item: MediaSourceItem) -> PlayMedia:
"""Resolve media to a url."""
source_dir_id, location = self.async_parse_identifier(item)
path = self.async_full_path(source_dir_id, location)
mime_type, _ = mimetypes.guess_type(str(path))
assert isinstance(mime_type, str)
return PlayMedia(f"/media/{item.identifier}", mime_type)
async def async_browse_media(self, item: MediaSourceItem) -> BrowseMediaSource:
"""Return media."""
if item.identifier:
try:
source_dir_id, location = self.async_parse_identifier(item)
except Unresolvable as err:
raise BrowseError(str(err)) from err
else:
source_dir_id, location = None, ""
result = await self.hass.async_add_executor_job(
self._browse_media, source_dir_id, location
)
return result
def _browse_media(
self, source_dir_id: str | None, location: str
) -> BrowseMediaSource:
"""Browse media."""
# If only one media dir is configured, use that as the local media root
if source_dir_id is None and len(self.hass.config.media_dirs) == 1:
source_dir_id = list(self.hass.config.media_dirs)[0]
        # Multiple folders configured and the root is requested
if source_dir_id is None:
if location:
raise BrowseError("Folder not found.")
base = BrowseMediaSource(
domain=DOMAIN,
identifier="",
media_class=MEDIA_CLASS_DIRECTORY,
media_content_type=None,
title=self.name,
can_play=False,
can_expand=True,
children_media_class=MEDIA_CLASS_DIRECTORY,
)
base.children = [
self._browse_media(source_dir_id, "")
for source_dir_id in self.hass.config.media_dirs
]
return base
full_path = Path(self.hass.config.media_dirs[source_dir_id], location)
if not full_path.exists():
if location == "":
raise BrowseError("Media directory does not exist.")
raise BrowseError("Path does not exist.")
if not full_path.is_dir():
raise BrowseError("Path is not a directory.")
result = self._build_item_response(source_dir_id, full_path)
if not result:
raise BrowseError("Unknown source directory.")
return result
def _build_item_response(
self, source_dir_id: str, path: Path, is_child: bool = False
) -> BrowseMediaSource | None:
mime_type, _ = mimetypes.guess_type(str(path))
is_file = path.is_file()
is_dir = path.is_dir()
# Make sure it's a file or directory
if not is_file and not is_dir:
return None
# Check that it's a media file
if is_file and (
not mime_type or mime_type.split("/")[0] not in MEDIA_MIME_TYPES
):
return None
title = path.name
media_class = MEDIA_CLASS_DIRECTORY
if mime_type:
media_class = MEDIA_CLASS_MAP.get(
mime_type.split("/")[0], MEDIA_CLASS_DIRECTORY
)
media = BrowseMediaSource(
domain=DOMAIN,
identifier=f"{source_dir_id}/{path.relative_to(self.hass.config.media_dirs[source_dir_id])}",
media_class=media_class,
media_content_type=mime_type or "",
title=title,
can_play=is_file,
can_expand=is_dir,
)
if is_file or is_child:
return media
# Append first level children
media.children = []
for child_path in path.iterdir():
child = self._build_item_response(source_dir_id, child_path, True)
if child:
media.children.append(child)
# Sort children showing directories first, then by name
media.children.sort(key=lambda child: (child.can_play, child.title))
return media
class LocalMediaView(http.HomeAssistantView):
"""
Local Media Finder View.
Returns media files in config/media.
"""
url = "/media/{source_dir_id}/{location:.*}"
name = "media"
def __init__(self, hass: HomeAssistant, source: LocalSource) -> None:
"""Initialize the media view."""
self.hass = hass
self.source = source
async def get(
self, request: web.Request, source_dir_id: str, location: str
) -> web.FileResponse:
"""Start a GET request."""
try:
raise_if_invalid_path(location)
except ValueError as err:
raise web.HTTPBadRequest() from err
if source_dir_id not in self.hass.config.media_dirs:
raise web.HTTPNotFound()
media_path = self.source.async_full_path(source_dir_id, location)
# Check that the file exists
if not media_path.is_file():
raise web.HTTPNotFound()
# Check that it's a media file
mime_type, _ = mimetypes.guess_type(str(media_path))
if not mime_type or mime_type.split("/")[0] not in MEDIA_MIME_TYPES:
raise web.HTTPNotFound()
return web.FileResponse(media_path)
class UploadMediaView(http.HomeAssistantView):
"""View to upload images."""
url = "/api/media_source/local_source/upload"
name = "api:media_source:local_source:upload"
def __init__(self, hass: HomeAssistant, source: LocalSource) -> None:
"""Initialize the media view."""
self.hass = hass
self.source = source
self.schema = vol.Schema(
{
"media_content_id": str,
"file": FileField,
}
)
async def post(self, request: web.Request) -> web.Response:
"""Handle upload."""
if not request["hass_user"].is_admin:
raise Unauthorized()
# Increase max payload
request._client_max_size = MAX_UPLOAD_SIZE # pylint: disable=protected-access
try:
data = self.schema(dict(await request.post()))
except vol.Invalid as err:
LOGGER.error("Received invalid upload data: %s", err)
raise web.HTTPBadRequest() from err
try:
item = MediaSourceItem.from_uri(self.hass, data["media_content_id"])
except ValueError as err:
LOGGER.error("Received invalid upload data: %s", err)
raise web.HTTPBadRequest() from err
try:
source_dir_id, location = self.source.async_parse_identifier(item)
except Unresolvable as err:
LOGGER.error("Invalid local source ID")
raise web.HTTPBadRequest() from err
uploaded_file: FileField = data["file"]
if not uploaded_file.content_type.startswith(("image/", "video/", "audio/")):
LOGGER.error("Content type not allowed")
raise vol.Invalid("Only images and video are allowed")
try:
raise_if_invalid_filename(uploaded_file.filename)
except ValueError as err:
LOGGER.error("Invalid filename")
raise web.HTTPBadRequest() from err
try:
await self.hass.async_add_executor_job(
self._move_file,
self.source.async_full_path(source_dir_id, location),
uploaded_file,
)
except ValueError as err:
LOGGER.error("Moving upload failed: %s", err)
raise web.HTTPBadRequest() from err
return self.json(
{"media_content_id": f"{data['media_content_id']}/{uploaded_file.filename}"}
)
def _move_file( # pylint: disable=no-self-use
self, target_dir: Path, uploaded_file: FileField
) -> None:
"""Move file to target."""
if not target_dir.is_dir():
raise ValueError("Target is not an existing directory")
target_path = target_dir / uploaded_file.filename
target_path.relative_to(target_dir)
raise_if_invalid_path(str(target_path))
with target_path.open("wb") as target_fp:
shutil.copyfileobj(uploaded_file.file, target_fp)
@websocket_api.websocket_command(
{
vol.Required("type"): "media_source/local_source/remove",
vol.Required("media_content_id"): str,
}
)
@websocket_api.require_admin
@websocket_api.async_response
async def websocket_remove_media(
hass: HomeAssistant, connection: websocket_api.ActiveConnection, msg: dict
) -> None:
"""Remove media."""
try:
item = MediaSourceItem.from_uri(hass, msg["media_content_id"])
except ValueError as err:
connection.send_error(msg["id"], websocket_api.ERR_INVALID_FORMAT, str(err))
return
source: LocalSource = hass.data[DOMAIN][DOMAIN]
try:
source_dir_id, location = source.async_parse_identifier(item)
except Unresolvable as err:
connection.send_error(msg["id"], websocket_api.ERR_INVALID_FORMAT, str(err))
return
item_path = source.async_full_path(source_dir_id, location)
def _do_delete() -> tuple[str, str] | None:
if not item_path.exists():
return websocket_api.ERR_NOT_FOUND, "Path does not exist"
if not item_path.is_file():
return websocket_api.ERR_NOT_SUPPORTED, "Path is not a file"
item_path.unlink()
return None
try:
error = await hass.async_add_executor_job(_do_delete)
except OSError as err:
error = (websocket_api.ERR_UNKNOWN_ERROR, str(err))
if error:
connection.send_error(msg["id"], *error)
else:
connection.send_result(msg["id"])
|
|
"""
Make sure that Pipe and Pipeline classes work
TODO: Make below code work
if not len_pipe.has_next():
break
"""
from django.test import override_settings
from django.test import TestCase
from ozpcenter.recommend.graph_factory import GraphFactory
from ozpcenter.recommend import recommend_utils
from ozpcenter.pipe import pipes
from ozpcenter.pipe import pipeline
from ozpcenter.recommend.graph import Graph
@override_settings(ES_ENABLED=False)
class PipelineTest(TestCase):
def setUp(self):
"""
setUp is invoked before each test method
"""
self.graph_test_1 = Graph()
self.graph_test_1.add_vertex('test_label', {'test_field': 1})
self.graph_test_1.add_vertex('test_label', {'test_field': 2})
self.graph_test_2 = Graph()
self.graph_test_2.add_vertex('test_label', {'test_field': 8})
self.graph_test_2.add_vertex('test_label', {'test_field': 10})
self.graph_test_2.add_vertex('test_label', {'test_field': 12, 'time': 'now'})
@classmethod
def setUpTestData(cls):
"""
Set up test data for the whole TestCase (only run once for the TestCase)
"""
pass
def _iterate_pipeline(self, current_pipeline):
list_out = []
try:
while current_pipeline.has_next():
current_object = current_pipeline.next()
list_out.append(current_object)
except recommend_utils.FastNoSuchElementException:
# Ignore FastNoSuchElementException
pass
return list_out
def test_pipeline_limit(self):
pipeline_test = pipeline.Pipeline(recommend_utils.ListIterator([1, 2, 3, 4, 5, 6, 7]),
[pipes.LimitPipe(5)])
self.assertEqual(pipeline_test.to_list(), [1, 2, 3, 4, 5])
pipeline_test = pipeline.Pipeline(recommend_utils.ListIterator([1, 2, 3, 4, 5, 6, 7]),
[pipes.LimitPipe(2)])
self.assertEqual(pipeline_test.to_list(), [1, 2])
pipeline_test = pipeline.Pipeline(recommend_utils.ListIterator([1, 2, 3]),
[pipes.LimitPipe(5)])
self.assertEqual(pipeline_test.to_list(), [1, 2, 3])
def test_pipeline_exclude_limit(self):
pipeline_test = pipeline.Pipeline(recommend_utils.ListIterator([1, 2, 3, 4, 5, 6, 7]),
[pipes.ExcludePipe([1]),
pipes.LimitPipe(5)])
self.assertEqual(pipeline_test.to_list(), [2, 3, 4, 5, 6])
def test_pipeline_capitalize(self):
caps_pipe = pipes.CapitalizePipe()
pipeline_test = pipeline.Pipeline()
pipeline_test.add_pipe(caps_pipe)
pipeline_test.set_starts(recommend_utils.ListIterator(['this', 'is', 'the', 'test']))
list_out = self._iterate_pipeline(pipeline_test)
self.assertEqual(list_out, ['THIS', 'IS', 'THE', 'TEST'])
def test_pipeline_capitalize_len(self):
pipeline_test = pipeline.Pipeline(recommend_utils.ListIterator(['this', 'is', 'the', 'test']),
[pipes.CapitalizePipe(),
pipes.LenPipe()])
list_out = self._iterate_pipeline(pipeline_test)
self.assertEqual(list_out, [4, 2, 3, 4])
def test_pipeline_capitalize_len_list(self):
pipeline_test = pipeline.Pipeline(recommend_utils.ListIterator(['this', 'is', 'the', 'test']),
[pipes.CapitalizePipe(),
pipes.LenPipe()])
self.assertEqual(pipeline_test.to_list(), [4, 2, 3, 4])
def test_pipeline_graph_vertex_while(self):
pipeline_test = pipeline.Pipeline(self.graph_test_1.get_vertices_iterator(),
[pipes.GraphVertexPipe()])
list_out = self._iterate_pipeline(pipeline_test)
        self.assertEqual(str(list_out), '[Vertex(test_label), Vertex(test_label)]')
# self.assertEqual(list_out, [1, 2])
def test_pipeline_to_list_exception(self):
pipeline_test = pipeline.Pipeline()
with self.assertRaisesRegex(Exception, 'No Start Iterator set') as err:
pipeline_test.to_list()
def test_pipeline_graph_vertex_chain_to_list(self):
pipeline_test = pipeline.Pipeline(self.graph_test_1.get_vertices_iterator(),
[pipes.GraphVertexPipe(),
pipes.ElementIdPipe()])
self.assertEqual(pipeline_test.to_list(), [1, 2])
self.assertEqual(str(pipeline_test), '[DictKeyValueIterator(2), GraphVertexPipe(), ElementIdPipe()]')
def test_pipeline_graph_vertex_chain_dict_to_list(self):
pipeline_test = pipeline.Pipeline(self.graph_test_2.get_vertices_iterator(),
[pipes.GraphVertexPipe(),
pipes.ElementPropertiesPipe()])
expected_output = [
{'test_field': 8},
{'test_field': 10},
{'test_field': 12, 'time': 'now'}
]
self.assertEqual(pipeline_test.to_list(), expected_output)
self.assertEqual(str(pipeline_test), '[DictKeyValueIterator(3), GraphVertexPipe(), ElementPropertiesPipe(internal:False)]')
def test_pipeline_graph_vertex_chain_dict_to_list_internal(self):
        pipeline_test = pipeline.Pipeline(self.graph_test_2.get_vertices_iterator(),
[pipes.GraphVertexPipe(),
pipes.ElementPropertiesPipe(internal=True)])
expected_output = [
{'_id': 1, '_label': 'test_label', 'test_field': 8},
{'_id': 2, '_label': 'test_label', 'test_field': 10},
{'_id': 3, '_label': 'test_label', 'test_field': 12, 'time': 'now'}
]
self.assertEqual(pipeline_test.to_list(), expected_output)
self.assertEqual(str(pipeline_test), '[DictKeyValueIterator(3), GraphVertexPipe(), ElementPropertiesPipe(internal:True)]')
def test_pipeline_get_starts(self):
pipeline_test = pipeline.Pipeline(recommend_utils.ListIterator([1, 2, 3, 4, 5, 6, 7]),
[pipes.ExcludePipe([1]),
pipes.LimitPipe(5)])
result = pipeline_test.get_starts()
self.assertEqual(str(result), 'ListIterator(7)')
def test_pipeline_get_pipes(self):
pipeline_test = pipeline.Pipeline(recommend_utils.ListIterator([1, 2, 3, 4, 5, 6, 7]),
[pipes.ExcludePipe([1]),
pipes.LimitPipe(5)])
result = ', '.join([str(pipe) for pipe in pipeline_test.get_pipes()])
self.assertEqual(str(result), 'ListIterator(7), ExcludePipe(), LimitPipe(limit_number:5)')
def test_pipeline_size(self):
pipeline_test = pipeline.Pipeline(recommend_utils.ListIterator([1, 2, 3, 4, 5, 6, 7]),
[pipes.ExcludePipe([1]),
pipes.LimitPipe(5)])
self.assertEqual(pipeline_test.size(), 3)
self.assertEqual(pipeline_test.to_list(), [2, 3, 4, 5, 6])
def test_pipeline_count(self):
pipeline_test = pipeline.Pipeline(recommend_utils.ListIterator([1, 2, 3, 4, 5, 6, 7]),
[pipes.ExcludePipe([1]),
pipes.LimitPipe(5)])
result = pipeline_test.count()
self.assertEqual(result, 5)
result = pipeline_test.count()
self.assertEqual(result, 0)
# TODO: A way to reset pipeline to iterate again
def test_pipeline_count_exception(self):
pipeline_test = pipeline.Pipeline()
with self.assertRaisesRegex(Exception, 'No Start Iterator set') as err:
pipeline_test.count()
def test_pipeline_iterate(self):
pipeline_test = pipeline.Pipeline(recommend_utils.ListIterator([1, 2, 3, 4, 5, 6, 7]),
[pipes.ExcludePipe([1]),
pipes.LimitPipe(5)])
result = pipeline_test.iterate()
self.assertEqual(result, None)
# TODO: Have a SideEffectPipe to prove that it is iterating
def test_pipeline_iterate_exception(self):
pipeline_test = pipeline.Pipeline()
with self.assertRaisesRegex(Exception, 'No Start Iterator set') as err:
pipeline_test.iterate()
def test_pipeline_remove(self):
pipeline_test = pipeline.Pipeline(recommend_utils.ListIterator([1, 2, 3, 4, 5, 6, 7]),
[pipes.ExcludePipe([1]),
pipes.LimitPipe(5)])
self.assertRaises(recommend_utils.UnsupportedOperationException, pipeline_test.remove)
def test_pipeline_refresh_as_pipes(self):
pipeline_test = pipeline.Pipeline(recommend_utils.ListIterator([1, 2, 3, 4, 5, 6, 7]),
[pipes.ExcludePipe([1]),
pipes.LimitPipe(5)])
result = pipeline_test.refresh_as_pipes()
self.assertEqual(result, None)
# TODO: Finish Test
|
|
# encoding: utf-8
# Copyright 2013 maker
# License
import simplejson as json
from django.test import TestCase
from django.test.client import Client
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User as DjangoUser
from maker.core.models import User, Group, Perspective, ModuleSetting, Object
from maker.identities.models import Contact, ContactType
from maker.sales.models import SaleOrder, Product, OrderedProduct, Subscription, \
SaleStatus, SaleSource, Lead, Opportunity
from maker.finance.models import Currency
class SalesAPITest(TestCase):
"Sales functional tests for views"
username = "api_test"
password = "api_password"
prepared = False
    authentication_headers = {"CONTENT_TYPE": "application/json",
                              "HTTP_AUTHORIZATION": "Basic YXBpX3Rlc3Q6YXBpX3Bhc3N3b3Jk"}
    content_type = 'application/json'
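    # The HTTP_AUTHORIZATION value above is standard Basic auth:
    # base64("api_test:api_password") == "YXBpX3Rlc3Q6YXBpX3Bhc3N3b3Jk".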
def setUp(self):
"Initial Setup"
if not self.prepared:
# Clean up first
Object.objects.all().delete()
User.objects.all().delete()
# Create objects
try:
self.group = Group.objects.get(name='test')
except Group.DoesNotExist:
Group.objects.all().delete()
self.group = Group(name='test')
self.group.save()
try:
self.user = DjangoUser.objects.get(username=self.username)
self.user.set_password(self.password)
try:
self.profile = self.user.get_profile()
except Exception:
User.objects.all().delete()
self.user = DjangoUser(username=self.username, password='')
self.user.set_password(self.password)
self.user.save()
except DjangoUser.DoesNotExist:
User.objects.all().delete()
self.user = DjangoUser(username=self.username, password='')
self.user.set_password(self.password)
self.user.save()
try:
perspective = Perspective.objects.get(name='default')
except Perspective.DoesNotExist:
Perspective.objects.all().delete()
perspective = Perspective(name='default')
perspective.set_default_user()
perspective.save()
ModuleSetting.set('default_perspective', perspective.id)
self.contact_type = ContactType()
self.contact_type.slug = 'machine'
self.contact_type.name = 'machine'
self.contact_type.save()
self.contact = Contact()
self.contact.contact_type = self.contact_type
self.contact.set_default_user()
self.contact.save()
self.assertNotEquals(self.contact.id, None)
self.status = SaleStatus()
self.status.active = True
self.status.use_sales = True
self.status.use_leads = True
self.status.use_opportunities = True
self.status.set_default_user()
self.status.save()
self.assertNotEquals(self.status.id, None)
self.currency = Currency(code="GBP",
name="Pounds",
symbol="L",
is_default=True)
self.currency.save()
self.source = SaleSource()
self.source.active = True
self.source.save()
self.source.set_user(self.user)
self.assertNotEquals(self.source.id, None)
self.product = Product(name="Test")
self.product.product_type = 'service'
self.product.active = True
self.product.sell_price = 10
self.product.buy_price = 100
self.product.set_default_user()
self.product.save()
self.assertNotEquals(self.product.id, None)
self.subscription = Subscription()
self.subscription.client = self.contact
self.subscription.set_default_user()
self.subscription.save()
self.assertNotEquals(self.subscription.id, None)
self.lead = Lead()
self.lead.contact_method = 'email'
self.lead.status = self.status
self.lead.contact = self.contact
self.lead.set_default_user()
self.lead.save()
self.assertNotEquals(self.lead.id, None)
self.opportunity = Opportunity()
self.opportunity.lead = self.lead
self.opportunity.contact = self.contact
self.opportunity.status = self.status
self.opportunity.amount = 100
self.opportunity.amount_currency = self.currency
self.opportunity.amount_display = 120
self.opportunity.set_default_user()
self.opportunity.save()
self.assertNotEquals(self.opportunity.id, None)
self.order = SaleOrder(reference="Test")
self.order.opportunity = self.opportunity
self.order.status = self.status
self.order.source = self.source
self.order.currency = self.currency
self.order.total = 0
self.order.total_display = 0
self.order.set_default_user()
self.order.save()
self.assertNotEquals(self.order.id, None)
self.ordered_product = OrderedProduct()
self.ordered_product.product = self.product
self.ordered_product.order = self.order
self.ordered_product.rate = 0
self.ordered_product.subscription = self.subscription
self.ordered_product.set_default_user()
self.ordered_product.save()
self.assertNotEquals(self.ordered_product.id, None)
self.client = Client()
self.prepared = True
def test_unauthenticated_access(self):
"Test index page at /sales/statuses"
response = self.client.get('/api/sales/statuses')
        # Unauthorized (401) as unauthenticated
self.assertEquals(response.status_code, 401)
def test_get_statuses_list(self):
""" Test index page api/sales/status """
response = self.client.get(path=reverse('api_sales_status'), **self.authentication_headers)
self.assertEquals(response.status_code, 200)
def test_get_status(self):
response = self.client.get(path=reverse('api_sales_status', kwargs={'object_ptr': self.status.id}), **self.authentication_headers)
self.assertEquals(response.status_code, 200)
def test_update_status(self):
updates = {"name": "Close_API", "active": True, "details": "api test details",
"use_leads": True, "use_opportunities": True, "hidden": False}
response = self.client.put(path=reverse('api_sales_status', kwargs={'object_ptr': self.status.id}),
content_type=self.content_type, data=json.dumps(updates), **self.authentication_headers)
self.assertEquals(response.status_code, 200)
data = json.loads(response.content)
self.assertEquals(data['name'], updates['name'])
self.assertEquals(data['active'], updates['active'])
self.assertEquals(data['details'], updates['details'])
self.assertEquals(data['use_leads'], updates['use_leads'])
self.assertEquals(data['use_opportunities'], updates['use_opportunities'])
self.assertEquals(data['hidden'], updates['hidden'])
def test_get_products_list(self):
""" Test index page api/sales/products """
response = self.client.get(path=reverse('api_sales_products'), **self.authentication_headers)
self.assertEquals(response.status_code, 200)
def test_get_product(self):
response = self.client.get(path=reverse('api_sales_products', kwargs={'object_ptr': self.product.id}), **self.authentication_headers)
self.assertEquals(response.status_code, 200)
def test_update_product(self):
updates = {"name": "API product", "parent": None, "product_type": "service", "code": "api_test_code",
"buy_price": '100.05', "sell_price":'10.5', "active": True, "runout_action": "ignore", "details": "api details"}
response = self.client.put(path=reverse('api_sales_products', kwargs={'object_ptr': self.product.id}),
content_type=self.content_type, data=json.dumps(updates), **self.authentication_headers)
self.assertEquals(response.status_code, 200)
data = json.loads(response.content)
self.assertEquals(data['name'], updates['name'])
self.assertEquals(data['product_type'], updates['product_type'])
self.assertEquals(data['code'], updates['code'])
self.assertEquals(data['buy_price'], updates['buy_price'])
self.assertEquals(data['sell_price'], updates['sell_price'])
self.assertEquals(data['active'], updates['active'])
self.assertEquals(data['runout_action'], updates['runout_action'])
self.assertEquals(data['details'], updates['details'])
def test_get_sources_list(self):
""" Test index page api/sales/sources """
response = self.client.get(path=reverse('api_sales_sources'), **self.authentication_headers)
self.assertEquals(response.status_code, 200)
def test_get_source(self):
response = self.client.get(path=reverse('api_sales_sources', kwargs={'object_ptr': self.source.id}), **self.authentication_headers)
self.assertEquals(response.status_code, 200)
def test_update_source(self):
updates = {"name":"Api source", "active": True, "details": "api details"}
response = self.client.put(path=reverse('api_sales_sources', kwargs={'object_ptr': self.source.id}),
content_type=self.content_type, data=json.dumps(updates), **self.authentication_headers)
self.assertEquals(response.status_code, 200)
data = json.loads(response.content)
self.assertEquals(data['name'], updates['name'])
self.assertEquals(data['active'], updates['active'])
self.assertEquals(data['details'], updates['details'])
#
def test_get_leads_list(self):
""" Test index page api/sales/leads """
response = self.client.get(path=reverse('api_sales_leads'), **self.authentication_headers)
self.assertEquals(response.status_code, 200)
def test_get_lead(self):
response = self.client.get(path=reverse('api_sales_leads', kwargs={'object_ptr': self.lead.id}), **self.authentication_headers)
self.assertEquals(response.status_code, 200)
def test_update_lead(self):
updates = {"status": self.status.id, "contact_method": "email", "contact": self.contact.id,
"products_interested": [self.product.id], "source": self.source.id, 'details': 'Api details'}
response = self.client.put(path=reverse('api_sales_leads', kwargs={'object_ptr': self.lead.id}),
content_type=self.content_type, data=json.dumps(updates), **self.authentication_headers)
self.assertEquals(response.status_code, 200)
data = json.loads(response.content)
self.assertEquals(data['status']['id'], updates['status'])
self.assertEquals(data['contact_method'], updates['contact_method'])
self.assertEquals(data['contact']['id'], updates['contact'])
for i, product in enumerate(data['products_interested']):
self.assertEquals(product['id'], updates['products_interested'][i])
self.assertEquals(data['source']['id'], updates['source'])
self.assertEquals(data['details'], updates['details'])
def test_get_opportunities_list(self):
""" Test index page api/sales/opportunities """
response = self.client.get(path=reverse('api_sales_opportunities'), **self.authentication_headers)
self.assertEquals(response.status_code, 200)
def test_get_opportunity(self):
response = self.client.get(path=reverse('api_sales_opportunities', kwargs={'object_ptr': self.opportunity.id}), **self.authentication_headers)
self.assertEquals(response.status_code, 200)
def test_update_opportunity(self):
updates = {"status": self.status.id, "products_interested": [self.product.id], "contact": self.contact.id,
"amount_display": 3000.56, "amount_currency": self.currency.id, "details": "API DETAILS"}
response = self.client.put(path=reverse('api_sales_opportunities', kwargs={'object_ptr': self.opportunity.id}),
content_type=self.content_type, data=json.dumps(updates), **self.authentication_headers)
self.assertEquals(response.status_code, 200)
data = json.loads(response.content)
self.assertEquals(data['status']['id'], updates['status'])
self.assertEquals(data['contact']['id'], updates['contact'])
for i, product in enumerate(data['products_interested']):
self.assertEquals(product['id'], updates['products_interested'][i])
self.assertEquals(data['amount_currency']['id'], updates['amount_currency'])
self.assertEquals(data['details'], updates['details'])
def test_get_orders_list(self):
""" Test index page api/sales/orders """
response = self.client.get(path=reverse('api_sales_orders'), **self.authentication_headers)
self.assertEquals(response.status_code, 200)
def test_get_order(self):
response = self.client.get(path=reverse('api_sales_orders', kwargs={'object_ptr': self.order.id}), **self.authentication_headers)
self.assertEquals(response.status_code, 200)
def test_update_order(self):
updates = {"datetime": "2011-04-11 12:01:15", "status": self.status.id, "source": self.source.id, "details": "api details"}
response = self.client.put(path=reverse('api_sales_orders', kwargs={'object_ptr': self.order.id}),
content_type=self.content_type, data=json.dumps(updates), **self.authentication_headers)
self.assertEquals(response.status_code, 200)
data = json.loads(response.content)
self.assertEquals(data['status']['id'], updates['status'])
self.assertEquals(data['source']['id'], updates['source'])
self.assertEquals(data['details'], updates['details'])
def test_get_subscriptions_list(self):
""" Test index page api/sales/subscriptions"""
response = self.client.get(path=reverse('api_sales_subscriptions'), **self.authentication_headers)
self.assertEquals(response.status_code, 200)
def test_get_subscription(self):
response = self.client.get(path=reverse('api_sales_subscriptions', kwargs={'object_ptr': self.subscription.id}), **self.authentication_headers)
self.assertEquals(response.status_code, 200)
def test_update_subscription(self):
updates = {"product": self.product.id, "start": "2011-06-30", "cycle_period": "daily", "active": True, "details": "api details"}
response = self.client.put(path=reverse('api_sales_subscriptions', kwargs={'object_ptr': self.subscription.id}),
content_type=self.content_type, data=json.dumps(updates), **self.authentication_headers)
self.assertEquals(response.status_code, 200)
data = json.loads(response.content)
self.assertEquals(data['product']['id'], updates['product'])
self.assertEquals(data['cycle_period'], updates['cycle_period'])
self.assertEquals(data['active'], updates['active'])
self.assertEquals(data['details'], updates['details'])
def test_get_ordered_product(self):
response = self.client.get(path=reverse('api_sales_ordered_products', kwargs={'object_ptr': self.ordered_product.id}), **self.authentication_headers)
self.assertEquals(response.status_code, 200)
def test_update_ordered_product(self):
updates = {"discount": '10.0', "product": self.product.id, "quantity": '10'}
response = self.client.put(path=reverse('api_sales_ordered_products', kwargs={'object_ptr': self.ordered_product.id}),
content_type=self.content_type, data=json.dumps(updates), **self.authentication_headers)
self.assertEquals(response.status_code, 200)
data = json.loads(response.content)
self.assertEquals(data['product']['id'], updates['product'])
self.assertEquals(data['discount'], updates['discount'])
self.assertEquals(data['quantity'], updates['quantity'])
|
|
import os
import re
import numpy as np
from multiprocessing import Process
#from matplotlib.pyplot import plot, show
import tensorflow as tf
from tensorflow.python.client import timeline
import tensorflow.contrib.slim as slim
from . import util
from ..ops import forward_warp
from .image_warp import image_warp
from .unsupervised import unsupervised_loss
from .supervised import supervised_loss
from .losses import occlusion, DISOCC_THRESH, create_outgoing_mask
from .flow_util import flow_error_avg, flow_to_color, flow_error_image, outlier_pct
from ..gui import display
from .util import summarized_placeholder
from .input import resize_input, resize_output_crop, resize_output, resize_output_flow
def restore_networks(sess, params, ckpt, ckpt_path=None):
finetune = params.get('finetune', [])
train_all = params.get('train_all', None)
spec = params.get('flownet', 'S')
flownet_num = len(spec)
net_names = ['flownet_c'] + ['stack_{}_flownet'.format(i+1) for i in range(flownet_num - 1)]
assert len(finetune) <= flownet_num
# Save all trained networks, restore all networks which are kept fixed
if train_all:
restore_external_nets = finetune if ckpt is None else []
variables_to_save = slim.get_variables_to_restore(include=net_names)
else:
restore_external_nets = finetune if ckpt is None else finetune[:flownet_num - 1]
variables_to_save = slim.get_variables_to_restore(include=[net_names[-1]])
saver = tf.train.Saver(variables_to_save, max_to_keep=1000)
sess.run(tf.global_variables_initializer())
if ckpt is not None:
# continue training
saver.restore(sess, ckpt.model_checkpoint_path)
saver.recover_last_checkpoints(ckpt.all_model_checkpoint_paths)
for i, ckpt in enumerate(restore_external_nets):
print('-- restore', net_names[i], ckpt.model_checkpoint_path)
try:
nets_to_restore = [net_names[i]]
variables_to_restore = slim.get_variables_to_restore(
include=nets_to_restore)
restorer = tf.train.Saver(variables_to_restore)
restorer.restore(sess, ckpt.model_checkpoint_path)
except:
# load partial network (missing final 2 upconvolutions)
nets_to_restore = [net_names[i]]
variables_to_restore = slim.get_variables_to_restore(
include=nets_to_restore)
            variables_to_restore = [v for v in variables_to_restore
                                    if 'full_res' not in v.name]
restorer = tf.train.Saver(variables_to_restore)
restorer.restore(sess, ckpt.model_checkpoint_path)
return saver
def _add_loss_summaries():
losses = tf.get_collection('losses')
for l in losses:
tensor_name = re.sub('tower_[0-9]*/', '', l.op.name)
tf.summary.scalar(tensor_name, l)
def _add_param_summaries():
params = tf.get_collection('params')
for p in params:
tensor_name = re.sub('tower_[0-9]*/', '', p.op.name)
tf.summary.scalar(tensor_name, p)
def _add_image_summaries():
images = tf.get_collection('train_images')
for im in images:
tensor_name = re.sub('tower_[0-9]*/', '', im.op.name)
tf.summary.image(tensor_name, im)
def _eval_plot(results, image_names, title):
import matplotlib.pyplot as plt
display(results, image_names, title)
class Trainer():
def __init__(self, train_batch_fn, eval_batch_fn, params,
train_summaries_dir, eval_summaries_dir, ckpt_dir,
normalization, debug=False, experiment="", interactive_plot=False,
supervised=False, devices=None):
self.train_summaries_dir = train_summaries_dir
self.eval_summaries_dir = eval_summaries_dir
self.ckpt_dir = ckpt_dir
self.params = params
self.debug = debug
self.train_batch_fn = train_batch_fn
self.eval_batch_fn = eval_batch_fn
self.normalization = normalization
self.experiment = experiment
self.interactive_plot = interactive_plot
self.plot_proc = None
self.supervised = supervised
self.loss_fn = supervised_loss if supervised else unsupervised_loss
        self.devices = devices or ['/gpu:0']
        self.shared_device = self.devices[0] if len(self.devices) == 1 else '/cpu:0'
def run(self, min_iter, max_iter):
"""Train (at most) from min_iter + 1 to max_iter.
If checkpoints are found in ckpt_dir,
they must be have a global_step within [min_iter, max_iter]. In this case,
training is continued from global_step + 1 until max_iter is reached.
"""
save_interval = self.params['save_interval']
ckpt = tf.train.get_checkpoint_state(self.ckpt_dir)
if ckpt is not None:
ckpt_path = ckpt.model_checkpoint_path
global_step = int(ckpt_path.split('/')[-1].split('-')[-1])
assert global_step >= min_iter, 'training stage not reached'
start_iter = global_step + 1
if start_iter > max_iter:
print('-- train: max_iter reached')
return
else:
start_iter = min_iter + 1
print('-- training from i = {} to {}'.format(start_iter, max_iter))
assert (max_iter - start_iter + 1) % save_interval == 0
for i in range(start_iter, max_iter + 1, save_interval):
self.train(i, i + save_interval - 1, i - (min_iter + 1))
self.eval(1)
if self.plot_proc:
self.plot_proc.join()
def get_train_and_loss_ops(self, batch, learning_rate, global_step):
if self.params['flownet'] == 'resnet':
opt = tf.train.MomentumOptimizer(learning_rate, 0.9)
else:
opt = tf.train.AdamOptimizer(beta1=0.9, beta2=0.999,
learning_rate=learning_rate)
def _add_summaries():
_add_loss_summaries()
_add_param_summaries()
if self.debug:
_add_image_summaries()
if len(self.devices) == 1:
loss_ = self.loss_fn(batch, self.params, self.normalization)
train_op = opt.minimize(loss_)
_add_summaries()
else:
tower_grads = []
with tf.variable_scope(tf.get_variable_scope()):
for i, devid in enumerate(self.devices):
with tf.device(devid):
with tf.name_scope('tower_{}'.format(i)) as scope:
loss_ = self.loss_fn(batch, self.params, self.normalization)
_add_summaries()
# Reuse variables for the next tower.
tf.get_variable_scope().reuse_variables()
# Retain the summaries from the final tower.
tower_summaries = tf.get_collection(tf.GraphKeys.SUMMARIES,
scope)
grads = opt.compute_gradients(loss_)
tower_grads.append(grads)
grads = average_gradients(tower_grads)
apply_gradient_op = opt.apply_gradients(grads)
train_op = apply_gradient_op
return train_op, loss_
def train(self, start_iter, max_iter, iter_offset):
ckpt = tf.train.get_checkpoint_state(self.ckpt_dir)
with tf.Graph().as_default(), tf.device(self.shared_device):
batch = self.train_batch_fn(iter_offset)
with tf.name_scope('params') as scope:
learning_rate_ = util.summarized_placeholder('learning_rate', 'train')
summaries = tf.get_collection(tf.GraphKeys.SUMMARIES, scope)
global_step_ = tf.placeholder(tf.int32, name="global_step")
train_op, loss_ = self.get_train_and_loss_ops(batch, learning_rate_, global_step_)
summaries = tf.get_collection(tf.GraphKeys.SUMMARIES)
summary_ = tf.summary.merge(summaries)
sess_config = tf.ConfigProto(allow_soft_placement=True)
with tf.Session(config=sess_config) as sess:
if self.debug:
summary_writer = tf.summary.FileWriter(self.train_summaries_dir,
sess.graph)
run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
run_metadata = tf.RunMetadata()
else:
summary_writer = tf.summary.FileWriter(self.train_summaries_dir)
run_options = None
run_metadata = None
saver = restore_networks(sess, self.params, ckpt)
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
for local_i, i in enumerate(range(start_iter, max_iter + 1)):
#if INTERACTIVE_PLOT:
# plt.title = "{} ({})".format(self.experiment, i)
decay_iters = local_i + iter_offset
if 'manual_decay_lrs' in self.params \
and 'manual_decay_iters' in self.params:
decay_index = 0
iter_counter = 0
for decay_i, manual_decay_iter in enumerate(self.params['manual_decay_iters']):
iter_counter += manual_decay_iter
if decay_iters <= iter_counter:
decay_index = decay_i
break
learning_rate = self.params['manual_decay_lrs'][decay_index]
else:
decay_interval = self.params['decay_interval']
decay_after = self.params.get('decay_after', 0)
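                        # Hypothetical example: with learning_rate=1e-4,
                        # decay_after=0 and decay_interval=100000, the rate
                        # computed below is halved every 100000 iterations:
                        # 1e-4, 5e-5, 2.5e-5, ...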
if decay_iters >= decay_after:
decay_minimum = decay_after / decay_interval
decay = (decay_iters // decay_interval) - decay_minimum
learning_rate = self.params['learning_rate'] / (2 ** decay)
else:
learning_rate = self.params['learning_rate']
feed_dict = {learning_rate_: learning_rate, global_step_: i}
_, loss = sess.run(
[train_op, loss_],
feed_dict=feed_dict,
options=run_options,
run_metadata=run_metadata)
if i == 1 or i % self.params['display_interval'] == 0:
summary = sess.run(summary_, feed_dict=feed_dict)
summary_writer.add_summary(summary, i)
print("-- train: i = {}, loss = {}".format(i, loss))
save_path = os.path.join(self.ckpt_dir, 'model.ckpt')
saver.save(sess, save_path, global_step=max_iter)
summary_writer.close()
coord.request_stop()
coord.join(threads)
def eval(self, num):
assert num == 1 # TODO enable num > 1
with tf.Graph().as_default():
inputs = self.eval_batch_fn()
im1, im2, input_shape = inputs[:3]
truths = inputs[3:]
height, width, _ = tf.unstack(tf.squeeze(input_shape), num=3, axis=0)
im1 = resize_input(im1, height, width, 384, 1280)
im2 = resize_input(im2, height, width, 384, 1280)
_, flow, flow_bw = unsupervised_loss(
(im1, im2),
params=self.params,
normalization=self.normalization,
augment=False, return_flow=True)
im1 = resize_output(im1, height, width, 3)
im2 = resize_output(im2, height, width, 3)
flow = resize_output_flow(flow, height, width, 2)
flow_bw = resize_output_flow(flow_bw, height, width, 2)
variables_to_restore = tf.all_variables()
images_ = [image_warp(im1, flow) / 255,
flow_to_color(flow),
                       1 - (1 - occlusion(flow, flow_bw)[0]) * create_outgoing_mask(flow),
forward_warp(flow_bw) < DISOCC_THRESH]
image_names = ['warped image', 'flow', 'occ', 'reverse disocc']
values_ = []
averages_ = []
truth_tuples = []
if len(truths) == 4:
flow_occ, mask_occ, flow_noc, mask_noc = truths
flow_occ = resize_output_crop(flow_occ, height, width, 2)
flow_noc = resize_output_crop(flow_noc, height, width, 2)
mask_occ = resize_output_crop(mask_occ, height, width, 1)
mask_noc = resize_output_crop(mask_noc, height, width, 1)
truth_tuples.append(('occluded', flow_occ, mask_occ))
truth_tuples.append(('non-occluded', flow_noc, mask_noc))
images_ += [flow_error_image(flow, flow_occ, mask_occ, mask_noc)]
image_names += ['flow error']
else:
raise NotImplementedError()
truth_tuples.append(('flow', truths[0], truths[1]))
for name, gt_flow, mask in truth_tuples:
error_ = flow_error_avg(gt_flow, flow, mask)
error_avg_ = summarized_placeholder('AEE/' + name, key='eval_avg')
outliers_ = outlier_pct(gt_flow, flow, mask)
outliers_avg = summarized_placeholder('outliers/' + name,
key='eval_avg')
values_.extend([error_, outliers_])
averages_.extend([error_avg_, outliers_avg])
losses = tf.get_collection('losses')
for l in losses:
values_.append(l)
tensor_name = re.sub('tower_[0-9]*/', '', l.op.name)
loss_avg_ = summarized_placeholder(tensor_name, key='eval_avg')
averages_.append(loss_avg_)
ckpt = tf.train.get_checkpoint_state(self.ckpt_dir)
assert ckpt is not None, "No checkpoints to evaluate"
# Correct path for ckpts from different machine
# ckpt_path = self.ckpt_dir + "/" + os.path.basename(ckpt.model_checkpoint_path)
ckpt_path = ckpt.model_checkpoint_path
with tf.Session() as sess:
summary_writer = tf.summary.FileWriter(self.eval_summaries_dir)
saver = tf.train.Saver(variables_to_restore)
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
restore_networks(sess, self.params, ckpt)
global_step = ckpt_path.split('/')[-1].split('-')[-1]
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess,
coord=coord)
averages = np.zeros(len(averages_))
num_iters = 0
image_lists = []
try:
while not coord.should_stop():
results = sess.run(values_ + images_)
values = results[:len(averages_)]
images = results[len(averages_):]
image_lists.append(images)
averages += values
num_iters += 1
except tf.errors.OutOfRangeError:
pass
averages /= num_iters
feed = {k: v for (k, v) in zip(averages_, averages)}
summary_ = tf.summary.merge_all('eval_avg')
summary = sess.run(summary_, feed_dict=feed)
summary_writer.add_summary(summary, global_step)
print("-- eval: i = {}".format(global_step))
coord.request_stop()
coord.join(threads)
summary_writer.close()
if self.interactive_plot:
if self.plot_proc:
self.plot_proc.terminate()
self.plot_proc = Process(target=_eval_plot,
args=([image_lists], image_names,
"{} (i={})".format(self.experiment,
global_step)))
self.plot_proc.start()
def average_gradients(tower_grads):
"""Calculate the average gradient for each shared variable across all towers.
Note that this function provides a synchronization point across all towers.
Args:
tower_grads: List of lists of (gradient, variable) tuples. The outer list
is over individual gradients. The inner list is over the gradient
calculation for each tower.
Returns:
List of pairs of (gradient, variable) where the gradient has been averaged
across all towers.
"""
average_grads = []
for grad_and_vars in zip(*tower_grads):
# Note that each grad_and_vars looks like the following:
# ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
grads = []
for g, _ in grad_and_vars:
if g is not None:
# Add 0 dimension to the gradients to represent the tower.
expanded_g = tf.expand_dims(g, 0)
# Append on a 'tower' dimension which we will average over below.
grads.append(expanded_g)
if grads != []:
# Average over the 'tower' dimension.
grad = tf.concat(grads, 0)
grad = tf.reduce_mean(grad, 0)
# Keep in mind that the Variables are redundant because they are shared
# across towers. So .. we will just return the first tower's pointer to
# the Variable.
v = grad_and_vars[0][1]
grad_and_var = (grad, v)
average_grads.append(grad_and_var)
return average_grads
|
|
# Test the support for SSL and sockets
import sys
import unittest
from test import support
import socket
import select
import errno
import subprocess
import time
import os
import pprint
import urllib.parse, urllib.request
import shutil
import traceback
import asyncore
from http.server import HTTPServer, SimpleHTTPRequestHandler
# Optionally test SSL support, if we have it in the tested platform
skip_expected = False
try:
import ssl
except ImportError:
skip_expected = True
HOST = support.HOST
CERTFILE = None
SVN_PYTHON_ORG_ROOT_CERT = None
def handle_error(prefix):
exc_format = ' '.join(traceback.format_exception(*sys.exc_info()))
if support.verbose:
sys.stdout.write(prefix + exc_format)
class BasicTests(unittest.TestCase):
def testSSLconnect(self):
if not support.is_resource_enabled('network'):
return
s = ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_NONE)
s.connect(("svn.python.org", 443))
c = s.getpeercert()
if c:
raise support.TestFailed("Peer cert %s shouldn't be here!")
s.close()
# this should fail because we have no verification certs
s = ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED)
try:
s.connect(("svn.python.org", 443))
except ssl.SSLError:
pass
finally:
s.close()
def testCrucialConstants(self):
ssl.PROTOCOL_SSLv2
ssl.PROTOCOL_SSLv23
ssl.PROTOCOL_SSLv3
ssl.PROTOCOL_TLSv1
ssl.CERT_NONE
ssl.CERT_OPTIONAL
ssl.CERT_REQUIRED
def testRAND(self):
v = ssl.RAND_status()
if support.verbose:
sys.stdout.write("\n RAND_status is %d (%s)\n"
% (v, (v and "sufficient randomness") or
"insufficient randomness"))
try:
ssl.RAND_egd(1)
except TypeError:
pass
else:
print("didn't raise TypeError")
ssl.RAND_add("this is a random string", 75.0)
def testParseCert(self):
# note that this uses an 'unofficial' function in _ssl.c,
# provided solely for this test, to exercise the certificate
# parsing code
p = ssl._ssl._test_decode_cert(CERTFILE, False)
if support.verbose:
sys.stdout.write("\n" + pprint.pformat(p) + "\n")
def testDERtoPEM(self):
pem = open(SVN_PYTHON_ORG_ROOT_CERT, 'r').read()
d1 = ssl.PEM_cert_to_DER_cert(pem)
p2 = ssl.DER_cert_to_PEM_cert(d1)
d2 = ssl.PEM_cert_to_DER_cert(p2)
if (d1 != d2):
raise support.TestFailed("PEM-to-DER or DER-to-PEM translation failed")
class NetworkedTests(unittest.TestCase):
def testConnect(self):
s = ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_NONE)
s.connect(("svn.python.org", 443))
c = s.getpeercert()
if c:
raise support.TestFailed("Peer cert %s shouldn't be here!")
s.close()
# this should fail because we have no verification certs
s = ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED)
try:
s.connect(("svn.python.org", 443))
except ssl.SSLError:
pass
finally:
s.close()
# this should succeed because we specify the root cert
s = ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=SVN_PYTHON_ORG_ROOT_CERT)
try:
s.connect(("svn.python.org", 443))
except ssl.SSLError as x:
raise support.TestFailed("Unexpected exception %s" % x)
finally:
s.close()
def testNonBlockingHandshake(self):
s = socket.socket(socket.AF_INET)
s.connect(("svn.python.org", 443))
s.setblocking(False)
s = ssl.wrap_socket(s,
cert_reqs=ssl.CERT_NONE,
do_handshake_on_connect=False)
count = 0
while True:
try:
count += 1
s.do_handshake()
break
except ssl.SSLError as err:
if err.args[0] == ssl.SSL_ERROR_WANT_READ:
select.select([s], [], [])
elif err.args[0] == ssl.SSL_ERROR_WANT_WRITE:
select.select([], [s], [])
else:
raise
s.close()
if support.verbose:
sys.stdout.write("\nNeeded %d calls to do_handshake() to establish session.\n" % count)
def testFetchServerCert(self):
pem = ssl.get_server_certificate(("svn.python.org", 443))
if not pem:
raise support.TestFailed("No server certificate on svn.python.org:443!")
return
try:
pem = ssl.get_server_certificate(("svn.python.org", 443), ca_certs=CERTFILE)
except ssl.SSLError as x:
#should fail
if support.verbose:
sys.stdout.write("%s\n" % x)
else:
raise support.TestFailed("Got server certificate %s for svn.python.org!" % pem)
pem = ssl.get_server_certificate(("svn.python.org", 443), ca_certs=SVN_PYTHON_ORG_ROOT_CERT)
if not pem:
raise support.TestFailed("No server certificate on svn.python.org:443!")
if support.verbose:
sys.stdout.write("\nVerified certificate for svn.python.org:443 is\n%s\n" % pem)
try:
import threading
except ImportError:
_have_threads = False
else:
_have_threads = True
class ThreadedEchoServer(threading.Thread):
class ConnectionHandler(threading.Thread):
"""A mildly complicated class, because we want it to work both
with and without the SSL wrapper around the socket connection, so
that we can test the STARTTLS functionality."""
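        # Message protocol exercised by the tests (see testSTARTTLS below):
        #   client sends 'STARTTLS' -> server replies 'OK\n', both sides wrap
        #   the plain socket with ssl.wrap_socket();
        #   client sends 'ENDTLS'   -> server replies 'OK\n', both sides
        #   unwrap() back to the plain socket;
        #   client sends 'over'     -> server closes the connection;
        #   any other message is echoed back lower-cased.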
def __init__(self, server, connsock, addr):
self.server = server
self.running = False
self.sock = connsock
self.addr = addr
self.sock.setblocking(1)
self.sslconn = None
threading.Thread.__init__(self)
self.daemon = True
def wrap_conn (self):
try:
self.sslconn = ssl.wrap_socket(self.sock, server_side=True,
certfile=self.server.certificate,
ssl_version=self.server.protocol,
ca_certs=self.server.cacerts,
cert_reqs=self.server.certreqs)
except:
if self.server.chatty:
handle_error("\n server: bad connection attempt from " + repr(self.addr) + ":\n")
if not self.server.expect_bad_connects:
# here, we want to stop the server, because this shouldn't
# happen in the context of our test case
self.running = False
# normally, we'd just stop here, but for the test
# harness, we want to stop the server
self.server.stop()
self.close()
return False
else:
if self.server.certreqs == ssl.CERT_REQUIRED:
cert = self.sslconn.getpeercert()
if support.verbose and self.server.chatty:
sys.stdout.write(" client cert is " + pprint.pformat(cert) + "\n")
cert_binary = self.sslconn.getpeercert(True)
if support.verbose and self.server.chatty:
sys.stdout.write(" cert binary is " + str(len(cert_binary)) + " bytes\n")
cipher = self.sslconn.cipher()
if support.verbose and self.server.chatty:
sys.stdout.write(" server: connection cipher is now " + str(cipher) + "\n")
return True
def read(self):
if self.sslconn:
return self.sslconn.read()
else:
return self.sock.recv(1024)
def write(self, bytes):
if self.sslconn:
return self.sslconn.write(bytes)
else:
return self.sock.send(bytes)
def close(self):
if self.sslconn:
self.sslconn.close()
else:
self.sock.close()
def run (self):
self.running = True
if not self.server.starttls_server:
if not self.wrap_conn():
return
while self.running:
try:
msg = self.read()
amsg = (msg and str(msg, 'ASCII', 'strict')) or ''
if not msg:
# eof, so quit this handler
self.running = False
self.close()
elif amsg.strip() == 'over':
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: client closed connection\n")
self.close()
return
elif (self.server.starttls_server and
amsg.strip() == 'STARTTLS'):
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: read STARTTLS from client, sending OK...\n")
self.write("OK\n".encode("ASCII", "strict"))
if not self.wrap_conn():
return
elif (self.server.starttls_server and self.sslconn
and amsg.strip() == 'ENDTLS'):
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: read ENDTLS from client, sending OK...\n")
self.write("OK\n".encode("ASCII", "strict"))
self.sock = self.sslconn.unwrap()
self.sslconn = None
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: connection is now unencrypted...\n")
else:
if (support.verbose and
self.server.connectionchatty):
ctype = (self.sslconn and "encrypted") or "unencrypted"
sys.stdout.write(" server: read %s (%s), sending back %s (%s)...\n"
% (repr(msg), ctype, repr(msg.lower()), ctype))
self.write(amsg.lower().encode('ASCII', 'strict'))
except socket.error:
if self.server.chatty:
handle_error("Test server failure:\n")
self.close()
self.running = False
# normally, we'd just stop here, but for the test
# harness, we want to stop the server
self.server.stop()
except:
handle_error('')
def __init__(self, certificate, ssl_version=None,
certreqs=None, cacerts=None, expect_bad_connects=False,
chatty=True, connectionchatty=False, starttls_server=False):
if ssl_version is None:
ssl_version = ssl.PROTOCOL_TLSv1
if certreqs is None:
certreqs = ssl.CERT_NONE
self.certificate = certificate
self.protocol = ssl_version
self.certreqs = certreqs
self.cacerts = cacerts
self.expect_bad_connects = expect_bad_connects
self.chatty = chatty
self.connectionchatty = connectionchatty
self.starttls_server = starttls_server
self.sock = socket.socket()
self.port = support.bind_port(self.sock)
self.flag = None
self.active = False
threading.Thread.__init__(self)
self.daemon = True
def start (self, flag=None):
self.flag = flag
threading.Thread.start(self)
def run (self):
self.sock.settimeout(0.5)
self.sock.listen(5)
self.active = True
if self.flag:
# signal an event
self.flag.set()
while self.active:
try:
newconn, connaddr = self.sock.accept()
if support.verbose and self.chatty:
sys.stdout.write(' server: new connection from '
+ repr(connaddr) + '\n')
handler = self.ConnectionHandler(self, newconn, connaddr)
handler.start()
except socket.timeout:
pass
except KeyboardInterrupt:
self.stop()
except:
if self.chatty:
handle_error("Test server failure:\n")
self.sock.close()
def stop (self):
self.active = False
class OurHTTPSServer(threading.Thread):
# This one's based on HTTPServer, which is based on SocketServer
class HTTPSServer(HTTPServer):
def __init__(self, server_address, RequestHandlerClass, certfile):
HTTPServer.__init__(self, server_address, RequestHandlerClass)
# we assume the certfile contains both private key and certificate
self.certfile = certfile
self.active = False
self.active_lock = threading.Lock()
self.allow_reuse_address = True
def __str__(self):
return ('<%s %s:%s>' %
(self.__class__.__name__,
self.server_name,
self.server_port))
def get_request (self):
# override this to wrap socket with SSL
sock, addr = self.socket.accept()
sslconn = ssl.wrap_socket(sock, server_side=True,
certfile=self.certfile)
return sslconn, addr
# The methods overridden below this are mainly so that we
# can run it in a thread and be able to stop it from another
# You probably wouldn't need them in other uses.
def server_activate(self):
# We want to run this in a thread for testing purposes,
# so we override this to set timeout, so that we get
# a chance to stop the server
self.socket.settimeout(0.5)
HTTPServer.server_activate(self)
def serve_forever(self):
# We want this to run in a thread, so we use a slightly
# modified version of "forever".
self.active = True
while 1:
try:
# We need to lock while handling the request.
# Another thread can close the socket after self.active
# has been checked and before the request is handled.
# This causes an exception when using the closed socket.
with self.active_lock:
if not self.active:
break
self.handle_request()
except socket.timeout:
pass
except KeyboardInterrupt:
self.server_close()
return
except:
sys.stdout.write(''.join(traceback.format_exception(*sys.exc_info())))
break
time.sleep(0.1)
def server_close(self):
# Again, we want this to run in a thread, so we need to override
# close to clear the "active" flag, so that serve_forever() will
# terminate.
with self.active_lock:
HTTPServer.server_close(self)
self.active = False
class RootedHTTPRequestHandler(SimpleHTTPRequestHandler):
# need to override translate_path to get a known root,
# instead of using os.curdir, since the test could be
# run from anywhere
server_version = "TestHTTPS/1.0"
root = None
def translate_path(self, path):
"""Translate a /-separated PATH to the local filename syntax.
Components that mean special things to the local file system
(e.g. drive or directory names) are ignored. (XXX They should
probably be diagnosed.)
"""
# abandon query parameters
path = urllib.parse.urlparse(path)[2]
path = os.path.normpath(urllib.parse.unquote(path))
words = path.split('/')
words = filter(None, words)
path = self.root
for word in words:
drive, word = os.path.splitdrive(word)
head, word = os.path.split(word)
if word in self.root: continue
path = os.path.join(path, word)
return path
def log_message(self, format, *args):
# we override this to suppress logging unless "verbose"
if support.verbose:
sys.stdout.write(" server (%s:%d %s):\n [%s] %s\n" %
(self.server.server_address,
self.server.server_port,
self.request.cipher(),
self.log_date_time_string(),
format%args))
def __init__(self, certfile):
self.flag = None
self.active = False
self.RootedHTTPRequestHandler.root = os.path.split(CERTFILE)[0]
self.port = support.find_unused_port()
self.server = self.HTTPSServer(
(HOST, self.port), self.RootedHTTPRequestHandler, certfile)
threading.Thread.__init__(self)
self.daemon = True
def __str__(self):
return "<%s %s>" % (self.__class__.__name__, self.server)
def start (self, flag=None):
self.flag = flag
threading.Thread.start(self)
def run (self):
self.active = True
if self.flag:
self.flag.set()
self.server.serve_forever()
self.active = False
def stop (self):
self.active = False
self.server.server_close()
class AsyncoreEchoServer(threading.Thread):
# this one's based on asyncore.dispatcher
class EchoServer (asyncore.dispatcher):
class ConnectionHandler (asyncore.dispatcher_with_send):
def __init__(self, conn, certfile):
self.socket = ssl.wrap_socket(conn, server_side=True,
certfile=certfile,
do_handshake_on_connect=False)
asyncore.dispatcher_with_send.__init__(self, self.socket)
# now we have to do the handshake
# we'll just do it the easy way, and block the connection
# till it's finished. If we were doing it right, we'd
# do this in multiple calls to handle_read...
self.do_handshake(block=True)
def readable(self):
if isinstance(self.socket, ssl.SSLSocket):
while self.socket.pending() > 0:
self.handle_read_event()
return True
def handle_read(self):
data = self.recv(1024)
if support.verbose:
sys.stdout.write(" server: read %s from client\n" % repr(data))
if not data:
self.close()
else:
self.send(str(data, 'ASCII', 'strict').lower().encode('ASCII', 'strict'))
def handle_close(self):
self.close()
if support.verbose:
sys.stdout.write(" server: closed connection %s\n" % self.socket)
def handle_error(self):
raise
def __init__(self, port, certfile):
self.port = port
self.certfile = certfile
asyncore.dispatcher.__init__(self)
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.bind(('', port))
self.listen(5)
def handle_accept(self):
sock_obj, addr = self.accept()
if support.verbose:
sys.stdout.write(" server: new connection from %s:%s\n" %addr)
self.ConnectionHandler(sock_obj, self.certfile)
def handle_error(self):
raise
def __init__(self, certfile):
self.flag = None
self.active = False
self.port = support.find_unused_port()
self.server = self.EchoServer(self.port, certfile)
threading.Thread.__init__(self)
self.daemon = True
def __str__(self):
return "<%s %s>" % (self.__class__.__name__, self.server)
def start (self, flag=None):
self.flag = flag
threading.Thread.start(self)
def run (self):
self.active = True
if self.flag:
self.flag.set()
while self.active:
try:
asyncore.loop(1)
except:
pass
def stop (self):
self.active = False
self.server.close()
def badCertTest (certfile):
server = ThreadedEchoServer(CERTFILE,
certreqs=ssl.CERT_REQUIRED,
cacerts=CERTFILE, chatty=False,
connectionchatty=False)
flag = threading.Event()
server.start(flag)
# wait for it to start
flag.wait()
# try to connect
try:
try:
s = ssl.wrap_socket(socket.socket(),
certfile=certfile,
ssl_version=ssl.PROTOCOL_TLSv1)
s.connect((HOST, server.port))
except ssl.SSLError as x:
if support.verbose:
sys.stdout.write("\nSSLError is %s\n" % x)
except socket.error as x:
if support.verbose:
sys.stdout.write("\nsocket.error is %s\n" % x)
else:
raise support.TestFailed(
"Use of invalid cert should have failed!")
finally:
server.stop()
server.join()
def serverParamsTest (certfile, protocol, certreqs, cacertsfile,
client_certfile, client_protocol=None,
indata="FOO\n",
chatty=False, connectionchatty=False):
server = ThreadedEchoServer(certfile,
certreqs=certreqs,
ssl_version=protocol,
cacerts=cacertsfile,
chatty=chatty,
connectionchatty=False)
flag = threading.Event()
server.start(flag)
# wait for it to start
flag.wait()
# try to connect
if client_protocol is None:
client_protocol = protocol
try:
s = ssl.wrap_socket(socket.socket(),
server_side=False,
certfile=client_certfile,
ca_certs=cacertsfile,
cert_reqs=certreqs,
ssl_version=client_protocol)
s.connect((HOST, server.port))
except ssl.SSLError as x:
raise support.TestFailed("Unexpected SSL error: " + str(x))
except Exception as x:
raise support.TestFailed("Unexpected exception: " + str(x))
else:
if connectionchatty:
if support.verbose:
sys.stdout.write(
" client: sending %s...\n" % (repr(indata)))
s.write(indata.encode('ASCII', 'strict'))
outdata = s.read()
if connectionchatty:
if support.verbose:
sys.stdout.write(" client: read %s\n" % repr(outdata))
outdata = str(outdata, 'ASCII', 'strict')
if outdata != indata.lower():
raise support.TestFailed(
"bad data <<%s>> (%d) received; expected <<%s>> (%d)\n"
% (repr(outdata[:min(len(outdata),20)]), len(outdata),
repr(indata[:min(len(indata),20)].lower()), len(indata)))
s.write("over\n".encode("ASCII", "strict"))
if connectionchatty:
if support.verbose:
sys.stdout.write(" client: closing connection.\n")
s.close()
finally:
server.stop()
server.join()
def tryProtocolCombo (server_protocol,
client_protocol,
expectedToWork,
certsreqs=None):
if certsreqs is None:
certsreqs = ssl.CERT_NONE
if certsreqs == ssl.CERT_NONE:
certtype = "CERT_NONE"
elif certsreqs == ssl.CERT_OPTIONAL:
certtype = "CERT_OPTIONAL"
elif certsreqs == ssl.CERT_REQUIRED:
certtype = "CERT_REQUIRED"
if support.verbose:
formatstr = (expectedToWork and " %s->%s %s\n") or " {%s->%s} %s\n"
sys.stdout.write(formatstr %
(ssl.get_protocol_name(client_protocol),
ssl.get_protocol_name(server_protocol),
certtype))
try:
serverParamsTest(CERTFILE, server_protocol, certsreqs,
CERTFILE, CERTFILE, client_protocol,
chatty=False, connectionchatty=False)
except support.TestFailed:
if expectedToWork:
raise
else:
if not expectedToWork:
raise support.TestFailed(
"Client protocol %s succeeded with server protocol %s!"
% (ssl.get_protocol_name(client_protocol),
ssl.get_protocol_name(server_protocol)))
class ThreadedTests(unittest.TestCase):
def testEcho (self):
if support.verbose:
sys.stdout.write("\n")
serverParamsTest(CERTFILE, ssl.PROTOCOL_TLSv1, ssl.CERT_NONE,
CERTFILE, CERTFILE, ssl.PROTOCOL_TLSv1,
chatty=True, connectionchatty=True)
def testReadCert(self):
if support.verbose:
sys.stdout.write("\n")
s2 = socket.socket()
server = ThreadedEchoServer(CERTFILE,
certreqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_SSLv23,
cacerts=CERTFILE,
chatty=False)
flag = threading.Event()
server.start(flag)
# wait for it to start
flag.wait()
# try to connect
try:
try:
s = ssl.wrap_socket(socket.socket(),
certfile=CERTFILE,
ca_certs=CERTFILE,
cert_reqs=ssl.CERT_REQUIRED,
ssl_version=ssl.PROTOCOL_SSLv23)
s.connect((HOST, server.port))
except ssl.SSLError as x:
raise support.TestFailed(
"Unexpected SSL error: " + str(x))
except Exception as x:
raise support.TestFailed(
"Unexpected exception: " + str(x))
else:
if not s:
raise support.TestFailed(
"Can't SSL-handshake with test server")
cert = s.getpeercert()
if not cert:
raise support.TestFailed(
"Can't get peer certificate.")
cipher = s.cipher()
if support.verbose:
sys.stdout.write(pprint.pformat(cert) + '\n')
sys.stdout.write("Connection cipher is " + str(cipher) + '.\n')
if 'subject' not in cert:
raise support.TestFailed(
"No subject field in certificate: %s." %
pprint.pformat(cert))
if ((('organizationName', 'Python Software Foundation'),)
not in cert['subject']):
raise support.TestFailed(
"Missing or invalid 'organizationName' field in certificate subject; "
"should be 'Python Software Foundation'.")
s.close()
finally:
server.stop()
server.join()
def testNULLcert(self):
badCertTest(os.path.join(os.path.dirname(__file__) or os.curdir,
"nullcert.pem"))
def testMalformedCert(self):
badCertTest(os.path.join(os.path.dirname(__file__) or os.curdir,
"badcert.pem"))
def testWrongCert(self):
badCertTest(os.path.join(os.path.dirname(__file__) or os.curdir,
"wrongcert.pem"))
def testMalformedKey(self):
badCertTest(os.path.join(os.path.dirname(__file__) or os.curdir,
"badkey.pem"))
def testRudeShutdown(self):
listener_ready = threading.Event()
listener_gone = threading.Event()
port = support.find_unused_port()
# `listener` runs in a thread. It opens a socket listening on
# PORT, and sits in an accept() until the main thread connects.
# Then it rudely closes the socket, and sets Event `listener_gone`
# to let the main thread know the socket is gone.
def listener():
s = socket.socket()
s.bind((HOST, port))
s.listen(5)
listener_ready.set()
s.accept()
s = None # reclaim the socket object, which also closes it
listener_gone.set()
def connector():
listener_ready.wait()
s = socket.socket()
s.connect((HOST, port))
listener_gone.wait()
try:
ssl_sock = ssl.wrap_socket(s)
except IOError:
pass
else:
raise support.TestFailed(
'connecting to closed SSL socket should have failed')
t = threading.Thread(target=listener)
t.start()
connector()
t.join()
def testProtocolSSL2(self):
if support.verbose:
sys.stdout.write("\n")
tryProtocolCombo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True)
tryProtocolCombo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_OPTIONAL)
tryProtocolCombo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_REQUIRED)
tryProtocolCombo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, True)
tryProtocolCombo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv3, False)
tryProtocolCombo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLSv1, False)
def testProtocolSSL23(self):
if support.verbose:
sys.stdout.write("\n")
try:
tryProtocolCombo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv2, True)
except support.TestFailed as x:
# this fails on some older versions of OpenSSL (0.9.7l, for instance)
if support.verbose:
sys.stdout.write(
" SSL2 client to SSL23 server test unexpectedly failed:\n %s\n"
% str(x))
tryProtocolCombo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, True)
tryProtocolCombo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True)
tryProtocolCombo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, True)
tryProtocolCombo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, True, ssl.CERT_OPTIONAL)
tryProtocolCombo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True, ssl.CERT_OPTIONAL)
tryProtocolCombo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, True, ssl.CERT_OPTIONAL)
tryProtocolCombo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, True, ssl.CERT_REQUIRED)
tryProtocolCombo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True, ssl.CERT_REQUIRED)
tryProtocolCombo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, True, ssl.CERT_REQUIRED)
def testProtocolSSL3(self):
if support.verbose:
sys.stdout.write("\n")
tryProtocolCombo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, True)
tryProtocolCombo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, True, ssl.CERT_OPTIONAL)
tryProtocolCombo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, True, ssl.CERT_REQUIRED)
tryProtocolCombo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv2, False)
tryProtocolCombo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv23, False)
tryProtocolCombo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_TLSv1, False)
def testProtocolTLS1(self):
if support.verbose:
sys.stdout.write("\n")
tryProtocolCombo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, True)
tryProtocolCombo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, True, ssl.CERT_OPTIONAL)
tryProtocolCombo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, True, ssl.CERT_REQUIRED)
tryProtocolCombo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv2, False)
tryProtocolCombo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv3, False)
tryProtocolCombo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv23, False)
def testSTARTTLS (self):
msgs = ("msg 1", "MSG 2", "STARTTLS", "MSG 3", "msg 4", "ENDTLS", "msg 5", "msg 6")
server = ThreadedEchoServer(CERTFILE,
ssl_version=ssl.PROTOCOL_TLSv1,
starttls_server=True,
chatty=True,
connectionchatty=True)
flag = threading.Event()
server.start(flag)
# wait for it to start
flag.wait()
# try to connect
wrapped = False
try:
try:
s = socket.socket()
s.setblocking(1)
s.connect((HOST, server.port))
except Exception as x:
raise support.TestFailed("Unexpected exception: " + str(x))
else:
if support.verbose:
sys.stdout.write("\n")
for indata in msgs:
msg = indata.encode('ASCII', 'replace')
if support.verbose:
sys.stdout.write(
" client: sending %s...\n" % repr(msg))
if wrapped:
conn.write(msg)
outdata = conn.read()
else:
s.send(msg)
outdata = s.recv(1024)
if (indata == "STARTTLS" and
str(outdata, 'ASCII', 'replace').strip().lower().startswith("ok")):
if support.verbose:
msg = str(outdata, 'ASCII', 'replace')
sys.stdout.write(
" client: read %s from server, starting TLS...\n"
% repr(msg))
conn = ssl.wrap_socket(s, ssl_version=ssl.PROTOCOL_TLSv1)
wrapped = True
elif (indata == "ENDTLS" and
str(outdata, 'ASCII', 'replace').strip().lower().startswith("ok")):
if support.verbose:
msg = str(outdata, 'ASCII', 'replace')
sys.stdout.write(
" client: read %s from server, ending TLS...\n"
% repr(msg))
s = conn.unwrap()
wrapped = False
else:
if support.verbose:
msg = str(outdata, 'ASCII', 'replace')
sys.stdout.write(
" client: read %s from server\n" % repr(msg))
if support.verbose:
sys.stdout.write(" client: closing connection.\n")
if wrapped:
conn.write("over\n".encode("ASCII", "strict"))
else:
s.send("over\n".encode("ASCII", "strict"))
if wrapped:
conn.close()
else:
s.close()
finally:
server.stop()
server.join()
def testSocketServer(self):
server = OurHTTPSServer(CERTFILE)
flag = threading.Event()
server.start(flag)
# wait for it to start
flag.wait()
# try to connect
try:
if support.verbose:
sys.stdout.write('\n')
d1 = open(CERTFILE, 'rb').read()
d2 = ''
# now fetch the same data from the HTTPS server
url = 'https://%s:%d/%s' % (
HOST, server.port, os.path.split(CERTFILE)[1])
f = urllib.request.urlopen(url)
dlen = f.info().get("content-length")
if dlen and (int(dlen) > 0):
d2 = f.read(int(dlen))
if support.verbose:
sys.stdout.write(
" client: read %d bytes from remote server '%s'\n"
% (len(d2), server))
f.close()
except:
msg = ''.join(traceback.format_exception(*sys.exc_info()))
if support.verbose:
sys.stdout.write('\n' + msg)
raise support.TestFailed(msg)
else:
if not (d1 == d2):
print("d1 is", len(d1), repr(d1))
print("d2 is", len(d2), repr(d2))
raise support.TestFailed(
"Couldn't fetch data from HTTPS server")
finally:
if support.verbose:
sys.stdout.write('stopping server\n')
server.stop()
if support.verbose:
sys.stdout.write('joining thread\n')
server.join()
def testAsyncoreServer(self):
if support.verbose:
sys.stdout.write("\n")
indata="FOO\n"
server = AsyncoreEchoServer(CERTFILE)
flag = threading.Event()
server.start(flag)
# wait for it to start
flag.wait()
# try to connect
try:
s = ssl.wrap_socket(socket.socket())
s.connect((HOST, server.port))
except ssl.SSLError as x:
raise support.TestFailed("Unexpected SSL error: " + str(x))
except Exception as x:
raise support.TestFailed("Unexpected exception: " + str(x))
else:
if support.verbose:
sys.stdout.write(
" client: sending %s...\n" % (repr(indata)))
s.sendall(indata.encode('ASCII', 'strict'))
outdata = s.recv()
if support.verbose:
sys.stdout.write(" client: read %s\n" % repr(outdata))
outdata = str(outdata, 'ASCII', 'strict')
if outdata != indata.lower():
raise support.TestFailed(
"bad data <<%s>> (%d) received; expected <<%s>> (%d)\n"
% (repr(outdata[:min(len(outdata),20)]), len(outdata),
repr(indata[:min(len(indata),20)].lower()), len(indata)))
s.write("over\n".encode("ASCII", "strict"))
if support.verbose:
sys.stdout.write(" client: closing connection.\n")
s.close()
finally:
server.stop()
server.join()
def testAllRecvAndSendMethods(self):
if support.verbose:
sys.stdout.write("\n")
server = ThreadedEchoServer(CERTFILE,
certreqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1,
cacerts=CERTFILE,
chatty=True,
connectionchatty=False)
flag = threading.Event()
server.start(flag)
# wait for it to start
flag.wait()
# try to connect
try:
s = ssl.wrap_socket(socket.socket(),
server_side=False,
certfile=CERTFILE,
ca_certs=CERTFILE,
cert_reqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1)
s.connect((HOST, server.port))
except ssl.SSLError as x:
raise support.TestFailed("Unexpected SSL error: " + str(x))
except Exception as x:
raise support.TestFailed("Unexpected exception: " + str(x))
else:
# helper methods for standardising recv* method signatures
def _recv_into():
b = bytearray(b"\0"*100)
count = s.recv_into(b)
return b[:count]
def _recvfrom_into():
b = bytearray(b"\0"*100)
count, addr = s.recvfrom_into(b)
return b[:count]
# (name, method, whether to expect success, *args)
send_methods = [
('send', s.send, True, []),
('sendto', s.sendto, False, ["some.address"]),
('sendall', s.sendall, True, []),
]
recv_methods = [
('recv', s.recv, True, []),
('recvfrom', s.recvfrom, False, ["some.address"]),
('recv_into', _recv_into, True, []),
('recvfrom_into', _recvfrom_into, False, []),
]
data_prefix = "PREFIX_"
for meth_name, send_meth, expect_success, args in send_methods:
indata = data_prefix + meth_name
try:
send_meth(indata.encode('ASCII', 'strict'), *args)
outdata = s.read()
outdata = str(outdata, 'ASCII', 'strict')
if outdata != indata.lower():
raise support.TestFailed(
"While sending with <<{name:s}>> bad data "
"<<{outdata:s}>> ({nout:d}) received; "
"expected <<{indata:s}>> ({nin:d})\n".format(
name=meth_name, outdata=repr(outdata[:20]),
nout=len(outdata),
indata=repr(indata[:20]), nin=len(indata)
)
)
except ValueError as e:
if expect_success:
raise support.TestFailed(
"Failed to send with method <<{name:s}>>; "
"expected to succeed.\n".format(name=meth_name)
)
if not str(e).startswith(meth_name):
raise support.TestFailed(
"Method <<{name:s}>> failed with unexpected "
"exception message: {exp:s}\n".format(
name=meth_name, exp=e
)
)
for meth_name, recv_meth, expect_success, args in recv_methods:
indata = data_prefix + meth_name
try:
s.send(indata.encode('ASCII', 'strict'))
outdata = recv_meth(*args)
outdata = str(outdata, 'ASCII', 'strict')
if outdata != indata.lower():
raise support.TestFailed(
"While receiving with <<{name:s}>> bad data "
"<<{outdata:s}>> ({nout:d}) received; "
"expected <<{indata:s}>> ({nin:d})\n".format(
name=meth_name, outdata=repr(outdata[:20]),
nout=len(outdata),
indata=repr(indata[:20]), nin=len(indata)
)
)
except ValueError as e:
if expect_success:
raise support.TestFailed(
"Failed to receive with method <<{name:s}>>; "
"expected to succeed.\n".format(name=meth_name)
)
if not str(e).startswith(meth_name):
raise support.TestFailed(
"Method <<{name:s}>> failed with unexpected "
"exception message: {exp:s}\n".format(
name=meth_name, exp=e
)
)
# consume data
s.read()
s.write("over\n".encode("ASCII", "strict"))
s.close()
finally:
server.stop()
server.join()
def test_main(verbose=False):
if skip_expected:
raise unittest.SkipTest("No SSL support")
global CERTFILE, SVN_PYTHON_ORG_ROOT_CERT
CERTFILE = os.path.join(os.path.dirname(__file__) or os.curdir,
"keycert.pem")
SVN_PYTHON_ORG_ROOT_CERT = os.path.join(
os.path.dirname(__file__) or os.curdir,
"https_svn_python_org_root.pem")
if (not os.path.exists(CERTFILE) or
not os.path.exists(SVN_PYTHON_ORG_ROOT_CERT)):
raise support.TestFailed("Can't read certificate files!")
tests = [BasicTests]
if support.is_resource_enabled('network'):
tests.append(NetworkedTests)
if _have_threads:
thread_info = support.threading_setup()
if thread_info and support.is_resource_enabled('network'):
tests.append(ThreadedTests)
support.run_unittest(*tests)
if _have_threads:
support.threading_cleanup(*thread_info)
if __name__ == "__main__":
test_main()
|
|
#!/usr/bin/env python2.5
#
# Copyright 2010 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for user profile related views.
"""
from datetime import date
from datetime import timedelta
from soc.modules.seeder.logic.seeder import logic as seeder_logic
from soc.modules.gci.models.profile import GCIProfile
from tests.test_utils import GCIDjangoTestCase
class ProfileViewTest(GCIDjangoTestCase):
"""Tests user profile views.
"""
def setUp(self):
from soc.modules.gci.models.profile import GCIProfile
from soc.modules.gci.models.profile import GCIStudentInfo
self.init()
program_suffix = self.gci.key().name()
self.url = '/gci/profile/%(program_suffix)s' % {
'program_suffix': program_suffix
}
self.validated_url = self.url + '?validated'
self.student_url = '/gci/profile/%(role)s/%(program_suffix)s' % {
'role': 'student',
'program_suffix': program_suffix
}
self.birth_date = str(date.today() - timedelta(365*15))
props = {}
    # We do not want to seed the data in the datastore; we just want the
    # properties that the seeder would generate. The POST in this test
    # will actually create the entities, so we reuse the seeder's
    # seed_properties method to get the most common values for GCIProfile
    # and GCIStudentInfo.
props.update(seeder_logic.seed_properties(GCIProfile))
props.update(seeder_logic.seed_properties(GCIStudentInfo))
props.update({
'student_info': None,
'status': 'active',
'is_org_admin': False,
'is_mentor': False,
'org_admin_for': [],
'mentor_for': [],
'scope': self.gci,
'birth_date': self.birth_date,
'res_country': 'Netherlands',
'ship_country': 'Netherlands',
})
self.default_props = props
# we have other tests that verify the age_check system
self.client.cookies['age_check'] = self.birth_date
def _updateDefaultProps(self, request_data):
"""Updates default_props variable with more personal data stored in
the specified request_data object.
"""
self.default_props.update({
'link_id': request_data.user.link_id,
'user': request_data.user,
'parent': request_data.user,
'email': request_data.user.account.email()
})
def assertProfileTemplatesUsed(self, response):
self.assertGCITemplatesUsed(response)
self.assertTemplateUsed(response, 'v2/modules/gci/profile/base.html')
self.assertTemplateUsed(response, 'v2/modules/gci/_form.html')
def testCreateProfilePage(self):
self.timeline.studentSignup()
url = '/gci/profile/student/' + self.gci.key().name()
self.client.cookies['age_check'] = '1'
response = self.get(url)
self.assertProfileTemplatesUsed(response)
def testCreateMentorProfilePage(self):
self.timeline.studentSignup()
url = '/gci/profile/mentor/' + self.gci.key().name()
response = self.get(url)
self.assertProfileTemplatesUsed(response)
def testRedirectWithStudentProfilePage(self):
self.timeline.studentSignup()
self.data.createStudent()
url = '/gci/profile/student/' + self.gci.key().name()
response = self.get(url)
redirect_url = '/gci/profile/' + self.gci.key().name()
self.assertResponseRedirect(response, redirect_url)
def testRedirectWithMentorProfilePage(self):
self.timeline.studentSignup()
self.data.createMentor(self.org)
url = '/gci/profile/mentor/' + self.gci.key().name()
response = self.get(url)
response_url = '/gci/profile/' + self.gci.key().name()
self.assertResponseRedirect(response, response_url)
def testForbiddenWithStudentProfilePage(self):
self.timeline.studentSignup()
self.data.createStudent()
url = '/gci/profile/mentor/' + self.gci.key().name()
response = self.get(url)
self.assertResponseForbidden(response)
url = '/gci/profile/org_admin/' + self.gci.key().name()
response = self.get(url)
self.assertResponseForbidden(response)
def testForbiddenWithMentorProfilePage(self):
self.timeline.studentSignup()
self.data.createMentor(self.org)
url = '/gci/profile/student/' + self.gci.key().name()
response = self.get(url)
self.assertResponseForbidden(response)
def testEditProfilePage(self):
self.timeline.studentSignup()
self.data.createProfile()
url = '/gci/profile/' + self.gci.key().name()
response = self.get(url)
self.assertResponseOK(response)
  # TODO(daniel): this test should work once we disable editing of profiles
  # after the project is over
#def testEditProfilePageInactive(self):
# self.timeline.offSeason()
# self.data.createProfile()
# url = '/gci/profile/' + self.gci.key().name()
# response = self.get(url)
# self.assertResponseForbidden(response)
def testCreateUser(self):
self.timeline.studentSignup()
self.default_props.update({
'link_id': 'test',
})
response = self.post(self.student_url, self.default_props)
self.assertResponseRedirect(response, self.validated_url)
self.assertEqual(1, GCIProfile.all().count())
student = GCIProfile.all().get()
self.assertEqual(self.birth_date, str(student.birth_date))
def testCreateUserNoLinkId(self):
self.timeline.studentSignup()
self.default_props.update({
})
response = self.post(self.student_url, self.default_props)
self.assertResponseOK(response)
self.assertTrue('link_id' in response.context['error'])
def testCreateProfile(self):
from soc.modules.gci.models.profile import GCIStudentInfo
self.timeline.studentSignup()
self.data.createUser()
self._updateDefaultProps(self.data)
postdata = self.default_props
response = self.post(self.student_url, postdata)
self.assertResponseRedirect(response, self.validated_url)
    # Hacky: delete the profile created by the first post so the form can
    # be submitted again below.
profile = GCIProfile.all().get()
profile.delete()
postdata.update({
'email': 'somerandominvalid@emailid',
})
response = self.post(self.student_url, postdata)
    # Yes, this is the protocol for form posts: whenever the form has an
    # error and cannot be submitted, we get an OK response whose body is
    # the form's GET page. This architecture was chosen to preserve the
    # form's error state while the error fields are rendered.
self.assertResponseOK(response)
error_dict = response.context['error']
self.assertTrue('email' in error_dict)
|
|
"""The tests for the automation component."""
import asyncio
from datetime import timedelta
import unittest
from unittest.mock import patch
from homeassistant.core import State
from homeassistant.setup import setup_component, async_setup_component
import homeassistant.components.automation as automation
from homeassistant.const import ATTR_ENTITY_ID, STATE_ON, STATE_OFF
from homeassistant.exceptions import HomeAssistantError
import homeassistant.util.dt as dt_util
from tests.common import (
assert_setup_component, get_test_home_assistant, fire_time_changed,
mock_component, mock_service, mock_restore_cache)
# pylint: disable=invalid-name
class TestAutomation(unittest.TestCase):
"""Test the event automation."""
def setUp(self):
"""Setup things to be run when tests are started."""
self.hass = get_test_home_assistant()
mock_component(self.hass, 'group')
self.calls = mock_service(self.hass, 'test', 'automation')
def tearDown(self):
"""Stop everything that was started."""
self.hass.stop()
def test_service_data_not_a_dict(self):
"""Test service data not dict."""
with assert_setup_component(0):
assert not setup_component(self.hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'event',
'event_type': 'test_event',
},
'action': {
'service': 'test.automation',
'data': 100,
}
}
})
def test_service_specify_data(self):
"""Test service data."""
assert setup_component(self.hass, automation.DOMAIN, {
automation.DOMAIN: {
'alias': 'hello',
'trigger': {
'platform': 'event',
'event_type': 'test_event',
},
'action': {
'service': 'test.automation',
'data_template': {
'some': '{{ trigger.platform }} - '
'{{ trigger.event.event_type }}'
},
}
}
})
time = dt_util.utcnow()
with patch('homeassistant.components.automation.utcnow',
return_value=time):
self.hass.bus.fire('test_event')
self.hass.block_till_done()
assert len(self.calls) == 1
assert self.calls[0].data['some'] == 'event - test_event'
state = self.hass.states.get('automation.hello')
assert state is not None
assert state.attributes.get('last_triggered') == time
state = self.hass.states.get('group.all_automations')
assert state is not None
assert state.attributes.get('entity_id') == ('automation.hello',)
def test_action_delay(self):
"""Test action delay."""
assert setup_component(self.hass, automation.DOMAIN, {
automation.DOMAIN: {
'alias': 'hello',
'trigger': {
'platform': 'event',
'event_type': 'test_event',
},
'action': [
{
'service': 'test.automation',
'data_template': {
'some': '{{ trigger.platform }} - '
'{{ trigger.event.event_type }}'
}
},
{'delay': {'minutes': '10'}},
{
'service': 'test.automation',
'data_template': {
'some': '{{ trigger.platform }} - '
'{{ trigger.event.event_type }}'
}
},
]
}
})
time = dt_util.utcnow()
with patch('homeassistant.components.automation.utcnow',
return_value=time):
self.hass.bus.fire('test_event')
self.hass.block_till_done()
assert len(self.calls) == 1
assert self.calls[0].data['some'] == 'event - test_event'
future = dt_util.utcnow() + timedelta(minutes=10)
fire_time_changed(self.hass, future)
self.hass.block_till_done()
assert len(self.calls) == 2
assert self.calls[1].data['some'] == 'event - test_event'
state = self.hass.states.get('automation.hello')
assert state is not None
assert state.attributes.get('last_triggered') == time
state = self.hass.states.get('group.all_automations')
assert state is not None
assert state.attributes.get('entity_id') == ('automation.hello',)
def test_service_specify_entity_id(self):
"""Test service data."""
assert setup_component(self.hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'event',
'event_type': 'test_event',
},
'action': {
'service': 'test.automation',
'entity_id': 'hello.world'
}
}
})
self.hass.bus.fire('test_event')
self.hass.block_till_done()
self.assertEqual(1, len(self.calls))
self.assertEqual(['hello.world'],
self.calls[0].data.get(ATTR_ENTITY_ID))
def test_service_initial_value_off(self):
"""Test initial value off."""
entity_id = 'automation.hello'
assert setup_component(self.hass, automation.DOMAIN, {
automation.DOMAIN: {
'alias': 'hello',
'initial_state': 'off',
'trigger': {
'platform': 'event',
'event_type': 'test_event',
},
'action': {
'service': 'test.automation',
'entity_id': ['hello.world', 'hello.world2']
}
}
})
assert not automation.is_on(self.hass, entity_id)
def test_service_initial_value_on(self):
"""Test initial value on."""
entity_id = 'automation.hello'
assert setup_component(self.hass, automation.DOMAIN, {
automation.DOMAIN: {
'alias': 'hello',
'initial_state': 'on',
'trigger': {
'platform': 'event',
'event_type': 'test_event',
},
'action': {
'service': 'test.automation',
'entity_id': ['hello.world', 'hello.world2']
}
}
})
assert automation.is_on(self.hass, entity_id)
def test_service_specify_entity_id_list(self):
"""Test service data."""
assert setup_component(self.hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'event',
'event_type': 'test_event',
},
'action': {
'service': 'test.automation',
'entity_id': ['hello.world', 'hello.world2']
}
}
})
self.hass.bus.fire('test_event')
self.hass.block_till_done()
self.assertEqual(1, len(self.calls))
self.assertEqual(['hello.world', 'hello.world2'],
self.calls[0].data.get(ATTR_ENTITY_ID))
def test_two_triggers(self):
"""Test triggers."""
assert setup_component(self.hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': [
{
'platform': 'event',
'event_type': 'test_event',
},
{
'platform': 'state',
'entity_id': 'test.entity',
}
],
'action': {
'service': 'test.automation',
}
}
})
self.hass.bus.fire('test_event')
self.hass.block_till_done()
self.assertEqual(1, len(self.calls))
self.hass.states.set('test.entity', 'hello')
self.hass.block_till_done()
self.assertEqual(2, len(self.calls))
def test_trigger_service_ignoring_condition(self):
"""Test triggers."""
assert setup_component(self.hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': [
{
'platform': 'event',
'event_type': 'test_event',
},
],
'condition': {
'condition': 'state',
'entity_id': 'non.existing',
'state': 'beer',
},
'action': {
'service': 'test.automation',
}
}
})
self.hass.bus.fire('test_event')
self.hass.block_till_done()
assert len(self.calls) == 0
self.hass.services.call('automation', 'trigger', blocking=True)
self.hass.block_till_done()
assert len(self.calls) == 1
def test_two_conditions_with_and(self):
"""Test two and conditions."""
entity_id = 'test.entity'
assert setup_component(self.hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': [
{
'platform': 'event',
'event_type': 'test_event',
},
],
'condition': [
{
'condition': 'state',
'entity_id': entity_id,
'state': '100'
},
{
'condition': 'numeric_state',
'entity_id': entity_id,
'below': 150
}
],
'action': {
'service': 'test.automation',
}
}
})
self.hass.states.set(entity_id, 100)
self.hass.bus.fire('test_event')
self.hass.block_till_done()
self.assertEqual(1, len(self.calls))
self.hass.states.set(entity_id, 101)
self.hass.bus.fire('test_event')
self.hass.block_till_done()
self.assertEqual(1, len(self.calls))
self.hass.states.set(entity_id, 151)
self.hass.bus.fire('test_event')
self.hass.block_till_done()
self.assertEqual(1, len(self.calls))
def test_automation_list_setting(self):
"""Event is not a valid condition."""
self.assertTrue(setup_component(self.hass, automation.DOMAIN, {
automation.DOMAIN: [{
'trigger': {
'platform': 'event',
'event_type': 'test_event',
},
'action': {
'service': 'test.automation',
}
}, {
'trigger': {
'platform': 'event',
'event_type': 'test_event_2',
},
'action': {
'service': 'test.automation',
}
}]
}))
self.hass.bus.fire('test_event')
self.hass.block_till_done()
self.assertEqual(1, len(self.calls))
self.hass.bus.fire('test_event_2')
self.hass.block_till_done()
self.assertEqual(2, len(self.calls))
def test_automation_calling_two_actions(self):
"""Test if we can call two actions from automation definition."""
self.assertTrue(setup_component(self.hass, automation.DOMAIN, {
automation.DOMAIN: {
'trigger': {
'platform': 'event',
'event_type': 'test_event',
},
'action': [{
'service': 'test.automation',
'data': {'position': 0},
}, {
'service': 'test.automation',
'data': {'position': 1},
}],
}
}))
self.hass.bus.fire('test_event')
self.hass.block_till_done()
assert len(self.calls) == 2
assert self.calls[0].data['position'] == 0
assert self.calls[1].data['position'] == 1
def test_services(self):
"""Test the automation services for turning entities on/off."""
entity_id = 'automation.hello'
assert self.hass.states.get(entity_id) is None
assert not automation.is_on(self.hass, entity_id)
assert setup_component(self.hass, automation.DOMAIN, {
automation.DOMAIN: {
'alias': 'hello',
'trigger': {
'platform': 'event',
'event_type': 'test_event',
},
'action': {
'service': 'test.automation',
}
}
})
assert self.hass.states.get(entity_id) is not None
assert automation.is_on(self.hass, entity_id)
self.hass.bus.fire('test_event')
self.hass.block_till_done()
assert len(self.calls) == 1
automation.turn_off(self.hass, entity_id)
self.hass.block_till_done()
assert not automation.is_on(self.hass, entity_id)
self.hass.bus.fire('test_event')
self.hass.block_till_done()
assert len(self.calls) == 1
automation.toggle(self.hass, entity_id)
self.hass.block_till_done()
assert automation.is_on(self.hass, entity_id)
self.hass.bus.fire('test_event')
self.hass.block_till_done()
assert len(self.calls) == 2
automation.trigger(self.hass, entity_id)
self.hass.block_till_done()
assert len(self.calls) == 3
automation.turn_off(self.hass, entity_id)
self.hass.block_till_done()
automation.trigger(self.hass, entity_id)
self.hass.block_till_done()
assert len(self.calls) == 4
automation.turn_on(self.hass, entity_id)
self.hass.block_till_done()
assert automation.is_on(self.hass, entity_id)
@patch('homeassistant.config.load_yaml_config_file', autospec=True,
return_value={
automation.DOMAIN: {
'alias': 'bye',
'trigger': {
'platform': 'event',
'event_type': 'test_event2',
},
'action': {
'service': 'test.automation',
'data_template': {
'event': '{{ trigger.event.event_type }}'
}
}
}
})
def test_reload_config_service(self, mock_load_yaml):
"""Test the reload config service."""
assert setup_component(self.hass, automation.DOMAIN, {
automation.DOMAIN: {
'alias': 'hello',
'trigger': {
'platform': 'event',
'event_type': 'test_event',
},
'action': {
'service': 'test.automation',
'data_template': {
'event': '{{ trigger.event.event_type }}'
}
}
}
})
assert self.hass.states.get('automation.hello') is not None
assert self.hass.states.get('automation.bye') is None
listeners = self.hass.bus.listeners
assert listeners.get('test_event') == 1
assert listeners.get('test_event2') is None
self.hass.bus.fire('test_event')
self.hass.block_till_done()
assert len(self.calls) == 1
assert self.calls[0].data.get('event') == 'test_event'
automation.reload(self.hass)
self.hass.block_till_done()
            # An extra block_till_done to de-flake the reload before asserting.
self.hass.block_till_done()
assert self.hass.states.get('automation.hello') is None
assert self.hass.states.get('automation.bye') is not None
listeners = self.hass.bus.listeners
assert listeners.get('test_event') is None
assert listeners.get('test_event2') == 1
self.hass.bus.fire('test_event')
self.hass.block_till_done()
assert len(self.calls) == 1
self.hass.bus.fire('test_event2')
self.hass.block_till_done()
assert len(self.calls) == 2
assert self.calls[1].data.get('event') == 'test_event2'
@patch('homeassistant.config.load_yaml_config_file', autospec=True,
return_value={automation.DOMAIN: 'not valid'})
def test_reload_config_when_invalid_config(self, mock_load_yaml):
"""Test the reload config service handling invalid config."""
with assert_setup_component(1):
assert setup_component(self.hass, automation.DOMAIN, {
automation.DOMAIN: {
'alias': 'hello',
'trigger': {
'platform': 'event',
'event_type': 'test_event',
},
'action': {
'service': 'test.automation',
'data_template': {
'event': '{{ trigger.event.event_type }}'
}
}
}
})
assert self.hass.states.get('automation.hello') is not None
self.hass.bus.fire('test_event')
self.hass.block_till_done()
assert len(self.calls) == 1
assert self.calls[0].data.get('event') == 'test_event'
automation.reload(self.hass)
self.hass.block_till_done()
assert self.hass.states.get('automation.hello') is None
self.hass.bus.fire('test_event')
self.hass.block_till_done()
assert len(self.calls) == 1
def test_reload_config_handles_load_fails(self):
"""Test the reload config service."""
assert setup_component(self.hass, automation.DOMAIN, {
automation.DOMAIN: {
'alias': 'hello',
'trigger': {
'platform': 'event',
'event_type': 'test_event',
},
'action': {
'service': 'test.automation',
'data_template': {
'event': '{{ trigger.event.event_type }}'
}
}
}
})
assert self.hass.states.get('automation.hello') is not None
self.hass.bus.fire('test_event')
self.hass.block_till_done()
assert len(self.calls) == 1
assert self.calls[0].data.get('event') == 'test_event'
with patch('homeassistant.config.load_yaml_config_file',
side_effect=HomeAssistantError('bla')):
automation.reload(self.hass)
self.hass.block_till_done()
assert self.hass.states.get('automation.hello') is not None
self.hass.bus.fire('test_event')
self.hass.block_till_done()
assert len(self.calls) == 2
@asyncio.coroutine
def test_automation_restore_state(hass):
"""Ensure states are restored on startup."""
time = dt_util.utcnow()
mock_restore_cache(hass, (
State('automation.hello', STATE_ON),
State('automation.bye', STATE_OFF, {'last_triggered': time}),
))
config = {automation.DOMAIN: [{
'alias': 'hello',
'trigger': {
'platform': 'event',
'event_type': 'test_event_hello',
},
'action': {'service': 'test.automation'}
}, {
'alias': 'bye',
'trigger': {
'platform': 'event',
'event_type': 'test_event_bye',
},
'action': {'service': 'test.automation'}
}]}
assert (yield from async_setup_component(hass, automation.DOMAIN, config))
state = hass.states.get('automation.hello')
assert state
assert state.state == STATE_ON
state = hass.states.get('automation.bye')
assert state
assert state.state == STATE_OFF
assert state.attributes.get('last_triggered') == time
calls = mock_service(hass, 'test', 'automation')
assert automation.is_on(hass, 'automation.bye') is False
hass.bus.async_fire('test_event_bye')
yield from hass.async_block_till_done()
assert len(calls) == 0
assert automation.is_on(hass, 'automation.hello')
hass.bus.async_fire('test_event_hello')
yield from hass.async_block_till_done()
assert len(calls) == 1
|
|
# -*- coding: utf-8 -*-
#
# ChainerCV documentation build configuration file, created by
# sphinx-quickstart on Fri Mar 3 16:38:20 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
import inspect
import os
import pkg_resources
import sys
__version__ = pkg_resources.get_distribution('chainercv').version
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
rtd_version = os.environ.get('READTHEDOCS_VERSION')
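# Source links point at 'master' for the Read the Docs 'latest' build and at
# the released tag ('v<version>') for every other build.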
if rtd_version == 'latest':
tag = 'master'
else:
tag = 'v{}'.format(__version__)
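# Shortcut roles (:blob:`...` and :tree:`...`) for sphinx.ext.extlinks that
# expand to GitHub URLs at the selected tag.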
extlinks = {
'blob':
('https://github.com/chainer/chainercv/blob/{}/%s'.format(tag), ''),
'tree':
('https://github.com/chainer/chainercv/tree/{}/%s'.format(tag), ''),
}
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.mathjax',
'sphinx.ext.napoleon',
'sphinx.ext.linkcode']
try:
import sphinxcontrib.spelling # noqa
extensions.append('sphinxcontrib.spelling')
except ImportError:
pass
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffixes as a list of strings:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'ChainerCV'
copyright = u'2017, Preferred Networks, inc.'
author = u'Preferred Networks, inc.'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'0.13.1'
# The full version, including alpha/beta/rc tags.
release = u'0.13.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# These patterns also affect html_static_path and html_extra_path.
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# Napoleon settings
napoleon_use_ivar = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
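# Read the Docs applies its own theme on its build servers, so only set the
# theme explicitly for local builds.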
if not on_rtd:
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_style = 'css/modified_theme.css'
if on_rtd:
html_context = {
'css_files': [
'https://media.readthedocs.org/css/sphinx_rtd_theme.css',
'https://media.readthedocs.org/css/readthedocs-doc-embed.css',
'_static/css/modified_theme.css',
],
}
# -- Options for HTMLHelp output ------------------------------------------
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
# Output file base name for HTML help builder.
htmlhelp_basename = 'ChainerCVdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {}
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'ChainerCV.tex', u'ChainerCV Documentation',
u'Preferred Networks, inc.', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'chainercv', u'ChainerCV Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'ChainerCV', u'ChainerCV Documentation',
author, 'ChainerCV', 'One line description of project.',
'Miscellaneous'),
]
autosummary_generate = True
intersphinx_mapping = {
'python': ('https://docs.python.org/3/', None),
'numpy': ('http://docs.scipy.org/doc/numpy/', None),
}
source_root = None
def _is_egg_directory(path):
return (path.endswith('.egg') and
os.path.isdir(os.path.join(path, 'EGG-INFO')))
def _is_git_root(path):
return os.path.isdir(os.path.join(path, '.git'))
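# Resolve `fullname` (e.g. 'SomeClass.method') relative to an already-imported
# module by walking getattr(); returns None if the module is not loaded.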
def _import_object_from_name(module_name, fullname):
obj = sys.modules.get(module_name)
if obj is None:
return None
for comp in fullname.split('.'):
obj = getattr(obj, comp)
return obj
_source_root = None
def _find_source_root(source_abs_path):
    # Note that the READTHEDOCS* environment variables cannot be used, because
    # they are not set in the Docker environment.
global _source_root
if _source_root is None:
dir = os.path.dirname(source_abs_path)
while True:
if _is_egg_directory(dir) or _is_git_root(dir):
# Reached the root directory
_source_root = dir
break
dir_ = os.path.dirname(dir)
if len(dir_) == len(dir):
raise RuntimeError('Couldn\'t parse root directory from '
'source file: {}'.format(source_abs_path))
dir = dir_
return _source_root
def _get_source_relative_path(source_abs_path):
return os.path.relpath(source_abs_path, _find_source_root(source_abs_path))
def _get_sourcefile_and_linenumber(obj):
# Retrieve the original function wrapped by contextlib.contextmanager
if callable(obj):
closure = getattr(obj, '__closure__', None)
if closure is not None:
obj = closure[0].cell_contents
# Get the source file name and line number at which obj is defined.
try:
filename = inspect.getsourcefile(obj)
except TypeError:
        # obj is not a module, class, function, etc.
return None, None
# inspect can return None for cython objects
if filename is None:
return None, None
# Get the source line number
_, linenum = inspect.getsourcelines(obj)
return filename, linenum
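# Hook for sphinx.ext.linkcode: map a documented Python object to the URL of
# its source on GitHub at the selected tag, or return None when no source
# link should be generated.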
def linkcode_resolve(domain, info):
if domain != 'py' or not info['module']:
return None
# Import the object from module path
obj = _import_object_from_name(info['module'], info['fullname'])
    # If the object is not defined inside the chainercv package, return None.
mod = inspect.getmodule(obj)
if mod is None:
return None
if not (mod.__name__ == 'chainercv'
or mod.__name__.startswith('chainercv.')):
return None
# Retrieve source file name and line number
filename, linenum = _get_sourcefile_and_linenumber(obj)
if filename is None or linenum is None:
return None
filename = os.path.realpath(filename)
relpath = _get_source_relative_path(filename)
return 'https://github.com/chainer/chainercv/blob/{}/{}#L{}'.format(
tag, relpath, linenum)
|
|
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test scenario configs."""
import collections
import dataclasses
from typing import AbstractSet, Collection, Mapping, Sequence
import immutabledict
@dataclasses.dataclass(frozen=True)
class Scenario:
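  """Configuration of a single test scenario.
  Attributes:
    description: Human-readable description of the scenario.
    tags: Tags used to categorize and filter scenarios.
    substrate: Name of the substrate the scenario is played on.
    is_focal: Per-player-slot flags; True marks slots controlled by the focal
      population under test, False marks slots filled by background bots.
    bots: Names of the background bots that may fill the non-focal slots.
  """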
description: str
tags: AbstractSet[str]
substrate: str
is_focal: Sequence[bool]
bots: AbstractSet[str]
SCENARIOS: Mapping[str, Scenario] = immutabledict.immutabledict(
# keep-sorted start numeric=yes block=yes
allelopathic_harvest_0=Scenario(
description='focals are resident and a visitor prefers green',
tags=frozenset({
'resident',
}),
substrate='allelopathic_harvest',
is_focal=(True,) * 15 + (False,) * 1,
bots=frozenset({
'ah3gs_bot_finding_berry_two_the_most_tasty_0',
'ah3gs_bot_finding_berry_two_the_most_tasty_1',
'ah3gs_bot_finding_berry_two_the_most_tasty_4',
'ah3gs_bot_finding_berry_two_the_most_tasty_5',
}),
),
allelopathic_harvest_1=Scenario(
description='visiting a green preferring population',
tags=frozenset({
'convention_following',
'visitor',
}),
substrate='allelopathic_harvest',
is_focal=(True,) * 4 + (False,) * 12,
bots=frozenset({
'ah3gs_bot_finding_berry_two_the_most_tasty_0',
'ah3gs_bot_finding_berry_two_the_most_tasty_1',
'ah3gs_bot_finding_berry_two_the_most_tasty_4',
'ah3gs_bot_finding_berry_two_the_most_tasty_5',
}),
),
arena_running_with_scissors_in_the_matrix_0=Scenario(
description='versus gullible bots',
tags=frozenset({
'deception',
'half_and_half',
'versus_free',
}),
substrate='arena_running_with_scissors_in_the_matrix',
is_focal=(True,) * 4 + (False,) * 4,
bots=frozenset({
'arena_rws_free_0',
'arena_rws_free_1',
'arena_rws_free_2',
}),
),
arena_running_with_scissors_in_the_matrix_1=Scenario(
description='versus mixture of pure bots',
tags=frozenset({
'half_and_half',
'versus_pure_all',
}),
substrate='arena_running_with_scissors_in_the_matrix',
is_focal=(True,) * 4 + (False,) * 4,
bots=frozenset({
'arena_rws_pure_paper_0',
'arena_rws_pure_paper_1',
'arena_rws_pure_paper_2',
'arena_rws_pure_paper_3',
'arena_rws_pure_rock_0',
'arena_rws_pure_rock_1',
'arena_rws_pure_rock_2',
'arena_rws_pure_rock_3',
'arena_rws_pure_scissors_0',
'arena_rws_pure_scissors_1',
'arena_rws_pure_scissors_2',
'arena_rws_pure_scissors_3',
}),
),
arena_running_with_scissors_in_the_matrix_2=Scenario(
description='versus pure rock bots',
tags=frozenset({
'half_and_half',
'versus_pure_rock',
}),
substrate='arena_running_with_scissors_in_the_matrix',
is_focal=(True,) * 4 + (False,) * 4,
bots=frozenset({
'arena_rws_pure_rock_0',
'arena_rws_pure_rock_1',
'arena_rws_pure_rock_2',
'arena_rws_pure_rock_3',
}),
),
arena_running_with_scissors_in_the_matrix_3=Scenario(
description='versus pure paper bots',
tags=frozenset({
'half_and_half',
'versus_pure_paper',
}),
substrate='arena_running_with_scissors_in_the_matrix',
is_focal=(True,) * 4 + (False,) * 4,
bots=frozenset({
'arena_rws_pure_paper_0',
'arena_rws_pure_paper_1',
'arena_rws_pure_paper_2',
'arena_rws_pure_paper_3',
}),
),
arena_running_with_scissors_in_the_matrix_4=Scenario(
description='versus pure scissors bots',
tags=frozenset({
'half_and_half',
'versus_pure_scissors',
}),
substrate='arena_running_with_scissors_in_the_matrix',
is_focal=(True,) * 4 + (False,) * 4,
bots=frozenset({
'arena_rws_pure_scissors_0',
'arena_rws_pure_scissors_1',
'arena_rws_pure_scissors_2',
'arena_rws_pure_scissors_3',
}),
),
bach_or_stravinsky_in_the_matrix_0=Scenario(
description='visiting pure bach fans',
tags=frozenset({
'convention_following',
'versus_pure_bach',
'visitor',
}),
substrate='bach_or_stravinsky_in_the_matrix',
is_focal=(True,) * 1 + (False,) * 7,
bots=frozenset({
'bach_fan_0',
'bach_fan_1',
'bach_fan_2',
}),
),
bach_or_stravinsky_in_the_matrix_1=Scenario(
description='visiting pure stravinsky fans',
tags=frozenset({
'convention_following',
'versus_pure_stravinsky',
'visitor',
}),
substrate='bach_or_stravinsky_in_the_matrix',
is_focal=(True,) * 1 + (False,) * 7,
bots=frozenset({
'stravinsky_fan_0',
'stravinsky_fan_1',
'stravinsky_fan_2',
}),
),
capture_the_flag_0=Scenario(
description='focal team versus shaped a3c bot team',
tags=frozenset({
'half_and_half',
'learned_teamwork',
}),
substrate='capture_the_flag',
is_focal=(True, False) * 4,
bots=frozenset({
'ctf_pseudorewards_for_main_game_events_a3c_2',
'ctf_pseudorewards_for_main_game_events_a3c_6',
}),
),
capture_the_flag_1=Scenario(
description='focal team versus shaped vmpo bot team',
tags=frozenset({
'half_and_half',
'learned_teamwork',
}),
substrate='capture_the_flag',
        is_focal=(True, False) * 4,
bots=frozenset({
'ctf_pseudorewards_for_main_game_events_vmpo_0',
'ctf_pseudorewards_for_main_game_events_vmpo_3',
'ctf_pseudorewards_for_main_game_events_vmpo_4',
'ctf_pseudorewards_for_main_game_events_vmpo_6',
'ctf_pseudorewards_for_main_game_events_vmpo_7',
}),
),
capture_the_flag_2=Scenario(
description='ad hoc teamwork with shaped a3c bots',
tags=frozenset({
'ad_hoc_teamwork',
'visitor',
}),
substrate='capture_the_flag',
is_focal=(True,) * 1 + (False,) * 7,
bots=frozenset({
'ctf_pseudorewards_for_main_game_events_a3c_2',
'ctf_pseudorewards_for_main_game_events_a3c_6',
}),
),
capture_the_flag_3=Scenario(
description='ad hoc teamwork with shaped vmpo bots',
tags=frozenset({
'ad_hoc_teamwork',
'visitor',
}),
substrate='capture_the_flag',
is_focal=(True,) * 1 + (False,) * 7,
bots=frozenset({
'ctf_pseudorewards_for_main_game_events_vmpo_0',
'ctf_pseudorewards_for_main_game_events_vmpo_3',
'ctf_pseudorewards_for_main_game_events_vmpo_4',
'ctf_pseudorewards_for_main_game_events_vmpo_6',
'ctf_pseudorewards_for_main_game_events_vmpo_7',
}),
),
chemistry_branched_chain_reaction_0=Scenario(
description='focals meet X preferring bots',
tags=frozenset({
'half_and_half',
}),
substrate='chemistry_branched_chain_reaction',
is_focal=(True,) * 4 + (False,) * 4,
bots=frozenset({
'chemistry_branched_chain_reaction_X_specialist_0',
'chemistry_branched_chain_reaction_X_specialist_1',
'chemistry_branched_chain_reaction_X_specialist_2',
}),
),
chemistry_branched_chain_reaction_1=Scenario(
description='focals meet Y preferring bots',
tags=frozenset({
'half_and_half',
}),
substrate='chemistry_branched_chain_reaction',
is_focal=(True,) * 4 + (False,) * 4,
bots=frozenset({
'chemistry_branched_chain_reaction_Y_specialist_0',
'chemistry_branched_chain_reaction_Y_specialist_1',
'chemistry_branched_chain_reaction_Y_specialist_2',
}),
),
chemistry_branched_chain_reaction_2=Scenario(
description='focals are resident',
tags=frozenset({
'resident',
}),
substrate='chemistry_branched_chain_reaction',
is_focal=(True,) * 7 + (False,) * 1,
bots=frozenset({
'chemistry_branched_chain_reaction_X_specialist_0',
'chemistry_branched_chain_reaction_X_specialist_1',
'chemistry_branched_chain_reaction_X_specialist_2',
'chemistry_branched_chain_reaction_Y_specialist_0',
'chemistry_branched_chain_reaction_Y_specialist_1',
'chemistry_branched_chain_reaction_Y_specialist_2',
}),
),
chemistry_branched_chain_reaction_3=Scenario(
description='visiting another population',
tags=frozenset({
'convention_following',
'visitor',
}),
substrate='chemistry_branched_chain_reaction',
is_focal=(True,) * 1 + (False,) * 7,
bots=frozenset({
'chemistry_branched_chain_reaction_X_specialist_0',
'chemistry_branched_chain_reaction_X_specialist_1',
'chemistry_branched_chain_reaction_X_specialist_2',
'chemistry_branched_chain_reaction_Y_specialist_0',
'chemistry_branched_chain_reaction_Y_specialist_1',
'chemistry_branched_chain_reaction_Y_specialist_2',
}),
),
chemistry_metabolic_cycles_0=Scenario(
description='focals meet food1 preferring bots',
tags=frozenset({
'half_and_half',
}),
substrate='chemistry_metabolic_cycles',
is_focal=(True,) * 4 + (False,) * 4,
bots=frozenset({
'chemistry_metabolic_cycles_food1_specialist_0',
'chemistry_metabolic_cycles_food1_specialist_1',
}),
),
chemistry_metabolic_cycles_1=Scenario(
description='focals meet food2 preferring bots',
tags=frozenset({
'half_and_half',
}),
substrate='chemistry_metabolic_cycles',
is_focal=(True,) * 4 + (False,) * 4,
bots=frozenset({
'chemistry_metabolic_cycles_food2_specialist_0',
'chemistry_metabolic_cycles_food2_specialist_1',
}),
),
chemistry_metabolic_cycles_2=Scenario(
description='focals are resident',
tags=frozenset({
'resident',
}),
substrate='chemistry_metabolic_cycles',
is_focal=(True,) * 7 + (False,) * 1,
bots=frozenset({
'chemistry_metabolic_cycles_food1_specialist_0',
'chemistry_metabolic_cycles_food1_specialist_1',
'chemistry_metabolic_cycles_food2_specialist_0',
'chemistry_metabolic_cycles_food2_specialist_1',
}),
),
chemistry_metabolic_cycles_3=Scenario(
description='visiting another population',
tags=frozenset({
'visitor',
}),
substrate='chemistry_metabolic_cycles',
is_focal=(True,) * 1 + (False,) * 7,
bots=frozenset({
'chemistry_metabolic_cycles_food1_specialist_0',
'chemistry_metabolic_cycles_food1_specialist_1',
'chemistry_metabolic_cycles_food2_specialist_0',
'chemistry_metabolic_cycles_food2_specialist_1',
}),
),
chicken_in_the_matrix_0=Scenario(
description='meeting a mixture of pure bots',
tags=frozenset({
'half_and_half',
'versus_pure_all',
}),
substrate='chicken_in_the_matrix',
is_focal=(True,) * 4 + (False,) * 4,
bots=frozenset({
'chicken_pure_dove_0',
'chicken_pure_dove_1',
'chicken_pure_dove_2',
'chicken_pure_dove_3',
'chicken_pure_hawk_0',
'chicken_pure_hawk_1',
'chicken_pure_hawk_2',
'chicken_pure_hawk_3',
}),
),
chicken_in_the_matrix_1=Scenario(
description='visiting a pure dove population',
tags=frozenset({
'versus_pure_dove',
'visitor',
}),
substrate='chicken_in_the_matrix',
is_focal=(True,) * 1 + (False,) * 7,
bots=frozenset({
'chicken_pure_dove_0',
'chicken_pure_dove_1',
'chicken_pure_dove_2',
'chicken_pure_dove_3',
}),
),
chicken_in_the_matrix_2=Scenario(
description='focals are resident and visitors are hawks',
tags=frozenset({
'resident',
'versus_pure_hawk',
}),
substrate='chicken_in_the_matrix',
is_focal=(True,) * 5 + (False,) * 3,
bots=frozenset({
'chicken_pure_hawk_0',
'chicken_pure_hawk_1',
'chicken_pure_hawk_2',
'chicken_pure_hawk_3',
}),
),
chicken_in_the_matrix_3=Scenario(
description='visiting a gullible population',
tags=frozenset({
'deception',
'versus_free',
'visitor',
}),
substrate='chicken_in_the_matrix',
is_focal=(True,) * 1 + (False,) * 7,
bots=frozenset({
'chicken_free_0',
'chicken_free_1',
'chicken_free_2',
'chicken_free_3',
}),
),
chicken_in_the_matrix_4=Scenario(
description='visiting grim reciprocators',
tags=frozenset({
'reciprocity',
'versus_puppet',
'visitor',
}),
substrate='chicken_in_the_matrix',
is_focal=(True,) * 2 + (False,) * 6,
bots=frozenset({
'chicken_puppet_grim',
}),
),
clean_up_0=Scenario(
description='visiting an altruistic population',
tags=frozenset({
'versus_cleaners',
'visitor',
}),
substrate='clean_up',
is_focal=(True,) * 3 + (False,) * 4,
bots=frozenset({
'cleanup_cleaner_1',
'cleanup_cleaner_2',
}),
),
clean_up_1=Scenario(
description='focals are resident and visitors free ride',
tags=frozenset({
'resident',
'versus_consumers',
}),
substrate='clean_up',
is_focal=(True,) * 4 + (False,) * 3,
bots=frozenset({
'cleanup_consumer_0',
'cleanup_consumer_1',
'cleanup_consumer_2',
}),
),
clean_up_2=Scenario(
description='visiting a turn-taking population that cleans first',
tags=frozenset({
'versus_puppet',
'visitor',
}),
substrate='clean_up',
is_focal=(True,) * 3 + (False,) * 4,
bots=frozenset({
'cleanup_puppet_alternate_clean_first',
}),
),
clean_up_3=Scenario(
description='visiting a turn-taking population that eats first',
tags=frozenset({
'versus_puppet',
'visitor',
}),
substrate='clean_up',
is_focal=(True,) * 3 + (False,) * 4,
bots=frozenset({
'cleanup_puppet_alternate_eat_first',
}),
),
clean_up_4=Scenario(
description='focals are visited by one reciprocator',
tags=frozenset({
'resident',
'versus_puppet',
}),
substrate='clean_up',
is_focal=(True,) * 6 + (False,) * 1,
bots=frozenset({
'cleanup_puppet_reciprocator_threshold_low',
}),
),
clean_up_5=Scenario(
description='focals are visited by two suspicious reciprocators',
tags=frozenset({
'resident',
'versus_puppet',
}),
substrate='clean_up',
is_focal=(True,) * 5 + (False,) * 2,
bots=frozenset({
'cleanup_puppet_reciprocator_threshold_mid',
}),
),
clean_up_6=Scenario(
description='focals are visited by one suspicious reciprocator',
tags=frozenset({
'resident',
'versus_puppet',
}),
substrate='clean_up',
is_focal=(True,) * 6 + (False,) * 1,
bots=frozenset({
'cleanup_puppet_reciprocator_threshold_mid',
}),
),
collaborative_cooking_impassable_0=Scenario(
description='visiting a vmpo population',
tags=frozenset({
'convention_following',
'visitor',
}),
substrate='collaborative_cooking_impassable',
is_focal=(True,) * 1 + (False,) * 3,
bots=frozenset({
'collaborative_cooking_impassable_vmpo_pop_size_ten_0',
'collaborative_cooking_impassable_vmpo_pop_size_ten_2',
'collaborative_cooking_impassable_vmpo_pop_size_ten_3',
'collaborative_cooking_impassable_vmpo_pop_size_ten_4',
'collaborative_cooking_impassable_vmpo_pop_size_ten_6',
'collaborative_cooking_impassable_vmpo_pop_size_ten_7',
'collaborative_cooking_impassable_vmpo_pop_size_ten_9',
}),
),
collaborative_cooking_impassable_1=Scenario(
description='focals are resident',
tags=frozenset({
'resident',
}),
substrate='collaborative_cooking_impassable',
is_focal=(True,) * 3 + (False,) * 1,
bots=frozenset({
'collaborative_cooking_impassable_vmpo_pop_size_ten_0',
'collaborative_cooking_impassable_vmpo_pop_size_ten_2',
'collaborative_cooking_impassable_vmpo_pop_size_ten_3',
'collaborative_cooking_impassable_vmpo_pop_size_ten_4',
'collaborative_cooking_impassable_vmpo_pop_size_ten_6',
'collaborative_cooking_impassable_vmpo_pop_size_ten_7',
'collaborative_cooking_impassable_vmpo_pop_size_ten_9',
}),
),
collaborative_cooking_passable_0=Scenario(
description='visiting uncoordinated generalists',
tags=frozenset({
'convention_following',
'versus_uncoordinated_generalist',
'visitor',
}),
substrate='collaborative_cooking_passable',
is_focal=(True,) * 1 + (False,) * 3,
bots=frozenset({
'collaborative_cooking_passable_vmpo_pop_size_ten_5',
}),
),
collaborative_cooking_passable_1=Scenario(
description='focals are resident and visited by an uncoordinated generalist',
tags=frozenset({
'resident',
'versus_uncoordinated_generalist',
}),
substrate='collaborative_cooking_passable',
is_focal=(True,) * 3 + (False,) * 1,
bots=frozenset({
'collaborative_cooking_passable_vmpo_pop_size_ten_5',
}),
),
commons_harvest_closed_0=Scenario(
description='focals are resident and visited by two zappers',
tags=frozenset({
'resident',
}),
substrate='commons_harvest_closed',
is_focal=(True,) * 14 + (False,) * 2,
bots=frozenset({
'closed_commons_zapper_0',
'closed_commons_zapper_1',
'closed_commons_zapper_2',
'closed_commons_zapper_3',
}),
),
commons_harvest_closed_1=Scenario(
description='focals are resident and visited by six zappers',
tags=frozenset({
'resident',
}),
substrate='commons_harvest_closed',
is_focal=(True,) * 10 + (False,) * 6,
bots=frozenset({
'closed_commons_zapper_0',
'closed_commons_zapper_1',
'closed_commons_zapper_2',
'closed_commons_zapper_3',
}),
),
commons_harvest_closed_2=Scenario(
description='visiting a population of zappers',
tags=frozenset({
'visitor',
}),
substrate='commons_harvest_closed',
is_focal=(True,) * 4 + (False,) * 12,
bots=frozenset({
'closed_commons_zapper_0',
'closed_commons_zapper_1',
'closed_commons_zapper_2',
'closed_commons_zapper_3',
}),
),
commons_harvest_open_0=Scenario(
description='focals are resident and visited by two zappers',
tags=frozenset({
'resident',
}),
substrate='commons_harvest_open',
is_focal=(True,) * 14 + (False,) * 2,
bots=frozenset({
'open_commons_zapper_0',
'open_commons_zapper_1',
}),
),
commons_harvest_open_1=Scenario(
description='focals are resident and visited by six zappers',
tags=frozenset({
'resident',
}),
substrate='commons_harvest_open',
is_focal=(True,) * 10 + (False,) * 6,
bots=frozenset({
'open_commons_zapper_0',
'open_commons_zapper_1',
}),
),
commons_harvest_partnership_0=Scenario(
description='meeting good partners',
tags=frozenset({
'half_and_half',
'versus_good_partners',
}),
substrate='commons_harvest_partnership',
is_focal=(True,) * 8 + (False,) * 8,
bots=frozenset({
'partnership_commons_putative_good_partner_4',
'partnership_commons_putative_good_partner_5',
'partnership_commons_putative_good_partner_7',
}),
),
commons_harvest_partnership_1=Scenario(
description='focals are resident and visitors are good partners',
tags=frozenset({
'resident',
'versus_good_partners',
}),
substrate='commons_harvest_partnership',
is_focal=(True,) * 12 + (False,) * 4,
bots=frozenset({
'partnership_commons_putative_good_partner_4',
'partnership_commons_putative_good_partner_5',
'partnership_commons_putative_good_partner_7',
}),
),
commons_harvest_partnership_2=Scenario(
description='visiting a population of good partners',
tags=frozenset({
'versus_good_partners',
'visitor',
}),
substrate='commons_harvest_partnership',
is_focal=(True,) * 4 + (False,) * 12,
bots=frozenset({
'partnership_commons_putative_good_partner_4',
'partnership_commons_putative_good_partner_5',
'partnership_commons_putative_good_partner_7',
}),
),
commons_harvest_partnership_3=Scenario(
description='focals are resident and visited by two zappers',
tags=frozenset({
'resident',
'versus_zappers',
}),
substrate='commons_harvest_partnership',
is_focal=(True,) * 14 + (False,) * 2,
bots=frozenset({
'partnership_commons_zapper_1',
'partnership_commons_zapper_2',
}),
),
commons_harvest_partnership_4=Scenario(
description='focals are resident and visited by six zappers',
tags=frozenset({
'resident',
'versus_zappers',
}),
substrate='commons_harvest_partnership',
is_focal=(True,) * 10 + (False,) * 6,
bots=frozenset({
'partnership_commons_zapper_1',
'partnership_commons_zapper_2',
}),
),
commons_harvest_partnership_5=Scenario(
description='visiting a population of zappers',
tags=frozenset({
'versus_zappers',
'visitor',
}),
substrate='commons_harvest_partnership',
is_focal=(True,) * 4 + (False,) * 12,
bots=frozenset({
'partnership_commons_zapper_1',
'partnership_commons_zapper_2',
}),
),
king_of_the_hill_0=Scenario(
description='focal team versus default vmpo bot team',
tags=frozenset({
'half_and_half',
'learned_teamwork',
}),
substrate='king_of_the_hill',
is_focal=(True, False) * 4,
bots=frozenset({
'koth_default_vmpo_0',
'koth_default_vmpo_1',
'koth_default_vmpo_2',
'koth_default_vmpo_3',
'koth_default_vmpo_4',
'koth_default_vmpo_5',
'koth_default_vmpo_6',
'koth_default_vmpo_7',
}),
),
king_of_the_hill_1=Scenario(
description='focal team versus shaped a3c bot team',
tags=frozenset({
'half_and_half',
'learned_teamwork',
}),
substrate='king_of_the_hill',
is_focal=(True, False) * 4,
bots=frozenset({
'koth_zap_while_in_control_a3c_0',
'koth_zap_while_in_control_a3c_1',
'koth_zap_while_in_control_a3c_2',
'koth_zap_while_in_control_a3c_3',
'koth_zap_while_in_control_a3c_4',
'koth_zap_while_in_control_a3c_5',
'koth_zap_while_in_control_a3c_6',
'koth_zap_while_in_control_a3c_7',
}),
),
king_of_the_hill_2=Scenario(
description='focal team versus shaped vmpo bot team',
tags=frozenset({
'half_and_half',
'learned_teamwork',
}),
substrate='king_of_the_hill',
is_focal=(True, False) * 4,
bots=frozenset({
'koth_zap_while_in_control_vmpo_0',
'koth_zap_while_in_control_vmpo_1',
'koth_zap_while_in_control_vmpo_2',
'koth_zap_while_in_control_vmpo_3',
'koth_zap_while_in_control_vmpo_4',
'koth_zap_while_in_control_vmpo_5',
'koth_zap_while_in_control_vmpo_6',
'koth_zap_while_in_control_vmpo_7',
}),
),
king_of_the_hill_3=Scenario(
description='ad hoc teamwork with default vmpo bots',
tags=frozenset({
'ad_hoc_teamwork',
'visitor',
}),
substrate='king_of_the_hill',
is_focal=(True,) * 1 + (False,) * 7,
bots=frozenset({
'koth_default_vmpo_0',
'koth_default_vmpo_1',
'koth_default_vmpo_2',
'koth_default_vmpo_3',
'koth_default_vmpo_4',
'koth_default_vmpo_5',
'koth_default_vmpo_6',
'koth_default_vmpo_7',
}),
),
king_of_the_hill_4=Scenario(
description='ad hoc teamwork with shaped a3c bots',
tags=frozenset({
'ad_hoc_teamwork',
'visitor',
}),
substrate='king_of_the_hill',
is_focal=(True,) * 1 + (False,) * 7,
bots=frozenset({
'koth_zap_while_in_control_a3c_0',
'koth_zap_while_in_control_a3c_1',
'koth_zap_while_in_control_a3c_2',
'koth_zap_while_in_control_a3c_3',
'koth_zap_while_in_control_a3c_4',
'koth_zap_while_in_control_a3c_5',
'koth_zap_while_in_control_a3c_6',
'koth_zap_while_in_control_a3c_7',
}),
),
king_of_the_hill_5=Scenario(
description='ad hoc teamwork with shaped vmpo bots',
tags=frozenset({
'ad_hoc_teamwork',
'visitor',
}),
substrate='king_of_the_hill',
is_focal=(True,) * 1 + (False,) * 7,
bots=frozenset({
'koth_zap_while_in_control_vmpo_0',
'koth_zap_while_in_control_vmpo_1',
'koth_zap_while_in_control_vmpo_2',
'koth_zap_while_in_control_vmpo_3',
'koth_zap_while_in_control_vmpo_4',
'koth_zap_while_in_control_vmpo_5',
'koth_zap_while_in_control_vmpo_6',
'koth_zap_while_in_control_vmpo_7',
}),
),
prisoners_dilemma_in_the_matrix_0=Scenario(
description='visiting unconditional cooperators',
tags=frozenset({
'versus_pure_cooperators',
'visitor',
}),
substrate='prisoners_dilemma_in_the_matrix',
is_focal=(True,) * 1 + (False,) * 7,
bots=frozenset({
'prisoners_dilemma_cooperator_2',
'prisoners_dilemma_cooperator_4',
}),
),
prisoners_dilemma_in_the_matrix_1=Scenario(
description='focals are resident and visitors are unconditional cooperators',
tags=frozenset({
'resident',
'versus_pure_cooperators',
}),
substrate='prisoners_dilemma_in_the_matrix',
is_focal=(True,) * 6 + (False,) * 2,
bots=frozenset({
'prisoners_dilemma_cooperator_2',
'prisoners_dilemma_cooperator_4',
}),
),
prisoners_dilemma_in_the_matrix_2=Scenario(
description='focals are resident and visitors defect',
tags=frozenset({
'resident',
'versus_pure_defectors',
}),
substrate='prisoners_dilemma_in_the_matrix',
is_focal=(True,) * 6 + (False,) * 2,
bots=frozenset({
'prisoners_dilemma_defector_0',
'prisoners_dilemma_defector_2',
}),
),
prisoners_dilemma_in_the_matrix_3=Scenario(
description='meeting gullible bots',
tags=frozenset({
'deception',
'half_and_half',
'versus_free',
}),
substrate='prisoners_dilemma_in_the_matrix',
is_focal=(True,) * 4 + (False,) * 4,
bots=frozenset({
'prisoners_dilemma_free_0',
'prisoners_dilemma_free_1',
'prisoners_dilemma_free_2',
}),
),
prisoners_dilemma_in_the_matrix_4=Scenario(
description='visiting a population of grim reciprocators',
tags=frozenset({
'reciprocity',
'versus_puppet',
'visitor',
}),
substrate='prisoners_dilemma_in_the_matrix',
is_focal=(True,) * 1 + (False,) * 7,
bots=frozenset({
'prisoners_dilemma_puppet_grim_threshold_high',
}),
),
prisoners_dilemma_in_the_matrix_5=Scenario(
description='visiting a population of hair-trigger grim reciprocators',
tags=frozenset({
'reciprocity',
'versus_puppet',
'visitor',
}),
substrate='prisoners_dilemma_in_the_matrix',
is_focal=(True,) * 1 + (False,) * 7,
bots=frozenset({
'prisoners_dilemma_puppet_grim_threshold_low',
}),
),
pure_coordination_in_the_matrix_0=Scenario(
description='focals are resident and visitor is mixed',
tags=frozenset({
'resident',
'versus_pure_all',
}),
substrate='pure_coordination_in_the_matrix',
is_focal=(True,) * 7 + (False,) * 1,
bots=frozenset({
'pure_coordination_type_1_specialist_0',
'pure_coordination_type_1_specialist_1',
'pure_coordination_type_2_specialist_0',
'pure_coordination_type_2_specialist_1',
'pure_coordination_type_3_specialist_0',
'pure_coordination_type_3_specialist_1',
}),
),
pure_coordination_in_the_matrix_1=Scenario(
description='visiting resource 1 fans',
tags=frozenset({
'versus_pure_type_1',
'visitor',
}),
substrate='pure_coordination_in_the_matrix',
is_focal=(True,) * 1 + (False,) * 7,
bots=frozenset({
'pure_coordination_type_1_specialist_0',
'pure_coordination_type_1_specialist_1',
}),
),
pure_coordination_in_the_matrix_2=Scenario(
description='visiting resource 2 fans',
tags=frozenset({
'versus_pure_type_2',
'visitor',
}),
substrate='pure_coordination_in_the_matrix',
is_focal=(True,) * 1 + (False,) * 7,
bots=frozenset({
'pure_coordination_type_2_specialist_0',
'pure_coordination_type_2_specialist_1',
}),
),
pure_coordination_in_the_matrix_3=Scenario(
description='visiting resource 3 fans',
tags=frozenset({
'versus_pure_type_3',
'visitor',
}),
substrate='pure_coordination_in_the_matrix',
is_focal=(True,) * 1 + (False,) * 7,
bots=frozenset({
'pure_coordination_type_3_specialist_0',
'pure_coordination_type_3_specialist_1',
}),
),
pure_coordination_in_the_matrix_4=Scenario(
description='meeting uncoordinated strangers',
tags=frozenset({
'half_and_half',
'versus_pure_all',
}),
substrate='pure_coordination_in_the_matrix',
is_focal=(True,) * 4 + (False,) * 4,
bots=frozenset({
'pure_coordination_type_1_specialist_0',
'pure_coordination_type_1_specialist_1',
'pure_coordination_type_2_specialist_0',
'pure_coordination_type_2_specialist_1',
'pure_coordination_type_3_specialist_0',
'pure_coordination_type_3_specialist_1',
}),
),
rationalizable_coordination_in_the_matrix_0=Scenario(
description='focals are resident and visitor is mixed',
tags=frozenset({
'resident',
'versus_pure_all',
}),
substrate='rationalizable_coordination_in_the_matrix',
is_focal=(True,) * 7 + (False,) * 1,
bots=frozenset({
'rationalizable_coordination_type_1_specialist_0',
'rationalizable_coordination_type_1_specialist_1',
'rationalizable_coordination_type_2_specialist_0',
'rationalizable_coordination_type_2_specialist_1',
'rationalizable_coordination_type_3_specialist_0',
'rationalizable_coordination_type_3_specialist_1',
}),
),
rationalizable_coordination_in_the_matrix_1=Scenario(
description='visiting resource 1 fans',
tags=frozenset({
'versus_pure_type_1',
'visitor',
}),
substrate='rationalizable_coordination_in_the_matrix',
is_focal=(True,) * 1 + (False,) * 7,
bots=frozenset({
'rationalizable_coordination_type_1_specialist_0',
'rationalizable_coordination_type_1_specialist_1',
}),
),
rationalizable_coordination_in_the_matrix_2=Scenario(
description='visiting resource 2 fans',
tags=frozenset({
'versus_pure_type_2',
'visitor',
}),
substrate='rationalizable_coordination_in_the_matrix',
is_focal=(True,) * 1 + (False,) * 7,
bots=frozenset({
'rationalizable_coordination_type_2_specialist_0',
'rationalizable_coordination_type_2_specialist_1',
}),
),
rationalizable_coordination_in_the_matrix_3=Scenario(
description='visiting resource 3 fans',
tags=frozenset({
'versus_pure_type_3',
'visitor',
}),
substrate='rationalizable_coordination_in_the_matrix',
is_focal=(True,) * 1 + (False,) * 7,
bots=frozenset({
'rationalizable_coordination_type_3_specialist_0',
'rationalizable_coordination_type_3_specialist_1',
}),
),
rationalizable_coordination_in_the_matrix_4=Scenario(
description='meeting uncoordinated strangers',
tags=frozenset({
'half_and_half',
'versus_pure_all',
}),
substrate='rationalizable_coordination_in_the_matrix',
is_focal=(True,) * 4 + (False,) * 4,
bots=frozenset({
'rationalizable_coordination_type_1_specialist_0',
'rationalizable_coordination_type_1_specialist_1',
'rationalizable_coordination_type_2_specialist_0',
'rationalizable_coordination_type_2_specialist_1',
'rationalizable_coordination_type_3_specialist_0',
'rationalizable_coordination_type_3_specialist_1',
}),
),
running_with_scissors_in_the_matrix_0=Scenario(
description='versus gullible opponent',
tags=frozenset({
'deception',
'half_and_half',
'versus_free',
}),
substrate='running_with_scissors_in_the_matrix',
is_focal=(True,) * 1 + (False,) * 1,
bots=frozenset({
'classic_rws_free_0',
'classic_rws_free_1',
'classic_rws_free_2',
}),
),
running_with_scissors_in_the_matrix_1=Scenario(
description='versus mixed strategy opponent',
tags=frozenset({
'half_and_half',
'versus_pure_all',
}),
substrate='running_with_scissors_in_the_matrix',
is_focal=(True,) * 1 + (False,) * 1,
bots=frozenset({
'classic_rws_pure_paper_0',
'classic_rws_pure_paper_1',
'classic_rws_pure_paper_2',
'classic_rws_pure_paper_3',
'classic_rws_pure_rock_0',
'classic_rws_pure_rock_1',
'classic_rws_pure_rock_2',
'classic_rws_pure_rock_3',
'classic_rws_pure_scissors_0',
'classic_rws_pure_scissors_1',
'classic_rws_pure_scissors_2',
'classic_rws_pure_scissors_3',
}),
),
running_with_scissors_in_the_matrix_2=Scenario(
description='versus pure rock opponent',
tags=frozenset({
'half_and_half',
'versus_pure_rock',
}),
substrate='running_with_scissors_in_the_matrix',
is_focal=(True,) * 1 + (False,) * 1,
bots=frozenset({
'classic_rws_pure_rock_0',
'classic_rws_pure_rock_1',
'classic_rws_pure_rock_2',
'classic_rws_pure_rock_3',
}),
),
running_with_scissors_in_the_matrix_3=Scenario(
description='versus pure paper opponent',
tags=frozenset({
'half_and_half',
'versus_pure_paper',
}),
substrate='running_with_scissors_in_the_matrix',
is_focal=(True,) * 1 + (False,) * 1,
bots=frozenset({
'classic_rws_pure_paper_0',
'classic_rws_pure_paper_1',
'classic_rws_pure_paper_2',
'classic_rws_pure_paper_3',
}),
),
running_with_scissors_in_the_matrix_4=Scenario(
description='versus pure scissors opponent',
tags=frozenset({
'half_and_half',
'versus_pure_scissors',
}),
substrate='running_with_scissors_in_the_matrix',
is_focal=(True,) * 1 + (False,) * 1,
bots=frozenset({
'classic_rws_pure_scissors_0',
'classic_rws_pure_scissors_1',
'classic_rws_pure_scissors_2',
'classic_rws_pure_scissors_3',
}),
),
stag_hunt_in_the_matrix_0=Scenario(
description='visiting a population of stags',
tags=frozenset({
'versus_pure_stag',
'visitor',
}),
substrate='stag_hunt_in_the_matrix',
is_focal=(True,) * 1 + (False,) * 7,
bots=frozenset({
'stag_hunt_stag_specialist_3',
'stag_hunt_stag_specialist_5',
}),
),
stag_hunt_in_the_matrix_1=Scenario(
description='visiting a population of hares',
tags=frozenset({
'versus_pure_hare',
'visitor',
}),
substrate='stag_hunt_in_the_matrix',
is_focal=(True,) * 1 + (False,) * 7,
bots=frozenset({
'stag_hunt_hare_specialist_0',
'stag_hunt_hare_specialist_1',
'stag_hunt_hare_specialist_2',
}),
),
stag_hunt_in_the_matrix_2=Scenario(
description='visiting a population of grim reciprocators',
tags=frozenset({
'reciprocity',
'versus_puppet',
'visitor',
}),
substrate='stag_hunt_in_the_matrix',
is_focal=(True,) * 2 + (False,) * 6,
bots=frozenset({
'stag_hunt_puppet_grim',
}),
),
territory_open_0=Scenario(
description='focals are resident and visited by a shaped bot',
tags=frozenset({
'resident',
}),
substrate='territory_open',
is_focal=(True,) * 8 + (False,) * 1,
bots=frozenset({
'territory_open_painter_0',
'territory_open_painter_1',
'territory_open_painter_2',
'territory_open_painter_3',
}),
),
territory_open_1=Scenario(
description='visiting a population of shaped bots',
tags=frozenset({
'convention_following',
'visitor',
}),
substrate='territory_open',
is_focal=(True,) * 1 + (False,) * 8,
bots=frozenset({
'territory_open_painter_0',
'territory_open_painter_1',
'territory_open_painter_2',
'territory_open_painter_3',
}),
),
territory_rooms_0=Scenario(
description='focals are resident and visited by an aggressor',
tags=frozenset({
'resident',
}),
substrate='territory_rooms',
is_focal=(True,) * 8 + (False,) * 1,
bots=frozenset({
'territory_closed_reply_to_zapper_0',
'territory_closed_reply_to_zapper_1',
}),
),
territory_rooms_1=Scenario(
description='visiting a population of aggressors',
tags=frozenset({
'convention_following',
'visitor',
}),
substrate='territory_rooms',
is_focal=(True,) * 1 + (False,) * 8,
bots=frozenset({
'territory_closed_reply_to_zapper_0',
'territory_closed_reply_to_zapper_1',
}),
),
# keep-sorted end
)
def scenarios_by_substrate(
scenarios: Mapping[str, Scenario]
) -> Mapping[str, Collection[str]]:
by_substrate = collections.defaultdict(list)
for scenario_name, scenario in scenarios.items():
by_substrate[scenario.substrate].append(scenario_name)
for key, value in by_substrate.items():
by_substrate[key] = tuple(value)
return immutabledict.immutabledict(**by_substrate)
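# Illustrative sketch (not part of the original module): scenarios_by_substrate()
# groups scenario names by the substrate they run on, so a lookup over the
# mapping defined above would resemble (the variable name SCENARIOS is
# hypothetical):
#
#   by_substrate = scenarios_by_substrate(SCENARIOS)
#   by_substrate['running_with_scissors_in_the_matrix']
#   # -> ('running_with_scissors_in_the_matrix_0', ...,
#   #     'running_with_scissors_in_the_matrix_4')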
|
|
from __future__ import unicode_literals
import os
import logging
import random
import sys
from classification.rext.kernelmodels import ReModel
from subprocess import Popen, PIPE
import platform
import itertools
import codecs
from classification.results import ResultsRE
from config import config
from text.pair import Pairs
class JSREKernel(ReModel):
def __init__(self, corpus, relationtype, modelname="slk_classifier.model", train=False, ner="goldstandard"):
super(JSREKernel, self).__init__()
# self.modelname = relationtype + "_" + modelname + "_jsre"
self.modelname = modelname
self.pairtype = relationtype
self.test_jsre = []
self.pairs = {}
self.resultsfile = None
self.examplesfile = None
self.ner_model = ner
self.entitytypes = (config.relation_types[self.pairtype]["source_types"], config.relation_types[self.pairtype]["target_types"])
self.corpus = corpus
def load_classifier(self, outputfile="jsre_results.txt"):
self.resultsfile = self.temp_dir + self.pairtype + "_" + outputfile
self.examplesfile = self.temp_dir + self.modelname + ".txt"
if os.path.isfile(self.temp_dir + self.pairtype + "_" + outputfile):
os.remove(self.temp_dir + self.pairtype + "_" + outputfile)
if not os.path.isfile(self.basedir + self.modelname):
print "model", self.basedir + self.modelname, "not found"
sys.exit()
if platform.system() == "Windows":
sep = ";"
else:
sep = ":"
#logging.debug("testing %s with %s to %s", temp_dir + inputfile,
# basedir + model, temp_dir + outputfile)
libs = ["libsvm-2.8.jar", "log4j-1.2.8.jar", "commons-digester.jar", "commons-beanutils.jar", "commons-logging.jar", "commons-collections.jar"]
classpath = 'bin/jsre/jsre-1.1/bin'+ sep + sep.join(["bin/jsre/jsre-1.1/lib/" + l for l in libs])
self.test_jsre = ['java', '-mx4g', '-classpath', classpath, "org.itc.irst.tcc.sre.Predict",
self.examplesfile, self.basedir + self.modelname,
self.resultsfile]
#print ' '.join(jsrecommand)
def train(self):
self.generatejSREdata(self.corpus, train=True, pairtype=self.pairtype)
if os.path.isfile(self.basedir + self.modelname):
print "removed old model"
os.remove(self.basedir + self.modelname)
if not os.path.isfile(self.temp_dir + self.modelname + ".txt"):
print "could not find training file " + self.basedir + self.modelname + ".txt"
sys.exit()
if platform.system() == "Windows":
sep = ";"
else:
sep = ":"
libs = ["libsvm-2.8.jar", "log4j-1.2.8.jar", "commons-digester.jar", "commons-beanutils.jar",
"commons-logging.jar", "commons-collections.jar"]
classpath = 'bin/jsre/jsre-1.1/bin/' + sep + sep.join(["bin/jsre/jsre-1.1/lib/" + l for l in libs])
jsrecall = ['java', '-mx8g', '-classpath', classpath, "org.itc.irst.tcc.sre.Train",
"-k", "SL", "-n", "3", "-w", "3", "-m", "3072", # "-c", str(3),
self.temp_dir + self.modelname + ".txt", self.basedir + self.modelname]
logging.info("saving model to {}".format(self.basedir + self.modelname))
print " ".join(jsrecall)
jsrecall = Popen(jsrecall) #, stdout=PIPE, stderr=PIPE)
res = jsrecall.communicate()
if not os.path.isfile(self.basedir + self.modelname):
print "error with jsre! model file was no created"
print res[1]
sys.exit()
else:
statinfo = os.stat(self.basedir + self.modelname)
if statinfo.st_size == 0:
print "error with jsre! model has 0 bytes"
print res[0]
print res[1]
sys.exit()
#logging.debug(res)
def test(self):
self.generatejSREdata(self.corpus, train=False, pairtype=self.pairtype)
# print " ".join(self.test_jsre)
jsrecall = Popen(self.test_jsre, stdout=PIPE, stderr=PIPE)
res = jsrecall.communicate()
#logging.debug(res[0].strip().split('\n')[-2:])
#os.system(' '.join(jsrecommand))
if not os.path.isfile(self.resultsfile):
print "something went wrong with JSRE!"
print res
sys.exit()
logging.debug("done.")
def get_sentence_instance(self, sentence, e1id, e2id, pair):
tokens = [t for t in sentence.tokens]
#start, end = pair[0].tokens[0].order, pair[1].tokens[-1].order
#if pair[0].tokens[0].order > pair[1].tokens[-1].order:
# start, end = end, start
#tokens = [t for t in sentence.tokens[start:end]]
tokens_text = [t.text for t in tokens]
# print tokens_text
pos = [t.pos for t in tokens]
lemmas = [t.lemma for t in tokens]
ner = [t.tag for t in tokens]
#logging.debug("{} {} {} {}".format(len(tokens1), len(pos), len(lemmas), len(ner)))
return self.blind_all_entities(tokens_text, sentence.entities.elist["goldstandard"],
[e1id, e2id], pos, lemmas, ner)
def annotate_sentence(self, sentence):
self.write_sentence_data_to_file(sentence)
self.run_jsre()
return self.open_results()
def run_jsre(self):
jsrecall = Popen(self.test_jsre, stdout=PIPE, stderr=PIPE)
res = jsrecall.communicate()
if not os.path.isfile(self.resultsfile):
print "something went wrong with JSRE!"
print res
else:
with open(self.resultsfile, 'r') as results:
return results.read()
def write_sentence_data_to_file(self, sentence):
if os.path.isfile(self.temp_dir + self.modelname + ".txt"):
logging.info("removed old data")
os.remove(self.temp_dir + self.modelname + ".txt")
pcount = 0
examplelines = []
if self.ner_model == "all":
sentence_entities = []
for esource in sentence.entities.elist:
sentence_entities += [entity for entity in sentence.entities.elist[esource]]
else:
sentence_entities = [entity for entity in sentence.entities.elist[self.ner_model]]
#print sentence.sid, self.ner_model, len(sentence.entities.elist[self.ner_model]), sentence_entities
# logging.debug("sentence {} has {} entities ({})".format(sentence.sid, len(sentence_entities), len(sentence.entities.elist["goldstandard"])))
for pair in itertools.permutations(sentence_entities, 2):
if pair[0].type in self.entitytypes[0] and pair[1].type in self.entitytypes[1]: # or\
pid = sentence.sid + ".p" + str(pcount)
self.pairs[pid] = pair
tokens_text, pos, lemmas, ner = self.get_sentence_instance(sentence, pair[0].eid, pair[1].eid, pair)
body = self.generatejSRE_line(tokens_text, pos, lemmas, ner)
examplelines.append('0\t' + pid + '.i' + '0\t' + body + '\n')
pcount += 1
logging.debug("writing {} lines to file...".format(len(examplelines)))
with codecs.open(self.temp_dir + self.modelname + ".txt", 'a', "utf-8") as trainfile:
for il, l in enumerate(examplelines):
trainfile.write(l)
def annotate_sentences(self, sentences):
"""
Process multiple sentences at once
        :param sentences: List of sentence objects (should be from the same doc)
        :return: Dictionary {sid: (pred, original)}
"""
for sentence in sentences:
self.write_sentence_data_to_file(sentence)
self.run_jsre()
results = {}
with open(self.resultsfile, 'r') as resfile:
pred = resfile.readlines()
with codecs.open(self.examplesfile, 'r', 'utf-8') as trainfile:
original = trainfile.readlines()
for sentence in sentences:
sentence_pred, sentence_original = [], []
for i in range(len(pred)):
original_tsv = original[i].split('\t')
sid = '.'.join(original_tsv[1].split('.')[:-2])
if sid == sentence.sid:
sentence_pred.append(pred[i])
sentence_original.append(original[i])
results[sentence.sid] = (sentence_pred, sentence_original)
return results
def open_results(self):
with open(self.resultsfile, 'r') as resfile:
pred = resfile.readlines()
with codecs.open(self.examplesfile, 'r', 'utf-8') as trainfile:
original = trainfile.readlines()
return pred, original
def process_sentence(self, pred, original, sentence):
"""
Given the raw output and input of JSRE, return list of relations
:param pred: JSRE output string
:param original: JSRE input string
:param sentence: sentence object
:return: list of pairs
"""
pairs = []
for i in range(len(pred)):
original_tsv = original[i].split('\t')
pid = '.'.join(original_tsv[1].split('.')[:-1])
p = float(pred[i].strip())
if p == 1:
pair = sentence.add_relation(self.pairs[pid][0], self.pairs[pid][1], self.pairtype,
relation=True)
pairs.append(pair)
return pairs
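    # Sketch of the expected line pairing (shapes assumed from how the example
    # lines are written elsewhere in this class): each input line in the
    # examples file looks like
    #   "<label>\t<pid>.i0\t<idx>&&<token>&&<lemma>&&<POS>&&<type>&&<A|T|O> ..."
    # while each line of the JSRE output is a single numeric prediction such as
    # "1". process_sentence() recovers the pid by dropping the trailing ".i0"
    # component and keeps only the pairs predicted positive (p == 1).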
def generatejSREdata(self, corpus, train=False, pairtype="all"):
if os.path.isfile(self.temp_dir + self.modelname + ".txt"):
# print "removed old data"
os.remove(self.temp_dir + self.modelname + ".txt")
pcount = 0
truepcount = 0
strue = 0
sfalse = 0
skipped = 0
for sentence in corpus.get_sentences(self.ner_model):
#for did in corpus.documents:
did = sentence.did
examplelines = []
pos_sentences = set()
sids = []
# print len(corpus.type_sentences[pairtype])
sentence_entities = [entity for entity in sentence.entities.elist[self.ner_model]]
# print sentence.sid, self.ner_model, len(sentence.entities.elist[self.ner_model]), sentence_entities
# logging.debug("sentence {} has {} entities ({})".format(sentence.sid, len(sentence_entities), len(sentence.entities.elist["goldstandard"])))
for pair in itertools.permutations(sentence_entities, 2):
sid1 = pair[0].eid.split(".")[-2]
sid2 = pair[1].eid.split(".")[-2]
# if pairtype in corpus.type_sentences and pair[0].sid not in corpus.type_sentences[pairtype]:
# continue
                sids.append((pair[0].sid, pair[1].sid))
sn1 = int(sid1[1:])
sn2 = int(sid2[1:])
if pair[0].start == pair[1].start or pair[0].end == pair[1].end:
continue
if pairtype in ("Has_Sequence_Identical_To", "Is_Functionally_Equivalent_To") and pair[0].type != pair[1].type:
continue
if pair[0].type in self.entitytypes[0] and pair[1].type in self.entitytypes[1]: # or\
# logging.debug(pair)
# print e1id, e2id
e1id = pair[0].eid
e2id = pair[1].eid
pid = did + ".p" + str(pcount)
# self.pairs[pid] = (e1id, e2id)
self.pairs[pid] = pair
tokens_text, pos, lemmas, ner = self.get_sentence_instance(sentence, e1id, e2id, pair)
trueddi = 0
if (e2id, pairtype) in pair[0].targets:
#if any((pair[1].eid, pt) in pair[0].targets for pt in config.event_types[self.pairtype]["subtypes"]):
trueddi = 1
truepcount += 1
strue += 1
else:
sfalse += 1
# true/total ratio
if train is True and trueddi == 0 and 1.0*strue/(strue+sfalse) < 0.001:
sfalse -= 1
skipped += 1
continue
else:
#pos_sentences.add(pair[0].sid)
#pos_sentences.add(pair[1].sid)
body = self.generatejSRE_line(tokens_text, pos, lemmas, ner)
examplelines.append(str(trueddi) + '\t' + pid + '.i' + '0\t' + body + '\n')
pcount += 1
# print strue, sfalse, skipped
#for il, l in enumerate(examplelines):
logging.debug("writing {} lines to file...".format(len(examplelines)))
with codecs.open(self.temp_dir + self.modelname + ".txt", 'a', "utf-8") as trainfile:
for il, l in enumerate(examplelines):
# print sids[il], random.sample(pos_sentences, 1)
#if sids[il][0] in pos_sentences or sids[il][1] in pos_sentences or not train:
trainfile.write(l)
# logging.info("wrote " + temp_dir + savefile)
logging.info("True/total relations:{}/{} ({})".format(truepcount, pcount, str(1.0*truepcount/(pcount+1))))
def generatejSRE_line(self, pairtext, pos, lemmas, ner):
candidates = [False,False]
body = ''
elements = []
for it in range(len(pairtext)):
#for it in range(len(pairtext)):
if pairtext[it] == "#candidatea#":
#print pairtext[i],
tokentype = 'ENTITY'
#tokentype = etypes[0]
tokenlabel = 'A'
candidates[0] = True
#tokentext = "#candidate#"
#tokentext = entitytext[0]
tokentext = pairtext[it].lstrip()
lemma = tokentext
elif pairtext[it] == "#candidateb#":
#print pairtext[i]
tokentype = 'ENTITY'
#tokentype = etypes[0]
tokenlabel = 'T'
#tokentext = "#candidate#"
tokentext = pairtext[it].lstrip()
#tokentext = entitytext[1]
lemma = tokentext
candidates[1] = True
elif pairtext[it] == "#entity#":
tokentype = 'DRUG'
tokenlabel = 'O'
tokentext = pairtext[it].lstrip()
lemma = tokentext
else:
# logging.debug("{}".format(pairtext[it].lstrip()))
tokentype = ner[it]
tokenlabel = 'O'
tokentext = pairtext[it].lstrip()
lemma = lemmas[it]
if tokentext == '-RRB-':
tokentext = ')'
lemma = ')'
elif tokentext == '-LRB-':
tokentext = '('
lemma = '('
#if ' ' in pairtext[it][0].lstrip() or '\n' in pairtext[it][0].lstrip():
# print "token with spaces!"
# print pairs[pair][ddi.PAIR_TOKENS][it][0].lstrip()
# sys.exit()
elements.append("&&".join([str(it), tokentext,
lemma,
pos[it],
tokentype, tokenlabel]))
#logging.debug("%s\t%s\t%s", str(trueddi), pair, body)
if not candidates[0]:
logging.debug("missing first candidate on pair ")
elements = ["0&&#candidate#&&#candidate#&&-None-&&ENTITY&&T"] + [str(n+1) + e[1:] for n, e in enumerate(elements)]
# print pairtext
# sys.exit()
if not candidates[1]:
logging.debug("missing second candidate on pair")
elements.append(str(it+1) + "&&#candidate#&&#candidate#&&-None-&&ENTITY&&T")
# print pairtext
# sys.exit()
body = " ".join(elements)
return body
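    # Example of a single encoded token produced by the join above (values are
    # illustrative): "7&&interacts&&interact&&VBZ&&O&&O", i.e.
    # position&&text&&lemma&&POS&&entity-type&&candidate-label, where the two
    # candidate entities are labelled 'A' and 'T' and all other tokens 'O'.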
def get_predictions(self, corpus):
# real_pair_type = config.event_types[self.pairtype]["subtypes"][0]
#pred_y = []
with open(self.resultsfile, 'r') as resfile:
pred = resfile.readlines()
with codecs.open(self.examplesfile, 'r', 'utf-8') as trainfile:
original = trainfile.readlines()
if len(pred) != len(original):
print "different number of predictions!"
sys.exit()
results = ResultsRE(self.resultsfile)
temppreds = {}
for i in range(len(pred)):
original_tsv = original[i].split('\t')
# logging.debug(original_tsv)
pid = '.'.join(original_tsv[1].split('.')[:-1])
p = float(pred[i].strip())
if p == 0:
p = -1
if p == 2:
print "p=2!"
p = 1
if p == 1:
did = '.'.join(pid.split(".")[:-1])
if did not in results.document_pairs:
results.document_pairs[did] = Pairs()
pair = corpus.documents[did].add_relation(self.pairs[pid][0], self.pairs[pid][1], self.pairtype, relation=True)
# pair = corpus.documents[did].add_relation(self.pairs[pid][0], self.pairs[pid][1], real_pair_type, relation=True)
#pair = self.get_pair(pid, corpus)
results.pairs[pid] = pair
results.document_pairs[did].add_pair(pair, "jsre")
# logging.debug("{} - {} SLK: {}".format(pair.entities[0], pair.entities[1], p))
#if pair not in temppreds:
# temppreds[pair] = []
#temppreds[pair].append(p)
results.pairs[pid].recognized_by["jsre"] = p
'''for pair in temppreds:
if relations.SLK_PRED not in pairs[pair]:
pairs[pair][relations.SLK_PRED] = {}
p = mode(temppreds[pair])[0][0]
if len(set(temppreds[pair])) > 1:
print temppreds[pair], p
pairs[pair][relations.SLK_PRED][dditype] = p
#if pairs[pair][ddi.SLK_PRED][dditype] and not pairs[pair][ddi.SLK_PRED]["all"]:
# logging.info("type classifier %s found a new true pair: %s", dditype, pair)
for pair in pairs:
if relations.SLK_PRED not in pairs[pair]:
pairs[pair][relations.SLK_PRED] = {}
if dditype not in pairs[pair][relations.SLK_PRED]:
pairs[pair][relations.SLK_PRED][dditype] = -1'''
results.corpus = corpus
return results
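# Typical driver sequence for this class (illustrative only; the relation type
# and corpus object are assumptions, not values taken from this file):
#
#   model = JSREKernel(corpus, "my_relation_type")
#   model.train()              # writes <temp_dir>/<modelname>.txt and runs org.itc.irst.tcc.sre.Train
#   model.load_classifier()    # builds the org.itc.irst.tcc.sre.Predict command line
#   model.test()               # runs jSRE and writes the results file
#   results = model.get_predictions(corpus)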
|
|
'''
Copyright 2016, EMC, Inc.
Author(s):
George Paulos
This script initializes RackHD stack after install.
- loads SKU packs
- loads default SKU
- sets auth user
- restarts nodes for discovery
- discovers switches and/or PDUs if available
- checks node discovery
- assigns node OBM settings
- checks pollers for data
'''
import fit_path # NOQA: unused import
import os
import subprocess
import json
import time
import unittest
import fit_common
import pdu_lib
import flogging
from ucsmsdk import ucshandle
from ucsmsdk.utils.ucsbackup import import_ucs_backup
log = flogging.get_loggers()
# Locals
MAX_CYCLES = 60
class rackhd_stack_init(unittest.TestCase):
def test01_set_auth_user(self):
fit_common.remote_shell('rm auth.json')
auth_json = open('auth.json', 'w')
auth_json.write('{"username":"' + fit_common.fitcreds()["api"][0]["admin_user"] + '", "password":"' +
fit_common.fitcreds()["api"][0]["admin_pass"] + '", "role":"Administrator"}')
auth_json.close()
fit_common.scp_file_to_ora('auth.json')
rc = fit_common.remote_shell("curl -ks -X POST -H 'Content-Type:application/json' https://localhost:" +
str(fit_common.fitports()['https']) + "/api/2.0/users -d @auth.json")
if rc['exitcode'] != 0:
log.info_5("ALERT: Auth admin user not set! Please manually set the admin user account if required.")
def test02_preload_sku_packs(self):
log.info_5("**** Downloading SKU packs from GitHub")
subprocess.call("rm -rf temp.sku; rm -rf on-skupack", shell=True)
os.mkdir("on-skupack")
# download all SKU repos and merge into on-skupack
for url in fit_common.fitskupack():
log.info_5("**** Cloning SKU Packs from " + url)
subprocess.call("git clone " + url + " temp.sku", shell=True)
subprocess.call('cp -R temp.sku/* on-skupack; rm -rf temp.sku', shell=True)
        # build SKU packs
for subdir, dirs, files in os.walk('on-skupack'):
for skus in dirs:
if skus not in ["debianstatic", ".git"] and os.path.isfile('on-skupack/' + skus + '/config.json'):
subprocess.call("cd on-skupack;mkdir -p " + skus + "/tasks " + skus + "/static " +
skus + "/workflows " + skus + "/templates", shell=True)
subprocess.call("cd on-skupack; ./build-package.bash " +
skus + " " + skus + " >/dev/null 2>&1", shell=True)
break
# upload SKU packs to ORA
log.info_5("**** Loading SKU Packs to server")
for subdir, dirs, files in os.walk('on-skupack/tarballs'):
for skupacks in files:
log.info_5("**** Loading SKU Pack for " + skupacks)
                file = open(fit_common.TEST_PATH + "on-skupack/tarballs/" + skupacks, 'rb')
fit_common.rackhdapi("/api/2.0/skus/pack", action="binary-post", payload=file.read())
file.close()
break
# check SKU directory against source files
error_message = ""
skulist = json.dumps(fit_common.rackhdapi("/api/2.0/skus")['json'])
for subdir, dirs, files in os.walk('on-skupack'):
for skus in dirs:
if skus not in ["debianstatic", ".git", "packagebuild", "tarballs"] and \
os.path.isfile('on-skupack/' + skus + '/config.json'):
try:
configfile = json.loads(open("on-skupack/" + skus + "/config.json").read())
# check if sku pack got installed
if configfile['name'] not in skulist:
log.error("FAILURE - Missing SKU: " + configfile['name'])
error_message += " Missing SKU: " + configfile['name']
except:
                        # Check if the sku pack config.json file is in valid format; fail the skupack install if invalid
log.error("FAILURE - Corrupt config.json in SKU Pack: " + str(skus) + " - not loaded")
error_message += " Corrupt config.json in SKU Pack: " + str(skus)
break
self.assertEqual(error_message, "", error_message)
def test03_preload_default_sku(self):
# Load default SKU for unidentified compute nodes
payload = {"name": "Unidentified-Compute", "rules": [{"path": "bmc.IP Address"}]}
api_data = fit_common.rackhdapi("/api/2.0/skus", action='post', payload=payload)
self.assertIn(api_data['status'], [201, 409],
'Incorrect HTTP return code, expecting 201 or 409, got ' + str(api_data['status']))
def test04_power_on_nodes(self):
# This powers on nodes via PDU or, if no PDU, power cycles nodes via IPMI to start discovery
# ServerTech PDU case
if pdu_lib.check_pdu_type() != "Unknown":
log.info_5('**** PDU found, powering on PDU outlets')
self.assertTrue(pdu_lib.pdu_control_compute_nodes("on"), 'Failed to power on all outlets')
# Wait about 30 seconds for the outlets to all come on and nodes to DHCP
fit_common.countdown(30)
# no PDU case
else:
log.info_5('**** No supported PDU found, restarting nodes using IPMI.')
# Power cycle all nodes via IPMI, display warning if no nodes found
if fit_common.power_control_all_nodes("off") == 0:
log.info_5('**** No BMC IP addresses found in arp table, continuing without node restart.')
else:
# power on all nodes under any circumstances
fit_common.power_control_all_nodes("on")
# Optionally install control switch node if present
@unittest.skipUnless("control" in fit_common.fitcfg(), "")
def test05_discover_control_switch_node(self):
log.info_5("**** Creating control switch node.")
payload = {"type": "switch",
"name": "Control",
"autoDiscover": True,
"obms": [{"service": "snmp",
"config": {"host": fit_common.fitcfg()['control'],
"community": fit_common.fitcreds()['snmp'][0]['community']}}]}
api_data = fit_common.rackhdapi("/api/2.0/nodes", action='post', payload=payload)
self.assertEqual(api_data['status'], 201, 'Incorrect HTTP return code, expecting 201, got ' +
str(api_data['status']))
# Optionally install data switch node if present
@unittest.skipUnless("data" in fit_common.fitcfg(), "")
def test06_discover_data_switch_node(self):
log.info_5("**** Creating data switch node.")
payload = {"type": "switch",
"name": "Data",
"autoDiscover": True,
"obms": [{"service": "snmp",
"config": {"host": fit_common.fitcfg()['data'],
"community": fit_common.fitcreds()['snmp'][0]['community']}}]}
api_data = fit_common.rackhdapi("/api/2.0/nodes", action='post', payload=payload)
self.assertEqual(api_data['status'], 201, 'Incorrect HTTP return code, expecting 201, got ' +
str(api_data['status']))
# Optionally install PDU node if present
@unittest.skipUnless("pdu" in fit_common.fitcfg(), "")
def test07_discover_pdu_node(self):
log.info_5("**** Creating PDU node.")
payload = {"type": "pdu",
"name": "PDU",
"autoDiscover": True,
"obms": [{"service": "snmp",
"config": {"host": fit_common.fitcfg()['pdu'],
"community": fit_common.fitcreds()['snmp'][0]['community']}}]}
api_data = fit_common.rackhdapi("/api/2.0/nodes/", action='post', payload=payload)
self.assertEqual(api_data['status'], 201, 'Incorrect HTTP return code, expecting 201, got ' +
str(api_data['status']))
def test08_check_compute_nodes(self):
log.info_5("**** Waiting for compute nodes.")
c_index = 0
for c_index in range(0, MAX_CYCLES):
if "compute" in fit_common.rackhdapi("/api/2.0/nodes")['text']:
break
else:
time.sleep(30)
self.assertLess(c_index, MAX_CYCLES - 1, "No compute nodes found.")
def test09_check_discovery(self):
log.info_5("**** Waiting for node Discovery to complete.\n",)
        # Determine if there are any active workflows. If the returned value is true, obmSettings, SKUs
        # and active workflows are all either present or complete. If the returned value is false,
        # there was a timeout and not all nodes have obtained obmSettings and SKUs, or not all active
        # workflows have completed.
# Wait 10 minutes ( MAX_CYCLES * 10 seconds) for this to occur.
self.assertTrue(self.check_for_active_workflows(MAX_CYCLES), "Node discovery not completed")
def check_for_active_workflows(self, max_time):
'''
        Determine if there are any active workflows.
        :param max_time: time to wait (in 10 second intervals)
:return: True - No active workflows
False - Workflows are active
'''
for _ in range(0, max_time):
nodes_data = fit_common.rackhdapi("/api/2.0/nodes")
if nodes_data['status'] == 200 and len(nodes_data['json']) > 0:
# if there are nodes present, determine if discovery has completed on them
discovery_complete = True
for node in nodes_data['json']:
if node['type'] == 'compute':
self.assertIn('id', node, 'node does not contain id')
node_id = node['id']
                        # determine if there are any active workflows. If so, discovery has not completed
if fit_common.check_active_workflows(node_id):
discovery_complete = False
break
if discovery_complete:
return True
time.sleep(10)
return False
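    # Timing sketch: with MAX_CYCLES = 60 and the 10-second sleep above,
    # test09 gives discovery up to 60 * 10 s = 10 minutes to finish all
    # active workflows before the assertion fails.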
def test10_apply_obm_settings(self):
log.info_5("**** Apply OBM setting to compute nodes.")
self.assertTrue(fit_common.apply_obm_settings(), "OBM settings failed.")
@unittest.skipUnless("bmc" in fit_common.fitcfg(), "")
@unittest.skip("Skipping 'test10_add_management_server' bug RAC-4063")
def test11_add_management_server(self):
log.info_5("**** Creating management server.")
usr = ""
pwd = ""
# find correct BMC passwords from credentials list
for creds in fit_common.fitcreds()['bmc']:
if fit_common.remote_shell('ipmitool -I lanplus -H ' + fit_common.fitcfg()['bmc'] +
' -U ' + creds['username'] + ' -P ' +
creds['password'] + ' fru')['exitcode'] == 0:
usr = creds['username']
pwd = creds['password']
# create management node using these creds
if usr != "" and pwd != "":
payload = {"name": "Management Server " + str(time.time()),
"type": "mgmt",
"autoDiscover": True,
"obms": [{"service": "ipmi-obm-service",
"config": {"host": fit_common.fitcfg()['bmc'],
"user": usr,
"password": pwd}}]}
api_data = fit_common.rackhdapi("/api/2.0/nodes", action='post', payload=payload)
self.assertEqual(api_data['status'], 201,
'Incorrect HTTP return code, expecting 201, got ' + str(api_data['status']))
else:
self.fail("Unable to contact management server BMC, skipping MGMT node create")
def test12_check_pollers(self):
log.info_5("**** Waiting for pollers.")
        # Determine if there are any pollers present. If the return value is true, there are pollers
        # active. If the return value is false, pollers are not active.
        # Wait up to 30 minutes (MAX_CYCLES * 30 seconds) for this to occur.
self.assertTrue(self.check_for_active_pollers(MAX_CYCLES), 'No pollers')
log.info_5("**** Waiting for pollers data.")
# Determine if all the pollers have data. If the return value is true, all pollers have data
# If the return value is false, poller are working but not collecting data.
# Wait 10 minutes ( MAX_CYCLES * 10 seconds) for this to occur.
self.assertTrue(self.check_for_active_poller_data(MAX_CYCLES), 'All pollers are not active')
def check_for_active_pollers(self, max_time):
'''
        Determine if any pollers are active.
        :param max_time: time to wait (in 30 second intervals)
        :return: True  - pollers are active
                 False - no pollers are active
'''
for _ in range(0, max_time):
api_data = fit_common.rackhdapi('/api/2.0/pollers')
if len(api_data['json']) > 0:
return True
time.sleep(30)
return False
def check_for_active_poller_data(self, max_time):
'''
        Determine if all pollers have data.
        :param max_time: time to wait (in 10 second intervals)
        :return: True  - all pollers have data
                 False - not all pollers have data
'''
poller_list = []
api_data = fit_common.rackhdapi('/api/2.0/pollers')
if api_data:
# set up a list of poller ids
for index in api_data['json']:
poller_list.append(index['id'])
if poller_list != []:
for _ in range(0, max_time):
# move backwards through the list allowing completed poller ids to be popped
# off the list
for i in reversed(range(len(poller_list))):
id = poller_list[i]
poll_data = fit_common.rackhdapi("/api/2.0/pollers/" + id + "/data/current")
# Check if data current returned 200 and data in the poll, if so, remove from list
if poll_data['status'] == 200 and len(poll_data['json']) != 0:
poller_list.pop(i)
if poller_list == []:
# return when all pollers look good
return True
time.sleep(10)
if poller_list != []:
log.error("Poller IDs with error or no data: {}".format(json.dumps(poller_list, indent=4)))
return False
# Optionally configure UCS Manager if present
@unittest.skipUnless("ucsm_ip" in fit_common.fitcfg(), "")
def test13_load_ucs_manager_config(self):
"""
        loads the test configuration into the UCS Manager
"""
handle = ucshandle.UcsHandle(fit_common.fitcfg()['ucsm_ip'], fit_common.fitcfg()['ucsm_user'],
fit_common.fitcfg()['ucsm_pass'])
self.assertTrue(handle.login(), 'Failed to log in to UCS Manager!')
path, file = os.path.split(fit_common.fitcfg()['ucsm_config_file'])
import_ucs_backup(handle, file_dir=path, file_name=file)
self.assertTrue(handle.logout(), 'Failed to log out from UCS Manager!')
if __name__ == '__main__':
unittest.main()
|
|
"""Emulated fixed-point arithmetic, also useful for quires.
"""
from ..titanic import digital
from ..titanic import gmpmath
from ..fpbench import fpcast as ast
from ..titanic.ops import OP
from . import evalctx
from . import mpnum
from . import ieee754
from . import posit
from . import fixed
from . import interpreter
from ..titanic import ndarray
class MPMF(mpnum.MPNum):
_ctx : evalctx.EvalCtx = ieee754.ieee_ctx(11, 64)
@property
def ctx(self):
return self._ctx
def as_ctx(self):
ctx = self.ctx
if isinstance(ctx, evalctx.IEEECtx):
return ieee754.Float(self, ctx=ctx)
elif isinstance(ctx, evalctx.PositCtx):
return posit.Posit(self, ctx=ctx)
elif isinstance(ctx, evalctx.FixedCtx):
return fixed.Fixed(self, ctx=ctx)
else:
# TODO: ?
# raise ValueError('unknown context {}'.format(repr(ctx)))
return digital.Digital(self)
def is_identical_to(self, other):
return self.as_ctx().is_identical_to(other)
def __init__(self, x=None, ctx=None, **kwargs):
if ctx is None:
ctx = type(self)._ctx
if x is None or isinstance(x, digital.Digital):
super().__init__(x=x, **kwargs)
else:
if kwargs:
raise ValueError('cannot specify additional values {}'.format(repr(kwargs)))
f = gmpmath.mpfr(x, ctx.p)
unrounded = gmpmath.mpfr_to_digital(f)
super().__init__(x=self._round_to_context(unrounded, ctx=ctx, strict=True))
if isinstance(ctx, evalctx.IEEECtx):
self._ctx = ieee754.ieee_ctx(ctx.es, ctx.nbits, rm=ctx.rm)
elif isinstance(ctx, evalctx.PositCtx):
self._ctx = posit.posit_ctx(ctx.es, ctx.nbits)
elif isinstance(ctx, evalctx.FixedCtx):
self._ctx = fixed.fixed_ctx(ctx.scale, ctx.nbits, rm=ctx.rm, of=ctx.of)
else:
raise ValueError('unsupported context {}'.format(repr(ctx)))
def __repr__(self):
return '{}(negative={}, c={}, exp={}, inexact={}, rc={}, isinf={}, isnan={}, ctx={})'.format(
type(self).__name__, repr(self._negative), repr(self._c), repr(self._exp),
repr(self._inexact), repr(self._rc), repr(self._isinf), repr(self._isnan), repr(self._ctx)
)
def __str__(self):
return str(gmpmath.digital_to_mpfr(self))
def __float__(self):
return float(gmpmath.digital_to_mpfr(self))
@classmethod
def _select_context(cls, *args, ctx=None):
if ctx is None:
p = -1
for f in args:
if isinstance(f, cls) and f.ctx.p > p:
p = f.ctx.p
ctx = f.ctx
if ctx is None:
raise ValueError('arguments do not contain a context?\n{}'.format(repr(args)))
if isinstance(ctx, evalctx.IEEECtx):
return ieee754.ieee_ctx(ctx.es, ctx.nbits, rm=ctx.rm)
elif isinstance(ctx, evalctx.PositCtx):
return posit.posit_ctx(ctx.es, ctx.nbits)
elif isinstance(ctx, evalctx.FixedCtx):
return fixed.fixed_ctx(ctx.scale, ctx.nbits, rm=ctx.rm, of=ctx.of)
else:
raise ValueError('unsupported context {}'.format(repr(ctx)))
@classmethod
def _round_to_context(cls, unrounded, ctx=None, strict=False):
if ctx is None:
if hasattr(unrounded, 'ctx'):
ctx = unrounded.ctx
else:
raise ValueError('unable to determine context to round {}'.format(repr(unrounded)))
if isinstance(ctx, evalctx.IEEECtx):
rounded = ieee754.Float._round_to_context(unrounded, ctx=ctx, strict=strict)
elif isinstance(ctx, evalctx.PositCtx):
rounded = posit.Posit._round_to_context(unrounded, ctx=ctx, strict=strict)
elif isinstance(ctx, evalctx.FixedCtx):
rounded = fixed.Fixed._round_to_context(unrounded, ctx=ctx, strict=strict)
else:
raise ValueError('unsupported context {}'.format(repr(ctx)))
return cls(rounded, ctx=ctx)
def isnormal(self):
x = self.as_ctx()
if isinstance(x, mpnum.MPNum):
return x.isnormal()
else:
return not (
self.is_zero()
or self.isinf
or self.isnan
)
# TODO: hack, provide a fake constructor-like thing to make contexts of varying types
def mpmf_ctype(bindings=None, props=None):
ctx = MPMF._ctx.let(bindings=bindings)
return evalctx.determine_ctx(ctx, props)
class Interpreter(interpreter.StandardInterpreter):
dtype = MPMF
ctype = staticmethod(mpmf_ctype)
def arg_to_digital(self, x, ctx):
return self.dtype(x, ctx=ctx)
def _eval_constant(self, e, ctx):
try:
return None, self.constants[e.value]
except KeyError:
return None, self.round_to_context(gmpmath.compute_constant(e.value, prec=ctx.p), ctx=ctx)
    # unfortunately, interpreting these values efficiently requires info from the context,
# so it has to be implemented per interpreter...
def _eval_integer(self, e, ctx):
x = digital.Digital(m=e.i, exp=0, inexact=False)
return None, self.round_to_context(x, ctx=ctx)
def _eval_rational(self, e, ctx):
p = digital.Digital(m=e.p, exp=0, inexact=False)
q = digital.Digital(m=e.q, exp=0, inexact=False)
x = gmpmath.compute(OP.div, p, q, prec=ctx.p)
return None, self.round_to_context(x, ctx=ctx)
def _eval_digits(self, e, ctx):
x = gmpmath.compute_digits(e.m, e.e, e.b, prec=ctx.p)
return None, self.round_to_context(x, ctx=ctx)
# this is what makes it mpmf actually
def _eval_ctx(self, e, ctx):
return None, self.evaluate(e.body, evalctx.determine_ctx(ctx, e.props))
def round_to_context(self, x, ctx):
"""Not actually used???"""
return self.dtype._round_to_context(x, ctx=ctx, strict=False)
# copy-pasta hack
def arg_ctx(self, core, args, ctx=None, override=True):
if len(core.inputs) != len(args):
raise ValueError('incorrect number of arguments: got {}, expecting {} ({})'.format(
len(args), len(core.inputs), ' '.join((name for name, props, shape in core.inputs))))
if ctx is None:
ctx = self.ctype(props=core.props)
elif override:
allprops = {}
allprops.update(core.props)
allprops.update(ctx.props)
ctx = evalctx.determine_ctx(ctx, allprops)
else:
ctx = evalctx.determine_ctx(ctx, core.props)
arg_bindings = []
for arg, (name, props, shape) in zip(args, core.inputs):
local_ctx = evalctx.determine_ctx(ctx, props)
if isinstance(arg, self.dtype):
argval = self.round_to_context(arg, ctx=local_ctx)
elif isinstance(arg, ast.Expr):
argval = self.evaluate(arg, local_ctx)
elif isinstance(arg, ndarray.NDArray):
rounded_data = [
                    self.round_to_context(d, ctx=local_ctx) if isinstance(d, self.dtype) else self.arg_to_digital(d, local_ctx)
for d in arg.data
]
argval = ndarray.NDArray(shape=arg.shape, data=rounded_data)
elif isinstance(arg, list):
nd_unrounded = ndarray.NDArray(shape=None, data=arg)
rounded_data = [
                    self.round_to_context(d, ctx=local_ctx) if isinstance(d, self.dtype) else self.arg_to_digital(d, local_ctx)
for d in nd_unrounded.data
]
argval = ndarray.NDArray(shape=nd_unrounded.shape, data=rounded_data)
else:
argval = self.arg_to_digital(arg, local_ctx)
if isinstance(argval, ndarray.NDArray):
if not shape:
raise interpreter.EvaluatorError('not expecting a tensor, got shape {}'.format(repr(argval.shape)))
if len(shape) != len(argval.shape):
raise interpreter.EvaluatorError('tensor input has wrong shape: expecting {}, got {}'.format(repr(shape), repr(argval.shape)))
for dim, argdim in zip(shape, argval.shape):
if isinstance(dim, int) and dim != argdim:
raise interpreter.EvaluatorError('tensor input has wrong shape: expecting {}, got {}'.format(repr(shape), repr(argval.shape)))
elif isinstance(dim, str):
arg_bindings.append((dim, self.dtype(argdim)))
arg_bindings.append((name, argval))
return ctx.let(bindings=arg_bindings)
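# Rough usage sketch (not part of the module): an MPMF value carries its own
# rounding context, so one wrapper type can hold IEEE 754, posit, or
# fixed-point numbers. Constructor arguments below are illustrative:
#
#   x = MPMF(1.5)                               # rounded in the default ieee_ctx(11, 64) context
#   y = MPMF(1.5, ctx=posit.posit_ctx(1, 16))   # rounded as a 16-bit posit instead
#
# Arithmetic methods inherited from mpnum.MPNum use _select_context to round
# results into the highest-precision operand context.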
|
|
import argparse
import logging
import os
from ceph_deploy import hosts
from ceph_deploy.cliutil import priority
from ceph_deploy.lib import remoto
from ceph_deploy.util.constants import default_components
from ceph_deploy.util.paths import gpg
LOG = logging.getLogger(__name__)
def sanitize_args(args):
"""
args may need a bunch of logic to set proper defaults that argparse is
not well suited for.
"""
if args.release is None:
args.release = 'hammer'
args.default_release = True
# XXX This whole dance is because --stable is getting deprecated
if args.stable is not None:
LOG.warning('the --stable flag is deprecated, use --release instead')
args.release = args.stable
# XXX Tango ends here.
return args
def detect_components(args, distro):
"""
    Since the package split, there are several different Ceph components to
    install, like:
* ceph
* ceph-mon
* ceph-osd
* ceph-mds
    This helper function parses the args that may contain specifics about
    these flags and returns the default if none are passed in (which is: install
    everything).
"""
# the flag that prevents all logic here is the `--repo` flag which is used
# when no packages should be installed, just the repo files, so check for
# that here and return an empty list (which is equivalent to say 'no
# packages should be installed')
if args.repo:
return []
flags = {
'install_osd': 'ceph-osd',
'install_rgw': 'ceph-radosgw',
'install_mds': 'ceph-mds',
'install_mon': 'ceph-mon',
'install_common': 'ceph-common',
}
if distro.is_rpm:
defaults = default_components.rpm
else:
defaults = default_components.deb
# different naming convention for deb than rpm for radosgw
flags['install_rgw'] = 'radosgw'
if args.install_all:
return defaults
else:
components = []
for k, v in flags.items():
if getattr(args, k, False):
components.append(v)
# if we have some components selected from flags then return that,
# otherwise return defaults because no flags and no `--repo` means we
# should get all of them by default
return components or defaults
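# Worked example (sketch): `ceph-deploy install --mon --osd HOST` on an RPM
# based distro makes detect_components() return ['ceph-mon', 'ceph-osd']
# (order follows the flags dict); with no component flags and no --repo it
# falls back to default_components.rpm, and with --repo it returns [] so that
# only the repo files get installed.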
def install(args):
args = sanitize_args(args)
if args.repo:
return install_repo(args)
if args.version_kind == 'stable':
version = args.release
else:
version = getattr(args, args.version_kind)
version_str = args.version_kind
if version:
version_str += ' version {version}'.format(version=version)
LOG.debug(
'Installing %s on cluster %s hosts %s',
version_str,
args.cluster,
' '.join(args.host),
)
for hostname in args.host:
LOG.debug('Detecting platform for host %s ...', hostname)
distro = hosts.get(
hostname,
username=args.username,
# XXX this should get removed once ceph packages are split for
# upstream. If default_release is True, it means that the user is
# trying to install on a RHEL machine and should expect to get RHEL
# packages. Otherwise, it will need to specify either a specific
# version, or repo, or a development branch. Other distro users
# should not see any differences.
use_rhceph=args.default_release,
)
LOG.info(
'Distro info: %s %s %s',
distro.name,
distro.release,
distro.codename
)
components = detect_components(args, distro)
if distro.init == 'sysvinit' and args.cluster != 'ceph':
LOG.error('refusing to install on host: %s, with custom cluster name: %s' % (
hostname,
args.cluster,
)
)
LOG.error('custom cluster names are not supported on sysvinit hosts')
continue
rlogger = logging.getLogger(hostname)
rlogger.info('installing Ceph on %s' % hostname)
cd_conf = getattr(args, 'cd_conf', None)
# custom repo arguments
repo_url = os.environ.get('CEPH_DEPLOY_REPO_URL') or args.repo_url
gpg_url = os.environ.get('CEPH_DEPLOY_GPG_URL') or args.gpg_url
gpg_fallback = gpg.url('release')
if gpg_url is None and repo_url:
LOG.warning('--gpg-url was not used, will fallback')
LOG.warning('using GPG fallback: %s', gpg_fallback)
gpg_url = gpg_fallback
if args.local_mirror:
remoto.rsync(hostname, args.local_mirror, '/opt/ceph-deploy/repo', distro.conn.logger, sudo=True)
repo_url = 'file:///opt/ceph-deploy/repo'
gpg_url = 'file:///opt/ceph-deploy/repo/release.asc'
if repo_url: # triggers using a custom repository
# the user used a custom repo url, this should override anything
# we can detect from the configuration, so warn about it
if cd_conf:
if cd_conf.get_default_repo():
rlogger.warning('a default repo was found but it was \
overridden on the CLI')
if args.release in cd_conf.get_repos():
rlogger.warning('a custom repo was found but it was \
overridden on the CLI')
rlogger.info('using custom repository location: %s', repo_url)
distro.mirror_install(
distro,
repo_url,
gpg_url,
args.adjust_repos,
components=components,
)
# Detect and install custom repos here if needed
elif should_use_custom_repo(args, cd_conf, repo_url):
LOG.info('detected valid custom repositories from config file')
custom_repo(distro, args, cd_conf, rlogger)
else: # otherwise a normal installation
distro.install(
distro,
args.version_kind,
version,
args.adjust_repos,
components=components,
)
# Check the ceph version we just installed
hosts.common.ceph_version(distro.conn)
distro.conn.exit()
def should_use_custom_repo(args, cd_conf, repo_url):
"""
A boolean to determine the logic needed to proceed with a custom repo
    installation instead of cramming everything next to the logic operator.
"""
if repo_url:
# repo_url signals a CLI override, return False immediately
return False
if cd_conf:
if cd_conf.has_repos:
has_valid_release = args.release in cd_conf.get_repos()
has_default_repo = cd_conf.get_default_repo()
if has_valid_release or has_default_repo:
return True
return False
def custom_repo(distro, args, cd_conf, rlogger, install_ceph=None):
"""
A custom repo install helper that will go through config checks to retrieve
repos (and any extra repos defined) and install those
``cd_conf`` is the object built from argparse that holds the flags and
information needed to determine what metadata from the configuration to be
used.
"""
default_repo = cd_conf.get_default_repo()
components = detect_components(args, distro)
if args.release in cd_conf.get_repos():
LOG.info('will use repository from conf: %s' % args.release)
default_repo = args.release
elif default_repo:
LOG.info('will use default repository: %s' % default_repo)
# At this point we know there is a cd_conf and that it has custom
    # repos; make sure we were able to detect an actual repo
if not default_repo:
LOG.warning('a ceph-deploy config was found with repos \
but could not default to one')
else:
options = dict(cd_conf.items(default_repo))
options['install_ceph'] = False if install_ceph is False else True
extra_repos = cd_conf.get_list(default_repo, 'extra-repos')
rlogger.info('adding custom repository file')
try:
distro.repo_install(
distro,
default_repo,
options.pop('baseurl'),
options.pop('gpgkey'),
components=components,
**options
)
except KeyError as err:
raise RuntimeError('missing required key: %s in config section: %s' % (err, default_repo))
for xrepo in extra_repos:
rlogger.info('adding extra repo file: %s.repo' % xrepo)
options = dict(cd_conf.items(xrepo))
try:
distro.repo_install(
distro,
xrepo,
options.pop('baseurl'),
options.pop('gpgkey'),
components=components,
**options
)
except KeyError as err:
raise RuntimeError('missing required key: %s in config section: %s' % (err, xrepo))
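# Sketch of the kind of cephdeploy.conf section custom_repo() consumes. Only
# 'baseurl', 'gpgkey' and 'extra-repos' are read out explicitly above; every
# other key is passed through as **options to distro.repo_install. Section
# names and URLs are illustrative:
#
#   [myrepo]
#   baseurl = https://example.com/rpm-hammer/el7/x86_64
#   gpgkey = https://example.com/release.asc
#   extra-repos = extras
#
#   [extras]
#   baseurl = https://example.com/extras/el7/x86_64
#   gpgkey = https://example.com/release.asc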
def install_repo(args):
"""
    For a user that only wants to install the repository (and avoid
installing Ceph and its dependencies).
"""
cd_conf = getattr(args, 'cd_conf', None)
for hostname in args.host:
LOG.debug('Detecting platform for host %s ...', hostname)
distro = hosts.get(
hostname,
username=args.username,
# XXX this should get removed once Ceph packages are split for
# upstream. If default_release is True, it means that the user is
# trying to install on a RHEL machine and should expect to get RHEL
# packages. Otherwise, it will need to specify either a specific
# version, or repo, or a development branch. Other distro users should
# not see any differences.
use_rhceph=args.default_release,
)
rlogger = logging.getLogger(hostname)
LOG.info(
'Distro info: %s %s %s',
distro.name,
distro.release,
distro.codename
)
custom_repo(distro, args, cd_conf, rlogger, install_ceph=False)
def uninstall(args):
LOG.info('note that some dependencies *will not* be removed because they can cause issues with qemu-kvm')
LOG.info('like: librbd1 and librados2')
LOG.debug(
'Uninstalling on cluster %s hosts %s',
args.cluster,
' '.join(args.host),
)
for hostname in args.host:
LOG.debug('Detecting platform for host %s ...', hostname)
distro = hosts.get(
hostname,
username=args.username,
use_rhceph=True)
LOG.info('Distro info: %s %s %s', distro.name, distro.release, distro.codename)
rlogger = logging.getLogger(hostname)
rlogger.info('uninstalling Ceph on %s' % hostname)
distro.uninstall(distro)
distro.conn.exit()
def purge(args):
LOG.info('note that some dependencies *will not* be removed because they can cause issues with qemu-kvm')
LOG.info('like: librbd1 and librados2')
LOG.debug(
'Purging from cluster %s hosts %s',
args.cluster,
' '.join(args.host),
)
for hostname in args.host:
LOG.debug('Detecting platform for host %s ...', hostname)
distro = hosts.get(
hostname,
username=args.username,
use_rhceph=True
)
LOG.info('Distro info: %s %s %s', distro.name, distro.release, distro.codename)
rlogger = logging.getLogger(hostname)
rlogger.info('purging host ... %s' % hostname)
distro.uninstall(distro, purge=True)
distro.conn.exit()
def purgedata(args):
LOG.debug(
'Purging data from cluster %s hosts %s',
args.cluster,
' '.join(args.host),
)
installed_hosts = []
for hostname in args.host:
distro = hosts.get(hostname, username=args.username)
ceph_is_installed = distro.conn.remote_module.which('ceph')
if ceph_is_installed:
installed_hosts.append(hostname)
distro.conn.exit()
if installed_hosts:
LOG.error("Ceph is still installed on: %s", installed_hosts)
raise RuntimeError("refusing to purge data while Ceph is still installed")
for hostname in args.host:
distro = hosts.get(hostname, username=args.username)
LOG.info(
'Distro info: %s %s %s',
distro.name,
distro.release,
distro.codename
)
rlogger = logging.getLogger(hostname)
rlogger.info('purging data on %s' % hostname)
# Try to remove the contents of /var/lib/ceph first, don't worry
# about errors here, we deal with them later on
remoto.process.check(
distro.conn,
[
'rm', '-rf', '--one-file-system', '--', '/var/lib/ceph',
]
)
# If we failed in the previous call, then we probably have OSDs
# still mounted, so we unmount them here
if distro.conn.remote_module.path_exists('/var/lib/ceph'):
rlogger.warning(
'OSDs may still be mounted, trying to unmount them'
)
remoto.process.run(
distro.conn,
[
'find', '/var/lib/ceph',
'-mindepth', '1',
'-maxdepth', '2',
'-type', 'd',
'-exec', 'umount', '{}', ';',
]
)
# And now we try again to remove the contents, since OSDs should be
# unmounted, but this time we do check for errors
remoto.process.run(
distro.conn,
[
'rm', '-rf', '--one-file-system', '--', '/var/lib/ceph',
]
)
remoto.process.run(
distro.conn,
[
'rm', '-rf', '--one-file-system', '--', '/etc/ceph/',
]
)
distro.conn.exit()
class StoreVersion(argparse.Action):
"""
Like ``"store"`` but also remember which one of the exclusive
options was set.
There are three kinds of versions: stable, testing and dev.
This sets ``version_kind`` to be the right one of the above.
This kludge essentially lets us differentiate explicitly set
values from defaults.
"""
def __call__(self, parser, namespace, values, option_string=None):
setattr(namespace, self.dest, values)
if self.dest == 'release':
self.dest = 'stable'
namespace.version_kind = self.dest
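    # Behaviour sketch: `ceph-deploy install --dev wip-branch HOST` stores
    # namespace.dev = 'wip-branch' and namespace.version_kind = 'dev', while
    # `--release jewel` stores namespace.release = 'jewel' but reports
    # version_kind = 'stable', because 'release' is mapped back onto the
    # deprecated 'stable' kind above.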
@priority(20)
def make(parser):
"""
Install Ceph packages on remote hosts.
"""
version = parser.add_mutually_exclusive_group()
# XXX deprecated in favor of release
version.add_argument(
'--stable',
nargs='?',
action=StoreVersion,
metavar='CODENAME',
help='[DEPRECATED] install a release known as CODENAME\
(done by default) (default: %(default)s)',
)
version.add_argument(
'--release',
nargs='?',
action=StoreVersion,
metavar='CODENAME',
help='install a release known as CODENAME\
(done by default) (default: %(default)s)',
)
version.add_argument(
'--testing',
nargs=0,
action=StoreVersion,
help='install the latest development release',
)
version.add_argument(
'--dev',
nargs='?',
action=StoreVersion,
const='master',
metavar='BRANCH_OR_TAG',
help='install a bleeding edge build from Git branch\
or tag (default: %(default)s)',
)
version.set_defaults(
stable=None, # XXX deprecated in favor of release
release=None, # Set the default release in sanitize_args()
dev='master',
version_kind='stable',
)
parser.add_argument(
'--mon',
dest='install_mon',
action='store_true',
help='install the mon component only',
)
parser.add_argument(
'--mds',
dest='install_mds',
action='store_true',
help='install the mds component only',
)
parser.add_argument(
'--rgw',
dest='install_rgw',
action='store_true',
help='install the rgw component only',
)
parser.add_argument(
'--osd',
dest='install_osd',
action='store_true',
help='install the osd component only',
)
parser.add_argument(
'--cli', '--common',
dest='install_common',
action='store_true',
help='install the common component only',
)
parser.add_argument(
'--all',
dest='install_all',
action='store_true',
help='install all Ceph components (e.g. mon,osd,mds,rgw). This is the default',
)
repo = parser.add_mutually_exclusive_group()
repo.add_argument(
'--adjust-repos',
dest='adjust_repos',
action='store_true',
help='install packages modifying source repos',
)
repo.add_argument(
'--no-adjust-repos',
dest='adjust_repos',
action='store_false',
help='install packages without modifying source repos',
)
repo.add_argument(
'--repo',
action='store_true',
help='install repo files only (skips package installation)',
)
repo.set_defaults(
adjust_repos=True,
)
parser.add_argument(
'host',
metavar='HOST',
nargs='+',
help='hosts to install on',
)
parser.add_argument(
'--local-mirror',
nargs='?',
const='PATH',
default=None,
help='Fetch packages and push them to hosts for a local repo mirror',
)
parser.add_argument(
'--repo-url',
nargs='?',
dest='repo_url',
help='specify a repo URL that mirrors/contains Ceph packages',
)
parser.add_argument(
'--gpg-url',
nargs='?',
dest='gpg_url',
help='specify a GPG key URL to be used with custom repos\
(defaults to ceph.com)'
)
parser.set_defaults(
func=install,
)
@priority(80)
def make_uninstall(parser):
"""
Remove Ceph packages from remote hosts.
"""
parser.add_argument(
'host',
metavar='HOST',
nargs='+',
help='hosts to uninstall Ceph from',
)
parser.set_defaults(
func=uninstall,
)
@priority(80)
def make_purge(parser):
"""
Remove Ceph packages from remote hosts and purge all data.
"""
parser.add_argument(
'host',
metavar='HOST',
nargs='+',
help='hosts to purge Ceph from',
)
parser.set_defaults(
func=purge,
)
@priority(80)
def make_purge_data(parser):
"""
Purge (delete, destroy, discard, shred) any Ceph data from /var/lib/ceph
"""
parser.add_argument(
'host',
metavar='HOST',
nargs='+',
help='hosts to purge Ceph data from',
)
parser.set_defaults(
func=purgedata,
)
|
|
#!/usr/bin/env python
import sys
import struct
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from cmapdb import CMapDB, CMapParser, FileUnicodeMap, CMap
from encodingdb import EncodingDB, name2unicode
from psparser import PSStackParser
from psparser import PSEOF
from psparser import LIT, KWD, STRICT
from psparser import PSLiteral, literal_name
from pdftypes import PDFException, resolve1
from pdftypes import int_value, num_value
from pdftypes import list_value, dict_value, stream_value
from fontmetrics import FONT_METRICS
from utils import apply_matrix_norm, nunpack, choplist
def get_widths(seq):
widths = {}
r = []
for v in seq:
if isinstance(v, list):
if r:
char1 = r[-1]
for (i, w) in enumerate(v):
widths[char1+i] = w
r = []
elif isinstance(v, int):
r.append(v)
if len(r) == 3:
(char1, char2, w) = r
for i in xrange(char1, char2+1):
widths[i] = w
r = []
return widths
#assert get_widths([1]) == {}
#assert get_widths([1,2,3]) == {1:3, 2:3}
#assert get_widths([1,[2,3],6,[7,8]]) == {1:2,2:3, 6:7,7:8}
def get_widths2(seq):
widths = {}
r = []
for v in seq:
if isinstance(v, list):
if r:
char1 = r[-1]
for (i, (w, vx, vy)) in enumerate(choplist(3, v)):
widths[char1+i] = (w, (vx, vy))
r = []
elif isinstance(v, int):
r.append(v)
if len(r) == 5:
(char1, char2, w, vx, vy) = r
for i in xrange(char1, char2+1):
widths[i] = (w, (vx, vy))
r = []
return widths
#assert get_widths2([1]) == {}
#assert get_widths2([1,2,3,4,5]) == {1:(3, (4,5)), 2:(3, (4,5))}
#assert get_widths2([1,[2,3,4,5],6,[7,8,9]]) == {1:(2, (3,4)), 6:(7, (8,9))}
## FontMetricsDB
##
class FontMetricsDB(object):
@classmethod
def get_metrics(klass, fontname):
return FONT_METRICS[fontname]
## Type1FontHeaderParser
##
class Type1FontHeaderParser(PSStackParser):
KEYWORD_BEGIN = KWD('begin')
KEYWORD_END = KWD('end')
KEYWORD_DEF = KWD('def')
KEYWORD_PUT = KWD('put')
KEYWORD_DICT = KWD('dict')
KEYWORD_ARRAY = KWD('array')
KEYWORD_READONLY = KWD('readonly')
KEYWORD_FOR = KWD('for')
def __init__(self, data):
PSStackParser.__init__(self, data)
self._cid2unicode = {}
return
def get_encoding(self):
while 1:
try:
(cid, name) = self.nextobject()
except PSEOF:
break
try:
self._cid2unicode[cid] = name2unicode(name)
except KeyError:
pass
return self._cid2unicode
def do_keyword(self, pos, token):
if token is self.KEYWORD_PUT:
((_, key), (_, value)) = self.pop(2)
if (isinstance(key, int) and
isinstance(value, PSLiteral)):
self.add_results((key, literal_name(value)))
return
NIBBLES = ('0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '.', 'e', 'e-', None, '-')
## CFFFont
## (Format specified in Adobe Technical Note: #5176
## "The Compact Font Format Specification")
##
def getdict(data):
d = {}
fp = StringIO(data)
stack = []
while 1:
c = fp.read(1)
if not c:
break
b0 = ord(c)
if b0 <= 21:
d[b0] = stack
stack = []
continue
if b0 == 30:
s = ''
loop = True
while loop:
b = ord(fp.read(1))
for n in (b >> 4, b & 15):
if n == 15:
loop = False
else:
s += NIBBLES[n]
value = float(s)
elif 32 <= b0 and b0 <= 246:
value = b0-139
else:
b1 = ord(fp.read(1))
if 247 <= b0 and b0 <= 250:
value = ((b0-247) << 8)+b1+108
elif 251 <= b0 and b0 <= 254:
value = -((b0-251) << 8)-b1-108
else:
b2 = ord(fp.read(1))
if 128 <= b1:
b1 -= 256
if b0 == 28:
value = b1 << 8 | b2
else:
value = b1 << 24 | b2 << 16 | struct.unpack('>H', fp.read(2))[0]
stack.append(value)
return d
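# Operand encoding handled by getdict() above, per the CFF spec (Adobe TN #5176):
# b0 <= 21 is an operator byte that terminates the collected operand list;
# b0 == 28 is a 16-bit integer and the remaining two-byte-prefix case
# (operator 29 in the spec) a 32-bit integer; b0 == 30 starts a nibble-encoded
# real number (see NIBBLES); 32..246 encode the value b0 - 139; 247..250 and
# 251..254 are two-byte positive and negative integers offset by 108.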
class CFFFont(object):
STANDARD_STRINGS = (
'.notdef', 'space', 'exclam', 'quotedbl', 'numbersign',
'dollar', 'percent', 'ampersand', 'quoteright', 'parenleft',
'parenright', 'asterisk', 'plus', 'comma', 'hyphen', 'period',
'slash', 'zero', 'one', 'two', 'three', 'four', 'five', 'six',
'seven', 'eight', 'nine', 'colon', 'semicolon', 'less', 'equal',
'greater', 'question', 'at', 'A', 'B', 'C', 'D', 'E', 'F', 'G',
'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T',
'U', 'V', 'W', 'X', 'Y', 'Z', 'bracketleft', 'backslash',
'bracketright', 'asciicircum', 'underscore', 'quoteleft', 'a',
'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n',
'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',
'braceleft', 'bar', 'braceright', 'asciitilde', 'exclamdown',
'cent', 'sterling', 'fraction', 'yen', 'florin', 'section',
'currency', 'quotesingle', 'quotedblleft', 'guillemotleft',
'guilsinglleft', 'guilsinglright', 'fi', 'fl', 'endash',
'dagger', 'daggerdbl', 'periodcentered', 'paragraph', 'bullet',
'quotesinglbase', 'quotedblbase', 'quotedblright',
'guillemotright', 'ellipsis', 'perthousand', 'questiondown',
'grave', 'acute', 'circumflex', 'tilde', 'macron', 'breve',
'dotaccent', 'dieresis', 'ring', 'cedilla', 'hungarumlaut',
'ogonek', 'caron', 'emdash', 'AE', 'ordfeminine', 'Lslash',
'Oslash', 'OE', 'ordmasculine', 'ae', 'dotlessi', 'lslash',
'oslash', 'oe', 'germandbls', 'onesuperior', 'logicalnot', 'mu',
'trademark', 'Eth', 'onehalf', 'plusminus', 'Thorn',
'onequarter', 'divide', 'brokenbar', 'degree', 'thorn',
'threequarters', 'twosuperior', 'registered', 'minus', 'eth',
'multiply', 'threesuperior', 'copyright', 'Aacute',
'Acircumflex', 'Adieresis', 'Agrave', 'Aring', 'Atilde',
'Ccedilla', 'Eacute', 'Ecircumflex', 'Edieresis', 'Egrave',
'Iacute', 'Icircumflex', 'Idieresis', 'Igrave', 'Ntilde',
'Oacute', 'Ocircumflex', 'Odieresis', 'Ograve', 'Otilde',
'Scaron', 'Uacute', 'Ucircumflex', 'Udieresis', 'Ugrave',
'Yacute', 'Ydieresis', 'Zcaron', 'aacute', 'acircumflex',
'adieresis', 'agrave', 'aring', 'atilde', 'ccedilla', 'eacute',
'ecircumflex', 'edieresis', 'egrave', 'iacute', 'icircumflex',
'idieresis', 'igrave', 'ntilde', 'oacute', 'ocircumflex',
'odieresis', 'ograve', 'otilde', 'scaron', 'uacute',
'ucircumflex', 'udieresis', 'ugrave', 'yacute', 'ydieresis',
'zcaron', 'exclamsmall', 'Hungarumlautsmall', 'dollaroldstyle',
'dollarsuperior', 'ampersandsmall', 'Acutesmall',
'parenleftsuperior', 'parenrightsuperior', 'twodotenleader',
'onedotenleader', 'zerooldstyle', 'oneoldstyle', 'twooldstyle',
'threeoldstyle', 'fouroldstyle', 'fiveoldstyle', 'sixoldstyle',
'sevenoldstyle', 'eightoldstyle', 'nineoldstyle',
'commasuperior', 'threequartersemdash', 'periodsuperior',
'questionsmall', 'asuperior', 'bsuperior', 'centsuperior',
'dsuperior', 'esuperior', 'isuperior', 'lsuperior', 'msuperior',
'nsuperior', 'osuperior', 'rsuperior', 'ssuperior', 'tsuperior',
'ff', 'ffi', 'ffl', 'parenleftinferior', 'parenrightinferior',
'Circumflexsmall', 'hyphensuperior', 'Gravesmall', 'Asmall',
'Bsmall', 'Csmall', 'Dsmall', 'Esmall', 'Fsmall', 'Gsmall',
'Hsmall', 'Ismall', 'Jsmall', 'Ksmall', 'Lsmall', 'Msmall',
'Nsmall', 'Osmall', 'Psmall', 'Qsmall', 'Rsmall', 'Ssmall',
'Tsmall', 'Usmall', 'Vsmall', 'Wsmall', 'Xsmall', 'Ysmall',
'Zsmall', 'colonmonetary', 'onefitted', 'rupiah', 'Tildesmall',
'exclamdownsmall', 'centoldstyle', 'Lslashsmall', 'Scaronsmall',
'Zcaronsmall', 'Dieresissmall', 'Brevesmall', 'Caronsmall',
'Dotaccentsmall', 'Macronsmall', 'figuredash', 'hypheninferior',
'Ogoneksmall', 'Ringsmall', 'Cedillasmall', 'questiondownsmall',
'oneeighth', 'threeeighths', 'fiveeighths', 'seveneighths',
'onethird', 'twothirds', 'zerosuperior', 'foursuperior',
'fivesuperior', 'sixsuperior', 'sevensuperior', 'eightsuperior',
'ninesuperior', 'zeroinferior', 'oneinferior', 'twoinferior',
'threeinferior', 'fourinferior', 'fiveinferior', 'sixinferior',
'seveninferior', 'eightinferior', 'nineinferior',
'centinferior', 'dollarinferior', 'periodinferior',
'commainferior', 'Agravesmall', 'Aacutesmall',
'Acircumflexsmall', 'Atildesmall', 'Adieresissmall',
'Aringsmall', 'AEsmall', 'Ccedillasmall', 'Egravesmall',
'Eacutesmall', 'Ecircumflexsmall', 'Edieresissmall',
'Igravesmall', 'Iacutesmall', 'Icircumflexsmall',
'Idieresissmall', 'Ethsmall', 'Ntildesmall', 'Ogravesmall',
'Oacutesmall', 'Ocircumflexsmall', 'Otildesmall',
'Odieresissmall', 'OEsmall', 'Oslashsmall', 'Ugravesmall',
'Uacutesmall', 'Ucircumflexsmall', 'Udieresissmall',
'Yacutesmall', 'Thornsmall', 'Ydieresissmall', '001.000',
'001.001', '001.002', '001.003', 'Black', 'Bold', 'Book',
'Light', 'Medium', 'Regular', 'Roman', 'Semibold',
)
class INDEX(object):
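        # A CFF INDEX structure: a 2-byte count, a 1-byte offset size,
        # count+1 offsets and the concatenated object data.  Items are
        # read lazily through __getitem__ using the stored offsets.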
def __init__(self, fp):
self.fp = fp
self.offsets = []
(count, offsize) = struct.unpack('>HB', self.fp.read(3))
for i in xrange(count+1):
self.offsets.append(nunpack(self.fp.read(offsize)))
self.base = self.fp.tell()-1
self.fp.seek(self.base+self.offsets[-1])
return
def __repr__(self):
return '<INDEX: size=%d>' % len(self)
def __len__(self):
return len(self.offsets)-1
def __getitem__(self, i):
self.fp.seek(self.base+self.offsets[i])
return self.fp.read(self.offsets[i+1]-self.offsets[i])
def __iter__(self):
return iter(self[i] for i in xrange(len(self)))
def __init__(self, name, fp):
self.name = name
self.fp = fp
# Header
(_major, _minor, hdrsize, offsize) = struct.unpack('BBBB', self.fp.read(4))
self.fp.read(hdrsize-4)
# Name INDEX
self.name_index = self.INDEX(self.fp)
# Top DICT INDEX
self.dict_index = self.INDEX(self.fp)
# String INDEX
self.string_index = self.INDEX(self.fp)
# Global Subr INDEX
self.subr_index = self.INDEX(self.fp)
# Top DICT DATA
self.top_dict = getdict(self.dict_index[0])
(charset_pos,) = self.top_dict.get(15, [0])
(encoding_pos,) = self.top_dict.get(16, [0])
(charstring_pos,) = self.top_dict.get(17, [0])
# CharStrings
self.fp.seek(charstring_pos)
self.charstring = self.INDEX(self.fp)
self.nglyphs = len(self.charstring)
# Encodings
self.code2gid = {}
self.gid2code = {}
self.fp.seek(encoding_pos)
format = self.fp.read(1)
if format == '\x00':
# Format 0
(n,) = struct.unpack('B', self.fp.read(1))
for (code, gid) in enumerate(struct.unpack('B'*n, self.fp.read(n))):
self.code2gid[code] = gid
self.gid2code[gid] = code
elif format == '\x01':
# Format 1
(n,) = struct.unpack('B', self.fp.read(1))
code = 0
for i in xrange(n):
(first, nleft) = struct.unpack('BB', self.fp.read(2))
for gid in xrange(first, first+nleft+1):
self.code2gid[code] = gid
self.gid2code[gid] = code
code += 1
else:
raise ValueError('unsupported encoding format: %r' % format)
# Charsets
self.name2gid = {}
self.gid2name = {}
self.fp.seek(charset_pos)
format = self.fp.read(1)
if format == '\x00':
# Format 0
n = self.nglyphs-1
for (gid, sid) in enumerate(struct.unpack('>'+'H'*n, self.fp.read(2*n))):
gid += 1
name = self.getstr(sid)
self.name2gid[name] = gid
self.gid2name[gid] = name
elif format == '\x01':
# Format 1
(n,) = struct.unpack('B', self.fp.read(1))
sid = 0
for i in xrange(n):
(first, nleft) = struct.unpack('BB', self.fp.read(2))
for gid in xrange(first, first+nleft+1):
name = self.getstr(sid)
self.name2gid[name] = gid
self.gid2name[gid] = name
sid += 1
elif format == '\x02':
# Format 2
assert 0
else:
raise ValueError('unsupported charset format: %r' % format)
#print self.code2gid
#print self.name2gid
#assert 0
return
def getstr(self, sid):
if sid < len(self.STANDARD_STRINGS):
return self.STANDARD_STRINGS[sid]
return self.string_index[sid-len(self.STANDARD_STRINGS)]
## TrueTypeFont
##
class TrueTypeFont(object):
class CMapNotFound(Exception):
pass
def __init__(self, name, fp):
self.name = name
self.fp = fp
self.tables = {}
self.fonttype = fp.read(4)
(ntables, _1, _2, _3) = struct.unpack('>HHHH', fp.read(8))
for _ in xrange(ntables):
(name, tsum, offset, length) = struct.unpack('>4sLLL', fp.read(16))
self.tables[name] = (offset, length)
return
def create_unicode_map(self):
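        # Parse the TrueType 'cmap' table and build a FileUnicodeMap that
        # maps glyph ids back to the character codes they were mapped from.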
if 'cmap' not in self.tables:
raise TrueTypeFont.CMapNotFound
(base_offset, length) = self.tables['cmap']
fp = self.fp
fp.seek(base_offset)
(version, nsubtables) = struct.unpack('>HH', fp.read(4))
subtables = []
for i in xrange(nsubtables):
subtables.append(struct.unpack('>HHL', fp.read(8)))
char2gid = {}
        # Only subtable formats 0, 2 and 4 are supported.
for (_1, _2, st_offset) in subtables:
fp.seek(base_offset+st_offset)
(fmttype, fmtlen, fmtlang) = struct.unpack('>HHH', fp.read(6))
if fmttype == 0:
char2gid.update(enumerate(struct.unpack('>256B', fp.read(256))))
elif fmttype == 2:
subheaderkeys = struct.unpack('>256H', fp.read(512))
firstbytes = [0]*8192
for (i, k) in enumerate(subheaderkeys):
firstbytes[k/8] = i
nhdrs = max(subheaderkeys)/8 + 1
hdrs = []
for i in xrange(nhdrs):
(firstcode, entcount, delta, offset) = struct.unpack('>HHhH', fp.read(8))
hdrs.append((i, firstcode, entcount, delta, fp.tell()-2+offset))
for (i, firstcode, entcount, delta, pos) in hdrs:
if not entcount:
continue
first = firstcode + (firstbytes[i] << 8)
fp.seek(pos)
for c in xrange(entcount):
                        (gid,) = struct.unpack('>H', fp.read(2))
if gid:
gid += delta
char2gid[first+c] = gid
elif fmttype == 4:
(segcount, _1, _2, _3) = struct.unpack('>HHHH', fp.read(8))
segcount /= 2
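                # Format 4: parallel arrays of segment end codes, start
                # codes, id deltas and id range offsets describe segcount
                # contiguous character ranges (a 2-byte reservedPad sits
                # between the end code and start code arrays).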
ecs = struct.unpack('>%dH' % segcount, fp.read(2*segcount))
fp.read(2)
scs = struct.unpack('>%dH' % segcount, fp.read(2*segcount))
idds = struct.unpack('>%dh' % segcount, fp.read(2*segcount))
pos = fp.tell()
idrs = struct.unpack('>%dH' % segcount, fp.read(2*segcount))
for (ec, sc, idd, idr) in zip(ecs, scs, idds, idrs):
if idr:
fp.seek(pos+idr)
for c in xrange(sc, ec+1):
char2gid[c] = (struct.unpack('>H', fp.read(2))[0] + idd) & 0xffff
else:
for c in xrange(sc, ec+1):
char2gid[c] = (c + idd) & 0xffff
else:
assert 0
# create unicode map
unicode_map = FileUnicodeMap()
for (char, gid) in char2gid.iteritems():
unicode_map.add_cid2unichr(gid, char)
return unicode_map
## Fonts
##
class PDFFontError(PDFException):
pass
class PDFUnicodeNotDefined(PDFFontError):
pass
LITERAL_STANDARD_ENCODING = LIT('StandardEncoding')
LITERAL_TYPE1C = LIT('Type1C')
# PDFFont
class PDFFont(object):
def __init__(self, descriptor, widths, default_width=None):
self.descriptor = descriptor
self.widths = widths
self.fontname = resolve1(descriptor.get('FontName', 'unknown'))
if isinstance(self.fontname, PSLiteral):
self.fontname = literal_name(self.fontname)
self.flags = int_value(descriptor.get('Flags', 0))
self.ascent = num_value(descriptor.get('Ascent', 0))
self.descent = num_value(descriptor.get('Descent', 0))
self.italic_angle = num_value(descriptor.get('ItalicAngle', 0))
self.default_width = default_width or num_value(descriptor.get('MissingWidth', 0))
self.leading = num_value(descriptor.get('Leading', 0))
self.bbox = list_value(descriptor.get('FontBBox', (0, 0, 0, 0)))
self.hscale = self.vscale = .001
return
def __repr__(self):
return '<PDFFont>'
def is_vertical(self):
return False
def is_multibyte(self):
return False
def decode(self, bytes):
return map(ord, bytes)
def get_ascent(self):
return self.ascent * self.vscale
def get_descent(self):
return self.descent * self.vscale
def get_width(self):
w = self.bbox[2]-self.bbox[0]
if w == 0:
w = -self.default_width
return w * self.hscale
def get_height(self):
h = self.bbox[3]-self.bbox[1]
if h == 0:
h = self.ascent - self.descent
return h * self.vscale
def char_width(self, cid):
try:
return self.widths[cid] * self.hscale
except KeyError:
try:
return self.widths[self.to_unichr(cid)] * self.hscale
except (KeyError, PDFUnicodeNotDefined):
return self.default_width * self.hscale
def char_disp(self, cid):
return 0
def string_width(self, s):
return sum(self.char_width(cid) for cid in self.decode(s))
# PDFSimpleFont
class PDFSimpleFont(PDFFont):
def __init__(self, descriptor, widths, spec):
        # Font encoding is specified either by the name of a built-in
        # encoding or by a dictionary that describes the differences
        # from a base encoding.
if 'Encoding' in spec:
encoding = resolve1(spec['Encoding'])
else:
encoding = LITERAL_STANDARD_ENCODING
if isinstance(encoding, dict):
name = literal_name(encoding.get('BaseEncoding', LITERAL_STANDARD_ENCODING))
diff = list_value(encoding.get('Differences', None))
self.cid2unicode = EncodingDB.get_encoding(name, diff)
else:
self.cid2unicode = EncodingDB.get_encoding(literal_name(encoding))
self.unicode_map = None
if 'ToUnicode' in spec:
strm = stream_value(spec['ToUnicode'])
self.unicode_map = FileUnicodeMap()
CMapParser(self.unicode_map, StringIO(strm.get_data())).run()
PDFFont.__init__(self, descriptor, widths)
return
def to_unichr(self, cid):
if self.unicode_map:
try:
return self.unicode_map.get_unichr(cid)
except KeyError:
pass
try:
return self.cid2unicode[cid]
except KeyError:
raise PDFUnicodeNotDefined(None, cid)
# PDFType1Font
class PDFType1Font(PDFSimpleFont):
def __init__(self, rsrcmgr, spec):
try:
self.basefont = literal_name(spec['BaseFont'])
except KeyError:
if STRICT:
raise PDFFontError('BaseFont is missing')
self.basefont = 'unknown'
try:
(descriptor, widths) = FontMetricsDB.get_metrics(self.basefont)
except KeyError:
descriptor = dict_value(spec.get('FontDescriptor', {}))
firstchar = int_value(spec.get('FirstChar', 0))
lastchar = int_value(spec.get('LastChar', 255))
widths = list_value(spec.get('Widths', [0]*256))
widths = dict((i+firstchar, w) for (i, w) in enumerate(widths))
PDFSimpleFont.__init__(self, descriptor, widths, spec)
if 'Encoding' not in spec and 'FontFile' in descriptor:
# try to recover the missing encoding info from the font file.
self.fontfile = stream_value(descriptor.get('FontFile'))
length1 = int_value(self.fontfile['Length1'])
data = self.fontfile.get_data()[:length1]
parser = Type1FontHeaderParser(StringIO(data))
self.cid2unicode = parser.get_encoding()
return
def __repr__(self):
return '<PDFType1Font: basefont=%r>' % self.basefont
# PDFTrueTypeFont
class PDFTrueTypeFont(PDFType1Font):
def __repr__(self):
return '<PDFTrueTypeFont: basefont=%r>' % self.basefont
# PDFType3Font
class PDFType3Font(PDFSimpleFont):
def __init__(self, rsrcmgr, spec):
firstchar = int_value(spec.get('FirstChar', 0))
lastchar = int_value(spec.get('LastChar', 0))
widths = list_value(spec.get('Widths', [0]*256))
widths = dict((i+firstchar, w) for (i, w) in enumerate(widths))
if 'FontDescriptor' in spec:
descriptor = dict_value(spec['FontDescriptor'])
else:
descriptor = {'Ascent': 0, 'Descent': 0,
'FontBBox': spec['FontBBox']}
PDFSimpleFont.__init__(self, descriptor, widths, spec)
self.matrix = tuple(list_value(spec.get('FontMatrix')))
(_, self.descent, _, self.ascent) = self.bbox
(self.hscale, self.vscale) = apply_matrix_norm(self.matrix, (1, 1))
return
def __repr__(self):
return '<PDFType3Font>'
# PDFCIDFont
class PDFCIDFont(PDFFont):
def __init__(self, rsrcmgr, spec):
try:
self.basefont = literal_name(spec['BaseFont'])
except KeyError:
if STRICT:
raise PDFFontError('BaseFont is missing')
self.basefont = 'unknown'
self.cidsysteminfo = dict_value(spec.get('CIDSystemInfo', {}))
self.cidcoding = '%s-%s' % (self.cidsysteminfo.get('Registry', 'unknown'),
self.cidsysteminfo.get('Ordering', 'unknown'))
try:
name = literal_name(spec['Encoding'])
except KeyError:
if STRICT:
raise PDFFontError('Encoding is unspecified')
name = 'unknown'
try:
self.cmap = CMapDB.get_cmap(name)
except CMapDB.CMapNotFound, e:
if STRICT:
raise PDFFontError(e)
self.cmap = CMap()
try:
descriptor = dict_value(spec['FontDescriptor'])
except KeyError:
if STRICT:
raise PDFFontError('FontDescriptor is missing')
descriptor = {}
ttf = None
if 'FontFile2' in descriptor:
self.fontfile = stream_value(descriptor.get('FontFile2'))
ttf = TrueTypeFont(self.basefont,
StringIO(self.fontfile.get_data()))
self.unicode_map = None
if 'ToUnicode' in spec:
strm = stream_value(spec['ToUnicode'])
self.unicode_map = FileUnicodeMap()
CMapParser(self.unicode_map, StringIO(strm.get_data())).run()
elif self.cidcoding in ('Adobe-Identity', 'Adobe-UCS'):
if ttf:
try:
self.unicode_map = ttf.create_unicode_map()
except TrueTypeFont.CMapNotFound:
pass
else:
try:
self.unicode_map = CMapDB.get_unicode_map(self.cidcoding, self.cmap.is_vertical())
except CMapDB.CMapNotFound, e:
pass
self.vertical = self.cmap.is_vertical()
if self.vertical:
# writing mode: vertical
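            # W2 gives per-CID (w, (vx, vy)) entries; DW2 supplies the
            # default position vector y-component and the default width
            # used when a CID has no explicit entry.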
widths = get_widths2(list_value(spec.get('W2', [])))
self.disps = dict((cid, (vx, vy)) for (cid, (_, (vx, vy))) in widths.iteritems())
(vy, w) = spec.get('DW2', [880, -1000])
self.default_disp = (None, vy)
widths = dict((cid, w) for (cid, (w, _)) in widths.iteritems())
default_width = w
else:
# writing mode: horizontal
self.disps = {}
self.default_disp = 0
widths = get_widths(list_value(spec.get('W', [])))
default_width = spec.get('DW', 1000)
PDFFont.__init__(self, descriptor, widths, default_width=default_width)
return
def __repr__(self):
return '<PDFCIDFont: basefont=%r, cidcoding=%r>' % (self.basefont, self.cidcoding)
def is_vertical(self):
return self.vertical
def is_multibyte(self):
return True
def decode(self, bytes):
return self.cmap.decode(bytes)
def char_disp(self, cid):
"Returns an integer for horizontal fonts, a tuple for vertical fonts."
return self.disps.get(cid, self.default_disp)
def to_unichr(self, cid):
try:
if not self.unicode_map:
raise KeyError(cid)
return self.unicode_map.get_unichr(cid)
except KeyError:
raise PDFUnicodeNotDefined(self.cidcoding, cid)
# main
def main(argv):
for fname in argv[1:]:
fp = file(fname, 'rb')
#font = TrueTypeFont(fname, fp)
font = CFFFont(fname, fp)
print font
fp.close()
return
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
|
import itertools
import operator
import sys
if sys.version_info[0] != 3:
from functools import reduce
from functools import wraps
from peewee import *
from playhouse.tests.base import compiler
from playhouse.tests.base import database_initializer
from playhouse.tests.base import log_console
from playhouse.tests.base import ModelTestCase
from playhouse.tests.base import PeeweeTestCase
from playhouse.tests.base import skip_unless
from playhouse.tests.base import test_db
from playhouse.tests.models import *
compound_db = database_initializer.get_in_memory_database()
class CompoundBase(Model):
class Meta:
database = compound_db
class Alpha(CompoundBase):
alpha = IntegerField()
class Beta(CompoundBase):
beta = IntegerField()
other = IntegerField(default=0)
class Gamma(CompoundBase):
gamma = IntegerField()
other = IntegerField(default=1)
class TestCompoundSelectSQL(PeeweeTestCase):
def setUp(self):
super(TestCompoundSelectSQL, self).setUp()
compound_db.compound_select_parentheses = False # Restore default.
self.a1 = Alpha.select(Alpha.alpha).where(Alpha.alpha < 2)
self.a2 = Alpha.select(Alpha.alpha).where(Alpha.alpha > 5)
self.b1 = Beta.select(Beta.beta).where(Beta.beta < 3)
self.b2 = Beta.select(Beta.beta).where(Beta.beta > 4)
def test_simple_sql(self):
lhs = Alpha.select(Alpha.alpha)
rhs = Beta.select(Beta.beta)
sql, params = (lhs | rhs).sql()
self.assertEqual(sql, (
'SELECT "t1"."alpha" FROM "alpha" AS t1 UNION '
'SELECT "t2"."beta" FROM "beta" AS t2'))
sql, params = (
Alpha.select(Alpha.alpha) |
Beta.select(Beta.beta) |
Gamma.select(Gamma.gamma)).sql()
self.assertEqual(sql, (
'SELECT "t1"."alpha" FROM "alpha" AS t1 UNION '
'SELECT "t2"."beta" FROM "beta" AS t2 UNION '
'SELECT "t3"."gamma" FROM "gamma" AS t3'))
sql, params = (
Alpha.select(Alpha.alpha) |
(Beta.select(Beta.beta) |
Gamma.select(Gamma.gamma))).sql()
self.assertEqual(sql, (
'SELECT "t3"."alpha" FROM "alpha" AS t3 UNION '
'SELECT "t1"."beta" FROM "beta" AS t1 UNION '
'SELECT "t2"."gamma" FROM "gamma" AS t2'))
def test_simple_same_model(self):
queries = [Alpha.select(Alpha.alpha) for i in range(3)]
lhs = queries[0] | queries[1]
compound = lhs | queries[2]
sql, params = compound.sql()
self.assertEqual(sql, (
'SELECT "t1"."alpha" FROM "alpha" AS t1 UNION '
'SELECT "t2"."alpha" FROM "alpha" AS t2 UNION '
'SELECT "t3"."alpha" FROM "alpha" AS t3'))
lhs = queries[0]
compound = lhs | (queries[1] | queries[2])
sql, params = compound.sql()
self.assertEqual(sql, (
'SELECT "t3"."alpha" FROM "alpha" AS t3 UNION '
'SELECT "t1"."alpha" FROM "alpha" AS t1 UNION '
'SELECT "t2"."alpha" FROM "alpha" AS t2'))
def test_where_clauses(self):
sql, params = (self.a1 | self.a2).sql()
self.assertEqual(sql, (
'SELECT "t1"."alpha" FROM "alpha" AS t1 WHERE ("t1"."alpha" < ?) '
'UNION '
'SELECT "t2"."alpha" FROM "alpha" AS t2 WHERE ("t2"."alpha" > ?)'))
self.assertEqual(params, [2, 5])
sql, params = (self.a1 | self.b1).sql()
self.assertEqual(sql, (
'SELECT "t1"."alpha" FROM "alpha" AS t1 WHERE ("t1"."alpha" < ?) '
'UNION '
'SELECT "t2"."beta" FROM "beta" AS t2 WHERE ("t2"."beta" < ?)'))
self.assertEqual(params, [2, 3])
sql, params = (self.a1 | self.b1 | self.a2 | self.b2).sql()
self.assertEqual(sql, (
'SELECT "t1"."alpha" FROM "alpha" AS t1 WHERE ("t1"."alpha" < ?) '
'UNION '
'SELECT "t2"."beta" FROM "beta" AS t2 WHERE ("t2"."beta" < ?) '
'UNION '
'SELECT "t4"."alpha" FROM "alpha" AS t4 WHERE ("t4"."alpha" > ?) '
'UNION '
'SELECT "t3"."beta" FROM "beta" AS t3 WHERE ("t3"."beta" > ?)'))
self.assertEqual(params, [2, 3, 5, 4])
def test_outer_limit(self):
sql, params = (self.a1 | self.a2).limit(3).sql()
self.assertEqual(sql, (
'SELECT "t1"."alpha" FROM "alpha" AS t1 WHERE ("t1"."alpha" < ?) '
'UNION '
'SELECT "t2"."alpha" FROM "alpha" AS t2 WHERE ("t2"."alpha" > ?) '
'LIMIT 3'))
def test_union_in_from(self):
compound = (self.a1 | self.a2).alias('cq')
sql, params = Alpha.select(compound.c.alpha).from_(compound).sql()
self.assertEqual(sql, (
'SELECT "cq"."alpha" FROM ('
'SELECT "t1"."alpha" FROM "alpha" AS t1 WHERE ("t1"."alpha" < ?) '
'UNION '
'SELECT "t2"."alpha" FROM "alpha" AS t2 WHERE ("t2"."alpha" > ?)'
') AS cq'))
compound = (self.a1 | self.b1 | self.b2).alias('cq')
sql, params = Alpha.select(SQL('1')).from_(compound).sql()
self.assertEqual(sql, (
'SELECT 1 FROM ('
'SELECT "t1"."alpha" FROM "alpha" AS t1 WHERE ("t1"."alpha" < ?) '
'UNION '
'SELECT "t2"."beta" FROM "beta" AS t2 WHERE ("t2"."beta" < ?) '
'UNION '
'SELECT "t3"."beta" FROM "beta" AS t3 WHERE ("t3"."beta" > ?)'
') AS cq'))
self.assertEqual(params, [2, 3, 4])
def test_parentheses(self):
compound_db.compound_select_parentheses = True
sql, params = (self.a1 | self.a2).sql()
self.assertEqual(sql, (
'(SELECT "t1"."alpha" FROM "alpha" AS t1 '
'WHERE ("t1"."alpha" < ?)) '
'UNION '
'(SELECT "t2"."alpha" FROM "alpha" AS t2 '
'WHERE ("t2"."alpha" > ?))'))
self.assertEqual(params, [2, 5])
def test_multiple_with_parentheses(self):
compound_db.compound_select_parentheses = True
queries = [Alpha.select(Alpha.alpha) for i in range(3)]
lhs = queries[0] | queries[1]
compound = lhs | queries[2]
sql, params = compound.sql()
self.assertEqual(sql, (
'((SELECT "t1"."alpha" FROM "alpha" AS t1) UNION '
'(SELECT "t2"."alpha" FROM "alpha" AS t2)) UNION '
'(SELECT "t3"."alpha" FROM "alpha" AS t3)'))
lhs = queries[0]
compound = lhs | (queries[1] | queries[2])
sql, params = compound.sql()
self.assertEqual(sql, (
'(SELECT "t3"."alpha" FROM "alpha" AS t3) UNION '
'((SELECT "t1"."alpha" FROM "alpha" AS t1) UNION '
'(SELECT "t2"."alpha" FROM "alpha" AS t2))'))
def test_inner_limit(self):
compound_db.compound_select_parentheses = True
a1 = Alpha.select(Alpha.alpha).where(Alpha.alpha < 2).limit(2)
a2 = Alpha.select(Alpha.alpha).where(Alpha.alpha > 5).limit(4)
sql, params = (a1 | a2).limit(3).sql()
self.assertEqual(sql, (
'(SELECT "t1"."alpha" FROM "alpha" AS t1 WHERE ("t1"."alpha" < ?) '
'LIMIT 2) '
'UNION '
'(SELECT "t2"."alpha" FROM "alpha" AS t2 WHERE ("t2"."alpha" > ?) '
'LIMIT 4) '
'LIMIT 3'))
def test_union_subquery(self):
union = (Alpha.select(Alpha.alpha) |
Beta.select(Beta.beta))
query = Alpha.select().where(Alpha.alpha << union)
sql, params = query.sql()
self.assertEqual(sql, (
'SELECT "t1"."id", "t1"."alpha" '
'FROM "alpha" AS t1 WHERE ("t1"."alpha" IN ('
'SELECT "t1"."alpha" FROM "alpha" AS t1 '
'UNION '
'SELECT "t2"."beta" FROM "beta" AS t2))'))
class TestCompoundSelectQueries(ModelTestCase):
requires = [User, UniqueModel, OrderedModel]
# User -> username, UniqueModel -> name, OrderedModel -> title
test_values = {
User.username: ['a', 'b', 'c', 'd'],
OrderedModel.title: ['a', 'c', 'e'],
UniqueModel.name: ['b', 'd', 'e'],
}
def setUp(self):
super(TestCompoundSelectQueries, self).setUp()
for field, values in self.test_values.items():
for value in values:
field.model_class.create(**{field.name: value})
def requires_op(op):
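        # Decorator that runs the wrapped test only when the current test
        # database advertises support for the given compound operation
        # (checked against test_db.compound_operations); otherwise the
        # test is skipped with a console note.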
def decorator(fn):
@wraps(fn)
def inner(self):
if op in test_db.compound_operations:
return fn(self)
else:
log_console('"%s" not supported, skipping %s' %
(op, fn.__name__))
return inner
return decorator
def assertValues(self, query, expected):
self.assertEqual(sorted(query.tuples()),
[(x,) for x in sorted(expected)])
def assertPermutations(self, op, expected):
fields = {
User: User.username,
UniqueModel: UniqueModel.name,
OrderedModel: OrderedModel.title,
}
for key in itertools.permutations(fields.keys(), 2):
if key in expected:
left, right = key
query = op(left.select(fields[left]).order_by(),
right.select(fields[right]).order_by())
# Ensure the sorted tuples returned from the query are equal
# to the sorted values we expected for this combination.
self.assertValues(query, expected[key])
@requires_op('UNION')
def test_union(self):
all_letters = ['a', 'b', 'c', 'd', 'e']
self.assertPermutations(operator.or_, {
(User, UniqueModel): all_letters,
(User, OrderedModel): all_letters,
(UniqueModel, User): all_letters,
(UniqueModel, OrderedModel): all_letters,
(OrderedModel, User): all_letters,
(OrderedModel, UniqueModel): all_letters,
})
@requires_op('UNION ALL')
    def test_union_all(self):
all_letters = ['a', 'b', 'c', 'd', 'e']
users = User.select(User.username)
uniques = UniqueModel.select(UniqueModel.name)
query = users.union_all(uniques)
results = [row[0] for row in query.tuples()]
self.assertEqual(sorted(results), ['a', 'b', 'b', 'c', 'd', 'd', 'e'])
@requires_op('UNION')
def test_union_from(self):
uq = (User
.select(User.username.alias('name'))
.where(User.username << ['a', 'b', 'd']))
oq = (OrderedModel
.select(OrderedModel.title.alias('name'))
.where(OrderedModel.title << ['a', 'b'])
.order_by())
iq = (UniqueModel
.select(UniqueModel.name.alias('name'))
.where(UniqueModel.name << ['c', 'd']))
union_q = (uq | oq | iq).alias('union_q')
query = (User
.select(union_q.c.name)
.from_(union_q)
.order_by(union_q.c.name.desc()))
self.assertEqual([row[0] for row in query.tuples()], ['d', 'b', 'a'])
@requires_op('UNION')
def test_union_count(self):
a = User.select().where(User.username == 'a')
c_and_d = User.select().where(User.username << ['c', 'd'])
self.assertEqual(a.count(), 1)
self.assertEqual(c_and_d.count(), 2)
union = a | c_and_d
self.assertEqual(union.wrapped_count(), 3)
overlapping = User.select() | c_and_d
self.assertEqual(overlapping.wrapped_count(), 4)
@requires_op('INTERSECT')
def test_intersect(self):
self.assertPermutations(operator.and_, {
(User, UniqueModel): ['b', 'd'],
(User, OrderedModel): ['a', 'c'],
(UniqueModel, User): ['b', 'd'],
(UniqueModel, OrderedModel): ['e'],
(OrderedModel, User): ['a', 'c'],
(OrderedModel, UniqueModel): ['e'],
})
@requires_op('EXCEPT')
def test_except(self):
self.assertPermutations(operator.sub, {
(User, UniqueModel): ['a', 'c'],
(User, OrderedModel): ['b', 'd'],
(UniqueModel, User): ['e'],
(UniqueModel, OrderedModel): ['b', 'd'],
(OrderedModel, User): ['e'],
(OrderedModel, UniqueModel): ['a', 'c'],
})
@requires_op('INTERSECT')
@requires_op('EXCEPT')
def test_symmetric_difference(self):
self.assertPermutations(operator.xor, {
(User, UniqueModel): ['a', 'c', 'e'],
(User, OrderedModel): ['b', 'd', 'e'],
(UniqueModel, User): ['a', 'c', 'e'],
(UniqueModel, OrderedModel): ['a', 'b', 'c', 'd'],
(OrderedModel, User): ['b', 'd', 'e'],
(OrderedModel, UniqueModel): ['a', 'b', 'c', 'd'],
})
def test_model_instances(self):
union = (User.select(User.username) |
UniqueModel.select(UniqueModel.name))
query = union.order_by(SQL('username').desc()).limit(3)
self.assertEqual([user.username for user in query],
['e', 'd', 'c'])
@requires_op('UNION')
@requires_op('INTERSECT')
def test_complex(self):
left = User.select(User.username).where(User.username << ['a', 'b'])
right = UniqueModel.select(UniqueModel.name).where(
UniqueModel.name << ['b', 'd', 'e'])
query = (left & right).order_by(SQL('1'))
self.assertEqual(list(query.dicts()), [{'username': 'b'}])
query = (left | right).order_by(SQL('1'))
self.assertEqual(list(query.dicts()), [
{'username': 'a'},
{'username': 'b'},
{'username': 'd'},
{'username': 'e'}])
@requires_op('UNION')
def test_union_subquery(self):
union = (User.select(User.username).where(User.username == 'a') |
UniqueModel.select(UniqueModel.name))
query = (User
.select(User.username)
.where(User.username << union)
.order_by(User.username.desc()))
self.assertEqual(list(query.dicts()), [
{'username': 'd'},
{'username': 'b'},
{'username': 'a'}])
@skip_unless(lambda: isinstance(test_db, PostgresqlDatabase))
class TestCompoundWithOrderLimit(ModelTestCase):
requires = [User]
def setUp(self):
super(TestCompoundWithOrderLimit, self).setUp()
for username in ['a', 'b', 'c', 'd', 'e', 'f']:
User.create(username=username)
def test_union_with_order_limit(self):
lhs = (User
.select(User.username)
.where(User.username << ['a', 'b', 'c']))
rhs = (User
.select(User.username)
.where(User.username << ['d', 'e', 'f']))
cq = (lhs.order_by(User.username.desc()).limit(2) |
rhs.order_by(User.username.desc()).limit(2))
results = [user.username for user in cq]
self.assertEqual(sorted(results), ['b', 'c', 'e', 'f'])
cq = cq.order_by(cq.c.username.desc()).limit(3)
results = [user.username for user in cq]
self.assertEqual(results, ['f', 'e', 'c'])
|
|
import os
from django.test import TransactionTestCase
from django.contrib.auth.models import Group
from django.core.exceptions import ValidationError
from rest_framework.exceptions import ValidationError as DRF_ValidationError
from hs_core.testing import MockIRODSTestCaseMixin
from hs_core import hydroshare
from hs_core.models import Coverage, ResourceFile
from hs_core.views.utils import remove_folder, move_or_rename_file_or_folder
from hs_app_netCDF.models import OriginalCoverage, Variable
from hs_file_types.models import NetCDFLogicalFile, NetCDFFileMetaData
from hs_file_types.models.base import METADATA_FILE_ENDSWITH, RESMAP_FILE_ENDSWITH
from .utils import assert_netcdf_file_type_metadata, CompositeResourceTestMixin, \
get_path_with_no_file_extension
class NetCDFFileTypeTest(MockIRODSTestCaseMixin, TransactionTestCase,
CompositeResourceTestMixin):
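    # Tests for creating a NetCDF aggregation (NetCDFLogicalFile) from a .nc
    # file, and for the behaviour of the aggregation and its extracted
    # metadata on CRUD, rename, move, folder delete and resource delete.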
def setUp(self):
super(NetCDFFileTypeTest, self).setUp()
self.group, _ = Group.objects.get_or_create(name='Hydroshare Author')
self.user = hydroshare.create_account(
'user1@nowhere.com',
username='user1',
first_name='Creator_FirstName',
last_name='Creator_LastName',
superuser=False,
groups=[self.group]
)
self.res_title = "Testing NetCDF File Type"
self.netcdf_file_name = 'netcdf_valid.nc'
self.netcdf_file = 'hs_file_types/tests/{}'.format(self.netcdf_file_name)
self.netcdf_invalid_file_name = 'netcdf_invalid.nc'
self.netcdf_invalid_file = 'hs_file_types/tests/{}'.format(self.netcdf_invalid_file_name)
def test_create_aggregation_from_nc_file_1(self):
# here we are using a valid nc file for setting it
# to NetCDF file type which includes metadata extraction
# the nc file in this case is at the root of the folder hierarchy
self.create_composite_resource(self.netcdf_file)
self.assertEqual(self.composite_resource.files.all().count(), 1)
res_file = self.composite_resource.files.first()
# check that the resource file is not associated with any logical file
self.assertEqual(res_file.has_logical_file, False)
# check that there is no NetCDFLogicalFile object
self.assertEqual(NetCDFLogicalFile.objects.count(), 0)
base_file_name, _ = os.path.splitext(res_file.file_name)
expected_res_file_folder_path = res_file.file_folder
# set the nc file to NetCDF file type
NetCDFLogicalFile.set_file_type(self.composite_resource, self.user, res_file.id)
# test extracted metadata
assert_netcdf_file_type_metadata(self, self.res_title,
aggr_folder=expected_res_file_folder_path)
# test file level keywords
res_file = self.composite_resource.files.first()
logical_file = res_file.logical_file
self.assertEqual(len(logical_file.metadata.keywords), 1)
self.assertEqual(logical_file.metadata.keywords[0], 'Snow water equivalent')
self.assertFalse(self.composite_resource.dangling_aggregations_exist())
self.composite_resource.delete()
def test_create_aggregation_from_nc_file_2(self):
# here we are using a valid nc file for setting it
# to NetCDF file type which includes metadata extraction
# the nc file in this case is not at the root of the folder hierarchy but in a folder
self.create_composite_resource()
new_folder = 'netcdf_aggr'
ResourceFile.create_folder(self.composite_resource, new_folder)
        # add the nc file to the resource at the above folder
self.add_file_to_resource(file_to_add=self.netcdf_file, upload_folder=new_folder)
self.assertEqual(self.composite_resource.files.all().count(), 1)
res_file = self.composite_resource.files.first()
# check that the resource file is not associated with any logical file
self.assertEqual(res_file.has_logical_file, False)
# check that there is no NetCDFLogicalFile object
self.assertEqual(NetCDFLogicalFile.objects.count(), 0)
# set the nc file to NetCDF file type
NetCDFLogicalFile.set_file_type(self.composite_resource, self.user, res_file.id)
# test extracted metadata
assert_netcdf_file_type_metadata(self, self.res_title, aggr_folder=new_folder)
# test file level keywords
res_file = self.composite_resource.files.first()
logical_file = res_file.logical_file
self.assertEqual(len(logical_file.metadata.keywords), 1)
self.assertEqual(logical_file.metadata.keywords[0], 'Snow water equivalent')
self.assertFalse(self.composite_resource.dangling_aggregations_exist())
self.composite_resource.delete()
def test_create_aggregation_from_nc_file_3(self):
# here we are using a valid nc file for setting it
# to NetCDF file type which includes metadata extraction
# the nc file in this case is not at the root of the folder hierarchy but in a folder. The
        # same folder contains another file that's not going to be part of the aggregation
# location of the nc file before aggregation is created: /my_folder/netcdf_valid.nc
# location of another file before aggregation is created: /my_folder/netcdf_invalid.nc
# location of nc file after aggregation is created:
# /my_folder/netcdf_valid.nc
# location of another file after aggregation is created: /my_folder/netcdf_invalid.nc
self.create_composite_resource()
new_folder = 'my_folder'
ResourceFile.create_folder(self.composite_resource, new_folder)
        # add the nc file to the resource at the above folder
self.add_file_to_resource(file_to_add=self.netcdf_file, upload_folder=new_folder)
self.assertEqual(self.composite_resource.files.all().count(), 1)
res_file = self.composite_resource.files.first()
# check that the resource file is not associated with any logical file
self.assertEqual(res_file.has_logical_file, False)
# check that there is no NetCDFLogicalFile object
self.assertEqual(NetCDFLogicalFile.objects.count(), 0)
# add another file to the same folder
self.add_file_to_resource(file_to_add=self.netcdf_invalid_file, upload_folder=new_folder)
self.assertEqual(self.composite_resource.files.all().count(), 2)
# set the nc file to NetCDF file type
NetCDFLogicalFile.set_file_type(self.composite_resource, self.user, res_file.id)
self.assertEqual(self.composite_resource.files.all().count(), 3)
# test logical file/aggregation
self.assertEqual(len(list(self.composite_resource.logical_files)), 1)
logical_file = list(self.composite_resource.logical_files)[0]
self.assertEqual(logical_file.files.count(), 2)
base_nc_file_name, _ = os.path.splitext(self.netcdf_file_name)
expected_file_folder = new_folder
for res_file in logical_file.files.all():
self.assertEqual(res_file.file_folder, expected_file_folder)
self.assertTrue(isinstance(logical_file, NetCDFLogicalFile))
self.assertTrue(logical_file.metadata, NetCDFLogicalFile)
# test the location of the file that's not part of the netcdf aggregation
other_res_file = None
for res_file in self.composite_resource.files.all():
if not res_file.has_logical_file:
other_res_file = res_file
break
self.assertEqual(other_res_file.file_folder, new_folder)
self.assertFalse(self.composite_resource.dangling_aggregations_exist())
self.composite_resource.delete()
def test_create_aggregation_from_nc_file_4(self):
# here we are using a valid nc file for setting it
# to NetCDF file type which includes metadata extraction
# the nc file in this case is not at the root of the folder hierarchy but in a folder. The
# same folder contains another folder
# location nc file before aggregation is created: /my_folder/netcdf_valid.nc
# location of another folder before aggregation is created: /my_folder/another_folder
# location of nc file after aggregation is created:
# /my_folder/netcdf_valid.nc
self.create_composite_resource()
new_folder = 'my_folder'
ResourceFile.create_folder(self.composite_resource, new_folder)
another_folder = '{}/another_folder'.format(new_folder)
ResourceFile.create_folder(self.composite_resource, another_folder)
        # add the nc file to the resource at the above folder
self.add_file_to_resource(file_to_add=self.netcdf_file, upload_folder=new_folder)
self.assertEqual(self.composite_resource.files.all().count(), 1)
res_file = self.composite_resource.files.first()
# check that the resource file is not associated with any logical file
self.assertEqual(res_file.has_logical_file, False)
# check that there is no NetCDFLogicalFile object
self.assertEqual(NetCDFLogicalFile.objects.count(), 0)
# set the nc file to NetCDF file type
NetCDFLogicalFile.set_file_type(self.composite_resource, self.user, res_file.id)
self.assertEqual(self.composite_resource.files.all().count(), 2)
# test logical file/aggregation
self.assertEqual(len(list(self.composite_resource.logical_files)), 1)
logical_file = list(self.composite_resource.logical_files)[0]
self.assertEqual(logical_file.files.count(), 2)
base_nc_file_name, _ = os.path.splitext(self.netcdf_file_name)
expected_file_folder = new_folder
for res_file in logical_file.files.all():
self.assertEqual(res_file.file_folder, expected_file_folder)
self.assertTrue(isinstance(logical_file, NetCDFLogicalFile))
self.assertTrue(logical_file.metadata, NetCDFLogicalFile)
self.assertFalse(self.composite_resource.dangling_aggregations_exist())
self.composite_resource.delete()
def test_create_aggregation_for_netcdf_resource_title(self):
# here we are using a valid nc file for setting it
# to NetCDF file type which includes metadata extraction
# and testing that the resource title gets set with the
# extracted metadata if the original title is 'untitled resource'
self.res_title = 'untitled resource'
self.create_composite_resource(self.netcdf_file)
self.assertEqual(self.composite_resource.metadata.title.value, self.res_title)
self.assertEqual(self.composite_resource.files.all().count(), 1)
res_file = self.composite_resource.files.first()
# check that the resource file is not associated with any logical file
self.assertEqual(res_file.has_logical_file, False)
# check that there is no NetCDFLogicalFile object
self.assertEqual(NetCDFLogicalFile.objects.count(), 0)
# set the nc file to NetCDF file type
NetCDFLogicalFile.set_file_type(self.composite_resource, self.user, res_file.id)
# test resource title was updated with the extracted netcdf data
res_title = "Snow water equivalent estimation at TWDEF site from Oct 2009 to June 2010"
self.assertEqual(self.composite_resource.metadata.title.value, res_title)
self.assertFalse(self.composite_resource.dangling_aggregations_exist())
self.composite_resource.delete()
def test_create_aggregation_from_invalid_nc_file_1(self):
# here we are using an invalid netcdf file for setting it
# to netCDF file type which should fail
self.create_composite_resource(self.netcdf_invalid_file)
self._test_invalid_file()
self.assertFalse(self.composite_resource.dangling_aggregations_exist())
self.composite_resource.delete()
def test_create_aggregation_from_invalid_nc_file_2(self):
        # here we are using a valid nc file that has already been set to
        # NetCDF file type - trying to set it again should fail
self.create_composite_resource(self.netcdf_file)
self.assertEqual(self.composite_resource.files.all().count(), 1)
res_file = self.composite_resource.files.first()
# check that the resource file is not associated with any logical file
self.assertEqual(res_file.has_logical_file, False)
# set nc file to aggregation
NetCDFLogicalFile.set_file_type(self.composite_resource, self.user, res_file.id)
self.assertEqual(NetCDFLogicalFile.objects.count(), 1)
self.assertEqual(self.composite_resource.files.all().count(), 2)
# check that the nc resource file is associated with a logical file
res_file = hydroshare.utils.get_resource_files_by_extension(
self.composite_resource, '.nc')[0]
self.assertEqual(res_file.has_logical_file, True)
self.assertEqual(res_file.logical_file_type_name, "NetCDFLogicalFile")
# trying to set this nc file again to netcdf file type should raise
# ValidationError
with self.assertRaises(ValidationError):
NetCDFLogicalFile.set_file_type(self.composite_resource, self.user, res_file.id)
self.assertEqual(NetCDFLogicalFile.objects.count(), 1)
self.assertFalse(self.composite_resource.dangling_aggregations_exist())
self.composite_resource.delete()
def test_aggregation_metadata_CRUD(self):
# here we are using a valid nc file for creating a NetCDF file type (aggregation)
# then testing with metadata CRUD actions for the aggregation
self.create_composite_resource()
new_folder = 'nc_folder'
ResourceFile.create_folder(self.composite_resource, new_folder)
        # add the nc file to the resource at the above folder
self.add_file_to_resource(file_to_add=self.netcdf_file, upload_folder=new_folder)
# make the netcdf file part of the NetCDFLogicalFile
res_file = self.composite_resource.files.first()
self.assertEqual(NetCDFFileMetaData.objects.count(), 0)
netcdf_logical_file = NetCDFLogicalFile.create(self.composite_resource)
netcdf_logical_file.save()
self.assertEqual(NetCDFFileMetaData.objects.count(), 1)
netcdf_logical_file.add_resource_file(res_file)
res_file = self.composite_resource.files.first()
self.assertEqual(res_file.logical_file_type_name, 'NetCDFLogicalFile')
self.assertEqual(netcdf_logical_file.files.count(), 1)
# create keywords - note it is possible to have duplicate keywords
# appropriate view functions need to disallow duplicate keywords
keywords = ['key-1', 'key-1', 'key-2']
netcdf_logical_file.metadata.keywords = keywords
netcdf_logical_file.metadata.save()
self.assertEqual(len(keywords), len(netcdf_logical_file.metadata.keywords))
for keyword in keywords:
self.assertIn(keyword, netcdf_logical_file.metadata.keywords)
# create OriginalCoverage element
self.assertEqual(netcdf_logical_file.metadata.original_coverage, None)
coverage_data = {'northlimit': 121.345, 'southlimit': 42.678, 'eastlimit': 123.789,
'westlimit': 40.789, 'units': 'meters'}
netcdf_logical_file.metadata.create_element('OriginalCoverage', value=coverage_data)
self.assertNotEqual(netcdf_logical_file.metadata.original_coverage, None)
self.assertEqual(float(netcdf_logical_file.metadata.original_coverage.value['northlimit']),
121.345)
# test updating OriginalCoverage element
orig_coverage = netcdf_logical_file.metadata.original_coverage
coverage_data = {'northlimit': 111.333, 'southlimit': 42.678, 'eastlimit': 123.789,
'westlimit': 40.789, 'units': 'meters'}
netcdf_logical_file.metadata.update_element('OriginalCoverage', orig_coverage.id,
value=coverage_data)
self.assertEqual(float(netcdf_logical_file.metadata.original_coverage.value['northlimit']),
111.333)
# trying to create a 2nd OriginalCoverage element should raise exception
with self.assertRaises(Exception):
netcdf_logical_file.metadata.create_element('OriginalCoverage', value=coverage_data)
        # trying to update bounding box values with non-numeric values
        # (e.g., 'northlimit' key with a non-numeric value) should raise exception
coverage_data = {'northlimit': '121.345a', 'southlimit': 42.678, 'eastlimit': 123.789,
'westlimit': 40.789, 'units': 'meters'}
with self.assertRaises(ValidationError):
netcdf_logical_file.metadata.update_element('OriginalCoverage', orig_coverage.id,
value=coverage_data)
# test creating spatial coverage
# there should not be any spatial coverage for the netcdf file type
self.assertEqual(netcdf_logical_file.metadata.spatial_coverage, None)
coverage_data = {'projection': 'WGS 84 EPSG:4326', 'northlimit': 41.87,
'southlimit': 41.863,
'eastlimit': -111.505,
'westlimit': -111.511, 'units': 'meters'}
# create spatial coverage
netcdf_logical_file.metadata.create_element('Coverage', type="box", value=coverage_data)
spatial_coverage = netcdf_logical_file.metadata.spatial_coverage
self.assertEqual(float(spatial_coverage.value['northlimit']), 41.87)
# test updating spatial coverage
coverage_data = {'projection': 'WGS 84 EPSG:4326', 'northlimit': 41.87706,
'southlimit': 41.863,
'eastlimit': -111.505,
'westlimit': -111.511, 'units': 'meters'}
netcdf_logical_file.metadata.update_element('Coverage', element_id=spatial_coverage.id,
type="box", value=coverage_data)
spatial_coverage = netcdf_logical_file.metadata.spatial_coverage
self.assertEqual(float(spatial_coverage.value['northlimit']), 41.87706)
# create Variable element
self.assertEqual(netcdf_logical_file.metadata.variables.count(), 0)
variable_data = {'name': 'variable_name', 'type': 'Int', 'unit': 'deg F',
'shape': 'variable_shape'}
netcdf_logical_file.metadata.create_element('Variable', **variable_data)
self.assertEqual(netcdf_logical_file.metadata.variables.count(), 1)
self.assertEqual(netcdf_logical_file.metadata.variables.first().name, 'variable_name')
# test that multiple Variable elements can be created
variable_data = {'name': 'variable_name_2', 'type': 'Int', 'unit': 'deg F',
'shape': 'variable_shape_2'}
netcdf_logical_file.metadata.create_element('Variable', **variable_data)
self.assertEqual(netcdf_logical_file.metadata.variables.count(), 2)
# test update Variable element
variable = netcdf_logical_file.metadata.variables.first()
variable_data = {'name': 'variable_name_updated', 'type': 'Int', 'unit': 'deg F',
'shape': 'variable_shape'}
netcdf_logical_file.metadata.update_element('Variable', variable.id, **variable_data)
variable = netcdf_logical_file.metadata.variables.get(id=variable.id)
self.assertEqual(variable.name, 'variable_name_updated')
self.assertFalse(self.composite_resource.dangling_aggregations_exist())
self.composite_resource.delete()
def test_aggregation_metadata_on_logical_file_delete(self):
# test that when the NetCDFLogicalFile instance is deleted
# all metadata associated with it also get deleted
self.create_composite_resource(self.netcdf_file)
res_file = self.composite_resource.files.first()
        # extract metadata from the nc file
NetCDFLogicalFile.set_file_type(self.composite_resource, self.user, res_file.id)
# test that we have one logical file of type NetCDFLogicalFile as a result
# of metadata extraction
self.assertEqual(NetCDFLogicalFile.objects.count(), 1)
self.assertEqual(NetCDFFileMetaData.objects.count(), 1)
res_file = self.composite_resource.files.first()
logical_file = res_file.logical_file
# test that we have the metadata elements
# there should be 4 Coverage objects - 2 at the resource level and
# the other 2 at the file type level
self.assertEqual(Coverage.objects.count(), 4)
self.assertEqual(self.composite_resource.metadata.coverages.all().count(), 2)
self.assertEqual(logical_file.metadata.coverages.all().count(), 2)
self.assertEqual(OriginalCoverage.objects.count(), 1)
self.assertNotEqual(logical_file.metadata.originalCoverage, None)
self.assertEqual(Variable.objects.count(), 5)
self.assertEqual(logical_file.metadata.variables.all().count(), 5)
# delete the logical file
logical_file.logical_delete(self.user)
# test that we have no logical file of type NetCDFLogicalFile
self.assertEqual(NetCDFLogicalFile.objects.count(), 0)
self.assertEqual(NetCDFFileMetaData.objects.count(), 0)
# test that all metadata deleted - there should be 2 resource level coverages
self.assertEqual(self.composite_resource.metadata.coverages.all().count(), 2)
self.assertEqual(Coverage.objects.count(), 2)
self.assertEqual(OriginalCoverage.objects.count(), 0)
self.assertEqual(Variable.objects.count(), 0)
self.assertFalse(self.composite_resource.dangling_aggregations_exist())
self.composite_resource.delete()
def test_remove_aggregation(self):
        # test that when an instance of NetCDFLogicalFile type (aggregation) is deleted
        # the resource files associated with that aggregation are not deleted but the associated
# metadata is deleted
self.create_composite_resource(self.netcdf_file)
nc_res_file = self.composite_resource.files.first()
base_file_name, _ = os.path.splitext(nc_res_file.file_name)
expected_folder_name = nc_res_file.file_folder
# set the nc file to NetCDFLogicalFile aggregation
NetCDFLogicalFile.set_file_type(self.composite_resource, self.user, nc_res_file.id)
# test that we have one logical file (aggregation) of type NetCDFLogicalFile
self.assertEqual(NetCDFLogicalFile.objects.count(), 1)
self.assertEqual(NetCDFFileMetaData.objects.count(), 1)
logical_file = NetCDFLogicalFile.objects.first()
self.assertEqual(logical_file.files.all().count(), 2)
self.assertEqual(self.composite_resource.files.all().count(), 2)
self.assertEqual(set(self.composite_resource.files.all()),
set(logical_file.files.all()))
# delete the aggregation (logical file) object using the remove_aggregation function
        # this should also delete the system generated txt file that was created when the
        # netcdf logical file was created
logical_file.remove_aggregation()
# test there is no NetCDFLogicalFile object
self.assertEqual(NetCDFLogicalFile.objects.count(), 0)
# test there is no NetCDFFileMetaData object
self.assertEqual(NetCDFFileMetaData.objects.count(), 0)
        # check that the files associated with the aggregation are not deleted
self.assertEqual(self.composite_resource.files.all().count(), 1)
# check the file folder is not deleted
nc_file = self.composite_resource.files.first()
self.assertTrue(nc_file.file_name.endswith('.nc'))
self.assertEqual(nc_file.file_folder, expected_folder_name)
self.assertFalse(self.composite_resource.dangling_aggregations_exist())
self.composite_resource.delete()
def test_aggregation_metadata_on_resource_delete(self):
# test that when the composite resource is deleted
# all metadata associated with NetCDFLogicalFile Type is deleted
self.create_composite_resource(self.netcdf_file)
res_file = self.composite_resource.files.first()
        # extract metadata from the nc file
NetCDFLogicalFile.set_file_type(self.composite_resource, self.user, res_file.id)
# test that we have one logical file of type NetCDFLogicalFile as a result
# of metadata extraction
self.assertEqual(NetCDFLogicalFile.objects.count(), 1)
self.assertEqual(NetCDFFileMetaData.objects.count(), 1)
# test that we have the metadata elements
# there should be 4 Coverage objects - 2 at the resource level and
# the other 2 at the file type level
self.assertEqual(Coverage.objects.count(), 4)
self.assertEqual(OriginalCoverage.objects.count(), 1)
self.assertEqual(Variable.objects.count(), 5)
self.assertFalse(self.composite_resource.dangling_aggregations_exist())
# delete resource
hydroshare.delete_resource(self.composite_resource.short_id)
# test that we have no logical file of type NetCDFLogicalFile
self.assertEqual(NetCDFLogicalFile.objects.count(), 0)
self.assertEqual(NetCDFFileMetaData.objects.count(), 0)
# test that all metadata deleted
self.assertEqual(Coverage.objects.count(), 0)
self.assertEqual(OriginalCoverage.objects.count(), 0)
self.assertEqual(Variable.objects.count(), 0)
def test_aggregation_metadata_on_file_delete(self):
# test that when any resource file that is part of a NetCDFLogicalFile is deleted
# all metadata associated with NetCDFLogicalFile is deleted
# test for both .nc and .txt delete
# test with deleting of 'nc' file
self._test_file_metadata_on_file_delete(ext='.nc')
# test with deleting of 'txt' file
self._test_file_metadata_on_file_delete(ext='.txt')
def test_aggregation_folder_delete(self):
        # when a file is set to NetCDFLogicalFile type
        # the system automatically creates a folder using the name of the file
        # that was used to set the file type
        # here we need to test that when that folder gets deleted, all files
        # in that folder get deleted, the logicalfile object gets deleted and
        # the associated metadata objects get deleted
self.create_composite_resource()
new_folder = 'nc_folder'
ResourceFile.create_folder(self.composite_resource, new_folder)
        # add the nc file to the resource at the above folder
self.add_file_to_resource(file_to_add=self.netcdf_file, upload_folder=new_folder)
nc_res_file = self.composite_resource.files.first()
base_file_name, _ = os.path.splitext(nc_res_file.file_name)
expected_folder_name = nc_res_file.file_folder
        # extract metadata from the nc file
NetCDFLogicalFile.set_file_type(self.composite_resource, self.user, nc_res_file.id)
# test that we have one logical file of type NetCDFLogicalFile as a result
# of metadata extraction
self.assertEqual(NetCDFLogicalFile.objects.count(), 1)
# should have one NetCDFFileMetadata object
self.assertEqual(NetCDFFileMetaData.objects.count(), 1)
# there should be 2 content files
self.assertEqual(self.composite_resource.files.count(), 2)
# test that there are metadata associated with the logical file
self.assertEqual(Coverage.objects.count(), 4)
self.assertEqual(OriginalCoverage.objects.count(), 1)
self.assertEqual(Variable.objects.count(), 5)
# delete the folder for the logical file
folder_path = "data/contents/{}".format(expected_folder_name)
remove_folder(self.user, self.composite_resource.short_id, folder_path)
        # there should be no content files
self.assertEqual(self.composite_resource.files.count(), 0)
# there should not be any netCDF logical file or metadata file
self.assertEqual(NetCDFLogicalFile.objects.count(), 0)
self.assertEqual(NetCDFFileMetaData.objects.count(), 0)
        # test that all metadata associated with the logical file got deleted - there should
        # still be 2 resource level coverages
self.assertEqual(self.composite_resource.metadata.coverages.all().count(), 2)
self.assertEqual(Coverage.objects.count(), 2)
self.assertEqual(OriginalCoverage.objects.count(), 0)
self.assertEqual(Variable.objects.count(), 0)
self.assertFalse(self.composite_resource.dangling_aggregations_exist())
self.composite_resource.delete()
def test_aggregation_file_rename(self):
        # test that a file can't be renamed for any resource file
# that's part of the NetCDF logical file
self.create_composite_resource()
self.add_file_to_resource(file_to_add=self.netcdf_file)
res_file = self.composite_resource.files.first()
expected_folder_path = res_file.file_folder
# create aggregation from the nc file
NetCDFLogicalFile.set_file_type(self.composite_resource, self.user, res_file.id)
# test renaming of files that are associated with aggregation raises exception
self.assertEqual(self.composite_resource.files.count(), 2)
for res_file in self.composite_resource.files.all():
base_file_name, ext = os.path.splitext(res_file.file_name)
self.assertEqual(res_file.file_folder, expected_folder_path)
if expected_folder_path is not None:
src_path = 'data/contents/{0}/{1}'.format(expected_folder_path, res_file.file_name)
else:
src_path = 'data/contents/{}'.format(res_file.file_name)
            new_file_name = 'some_netcdf{}'.format(ext)
self.assertNotEqual(res_file.file_name, new_file_name)
if expected_folder_path is not None:
tgt_path = 'data/contents/{}/{}'.format(expected_folder_path, new_file_name)
else:
tgt_path = 'data/contents/{}'.format(new_file_name)
with self.assertRaises(DRF_ValidationError):
move_or_rename_file_or_folder(self.user, self.composite_resource.short_id, src_path,
tgt_path)
self.assertFalse(self.composite_resource.dangling_aggregations_exist())
self.composite_resource.delete()
def test_aggregation_file_move(self):
# test any resource file that's part of the NetCDF logical file can't be moved
self.create_composite_resource()
self.add_file_to_resource(file_to_add=self.netcdf_file)
nc_res_file = self.composite_resource.files.first()
# create the aggregation using the nc file
NetCDFLogicalFile.set_file_type(self.composite_resource, self.user, nc_res_file.id)
        # test that moving files that are associated with the netcdf aggregation raises exception
self.assertEqual(self.composite_resource.files.count(), 2)
res_file = self.composite_resource.files.first()
expected_folder_path = nc_res_file.file_folder
self.assertEqual(res_file.file_folder, expected_folder_path)
new_folder = 'netcdf_aggr'
ResourceFile.create_folder(self.composite_resource, new_folder)
# moving any of the resource files to this new folder should raise exception
tgt_path = 'data/contents/{}'.format(new_folder)
for res_file in self.composite_resource.files.all():
with self.assertRaises(DRF_ValidationError):
src_path = os.path.join('data', 'contents', res_file.short_path)
move_or_rename_file_or_folder(self.user, self.composite_resource.short_id, src_path,
tgt_path)
self.assertFalse(self.composite_resource.dangling_aggregations_exist())
self.composite_resource.delete()
def test_aggregation_folder_rename(self):
# test changes to aggregation name, aggregation metadata xml file path, and aggregation
# resource map xml file path on folder (that contains netcdf aggregation) name change
self.create_composite_resource()
new_folder = 'netcdf_aggr'
ResourceFile.create_folder(self.composite_resource, new_folder)
        # add the nc file to the resource at the above folder
self.add_file_to_resource(file_to_add=self.netcdf_file, upload_folder=new_folder)
nc_res_file = self.composite_resource.files.first()
expected_folder_path = nc_res_file.file_folder
# create aggregation from the nc file
NetCDFLogicalFile.set_file_type(self.composite_resource, self.user, nc_res_file.id)
self.assertEqual(self.composite_resource.files.count(), 2)
for res_file in self.composite_resource.files.all():
self.assertEqual(res_file.file_folder, expected_folder_path)
# test aggregation name
res_file = self.composite_resource.files.first()
logical_file = res_file.logical_file
self.assertEqual(logical_file.aggregation_name, nc_res_file.short_path)
# test aggregation xml file paths
nc_file_path = get_path_with_no_file_extension(nc_res_file.short_path)
expected_meta_file_path = '{0}{1}'.format(nc_file_path, METADATA_FILE_ENDSWITH)
self.assertEqual(logical_file.metadata_short_file_path, expected_meta_file_path)
expected_map_file_path = '{0}{1}'.format(nc_file_path, RESMAP_FILE_ENDSWITH)
self.assertEqual(logical_file.map_short_file_path, expected_map_file_path)
# test renaming folder
src_path = 'data/contents/{}'.format(expected_folder_path)
tgt_path = 'data/contents/{}_1'.format(expected_folder_path)
move_or_rename_file_or_folder(self.user, self.composite_resource.short_id, src_path,
tgt_path)
for res_file in self.composite_resource.files.all():
self.assertEqual(res_file.file_folder, '{}_1'.format(expected_folder_path))
# test aggregation name update
res_file = self.composite_resource.files.first()
logical_file = res_file.logical_file
nc_res_file.refresh_from_db()
self.assertEqual(logical_file.aggregation_name, nc_res_file.short_path)
# test aggregation xml file paths
nc_file_path = get_path_with_no_file_extension(nc_res_file.short_path)
expected_meta_file_path = '{0}{1}'.format(nc_file_path, METADATA_FILE_ENDSWITH)
self.assertEqual(logical_file.metadata_short_file_path, expected_meta_file_path)
expected_map_file_path = '{0}{1}'.format(nc_file_path, RESMAP_FILE_ENDSWITH)
self.assertEqual(logical_file.map_short_file_path, expected_map_file_path)
self.assertFalse(self.composite_resource.dangling_aggregations_exist())
self.composite_resource.delete()
def test_aggregation_parent_folder_rename(self):
        # test changes to aggregation name, aggregation metadata xml file path, and aggregation
        # resource map xml file path when the parent folder of the aggregation folder is renamed
self.create_composite_resource()
new_folder = 'netcdf_aggr'
ResourceFile.create_folder(self.composite_resource, new_folder)
        # add the nc file to the resource at the above folder
self.add_file_to_resource(file_to_add=self.netcdf_file, upload_folder=new_folder)
nc_res_file = self.composite_resource.files.first()
aggregation_folder_name = new_folder
# create aggregation from the nc file
NetCDFLogicalFile.set_file_type(self.composite_resource, self.user, nc_res_file.id)
        # the aggregation should now have 2 files, both in the aggregation folder
self.assertEqual(self.composite_resource.files.count(), 2)
for res_file in self.composite_resource.files.all():
self.assertEqual(res_file.file_folder, aggregation_folder_name)
# test aggregation name
res_file = self.composite_resource.files.first()
logical_file = res_file.logical_file
self.assertEqual(logical_file.aggregation_name, nc_res_file.short_path)
# test aggregation xml file paths
nc_file_path = get_path_with_no_file_extension(nc_res_file.short_path)
expected_meta_file_path = '{0}{1}'.format(nc_file_path, METADATA_FILE_ENDSWITH)
self.assertEqual(logical_file.metadata_short_file_path, expected_meta_file_path)
expected_map_file_path = '{0}{1}'.format(nc_file_path, RESMAP_FILE_ENDSWITH)
self.assertEqual(logical_file.map_short_file_path, expected_map_file_path)
# create a folder to be the parent folder of the aggregation folder
parent_folder = 'parent_folder'
ResourceFile.create_folder(self.composite_resource, parent_folder)
# move the aggregation folder to the parent folder
src_path = 'data/contents/{}'.format(aggregation_folder_name)
tgt_path = 'data/contents/{0}/{1}'.format(parent_folder, aggregation_folder_name)
move_or_rename_file_or_folder(self.user, self.composite_resource.short_id, src_path,
tgt_path)
file_folder = '{}/{}'.format(parent_folder, aggregation_folder_name)
for res_file in self.composite_resource.files.all():
self.assertEqual(res_file.file_folder, file_folder)
# renaming parent folder
parent_folder_rename = 'parent_folder_1'
src_path = 'data/contents/{}'.format(parent_folder)
tgt_path = 'data/contents/{}'.format(parent_folder_rename)
move_or_rename_file_or_folder(self.user, self.composite_resource.short_id, src_path,
tgt_path)
file_folder = '{}/{}'.format(parent_folder_rename, aggregation_folder_name)
for res_file in self.composite_resource.files.all():
self.assertEqual(res_file.file_folder, file_folder)
# test aggregation name after folder rename
res_file = self.composite_resource.files.first()
logical_file = res_file.logical_file
nc_res_file.refresh_from_db()
self.assertEqual(logical_file.aggregation_name, nc_res_file.short_path)
# test aggregation xml file paths after folder rename
nc_file_path = get_path_with_no_file_extension(nc_res_file.short_path)
expected_meta_file_path = '{0}{1}'.format(nc_file_path, METADATA_FILE_ENDSWITH)
self.assertEqual(logical_file.metadata_short_file_path, expected_meta_file_path)
expected_map_file_path = '{0}{1}'.format(nc_file_path, RESMAP_FILE_ENDSWITH)
self.assertEqual(logical_file.map_short_file_path, expected_map_file_path)
self.assertFalse(self.composite_resource.dangling_aggregations_exist())
self.composite_resource.delete()
def test_aggregation_folder_move_1(self):
# test changes to aggregation name, aggregation metadata xml file path, and aggregation
# resource map xml file path on moving a folder that contains netcdf aggregation
self.create_composite_resource()
new_folder = 'netcdf_aggr'
ResourceFile.create_folder(self.composite_resource, new_folder)
        # add the nc file to the resource at the above folder
self.add_file_to_resource(file_to_add=self.netcdf_file, upload_folder=new_folder)
nc_res_file = self.composite_resource.files.first()
aggregation_folder_name = nc_res_file.file_folder
# create aggregation from the nc file
NetCDFLogicalFile.set_file_type(self.composite_resource, self.user, nc_res_file.id)
self.assertEqual(self.composite_resource.files.count(), 2)
for res_file in self.composite_resource.files.all():
self.assertEqual(res_file.file_folder, aggregation_folder_name)
# create a folder to move the aggregation folder there
parent_folder = 'parent_folder'
ResourceFile.create_folder(self.composite_resource, parent_folder)
# move the aggregation folder to the parent folder
src_path = 'data/contents/{}'.format(aggregation_folder_name)
tgt_path = 'data/contents/{0}/{1}'.format(parent_folder, aggregation_folder_name)
move_or_rename_file_or_folder(self.user, self.composite_resource.short_id, src_path,
tgt_path)
file_folder = '{0}/{1}'.format(parent_folder, aggregation_folder_name)
for res_file in self.composite_resource.files.all():
self.assertEqual(res_file.file_folder, file_folder)
# test aggregation name update
res_file = self.composite_resource.files.first()
logical_file = res_file.logical_file
nc_res_file.refresh_from_db()
self.assertEqual(logical_file.aggregation_name, nc_res_file.short_path)
# test aggregation xml file paths
nc_file_path = get_path_with_no_file_extension(nc_res_file.short_path)
expected_meta_file_path = '{0}{1}'.format(nc_file_path, METADATA_FILE_ENDSWITH)
self.assertEqual(logical_file.metadata_short_file_path, expected_meta_file_path)
expected_map_file_path = '{0}{1}'.format(nc_file_path, RESMAP_FILE_ENDSWITH)
self.assertEqual(logical_file.map_short_file_path, expected_map_file_path)
self.assertFalse(self.composite_resource.dangling_aggregations_exist())
self.composite_resource.delete()
def test_aggregation_folder_move_2(self):
# test a folder can be moved into a folder that contains a netcdf aggregation
self.create_composite_resource()
new_folder = 'netcdf_aggr'
ResourceFile.create_folder(self.composite_resource, new_folder)
        # add the nc file to the resource at the above folder
self.add_file_to_resource(file_to_add=self.netcdf_file, upload_folder=new_folder)
nc_res_file = self.composite_resource.files.first()
aggregation_folder_name = new_folder
# create aggregation from the nc file
NetCDFLogicalFile.set_file_type(self.composite_resource, self.user, nc_res_file.id)
# create a folder to move into the aggregation folder
folder_to_move = 'folder_to_move'
ResourceFile.create_folder(self.composite_resource, folder_to_move)
# move the folder_to_move into the aggregation folder
src_path = 'data/contents/{}'.format(folder_to_move)
tgt_path = 'data/contents/{}'.format(aggregation_folder_name)
move_or_rename_file_or_folder(self.user, self.composite_resource.short_id, src_path,
tgt_path)
self.assertFalse(self.composite_resource.dangling_aggregations_exist())
self.composite_resource.delete()
def test_aggregation_folder_sub_folder_creation(self):
# test a folder can be created inside a folder that contains a netcdf aggregation
self.create_composite_resource()
new_folder = 'netcdf_aggr'
ResourceFile.create_folder(self.composite_resource, new_folder)
        # add the nc file to the resource at the above folder
self.add_file_to_resource(file_to_add=self.netcdf_file, upload_folder=new_folder)
nc_res_file = self.composite_resource.files.first()
self.assertEqual(nc_res_file.file_folder, new_folder)
# create aggregation from the nc file
NetCDFLogicalFile.set_file_type(self.composite_resource, self.user, nc_res_file.id)
res_file = self.composite_resource.files.first()
self.assertNotEqual(res_file.file_folder, None)
# create a folder inside the aggregation folder
new_folder = '{}/sub_folder'.format(res_file.file_folder)
ResourceFile.create_folder(self.composite_resource, new_folder)
self.assertFalse(self.composite_resource.dangling_aggregations_exist())
self.composite_resource.delete()
def test_file_move_to_aggregation_folder_allowed(self):
# test a file can be moved into a folder that contains a netcdf aggregation
self.create_composite_resource()
new_folder = 'netcdf_aggr'
ResourceFile.create_folder(self.composite_resource, new_folder)
        # add the nc file to the resource at the above folder
self.add_file_to_resource(file_to_add=self.netcdf_file, upload_folder=new_folder)
nc_res_file = self.composite_resource.files.first()
self.assertEqual(nc_res_file.file_folder, new_folder)
# create aggregation from the nc file
NetCDFLogicalFile.set_file_type(self.composite_resource, self.user, nc_res_file.id)
res_file = self.composite_resource.files.first()
self.assertNotEqual(res_file.file_folder, '')
        # add a file to the resource which we will then move into the aggregation folder
res_file_to_move = self.add_file_to_resource(file_to_add=self.netcdf_invalid_file)
src_path = os.path.join('data', 'contents', res_file_to_move.short_path)
tgt_path = 'data/contents/{}'.format(res_file.file_folder)
# move file to aggregation folder
move_or_rename_file_or_folder(self.user, self.composite_resource.short_id, src_path,
tgt_path)
self.assertFalse(self.composite_resource.dangling_aggregations_exist())
self.composite_resource.delete()
def test_upload_file_to_aggregation_folder_allowed(self):
        # test that a file can be uploaded into a folder that represents an aggregation
self.create_composite_resource()
new_folder = 'netcdf_aggr'
ResourceFile.create_folder(self.composite_resource, new_folder)
        # add the nc file to the resource at the above folder
self.add_file_to_resource(file_to_add=self.netcdf_file, upload_folder=new_folder)
res_file = self.composite_resource.files.first()
self.assertEqual(res_file.file_folder, new_folder)
# create aggregation from the nc file
NetCDFLogicalFile.set_file_type(self.composite_resource, self.user, res_file.id)
res_file = self.composite_resource.files.first()
self.assertNotEqual(res_file.file_folder, '')
# add a file to the resource at the aggregation folder
self.add_file_to_resource(file_to_add=self.netcdf_invalid_file,
upload_folder=res_file.file_folder)
self.assertFalse(self.composite_resource.dangling_aggregations_exist())
self.composite_resource.delete()
def _test_invalid_file(self):
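        """Helper: setting an invalid file to the NetCDF file type should raise
        ValidationError and leave the resource file and its logical-file status unchanged."""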
self.assertEqual(self.composite_resource.files.all().count(), 1)
res_file = self.composite_resource.files.first()
# check that the resource file is not associated with any logical file
self.assertEqual(res_file.has_logical_file, False)
        # trying to set this invalid netcdf file to NetCDF file type should raise
        # ValidationError
with self.assertRaises(ValidationError):
NetCDFLogicalFile.set_file_type(self.composite_resource, self.user, res_file.id)
self.assertEqual(NetCDFLogicalFile.objects.count(), 0)
# test that the invalid file did not get deleted
self.assertEqual(self.composite_resource.files.all().count(), 1)
# check that the resource file is not associated with any logical file
self.assertEqual(res_file.has_logical_file, False)
def _test_file_metadata_on_file_delete(self, ext):
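        """Helper: deleting the content file with the given extension should delete the
        aggregation and all aggregation-level metadata, keeping the resource-level coverages."""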
self.create_composite_resource(self.netcdf_file)
res_file = self.composite_resource.files.first()
        # extract metadata from the nc file
NetCDFLogicalFile.set_file_type(self.composite_resource, self.user, res_file.id)
# test that we have one logical file of type NetCDFLogicalFile
self.assertEqual(NetCDFLogicalFile.objects.count(), 1)
self.assertEqual(NetCDFFileMetaData.objects.count(), 1)
res_file = self.composite_resource.files.first()
logical_file = res_file.logical_file
# there should be 2 coverage elements - one spatial and the other one temporal
self.assertEqual(logical_file.metadata.coverages.all().count(), 2)
self.assertNotEqual(logical_file.metadata.spatial_coverage, None)
self.assertNotEqual(logical_file.metadata.temporal_coverage, None)
# there should be one original coverage
self.assertNotEqual(logical_file.metadata.originalCoverage, None)
# testing extended metadata element: variables
self.assertEqual(logical_file.metadata.variables.all().count(), 5)
# there should be 4 coverage objects - 2 at the resource level
# and the other 2 at the file type level
self.assertEqual(Coverage.objects.count(), 4)
self.assertEqual(OriginalCoverage.objects.count(), 1)
self.assertEqual(Variable.objects.count(), 5)
# delete content file specified by extension (ext parameter)
res_file = hydroshare.utils.get_resource_files_by_extension(
self.composite_resource, ext)[0]
hydroshare.delete_resource_file(self.composite_resource.short_id,
res_file.id,
self.user)
        # test that we no longer have a logical file of type NetCDFLogicalFile
self.assertEqual(NetCDFLogicalFile.objects.count(), 0)
self.assertEqual(NetCDFFileMetaData.objects.count(), 0)
        # test that all metadata got deleted - there should still be 2 resource level coverages
self.assertEqual(self.composite_resource.metadata.coverages.all().count(), 2)
self.assertEqual(Coverage.objects.count(), 2)
self.assertEqual(OriginalCoverage.objects.count(), 0)
self.assertEqual(Variable.objects.count(), 0)
self.assertFalse(self.composite_resource.dangling_aggregations_exist())
self.composite_resource.delete()
def test_main_file(self):
self.create_composite_resource(self.netcdf_file)
self.assertEqual(self.composite_resource.files.all().count(), 1)
res_file = self.composite_resource.files.first()
self.assertEqual(res_file.has_logical_file, False)
NetCDFLogicalFile.set_file_type(self.composite_resource, self.user, res_file.id)
self.assertEqual(1, NetCDFLogicalFile.objects.count())
self.assertEqual(".nc", NetCDFLogicalFile.objects.first().get_main_file_type())
self.assertEqual(self.netcdf_file_name,
NetCDFLogicalFile.objects.first().get_main_file.file_name)
self.assertFalse(self.composite_resource.dangling_aggregations_exist())
|
|
__author__ = "Martin Pilat"
import sys
import time
import joblib
import pprint
import os
import ml_metrics as mm
import numpy as np
import pandas as pd
from sklearn import cross_validation, preprocessing, decomposition, feature_selection, metrics
import networkx as nx
import custom_models
import utils
import inspect
from sklearn.base import ClassifierMixin, RegressorMixin
cache_dir = 'cache'
if os.path.exists('/media/ramdisk'):
cache_dir = '/media/ramdisk/cache'
print('Using ramdisk')
memory = joblib.Memory(cachedir=cache_dir, verbose=False)
@memory.cache
def fit_model(model, values, targets, sample_weight=None):
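    """
    Fits the model on the given values and targets (results are memoized on disk by joblib).
    The sample_weight is forwarded only to classifiers, regressors and KMeansSplitter
    instances whose fit() accepts it.
    """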
if isinstance(model, ClassifierMixin) or isinstance(model, RegressorMixin) or isinstance(model, custom_models.KMeansSplitter):
if 'sample_weight' in inspect.signature(model.fit).parameters:
return model.fit(values, targets, sample_weight=sample_weight)
return model.fit(values, targets)
def data_ready(req, cache):
"""
Checks that all required data are in the data_cache
:param req: string or list of string containing the keys of required data in cache
:param cache: dictionary with the computed data
:return: Boolean indicating whether all required data are in cache
"""
if not isinstance(req, list):
req = [req]
return all([r in cache for r in req])
def get_data(data_list, data_cache):
"""
Gets the data specified by the keys in the data_list from the data_cache
:param data_list: string or list of strings
:param data_cache: dictionary containing the stored data
    :return: the single cached (features, targets) item if the input is a string, otherwise a pair
             of lists containing the features and the targets of all requested items
"""
if not isinstance(data_list, list):
data_list = [data_list]
tmp = [data_cache[d] for d in data_list]
if len(tmp) == 1:
return tmp[0]
res = ([t[0] for t in tmp], [t[1] for t in tmp])
return res
def append_all(data_frames):
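    """
    Appends a list of data-frames into a single one; returns the input unchanged
    if it is not a list.
    """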
if not isinstance(data_frames, list):
return data_frames
res = data_frames[0]
for i in range(1, len(data_frames)):
        res = res.append(data_frames[i])
return res
def train_dag(dag, train_data, sample_weight=None):
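    """
    Trains all models in the dag on the training data.
    :param dag: dictionary mapping node names to (inputs, model spec, outputs) tuples
    :param train_data: tuple (features, targets)
    :param sample_weight: optional per-sample weights forwarded to models that accept them
    :return: dictionary mapping node names to the fitted models
    """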
models = dict()
data_cache = dict()
if isinstance(train_data[0], np.ndarray) and isinstance(train_data[1], np.ndarray): # happens inside booster
train_data = (pd.DataFrame(train_data[0]), pd.Series(train_data[1]))
data_cache[dag['input'][2]] = train_data
models['input'] = True
unfinished_models = lambda: [m for m in dag if m not in models]
data_available = lambda: [m for m in dag if data_ready(dag[m][0], data_cache)]
next_methods = lambda: [m for m in unfinished_models() if m in data_available()]
while next_methods():
for m in next_methods():
# print("Processing:", m)
# obtain the data
features, targets, *rest = get_data(dag[m][0], data_cache)
if rest:
sample_weight = rest[0]
ModelClass, model_params = utils.get_model_by_name(dag[m][1])
out_name = dag[m][2]
if dag[m][1][0] == 'stacker':
sub_dags, initial_dag, input_data = extract_subgraphs(dag, m)
model_params = dict(sub_dags=sub_dags, initial_dag=initial_dag)
model = ModelClass(**model_params)
features, targets = data_cache[input_data]
elif isinstance(out_name, list):
model = ModelClass(len(out_name), **model_params)
else:
if isinstance(ModelClass(), feature_selection.SelectKBest):
if 'feat_frac' not in model_params:
model_params['feat_frac'] = 1.0
model_params = model_params.copy()
model_params['k'] = max(1, int(model_params['feat_frac']*(features.shape[1]-1)))
del model_params['feat_frac']
if isinstance(ModelClass(), decomposition.PCA):
if 'feat_frac' not in model_params:
model_params['feat_frac'] = 1.0
model_params = model_params.copy()
model_params['n_components'] = max(1, int(model_params['feat_frac']*(features.shape[1]-1)))
del model_params['feat_frac']
model = ModelClass(**model_params)
# build the model
# some models cannot handle cases with only one class, we also need to check we are not working with a list
# of inputs for an aggregator
if custom_models.is_predictor(model) and isinstance(targets, pd.Series) and len(targets.unique()) == 1:
model = custom_models.ConstantModel(targets.iloc[0])
models[m] = fit_model(model, features, targets, sample_weight=sample_weight)
model = models[m] # needed to update model if the result was cached
# use the model to process the data
if isinstance(model, custom_models.Stacker):
                data_cache[out_name] = model.train, targets.loc[model.train.index]
continue
if isinstance(model, custom_models.Aggregator):
data_cache[out_name] = model.aggregate(features, targets)
continue
if custom_models.is_transformer(model):
trans = model.transform(features)
else: # this is a classifier not a preprocessor
trans = features # the data do not change
if isinstance(features, pd.DataFrame):
targets = pd.Series(list(model.predict(features)), index=features.index)
else: # this should happen only inside booster
targets = pd.Series(list(model.predict(features)))
# save the outputs
if isinstance(trans, list): # the previous model divided the data into several data-sets
if isinstance(model, custom_models.KMeansSplitter) and sample_weight is not None:
trans = [(x, targets.loc[x.index], sample_weight[model.weight_idx[i]]) for i, x in enumerate(trans)] # need to divide the targets and the weights
else:
trans = [(x, targets.loc[x.index]) for x in trans] # need to divide the targets
for i in range(len(trans)):
data_cache[out_name[i]] = trans[i] # save all the data to the cache
else:
if isinstance(features, pd.DataFrame):
trans = pd.DataFrame(trans, index=features.index) # we have only one output, can be numpy array
else:
trans = pd.DataFrame(trans)
trans.dropna(axis='columns', how='all', inplace=True)
data_cache[out_name] = (trans, targets) # save it
return models
def test_dag(dag, models, test_data, output='preds_only'):
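    """
    Runs the dag on the test data using the models fitted by train_dag.
    :param dag: dictionary describing the dag
    :param models: dictionary mapping node names to fitted models
    :param test_data: tuple (features, targets)
    :param output: 'all' for (features, predictions), 'preds_only' or 'feats_only'
    :return: the data selected by the output parameter
    """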
data_cache = dict()
finished = dict()
if isinstance(test_data[0], np.ndarray):
test_data = (pd.DataFrame(test_data[0]), test_data[1])
if isinstance(test_data[1], np.ndarray):
test_data = (test_data[0], pd.Series(test_data[1], index=test_data[0].index))
data_cache[dag['input'][2]] = test_data
finished['input'] = True
unfinished_models = lambda: [m for m in dag if m not in finished]
data_available = lambda: [m for m in dag if data_ready(dag[m][0], data_cache)]
next_methods = lambda: [m for m in unfinished_models() if m in data_available()]
while next_methods():
for m in next_methods():
# obtain the data
features, targets = get_data(dag[m][0], data_cache)
model = models[m]
out_name = dag[m][2]
            if isinstance(features, pd.DataFrame) and features.empty:  # we got an empty dataset (after some division)
if isinstance(out_name, list): # and we should divide it further
for o in out_name:
data_cache[o] = (features, targets)
else:
data_cache[out_name] = (features, targets)
finished[m] = True
continue
# use the model to process the data
if isinstance(model, custom_models.Aggregator):
data_cache[out_name] = model.aggregate(features, targets)
finished[m] = True
continue
elif custom_models.is_transformer(model):
trans = model.transform(features)
targets = pd.Series(targets, index=features.index)
else: # this is a classifier not a preprocessor
trans = features # the data do not change
if isinstance(features, pd.DataFrame):
targets = pd.Series(list(model.predict(features)), index=features.index)
else:
targets = pd.Series(list(model.predict(features)))
# save the outputs
if isinstance(trans, list): # the previous model divided the data into several data-sets
trans = [(x, targets.loc[x.index]) for x in trans] # need to divide the targets
for i in range(len(trans)):
data_cache[out_name[i]] = trans[i] # save all the data to the cache
else:
if isinstance(features, pd.DataFrame):
trans = pd.DataFrame(trans, index=features.index) # we have only one output, can be numpy array
else:
trans = pd.DataFrame(trans)
trans.dropna(axis='columns', how='all', inplace=True)
data_cache[out_name] = (trans, targets) # save it
finished[m] = True
if output == 'all':
return data_cache['output']
if output == 'preds_only':
return data_cache['output'][1]
if output == 'feats_only':
return data_cache['output'][0]
raise AttributeError(output, 'is not a valid output type')
def normalize_spec(spec):
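    """
    Unwraps one-element input/output lists in an (inputs, model, outputs) spec and
    replaces an empty output list by 'output'.
    """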
ins, mod, outs = spec
if len(ins) == 1:
ins = ins[0]
if len(outs) == 1:
outs = outs[0]
if len(outs) == 0:
outs = 'output'
return ins, mod, outs
def extract_subgraphs(dag, node):
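    """
    Splits the dag feeding the given (stacker) node into one sub-dag per predecessor
    plus the initial dag shared by all of them.
    :return: (list of sub-dags, initial shared dag, name of the shared input data)
    """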
out = []
dag_nx = utils.dag_to_nx(dag)
reverse_dag_nx = dag_nx.reverse()
for p in dag_nx.predecessors(node):
out.append({k: v for k, v in dag.items() if k in list(nx.dfs_preorder_nodes(reverse_dag_nx, p))})
common_nodes = [n for n in out[0] if all((n in o for o in out))]
toposort = list(nx.topological_sort(dag_nx))
sorted_common = sorted(common_nodes, key=lambda k: -toposort.index(k))
inputs = np.unique([dag[n][0] for n in dag_nx.successors(sorted_common[0]) if any([n in o for o in out])])
assert len(inputs) == 1
input_id = inputs[0]
remove_common = sorted_common
nout = []
for o in out:
no = dict()
no['input'] = ([], 'input', input_id)
for k, v in o.items():
if k in remove_common:
continue
ins = v[2]
if not isinstance(ins, list):
ins = [ins]
if ins[0] in dag[node][0]:
no[k] = v[0], v[1], 'output'
continue
no[k] = v
nout.append(no)
initial_dag = {k: v for k, v in dag.items() if k in common_nodes}
for k, v in initial_dag.items():
if isinstance(v[2], list) and input_id in v[2]:
initial_dag[k] = (v[0], v[1], [x if x != input_id else 'output' for x in v[2]])
break
if v[2] == input_id:
initial_dag[k] = (v[0], v[1], 'output')
return nout, initial_dag, input_id
def normalize_dag(dag):
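    """
    Normalizes the dag: expands boosters, normalizes the node specs, and removes
    'copy' nodes by redirecting consumers of their outputs to their inputs.
    """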
dag = process_boosters(dag)
normalized_dag = {k: normalize_spec(v) for (k, v) in dag.items()}
original_len = len(normalized_dag)
aliases = {normalized_dag[k][0]: normalized_dag[k][2] for k in normalized_dag if normalized_dag[k][1][0] == "copy"}
normalized_dag = {k: v for (k, v) in normalized_dag.items() if v[1][0] != 'copy'}
new_len = len(normalized_dag)
rev_aliases = {v: k for k in aliases for v in aliases[k]}
for i in range(original_len-new_len):
normalized_dag = {k: ((rev_aliases[ins] if not isinstance(ins, list) and ins in rev_aliases else ins), mod, out)
for (k, (ins, mod, out)) in normalized_dag.items()}
return normalized_dag
def process_boosters(dag):
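    """
    Replaces each booBegin...booEnd chain in the dag by a single 'booster' node
    whose parameters contain the normalized sub-dags of the individual boosters.
    """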
dag_nx = utils.dag_to_nx(dag)
processed_dag = dict()
sub_dags = []
for k, spec in dag.items():
if spec[1][0] == 'booBegin':
input_name = spec[0]
for node in nx.dfs_preorder_nodes(dag_nx, k):
node_type = dag[node][1][0]
if node == k:
continue
if node_type == 'booster':
sub_dags.append(dag[node][1][2])
if node_type == 'booEnd':
sub_dags = [normalize_dag(sd) for sd in sub_dags]
processed_dag[k] = (input_name, ['booster', {'sub_dags': sub_dags}], dag[node][2])
sub_dags = []
break
elif spec[1][0] in ['booster', 'booEnd']:
continue
else:
processed_dag[k] = spec
return processed_dag
input_cache = {}
def eval_dag(dag, filename, dag_id=None):
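    """
    Evaluates the dag on the given dataset using 5-fold stratified cross-validation.
    :return: (mean score, std. dev. of the score, wall-clock time); the score is the
             quadratic weighted kappa (accuracy for the ml-prove dataset)
    """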
dag = normalize_dag(dag)
# utils.draw_dag(dag)
# pprint.pprint(dag)
if filename not in input_cache:
input_cache[filename] = pd.read_csv('data/'+filename, sep=';')
data = input_cache[filename]
feats = data[data.columns[:-1]]
targets = data[data.columns[-1]]
le = preprocessing.LabelEncoder()
ix = targets.index
targets = pd.Series(le.fit_transform(targets), index=ix)
errors = []
start_time = time.time()
for train_idx, test_idx in cross_validation.StratifiedKFold(targets, n_folds=5):
train_data = (feats.iloc[train_idx], targets.iloc[train_idx])
test_data = (feats.iloc[test_idx], targets.iloc[test_idx])
ms = train_dag(dag, train_data)
preds = test_dag(dag, ms, test_data)
acc = mm.quadratic_weighted_kappa(test_data[1], preds)
if filename == 'ml-prove.csv':
acc = metrics.accuracy_score(test_data[1], preds)
errors.append(acc)
m_errors = float(np.mean(errors))
s_errors = float(np.std(errors))
return m_errors, s_errors, time.time() - start_time
def safe_dag_eval(dag, filename, dag_id=None):
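    """
    Evaluates the dag, catching any exception; on error the traceback and the dag are
    written to an 'error.<dag_id>' file and an empty result is returned.
    :return: ((mean, std, time) or (), dag_id)
    """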
import traceback
import json
try:
return eval_dag(dag, filename, dag_id), dag_id
except Exception as e:
with open('error.'+str(dag_id), 'w') as err:
err.write(str(e)+'\n')
for line in traceback.format_tb(e.__traceback__):
err.write(line)
err.write(json.dumps(dag))
return (), dag_id
if __name__ == '__main__':
datafile = "ml-prove.csv"
dags = utils.read_json('test_err.json')
results = dict()
remaining_dags = [d for d in enumerate(dags) if str(d[0]) not in results]
print("Starting...", len(remaining_dags))
pprint.pprint(remaining_dags)
for e in map(lambda x: safe_dag_eval(x[1], datafile, x[0]), remaining_dags):
results[str(e[1])] = e
print(e)
print("Model %4d: Cross-validation error: %.5f (+-%.5f)" % (e[1], e[0][0], e[0][1]))
sys.stdout.flush()
print("-"*80)
best_error = sorted(results.values(), key=lambda x: x[0][0]-2*x[0][1], reverse=True)[0]
print("Best model CV error: %.5f (+-%.5f)" % (best_error[0][0], best_error[0][1]))
import pprint
print("Model: ", end='')
pprint.pprint(dags[best_error[1]])
|
|
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import sys
import os
import re
import types
from optparse import OptionValueError
from subprocess import PIPE
from time import sleep
from tempfile import mkstemp
sys.path.insert(0, os.path.abspath(os.path.realpath(
os.path.dirname(sys.argv[0]))))
from automation import Automation
from runtests import Mochitest, MochitestOptions
class VMwareOptions(MochitestOptions):
def __init__(self, automation, mochitest, **kwargs):
defaults = {}
MochitestOptions.__init__(self, automation, mochitest.SCRIPT_DIRECTORY)
def checkPathCallback(option, opt_str, value, parser):
path = mochitest.getFullPath(value)
if not os.path.exists(path):
raise OptionValueError("Path %s does not exist for %s option"
% (path, opt_str))
setattr(parser.values, option.dest, path)
self.add_option("--with-vmware-vm",
action = "callback", type = "string", dest = "vmx",
callback = checkPathCallback,
help = "launches the given VM and runs mochitests inside")
defaults["vmx"] = None
self.add_option("--with-vmrun-executable",
action = "callback", type = "string", dest = "vmrun",
callback = checkPathCallback,
help = "specifies the vmrun.exe to use for VMware control")
defaults["vmrun"] = None
self.add_option("--shutdown-vm-when-done",
action = "store_true", dest = "shutdownVM",
help = "shuts down the VM when mochitests complete")
defaults["shutdownVM"] = False
self.add_option("--repeat-until-failure",
action = "store_true", dest = "repeatUntilFailure",
help = "Runs tests continuously until failure")
defaults["repeatUntilFailure"] = False
self.set_defaults(**defaults)
class VMwareMochitest(Mochitest):
_pathFixRegEx = re.compile(r'^[cC](\:[\\\/]+)')
def convertHostPathsToGuestPaths(self, string):
""" converts a path on the host machine to a path on the guest machine """
# XXXbent Lame!
return self._pathFixRegEx.sub(r'z\1', string)
def prepareGuestArguments(self, parser, options):
""" returns an array of command line arguments needed to replicate the
current set of options in the guest """
args = []
for key in options.__dict__.keys():
# Don't send these args to the vm test runner!
if key == "vmrun" or key == "vmx" or key == "repeatUntilFailure":
continue
value = options.__dict__[key]
valueType = type(value)
# Find the option in the parser's list.
option = None
for index in range(len(parser.option_list)):
if str(parser.option_list[index].dest) == key:
option = parser.option_list[index]
break
if not option:
continue
# No need to pass args on the command line if they're just going to set
# default values. The exception is list values... For some reason the
# option parser modifies the defaults as well as the values when using the
# "append" action.
if value == parser.defaults[option.dest]:
if valueType == types.StringType and \
value == self.convertHostPathsToGuestPaths(value):
continue
if valueType != types.ListType:
continue
def getArgString(arg, option):
if option.action == "store_true" or option.action == "store_false":
return str(option)
return "%s=%s" % (str(option),
self.convertHostPathsToGuestPaths(str(arg)))
if valueType == types.ListType:
# Expand lists into separate args.
for item in value:
args.append(getArgString(item, option))
else:
args.append(getArgString(value, option))
return tuple(args)
def launchVM(self, options):
""" launches the VM and enables shared folders """
# Launch VM first.
self.automation.log.info("INFO | runtests.py | Launching the VM.")
(result, stdout) = self.runVMCommand(self.vmrunargs + ("start", self.vmx))
if result:
return result
# Make sure that shared folders are enabled.
self.automation.log.info("INFO | runtests.py | Enabling shared folders in "
"the VM.")
(result, stdout) = self.runVMCommand(self.vmrunargs + \
("enableSharedFolders", self.vmx))
if result:
return result
def shutdownVM(self):
""" shuts down the VM """
self.automation.log.info("INFO | runtests.py | Shutting down the VM.")
command = self.vmrunargs + ("runProgramInGuest", self.vmx,
"c:\\windows\\system32\\shutdown.exe", "/s", "/t", "1")
(result, stdout) = self.runVMCommand(command)
return result
  def runVMCommand(self, command, expectedErrors=None, silent=False):
""" runs a command in the VM using the vmrun.exe helper """
commandString = ""
for part in command:
commandString += str(part) + " "
if not silent:
self.automation.log.info("INFO | runtests.py | Running command: %s"
% commandString)
    commonErrors = ["Error: Invalid user name or password for the guest OS",
                    "Unable to connect to host."]
    expectedErrors = list(expectedErrors or []) + commonErrors
# VMware can't run commands until the VM has fully loaded so keep running
# this command in a loop until it succeeds or we try 100 times.
errorString = ""
for i in range(100):
process = Automation.Process(command, stdout=PIPE)
result = process.wait()
if result == 0:
break
for line in process.stdout.readlines():
line = line.strip()
if not line:
continue
errorString = line
break
expected = False
for error in expectedErrors:
if errorString.startswith(error):
expected = True
if not expected:
self.automation.log.warning("WARNING | runtests.py | Command \"%s\" "
"failed with result %d, : %s"
% (commandString, result, errorString))
break
if not silent:
self.automation.log.info("INFO | runtests.py | Running command again.")
return (result, process.stdout.readlines())
def monitorVMExecution(self, appname, logfilepath):
""" monitors test execution in the VM. Waits for the test process to start,
then watches the log file for test failures and checks the status of the
process to catch crashes. Returns True if mochitests ran successfully.
"""
success = True
self.automation.log.info("INFO | runtests.py | Waiting for test process to "
"start.")
listProcessesCommand = self.vmrunargs + ("listProcessesInGuest", self.vmx)
expectedErrors = [ "Error: The virtual machine is not powered on" ]
running = False
for i in range(100):
(result, stdout) = self.runVMCommand(listProcessesCommand, expectedErrors,
silent=True)
if result:
self.automation.log.warning("WARNING | runtests.py | Failed to get "
"list of processes in VM!")
return False
for line in stdout:
line = line.strip()
if line.find(appname) != -1:
running = True
break
if running:
break
sleep(1)
self.automation.log.info("INFO | runtests.py | Found test process, "
"monitoring log.")
completed = False
nextLine = 0
while running:
log = open(logfilepath, "rb")
lines = log.readlines()
if len(lines) > nextLine:
linesToPrint = lines[nextLine:]
for line in linesToPrint:
line = line.strip()
if line.find("INFO SimpleTest FINISHED") != -1:
completed = True
continue
if line.find("ERROR TEST-UNEXPECTED-FAIL") != -1:
self.automation.log.info("INFO | runtests.py | Detected test "
"failure: \"%s\"" % line)
success = False
nextLine = len(lines)
log.close()
(result, stdout) = self.runVMCommand(listProcessesCommand, expectedErrors,
silent=True)
if result:
self.automation.log.warning("WARNING | runtests.py | Failed to get "
"list of processes in VM!")
return False
stillRunning = False
for line in stdout:
line = line.strip()
if line.find(appname) != -1:
stillRunning = True
break
if stillRunning:
sleep(5)
else:
if not completed:
self.automation.log.info("INFO | runtests.py | Test process exited "
"without finishing tests, maybe crashed.")
success = False
running = stillRunning
return success
def getCurentSnapshotList(self):
""" gets a list of snapshots from the VM """
(result, stdout) = self.runVMCommand(self.vmrunargs + ("listSnapshots",
self.vmx))
snapshots = []
if result != 0:
self.automation.log.warning("WARNING | runtests.py | Failed to get list "
"of snapshots in VM!")
return snapshots
for line in stdout:
if line.startswith("Total snapshots:"):
continue
snapshots.append(line.strip())
return snapshots
def runTests(self, parser, options):
""" runs mochitests in the VM """
# Base args that must always be passed to vmrun.
self.vmrunargs = (options.vmrun, "-T", "ws", "-gu", "Replay", "-gp",
"mozilla")
self.vmrun = options.vmrun
self.vmx = options.vmx
result = self.launchVM(options)
if result:
return result
if options.vmwareRecording:
snapshots = self.getCurentSnapshotList()
def innerRun():
""" subset of the function that must run every time if we're running until
failure """
# Make a new shared file for the log file.
(logfile, logfilepath) = mkstemp(suffix=".log")
os.close(logfile)
# Get args to pass to VM process. Make sure we autorun and autoclose.
options.autorun = True
options.closeWhenDone = True
options.logFile = logfilepath
self.automation.log.info("INFO | runtests.py | Determining guest "
"arguments.")
runtestsArgs = self.prepareGuestArguments(parser, options)
runtestsPath = self.convertHostPathsToGuestPaths(self.SCRIPT_DIRECTORY)
runtestsPath = os.path.join(runtestsPath, "runtests.py")
runtestsCommand = self.vmrunargs + ("runProgramInGuest", self.vmx,
"-activeWindow", "-interactive", "-noWait",
"c:\\mozilla-build\\python25\\python.exe",
runtestsPath) + runtestsArgs
expectedErrors = [ "Unable to connect to host.",
"Error: The virtual machine is not powered on" ]
self.automation.log.info("INFO | runtests.py | Launching guest test "
"runner.")
(result, stdout) = self.runVMCommand(runtestsCommand, expectedErrors)
if result:
return (result, False)
self.automation.log.info("INFO | runtests.py | Waiting for guest test "
"runner to complete.")
mochitestsSucceeded = self.monitorVMExecution(
os.path.basename(options.app), logfilepath)
if mochitestsSucceeded:
self.automation.log.info("INFO | runtests.py | Guest tests passed!")
else:
self.automation.log.info("INFO | runtests.py | Guest tests failed.")
if mochitestsSucceeded and options.vmwareRecording:
newSnapshots = self.getCurentSnapshotList()
if len(newSnapshots) > len(snapshots):
self.automation.log.info("INFO | runtests.py | Removing last "
"recording.")
(result, stdout) = self.runVMCommand(self.vmrunargs + \
("deleteSnapshot", self.vmx,
newSnapshots[-1]))
self.automation.log.info("INFO | runtests.py | Removing guest log file.")
for i in range(30):
try:
os.remove(logfilepath)
break
except:
sleep(1)
self.automation.log.warning("WARNING | runtests.py | Couldn't remove "
"guest log file, trying again.")
return (result, mochitestsSucceeded)
if options.repeatUntilFailure:
succeeded = True
result = 0
count = 1
while result == 0 and succeeded:
self.automation.log.info("INFO | runtests.py | Beginning mochitest run "
"(%d)." % count)
count += 1
(result, succeeded) = innerRun()
else:
self.automation.log.info("INFO | runtests.py | Beginning mochitest run.")
(result, succeeded) = innerRun()
if not succeeded and options.vmwareRecording:
newSnapshots = self.getCurentSnapshotList()
if len(newSnapshots) > len(snapshots):
self.automation.log.info("INFO | runtests.py | Failed recording saved "
"as '%s'." % newSnapshots[-1])
if result:
return result
if options.shutdownVM:
result = self.shutdownVM()
if result:
return result
return 0
def main():
automation = Automation()
mochitest = VMwareMochitest(automation)
parser = VMwareOptions(automation, mochitest)
options, args = parser.parse_args()
options = parser.verifyOptions(options, mochitest)
  if options is None:
sys.exit(1)
if options.vmx is None:
parser.error("A virtual machine must be specified with " +
"--with-vmware-vm")
if options.vmrun is None:
options.vmrun = os.path.join("c:\\", "Program Files", "VMware",
"VMware VIX", "vmrun.exe")
if not os.path.exists(options.vmrun):
options.vmrun = os.path.join("c:\\", "Program Files (x86)", "VMware",
"VMware VIX", "vmrun.exe")
if not os.path.exists(options.vmrun):
parser.error("Could not locate vmrun.exe, use --with-vmrun-executable" +
" to identify its location")
sys.exit(mochitest.runTests(parser, options))
if __name__ == "__main__":
main()
|
|
# Copyright 2019 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Piano Genie continuous eval script."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import time
from magenta.models.piano_genie import gold
from magenta.models.piano_genie.configs import get_named_config
from magenta.models.piano_genie.loader import load_noteseqs
from magenta.models.piano_genie.model import build_genie_model
import numpy as np
import tensorflow as tf
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_string("dataset_fp", "./data/valid*.tfrecord",
"Path to dataset containing TFRecords of NoteSequences.")
flags.DEFINE_string("train_dir", "", "The directory for this experiment.")
flags.DEFINE_string("eval_dir", "", "The directory for evaluation output.")
flags.DEFINE_string("model_cfg", "stp_iq_auto", "Hyperparameter configuration.")
flags.DEFINE_string("model_cfg_overrides", "",
"E.g. rnn_nlayers=4,rnn_nunits=256")
flags.DEFINE_string("ckpt_fp", None,
"If specified, only evaluate a single checkpoint.")
def main(unused_argv):
if not tf.gfile.IsDirectory(FLAGS.eval_dir):
tf.gfile.MakeDirs(FLAGS.eval_dir)
cfg, _ = get_named_config(FLAGS.model_cfg, FLAGS.model_cfg_overrides)
# Load data
with tf.name_scope("loader"):
feat_dict = load_noteseqs(
FLAGS.dataset_fp,
cfg.eval_batch_size,
cfg.eval_seq_len,
max_discrete_times=cfg.data_max_discrete_times,
max_discrete_velocities=cfg.data_max_discrete_velocities,
augment_stretch_bounds=None,
augment_transpose_bounds=None,
randomize_chord_order=cfg.data_randomize_chord_order,
repeat=False)
# Build model
with tf.variable_scope("phero_model"):
model_dict = build_genie_model(
feat_dict,
cfg,
cfg.eval_batch_size,
cfg.eval_seq_len,
is_training=False)
genie_vars = tf.get_collection(
tf.GraphKeys.GLOBAL_VARIABLES, scope="phero_model")
# Build gold model
eval_gold = False
if cfg.stp_emb_vq or cfg.stp_emb_iq:
eval_gold = True
with tf.variable_scope("phero_model", reuse=True):
gold_feat_dict = {
"midi_pitches": tf.placeholder(tf.int32, [1, None]),
"velocities": tf.placeholder(tf.int32, [1, None]),
"delta_times_int": tf.placeholder(tf.int32, [1, None])
}
gold_seq_maxlen = gold.gold_longest()
gold_seq_varlens = tf.placeholder(tf.int32, [1])
gold_buttons = tf.placeholder(tf.int32, [1, None])
gold_model_dict = build_genie_model(
gold_feat_dict,
cfg,
1,
gold_seq_maxlen,
is_training=False,
seq_varlens=gold_seq_varlens)
gold_encodings = gold_model_dict[
"stp_emb_vq_discrete" if cfg.stp_emb_vq else "stp_emb_iq_discrete"]
gold_mask = tf.sequence_mask(
gold_seq_varlens, maxlen=gold_seq_maxlen, dtype=tf.float32)
gold_diff = tf.cast(gold_buttons, tf.float32) - tf.cast(
gold_encodings, tf.float32)
gold_diff_l2 = tf.square(gold_diff)
gold_diff_l1 = tf.abs(gold_diff)
weighted_avg = lambda t, m: tf.reduce_sum(t * m) / tf.reduce_sum(m)
gold_diff_l2 = weighted_avg(gold_diff_l2, gold_mask)
gold_diff_l1 = weighted_avg(gold_diff_l1, gold_mask)
gold_diff_l2_placeholder = tf.placeholder(tf.float32, [None])
gold_diff_l1_placeholder = tf.placeholder(tf.float32, [None])
summary_name_to_batch_tensor = {}
# Summarize quantized step embeddings
if cfg.stp_emb_vq:
summary_name_to_batch_tensor["codebook_perplexity"] = model_dict[
"stp_emb_vq_codebook_ppl"]
summary_name_to_batch_tensor["loss_vqvae"] = model_dict["stp_emb_vq_loss"]
# Summarize integer-quantized step embeddings
if cfg.stp_emb_iq:
summary_name_to_batch_tensor["discrete_perplexity"] = model_dict[
"stp_emb_iq_discrete_ppl"]
summary_name_to_batch_tensor["iq_valid_p"] = model_dict[
"stp_emb_iq_valid_p"]
summary_name_to_batch_tensor["loss_iq_range"] = model_dict[
"stp_emb_iq_range_penalty"]
summary_name_to_batch_tensor["loss_iq_contour"] = model_dict[
"stp_emb_iq_contour_penalty"]
summary_name_to_batch_tensor["loss_iq_deviate"] = model_dict[
"stp_emb_iq_deviate_penalty"]
if cfg.stp_emb_vq or cfg.stp_emb_iq:
summary_name_to_batch_tensor["contour_violation"] = model_dict[
"contour_violation"]
summary_name_to_batch_tensor["deviate_violation"] = model_dict[
"deviate_violation"]
# Summarize VAE sequence embeddings
if cfg.seq_emb_vae:
summary_name_to_batch_tensor["loss_kl"] = model_dict["seq_emb_vae_kl"]
# Reconstruction loss
summary_name_to_batch_tensor["loss_recons"] = model_dict["dec_recons_loss"]
summary_name_to_batch_tensor["ppl_recons"] = tf.exp(
model_dict["dec_recons_loss"])
if cfg.dec_pred_velocity:
summary_name_to_batch_tensor["loss_recons_velocity"] = model_dict[
"dec_recons_velocity_loss"]
summary_name_to_batch_tensor["ppl_recons_velocity"] = tf.exp(
model_dict["dec_recons_velocity_loss"])
# Create dataset summaries
summaries = []
summary_name_to_placeholder = {}
for name in summary_name_to_batch_tensor:
placeholder = tf.placeholder(tf.float32, [None])
summary_name_to_placeholder[name] = placeholder
summaries.append(tf.summary.scalar(name, tf.reduce_mean(placeholder)))
if eval_gold:
summary_name_to_placeholder["gold_diff_l2"] = gold_diff_l2_placeholder
summaries.append(
tf.summary.scalar("gold_diff_l2",
tf.reduce_mean(gold_diff_l2_placeholder)))
summary_name_to_placeholder["gold_diff_l1"] = gold_diff_l1_placeholder
summaries.append(
tf.summary.scalar("gold_diff_l1",
tf.reduce_mean(gold_diff_l1_placeholder)))
summaries = tf.summary.merge(summaries)
summary_writer = tf.summary.FileWriter(FLAGS.eval_dir)
# Create saver
step = tf.train.get_or_create_global_step()
saver = tf.train.Saver(genie_vars + [step], max_to_keep=None)
def _eval_all(sess):
"""Gathers all metrics for a ckpt."""
summaries = collections.defaultdict(list)
if eval_gold:
for midi_notes, buttons, seq_varlen in gold.gold_iterator([-6, 6]):
gold_diff_l1_seq, gold_diff_l2_seq = sess.run(
[gold_diff_l1, gold_diff_l2], {
gold_feat_dict["midi_pitches"]:
midi_notes,
gold_feat_dict["delta_times_int"]:
np.ones_like(midi_notes) * 8,
gold_seq_varlens: [seq_varlen],
gold_buttons: buttons
})
summaries["gold_diff_l1"].append(gold_diff_l1_seq)
summaries["gold_diff_l2"].append(gold_diff_l2_seq)
while True:
try:
batches = sess.run(summary_name_to_batch_tensor)
except tf.errors.OutOfRangeError:
break
for name, scalar in batches.items():
summaries[name].append(scalar)
return summaries
# Eval
if FLAGS.ckpt_fp is None:
ckpt_fp = None
while True:
latest_ckpt_fp = tf.train.latest_checkpoint(FLAGS.train_dir)
if latest_ckpt_fp != ckpt_fp:
print("Eval: {}".format(latest_ckpt_fp))
with tf.Session() as sess:
sess.run(tf.local_variables_initializer())
saver.restore(sess, latest_ckpt_fp)
ckpt_summaries = _eval_all(sess)
ckpt_summaries, ckpt_step = sess.run(
[summaries, step],
feed_dict={
summary_name_to_placeholder[n]: v
for n, v in ckpt_summaries.items()
})
summary_writer.add_summary(ckpt_summaries, ckpt_step)
saver.save(
sess, os.path.join(FLAGS.eval_dir, "ckpt"), global_step=ckpt_step)
print("Done")
ckpt_fp = latest_ckpt_fp
time.sleep(1)
else:
with tf.Session() as sess:
sess.run(tf.local_variables_initializer())
saver.restore(sess, FLAGS.ckpt_fp)
ckpt_summaries = _eval_all(sess)
ckpt_step = sess.run(step)
print("-" * 80)
print("Ckpt: {}".format(FLAGS.ckpt_fp))
print("Step: {}".format(ckpt_step))
for n, l in sorted(ckpt_summaries.items(), key=lambda x: x[0]):
print("{}: {}".format(n, np.mean(l)))
if __name__ == "__main__":
tf.app.run()
|
|
from test.support import TESTFN
import unittest
from test import audiotests
from audioop import byteswap
import sys
import sunau
class SunauTest(audiotests.AudioWriteTests,
audiotests.AudioTestsWithSourceFile):
module = sunau
class SunauPCM8Test(SunauTest, unittest.TestCase):
sndfilename = 'pluck-pcm8.au'
sndfilenframes = 3307
nchannels = 2
sampwidth = 1
framerate = 11025
nframes = 48
comptype = 'NONE'
compname = 'not compressed'
frames = bytes.fromhex("""\
02FF 4B00 3104 8008 CB06 4803 BF01 03FE B8FA B4F3 29EB 1AE6 \
EDE4 C6E2 0EE0 EFE0 57E2 FBE8 13EF D8F7 97FB F5FC 08FB DFFB \
11FA 3EFB BCFC 66FF CF04 4309 C10E 5112 EE17 8216 7F14 8012 \
490E 520D EF0F CE0F E40C 630A 080A 2B0B 510E 8B11 B60E 440A \
""")
class SunauPCM16Test(SunauTest, unittest.TestCase):
sndfilename = 'pluck-pcm16.au'
sndfilenframes = 3307
nchannels = 2
sampwidth = 2
framerate = 11025
nframes = 48
comptype = 'NONE'
compname = 'not compressed'
frames = bytes.fromhex("""\
022EFFEA 4B5C00F9 311404EF 80DB0844 CBE006B0 48AB03F3 BFE601B5 0367FE80 \
B853FA42 B4AFF351 2997EBCD 1A5AE6DC EDF9E492 C627E277 0E06E0B7 EF29E029 \
5759E271 FB34E83F 1377EF85 D82CF727 978EFB79 F5F7FC12 0864FB9E DF30FB40 \
1183FA30 3EEAFB59 BC78FCB4 66D5FF60 CF130415 431A097D C1BA0EC7 512312A0 \
EEE11754 82071666 7FFE1448 80001298 49990EB7 52B40DC1 EFAD0F65 CE3A0FBE \
E4B70CE6 63490A57 08CC0A1D 2BBC0B09 51480E46 8BCB113C B6F60EE9 44150A5A \
""")
class SunauPCM24Test(SunauTest, unittest.TestCase):
sndfilename = 'pluck-pcm24.au'
sndfilenframes = 3307
nchannels = 2
sampwidth = 3
framerate = 11025
nframes = 48
comptype = 'NONE'
compname = 'not compressed'
frames = bytes.fromhex("""\
022D65FFEB9D 4B5A0F00FA54 3113C304EE2B 80DCD6084303 \
CBDEC006B261 48A99803F2F8 BFE82401B07D 036BFBFE7B5D \
B85756FA3EC9 B4B055F3502B 299830EBCB62 1A5CA7E6D99A \
EDFA3EE491BD C625EBE27884 0E05A9E0B6CF EF2929E02922 \
5758D8E27067 FB3557E83E16 1377BFEF8402 D82C5BF7272A \
978F16FB7745 F5F865FC1013 086635FB9C4E DF30FCFB40EE \
117FE0FA3438 3EE6B8FB5AC3 BC77A3FCB2F4 66D6DAFF5F32 \
CF13B9041275 431D69097A8C C1BB600EC74E 5120B912A2BA \
EEDF641754C0 8207001664B7 7FFFFF14453F 8000001294E6 \
499C1B0EB3B2 52B73E0DBCA0 EFB2B20F5FD8 CE3CDB0FBE12 \
E4B49C0CEA2D 6344A80A5A7C 08C8FE0A1FFE 2BB9860B0A0E \
51486F0E44E1 8BCC64113B05 B6F4EC0EEB36 4413170A5B48 \
""")
class SunauPCM32Test(SunauTest, unittest.TestCase):
sndfilename = 'pluck-pcm32.au'
sndfilenframes = 3307
nchannels = 2
sampwidth = 4
framerate = 11025
nframes = 48
comptype = 'NONE'
compname = 'not compressed'
frames = bytes.fromhex("""\
022D65BCFFEB9D92 4B5A0F8000FA549C 3113C34004EE2BC0 80DCD680084303E0 \
CBDEC0C006B26140 48A9980003F2F8FC BFE8248001B07D92 036BFB60FE7B5D34 \
B8575600FA3EC920 B4B05500F3502BC0 29983000EBCB6240 1A5CA7A0E6D99A60 \
EDFA3E80E491BD40 C625EB80E27884A0 0E05A9A0E0B6CFE0 EF292940E0292280 \
5758D800E2706700 FB3557D8E83E1640 1377BF00EF840280 D82C5B80F7272A80 \
978F1600FB774560 F5F86510FC101364 086635A0FB9C4E20 DF30FC40FB40EE28 \
117FE0A0FA3438B0 3EE6B840FB5AC3F0 BC77A380FCB2F454 66D6DA80FF5F32B4 \
CF13B980041275B0 431D6980097A8C00 C1BB60000EC74E00 5120B98012A2BAA0 \
EEDF64C01754C060 820700001664B780 7FFFFFFF14453F40 800000001294E6E0 \
499C1B000EB3B270 52B73E000DBCA020 EFB2B2E00F5FD880 CE3CDB400FBE1270 \
E4B49CC00CEA2D90 6344A8800A5A7CA0 08C8FE800A1FFEE0 2BB986C00B0A0E00 \
51486F800E44E190 8BCC6480113B0580 B6F4EC000EEB3630 441317800A5B48A0 \
""")
class SunauULAWTest(SunauTest, unittest.TestCase):
sndfilename = 'pluck-ulaw.au'
sndfilenframes = 3307
nchannels = 2
sampwidth = 2
framerate = 11025
nframes = 48
comptype = 'ULAW'
compname = 'CCITT G.711 u-law'
frames = bytes.fromhex("""\
022CFFE8 497C00F4 307C04DC 8284083C CB84069C 497C03DC BE8401AC 036CFE74 \
B684FA24 B684F344 2A7CEC04 19FCE704 EE04E504 C584E204 0E3CE104 EF04DF84 \
557CE204 FB24E804 12FCEF04 D784F744 9684FB64 F5C4FC24 083CFBA4 DF84FB24 \
11FCFA24 3E7CFB64 BA84FCB4 657CFF5C CF84041C 417C09BC C1840EBC 517C12FC \
EF0416FC 828415FC 7D7C13FC 828412FC 497C0EBC 517C0DBC F0040F3C CD840FFC \
E5040CBC 617C0A3C 08BC0A3C 2C7C0B3C 517C0E3C 8A8410FC B6840EBC 457C0A3C \
""")
if sys.byteorder != 'big':
frames = byteswap(frames, 2)
if __name__ == "__main__":
unittest.main()
>>>>>>> b875702c9c06ab5012e52ff4337439b03918f453
|
|
'''
Created on 17.03.2016
@author: stefan
'''
import unittest
from lenatu import tools
import lenatu
import ast
class TestScope(unittest.TestCase):
def setUp(self):
self.cache = {}
def test_module_global(self):
src = """
x = 1
"""
self.assertSame(src,
".defined_block",
".**{Name}.id_block")
def test_implicit_local(self):
src = """
def f():
x = 1
"""
self.assertSame(src,
".**{FunctionDef}.defined_block",
".**{Name}.id_block")
def test_implicit_global(self):
src = """
def f():
x + 1
"""
self.assertSame(src,
".defined_block",
".**{Name}.id_block")
@tools.version("3.0+")
def test_parameter_arg(self):
src = """
def f(x):
pass
"""
self.assertSame(src,
".**{FunctionDef}.defined_block",
".**{arg}.arg_block")
@tools.version("2.0+")
def test_parameter_P2(self):
src = """
def f(x):
pass
"""
self.assertSame(src,
".**{FunctionDef}.defined_block",
".**{id=x}.id_block")
@tools.version("2.0+")
def test_vararg_P2(self):
src = """
def f(*x):
pass
"""
self.assertSame(src,
".**{FunctionDef}.defined_block",
".**{vararg=x}.vararg_block")
@tools.version("3.0 3.1 3.2 3.3")
def test_vararg_P30(self):
src = """
def f(*x):
pass
"""
self.assertSame(src,
".**{FunctionDef}.defined_block",
".**{arguments}.vararg_block")
@tools.version("3.4+")
def test_vararg_P34(self):
src = """
def f(*x):
pass
"""
self.assertSame(src,
".**{FunctionDef}.defined_block",
".**{arg=x}.arg_block")
@tools.version("2.0+")
def test_kwarg_P2(self):
src = """
def f(**x):
pass
"""
self.assertSame(src,
".**{FunctionDef}.defined_block",
".**{kwarg=x}.kwarg_block")
@tools.version("3.0 3.1 3.2 3.3")
def test_kwarg_P30(self):
src = """
def f(**x):
pass
"""
self.assertSame(src,
".**{FunctionDef}.defined_block",
".**{arguments}.kwarg_block")
@tools.version("3.4+")
def test_kwarg_P34(self):
src = """
def f(**x):
pass
"""
self.assertSame(src,
".**{FunctionDef}.defined_block",
".**{arg=x}.arg_block")
def test_default(self):
src = """
def foo(x=y):
pass
"""
self.assertSame(src,
".defined_block",
".**{id=y}.id_block")
@tools.version("3.0+")
def test_arg_annotation(self):
src = """
def foo(x:y):
pass
"""
self.assertSame(src,
".defined_block",
".**{arg}.annotation.id_block")
def test_implicit_closure(self):
src = """
def f():
x = 1
def g():
x + 1
"""
self.assertSame(src,
".**{name=f}.defined_block",
".**{name=g}.**{id=x}.id_block")
@tools.version("3.0+")
    def test_explicit_closure(self):
src = """
def f():
x = 1
def g():
nonlocal x
x = 2
"""
self.assertSame(src,
".**{name=f}.defined_block",
".**{name=g}.**{id=x}.id_block")
def test_local_hides_closure(self):
src = """
def f():
x = 1
def g():
x = 2
"""
self.assertSame(src,
".**{name=g}.defined_block",
".**{name=g}.**{id=x}.id_block")
def test_explicit_global_closure(self):
src = """
def f():
x = 1
def g():
global x
x + 1
"""
self.assertSame(src,
".defined_block",
".**{name=g}.**{id=x}.id_block")
def test_class(self):
src = """
class f():
pass
"""
self.assertSame(src,
".defined_block",
".**{ClassDef}.name_block")
def test_class_member(self):
src = """
class f():
x = 1
"""
self.assertSame(src,
".**{ClassDef}.defined_block",
".**{id=x}.id_block")
def test_class_uses_closure(self):
src = """
def f(x):
class g():
y = x + 1
"""
self.assertSame(src,
".**{FunctionDef}.defined_block",
".**{ClassDef}.**{id=x}.id_block")
def test_class_members_no_closure(self):
src = """
class f():
x = 1
def g():
y = x + 1
"""
self.assertSame(src,
".defined_block",
".**{name=g}.**{id=x}.id_block")
def test_class_bypassed(self):
src = """
def f():
x = 1
class g():
x = 2
def h():
print(x)
"""
self.assertSame(src,
".**{name=f}.defined_block",
".**{name=h}.**{id=x}.id_block")
def test_import(self):
src = """
def f():
import x
"""
self.assertSame(src,
".**{name=f}.defined_block",
".**{alias}.name_block")
def test_import_as(self):
src = """
def f():
import x as y
"""
self.assertSame(src,
".**{name=f}.defined_block",
".**{alias}.asname_block")
def test_except(self):
src = """
def f():
try:
pass
except ValueError as e:
pass
"""
self.assertSame(src,
".**{name=f}.defined_block",
".**{ExceptHandler}.name_block")
@tools.version("3.0+")
def test_except_nonlocal(self):
src = """
def f():
nonlocal e
try:
pass
except ValueError as e:
pass
"""
self.assertSame(src,
".defined_block",
".**{ExceptHandler}.name_block")
def test_generator_element(self):
src = """
def f():
(x for x in y)
"""
self.assertSame(src,
".**{GeneratorExp}.defined_block",
".**{GeneratorExp}.elt.id_block")
def test_generator_iterable(self):
src = """
def f(y):
(x for x in y)
"""
self.assertSame(src,
".**{name=f}.defined_block",
".**{GeneratorExp}.generators.**{id=y}.id_block")
def test_with(self):
src = """
def f():
with x as y:
pass
"""
self.assertSame(src,
".**{name=f}.defined_block",
".**{id=y}.id_block")
self.assertSame(src,
".defined_block",
".**{id=x}.id_block")
def get(self, src, path):
node = self.parse(src)
return tools.npath(node, path)
def assertSame(self, src, path_a, path_b):
node = self.parse(src)
a = tools.npath(node, path_a)
b = tools.npath(node, path_b)
self.assertIs(a, b)
def assertNotSame(self, src, path_a, path_b):
node = self.parse(src)
a = tools.npath(node, path_a)
b = tools.npath(node, path_b)
self.assertIsNot(a, b)
    def parse(self, src):
        if src not in self.cache:
            node = ast.parse(tools.unindent(src))
            lenatu.augment(node)
            self.cache[src] = node
        else:
            node = self.cache[src]
        return node
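# Not part of the original module: a conventional entry point so this test
# suite can also be run directly as a script (assumes no external runner).
if __name__ == "__main__":
    unittest.main()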
|
|
import single_robot_behavior
import behavior
import constants
import robocup
import main
from enum import Enum
import math
import planning_priority
## The Defender behavior positions a robot on a certain area of the field and defends it
class Defender(single_robot_behavior.SingleRobotBehavior):
class State(Enum):
## gets between a particular opponent and the goal. stays closer to the goal
marking = 1
## chilling out in a zone waiting to mark an opponent. doesn't do this much
area_marking = 2
## The area of the field this robot should block
class Side(Enum):
left = 1
center = 2
right = 3
def __init__(self, side=Side.center):
super().__init__(continuous=True)
self._block_robot = None
self._area = None
self._side = side
self._opponent_avoid_threshold = 2.0
self._defend_goal_radius = 0.9
self._win_eval = robocup.WindowEvaluator(main.context())
self._area = robocup.Rect(
robocup.Point(-constants.Field.Width / 2.0,
constants.Field.Length),
robocup.Point(constants.Field.Width / 2.0, 0))
if self._side is Defender.Side.right:
self._area.get_pt(0).x = 0
if self._side is Defender.Side.left:
self._area.get_pt(1).x = 0
self.add_state(Defender.State.marking, behavior.Behavior.State.running)
self.add_state(Defender.State.area_marking,
behavior.Behavior.State.running)
self.add_transition(behavior.Behavior.State.start,
Defender.State.marking, lambda: True,
"immediately")
self.add_transition(
Defender.State.marking, Defender.State.area_marking,
lambda: not self._area.contains_point(main.ball().pos) and self.block_robot is None,
"if ball not in area and no robot to block")
self.add_transition(
Defender.State.area_marking, Defender.State.marking,
lambda: self._area.contains_point(main.ball().pos) or self.find_robot_to_block() is not None,
"if ball or opponent enters my area")
def execute_running(self):
self.robot.set_planning_priority(planning_priority.DEFENDER)
def execute_marking(self):
#main.debug_drawer().draw_line(robocup.Line(self._area.get_pt(0), self._area.get_pt(1)), (127,0,255), "Defender")
self.block_robot = self.find_robot_to_block()
if self.block_robot is not None:
# self.robot.add_text("Blocking Robot " + str(self.block_robot.shell_id()), (255,255,255), "RobotText")
pass
if self.robot.pos.near_point(
robocup.Point(0, 0), self._opponent_avoid_threshold):
self.robot.set_avoid_opponents(False)
else:
self.robot.set_avoid_opponents(True)
target = None
if self.block_robot is None:
target = main.ball().pos + main.ball().vel * 0.3
else:
target = self.block_robot.pos + self.block_robot.vel * 0.3
goal_line = robocup.Segment(
robocup.Point(-constants.Field.GoalWidth / 2.0, 0),
robocup.Point(constants.Field.GoalWidth / 2.0, 0))
self._win_eval.excluded_robots = [self.robot]
# TODO defenders should register themselves with some static list on init
# TODO make this happen in python-land
# for (Defender *f : otherDefenders)
# {
# if (f->robot)
# {
# _winEval.exclude.push_back(f->robot->pos);
# }
# }
windows = self._win_eval.eval_pt_to_seg(target, goal_line)[0]
best = None
goalie = main.our_robot_with_id(main.root_play().goalie_id)
if goalie is not None and self.side is not Defender.Side.center:
for window in windows:
if best is None:
best = window
elif self.side is Defender.Side.left and window.segment.center.x < goalie.pos.x and window.segment.length > best.segment.length:
best = window
elif self.side is Defender.Side.right and window.segment.center.x > goalie.pos.x and window.segment.length > best.segment.length:
best = window
else:
best_dist = 0
for window in windows:
seg = robocup.Segment(window.segment.center(), main.ball().pos)
new_dist = seg.dist_to(self.robot.pos)
if best is None or new_dist < best_dist:
best = window
best_dist = new_dist
shoot_seg = None
if best is not None:
if self.block_robot is not None:
dirvec = robocup.Point.direction(self.block_robot.angle *
(math.pi / 180.0))
shoot_seg = robocup.Segment(
self.block_robot.pos, self.block_robot.pos + dirvec * 7.0)
else:
shoot_seg = robocup.Segment(
main.ball().pos,
main.ball().pos + main.ball().vel.normalized() * 7.0)
need_task = False
if best is not None:
winseg = best.segment
if main.ball().vel.magsq() > 0.03 and winseg.segment_intersection(
shoot_seg) != None:
self.robot.move_to(shoot_seg.nearest_point(self.robot.pos))
self.robot.face_none()
else:
winsize = winseg.length()
if winsize < constants.Ball.Radius:
need_task = True
else:
arc = robocup.Circle(
robocup.Point(0, 0), self._defend_goal_radius)
shot = robocup.Line(winseg.center(), target)
dest = [robocup.Point(0, 0), robocup.Point(0, 0)]
intersected, dest[0], dest[1] = shot.intersects_circle(arc)
if intersected:
                        self.robot.move_to(dest[0] if dest[0].y > 0 else dest[1])
if self.block_robot is not None:
self.robot.face(self.block_robot.pos)
else:
self.robot.face(main.ball().pos)
else:
need_task = True
if need_task:
self.robot.face(main.ball().pos)
backVec = robocup.Point(1, 0)
backPos = robocup.Point(-constants.Field.Width / 2, 0)
shotVec = robocup.Point(main.ball().pos - self.robot.pos)
backVecRot = robocup.Point(backVec.perp_ccw())
facing_back_line = (backVecRot.dot(shotVec) < 0)
if not facing_back_line and self.robot.has_ball():
if self.robot.has_chipper():
self.robot.chip(1)
else:
                    self.robot.kick(1)
    def execute_area_marking(self):
        """TODO comment"""
if self.robot.pos.near_point(
robocup.Point(0, 0), self._opponent_avoid_threshold):
self.robot.set_avoid_opponents(False)
else:
self.robot.set_avoid_opponents(True)
goal_target = robocup.Point(0, -constants.Field.GoalDepth / 2.0)
goal_line = robocup.Segment(
robocup.Point(-constants.Field.GoalWidth / 2.0, 0),
robocup.Point(constants.Field.GoalWidth / 2.0, 0))
if self.side is Defender.Side.left:
goal_line.get_pt(1).x = 0
goal_line.get_pt(1).y = 0
if self.side is Defender.Side.right:
goal_line.get_pt(0).x = 0
goal_line.get_pt(0).y = 0
for robot in main.system_state().their_robots:
self._win_eval.excluded_robots.append(robot)
if main.root_play().goalie_id is not None:
self._win_eval.excluded_robots.append(main.our_robot_with_id(
main.root_play().goalie_id))
# TODO (cpp line 186)
# windows = self._win_eval.
windows = []
best = None
angle = 0.0
for window in windows:
if best is None:
best = window
angle = window.a0 - window.a1
elif window.a0 - window.a1 > angle:
best = window
angle = window.a0 - window.a1
shootline = robocup.Segment(robocup.Point(0, 0), robocup.Point(0, 0))
if best is not None:
angle = (best.a0 + best.a1) / 2.0
shootline = robocup.Segment(
self._win_eval.origin(), robocup.Point.direction(angle * (
math.pi / 180.0))) # FIXME :no origin.
main.debug_drawer().draw_line(shootline, (255, 0, 0), "Defender")
need_task = False
if best is not None:
winseg = best.segment
arc = robocup.Circle(robocup.Point(0, 0), self._defend_goal_radius)
shot = robocup.Line(shootline.get_pt(0), shootline.get_pt(1))
dest = [robocup.Point(0, 0), robocup.Point(0, 0)]
intersected, dest[0], dest[1] = shot.intersects_circle(arc)
if intersected:
self.robot.move_to(dest[0] if dest[0].y > 0 else dest[1])
else:
need_task = True
if need_task:
self.robot.face(main.ball().pos)
if main.ball().pos.y < constants.Field.Length / 2.0:
self.robot.set_dribble_speed(255)
backVec = robocup.Point(1, 0)
backPos = robocup.Point(-constants.Field.Width / 2, 0)
shotVec = robocup.Point(main.ball().pos - self.robot.pos)
backVecRot = robocup.Point(backVec.perp_ccw())
facing_back_line = (backVecRot.dot(shotVec) < 0)
if not facing_back_line and self.robot.has_ball():
if self.robot.has_chipper():
self.robot.chip(1)
else:
self.robot.kick(1)
def find_robot_to_block(self):
target = None
for robot in main.system_state().their_robots:
if robot.visible and self._area.contains_point(robot.pos):
if target is None or target.pos.dist_to(main.ball(
).pos) > robot.pos.dist_to(main.ball().pos):
target = robot
return target
@property
def block_robot(self):
return self._block_robot
@block_robot.setter
def block_robot(self, value):
self._block_robot = value
@property
def side(self):
return self._side
@side.setter
def side(self, value):
self._side = value
self._area = robocup.Rect(
robocup.Point(-constants.Field.Width / 2.0,
constants.Field.Length),
robocup.Point(constants.Field.Width / 2.0, 0))
if self._side is Defender.Side.right:
self._area.get_pt(0).x = 0
if self._side is Defender.Side.left:
self._area.get_pt(1).x = 0
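# Illustrative usage sketch (hypothetical; in practice a play owns this
# behavior and the framework drives its state machine): assign a Defender to
# the left zone, or reassign it later via the `side` property, which also
# rebuilds the guarded area.
#   defender = Defender(side=Defender.Side.left)
#   defender.side = Defender.Side.right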
|
|
import unittest
class FakeRecord(object):
def __init__(self, msg, extra={}):
self.msg = msg
for k, v in extra.items():
setattr(self, k, v)
class StatzHandlerTests(unittest.TestCase):
def cls(self):
from statzlogger import StatzHandler as cls
return cls
def init(self, *args, **kwargs):
return self.cls()(*args, **kwargs)
def test_getindices_index(self):
obj = self.init()
record = FakeRecord("foo", extra=dict(
index="index",
))
indices = obj.getindices(record)
self.assertEqual(indices, ["index"])
def test_getindices_indices_tuple(self):
obj = self.init()
record = FakeRecord("foo", extra=dict(
indices=("index1", "index2"),
))
indices = obj.getindices(record)
self.assertEqual(indices, ["index1", "index2"])
def test_getindices_both(self):
obj = self.init()
record = FakeRecord("foo", extra=dict(
index="index",
indices=("index1", "index2"),
))
indices = obj.getindices(record)
self.assertEqual(indices, ["index1", "index2", "index"])
def test_getindices_none(self):
obj = self.init()
record = FakeRecord("record")
indices = obj.getindices(record)
self.assertEqual(indices, [None])
def test_getvalue_value(self):
obj = self.init()
record = FakeRecord("wrongvalue",
extra=dict(value="rightvalue"))
value = obj.getvalue(record)
self.assertEqual(value, "rightvalue")
def test_getvalue_str(self):
obj = self.init()
record = FakeRecord("value")
value = obj.getvalue(record)
self.assertEqual(value, "value")
def test_getvalue_int(self):
obj = self.init()
record = FakeRecord(1)
value = obj.getvalue(record)
self.assertEqual(value, 1)
def test_getvalue_seq(self):
obj = self.init()
record = FakeRecord((1,2,3))
value = obj.getvalue(record)
self.assertEqual(value, (1,2,3))
def test_emitvalue(self):
obj = self.init()
obj.emitvalue("value", "index")
self.assertEqual(obj.indices["index"], "value")
obj.emitvalue("otherval", "index")
self.assertEqual(obj.indices["index"], "otherval")
class SumTests(unittest.TestCase):
def cls(self):
from statzlogger import Sum as cls
return cls
def init(self, *args, **kwargs):
return self.cls()(*args, **kwargs)
def test_emitvalue(self):
obj = self.init()
obj.emitvalue(1, "index")
self.assertEqual(obj.indices, {"index": 1})
obj.emitvalue(2, "index")
self.assertEqual(obj.indices, {"index": 3})
def test_emitvalue_with_default(self):
obj = self.init(default=10)
obj.emitvalue(1, "index")
self.assertEqual(obj.indices, {"index": 11})
obj.emitvalue(1, "index")
self.assertEqual(obj.indices, {"index": 12})
class CollectionTests(unittest.TestCase):
def cls(self):
from statzlogger import Collection as cls
return cls
def init(self, *args, **kwargs):
return self.cls()(*args, **kwargs)
def test_getvalue_str(self):
obj = self.init()
record = FakeRecord("value")
value = obj.getvalue(record)
self.assertEqual(value, ["value"])
def test_getvalue_int(self):
obj = self.init()
record = FakeRecord(1)
value = obj.getvalue(record)
self.assertEqual(value, [1])
def test_getvalue_seq(self):
obj = self.init()
record = FakeRecord((1,2,3))
value = obj.getvalue(record)
self.assertEqual(value, [(1,2,3)])
def test_emitvalue(self):
obj = self.init()
obj.emitvalue([1], "index")
self.assertEqual(obj.indices, {"index": [1]})
obj.emitvalue([1], "index")
self.assertEqual(obj.indices, {"index": [1, 1]})
class MaximumTests(unittest.TestCase):
def cls(self):
from statzlogger import Maximum as cls
return cls
def init(self, *args, **kwargs):
return self.cls()(*args, **kwargs)
def test_getvalue_str(self):
obj = self.init()
record = FakeRecord("value")
value = obj.getvalue(record)
self.assertEqual(value, [("value", 1)])
def test_getvalue_int(self):
obj = self.init()
record = FakeRecord(1)
value = obj.getvalue(record)
self.assertEqual(value, [(1, 1)])
def test_getvalue_seq(self):
obj = self.init()
record = FakeRecord((1,2,3))
value = obj.getvalue(record)
self.assertEqual(value, [((1,2,3), 1)])
def test_getvalue_weighted(self):
obj = self.init()
record = FakeRecord("value", extra=dict(weight=10))
value = obj.getvalue(record)
self.assertEqual(value, [("value", 10)])
def test_emitvalue(self):
obj = self.init()
obj.emitvalue([("value", 1)], "index")
self.assertEqual(obj.indices["index"], [("value", 1)])
obj.emitvalue([("value", 2)], "index")
self.assertEqual(len(obj.indices), 1)
self.assertEqual(obj.indices["index"][0], ("value", 2))
def test_emitvalue_size(self):
obj = self.init(size=3)
for i in range(5):
obj.emitvalue([("value%d" % i, i)], "index")
self.assertEqual(len(obj.indices["index"]), 3)
self.assertEqual(obj.indices["index"][0], ("value4", 4))
self.assertEqual(obj.indices["index"][-1], ("value2", 2))
class MinimumTests(unittest.TestCase):
def cls(self):
from statzlogger import Minimum as cls
return cls
def init(self, *args, **kwargs):
return self.cls()(*args, **kwargs)
def test_emitvalue(self):
obj = self.init()
obj.emitvalue([("value1", 1)], "index")
self.assertEqual(obj.indices["index"], [("value1", 1)])
obj.emitvalue([("value2", 2)], "index")
self.assertEqual(len(obj.indices["index"]), 2)
self.assertEqual(obj.indices["index"][0], ("value1", 1))
class SetTests(unittest.TestCase):
def cls(self):
from statzlogger import Set as cls
return cls
def init(self, *args, **kwargs):
return self.cls()(*args, **kwargs)
def test_getvalue_str(self):
obj = self.init()
record = FakeRecord("value")
value = obj.getvalue(record)
self.assertEqual(value, set(["value"]))
def test_getvalue_int(self):
obj = self.init()
record = FakeRecord(1)
value = obj.getvalue(record)
self.assertEqual(value, set([1]))
def test_getvalue_seq(self):
obj = self.init()
record = FakeRecord(["one", "two"])
value = obj.getvalue(record)
self.assertEqual(value, set(("one", "two")))
def test_emitvalue(self):
obj = self.init()
obj.emitvalue(["value"], "index")
self.assertEqual(obj.indices["index"], set(["value"]))
obj.emitvalue(["value"], "index")
self.assertEqual(obj.indices["index"], set(["value"]))
def test_emitvalue_size(self):
obj = self.init(size=1)
obj.emitvalue(["value"], "index")
self.assertEqual(obj.indices["index"], set(["value"]))
obj.emitvalue(["value"], "index")
        self.assertEqual(len(obj.indices), 0)
if __name__ == "__main__":
unittest.main()
|
|
#!/usr/bin/python
#
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""OpenType-related data."""
__author__ = 'roozbeh@google.com (Roozbeh Pournader)'
from nototools import unicode_data
OMPL = {}
def _set_ompl():
"""Set up OMPL.
OMPL is defined to be the list of mirrored pairs in Unicode 5.1:
http://www.microsoft.com/typography/otspec/ttochap1.htm#ltrrtl
"""
global OMPL
unicode_data.load_data()
bmg_data = unicode_data._bidi_mirroring_glyph_data
OMPL = {char:bmg for (char, bmg) in bmg_data.items()
if float(unicode_data.age(char)) <= 5.1}
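# Illustrative entry (hedged; the exact key/value types come from unicode_data's
# bidi-mirroring table): the paired parentheses are mirrored and long predate
# Unicode 5.1, so OMPL should map U+0028 (LEFT PARENTHESIS) to U+0029.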
ZWSP = [0x200B]
JOINERS = [0x200C, 0x200D]
BIDI_MARKS = [0x200E, 0x200F]
DOTTED_CIRCLE = [0x25CC]
# From the various script-specific specs at
# http://www.microsoft.com/typography/SpecificationsOverview.mspx
SPECIAL_CHARACTERS_NEEDED = {
'Arab': JOINERS + BIDI_MARKS + DOTTED_CIRCLE,
'Beng': ZWSP + JOINERS + DOTTED_CIRCLE,
'Bugi': ZWSP + JOINERS + DOTTED_CIRCLE,
'Deva': ZWSP + JOINERS + DOTTED_CIRCLE,
'Gujr': ZWSP + JOINERS + DOTTED_CIRCLE,
'Guru': ZWSP + JOINERS + DOTTED_CIRCLE,
# Hangul may not need the special characters:
# https://code.google.com/p/noto/issues/detail?id=147#c2
# 'Hang': ZWSP + JOINERS,
'Hebr': BIDI_MARKS + DOTTED_CIRCLE,
'Java': ZWSP + JOINERS + DOTTED_CIRCLE,
'Khmr': ZWSP + JOINERS + DOTTED_CIRCLE,
'Knda': ZWSP + JOINERS + DOTTED_CIRCLE,
'Laoo': ZWSP + DOTTED_CIRCLE,
'Mlym': ZWSP + JOINERS + DOTTED_CIRCLE,
'Mymr': ZWSP + JOINERS + DOTTED_CIRCLE,
'Orya': ZWSP + JOINERS + DOTTED_CIRCLE,
'Sinh': ZWSP + JOINERS + DOTTED_CIRCLE,
'Syrc': JOINERS + BIDI_MARKS + DOTTED_CIRCLE,
'Taml': ZWSP + JOINERS + DOTTED_CIRCLE,
'Telu': ZWSP + JOINERS + DOTTED_CIRCLE,
'Thaa': BIDI_MARKS + DOTTED_CIRCLE,
'Thai': ZWSP + DOTTED_CIRCLE,
'Tibt': ZWSP + JOINERS + DOTTED_CIRCLE,
}
# www.microsoft.com/typography/otspec/os2.html#ur
# bit, block name, block range
_unicoderange_data = """0\tBasic Latin\t0000-007F
1\tLatin-1 Supplement\t0080-00FF
2\tLatin Extended-A\t0100-017F
3\tLatin Extended-B\t0180-024F
4\tIPA Extensions\t0250-02AF
\tPhonetic Extensions\t1D00-1D7F
\tPhonetic Extensions Supplement\t1D80-1DBF
5\tSpacing Modifier Letters\t02B0-02FF
\tModifier Tone Letters\tA700-A71F
6\tCombining Diacritical Marks\t0300-036F
\tCombining Diacritical Marks Supplement\t1DC0-1DFF
7\tGreek and Coptic\t0370-03FF
8\tCoptic\t2C80-2CFF
9\tCyrillic\t0400-04FF
\tCyrillic Supplement\t0500-052F
\tCyrillic Extended-A\t2DE0-2DFF
\tCyrillic Extended-B\tA640-A69F
10\tArmenian\t0530-058F
11\tHebrew\t0590-05FF
12\tVai\tA500-A63F
13\tArabic\t0600-06FF
\tArabic Supplement\t0750-077F
14\tNKo\t07C0-07FF
15\tDevanagari\t0900-097F
16\tBengali\t0980-09FF
17\tGurmukhi\t0A00-0A7F
18\tGujarati\t0A80-0AFF
19\tOriya\t0B00-0B7F
20\tTamil\t0B80-0BFF
21\tTelugu\t0C00-0C7F
22\tKannada\t0C80-0CFF
23\tMalayalam\t0D00-0D7F
24\tThai\t0E00-0E7F
25\tLao\t0E80-0EFF
26\tGeorgian\t10A0-10FF
\tGeorgian Supplement\t2D00-2D2F
27\tBalinese\t1B00-1B7F
28\tHangul Jamo\t1100-11FF
29\tLatin Extended Additional\t1E00-1EFF
\tLatin Extended-C\t2C60-2C7F
\tLatin Extended-D\tA720-A7FF
30\tGreek Extended\t1F00-1FFF
31\tGeneral Punctuation\t2000-206F
\tSupplemental Punctuation\t2E00-2E7F
32\tSuperscripts And Subscripts\t2070-209F
33\tCurrency Symbols\t20A0-20CF
34\tCombining Diacritical Marks For Symbols\t20D0-20FF
35\tLetterlike Symbols\t2100-214F
36\tNumber Forms\t2150-218F
37\tArrows\t2190-21FF
\tSupplemental Arrows-A\t27F0-27FF
\tSupplemental Arrows-B\t2900-297F
\tMiscellaneous Symbols and Arrows\t2B00-2BFF
38\tMathematical Operators\t2200-22FF
\tSupplemental Mathematical Operators\t2A00-2AFF
\tMiscellaneous Mathematical Symbols-A\t27C0-27EF
\tMiscellaneous Mathematical Symbols-B\t2980-29FF
39\tMiscellaneous Technical\t2300-23FF
40\tControl Pictures\t2400-243F
41\tOptical Character Recognition\t2440-245F
42\tEnclosed Alphanumerics\t2460-24FF
43\tBox Drawing\t2500-257F
44\tBlock Elements\t2580-259F
45\tGeometric Shapes\t25A0-25FF
46\tMiscellaneous Symbols\t2600-26FF
47\tDingbats\t2700-27BF
48\tCJK Symbols And Punctuation\t3000-303F
49\tHiragana\t3040-309F
50\tKatakana\t30A0-30FF
\tKatakana Phonetic Extensions\t31F0-31FF
51\tBopomofo\t3100-312F
\tBopomofo Extended\t31A0-31BF
52\tHangul Compatibility Jamo\t3130-318F
53\tPhags-pa\tA840-A87F
54\tEnclosed CJK Letters And Months\t3200-32FF
55\tCJK Compatibility\t3300-33FF
56\tHangul Syllables\tAC00-D7AF
57\tNon-Plane 0 *\tD800-DFFF
58\tPhoenician\t10900-1091F
59\tCJK Unified Ideographs\t4E00-9FFF
\tCJK Radicals Supplement\t2E80-2EFF
\tKangxi Radicals\t2F00-2FDF
\tIdeographic Description Characters\t2FF0-2FFF
\tCJK Unified Ideographs Extension A\t3400-4DBF
\tCJK Unified Ideographs Extension B\t20000-2A6DF
\tKanbun\t3190-319F
60\tPrivate Use Area (plane 0)\tE000-F8FF
61\tCJK Strokes\t31C0-31EF
\tCJK Compatibility Ideographs\tF900-FAFF
\tCJK Compatibility Ideographs Supplement\t2F800-2FA1F
62\tAlphabetic Presentation Forms\tFB00-FB4F
63\tArabic Presentation Forms-A\tFB50-FDFF
64\tCombining Half Marks\tFE20-FE2F
65\tVertical Forms\tFE10-FE1F
\tCJK Compatibility Forms\tFE30-FE4F
66\tSmall Form Variants\tFE50-FE6F
67\tArabic Presentation Forms-B\tFE70-FEFF
68\tHalfwidth And Fullwidth Forms\tFF00-FFEF
69\tSpecials\tFFF0-FFFF
70\tTibetan\t0F00-0FFF
71\tSyriac\t0700-074F
72\tThaana\t0780-07BF
73\tSinhala\t0D80-0DFF
74\tMyanmar\t1000-109F
75\tEthiopic\t1200-137F
\tEthiopic Supplement\t1380-139F
\tEthiopic Extended\t2D80-2DDF
76\tCherokee\t13A0-13FF
77\tUnified Canadian Aboriginal Syllabics\t1400-167F
78\tOgham\t1680-169F
79\tRunic\t16A0-16FF
80\tKhmer\t1780-17FF
\tKhmer Symbols\t19E0-19FF
81\tMongolian\t1800-18AF
82\tBraille Patterns\t2800-28FF
83\tYi Syllables\tA000-A48F
\tYi Radicals\tA490-A4CF
84\tTagalog\t1700-171F
\tHanunoo\t1720-173F
\tBuhid\t1740-175F
\tTagbanwa\t1760-177F
85\tOld Italic\t10300-1032F
86\tGothic\t10330-1034F
87\tDeseret\t10400-1044F
88\tByzantine Musical Symbols\t1D000-1D0FF
\tMusical Symbols\t1D100-1D1FF
\tAncient Greek Musical Notation\t1D200-1D24F
89\tMathematical Alphanumeric Symbols\t1D400-1D7FF
90\tPrivate Use (plane 15)\tF0000-FFFFD
\tPrivate Use (plane 16)\t100000-10FFFD
91\tVariation Selectors\tFE00-FE0F
\tVariation Selectors Supplement\tE0100-E01EF
92\tTags\tE0000-E007F
93\tLimbu\t1900-194F
94\tTai Le\t1950-197F
95\tNew Tai Lue\t1980-19DF
96\tBuginese\t1A00-1A1F
97\tGlagolitic\t2C00-2C5F
98\tTifinagh\t2D30-2D7F
99\tYijing Hexagram Symbols\t4DC0-4DFF
100\tSyloti Nagri\tA800-A82F
101\tLinear B Syllabary\t10000-1007F
\tLinear B Ideograms\t10080-100FF
\tAegean Numbers\t10100-1013F
102\tAncient Greek Numbers\t10140-1018F
103\tUgaritic\t10380-1039F
104\tOld Persian\t103A0-103DF
105\tShavian\t10450-1047F
106\tOsmanya\t10480-104AF
107\tCypriot Syllabary\t10800-1083F
108\tKharoshthi\t10A00-10A5F
109\tTai Xuan Jing Symbols\t1D300-1D35F
110\tCuneiform\t12000-123FF
\tCuneiform Numbers and Punctuation\t12400-1247F
111\tCounting Rod Numerals\t1D360-1D37F
112\tSundanese\t1B80-1BBF
113\tLepcha\t1C00-1C4F
114\tOl Chiki\t1C50-1C7F
115\tSaurashtra\tA880-A8DF
116\tKayah Li\tA900-A92F
117\tRejang\tA930-A95F
118\tCham\tAA00-AA5F
119\tAncient Symbols\t10190-101CF
120\tPhaistos Disc\t101D0-101FF
121\tCarian\t102A0-102DF
\tLycian\t10280-1029F
\tLydian\t10920-1093F
122\tDomino Tiles\t1F030-1F09F
\tMahjong Tiles\t1F000-1F02F
"""
ur_data = []
ur_bucket_info = [[] for i in range(128)]
def _setup_unicoderange_data():
"""The unicoderange data used in the os/2 table consists of slightly under
128 'buckets', each of which consists of one or more 'ranges' of codepoints.
    Each range has a name, start, and end. Bucket 57 is special: it consists of
all non-BMP codepoints and overlaps the other ranges, though in the data it
corresponds to the high and low UTF-16 surrogate code units. The other ranges
are all disjoint.
We build two tables. ur_data is a list of the ranges, consisting of the
start, end, bucket index, and name. It is sorted by range start. ur_bucket_info
is a list of buckets in bucket index order; each entry is a list of the tuples
in ur_data that belong to that bucket.
This is called by functions that require these tables. On first use it builds
ur_data and ur_bucket_info, which should remain unchanged thereafter."""
if ur_data:
return
index = 0
for line in _unicoderange_data.splitlines():
index_str, name, urange = line.split('\t')
range_start_str, range_end_str = urange.split('-')
range_start = int(range_start_str, 16)
range_end = int(range_end_str, 16)
if index_str:
index = int(index_str)
tup = (range_start, range_end, index, name)
ur_data.append(tup)
ur_bucket_info[index].append(tup)
ur_data.sort()
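# Illustrative shape of the two tables once _setup_unicoderange_data() has run
# (values taken from the first line of _unicoderange_data above):
#   ur_data[0]        -> (0x0000, 0x007F, 0, 'Basic Latin')
#   ur_bucket_info[0] -> [(0x0000, 0x007F, 0, 'Basic Latin')]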
def collect_unicoderange_info(cmap):
"""Return a list of 2-tuples, the first element a count of the characters in a
range, the second element the 4-tuple of information about that range: start,
end, bucket number, and name. Only ranges for which the cmap has a character
are included."""
_setup_unicoderange_data()
range_count = 0
index = 0
limit = len(ur_data)
result = []
for cp in sorted(cmap):
while index < limit:
tup = ur_data[index]
if cp <= tup[1]:
range_count += 1
break
if range_count:
result.append((range_count, ur_data[index]))
range_count = 0
index += 1
if range_count:
result.append((range_count, ur_data[index]))
return result
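# Usage sketch with a hypothetical three-character cmap: following the loop
# above, the two Latin capitals land in bucket 0 and GREEK CAPITAL LETTER ALPHA
# in bucket 7, so the result would be
#   collect_unicoderange_info({0x41, 0x42, 0x391})
#   -> [(2, (0x0000, 0x007F, 0, 'Basic Latin')),
#       (1, (0x0370, 0x03FF, 7, 'Greek and Coptic'))]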
def unicoderange_bucket_info_name(bucket_info):
return ', '.join(t[3] for t in bucket_info)
def unicoderange_bucket_info_size(bucket_info):
return sum(t[1] - t[0] + 1 for t in bucket_info)
def unicoderange_bucket_index_to_info(bucket_index):
if bucket_index < 0 or bucket_index >= 128:
raise ValueError('bucket_index %s out of range' % bucket_index)
_setup_unicoderange_data()
return ur_bucket_info[bucket_index]
def unicoderange_bucket_index_to_name(bucket_index):
return unicoderange_bucket_info_name(unicoderange_bucket_index_to_info(bucket_index))
if not OMPL:
_set_ompl()
|
|
"""Wrapper for `tlgu` command line utility.
Original software at: ``http://tlgu.carmen.gr/``.
TLGU software written by Dimitri Marinakis and available at
`<http://tlgu.carmen.gr/>`_ under GPLv2 license.
TODO: the arguments to ``convert_corpus()`` need some rationalization, and
``divide_works()`` should be incorporated into it.
"""
__author__ = [
"Kyle P. Johnson <kyle@kyle-p-johnson.com>",
"Stephen Margheim <stephen.margheim@gmail.com>",
]
__license__ = "MIT License. See LICENSE."
import os
import subprocess
from cltk.core.cltk_logger import logger
from cltk.core.exceptions import CLTKException
from cltk.data.fetch import FetchCorpus
from cltk.utils.file_operations import make_cltk_path
from cltk.utils.utils import query_yes_no
# this currently not in use
ARGS = {
"book_breaks": "-b",
"page_breaks": "-p",
"lat_text": "-r",
"level_1": "-v",
"level_2": "-w",
"level_3": "-x",
"level_4": "-y",
"level_5": "-z",
"line_tab": "-B",
"higher_levels": "-X",
"lower_levels": "-Y",
"no_spaces": "-N", # rm_newlines
"citation_debug": "-C",
"code_debug": "-S",
"verbose": "-V",
"split_works": "-W",
}
class TLGU:
"""Check, install, and call TLGU."""
def __init__(self, interactive=True):
"""Check whether tlgu is installed, if not, import and install."""
self.interactive = interactive
self._check_and_download_tlgu_source()
self._check_install()
def _check_and_download_tlgu_source(self):
"""Check if tlgu downloaded, if not download it."""
path = make_cltk_path("grc/software/grc_software_tlgu/tlgu.h")
if not os.path.isfile(path):
dl_msg = f"This part of the CLTK depends upon TLGU, software written by Dimitri Marinakis `<http://tlgu.carmen.gr/>`_."
print(dl_msg)
repo_url = "https://github.com/cltk/grc_software_tlgu.git"
dl_dir = os.path.split(path)[0]
dl_question = (
f"Do you want to download TLGU from '{repo_url}' to '{dl_dir}'?"
)
if self.interactive:
do_download = query_yes_no(question=dl_question)
else:
do_download = True
if do_download:
fetch_corpus = FetchCorpus(language="grc")
fetch_corpus.import_corpus(corpus_name="grc_software_tlgu")
else:
raise CLTKException(f"TLGU software required for this class to work.")
def _check_install(self):
"""Check if tlgu installed, if not install it."""
try:
subprocess.check_output(["which", "tlgu"])
except subprocess.SubprocessError as sub_err:
print("TLGU not installed.")
logger.info("TLGU not installed: %s", sub_err)
logger.info("Installing TLGU.")
if not subprocess.check_output(["which", "gcc"]):
logger.error("GCC seems not to be installed.")
else:
tlgu_path = make_cltk_path("grc/software/grc_software_tlgu")
if self.interactive:
install_question = "Do you want to install TLGU?"
do_install = query_yes_no(question=install_question)
if not do_install:
raise CLTKException(
"TLGU installation required for this class to work."
)
else:
print("Non-interactive installation. Continuing ...")
command = "cd {0} && make install".format(tlgu_path)
print(f"Going to run command: ``{command}``")
try:
p_out = subprocess.call(command, shell=True)
except subprocess.SubprocessError as sub_err:
print(
"Error executing installation. Going to check output of ``subprocess.call()`` ..."
)
raise CLTKException(sub_err)
if p_out == 0:
msg = "TLGU installed."
print(msg)
logger.info(msg)
return True
else:
msg = "TLGU install without sudo failed. Going to try again with sudo (usually required for Linux) ..."
print(msg)
logger.error(msg)
command = "cd {0} && sudo make install".format(tlgu_path)
if self.interactive:
install_question = "Do you want to install TLGU? with sudo?"
do_install = query_yes_no(question=install_question)
if not do_install:
raise CLTKException(
"TLGU installation required for this class to work."
)
p_out = subprocess.call(command, shell=True)
else:
print("Going to run command:", command)
p_out = subprocess.call(command, shell=True)
if p_out == 0:
msg = "TLGU installed."
print(msg)
logger.info(msg)
else:
msg = "TLGU install with sudo failed."
print(msg)
logger.error(msg)
raise CLTKException(
"TLGU installation required for this class to work."
)
@staticmethod
def convert(
input_path=None,
output_path=None,
markup=None,
rm_newlines=False,
divide_works=False,
lat=False,
extra_args=None,
):
"""
Do conversion.
:param input_path: TLG filepath to convert.
:param output_path: filepath of new converted text.
:param markup: Specificity of inline markup. Default None removes all numerical markup; 'full' gives most detailed, with reference numbers included before each text line.
:param rm_newlines: No spaces; removes line ends and hyphens before an ID code; hyphens and spaces before page and column ends are retained.
:param divide_works: Each work (book) is output as a separate file in the form output_file-xxx.txt; if an output file is not specified, this option has no effect.
:param lat: Primarily Latin text (PHI). Some TLG texts, notably doccan1.txt and doccan2.txt are mostly roman texts lacking explicit language change codes. Setting this option will force a change to Latin text after each citation block is encountered.
:param extra_args: Any other tlgu args to be passed, in list form and without dashes, e.g.: ['p', 'b', 'B'].
"""
# setup file paths
input_path = os.path.expanduser(input_path)
output_path = os.path.expanduser(output_path)
# check input path exists
assert os.path.isfile(input_path), "File {0} does not exist.".format(input_path)
# setup tlgu flags
tlgu_options = []
if markup == "full":
full_args = ["v", "w", "x", "y", "z"]
[tlgu_options.append(x) for x in full_args] # pylint: disable=W0106
if rm_newlines:
tlgu_options.append("N")
if divide_works:
tlgu_options.append("W")
if lat:
tlgu_options.append("r")
# setup extra args
if extra_args is None:
extra_args = []
else:
try:
extra_args = list(extra_args)
except Exception as exc:
logger.error("Argument 'extra_args' must be a list: %s.", exc)
raise
tlgu_options = tlgu_options + extra_args
# assemble all tlgu flags
tlgu_options = list(set(tlgu_options))
if tlgu_options:
tlgu_flags = "-" + " -".join(tlgu_options)
else:
tlgu_flags = ""
# make tlgu call
tlgu_call = "tlgu {0} {1} {2}".format(tlgu_flags, input_path, output_path)
logger.info(tlgu_call)
try:
p_out = subprocess.call(tlgu_call, shell=True)
if p_out == 1:
logger.error("Failed to convert %s to %s.", input_path, output_path)
except Exception as exc:
logger.error("Failed to convert %s to %s: %s", input_path, output_path, exc)
raise
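    # Usage sketch (hypothetical file paths): convert a single TLG file to
    # plain text, stripping line ends and writing each work to its own file.
    #   TLGU.convert(
    #       input_path="~/cltk_data/originals/tlg/TLG0012.TXT",
    #       output_path="~/cltk_data/grc/text/tlg/plaintext/TLG0012.TXT",
    #       rm_newlines=True,
    #       divide_works=True,
    #   )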
def convert_corpus(self, corpus, markup=None, lat=None): # pylint: disable=W0613
"""Look for imported TLG or PHI files and convert them all to
``~/cltk_data/grc/text/tlg/<plaintext>``.
TODO: Add markup options to input.
TODO: Add rm_newlines, divide_works, and extra_args
"""
orig_path = make_cltk_path("originals")
target_path = make_cltk_path()
assert corpus in [
"tlg",
"phi5",
"phi7",
], "Corpus must be 'tlg', 'phi5', or 'phi7'"
if corpus in ["tlg", "phi5", "phi7"]:
orig_path = os.path.join(orig_path, corpus)
if corpus in ["tlg", "phi7"]:
                if corpus == "phi7" and lat is True:
lat = True
target_path = os.path.join(target_path, "lat", "text", corpus)
else:
lat = None
target_path = os.path.join(target_path, "grc", "text", corpus)
else:
target_path = os.path.join(target_path, "lat", "text", corpus)
lat = True
try:
corpus_files = os.listdir(orig_path)
except Exception as exception:
logger.error("Failed to find TLG files: %s", exception)
raise
# make a list of files to be converted
txts = [x for x in corpus_files if x.endswith("TXT")]
# loop through list and convert one at a time
for txt in txts:
orig_txt_path = os.path.join(orig_path, txt)
if markup is None:
target_txt_dir = os.path.join(target_path, "plaintext")
else:
target_txt_dir = os.path.join(target_path, str(markup))
if not os.path.isdir(target_txt_dir):
os.makedirs(target_txt_dir)
target_txt_path = os.path.join(target_txt_dir, txt)
try:
self.convert(
orig_txt_path,
target_txt_path,
markup=False,
rm_newlines=False,
divide_works=False,
lat=lat,
extra_args=None,
)
except Exception as exception:
logger.error(
"Failed to convert file '%s' to '%s': %s",
orig_txt_path,
target_txt_path,
exception,
)
def divide_works(self, corpus):
"""Use the work-breaking option.
TODO: Maybe incorporate this into ``convert_corpus()``
TODO: Write test for this
"""
if corpus == "tlg":
orig_dir = make_cltk_path("originals/tlg")
works_dir = make_cltk_path("grc/text/tlg/individual_works")
file_prefix = "TLG"
lat = False
elif corpus == "phi5":
orig_dir = make_cltk_path("originals/phi5")
works_dir = make_cltk_path("lat/text/phi5/individual_works")
file_prefix = "LAT"
lat = True # this is for the optional TLGU argument to convert()
elif corpus == "phi7":
raise CLTKException("``phi7`` cannot be divided into individual works.")
else:
raise CLTKException(f"Invalid corpus '{corpus}'. This should never happen.")
if not os.path.exists(works_dir):
os.makedirs(works_dir)
files = os.listdir(orig_dir)
texts = [x for x in files if x.endswith(".TXT") and x.startswith(file_prefix)]
for file in texts:
orig_file_path = os.path.join(orig_dir, file)
new_file_path = os.path.join(works_dir, file)
try:
self.convert(orig_file_path, new_file_path, divide_works=True, lat=lat)
logger.info("Writing files at %s to %s.", orig_file_path, works_dir)
except Exception as err:
logger.error("Failed to convert files: %s.", err)
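    # Usage sketch (assumes the corpus has already been imported to
    # ~/cltk_data/originals/): TLGU().divide_works("phi5")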
# assemble_tlg_author_filepaths
|
|
import math
import sys
import random
DEBUG = False
#------- Some "Helper" functions ---------------
# Segregating out instances that take a particular value
# attributearray is an N x 1 array.
def segregate(attributearray, value):
outlist = []
for i in range(0, len(attributearray)):
if(attributearray[i] == value):
outlist.append(i)
return outlist
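# For example (arbitrary values): segregate([1, 2, 1, 3], 1) -> [0, 2],
# i.e. the indices at which the requested value occurs.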
def unique_labels(labels):
unique_labels = []
for e in labels:
if (e not in unique_labels):
unique_labels.append(e)
return unique_labels
# Assuming labels take values 1..M.
def computeEntropy(labels):
entropy = 0.0
for v in unique_labels(labels):
probability_i = (len(segregate(labels, v)) * 1.0) / len(labels)
entropy -= probability_i * math.log(probability_i, 2)
return entropy
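# For example, a perfectly balanced binary labelling carries one bit of entropy:
# computeEntropy([1, 1, 0, 0]) -> 1.0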
def computeEntropyAttrCount(attributeCount):
entropy = 0.0
total = 0
for v in attributeCount.values():
total = total + v
for k in attributeCount.keys():
probability_i = (attributeCount[k] * 1.0) / total
entropy -= probability_i * math.log(probability_i, 2)
return entropy
# Find most frequent value. Assuming labels take values 1..M
def mostFrequentlyOccurringValue(labels):
bestCount = float('-inf')
bestId = None
for v in unique_labels(labels):
count_i = len(segregate(labels, v))
if (count_i > bestCount):
bestCount = count_i
bestId = v
return bestId
def transpose(matrix):
if(len(matrix) < 1):
return matrix
else:
new_matrix = []
for k in range(len(matrix[0])):
new_matrix.append([])
for i in range(len(matrix)):
for j in range(len(matrix[i])):
new_matrix[j].append(matrix[i][j])
if DEBUG:
if(i % 1000 == 0):
print("transposing " + str(i) + " ....")
return new_matrix
def select_labels(labels, ids):
result = []
for i in ids:
result.append(labels[i])
return result
#-------- The Dtree code ------------
#Here "attributes" is an Num-instance x Num-attributes matrix. Each row is
#one training instance.
#"labels" is a Num-instance x 1 array of class labels for the training instances
# Note, we're storing a number of seemingly unnecessary variables, but
# we'll use them later for counting and pruning
class Dtree:
c_nodeGainRatio = None
c_nodeInformationGain = None
c_isLeaf = None
c_majorityClass = None
c_bestAttribute = None
c_children = None
c_parent = None
def buildTree(self, attributes, labels):
print("Number of instances for this run of buildTree: " + str(len(labels)))
if DEBUG:
print("starting build tree...")
numInstances = len(labels)
nodeInformation = numInstances * computeEntropy(labels)
self.c_majorityClass = mostFrequentlyOccurringValue(labels)
if DEBUG:
print("done nodeInfo and c_majority_class....")
print("majorityClass: " + str(self.c_majorityClass))
print("nodeInfo: " + str(nodeInformation))
if(int(nodeInformation) == 0):
self.c_isLeaf = True
return
else:
self.c_isLeaf = False
bestAttribute = None
bestInformationGain = float('-inf')
bestGainRatio = float('-inf')
attributes_t = transpose(attributes)
if DEBUG:
print("done transposing matrix....")
for i in random.sample(range(len(attributes_t)), 1):
attributeCount = {}
conditionalInfo = 0.0
attributeEntropy = 0.0
for v in unique_labels(attributes_t[i]):
if DEBUG:
print("starting work on attribute and value " + str(i) + ", " + str(v))
ids = segregate(attributes_t[i], v)
if DEBUG:
print("done segregating ids...")
print("len ids: " + str(len(ids)))
attributeCount[v] = len(ids)
label_ids = select_labels(labels, ids)
                conditionalInfo += attributeCount[v] * computeEntropy(label_ids)
if DEBUG:
print("done with values...")
attributeInformationGain = nodeInformation - conditionalInfo
if DEBUG:
print("attributeInformationGain: " + str(attributeInformationGain))
compEntAttrCnt = computeEntropyAttrCount(attributeCount)
if compEntAttrCnt != 0:
gainRatio = (attributeInformationGain * 1.0) / compEntAttrCnt
if DEBUG:
print("gainRatio: " + str(gainRatio))
if (gainRatio > bestGainRatio):
bestInformationGain = attributeInformationGain
bestGainRatio = gainRatio
bestAttribute = i
if DEBUG:
print("done with main part....")
#If no attribute provides any gain, this node cannot be split further
        if bestGainRatio <= -sys.maxsize - 1:
            bestGainRatio = -sys.maxsize - 1
        if bestGainRatio >= sys.maxsize:
            bestGainRatio = sys.maxsize
        if int(bestGainRatio) == 0 or bestAttribute is None:
self.c_isLeaf = True
return
# Otherwise split by the best attribute
self.c_bestAttribute = bestAttribute
self.c_nodeGainRatio = bestGainRatio
self.c_nodeInformationGain = bestInformationGain
self.c_children = {}
if DEBUG:
print("\n(inner)building tree...")
print("isLeaf: " + str(self.c_isLeaf))
print("bestAttr: " + str(self.c_bestAttribute))
print("majorityClass: " + str(self.c_majorityClass))
for v in unique_labels(attributes_t[bestAttribute]):
ids = segregate(attributes_t[bestAttribute], v)
new_attributes = []
new_labels = []
for i in ids:
new_attributes.append(attributes[i])
new_labels.append(labels[i])
self.c_children[v] = Dtree(new_attributes, new_labels)
self.c_children[v].c_parent = self
return
def __init__(self, attributes, labels):
self.c_parent = None
self.buildTree(attributes, labels)
if DEBUG:
print("\nbuilding tree...")
print("isLeaf: " + str(self.c_isLeaf))
print("bestAttr: " + str(self.c_bestAttribute))
print("majorityClass: " + str(self.c_majorityClass))
print("parent: " + str(self.c_parent))
if self.c_children:
print("children: " + str(self.c_children.keys()))
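# Minimal usage sketch (hypothetical toy data; kept commented out so importing
# this module stays side-effect free). Each row of `attributes` is a training
# instance and `labels` holds its class:
#   attributes = [[1, 0], [1, 1], [0, 1], [0, 0]]
#   labels = [1, 1, 0, 0]
#   tree = Dtree(attributes, labels)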
|
|
#!/usr/bin/env python
from glob import glob
import os
import numpy
import pyfits
import matplotlib
from matplotlib import pyplot
from matplotlib import cm
from numpy import log, log10, exp, power, pi
from numpy.polynomial.chebyshev import Chebyshev
from scipy.stats import scoreatpercentile
from scipy.special import gamma, gammaincinv
from scipy.optimize import fmin
from scipy.integrate import quad
from scipy.interpolate import interp1d
from RGBImage import *
matplotlib.rcParams.update({'font.size': 16,
'font.family': 'serif',
'font.serif': 'times',
'text.usetex': True})
labelsize = 16
rescolmap = matplotlib.colors.LinearSegmentedColormap.from_list('rescol', ('blue', 'black', 'white', 'red'), N=256, gamma=1.0)
bands = ['u', 'g', 'r', 'i', 'z', 'Y', 'J', 'H', 'K']
w = numpy.array([3543,4770,6231,7625,9134,10305,12483,16313,22010], numpy.float)
#zp = numpy.array([16.75,15.957,15.0,14.563,14.259,14.162,13.955,13.636,13.525])
#zpscale = 10**(-0.4*(zp-15.0))
zp = numpy.array([29.0]*9)
xlim = (2000, 23000)
sim_std = {'MAG': 1.0 + numpy.array([16.935,15.964,15.0,14.562,14.267,14.183,13.992,13.672,13.547])}
sim_A_disk = {'MAG': 1.0 + numpy.array([17.687,16.717,15.753,15.315,15.019,14.936,14.745,14.425,14.299]),
'Re': numpy.array([18.0]*9), 'n': numpy.array([1.0]*9),
'AR': numpy.array([0.4]*9), 'PA': numpy.array([45.0]*9)}
sim_A_bulge = {'MAG': 1.0 + numpy.array([17.687,16.717,15.753,15.315,15.019,14.936,14.745,14.425,14.299]),
'Re': numpy.array([6.0]*9), 'n': numpy.array([4.0]*9),
'AR': numpy.array([0.8]*9), 'PA': numpy.array([45.0]*9)}
sim_D_disk = {'MAG': 1.0 + numpy.array([17.328,16.509,15.753,15.374,15.112,15.056,14.882,14.597,14.5]),
'Re': numpy.array([18.0]*9), 'n': numpy.array([1.0]*9),
'AR': numpy.array([0.4]*9), 'PA': numpy.array([45.0]*9)}
sim_D_bulge = {'MAG': 1.0 + numpy.array([18.229,16.974,15.753,15.258,14.934,14.827,14.623,14.276,14.13]),
'Re': numpy.array([6.0]*9), 'n': numpy.array([4.0]*9),
'AR': numpy.array([0.8]*9), 'PA': numpy.array([45.0]*9)}
sim_E_disk = {'MAG': 1.0 + numpy.array([16.999,16.127,15.753,15.529,15.389,15.41,15.384,15.192,15.281]),
'Re': numpy.array([18.0]*9), 'n': numpy.array([1.0]*9),
'AR': numpy.array([0.8]*9), 'PA': numpy.array([45.0]*9)}
sim_E_bulge = {'MAG': 1.0 + numpy.array([18.546,17.519,15.753,15.084,14.764,14.623,14.433,14.02,13.78]),
'Re': numpy.array([6.0]*9), 'n': numpy.array([4.0]*9),
'AR': numpy.array([0.9]*9), 'PA': numpy.array([45.0]*9)}
marker = ['o', '^', 's', 'D', 'x', '+', '*']
linestyle = [':', '-', '-.', (0, '.-.')]
ylim_std = {'MAG': (19.05, 13.45), 'Re': (6.05, 28.95), 'n': (2.05, 5.95),
'AR': (0.41, 0.79), 'PA': (35.05, 64.95)}
ylim_disk = {'MAG': (20.05, 14.45), 'Re': (13.05, 22.95), 'n': (0.05, 2.95),
'AR': (0.21, 0.89), 'PA': (35.05, 64.95)}
ylim_bulge = {'MAG': (20.05, 14.45), 'Re': (1.05, 12.95), 'n': (2.05, 6.95),
'AR': (0.61, 1.09), 'PA': (0.05, 89.95)}
#varlist_std = ('MAG', 'Re', 'n', 'AR', 'PA')
varlist_std = ('MAG', 'Re', 'n')
labels = {'MAG': '$m$', 'Re': '$R_e$', 'n': '$n$', 'AR': '$b/a$', 'PA': '$\\theta$'}
def ugrizYJHK_cheb(wl):
y = numpy.array([100.000,100.058,100.217,100.541,100.924,101.307,101.631,101.790,101.848])
fn = interp1d(w, y, 'linear', bounds_error=False)
return fn(wl)
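# Evaluating at one of the tabulated wavelengths simply returns that knot value,
# e.g. ugrizYJHK_cheb(6231) -> 100.217 (the r-band entry above).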
wlfuncs = {'A1c': numpy.log10, 'Ah1c': numpy.log10, 'A1e': ugrizYJHK_cheb}
def poster_plots():
plot(('D1', 'D4'), 1, 'D1D4-1', 'True', ylim=ylim_bulge, sim=sim_D_bulge, varlist=('MAG', 'Re', 'n')) # and D6
def plot_all():
plot_standard()
plot_nonparam()
def plot_standard():
plot(('A2', 'A1', 'A3'), 1, '01', 'True', varlist=('MAGNOSUB', 'MAG', 'Re', 'n'))
plot(('Ah2', 'Ah1', 'Ah3'), 1, '02', 'True')
plot(('Bh2', 'Bh1', 'Bh3'), 1, '03', 'True')
plot(('A1e', 'A1c', 'A1'), 1, '04', 'True') # add additional wavelength scale
plot(('Ah1c', 'Ah1'), 1, '04h', 'True') # add additional wavelength scale
plot(('A1a', 'A1', 'A1b'), 1, '05', 'True', varlist=('MAG',))
plot(('Ah1a', 'Ah1', 'Ah1b'), 1, '05h', 'True', varlist=('MAG',))
# plot(('A1', 'A1c', 'A1d'), 1, '06', 'True')
# plot(('Ah1', 'Ah1c', 'Ah1d'), 1, '06h', 'True')
# illustration 7 requires a different kind of plot
plot(('D2', 'D1', 'D3'), 1, '08', 'True', varlist=('MAG', 'Re', 'n', 'AR', 'PA'))
plot(('D1a', 'D1'), 1, '08a', 'True', varlist=('MAG', 'Re', 'n', 'AR', 'PA'))
plot(('A5', 'A4', 'A6'), 1, '09-1', 'True', ylim=ylim_bulge, sim=sim_A_bulge, varlist=('MAG', 'Re', 'n'))
plot(('A5', 'A4', 'A6'), 2, '09-2', 'True', ylim=ylim_disk, sim=sim_A_disk, varlist=('MAG', 'Re'))
plot(('D5', 'D4', 'D6'), 1, '10-1', 'True', ylim=ylim_bulge, sim=sim_D_bulge, varlist=('MAG', 'Re', 'n'))
plot(('D5', 'D4', 'D6'), 2, '10-2', 'True', ylim=ylim_disk, sim=sim_D_disk, varlist=('MAG', 'Re'))
plot(('Dh5', 'Dh4', 'Dh6'), 1, '10h-1', 'True', ylim=ylim_bulge, sim=sim_D_bulge, varlist=('MAG', 'Re', 'n'))
plot(('Dh5', 'Dh4', 'Dh6'), 2, '10h-2', 'True', ylim=ylim_disk, sim=sim_D_disk, varlist=('MAG', 'Re'))
plot(('E5', 'E4', 'E6'), 1, '11-1', 'True', ylim=ylim_bulge, sim=sim_E_bulge, varlist=('MAG', 'Re', 'n'))
plot(('E5', 'E4', 'E6'), 2, '11-2', 'True', ylim=ylim_disk, sim=sim_E_disk, varlist=('MAG', 'Re'))
plot(('Eh5', 'Eh4', 'Eh6'), 1, '11h-1', 'True', ylim=ylim_bulge, sim=sim_E_bulge, varlist=('MAG', 'Re', 'n'))
plot(('Eh5', 'Eh4', 'Eh6'), 2, '11h-2', 'True', ylim=ylim_disk, sim=sim_E_disk, varlist=('MAG', 'Re'))
plot(('Dc5', 'Dc4', 'Dc6'), 1, '12-1', 'True', ylim=ylim_bulge, sim=sim_D_bulge, varlist=('MAG', 'Re', 'n'))
plot(('Dc5', 'Dc4', 'Dc6'), 2, '12-2', 'True', ylim=ylim_disk, sim=sim_D_disk, varlist=('MAG', 'Re'))
def plot_nonparam():
plot(('NA1n', 'NA1'), 1, 'N01', 'True')
plot(('NA2n', 'NA2'), 1, 'N02', 'True')
plot(('NA4n', 'NA4'), 1, 'N03-1', 'True', ylim=ylim_bulge, sim=sim_A_bulge, varlist=('MAG', 'Re', 'n'))
plot(('NA4n', 'NA4'), 2, 'N03-2', 'True', ylim=ylim_disk, sim=sim_A_disk, varlist=('MAG', 'Re'))
plot(('NB4n', 'NB4'), 1, 'N04-1', 'True', ylim=ylim_bulge, sim=sim_A_bulge, varlist=('MAG', 'Re', 'n'))
plot(('NB4n', 'NB4'), 2, 'N04-2', 'True', ylim=ylim_disk, sim=sim_A_disk, varlist=('MAG', 'Re'))
plot(('NC4n', 'NC4', 'NC4m'), 1, 'N05-1', 'True', ylim=ylim_bulge, sim=sim_A_bulge, varlist=('MAG', 'Re', 'n'))
plot(('NC4n', 'NC4', 'NC4m'), 2, 'N05-2', 'True', ylim=ylim_disk, sim=sim_A_disk, varlist=('MAG', 'Re'))
def plot(id=('A2', 'A1'), compno=1, name='0', show_func=False,
varlist=varlist_std, ylim=ylim_std, sim=sim_std, submag=True,
legends=None):
print name, ':', id
res = [fit_results(i) for i in id]
if show_func:
func = [fit_func(i) for i in id]
else:
func = None
nvar = len(varlist)
fig = pyplot.figure(figsize=(5, 15))
fig.subplots_adjust(bottom=0.1, top=0.9, left=0.2, right=0.95, hspace=0.075)
for i, v in enumerate(varlist):
if v == 'MAGNOSUB':
v = 'MAG'
vsubmag = False
else:
vsubmag = submag
ax = make_bands_plot(fig, (5, 1, i+1), labels[v], i==0, i==nvar-1)
sub = norm = None
if v in sim.keys():
sv = sim[v]
if vsubmag and v == 'MAG':
sub = interp1d(w, sim[v], 'cubic', bounds_error=False, fill_value='extrapolate')
if sub is not None:
sv = sim[v] - sub(w)
ax.set_ylabel('$\Delta ' + labels[v][1:])
pyplot.plot(w, sv, '-k', alpha=0.75)
plotres(res, id, 'COMP%i_%s'%(compno, v), func, sub=sub, norm=norm, legends=None)
if v in sim.keys():
pyplot.plot(w, sv, 'xk', markersize=10.0, alpha=0.75)
if sub is None:
pyplot.ylim(ylim[v])
else:
pyplot.ylim(numpy.subtract(ylim[v], numpy.mean(ylim[v], 0))/3.0)
#pyplot.legend(loc='lower right', numpoints=1, prop={'size': 16})
if compno == 1 and ('-' not in name):
if i == nvar-1:
pyplot.legend(loc='upper left', numpoints=1, prop={'size': 16},
bbox_to_anchor=(0., -.35, 1., .1),
ncol=4, mode="expand", borderaxespad=0.)
elif compno == 1:
if i == nvar-1:
pyplot.text(0.5, -0.25, 'bulge',
horizontalalignment='center',
verticalalignment='top',
transform = ax.transAxes)
elif compno == 2:
if i == nvar-1:
pyplot.text(0.5, -0.25, 'disc',
horizontalalignment='center',
verticalalignment='top',
transform = ax.transAxes)
if i == 0:
pyplot.legend(loc='lower left', numpoints=1, prop={'size': 16},
bbox_to_anchor=(0., 1.45, 1., .1),
ncol=4, mode="expand", borderaxespad=0.)
if i == nvar-1:
# invisible text to ensure plots line up after cropping
pyplot.text(0.5, -0.30, '\_', color='white',
horizontalalignment='center',
verticalalignment='top',
transform = ax.transAxes)
fig.savefig('plots/illustration_%s.pdf'%name)
pyplot.close('all')
if compno==1:
plotimg(id, name)
plotcolimg(id, name)
plotprof(id, name)
plotcolprof(id, name)
npid = [j for j in id if j[0] == 'N' and j[-1] in 'nm']
if len(npid) > 0:
plotnonparamcolimg(npid, name)
def plotimg(id, name='0', asinh=True):
cmap_img = pyplot.cm.gray
cmap_res = rescolmap
norm_res = None
nbands = len(bands)
nid = len(id)
fig = pyplot.figure(figsize=(15.0/nbands * (1+nid*2), 15))
fig.subplots_adjust(bottom=0.05, top=0.95, left=0.05, right=0.95, hspace=0.0, wspace=0.0)
for i, iid in enumerate(id):
img = fit_images(iid)
if asinh:
for j, jimg in enumerate(img):
for iimg in jimg:
if j != 2:
iimg[iimg <= 0] = 0.0
else:
iimg /= 10.0
iimg *= 0.1
iimg[:] = numpy.arcsinh(iimg)
if i == 0:
vmin = []
vmax = []
vrange = []
for ib, b in enumerate(bands):
ax = fig.add_subplot(nbands, 1+2*len(id), 1+ib*(1+nid*2)+i*2)
if ib==nbands-1:
ax.set_xlabel('image', fontsize=labelsize)
ticksoff(ax)
vmin.append(scoreatpercentile(img[0][ib].ravel(), 50))
vmax.append(scoreatpercentile(img[0][ib].ravel(), 99.9) * 1.1)
vrange.append((scoreatpercentile(img[2][ib].ravel(), 99) - scoreatpercentile(img[2][ib].ravel(), 1)) * 1.5)
pyplot.imshow(img[0][ib][::-1], cmap=cmap_img, vmin=vmin[ib], vmax=vmax[ib], interpolation='nearest')
ax.set_ylabel('$%s$'%b, fontsize=labelsize)
for ib, b in enumerate(bands):
ax = fig.add_subplot(nbands, 1+2*len(id), 2+ib*(1+nid*2)+i*2)
if ib==nbands-1:
ax.set_xlabel('model %s'%iid, fontsize=labelsize)
ticksoff(ax)
pyplot.imshow(img[1][ib][::-1], cmap=cmap_img, vmin=vmin[ib], vmax=vmax[ib], interpolation='nearest')
for ib, b in enumerate(bands):
ax = fig.add_subplot(nbands, 1+2*len(id), 3+ib*(1+nid*2)+i*2)
if ib==nbands-1:
ax.set_xlabel('residual %s'%iid, fontsize=labelsize)
ticksoff(ax)
pyplot.imshow(img[2][ib][::-1], cmap=cmap_res, norm=norm_res, vmin=-vrange[ib], vmax=vrange[ib], interpolation='nearest')
fig.savefig('plots/images_%s.pdf'%name, dpi=300)
pyplot.close('all')
def plotcolimg(id, name='0', rgb='Hzg', desaturate=True, pedestal=0):
nbands = len(bands)
nid = len(id)
beta = 2.5
scales = numpy.array((0.04, 0.055, 0.2))
# offsets not so necessary now have nice desaturation feature working
offsets = numpy.array([75.0, 40.0, 8.0]) * 0.5
fig = pyplot.figure(figsize=(15.0/nbands * (1+nid*2), 15))
fig.subplots_adjust(bottom=0.05, top=0.95, left=0.05, right=0.95, hspace=0.0, wspace=0.0)
for i, iid in enumerate(id):
img = fit_images(iid, rgb)
img[0] = [img[0][j] - offsets[j] for j in range(3)]
img[1] = [img[1][j] - offsets[j] for j in range(3)]
img[2] = [img[2][j] + scales[j]*2*offsets.mean() for j in range(3)]
if i == 0:
ax = fig.add_subplot(nbands, 1+2*nid, 1+i*2)
ticksoff(ax)
ax.set_xlabel('image', fontsize=labelsize)
colimg = RGBImage(*img[0], scales=scales, beta=beta,
desaturate=desaturate, pedestal=pedestal).img
pyplot.imshow(colimg, interpolation='nearest', origin='lower')
ax = fig.add_subplot(nbands, 1+2*nid, 2+i*2)
ticksoff(ax)
ax.set_xlabel('model %s'%iid, fontsize=labelsize)
colimg = RGBImage(*img[1], scales=scales, beta=beta,
desaturate=False, pedestal=pedestal).img
pyplot.imshow(colimg, interpolation='nearest', origin='lower')
ax = fig.add_subplot(nbands, 1+2*nid, 3+i*2)
ticksoff(ax)
ax.set_xlabel('residual %s'%iid, fontsize=labelsize)
colimg = RGBImage(*img[2], scales=scales, beta=beta,
desaturate=desaturate).img
pyplot.imshow(colimg, interpolation='nearest', origin='lower')
fig.savefig('plots/colimages_%s.pdf'%name, dpi=300)
pyplot.close('all')
def plotnonparamcolimg(id, name='0', rgb='Hzg', desaturate=True, pedestal=0):
nbands = len(bands)
nid = len(id)
beta = 2.5
scales = numpy.array((0.04, 0.055, 0.2))
# offsets not so necessary now have nice desaturation feature working
offsets = numpy.array([75.0, 40.0, 8.0]) * 0.5
fig = pyplot.figure(figsize=(15.0/nbands * (1+nid*2), 15))
fig.subplots_adjust(bottom=0.05, top=0.95, left=0.05, right=0.95, hspace=0.0, wspace=0.0)
original_iid = None
for i, iid in enumerate(id):
# First row, results without nonparam
if original_iid != iid[:-1]:
original_iid = iid[:-1]
img = fit_images(original_iid, rgb)
img[0] = [img[0][j] - offsets[j] for j in range(3)]
img[1] = [img[1][j] - offsets[j] for j in range(3)]
img[2] = [img[2][j] + scales[j]*2*offsets.mean() for j in range(3)]
if i == 0:
ax = fig.add_subplot(nbands, 1+2*nid, 1+i*2)
ticksoff(ax)
ax.set_title('image', fontsize=labelsize)
colimg = RGBImage(*img[0], scales=scales, beta=beta,
desaturate=desaturate, pedestal=pedestal).img
pyplot.imshow(colimg, interpolation='nearest', origin='lower')
ax = fig.add_subplot(nbands, 1+2*nid, 2+i*2)
ticksoff(ax)
ax.set_title('model %s'%original_iid, fontsize=labelsize)
colimg = RGBImage(*img[1], scales=scales, beta=beta,
desaturate=desaturate, pedestal=pedestal).img
pyplot.imshow(colimg, interpolation='nearest', origin='lower')
ax = fig.add_subplot(nbands, 1+2*nid, 3+i*2)
ticksoff(ax)
ax.set_title('residual %s'%original_iid, fontsize=labelsize)
colimg = RGBImage(*img[2], scales=scales, beta=beta,
desaturate=desaturate).img
pyplot.imshow(colimg, interpolation='nearest', origin='lower')
# Second row, results with nonparam
img = fit_images(iid, rgb)
img[0] = [img[0][j] - offsets[j] for j in range(3)]
img[1] = [img[1][j] - offsets[j] for j in range(3)]
img[2] = [img[2][j] + scales[j]*2*offsets.mean() for j in range(3)]
ax = fig.add_subplot(nbands, 1+2*nid, 1+2*nid+2+i*2)
ticksoff(ax)
colimg = RGBImage(*img[1], scales=scales, beta=beta,
desaturate=False, pedestal=pedestal).img
pyplot.imshow(colimg, interpolation='nearest', origin='lower')
ax = fig.add_subplot(nbands, 1+2*nid, 1+2*nid+3+i*2)
ticksoff(ax)
colimg = RGBImage(*img[2], scales=scales, beta=beta,
desaturate=desaturate).img
pyplot.imshow(colimg, interpolation='nearest', origin='lower')
# Third row, nonparam diagnostics
nonparam = nonparam_images(iid, rgb)
datasub = [img[0][j] - nonparam[j] for j in range(3)]
nonparam = [nonparam[j] - offsets[j] for j in range(3)]
datasub = [datasub[j] - offsets[j] for j in range(3)]
ax = fig.add_subplot(nbands, 1+2*nid, 2+4*nid+2+i*2)
ticksoff(ax)
ax.set_xlabel('nonparam %s'%iid, fontsize=labelsize)
colimg = RGBImage(*nonparam, scales=scales, beta=beta,
desaturate=desaturate).img
pyplot.imshow(colimg, interpolation='nearest', origin='lower')
ax = fig.add_subplot(nbands, 1+2*nid, 2+4*nid+3+i*2)
ticksoff(ax)
ax.set_xlabel('datasub %s'%iid, fontsize=labelsize)
colimg = RGBImage(*datasub, scales=scales, beta=beta,
desaturate=desaturate).img
pyplot.imshow(colimg, interpolation='nearest', origin='lower')
fig.savefig('plots/nonparamcolimages_%s.pdf'%name, dpi=300)
pyplot.close('all')
def ticksoff(ax):
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.set_xticks([])
ax.set_yticks([])
def plotres(res, id, field, func=None, sub=None, norm=None, legends=None):
nid = len(id)
#mec = ['black', None] * (1+nid//2)
#mfc = ['white', 'black'] * (1+nid//2)
#color = ['grey', 'grey'] * (1+nid//2)
mec_func = ['DeepSkyBlue' ,'DarkGreen', 'Orange']
mfc_func = ['DeepSkyBlue', 'white', 'white']
color = ['DeepSkyBlue', 'DarkGreen', 'Orange']
mec_nofunc = ['MediumPurple', 'MediumSeaGreen', 'Pink']
mfc_nofunc = ['MediumPurple', 'MediumSeaGreen', 'Pink']
ymin, ymax = (1e99, -1e99)
for i, iid in enumerate(id):
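        # stagger the points in wavelength so error bars from different fits
        # do not overlap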
if nid%2 == 0:
x = w + 150 * [-1, 1, -2, 2][i]
else:
x = w + 300 * [-1, 0, 1, -2, 2][i]
if func is not None and func[i] is not None:
mec = mec_func
mfc = mfc_func
f = func[i][field]
plotfunc(f, wlfunc=wlfuncs.get(iid), color=color[i],
sub=sub, norm=norm)
else:
mec = mec_nofunc
mfc = mfc_nofunc
r = res[i][field]
rerr = res[i][field+'_ERR']
if sub is not None:
r = r - sub(w)
if norm is not None:
r = r / norm(w)
rerr = rerr / norm(w)
#alpha = 1 - i * 0.2
alpha = 1
if legends is not None:
label = legends[i]
else:
label = iid
pyplot.errorbar(x, r, rerr, color=mec[i],
marker=marker[i//2], mec=mec[i],
markerfacecolor=mfc[i], linestyle='',
label=label, alpha=alpha)
ymin = min(ymin, (r-rerr).min())
ymax = max(ymax, (r+rerr).max())
yrange = ymax - ymin
ymin -= 0.05 * yrange
ymax += 0.05 * yrange
pyplot.ylim(ymin, ymax)
def plotfunc(func, wlfunc=None, color='red', label='', sub=None, norm=None):
#dx = (xlim[1] - xlim[0]) / 1000.0
#x = numpy.arange(xlim[0], xlim[1]+dx/2.0, dx)
dx = (w[-1] - w[0]) / 1000.0
x = numpy.arange(w[0], w[-1]+dx/2.0, dx)
if wlfunc is None:
xfunc = x
else:
xfunc = wlfunc(x)
y = func(xfunc)
if sub is not None:
y = y - sub(x)
if norm is not None:
y = y / norm(x)
return pyplot.plot(x, y, ':', color=color, label=label, alpha=0.5)
def fit_results(f):
fn = 'fits/%s/fit%s.fits'%(f,f)
r = None
if os.path.exists(fn):
r = pyfits.getdata(fn, 'final_band')
else:
r = numpy.concatenate([pyfits.getdata('fits/%s/fit%s%s.fits'%(f,f, b), 'final_band')
for b in bands])
return r
def fit_images(f, bands=bands, zoom=2.0,
extensions=['input', 'model', 'residual']):
"""Get the images from the specified galfit(m) filename(s)
`bands` should be a list of band id strings.
If fn includes a format placeholder `{}`, this will be replaced by each
band id in turn. Otherwise all bands are assumed to be in a single file.
"""
fn = 'fits/%s/fit%s.fits'%(f,f)
if not os.path.exists(fn):
fn = fn.replace('.fits', '{}.fits')
out = []
# auto-discovery of bands for galfitm files
multiband = True
if '{}' not in fn:
with pyfits.open(fn) as p:
names = [x.name for x in p]
ext = extensions[0].upper()
if ext in names:
multiband = False
else:
ext = ext + '_'
found_bands = [x.name.replace(ext, '') for x in p
if x.name.startswith(ext)]
if bands is None:
bands = found_bands
if not multiband:
for ext in extensions:
try:
hdu = [pyfits.getdata(fn, ext)]
except KeyError:
hdu = None
out.append(hdu)
else:
for ext in extensions:
try:
if '{}' in fn:
hdu = [pyfits.getdata(fn.format(b), ext)
for b in bands]
else:
hdu = [pyfits.getdata(fn, '{}_{}'.format(ext, b))
for b in bands]
except KeyError:
hdu = None
out.append(hdu)
if zoom is not None:
for ib, b in enumerate(bands):
for i, xx in enumerate(out):
if xx is not None:
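                    # crop equal margins from each side, keeping the central
                    # 1/zoom fraction of the frame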
shape = numpy.array(out[i][ib].shape)
crop = shape * (1 - 1 / zoom) / 2
                    crop = crop.round().astype(int)
crop = crop.clip(0, shape // 2 - 1)
icrop = [crop[0]] * 2
jcrop = [crop[1]] * 2
if xx is not None:
out[i][ib] = out[i][ib][icrop[0]:-icrop[1],
jcrop[0]:-jcrop[1]]
return out
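# Example usage (assuming the galfit(m) output for fit 'A1' exists):
#     input_imgs, model_imgs, resid_imgs = fit_images('A1', zoom=None)
# Each returned element is a list of 2D arrays, one per band, or None if the
# corresponding extension is missing from the file.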
def nonparam_images(f, bands=bands):
return fit_images(f, bands, extensions=['nonparam'])[0]
def fit_func(f):
fn = 'fits/%s/fit%s.fits'%(f,f)
if os.path.exists(fn):
r = {}
d = pyfits.getdata('fits/%s/fit%s.fits'%(f,f), 'fit_info')[0]
ref = d.field('refwlband')
low = d.field('lowdwlband') + ref
high = d.field('highdwlband') + ref
d = pyfits.getdata('fits/%s/fit%s.fits'%(f,f), 'final_cheb')
for n in d.names:
r[n] = Chebyshev(d.field(n), (low, high))
else:
r = None
return r
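# Example usage (hypothetical values): evaluate the fitted Chebyshev wavelength
# dependence of a parameter, provided the single galfitm output file exists.
# Field names follow the 'COMP<n>_<PAR>' convention used elsewhere in this module:
#     cheb = fit_func('A1')
#     if cheb is not None:
#         mag_at_6000 = cheb['COMP1_MAG'](6000.0)  # magnitude at 6000 Angstrom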
def make_bands_plot(fig, subplot=111, ylabel='', top=True, bottom=True):
ax1 = fig.add_subplot(*subplot)
ax2 = ax1.twiny()
ax1.set_ylabel(ylabel)
ax1.set_xlim(xlim)
if top:
ax2.set_xlabel('wavelength, \AA')
else:
ax2.set_xticklabels([])
ax2.set_xlim(xlim)
ax1.set_xticks(w)
if bottom:
ax1.set_xticklabels(['$'+i+'$' for i in bands])
else:
ax1.set_xticklabels([])
pyplot.setp(ax1.get_xticklabels(), va='baseline')
pyplot.setp(ax1.get_xaxis().get_major_ticks(), pad=20.)
pyplot.setp(ax1.get_yaxis().get_major_ticks(), pad=8.)
ax2.xaxis.labelpad = 12
return ax1
class Sersic:
# currently doesn't handle uncertainties
def __init__(self, mag, re, n, ar=1.0, pa=0.0,
mag_err=None, re_err=None, n_err=None, ar_err=None, pa_err=None, xc_err=None, yc_err=None):
self.mag = mag
self.re = re
self.n = n
self.ar = ar
self.pa = pa
self.mag_err = mag_err
self.re_err = re_err
self.n_err = n_err
self.ar_err = ar_err
self.pa_err = pa_err
def __call__(self, r):
return self.mu_r(r)
def mu_r(self, r):
        # Returns the surface brightness at the specified major-axis radius,
        # within annular ellipses matching the shape of each component individually.
        # When taking e.g. colours, this currently assumes the major axes of the
        # components are aligned; to be more generally correct we would need to
        # account for AR, PA, XC, YC, and either select a specific vector or
        # properly compute an azimuthal average.
mag = self.mag
re = self.re
n = self.n
bn = self.bn()
mue = mag + 5.0*log10(re) + 2.5*log10(2.0*pi*n*gamma(2.0*n)*exp(bn)/power(bn, 2.0*n))
mu = mue + 2.5 * bn / log(10) * (power(r/re, 1.0/n) - 1.0)
return mu
def bn(self):
return gammaincinv(2.0*self.n, 0.5)
# These need testing
def I_el(self, r_m, ar_m, pa_m=0):
return quad(self.I_el_theta, 0, 2*pi, args=(r_m, ar_m, pa_m))[0] / (2*pi)
def mu_el(self, r_m, ar_m, pa_m=0):
return -2.5*numpy.log10(self.I_el(r_m, ar_m, pa_m))
def mu_el_theta(self, theta, r_m, ar_m, pa_m=0):
x = r_m * numpy.cos(theta - pa_m)
y = ar_m * r_m * numpy.sin(theta - pa_m)
r_c = numpy.sqrt(x**2 + self.ar**2 * y**2)
return self.mu_r(r_c)
def I_el_theta(self, theta, r_m, ar_m, pa_m=0):
return 10**(-0.4*self.mu_el_theta(theta, r_m, ar_m, pa_m))
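# A minimal usage sketch (values are illustrative, not taken from any fit):
# a single Sersic component can be evaluated directly, which is handy for a
# quick sanity check of mu_r() outside the plotting pipeline.
def _example_sersic_profile():
    bulge = Sersic(mag=18.0, re=5.0, n=4.0)  # hypothetical n=4 bulge
    r = numpy.linspace(0.1, 15.0, 50)        # radii in the same units as re
    return r, bulge(r)                       # surface brightness mu(r) in mag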
def plotprof(id=('A1', 'A2'), name='0'):
print name, ':', id
color = [cm.gist_rainbow(i) for i in numpy.linspace(1.0, 0.0, 9)]
func, remax = make_funcs(id)
fig = pyplot.figure(figsize=(5, 5))
fig.subplots_adjust(bottom=0.15, top=0.95, left=0.15, right=0.95, hspace=0.0, wspace=0.0)
rmax = remax*3.0001
r = numpy.arange(rmax/10000.0, rmax, rmax/100.0)
for i, iid in enumerate(id):
for j in range(len(func[i])):
for k, band in enumerate(bands):
if k == 0:
label = "%s\_%i"%(iid, j)
else:
label = ""
pyplot.plot(r, func[i][j][k](r), linestyle=linestyle[i],
marker=None, color=color[k], label=label)
pyplot.legend(loc='upper right', numpoints=1, prop={'size': 16})
pyplot.xlabel('$r_{\mathrm{e}}$')
pyplot.ylabel('$\mu$')
#fig.gca().invert_yaxis()
pyplot.xlim(0.0, rmax)
pyplot.ylim(26, 16)
fig.savefig('plots/profiles_%s.pdf'%name)
def plotcolprof(id=('A1', 'A2'), name='0'):
# need to decide and implement consistent annuli in which to determine colour
# would be nice to plot lines for input model too
# normalised at remax and offset for display purposes
print name, ':', id
offset = 0.5
color = [cm.gist_rainbow(i) for i in numpy.linspace(1.0, 0.0, 9)]
func, remax = make_funcs(id)
fig = pyplot.figure(figsize=(5, 5))
fig.subplots_adjust(bottom=0.15, top=0.95, left=0.15, right=0.95, hspace=0.0, wspace=0.0)
rmax = remax*3.0001
r = numpy.arange(rmax/10000.0, rmax, rmax/100.0)
for i, iid in enumerate(id):
for k in range(len(bands)-1):
f1 = f2 = f1max = f2max = 0.0
for j in range(len(func[i])):
if k == 0:
#label = "%s_%i_%s-%s"%(iid, j, bands[k], bands[k+1])
#label = "%s_%i"%(iid, j)
label = "%s"%iid
else:
label = ""
# to use elliptically averaged surface brightnesses will need
# to supply multi-component fits with single-Sersic info
f1 += 10**(-0.4*func[i][j][k](r))
f2 += 10**(-0.4*func[i][j][k+1](r))
f1max += 10**(-0.4*func[i][j][k](remax))
f2max += 10**(-0.4*func[i][j][k+1](remax))
colour = -2.5*numpy.log10(f1/f2)
colour_remax = -2.5*numpy.log10(f1max/f2max)
colour -= colour_remax
colour += offset*k
pyplot.hlines([offset*k], 0.0, rmax, colors='grey')
pyplot.plot(r, colour, linestyle=linestyle[i],
marker=None, color=color[k], label=label)
pyplot.legend(loc='upper right', numpoints=1, prop={'size': 16})
pyplot.xlabel('$r_{\mathrm{e}}$')
pyplot.ylabel('Colour')
pyplot.ylim(-1*offset, (len(bands)+1) * offset)
pyplot.xlim(0.0, rmax)
fig.savefig('plots/colprofiles_%s.pdf'%name)
def make_funcs(id):
res = [fit_results(i) for i in id]
func = []
for i, iid in enumerate(id):
func.append([])
remax = 0
compno = 0
while True:
compno += 1
field = 'COMP%i_MAG'%compno
if field not in res[i].dtype.names:
break
mag = res[i][field]
re, n, ar, pa, xc, yc = [res[i]['COMP%i_%s'%(compno, par)] for par in
('Re', 'n', 'AR', 'PA', 'XC', 'YC')]
func[i].append([])
for k, band in enumerate(bands):
func[i][compno-1].append(Sersic(mag[k], re[k], n[k], ar[k], pa[k], xc[k], yc[k]))
remax = max(remax, re.max())
return func, remax
if __name__ == '__main__':
plot_all()
|
|
import string
import random
import io
import pytest
import numpy as np
import validate_docstrings
validate_one = validate_docstrings.validate_one
from pandas.util.testing import capture_stderr
class GoodDocStrings(object):
"""
Collection of good doc strings.
This class contains a lot of docstrings that should pass the validation
script without any errors.
"""
def plot(self, kind, color='blue', **kwargs):
"""
Generate a plot.
Render the data in the Series as a matplotlib plot of the
specified kind.
Parameters
----------
kind : str
Kind of matplotlib plot.
color : str, default 'blue'
Color name or rgb code.
**kwargs
These parameters will be passed to the matplotlib plotting
function.
"""
pass
def sample(self):
"""
Generate and return a random number.
The value is sampled from a continuous uniform distribution between
0 and 1.
Returns
-------
float
Random number generated.
"""
return random.random()
def random_letters(self):
"""
Generate and return a sequence of random letters.
The length of the returned string is also random, and is also
returned.
Returns
-------
length : int
Length of the returned string.
letters : str
String of random letters.
"""
length = random.randint(1, 10)
letters = "".join(random.sample(string.ascii_lowercase, length))
return length, letters
def sample_values(self):
"""
Generate an infinite sequence of random numbers.
The values are sampled from a continuous uniform distribution between
0 and 1.
Yields
------
float
Random number generated.
"""
while True:
yield random.random()
def head(self):
"""
Return the first 5 elements of the Series.
This function is mainly useful to preview the values of the
Series without displaying the whole of it.
Returns
-------
Series
Subset of the original series with the 5 first values.
See Also
--------
Series.tail : Return the last 5 elements of the Series.
Series.iloc : Return a slice of the elements in the Series,
which can also be used to return the first or last n.
"""
return self.iloc[:5]
def head1(self, n=5):
"""
Return the first elements of the Series.
This function is mainly useful to preview the values of the
Series without displaying the whole of it.
Parameters
----------
n : int
Number of values to return.
Returns
-------
Series
Subset of the original series with the n first values.
See Also
--------
tail : Return the last n elements of the Series.
Examples
--------
>>> s = pd.Series(['Ant', 'Bear', 'Cow', 'Dog', 'Falcon'])
>>> s.head()
0 Ant
1 Bear
2 Cow
3 Dog
4 Falcon
dtype: object
With the `n` parameter, we can change the number of returned rows:
>>> s.head(n=3)
0 Ant
1 Bear
2 Cow
dtype: object
"""
return self.iloc[:n]
def contains(self, pat, case=True, na=np.nan):
"""
Return whether each value contains `pat`.
In this case, we are illustrating how to use sections, even
if the example is simple enough and does not require them.
Parameters
----------
pat : str
Pattern to check for within each element.
case : bool, default True
Whether check should be done with case sensitivity.
na : object, default np.nan
Fill value for missing data.
Examples
--------
>>> s = pd.Series(['Antelope', 'Lion', 'Zebra', np.nan])
>>> s.str.contains(pat='a')
0 False
1 False
2 True
3 NaN
dtype: object
**Case sensitivity**
        With `case` set to `False` we can match `a` with both
`a` and `A`:
>>> s.str.contains(pat='a', case=False)
0 True
1 False
2 True
3 NaN
dtype: object
**Missing values**
We can fill missing values in the output using the `na` parameter:
>>> s.str.contains(pat='a', na=False)
0 False
1 False
2 True
3 False
dtype: bool
"""
pass
def mode(self, axis, numeric_only):
"""
Ensure sphinx directives don't affect checks for trailing periods.
Parameters
----------
axis : str
Sentence ending in period, followed by single directive.
.. versionchanged:: 0.1.2
numeric_only : bool
Sentence ending in period, followed by multiple directives.
.. versionadded:: 0.1.2
.. deprecated:: 0.00.0
A multiline description,
which spans another line.
"""
pass
def good_imports(self):
"""
Ensure import other than numpy and pandas are fine.
Examples
--------
This example does not import pandas or import numpy.
>>> import datetime
>>> datetime.MAXYEAR
9999
"""
pass
class BadGenericDocStrings(object):
"""Everything here has a bad docstring
"""
def func(self):
"""Some function.
With several mistakes in the docstring.
        It has a blank line after the signature `def func():`.
The text 'Some function' should go in the line after the
opening quotes of the docstring, not in the same line.
There is a blank line between the docstring and the first line
of code `foo = 1`.
The closing quotes should be in the next line, not in this one."""
foo = 1
bar = 2
return foo + bar
def astype(self, dtype):
"""
Casts Series type.
Verb in third-person of the present simple, should be infinitive.
"""
pass
def astype1(self, dtype):
"""
Method to cast Series type.
Does not start with verb.
"""
pass
def astype2(self, dtype):
"""
Cast Series type
Missing dot at the end.
"""
pass
def astype3(self, dtype):
"""
Cast Series type from its current type to the new type defined in
the parameter dtype.
Summary is too verbose and doesn't fit in a single line.
"""
pass
def plot(self, kind, **kwargs):
"""
Generate a plot.
Render the data in the Series as a matplotlib plot of the
specified kind.
Note the blank line between the parameters title and the first
parameter. Also, note that after the name of the parameter `kind`
and before the colon, a space is missing.
Also, note that the parameter descriptions do not start with a
capital letter, and do not finish with a dot.
Finally, the `**kwargs` parameter is missing.
Parameters
        ----------

        kind: str
kind of matplotlib plot
"""
pass
def method(self, foo=None, bar=None):
"""
A sample DataFrame method.
Do not import numpy and pandas.
Try to use meaningful data, when it makes the example easier
to understand.
Try to avoid positional arguments like in `df.method(1)`. They
can be alright if previously defined with a meaningful name,
like in `present_value(interest_rate)`, but avoid them otherwise.
When presenting the behavior with different parameters, do not place
all the calls one next to the other. Instead, add a short sentence
explaining what the example shows.
Examples
--------
>>> import numpy as np
>>> import pandas as pd
>>> df = pd.DataFrame(np.ones((3, 3)),
... columns=('a', 'b', 'c'))
>>> df.all(1)
0 True
1 True
2 True
dtype: bool
>>> df.all(bool_only=True)
Series([], dtype: bool)
"""
pass
class BadSummaries(object):
def wrong_line(self):
"""Exists on the wrong line"""
pass
def no_punctuation(self):
"""
Has the right line but forgets punctuation
"""
pass
def no_capitalization(self):
"""
provides a lowercase summary.
"""
pass
def no_infinitive(self):
"""
Started with a verb that is not infinitive.
"""
def multi_line(self):
"""
Extends beyond one line
which is not correct.
"""
def two_paragraph_multi_line(self):
"""
Extends beyond one line
which is not correct.
Extends beyond one line, which in itself is correct but the
previous short summary should still be an issue.
"""
class BadParameters(object):
"""
Everything here has a problem with its Parameters section.
"""
def missing_params(self, kind, **kwargs):
"""
Lacks kwargs in Parameters.
Parameters
----------
kind : str
Foo bar baz.
"""
def bad_colon_spacing(self, kind):
"""
Has bad spacing in the type line.
Parameters
----------
kind: str
Needs a space after kind.
"""
def no_description_period(self, kind):
"""
Forgets to add a period to the description.
Parameters
----------
kind : str
Doesn't end with a dot
"""
def no_description_period_with_directive(self, kind):
"""
Forgets to add a period, and also includes a directive.
Parameters
----------
kind : str
Doesn't end with a dot
.. versionadded:: 0.00.0
"""
def no_description_period_with_directives(self, kind):
"""
Forgets to add a period, and also includes multiple directives.
Parameters
----------
kind : str
Doesn't end with a dot
.. versionchanged:: 0.00.0
.. deprecated:: 0.00.0
"""
def parameter_capitalization(self, kind):
"""
Forgets to capitalize the description.
Parameters
----------
kind : str
this is not capitalized.
"""
def blank_lines(self, kind):
"""
Adds a blank line after the section header.
Parameters
        ----------

        kind : str
Foo bar baz.
"""
pass
def integer_parameter(self, kind):
"""
Uses integer instead of int.
Parameters
----------
kind : integer
Foo bar baz.
"""
pass
def string_parameter(self, kind):
"""
Uses string instead of str.
Parameters
----------
kind : string
Foo bar baz.
"""
pass
def boolean_parameter(self, kind):
"""
Uses boolean instead of bool.
Parameters
----------
kind : boolean
Foo bar baz.
"""
pass
def list_incorrect_parameter_type(self, kind):
"""
Uses list of boolean instead of list of bool.
Parameters
----------
kind : list of boolean, integer, float or string
Foo bar baz.
"""
pass
class BadReturns(object):
def return_not_documented(self):
"""
Lacks section for Returns
"""
return "Hello world!"
def yield_not_documented(self):
"""
Lacks section for Yields
"""
yield "Hello world!"
def no_type(self):
"""
Returns documented but without type.
Returns
-------
Some value.
"""
return "Hello world!"
def no_description(self):
"""
        Provides type but no description.
Returns
-------
str
"""
return "Hello world!"
def no_punctuation(self):
"""
Provides type and description but no period.
Returns
-------
str
A nice greeting
"""
return "Hello world!"
class BadSeeAlso(object):
def desc_no_period(self):
"""
Return the first 5 elements of the Series.
See Also
--------
Series.tail : Return the last 5 elements of the Series.
Series.iloc : Return a slice of the elements in the Series,
which can also be used to return the first or last n
"""
pass
def desc_first_letter_lowercase(self):
"""
Return the first 5 elements of the Series.
See Also
--------
Series.tail : return the last 5 elements of the Series.
Series.iloc : Return a slice of the elements in the Series,
which can also be used to return the first or last n.
"""
pass
def prefix_pandas(self):
"""
Have `pandas` prefix in See Also section.
See Also
--------
pandas.Series.rename : Alter Series index labels or name.
DataFrame.head : The first `n` rows of the caller object.
"""
pass
class BadExamples(object):
def unused_import(self):
"""
Examples
--------
>>> import pandas as pdf
>>> df = pd.DataFrame(np.ones((3, 3)), columns=('a', 'b', 'c'))
"""
pass
def missing_whitespace_around_arithmetic_operator(self):
"""
Examples
--------
>>> 2+5
7
"""
pass
def indentation_is_not_a_multiple_of_four(self):
"""
Examples
--------
>>> if 2 + 5:
... pass
"""
pass
def missing_whitespace_after_comma(self):
"""
Examples
--------
>>> df = pd.DataFrame(np.ones((3,3)),columns=('a','b', 'c'))
"""
pass
class TestValidator(object):
def _import_path(self, klass=None, func=None):
"""
Build the required import path for tests in this module.
Parameters
----------
klass : str
Class name of object in module.
func : str
Function name of object in module.
Returns
-------
str
            Import path of specified object in this module.
"""
base_path = "scripts.tests.test_validate_docstrings"
if klass:
base_path = ".".join([base_path, klass])
if func:
base_path = ".".join([base_path, func])
return base_path
@capture_stderr
def test_good_class(self):
errors = validate_one(self._import_path(
klass='GoodDocStrings'))['errors']
assert isinstance(errors, list)
assert not errors
@capture_stderr
@pytest.mark.parametrize("func", [
'plot', 'sample', 'random_letters', 'sample_values', 'head', 'head1',
'contains', 'mode', 'good_imports'])
def test_good_functions(self, func):
errors = validate_one(self._import_path(
klass='GoodDocStrings', func=func))['errors']
assert isinstance(errors, list)
assert not errors
@capture_stderr
def test_bad_class(self):
errors = validate_one(self._import_path(
klass='BadGenericDocStrings'))['errors']
assert isinstance(errors, list)
assert errors
@capture_stderr
@pytest.mark.parametrize("func", [
'func', 'astype', 'astype1', 'astype2', 'astype3', 'plot', 'method'])
def test_bad_generic_functions(self, func):
errors = validate_one(self._import_path( # noqa:F821
klass='BadGenericDocStrings', func=func))['errors']
assert isinstance(errors, list)
assert errors
@pytest.mark.parametrize("klass,func,msgs", [
# See Also tests
('BadSeeAlso', 'desc_no_period',
('Missing period at end of description for See Also "Series.iloc"',)),
('BadSeeAlso', 'desc_first_letter_lowercase',
('should be capitalized for See Also "Series.tail"',)),
# Summary tests
('BadSummaries', 'wrong_line',
('should start in the line immediately after the opening quotes',)),
('BadSummaries', 'no_punctuation',
('Summary does not end with a period',)),
('BadSummaries', 'no_capitalization',
('Summary does not start with a capital letter',)),
('BadSummaries', 'no_capitalization',
('Summary must start with infinitive verb',)),
('BadSummaries', 'multi_line',
('Summary should fit in a single line.',)),
('BadSummaries', 'two_paragraph_multi_line',
('Summary should fit in a single line.',)),
# Parameters tests
('BadParameters', 'missing_params',
('Parameters {**kwargs} not documented',)),
('BadParameters', 'bad_colon_spacing',
('Parameters {kind} not documented',
'Unknown parameters {kind: str}',
'Parameter "kind: str" has no type')),
('BadParameters', 'no_description_period',
('Parameter "kind" description should finish with "."',)),
('BadParameters', 'no_description_period_with_directive',
('Parameter "kind" description should finish with "."',)),
('BadParameters', 'parameter_capitalization',
('Parameter "kind" description should start with a capital letter',)),
('BadParameters', 'integer_parameter',
('Parameter "kind" type should use "int" instead of "integer"',)),
('BadParameters', 'string_parameter',
('Parameter "kind" type should use "str" instead of "string"',)),
('BadParameters', 'boolean_parameter',
('Parameter "kind" type should use "bool" instead of "boolean"',)),
('BadParameters', 'list_incorrect_parameter_type',
('Parameter "kind" type should use "bool" instead of "boolean"',)),
('BadParameters', 'list_incorrect_parameter_type',
('Parameter "kind" type should use "int" instead of "integer"',)),
('BadParameters', 'list_incorrect_parameter_type',
('Parameter "kind" type should use "str" instead of "string"',)),
pytest.param('BadParameters', 'blank_lines', ('No error yet?',),
marks=pytest.mark.xfail),
# Returns tests
('BadReturns', 'return_not_documented', ('No Returns section found',)),
('BadReturns', 'yield_not_documented', ('No Yields section found',)),
pytest.param('BadReturns', 'no_type', ('foo',),
marks=pytest.mark.xfail),
pytest.param('BadReturns', 'no_description', ('foo',),
marks=pytest.mark.xfail),
pytest.param('BadReturns', 'no_punctuation', ('foo',),
marks=pytest.mark.xfail),
# Examples tests
('BadGenericDocStrings', 'method',
('numpy does not need to be imported in the examples',)),
('BadGenericDocStrings', 'method',
('pandas does not need to be imported in the examples',)),
# See Also tests
('BadSeeAlso', 'prefix_pandas',
('pandas.Series.rename in `See Also` section '
'does not need `pandas` prefix',)),
# Examples tests
('BadExamples', 'unused_import',
('1 F401 \'pandas as pdf\' imported but unused',)),
('BadExamples', 'indentation_is_not_a_multiple_of_four',
('1 E111 indentation is not a multiple of four',)),
('BadExamples', 'missing_whitespace_around_arithmetic_operator',
('1 E226 missing whitespace around arithmetic operator',)),
('BadExamples', 'missing_whitespace_after_comma',
('3 E231 missing whitespace after \',\'',)),
])
def test_bad_examples(self, capsys, klass, func, msgs):
result = validate_one(self._import_path(klass=klass, func=func))
for msg in msgs:
assert msg in ' '.join(result['errors'])
class ApiItems(object):
@property
def api_doc(self):
return io.StringIO('''
.. currentmodule:: itertools
Itertools
---------
Infinite
~~~~~~~~
.. autosummary::
cycle
count
Finite
~~~~~~
.. autosummary::
chain
.. currentmodule:: random
Random
------
All
~~~
.. autosummary::
seed
randint
''')
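    # The RST above mimics an api reference page: a currentmodule directive
    # followed by section/subsection headers and autosummary tables. The tests
    # below expect get_api_items to yield one (name, function, section,
    # subsection) tuple per listed item.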
@pytest.mark.parametrize('idx,name', [(0, 'itertools.cycle'),
(1, 'itertools.count'),
(2, 'itertools.chain'),
(3, 'random.seed'),
(4, 'random.randint')])
def test_item_name(self, idx, name):
result = list(validate_docstrings.get_api_items(self.api_doc))
assert result[idx][0] == name
@pytest.mark.parametrize('idx,func', [(0, 'cycle'),
(1, 'count'),
(2, 'chain'),
(3, 'seed'),
(4, 'randint')])
def test_item_function(self, idx, func):
result = list(validate_docstrings.get_api_items(self.api_doc))
assert callable(result[idx][1])
assert result[idx][1].__name__ == func
@pytest.mark.parametrize('idx,section', [(0, 'Itertools'),
(1, 'Itertools'),
(2, 'Itertools'),
(3, 'Random'),
(4, 'Random')])
def test_item_section(self, idx, section):
result = list(validate_docstrings.get_api_items(self.api_doc))
assert result[idx][2] == section
@pytest.mark.parametrize('idx,subsection', [(0, 'Infinite'),
(1, 'Infinite'),
(2, 'Finite'),
(3, 'All'),
(4, 'All')])
def test_item_subsection(self, idx, subsection):
result = list(validate_docstrings.get_api_items(self.api_doc))
assert result[idx][3] == subsection
|
|
# Copyright 2016 Google Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import smilesparser
from rdkit import Chem
serial = 0
element_number = {'C': 6,
'N': 7,
'O': 8,
'H': 1,
'S': 16
}
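# Maps SMILES atomic symbols (upper-cased in AddAtom) to atomic numbers; only
# the few elements needed by the example molecules below are listed, so
# AddAtom raises a KeyError for anything else.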
class SMILES:
def __init__(self, smiles):
self.mol = Chem.RWMol()
self.parsed = smilesparser.SMILES.parseString(smiles)[0]
self.prevAtomIdx = None
self.prevBond = None
self.atomStack = []
self.ringClosures = {}
self.iterate_smiles(self.parsed.smiles)
def AddAtom(self, s):
a = Chem.Atom(element_number[s.upper()])
if a.GetSymbol() == 'S':
a.SetHybridization(Chem.rdchem.HybridizationType.SP2)
a.SetNumRadicalElectrons(1)
a.SetNoImplicit(True)
else:
if not self.prevBond:
a.SetHybridization(Chem.rdchem.HybridizationType.SP3)
elif self.prevBond == ':':
bt = Chem.rdchem.BondType.SINGLE
a.SetHybridization(Chem.rdchem.HybridizationType.SP2)
elif self.prevBond == '=':
bt = Chem.rdchem.BondType.DOUBLE
a.SetHybridization(Chem.rdchem.HybridizationType.SP2)
else:
raise RuntimeError
idx = self.mol.AddAtom(a)
if self.prevAtomIdx is not None:
self.AddBond(idx)
self.prevAtomIdx = idx
return a
def AddBond(self, idx):
bt = Chem.rdchem.BondType.SINGLE
if self.prevBond:
if self.prevBond == '=':
bt = Chem.rdchem.BondType.DOUBLE
if self.prevBond == '#':
bt = Chem.rdchem.BondType.TRIPLE
if self.prevBond == ':':
bt = Chem.rdchem.BondType.AROMATIC
self.mol.AddBond(self.prevAtomIdx, idx, bt)
self.prevBond = None
def inspect_organic_symbol(self, organic_symbol, indent=0):
s = ''.join(organic_symbol)
self.AddAtom(s)
def inspect_aromatic_symbol(self, aromatic_symbol, indent=0):
s = ''.join(aromatic_symbol)
a = self.AddAtom(s)
a.SetIsAromatic(True)
self.prevBond = ":"
def inspect_element_symbol(self, element_symbol, indent=0):
s = ''.join(element_symbol)
self.AddAtom(s)
def inspect_chiral_class(self, chiral_class, indent=0):
pass
def inspect_hcount(self, hcount, indent=0):
pass
def inspect_charge(self, charge, indent=0):
pass
def inspect_atomspec(self, atomspec, indent=0):
self.atomStack.append(self.prevAtomIdx)
for item in atomspec:
if isinstance(item, smilesparser.AST.AromaticSymbol):
self.inspect_aromatic_symbol(item.aromatic_symbol, indent+1)
elif isinstance(item, smilesparser.AST.ElementSymbol):
self.inspect_element_symbol(item.element_symbol, indent+1)
elif isinstance(item, smilesparser.AST.ChiralClass):
self.inspect_chiral_class(item.chiral_class, indent+1)
elif isinstance(item, smilesparser.AST.HCount):
self.inspect_hcount(item.hcount, indent+1)
elif isinstance(item, smilesparser.AST.Charge):
self.inspect_charge(item.charge, indent+1)
else:
print " " * indent + str(item), dir(item)
self.prevAtomIdx = self.atomStack.pop()
def inspect_atom(self, atom, indent=0):
if isinstance(atom, smilesparser.AST.OrganicSymbol):
self.inspect_organic_symbol(atom.organic_symbol, indent)
elif isinstance(atom, smilesparser.AST.AromaticSymbol):
self.inspect_aromatic_symbol(atom.aromatic_symbol, indent)
elif isinstance(atom, smilesparser.AST.AtomSpec):
self.inspect_atomspec(atom.atom_spec, indent)
else:
print " " * indent + atom, dir(atom)
def inspect_bond(self, bond, indent=0):
self.prevBond = bond
def inspect_ring_closure(self, ring_closure, indent=0):
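        # A ring-closure label seen for the first time records the current atom;
        # when the same label appears again, a bond is made back to that atom.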
if ring_closure not in self.ringClosures:
self.ringClosures[ring_closure] = self.prevAtomIdx
else:
idx = self.ringClosures[ring_closure]
self.AddBond(idx)
def inspect_chain(self, chain, indent=0):
for item in chain:
if isinstance(item, smilesparser.AST.Bond):
self.inspect_bond(item.bond, indent)
elif isinstance(item, smilesparser.AST.Atom):
self.inspect_atom(item.atom, indent)
elif isinstance(item, smilesparser.AST.RingClosure):
self.inspect_ring_closure(item.ring_closure, indent)
else:
print " " * indent + item, dir(item)
def iterate_branch(self, branch, indent=0):
self.atomStack.append(self.prevAtomIdx)
for item in branch[0]:
if isinstance(item, smilesparser.AST.Bond):
self.inspect_bond(item.bond, indent+1)
elif isinstance(item, smilesparser.AST.SMILES):
self.iterate_smiles(item.smiles, indent+1)
else:
print " " * indent + item, dir(item)
self.prevAtomIdx = self.atomStack.pop()
def iterate_smiles(self, smiles, indent=0):
for item in smiles:
if isinstance(item, smilesparser.AST.Atom):
self.inspect_atom(item.atom, indent)
elif isinstance(item, smilesparser.AST.Chain):
self.inspect_chain(item.chain, indent)
elif isinstance(item, smilesparser.AST.Branch):
self.iterate_branch(item, indent+1)
else:
print " " * indent + item, dir(item)
def print_mol(mol):
for atom in mol.GetAtoms():
atom.UpdatePropertyCache(strict=False)
print (atom.GetIdx(),
atom.GetAtomicNum(),
atom.GetDegree(),
atom.GetTotalDegree(),
atom.GetTotalValence(),
atom.GetImplicitValence(),
atom.GetExplicitValence(),
atom.GetFormalCharge(),
atom.GetNumRadicalElectrons(),
atom.GetHybridization(),
atom.GetNoImplicit())
for bond in mol.GetBonds():
print (bond.GetBeginAtomIdx(),
bond.GetEndAtomIdx(),
bond.GetBondType())
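# Usage sketch: build an RDKit molecule from a SMILES string with the
# home-grown parser and inspect it, as the __main__ block below does:
#     sm = SMILES('c1ccccc1')
#     print_mol(sm.mol)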
if __name__ == '__main__':
smiles=[
# 'C',
# 'CC',
# 'CCCCC(CCC)CCC',
# 'C1CCC(C1C)CCCC',
# 'c1ccccc1',
# 'Cc1ccccc1',
# 'CCC[S]=O',
# 'CC[S@](=O)c1ccc2c(c1)[nH]/c(=N/C(=O)OC)/[nH]2',
'C=CCc1cc(OC)c2c(c1OC)OCO2'
# 'CCC(=O)O[C@]1(CC[NH+](C[C@@H]1CC=C)C)c2ccccc2'
]
for s in smiles:
print s
m = Chem.MolFromSmiles(s)
s1 = Chem.MolToSmiles(m)
print s1
print_mol(m)
print
sm = SMILES(s1)
print_mol(sm.mol)
print Chem.MolToSmiles(sm.mol)
print
|
|
# Copyright 2016 NOKIA
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
import oslo_config
from oslo_config import cfg
from oslo_config import fixture as oslo_fixture
import testtools
from neutron.conf import common as core_config
from neutron.conf.plugins.ml2 import config as ml2_config
from nuage_neutron.plugins.common.base_plugin import RootNuagePlugin
from nuage_neutron.plugins.common import config
from nuage_neutron.plugins.common.exceptions import NuageBadRequest
from nuage_neutron.plugins.common import nuagedb
from nuage_neutron.plugins.nuage_ml2.mech_nuage import NuageMechanismDriver
from nuage_neutron.vsdclient.impl.vsdclientimpl import VsdClientImpl
from nuage_neutron.vsdclient.restproxy import RESTProxyError
from nuage_neutron.vsdclient.restproxy import RESTProxyServer
class ConfigTypes(object):
MINIMAL_CONFIG = 1
MISSING_SERVICE_PLUGIN = 2
MISSING_ML2_EXTENSION = 3
NUAGE_L2BRIDGE_WITHOUT_NUAGE_NETWORK = 4
class TestNuageMechanismDriver(testtools.TestCase):
@classmethod
def setUpClass(cls):
super(TestNuageMechanismDriver, cls).setUpClass()
# make sure we have the configs
if core_config.core_opts is None or ml2_config.ml2_opts is None:
            raise cls.failureException('Fix your setup.')  # cls.fail() is an instance method
def set_config_fixture(self, config_type=ConfigTypes.MINIMAL_CONFIG):
ml2_config.register_ml2_plugin_opts()
conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
conf.config(group='RESTPROXY', server='localhost:9876')
conf.config(group='RESTPROXY', server_timeout=1)
conf.config(group='RESTPROXY', server_max_retries=1)
conf.config(group='RESTPROXY', cms_id='1')
if config_type == ConfigTypes.MISSING_SERVICE_PLUGIN:
conf.config(service_plugins=['NuagePortAttributes',
'NuageL3'])
else:
conf.config(service_plugins=['NuagePortAttributes',
'NuageL3', 'NuageAPI'])
if config_type == ConfigTypes.MISSING_ML2_EXTENSION:
conf.config(group='ml2',
extension_drivers=['nuage_subnet',
'nuage_port'])
else:
conf.config(group='ml2',
extension_drivers=['nuage_subnet',
'nuage_port',
'port_security'])
if config_type == ConfigTypes.NUAGE_L2BRIDGE_WITHOUT_NUAGE_NETWORK:
conf.config(service_plugins=['NuagePortAttributes',
'NuageL3', 'NuageAPI',
'NuageL2Bridge'])
return conf
# get me a Nuage mechanism driver
def get_me_a_nmd(self):
self.set_config_fixture()
nmd = NuageMechanismDriver()
nmd._l2_plugin = nmd
nmd.initialize()
return nmd
@staticmethod
def get_me_a_rest_proxy():
rest_proxy = RESTProxyServer(server='localhost:9876',
base_uri='/nuage/api/v6',
serverssl=True,
verify_cert='False',
serverauth='1:1',
auth_resource='/me',
organization='org')
return rest_proxy
# NETWORK DRIVER INITIALIZATION CHECKS
def test_init_native_nmd_missing_service_plugin(self):
self.set_config_fixture(ConfigTypes.MISSING_SERVICE_PLUGIN)
self.assertRaisesRegex(
oslo_config.cfg.ConfigFileValueError,
r'Missing required service_plugin\(s\) '
r'\[\'NuageAPI\'\] for mechanism driver nuage',
NuageMechanismDriver().initialize)
def test_init_native_nmd_missing_ml2_extension(self):
self.set_config_fixture(ConfigTypes.MISSING_ML2_EXTENSION)
self.assertRaisesRegex(
oslo_config.cfg.ConfigFileValueError,
r'Missing required extension\(s\) '
r'\[\'port_security\'\] for mechanism driver nuage',
NuageMechanismDriver().initialize)
def test_init_missing_nuage_network_ml2_extension_for_l2bridge(self):
self.set_config_fixture(
ConfigTypes.NUAGE_L2BRIDGE_WITHOUT_NUAGE_NETWORK)
self.assertRaisesRegex(
oslo_config.cfg.ConfigFileValueError,
'Missing required extension '
r'\'nuage_network\' for service plugin NuageL2Bridge',
NuageMechanismDriver().initialize)
def test_init_native_nmd_invalid_server(self):
self.set_config_fixture()
self.assertRaisesRegex(
RESTProxyError,
'Error in REST call to VSD: '
'Could not establish a connection with the VSD. '
'Please check VSD URI path in plugin config '
'and verify IP connectivity.',
NuageMechanismDriver().initialize)
@mock.patch.object(RESTProxyServer, 'raise_rest_error')
@mock.patch.object(VsdClientImpl, 'verify_cms')
def test_multi_init_nmd_invalid_server(self, *_):
# init nmd 3 times
nmd1 = self.get_me_a_nmd()
nmd2 = self.get_me_a_nmd()
nmd3 = self.get_me_a_nmd()
# validate there is actually only 1 vsdclient (memoize)
self.assertEqual(nmd2.vsdclient, nmd1.vsdclient)
self.assertEqual(nmd3.vsdclient, nmd1.vsdclient)
# validate no api call is made - we don't count authentication calls!
self.assertEqual(0, nmd1.vsdclient.restproxy.api_count)
# FLAT NETWORKS
@mock.patch.object(RootNuagePlugin, 'init_vsd_client')
@mock.patch.object(NuageMechanismDriver, 'is_external',
return_value=False)
@mock.patch.object(NuageMechanismDriver, 'get_subnets',
return_value=[])
@mock.patch.object(nuagedb, 'get_subnet_l2dom_by_network_id',
return_value=[])
@mock.patch.object(nuagedb, 'get_subnet_l2doms_by_subnet_ids',
return_value=[])
def test_create_subnet_precommit_in_flat_network(self, *_):
nmd = self.get_me_a_nmd()
network = {'id': '1',
'provider:network_type': 'flat',
'router:external': False}
subnet = {'id': '10',
'network_id': '1',
'ip_version': 4,
'cidr': '10.0.0.0/24'}
nmd.create_subnet_precommit(Context(network, subnet))
@mock.patch.object(RootNuagePlugin, 'init_vsd_client')
@mock.patch.object(NuageMechanismDriver, 'get_subnets',
return_value=[])
@mock.patch.object(nuagedb, 'get_subnet_l2dom_by_network_id',
return_value=[])
@mock.patch.object(nuagedb, 'get_subnet_l2doms_by_subnet_ids',
return_value=[])
def test_create_subnet_precommit_in_flat_net_with_nuagenet(self, *_):
nmd = self.get_me_a_nmd()
network = {'id': '1',
'provider:network_type': 'flat',
'router:external': False}
subnet = {'id': '10',
'network_id': '1',
'nuagenet': '0x100',
'ip_version': 4,
'cidr': '10.0.0.0/24'}
try:
nmd.create_subnet_precommit(Context(network, subnet))
self.fail('Subnet precommit should not have succeeded')
except NuageBadRequest as e:
self.assertEqual("Bad request: Network should have 'provider:"
"network_type' vxlan or nuage_hybrid_mpls, or "
"have such a segment", str(e))
@mock.patch.object(RootNuagePlugin, 'init_vsd_client')
@mock.patch.object(NuageMechanismDriver, 'get_subnets',
return_value=[])
@mock.patch.object(nuagedb, 'get_subnet_l2dom_by_network_id',
return_value=[])
@mock.patch.object(nuagedb, 'get_subnet_l2doms_by_subnet_ids',
return_value=[])
def test_create_vsd_mgd_subnet_precommit_in_flat_net(self, *_):
nmd = self.get_me_a_nmd()
network = {'id': '1',
'provider:network_type': 'flat',
'router:external': False}
subnet = {'id': '10',
'network_id': '1',
'nuagenet': '0x100',
'net_partition': 'lalaland',
'ip_version': 4,
'cidr': '10.0.0.0/24'}
try:
nmd.create_subnet_precommit(Context(network, subnet))
self.fail('Create subnet precommit should not have succeeded')
except NuageBadRequest as e:
self.assertEqual("Bad request: Network should have 'provider:"
"network_type' vxlan or nuage_hybrid_mpls, or "
"have such a segment", str(e))
# VXLAN NETWORKS
@mock.patch.object(RootNuagePlugin, 'init_vsd_client')
@mock.patch.object(NuageMechanismDriver, 'get_subnets',
return_value=[])
@mock.patch.object(NuageMechanismDriver, '_create_vsd_managed_subnet')
@mock.patch.object(nuagedb, 'get_subnet_l2dom_by_network_id',
return_value=[])
@mock.patch.object(nuagedb, 'get_subnet_l2doms_by_subnet_ids',
return_value=[])
def test_create_subnet_precommit_with_nuagenet(self, *_):
nmd = self.get_me_a_nmd()
network = {'id': '1',
'provider:network_type': 'vxlan',
'router:external': False}
subnet = {'id': '10',
'network_id': '1',
'nuagenet': '0x100',
'ip_version': 4,
'cidr': '10.0.0.0/24'}
nmd.create_subnet_precommit(Context(network, subnet))
@mock.patch.object(RootNuagePlugin, 'init_vsd_client')
@mock.patch.object(NuageMechanismDriver, 'get_subnets',
return_value=[])
@mock.patch.object(NuageMechanismDriver, '_create_vsd_managed_subnet')
@mock.patch.object(nuagedb, 'get_subnet_l2dom_by_network_id',
return_value=[])
@mock.patch.object(nuagedb, 'get_subnet_l2doms_by_subnet_ids',
return_value=[])
def test_create_vsd_mgd_subnet_precommit(self, *_):
nmd = self.get_me_a_nmd()
network = {'id': '1',
'provider:network_type': 'vxlan',
'router:external': False}
subnet = {'id': '10',
'network_id': '1',
'nuagenet': '0x100',
'net_partition': 'lalaland',
'ip_version': 4,
'gateway_ip': None,
'cidr': '10.0.0.0/24'}
nmd.create_subnet_precommit(Context(network, subnet))
@mock.patch.object(RootNuagePlugin, 'init_vsd_client')
@mock.patch.object(NuageMechanismDriver, 'get_subnets')
@mock.patch.object(NuageMechanismDriver, '_create_vsd_managed_subnet')
@mock.patch.object(nuagedb, 'get_subnet_l2dom_by_network_id',
return_value=[])
@mock.patch.object(nuagedb, 'get_subnet_l2doms_by_subnet_ids')
@mock.patch.object(nuagedb, 'get_net_partition_by_id',
return_value={'id': 1})
def test_create_vsd_mgd_v6_subnet_precommit(self, *_):
nmd = self.get_me_a_nmd()
network = {'id': '1',
'provider:network_type': 'vxlan',
'router:external': False}
subnet = {'id': '10',
'network_id': '1',
'nuagenet': '0x100',
'net_partition': 'lalaland',
'ip_version': 6,
'cidr': 'fee::/64'}
nmd.create_subnet_precommit(Context(network, subnet))
@mock.patch.object(RootNuagePlugin, 'init_vsd_client')
@mock.patch.object(NuageMechanismDriver, 'default_np_id',
return_value=1)
@mock.patch.object(NuageMechanismDriver, 'is_external',
return_value=False)
@mock.patch.object(NuageMechanismDriver, 'get_subnets',
return_value=[])
@mock.patch.object(NuageMechanismDriver,
'_create_openstack_managed_subnet')
@mock.patch.object(nuagedb, 'get_subnet_l2doms_by_subnet_ids',
return_value=[])
@mock.patch.object(nuagedb, 'get_net_partition_by_id',
return_value={'id': 1})
@mock.patch.object(NuageMechanismDriver, '_create_nuage_subnet')
def test_create_subnet_precommit_default(self, *_):
nmd = self.get_me_a_nmd()
network = {'id': '1',
'provider:network_type': 'vxlan',
'router:external': False}
subnet = {'id': '10',
'network_id': '1',
'ip_version': 4,
'cidr': '10.10.1.0/24',
'gateway_ip': '10.10.1.1'}
nmd.create_subnet_precommit(Context(network, subnet))
@mock.patch.object(RootNuagePlugin, 'init_vsd_client')
@mock.patch.object(NuageMechanismDriver, 'get_subnets',
return_value=[])
@mock.patch.object(NuageMechanismDriver, 'is_external', return_value=False)
@mock.patch.object(nuagedb, 'get_subnet_l2dom_by_network_id',
return_value=[])
@mock.patch.object(NuageMechanismDriver,
'_create_openstack_managed_subnet')
@mock.patch.object(nuagedb, 'get_subnet_l2doms_by_subnet_ids')
def test_create_v6_subnet_precommit(self, *_):
nmd = self.get_me_a_nmd()
network = {'id': '1',
'provider:network_type': 'vxlan',
'router:external': False}
subnet = {'id': '10',
'network_id': '1',
'ip_version': 6,
'cidr': 'fee::/64'}
nmd.create_subnet_precommit(Context(network, subnet))
@mock.patch.object(RootNuagePlugin, 'init_vsd_client')
@mock.patch.object(NuageMechanismDriver, 'get_subnets',
return_value=[{'id': 'subnet1', 'ip_version': 6},
{'id': 'subnet2', 'ip_version': 6}])
@mock.patch.object(NuageMechanismDriver, 'is_external', return_value=False)
@mock.patch.object(NuageMechanismDriver, 'check_dhcp_agent_alive',
return_value=False)
@mock.patch.object(nuagedb, 'get_subnet_l2dom_by_network_id',
return_value=[])
@mock.patch.object(NuageMechanismDriver,
'_create_openstack_managed_subnet')
@mock.patch.object(nuagedb, 'get_subnet_l2doms_by_subnet_ids')
def test_create_two_v6_subnets_precommit(self, *_):
nmd = self.get_me_a_nmd()
network = {'id': '1',
'provider:network_type': 'vxlan',
'router:external': False}
subnet = {'id': '10',
'network_id': '1',
'ip_version': 6,
'cidr': 'fef::/64'}
nmd.create_subnet_precommit(Context(network, subnet))
@mock.patch.object(RootNuagePlugin, 'init_vsd_client')
@mock.patch.object(NuageMechanismDriver, 'get_subnets',
return_value=[{'id': 'subnet1', 'ip_version': 6},
{'id': 'subnet2', 'ip_version': 6}])
@mock.patch.object(NuageMechanismDriver, 'is_external', return_value=False)
@mock.patch.object(NuageMechanismDriver, 'check_dhcp_agent_alive',
return_value=True)
@mock.patch.object(nuagedb, 'get_subnet_l2dom_by_network_id',
return_value=[])
@mock.patch.object(nuagedb, 'get_subnet_l2doms_by_subnet_ids')
def test_create_two_v6_subnets_with_dhcp_agent_precommit(self, *_):
nmd = self.get_me_a_nmd()
network = {'id': '1',
'provider:network_type': 'vxlan',
'router:external': False}
subnet = {'id': '10',
'network_id': '1',
'ip_version': 6,
'cidr': 'eef::/64'}
try:
nmd.create_subnet_precommit(Context(network, subnet))
self.fail('This is a negative test and was not meant to pass.')
except NuageBadRequest as e:
self.assertEqual('Bad request: A network with multiple ipv4 or '
'ipv6 subnets is not allowed when '
'neutron-dhcp-agent is enabled', str(e))
@mock.patch.object(RootNuagePlugin, 'init_vsd_client')
@mock.patch.object(NuageMechanismDriver, 'get_subnets',
return_value=[{'id': 'subnet1', 'ip_version': 4}])
@mock.patch.object(NuageMechanismDriver, 'is_external', return_value=False)
@mock.patch.object(nuagedb, 'get_subnet_l2dom_by_network_id',
return_value=[])
@mock.patch.object(NuageMechanismDriver,
'_create_openstack_managed_subnet')
@mock.patch.object(nuagedb, 'get_subnet_l2doms_by_subnet_ids')
def test_create_v4_v6_subnet_precommit(self, *_):
nmd = self.get_me_a_nmd()
network = {'id': '1',
'provider:network_type': 'vxlan',
'router:external': False}
subnet = {'id': '10',
'network_id': '1',
'ip_version': 6,
'cidr': 'fee::/64'}
nmd.create_subnet_precommit(Context(network, subnet))
@mock.patch.object(RootNuagePlugin, 'init_vsd_client')
@mock.patch.object(NuageMechanismDriver, 'get_subnets',
return_value=[{'id': 'subnet1', 'ip_version': 4},
{'id': 'subnet2', 'ip_version': 4},
{'id': 'subnet2', 'ip_version': 6}])
@mock.patch.object(NuageMechanismDriver, 'is_external', return_value=False)
@mock.patch.object(nuagedb, 'get_subnet_l2dom_by_network_id',
return_value=[])
@mock.patch.object(nuagedb, 'get_subnet_l2doms_by_subnet_ids')
def test_create_two_v4_v6_subnets_precommit(self, *_):
nmd = self.get_me_a_nmd()
network = {'id': '1',
'provider:network_type': 'vxlan',
'router:external': False}
subnet = {'id': '10',
'network_id': '1',
'ip_version': 6,
'cidr': 'fee::/64'}
try:
nmd.create_subnet_precommit(Context(network, subnet))
self.fail('This is a negative test and was not meant to pass.')
except NuageBadRequest as e:
self.assertEqual('Bad request: A network can only have maximum 1 '
'ipv4 and 1 ipv6 subnet existing together', str(e)
)
@mock.patch.object(RESTProxyServer, 'generate_nuage_auth')
@mock.patch.object(RESTProxyServer, '_rest_call',
return_value=(401, 'Unauthorized', None, None, None,
None))
def test_rest_call_infinite_recursion(self, *_):
rest_proxy = self.get_me_a_rest_proxy()
try:
rest_proxy.rest_call('get', '', '')
except Exception as e:
self.assertEqual(True, 'Unauthorized' in str(e),
"Got an exception other than Unauthorized")
@mock.patch.object(RootNuagePlugin, 'init_vsd_client')
@mock.patch.object(NuageMechanismDriver, 'get_subnets',
return_value=[{'id': 'subnet1', 'ip_version': 4},
{'id': 'subnet2', 'ip_version': 6},
{'id': 'subnet2', 'ip_version': 4}])
@mock.patch.object(NuageMechanismDriver, 'is_external', return_value=False)
@mock.patch.object(nuagedb, 'get_subnet_l2dom_by_network_id',
return_value=[])
@mock.patch.object(nuagedb, 'get_subnet_l2doms_by_subnet_ids')
def test_create_v4_v6_v4_subnets_precommit(self, *_):
nmd = self.get_me_a_nmd()
network = {'id': '1',
'provider:network_type': 'vxlan',
'router:external': False}
subnet = {'id': '10',
'network_id': '1',
'ip_version': 6,
'cidr': 'fee::/64'}
try:
nmd.create_subnet_precommit(Context(network, subnet))
self.fail('This is a negative test and was not meant to pass.')
except NuageBadRequest as e:
self.assertEqual('Bad request: A network can only have maximum 1 '
'ipv4 and 1 ipv6 subnet existing together', str(e)
)
# DEFAULT ALLOW NON IP CHECKS
def test_default_allow_non_ip_not_set(self):
self.assertFalse(config.default_allow_non_ip_enabled())
def test_default_allow_non_ip_set_empty_string(self):
try:
conf = self.set_config_fixture()
conf.config(group='PLUGIN', default_allow_non_ip='')
self.fail('From Ocata onwards oslo is correctly checking its '
'config value parsing; '
'hence this line shd not be reached.')
except ValueError as e:
self.assertEqual('Unexpected boolean value \'\'', str(e))
def test_default_allow_non_ip_set(self):
conf = self.set_config_fixture()
conf.config(group='PLUGIN', default_allow_non_ip=True)
self.assertTrue(config.default_allow_non_ip_enabled())
# ENABLE INGRESS REPLICATION
def test_enable_ingress_replication(self):
self.assertFalse(config.ingress_replication_enabled())
def test_enable_ingress_replication_set(self):
conf = self.set_config_fixture()
conf.config(group='PLUGIN', enable_ingress_replication=True)
self.assertTrue(config.ingress_replication_enabled())
# ENABLE NATIVE SRIOV TRUNKS
def test_enable_native_sriov_trunks(self):
self.assertFalse(config.enable_native_sriov_trunks())
def test_enable_native_sriov_trunks_set(self):
conf = self.set_config_fixture()
conf.config(group='PLUGIN', enable_native_sriov_trunks=True)
self.assertTrue(config.enable_native_sriov_trunks())
# ip utility checks
def test_ip_comparison(self):
self.assertTrue(NuageMechanismDriver.compare_ip(
'cafe:babe::1', 'cafe:babe:0::1'))
self.assertFalse(NuageMechanismDriver.compare_cidr(
'cafe:babe::1', 'cafe:babe:1::1'))
def test_cidr_comparison(self):
self.assertTrue(NuageMechanismDriver.compare_cidr(
'cafe:babe::1/64', 'cafe:babe:0::1/64'))
self.assertFalse(NuageMechanismDriver.compare_cidr(
'cafe:babe::1/64', 'cafe:babe::1/63'))
def test_needs_vport_creation_basic(self):
self.assertFalse(NuageMechanismDriver.needs_vport_creation(
'nuage:vip'))
def test_needs_vport_creation_using_prefix(self):
conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
conf.config(group='PLUGIN', device_owner_prefix='no_vport')
# test match
self.assertFalse(NuageMechanismDriver.needs_vport_creation(
'no_vport:something'))
# test no match
self.assertTrue(NuageMechanismDriver.needs_vport_creation(
'something:no_vport'))
def test_count_fixed_ips_per_version(self):
self.assertEqual(
(1, 2), NuageMechanismDriver.count_fixed_ips_per_version(
[{'ip_address': 'cafe:babe::1'},
{'ip_address': '69.69.69.69'},
{'ip_address': 'dead:beef::1'}]))
def test_sort_ips(self):
self.assertEqual([], NuageMechanismDriver.sort_ips([]))
self.assertEqual(['cafe:babe:1::1', 'cafe:babe:12::1'],
NuageMechanismDriver.sort_ips(
['cafe:babe:12::1', 'cafe:babe:1::1']))
class Context(object):
def __init__(self, network, subnet):
self.current = subnet
self.original = subnet
self.db_context = self
self._plugin_context = self
class Transaction(object):
def __init__(self):
pass
def __enter__(self):
pass
def __exit__(self, type, value, traceback):
pass
def __del__(self):
pass
class Session(object):
@staticmethod
def is_active():
return True
def begin(self, **_):
return Transaction()
self.session = Session()
class Network(object):
def __init__(self, curr_network):
self.current = curr_network
class CorePlugin(object):
def __init__(self, _network):
self.network = _network
def get_network(self, _context, _subnet):
return self.network
self._plugin = CorePlugin(network)
self.network = Network(network)
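# Illustrative note (not part of the original tests): Context above is a
# lightweight stand-in for the subnet context object that neutron's ml2 plugin
# hands to create_subnet_precommit(). The tests above invoke it as, for example:
#
#   nmd.create_subnet_precommit(Context(network, subnet))
#
# where `network` and `subnet` are plain dicts; the stub exposes the subnet as
# both `current` and `original`, and wires `_plugin`, `_plugin_context` and a
# fake `session` so the driver's transaction handling can run unchanged.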
|
|
"""Arcam media player."""
import logging
from arcam.fmj import DecodeMode2CH, DecodeModeMCH, IncomingAudioFormat, SourceCodes
from arcam.fmj.state import State
from homeassistant import config_entries
from homeassistant.components.media_player import BrowseMedia, MediaPlayerEntity
from homeassistant.components.media_player.const import (
MEDIA_CLASS_DIRECTORY,
MEDIA_CLASS_MUSIC,
MEDIA_TYPE_MUSIC,
SUPPORT_BROWSE_MEDIA,
SUPPORT_PLAY_MEDIA,
SUPPORT_SELECT_SOUND_MODE,
SUPPORT_SELECT_SOURCE,
SUPPORT_TURN_OFF,
SUPPORT_TURN_ON,
SUPPORT_VOLUME_MUTE,
SUPPORT_VOLUME_SET,
SUPPORT_VOLUME_STEP,
)
from homeassistant.components.media_player.errors import BrowseError
from homeassistant.const import ATTR_ENTITY_ID, STATE_OFF, STATE_ON
from homeassistant.core import callback
from homeassistant.helpers.typing import HomeAssistantType
from .config_flow import get_entry_client
from .const import (
DOMAIN,
EVENT_TURN_ON,
SIGNAL_CLIENT_DATA,
SIGNAL_CLIENT_STARTED,
SIGNAL_CLIENT_STOPPED,
)
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(
hass: HomeAssistantType,
config_entry: config_entries.ConfigEntry,
async_add_entities,
):
"""Set up the configuration entry."""
client = get_entry_client(hass, config_entry)
async_add_entities(
[
ArcamFmj(
config_entry.title,
State(client, zone),
config_entry.unique_id or config_entry.entry_id,
)
for zone in [1, 2]
],
True,
)
return True
class ArcamFmj(MediaPlayerEntity):
"""Representation of a media device."""
def __init__(
self,
device_name,
state: State,
uuid: str,
):
"""Initialize device."""
self._state = state
self._device_name = device_name
self._name = f"{device_name} - Zone: {state.zn}"
self._uuid = uuid
self._support = (
SUPPORT_SELECT_SOURCE
| SUPPORT_PLAY_MEDIA
| SUPPORT_BROWSE_MEDIA
| SUPPORT_VOLUME_SET
| SUPPORT_VOLUME_MUTE
| SUPPORT_VOLUME_STEP
| SUPPORT_TURN_OFF
| SUPPORT_TURN_ON
)
if state.zn == 1:
self._support |= SUPPORT_SELECT_SOUND_MODE
def _get_2ch(self):
"""Return if source is 2 channel or not."""
audio_format, _ = self._state.get_incoming_audio_format()
return bool(
audio_format
in (
IncomingAudioFormat.PCM,
IncomingAudioFormat.ANALOGUE_DIRECT,
IncomingAudioFormat.UNDETECTED,
None,
)
)
@property
def entity_registry_enabled_default(self) -> bool:
"""Return if the entity should be enabled when first added to the entity registry."""
return self._state.zn == 1
@property
def unique_id(self):
"""Return unique identifier if known."""
return f"{self._uuid}-{self._state.zn}"
@property
def device_info(self):
"""Return a device description for device registry."""
return {
"name": self._device_name,
"identifiers": {
(DOMAIN, self._uuid),
(DOMAIN, self._state.client.host, self._state.client.port),
},
"model": "Arcam FMJ AVR",
"manufacturer": "Arcam",
}
@property
def should_poll(self) -> bool:
"""No need to poll."""
return False
@property
def name(self):
"""Return the name of the controlled device."""
return self._name
@property
def state(self):
"""Return the state of the device."""
if self._state.get_power():
return STATE_ON
return STATE_OFF
@property
def supported_features(self):
"""Flag media player features that are supported."""
return self._support
async def async_added_to_hass(self):
"""Once registered, add listener for events."""
await self._state.start()
@callback
def _data(host):
if host == self._state.client.host:
self.async_write_ha_state()
@callback
def _started(host):
if host == self._state.client.host:
self.async_schedule_update_ha_state(force_refresh=True)
@callback
def _stopped(host):
if host == self._state.client.host:
self.async_schedule_update_ha_state(force_refresh=True)
self.async_on_remove(
self.hass.helpers.dispatcher.async_dispatcher_connect(
SIGNAL_CLIENT_DATA, _data
)
)
self.async_on_remove(
self.hass.helpers.dispatcher.async_dispatcher_connect(
SIGNAL_CLIENT_STARTED, _started
)
)
self.async_on_remove(
self.hass.helpers.dispatcher.async_dispatcher_connect(
SIGNAL_CLIENT_STOPPED, _stopped
)
)
async def async_update(self):
"""Force update of state."""
_LOGGER.debug("Update state %s", self.name)
await self._state.update()
async def async_mute_volume(self, mute):
"""Send mute command."""
await self._state.set_mute(mute)
self.async_write_ha_state()
async def async_select_source(self, source):
"""Select a specific source."""
try:
value = SourceCodes[source]
except KeyError:
_LOGGER.error("Unsupported source %s", source)
return
await self._state.set_source(value)
self.async_write_ha_state()
async def async_select_sound_mode(self, sound_mode):
"""Select a specific source."""
try:
if self._get_2ch():
await self._state.set_decode_mode_2ch(DecodeMode2CH[sound_mode])
else:
await self._state.set_decode_mode_mch(DecodeModeMCH[sound_mode])
except KeyError:
_LOGGER.error("Unsupported sound_mode %s", sound_mode)
return
self.async_write_ha_state()
async def async_set_volume_level(self, volume):
"""Set volume level, range 0..1."""
await self._state.set_volume(round(volume * 99.0))
self.async_write_ha_state()
async def async_volume_up(self):
"""Turn volume up for media player."""
await self._state.inc_volume()
self.async_write_ha_state()
async def async_volume_down(self):
"""Turn volume up for media player."""
await self._state.dec_volume()
self.async_write_ha_state()
async def async_turn_on(self):
"""Turn the media player on."""
if self._state.get_power() is not None:
_LOGGER.debug("Turning on device using connection")
await self._state.set_power(True)
else:
_LOGGER.debug("Firing event to turn on device")
self.hass.bus.async_fire(EVENT_TURN_ON, {ATTR_ENTITY_ID: self.entity_id})
async def async_turn_off(self):
"""Turn the media player off."""
await self._state.set_power(False)
async def async_browse_media(self, media_content_type=None, media_content_id=None):
"""Implement the websocket media browsing helper."""
if media_content_id not in (None, "root"):
raise BrowseError(
f"Media not found: {media_content_type} / {media_content_id}"
)
presets = self._state.get_preset_details()
radio = [
BrowseMedia(
title=preset.name,
media_class=MEDIA_CLASS_MUSIC,
media_content_id=f"preset:{preset.index}",
media_content_type=MEDIA_TYPE_MUSIC,
can_play=True,
can_expand=False,
)
for preset in presets.values()
]
root = BrowseMedia(
title="Root",
media_class=MEDIA_CLASS_DIRECTORY,
media_content_id="root",
media_content_type="library",
can_play=False,
can_expand=True,
children=radio,
)
return root
async def async_play_media(self, media_type: str, media_id: str, **kwargs) -> None:
"""Play media."""
if media_id.startswith("preset:"):
preset = int(media_id[7:])
await self._state.set_tuner_preset(preset)
else:
_LOGGER.error("Media %s is not supported", media_id)
return
@property
def source(self):
"""Return the current input source."""
value = self._state.get_source()
if value is None:
return None
return value.name
@property
def source_list(self):
"""List of available input sources."""
return [x.name for x in self._state.get_source_list()]
@property
def sound_mode(self):
"""Name of the current sound mode."""
if self._state.zn != 1:
return None
if self._get_2ch():
value = self._state.get_decode_mode_2ch()
else:
value = self._state.get_decode_mode_mch()
if value:
return value.name
return None
@property
def sound_mode_list(self):
"""List of available sound modes."""
if self._state.zn != 1:
return None
if self._get_2ch():
return [x.name for x in DecodeMode2CH]
return [x.name for x in DecodeModeMCH]
@property
def is_volume_muted(self):
"""Boolean if volume is currently muted."""
value = self._state.get_mute()
if value is None:
return None
return value
@property
def volume_level(self):
"""Volume level of device."""
value = self._state.get_volume()
if value is None:
return None
return value / 99.0
@property
def media_content_type(self):
"""Content type of current playing media."""
source = self._state.get_source()
if source == SourceCodes.DAB:
value = MEDIA_TYPE_MUSIC
elif source == SourceCodes.FM:
value = MEDIA_TYPE_MUSIC
else:
value = None
return value
@property
def media_content_id(self):
"""Content type of current playing media."""
source = self._state.get_source()
if source in (SourceCodes.DAB, SourceCodes.FM):
preset = self._state.get_tuner_preset()
if preset:
value = f"preset:{preset}"
else:
value = None
else:
value = None
return value
@property
def media_channel(self):
"""Channel currently playing."""
source = self._state.get_source()
if source == SourceCodes.DAB:
value = self._state.get_dab_station()
elif source == SourceCodes.FM:
value = self._state.get_rds_information()
else:
value = None
return value
@property
def media_artist(self):
"""Artist of current playing media, music track only."""
source = self._state.get_source()
if source == SourceCodes.DAB:
value = self._state.get_dls_pdt()
else:
value = None
return value
@property
def media_title(self):
"""Title of current playing media."""
source = self._state.get_source()
if source is None:
return None
channel = self.media_channel
if channel:
value = f"{source.name} - {channel}"
else:
value = source.name
return value
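# Illustrative note (not part of the integration): async_browse_media and
# async_play_media share a small convention for tuner presets; browse results
# use media_content_id strings of the form "preset:<index>", and play_media
# only accepts ids with that prefix, forwarding the integer part to
# State.set_tuner_preset(). A minimal sketch of the same parsing, using a
# hypothetical media_id value:
#
#   media_id = "preset:3"
#   if media_id.startswith("preset:"):
#       preset = int(media_id[len("preset:"):])  # -> 3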
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Exception types for TensorFlow errors."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import traceback
import warnings
from tensorflow.core.lib.core import error_codes_pb2
from tensorflow.python import pywrap_tensorflow as c_api
from tensorflow.python.framework import c_api_util
from tensorflow.python.util import compat
from tensorflow.python.util.tf_export import tf_export
@tf_export("OpError", "errors.OpError")
class OpError(Exception):
"""A generic error that is raised when TensorFlow execution fails.
Whenever possible, the session will raise a more specific subclass
of `OpError` from the `tf.errors` module.
"""
def __init__(self, node_def, op, message, error_code):
"""Creates a new `OpError` indicating that a particular op failed.
Args:
node_def: The `node_def_pb2.NodeDef` proto representing the op that
failed, if known; otherwise None.
op: The `ops.Operation` that failed, if known; otherwise None.
message: The message string describing the failure.
error_code: The `error_codes_pb2.Code` describing the error.
"""
super(OpError, self).__init__()
self._message = message
self._node_def = node_def
self._op = op
self._error_code = error_code
@property
def message(self):
"""The error message that describes the error."""
return self._message
@property
def op(self):
"""The operation that failed, if known.
*N.B.* If the failed op was synthesized at runtime, e.g. a `Send`
or `Recv` op, there will be no corresponding
@{tf.Operation}
object. In that case, this will return `None`, and you should
instead use the @{tf.OpError.node_def} to
discover information about the op.
Returns:
The `Operation` that failed, or None.
"""
return self._op
@property
def error_code(self):
"""The integer error code that describes the error."""
return self._error_code
@property
def node_def(self):
"""The `NodeDef` proto representing the op that failed."""
return self._node_def
def __str__(self):
if self._op is not None:
output = ["%s\n\nCaused by op %r, defined at:\n" % (self.message,
self._op.name,)]
curr_traceback_list = traceback.format_list(self._op.traceback)
output.extend(curr_traceback_list)
# pylint: disable=protected-access
original_op = self._op._original_op
# pylint: enable=protected-access
while original_op is not None:
output.append(
"\n...which was originally created as op %r, defined at:\n"
% (original_op.name,))
prev_traceback_list = curr_traceback_list
curr_traceback_list = traceback.format_list(original_op.traceback)
# Attempt to elide large common subsequences of the subsequent
# stack traces.
#
# TODO(mrry): Consider computing the actual longest common subsequence.
is_eliding = False
elide_count = 0
last_elided_line = None
for line, line_in_prev in zip(curr_traceback_list, prev_traceback_list):
if line == line_in_prev:
if is_eliding:
elide_count += 1
last_elided_line = line
else:
output.append(line)
is_eliding = True
elide_count = 0
else:
if is_eliding:
if elide_count > 0:
output.extend(
["[elided %d identical lines from previous traceback]\n"
% (elide_count - 1,), last_elided_line])
is_eliding = False
output.extend(line)
# pylint: disable=protected-access
original_op = original_op._original_op
# pylint: enable=protected-access
output.append("\n%s (see above for traceback): %s\n" %
(type(self).__name__, self.message))
return "".join(output)
else:
return self.message
OK = error_codes_pb2.OK
tf_export("errors.OK").export_constant(__name__, "OK")
CANCELLED = error_codes_pb2.CANCELLED
tf_export("errors.CANCELLED").export_constant(__name__, "CANCELLED")
UNKNOWN = error_codes_pb2.UNKNOWN
tf_export("errors.UNKNOWN").export_constant(__name__, "UNKNOWN")
INVALID_ARGUMENT = error_codes_pb2.INVALID_ARGUMENT
tf_export("errors.INVALID_ARGUMENT").export_constant(__name__,
"INVALID_ARGUMENT")
DEADLINE_EXCEEDED = error_codes_pb2.DEADLINE_EXCEEDED
tf_export("errors.DEADLINE_EXCEEDED").export_constant(__name__,
"DEADLINE_EXCEEDED")
NOT_FOUND = error_codes_pb2.NOT_FOUND
tf_export("errors.NOT_FOUND").export_constant(__name__, "NOT_FOUND")
ALREADY_EXISTS = error_codes_pb2.ALREADY_EXISTS
tf_export("errors.ALREADY_EXISTS").export_constant(__name__, "ALREADY_EXISTS")
PERMISSION_DENIED = error_codes_pb2.PERMISSION_DENIED
tf_export("errors.PERMISSION_DENIED").export_constant(__name__,
"PERMISSION_DENIED")
UNAUTHENTICATED = error_codes_pb2.UNAUTHENTICATED
tf_export("errors.UNAUTHENTICATED").export_constant(__name__, "UNAUTHENTICATED")
RESOURCE_EXHAUSTED = error_codes_pb2.RESOURCE_EXHAUSTED
tf_export("errors.RESOURCE_EXHAUSTED").export_constant(__name__,
"RESOURCE_EXHAUSTED")
FAILED_PRECONDITION = error_codes_pb2.FAILED_PRECONDITION
tf_export("errors.FAILED_PRECONDITION").export_constant(__name__,
"FAILED_PRECONDITION")
ABORTED = error_codes_pb2.ABORTED
tf_export("errors.ABORTED").export_constant(__name__, "ABORTED")
OUT_OF_RANGE = error_codes_pb2.OUT_OF_RANGE
tf_export("errors.OUT_OF_RANGE").export_constant(__name__, "OUT_OF_RANGE")
UNIMPLEMENTED = error_codes_pb2.UNIMPLEMENTED
tf_export("errors.UNIMPLEMENTED").export_constant(__name__, "UNIMPLEMENTED")
INTERNAL = error_codes_pb2.INTERNAL
tf_export("errors.INTERNAL").export_constant(__name__, "INTERNAL")
UNAVAILABLE = error_codes_pb2.UNAVAILABLE
tf_export("errors.UNAVAILABLE").export_constant(__name__, "UNAVAILABLE")
DATA_LOSS = error_codes_pb2.DATA_LOSS
tf_export("errors.DATA_LOSS").export_constant(__name__, "DATA_LOSS")
# pylint: disable=line-too-long
@tf_export("errors.CancelledError")
class CancelledError(OpError):
"""Raised when an operation or step is cancelled.
For example, a long-running operation (e.g.
@{tf.QueueBase.enqueue}) may be
cancelled by running another operation (e.g.
@{tf.QueueBase.close}),
or by @{tf.Session.close}.
A step that is running such a long-running operation will fail by raising
`CancelledError`.
@@__init__
"""
def __init__(self, node_def, op, message):
"""Creates a `CancelledError`."""
super(CancelledError, self).__init__(node_def, op, message, CANCELLED)
# pylint: enable=line-too-long
@tf_export("errors.UnknownError")
class UnknownError(OpError):
"""Unknown error.
An example of where this error may be returned is if a Status value
received from another address space belongs to an error-space that
is not known to this address space. Also errors raised by APIs that
do not return enough error information may be converted to this
error.
@@__init__
"""
def __init__(self, node_def, op, message, error_code=UNKNOWN):
"""Creates an `UnknownError`."""
super(UnknownError, self).__init__(node_def, op, message, error_code)
@tf_export("errors.InvalidArgumentError")
class InvalidArgumentError(OpError):
"""Raised when an operation receives an invalid argument.
This may occur, for example, if an operation receives an input
tensor that has an invalid value or shape. For example, the
@{tf.matmul} op will raise this
error if it receives an input that is not a matrix, and the
@{tf.reshape} op will raise
this error if the new shape does not match the number of elements in the input
tensor.
@@__init__
"""
def __init__(self, node_def, op, message):
"""Creates an `InvalidArgumentError`."""
super(InvalidArgumentError, self).__init__(node_def, op, message,
INVALID_ARGUMENT)
@tf_export("errors.DeadlineExceededError")
class DeadlineExceededError(OpError):
"""Raised when a deadline expires before an operation could complete.
This exception is not currently used.
@@__init__
"""
def __init__(self, node_def, op, message):
"""Creates a `DeadlineExceededError`."""
super(DeadlineExceededError, self).__init__(node_def, op, message,
DEADLINE_EXCEEDED)
@tf_export("errors.NotFoundError")
class NotFoundError(OpError):
"""Raised when a requested entity (e.g., a file or directory) was not found.
For example, running the
@{tf.WholeFileReader.read}
operation could raise `NotFoundError` if it receives the name of a file that
does not exist.
@@__init__
"""
def __init__(self, node_def, op, message):
"""Creates a `NotFoundError`."""
super(NotFoundError, self).__init__(node_def, op, message, NOT_FOUND)
@tf_export("errors.AlreadyExistsError")
class AlreadyExistsError(OpError):
"""Raised when an entity that we attempted to create already exists.
For example, running an operation that saves a file
(e.g. @{tf.train.Saver.save})
could potentially raise this exception if an explicit filename for an
existing file was passed.
@@__init__
"""
def __init__(self, node_def, op, message):
"""Creates an `AlreadyExistsError`."""
super(AlreadyExistsError, self).__init__(node_def, op, message,
ALREADY_EXISTS)
@tf_export("errors.PermissionDeniedError")
class PermissionDeniedError(OpError):
"""Raised when the caller does not have permission to run an operation.
For example, running the
@{tf.WholeFileReader.read}
operation could raise `PermissionDeniedError` if it receives the name of a
file for which the user does not have the read file permission.
@@__init__
"""
def __init__(self, node_def, op, message):
"""Creates a `PermissionDeniedError`."""
super(PermissionDeniedError, self).__init__(node_def, op, message,
PERMISSION_DENIED)
@tf_export("errors.UnauthenticatedError")
class UnauthenticatedError(OpError):
"""The request does not have valid authentication credentials.
This exception is not currently used.
@@__init__
"""
def __init__(self, node_def, op, message):
"""Creates an `UnauthenticatedError`."""
super(UnauthenticatedError, self).__init__(node_def, op, message,
UNAUTHENTICATED)
@tf_export("errors.ResourceExhaustedError")
class ResourceExhaustedError(OpError):
"""Some resource has been exhausted.
For example, this error might be raised if a per-user quota is
exhausted, or perhaps the entire file system is out of space.
@@__init__
"""
def __init__(self, node_def, op, message):
"""Creates a `ResourceExhaustedError`."""
super(ResourceExhaustedError, self).__init__(node_def, op, message,
RESOURCE_EXHAUSTED)
@tf_export("errors.FailedPreconditionError")
class FailedPreconditionError(OpError):
"""Operation was rejected because the system is not in a state to execute it.
This exception is most commonly raised when running an operation
that reads a @{tf.Variable}
before it has been initialized.
@@__init__
"""
def __init__(self, node_def, op, message):
"""Creates a `FailedPreconditionError`."""
super(FailedPreconditionError, self).__init__(node_def, op, message,
FAILED_PRECONDITION)
@tf_export("errors.AbortedError")
class AbortedError(OpError):
"""The operation was aborted, typically due to a concurrent action.
For example, running a
@{tf.QueueBase.enqueue}
operation may raise `AbortedError` if a
@{tf.QueueBase.close} operation
previously ran.
@@__init__
"""
def __init__(self, node_def, op, message):
"""Creates an `AbortedError`."""
super(AbortedError, self).__init__(node_def, op, message, ABORTED)
@tf_export("errors.OutOfRangeError")
class OutOfRangeError(OpError):
"""Raised when an operation iterates past the valid input range.
This exception is raised in "end-of-file" conditions, such as when a
@{tf.QueueBase.dequeue}
operation is blocked on an empty queue, and a
@{tf.QueueBase.close}
operation executes.
@@__init__
"""
def __init__(self, node_def, op, message):
"""Creates an `OutOfRangeError`."""
super(OutOfRangeError, self).__init__(node_def, op, message,
OUT_OF_RANGE)
@tf_export("errors.UnimplementedError")
class UnimplementedError(OpError):
"""Raised when an operation has not been implemented.
Some operations may raise this error when passed otherwise-valid
arguments that they do not currently support. For example, running
the @{tf.nn.max_pool} operation
would raise this error if pooling was requested on the batch dimension,
because this is not yet supported.
@@__init__
"""
def __init__(self, node_def, op, message):
"""Creates an `UnimplementedError`."""
super(UnimplementedError, self).__init__(node_def, op, message,
UNIMPLEMENTED)
@tf_export("errors.InternalError")
class InternalError(OpError):
"""Raised when the system experiences an internal error.
This exception is raised when some invariant expected by the runtime
has been broken. Catching this exception is not recommended.
@@__init__
"""
def __init__(self, node_def, op, message):
"""Creates an `InternalError`."""
super(InternalError, self).__init__(node_def, op, message, INTERNAL)
@tf_export("errors.UnavailableError")
class UnavailableError(OpError):
"""Raised when the runtime is currently unavailable.
This exception is not currently used.
@@__init__
"""
def __init__(self, node_def, op, message):
"""Creates an `UnavailableError`."""
super(UnavailableError, self).__init__(node_def, op, message,
UNAVAILABLE)
@tf_export("errors.DataLossError")
class DataLossError(OpError):
"""Raised when unrecoverable data loss or corruption is encountered.
For example, this may be raised by running a
@{tf.WholeFileReader.read}
operation, if the file is truncated while it is being read.
@@__init__
"""
def __init__(self, node_def, op, message):
"""Creates a `DataLossError`."""
super(DataLossError, self).__init__(node_def, op, message, DATA_LOSS)
_CODE_TO_EXCEPTION_CLASS = {
CANCELLED: CancelledError,
UNKNOWN: UnknownError,
INVALID_ARGUMENT: InvalidArgumentError,
DEADLINE_EXCEEDED: DeadlineExceededError,
NOT_FOUND: NotFoundError,
ALREADY_EXISTS: AlreadyExistsError,
PERMISSION_DENIED: PermissionDeniedError,
UNAUTHENTICATED: UnauthenticatedError,
RESOURCE_EXHAUSTED: ResourceExhaustedError,
FAILED_PRECONDITION: FailedPreconditionError,
ABORTED: AbortedError,
OUT_OF_RANGE: OutOfRangeError,
UNIMPLEMENTED: UnimplementedError,
INTERNAL: InternalError,
UNAVAILABLE: UnavailableError,
DATA_LOSS: DataLossError,
}
_EXCEPTION_CLASS_TO_CODE = dict((
(class_, code) for (code, class_) in _CODE_TO_EXCEPTION_CLASS.items()))
@tf_export("errors.exception_type_from_error_code")
def exception_type_from_error_code(error_code):
return _CODE_TO_EXCEPTION_CLASS[error_code]
@tf_export("errors.error_code_from_exception_type")
def error_code_from_exception_type(cls):
return _EXCEPTION_CLASS_TO_CODE[cls]
def _make_specific_exception(node_def, op, message, error_code):
try:
exc_type = exception_type_from_error_code(error_code)
return exc_type(node_def, op, message)
except KeyError:
warnings.warn("Unknown error code: %d" % error_code)
return UnknownError(node_def, op, message, error_code)
# Named like a function for backwards compatibility with the
# @tf_contextlib.contextmanager version, which was switched to a class to avoid
# some object creation overhead.
@tf_export("errors.raise_exception_on_not_ok_status") # pylint: disable=invalid-name
class raise_exception_on_not_ok_status(object):
"""Context manager to check for C API status."""
def __enter__(self):
self.status = c_api_util.ScopedTFStatus()
return self.status.status
def __exit__(self, type_arg, value_arg, traceback_arg):
try:
if c_api.TF_GetCode(self.status.status) != 0:
raise _make_specific_exception(
None, None,
compat.as_text(c_api.TF_Message(self.status.status)),
c_api.TF_GetCode(self.status.status))
# Delete the underlying status object from memory otherwise it stays alive
# as there is a reference to it from the traceback due to the raise.
finally:
del self.status
return False # False values do not suppress exceptions
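# Illustrative usage (not part of the module): the code<->class mappings defined
# above are exported on the public `tf.errors` namespace via tf_export, so a
# minimal sketch, assuming that namespace, looks like:
#
#   import tensorflow as tf
#
#   assert tf.errors.exception_type_from_error_code(tf.errors.NOT_FOUND) is \
#       tf.errors.NotFoundError
#   assert tf.errors.error_code_from_exception_type(tf.errors.NotFoundError) == \
#       tf.errors.NOT_FOUND
#
#   try:
#       raise tf.errors.NotFoundError(None, None, "missing checkpoint")
#   except tf.errors.OpError as e:
#       print(e.error_code, e.message)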
|
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
The descriptors used to define generated elements of the mojo python bindings.
"""
import array
import itertools
import struct
import mojo.bindings.reflection as reflection
import mojo.bindings.serialization as serialization
# pylint: disable=E0611,F0401
import mojo.system
class Type(object):
"""Describes the type of a struct field or a method parameter,"""
def Convert(self, value): # pylint: disable=R0201
"""
Convert the given value into its canonical representation, raising an
exception if the value cannot be converted.
"""
return value
def GetDefaultValue(self, value):
"""
Returns the default value for this type associated with the given value.
This method must be able to correctly handle value being None.
"""
return self.Convert(value)
class SerializableType(Type):
"""Describe a type that can be serialized by itself."""
def __init__(self, typecode):
Type.__init__(self)
self.typecode = typecode
self.byte_size = struct.calcsize('<%s' % self.GetTypeCode())
def GetTypeCode(self):
"""
Returns the type code (as defined by the struct module) used to encode
this type.
"""
return self.typecode
def GetByteSize(self):
"""
Returns the size of the encoding of this type.
"""
return self.byte_size
def Serialize(self, value, data_offset, data, handle_offset):
"""
Serialize a value of this type.
Args:
value: the value to serialize.
data_offset: the offset to the end of the data bytearray. Used to encode
pointers.
data: the bytearray to append additional data to.
handle_offset: the offset to use to encode handles.
Returns a tuple where the first element is the value to encode, and the
second is the array of handles to add to the message.
"""
raise NotImplementedError()
def Deserialize(self, value, context):
"""
Deserialize a value of this type.
Args:
value: the base value for this type. This is always a numeric type, and
corresponds to the first element in the tuple returned by
Serialize.
context: the deserialization context, giving access to the underlying
data bytearray and to the handles contained in the message.
Returns the deserialized value.
"""
raise NotImplementedError()
class BooleanType(Type):
"""Type object for booleans"""
def Convert(self, value):
return bool(value)
class NumericType(SerializableType):
"""Base Type object for all numeric types"""
def GetDefaultValue(self, value):
if value is None:
return self.Convert(0)
return self.Convert(value)
def Serialize(self, value, data_offset, data, handle_offset):
return (value, [])
def Deserialize(self, value, context):
return value
class IntegerType(NumericType):
"""Type object for integer types."""
def __init__(self, typecode):
NumericType.__init__(self, typecode)
size = 8 * self.byte_size
signed = typecode.islower()
if signed:
self._min_value = -(1 << (size - 1))
self._max_value = (1 << (size - 1)) - 1
else:
self._min_value = 0
self._max_value = (1 << size) - 1
def Convert(self, value):
if value is None:
raise TypeError('None is not an integer.')
if not isinstance(value, (int, long)):
raise TypeError('%r is not an integer type' % value)
if value < self._min_value or value > self._max_value:
raise OverflowError('%r is not in the range [%d, %d]' %
(value, self._min_value, self._max_value))
return value
class FloatType(NumericType):
"""Type object for floating point number types."""
def Convert(self, value):
if value is None:
raise TypeError('None is not a floating point number.')
if not isinstance(value, (int, long, float)):
raise TypeError('%r is not a numeric type' % value)
return float(value)
class PointerType(SerializableType):
"""Base Type object for pointers."""
def __init__(self, nullable=False):
SerializableType.__init__(self, 'Q')
self.nullable = nullable
def Serialize(self, value, data_offset, data, handle_offset):
if value is None and not self.nullable:
raise serialization.SerializationException(
'Trying to serialize null for non nullable type.')
if value is None:
return (0, [])
return self.SerializePointer(value, data_offset, data, handle_offset)
def Deserialize(self, value, context):
if value == 0:
if not self.nullable:
raise serialization.DeserializationException(
'Trying to deserialize null for non nullable type.')
return None
if value % 8 != 0:
raise serialization.DeserializationException(
'Pointer alignment is incorrect.')
sub_context = context.GetSubContext(value)
if len(sub_context.data) < serialization.HEADER_STRUCT.size:
raise serialization.DeserializationException(
'Available data too short to contain header.')
(size, nb_elements) = serialization.HEADER_STRUCT.unpack_from(
sub_context.data)
if len(sub_context.data) < size or size < serialization.HEADER_STRUCT.size:
raise serialization.DeserializationException('Header size is incorrect.')
sub_context.ClaimMemory(0, size)
return self.DeserializePointer(size, nb_elements, sub_context)
def SerializePointer(self, value, data_offset, data, handle_offset):
"""Serialize the not null value."""
raise NotImplementedError()
def DeserializePointer(self, size, nb_elements, context):
raise NotImplementedError()
class StringType(PointerType):
"""
Type object for strings.
Strings are represented as unicode, and the conversion is done using the
default encoding if a string instance is used.
"""
def __init__(self, nullable=False):
PointerType.__init__(self, nullable)
self._array_type = NativeArrayType('B', nullable)
def Convert(self, value):
if value is None or isinstance(value, unicode):
return value
if isinstance(value, str):
return unicode(value)
raise TypeError('%r is not a string' % value)
def SerializePointer(self, value, data_offset, data, handle_offset):
string_array = array.array('b')
string_array.fromstring(value.encode('utf8'))
return self._array_type.SerializeArray(
string_array, data_offset, data, handle_offset)
def DeserializePointer(self, size, nb_elements, context):
string_array = self._array_type.DeserializeArray(size, nb_elements, context)
return unicode(string_array.tostring(), 'utf8')
class BaseHandleType(SerializableType):
"""Type object for handles."""
def __init__(self, nullable=False):
SerializableType.__init__(self, 'i')
self.nullable = nullable
def Serialize(self, value, data_offset, data, handle_offset):
handle = self.ToHandle(value)
if not handle.IsValid() and not self.nullable:
raise serialization.SerializationException(
'Trying to serialize null for non nullable type.')
if not handle.IsValid():
return (-1, [])
return (handle_offset, [handle])
def Deserialize(self, value, context):
if value == -1:
if not self.nullable:
raise serialization.DeserializationException(
'Trying to deserialize null for non nullable type.')
return self.FromHandle(mojo.system.Handle())
return self.FromHandle(context.ClaimHandle(value))
def FromHandle(self, handle):
raise NotImplementedError()
def ToHandle(self, value):
raise NotImplementedError()
class HandleType(BaseHandleType):
"""Type object for handles."""
def Convert(self, value):
if value is None:
return mojo.system.Handle()
if not isinstance(value, mojo.system.Handle):
raise TypeError('%r is not a handle' % value)
return value
def FromHandle(self, handle):
return handle
def ToHandle(self, value):
return value
class InterfaceRequestType(BaseHandleType):
"""Type object for interface requests."""
def Convert(self, value):
if value is None:
return reflection.InterfaceRequest(mojo.system.Handle())
if not isinstance(value, reflection.InterfaceRequest):
raise TypeError('%r is not an interface request' % value)
return value
def FromHandle(self, handle):
return reflection.InterfaceRequest(handle)
def ToHandle(self, value):
return value.PassMessagePipe()
class InterfaceType(BaseHandleType):
"""Type object for interfaces."""
def __init__(self, interface_getter, nullable=False):
BaseHandleType.__init__(self, nullable)
self._interface_getter = interface_getter
self._interface = None
def Convert(self, value):
if value is None or isinstance(value, self.interface):
return value
raise TypeError('%r is not an instance of %r' % (value, self.interface))
@property
def interface(self):
if not self._interface:
self._interface = self._interface_getter()
return self._interface
def FromHandle(self, handle):
if handle.IsValid():
return self.interface.manager.Proxy(handle)
return None
def ToHandle(self, value):
if not value:
return mojo.system.Handle()
if isinstance(value, reflection.InterfaceProxy):
return value.manager.PassMessagePipe()
pipe = mojo.system.MessagePipe()
self.interface.manager.Bind(value, pipe.handle0)
return pipe.handle1
class BaseArrayType(PointerType):
"""Abstract Type object for arrays."""
def __init__(self, nullable=False, length=0):
PointerType.__init__(self, nullable)
self.length = length
def SerializePointer(self, value, data_offset, data, handle_offset):
if self.length != 0 and len(value) != self.length:
raise serialization.SerializationException('Incorrect array size')
return self.SerializeArray(value, data_offset, data, handle_offset)
def SerializeArray(self, value, data_offset, data, handle_offset):
"""Serialize the not null array."""
raise NotImplementedError()
def DeserializePointer(self, size, nb_elements, context):
if self.length != 0 and nb_elements != self.length:
raise serialization.DeserializationException('Incorrect array size')
if (size <
serialization.HEADER_STRUCT.size + self.SizeForLength(nb_elements)):
raise serialization.DeserializationException('Incorrect array size')
return self.DeserializeArray(size, nb_elements, context)
def DeserializeArray(self, size, nb_elements, context):
raise NotImplementedError()
def SizeForLength(self, nb_elements):
raise NotImplementedError()
class BooleanArrayType(BaseArrayType):
def __init__(self, nullable=False, length=0):
BaseArrayType.__init__(self, nullable, length)
self._array_type = NativeArrayType('B', nullable)
def Convert(self, value):
if value is None:
return value
return [TYPE_BOOL.Convert(x) for x in value]
def SerializeArray(self, value, data_offset, data, handle_offset):
groups = [value[i:i+8] for i in range(0, len(value), 8)]
converted = array.array('B', [_ConvertBooleansToByte(x) for x in groups])
return _SerializeNativeArray(converted, data_offset, data, len(value))
def DeserializeArray(self, size, nb_elements, context):
converted = self._array_type.DeserializeArray(size, nb_elements, context)
elements = list(itertools.islice(
itertools.chain.from_iterable(
[_ConvertByteToBooleans(x, 8) for x in converted]),
0,
nb_elements))
return elements
def SizeForLength(self, nb_elements):
return (nb_elements + 7) // 8
class GenericArrayType(BaseArrayType):
"""Type object for arrays of pointers."""
def __init__(self, sub_type, nullable=False, length=0):
BaseArrayType.__init__(self, nullable, length)
assert isinstance(sub_type, SerializableType)
self.sub_type = sub_type
def Convert(self, value):
if value is None:
return value
return [self.sub_type.Convert(x) for x in value]
def SerializeArray(self, value, data_offset, data, handle_offset):
size = (serialization.HEADER_STRUCT.size +
self.sub_type.GetByteSize() * len(value))
data_end = len(data)
position = len(data) + serialization.HEADER_STRUCT.size
data.extend(bytearray(size +
serialization.NeededPaddingForAlignment(size)))
returned_handles = []
to_pack = []
for item in value:
(new_data, new_handles) = self.sub_type.Serialize(
item,
len(data) - position,
data,
handle_offset + len(returned_handles))
to_pack.append(new_data)
returned_handles.extend(new_handles)
position = position + self.sub_type.GetByteSize()
serialization.HEADER_STRUCT.pack_into(data, data_end, size, len(value))
struct.pack_into('%d%s' % (len(value), self.sub_type.GetTypeCode()),
data,
data_end + serialization.HEADER_STRUCT.size,
*to_pack)
return (data_offset, returned_handles)
def DeserializeArray(self, size, nb_elements, context):
values = struct.unpack_from(
'%d%s' % (nb_elements, self.sub_type.GetTypeCode()),
buffer(context.data, serialization.HEADER_STRUCT.size))
result = []
sub_context = context.GetSubContext(serialization.HEADER_STRUCT.size)
for value in values:
result.append(self.sub_type.Deserialize(
value,
sub_context))
sub_context = sub_context.GetSubContext(self.sub_type.GetByteSize())
return result
def SizeForLength(self, nb_elements):
return nb_elements * self.sub_type.GetByteSize()
class NativeArrayType(BaseArrayType):
"""Type object for arrays of native types."""
def __init__(self, typecode, nullable=False, length=0):
BaseArrayType.__init__(self, nullable, length)
self.array_typecode = typecode
self.element_size = struct.calcsize('<%s' % self.array_typecode)
def Convert(self, value):
if value is None:
return value
if (isinstance(value, array.array) and
value.typecode == self.array_typecode):
return value
return array.array(self.array_typecode, value)
def SerializeArray(self, value, data_offset, data, handle_offset):
return _SerializeNativeArray(value, data_offset, data, len(value))
def DeserializeArray(self, size, nb_elements, context):
result = array.array(self.array_typecode)
result.fromstring(buffer(context.data,
serialization.HEADER_STRUCT.size,
size - serialization.HEADER_STRUCT.size))
return result
def SizeForLength(self, nb_elements):
return nb_elements * self.element_size
class StructType(PointerType):
"""Type object for structs."""
def __init__(self, struct_type_getter, nullable=False):
PointerType.__init__(self)
self._struct_type_getter = struct_type_getter
self._struct_type = None
self.nullable = nullable
@property
def struct_type(self):
if not self._struct_type:
self._struct_type = self._struct_type_getter()
return self._struct_type
def Convert(self, value):
if value is None or isinstance(value, self.struct_type):
return value
raise TypeError('%r is not an instance of %r' % (value, self.struct_type))
def GetDefaultValue(self, value):
if value:
return self.struct_type()
return None
def SerializePointer(self, value, data_offset, data, handle_offset):
(new_data, new_handles) = value.Serialize(handle_offset)
data.extend(new_data)
return (data_offset, new_handles)
def DeserializePointer(self, size, nb_elements, context):
return self.struct_type.Deserialize(context)
class MapType(SerializableType):
"""Type objects for maps."""
def __init__(self, key_type, value_type, nullable=False):
self._key_type = key_type
self._value_type = value_type
dictionary = {
'__metaclass__': reflection.MojoStructType,
'__module__': __name__,
'DESCRIPTOR': {
'fields': [
SingleFieldGroup('keys', MapType._GetArrayType(key_type), 0, 0),
SingleFieldGroup('values', MapType._GetArrayType(value_type), 1, 1),
],
}
}
self.struct = reflection.MojoStructType('MapStruct', (object,), dictionary)
self.struct_type = StructType(lambda: self.struct, nullable)
SerializableType.__init__(self, self.struct_type.typecode)
def Convert(self, value):
if value is None:
return value
if isinstance(value, dict):
return dict([(self._key_type.Convert(x), self._value_type.Convert(y)) for
x, y in value.iteritems()])
raise TypeError('%r is not a dictionary.' % value)
def Serialize(self, value, data_offset, data, handle_offset):
s = None
if value:
keys, values = [], []
for key, value in value.iteritems():
keys.append(key)
values.append(value)
s = self.struct(keys=keys, values=values)
return self.struct_type.Serialize(s, data_offset, data, handle_offset)
def Deserialize(self, value, context):
s = self.struct_type.Deserialize(value, context)
if s:
if len(s.keys) != len(s.values):
raise serialization.DeserializationException(
'keys and values do not have the same length.')
return dict(zip(s.keys, s.values))
return None
@staticmethod
def _GetArrayType(t):
if t == TYPE_BOOL:
return BooleanArrayType()
else:
return GenericArrayType(t)
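# Illustrative note (not part of the bindings): MapType serializes a dict as an
# internal two-field struct holding parallel arrays, conceptually
#   {'a': 1, 'b': 2}  ->  MapStruct(keys=['a', 'b'], values=[1, 2])
# and Deserialize() rebuilds the dict by zipping keys with values, rejecting
# payloads in which the two arrays differ in length.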
TYPE_BOOL = BooleanType()
TYPE_INT8 = IntegerType('b')
TYPE_INT16 = IntegerType('h')
TYPE_INT32 = IntegerType('i')
TYPE_INT64 = IntegerType('q')
TYPE_UINT8 = IntegerType('B')
TYPE_UINT16 = IntegerType('H')
TYPE_UINT32 = IntegerType('I')
TYPE_UINT64 = IntegerType('Q')
TYPE_FLOAT = FloatType('f')
TYPE_DOUBLE = FloatType('d')
TYPE_STRING = StringType()
TYPE_NULLABLE_STRING = StringType(True)
TYPE_HANDLE = HandleType()
TYPE_NULLABLE_HANDLE = HandleType(True)
TYPE_INTERFACE_REQUEST = InterfaceRequestType()
TYPE_NULLABLE_INTERFACE_REQUEST = InterfaceRequestType(True)
class FieldDescriptor(object):
"""Describes a field in a generated struct."""
def __init__(self, name, field_type, index, version, default_value=None):
self.name = name
self.field_type = field_type
self.version = version
self.index = index
self._default_value = default_value
def GetDefaultValue(self):
return self.field_type.GetDefaultValue(self._default_value)
class FieldGroup(object):
"""
Describes a list of fields in the generated struct that must be
serialized/deserialized together.
"""
def __init__(self, descriptors):
self.descriptors = descriptors
def GetDescriptors(self):
return self.descriptors
def GetTypeCode(self):
raise NotImplementedError()
def GetByteSize(self):
raise NotImplementedError()
def GetVersion(self):
raise NotImplementedError()
def Serialize(self, obj, data_offset, data, handle_offset):
raise NotImplementedError()
def Deserialize(self, value, context):
raise NotImplementedError()
class SingleFieldGroup(FieldGroup, FieldDescriptor):
"""A FieldGroup that contains a single FieldDescriptor."""
def __init__(self, name, field_type, index, version, default_value=None):
FieldDescriptor.__init__(
self, name, field_type, index, version, default_value)
FieldGroup.__init__(self, [self])
def GetTypeCode(self):
return self.field_type.GetTypeCode()
def GetByteSize(self):
return self.field_type.GetByteSize()
def GetVersion(self):
return self.version
def Serialize(self, obj, data_offset, data, handle_offset):
value = getattr(obj, self.name)
return self.field_type.Serialize(value, data_offset, data, handle_offset)
def Deserialize(self, value, context):
entity = self.field_type.Deserialize(value, context)
return { self.name: entity }
class BooleanGroup(FieldGroup):
"""A FieldGroup to pack booleans."""
def __init__(self, descriptors):
FieldGroup.__init__(self, descriptors)
self.version = min([descriptor.version for descriptor in descriptors])
def GetTypeCode(self):
return 'B'
def GetByteSize(self):
return 1
def GetVersion(self):
return self.version
def Serialize(self, obj, data_offset, data, handle_offset):
value = _ConvertBooleansToByte(
[getattr(obj, field.name) for field in self.GetDescriptors()])
return (value, [])
def Deserialize(self, value, context):
values = itertools.izip_longest([x.name for x in self.descriptors],
_ConvertByteToBooleans(value),
fillvalue=False)
return dict(values)
def _SerializeNativeArray(value, data_offset, data, length):
data_size = len(data)
data.extend(bytearray(serialization.HEADER_STRUCT.size))
data.extend(buffer(value))
data_length = len(data) - data_size
data.extend(bytearray(serialization.NeededPaddingForAlignment(data_length)))
serialization.HEADER_STRUCT.pack_into(data, data_size, data_length, length)
return (data_offset, [])
def _ConvertBooleansToByte(booleans):
"""Pack a list of booleans into an integer."""
return reduce(lambda x, y: x * 2 + y, reversed(booleans), 0)
def _ConvertByteToBooleans(value, min_size=0):
"""Unpack an integer into a list of booleans."""
res = []
while value:
res.append(bool(value&1))
value = value / 2
res.extend([False] * (min_size - len(res)))
return res
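# Illustrative example (not part of the bindings): booleans are packed LSB-first,
# so for instance
#   _ConvertBooleansToByte([True, False, True])  == 0b101 == 5
#   _ConvertByteToBooleans(5, min_size=4)        == [True, False, True, False]
# which is the encoding relied on by BooleanGroup and BooleanArrayType above.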
|
|
import re
from jsonschema import _utils
from jsonschema.exceptions import FormatError, ValidationError
from jsonschema.compat import iteritems
def patternProperties(validator, patternProperties, instance, schema):
if not validator.is_type(instance, "object"):
return
for pattern, subschema in iteritems(patternProperties):
for k, v in iteritems(instance):
if re.search(pattern, k):
for error in validator.descend(
v, subschema, path=k, schema_path=pattern,
):
yield error
def additionalProperties(validator, aP, instance, schema):
if not validator.is_type(instance, "object"):
return
extras = set(_utils.find_additional_properties(instance, schema))
if validator.is_type(aP, "object"):
for extra in extras:
for error in validator.descend(instance[extra], aP, path=extra):
yield error
elif not aP and extras:
error = "Additional properties are not allowed (%s %s unexpected)"
yield ValidationError(error % _utils.extras_msg(extras))
def items(validator, items, instance, schema):
if not validator.is_type(instance, "array"):
return
if validator.is_type(items, "object"):
for index, item in enumerate(instance):
for error in validator.descend(item, items, path=index):
yield error
else:
for (index, item), subschema in zip(enumerate(instance), items):
for error in validator.descend(
item, subschema, path=index, schema_path=index,
):
yield error
def additionalItems(validator, aI, instance, schema):
if (
not validator.is_type(instance, "array") or
validator.is_type(schema.get("items", {}), "object")
):
return
len_items = len(schema.get("items", []))
if validator.is_type(aI, "object"):
for index, item in enumerate(instance[len_items:], start=len_items):
for error in validator.descend(item, aI, path=index):
yield error
elif not aI and len(instance) > len(schema.get("items", [])):
error = "Additional items are not allowed (%s %s unexpected)"
yield ValidationError(
error %
_utils.extras_msg(instance[len(schema.get("items", [])):])
)
def minimum(validator, minimum, instance, schema):
if not validator.is_type(instance, "number"):
return
if schema.get("exclusiveMinimum", False):
failed = instance <= minimum
cmp = "less than or equal to"
else:
failed = instance < minimum
cmp = "less than"
if failed:
yield ValidationError(
"%r is %s the minimum of %r" % (instance, cmp, minimum)
)
def maximum(validator, maximum, instance, schema):
if not validator.is_type(instance, "number"):
return
if schema.get("exclusiveMaximum", False):
failed = instance >= maximum
cmp = "greater than or equal to"
else:
failed = instance > maximum
cmp = "greater than"
if failed:
yield ValidationError(
"%r is %s the maximum of %r" % (instance, cmp, maximum)
)
def multipleOf(validator, dB, instance, schema):
if not validator.is_type(instance, "number"):
return
if isinstance(dB, float):
quotient = instance / dB
failed = int(quotient) != quotient
else:
failed = instance % dB
if failed:
yield ValidationError("%r is not a multiple of %r" % (instance, dB))
def minItems(validator, mI, instance, schema):
if validator.is_type(instance, "array") and len(instance) < mI:
yield ValidationError("%r is too short" % (instance,))
def maxItems(validator, mI, instance, schema):
if validator.is_type(instance, "array") and len(instance) > mI:
yield ValidationError("%r is too long" % (instance,))
def uniqueItems(validator, uI, instance, schema):
if (
uI and
validator.is_type(instance, "array") and
not _utils.uniq(instance)
):
yield ValidationError("%r has non-unique elements" % instance)
def pattern(validator, patrn, instance, schema):
if (
validator.is_type(instance, "string") and
not re.search(patrn, instance)
):
yield ValidationError("%r does not match %r" % (instance, patrn))
def format(validator, format, instance, schema):
if validator.format_checker is not None:
try:
validator.format_checker.check(instance, format)
except FormatError as error:
yield ValidationError(error.message, cause=error.cause)
def minLength(validator, mL, instance, schema):
if validator.is_type(instance, "string") and len(instance) < mL:
yield ValidationError("%r is too short" % (instance,))
def maxLength(validator, mL, instance, schema):
if validator.is_type(instance, "string") and len(instance) > mL:
yield ValidationError("%r is too long" % (instance,))
def dependencies(validator, dependencies, instance, schema):
if not validator.is_type(instance, "object"):
return
for property, dependency in iteritems(dependencies):
if property not in instance:
continue
if validator.is_type(dependency, "object"):
for error in validator.descend(
instance, dependency, schema_path=property,
):
yield error
else:
dependencies = _utils.ensure_list(dependency)
for dependency in dependencies:
if dependency not in instance:
yield ValidationError(
"%r is a dependency of %r" % (dependency, property)
)
def enum(validator, enums, instance, schema):
if instance not in enums:
yield ValidationError("%r is not one of %r" % (instance, enums))
def ref(validator, ref, instance, schema):
resolve = getattr(validator.resolver, "resolve", None)
if resolve is None:
with validator.resolver.resolving(ref) as resolved:
for error in validator.descend(instance, resolved):
yield error
else:
scope, resolved = validator.resolver.resolve(ref)
validator.resolver.push_scope(scope)
try:
for error in validator.descend(instance, resolved):
yield error
finally:
validator.resolver.pop_scope()
def type_draft3(validator, types, instance, schema):
types = _utils.ensure_list(types)
all_errors = []
for index, type in enumerate(types):
if type == "any":
return
if validator.is_type(type, "object"):
errors = list(validator.descend(instance, type, schema_path=index))
if not errors:
return
all_errors.extend(errors)
else:
if validator.is_type(instance, type):
return
else:
yield ValidationError(
_utils.types_msg(instance, types), context=all_errors,
)
def properties_draft3(validator, properties, instance, schema):
if not validator.is_type(instance, "object"):
return
for property, subschema in iteritems(properties):
if property in instance:
for error in validator.descend(
instance[property],
subschema,
path=property,
schema_path=property,
):
yield error
elif subschema.get("required", False):
error = ValidationError("%r is a required property" % property)
error._set(
validator="required",
validator_value=subschema["required"],
instance=instance,
schema=schema,
)
error.path.appendleft(property)
error.schema_path.extend([property, "required"])
yield error
def disallow_draft3(validator, disallow, instance, schema):
for disallowed in _utils.ensure_list(disallow):
if validator.is_valid(instance, {"type" : [disallowed]}):
yield ValidationError(
"%r is disallowed for %r" % (disallowed, instance)
)
def extends_draft3(validator, extends, instance, schema):
if validator.is_type(extends, "object"):
for error in validator.descend(instance, extends):
yield error
return
for index, subschema in enumerate(extends):
for error in validator.descend(instance, subschema, schema_path=index):
yield error
def type_draft4(validator, types, instance, schema):
types = _utils.ensure_list(types)
if not any(validator.is_type(instance, type) for type in types):
yield ValidationError(_utils.types_msg(instance, types))
def properties_draft4(validator, properties, instance, schema):
if not validator.is_type(instance, "object"):
return
for property, subschema in iteritems(properties):
if property in instance:
for error in validator.descend(
instance[property],
subschema,
path=property,
schema_path=property,
):
yield error
def required_draft4(validator, required, instance, schema):
if not validator.is_type(instance, "object"):
return
for property in required:
if property not in instance:
yield ValidationError("%r is a required property" % property)
def minProperties_draft4(validator, mP, instance, schema):
if validator.is_type(instance, "object") and len(instance) < mP:
yield ValidationError(
"%r does not have enough properties" % (instance,)
)
def maxProperties_draft4(validator, mP, instance, schema):
if not validator.is_type(instance, "object"):
return
if validator.is_type(instance, "object") and len(instance) > mP:
yield ValidationError("%r has too many properties" % (instance,))
def allOf_draft4(validator, allOf, instance, schema):
for index, subschema in enumerate(allOf):
for error in validator.descend(instance, subschema, schema_path=index):
yield error
def oneOf_draft4(validator, oneOf, instance, schema):
subschemas = enumerate(oneOf)
all_errors = []
for index, subschema in subschemas:
errs = list(validator.descend(instance, subschema, schema_path=index))
if not errs:
first_valid = subschema
break
all_errors.extend(errs)
else:
yield ValidationError(
"%r is not valid under any of the given schemas" % (instance,),
context=all_errors,
)
more_valid = [s for i, s in subschemas if validator.is_valid(instance, s)]
if more_valid:
more_valid.append(first_valid)
reprs = ", ".join(repr(schema) for schema in more_valid)
yield ValidationError(
"%r is valid under each of %s" % (instance, reprs)
)
def anyOf_draft4(validator, anyOf, instance, schema):
all_errors = []
for index, subschema in enumerate(anyOf):
errs = list(validator.descend(instance, subschema, schema_path=index))
if not errs:
break
all_errors.extend(errs)
else:
yield ValidationError(
"%r is not valid under any of the given schemas" % (instance,),
context=all_errors,
)
def not_draft4(validator, not_schema, instance, schema):
if validator.is_valid(instance, not_schema):
yield ValidationError(
"%r is not allowed for %r" % (not_schema, instance)
)
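# Illustrative usage (not part of this module): these generator functions back
# the per-keyword checks on jsonschema's validator classes. A minimal sketch,
# assuming jsonschema's public Draft4Validator API:
#
#   from jsonschema import Draft4Validator
#
#   schema = {"type": "object", "required": ["name"],
#             "properties": {"name": {"type": "string"}}}
#   for error in Draft4Validator(schema).iter_errors({"name": 3}):
#       print(list(error.schema_path), error.message)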
|
|
# coding: utf-8
"""
MINDBODY Public API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: v6
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from swagger_client.models.location import Location # noqa: F401,E501
from swagger_client.models.program import Program # noqa: F401,E501
from swagger_client.models.session_type import SessionType # noqa: F401,E501
from swagger_client.models.staff import Staff # noqa: F401,E501
class Availability(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'id': 'int',
'staff': 'Staff',
'session_type': 'SessionType',
'programs': 'list[Program]',
'start_date_time': 'datetime',
'end_date_time': 'datetime',
'bookable_end_date_time': 'datetime',
'location': 'Location'
}
attribute_map = {
'id': 'Id',
'staff': 'Staff',
'session_type': 'SessionType',
'programs': 'Programs',
'start_date_time': 'StartDateTime',
'end_date_time': 'EndDateTime',
'bookable_end_date_time': 'BookableEndDateTime',
'location': 'Location'
}
def __init__(self, id=None, staff=None, session_type=None, programs=None, start_date_time=None, end_date_time=None, bookable_end_date_time=None, location=None): # noqa: E501
"""Availability - a model defined in Swagger""" # noqa: E501
self._id = None
self._staff = None
self._session_type = None
self._programs = None
self._start_date_time = None
self._end_date_time = None
self._bookable_end_date_time = None
self._location = None
self.discriminator = None
if id is not None:
self.id = id
if staff is not None:
self.staff = staff
if session_type is not None:
self.session_type = session_type
if programs is not None:
self.programs = programs
if start_date_time is not None:
self.start_date_time = start_date_time
if end_date_time is not None:
self.end_date_time = end_date_time
if bookable_end_date_time is not None:
self.bookable_end_date_time = bookable_end_date_time
if location is not None:
self.location = location
@property
def id(self):
"""Gets the id of this Availability. # noqa: E501
The ID of the availability. # noqa: E501
:return: The id of this Availability. # noqa: E501
:rtype: int
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this Availability.
The ID of the availability. # noqa: E501
:param id: The id of this Availability. # noqa: E501
:type: int
"""
self._id = id
@property
def staff(self):
"""Gets the staff of this Availability. # noqa: E501
Contains information about staff members. # noqa: E501
:return: The staff of this Availability. # noqa: E501
:rtype: Staff
"""
return self._staff
@staff.setter
def staff(self, staff):
"""Sets the staff of this Availability.
Contains information about staff members. # noqa: E501
:param staff: The staff of this Availability. # noqa: E501
:type: Staff
"""
self._staff = staff
@property
def session_type(self):
"""Gets the session_type of this Availability. # noqa: E501
Contains information about the types of sessions. # noqa: E501
:return: The session_type of this Availability. # noqa: E501
:rtype: SessionType
"""
return self._session_type
@session_type.setter
def session_type(self, session_type):
"""Sets the session_type of this Availability.
Contains information about the types of sessions. # noqa: E501
:param session_type: The session_type of this Availability. # noqa: E501
:type: SessionType
"""
self._session_type = session_type
@property
def programs(self):
"""Gets the programs of this Availability. # noqa: E501
Contains information about the programs. # noqa: E501
:return: The programs of this Availability. # noqa: E501
:rtype: list[Program]
"""
return self._programs
@programs.setter
def programs(self, programs):
"""Sets the programs of this Availability.
Contains information about the programs. # noqa: E501
:param programs: The programs of this Availability. # noqa: E501
:type: list[Program]
"""
self._programs = programs
@property
def start_date_time(self):
"""Gets the start_date_time of this Availability. # noqa: E501
The date and time the availability starts. # noqa: E501
:return: The start_date_time of this Availability. # noqa: E501
:rtype: datetime
"""
return self._start_date_time
@start_date_time.setter
def start_date_time(self, start_date_time):
"""Sets the start_date_time of this Availability.
The date and time the availability starts. # noqa: E501
:param start_date_time: The start_date_time of this Availability. # noqa: E501
:type: datetime
"""
self._start_date_time = start_date_time
@property
def end_date_time(self):
"""Gets the end_date_time of this Availability. # noqa: E501
The date and time the availability ends. # noqa: E501
:return: The end_date_time of this Availability. # noqa: E501
:rtype: datetime
"""
return self._end_date_time
@end_date_time.setter
def end_date_time(self, end_date_time):
"""Sets the end_date_time of this Availability.
The date and time the availability ends. # noqa: E501
:param end_date_time: The end_date_time of this Availability. # noqa: E501
:type: datetime
"""
self._end_date_time = end_date_time
@property
def bookable_end_date_time(self):
"""Gets the bookable_end_date_time of this Availability. # noqa: E501
The time of day that the last appointment can start. # noqa: E501
:return: The bookable_end_date_time of this Availability. # noqa: E501
:rtype: datetime
"""
return self._bookable_end_date_time
@bookable_end_date_time.setter
def bookable_end_date_time(self, bookable_end_date_time):
"""Sets the bookable_end_date_time of this Availability.
The time of day that the last appointment can start. # noqa: E501
:param bookable_end_date_time: The bookable_end_date_time of this Availability. # noqa: E501
:type: datetime
"""
self._bookable_end_date_time = bookable_end_date_time
@property
def location(self):
"""Gets the location of this Availability. # noqa: E501
Contains information about the location. # noqa: E501
:return: The location of this Availability. # noqa: E501
:rtype: Location
"""
return self._location
@location.setter
def location(self, location):
"""Sets the location of this Availability.
Contains information about the location. # noqa: E501
:param location: The location of this Availability. # noqa: E501
:type: Location
"""
self._location = location
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(Availability, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Availability):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
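# Illustrative usage sketch (not emitted by swagger-codegen): construct an
# Availability and serialize it with to_dict().
if __name__ == "__main__":
    example = Availability(id=1)
    print(example.to_dict())               # {'id': 1, 'staff': None, ...}
    print(example == Availability(id=1))   # True: __eq__ compares __dict__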
|
|
# Copyright 2010 Hakan Kjellerstrand hakank@gmail.com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Nonogram (Painting by numbers) in Google CP Solver.
http://en.wikipedia.org/wiki/Nonogram
'''
Nonograms or Paint by Numbers are picture logic puzzles in which cells in a
grid have to be colored or left blank according to numbers given at the
side of the grid to reveal a hidden picture. In this puzzle type, the
numbers measure how many unbroken lines of filled-in squares there are
in any given row or column. For example, a clue of '4 8 3' would mean
there are sets of four, eight, and three filled squares, in that order,
with at least one blank square between successive groups.
'''
See problem 12 at http://www.csplib.org/.
http://www.puzzlemuseum.com/nonogram.htm
Haskell solution:
http://twan.home.fmf.nl/blog/haskell/Nonograms.details
Brunetti, Sara & Daurat, Alain (2003)
'An algorithm reconstructing convex lattice sets'
http://geodisi.u-strasbg.fr/~daurat/papiers/tomoqconv.pdf
The Comet model (http://www.hakank.org/comet/nonogram_regular.co)
was a major influence when writing this Google CP solver model.
I have also blogged about the development of a Nonogram solver in Comet
using the regular constraint.
* 'Comet: Nonogram improved: solving problem P200 from 1:30 minutes
to about 1 second'
http://www.hakank.org/constraint_programming_blog/2009/03/comet_nonogram_improved_solvin_1.html
* 'Comet: regular constraint, a much faster Nonogram with the regular
constraint,
some OPL models, and more'
http://www.hakank.org/constraint_programming_blog/2009/02/comet_regular_constraint_a_muc_1.html
Compare with the other models:
* Gecode/R: http://www.hakank.org/gecode_r/nonogram.rb (using 'regexps')
* MiniZinc: http://www.hakank.org/minizinc/nonogram_regular.mzn
* MiniZinc: http://www.hakank.org/minizinc/nonogram_create_automaton.mzn
* MiniZinc: http://www.hakank.org/minizinc/nonogram_create_automaton2.mzn
Note: nonogram_create_automaton2.mzn is the preferred model
This model was created by Hakan Kjellerstrand (hakank@gmail.com)
Also see my other Google CP Solver models:
http://www.hakank.org/google_or_tools/
"""
import sys
from ortools.constraint_solver import pywrapcp
#
# Global constraint regular
#
# This is a translation of MiniZinc's regular constraint (defined in
# lib/zinc/globals.mzn), via the Comet code referred to above.
# All comments are from the MiniZinc code.
# '''
# The sequence of values in array 'x' (which must all be in the range 1..S)
# is accepted by the DFA of 'Q' states with input 1..S and transition
# function 'd' (which maps (1..Q, 1..S) -> 0..Q)) and initial state 'q0'
# (which must be in 1..Q) and accepting states 'F' (which all must be in
# 1..Q). We reserve state 0 to be an always failing state.
# '''
#
# x : IntVar array
# Q : number of states
# S : input_max
# d : transition matrix
# q0: initial state
# F : accepting states
def regular(x, Q, S, d, q0, F):
solver = x[0].solver()
assert Q > 0, 'regular: "Q" must be greater than zero'
assert S > 0, 'regular: "S" must be greater than zero'
# d2 is the same as d, except we add one extra transition for
# each possible input; each extra transition is from state zero
# to state zero. This allows us to continue even if we hit a
# non-accepted input.
# int d2[0..Q, 1..S]
d2 = []
for i in range(Q + 1):
row = []
for j in range(S):
if i == 0:
row.append(0)
else:
row.append(d[i - 1][j])
d2.append(row)
d2_flatten = [d2[i][j] for i in range(Q + 1) for j in range(S)]
# If x has index set m..n, then a[m-1] holds the initial state
# (q0), and a[i+1] holds the state we're in after processing
# x[i]. If a[n] is in F, then we succeed (ie. accept the
# string).
x_range = list(range(0, len(x)))
m = 0
n = len(x)
a = [solver.IntVar(0, Q + 1, 'a[%i]' % i) for i in range(m, n + 1)]
# Check that the final state is in F
solver.Add(solver.MemberCt(a[-1], F))
# First state is q0
solver.Add(a[m] == q0)
for i in x_range:
solver.Add(x[i] >= 1)
solver.Add(x[i] <= S)
# Determine a[i+1]: a[i+1] == d2[a[i], x[i]]
solver.Add(
a[i + 1] == solver.Element(d2_flatten, ((a[i]) * S) + (x[i] - 1)))
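# Note on the Element constraint used in regular() above: d2_flatten stores d2
# row-major with S columns, so the flattened index a[i]*S + (x[i] - 1) selects
# d2[a[i]][x[i] - 1]; row 0 is the always-failing state, which is why the
# states are offset by one.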
#
# Make a transition (automaton) matrix from a
# single pattern, e.g. [3,2,1]
#
def make_transition_matrix(pattern):
p_len = len(pattern)
num_states = p_len + sum(pattern)
# this is for handling 0-clues. It generates
# just the state 1,2
if num_states == 0:
num_states = 1
t_matrix = []
for i in range(num_states):
row = []
for j in range(2):
row.append(0)
t_matrix.append(row)
# convert pattern to a 0/1 pattern for easy handling of
# the states
tmp = [0 for i in range(num_states)]
c = 0
tmp[c] = 0
for i in range(p_len):
for j in range(pattern[i]):
c += 1
tmp[c] = 1
if c < num_states - 1:
c += 1
tmp[c] = 0
t_matrix[num_states - 1][0] = num_states
t_matrix[num_states - 1][1] = 0
for i in range(num_states):
if tmp[i] == 0:
t_matrix[i][0] = i + 1
t_matrix[i][1] = i + 2
else:
if i < num_states - 1:
if tmp[i + 1] == 1:
t_matrix[i][0] = 0
t_matrix[i][1] = i + 2
else:
t_matrix[i][0] = i + 2
t_matrix[i][1] = 0
# print 'The states:'
# for i in range(num_states):
# for j in range(2):
# print t_matrix[i][j],
# print
# print
return t_matrix
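# Hand-worked example (illustrative), using the encoding above where input 1
# means "blank" and input 2 means "filled": for the single clue [2] we get
# num_states = 3 and make_transition_matrix([2]) returns
#
#     [[1, 2],   # state 1: blank stays in 1, a fill starts the block -> 2
#      [0, 3],   # state 2: blank fails (state 0), second fill -> 3
#      [3, 0]]   # state 3: block finished, only blanks may follow
#
# The last state (3) is the accepting state used by check_rule() below.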
#
# check each rule by creating an automaton
# and regular
#
def check_rule(rules, y):
solver = y[0].solver()
r_len = sum([1 for i in range(len(rules)) if rules[i] > 0])
rules_tmp = []
for i in range(len(rules)):
if rules[i] > 0:
rules_tmp.append(rules[i])
transition_fn = make_transition_matrix(rules_tmp)
n_states = len(transition_fn)
input_max = 2
# Note: we cannot use 0 since it's the failing state
initial_state = 1
accepting_states = [n_states] # This is the last state
regular(y, n_states, input_max, transition_fn, initial_state,
accepting_states)
def main(rows, row_rule_len, row_rules, cols, col_rule_len, col_rules):
# Create the solver.
solver = pywrapcp.Solver('Regular test')
#
# data
#
#
# variables
#
board = {}
for i in range(rows):
for j in range(cols):
board[i, j] = solver.IntVar(1, 2, 'board[%i,%i]' % (i, j))
board_flat = [board[i, j] for i in range(rows) for j in range(cols)]
# Flattened board for labeling.
# This labeling was inspired by a suggestion from
# Pascal Van Hentenryck about my Comet nonogram model.
board_label = []
if rows * row_rule_len < cols * col_rule_len:
for i in range(rows):
for j in range(cols):
board_label.append(board[i, j])
else:
for j in range(cols):
for i in range(rows):
board_label.append(board[i, j])
#
# constraints
#
for i in range(rows):
check_rule([row_rules[i][j] for j in range(row_rule_len)],
[board[i, j] for j in range(cols)])
for j in range(cols):
check_rule([col_rules[j][k] for k in range(col_rule_len)],
[board[i, j] for i in range(rows)])
#
# solution and search
#
db = solver.Phase(board_label, solver.CHOOSE_FIRST_UNBOUND,
solver.ASSIGN_MIN_VALUE)
solver.NewSearch(db)
num_solutions = 0
while solver.NextSolution():
print()
num_solutions += 1
for i in range(rows):
row = [board[i, j].Value() - 1 for j in range(cols)]
row_pres = []
for j in row:
if j == 1:
row_pres.append('#')
else:
row_pres.append(' ')
print(' ', ''.join(row_pres))
print()
print(' ', '-' * cols)
if num_solutions >= 2:
print('2 solutions is enough...')
break
solver.EndSearch()
print()
print('num_solutions:', num_solutions)
print('failures:', solver.Failures())
print('branches:', solver.Branches())
print('WallTime:', solver.WallTime(), 'ms')
#
# Default problem
#
# From http://twan.home.fmf.nl/blog/haskell/Nonograms.details
# The lambda picture
#
rows = 12
row_rule_len = 3
row_rules = [[0, 0, 2], [0, 1, 2], [0, 1, 1], [0, 0, 2], [0, 0, 1], [0, 0, 3],
[0, 0, 3], [0, 2, 2], [0, 2, 1], [2, 2, 1], [0, 2, 3], [0, 2, 2]]
cols = 10
col_rule_len = 2
col_rules = [[2, 1], [1, 3], [2, 4], [3, 4], [0, 4], [0, 3], [0, 3], [0, 3],
[0, 2], [0, 2]]
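# Each rule list is zero-padded on the left to the fixed rule length; the zeros
# are stripped in check_rule(), so e.g. row_rules[0] == [0, 0, 2] means row 0
# contains a single block of two filled cells.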
if __name__ == '__main__':
if len(sys.argv) > 1:
file = sys.argv[1]
exec(compile(open(file).read(), file, 'exec'))
main(rows, row_rule_len, row_rules, cols, col_rule_len, col_rules)
|
|
"""Tag the sandbox for release, make source and doc tarballs.
Requires Python 2.6
Example of invocation (use to test the script):
python makerelease.py --platform=msvc6,msvc71,msvc80,msvc90,mingw -ublep 0.6.0 0.7.0-dev
When testing this script:
python makerelease.py --force --retag --platform=msvc6,msvc71,msvc80,mingw -ublep test-0.6.0 test-0.6.1-dev
Example of invocation when doing a release:
python makerelease.py 0.5.0 0.6.0-dev
Note: This was for Subversion. Now that we are in GitHub, we do not
need to build versioned tarballs anymore, so makerelease.py is defunct.
"""
import os.path
import subprocess
import sys
import doxybuild
import xml.etree.ElementTree as ElementTree
import shutil
import urllib2
import tempfile
import os
import time
from devtools import antglob, fixeol, tarball
import amalgamate
SVN_ROOT = 'https://jsoncpp.svn.sourceforge.net/svnroot/jsoncpp/'
SVN_TAG_ROOT = SVN_ROOT + 'tags/jsoncpp'
SCONS_LOCAL_URL = 'http://sourceforge.net/projects/scons/files/scons-local/1.2.0/scons-local-1.2.0.tar.gz/download'
SOURCEFORGE_PROJECT = 'jsoncpp'
def set_version( version ):
with open('version','wb') as f:
f.write( version.strip() )
def rmdir_if_exist( dir_path ):
if os.path.isdir( dir_path ):
shutil.rmtree( dir_path )
class SVNError(Exception):
pass
def svn_command( command, *args ):
cmd = ['svn', '--non-interactive', command] + list(args)
print 'Running:', ' '.join( cmd )
process = subprocess.Popen( cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT )
stdout = process.communicate()[0]
if process.returncode:
error = SVNError( 'SVN command failed:\n' + stdout )
error.returncode = process.returncode
raise error
return stdout
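# Example (illustrative): svn_command('status', '--xml') returns the captured
# XML output as a string; on a non-zero exit code it raises SVNError carrying
# the combined stdout/stderr and the return code.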
def check_no_pending_commit():
"""Checks that there is no pending commit in the sandbox."""
stdout = svn_command( 'status', '--xml' )
etree = ElementTree.fromstring( stdout )
msg = []
for entry in etree.getiterator( 'entry' ):
path = entry.get('path')
status = entry.find('wc-status').get('item')
if status != 'unversioned' and path != 'version':
msg.append( 'File "%s" has pending change (status="%s")' % (path, status) )
if msg:
        msg.insert(0, 'Pending changes to commit found in sandbox. Commit them first!' )
return '\n'.join( msg )
def svn_join_url( base_url, suffix ):
if not base_url.endswith('/'):
base_url += '/'
if suffix.startswith('/'):
suffix = suffix[1:]
return base_url + suffix
def svn_check_if_tag_exist( tag_url ):
"""Checks if a tag exist.
Returns: True if the tag exist, False otherwise.
"""
try:
list_stdout = svn_command( 'list', tag_url )
except SVNError, e:
if e.returncode != 1 or not str(e).find('tag_url'):
raise e
# otherwise ignore error, meaning tag does not exist
return False
return True
def svn_commit( message ):
"""Commit the sandbox, providing the specified comment.
"""
svn_command( 'ci', '-m', message )
def svn_tag_sandbox( tag_url, message ):
"""Makes a tag based on the sandbox revisions.
"""
svn_command( 'copy', '-m', message, '.', tag_url )
def svn_remove_tag( tag_url, message ):
"""Removes an existing tag.
"""
svn_command( 'delete', '-m', message, tag_url )
def svn_export( tag_url, export_dir ):
"""Exports the tag_url revision to export_dir.
Target directory, including its parent is created if it does not exist.
If the directory export_dir exist, it is deleted before export proceed.
"""
rmdir_if_exist( export_dir )
svn_command( 'export', tag_url, export_dir )
def fix_sources_eol( dist_dir ):
"""Set file EOL for tarball distribution.
"""
print 'Preparing exported source file EOL for distribution...'
prune_dirs = antglob.prune_dirs + 'scons-local* ./build* ./libs ./dist'
win_sources = antglob.glob( dist_dir,
includes = '**/*.sln **/*.vcproj',
prune_dirs = prune_dirs )
unix_sources = antglob.glob( dist_dir,
includes = '''**/*.h **/*.cpp **/*.inl **/*.txt **/*.dox **/*.py **/*.html **/*.in
sconscript *.json *.expected AUTHORS LICENSE''',
excludes = antglob.default_excludes + 'scons.py sconsign.py scons-*',
prune_dirs = prune_dirs )
for path in win_sources:
fixeol.fix_source_eol( path, is_dry_run = False, verbose = True, eol = '\r\n' )
for path in unix_sources:
fixeol.fix_source_eol( path, is_dry_run = False, verbose = True, eol = '\n' )
def download( url, target_path ):
"""Download file represented by url to target_path.
"""
f = urllib2.urlopen( url )
try:
data = f.read()
finally:
f.close()
fout = open( target_path, 'wb' )
try:
fout.write( data )
finally:
fout.close()
def check_compile( distcheck_top_dir, platform ):
cmd = [sys.executable, 'scons.py', 'platform=%s' % platform, 'check']
print 'Running:', ' '.join( cmd )
log_path = os.path.join( distcheck_top_dir, 'build-%s.log' % platform )
flog = open( log_path, 'wb' )
try:
process = subprocess.Popen( cmd,
stdout=flog,
stderr=subprocess.STDOUT,
cwd=distcheck_top_dir )
stdout = process.communicate()[0]
status = (process.returncode == 0)
finally:
flog.close()
return (status, log_path)
def write_tempfile( content, **kwargs ):
fd, path = tempfile.mkstemp( **kwargs )
f = os.fdopen( fd, 'wt' )
try:
f.write( content )
finally:
f.close()
return path
class SFTPError(Exception):
pass
def run_sftp_batch( userhost, sftp, batch, retry=0 ):
path = write_tempfile( batch, suffix='.sftp', text=True )
# psftp -agent -C blep,jsoncpp@web.sourceforge.net -batch -b batch.sftp -bc
cmd = [sftp, '-agent', '-C', '-batch', '-b', path, '-bc', userhost]
error = None
for retry_index in xrange(0, max(1,retry)):
heading = retry_index == 0 and 'Running:' or 'Retrying:'
print heading, ' '.join( cmd )
process = subprocess.Popen( cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT )
stdout = process.communicate()[0]
if process.returncode != 0:
error = SFTPError( 'SFTP batch failed:\n' + stdout )
else:
break
if error:
raise error
return stdout
def sourceforge_web_synchro( sourceforge_project, doc_dir,
user=None, sftp='sftp' ):
"""Notes: does not synchronize sub-directory of doc-dir.
"""
userhost = '%s,%s@web.sourceforge.net' % (user, sourceforge_project)
stdout = run_sftp_batch( userhost, sftp, """
cd htdocs
dir
exit
""" )
existing_paths = set()
collect = 0
for line in stdout.split('\n'):
line = line.strip()
if not collect and line.endswith('> dir'):
collect = True
elif collect and line.endswith('> exit'):
break
elif collect == 1:
collect = 2
elif collect == 2:
path = line.strip().split()[-1:]
if path and path[0] not in ('.', '..'):
existing_paths.add( path[0] )
upload_paths = set( [os.path.basename(p) for p in antglob.glob( doc_dir )] )
paths_to_remove = existing_paths - upload_paths
if paths_to_remove:
        print 'Removing the following files from web:'
print '\n'.join( paths_to_remove )
stdout = run_sftp_batch( userhost, sftp, """cd htdocs
rm %s
exit""" % ' '.join(paths_to_remove) )
print 'Uploading %d files:' % len(upload_paths)
batch_size = 10
upload_paths = list(upload_paths)
start_time = time.time()
for index in xrange(0,len(upload_paths),batch_size):
paths = upload_paths[index:index+batch_size]
file_per_sec = (time.time() - start_time) / (index+1)
remaining_files = len(upload_paths) - index
remaining_sec = file_per_sec * remaining_files
print '%d/%d, ETA=%.1fs' % (index+1, len(upload_paths), remaining_sec)
run_sftp_batch( userhost, sftp, """cd htdocs
lcd %s
mput %s
exit""" % (doc_dir, ' '.join(paths) ), retry=3 )
def sourceforge_release_tarball( sourceforge_project, paths, user=None, sftp='sftp' ):
userhost = '%s,%s@frs.sourceforge.net' % (user, sourceforge_project)
run_sftp_batch( userhost, sftp, """
mput %s
exit
""" % (' '.join(paths),) )
def main():
usage = """%prog release_version next_dev_version
Update 'version' file to release_version and commit.
Generates the document tarball.
Tags the sandbox revision with release_version.
Update 'version' file to next_dev_version and commit.
Performs an svn export of the release tag, and builds a source tarball.
Must be started in the project top directory.
Warning: --force should only be used when developing/testing the release script.
"""
from optparse import OptionParser
parser = OptionParser(usage=usage)
parser.allow_interspersed_args = False
parser.add_option('--dot', dest="dot_path", action='store', default=doxybuild.find_program('dot'),
help="""Path to GraphViz dot tool. Must be full qualified path. [Default: %default]""")
parser.add_option('--doxygen', dest="doxygen_path", action='store', default=doxybuild.find_program('doxygen'),
help="""Path to Doxygen tool. [Default: %default]""")
parser.add_option('--force', dest="ignore_pending_commit", action='store_true', default=False,
help="""Ignore pending commit. [Default: %default]""")
parser.add_option('--retag', dest="retag_release", action='store_true', default=False,
help="""Overwrite release existing tag if it exist. [Default: %default]""")
parser.add_option('-p', '--platforms', dest="platforms", action='store', default='',
help="""Comma separated list of platform passed to scons for build check.""")
parser.add_option('--no-test', dest="no_test", action='store_true', default=False,
help="""Skips build check.""")
parser.add_option('--no-web', dest="no_web", action='store_true', default=False,
help="""Do not update web site.""")
parser.add_option('-u', '--upload-user', dest="user", action='store',
help="""Sourceforge user for SFTP documentation upload.""")
parser.add_option('--sftp', dest='sftp', action='store', default=doxybuild.find_program('psftp', 'sftp'),
help="""Path of the SFTP compatible binary used to upload the documentation.""")
parser.enable_interspersed_args()
options, args = parser.parse_args()
if len(args) != 2:
        parser.error( 'release_version and next_dev_version must be provided on the command-line.' )
release_version = args[0]
next_version = args[1]
if not options.platforms and not options.no_test:
        parser.error( 'You must specify either the --platforms or the --no-test option.' )
if options.ignore_pending_commit:
msg = ''
else:
msg = check_no_pending_commit()
if not msg:
print 'Setting version to', release_version
set_version( release_version )
svn_commit( 'Release ' + release_version )
tag_url = svn_join_url( SVN_TAG_ROOT, release_version )
if svn_check_if_tag_exist( tag_url ):
if options.retag_release:
svn_remove_tag( tag_url, 'Overwriting previous tag' )
else:
                print 'Aborting, tag %s already exists. Use --retag to overwrite it!' % tag_url
sys.exit( 1 )
svn_tag_sandbox( tag_url, 'Release ' + release_version )
        print 'Generating doxygen documentation...'
## doc_dirname = r'jsoncpp-api-html-0.5.0'
## doc_tarball_path = r'e:\prg\vc\Lib\jsoncpp-trunk\dist\jsoncpp-api-html-0.5.0.tar.gz'
doc_tarball_path, doc_dirname = doxybuild.build_doc( options, make_release=True )
doc_distcheck_dir = 'dist/doccheck'
tarball.decompress( doc_tarball_path, doc_distcheck_dir )
doc_distcheck_top_dir = os.path.join( doc_distcheck_dir, doc_dirname )
export_dir = 'dist/export'
svn_export( tag_url, export_dir )
fix_sources_eol( export_dir )
source_dir = 'jsoncpp-src-' + release_version
source_tarball_path = 'dist/%s.tar.gz' % source_dir
print 'Generating source tarball to', source_tarball_path
tarball.make_tarball( source_tarball_path, [export_dir], export_dir, prefix_dir=source_dir )
amalgamation_tarball_path = 'dist/%s-amalgamation.tar.gz' % source_dir
print 'Generating amalgamation source tarball to', amalgamation_tarball_path
amalgamation_dir = 'dist/amalgamation'
amalgamate.amalgamate_source( export_dir, '%s/jsoncpp.cpp' % amalgamation_dir, 'json/json.h' )
amalgamation_source_dir = 'jsoncpp-src-amalgamation' + release_version
tarball.make_tarball( amalgamation_tarball_path, [amalgamation_dir],
amalgamation_dir, prefix_dir=amalgamation_source_dir )
# Decompress source tarball, download and install scons-local
distcheck_dir = 'dist/distcheck'
distcheck_top_dir = distcheck_dir + '/' + source_dir
print 'Decompressing source tarball to', distcheck_dir
rmdir_if_exist( distcheck_dir )
tarball.decompress( source_tarball_path, distcheck_dir )
scons_local_path = 'dist/scons-local.tar.gz'
print 'Downloading scons-local to', scons_local_path
download( SCONS_LOCAL_URL, scons_local_path )
print 'Decompressing scons-local to', distcheck_top_dir
tarball.decompress( scons_local_path, distcheck_top_dir )
# Run compilation
print 'Compiling decompressed tarball'
all_build_status = True
for platform in options.platforms.split(','):
print 'Testing platform:', platform
build_status, log_path = check_compile( distcheck_top_dir, platform )
print 'see build log:', log_path
print build_status and '=> ok' or '=> FAILED'
all_build_status = all_build_status and build_status
if not build_status:
print 'Testing failed on at least one platform, aborting...'
svn_remove_tag( tag_url, 'Removing tag due to failed testing' )
sys.exit(1)
if options.user:
if not options.no_web:
print 'Uploading documentation using user', options.user
sourceforge_web_synchro( SOURCEFORGE_PROJECT, doc_distcheck_top_dir, user=options.user, sftp=options.sftp )
print 'Completed documentation upload'
print 'Uploading source and documentation tarballs for release using user', options.user
sourceforge_release_tarball( SOURCEFORGE_PROJECT,
[source_tarball_path, doc_tarball_path],
user=options.user, sftp=options.sftp )
print 'Source and doc release tarballs uploaded'
else:
            print 'No upload user specified. Web site and download tarball were not uploaded.'
print 'Tarball can be found at:', doc_tarball_path
# Set next version number and commit
set_version( next_version )
svn_commit( 'Released ' + release_version )
else:
sys.stderr.write( msg + '\n' )
if __name__ == '__main__':
main()
|
|
"""
N.B. this is a v2 of the distortions_corrector started in Dec 2017 -MAR
This file contains the Distortions_corrector.
An object used to correct distortions using an interactive procedure
involving repeated measurements.
"""
from qcodes.instrument.base import Instrument
from qcodes.instrument.parameter import ManualParameter, InstrumentRefParameter
from qcodes.utils import validators as vals
import pycqed.analysis.fitting_models as fm
import pycqed.measurement.kernel_functions_ZI as kf
import numpy as np
import scipy.linalg
import scipy.interpolate as sc_intpl
from pycqed.analysis import fitting_models as fit_mods
import lmfit
import os.path
import datetime
import json
import logging
import PyQt5
from qcodes.plots.pyqtgraph import QtPlot
class Distortion_corrector(Instrument):
def __init__(self, name,
nr_plot_points: int=1000,
sampling_rate: float=2.4e9,
auto_save_plots: bool=True, **kw):
'''
Instantiates an object.
Args:
            sampling_rate (float):
                Sampling rate (in Hz) assumed for the waveforms; stored in
                self.cfg_sampling_rate().
            auto_save_plots (bool):
                If True, plots are saved automatically during the
                interactive loop.
nr_plot_points (int):
Number of points of the waveform that are plotted. Can be
changed in self.cfg_nr_plot_points().
'''
super().__init__(name, **kw)
# Initialize instance variables
# Plotting
self._y_min = 0
self._y_max = 1
self._stop_idx = -1
self._start_idx = 0
self._t_start_loop = 0 # sets x range for plotting during loop
self._t_stop_loop = 30e-6
self.add_parameter('cfg_nr_plot_points',
initial_value=nr_plot_points,
parameter_class=ManualParameter)
self.sampling_rate = sampling_rate
self.add_parameter('cfg_sampling_rate',
initial_value=sampling_rate,
parameter_class=ManualParameter)
self.add_parameter('instr_dist_kern',
parameter_class=InstrumentRefParameter)
# Files
self.filename = ''
# where traces and plots are saved
# self.data_dir = self.kernel_object.kernel_dir()
self._iteration = 0
self.auto_save_plots = auto_save_plots
# Data
self.waveform = []
self.time_pts = []
self.new_step = []
# Fitting
self.known_fit_models = ['exponential', 'high-pass', 'spline']
self.fit_model = None
self.edge_idx = None
self.fit_res = None
self.predicted_waveform = None
# Default fit model used in the interactive loop
self._fit_model_loop = 'exponential'
self._loop_helpstring = str(
'h: Print this help.\n'
'q: Quit the loop.\n'
'm: Remeasures the trace. \n'
'p <pars>:\n'
' Print the parameters of the last fit if pars not given.\n'
' If pars are given in the form of JSON string, \n'
' e.g., {"parA": a, "parB": b} the parameters of the last\n'
            ' fit are updated with those provided.\n'
's <filename>:\n'
' Save the current plot to "filename.png".\n'
'model <name>:\n'
' Choose the fit model that is used.\n'
' Available models:\n'
' ' + str(self.known_fit_models) + '\n'
'xrange <min> <max>:\n'
' Set the x-range of the plot to (min, max). The points\n'
' outside this range are not plotted. The number of\n'
' points plotted in the given interval is fixed to\n'
' self.cfg_nr_plot_points() (default=1000).\n'
'square_amp <amp> \n'
' Set the square_amp used to normalize measured waveforms.\n'
' If amp = "?" the current square_amp is printed.')
# Make window for plots
self.vw = QtPlot(window_title=name, figsize=(600, 400))
# def load_kernel_file(self, filename):
# '''
# Loads kernel dictionary (containing kernel and metadata) from a JSON
# file. This function looks only in the directory
# self.kernel_object.kernel_dir() for the file.
# Returns a dictionary of the kernel and metadata.
# '''
# with open(os.path.join(self.kernel_object.kernel_dir(),
# filename)) as infile:
# data = json.load(infile)
# return data
# def save_kernel_file(self, kernel_dict, filename):
# '''
# Saves kernel dictionary (containing kernel and metadata) to a JSON
# file in the directory self.kernel_object.kernel_dir().
# '''
# directory = self.kernel_object.kernel_dir()
# if not os.path.exists(directory):
# os.makedirs(directory)
# with open(os.path.join(directory, filename),
# 'w') as outfile:
# json.dump(kernel_dict, outfile, indent=True, sort_keys=True)
def save_plot(self, filename):
try:
directory = self.kernel_object.kernel_dir()
if not os.path.exists(directory):
os.makedirs(directory)
# FIXME: saving disabled as it is currently broken.
# self.vw.save(os.path.join(self.kernel_object.kernel_dir(),
# filename))
except Exception as e:
logging.warning('Could not save plot.')
# def open_new_correction(self, kernel_length, AWG_sampling_rate, name):
# '''
# Opens a new correction with name 'filename', i.e. initializes the
# combined kernel to a Dirac delta and empties kernel_list of the
# kernel object associated with self.
# Args:
# kernel_length (float):
# Length of the corrections kernel in s.
# AWG_sampling_rate (float):
# Sampling rate of the AWG generating the flux pulses in Hz.
# name (string):
# Name for the new kernel. The files will be named after
# this, but with different suffixes (e.g. '_combined.json').
# '''
# self.kernel_length = int(kernel_length * AWG_sampling_rate)
# self.filename = name
# self._iteration = 0
# # Initialize kernel to Dirac delta
# init_ker = np.zeros(self.kernel_length)
# init_ker[0] = 1
# self.kernel_combined_dict = {
# 'metadata': {}, # dictionary of kernel dictionaries
# 'kernel': list(init_ker),
# 'iteration': 0
# }
# self.save_kernel_file(self.kernel_combined_dict,
# '{}_combined.json'.format(self.filename))
# # Configure kernel object
# self.kernel_object.add_kernel_to_kernel_list(
# '{}_combined.json'.format(self.filename))
# def resume_correction(self, filename):
# '''
# Loads combined kernel from the specified file and prepares for adding
# new corrections to that kernel.
# '''
# # Remove '_combined.json' from filename
# self.filename = '_'.join(filename.split('_')[:-1])
# self.kernel_combined_dict = self.load_kernel_file(filename)
# self._iteration = self.kernel_combined_dict['iteration']
# self.kernel_length = len(self.kernel_combined_dict['kernel'])
# # Configure kernel object
# self.kernel_object.kernel_list([])
# self.kernel_object.add_kernel_to_kernel_list(filename)
# def empty_kernel_list(self):
# self.kernel_object.kernel_list([])
def measure_trace(self, verbose=True):
raise NotImplementedError(
'Base class is not attached to physical instruments and does not '
'implement measurements.')
def fit_exp_model(self, start_time_fit, end_time_fit):
'''
Fits an exponential of the form
A * exp(-t/tau) + offset
to the last trace that was measured (self.waveform).
The fit model and result are saved in self.fit_model and self.fit_res,
respectively. The new predistortion kernel and information about the
fit is stored in self.new_kernel_dict.
Args:
start_time_fit (float): start of the fitted interval
end_time_fit (float): end of the fitted interval
'''
self._start_idx = np.argmin(np.abs(self.time_pts - start_time_fit))
self._stop_idx = np.argmin(np.abs(self.time_pts - end_time_fit))
# Prepare the fit model
self.fit_model = lmfit.Model(fm.gain_corr_ExpDecayFunc)
self.fit_model.set_param_hint('gc',
value=self.waveform[self._stop_idx],
vary=True)
self.fit_model.set_param_hint('amp',
value=(self.waveform[self._start_idx] -
self.waveform[self._stop_idx]),
vary=True)
self.fit_model.set_param_hint('tau',
value=end_time_fit-start_time_fit,
vary=True)
params = self.fit_model.make_params()
# Do the fit
fit_res = self.fit_model.fit(
data=self.waveform[self._start_idx:self._stop_idx],
t=self.time_pts[self._start_idx:self._stop_idx],
params=params)
self.fitted_waveform = fit_res.eval(
t=self.time_pts[self._start_idx:self._stop_idx])
# Analytic form of the predistorted square pulse (input that creates a
# square pulse at the output)
amp = fit_res.best_values['amp']
tau = fit_res.best_values['tau']
# Check if parameters are physical and print warnings if not
if tau < 0:
print('Warning: unphysical tau = {} (expect tau > 0).'
.format(tau))
# Save the results
self.fit_res = fit_res
self.predicted_waveform = kf.exponential_decay_correction(
self.waveform, tau=tau, amp=amp,
sampling_rate=self.scope_sampling_rate)
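    # Usage sketch (illustrative): after measure_trace(), calling e.g.
    # self.fit_exp_model(1e-6, 10e-6) fits the settling behaviour between
    # 1 us and 10 us; the fitted tau and amp are then passed to
    # kf.exponential_decay_correction to compute self.predicted_waveform,
    # the predistorted input expected to yield a flat step at the output.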
def fit_high_pass(self, start_time_fit, end_time_fit):
'''
Fits a model for a simple RC high-pass
exp(-t/tau), tau = RC
to the last trace that was measured (self.waveform).
The fit model and result are saved in self.fit_model and self.fit_res,
respectively. The new predistortion kernel and information about the
fit is stored in self.new_kernel_dict.
Args:
start_time_fit (float): start of the fitted interval
end_time_fit (float): end of the fitted interval
'''
self._start_idx = np.argmin(np.abs(self.time_pts - start_time_fit))
self._stop_idx = np.argmin(np.abs(self.time_pts - end_time_fit))
# Prepare the fit model: exponential, where only tau is varied
self.fit_model = lmfit.Model(fm.ExpDecayFunc)
self.fit_model.set_param_hint('tau',
value=end_time_fit-start_time_fit,
vary=True)
self.fit_model.set_param_hint('offset',
value=0,
vary=False)
self.fit_model.set_param_hint('amplitude',
value=1,
vary=True)
self.fit_model.set_param_hint('n', value=1, vary=False)
params = self.fit_model.make_params()
# Do the fit
fit_res = self.fit_model.fit(
data=self.waveform[self._start_idx:self._stop_idx],
t=self.time_pts[self._start_idx:self._stop_idx],
params=params)
self.fitted_waveform = fit_res.eval(
t=self.time_pts[self._start_idx:self._stop_idx])
tau = fit_res.best_values['tau']
# Check if parameters are physical and print warnings if not
if tau < 0:
print('Warning: unphysical tau = {} (expect tau > 0).'
.format(tau))
# Save the fit results and predicted correction
self.fit_res = fit_res
self.predicted_waveform = kf.bias_tee_correction(
self.waveform, tau=tau, sampling_rate=self.scope_sampling_rate)
def fit_spline(self, start_time_fit, end_time_fit, s=0.001,
weight_tau='inf'):
'''
Fit the data using a spline interpolation.
The fit model and result are saved in self.fit_model and self.fit_res,
respectively. The new predistortion kernel and information about the
fit is stored in self.new_kernel_dict.
Args:
start_time_fit (float):
Start of the fitted interval.
end_time_fit (float):
End of the fitted interval.
s (float):
Smoothing condition for the spline. See documentation on
scipy.interpolate.splrep for more information.
weight_tau (float or 'auto'):
The points are weighted by a decaying exponential with
time constant weight_tau.
If this is 'auto' the time constant is chosen to be
end_time_fit.
If this is 'inf' all weights are set to 1.
Smaller weight means the spline can have a larger
distance from this point. See documentation on
scipy.interpolate.splrep for more information.
'''
self._start_idx = np.argmin(np.abs(self.time_pts - start_time_fit))
self._stop_idx = np.argmin(np.abs(self.time_pts - end_time_fit))
if weight_tau == 'auto':
weight_tau = end_time_fit
if weight_tau == 'inf':
splWeights = np.ones(self._stop_idx - self._start_idx)
else:
splWeights = np.exp(
-self.time_pts[self._start_idx:self._stop_idx] / weight_tau)
splTuple = sc_intpl.splrep(
x=self.time_pts[self._start_idx:self._stop_idx],
y=self.waveform[self._start_idx:self._stop_idx],
w=splWeights,
s=s)
splStep = sc_intpl.splev(
self.time_pts[self._start_idx:self._stop_idx],
splTuple, ext=3)
# Pad step response with avg of last 10 points (assuming the user has
# chosen the range such that the response has become flat)
splStep = np.concatenate((splStep,
np.ones(self.kernel_length - len(splStep)) *
np.mean(splStep[-10:])))
self.fit_res = None
self.fit_model = None
self.fitted_waveform = splStep[:self._stop_idx-self._start_idx]
# Calculate the kernel and invert it.
h = np.empty_like(splStep)
h[0] = splStep[0]
h[1:] = splStep[1:] - splStep[:-1]
filterMatrix = np.zeros((len(h), len(h)))
for n in range(len(h)):
for m in range(n+1):
filterMatrix[n, m] = h[n - m]
new_ker = scipy.linalg.inv(filterMatrix)[:, 0]
self.new_step = np.convolve(new_ker,
np.ones(len(splStep)))[:len(splStep)]
self.new_kernel_dict = {
'name': self.filename + '_' + str(self._iteration),
'filter_params': {},
'fit': {
'model': 'spline',
's': s,
'weight_tau': weight_tau
},
'kernel': list(new_ker)
}
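    # Note on the kernel inversion above: filterMatrix is a lower-triangular
    # convolution (Toeplitz) matrix built from the impulse response h, and the
    # first column of its inverse is the inverting kernel. An equivalent
    # standalone sketch (illustrative only):
    #     h = np.diff(np.concatenate(([0.0], splStep)))
    #     M = scipy.linalg.toeplitz(h, np.zeros_like(h))
    #     new_ker = scipy.linalg.inv(M)[:, 0]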
def plot_trace(self, start_time=-.5e-6, stop_time=10e-6, nr_plot_pts=4000,
save_y_range=True):
'''
Plot last trace that was measured (self.waveform).
Args:
start_time (float): Start of the plotted interval.
stop_time (float): End of the plotted interval.
save_y_range (bool):
Keep the current y-range of the plot.
'''
start_idx = np.argmin(np.abs(self.time_pts - start_time))
stop_idx = np.argmin(np.abs(self.time_pts - stop_time))
step = max(
int(len(self.time_pts[start_idx:stop_idx]) // nr_plot_pts), 1)
# Save the y-range of the plot if a window is open.
err = False
try:
x_range, y_range = self.vw.subplots[0].getViewBox().viewRange()
except Exception as e:
print(e)
err = True
plot_t_pts = self.time_pts[:len(self.waveform)]
# Plot
self.vw.clear()
self.vw.add(x=plot_t_pts[start_idx:stop_idx:step],
y=self.waveform[start_idx:stop_idx:step],
symbol='o', symbolSize=5, name='Measured waveform')
if self.predicted_waveform is not None:
start_idx = np.argmin(np.abs(self.time_pts - start_time))
stop_idx = np.argmin(np.abs(self.time_pts - stop_time))
step = max(
int(len(self.time_pts[start_idx:stop_idx]) // nr_plot_pts), 1)
self.vw.add(x=self.time_pts[start_idx:stop_idx:step],
y=self.predicted_waveform[start_idx:stop_idx:step],
name='Predicted waveform')
self.vw.add(x=[start_time, stop_time],
y=[self.waveform[stop_idx]]*2,
color=(150, 150, 150))
self.vw.add(x=[start_time, stop_time],
y=[0]*2,
color=(150, 150, 150))
self.vw.add(x=[start_time, stop_time],
y=[-self.waveform[stop_idx]]*2,
color=(150, 150, 150))
# Set the y-range to previous value
if save_y_range and not err:
self.vw.subplots[0].setYRange(y_range[0], y_range[1])
# Labels need to be set in the end, else they don't show sometimes
self.vw.subplots[0].getAxis('bottom').setLabel('t', 's')
self.vw.subplots[0].getAxis('left').setLabel('Amplitude', 'V')
def plot_fit(self, start_time=0, stop_time=10e-6, save_y_range=True,
nr_plot_pts=4000):
'''
Plot last trace that was measured (self.waveform) and the latest fit.
Args:
start_time (float): Start of the plotted interval.
stop_time (float): End of the plotted interval.
save_y_range (bool):
Keep the current y-range of the plot.
'''
self.plot_trace(start_time=start_time, stop_time=stop_time,
save_y_range=save_y_range, nr_plot_pts=nr_plot_pts)
self.vw.add(x=self.time_pts[self._start_idx:self._stop_idx],
y=self.fitted_waveform,
color = '#2ca02c',
name='Fit')
# Labels need to be set in the end, else they don't show sometimes
self.vw.subplots[0].getAxis('bottom').setLabel('t', 's')
self.vw.subplots[0].getAxis('left').setLabel('amp', 'V')
def test_new_kernel(self):
'''
        Set the latest fitted model as 'filter_model_XX' (XX = current
        iteration) on the distortion kernel instrument (self.instr_dist_kern).
'''
self._iteration
dist_kern = self.instr_dist_kern.get_instr()
if self._fit_model_loop == 'high-pass':
tau = self.fit_res.best_values['tau']
model = {'model': 'high-pass', 'params': {'tau':tau}}
dist_kern.set('filter_model_{:02}'.format(self._iteration), model)
elif self._fit_model_loop == 'exponential':
tau = self.fit_res.best_values['tau']
amp = self.fit_res.best_values['amp']
model = {'model': 'exponential', 'params':{'tau':tau, 'amp':amp}}
dist_kern.set('filter_model_{:02}'.format(self._iteration), model)
else:
raise NotImplementedError
def apply_new_kernel(self):
'''
The correction number (self._iteration) is incremented, such that
the kernel file for the latest distortion is not overwritten anymore.
'''
self._iteration += 1 # This correction is considered completed.
def discard_new_kernel(self):
'''
        Removes the last kernel that was added from the distortions.
'''
dist_kern = self.instr_dist_kern.get_instr()
dist_kern.set('filter_model_{:02}'.format(self._iteration), {})
def interactive_loop(self):
'''
Starts interactive loop to iteratively add corrections.
'''
# Loop:
# 1. Measure trace and plot
# 2. Fit and plot
# 3. Test correction and plot
# -> discard: back to 2.
# -> approve: continue with 4.
# 4. Apply correction
# -> quit?
# -> back to 2.
print('********\n'
'Interactive room-temperature distortion corrections\n'
'********\n'
'At any prompts you may use these commands:\n'
+ self._loop_helpstring)
while True:
inp = input('New kernel? ([y]/n) ')
if inp in ['y', 'n', '']:
break
if inp == 'y':
print('Resetting all kernels in kernel object')
self.instr_dist_kern.get_instr().reset_kernels()
self._iteration = 0
else:
# Continue working with current kernel; determine how many filters
# already exist.
self._iteration = self.instr_dist_kern.get_instr().get_first_empty_filter()
print('Starting from iteration {}'.format(self._iteration))
# 1. Measure trace and plot
self.measure_trace()
# Set up initial plot range
self._t_start_loop = 0
self._t_stop_loop = self.time_pts[-1]
self.plot_trace(self._t_start_loop, self._t_stop_loop,
save_y_range=False, nr_plot_pts=self.cfg_nr_plot_points())
# LOOP STARTS HERE
# Default fit model used, high-pass is typically the first model
self._fit_model_loop = 'high-pass'
while True:
print('\n-- Correction number {} --'.format(self._iteration))
print('Current fit model: {}'.format(self._fit_model_loop))
# 2. Fit and plot
repeat = True
while repeat:
inp = input('Fit range: ')
repeat, quit = self._handle_interactive_input(inp, 'any')
if not quit and not repeat:
try:
inp = inp.split(' ')
fit_start = float(inp[0])
fit_stop = float(inp[1])
except Exception as e:
print('input format: "t_start t_stop"')
repeat = True
if quit:
# Exit loop
break
if self._fit_model_loop == 'exponential':
self.fit_exp_model(fit_start, fit_stop)
elif self._fit_model_loop == 'high-pass':
self.fit_high_pass(fit_start, fit_stop)
elif self._fit_model_loop == 'spline':
self.fit_spline(fit_start, fit_stop)
self.plot_fit(self._t_start_loop, self._t_stop_loop,
nr_plot_pts=self.cfg_nr_plot_points())
repeat = True
while repeat:
inp = input('Accept? ([y]/n) ').strip()
repeat, quit = self._handle_interactive_input(inp,
['y', 'n', ''])
if quit:
# Exit loop
break
elif inp != 'y' and inp != '':
# Go back to 2.
continue
# Fit was accepted -> save plot
if self.auto_save_plots:
self.save_plot('fit_{}.png'.format(self._iteration))
# 3. Test correction and plot
# Save last data, in case new distortion is rejected.
previous_t = self.time_pts
previous_wave = self.waveform
print('Testing new correction.')
self.test_new_kernel()
self.measure_trace()
self.plot_trace(self._t_start_loop, self._t_stop_loop,
nr_plot_pts=self.cfg_nr_plot_points())
repeat = True
while repeat:
inp = input('Accept? ([y]/n) ').strip()
repeat, quit = self._handle_interactive_input(inp,
['y', 'n', ''])
if quit:
# Exit loop
break
elif inp != 'y' and inp != '':
print('Discarding new correction.')
self.discard_new_kernel()
self.time_pts = previous_t
self.waveform = previous_wave
self.plot_trace(self._t_start_loop, self._t_stop_loop,
nr_plot_pts=self.cfg_nr_plot_points())
# Go back to 2.
continue
# Correction was accepted -> save plot
if self.auto_save_plots:
self.save_plot('trace_{}.png'.format(self._iteration))
# 4. Apply correction
print('Applying new correction.')
self.apply_new_kernel()
def _handle_interactive_input(self, inp, valid_inputs):
'''
Handles input from user in an interactive loop session. Takes
action in special cases.
Args:
inp (string): Input given by the user.
valid_inputs (list of strings or 'any'):
List of inputs that are accepted. Any input is
accepted if this is 'any'.
Returns:
repeat (bool): Should the input prompt be repeated.
quit (bool): Should the loop be exited.
'''
repeat = True
quit = False
inp_elements = inp.split(' ')
if (inp_elements[0].lower() == 'xrange'
and len(inp_elements) == 3):
self._t_start_loop = float(inp_elements[1])
self._t_stop_loop = float(inp_elements[2])
if len(self.vw.traces) == 4: # 3 grey lines + 1 data trace
# Only data plotted
self.plot_trace(self._t_start_loop, self._t_stop_loop,
nr_plot_pts=self.cfg_nr_plot_points())
else:
# Fit also plotted
self.plot_fit(self._t_start_loop, self._t_stop_loop,
nr_plot_pts=self.cfg_nr_plot_points())
elif inp_elements[0] == 'm':
# Remeasures the trace
print('Remeasuring trace')
self.measure_trace()
self.plot_trace(self._t_start_loop, self._t_stop_loop,
save_y_range=False, nr_plot_pts=self.cfg_nr_plot_points())
elif inp_elements[0] == 'h':
print(self._loop_helpstring)
elif inp_elements[0] == 'q':
self.print_summary()
quit = True
repeat = False
elif inp_elements[0] == 'p':
if len(inp_elements) == 1:
try:
# for param, val in self.new_kernel_dict['fit'].items():
# print('{} = {}'.format(param, val))
print(self.fit_res.best_values)
except KeyError:
print('No fit has been done yet!')
else:
self._update_latest_params(json_string=inp[1:])
        elif (inp_elements[0] == 's' and len(inp_elements) == 2):
self.save_plot('{}.png'.format(inp_elements[1]))
print('Current plot saved.')
elif (inp_elements[0] == 'model' and len(inp_elements) == 2):
if inp_elements[1] in self.known_fit_models:
self._fit_model_loop = str(inp_elements[1])
print('Using fit model "{}".'.format(self._fit_model_loop))
else:
print('Model "{}" unknown. Please choose from {}.'
.format(inp_elements[1], self.known_fit_models))
elif valid_inputs != 'any':
if inp not in valid_inputs:
print('Valid inputs: {}'.format(valid_inputs))
else:
repeat = False
else:
# Any input ok
repeat = False
return repeat, quit
def _update_latest_params(self, json_string):
"""
Uses a JSON formatted string to update the parameters of the
latest fit.
        For each model this does the following:
        1. update the 'fit' dict
        2. calculate the new "fit"
        3. plot the new "fit"
        Currently only supported for the high-pass and exponential models.
"""
try:
par_dict = json.loads(json_string)
except Exception as e:
print(e)
return
# 1. update the 'fit' dict
self.fit_res.best_values.update(par_dict)
self.fitted_waveform = self.fit_res.eval(
t=self.time_pts[self._start_idx:self._stop_idx],
tau=self.fit_res.best_values['tau'])
if self._fit_model_loop == 'high-pass':
self.predicted_waveform = kf.bias_tee_correction(
self.waveform, tau=self.fit_res.best_values['tau'],
sampling_rate=self.scope_sampling_rate)
elif self._fit_model_loop == 'exponential':
self.predicted_waveform = kf.exponential_decay_correction(
self.waveform, tau=self.fit_res.best_values['tau'],
amp=self.fit_res.best_values['amp'],
sampling_rate=self.scope_sampling_rate)
# The fit results still have to be updated
self.plot_fit(self._t_start_loop, self._t_stop_loop,
nr_plot_pts=self.cfg_nr_plot_points())
def print_summary(self):
'''
Prints a summary of all corrections that have been applied.
'''
self.instr_dist_kern.get_instr().print_overview()
def _set_square_amp(self, square_amp: float):
old_square_amp = self.square_amp
self.square_amp = square_amp
if len(self.waveform) > 0:
self.waveform = self.waveform*old_square_amp/self.square_amp
self.plot_trace(self._t_start_loop, self._t_stop_loop,
nr_plot_pts=self.cfg_nr_plot_points())
print('Updated square amp from {} to {}'.format(old_square_amp,
square_amp))
class Dummy_distortion_corrector(Distortion_corrector):
def measure_trace(self, verbose=True):
sampling_rate = 5e9
# Generate some dummy square wave
self.raw_waveform = np.concatenate([np.zeros(100), np.ones(50000),
np.zeros(1000)])
noise = np.random.rand(len(self.raw_waveform)) * 0.02
self.raw_waveform += noise
self.raw_waveform = np.convolve(
self.raw_waveform, self.kernel_object.get_decay_kernel_1())
self.raw_time_pts = np.arange(len(self.raw_waveform))/sampling_rate
# Normalize waveform and find rising edge
self.waveform = self.detect_edge_and_normalize_wf(self.raw_waveform)
self.time_pts = np.arange(len(self.waveform))/sampling_rate
class RT_distortion_corrector_AWG8(Distortion_corrector):
def __init__(self, name, measure_scope_trace,
nr_plot_points: int=1000, **kw):
'''
Instantiates an object.
Note: Sampling rate of the scope is assumed to be 5 GHz. Sampling rate
of the AWG is assumed to be 1 GHz.
Args:
            measure_scope_trace (callable):
                Function that measures a scope trace and returns it as
                (time_pts, waveform).
nr_plot_points (int):
Number of points of the waveform that are plotted. Can be
changed in self.cfg_nr_plot_points().
'''
super().__init__(name, sampling_rate=2.4e9,
nr_plot_points=nr_plot_points, **kw)
self.add_parameter('instr_flux_lutman',
parameter_class=InstrumentRefParameter)
self.measure_scope_trace = measure_scope_trace
self.raw_waveform = []
self.raw_time_pts = []
def measure_trace(self, verbose=True):
'''
Measure a trace with the oscilloscope.
Raw data is saved to self.raw_time_pts and self.raw_waveform.
Data clipped to start at the rising edge is saved to self.time_pts
and self.waveform.
N.B. This measure trace method makes two assumptions
1. The scope is properly configured.
2. The CCLight is running the correct program that triggers the
AWG8.
'''
# Upload waveform
self.instr_flux_lutman.get_instr().load_waveform_onto_AWG_lookuptable(
'square', regenerate_waveforms=True)
if verbose:
print('Measuring trace...')
self.raw_time_pts, self.waveform = self.measure_scope_trace()
# Find rising edge
        if self.edge_idx is None:
            # this is because finding the edge is usually most robust in the
            # beginning
self.edge_idx = detect_edge(self.waveform, edge_level=0.02)
self.time_pts = self.raw_time_pts - self.raw_time_pts[self.edge_idx]
self.scope_sampling_rate = 1/(self.time_pts[1]-self.time_pts[0])
class RT_distortion_corrector_QWG(Distortion_corrector):
def __init__(self, name, measure_scope_trace,
nr_plot_points: int=1000, **kw):
'''
Instantiates an object.
Note: Sampling rate of the scope is assumed to be 5 GHz. Sampling rate
of the AWG is assumed to be 1 GHz.
Args:
            measure_scope_trace (callable):
                Function that measures a scope trace and returns it as
                (time_pts, waveform).
nr_plot_points (int):
Number of points of the waveform that are plotted. Can be
changed in self.cfg_nr_plot_points().
'''
super().__init__(name, sampling_rate=1e9,
nr_plot_points=nr_plot_points, **kw)
self.add_parameter('instr_flux_lutman',
parameter_class=InstrumentRefParameter)
self.measure_scope_trace = measure_scope_trace
self.raw_waveform = []
self.raw_time_pts = []
self._edge_for_trace = 0.05
def measure_trace(self, verbose=True):
'''
Measure a trace with the oscilloscope.
Raw data is saved to self.raw_time_pts and self.raw_waveform.
Data clipped to start at the rising edge is saved to self.time_pts
and self.waveform.
N.B. This measure trace method makes two assumptions
1. The scope is properly configured.
2. The CCLight is running the correct program that triggers the
AWG8.
'''
# Upload waveform
self.instr_flux_lutman.get_instr().load_waveform_onto_AWG_lookuptable(
'square', regenerate_waveforms=True)
if verbose:
print('Measuring trace...')
self.raw_time_pts, self.waveform = self.measure_scope_trace()
# Find rising edge
        if self.edge_idx is None:
            # this is because finding the edge is usually most robust in the
            # beginning
self.edge_idx = detect_edge(self.waveform, edge_level=self._edge_for_trace)
self.time_pts = self.raw_time_pts - self.raw_time_pts[self.edge_idx]
self.scope_sampling_rate = 1/(self.time_pts[1]-self.time_pts[0])
# def detect_edge(y, edge_level=0.1):
# """
# Trivial edge detection algortihm
# """
# edge_idx = -1
# abs_edge_change = (np.max(y) - np.min(y))*edge_level
# for i in range(len(y) - 1):
# if (y[i+1] - y[i]) > abs_edge_change:
# edge_idx = i
# print('edge detected at idx:', edge_idx)
# break
# if edge_idx < 0:
# # This is an important error but should not crash the
# # process
# logging.warning('Failed to find rising edge.')
# edge_idx = 0
# return edge_idx
def detect_edge(y, edge_level=0.10):
"""
Detects the first crossing of some threshold and returns the index
"""
th = y > edge_level*np.max(y)
    # marks all but the first occurrence of True as False
th[1:][th[:-1] & th[1:]] = False
return np.where(th)[0][0]
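# Worked example (illustrative, assuming a numpy array input): for
# y = np.array([0.0, 0.0, 0.5, 1.0, 1.0]) and the default edge_level=0.10 the
# threshold is 0.1 * max(y) = 0.1, so detect_edge(y) returns 2, the index of
# the first sample above the threshold.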
|
|
# Copyright (c) 2013 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import itertools
import random
from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_log import log as logging
import six
from sqlalchemy import sql
from neutron.common import constants
from neutron.common import utils
from neutron.db import l3_agentschedulers_db
from neutron.db import l3_db
from neutron.db import l3_hamode_db
from neutron.i18n import _LE, _LW
LOG = logging.getLogger(__name__)
cfg.CONF.register_opts(l3_hamode_db.L3_HA_OPTS)
@six.add_metaclass(abc.ABCMeta)
class L3Scheduler(object):
def __init__(self):
self.min_ha_agents = cfg.CONF.min_l3_agents_per_router
self.max_ha_agents = cfg.CONF.max_l3_agents_per_router
@abc.abstractmethod
def schedule(self, plugin, context, router_id,
candidates=None, hints=None):
"""Schedule the router to an active L3 agent.
Schedule the router only if it is not already scheduled.
"""
pass
def _router_has_binding(self, context, router_id, l3_agent_id):
router_binding_model = l3_agentschedulers_db.RouterL3AgentBinding
query = context.session.query(router_binding_model)
query = query.filter(router_binding_model.router_id == router_id,
router_binding_model.l3_agent_id == l3_agent_id)
return query.count() > 0
def _filter_unscheduled_routers(self, context, plugin, routers):
"""Filter from list of routers the ones that are not scheduled."""
unscheduled_routers = []
for router in routers:
l3_agents = plugin.get_l3_agents_hosting_routers(
context, [router['id']])
if l3_agents:
LOG.debug('Router %(router_id)s has already been '
'hosted by L3 agent %(agent_id)s',
{'router_id': router['id'],
'agent_id': l3_agents[0]['id']})
else:
unscheduled_routers.append(router)
return unscheduled_routers
def _get_unscheduled_routers(self, context, plugin):
"""Get routers with no agent binding."""
# TODO(gongysh) consider the disabled agent's router
no_agent_binding = ~sql.exists().where(
l3_db.Router.id ==
l3_agentschedulers_db.RouterL3AgentBinding.router_id)
query = context.session.query(l3_db.Router.id).filter(no_agent_binding)
unscheduled_router_ids = [router_id_[0] for router_id_ in query]
if unscheduled_router_ids:
return plugin.get_routers(
context, filters={'id': unscheduled_router_ids})
return []
def _get_routers_to_schedule(self, context, plugin,
router_ids=None, exclude_distributed=False):
"""Verify that the routers specified need to be scheduled.
:param context: the context
:param plugin: the core plugin
:param router_ids: the list of routers to be checked for scheduling
:param exclude_distributed: whether or not to consider dvr routers
:returns: the list of routers to be scheduled
"""
if router_ids is not None:
routers = plugin.get_routers(context, filters={'id': router_ids})
unscheduled_routers = self._filter_unscheduled_routers(
context, plugin, routers)
else:
unscheduled_routers = self._get_unscheduled_routers(context,
plugin)
if exclude_distributed:
unscheduled_routers = [
r for r in unscheduled_routers if not r.get('distributed')
]
return unscheduled_routers
def _get_routers_can_schedule(self, context, plugin, routers, l3_agent):
"""Get the subset of routers that can be scheduled on the L3 agent."""
ids_to_discard = set()
for router in routers:
# check if the l3 agent is compatible with the router
candidates = plugin.get_l3_agent_candidates(
context, router, [l3_agent])
if not candidates:
ids_to_discard.add(router['id'])
return [r for r in routers if r['id'] not in ids_to_discard]
def auto_schedule_routers(self, plugin, context, host, router_ids):
"""Schedule non-hosted routers to L3 Agent running on host.
If router_ids is given, each router in router_ids is scheduled
if it is not scheduled yet. Otherwise all unscheduled routers
are scheduled.
Do not schedule the routers which are hosted already
by active l3 agents.
:returns: True if routers have been successfully assigned to host
"""
l3_agent = plugin.get_enabled_agent_on_host(
context, constants.AGENT_TYPE_L3, host)
if not l3_agent:
return False
# NOTE(armando-migliaccio): DVR routers should not be auto
# scheduled because auto-scheduling may interfere with the
# placement rules for IR and SNAT namespaces.
unscheduled_routers = self._get_routers_to_schedule(
context, plugin, router_ids, exclude_distributed=True)
if not unscheduled_routers:
if utils.is_extension_supported(
plugin, constants.L3_HA_MODE_EXT_ALIAS):
return self._schedule_ha_routers_to_additional_agent(
plugin, context, l3_agent)
target_routers = self._get_routers_can_schedule(
context, plugin, unscheduled_routers, l3_agent)
if not target_routers:
LOG.warn(_LW('No routers compatible with L3 agent configuration'
' on host %s'), host)
return False
self._bind_routers(context, plugin, target_routers, l3_agent)
return True
def _get_candidates(self, plugin, context, sync_router):
"""Return L3 agents where a router could be scheduled."""
with context.session.begin(subtransactions=True):
            # Allow a router to be hosted by an enabled but currently
            # non-active L3 agent, since "active" is only a timing issue:
            # a non-active L3 agent can return to active at any time.
l3_agents = plugin.get_l3_agents_hosting_routers(
context, [sync_router['id']], admin_state_up=True)
if l3_agents and not sync_router.get('distributed', False):
LOG.debug('Router %(router_id)s has already been hosted'
' by L3 agent %(agent_id)s',
{'router_id': sync_router['id'],
'agent_id': l3_agents[0]['id']})
return
active_l3_agents = plugin.get_l3_agents(context, active=True)
if not active_l3_agents:
LOG.warn(_LW('No active L3 agents'))
return
new_l3agents = plugin.get_l3_agent_candidates(context,
sync_router,
active_l3_agents)
old_l3agentset = set(l3_agents)
if sync_router.get('distributed', False):
new_l3agentset = set(new_l3agents)
candidates = list(new_l3agentset - old_l3agentset)
else:
candidates = new_l3agents
if not candidates:
LOG.warn(_LW('No L3 agents can host the router %s'),
sync_router['id'])
return candidates
def _bind_routers(self, context, plugin, routers, l3_agent):
for router in routers:
if router.get('ha'):
if not self._router_has_binding(context, router['id'],
l3_agent.id):
self._create_ha_router_binding(
plugin, context, router['id'],
router['tenant_id'], l3_agent)
else:
self.bind_router(context, router['id'], l3_agent)
def bind_router(self, context, router_id, chosen_agent):
"""Bind the router to the l3 agent which has been chosen."""
try:
with context.session.begin(subtransactions=True):
binding = l3_agentschedulers_db.RouterL3AgentBinding()
binding.l3_agent = chosen_agent
binding.router_id = router_id
context.session.add(binding)
except db_exc.DBDuplicateEntry:
LOG.debug('Router %(router_id)s has already been scheduled '
'to L3 agent %(agent_id)s.',
{'agent_id': chosen_agent.id,
'router_id': router_id})
return
except db_exc.DBReferenceError:
LOG.debug('Router %s has already been removed '
'by concurrent operation', router_id)
return
LOG.debug('Router %(router_id)s is scheduled to L3 agent '
'%(agent_id)s', {'router_id': router_id,
'agent_id': chosen_agent.id})
def _schedule_router(self, plugin, context, router_id,
candidates=None):
sync_router = plugin.get_router(context, router_id)
router_distributed = sync_router.get('distributed', False)
if router_distributed:
# For Distributed routers check for SNAT Binding before
# calling the schedule_snat_router
snat_bindings = plugin.get_snat_bindings(context, [router_id])
router_gw_exists = sync_router.get('external_gateway_info', False)
if not snat_bindings and router_gw_exists:
# If GW exists for DVR routers and no SNAT binding
# call the schedule_snat_router
return plugin.schedule_snat_router(
context, router_id, sync_router)
if not router_gw_exists and snat_bindings:
# If DVR router and no Gateway but SNAT Binding exists then
# call the unbind_snat_servicenode to unbind the snat service
# from agent
plugin.unbind_snat_servicenode(context, router_id)
return
candidates = candidates or self._get_candidates(
plugin, context, sync_router)
if not candidates:
return
if router_distributed:
for chosen_agent in candidates:
self.bind_router(context, router_id, chosen_agent)
elif sync_router.get('ha', False):
chosen_agents = self._bind_ha_router(plugin, context,
router_id, candidates)
if not chosen_agents:
return
chosen_agent = chosen_agents[-1]
else:
chosen_agent = self._choose_router_agent(
plugin, context, candidates)
self.bind_router(context, router_id, chosen_agent)
return chosen_agent
@abc.abstractmethod
def _choose_router_agent(self, plugin, context, candidates):
"""Choose an agent from candidates based on a specific policy."""
pass
@abc.abstractmethod
def _choose_router_agents_for_ha(self, plugin, context, candidates):
"""Choose agents from candidates based on a specific policy."""
pass
def _get_num_of_agents_for_ha(self, candidates_count):
return (min(self.max_ha_agents, candidates_count) if self.max_ha_agents
else candidates_count)
def _enough_candidates_for_ha(self, candidates):
if not candidates or len(candidates) < self.min_ha_agents:
LOG.error(_LE("Not enough candidates, a HA router needs at least "
"%s agents"), self.min_ha_agents)
return False
return True
def _create_ha_router_binding(self, plugin, context, router_id, tenant_id,
agent):
"""Creates and binds a new HA port for this agent."""
ha_network = plugin.get_ha_network(context, tenant_id)
port_binding = plugin.add_ha_port(context.elevated(), router_id,
ha_network.network.id, tenant_id)
port_binding.l3_agent_id = agent['id']
self.bind_router(context, router_id, agent)
def _schedule_ha_routers_to_additional_agent(self, plugin, context, agent):
"""Bind already scheduled routers to the agent.
Retrieve the number of agents per router and check if the router has
to be scheduled on the given agent if max_l3_agents_per_router
is not yet reached.
"""
routers_agents = plugin.get_ha_routers_l3_agents_count(context)
scheduled = False
admin_ctx = context.elevated()
for router_id, tenant_id, agents in routers_agents:
max_agents_not_reached = (
not self.max_ha_agents or agents < self.max_ha_agents)
if max_agents_not_reached:
if not self._router_has_binding(admin_ctx, router_id,
agent.id):
self._create_ha_router_binding(plugin, admin_ctx,
router_id, tenant_id,
agent)
scheduled = True
return scheduled
def _bind_ha_router_to_agents(self, plugin, context, router_id,
chosen_agents):
port_bindings = plugin.get_ha_router_port_bindings(context,
[router_id])
for port_binding, agent in itertools.izip(port_bindings,
chosen_agents):
port_binding.l3_agent_id = agent.id
self.bind_router(context, router_id, agent)
            LOG.debug('HA Router %(router_id)s is scheduled to L3 agent '
                      '%(agent_id)s',
                      {'router_id': router_id, 'agent_id': agent.id})
def _bind_ha_router(self, plugin, context, router_id, candidates):
"""Bind a HA router to agents based on a specific policy."""
if not self._enough_candidates_for_ha(candidates):
return
chosen_agents = self._choose_router_agents_for_ha(
plugin, context, candidates)
self._bind_ha_router_to_agents(plugin, context, router_id,
chosen_agents)
return chosen_agents
class ChanceScheduler(L3Scheduler):
"""Randomly allocate an L3 agent for a router."""
def schedule(self, plugin, context, router_id,
candidates=None):
return self._schedule_router(
plugin, context, router_id, candidates=candidates)
def _choose_router_agent(self, plugin, context, candidates):
return random.choice(candidates)
def _choose_router_agents_for_ha(self, plugin, context, candidates):
num_agents = self._get_num_of_agents_for_ha(len(candidates))
return random.sample(candidates, num_agents)
class LeastRoutersScheduler(L3Scheduler):
"""Allocate to an L3 agent with the least number of routers bound."""
def schedule(self, plugin, context, router_id,
candidates=None):
return self._schedule_router(
plugin, context, router_id, candidates=candidates)
def _choose_router_agent(self, plugin, context, candidates):
candidate_ids = [candidate['id'] for candidate in candidates]
chosen_agent = plugin.get_l3_agent_with_min_routers(
context, candidate_ids)
return chosen_agent
def _choose_router_agents_for_ha(self, plugin, context, candidates):
num_agents = self._get_num_of_agents_for_ha(len(candidates))
ordered_agents = plugin.get_l3_agents_ordered_by_num_routers(
context, [candidate['id'] for candidate in candidates])
return ordered_agents[:num_agents]
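# Illustrative only (not part of the original module): a small, self-contained
# sketch of the two policies above without the Neutron plugin or database.
# 'agents' is a hypothetical mapping from agent id to the number of routers
# currently bound to that agent.
def _example_policies():
    agents = {'agent-a': 5, 'agent-b': 2, 'agent-c': 2}
    # ChanceScheduler-style choice: any candidate is equally likely.
    chance_pick = random.choice(list(agents))
    # LeastRoutersScheduler-style choice: prefer the least loaded agent.
    least_pick = min(agents, key=agents.get)
    return chance_pick, least_pick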
|
|
# Copyright (c) 2014, Fundacion Dr. Manuel Sadosky
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import absolute_import
import platform
import unittest
from .x86translator import X86TranslationTestCase
@unittest.skipUnless(platform.machine().lower() == 'x86_64',
'Not running on an x86_64 system')
class X86TranslationSseTests(X86TranslationTestCase):
def test_lddqu(self):
# TODO: Implement.
pass
def test_movaps(self):
asm = ["movaps xmm0, xmm1"]
ctx_init = self._init_context()
ctx_init["xmm0"] = 0x12345678123456781234567812345678
ctx_init["xmm1"] = 0x87654321876543218765432187654321
res = ctx_init["xmm1"]
x86_ctx_out, reil_ctx_out = self._run_code(asm, 0xdeadbeef, ctx_init)
# TODO: Compare mm/xmm registers when comparing contexts.
# cmp_result = self._compare_contexts(ctx_init, x86_ctx_out, reil_ctx_out)
# if not cmp_result:
# self._save_failing_context(ctx_init)
# self.assertTrue(cmp_result, self._print_contexts(ctx_init, x86_ctx_out, reil_ctx_out))
self.assertEquals(reil_ctx_out["xmm0"], res)
self.assertEquals(reil_ctx_out["xmm1"], ctx_init["xmm1"])
def test_movd_1(self):
# MOVD mm, r/m32
asm = ["movd mm0, eax"]
ctx_init = self._init_context()
ctx_init["mm0"] = 0x1234567812345678
ctx_init["rax"] = 0xffffffff87654321
res = 0x0000000087654321
x86_ctx_out, reil_ctx_out = self._run_code(asm, 0xdeadbeef, ctx_init)
# TODO: Compare mm/xmm registers when comparing contexts.
# cmp_result = self._compare_contexts(ctx_init, x86_ctx_out, reil_ctx_out)
# if not cmp_result:
# self._save_failing_context(ctx_init)
# self.assertTrue(cmp_result, self._print_contexts(ctx_init, x86_ctx_out, reil_ctx_out))
self.assertEquals(reil_ctx_out["mm0"], res)
self.assertEquals(reil_ctx_out["rax"], ctx_init["rax"])
def test_movd_2(self):
# MOVD r/m32, mm
asm = ["movd eax, mm0"]
ctx_init = self._init_context()
ctx_init["mm0"] = 0x1234567812345678
ctx_init["rax"] = 0xffffffff87654321
res = 0x0000000012345678
x86_ctx_out, reil_ctx_out = self._run_code(asm, 0xdeadbeef, ctx_init)
# TODO: Compare mm/xmm registers when comparing contexts.
# cmp_result = self._compare_contexts(ctx_init, x86_ctx_out, reil_ctx_out)
# if not cmp_result:
# self._save_failing_context(ctx_init)
# self.assertTrue(cmp_result, self._print_contexts(ctx_init, x86_ctx_out, reil_ctx_out))
self.assertEquals(reil_ctx_out["rax"], res)
self.assertEquals(reil_ctx_out["mm0"], ctx_init["mm0"])
def test_movd_3(self):
# MOVD xmm, r/m32
asm = ["movd xmm0, eax"]
ctx_init = self._init_context()
ctx_init["xmm0"] = 0x12345678123456781234567812345678
ctx_init["rax"] = 0xffffffff87654321
res = 0x00000000000000000000000087654321
x86_ctx_out, reil_ctx_out = self._run_code(asm, 0xdeadbeef, ctx_init)
# TODO: Compare mm/xmm registers when comparing contexts.
# cmp_result = self._compare_contexts(ctx_init, x86_ctx_out, reil_ctx_out)
# if not cmp_result:
# self._save_failing_context(ctx_init)
# self.assertTrue(cmp_result, self._print_contexts(ctx_init, x86_ctx_out, reil_ctx_out))
self.assertEquals(reil_ctx_out["xmm0"], res)
self.assertEquals(reil_ctx_out["rax"], ctx_init["rax"])
def test_movd_4(self):
# MOVD r/m32, xmm
asm = ["movd eax, xmm0"]
ctx_init = self._init_context()
ctx_init["xmm0"] = 0x12345678123456781234567812345678
ctx_init["rax"] = 0xffffffff87654321
res = 0x0000000012345678
x86_ctx_out, reil_ctx_out = self._run_code(asm, 0xdeadbeef, ctx_init)
# TODO: Compare mm/xmm registers when comparing contexts.
# cmp_result = self._compare_contexts(ctx_init, x86_ctx_out, reil_ctx_out)
# if not cmp_result:
# self._save_failing_context(ctx_init)
# self.assertTrue(cmp_result, self._print_contexts(ctx_init, x86_ctx_out, reil_ctx_out))
self.assertEquals(reil_ctx_out["rax"], res)
self.assertEquals(reil_ctx_out["xmm0"], ctx_init["xmm0"])
def test_movdqa(self):
# TODO: Implement.
pass
def test_movdqu(self):
# TODO: Implement.
pass
def test_movhpd(self):
asm = ["movhpd xmm0, xmm1"]
ctx_init = self._init_context()
ctx_init["xmm0"] = 0x12345678123456781234567812345678
ctx_init["xmm1"] = 0x87654321876543218765432187654321
res = 0x87654321876543211234567812345678
# x86_ctx_out, reil_ctx_out = self._run_code(asm, 0xdeadbeef, ctx_init)
        # NOTE: Hack to be able to test this instruction (the src operand
        # should be a memory operand instead of an xmm register).
# -------------------------------------------------------------------- #
address = 0xdeadbeef
reil_instrs = self._asm_to_reil(asm, address)
reil_ctx_out, _ = self.reil_emulator.execute(reil_instrs, start=address << 8, registers=ctx_init)
# -------------------------------------------------------------------- #
# TODO: Compare mm/xmm registers when comparing contexts.
# cmp_result = self._compare_contexts(ctx_init, x86_ctx_out, reil_ctx_out)
# if not cmp_result:
# self._save_failing_context(ctx_init)
# self.assertTrue(cmp_result, self._print_contexts(ctx_init, x86_ctx_out, reil_ctx_out))
self.assertEquals(reil_ctx_out["xmm0"], res)
self.assertEquals(reil_ctx_out["xmm1"], ctx_init["xmm1"])
def test_movlpd(self):
asm = ["movlpd xmm0, xmm1"]
ctx_init = self._init_context()
ctx_init["xmm0"] = 0x12345678123456781234567812345678
ctx_init["xmm1"] = 0x87654321876543218765432187654321
res = 0x12345678123456788765432187654321
# x86_ctx_out, reil_ctx_out = self._run_code(asm, 0xdeadbeef, ctx_init)
        # NOTE: Hack to be able to test this instruction (the src operand
        # should be a memory operand instead of an xmm register).
# -------------------------------------------------------------------- #
address = 0xdeadbeef
reil_instrs = self._asm_to_reil(asm, address)
reil_ctx_out, _ = self.reil_emulator.execute(reil_instrs, start=address << 8, registers=ctx_init)
# -------------------------------------------------------------------- #
# TODO: Compare mm/xmm registers when comparing contexts.
# cmp_result = self._compare_contexts(ctx_init, x86_ctx_out, reil_ctx_out)
# if not cmp_result:
# self._save_failing_context(ctx_init)
# self.assertTrue(cmp_result, self._print_contexts(ctx_init, x86_ctx_out, reil_ctx_out))
self.assertEquals(reil_ctx_out["xmm0"], res)
self.assertEquals(reil_ctx_out["xmm1"], ctx_init["xmm1"])
def test_movq_1(self):
# MOVQ mm, r/m64
asm = ["movq mm0, rax"]
ctx_init = self._init_context()
ctx_init["mm0"] = 0x1234567812345678
ctx_init["rax"] = 0x8765432187654321
res = 0x8765432187654321
x86_ctx_out, reil_ctx_out = self._run_code(asm, 0xdeadbeef, ctx_init)
# TODO: Compare mm/xmm registers when comparing contexts.
# cmp_result = self._compare_contexts(ctx_init, x86_ctx_out, reil_ctx_out)
# if not cmp_result:
# self._save_failing_context(ctx_init)
# self.assertTrue(cmp_result, self._print_contexts(ctx_init, x86_ctx_out, reil_ctx_out))
self.assertEquals(reil_ctx_out["mm0"], res)
self.assertEquals(reil_ctx_out["rax"], ctx_init["rax"])
def test_movq_2(self):
# MOVQ r/m64, mm
asm = ["movq rax, mm0"]
ctx_init = self._init_context()
ctx_init["mm0"] = 0x1234567812345678
ctx_init["rax"] = 0x8765432187654321
res = 0x1234567812345678
x86_ctx_out, reil_ctx_out = self._run_code(asm, 0xdeadbeef, ctx_init)
# TODO: Compare mm/xmm registers when comparing contexts.
# cmp_result = self._compare_contexts(ctx_init, x86_ctx_out, reil_ctx_out)
# if not cmp_result:
# self._save_failing_context(ctx_init)
# self.assertTrue(cmp_result, self._print_contexts(ctx_init, x86_ctx_out, reil_ctx_out))
self.assertEquals(reil_ctx_out["rax"], res)
self.assertEquals(reil_ctx_out["mm0"], ctx_init["mm0"])
def test_movq_3(self):
# MOVQ xmm, r/m64
asm = ["movq xmm0, rax"]
ctx_init = self._init_context()
ctx_init["xmm0"] = 0x12345678123456781234567812345678
ctx_init["rax"] = 0x8765432187654321
res = 0x00000000000000008765432187654321
x86_ctx_out, reil_ctx_out = self._run_code(asm, 0xdeadbeef, ctx_init)
# TODO: Compare mm/xmm registers when comparing contexts.
# cmp_result = self._compare_contexts(ctx_init, x86_ctx_out, reil_ctx_out)
# if not cmp_result:
# self._save_failing_context(ctx_init)
# self.assertTrue(cmp_result, self._print_contexts(ctx_init, x86_ctx_out, reil_ctx_out))
self.assertEquals(reil_ctx_out["xmm0"], res)
self.assertEquals(reil_ctx_out["rax"], ctx_init["rax"])
def test_movq_4(self):
# MOVQ r/m64, xmm
asm = ["movq rax, xmm0"]
ctx_init = self._init_context()
ctx_init["xmm0"] = 0x12345678123456781234567812345678
ctx_init["rax"] = 0x8765432187654321
res = 0x1234567812345678
x86_ctx_out, reil_ctx_out = self._run_code(asm, 0xdeadbeef, ctx_init)
# TODO: Compare mm/xmm registers when comparing contexts.
# cmp_result = self._compare_contexts(ctx_init, x86_ctx_out, reil_ctx_out)
# if not cmp_result:
# self._save_failing_context(ctx_init)
# self.assertTrue(cmp_result, self._print_contexts(ctx_init, x86_ctx_out, reil_ctx_out))
self.assertEquals(reil_ctx_out["rax"], res)
self.assertEquals(reil_ctx_out["xmm0"], ctx_init["xmm0"])
def test_movsd_sse(self):
# TODO: Implement.
pass
def test_pcmpeqb(self):
asm = ["pcmpeqb xmm0, xmm1"]
ctx_init = self._init_context()
ctx_init["xmm0"] = 0x11145178113156181231517111345618
ctx_init["xmm1"] = 0x12345678123456781234567812345678
res = 0x000000ff0000ff00ff00000000ffff00
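        # PCMPEQB compares the operands byte by byte and writes 0xff where the
        # bytes are equal and 0x00 where they differ, which is how 'res' above
        # follows from xmm0 and xmm1.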
x86_ctx_out, reil_ctx_out = self._run_code(asm, 0xdeadbeef, ctx_init)
# TODO: Compare mm/xmm registers when comparing contexts.
# cmp_result = self._compare_contexts(ctx_init, x86_ctx_out, reil_ctx_out)
# if not cmp_result:
# self._save_failing_context(ctx_init)
# self.assertTrue(cmp_result, self._print_contexts(ctx_init, x86_ctx_out, reil_ctx_out))
self.assertEquals(reil_ctx_out["xmm0"], res)
self.assertEquals(reil_ctx_out["xmm1"], ctx_init["xmm1"])
def test_pminub(self):
asm = ["pminub xmm0, xmm1"]
ctx_init = self._init_context()
ctx_init["xmm0"] = 0x88776655443322118877665544332211
ctx_init["xmm1"] = 0x992277aa113311FF992277aa113311FF
res = 0x88226655113311118822665511331111
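        # PMINUB takes the unsigned minimum of each byte pair, e.g.
        # min(0x88, 0x99) = 0x88 and min(0x55, 0xaa) = 0x55, giving 'res'.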
x86_ctx_out, reil_ctx_out = self._run_code(asm, 0xdeadbeef, ctx_init)
# # NOTE Hack to be able to test this instr (src oprnd should be a memory
# # operand instead of a xmm register).
# # -------------------------------------------------------------------- #
# address = 0xdeadbeef
# reil_instrs = self._asm_to_reil(asm, address)
# reil_ctx_out, _ = self.reil_emulator.execute(reil_instrs, start=address << 8, registers=ctx_init)
# # -------------------------------------------------------------------- #
# TODO: Compare mm/xmm registers when comparing contexts.
# cmp_result = self._compare_contexts(ctx_init, x86_ctx_out, reil_ctx_out)
# if not cmp_result:
# self._save_failing_context(ctx_init)
# self.assertTrue(cmp_result, self._print_contexts(ctx_init, x86_ctx_out, reil_ctx_out))
self.assertEquals(reil_ctx_out["xmm0"], res)
self.assertEquals(reil_ctx_out["xmm1"], ctx_init["xmm1"])
def test_pmovmskb(self):
asm = ["pmovmskb eax, xmm0"]
ctx_init = self._init_context()
ctx_init["xmm0"] = 0x12345678123456781274861892345698
res = 0x29 # 00101001b
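        # PMOVMSKB packs the most significant bit of each of the 16 bytes of
        # xmm0 into eax (lowest byte first). Only bytes 0x98, 0x92 and 0x86
        # have their MSB set, i.e. bits 0, 3 and 5 -> 00101001b = 0x29.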
x86_ctx_out, reil_ctx_out = self._run_code(asm, 0xdeadbeef, ctx_init)
# TODO: Compare mm/xmm registers when comparing contexts.
# cmp_result = self._compare_contexts(ctx_init, x86_ctx_out, reil_ctx_out)
# if not cmp_result:
# self._save_failing_context(ctx_init)
# self.assertTrue(cmp_result, self._print_contexts(ctx_init, x86_ctx_out, reil_ctx_out))
self.assertEquals(reil_ctx_out["rax"] & 0xffffffff, res)
def test_por_1(self):
asm = ["por mm0, mm1"]
ctx_init = self._init_context()
ctx_init["mm0"] = 0x1234567812345678
ctx_init["mm1"] = 0x8765432187654321
res = ctx_init["mm0"] | ctx_init["mm1"]
x86_ctx_out, reil_ctx_out = self._run_code(asm, 0xdeadbeef, ctx_init)
# TODO: Compare mm/xmm registers when comparing contexts.
# cmp_result = self._compare_contexts(ctx_init, x86_ctx_out, reil_ctx_out)
# if not cmp_result:
# self._save_failing_context(ctx_init)
# self.assertTrue(cmp_result, self._print_contexts(ctx_init, x86_ctx_out, reil_ctx_out))
self.assertEquals(reil_ctx_out["mm0"], res)
self.assertEquals(reil_ctx_out["mm1"], ctx_init["mm1"])
def test_por_2(self):
asm = ["por xmm0, xmm1"]
ctx_init = self._init_context()
ctx_init["xmm0"] = 0x12345678123456781234567812345678
ctx_init["xmm1"] = 0x87654321876543218765432187654321
res = ctx_init["xmm0"] | ctx_init["xmm1"]
x86_ctx_out, reil_ctx_out = self._run_code(asm, 0xdeadbeef, ctx_init)
# TODO: Compare mm/xmm registers when comparing contexts.
# cmp_result = self._compare_contexts(ctx_init, x86_ctx_out, reil_ctx_out)
# if not cmp_result:
# self._save_failing_context(ctx_init)
# self.assertTrue(cmp_result, self._print_contexts(ctx_init, x86_ctx_out, reil_ctx_out))
self.assertEquals(reil_ctx_out["xmm0"], res)
self.assertEquals(reil_ctx_out["xmm1"], ctx_init["xmm1"])
def test_pshufd(self):
asm = ["pshufd xmm0, xmm1, 0x93"]
# 10010011
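        # 0x93 selects source dwords 3, 0, 1 and 2 (two bits per destination
        # dword, lowest dword first), i.e. the dwords of xmm1 rotated by one.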
ctx_init = self._init_context()
ctx_init["xmm0"] = 0x00000000000000000000000000000000
ctx_init["xmm1"] = 0x44444444333333332222222211111111
res = 0x33333333222222221111111144444444
x86_ctx_out, reil_ctx_out = self._run_code(asm, 0xdeadbeef, ctx_init)
# # NOTE Hack to be able to test this instr (src oprnd should be a memory
# # operand instead of a xmm register).
# # -------------------------------------------------------------------- #
# address = 0xdeadbeef
# reil_instrs = self._asm_to_reil(asm, address)
# reil_ctx_out, _ = self.reil_emulator.execute(reil_instrs, start=address << 8, registers=ctx_init)
# # -------------------------------------------------------------------- #
# TODO: Compare mm/xmm registers when comparing contexts.
# cmp_result = self._compare_contexts(ctx_init, x86_ctx_out, reil_ctx_out)
# if not cmp_result:
# self._save_failing_context(ctx_init)
# self.assertTrue(cmp_result, self._print_contexts(ctx_init, x86_ctx_out, reil_ctx_out))
self.assertEquals(reil_ctx_out["xmm0"], res)
self.assertEquals(reil_ctx_out["xmm1"], ctx_init["xmm1"])
def test_pslldq_1(self):
asm = ["pslldq xmm0, 7"]
ctx_init = self._init_context()
ctx_init["xmm0"] = 0x12345678123456781274861892345698
res = 0x78127486189234569800000000000000
x86_ctx_out, reil_ctx_out = self._run_code(asm, 0xdeadbeef, ctx_init)
# TODO: Compare mm/xmm registers when comparing contexts.
# cmp_result = self._compare_contexts(ctx_init, x86_ctx_out, reil_ctx_out)
# if not cmp_result:
# self._save_failing_context(ctx_init)
# self.assertTrue(cmp_result, self._print_contexts(ctx_init, x86_ctx_out, reil_ctx_out))
self.assertEquals(reil_ctx_out["xmm0"], res)
def test_pslldq_2(self):
asm = ["pslldq xmm0, 16"]
ctx_init = self._init_context()
ctx_init["xmm0"] = 0x12345678123456781274861892345698
res = 0x00000000000000000000000000000000
x86_ctx_out, reil_ctx_out = self._run_code(asm, 0xdeadbeef, ctx_init)
# TODO: Compare mm/xmm registers when comparing contexts.
# cmp_result = self._compare_contexts(ctx_init, x86_ctx_out, reil_ctx_out)
# if not cmp_result:
# self._save_failing_context(ctx_init)
# self.assertTrue(cmp_result, self._print_contexts(ctx_init, x86_ctx_out, reil_ctx_out))
self.assertEquals(reil_ctx_out["xmm0"], res)
def test_psrldq_1(self):
asm = ["psrldq xmm0, 7"]
ctx_init = self._init_context()
ctx_init["xmm0"] = 0x12345678123456781234567812345678
res = 0x00000000000000123456781234567812
x86_ctx_out, reil_ctx_out = self._run_code(asm, 0xdeadbeef, ctx_init)
# TODO: Compare mm/xmm registers when comparing contexts.
# cmp_result = self._compare_contexts(ctx_init, x86_ctx_out, reil_ctx_out)
# if not cmp_result:
# self._save_failing_context(ctx_init)
# self.assertTrue(cmp_result, self._print_contexts(ctx_init, x86_ctx_out, reil_ctx_out))
self.assertEquals(reil_ctx_out["xmm0"], res)
def test_psrldq_2(self):
asm = ["psrldq xmm0, 16"]
ctx_init = self._init_context()
ctx_init["xmm0"] = 0x12345678123456781274861892345698
res = 0x00000000000000000000000000000000
x86_ctx_out, reil_ctx_out = self._run_code(asm, 0xdeadbeef, ctx_init)
# TODO: Compare mm/xmm registers when comparing contexts.
# cmp_result = self._compare_contexts(ctx_init, x86_ctx_out, reil_ctx_out)
# if not cmp_result:
# self._save_failing_context(ctx_init)
# self.assertTrue(cmp_result, self._print_contexts(ctx_init, x86_ctx_out, reil_ctx_out))
self.assertEquals(reil_ctx_out["xmm0"], res)
def test_psubb(self):
asm = ["psubb xmm0, xmm1"]
ctx_init = self._init_context()
ctx_init["xmm0"] = 0x11145178113156181231517111345618
ctx_init["xmm1"] = 0x12345678123456781234567812345678
res = 0xffe0fb00fffd00a000fdfbf9ff0000a0
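        # PSUBB subtracts byte-wise with wrap-around modulo 256, e.g. the
        # lowest byte is 0x18 - 0x78 = 0xa0, which matches 'res' above.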
x86_ctx_out, reil_ctx_out = self._run_code(asm, 0xdeadbeef, ctx_init)
# TODO: Compare mm/xmm registers when comparing contexts.
# cmp_result = self._compare_contexts(ctx_init, x86_ctx_out, reil_ctx_out)
# if not cmp_result:
# self._save_failing_context(ctx_init)
# self.assertTrue(cmp_result, self._print_contexts(ctx_init, x86_ctx_out, reil_ctx_out))
self.assertEquals(reil_ctx_out["xmm0"], res)
self.assertEquals(reil_ctx_out["xmm1"], ctx_init["xmm1"])
def test_punpcklbw(self):
asm = ["punpcklbw xmm0, xmm1"]
ctx_init = self._init_context()
ctx_init["xmm0"] = 0x88776655443322118877665544332211
ctx_init["xmm1"] = 0xffeeddccbbaa9988ffeeddccbbaa9988
res = 0xff88ee77dd66cc55bb44aa3399228811
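        # PUNPCKLBW interleaves the low 8 bytes of destination and source:
        # dst0, src0, dst1, src1, ..., so the low bytes 0x11 and 0x88 end up
        # adjacent at the bottom of 'res'.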
x86_ctx_out, reil_ctx_out = self._run_code(asm, 0xdeadbeef, ctx_init)
# # NOTE Hack to be able to test this instr (src oprnd should be a memory
# # operand instead of a xmm register).
# # -------------------------------------------------------------------- #
# address = 0xdeadbeef
# reil_instrs = self._asm_to_reil(asm, address)
# reil_ctx_out, _ = self.reil_emulator.execute(reil_instrs, start=address << 8, registers=ctx_init)
# # -------------------------------------------------------------------- #
# TODO: Compare mm/xmm registers when comparing contexts.
# cmp_result = self._compare_contexts(ctx_init, x86_ctx_out, reil_ctx_out)
# if not cmp_result:
# self._save_failing_context(ctx_init)
# self.assertTrue(cmp_result, self._print_contexts(ctx_init, x86_ctx_out, reil_ctx_out))
self.assertEquals(reil_ctx_out["xmm0"], res)
self.assertEquals(reil_ctx_out["xmm1"], ctx_init["xmm1"])
def test_punpcklqdq(self):
asm = ["punpcklqdq xmm0, xmm1"]
ctx_init = self._init_context()
ctx_init["xmm0"] = 0x55555555555555551111111111111111
ctx_init["xmm1"] = 0x44444444444444448888888888888888
res = 0x88888888888888881111111111111111
x86_ctx_out, reil_ctx_out = self._run_code(asm, 0xdeadbeef, ctx_init)
# # NOTE Hack to be able to test this instr (src oprnd should be a memory
# # operand instead of a xmm register).
# # -------------------------------------------------------------------- #
# address = 0xdeadbeef
# reil_instrs = self._asm_to_reil(asm, address)
# reil_ctx_out, _ = self.reil_emulator.execute(reil_instrs, start=address << 8, registers=ctx_init)
# # -------------------------------------------------------------------- #
# TODO: Compare mm/xmm registers when comparing contexts.
# cmp_result = self._compare_contexts(ctx_init, x86_ctx_out, reil_ctx_out)
# if not cmp_result:
# self._save_failing_context(ctx_init)
# self.assertTrue(cmp_result, self._print_contexts(ctx_init, x86_ctx_out, reil_ctx_out))
self.assertEquals(reil_ctx_out["xmm0"], res)
self.assertEquals(reil_ctx_out["xmm1"], ctx_init["xmm1"])
def test_punpcklwd(self):
asm = ["punpcklwd xmm0, xmm1"]
ctx_init = self._init_context()
ctx_init["xmm0"] = 0x88887777666655554444333322221111
ctx_init["xmm1"] = 0x11112222333344445555666677778888
res = 0x55554444666633337777222288881111
x86_ctx_out, reil_ctx_out = self._run_code(asm, 0xdeadbeef, ctx_init)
# # NOTE Hack to be able to test this instr (src oprnd should be a memory
# # operand instead of a xmm register).
# # -------------------------------------------------------------------- #
# address = 0xdeadbeef
# reil_instrs = self._asm_to_reil(asm, address)
# reil_ctx_out, _ = self.reil_emulator.execute(reil_instrs, start=address << 8, registers=ctx_init)
# # -------------------------------------------------------------------- #
# TODO: Compare mm/xmm registers when comparing contexts.
# cmp_result = self._compare_contexts(ctx_init, x86_ctx_out, reil_ctx_out)
# if not cmp_result:
# self._save_failing_context(ctx_init)
# self.assertTrue(cmp_result, self._print_contexts(ctx_init, x86_ctx_out, reil_ctx_out))
self.assertEquals(reil_ctx_out["xmm0"], res)
self.assertEquals(reil_ctx_out["xmm1"], ctx_init["xmm1"])
def test_pxor(self):
# TODO: Implement.
pass
def test_vmovdqa(self):
# TODO: Implement.
pass
|
|
#!/usr/bin/python
# (C) Copyright 2015 Hewlett-Packard Development Company, L.P.
DOCUMENTATION = '''
---
module: monasca_alarm_definition
short_description: CRUD operations on Monasca alarm definitions
description:
    - Performs CRUD operations (create/update/delete) on Monasca alarm definitions
- Monasca project homepage - https://wiki.openstack.org/wiki/Monasca
- When relevant the alarm_definition_id is in the output and can be used with the register action
author: Tim Kuhlman <tim@backgroundprocess.com>
requirements: [ python-monascaclient ]
options:
alarm_actions:
required: false
description:
- Array of notification method IDs that are invoked for the transition to the ALARM state.
api_version:
required: false
default: '2_0'
description:
- The monasca api version.
description:
required: false
description:
- The description associated with the alarm
expression:
required: false
description:
- The alarm expression, required for create/update operations.
keystone_password:
required: false
description:
- Keystone password to use for authentication, required unless a keystone_token is specified.
keystone_url:
required: false
description:
      - Keystone url to authenticate against, required unless keystone_token is defined.
Example http://192.168.10.5:5000/v3
keystone_token:
required: false
description:
- Keystone token to use with the monasca api. If this is specified the monasca_api_url is required but
the keystone_user and keystone_password aren't.
keystone_user:
required: false
description:
- Keystone user to log in as, required unless a keystone_token is specified.
keystone_project:
required: false
description:
- Keystone project name to obtain a token for, defaults to the user's default project
match_by:
required: false
default: "[hostname]"
description:
- Alarm definition match by, see the monasca api documentation for more detail.
monasca_api_url:
required: false
description:
      - If unset, the service endpoint registered with keystone will be used.
name:
required: true
description:
- The alarm definition name
ok_actions:
required: false
description:
- Array of notification method IDs that are invoked for the transition to the OK state.
severity:
required: false
default: "LOW"
description:
- The severity set for the alarm must be LOW, MEDIUM, HIGH or CRITICAL
state:
required: false
default: "present"
choices: [ present, absent ]
description:
      - Whether the alarm definition should exist. When C(absent), removes the alarm definition.
undetermined_actions:
required: false
description:
- Array of notification method IDs that are invoked for the transition to the UNDETERMINED state.
'''
EXAMPLES = '''
- name: Host Alive Alarm
monasca_alarm_definition:
name: "Host Alive Alarm"
expression: "host_alive_status > 0"
keystone_url: "{{keystone_url}}"
keystone_user: "{{keystone_user}}"
keystone_password: "{{keystone_password}}"
tags:
- alarms
- system_alarms
register: out
- name: Create System Alarm Definitions
monasca_alarm_definition:
name: "{{item.name}}"
expression: "{{item.expression}}"
keystone_token: "{{out.keystone_token}}"
monasca_api_url: "{{out.monasca_api_url}}"
with_items:
- { name: "High CPU usage", expression: "avg(cpu.idle_perc) < 10 times 3" }
- { name: "Disk Inode Usage", expression: "disk.inode_used_perc > 90" }
'''
from ansible.module_utils.basic import *
import os
try:
from monascaclient import client
from monascaclient import ksclient
except ImportError:
    # In many installs python-monascaclient is only available in a venv;
    # switch to the most common location.
activate_this = os.path.realpath('/opt/venvs/monasca-client/bin/activate_this.py')
try:
execfile(activate_this, dict(__file__=activate_this))
from monascaclient import client
from monascaclient import ksclient
except ImportError:
monascaclient_found = False
else:
monascaclient_found = True
else:
monascaclient_found = True
# With Ansible, having modules include other files presents difficulties;
# otherwise this class would live in its own module.
class MonascaAnsible(object):
""" A base class used to build Monasca Client based Ansible Modules
As input an ansible.module_utils.basic.AnsibleModule object is expected. It should have at least
these params defined:
- api_version
- keystone_token and monasca_api_url or keystone_url, keystone_user and keystone_password and optionally
monasca_api_url
"""
def __init__(self, module):
self.module = module
self._keystone_auth()
self.exit_data = {'keystone_token': self.token, 'monasca_api_url': self.api_url}
self.monasca = client.Client(self.module.params['api_version'], self.api_url, token=self.token)
def _exit_json(self, **kwargs):
""" Exit with supplied kwargs combined with the self.exit_data
"""
kwargs.update(self.exit_data)
self.module.exit_json(**kwargs)
def _keystone_auth(self):
""" Authenticate to Keystone and set self.token and self.api_url
"""
if self.module.params['keystone_token'] is None:
ks = ksclient.KSClient(auth_url=self.module.params['keystone_url'],
username=self.module.params['keystone_user'],
password=self.module.params['keystone_password'],
project_name=self.module.params['keystone_project'])
self.token = ks.token
if self.module.params['monasca_api_url'] is None:
self.api_url = ks.monasca_url
else:
self.api_url = self.module.params['monasca_api_url']
else:
if self.module.params['monasca_api_url'] is None:
self.module.fail_json(msg='Error: When specifying keystone_token, monasca_api_url is required')
self.token = self.module.params['keystone_token']
self.api_url = self.module.params['monasca_api_url']
class MonascaDefinition(MonascaAnsible):
def run(self):
name = self.module.params['name']
expression = self.module.params['expression']
# Find existing definitions
definitions = {definition['name']: definition for definition in self.monasca.alarm_definitions.list()}
if self.module.params['state'] == 'absent':
if name not in definitions.keys():
self._exit_json(changed=False)
if self.module.check_mode:
self._exit_json(changed=True)
resp = self.monasca.alarm_definitions.delete(alarm_id=definitions[name]['id'])
if resp.status_code == 204:
self._exit_json(changed=True)
else:
self.module.fail_json(msg=str(resp.status_code) + resp.text)
else: # Only other option is state=present
def_kwargs = {"name": name, "description": self.module.params['description'], "expression": expression,
"match_by": self.module.params['match_by'], "severity": self.module.params['severity'],
"alarm_actions": self.module.params['alarm_actions'],
"ok_actions": self.module.params['ok_actions'],
"undetermined_actions": self.module.params['undetermined_actions']}
if name in definitions.keys():
if definitions[name]['expression'] == expression and \
definitions[name]['alarm_actions'] == self.module.params['alarm_actions'] and \
definitions[name]['ok_actions'] == self.module.params['ok_actions'] and \
definitions[name]['undetermined_actions'] == self.module.params['undetermined_actions']:
self._exit_json(changed=False, alarm_definition_id=definitions[name]['id'])
def_kwargs['alarm_id'] = definitions[name]['id']
if self.module.check_mode:
self._exit_json(changed=True, alarm_definition_id=definitions[name]['id'])
body = self.monasca.alarm_definitions.patch(**def_kwargs)
else:
if self.module.check_mode:
self._exit_json(changed=True)
body = self.monasca.alarm_definitions.create(**def_kwargs)
if 'id' in body:
self._exit_json(changed=True, alarm_definition_id=body['id'])
else:
self.module.fail_json(msg=body)
def main():
module = AnsibleModule(
argument_spec=dict(
alarm_actions=dict(required=False, default=[], type='list'),
api_version=dict(required=False, default='2_0', type='str'),
description=dict(required=False, type='str'),
expression=dict(required=False, type='str'),
keystone_password=dict(required=False, type='str'),
keystone_token=dict(required=False, type='str'),
keystone_url=dict(required=False, type='str'),
keystone_user=dict(required=False, type='str'),
keystone_project=dict(required=False, type='str'),
match_by=dict(default=['hostname'], type='list'),
monasca_api_url=dict(required=False, type='str'),
name=dict(required=True, type='str'),
ok_actions=dict(required=False, default=[], type='list'),
severity=dict(default='LOW', type='str'),
state=dict(default='present', choices=['present', 'absent'], type='str'),
undetermined_actions=dict(required=False, default=[], type='list')
),
supports_check_mode=True
)
if not monascaclient_found:
module.fail_json(msg="python-monascaclient >= 1.0.9 is required")
definition = MonascaDefinition(module)
definition.run()
if __name__ == "__main__":
main()
|
|
# Written by Bram Cohen
# see LICENSE.txt for license information
# For a description of the algorithm see https://wiki.theory.org/BitTorrentSpecification#Choking_and_Optimistic_Unchoking.
from random import randrange
class Choker:
def __init__(self, max_uploads, schedule, done = lambda: False, min_uploads = None):
# Maximum and minimum number of peers we should unchoke.
self.max_uploads = max_uploads
if min_uploads is None:
min_uploads = max_uploads
self.min_uploads = min_uploads
# Function to schedule events in the reactor loop of RawServer.
self.schedule = schedule
# Instances of Connection defined in Connecter.py.
self.connections = []
# Counter that controls when we rotate the optimistically unchoked peer.
self.count = 0
# Returns whether we have all the pieces and are seeding.
self.done = done
schedule(self._round_robin, 10)
def _round_robin(self):
self.schedule(self._round_robin, 10)
self.count += 1
if self.count % 3 == 0:
# Visit here every 30 seconds.
for i in xrange(len(self.connections)):
u = self.connections[i].get_upload()
if u.is_choked() and u.is_interested():
# Rotate the connections so this choked but interested peer is at the front.
# The _rechoke will unchoke this peer, perhaps as the optimistic unchoke.
self.connections = self.connections[i:] + self.connections[:i]
break
self._rechoke()
def _snubbed(self, c):
if self.done():
return False
return c.get_download().is_snubbed()
def _rate(self, c):
if self.done():
# Return upload speed to peer if this client is a seed.
return c.get_upload().get_rate()
else:
# Return download speed from peer if this client is also a peer.
return c.get_download().get_rate()
def _rechoke(self):
preferred = []
for c in self.connections:
if not self._snubbed(c) and c.get_upload().is_interested():
# This peer isn't snubbing us and is interested in pieces we have.
preferred.append((-self._rate(c), c))
# Sort so maximum download or upload rates are at front of array.
preferred.sort()
# Get the (max_uploads - 1) fastest connections to peers.
del preferred[self.max_uploads - 1:]
preferred = [x[1] for x in preferred]
count = len(preferred)
# True if we have designated a peer as optimistically unchoked.
hit = False
for c in self.connections:
u = c.get_upload()
if c in preferred:
# Unchoke this connection if one of the fastest.
u.unchoke()
else:
# If max_uploads = min_uploads, then we should visit here at least once.
if count < self.min_uploads or not hit:
# Not enough uploads started, or haven't designated a peer as optimistically unchoked.
# Unchoke this connection.
# Note that this can unchoke peers that are not interested.
u.unchoke()
if u.is_interested():
# This is a peer that is actually going to download from us and consume bandwidth.
count += 1
hit = True
else:
u.choke()
def connection_made(self, connection, p = None):
# Give a new peer 3x chance of starting as optimistically unchoked.
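        # randrange(-2, n + 1) can return -2, -1 or 0, and max(p, 0) below
        # maps all three to index 0, the front of the list that the
        # optimistic-unchoke rotation in _rechoke favours.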
if p is None:
p = randrange(-2, len(self.connections) + 1)
self.connections.insert(max(p, 0), connection)
self._rechoke()
def connection_lost(self, connection):
self.connections.remove(connection)
if connection.get_upload().is_interested() and not connection.get_upload().is_choked():
# Lost connection to this unchoked peer, so can now unchoke a different one.
self._rechoke()
def interested(self, connection):
if not connection.get_upload().is_choked():
self._rechoke()
def not_interested(self, connection):
if not connection.get_upload().is_choked():
self._rechoke()
def change_max_uploads(self, newval):
def foo(self=self, newval=newval):
self._change_max_uploads(newval)
        self.schedule(foo, 0)
def _change_max_uploads(self, newval):
self.max_uploads = newval
self._rechoke()
class DummyScheduler:
def __init__(self):
self.s = []
def __call__(self, func, delay):
self.s.append((func, delay))
class DummyConnection:
def __init__(self, v = 0):
self.u = DummyUploader()
self.d = DummyDownloader(self)
self.v = v
def get_upload(self):
return self.u
def get_download(self):
return self.d
class DummyDownloader:
def __init__(self, c):
self.s = False
self.c = c
def is_snubbed(self):
return self.s
def get_rate(self):
return self.c.v
class DummyUploader:
def __init__(self):
self.i = False
self.c = True
def choke(self):
if not self.c:
self.c = True
def unchoke(self):
if self.c:
self.c = False
def is_choked(self):
return self.c
def is_interested(self):
return self.i
def test_round_robin_with_no_downloads():
s = DummyScheduler()
Choker(2, s)
assert len(s.s) == 1
assert s.s[0][1] == 10
s.s[0][0]()
del s.s[0]
assert len(s.s) == 1
assert s.s[0][1] == 10
s.s[0][0]()
del s.s[0]
s.s[0][0]()
del s.s[0]
s.s[0][0]()
del s.s[0]
def test_resort():
s = DummyScheduler()
choker = Choker(1, s)
c1 = DummyConnection()
c2 = DummyConnection(1)
c3 = DummyConnection(2)
c4 = DummyConnection(3)
c2.u.i = True
c3.u.i = True
choker.connection_made(c1)
assert not c1.u.c
choker.connection_made(c2, 1)
assert not c1.u.c
assert not c2.u.c
choker.connection_made(c3, 1)
assert not c1.u.c
assert c2.u.c
assert not c3.u.c
c2.v = 2
c3.v = 1
choker.connection_made(c4, 1)
assert not c1.u.c
assert c2.u.c
assert not c3.u.c
assert not c4.u.c
choker.connection_lost(c4)
assert not c1.u.c
assert c2.u.c
assert not c3.u.c
s.s[0][0]()
assert not c1.u.c
assert c2.u.c
assert not c3.u.c
def test_interest():
s = DummyScheduler()
choker = Choker(1, s)
c1 = DummyConnection()
c2 = DummyConnection(1)
c3 = DummyConnection(2)
c2.u.i = True
c3.u.i = True
choker.connection_made(c1)
assert not c1.u.c
choker.connection_made(c2, 1)
assert not c1.u.c
assert not c2.u.c
choker.connection_made(c3, 1)
assert not c1.u.c
assert c2.u.c
assert not c3.u.c
c3.u.i = False
choker.not_interested(c3)
assert not c1.u.c
assert not c2.u.c
assert not c3.u.c
c3.u.i = True
choker.interested(c3)
assert not c1.u.c
assert c2.u.c
assert not c3.u.c
choker.connection_lost(c3)
assert not c1.u.c
assert not c2.u.c
def test_robin_interest():
s = DummyScheduler()
choker = Choker(1, s)
c1 = DummyConnection(0)
c2 = DummyConnection(1)
c1.u.i = True
choker.connection_made(c2)
assert not c2.u.c
choker.connection_made(c1, 0)
assert not c1.u.c
assert c2.u.c
c1.u.i = False
choker.not_interested(c1)
assert not c1.u.c
assert not c2.u.c
c1.u.i = True
choker.interested(c1)
assert not c1.u.c
assert c2.u.c
choker.connection_lost(c1)
assert not c2.u.c
def test_skip_not_interested():
s = DummyScheduler()
choker = Choker(1, s)
c1 = DummyConnection(0)
c2 = DummyConnection(1)
c3 = DummyConnection(2)
c1.u.i = True
c3.u.i = True
choker.connection_made(c2)
assert not c2.u.c
choker.connection_made(c1, 0)
assert not c1.u.c
assert c2.u.c
choker.connection_made(c3, 2)
assert not c1.u.c
assert c2.u.c
assert c3.u.c
f = s.s[0][0]
f()
assert not c1.u.c
assert c2.u.c
assert c3.u.c
f()
assert not c1.u.c
assert c2.u.c
assert c3.u.c
f()
assert c1.u.c
assert c2.u.c
assert not c3.u.c
def test_connection_lost_no_interrupt():
s = DummyScheduler()
choker = Choker(1, s)
c1 = DummyConnection(0)
c2 = DummyConnection(1)
c3 = DummyConnection(2)
c1.u.i = True
c2.u.i = True
c3.u.i = True
choker.connection_made(c1)
choker.connection_made(c2, 1)
choker.connection_made(c3, 2)
f = s.s[0][0]
f()
assert not c1.u.c
assert c2.u.c
assert c3.u.c
f()
assert not c1.u.c
assert c2.u.c
assert c3.u.c
f()
assert c1.u.c
assert not c2.u.c
assert c3.u.c
f()
assert c1.u.c
assert not c2.u.c
assert c3.u.c
f()
assert c1.u.c
assert not c2.u.c
assert c3.u.c
choker.connection_lost(c3)
assert c1.u.c
assert not c2.u.c
f()
assert not c1.u.c
assert c2.u.c
choker.connection_lost(c2)
assert not c1.u.c
def test_connection_made_no_interrupt():
s = DummyScheduler()
choker = Choker(1, s)
c1 = DummyConnection(0)
c2 = DummyConnection(1)
c3 = DummyConnection(2)
c1.u.i = True
c2.u.i = True
c3.u.i = True
choker.connection_made(c1)
choker.connection_made(c2, 1)
f = s.s[0][0]
assert not c1.u.c
assert c2.u.c
f()
assert not c1.u.c
assert c2.u.c
f()
assert not c1.u.c
assert c2.u.c
choker.connection_made(c3, 1)
assert not c1.u.c
assert c2.u.c
assert c3.u.c
f()
assert c1.u.c
assert c2.u.c
assert not c3.u.c
def test_round_robin():
s = DummyScheduler()
choker = Choker(1, s)
c1 = DummyConnection(0)
c2 = DummyConnection(1)
c1.u.i = True
c2.u.i = True
choker.connection_made(c1)
choker.connection_made(c2, 1)
f = s.s[0][0]
assert not c1.u.c
assert c2.u.c
f()
assert not c1.u.c
assert c2.u.c
f()
assert not c1.u.c
assert c2.u.c
f()
assert c1.u.c
assert not c2.u.c
f()
assert c1.u.c
assert not c2.u.c
f()
assert c1.u.c
assert not c2.u.c
f()
assert not c1.u.c
assert c2.u.c
def test_multi():
s = DummyScheduler()
choker = Choker(4, s)
c1 = DummyConnection(0)
c2 = DummyConnection(0)
c3 = DummyConnection(0)
c4 = DummyConnection(8)
c5 = DummyConnection(0)
c6 = DummyConnection(0)
c7 = DummyConnection(6)
c8 = DummyConnection(0)
c9 = DummyConnection(9)
c10 = DummyConnection(7)
c11 = DummyConnection(10)
choker.connection_made(c1, 0)
choker.connection_made(c2, 1)
choker.connection_made(c3, 2)
choker.connection_made(c4, 3)
choker.connection_made(c5, 4)
choker.connection_made(c6, 5)
choker.connection_made(c7, 6)
choker.connection_made(c8, 7)
choker.connection_made(c9, 8)
choker.connection_made(c10, 9)
choker.connection_made(c11, 10)
c2.u.i = True
c4.u.i = True
c6.u.i = True
c8.u.i = True
c10.u.i = True
c2.d.s = True
c6.d.s = True
c8.d.s = True
s.s[0][0]()
assert not c1.u.c
assert not c2.u.c
assert not c3.u.c
assert not c4.u.c
assert not c5.u.c
assert not c6.u.c
assert c7.u.c
assert c8.u.c
assert c9.u.c
assert not c10.u.c
assert c11.u.c
|
|
#!/usr/bin/env python
# ***** BEGIN LICENSE BLOCK *****
# Version: MPL 1.1/GPL 2.0/LGPL 2.1
#
# The contents of this file are subject to the Mozilla Public License
# Version 1.1 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
# http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS IS"
# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
# License for the specific language governing rights and limitations
# under the License.
#
# The Original Code is Komodo code.
#
# The Initial Developer of the Original Code is ActiveState Software Inc.
# Portions created by ActiveState Software Inc are Copyright (C) 2000-2007
# ActiveState Software Inc. All Rights Reserved.
#
# Contributor(s):
# ActiveState Software Inc
#
# Alternatively, the contents of this file may be used under the terms of
# either the GNU General Public License Version 2 or later (the "GPL"), or
# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
# in which case the provisions of the GPL or the LGPL are applicable instead
# of those above. If you wish to allow use of your version of this file only
# under the terms of either the GPL or the LGPL, and not to allow others to
# use your version of this file under the terms of the MPL, indicate your
# decision by deleting the provisions above and replace them with the notice
# and other provisions required by the GPL or the LGPL. If you do not delete
# the provisions above, a recipient may use your version of this file under
# the terms of any one of the MPL, the GPL or the LGPL.
#
# ***** END LICENSE BLOCK *****
"""CSS support for CodeIntel"""
import os
from os.path import isfile, isdir, exists, dirname, abspath, splitext, join
import sys
import stat
import string
from io import StringIO
import logging
import traceback
from pprint import pprint
import SilverCity
from SilverCity.Lexer import Lexer
from SilverCity import ScintillaConstants
from SilverCity.ScintillaConstants import (
SCE_CSS_DIRECTIVE, SCE_CSS_DOUBLESTRING, SCE_CSS_IDENTIFIER,
SCE_CSS_IDENTIFIER2, SCE_CSS_OPERATOR, SCE_CSS_SINGLESTRING,
SCE_CSS_TAG, SCE_CSS_UNKNOWN_IDENTIFIER, SCE_CSS_VALUE,
SCE_UDL_CSS_COMMENT, SCE_UDL_CSS_DEFAULT, SCE_UDL_CSS_IDENTIFIER,
SCE_UDL_CSS_NUMBER, SCE_UDL_CSS_OPERATOR, SCE_UDL_CSS_STRING,
SCE_UDL_CSS_WORD, SCE_UDL_M_STRING, SCE_UDL_M_ATTRNAME, SCE_UDL_M_OPERATOR,
)
from SilverCity import Keywords
from codeintel2.common import *
from codeintel2.buffer import Buffer
from codeintel2.util import (OrdPunctLast, make_short_name_dict,
makePerformantLogger)
from codeintel2.langintel import LangIntel, ParenStyleCalltipIntelMixin
from codeintel2.udl import UDLBuffer, is_udl_css_style
from codeintel2.accessor import AccessorCache
if _xpcom_:
from xpcom.server import UnwrapObject
#---- globals
lang = "CSS"
log = logging.getLogger("codeintel.css")
makePerformantLogger(log)
WHITESPACE = tuple(" \t\r\n") # care about '\v', '\f'?
#---- language support
# Taken from the Scite version 2.0.2 css.properties file
# SilverCity wants the number of wordlists to match the number hardwired in
# the lexer, which is why the trailing lists are left empty.
raw_word_lists = [
# CSS1 keywords
"""
background background-attachment background-color background-image
background-position background-repeat border border-bottom
border-bottom-width border-color border-left border-left-width
border-right border-right-width border-style border-top
border-top-width border-width
clear color display float font
font-family font-size font-style font-variant font-weight height
letter-spacing line-height list-style list-style-image
list-style-position list-style-type margin margin-bottom margin-left
margin-right margin-top padding padding-bottom padding-left
padding-right padding-top text-align text-decoration text-indent
text-transform vertical-align white-space width word-spacing
""",
# CSS pseudo-classes
"""
active after before first first-child first-letter first-line
focus hover lang left link right visited
""",
# CSS2 keywords
"""
ascent azimuth baseline bbox border-bottom-color
border-bottom-style border-collapse border-color border-left-color
border-left-style border-right-color border-right-style
border-spacing border-style border-top-color border-top-style
bottom cap-height caption-side centerline clip content
counter-increment counter-reset cue cue-after cue-before cursor
definition-src descent direction elevation empty-cells
font-size-adjust font-stretch left marker-offset marks mathline
max-height max-width min-height min-width orphans outline
outline-color outline-style outline-width overflow page
page-break-after page-break-before page-break-inside panose-1
pause pause-after pause-before pitch pitch-range play-during
position quotes richness right size slope speak speak-header
speak-numeral speak-punctuation speech-rate src stemh stemv stress
table-layout text-shadow top topline unicode-bidi unicode-range
units-per-em visibility voice-family volume widows widths x-height
z-index
""",
# CSS3 Properties
"""
border-top-left-radius
border-top-right-radius
border-bottom-left-radius
border-bottom-right-radius
border-radius
""",
# Pseudo-elements
"",
# Browser-Specific CSS Properties
"",
# Browser-Specific Pseudo-classes
"",
# Browser-Specific Pseudo-elements
"",
]
class CSSLexer(Lexer):
lang = "CSS"
def __init__(self):
self._properties = SilverCity.PropertySet()
self._lexer = SilverCity.find_lexer_module_by_id(
ScintillaConstants.SCLEX_CSS)
self._keyword_lists = []
for i in range(len(raw_word_lists)):
self._keyword_lists.append(SilverCity.WordList(raw_word_lists[i]))
class _StraightCSSStyleClassifier(object):
def is_css_style(self, style, accessorCacheBack=None):
return True
def is_default(self, style, accessorCacheBack=None):
return style in self.default_styles
def is_comment(self, style, accessorCacheBack=None):
return style in self.comment_styles
def is_string(self, style, accessorCacheBack=None):
return style in self.string_styles
def is_operator(self, style, accessorCacheBack=None):
return style in self.operator_styles or \
style == ScintillaConstants.SCE_CSS_IMPORTANT
def is_identifier(self, style, accessorCacheBack=None):
return style in self.identifier_styles
def is_value(self, style, accessorCacheBack=None):
return style in self.value_styles
def is_tag(self, style, accessorCacheBack=None):
return style in self.tag_styles
def is_class(self, style, accessorCacheBack=None):
return style in self.class_styles
def is_number(self, style, accessorCacheBack=None):
return style in self.number_styles
@property
def default_styles(self):
return (ScintillaConstants.SCE_CSS_DEFAULT, )
@property
def comment_styles(self):
return (ScintillaConstants.SCE_CSS_COMMENT,)
@property
def string_styles(self):
return (ScintillaConstants.SCE_CSS_SINGLESTRING,
ScintillaConstants.SCE_CSS_DOUBLESTRING)
@property
def operator_styles(self):
return (ScintillaConstants.SCE_CSS_OPERATOR, )
@property
def identifier_styles(self):
return (ScintillaConstants.SCE_CSS_IDENTIFIER,
ScintillaConstants.SCE_CSS_IDENTIFIER2,
ScintillaConstants.SCE_CSS_UNKNOWN_IDENTIFIER)
@property
def value_styles(self):
return (ScintillaConstants.SCE_CSS_VALUE,
ScintillaConstants.SCE_CSS_NUMBER)
@property
def tag_styles(self):
return (ScintillaConstants.SCE_CSS_TAG, )
@property
def class_styles(self):
return (ScintillaConstants.SCE_CSS_CLASS, )
@property
def number_styles(self):
return ()
@property
def ignore_styles(self):
return (ScintillaConstants.SCE_CSS_DEFAULT,
ScintillaConstants.SCE_CSS_COMMENT)
DebugStatus = False
class _UDLCSSStyleClassifier(_StraightCSSStyleClassifier):
def is_css_style(self, style, accessorCacheBack=None):
return is_udl_css_style(style)
def _is_html_style_attribute(self, ac, style):
        # Check to see if it's an HTML style attribute
# Note: We are starting from the html string delimiter, i.e.:
# <body style=<|>"abc...
DEBUG = DebugStatus
        # We may have already determined that this is a style attribute; check the cached flag
if getattr(ac, "is_html_style_attribute", False):
return True
p, ch, style = ac.getPrecedingPosCharStyle(style,
ignore_styles=self.ignore_styles)
if DEBUG:
print(" _is_html_style_attribute:: Prev style: %d, ch: %r" % (
style, ch, ))
if style == SCE_UDL_M_OPERATOR:
p, ch, style = ac.getPrecedingPosCharStyle(style,
ignore_styles=self.ignore_styles)
if style == SCE_UDL_M_ATTRNAME:
p, name = ac.getTextBackWithStyle(style)
if DEBUG:
print(" _is_html_style_attribute:: HTML Attribute: %r" % (
name, ))
if name == "style":
                    # Remember that this is an html style attribute
ac.is_html_style_attribute = True
return True
return False
def is_identifier(self, style, accessorCacheBack=None):
if style not in self.identifier_styles:
return False
# Previous style must be operator and one of "{;"
ac = accessorCacheBack
if ac is not None:
DEBUG = DebugStatus
# DEBUG = True
pcs = ac.getCurrentPosCharStyle()
if DEBUG:
print(" is_identifier:: pcs: %r" % (pcs, ))
try:
                # Check the character preceding the identifier
ppcs = ac.getPrecedingPosCharStyle(pcs[2],
ignore_styles=self.ignore_styles)
if DEBUG:
print(" is_identifier:: ppcs: %r" % (ppcs, ))
if self.is_operator(ppcs[2]) and ppcs[1] in "{;":
return True
elif ppcs[2] == SCE_UDL_M_STRING and \
self._is_html_style_attribute(ac, ppcs[2]):
return True
if DEBUG:
print(" is_identifier:: Not an identifier style")
finally:
# Reset the accessor back to the current position
ac.resetToPosition(pcs[0])
return False
def is_class(self, style, accessorCacheBack=None):
ac = accessorCacheBack
if ac is not None:
            pcs = ac.getCurrentPosCharStyle()
            DEBUG = DebugStatus
            if DEBUG:
                print(" is_class:: pcs: %r" % (pcs, ))
            if self.is_operator(pcs[2]) and pcs[1] in ">.;}{":
                return True
            try:
# Check that the preceding character before the identifier is a
# "."
ppcs = ac.getPrecedingPosCharStyle(pcs[2],
ignore_styles=self.ignore_styles)
if DEBUG:
print(" is_class:: ppcs: %r" % (ppcs, ))
if ppcs[2] in self.identifier_styles:
ppcs = ac.getPrecedingPosCharStyle(ppcs[2],
ignore_styles=self.ignore_styles)
if self.is_operator(ppcs[2]) and ppcs[1] == ".":
return True
elif not is_udl_css_style(ppcs[2]):
return True
                # If there is no identifier, it may be an operator, which is okay
elif not is_udl_css_style(ppcs[2]) or \
(self.is_operator(ppcs[2]) and ppcs[1] in "};"):
return True
if DEBUG:
print(" is_class:: Not a class style")
finally:
# Reset the accessor back to the current position
ac.resetToPosition(pcs[0])
return False
def is_tag(self, style, accessorCacheBack=None):
ac = accessorCacheBack
if ac is not None:
# Tags follow operators or other tags
            # To decide, we'll go back until we find an operator in "}>"
if style in self.identifier_styles:
DEBUG = DebugStatus
p, ch, style = ac.getCurrentPosCharStyle()
start_p = p
min_p = max(0, p - 50)
try:
while p > min_p:
                        # Fetch the preceding character and style before the
                        # identifier
p, ch, style = ac.getPrecedingPosCharStyle(style,
ignore_styles=self.ignore_styles)
if style in self.operator_styles:
                            # That's good, we can make our decision now
if ch in "}>":
return True
elif ch == ",":
# Might be following another tag, "div, div",
# http://bugs.activestate.com/show_bug.cgi?id=58637
continue
if DEBUG:
print(" is_tag:: Not a tag operator ch: %s" % (ch))
return False
elif not self.is_css_style(style):
if DEBUG:
print(" is_tag:: Not a css style: %d, ch: %r" % (style, ch, ))
if style == SCE_UDL_M_STRING and \
self._is_html_style_attribute(ac, style):
return False
return True
elif style not in self.identifier_styles:
if DEBUG:
print(" is_tag:: Not a tag style, style: %d" % (style))
return False
                        # else: that's okay, we'll keep going
finally:
# Reset the accessor back to the current position
ac.resetToPosition(start_p)
return False
@property
def default_styles(self):
return (ScintillaConstants.SCE_UDL_CSS_DEFAULT, )
@property
def comment_styles(self):
return (ScintillaConstants.SCE_UDL_CSS_COMMENT,)
@property
def string_styles(self):
return (ScintillaConstants.SCE_UDL_CSS_STRING, )
@property
def operator_styles(self):
return (ScintillaConstants.SCE_UDL_CSS_OPERATOR, )
@property
def identifier_styles(self):
return (ScintillaConstants.SCE_UDL_CSS_IDENTIFIER,
ScintillaConstants.SCE_UDL_CSS_WORD)
@property
def value_styles(self):
return (ScintillaConstants.SCE_UDL_CSS_WORD,
ScintillaConstants.SCE_UDL_CSS_IDENTIFIER,
ScintillaConstants.SCE_UDL_CSS_NUMBER)
@property
def tag_styles(self):
return (ScintillaConstants.SCE_CSS_TAG, )
@property
def number_styles(self):
return (ScintillaConstants.SCE_UDL_CSS_NUMBER, )
@property
def ignore_styles(self):
return (ScintillaConstants.SCE_UDL_CSS_DEFAULT,
ScintillaConstants.SCE_UDL_CSS_COMMENT)
StraightCSSStyleClassifier = _StraightCSSStyleClassifier()
UDLCSSStyleClassifier = _UDLCSSStyleClassifier()
class CSSLangIntel(LangIntel, ParenStyleCalltipIntelMixin):
lang = "CSS"
@LazyClassAttribute
def CSS_ATTRIBUTES(self):
# CSS attributes:
# key (string) is the css property (attribute) name
# value (list) is the possible css property (attribute) values
from codeintel2 import constants_css3 as constants_css
from codeintel2 import constants_css_microsoft_extensions
from codeintel2 import constants_css_moz_extensions
from codeintel2 import constants_css_webkit_extensions
attrs = constants_css.CSS_ATTR_DICT.copy()
attrs.update(
constants_css_microsoft_extensions.CSS_MICROSOFT_SPECIFIC_ATTRS_DICT)
attrs.update(constants_css_moz_extensions.CSS_MOZ_SPECIFIC_ATTRS_DICT)
attrs.update(
constants_css_webkit_extensions.CSS_WEBKIT_SPECIFIC_ATTRS_DICT)
return attrs
@LazyClassAttribute
def CSS_PROPERTY_NAMES(self):
# Setup the names triggered for "property-names"
return sorted(list(self.CSS_ATTRIBUTES.keys()), key=OrdPunctLast)
@LazyClassAttribute
def CSS_PROPERTY_ATTRIBUTE_CALLTIPS_DICT(self):
# Calltips for css property attributes
from codeintel2 import constants_css3 as constants_css
from codeintel2 import constants_css_microsoft_extensions
from codeintel2 import constants_css_moz_extensions
from codeintel2 import constants_css_webkit_extensions
calltips = constants_css.CSS_PROPERTY_ATTRIBUTE_CALLTIPS_DICT.copy()
calltips.update(
constants_css_microsoft_extensions.CSS_MICROSOFT_SPECIFIC_CALLTIP_DICT)
calltips.update(
constants_css_moz_extensions.CSS_MOZ_SPECIFIC_CALLTIP_DICT)
calltips.update(
constants_css_webkit_extensions.CSS_WEBKIT_SPECIFIC_CALLTIP_DICT)
return calltips
@LazyClassAttribute
def CSS_HTML_TAG_NAMES(self):
# Tag names
return sorted(Keywords.hypertext_elements.split())
@LazyClassAttribute
def CSS_PSEUDO_CLASS_NAMES(self):
# pseudo-class-names
from codeintel2 import constants_css3 as constants_css
return sorted(constants_css.CSS_PSEUDO_CLASS_NAMES, key=OrdPunctLast)
@LazyClassAttribute
def CSS_AT_RULE_NAMES(self):
# at rules
return sorted(
["import", "media", "charset", "font-face", "page", "namespace"],
key=OrdPunctLast)
def preceding_trg_from_pos(self, buf, pos, curr_pos):
DEBUG = DebugStatus # not using 'logging' system, because want to be fast
# DEBUG = True # not using 'logging' system, because want to be fast
if DEBUG:
print("\npreceding_trg_from_pos -- pos: %d, curr_pos: %d" % (
pos, curr_pos, ))
if isinstance(buf, UDLBuffer):
styleClassifier = UDLCSSStyleClassifier
else:
styleClassifier = StraightCSSStyleClassifier
ac = AccessorCache(buf.accessor, curr_pos+1, fetchsize=50)
currTrg = self._trg_from_pos(buf, (curr_pos == pos) and pos or pos+1,
implicit=False, DEBUG=DEBUG,
ac=ac, styleClassifier=styleClassifier)
if DEBUG:
print(" currTrg: %r" % (currTrg, ))
# If we're not looking for a previous trigger, or else the current
# trigger position is for a calltip, then do not look any further.
if (pos == curr_pos) or (currTrg and currTrg.form == TRG_FORM_CALLTIP):
return currTrg
# Else, work our way backwards from pos.
ac.resetToPosition(pos+1)
p, ch, style = ac.getPrevPosCharStyle()
if DEBUG:
print(" preceding_trg_from_pos: p: %r, ch: %r, style: %r" % (p, ch, style))
min_p = max(0, p - 200)
ignore_styles = styleClassifier.comment_styles + \
styleClassifier.string_styles + \
styleClassifier.number_styles
while p > min_p and styleClassifier.is_css_style(style):
p, ch, style = ac.getPrecedingPosCharStyle(
style, ignore_styles=ignore_styles, max_look_back=100)
if DEBUG:
print(" preceding_trg_from_pos: Trying preceding p: %r, ch: %r, style: %r" % (p, ch, style))
if ch and (isident(ch) or ch in ":( \t"):
trg = self._trg_from_pos(buf, p+1, implicit=False, DEBUG=DEBUG,
ac=ac, styleClassifier=styleClassifier)
if trg is not None:
if DEBUG:
print("trg: %r" % (trg, ))
if currTrg is not None:
if currTrg.type != trg.type:
if DEBUG:
print(" Next trigger is a different type, ending search")
return None
elif currTrg.form != trg.form:
return trg
elif DEBUG:
print(" Found same trigger again, continuing " \
"looking for a different trigger")
else:
return trg
return None
def _trg_from_pos(self, buf, pos, implicit=True, DEBUG=False, ac=None, styleClassifier=None):
# DEBUG = True # not using 'logging' system, because want to be fast
if DEBUG:
print("\n----- CSS _trg_from_pos(pos=%r, implicit=%r) -----"\
% (pos, implicit))
try:
if pos == 0:
return None
if ac is None:
ac = AccessorCache(buf.accessor, pos, fetchsize=50)
else:
ac.resetToPosition(pos)
            # Ensure this attribute is initialized to False; it is used by UDL
            # to check whether the css style is inside an html tag, e.g.:
            #     <p style="mycss: value;" />
            # When such a case is found, this value is set to True.
ac.is_html_style_attribute = False
last_pos, last_char, last_style = ac.getPrevPosCharStyle()
if DEBUG:
print(" _trg_from_pos:: last_pos: %s" % last_pos)
print(" last_char: %r" % last_char)
print(" last_style: %s" % last_style)
# The easy ones are triggering after any of '#.[: '.
# For speed, let's get the common ' ' out of the way. The only
# trigger on space is 'complete-property-values'.
if styleClassifier.is_default(last_style):
if DEBUG:
print(" _trg_from_pos:: Default style: %d, ch: %r" % (last_style, last_char))
                # Move backwards to resolve the ambiguity; default to
                # "property-values"
min_pos = max(0, pos - 200)
while last_pos > min_pos:
last_pos, last_char, last_style = ac.getPrevPosCharStyle()
if styleClassifier.is_operator(last_style, ac) or styleClassifier.is_value(last_style, ac):
if DEBUG:
print(" _trg_from_pos: space => property-values")
return Trigger("CSS", TRG_FORM_CPLN, "property-values",
pos, implicit)
elif styleClassifier.is_tag(last_style, ac):
if DEBUG:
print(" _trg_from_pos: space => tag-names")
return Trigger("CSS", TRG_FORM_CPLN, "tag-names",
pos, implicit)
elif styleClassifier.is_identifier(last_style, ac):
if DEBUG:
print(" _trg_from_pos: space => property-names")
return Trigger("CSS", TRG_FORM_CPLN, "property-names",
pos, implicit)
if DEBUG:
print(" _trg_from_pos: couldn't resolve space, settling on property-names")
return Trigger("CSS", TRG_FORM_CPLN, "property-values",
pos, implicit)
elif styleClassifier.is_operator(last_style, ac):
# anchors
if DEBUG:
print(" _trg_from_pos:: OPERATOR style")
if last_char == '#':
return Trigger("CSS", TRG_FORM_CPLN, "anchors",
pos, implicit)
elif last_char == ':':
try:
p, ch, style = ac.getPrevPosCharStyle(
ignore_styles=styleClassifier.ignore_styles)
if DEBUG:
print(" _trg_from_pos:: Looking at p: %d, ch: %r, style: %d" % (p, ch, style))
except IndexError:
style = None
if DEBUG:
print(" _trg_from_pos:: style: %r" % (style))
if style is None or \
not styleClassifier.is_identifier(style, ac):
# if style is None or \
# not styleClassifier.is_css_style(style) or \
# styleClassifier.is_class(style, ac):
# complete for pseudo-class-names
return Trigger(
"CSS", TRG_FORM_CPLN, "pseudo-class-names",
pos, implicit)
else:
# if styleClassifier.is_identifier(style, ac):
# calltip for property-values
return Trigger(
"CSS", TRG_FORM_CALLTIP, "property-values",
pos, implicit)
# class-names
elif last_char == '.':
return Trigger("CSS", TRG_FORM_CPLN, "class-names",
pos, implicit)
# at-rule
elif last_char == '@':
# p, ch, style = ac.getPrevPosCharStyle(ignore_styles=styleClassifier.comment_styles)
# XXX - Should check not beyond first rule set
# - Should check not within a rule block.
return Trigger("CSS", TRG_FORM_CPLN, "at-rule",
pos, implicit)
elif last_char == '/':
try:
p, ch, style = ac.getPrevPosCharStyle()
except IndexError:
pass
else:
if ch == "<":
# Looks like start of closing '</style>'
# tag. While typing this the styling will
# still be in the CSS range.
return Trigger(buf.m_lang, TRG_FORM_CPLN,
"end-tag", pos, implicit)
# tag-names
elif styleClassifier.is_tag(last_style, ac):
                # We trigger on tag names of any length >= 1 char
if DEBUG:
print(" _trg_from_pos:: TAG style")
p, ch, style = last_pos, last_char, last_style
try:
while p >= 0:
if DEBUG:
print(" _trg_from_pos:: Looking at p: %d, ch: %r, style: %d" % (p, ch, style))
if not isident(ch):
p += 1
break
elif style != last_style:
if DEBUG:
print(" _trg_from_pos:: Current style is not a tag: %d" % (style))
return None
p, ch, style = ac.getPrevPosCharStyle()
except IndexError:
p = 0
return Trigger("CSS", TRG_FORM_CPLN, "tag-names",
p, implicit)
elif styleClassifier.is_identifier(last_style, ac):
if DEBUG:
print(" _trg_from_pos:: IDENTIFIER style")
# property-names
# print "here", accessor.text_range(0, pos)
# We trigger on identifier names with any length >= 1 char
pos = last_pos
while pos >= 0:
pos, ch, style = ac.getPrevPosCharStyle()
if not isident(ch):
break
elif style != last_style:
return None
extentLength = last_pos - pos
# cover ": " following the identifier if it's there (since we
# add it to the autocomplete in _async_eval_at_trg)
following_text = ac.text_range(last_pos + 1, last_pos + 3)
for idx, char in enumerate(": "):
try:
if following_text[idx] == char:
extentLength += 1
else:
break
except IndexError:
break
return Trigger("CSS", TRG_FORM_CPLN, "property-names",
pos+1, implicit, extentLength=extentLength)
elif styleClassifier.is_value(last_style, ac):
p, ch, style = ac.getPrevPosCharStyle(
ignore_styles=styleClassifier.comment_styles)
if DEBUG:
print(" _trg_from_pos:: VALUE style")
print(" _trg_from_pos:: p: %s" % p)
print(" _trg_from_pos:: ch: %r" % ch)
print(" _trg_from_pos:: style: %s" % style)
ac.dump()
# Implicit triggering only happens on a whitespace character
# after any one of these ":,%) " characters
# Note: last_char can be a value style yet also be whitespace
# in straight CSS.
if last_char in WHITESPACE:
return Trigger("CSS", TRG_FORM_CPLN, "property-values",
last_pos+1, implicit)
elif ch in WHITESPACE or ch in ":,%)":
# Check to ensure this is not a pseudo-class! Bug:
# http://bugs.activestate.com/show_bug.cgi?id=71073
if ch == ":":
# Last style must be an identifier then!
pp, pch, pstyle = ac.getPrevPosCharStyle(
ignore_styles=styleClassifier.ignore_styles)
if DEBUG:
print("pp: %d, pch: %r, pstyle: %d" % (pp, pch,
pstyle))
if not styleClassifier.is_identifier(pstyle, ac):
# This is likely a pseudo-class definition then,
# no trigger here.
if DEBUG:
print("pseudo-class style found, no trigger.")
return None
return Trigger("CSS", TRG_FORM_CPLN, "property-values",
p+1, implicit)
# For explicit, we can also be inside a property already
if not implicit and isident(ch):
# If there is already part of a value there, we need to move
# the trigger point "p" to the start of the value.
while isident(ch):
p, ch, style = ac.getPrevPosCharStyle()
return Trigger("CSS", TRG_FORM_CPLN, "property-values",
p+1, implicit)
return None
elif DEBUG:
print(" _trg_from_pos:: Unexpected style: %d, ch: %r" % (last_style, last_char))
# XXX "at-property-names" - Might be used later
# elif last_style == SCE_CSS_DIRECTIVE:
# # property-names
# # We trigger on identifier names with length == 3
# #print "here", accessor.text_range(0, pos)
# if pos >= 4 and accessor.char_at_pos(pos - 4) == ' ' and \
# self._is_ident_of_length(accessor, pos, length=3):
# # We are good for completion
# if DEBUG:
# print "Got a trigger for 'at-property-names'"
# return Trigger("CSS", TRG_FORM_CPLN, "at-property-names",
# pos-3, implicit, extra={"ac": ac})
except IndexError:
            # Went out of range of the buffer before we found anything useful
pass
if DEBUG:
print("----- CSS trg_from_pos() -----")
return None
def trg_from_pos(self, buf, pos, implicit=True, ac=None):
DEBUG = DebugStatus # not using 'logging' system, because want to be fast
if isinstance(buf, UDLBuffer):
# This is CSS content in a multi-lang buffer.
return self._trg_from_pos(buf, pos, implicit, DEBUG, ac, UDLCSSStyleClassifier)
else:
return self._trg_from_pos(buf, pos, implicit, DEBUG, ac, StraightCSSStyleClassifier)
def _async_eval_at_trg(self, buf, trg, ctlr, styleClassifier):
# Note: Currently this is NOT asynchronous. I believe that is fine
# as long as evaluation is fast -- because the IDE UI thread could
# be blocked on this. If processing might be slow (e.g. scanning
# a number of project files for appropriate anchors, etc.), then
# this should be made asynchronous.
DEBUG = DebugStatus
# DEBUG = True
if DEBUG:
print("\n----- async_eval_at_trg(trg=%r) -----"\
% (trg))
# Setup the AccessorCache
extra = trg.extra
ac = None
# print "Extra: %r" % (extra)
if isinstance(extra, dict):
extra = extra.get("extra", None)
if isinstance(extra, dict):
ac = extra.get("ac", None)
if ac and DEBUG:
print(" _async_eval_at_trg:: Trigger had existing AC")
ac.dump()
if ac is None:
if DEBUG:
print(" _async_eval_at_trg:: Created new trigger!")
ac = AccessorCache(buf.accessor, trg.pos, fetchsize=20)
ctlr.start(buf, trg)
pos = trg.pos
try:
if trg.id == ("CSS", TRG_FORM_CPLN, "tag-names"):
if DEBUG:
print(" _async_eval_at_trg:: 'tag-names'")
cplns = self.CSS_HTML_TAG_NAMES
if DEBUG:
print(" _async_eval_at_trg:: cplns:", cplns)
if cplns:
ctlr.set_cplns([("element", v) for v in cplns])
ctlr.done("success")
elif trg.id == ("CSS", TRG_FORM_CPLN, "anchors"):
# Can be a colour or an id tag, depending upon what the
# previous char/style is
# The previous style must be an op style or alphanumeric ch
# i = 0
# max_total_lookback = 100 # Up to 100 chars back
# while i < max_total_lookback:
# p, ch, style = ac.getPrecedingPosCharStyle(last_style,
# ignore_styles=styleClassifier.ignore_styles)
# if not is_udl_css_style(style) or \
# (styleClassifier.is_operator(style, ac) and \
# ch in "};"):
# i = last_pos - p
# XXX - Needs to lookup the project HTML files for anchors...
# anchors = self._get_all_anchors_names_in_project(accessor)
ctlr.done("success")
elif trg.id == ("CSS", TRG_FORM_CPLN, "class-names"):
# raise NotImplementedError("not yet implemented: completion for "
# "most css triggers")
ctlr.done("success")
elif trg.id == ("CSS", TRG_FORM_CPLN, "property-names"):
cplns = self.CSS_PROPERTY_NAMES
if cplns:
# Note: we add the colon as well - see bug 89913.
ctlr.set_cplns([("property", v + ": ") for v in cplns])
# We want to show the property values after autocompleting.
trg.retriggerOnCompletion = True
# print " _async_eval_at_trg:: cplns:", cplns
ctlr.done("success")
elif trg.id == ("CSS", TRG_FORM_CALLTIP, "property-values"):
property, v1, v2 \
= self._extract_css_declaration(ac, styleClassifier, trg,
is_for_calltip=True)
if DEBUG:
print(" _async_eval_at_trg:: Property name: %r" % \
(property, ))
try:
calltip = self.CSS_PROPERTY_ATTRIBUTE_CALLTIPS_DICT[
property]
if DEBUG:
print(" _async_eval_at_trg:: calltip:", calltip)
ctlr.set_calltips([calltip])
except KeyError:
# print "Unknown CSS property: '%s'" % (property)
pass # Ignore unknown CSS attributes
ctlr.done("success")
elif trg.id == ("CSS", TRG_FORM_CPLN, "property-values"):
property, current_value, values \
= self._extract_css_declaration(ac, styleClassifier, trg)
if DEBUG:
print(" _async_eval_at_trg:: XXX property: %r, " \
" current_value: %r, values: %r" % (property,
current_value,
values))
try:
# print "\ndict:", self.CSS_ATTRIBUTES[property]
property_values = sorted(self.CSS_ATTRIBUTES[property],
key=OrdPunctLast)
# Check if it matches anything, if not, dismiss the list
if current_value:
clen = len(current_value)
for v in property_values:
if clen <= len(v) and current_value == v[:clen]:
# Found a match
break
# Else, return the full list, even though no match made
# XXX - May want to cancel the CC list, any way to do
# this?
cplns = [("value", v)
for v in property_values
if v not in values or v == current_value]
ctlr.set_cplns(cplns)
except KeyError:
if DEBUG:
print(" _async_eval_at_trg:: Unknown CSS property: "\
"'%s'" % (property))
pass # Ignore unknown CSS attributes
ctlr.done("success")
# XXX Handling for property not in list.
elif trg.id == ("CSS", TRG_FORM_CPLN, "pseudo-class-names"):
cplns = [("pseudo-class", v)
for v in self.CSS_PSEUDO_CLASS_NAMES]
ctlr.set_cplns(cplns)
ctlr.done("success")
elif trg.id == ("CSS", TRG_FORM_CPLN, "at-rule"):
cplns = [("rule", v)
for v in self.CSS_AT_RULE_NAMES]
ctlr.set_cplns(cplns)
ctlr.done("success")
# Punt - Lower priority
# elif trg.id == ("CSS", TRG_FORM_CPLN, "units"):
# Punt - Fancy
# elif trg.id == ("CSS", TRG_FORM_CPLN, "import-url"):
# Punt - uncommon
# elif trg.id == ("CSS", TRG_FORM_CPLN, "attr-names"):
# elif trg.id == ("CSS", TRG_FORM_CPLN, "attr-values"):
else:
raise NotImplementedError("not yet implemented: completion for "
"most css triggers: trg.id: %s" % (trg.id,))
except IndexError:
# Tried to go out of range of buffer, nothing appropriate found
if DEBUG:
print(" _async_eval_at_trg:: ** Out of range error **")
ctlr.done("success")
def async_eval_at_trg(self, buf, trg, ctlr):
if isinstance(buf, UDLBuffer):
# This is CSS content in a multi-lang buffer.
return self._async_eval_at_trg(buf, trg, ctlr,
UDLCSSStyleClassifier)
else:
return self._async_eval_at_trg(buf, trg, ctlr,
StraightCSSStyleClassifier)
def _get_all_anchors_names_in_project(self):
# anchors = []
# pos = 0
# LENGTH = accessor.length
# style = 0
# func_style_at_pos = accessor.style_at_pos
# func_char_at_pos = accessor.char_at_pos
# while pos < LENGTH:
# if func_char_at_pos(pos) == '#' and \
# func_style_at_pos(pos) == SCE_CSS_OPERATOR:
# # Likely an anchor
# pass
# pos += 1
# return anchors
return []
def _is_ident_of_length(self, accessor, pos, length=3):
# Fourth char to left should not be an identifier
if pos > length and isident(accessor.char_at_pos((pos - length) - 1)):
return False
# chars to left should all be identifiers
for i in range(pos - 1, (pos - length) - 1, -1):
if not isident(accessor.char_at_pos(i)):
return False
return True
def _extract_css_declaration(self, ac, styleClassifier, trg,
is_for_calltip=False):
"""Extract the CSS declaration around the given position.
Returns a 3-tuple:
(<property>, <current_value>, <value_list>)
If is_for_calltip is true, we do not bother to parse out the values, so
<current_value> and <value_list> will be empty.
The value gets parsed into <value_list>, a list of individual values.
        Comments and strings are stripped from the return value.
If the <current_value> is '', then the trigger position is
ready to start a new value.
"""
DEBUG = DebugStatus
# DEBUG = True
# PERF: Use accessor.gen_chars_and_styles() if possible.
try:
ac.resetToPosition(trg.pos)
p, ch, style = ac.getPrevPosCharStyle()
if not styleClassifier.is_operator(style, ac):
if DEBUG:
print("Current ch is not an operator, so getting the " \
"preceeding one, p: %d, ch: %r, style: %d" % \
(p, ch, style, ))
p, ch, style = ac.getPrevPosCharStyle(
ignore_styles=styleClassifier.ignore_styles)
except IndexError:
# This occurs when already at the end of the buffer, so we reset to
# the last buffer position then
ac.resetToPosition(trg.pos - 1)
p, ch, style = ac.getCurrentPosCharStyle()
if DEBUG:
print("""------ _extract_css_declaration -----""")
print(" _extract_css_declaration:: Trg.pos: %d" % (trg.pos))
# ac._debug = True
print(" _extract_css_declaration:: pos: %r" % (p))
print(" _extract_css_declaration:: ch: %r" % (ch))
print(" _extract_css_declaration:: style: %r" % (style))
ac.dump()
# Walk back to ':' operator.
num_close_parenthesis = 0
min_pos = max(0, trg.pos - 200) # Lookback up to 200 chars in total
while p >= min_pos:
# print "ch: %r, style: %d" % (ch, style, )
if ch == ':' and styleClassifier.is_operator(style, ac):
break
elif num_close_parenthesis > 0:
if ch == "(":
num_close_parenthesis -= 1
if DEBUG:
print("Found matching open paren," \
" num_close_parenthesis now: %d" % (
num_close_parenthesis))
elif DEBUG:
print("Ignoring everything inside the parenthesis")
elif ch == "(" and (styleClassifier.is_operator(style) or
styleClassifier.is_value(style)):
if DEBUG:
print("Already inside a paren, no cpln's then.")
# XXX SCSS and Less support arithmetic expressions
return (None, None, None)
elif ch == ")" and (styleClassifier.is_operator(style) or
styleClassifier.is_value(style)):
num_close_parenthesis += 1
if DEBUG:
print("Found close paren, need to skip over contents," \
" num_close_parenthesis: %d" % (
num_close_parenthesis))
elif styleClassifier.is_operator(style):
if ch not in ":,%":
if DEBUG:
print("%s: couldn't find ':' operator, found invalid " \
"operator: %d %r %d" % (trg.name, p, ch, style))
# TODO: SCSS and Less support arithmetic expressions
return (None, None, None)
elif styleClassifier.is_string(style):
# Used to skip over string items in property values
if DEBUG:
print("Found string style, ignoring it")
elif not (styleClassifier.is_value(style) or styleClassifier.is_default(style)):
                # old CSS lexer: everything between ":" and ';' used to be a
# value.
if DEBUG:
print("%s: couldn't find ':' operator, found invalid " \
"style: pcs: %d %r %d" % (trg.name, p, ch, style))
return (None, None, None)
p, ch, style = ac.getPrevPosCharStyle(
ignore_styles=styleClassifier.ignore_styles)
else:
if DEBUG:
print("%s: couldn't find ':' operator within 200 chars, " \
"giving up" % (trg.name))
return (None, None, None)
if DEBUG:
print(" _extract_css_declaration:: Found ':' at pos: %d" % (p))
# Parse out the property name.
colan_pos = p
p, ch, style = ac.getPrecedingPosCharStyle(style,
ignore_styles=styleClassifier.ignore_styles,
max_look_back=150)
if style not in styleClassifier.identifier_styles:
if DEBUG:
print(" _extract_css_declaration:: No identifier style found" \
" before ':', found style %d instead" % (style))
return (None, None, None)
p, property = ac.getTextBackWithStyle(style)
property = property.strip()
if is_for_calltip:
# We have all the info we need
if DEBUG:
print(" _extract_css_declaration:: Returning property: %r" % (
property))
return (property, '', [])
# Walk forward parsing the value information, ends when we hit a ";" or
# have gone ahead a maximum of 200 chars.
ac.resetToPosition(colan_pos)
prev_pos, prev_ch, prev_style = ac.getCurrentPosCharStyle()
from_pos = prev_pos
p = colan_pos
# Value info, list of tuples (pos, text)
value_info = []
max_p = p + 200
try:
while p < max_p:
p, ch, style = ac.getNextPosCharStyle(
max_look_ahead=100, ignore_styles=styleClassifier.comment_styles)
if p is None or not styleClassifier.is_css_style(style):
# Went past max_look_ahead, just use what we've got then
if DEBUG:
print("%s: css value reached max length or end of " \
"document: trg.pos %d" % (trg.name, trg.pos))
value_info.append((from_pos, ac.text_range(from_pos, p)))
break
# Sass test
if ch == "\n" and self.lang == "Sass" and styleClassifier.is_default(style):
value_info.append((from_pos, ac.text_range(from_pos, p)))
break
if ch in WHITESPACE or styleClassifier.is_string(style):
if not prev_ch in WHITESPACE and not styleClassifier.is_string(prev_style):
value_info.append((
from_pos, ac.text_range(from_pos, p)))
from_pos = p+1
elif styleClassifier.is_operator(style):
if ch in ";{}":
value_info.append((
from_pos, ac.text_range(from_pos, p)))
break
# Other chars should be okay to collect
elif not styleClassifier.is_value(style) and \
style not in styleClassifier.ignore_styles:
if DEBUG:
print("%s: invalid style found: pos %d, style: %d" % (
trg.name, trg.pos, style))
return (None, None, None)
prev_pos, prev_ch, prev_style = p, ch, style
else:
if DEBUG:
print("%s: css value too long: trg.pos %d" % (trg.name, trg.pos))
return (None, None, None)
except IndexError:
if DEBUG:
print("ran out of buffer")
# Work out the values and the current value
current_value = None
values = []
trg_pos = trg.pos
for p, value in value_info:
if value and _isident_first_char(value[0]):
if DEBUG:
print("Is a valid value, p: %d, value: %r" % (p, value, ))
values.append(value)
if current_value is None and trg_pos >= p and \
trg_pos <= p + len(value):
current_value = value
if DEBUG:
print(" _extract_css_declaration:: Returning property: %r, " \
"current_value: %r, values: %r" % (property, current_value,
values))
return (property, current_value, values)
class CSSBuffer(Buffer):
lang = "CSS"
sce_prefixes = ["SCE_CSS_"]
# Removed '(' - double braces for completions that contain a '(' (bug 80063)
# Removed '.' - conflict with floating point values: .5em (bug 80126)
# Removed '{' - gets in way of "rule {" style declarations (bug 82358)
# Removed '#' - gets in the way of hex colors and id selectors (bug 82968)
# Removed '>' - gets in the way of child selectors (bug 87403)
cpln_fillup_chars = " '\";},/"
cpln_stop_chars = " ('\";{},.>/"
#---- internal support stuff
_ident_chars_dictionary = dict((ch, 1) for ch in
string.ascii_lowercase + string.ascii_uppercase + string.digits + "-")
def _isident_first_char(char):
return isident(char) and char != "-" and (char < "0" or char > "9")
def isident(char):
# In CSS2, identifiers (including element names, classes, and IDs in
# selectors) can contain only the characters [A-Za-z0-9] and ISO 10646
# characters 161 and higher, plus the hyphen (-); they cannot start with a
# hyphen or a digit
return char in _ident_chars_dictionary or ord(char) >= 161
def _isdigit(char):
return "0" <= char <= "9"
def _is_udl_css_ident(char):
return "a" <= char <= "z" or "A" <= char <= "Z" \
or char == "_" or char == "="
#---- registration
def register(mgr):
"""Register language support with the Manager."""
mgr.set_lang_info(lang,
silvercity_lexer=CSSLexer(),
buf_class=CSSBuffer,
langintel_class=CSSLangIntel,
is_cpln_lang=True)
|
|
"""
Gimp image parser (XCF file, ".xcf" extension).
You can find information about the XCF file format in the Gimp source code. URL to read
the CVS online:
http://cvs.gnome.org/viewcvs/gimp/app/xcf/
-> files xcf-read.c and xcf-load.c
Author: Victor Stinner
"""
from lib.hachoir_parser import Parser
from lib.hachoir_core.field import (StaticFieldSet, FieldSet, ParserError,
UInt8, UInt32, Enum, Float32, String, PascalString32, RawBytes)
from lib.hachoir_parser.image.common import RGBA
from lib.hachoir_core.endian import NETWORK_ENDIAN
class XcfCompression(FieldSet):
static_size = 8
COMPRESSION_NAME = {
0: u"None",
1: u"RLE",
2: u"Zlib",
3: u"Fractal"
}
def createFields(self):
yield Enum(UInt8(self, "compression", "Compression method"), self.COMPRESSION_NAME)
class XcfResolution(StaticFieldSet):
format = (
(Float32, "xres", "X resolution in DPI"),
(Float32, "yres", "Y resolution in DPI")
)
class XcfTattoo(StaticFieldSet):
format = ((UInt32, "tattoo", "Tattoo"),)
class LayerOffsets(StaticFieldSet):
format = (
(UInt32, "ofst_x", "Offset X"),
(UInt32, "ofst_y", "Offset Y")
)
class LayerMode(FieldSet):
static_size = 32
MODE_NAME = {
0: u"Normal",
1: u"Dissolve",
2: u"Behind",
3: u"Multiply",
4: u"Screen",
5: u"Overlay",
6: u"Difference",
7: u"Addition",
8: u"Subtract",
9: u"Darken only",
10: u"Lighten only",
11: u"Hue",
12: u"Saturation",
13: u"Color",
14: u"Value",
15: u"Divide",
16: u"Dodge",
17: u"Burn",
18: u"Hard light",
19: u"Soft light",
20: u"Grain extract",
21: u"Grain merge",
22: u"Color erase"
}
def createFields(self):
yield Enum(UInt32(self, "mode", "Layer mode"), self.MODE_NAME)
class GimpBoolean(UInt32):
def __init__(self, parent, name):
UInt32.__init__(self, parent, name)
def createValue(self):
return 1 == UInt32.createValue(self)
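# Usage note (illustrative, not part of the original parser): GimpBoolean reads
# a 32-bit integer but exposes it as a Python bool, so only a raw value of 1
# maps to True; any other value maps to False. For a hypothetical parent field
# set "parent":
#
#     field = GimpBoolean(parent, "visible")
#     field.value   # True iff the underlying UInt32 value equals 1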
class XcfUnit(StaticFieldSet):
format = ((UInt32, "unit", "Unit"),)
class XcfParasiteEntry(FieldSet):
def createFields(self):
yield PascalString32(self, "name", "Name", strip="\0", charset="UTF-8")
yield UInt32(self, "flags", "Flags")
yield PascalString32(self, "data", "Data", strip=" \0", charset="UTF-8")
class XcfLevel(FieldSet):
def createFields(self):
yield UInt32(self, "width", "Width in pixel")
yield UInt32(self, "height", "Height in pixel")
yield UInt32(self, "offset", "Offset")
offset = self["offset"].value
if offset == 0:
return
data_offsets = []
while (self.absolute_address + self.current_size)/8 < offset:
chunk = UInt32(self, "data_offset[]", "Data offset")
yield chunk
if chunk.value == 0:
break
data_offsets.append(chunk)
if (self.absolute_address + self.current_size)/8 != offset:
raise ParserError("Problem with level offset.")
previous = offset
for chunk in data_offsets:
data_offset = chunk.value
size = data_offset - previous
yield RawBytes(self, "data[]", size, "Data content of %s" % chunk.name)
previous = data_offset
class XcfHierarchy(FieldSet):
def createFields(self):
yield UInt32(self, "width", "Width")
yield UInt32(self, "height", "Height")
yield UInt32(self, "bpp", "Bits/pixel")
offsets = []
while True:
chunk = UInt32(self, "offset[]", "Level offset")
yield chunk
if chunk.value == 0:
break
offsets.append(chunk.value)
for offset in offsets:
padding = self.seekByte(offset, relative=False)
if padding is not None:
yield padding
yield XcfLevel(self, "level[]", "Level")
# yield XcfChannel(self, "channel[]", "Channel"))
class XcfChannel(FieldSet):
def createFields(self):
yield UInt32(self, "width", "Channel width")
yield UInt32(self, "height", "Channel height")
yield PascalString32(self, "name", "Channel name", strip="\0", charset="UTF-8")
for field in readProperties(self):
yield field
yield UInt32(self, "hierarchy_ofs", "Hierarchy offset")
yield XcfHierarchy(self, "hierarchy", "Hierarchy")
def createDescription(self):
return 'Channel "%s"' % self["name"].value
class XcfLayer(FieldSet):
def createFields(self):
yield UInt32(self, "width", "Layer width in pixels")
yield UInt32(self, "height", "Layer height in pixels")
yield Enum(UInt32(self, "type", "Layer type"), XcfFile.IMAGE_TYPE_NAME)
yield PascalString32(self, "name", "Layer name", strip="\0", charset="UTF-8")
for prop in readProperties(self):
yield prop
# --
# TODO: Hack for Gimp 1.2 files
# --
yield UInt32(self, "hierarchy_ofs", "Hierarchy offset")
yield UInt32(self, "mask_ofs", "Layer mask offset")
padding = self.seekByte(self["hierarchy_ofs"].value, relative=False)
if padding is not None:
yield padding
yield XcfHierarchy(self, "hierarchy", "Hierarchy")
# TODO: Read layer mask if needed: self["mask_ofs"].value != 0
def createDescription(self):
return 'Layer "%s"' % self["name"].value
class XcfParasites(FieldSet):
def createFields(self):
size = self["../size"].value * 8
while self.current_size < size:
yield XcfParasiteEntry(self, "parasite[]", "Parasite")
class XcfProperty(FieldSet):
PROP_COMPRESSION = 17
PROP_RESOLUTION = 19
PROP_PARASITES = 21
TYPE_NAME = {
0: u"End",
1: u"Colormap",
2: u"Active layer",
3: u"Active channel",
4: u"Selection",
5: u"Floating selection",
6: u"Opacity",
7: u"Mode",
8: u"Visible",
9: u"Linked",
10: u"Lock alpha",
11: u"Apply mask",
12: u"Edit mask",
13: u"Show mask",
14: u"Show masked",
15: u"Offsets",
16: u"Color",
17: u"Compression",
18: u"Guides",
19: u"Resolution",
20: u"Tattoo",
21: u"Parasites",
22: u"Unit",
23: u"Paths",
24: u"User unit",
25: u"Vectors",
26: u"Text layer flags",
}
handler = {
6: RGBA,
7: LayerMode,
8: GimpBoolean,
9: GimpBoolean,
10: GimpBoolean,
11: GimpBoolean,
12: GimpBoolean,
13: GimpBoolean,
15: LayerOffsets,
17: XcfCompression,
19: XcfResolution,
20: XcfTattoo,
21: XcfParasites,
22: XcfUnit
}
def __init__(self, *args, **kw):
FieldSet.__init__(self, *args, **kw)
self._size = (8 + self["size"].value) * 8
def createFields(self):
yield Enum(UInt32(self, "type", "Property type"), self.TYPE_NAME)
yield UInt32(self, "size", "Property size")
size = self["size"].value
if 0 < size:
cls = self.handler.get(self["type"].value, None)
if cls:
yield cls(self, "data", size=size*8)
else:
yield RawBytes(self, "data", size, "Data")
def createDescription(self):
return "Property: %s" % self["type"].display
def readProperties(parser):
while True:
prop = XcfProperty(parser, "property[]")
yield prop
if prop["type"].value == 0:
return
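# Usage note (illustrative, not part of the original parser): readProperties()
# is a generator that keeps yielding XcfProperty fields, including the final
# "End" property (type 0), and then stops. Callers simply re-yield the fields,
# e.g.:
#
#     for prop in readProperties(self):
#         yield prop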
class XcfFile(Parser):
PARSER_TAGS = {
"id": "xcf",
"category": "image",
"file_ext": ("xcf",),
"mime": (u"image/x-xcf", u"application/x-gimp-image"),
"min_size": (26 + 8 + 4 + 4)*8, # header+empty property+layer offset+channel offset
"magic": (
('gimp xcf file\0', 0),
('gimp xcf v002\0', 0),
),
"description": "Gimp (XCF) picture"
}
endian = NETWORK_ENDIAN
IMAGE_TYPE_NAME = {
0: u"RGB",
1: u"Gray",
2: u"Indexed"
}
def validate(self):
if self.stream.readBytes(0, 14) not in ('gimp xcf file\0', 'gimp xcf v002\0'):
return "Wrong signature"
return True
def createFields(self):
# Read signature
yield String(self, "signature", 14, "Gimp picture signature (ends with nul byte)", charset="ASCII")
        # Read general image information (width, height, type)
yield UInt32(self, "width", "Image width")
yield UInt32(self, "height", "Image height")
yield Enum(UInt32(self, "type", "Image type"), self.IMAGE_TYPE_NAME)
for prop in readProperties(self):
yield prop
# Read layer offsets
layer_offsets = []
while True:
chunk = UInt32(self, "layer_offset[]", "Layer offset")
yield chunk
if chunk.value == 0:
break
layer_offsets.append(chunk.value)
# Read channel offsets
channel_offsets = []
while True:
chunk = UInt32(self, "channel_offset[]", "Channel offset")
yield chunk
if chunk.value == 0:
break
channel_offsets.append(chunk.value)
# Read layers
for index, offset in enumerate(layer_offsets):
if index+1 < len(layer_offsets):
size = (layer_offsets[index+1] - offset) * 8
else:
size = None
padding = self.seekByte(offset, relative=False)
if padding:
yield padding
yield XcfLayer(self, "layer[]", size=size)
# Read channels
for index, offset in enumerate(channel_offsets):
if index+1 < len(channel_offsets):
size = (channel_offsets[index+1] - offset) * 8
else:
size = None
padding = self.seekByte(offset, relative=False)
if padding is not None:
yield padding
yield XcfChannel(self, "channel[]", "Channel", size=size)
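# Illustrative sketch (an assumption, not part of the original module): parsing
# a file with this parser through hachoir's stream helpers. The exact helper
# names and import paths depend on the hachoir version bundled under lib/.
#
#     from lib.hachoir_core.stream import FileInputStream
#     stream = FileInputStream(u"image.xcf")
#     xcf = XcfFile(stream)
#     print(xcf["width"].value, xcf["height"].value)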
|
|
# -*- coding: utf-8 -*-
import copy
import functools
from rest_framework import status as http_status
import json
import logging
import os
from flask import request, make_response
from mako.lookup import TemplateLookup
from mako.template import Template
import markupsafe
from werkzeug.exceptions import NotFound
import werkzeug.wrappers
from framework import sentry
from framework.exceptions import HTTPError
from framework.flask import app, redirect
from framework.sessions import session
from website import settings
logger = logging.getLogger(__name__)
TEMPLATE_DIR = settings.TEMPLATES_PATH
_TPL_LOOKUP = TemplateLookup(
default_filters=[
'unicode', # default filter; must set explicitly when overriding
],
directories=[
TEMPLATE_DIR,
settings.ADDON_PATH,
],
module_directory='/tmp/mako_modules'
)
_TPL_LOOKUP_SAFE = TemplateLookup(
default_filters=[
'unicode', # default filter; must set explicitly when overriding
'temp_ampersand_fixer', # FIXME: Temporary workaround for data stored in wrong format in DB. Unescape it before it gets re-escaped by Markupsafe. See [#OSF-4432]
'h',
],
imports=[
'from website.util.sanitize import temp_ampersand_fixer', # FIXME: Temporary workaround for data stored in wrong format in DB. Unescape it before it gets re-escaped by Markupsafe. See [#OSF-4432]
],
directories=[
TEMPLATE_DIR,
settings.ADDON_PATH,
],
module_directory='/tmp/mako_modules',
)
REDIRECT_CODES = [
http_status.HTTP_301_MOVED_PERMANENTLY,
http_status.HTTP_302_FOUND,
]
class Rule(object):
""" Container for routing and rendering rules."""
@staticmethod
def _ensure_list(value):
if not isinstance(value, list):
return [value]
return value
def __init__(self, routes, methods, view_func_or_data, renderer,
view_kwargs=None, endpoint_suffix=''):
"""Rule constructor.
:param routes: Route or list of routes
:param methods: HTTP method or list of methods
:param view_func_or_data: View function or data; pass data
if view returns a constant data dictionary
:param renderer: Renderer object or function
:param view_kwargs: Optional kwargs to pass to view function
:param endpoint_suffix: Optional suffix to append to endpoint name;
useful for disambiguating routes by HTTP verb
"""
if not callable(renderer):
raise ValueError('Argument renderer must be callable.')
self.routes = self._ensure_list(routes)
self.methods = self._ensure_list(methods)
self.view_func_or_data = view_func_or_data
self.renderer = renderer
self.view_kwargs = view_kwargs or {}
self.endpoint_suffix = endpoint_suffix
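# Illustrative sketch (hypothetical names, not part of the original module):
# building a Rule that maps two URLs onto one view function and renders the
# result as JSON. Single values and lists are both accepted because the
# constructor normalizes them with _ensure_list.
#
#     def goodbye(**kwargs):
#         return {'message': 'goodbye'}
#
#     rule = Rule(
#         ['/goodbye/', '/farewell/'],   # routes
#         'get',                         # methods
#         goodbye,                       # view function (or constant data)
#         json_renderer,                 # JSONRenderer instance defined below
#     )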
def wrap_with_renderer(fn, renderer, renderer_kwargs=None, debug_mode=True):
"""
:param fn: View function; must return a dictionary or a tuple containing
(up to) a dictionary, status code, headers, and redirect URL
:param renderer: Renderer object or function
:param renderer_kwargs: Optional kwargs to pass to renderer
:return: Wrapped view function
"""
@functools.wraps(fn)
def wrapped(*args, **kwargs):
if session:
session_error_code = session.data.get('auth_error_code')
else:
session_error_code = None
if session_error_code:
return renderer(
HTTPError(session_error_code),
**renderer_kwargs or {}
)
try:
if renderer_kwargs:
kwargs.update(renderer_kwargs)
data = fn(*args, **kwargs)
except HTTPError as error:
data = error
except Exception as error:
logger.exception(error)
if settings.SENTRY_DSN and not app.debug:
sentry.log_exception()
if debug_mode:
raise
data = HTTPError(
http_status.HTTP_500_INTERNAL_SERVER_ERROR,
message=repr(error),
)
return renderer(data, **renderer_kwargs or {})
return wrapped
def data_to_lambda(data):
"""Create a lambda function that takes arbitrary arguments and returns
a deep copy of the passed data. This function must deep copy the data,
else other code operating on the returned data can change the return value
of the lambda.
"""
return lambda *args, **kwargs: copy.deepcopy(data)
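# Usage note (illustrative, not part of the original module): because the
# returned lambda deep-copies on every call, mutating one returned value cannot
# leak into later calls.
#
#     view = data_to_lambda({'tags': []})
#     view()['tags'].append('x')
#     view()   # => {'tags': []}, still unchanged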
view_functions = {}
def process_rules(app, rules, prefix=''):
"""Add URL routes to Flask / Werkzeug lookup table.
:param app: Flask / Werkzeug app
:param rules: List of Rule objects
:param prefix: Optional prefix for rule URLs
"""
for rule in rules:
# Handle view function
if callable(rule.view_func_or_data):
view_func = rule.view_func_or_data
renderer_name = getattr(
rule.renderer,
'__name__',
rule.renderer.__class__.__name__
)
endpoint = '{}__{}'.format(
renderer_name,
rule.view_func_or_data.__name__
)
view_functions[endpoint] = rule.view_func_or_data
# Handle view data: wrap in lambda and build endpoint from
# HTTP methods
else:
view_func = data_to_lambda(rule.view_func_or_data)
endpoint = '__'.join(
route.replace('/', '') for route in rule.routes
)
# Wrap view function with renderer
wrapped_view_func = wrap_with_renderer(
view_func,
rule.renderer,
rule.view_kwargs,
debug_mode=app.debug
)
# Add routes
for url in rule.routes:
try:
app.add_url_rule(
prefix + url,
endpoint=endpoint + rule.endpoint_suffix,
view_func=wrapped_view_func,
methods=rule.methods,
)
except AssertionError:
raise AssertionError('URLRule({}, {})\'s view function name is overwriting an existing endpoint'.format(prefix + url, view_func.__name__ + rule.endpoint_suffix))
### Renderer helpers ###
def render_mustache_string(tpl_string, data):
import pystache
return pystache.render(tpl_string, context=data)
def render_jinja_string(tpl, data):
pass
mako_cache = {}
def render_mako_string(tpldir, tplname, data, trust=True):
"""Render a mako template to a string.
:param tpldir:
:param tplname:
:param data:
    :param trust: Optional. If ``False``, markup-safe escaping will be enabled
"""
show_errors = settings.DEBUG_MODE # thanks to abought
# TODO: The "trust" flag is expected to be temporary, and should be removed
# once all templates manually set it to False.
lookup_obj = _TPL_LOOKUP_SAFE if trust is False else _TPL_LOOKUP
tpl = mako_cache.get(tplname)
if tpl is None:
with open(os.path.join(tpldir, tplname)) as f:
tpl_text = f.read()
tpl = Template(
tpl_text,
format_exceptions=show_errors,
lookup=lookup_obj,
input_encoding='utf-8',
output_encoding='utf-8',
default_filters=lookup_obj.template_args['default_filters'],
imports=lookup_obj.template_args['imports'] # FIXME: Temporary workaround for data stored in wrong format in DB. Unescape it before it gets re-escaped by Markupsafe. See [#OSF-4432]
)
# Don't cache in debug mode
if not app.debug:
mako_cache[tplname] = tpl
return tpl.render(**data)
renderer_extension_map = {
'.stache': render_mustache_string,
'.jinja': render_jinja_string,
'.mako': render_mako_string,
}
def unpack(data, n=4):
"""Unpack data to tuple of length n.
:param data: Object or tuple of length <= n
:param n: Length to pad tuple
"""
if not isinstance(data, tuple):
data = (data,)
return data + (None,) * (n - len(data))
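# Usage note (illustrative, not part of the original module): unpack() pads
# short view-function return values out to a fixed-length tuple.
#
#     unpack({'status': 'ok'})
#     # => ({'status': 'ok'}, None, None, None)
#     unpack(({'status': 'ok'}, 201), n=4)
#     # => ({'status': 'ok'}, 201, None, None)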
def proxy_url(url):
"""Call Flask view function for a given URL.
:param url: URL to follow
:return: Return value of view function, wrapped in Werkzeug Response
"""
# Get URL map, passing current request method; else method defaults to GET
match = app.url_map.bind('').match(url, method=request.method)
response = app.view_functions[match[0]](**match[1])
return make_response(response)
def call_url(url, view_kwargs=None):
"""Look up and call view function by URL.
:param url: URL
:param view_kwargs: Optional kwargs to pass to view function
:return: Data from view function
"""
# Parse view function and args
func_name, func_data = app.url_map.bind('').match(url)
if view_kwargs is not None:
func_data.update(view_kwargs)
view_function = view_functions[func_name]
# Call view function
rv = view_function(**func_data)
# Extract data from return value
rv, _, _, _ = unpack(rv)
# Follow redirects
if isinstance(rv, werkzeug.wrappers.BaseResponse) \
and rv.status_code in REDIRECT_CODES:
redirect_url = rv.headers['Location']
return call_url(redirect_url)
return rv
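# Usage note (hypothetical URL, not part of the original module): call_url()
# resolves a URL against the app's url_map, invokes the matching view function
# registered in view_functions, unpacks the data portion of its return value,
# and follows any redirect responses.
#
#     data = call_url('/profile/', view_kwargs={'uid': 'abc12'})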
### Renderers ###
class Renderer(object):
CONTENT_TYPE = 'text/html'
def render(self, data, redirect_url, *args, **kwargs):
raise NotImplementedError
def handle_error(self, error):
raise NotImplementedError
def __call__(self, data, *args, **kwargs):
"""Render data returned by a view function.
:param data: Dictionary or tuple of (up to) dictionary,
status code, headers, and redirect URL
:return: Flask / Werkzeug response object
"""
# Handle error
if isinstance(data, HTTPError):
return self.handle_error(data)
# Return if response
if isinstance(data, werkzeug.wrappers.BaseResponse):
return data
# Unpack tuple
data, status_code, headers, redirect_url = unpack(data)
# Call subclass render
rendered = self.render(data, redirect_url, *args, **kwargs)
# Return if response
if isinstance(rendered, werkzeug.wrappers.BaseResponse):
return rendered
# Set content type in headers
headers = headers or {}
headers['Content-Type'] = self.CONTENT_TYPE + '; charset=' + kwargs.get('charset', 'utf-8')
# Package as response
return make_response(rendered, status_code, headers)
class JSONRenderer(Renderer):
"""Renderer for API views. Generates JSON; ignores
redirects from views and exceptions.
"""
CONTENT_TYPE = 'application/json'
class Encoder(json.JSONEncoder):
def default(self, obj):
if hasattr(obj, 'to_json'):
try:
return obj.to_json()
except TypeError: # BS4 objects have to_json that isn't callable
return str(obj)
return json.JSONEncoder.default(self, obj)
def handle_error(self, error):
headers = {'Content-Type': self.CONTENT_TYPE}
return self.render(error.to_data(), None), error.code, headers
def render(self, data, redirect_url, *args, **kwargs):
return json.dumps(data, cls=self.Encoder)
# Create a single JSONRenderer instance to avoid repeated construction
json_renderer = JSONRenderer()
class XMLRenderer(Renderer):
"""Renderer for API views. Generates XML; ignores
redirects from views and exceptions.
"""
CONTENT_TYPE = 'application/xml'
def handle_error(self, error):
return str(error.to_data()['message_long']), error.code
def render(self, data, redirect_url, *args, **kwargs):
return data
# Create a single XMLRenderer instance to avoid repeated construction
xml_renderer = XMLRenderer()
class WebRenderer(Renderer):
"""Renderer for web views. Generates HTML; follows redirects
from views and exceptions.
"""
CONTENT_TYPE = 'text/html'
error_template = 'error.mako'
# TODO: Should be a function, not a method
def detect_renderer(self, renderer, filename):
if renderer:
return renderer
try:
_, extension = os.path.splitext(filename)
return renderer_extension_map[extension]
except KeyError:
raise KeyError(
'Could not infer renderer from file name: {}'.format(
filename
)
)
def __init__(self, template_name,
renderer=None, error_renderer=None,
data=None, detect_render_nested=True,
trust=True, template_dir=TEMPLATE_DIR):
"""Construct WebRenderer.
:param template_name: Name of template file
:param renderer: Renderer callable; attempt to auto-detect if None
:param error_renderer: Renderer for error views; attempt to
auto-detect if None
:param data: Optional dictionary or dictionary-generating function
to add to data from view function
:param detect_render_nested: Auto-detect renderers for nested
templates?
:param trust: Boolean: If true, turn off markup-safe escaping
:param template_dir: Path to template directory
"""
self.template_name = template_name
self.data = data or {}
self.detect_render_nested = detect_render_nested
self.trust = trust
self.template_dir = template_dir
self.renderer = self.detect_renderer(renderer, template_name)
self.error_renderer = self.detect_renderer(
error_renderer,
self.error_template
)
def handle_error(self, error):
"""Handle an HTTPError.
:param error: HTTPError object
:return: HTML error page
"""
# Follow redirects
if error.redirect_url is not None:
return redirect(error.redirect_url)
# Check for custom error template
error_template = self.error_template
if getattr(error, 'template', None):
error_template = error.template
# Render error page
# todo: use message / data from exception in error page
error_data = error.to_data()
return self.render(
error_data,
None,
template_name=error_template
), error.code
def render_element(self, element, data):
"""Render an embedded template.
:param element: The template embed (HtmlElement).
Ex: <div mod-meta='{"tpl": "name.html", "replace": true}'></div>
:param data: Dictionary to be passed to the template as context
:return: 2-tuple: (<result>, <flag: replace div>)
"""
attributes_string = element.get('mod-meta')
# Return debug <div> if JSON cannot be parsed
try:
element_meta = json.loads(attributes_string)
except ValueError:
return '<div>No JSON object could be decoded: {}</div>'.format(
markupsafe.escape(attributes_string)
), True
uri = element_meta.get('uri')
is_replace = element_meta.get('replace', False)
kwargs = element_meta.get('kwargs', {})
view_kwargs = element_meta.get('view_kwargs', {})
error_msg = element_meta.get('error', None)
# TODO: Is copy enough? Discuss.
render_data = copy.copy(data)
render_data.update(kwargs)
if uri:
# Catch errors and return appropriate debug divs
# todo: add debug parameter
try:
uri_data = call_url(uri, view_kwargs=view_kwargs)
render_data.update(uri_data)
except NotFound:
return '<div>URI {} not found</div>'.format(markupsafe.escape(uri)), is_replace
except Exception as error:
logger.exception(error)
if error_msg:
return '<div>{}</div>'.format(markupsafe.escape(str(error_msg))), is_replace
return '<div>Error retrieving URI {}: {}</div>'.format(
uri,
repr(error)
), is_replace
try:
template_rendered = self._render(
render_data,
element_meta['tpl'],
)
except Exception as error:
logger.exception(error)
            return '<div>Error rendering template {}: {}</div>'.format(
element_meta['tpl'],
repr(error)
), is_replace
return template_rendered, is_replace
def _render(self, data, template_name=None):
"""Render output of view function to HTML.
:param data: Data dictionary from view function
:param template_name: Name of template file
:return: Rendered HTML
"""
nested = template_name is None
template_name = template_name or self.template_name
if nested and self.detect_render_nested:
try:
renderer = self.detect_renderer(None, template_name)
except KeyError:
renderer = self.renderer
else:
renderer = self.renderer
# Catch errors and return appropriate debug divs
# todo: add debug parameter
try:
# TODO: Seems like Jinja2 and handlebars renderers would not work with this call sig
rendered = renderer(self.template_dir, template_name, data, trust=self.trust)
except IOError:
return '<div>Template {} not found.</div>'.format(template_name)
## Parse HTML using html5lib; lxml is too strict and e.g. throws
## errors if missing parent container; htmlparser mangles whitespace
## and breaks replacement
#parsed = BeautifulSoup(rendered, 'html5lib')
#subtemplates = parsed.find_all(
# lambda tag: tag.has_attr('mod-meta')
#)
#
#for element in subtemplates:
#
# # Extract HTML of original element
# element_html = str(element)
#
# # Render nested template
# template_rendered, is_replace = self.render_element(element, data)
#
# # Build replacement
# if is_replace:
# replacement = template_rendered
# else:
# element.string = template_rendered
# replacement = str(element)
#
# # Replace
# rendered = rendered.replace(element_html, replacement)
return rendered
def render(self, data, redirect_url, *args, **kwargs):
"""Render output of view function to HTML, following redirects
and adding optional auxiliary data to view function response
:param data: Data dictionary from view function
:param redirect_url: Redirect URL; follow if not None
:return: Rendered HTML
"""
# Follow redirects
if redirect_url is not None:
return redirect(redirect_url)
template_name = kwargs.get('template_name')
# Load extra data
extra_data = self.data if isinstance(self.data, dict) else self.data()
data.update({key: val for key, val in extra_data.items() if key not in data})
return self._render(data, template_name)
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Utilities and helper functions."""
import contextlib
import datetime
import functools
import hashlib
import inspect
import os
import pyclbr
import random
import re
import shutil
import sys
import tempfile
import time
from xml.dom import minidom
from xml.parsers import expat
from xml import sax
from xml.sax import expatreader
from xml.sax import saxutils
from eventlet import event
from eventlet import greenthread
from eventlet import pools
from oslo.config import cfg
import paramiko
from cinder.brick.initiator import connector
from cinder import exception
from cinder.openstack.common import excutils
from cinder.openstack.common import gettextutils
from cinder.openstack.common import importutils
from cinder.openstack.common import lockutils
from cinder.openstack.common import log as logging
from cinder.openstack.common import processutils
from cinder.openstack.common import timeutils
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
ISO_TIME_FORMAT = "%Y-%m-%dT%H:%M:%S"
PERFECT_TIME_FORMAT = "%Y-%m-%dT%H:%M:%S.%f"
synchronized = lockutils.synchronized_with_prefix('cinder-')
def find_config(config_path):
"""Find a configuration file using the given hint.
:param config_path: Full or relative path to the config.
:returns: Full path of the config, if it exists.
:raises: `cinder.exception.ConfigNotFound`
"""
possible_locations = [
config_path,
os.path.join(CONF.state_path, "etc", "cinder", config_path),
os.path.join(CONF.state_path, "etc", config_path),
os.path.join(CONF.state_path, config_path),
"/etc/cinder/%s" % config_path,
]
for path in possible_locations:
if os.path.exists(path):
return os.path.abspath(path)
raise exception.ConfigNotFound(path=os.path.abspath(config_path))
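# Minimal usage sketch for find_config(); 'cinder.conf' below is only a
# placeholder file name, not a value taken from this module.
def _example_find_config():
    try:
        return find_config("cinder.conf")
    except exception.ConfigNotFound:
        LOG.debug("No cinder.conf found in the default search locations")
        return None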
def as_int(obj, quiet=True):
# Try "2" -> 2
try:
return int(obj)
except (ValueError, TypeError):
pass
# Try "2.5" -> 2
try:
return int(float(obj))
except (ValueError, TypeError):
pass
# Eck, not sure what this is then.
if not quiet:
raise TypeError(_("Can not translate %s to integer.") % (obj))
return obj
def check_exclusive_options(**kwargs):
"""Checks that only one of the provided options is actually not-none.
Iterates over all the kwargs passed in and checks that only one of said
arguments is not-none, if more than one is not-none then an exception will
be raised with the names of those arguments who were not-none.
"""
if not kwargs:
return
pretty_keys = kwargs.pop("pretty_keys", True)
exclusive_options = {}
for (k, v) in kwargs.iteritems():
if v is not None:
exclusive_options[k] = True
if len(exclusive_options) > 1:
# Change the format of the names from pythonic to
# something that is more readable.
#
# Ex: 'the_key' -> 'the key'
if pretty_keys:
names = [k.replace('_', ' ') for k in kwargs.keys()]
else:
names = kwargs.keys()
names = ", ".join(sorted(names))
msg = (_("May specify only one of %s") % (names))
raise exception.InvalidInput(reason=msg)
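# Minimal usage sketch for check_exclusive_options(); the option names below
# are hypothetical and only illustrate the mutual-exclusion check.
def _example_check_exclusive_options(snapshot_id=None, source_volid=None):
    # Raises InvalidInput when more than one of the options is not None.
    check_exclusive_options(snapshot_id=snapshot_id,
                            source_volid=source_volid)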
def execute(*cmd, **kwargs):
"""Convenience wrapper around oslo's execute() method."""
    if 'run_as_root' in kwargs and 'root_helper' not in kwargs:
kwargs['root_helper'] = get_root_helper()
return processutils.execute(*cmd, **kwargs)
def check_ssh_injection(cmd_list):
ssh_injection_pattern = ['`', '$', '|', '||', ';', '&', '&&', '>', '>>',
'<']
# Check whether injection attacks exist
for arg in cmd_list:
arg = arg.strip()
# First, check no space in the middle of arg
arg_len = len(arg.split())
if arg_len > 1:
raise exception.SSHInjectionThreat(command=str(cmd_list))
        # Second, check for dangerous characters inside the argument. A shell
        # special operator is only allowed when it is passed as its own,
        # standalone argument.
for c in ssh_injection_pattern:
if arg == c:
continue
result = arg.find(c)
if not result == -1:
if result == 0 or not arg[result - 1] == '\\':
raise exception.SSHInjectionThreat(command=cmd_list)
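# Minimal usage sketch for check_ssh_injection(); the command below is purely
# illustrative.
def _example_check_ssh_injection():
    cmd = ['lsvdisk', '-bytes', 'volume-1']
    # Raises SSHInjectionThreat if an argument embeds shell metacharacters.
    check_ssh_injection(cmd)
    return ' '.join(cmd)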
def create_channel(client, width, height):
"""Invoke an interactive shell session on server."""
channel = client.invoke_shell()
channel.resize_pty(width, height)
return channel
class SSHPool(pools.Pool):
"""A simple eventlet pool to hold ssh connections."""
def __init__(self, ip, port, conn_timeout, login, password=None,
privatekey=None, *args, **kwargs):
self.ip = ip
self.port = port
self.login = login
self.password = password
self.conn_timeout = conn_timeout if conn_timeout else None
self.privatekey = privatekey
super(SSHPool, self).__init__(*args, **kwargs)
def create(self):
try:
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
if self.password:
ssh.connect(self.ip,
port=self.port,
username=self.login,
password=self.password,
timeout=self.conn_timeout)
elif self.privatekey:
pkfile = os.path.expanduser(self.privatekey)
privatekey = paramiko.RSAKey.from_private_key_file(pkfile)
ssh.connect(self.ip,
port=self.port,
username=self.login,
pkey=privatekey,
timeout=self.conn_timeout)
else:
msg = _("Specify a password or private_key")
raise exception.CinderException(msg)
            # Paramiko sets the socket timeout to 0.1 seconds by default,
            # ignoring what we set through the SSHClient. That is no good for
            # long-lived connections, so we override it after the transport
            # is initialized: clear the socket timeout and enable keepalive
            # packets so the server keeps the connection open. The keepalive
            # packet is sent every ssh_conn_timeout seconds.
if self.conn_timeout:
transport = ssh.get_transport()
transport.sock.settimeout(None)
transport.set_keepalive(self.conn_timeout)
return ssh
except Exception as e:
msg = _("Error connecting via ssh: %s") % e
LOG.error(msg)
raise paramiko.SSHException(msg)
def get(self):
"""Return an item from the pool, when one is available.
This may cause the calling greenthread to block. Check if a
connection is active before returning it.
For dead connections create and return a new connection.
"""
conn = super(SSHPool, self).get()
if conn:
if conn.get_transport().is_active():
return conn
else:
conn.close()
return self.create()
    def remove(self, ssh):
        """Close an ssh client and remove it from free_items."""
        ssh.close()
        if ssh in self.free_items:
            # eventlet's Pool keeps idle connections in a deque, so drop the
            # closed client by value rather than popping by key.
            self.free_items.remove(ssh)
        if self.current_size > 0:
            self.current_size -= 1
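# Usage sketch for SSHPool, assuming reachable credentials. eventlet's Pool
# exposes item() as a context manager that puts the connection back into the
# pool when the block exits.
def _example_ssh_pool(ip, login, password):
    pool = SSHPool(ip, 22, conn_timeout=30, login=login, password=password,
                   min_size=1, max_size=2)
    with pool.item() as ssh:
        stdin, stdout, stderr = ssh.exec_command('uname -a')
        return stdout.read()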
def cinderdir():
import cinder
return os.path.abspath(cinder.__file__).split('cinder/__init__.py')[0]
# Default symbols to use for passwords. Avoids visually confusing characters.
# ~6 bits per symbol
DEFAULT_PASSWORD_SYMBOLS = ('23456789', # Removed: 0,1
'ABCDEFGHJKLMNPQRSTUVWXYZ', # Removed: I, O
'abcdefghijkmnopqrstuvwxyz') # Removed: l
# ~5 bits per symbol
EASIER_PASSWORD_SYMBOLS = ('23456789', # Removed: 0, 1
'ABCDEFGHJKLMNPQRSTUVWXYZ') # Removed: I, O
def last_completed_audit_period(unit=None):
"""This method gives you the most recently *completed* audit period.
arguments:
units: string, one of 'hour', 'day', 'month', 'year'
Periods normally begin at the beginning (UTC) of the
period unit (So a 'day' period begins at midnight UTC,
a 'month' unit on the 1st, a 'year' on Jan, 1)
unit string may be appended with an optional offset
like so: 'day@18' This will begin the period at 18:00
UTC. 'month@15' starts a monthly period on the 15th,
and year@3 begins a yearly one on March 1st.
returns: 2 tuple of datetimes (begin, end)
The begin timestamp of this audit period is the same as the
end of the previous.
"""
if not unit:
unit = CONF.volume_usage_audit_period
offset = 0
if '@' in unit:
unit, offset = unit.split("@", 1)
offset = int(offset)
rightnow = timeutils.utcnow()
if unit not in ('month', 'day', 'year', 'hour'):
raise ValueError('Time period must be hour, day, month or year')
if unit == 'month':
if offset == 0:
offset = 1
end = datetime.datetime(day=offset,
month=rightnow.month,
year=rightnow.year)
if end >= rightnow:
year = rightnow.year
if 1 >= rightnow.month:
year -= 1
month = 12 + (rightnow.month - 1)
else:
month = rightnow.month - 1
end = datetime.datetime(day=offset,
month=month,
year=year)
year = end.year
if 1 >= end.month:
year -= 1
month = 12 + (end.month - 1)
else:
month = end.month - 1
begin = datetime.datetime(day=offset, month=month, year=year)
elif unit == 'year':
if offset == 0:
offset = 1
end = datetime.datetime(day=1, month=offset, year=rightnow.year)
if end >= rightnow:
end = datetime.datetime(day=1,
month=offset,
year=rightnow.year - 1)
begin = datetime.datetime(day=1,
month=offset,
year=rightnow.year - 2)
else:
begin = datetime.datetime(day=1,
month=offset,
year=rightnow.year - 1)
elif unit == 'day':
end = datetime.datetime(hour=offset,
day=rightnow.day,
month=rightnow.month,
year=rightnow.year)
if end >= rightnow:
end = end - datetime.timedelta(days=1)
begin = end - datetime.timedelta(days=1)
elif unit == 'hour':
end = rightnow.replace(minute=offset, second=0, microsecond=0)
if end >= rightnow:
end = end - datetime.timedelta(hours=1)
begin = end - datetime.timedelta(hours=1)
return (begin, end)
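# Worked example of the offset syntax described above: with unit='day@18' and
# a current time of 2013-06-10 09:00 UTC, the most recently completed period
# is 2013-06-08 18:00 -> 2013-06-09 18:00.
def _example_audit_period():
    begin, end = last_completed_audit_period(unit='day@18')
    LOG.debug("Auditing volumes between %s and %s", begin, end)
    return begin, end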
def generate_password(length=20, symbolgroups=DEFAULT_PASSWORD_SYMBOLS):
"""Generate a random password from the supplied symbol groups.
At least one symbol from each group will be included. Unpredictable
results if length is less than the number of symbol groups.
Believed to be reasonably secure (with a reasonable password length!)
"""
r = random.SystemRandom()
# NOTE(jerdfelt): Some password policies require at least one character
# from each group of symbols, so start off with one random character
# from each symbol group
password = [r.choice(s) for s in symbolgroups]
# If length < len(symbolgroups), the leading characters will only
# be from the first length groups. Try our best to not be predictable
# by shuffling and then truncating.
r.shuffle(password)
password = password[:length]
length -= len(password)
# then fill with random characters from all symbol groups
symbols = ''.join(symbolgroups)
password.extend([r.choice(symbols) for _i in xrange(length)])
# finally shuffle to ensure first x characters aren't from a
# predictable group
r.shuffle(password)
return ''.join(password)
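# Short sketch: drawing a password from the reduced symbol set defined above;
# length and symbol groups are entirely the caller's choice.
def _example_generate_password():
    return generate_password(length=12, symbolgroups=EASIER_PASSWORD_SYMBOLS)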
def generate_username(length=20, symbolgroups=DEFAULT_PASSWORD_SYMBOLS):
# Use the same implementation as the password generation.
return generate_password(length, symbolgroups)
class LazyPluggable(object):
"""A pluggable backend loaded lazily based on some value."""
def __init__(self, pivot, **backends):
self.__backends = backends
self.__pivot = pivot
self.__backend = None
def __get_backend(self):
if not self.__backend:
backend_name = CONF[self.__pivot]
if backend_name not in self.__backends:
raise exception.Error(_('Invalid backend: %s') % backend_name)
backend = self.__backends[backend_name]
if isinstance(backend, tuple):
name = backend[0]
fromlist = backend[1]
else:
name = backend
fromlist = backend
self.__backend = __import__(name, None, None, fromlist)
LOG.debug(_('backend %s'), self.__backend)
return self.__backend
def __getattr__(self, key):
backend = self.__get_backend()
return getattr(backend, key)
class LoopingCallDone(Exception):
"""Exception to break out and stop a LoopingCall.
The poll-function passed to LoopingCall can raise this exception to
break out of the loop normally. This is somewhat analogous to
StopIteration.
An optional return-value can be included as the argument to the exception;
this return-value will be returned by LoopingCall.wait()
"""
def __init__(self, retvalue=True):
""":param retvalue: Value that LoopingCall.wait() should return."""
self.retvalue = retvalue
class LoopingCall(object):
def __init__(self, f=None, *args, **kw):
self.args = args
self.kw = kw
self.f = f
self._running = False
def start(self, interval, initial_delay=None):
self._running = True
done = event.Event()
def _inner():
if initial_delay:
greenthread.sleep(initial_delay)
try:
while self._running:
self.f(*self.args, **self.kw)
if not self._running:
break
greenthread.sleep(interval)
except LoopingCallDone as e:
self.stop()
done.send(e.retvalue)
except Exception:
LOG.exception(_('in looping call'))
done.send_exception(*sys.exc_info())
return
else:
done.send(True)
self.done = done
greenthread.spawn(_inner)
return self.done
def stop(self):
self._running = False
def wait(self):
return self.done.wait()
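# Minimal sketch of driving LoopingCall: the poll function below is
# hypothetical and stops the loop after three iterations by raising
# LoopingCallDone, whose value becomes the result of wait().
def _example_looping_call():
    state = {'count': 0}

    def _poll():
        state['count'] += 1
        if state['count'] >= 3:
            raise LoopingCallDone(retvalue=state['count'])

    timer = LoopingCall(_poll)
    timer.start(interval=0.1)
    return timer.wait()  # returns 3 once _poll raises LoopingCallDone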
class ProtectedExpatParser(expatreader.ExpatParser):
"""An expat parser which disables DTD's and entities by default."""
def __init__(self, forbid_dtd=True, forbid_entities=True,
*args, **kwargs):
# Python 2.x old style class
expatreader.ExpatParser.__init__(self, *args, **kwargs)
self.forbid_dtd = forbid_dtd
self.forbid_entities = forbid_entities
def start_doctype_decl(self, name, sysid, pubid, has_internal_subset):
raise ValueError("Inline DTD forbidden")
def entity_decl(self, entityName, is_parameter_entity, value, base,
systemId, publicId, notationName):
raise ValueError("<!ENTITY> forbidden")
def unparsed_entity_decl(self, name, base, sysid, pubid, notation_name):
# expat 1.2
raise ValueError("<!ENTITY> forbidden")
def reset(self):
expatreader.ExpatParser.reset(self)
if self.forbid_dtd:
self._parser.StartDoctypeDeclHandler = self.start_doctype_decl
if self.forbid_entities:
self._parser.EntityDeclHandler = self.entity_decl
self._parser.UnparsedEntityDeclHandler = self.unparsed_entity_decl
def safe_minidom_parse_string(xml_string):
"""Parse an XML string using minidom safely.
"""
try:
return minidom.parseString(xml_string, parser=ProtectedExpatParser())
except sax.SAXParseException as se:
raise expat.ExpatError()
def xhtml_escape(value):
"""Escapes a string so it is valid within XML or XHTML.
"""
return saxutils.escape(value, {'"': '"', "'": '''})
def get_from_path(items, path):
"""Returns a list of items matching the specified path.
Takes an XPath-like expression e.g. prop1/prop2/prop3, and for each item
in items, looks up items[prop1][prop2][prop3]. Like XPath, if any of the
intermediate results are lists it will treat each list item individually.
    A None in items or in any intermediate result is ignored; this function
    never raises because of a None anywhere in items. The returned list
    contains no None values.
"""
if path is None:
raise exception.Error('Invalid mini_xpath')
(first_token, sep, remainder) = path.partition('/')
if first_token == '':
raise exception.Error('Invalid mini_xpath')
results = []
if items is None:
return results
if not isinstance(items, list):
# Wrap single objects in a list
items = [items]
for item in items:
if item is None:
continue
get_method = getattr(item, 'get', None)
if get_method is None:
continue
child = get_method(first_token)
if child is None:
continue
if isinstance(child, list):
# Flatten intermediate lists
for x in child:
results.append(x)
else:
results.append(child)
if not sep:
# No more tokens
return results
else:
return get_from_path(results, remainder)
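# Worked example for get_from_path(): with the nested structure below, the
# mini-XPath 'a/b' collects every item['a']['b'] that exists and silently
# skips None entries.
def _example_get_from_path():
    items = [{'a': {'b': 1}}, {'a': {'b': 2}}, {'a': None}, None]
    return get_from_path(items, 'a/b')  # -> [1, 2]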
def is_valid_boolstr(val):
"""Check if the provided string is a valid bool string or not."""
val = str(val).lower()
return (val == 'true' or val == 'false' or
val == 'yes' or val == 'no' or
val == 'y' or val == 'n' or
val == '1' or val == '0')
def monkey_patch():
"""If the CONF.monkey_patch set as True,
this function patches a decorator
for all functions in specified modules.
You can set decorators for each modules
using CONF.monkey_patch_modules.
The format is "Module path:Decorator function".
Example: 'cinder.api.ec2.cloud:' \
cinder.openstack.common.notifier.api.notify_decorator'
Parameters of the decorator is as follows.
(See cinder.openstack.common.notifier.api.notify_decorator)
name - name of the function
function - object of the function
"""
# If CONF.monkey_patch is not True, this function do nothing.
if not CONF.monkey_patch:
return
# Get list of modules and decorators
for module_and_decorator in CONF.monkey_patch_modules:
module, decorator_name = module_and_decorator.split(':')
# import decorator function
decorator = importutils.import_class(decorator_name)
__import__(module)
# Retrieve module information using pyclbr
module_data = pyclbr.readmodule_ex(module)
for key in module_data.keys():
# set the decorator for the class methods
if isinstance(module_data[key], pyclbr.Class):
clz = importutils.import_class("%s.%s" % (module, key))
for method, func in inspect.getmembers(clz, inspect.ismethod):
setattr(
clz, method,
decorator("%s.%s.%s" % (module, key, method), func))
# set the decorator for the function
if isinstance(module_data[key], pyclbr.Function):
func = importutils.import_class("%s.%s" % (module, key))
setattr(sys.modules[module], key,
decorator("%s.%s" % (module, key), func))
def generate_glance_url():
"""Generate the URL to glance."""
# TODO(jk0): This will eventually need to take SSL into consideration
# when supported in glance.
return "http://%s:%d" % (CONF.glance_host, CONF.glance_port)
def make_dev_path(dev, partition=None, base='/dev'):
"""Return a path to a particular device.
>>> make_dev_path('xvdc')
/dev/xvdc
>>> make_dev_path('xvdc', 1)
/dev/xvdc1
"""
path = os.path.join(base, dev)
if partition:
path += str(partition)
return path
def total_seconds(td):
"""Local total_seconds implementation for compatibility with python 2.6"""
if hasattr(td, 'total_seconds'):
return td.total_seconds()
else:
return ((td.days * 86400 + td.seconds) * 10 ** 6 +
td.microseconds) / 10.0 ** 6
def sanitize_hostname(hostname):
"""Return a hostname which conforms to RFC-952 and RFC-1123 specs."""
if isinstance(hostname, unicode):
hostname = hostname.encode('latin-1', 'ignore')
hostname = re.sub('[ _]', '-', hostname)
hostname = re.sub('[^\w.-]+', '', hostname)
hostname = hostname.lower()
hostname = hostname.strip('.-')
return hostname
def read_cached_file(filename, cache_info, reload_func=None):
"""Read from a file if it has been modified.
:param cache_info: dictionary to hold opaque cache.
:param reload_func: optional function to be called with data when
file is reloaded due to a modification.
:returns: data from file
"""
mtime = os.path.getmtime(filename)
if not cache_info or mtime != cache_info.get('mtime'):
with open(filename) as fap:
cache_info['data'] = fap.read()
cache_info['mtime'] = mtime
if reload_func:
reload_func(cache_info['data'])
return cache_info['data']
def hash_file(file_like_object):
"""Generate a hash for the contents of a file."""
checksum = hashlib.sha1()
any(map(checksum.update, iter(lambda: file_like_object.read(32768), '')))
return checksum.hexdigest()
def service_is_up(service):
"""Check whether a service is up based on last heartbeat."""
last_heartbeat = service['updated_at'] or service['created_at']
# Timestamps in DB are UTC.
elapsed = total_seconds(timeutils.utcnow() - last_heartbeat)
return abs(elapsed) <= CONF.service_down_time
def read_file_as_root(file_path):
"""Secure helper to read file as root."""
try:
out, _err = execute('cat', file_path, run_as_root=True)
return out
except processutils.ProcessExecutionError:
raise exception.FileNotFound(file_path=file_path)
@contextlib.contextmanager
def temporary_chown(path, owner_uid=None):
"""Temporarily chown a path.
    :param owner_uid: UID of temporary owner (defaults to current user)
"""
if owner_uid is None:
owner_uid = os.getuid()
orig_uid = os.stat(path).st_uid
if orig_uid != owner_uid:
execute('chown', owner_uid, path, run_as_root=True)
try:
yield
finally:
if orig_uid != owner_uid:
execute('chown', orig_uid, path, run_as_root=True)
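# Hedged sketch combining temporary_chown() with ordinary file access; the
# path below is a placeholder for a root-owned file.
def _example_temporary_chown():
    path = '/etc/cinder/placeholder.conf'
    with temporary_chown(path):
        with open(path) as f:
            return f.read()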
@contextlib.contextmanager
def tempdir(**kwargs):
tmpdir = tempfile.mkdtemp(**kwargs)
try:
yield tmpdir
finally:
try:
shutil.rmtree(tmpdir)
except OSError as e:
LOG.debug(_('Could not remove tmpdir: %s'), str(e))
def walk_class_hierarchy(clazz, encountered=None):
"""Walk class hierarchy, yielding most derived classes first"""
if not encountered:
encountered = []
for subclass in clazz.__subclasses__():
if subclass not in encountered:
encountered.append(subclass)
# drill down to leaves first
for subsubclass in walk_class_hierarchy(subclass, encountered):
yield subsubclass
yield subclass
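# Small example for walk_class_hierarchy(): with the hypothetical hierarchy
# below, the most derived class is yielded before its parent.
def _example_walk_class_hierarchy():
    class Base(object):
        pass

    class Child(Base):
        pass

    class GrandChild(Child):
        pass

    return list(walk_class_hierarchy(Base))  # [GrandChild, Child]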
def get_root_helper():
return 'sudo cinder-rootwrap %s' % CONF.rootwrap_config
def brick_get_connector_properties():
"""wrapper for the brick calls to automatically set
the root_helper needed for cinder.
"""
root_helper = get_root_helper()
return connector.get_connector_properties(root_helper,
CONF.my_ip)
def brick_get_connector(protocol, driver=None,
execute=processutils.execute,
use_multipath=False,
device_scan_attempts=3):
"""Wrapper to get a brick connector object.
This automatically populates the required protocol as well
as the root_helper needed to execute commands.
"""
root_helper = get_root_helper()
return connector.InitiatorConnector.factory(protocol, root_helper,
driver=driver,
execute=execute,
use_multipath=use_multipath,
device_scan_attempts=
device_scan_attempts)
def require_driver_initialized(func):
@functools.wraps(func)
def wrapper(self, *args, **kwargs):
# we can't do anything if the driver didn't init
if not self.driver.initialized:
driver_name = self.driver.__class__.__name__
raise exception.DriverNotInitialized(driver=driver_name)
return func(self, *args, **kwargs)
return wrapper
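# Hedged sketch of require_driver_initialized applied to a hypothetical
# manager method; DriverNotInitialized is raised before the method body runs
# whenever self.driver.initialized is falsy.
class _ExampleManager(object):
    def __init__(self, driver):
        self.driver = driver

    @require_driver_initialized
    def delete_volume(self, volume_id):
        return volume_id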
|
|
import logging
from io import BufferedReader
import bigchaindb_driver as bd
import prov.graph as provgraph
import prov.model as provmodel
from bigchaindb_driver import pool as bdpool
from networkx import is_directed_acyclic_graph
from networkx import isolates
from networkx import topological_sort
from prov2bigchaindb.core import utils, local_stores, accounts
log = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
class BaseClient(object):
""" BigchainDB Base Client """
def __init__(self, host: str = '0.0.0.0', port: int = 9984,
num_connections: int = 5, local_store: local_stores.SqliteStore = local_stores.SqliteStore()):
"""
Instantiate Base Client object
        :param host: BigchainDB Hostname or IP (default: 0.0.0.0)
        :type host: str
        :param port: BigchainDB Port (default: 9984)
        :type port: int
        :param num_connections: Number of connections made to the BigchainDB node
:type num_connections: int
:param local_store: Local database object
:type local_store: SqliteStore
"""
assert num_connections > 0
self.node = 'http://{}:{}'.format(host, str(port))
self.connections = num_connections * [bd.BigchainDB(self.node)]
self.connection_pool = bdpool.Pool(self.connections)
self.store = local_store
def test_transaction(self, tx: dict) -> bool:
"""
Validate a transaction against BigchainDB
:param tx: Transaction to test
:type tx: dict
        :return: True if the transaction and its block are valid; raises an
            Exception otherwise
        :rtype: bool
"""
reason = None
if not utils.is_valid_tx(tx['id'], self.connection_pool.get_connection()):
reason = "TX is invalid"
elif not utils.is_block_to_tx_valid(tx['id'], self.connection_pool.get_connection()):
reason = "Block is invalid"
if reason is None:
return True
log.error("Test failed: %s", tx['id'])
raise Exception(reason)
def _get_bigchain_connection(self) -> bd.BigchainDB:
"""
Returns BigchainDB connection
:return: BigchainDB connection object
:rtype: bd.BigchainDB
"""
return self.connection_pool.get_connection()
def save_document(self, document: object) -> object:
"""
Abstract method to store a document
:param document: Document to save
:type document: object
:return: id
:rtype: object
"""
raise NotImplementedError("Abstract method")
def get_document(self, document_id: object) -> provmodel.ProvDocument:
"""
Abstract method to retrieve a document
        :param document_id: Id of the document to retrieve
:type document_id: object
:rtype: ProvDocument
"""
raise NotImplementedError("Abstract method")
class DocumentConceptClient(BaseClient):
""""""
def __init__(self, account_id: str = None, host: str = '0.0.0.0', port: int = 9984, num_connections: int = 1,
local_store: local_stores.SqliteStore = local_stores.SqliteStore()):
"""
Instantiate Document Client object
        :param host: BigchainDB Hostname or IP (default: 0.0.0.0)
        :type host: str
        :param port: BigchainDB Port (default: 9984)
:type port: int
:param local_store: Local database object
:type local_store: SqliteStore
"""
super().__init__(host, port, num_connections, local_store)
self.account = accounts.DocumentConceptAccount(account_id, self.store)
def save_document(self, document: str or bytes or provmodel.ProvDocument) -> str:
"""
Write a document into BigchainDB
:param document: Document as JSON/XML/PROVN
:type document: str or bytes or ProvDocument
:return: Transaction id of document
:rtype: str
"""
log.info("Save document...")
prov_document = utils.to_prov_document(content=document)
asset = {'prov': prov_document.serialize(format='json')}
tx_id = self.account.save_asset(asset, self._get_bigchain_connection())
log.info("Saved document in Tx with id: %s", tx_id)
return tx_id
def get_document(self, tx_id: str) -> provmodel.ProvDocument:
"""
Retrieve a document by transaction id from BigchainDB
:param tx_id: Transaction Id of Document
:type tx_id: str
:return: Document as ProvDocument object
:rtype: ProvDocument
"""
log.info("Retrieve and build document")
tx = self._get_bigchain_connection().transactions.retrieve(tx_id)
self.test_transaction(tx)
if 'id' in tx['asset'].keys():
tx = self._get_bigchain_connection().transactions.get(asset_id=tx['asset']['id'])[0]
self.test_transaction(tx)
log.info("Success")
return utils.to_prov_document(tx['asset']['data']['prov'])
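# Rough usage sketch for DocumentConceptClient, assuming a reachable
# BigchainDB node and an existing account id; host, port and the PROV-JSON
# content are placeholders.
def _example_document_concept_roundtrip(account_id: str, prov_json: str) -> provmodel.ProvDocument:
    client = DocumentConceptClient(account_id=account_id, host='127.0.0.1', port=9984)
    tx_id = client.save_document(prov_json)
    return client.get_document(tx_id)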
class GraphConceptClient(BaseClient):
""""""
def __init__(self, host: str = '0.0.0.0', port: int = 9984, num_connections: int = 5,
local_store: local_stores.SqliteStore = local_stores.SqliteStore()):
"""
Instantiate Graph Client object
        :param host: BigchainDB Hostname or IP (default: 0.0.0.0)
        :type host: str
        :param port: BigchainDB Port (default: 9984)
:type port: int
:param local_store: Local database object
:type local_store: SqliteStore
"""
super().__init__(host, port, num_connections, local_store=local_store)
self.accounts = []
@staticmethod
def calculate_account_data(prov_document: provmodel.ProvDocument) -> list:
"""
        Transforms a ProvDocument into a list of tuples, each holding a
        ProvElement, its ProvRelations (grouped by whether they carry an
        identifier) and the registered Namespaces

        :param prov_document: Document to transform
        :type prov_document: ProvDocument
        :return: List of tuples(element, relations, namespaces)
:rtype: list
"""
namespaces = prov_document.get_registered_namespaces()
g = provgraph.prov_to_graph(prov_document=prov_document)
elements = []
for node, node_dict in g.adjacency():
relations = {'with_id': [], 'without_id': []}
# print(node)
for tmp_relations in node_dict.values():
for relation in tmp_relations.values():
relation = relation['relation']
if relation.identifier:
relations['with_id'].append(relation)
else:
relations['without_id'].append(relation)
elements.append((node, relations, namespaces))
return elements
def save_document(self, document: str or BufferedReader or provmodel.ProvDocument) -> list:
"""
Write a document into BigchainDB
:param document: Document as JSON/XML/PROVN
:type document: str or BufferedReader or ProvDocument
:return: List of transaction ids
:rtype: list
"""
log.info("Save document...")
document_tx_ids = []
prov_document = utils.to_prov_document(content=document)
elements = GraphConceptClient.calculate_account_data(prov_document)
id_mapping = {}
log.info("Create and Save instances")
for prov_element, prov_relations, namespaces in elements:
for rel in prov_relations['with_id']:
id_mapping[rel.identifier] = ''
for prov_element, prov_relations, namespaces in elements:
account = accounts.GraphConceptAccount(prov_element, prov_relations, id_mapping, namespaces, self.store)
self.accounts.append(account)
tx_id = account.save_instance_asset(self._get_bigchain_connection())
document_tx_ids.append(tx_id)
log.info("Save relations with ids")
for account in filter(lambda acc: acc.has_relations_with_id, self.accounts):
document_tx_ids += account.save_relations_with_ids(self._get_bigchain_connection())
log.info("Save relations without ids")
for account in filter(lambda acc: acc.has_relations_without_id, self.accounts):
document_tx_ids += account.save_relations_without_ids(self._get_bigchain_connection())
log.info("Saved document in %s Tx", len(document_tx_ids))
return document_tx_ids
def get_document(self, document_tx_ids: list) -> provmodel.ProvDocument:
"""
        Retrieve a document from BigchainDB by a list of transaction ids
:param document_tx_ids: Transaction Ids of Document
:type document_tx_ids: list
:return: Document as ProvDocument object
:rtype: ProvDocument
"""
log.info("Retrieve and rebuild document...")
doc = provmodel.ProvDocument()
for i in document_tx_ids:
log.info("tx id: %s",i)
tx = self._get_bigchain_connection().transactions.get(asset_id=i)[0]
self.test_transaction(tx)
if 'id' in tx['asset'].keys():
tx = self._get_bigchain_connection().transactions.get(asset_id=tx['asset']['id'])[0]
self.test_transaction(tx)
tmp_doc = utils.to_prov_document(tx['asset']['data']['prov'])
for namespace in tmp_doc.get_registered_namespaces():
doc.add_namespace(namespace)
for record in tmp_doc.get_records():
doc.add_record(record=record)
log.info("Success")
return doc
class RoleConceptClient(BaseClient):
""""""
def __init__(self, host: str = '0.0.0.0', port: int = 9984, num_connections: int = 5,
local_store: local_stores.SqliteStore = local_stores.SqliteStore()):
"""
Instantiate Role Client object
        :param host: BigchainDB Hostname or IP (default: 0.0.0.0)
        :type host: str
        :param port: BigchainDB Port (default: 9984)
:type port: int
:param local_store: Local database object
:type local_store: SqliteStore
"""
super().__init__(host, port, num_connections, local_store=local_store)
self.accounts = []
@staticmethod
def calculate_account_data(prov_document: provmodel.ProvDocument) -> list:
"""
Transforms a ProvDocument into a list of tuples including:
ProvAgent, list of ProvRelations from agent,
list of ProvElements associated to ProvAgent,
list of Namespaces
:param prov_document: Document to transform
        :type prov_document: ProvDocument
:return: List of tuples(ProvAgent, list(), list(), list())
:rtype: list
"""
namespaces = prov_document.get_registered_namespaces()
g = provgraph.prov_to_graph(prov_document=prov_document)
sorted_nodes = list(reversed(list(topological_sort(g))))
agents = list(filter(lambda elem: isinstance(elem, provmodel.ProvAgent), sorted_nodes))
elements = list(filter(lambda elem: not isinstance(elem, provmodel.ProvAgent), sorted_nodes))
# Check on compatibility
if not is_directed_acyclic_graph(g):
raise Exception("Provenance graph is not acyclic")
if list(isolates(g)):
raise Exception("Provenance not compatible with role-based concept. Has isolated Elements")
for element in elements:
if provmodel.ProvAgent not in [type(n) for n in g.neighbors(element)]:
raise Exception(
"Provenance not compatible with role-based concept. Element {} has not relation to any agent".format(
element))
accounts = []
for agent in agents:
# find out-going relations from agent
agent_relations = []
for u, v in g.out_edges(agent):
                # TODO: check that this filter does not leave out any relation info
agent_relations.append(g.get_edge_data(u, v)[0]['relation'])
agent_elements = {}
i = 0
for element in elements:
element_relations = []
if g.has_edge(element, agent):
for u, v in set(g.out_edges(element)):
for relation in g[u][v].values():
element_relations.append(relation['relation'])
agent_elements[i] = {element: element_relations}
i += 1
accounts.append((agent, agent_relations, agent_elements, namespaces))
return accounts
def save_document(self, document: str or BufferedReader or provmodel.ProvDocument) -> list:
"""
Write a document into BigchainDB
:param document: Document as JSON/XML/PROVN
:type document: str or BufferedReader or ProvDocument
:return: List of transaction ids
:rtype: list
"""
log.info("Save document...")
document_tx_ids = []
prov_document = utils.to_prov_document(content=document)
account_data = RoleConceptClient.calculate_account_data(prov_document)
id_mapping = {}
log.info("Create and Save instances")
for agent, relations, elements, namespaces in account_data:
account = accounts.RoleConceptAccount(agent, relations, elements, id_mapping, namespaces, self.store)
self.accounts.append(account)
tx_id = account.save_instance_asset(self._get_bigchain_connection())
document_tx_ids.append(tx_id)
log.info("Save elements")
for account in self.accounts:
document_tx_ids += account.save_elements(self._get_bigchain_connection())
log.info("Saved document in %s Tx", len(document_tx_ids))
return document_tx_ids
def get_document(self, document_tx_ids: list) -> provmodel.ProvDocument:
"""
        Retrieve a document from BigchainDB by a list of transaction ids
:param document_tx_ids: Transaction Ids of Document
:type document_tx_ids: list
:return: Document as ProvDocument object
:rtype: ProvDocument
"""
log.info("Retrieve and rebuild document...")
doc = provmodel.ProvDocument()
for i in document_tx_ids:
tx = self._get_bigchain_connection().transactions.get(asset_id=i)[0]
self.test_transaction(tx)
if 'id' in tx['asset'].keys():
tx = self._get_bigchain_connection().transactions.get(asset_id=tx['asset']['id'])[0]
self.test_transaction(tx)
tmp_doc = utils.to_prov_document(tx['asset']['data']['prov'])
for namespace in tmp_doc.get_registered_namespaces():
doc.add_namespace(namespace)
for record in tmp_doc.get_records():
doc.add_record(record=record)
log.info("Success")
return doc
|
|
#!/usr/bin/env python
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2008,2009,2010,2011,2012,2013,2014,2015,2016 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for testing the add required service command."""
import unittest
if __name__ == "__main__":
import utils
utils.import_depends()
from brokertest import TestBrokerCommand
archetype_required = {
'aquilon': ["dns", "aqd", "ntp", "bootserver", "support-group", "lemon",
"syslogng"],
'esx_cluster': ["esx_management_server"],
'vmhost': ["dns", "ntp", "syslogng"],
}
class TestAddRequiredService(TestBrokerCommand):
def test_100_add_afs(self):
command = "add required service --service afs --archetype aquilon"
command += " --justification tcm=12345678"
self.noouttest(command.split(" "))
def test_101_add_afs_redundant(self):
command = ["add_required_service", "--service", "afs",
"--archetype", "aquilon", "--personality", "unixeng-test"]
out = self.statustest(command)
self.matchoutput(out,
"Warning: Service afs is already required by "
"archetype aquilon. Did you mean to use "
"--environment_override?",
command)
def test_102_add_afs_override(self):
command = ["add_required_service", "--service", "afs",
"--archetype", "aquilon", "--personality", "utpers-dev",
"--environment_override", "qa"]
self.noouttest(command)
def test_105_show_afs(self):
command = "show service --service afs"
out = self.commandtest(command.split(" "))
self.matchoutput(out, "Required for Archetype: aquilon", command)
self.matchoutput(out,
"Required for Personality: unixeng-test Archetype: aquilon",
command)
self.searchoutput(out,
r'Required for Personality: utpers-dev Archetype: aquilon\s*'
r'Stage: next\s*'
r'Environment Override: qa',
command)
def test_105_search_personality(self):
command = ["search_personality", "--required_service", "afs"]
out = self.commandtest(command)
self.matchoutput(out, "aquilon/utpers-dev", command)
self.matchoutput(out, "aquilon/unixeng-test", command)
command = ["search_personality", "--required_service", "afs",
"--environment_override", "qa"]
out = self.commandtest(command)
self.matchoutput(out, "aquilon/utpers-dev", command)
self.matchclean(out, "unixeng-test", command)
self.noouttest(["search_personality", "--required_service", "afs",
"--environment_override", "prod"])
def test_105_check_personality_proto(self):
command = ["show_personality", "--personality", "utpers-dev",
"--personality_stage", "next", "--format", "proto"]
personality = self.protobuftest(command, expect=1)[0]
self.assertEqual(len(personality.required_services), 1)
self.assertEqual(personality.required_services[0].service, 'afs')
self.assertEqual(personality.required_services[0].instance, '')
self.assertEqual(personality.required_services[0].host_environment, 'qa')
def test_110_add_defaults(self):
# Setup required services, as expected by the templates.
for archetype, servicelist in archetype_required.items():
for service in servicelist:
self.noouttest(["add_required_service", "--service", service,
"--archetype", archetype,
"--justification", "tcm=12345678"])
def test_115_verify_defaults(self):
all_services = set()
for archetype, servicelist in archetype_required.items():
all_services.update(servicelist)
for archetype, servicelist in archetype_required.items():
command = ["show_archetype", "--archetype", archetype]
out = self.commandtest(command)
for service in servicelist:
self.matchoutput(out, "Service: %s" % service, command)
for service in all_services - set(servicelist):
self.matchclean(out, "Service: %s" % service, command)
def test_120_add_choosers(self):
for service in ["chooser1", "chooser2", "chooser3"]:
command = ["add_required_service", "--service", service,
"--archetype=aquilon", "--personality=unixeng-test"]
self.noouttest(command)
def test_125_show_personality_current(self):
command = ["show_personality", "--archetype=aquilon",
"--personality=unixeng-test"]
out = self.commandtest(command)
self.matchoutput(out, "Stage: current", command)
self.matchclean(out, "chooser1", command)
self.matchclean(out, "chooser2", command)
self.matchclean(out, "chooser3", command)
def test_125_show_personality_next(self):
command = ["show_personality", "--archetype=aquilon",
"--personality=unixeng-test",
"--personality_stage=next"]
out = self.commandtest(command)
self.matchoutput(out, "Stage: next", command)
self.matchoutput(out, "Service: chooser1", command)
self.matchoutput(out, "Service: chooser2", command)
self.matchoutput(out, "Service: chooser3", command)
def test_125_show_personality_next_proto(self):
command = ["show_personality", "--archetype=aquilon",
"--personality=unixeng-test",
"--personality_stage=next", "--format", "proto"]
personality = self.protobuftest(command, expect=1)[0]
self.assertEqual(personality.archetype.name, "aquilon")
self.assertEqual(personality.name, "unixeng-test")
self.assertEqual(personality.stage, "next")
services = set(item.service for item in personality.required_services)
self.assertTrue("chooser1" in services)
self.assertTrue("chooser2" in services)
self.assertTrue("chooser3" in services)
def test_125_show_service(self):
command = "show service --service chooser1"
out = self.commandtest(command.split(" "))
self.searchoutput(out,
r"Required for Personality: unixeng-test Archetype: aquilon$"
r"\s+Stage: next$",
command)
def test_125_show_stage_diff(self):
command = ["show_diff", "--personality", "unixeng-test",
"--archetype", "aquilon",
"--personality_stage", "current", "--other_stage", "next"]
out = self.commandtest(command)
self.searchoutput(out,
r'missing Required Services in Personality aquilon/unixeng-test@current:$'
r'\s*afs$'
r'\s*chooser1$'
r'\s*chooser2$'
r'\s*chooser3$',
command)
def test_125_show_override_diff(self):
command = ["show_diff", "--archetype", "aquilon",
"--personality", "unixeng-test", "--personality_stage", "next",
"--other", "utpers-dev", "--other_stage", "next"]
out = self.commandtest(command)
self.searchoutput(out,
r'matching Required Services with different values:\s*'
r'afs value=None, othervalue=qa$',
command)
    def test_129_promote_unixeng_test(self):
self.noouttest(["promote", "--personality", "unixeng-test",
"--archetype", "aquilon"])
def test_130_add_utsvc(self):
command = ["add_required_service", "--personality=compileserver",
"--service=utsvc", "--archetype=aquilon"]
self.noouttest(command)
def test_135_verify_utsvc(self):
command = ["show_personality", "--archetype=aquilon",
"--personality=compileserver"]
out = self.commandtest(command)
self.matchoutput(out, "Service: utsvc", command)
def test_140_add_scope_test(self):
command = ["add_required_service", "--personality=utpers-dev",
"--service=scope_test", "--archetype=aquilon"]
self.noouttest(command)
def test_145_verify_scope_test(self):
command = ["show_personality", "--archetype=aquilon",
"--personality=utpers-dev",
"--personality_stage=next"]
out = self.commandtest(command)
self.matchoutput(out, "Service: scope_test", command)
def test_150_copy_personality(self):
self.noouttest(["add_personality", "--personality", "required_svc_test",
"--eon_id", "2", "--archetype", "aquilon",
"--copy_from", "utpers-dev",
"--copy_stage", "next",
"--host_environment", "dev"])
command = ["show_personality", "--archetype=aquilon",
"--personality=required_svc_test",
"--personality_stage=next"]
out = self.commandtest(command)
self.matchoutput(out, "Service: scope_test", command)
self.matchoutput(out, "Stage: next", command)
self.successtest(["del_personality", "--personality", "required_svc_test",
"--archetype", "aquilon"])
def test_160_add_badservice(self):
command = ["add_required_service", "--service=badservice",
"--personality=badpersonality2", "--archetype=aquilon"]
self.noouttest(command)
def test_165_verify_badservice(self):
command = ["show_personality", "--archetype=aquilon",
"--personality=badpersonality2"]
out = self.commandtest(command)
self.matchoutput(out, "Service: badservice", command)
def test_170_add_solaris(self):
command = ["add_required_service", "--service", "ips",
"--archetype", "aquilon", "--osname", "solaris",
"--osversion", "11.1-x86_64"]
self.noouttest(command)
def test_175_show_os(self):
command = ["show_os", "--archetype", "aquilon", "--osname", "solaris",
"--osversion", "11.1-x86_64"]
out = self.commandtest(command)
self.matchoutput(out, "Required Service: ips", command)
def test_175_show_service(self):
command = ["show_service", "--service", "ips"]
out = self.commandtest(command)
self.matchoutput(out, "Required for Operating System: solaris "
"Version: 11.1-x86_64 Archetype: aquilon",
command)
def test_176_copy_os(self):
command = ["add_os", "--archetype", "aquilon", "--osname", "solaris",
"--osversion", "11.2-x86_64", "--copy_version", "11.1-x86_64"]
self.noouttest(command)
def test_177_verify_copy(self):
command = ["show_os", "--archetype", "aquilon", "--osname", "solaris",
"--osversion", "11.2-x86_64"]
out = self.commandtest(command)
self.matchoutput(out, "Required Service: ips", command)
def test_178_del_copy(self):
self.noouttest(["del_os", "--archetype", "aquilon", "--osname", "solaris",
"--osversion", "11.2-x86_64"])
def test_200_archetype_duplicate(self):
command = "add required service --service afs --archetype aquilon"
command += " --justification tcm=12345678"
self.badrequesttest(command.split(" "))
def test_200_personality_duplicate(self):
command = ["add_required_service", "--service", "chooser1",
"--archetype", "aquilon", "--personality", "unixeng-test"]
out = self.badrequesttest(command)
self.matchoutput(out, "Service chooser1 is already required by "
"personality aquilon/unixeng-test@next.",
command)
def test_200_os_duplicate(self):
command = ["add_required_service", "--service", "ips",
"--archetype", "aquilon", "--osname", "solaris",
"--osversion", "11.1-x86_64"]
out = self.badrequesttest(command)
self.matchoutput(out,
"Service ips is already required by operating system "
"aquilon/solaris-11.1-x86_64.",
command)
def test_200_missing_service(self):
command = ["add_required_service", "--service",
"service-does-not-exist", "--archetype", "aquilon",
"--justification", "tcm=12345678"]
out = self.notfoundtest(command)
self.matchoutput(out,
"Service service-does-not-exist not found.",
command)
def test_200_missing_personality(self):
command = ["add_required_service", "--service", "afs",
"--personality", "personality-does-not-exist",
"--archetype", "aquilon"]
out = self.notfoundtest(command)
self.matchoutput(out,
"Personality personality-does-not-exist, "
"archetype aquilon not found.",
command)
def test_200_missing_personality_stage(self):
command = ["add_required_service", "--service", "afs",
"--personality", "nostage", "--archetype", "aquilon",
"--personality_stage", "previous"]
out = self.notfoundtest(command)
self.matchoutput(out,
"Personality aquilon/nostage does not have stage "
"previous.",
command)
def test_200_bad_personality_stage(self):
command = ["add_required_service", "--service", "afs",
"--personality", "nostage", "--archetype", "aquilon",
"--personality_stage", "no-such-stage"]
out = self.badrequesttest(command)
self.matchoutput(out, "'no-such-stage' is not a valid personality "
"stage.", command)
def test_200_noncompilable_archetype(self):
command = ["add_required_service", "--service", "afs",
"--archetype", "windows"]
out = self.unimplementederrortest(command)
self.matchoutput(out, "Archetype windows is not compileable, "
"required services are not supported.", command)
def test_200_noncompilable_os(self):
command = ["add_required_service", "--service", "afs",
"--archetype", "windows",
"--osname", "windows", "--osversion", "nt61e"]
out = self.unimplementederrortest(command)
self.matchoutput(out, "Archetype windows is not compileable, "
"required services are not supported.", command)
def test_200_noncompilable_personality(self):
command = ["add_required_service", "--service", "afs",
"--archetype", "windows", "--personality", "generic"]
out = self.unimplementederrortest(command)
self.matchoutput(out, "Archetype windows is not compileable, "
"required services are not supported.", command)
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(TestAddRequiredService)
unittest.TextTestRunner(verbosity=2).run(suite)
|
|
from tornado.escape import json_decode, utf8, to_unicode, recursive_unicode, native_str
from tornado.iostream import IOStream
from tornado.template import DictLoader
from tornado.testing import LogTrapTestCase, AsyncHTTPTestCase
from tornado.util import b, bytes_type, ObjectDict
from tornado.web import RequestHandler, authenticated, Application, asynchronous, url, HTTPError, StaticFileHandler, _create_signature
import binascii
import logging
import os
import re
import socket
import sys
class CookieTestRequestHandler(RequestHandler):
# stub out enough methods to make the secure_cookie functions work
def __init__(self):
# don't call super.__init__
self._cookies = {}
self.application = ObjectDict(settings=dict(cookie_secret='0123456789'))
def get_cookie(self, name):
return self._cookies.get(name)
def set_cookie(self, name, value, expires_days=None):
self._cookies[name] = value
class SecureCookieTest(LogTrapTestCase):
def test_round_trip(self):
handler = CookieTestRequestHandler()
handler.set_secure_cookie('foo', b('bar'))
self.assertEqual(handler.get_secure_cookie('foo'), b('bar'))
def test_cookie_tampering_future_timestamp(self):
handler = CookieTestRequestHandler()
# this string base64-encodes to '12345678'
handler.set_secure_cookie('foo', binascii.a2b_hex(b('d76df8e7aefc')))
cookie = handler._cookies['foo']
match = re.match(b(r'12345678\|([0-9]+)\|([0-9a-f]+)'), cookie)
assert match
timestamp = match.group(1)
sig = match.group(2)
self.assertEqual(
_create_signature(handler.application.settings["cookie_secret"],
'foo', '12345678', timestamp),
sig)
# shifting digits from payload to timestamp doesn't alter signature
# (this is not desirable behavior, just confirming that that's how it
# works)
self.assertEqual(
_create_signature(handler.application.settings["cookie_secret"],
'foo', '1234', b('5678') + timestamp),
sig)
# tamper with the cookie
handler._cookies['foo'] = utf8('1234|5678%s|%s' % (timestamp, sig))
# it gets rejected
assert handler.get_secure_cookie('foo') is None
class CookieTest(AsyncHTTPTestCase, LogTrapTestCase):
def get_app(self):
class SetCookieHandler(RequestHandler):
def get(self):
# Try setting cookies with different argument types
# to ensure that everything gets encoded correctly
self.set_cookie("str", "asdf")
self.set_cookie("unicode", u"qwer")
self.set_cookie("bytes", b("zxcv"))
class GetCookieHandler(RequestHandler):
def get(self):
self.write(self.get_cookie("foo"))
class SetCookieDomainHandler(RequestHandler):
def get(self):
# unicode domain and path arguments shouldn't break things
# either (see bug #285)
self.set_cookie("unicode_args", "blah", domain=u"foo.com",
path=u"/foo")
class SetCookieSpecialCharHandler(RequestHandler):
def get(self):
self.set_cookie("equals", "a=b")
self.set_cookie("semicolon", "a;b")
self.set_cookie("quote", 'a"b')
return Application([
("/set", SetCookieHandler),
("/get", GetCookieHandler),
("/set_domain", SetCookieDomainHandler),
("/special_char", SetCookieSpecialCharHandler),
])
def test_set_cookie(self):
response = self.fetch("/set")
self.assertEqual(response.headers.get_list("Set-Cookie"),
["str=asdf; Path=/",
"unicode=qwer; Path=/",
"bytes=zxcv; Path=/"])
def test_get_cookie(self):
response = self.fetch("/get", headers={"Cookie": "foo=bar"})
self.assertEqual(response.body, b("bar"))
response = self.fetch("/get", headers={"Cookie": 'foo="bar"'})
self.assertEqual(response.body, b("bar"))
def test_set_cookie_domain(self):
response = self.fetch("/set_domain")
self.assertEqual(response.headers.get_list("Set-Cookie"),
["unicode_args=blah; Domain=foo.com; Path=/foo"])
def test_cookie_special_char(self):
response = self.fetch("/special_char")
headers = response.headers.get_list("Set-Cookie")
self.assertEqual(len(headers), 3)
self.assertEqual(headers[0], 'equals="a=b"; Path=/')
# python 2.7 octal-escapes the semicolon; older versions leave it alone
self.assertTrue(headers[1] in ('semicolon="a;b"; Path=/',
'semicolon="a\\073b"; Path=/'),
headers[1])
self.assertEqual(headers[2], 'quote="a\\"b"; Path=/')
data = [('foo=a=b', 'a=b'),
('foo="a=b"', 'a=b'),
('foo="a;b"', 'a;b'),
#('foo=a\\073b', 'a;b'), # even encoded, ";" is a delimiter
('foo="a\\073b"', 'a;b'),
('foo="a\\"b"', 'a"b'),
]
for header, expected in data:
logging.info("trying %r", header)
response = self.fetch("/get", headers={"Cookie": header})
self.assertEqual(response.body, utf8(expected))
class AuthRedirectRequestHandler(RequestHandler):
def initialize(self, login_url):
self.login_url = login_url
def get_login_url(self):
return self.login_url
@authenticated
def get(self):
# we'll never actually get here because the test doesn't follow redirects
self.send_error(500)
class AuthRedirectTest(AsyncHTTPTestCase, LogTrapTestCase):
def get_app(self):
return Application([('/relative', AuthRedirectRequestHandler,
dict(login_url='/login')),
('/absolute', AuthRedirectRequestHandler,
dict(login_url='http://example.com/login'))])
def test_relative_auth_redirect(self):
self.http_client.fetch(self.get_url('/relative'), self.stop,
follow_redirects=False)
response = self.wait()
self.assertEqual(response.code, 302)
self.assertEqual(response.headers['Location'], '/login?next=%2Frelative')
def test_absolute_auth_redirect(self):
self.http_client.fetch(self.get_url('/absolute'), self.stop,
follow_redirects=False)
response = self.wait()
self.assertEqual(response.code, 302)
self.assertTrue(re.match(
'http://example.com/login\?next=http%3A%2F%2Flocalhost%3A[0-9]+%2Fabsolute',
response.headers['Location']), response.headers['Location'])
class ConnectionCloseHandler(RequestHandler):
def initialize(self, test):
self.test = test
@asynchronous
def get(self):
self.test.on_handler_waiting()
def on_connection_close(self):
self.test.on_connection_close()
class ConnectionCloseTest(AsyncHTTPTestCase, LogTrapTestCase):
def get_app(self):
return Application([('/', ConnectionCloseHandler, dict(test=self))])
def test_connection_close(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
s.connect(("localhost", self.get_http_port()))
self.stream = IOStream(s, io_loop=self.io_loop)
self.stream.write(b("GET / HTTP/1.0\r\n\r\n"))
self.wait()
def on_handler_waiting(self):
logging.info('handler waiting')
self.stream.close()
def on_connection_close(self):
logging.info('connection closed')
self.stop()
class EchoHandler(RequestHandler):
def get(self, path):
# Type checks: web.py interfaces convert argument values to
# unicode strings (by default, but see also decode_argument).
# In httpserver.py (i.e. self.request.arguments), they're left
# as bytes. Keys are always native strings.
for key in self.request.arguments:
assert type(key) == str, repr(key)
for value in self.request.arguments[key]:
assert type(value) == bytes_type, repr(value)
for value in self.get_arguments(key):
assert type(value) == unicode, repr(value)
assert type(path) == unicode, repr(path)
self.write(dict(path=path,
args=recursive_unicode(self.request.arguments)))
class RequestEncodingTest(AsyncHTTPTestCase, LogTrapTestCase):
def get_app(self):
return Application([("/(.*)", EchoHandler)])
def test_question_mark(self):
# Ensure that url-encoded question marks are handled properly
self.assertEqual(json_decode(self.fetch('/%3F').body),
dict(path='?', args={}))
self.assertEqual(json_decode(self.fetch('/%3F?%3F=%3F').body),
dict(path='?', args={'?': ['?']}))
def test_path_encoding(self):
# Path components and query arguments should be decoded the same way
self.assertEqual(json_decode(self.fetch('/%C3%A9?arg=%C3%A9').body),
{u"path":u"\u00e9",
u"args": {u"arg": [u"\u00e9"]}})
class TypeCheckHandler(RequestHandler):
def prepare(self):
self.errors = {}
self.check_type('status', self.get_status(), int)
# get_argument is an exception from the general rule of using
# type str for non-body data mainly for historical reasons.
self.check_type('argument', self.get_argument('foo'), unicode)
self.check_type('cookie_key', self.cookies.keys()[0], str)
self.check_type('cookie_value', self.cookies.values()[0].value, str)
self.check_type('xsrf_token', self.xsrf_token, bytes_type)
self.check_type('xsrf_form_html', self.xsrf_form_html(), str)
self.check_type('reverse_url', self.reverse_url('typecheck', 'foo'), str)
self.check_type('request_summary', self._request_summary(), str)
def get(self, path_component):
# path_component uses type unicode instead of str for consistency
# with get_argument()
self.check_type('path_component', path_component, unicode)
self.write(self.errors)
def post(self, path_component):
self.check_type('path_component', path_component, unicode)
self.write(self.errors)
def check_type(self, name, obj, expected_type):
actual_type = type(obj)
if expected_type != actual_type:
self.errors[name] = "expected %s, got %s" % (expected_type,
actual_type)
class DecodeArgHandler(RequestHandler):
def decode_argument(self, value, name=None):
assert type(value) == bytes_type, repr(value)
# use self.request.arguments directly to avoid recursion
if 'encoding' in self.request.arguments:
return value.decode(to_unicode(self.request.arguments['encoding'][0]))
else:
return value
def get(self, arg):
def describe(s):
if type(s) == bytes_type:
return ["bytes", native_str(binascii.b2a_hex(s))]
elif type(s) == unicode:
return ["unicode", s]
raise Exception("unknown type")
self.write({'path': describe(arg),
'query': describe(self.get_argument("foo")),
})
class LinkifyHandler(RequestHandler):
def get(self):
self.render("linkify.html", message="http://example.com")
class UIModuleResourceHandler(RequestHandler):
def get(self):
self.render("page.html", entries=[1,2])
class OptionalPathHandler(RequestHandler):
def get(self, path):
self.write({"path": path})
class FlowControlHandler(RequestHandler):
# These writes are too small to demonstrate real flow control,
# but at least it shows that the callbacks get run.
@asynchronous
def get(self):
self.write("1")
self.flush(callback=self.step2)
def step2(self):
self.write("2")
self.flush(callback=self.step3)
def step3(self):
self.write("3")
self.finish()
class MultiHeaderHandler(RequestHandler):
def get(self):
self.set_header("x-overwrite", "1")
self.set_header("x-overwrite", 2)
self.add_header("x-multi", 3)
self.add_header("x-multi", "4")
class WebTest(AsyncHTTPTestCase, LogTrapTestCase):
def get_app(self):
loader = DictLoader({
"linkify.html": "{% module linkify(message) %}",
"page.html": """\
<html><head></head><body>
{% for e in entries %}
{% module Template("entry.html", entry=e) %}
{% end %}
</body></html>""",
"entry.html": """\
{{ set_resources(embedded_css=".entry { margin-bottom: 1em; }", embedded_javascript="js_embed()", css_files=["/base.css", "/foo.css"], javascript_files="/common.js", html_head="<meta>", html_body='<script src="/analytics.js"/>') }}
<div class="entry">...</div>""",
})
urls = [
url("/typecheck/(.*)", TypeCheckHandler, name='typecheck'),
url("/decode_arg/(.*)", DecodeArgHandler),
url("/decode_arg_kw/(?P<arg>.*)", DecodeArgHandler),
url("/linkify", LinkifyHandler),
url("/uimodule_resources", UIModuleResourceHandler),
url("/optional_path/(.+)?", OptionalPathHandler),
url("/flow_control", FlowControlHandler),
url("/multi_header", MultiHeaderHandler),
]
return Application(urls,
template_loader=loader,
autoescape="xhtml_escape")
def fetch_json(self, *args, **kwargs):
response = self.fetch(*args, **kwargs)
response.rethrow()
return json_decode(response.body)
def test_types(self):
response = self.fetch("/typecheck/asdf?foo=bar",
headers={"Cookie": "cook=ie"})
data = json_decode(response.body)
self.assertEqual(data, {})
response = self.fetch("/typecheck/asdf?foo=bar", method="POST",
headers={"Cookie": "cook=ie"},
body="foo=bar")
def test_decode_argument(self):
# These urls all decode to the same thing
urls = ["/decode_arg/%C3%A9?foo=%C3%A9&encoding=utf-8",
"/decode_arg/%E9?foo=%E9&encoding=latin1",
"/decode_arg_kw/%E9?foo=%E9&encoding=latin1",
]
for url in urls:
response = self.fetch(url)
response.rethrow()
data = json_decode(response.body)
self.assertEqual(data, {u'path': [u'unicode', u'\u00e9'],
u'query': [u'unicode', u'\u00e9'],
})
response = self.fetch("/decode_arg/%C3%A9?foo=%C3%A9")
response.rethrow()
data = json_decode(response.body)
self.assertEqual(data, {u'path': [u'bytes', u'c3a9'],
u'query': [u'bytes', u'c3a9'],
})
def test_uimodule_unescaped(self):
response = self.fetch("/linkify")
self.assertEqual(response.body,
b("<a href=\"http://example.com\">http://example.com</a>"))
def test_uimodule_resources(self):
response = self.fetch("/uimodule_resources")
self.assertEqual(response.body, b("""\
<html><head><link href="/base.css" type="text/css" rel="stylesheet"/><link href="/foo.css" type="text/css" rel="stylesheet"/>
<style type="text/css">
.entry { margin-bottom: 1em; }
</style>
<meta>
</head><body>
<div class="entry">...</div>
<div class="entry">...</div>
<script src="/common.js" type="text/javascript"></script>
<script type="text/javascript">
//<![CDATA[
js_embed()
//]]>
</script>
<script src="/analytics.js"/>
</body></html>"""))
def test_optional_path(self):
self.assertEqual(self.fetch_json("/optional_path/foo"),
{u"path": u"foo"})
self.assertEqual(self.fetch_json("/optional_path/"),
{u"path": None})
def test_flow_control(self):
self.assertEqual(self.fetch("/flow_control").body, b("123"))
def test_multi_header(self):
response = self.fetch("/multi_header")
self.assertEqual(response.headers["x-overwrite"], "2")
self.assertEqual(response.headers.get_list("x-multi"), ["3", "4"])
class ErrorResponseTest(AsyncHTTPTestCase, LogTrapTestCase):
def get_app(self):
class DefaultHandler(RequestHandler):
def get(self):
if self.get_argument("status", None):
raise HTTPError(int(self.get_argument("status")))
1/0
class WriteErrorHandler(RequestHandler):
def get(self):
if self.get_argument("status", None):
self.send_error(int(self.get_argument("status")))
else:
1/0
def write_error(self, status_code, **kwargs):
self.set_header("Content-Type", "text/plain")
if "exc_info" in kwargs:
self.write("Exception: %s" % kwargs["exc_info"][0].__name__)
else:
self.write("Status: %d" % status_code)
class GetErrorHtmlHandler(RequestHandler):
def get(self):
if self.get_argument("status", None):
self.send_error(int(self.get_argument("status")))
else:
1/0
def get_error_html(self, status_code, **kwargs):
self.set_header("Content-Type", "text/plain")
if "exception" in kwargs:
self.write("Exception: %s" % sys.exc_info()[0].__name__)
else:
self.write("Status: %d" % status_code)
class FailedWriteErrorHandler(RequestHandler):
def get(self):
1/0
def write_error(self, status_code, **kwargs):
raise Exception("exception in write_error")
return Application([
url("/default", DefaultHandler),
url("/write_error", WriteErrorHandler),
url("/get_error_html", GetErrorHtmlHandler),
url("/failed_write_error", FailedWriteErrorHandler),
])
def test_default(self):
response = self.fetch("/default")
self.assertEqual(response.code, 500)
self.assertTrue(b("500: Internal Server Error") in response.body)
response = self.fetch("/default?status=503")
self.assertEqual(response.code, 503)
self.assertTrue(b("503: Service Unavailable") in response.body)
def test_write_error(self):
response = self.fetch("/write_error")
self.assertEqual(response.code, 500)
self.assertEqual(b("Exception: ZeroDivisionError"), response.body)
response = self.fetch("/write_error?status=503")
self.assertEqual(response.code, 503)
self.assertEqual(b("Status: 503"), response.body)
def test_get_error_html(self):
response = self.fetch("/get_error_html")
self.assertEqual(response.code, 500)
self.assertEqual(b("Exception: ZeroDivisionError"), response.body)
response = self.fetch("/get_error_html?status=503")
self.assertEqual(response.code, 503)
self.assertEqual(b("Status: 503"), response.body)
def test_failed_write_error(self):
response = self.fetch("/failed_write_error")
self.assertEqual(response.code, 500)
self.assertEqual(b(""), response.body)
class StaticFileTest(AsyncHTTPTestCase, LogTrapTestCase):
def get_app(self):
class StaticUrlHandler(RequestHandler):
def get(self, path):
self.write(self.static_url(path))
class AbsoluteStaticUrlHandler(RequestHandler):
include_host = True
def get(self, path):
self.write(self.static_url(path))
return Application([('/static_url/(.*)', StaticUrlHandler),
('/abs_static_url/(.*)', AbsoluteStaticUrlHandler)],
static_path=os.path.join(os.path.dirname(__file__), 'static'))
def test_static_files(self):
response = self.fetch('/robots.txt')
assert b("Disallow: /") in response.body
response = self.fetch('/static/robots.txt')
assert b("Disallow: /") in response.body
def test_static_url(self):
response = self.fetch("/static_url/robots.txt")
self.assertEqual(response.body, b("/static/robots.txt?v=f71d2"))
def test_absolute_static_url(self):
response = self.fetch("/abs_static_url/robots.txt")
self.assertEqual(response.body,
utf8(self.get_url("/") + "static/robots.txt?v=f71d2"))
class CustomStaticFileTest(AsyncHTTPTestCase, LogTrapTestCase):
def get_app(self):
class MyStaticFileHandler(StaticFileHandler):
def get(self, path):
assert path == "foo.txt"
self.write("bar")
@classmethod
def make_static_url(cls, settings, path):
return "/static/%s?v=42" % path
class StaticUrlHandler(RequestHandler):
def get(self, path):
self.write(self.static_url(path))
return Application([("/static_url/(.*)", StaticUrlHandler)],
static_path="dummy",
static_handler_class=MyStaticFileHandler)
def test_serve(self):
response = self.fetch("/static/foo.txt")
self.assertEqual(response.body, b("bar"))
def test_static_url(self):
response = self.fetch("/static_url/foo.txt")
self.assertEqual(response.body, b("/static/foo.txt?v=42"))
|
|
################################
#### MOCK FUNCTIONS MACROS #####
################################
def defaultMacros(numargs):
ret = ''
for j in range(0,numargs):
ret += '`undef ARG%0d``_NODEFAULT \\\n' % (j)
ret += '`undef ARG%0d``_``DEF%0d \\\n' % (j,j)
ret += '`undef NAME``_``ARG%0d``_``DEF%0d \\\n' % (j,j)
ret += '`define ARG%0d``_``DEF%0d \\\n' % (j,j)
ret += '`ifdef ARG%0d``_NODEFAULT \\\n' % (j)
ret += ' `define NAME``_``ARG%0d``_``DEF%0d \\\n' % (j,j)
ret += '`else \\\n'
ret += ' `define NAME``_``ARG%0d``_``DEF%0d =DEF%0d \\\n' % (j,j,j)
ret += '`endif \\\n'
return ret
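# Illustrative note (not part of the original generator): for numargs=1 the
# helper above emits roughly the following SystemVerilog preprocessor text,
# one line per `ret +=` statement, each continued with a trailing backslash:
#
#   `undef ARG0``_NODEFAULT \
#   `undef ARG0``_``DEF0 \
#   `undef NAME``_``ARG0``_``DEF0 \
#   `define ARG0``_``DEF0 \
#   `ifdef ARG0``_NODEFAULT \
#     `define NAME``_``ARG0``_``DEF0 \
#   `else \
#     `define NAME``_``ARG0``_``DEF0 =DEF0 \
#   `endif \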
def function_macro(numargs, fout):
fout.write ('`define SVMOCK_FUNC%0d(NAME,RETURN%s) \\\n' % (numargs, allArgString(numargs, ',', ',', '=NODEFAULT')) +
defaultMacros(numargs) +
'`undef invoke%0d_``NAME \\\n' % numargs +
'`undef args%0d_``NAME \\\n' % numargs +
'`define invoke%0d_``NAME`` virtual %s \\\n' % (numargs, functionDecl('NAME',numargs,'RETURN')) +
'`define args%0d_``NAME`` %s \\\n' % (numargs, method_arg_names(numargs)) +
'`SVMOCK_FUNCTION_MOCKER_CLASS%0d(NAME,RETURN%s) \\\n' % (numargs, allArgString(numargs, ',', ',', 'MACRO')) +
'__``NAME``__mocker #(PARENT) __``NAME = new(`"NAME`", __mockers, this); \\\n' +
'virtual function RETURN NAME(%s); \\\n' % method_args(numargs) +
' __``NAME.called(%s); \\\n' % method_arg_names(numargs) +
' if (__``NAME.override != null) \\\n' +
' return __``NAME.override.NAME(%s); \\\n' % method_arg_names(numargs) +
' else if (__``NAME.overrideReturn) \\\n' +
' return __``NAME.returnsVal; \\\n' +
'`ifdef MOCKTYPE_HAS_NO_PARENT \\\n' +
' else \\\n' +
' begin \\\n' +
' RETURN bogus; \\\n' +
' return bogus; \\\n' +
' end \\\n' +
'`else \\\n' +
' else \\\n' +
' return super.NAME(%s); \\\n' % method_arg_names(numargs) +
'`endif \\\n' +
'endfunction\n\n')
def void_function_macro(numargs, fout):
fout.write ('`define SVMOCK_VFUNC%0d(NAME%s) \\\n' % (numargs, allArgString(numargs, ',', ',', '=NODEFAULT')) +
defaultMacros(numargs) +
'`undef invoke%0d_``NAME \\\n' % numargs +
'`undef args%0d_``NAME \\\n' % numargs +
'`define invoke%0d_``NAME`` virtual %s \\\n' % (numargs, functionDecl('NAME',numargs)) +
'`define args%0d_``NAME`` %s \\\n' % (numargs, method_arg_names(numargs)) +
'`SVMOCK_VOID_FUNCTION_MOCKER_CLASS%0d(NAME%s) \\\n' % (numargs, allArgString(numargs, ',', ',', 'MACRO')) +
'__``NAME``__mocker #(PARENT) __``NAME = new(`"NAME`", __mockers, this); \\\n' +
'virtual function void NAME(%s); \\\n' % method_args(numargs) +
' __``NAME.called(%s); \\\n' % method_arg_names(numargs) +
' if (__``NAME.override != null) \\\n' +
' __``NAME.override.NAME(%s); \\\n' % method_arg_names(numargs) +
'`ifdef MOCKTYPE_HAS_NO_PARENT \\\n' +
'`else \\\n' +
' else \\\n' +
' super.NAME(%s); \\\n' % method_arg_names(numargs) +
'`endif \\\n' +
'endfunction\n\n')
def task_macro(numargs, fout):
fout.write ('`define SVMOCK_TASK%0d(NAME%s) \\\n' % (numargs, allArgString(numargs, ',', ',', '=NODEFAULT')) +
defaultMacros(numargs) +
'`undef invoke%0d_``NAME \\\n' % numargs +
'`undef args%0d_``NAME \\\n' % numargs +
'`define invoke%0d_``NAME`` virtual %s \\\n' % (numargs, taskDecl('NAME',numargs)) +
'`define args%0d_``NAME`` %s \\\n' % (numargs, method_arg_names(numargs)) +
'`SVMOCK_TASK_MOCKER_CLASS%0d(NAME%s) \\\n' % (numargs, allArgString(numargs, ',', ',', 'MACRO')) +
'__``NAME``__mocker #(PARENT) __``NAME = new(`"NAME`", __mockers, this); \\\n' +
'virtual task NAME(%s); \\\n' % method_args(numargs) +
' __``NAME.called(%s); \\\n' % method_arg_names(numargs) +
' if (__``NAME.override != null) \\\n' +
' __``NAME.override.NAME(%s); \\\n' % method_arg_names(numargs) +
'`ifdef MOCKTYPE_HAS_NO_PARENT \\\n' +
'`else \\\n' +
' else \\\n' +
' super.NAME(%s); \\\n' % method_arg_names(numargs) +
'`endif \\\n' +
'endtask\n\n')
########################
###### MAP MACROS ######
########################
def map_function_macro(numargs, fout):
fout.write ('`define SVMOCK_MAP_FUNC%0d(ORIGINAL,INSTEAD) \\\n' % numargs +
'class __``INSTEAD``__mocker #(type PARENT=int) extends __``ORIGINAL``__mocker #(PARENT); \\\n' +
' function new(string name, ref __mocker __mockers[$], input PARENT _parent, input __``ORIGINAL``__mocker #(PARENT) associate = null); \\\n' +
' super.new(name, __mockers, _parent, associate); \\\n' +
' endfunction \\\n' +
' `invoke%0d_``ORIGINAL \\\n' % numargs +
' return parent.INSTEAD(`args%0d_``ORIGINAL); \\\n' % numargs +
' endfunction \\\n' +
'endclass \\\n ' +
'__``INSTEAD``__mocker #(PARENT) __``INSTEAD = new(`"INSTEAD`", __mockers, this, __``ORIGINAL); \n\n')
def map_void_function_macro(numargs, fout):
fout.write ('`define SVMOCK_MAP_VFUNC%0d(ORIGINAL,INSTEAD) \\\n' % numargs +
'class __``INSTEAD``__mocker #(type PARENT=int) extends __``ORIGINAL``__mocker #(PARENT); \\\n' +
' function new(string name, ref __mocker __mockers[$], input PARENT _parent, input __``ORIGINAL``__mocker #(PARENT) associate = null); \\\n' +
' super.new(name, __mockers, _parent, associate); \\\n' +
' endfunction \\\n' +
' `invoke%0d_``ORIGINAL \\\n' % numargs +
' parent.INSTEAD(`args%0d_``ORIGINAL); \\\n' % numargs +
' endfunction \\\n' +
'endclass \\\n' +
'__``INSTEAD``__mocker #(PARENT) __``INSTEAD = new(`"INSTEAD`", __mockers, this, __``ORIGINAL); \n\n')
def map_task_macro(numargs, fout):
fout.write ('`define SVMOCK_MAP_TASK%0d(ORIGINAL,INSTEAD) \\\n' % numargs +
'class __``INSTEAD``__mocker #(type PARENT=int) extends __``ORIGINAL``__mocker #(PARENT); \\\n' +
' function new(string name, ref __mocker __mockers[$], input PARENT _parent, input __``ORIGINAL``__mocker #(PARENT) associate = null); \\\n' +
' super.new(name, __mockers, _parent, associate); \\\n' +
' endfunction \\\n' +
' `invoke%0d_``ORIGINAL \\\n' % numargs +
' parent.INSTEAD(`args%0d_``ORIGINAL); \\\n' % numargs +
' endtask \\\n' +
'endclass \\\n' +
'__``INSTEAD``__mocker #(PARENT) __``INSTEAD = new(`"INSTEAD`", __mockers, this, __``ORIGINAL); \n\n')
def mockers(numargs):
fout = open('../src/__mocker' + str(numargs) + '.svh', 'w+')
base_mocker_class(numargs, fout)
function_mocker_class(numargs, fout)
void_function_mocker_class(numargs, fout)
task_mocker_class(numargs, fout)
###############################
###### MOCK CLASS MACROS ######
###############################
def base_mocker_class(numargs, fout):
# macro header
fout.write ('`define SVMOCK_MOCKER_CLASS%0d(NAME,RETURN%s,MODIFIER=) \\\n' % (numargs, allArgString(numargs, ',', ',', '=NODEFAULT')) +
'class __``NAME``MODIFIER``__mocker #(type PARENT=int) extends __mocker; \\\n' +
'PARENT parent; \\\n' +
'function new(string name, ref __mocker __mockers[$], input PARENT _parent); \\\n' +
' super.new(name, __mockers); \\\n' +
' parent = _parent; \\\n' +
'endfunction \\\n' +
with_comparison_properties(numargs) +
functionDecl('called',numargs) + ' \\\n' + # called
' timesCnt += 1; \\\n' +
with_property_assignments(numargs) +
'endfunction \\\n' +
functionDecl('with_args',numargs) + ' \\\n' + # with
with_property_assignments(numargs, 'exp') +
'endfunction \\\n' +
'function bit verify(); \\\n' + # verify
' string error_signature [int]; \\\n' +
' verify = super.verify(); \\\n' +
with_property_check(numargs) +
' clear(); \\\n' +
' return verify; \\\n' +
'endfunction \\\n' +
'virtual function void clear(); \\\n' + # clear
' super.clear; \\\n' +
with_property_clear(numargs) +
'endfunction \\\n' +
'endclass\n\n')
def function_mocker_class(numargs, fout):
fout.write ('`define SVMOCK_FUNCTION_MOCKER_CLASS%0d(NAME,RETURN%s) \\\n' % (numargs, allArgString(numargs, ',', ',', '=NODEFAULT')) +
'`SVMOCK_MOCKER_CLASS%0d(NAME,RETURN%s,_base) \\\n' % (numargs, allArgString(numargs, ',', ',')) +
'class __``NAME``__mocker #(type PARENT=int) extends __``NAME``_base__mocker #(PARENT); \\\n' +
'function new(string name, ref __mocker __mockers[$], input PARENT _parent, input __``NAME``__mocker #(PARENT) associate = null); \\\n' +
' super.new(name, __mockers, _parent); \\\n' +
' if (associate != null) associate.map[name] = this; \\\n' +
'endfunction \\\n' +
'virtual ' + functionDecl('NAME',numargs,'RETURN') + ' \\\n' + # NAME
' return NAME; \\\n' + # NAME
'endfunction \\\n' +
'RETURN returnsVal; \\\n' +
'function void returns(RETURN r); \\\n' + # returns
' overrideReturn = 1; \\\n' +
' returnsVal = r; \\\n' +
'endfunction \\\n' +
'__``NAME``__mocker #(PARENT) map [string]; \\\n' +
'__``NAME``__mocker #(PARENT) override; \\\n' +
'function void will_by_default(string i); \\\n' + # will_by_default
' override = map[i]; \\\n' +
'endfunction \\\n' +
'virtual function void clear(); \\\n' + # clear
' super.clear(); \\\n' +
' override = null; \\\n' +
'endfunction \\\n' +
'endclass\n\n')
def void_function_mocker_class(numargs, fout):
fout.write ('`define SVMOCK_VOID_FUNCTION_MOCKER_CLASS%0d(NAME%s) \\\n' % (numargs, allArgString(numargs, ',', ',', '=NODEFAULT')) +
'`SVMOCK_MOCKER_CLASS%0d(NAME,RETURN%s,_base) \\\n' % (numargs, allArgString(numargs, ',', ',')) +
'class __``NAME``__mocker #(type PARENT=int) extends __``NAME``_base__mocker #(PARENT); \\\n' +
'function new(string name, ref __mocker __mockers[$], input PARENT _parent, input __``NAME``__mocker #(PARENT) associate = null); \\\n' +
' super.new(name, __mockers, _parent); \\\n' +
' if (associate != null) associate.map[name] = this; \\\n' +
'endfunction \\\n' +
'virtual ' + functionDecl('NAME',numargs) + ' \\\n' + # NAME
'endfunction \\\n' +
'__``NAME``__mocker #(PARENT) map [string]; \\\n' + # will_by_default
'__``NAME``__mocker #(PARENT) override; \\\n' +
'function void will_by_default(string i); \\\n' +
' override = map[i]; \\\n' +
'endfunction \\\n' +
'virtual function void clear(); \\\n' + # clear
' super.clear(); \\\n' +
' override = null; \\\n' +
'endfunction \\\n' +
'endclass\n\n')
def task_mocker_class(numargs, fout):
fout.write ('`define SVMOCK_TASK_MOCKER_CLASS%0d(NAME%s) \\\n' % (numargs, allArgString(numargs, ',', ',', '=NODEFAULT')) +
'`SVMOCK_MOCKER_CLASS%0d(NAME,void%s,_base) \\\n' % (numargs, allArgString(numargs, ',', ',')) +
'class __``NAME``__mocker #(type PARENT=int) extends __``NAME``_base__mocker #(PARENT); \\\n' +
'function new(string name, ref __mocker __mockers[$], input PARENT _parent, input __``NAME``__mocker #(PARENT) associate = null); \\\n' +
' super.new(name, __mockers, _parent); \\\n' +
' if (associate != null) associate.map[name] = this; \\\n' +
'endfunction \\\n' +
'virtual ' + taskDecl('NAME',numargs) + ' \\\n' + # NAME
'endtask \\\n' +
'__``NAME``__mocker #(PARENT) map [string]; \\\n' + # will_by_default
'__``NAME``__mocker #(PARENT) override; \\\n' +
'function void will_by_default(string i); \\\n' +
' override = map[i]; \\\n' +
'endfunction \\\n' +
'virtual function void clear(); \\\n' + # clear
' super.clear(); \\\n' +
' override = null; \\\n' +
'endfunction \\\n' +
'endclass\n')
################################
###### HELPER FUNCTIONS ########
################################
def with_comparison_properties(numargs):
ret = ''
for j in range(0,numargs):
ret += '`MOCKER_WITH(NAME,ARG%0d,TYPE%0d,MOD%0d) \\\n' % (j,j,j)
ret += 'ARG%0d``__with __with_%0d [$]; \\\n' % (j,j)
return ret
def with_property_assignments(numargs, type='act'):
ret = ''
if (type == 'exp'):
for j in range(0,numargs):
ret += ' begin \\\n'
ret += ' ARG%0d``__with __w = new(); \\\n' % j
ret += ' __w.%s = ARG%0d; \\\n' % (type,j)
ret += ' __with_%0d.push_back(__w); \\\n' % (j)
ret += ' end \\\n'
else:
for j in range(0,numargs):
ret += ' for (int i=0; i<__with_%0d.size(); i+=1) begin \\\n' % j
ret += ' if (!__with_%0d[i].done) begin \\\n' % j
ret += ' __with_%0d[i].%s = ARG%0d; \\\n' % (j,type,j)
ret += ' __with_%0d[i].done = 1; \\\n' % j
ret += ' break; \\\n'
ret += ' end \\\n'
ret += ' end \\\n'
return ret
def with_property_check(numargs):
ret = ''
for j in range(0,numargs):
ret += ' for (int i=0; i<__with_%0d.size(); i+=1) begin \\\n' % j
ret += ' bit comp = __with_%0d[i].compare(); \\\n' % j
ret += ' if (!comp) begin \\\n'
ret += ' string _name = `"NAME`"; \\\n'
ret += ' string _arg = `"ARG%0d`"; \\\n' % j
ret += ' if (!error_signature.exists(i)) begin \\\n'
ret += ' string es; \\\n'
ret += ' $sformat(es, "EXPECT_CALL::with_args[%0d].miscompare %s::%s: (%s)", i, _name, _arg, __with_' + str(j) + '[i].as_string()); \\\n'
ret += ' error_signature[i] = es; \\\n'
ret += ' end \\\n'
ret += ' else \\\n'
ret += ' $sformat(error_signature[i], "%s\\n %s::%s: (%s)", error_signature[i], _name, _arg, __with_' + str(j) + '[i].as_string()); \\\n'
ret += ' end \\\n'
ret += ' verify &= comp; \\\n'
ret += ' end \\\n'
ret += ' foreach (error_signature[i]) $display(error_signature[i]); \\\n'
return ret
def with_property_clear(numargs):
ret = ''
for j in range(0,numargs):
ret += ' __with_%0d.delete(); \\\n' % j
return ret
def oneArgString(idx, delim=' ', default=''):
if default == '=NODEFAULT':
return 'DIR%0d%sTYPE%0d%sARG%0d%sMOD%d%sDEF%0d%s' % (idx,delim,idx,delim,idx,delim,idx,delim,idx,default)
elif default == 'MACRO':
return 'DIR%0d%sTYPE%0d%sARG%0d%sMOD%d%s`NAME``_``ARG%0d``_``DEF%0d' % (idx,delim,idx,delim,idx,delim,idx,delim,idx,idx)
elif delim == ',':
return 'DIR%0d%sTYPE%0d%sARG%0d%sMOD%d%sDEF%0d' % (idx,delim,idx,delim,idx,delim,idx,delim,idx)
else:
return 'DIR%0d%sTYPE%0d%sARG%0d%sMOD%d' % (idx,delim,idx,delim,idx,delim,idx)
def allArgString(numargs, delim=' ', prefix='', default=''):
a = ""
if numargs > 0:
a += prefix
for j in range(0,numargs):
a += oneArgString(j, delim, default)
if (j < numargs-1):
a += ','
return a
def functionDecl(name,numargs,type='void'):
return 'function %s %s(%s);' % (type, name, allArgString(numargs))
def taskDecl(name,numargs):
return 'task %s(%s);' % (name, allArgString(numargs))
def method_args(numargs):
ret = ''
for j in range(0,numargs):
if (j == numargs-1):
ret += 'DIR%0d TYPE%0d ARG%0d MOD%0d `NAME``_``ARG%0d``_``DEF%0d' % (j,j,j,j,j,j)
else:
ret += 'DIR%0d TYPE%0d ARG%0d MOD%0d `NAME``_``ARG%0d``_``DEF%0d, ' % (j,j,j,j,j,j)
return ret
def method_arg_names(numargs):
ret = ''
for j in range(0,numargs):
if (j == numargs-1):
ret += 'ARG%0d' % j
else:
ret += 'ARG%0d, ' % j
return ret
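# Illustrative note (not part of the original generator): for numargs=2 the
# string helpers above produce, roughly:
#
#   allArgString(2)         -> 'DIR0 TYPE0 ARG0 MOD0,DIR1 TYPE1 ARG1 MOD1'
#   functionDecl('NAME', 2) -> 'function void NAME(DIR0 TYPE0 ARG0 MOD0,DIR1 TYPE1 ARG1 MOD1);'
#   method_arg_names(2)     -> 'ARG0, ARG1'
#
# i.e. the DIRn/TYPEn/ARGn/MODn/DEFn tokens are macro arguments that the user
# supplies when invoking the generated `SVMOCK_* macros.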
################################
########## MAIN ##########
################################
if __name__ == "__main__":
f_macros = open('../src/svmock_mocker_defines.svh', 'w+')
for i in range(0,10):
mockers(i)
task_macro(i, f_macros)
map_task_macro(i, f_macros)
void_function_macro(i, f_macros)
map_void_function_macro(i, f_macros)
function_macro(i, f_macros)
map_function_macro(i, f_macros)
|
|
"""
lexington.regex
===============
Lexington's lexers operate using the derivative of a regular expression.
Python's regular expressions as implemented in the `re` module are not
actually regular expressions, and probably wouldn't give you access to their
derivatives even if they were. So, this implementation is necessary.
:copyright: (C) 2013, Matthew Frazier
:license: Released under the MIT/X11 license, see LICENSE for details
"""
from __future__ import unicode_literals
from abc import ABCMeta, abstractmethod, abstractproperty
from collections import Sequence
from .strings import Strings, Characters, native_strings, n, string_type
### Very scary metaprogramming ###
class _RegexClass(ABCMeta):
"""
This is a metaclass, and a giant hack. It has two primary functions:
    First, it obviates the need to decorate every `__repr__` method with
`~lexington.strings.native_strings`.
Second, because it doesn't make sense to instantiate `Regex` directly,
it's convenient to use it as a factory for actual `Regex` subclasses.
However, the behavior of `__new__` can be a bit confusing.
By overriding `__call__` in the metaclass directly, we can prevent the
whole "`__new__`/`__init__`" stack from entering the picture when using
`Regex` as a factory function.
"""
def __new__(mcls, name, bases, namespace):
# __new__ is called on the metaclass when creating a class.
# It has the chance to modify the class's namespace before the
# class is actually created.
# We use this as a chance to wrap the __repr__ method.
if '__repr__' in namespace:
namespace['__repr__'] = native_strings(namespace['__repr__'])
return super(_RegexClass, mcls).__new__(mcls, name, bases, namespace)
def __call__(cls, *args, **kwargs):
# A class is just an object, and a metaclass is the type of that
# object. So, when you do AClass(...), it calls the __call__ method
# on that class, just like any other object.
if cls is Regex:
return regexify(*args, **kwargs)
else:
return super(_RegexClass, cls).__call__(*args, **kwargs)
_Regex = _RegexClass(n("_Regex"), (object,), dict(
__doc__ = "This is needed for Python 3 compatibility. "
"The metaclass syntax changed between Python 3 and Python 2, "
"so we need to construct a base class programmatically.",
__slots__ = ()
))
### Actual regular expression classes ###
class Regex(_Regex):
"""
This is the abstract base class for elements of regular expressions.
(It's also used as a factory for converting mundane Python data types
like strings into regular expressions.)
`Regex` objects are immutable, hashable, and comparable.
In practice, `Regex`'s subclasses should be regarded as implementation
details. You shouldn't attempt to create instances of them, create new
subclasses of `Regex`, or test that a regex is an instance of a
particular `Regex` subclass.
:param e: An expression to convert into a regular expression.
(This is equivalent to `regexify`.)
"""
__metaclass__ = _RegexClass
__slots__ = ()
### Abstractions to override
@abstractmethod
def derive(self, sym):
"""
Returns the derivative of this regular expression with respect to
a symbol. You can view it as::
{s[1:] for s in languages_generated_by(self) if s and s[0] == sym}
:param sym: The symbol to derive this regular expression with regards
to.
"""
pass
@abstractproperty
def accepts_empty_string(self):
"""
Indicates whether this regular expression will accept the empty
string.
"""
pass
@abstractproperty
def alphabet(self):
"""
Indicates the alphabet of the strings this regular expression can
match. `None` indicates that this regular expression is
independent of alphabet.
The alphabet will usually be `~lexington.strings.Text` or
`~lexington.strings.Bytestring`.
"""
pass
def __eq__(self, other):
return type(self) is type(other) and hash(self) == hash(other)
@abstractmethod
def __hash__(self):
pass
### High-level regex operations
def match(self, subject):
"""
Determines whether the `subject` matches this regex. This performs a
total match -- if you want the behavior of `re.match`, which only
matches a prefix, use `match_prefix`.
This returns `True` if the match succeeds, and `False` if not.
:param subject: The string to match against this regex.
"""
re = self
for sym in subject:
re = re.derive(sym)
if re is Null:
return False
return re.accepts_empty_string
@property
def literal(self):
"""
If this regular expression matches a literal string exactly, this
property contains that string. Otherwise, it will be `None`.
(You can generally only assume that regexes constructed *from* literal
strings will have this property.)
"""
return None
### Operator overloads and convenience methods
def star(self):
"""
Creates a regular expression that accepts this one repeated any
number of times. (Equivalent to the `~lexington.regex.star` function.)
"""
return star(self)
def plus(self):
"""
Creates a regular expression that accepts this one repeated any
number of times.
"""
return concat(self, star(self))
def maybe(self):
"""
Creates a regular expression that accepts this one, or the empty
string.
"""
return union(self, Epsilon)
def __add__(self, suffix):
return concat(self, suffix)
def __radd__(self, prefix):
return concat(prefix, self)
def __or__(self, other):
return union(self, other)
def __ror__(self, other):
return union(other, self)
def __pow__(self, count):
return repeat(self, count)
class EpsilonRegex(Regex):
"""
A regular expression that matches the empty string.
"""
__slots__ = ()
def derive(self, sym):
return Null
accepts_empty_string = True
alphabet = None
def __repr__(self):
return "Epsilon"
def __hash__(self):
return hash(type(self))
class NullRegex(Regex):
"""
A regular expression that doesn't match any strings, even the empty
string.
"""
__slots__ = ()
def derive(self, sym):
return self
accepts_empty_string = False
alphabet = None
def __repr__(self):
return "Null"
def __hash__(self):
return hash(type(self))
class SymbolRegex(Regex):
"""
A regular expression that matches a particular symbol.
:param sym: The symbol to match.
"""
    __slots__ = ('sym',)
def __init__(self, sym):
self.sym = sym
def derive(self, sym):
return Epsilon if sym == self.sym else Null
accepts_empty_string = False
@property
def alphabet(self):
return string_type(self.sym)
@property
def literal(self):
return self.sym
def __repr__(self):
return "Regex(%r)" % self.sym
def __hash__(self):
return hash((id(type(self)), self.sym))
class AnySymbolRegex(Regex):
"""
A regular expression that matches ANY symbol (but not the lack of one).
Equivalent to ``.`` in Python's regex notation.
"""
__slots__ = ()
def derive(self, sym):
return Epsilon
accepts_empty_string = False
alphabet = None
def __repr__(self):
return "Any"
def __hash__(self):
return hash(type(self))
class UnionRegex(Regex):
"""
A regular expression that will match any of multiple options.
:param options: The regular expressions to accept.
"""
__slots__ = ('options', 'alphabet')
def __init__(self, options):
self.alphabet = None
self.options = frozenset(options)
for opt in self.options:
if opt.alphabet is not None:
if self.alphabet is None:
self.alphabet = opt.alphabet
elif opt.alphabet is not self.alphabet:
raise TypeError(n("Cannot mix alphabets %r and %r in "
"union" %
(self.alphabet, opt.alphabet)))
def derive(self, sym):
return union(*(r.derive(sym) for r in self.options))
@property
def accepts_empty_string(self):
return any(r.accepts_empty_string for r in self.options)
def __repr__(self):
return " | ".join(repr(r) for r in self.options)
def __hash__(self):
return hash((id(type(self)), self.options))
class ConcatRegex(Regex):
"""
A regular expression that matches two regular expressions in a row.
"""
__slots__ = ('prefix', 'suffix', 'alphabet')
def __init__(self, prefix, suffix):
self.prefix = prefix
self.suffix = suffix
# This logic is admittedly a bit twisty. The idea is:
# If the prefix and suffix are alphabet-independent, so is this.
# If the prefix and suffix have the same alphabet, or one has an
# alphabet and the other doesn't, this will have the same alphabet.
# If the prefix and suffix have different alphabets, that's an error.
alpha_pre = prefix.alphabet
alpha_suf = suffix.alphabet
if alpha_pre is not None or alpha_suf is not None:
if alpha_pre is not alpha_suf:
raise TypeError(n("Cannot concatenate alphabets %r and %r" %
(alpha_pre, alpha_suf)))
self.alphabet = alpha_pre if alpha_suf is None else alpha_suf
else:
self.alphabet = None
def derive(self, sym):
if self.prefix.accepts_empty_string:
return union(concat(self.prefix.derive(sym), self.suffix),
self.suffix.derive(sym))
else:
return concat(self.prefix.derive(sym), self.suffix)
@property
def accepts_empty_string(self):
return (self.prefix.accepts_empty_string and
self.suffix.accepts_empty_string)
@property
def literal(self):
if self.prefix.literal and self.suffix.literal:
return self.prefix.literal + self.suffix.literal
else:
return None
def __repr__(self):
if self.literal:
return "Regex(%r)" % self.literal
else:
return "%r + %r" % (self.prefix, self.suffix)
def __hash__(self):
return hash((id(type(self)), self.prefix, self.suffix))
class StarRegex(Regex):
"""
A regular expression that will match a certain regex, repeated any number
of times.
:param regex: The regular expression describing the strings to repeat.
"""
    __slots__ = ('regex',)
def __init__(self, regex):
self.regex = regex
def derive(self, sym):
return concat(self.regex.derive(sym), self)
accepts_empty_string = True
@property
def alphabet(self):
return self.regex.alphabet
def __repr__(self):
return "star(%r)" % self.regex
def __hash__(self):
return hash((id(type(self)), self.regex))
class RepeatRegex(Regex):
"""
A regular expression that will match a certain regex, repeated a specific
number of times.
:param regex: The regular expression describing the strings to repeat.
:param count: The number of times to repeat it.
"""
__slots__ = ('regex', 'count')
def __init__(self, regex, count):
        if count < 2:
            raise ValueError("Repeat count must be greater than 1: %d" % count)
self.regex = regex
self.count = count
def derive(self, sym):
return concat(self.regex.derive(sym),
repeat(self.regex, self.count - 1))
accepts_empty_string = False
@property
def alphabet(self):
return self.regex.alphabet
def __repr__(self):
return "%r ** %d" % (self.regex, self.count)
def __hash__(self):
return hash((id(type(self)), self.regex, self.count))
### Regex constructors ###
def regexify(e):
"""
Converts a Python object to a `Regex`. If it's already a `Regex`, it just
returns it. It will also accept any string or character type, and create
a regex that matches that exactly.
:param e: The Python object to create a regex of.
"""
if isinstance(e, Regex):
return e
elif isinstance(e, Strings):
if len(e) == 0:
return Epsilon
return join(SymbolRegex(sym) for sym in e)
elif isinstance(e, Characters):
return SymbolRegex(e)
else:
raise TypeError(n("Instances of %r can't be automatically converted "
"to regular expressions" % type(e)))
#: A regular expression that matches any single symbol.
Any = AnySymbolRegex()
#: A regular expression that matches the empty string.
Epsilon = EpsilonRegex()
#: A regular expression that refuses to accept *anything* -- even the
#: empty string.
Null = NullRegex()
def union(*options):
"""
Creates a regular expression that accepts *any* of the following regexes.
:param options: The regular expressions to accept.
"""
# A list comprehension would be cleaner, but we need to be able to check
# the value *after* processing to leave out Nulls.
s = set()
for regex in options:
if isinstance(regex, UnionRegex):
s.update(regex.options)
else:
regex = regexify(regex)
if regex is not Null:
s.add(regex)
if not s:
return Null
elif len(s) == 1:
return s.pop()
else:
return UnionRegex(s)
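# Illustrative note (not part of the original module): union() applies the
# usual smart-constructor simplifications, per the code above, e.g.:
#
#   union()                     is Null
#   union(some_regex)           is some_regex itself
#   union(some_regex, Null)     is some_regex (Null options are dropped)
#   union(UnionRegex(...), x)   flattens the nested union's options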
def concat(prefix, suffix):
"""
Concatenates two regular expressions, such that `prefix` will be matched,
then `suffix` will be matched after it is complete.
:param prefix: The first regular expression.
:param suffix: The second regular expression.
"""
prefix = regexify(prefix)
suffix = regexify(suffix)
if prefix is Null or suffix is Null:
return Null
elif prefix is Epsilon:
return suffix
elif suffix is Epsilon:
return prefix
else:
return ConcatRegex(prefix, suffix)
def join(regexes):
"""
Concatenates a sequence of regular expressions, such that each one
will be matched successively. (Note that a `Null` value anywhere in the
sequence will result in a `Null` overall.) A zero-length sequence
will return `Epsilon`.
:param regexes: The regular expressions to concatenate.
"""
if not isinstance(regexes, Sequence):
regexes = tuple(regexes)
n = len(regexes)
if n == 2:
return concat(regexes[0], regexes[1])
elif n == 1:
return regexify(regexes[0])
elif n == 0:
return Epsilon
else:
# This is a right fold (`reduce` backwards).
# [-2::-1] is slice for "start at the next-to-last and go in reverse."
final = regexes[-1]
for regex in regexes[-2::-1]:
final = concat(regex, final)
# Break early on Null.
if final is Null:
return Null
return final
def star(regex):
"""
Creates a regular expression that accepts `regex`, repeated any number
of times -- even 0.
:param regex: The regular expression describing the strings to repeat.
"""
if regex is Epsilon:
return Epsilon
elif regex is Null:
return Epsilon
elif isinstance(regex, StarRegex):
# r* == r**, so we can avoid wrapping it again and wasting time.
return regex
else:
return StarRegex(regexify(regex))
def repeat(regex, count):
"""
Creates a regular expression that accepts `regex` repeated a specific
number of times.
    :param regex: The regular expression describing the strings to repeat.
:param count: The number of times to repeat it.
"""
if count == 0:
return Epsilon
elif count == 1:
return regex
elif regex is Epsilon:
return Epsilon
elif regex is Null:
return Null
else:
return RepeatRegex(regexify(regex), count)
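# Illustrative usage sketch (not part of the original module). It assumes
# that `Strings` from lexington.strings covers ordinary text strings, so that
# Regex("ab") builds a literal matcher as described in regexify()'s docstring.
if __name__ == "__main__":
    ab_star = star(Regex("ab"))        # matches "", "ab", "abab", ...
    assert ab_star.match("abab")
    assert not ab_star.match("aba")
    # Matching works by taking the derivative with respect to each symbol:
    d = Regex("abc").derive("a")       # a regex equivalent to "bc"
    assert d.match("bc")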
|
|
#!/usr/bin/env python
# Copyright (c) 2012 NTT DOCOMO, INC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import time
import re
import socket
import stat
from ironic.common import exception
from ironic.common import utils
from ironic.openstack.common import excutils
from ironic.openstack.common import log as logging
LOG = logging.getLogger(__name__)
# All functions are called from deploy() directly or indirectly.
# They are split for stub-out.
def discovery(portal_address, portal_port):
"""Do iSCSI discovery on portal."""
utils.execute('iscsiadm',
'-m', 'discovery',
'-t', 'st',
'-p', '%s:%s' % (portal_address, portal_port),
run_as_root=True,
check_exit_code=[0])
def login_iscsi(portal_address, portal_port, target_iqn):
"""Login to an iSCSI target."""
utils.execute('iscsiadm',
'-m', 'node',
'-p', '%s:%s' % (portal_address, portal_port),
'-T', target_iqn,
'--login',
run_as_root=True,
check_exit_code=[0])
    # Ensure the login is complete
time.sleep(3)
def logout_iscsi(portal_address, portal_port, target_iqn):
"""Logout from an iSCSI target."""
utils.execute('iscsiadm',
'-m', 'node',
'-p', '%s:%s' % (portal_address, portal_port),
'-T', target_iqn,
'--logout',
run_as_root=True,
check_exit_code=[0])
def make_partitions(dev, root_mb, swap_mb):
"""Create partitions for root and swap on a disk device."""
# Lead in with 1MB to allow room for the partition table itself, otherwise
# the way sfdisk adjusts doesn't shift the partition up to compensate, and
# we lose the space.
# http://bazaar.launchpad.net/~ubuntu-branches/ubuntu/raring/util-linux/
# raring/view/head:/fdisk/sfdisk.c#L1940
stdin_command = ('1,%d,83;\n,%d,82;\n0,0;\n0,0;\n' % (root_mb, swap_mb))
utils.execute('sfdisk', '-uM', dev, process_input=stdin_command,
run_as_root=True,
attempts=3,
check_exit_code=[0])
# avoid "device is busy"
time.sleep(3)
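# Illustrative note (not part of the original module): for root_mb=512 and
# swap_mb=1024 the sfdisk input built above would be
#
#   1,512,83;
#   ,1024,82;
#   0,0;
#   0,0;
#
# i.e. a 512 MB Linux partition (type 83) starting at 1 MB, followed by a
# 1024 MB swap partition (type 82), with partitions 3 and 4 left empty.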
def is_block_device(dev):
"""Check whether a device is block or not."""
s = os.stat(dev)
return stat.S_ISBLK(s.st_mode)
def dd(src, dst):
"""Execute dd from src to dst."""
utils.execute('dd',
'if=%s' % src,
'of=%s' % dst,
'bs=1M',
'oflag=direct',
run_as_root=True,
check_exit_code=[0])
def mkswap(dev, label='swap1'):
"""Execute mkswap on a device."""
utils.execute('mkswap',
'-L', label,
dev,
run_as_root=True,
check_exit_code=[0])
def block_uuid(dev):
"""Get UUID of a block device."""
out, _ = utils.execute('blkid', '-s', 'UUID', '-o', 'value', dev,
run_as_root=True,
check_exit_code=[0])
return out.strip()
def switch_pxe_config(path, root_uuid):
"""Switch a pxe config from deployment mode to service mode."""
with open(path) as f:
lines = f.readlines()
root = 'UUID=%s' % root_uuid
rre = re.compile(r'\{\{ ROOT \}\}')
dre = re.compile('^default .*$')
with open(path, 'w') as f:
for line in lines:
line = rre.sub(root, line)
line = dre.sub('default boot', line)
f.write(line)
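# Illustrative note (not part of the original module): given a hypothetical
# deployment pxe config containing lines such as
#
#   default deploy
#   append root={{ ROOT }} ro
#
# switch_pxe_config(path, '12345678-...') would rewrite them to
#
#   default boot
#   append root=UUID=12345678-... ro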
def notify(address, port):
"""Notify a node that it becomes ready to reboot."""
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.connect((address, port))
s.send('done')
finally:
s.close()
def get_dev(address, port, iqn, lun):
"""Returns a device path for given parameters."""
dev = "/dev/disk/by-path/ip-%s:%s-iscsi-%s-lun-%s" \
% (address, port, iqn, lun)
return dev
def get_image_mb(image_path):
"""Get size of an image in Megabyte."""
mb = 1024 * 1024
image_byte = os.path.getsize(image_path)
# round up size to MB
image_mb = int((image_byte + mb - 1) / mb)
return image_mb
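# Illustrative note (not part of the original module): the expression above is
# integer ceiling division, e.g. a 1-byte image -> 1 MB and a
# (1024*1024 + 1)-byte image -> 2 MB, so a partially filled megabyte still
# counts as a whole one.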
def work_on_disk(dev, root_mb, swap_mb, image_path):
"""Creates partitions and write an image to the root partition."""
root_part = "%s-part1" % dev
swap_part = "%s-part2" % dev
if not is_block_device(dev):
LOG.warn(_("parent device '%s' not found"), dev)
return
make_partitions(dev, root_mb, swap_mb)
if not is_block_device(root_part):
LOG.warn(_("root device '%s' not found"), root_part)
return
if not is_block_device(swap_part):
LOG.warn(_("swap device '%s' not found"), swap_part)
return
dd(image_path, root_part)
mkswap(swap_part)
try:
root_uuid = block_uuid(root_part)
except exception.ProcessExecutionError:
with excutils.save_and_reraise_exception():
LOG.error(_("Failed to detect root device UUID."))
return root_uuid
def deploy(address, port, iqn, lun, image_path, pxe_config_path,
root_mb, swap_mb):
"""All-in-one function to deploy a node."""
dev = get_dev(address, port, iqn, lun)
image_mb = get_image_mb(image_path)
if image_mb > root_mb:
root_mb = image_mb
discovery(address, port)
login_iscsi(address, port, iqn)
try:
root_uuid = work_on_disk(dev, root_mb, swap_mb, image_path)
except exception.ProcessExecutionError as err:
with excutils.save_and_reraise_exception():
# Log output if there was a error
LOG.error(_("Deploy to address %s failed.") % address)
LOG.error(_("Command: %s") % err.cmd)
LOG.error(_("StdOut: %r") % err.stdout)
LOG.error(_("StdErr: %r") % err.stderr)
finally:
logout_iscsi(address, port, iqn)
switch_pxe_config(pxe_config_path, root_uuid)
    # Ensure the node has started netcat on the port after POSTing the request.
time.sleep(3)
notify(address, 10000)
|
|
#!/usr/bin/env python
# Written by Bram Cohen and Uoti Urpala
from bisect import bisect
def unique_lcs(a, b):
    # set index[line in a] = position of line in a, unless
    # the line is a duplicate, in which case it's set to None
index = {}
for i in xrange(len(a)):
line = a[i]
if line in index:
index[line] = None
else:
            index[line] = i
# make btoa[i] = position of line i in a, unless
# that line doesn't occur exactly once in both,
# in which case it's set to None
btoa = [None] * len(b)
index2 = {}
for pos, line in enumerate(b):
next = index.get(line)
if next is not None:
if line in index2:
# unset the previous mapping, which we now know to
# be invalid because the line isn't unique
btoa[index2[line]] = None
del index[line]
else:
index2[line] = pos
btoa[pos] = next
# this is the Patience sorting algorithm
# see http://en.wikipedia.org/wiki/Patience_sorting
backpointers = [None] * len(b)
stacks = []
lasts = []
k = 0
for bpos, apos in enumerate(btoa):
if apos is None:
continue
# skip over solitary matches with no surrounding matching context
if (bpos == 0 or apos == 0 or a[apos-1] != b[bpos-1]) and \
(bpos == len(b)-1 or apos == len(a)-1 or a[apos+1] != b[bpos+1]):
continue
# as an optimization, check if the next line comes right after
# the previous line, because usually it does
if stacks and stacks[k] < apos and (k == len(stacks) - 1 or stacks[k+1] > apos):
k += 1
else:
k = bisect(stacks, apos)
if k > 0:
backpointers[bpos] = lasts[k-1]
if k < len(stacks):
stacks[k] = apos
lasts[k] = bpos
else:
stacks.append(apos)
lasts.append(bpos)
if len(lasts) == 0:
return []
result = []
k = lasts[-1]
while k is not None:
result.append((btoa[k], k))
k = backpointers[k]
result.reverse()
return result
def test_lcs():
assert unique_lcs('', '') == []
assert unique_lcs('a', 'a') == []
assert unique_lcs('a', 'b') == []
assert unique_lcs('ab', 'ab') == [(0, 0), (1, 1)]
assert unique_lcs('abcde', 'cdeab') == [(2, 0), (3, 1), (4, 2)]
assert unique_lcs('cdeab', 'abcde') == [(0, 2), (1, 3), (2, 4)]
assert unique_lcs('abXde', 'abYde') == [(0, 0), (1, 1), (3, 3), (4, 4)]
return
def recurse_matches(a, b, ahi, bhi, answer, maxrecursion, is_junk):
    # this will never happen normally; this check is to prevent DOS attacks
if maxrecursion < 0:
return
oldlength = len(answer)
# check for new matches after the last confirmed match ends
if oldlength == 0:
alo = 0
blo = 0
else:
lasta, lastb, lastlen = answer[-1]
alo = lasta + lastlen
blo = lastb + lastlen
if alo == ahi or blo == bhi:
return
last_enda = alo
last_endb = blo
last_checkb = blo
# extend individual line matches into match sections
for apos, bpos in unique_lcs(a[alo:ahi], b[blo:bhi]):
apos += alo
bpos += blo
# don't overlap with an existing match or check something which
# already got thrown out as junk
if bpos < last_checkb or apos < last_enda:
continue
# extend line match as far in either direction as possible
enda = apos + 1
endb = bpos + 1
while enda < ahi and endb < bhi and a[enda] == b[endb]:
enda += 1
endb += 1
while apos > last_enda and bpos > last_endb and a[apos-1] == b[bpos-1]:
apos -= 1
bpos -= 1
# mark what's been checked now, so even if it's junked it doesn't
# have to be checked again
last_checkb = endb
# if this section is junk, skip it
numreal = 0
for k in xrange(apos, enda):
if not is_junk(a[k]):
numreal += 1
if numreal >= 2:
break
else:
# Niklaus Wirth can bite me
continue
last_enda = enda
last_endb = endb
# find matches which come before the new match section
# this can happen because there may be lines which weren't unique
# in the whole file but are unique in the subsection
recurse_matches(a, b, apos, bpos, answer, maxrecursion - 1, is_junk)
answer.append((apos, bpos, enda - apos))
if len(answer) > oldlength:
# find matches between the last match and the end
recurse_matches(a, b, ahi, bhi, answer, maxrecursion - 1, is_junk)
# else: fall back to difflib (possibly a good idea, possibly not)
def default_is_junk(x):
return len(x.strip()) <= 2
def find_matches(a, b, is_junk = default_is_junk):
# single-line identical files match, despite being too short for
# a real match if they were part of a larger file
if a == b:
return [(0, 0, len(a))]
answer = []
recurse_matches(a, b, len(a), len(b), answer, 10, is_junk)
return answer
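# Illustrative note (not part of the original file): find_matches returns a
# list of (a_start, b_start, length) triples describing matching blocks. For
# identical inputs the early-return above gives a single block, e.g.:
#
#   find_matches(['a\n', 'b\n'], ['a\n', 'b\n'])  ==  [(0, 0, 2)]
#
# For differing inputs the result depends on unique_lcs() and the junk filter.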
try:
import psyco
psyco.bind(unique_lcs, 0)
except ImportError:
pass
# Stuff below here is for testing
def x(a, b):
t1 = time.time()
r = unique_lcs(a, b)
t2 = time.time()
#print r
for i, j in r:
assert a[i] == b[j]
#print a[i]
print
print 'time:', t2-t1
def return_false(x):
return False
def x2(a, b, is_junk = return_false):
t1 = time.time()
r = find_matches(a, b, is_junk)
t2 = time.time()
#print r
for i, j, l in r:
assert a[i:i+l] == b[j:j+l]
#print ''.join(a[i:i+l]),
print
print 'time:', t2-t1
def completely_random_test(x):
a = [str(i) for i in range(100000)]
b = list(a)
random.shuffle(b)
#print ' '.join(b)
#print
x(a, b)
def random_with_ranges_test(x):
d = [[str(i), str(i+1)] for i in xrange(0, 100000, 2)]
c = list(d)
random.shuffle(c)
a = []
for i in d:
a.extend(i)
b = []
for i in c:
b.extend(i)
#print ' '.join(b)
#print
x(a, b)
def is_A_test(s):
return s == 'A'
def test_lcsmatch():
global random, time
import random
import time
cur_time = time.time()
random.seed(cur_time)
print 'Seeded tests with %s' % (cur_time,)
completely_random_test(x)
random_with_ranges_test(x)
x2('0123456789abc', ' 01 89a 34 67')
x2('AB 1 CD 1 AB P XY Q AB 1 CD 1 AB', 'AB 2 CD 2 AB Q XY P AB 2 CD 2 AB')
x2('AjBjC', 'AjC')
x2('AjC', 'AjBjC')
x2('AjBjC', 'AjC', is_A_test)
x2('AjC', 'AjBjC', is_A_test)
x2('x', 'x')
x2('01 2', '01')
x2('ABPXYQAB', 'ABQXYPAB')
return
|
|
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 12 12:48:33 2014
@author: ibackus
"""
# DEBUGGING
import sys
# END DEBUGGING
# External modules
import pynbody
SimArray = pynbody.array.SimArray
import numpy as np
import scipy.interpolate as interp
import os
import cPickle as pickle
# ICgen modules
import calc_rho_zr
import calc_rho
import pos_gen_grid as pos_gen
import calc_temp
import calc_sigma
import pos_class
import make_snapshot
import isaac
import ICgen_settings
import make_sigma
ICgenDir = os.path.dirname(os.path.realpath(__file__))
param_filepath = os.path.join(ICgenDir, 'default.param')
param_default = isaac.configparser(param_filepath, ftype='param')
class IC:
def __init__(self):
# Initialize
# Load up default settings
self.settings = ICgen_settings.settings()
# Add modules/attributes
self.T = calc_temp.T(self)
self.maker = maker(self)
self.add = add(self)
def saver(filename = None):
"""
A wrapper for ICgen.save
"""
save(self, filename)
self.save = saver
def save(ICobj, filename=None):
if filename is None:
filename = ICobj.settings.filenames.IC_file_name
save_dict = {}
# --------------------------------------------------
# GET SETTINGS
# --------------------------------------------------
save_dict['settings'] = ICobj.settings
# --------------------------------------------------
# Prepare rho, if available
# --------------------------------------------------
if hasattr(ICobj, 'rho'):
rho = ICobj.rho
# Generate a dictionary containing rho_binned, z_bins, r_bins
rho_dict = {\
'rho': rho.rho_binned,\
'z': rho.z_bins,\
'r': rho.r_bins}
# Update save dictionary
save_dict['rho'] = rho_dict
# --------------------------------------------------
# Prepare sigma, if available
# --------------------------------------------------
if hasattr(ICobj, 'sigma'):
sigma = ICobj.sigma
kind = sigma.kind
save_dict['settings'].sigma.kind = kind
# Change how sigma is saved depending on the method (kind) used to
# generate sigma
if kind == 'file':
# Update save dictionary
save_dict['sigma'] = sigma.input_dict
# --------------------------------------------------
# Prepare pos if possible
# --------------------------------------------------
if hasattr(ICobj, 'pos'):
save_dict['pos'] = ICobj.pos
# --------------------------------------------------
# Prepare param if possible
# --------------------------------------------------
if hasattr(ICobj, 'snapshot_param'):
save_dict['snapshot_param'] = ICobj.snapshot_param
param_name = ICobj.settings.filenames.paramName
isaac.configsave(ICobj.snapshot_param, param_name)
print 'param file saved to {}'.format(param_name)
# --------------------------------------------------
# SAVE
# --------------------------------------------------
# Save snapshot if possible
if hasattr(ICobj, 'snapshot'):
fmt = pynbody.tipsy.TipsySnap
fname = ICobj.settings.filenames.snapshotName
save_dict['snapshotName'] = fname
ICobj.snapshot.write(fmt = fmt, filename = fname)
# Save the save dictionary
pickle.dump(save_dict,open(filename,'wb'))
print 'Initial conditions saved to {}'.format(filename)
def load(filename):
# Initialize a blank IC object
ICobj = IC()
# Load everything available from filename
input_dict = pickle.load(open(filename,'rb'))
# Parse the input dictionary
if 'settings' in input_dict:
print 'loading settings'
ICobj.settings = input_dict['settings']
if 'rho' in input_dict:
print 'loading rho'
ICobj.add.rho(input_dict['rho'])
if 'sigma' in input_dict:
print 'loading sigma'
ICobj.add.sigma(input_dict['sigma'])
if 'pos' in input_dict:
print 'loading pos'
ICobj.pos = input_dict['pos']
if 'snapshotName' in input_dict:
print 'loading snapshot'
fname = input_dict['snapshotName']
ICobj.snapshot = pynbody.load(fname)
if 'snapshot_param' in input_dict:
print 'loading param'
ICobj.snapshot_param = input_dict['snapshot_param']
return ICobj
class add:
"""
Contains modules to load data/parameters
"""
def __init__(self, ICobj):
self._parent = ICobj
def rho(self,rho_dict):
"""
Generates a rho object and stores it in ICobj.rho
rho_dict should be a dictionary containing:
'z': 1D array of z values
'r': 1D array of r values
'rho': 2D array of rho evaluated at z,r
        Example:
rho_dict = pickle.load(open('rhofile.p', 'rb')) # Load up a rho dict
ICobj.add.rho(rho_dict) # create ICobj.rho
"""
# Create rho object (includes a spline interpolation)
rho_binned = rho_dict['rho']
z_bins = rho_dict['z']
r_bins = rho_dict['r']
self._parent.rho = calc_rho_zr.rho_from_array(self._parent, rho_binned, z_bins, r_bins)
print 'rho stored in <IC instance>.rho'
def sigma(self, sigma_input):
"""
        Generates a sigma object and stores it in ICobj.sigma
IF ICobj.settings.sigma.kind = 'file':
sigma_input should be a dictionary containing 'sigma' and 'r'
Example:
sigma_dict = pickle.load(open('sigmafile.p','rb'))
IC.settings.sigma.kind = 'file'
IC.add.sigma(sigma_dict)
"""
sigma_out = make_sigma.sigma_gen(self._parent.settings, sigma_input)
# Assign to parent ICobj:
self._parent.sigma = sigma_out
print 'Sigma stored in <IC instance>.sigma'
class maker:
"""
    A wrapper containing various functions for generating initial conditions.
Outputs of the functions are saved to the IC object. The IC object is
referenced as self._parent. So to access temperature, simply call
self._parent.T(r)
"""
def __init__(self, ICobj):
self._parent = ICobj
def sigma_gen(self):
"""
        A wrapper for make_sigma.sigma_gen
See make_sigma.sigma_gen for documentation
Upon executing, generates sigma, pdf, and cdf_inv according to
settings.sigma.kind and saves to ICobj
USAGE:
ICobj.maker.sigma_gen()
"""
# Generate sigma
sigma = make_sigma.sigma_gen(self._parent.settings)
# Copy sigma to the parent (IC) object
self._parent.sigma = sigma
print 'Sigma stored in <IC instance>.sigma'
def rho_gen(self):
"""
A wrapper for calc_rho_zr.
Upon executing, generates rho and rho cdf inverse
"""
# Check that sigma has been generated
if not hasattr(self._parent, 'sigma'):
raise RuntimeError,'Must load/generate sigma before calculating rho'
# Numerically calculate rho(z,r) for a given sigma. rho(z,r)
# obeys vertical hydrostatic equilibrium (approximately)
rho_array, z, r = calc_rho_zr.rho_zr(self._parent)
# Create a complete rho object. Includes rho spline and CDF inverse
rho = calc_rho_zr.rho_from_array(self._parent, rho_array, z, r)
# Save to ICobj
self._parent.rho = rho
print 'rho stored in <IC instance>.rho'
def pos_gen(self, method = None):
"""
A wrapper for generating positions according to rho and sigma
Initializes a pos object (see pos_class.py) and saves it to ICobj.pos
IF called with method not set, the method used is:
ICobj.settings.pos_gen.method
"""
# Generate positions object
pos = pos_class.pos(self._parent, method)
# Save it to ICobj
self._parent.pos = pos
def snapshot_gen(self):
"""
A wrapper for generating a tipsy snapshot from the initial conditions
Uses make_snapshot.py
"""
# Generate snapshot
snapshot, snapshot_param = make_snapshot.snapshot_gen(self._parent)
# Save to ICobj
self._parent.snapshot = snapshot
self._parent.snapshot_param = snapshot_param
|
|
# Copyright 2022 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PeriodicAction that saves checkpoints periodically."""
import os
from typing import Iterable, Optional
from clu import periodic_actions
import jax
import numpy as np
from vmoe import multihost_utils
from vmoe.checkpoints import base as checkpoints_base
from vmoe.checkpoints import partitioned as checkpoints_partitioned
AsyncResult = checkpoints_partitioned.AsyncResult
Mesh = checkpoints_partitioned.Mesh
PyTree = checkpoints_partitioned.PyTree
ThreadPool = checkpoints_partitioned.ThreadPool
class PeriodicSaveCheckpoint(periodic_actions.PeriodicCallback):
"""Saves checkpoints of a partitioned training state periodically.
Example:
saver = PeriodicSaveCheckpoint(
prefix='/tmp/ckpt',
state_axis_resources=state_axis_resources,
every_steps=10)
for step in range(100):
state = update_state(...)
saver(step=step, state=state) # Saves at steps 0, 10, 20, 30, ...
"""
def __init__(
self,
*,
prefix: str,
state_axis_resources: PyTree,
mesh: Optional[Mesh] = None,
num_shards: int = 0,
num_threads: Optional[int] = None,
wait_seconds: Optional[int] = None,
every_steps: Optional[int] = None,
every_secs: Optional[float] = None,
on_steps: Optional[Iterable[int]] = None,
keep_last: Optional[int] = None,
keep_steps_multiple_of: Optional[int] = None,
execute_async: bool = True,
report_progress: Optional[periodic_actions.ReportProgress] = None,
report_progress_name: str = 'ckpt'):
"""Initializer.
Args:
prefix: Prefix for the checkpoint files. The step number is appended to
this when a checkpoint is written (e.g. prefix='ckpt_' gives checkpoints
'ckpt_1', 'ckpt_2', ...).
state_axis_resources: PyTree with PartitionSpec leaves, with the same
structure as the `state` to checkpoint in every step, indicating how
each axis of the corresponding array is partitioned across the axes of
the logical device mesh.
mesh: Logical device mesh used with pjit. If None, the active mesh will
be used.
num_shards: Number of checkpoint shards. If `num_shards <= 0`, the minimum
number of shards will be used. If `num_shards > 0`, this number is only
tentative.
num_threads: Number of threads to use for writing checkpoint shards. If
None, `multiprocessing.pool.cpu_count()` is used.
wait_seconds: If given, we wait at most this number of seconds for the
checkpoint writing to complete. Otherwise, TimeoutError is raised.
every_steps: If given, writes a checkpoint every `every_steps` steps.
every_secs: If given, writes a checkpoint every `every_secs` seconds.
on_steps: If given, writes a checkpoint on these particular steps.
keep_last: If given, we only keep the last `keep_last` checkpoints.
If None, only the last checkpoint is kept.
keep_steps_multiple_of: If given, all steps multiple of this number are
kept (in addition to the `keep_last` steps).
execute_async: If True, writes checkpoints shards asynchronously.
If False, waits `wait_seconds` for the writing to complete. Note that,
even if this is True, we always wait up to `wait_seconds` between two
consecutive checkpointing steps.
report_progress: When given, the `timed()` method of this `ReportProgress`
is used to time the saving of checkpoints.
report_progress_name: Name used by `ReportProgress.timed()`.
"""
self._thread_pool = ThreadPool(processes=num_threads)
self._async_result = None # type: Optional[AsyncResult]
self._wait_seconds = wait_seconds
self._makedirs(os.path.dirname(prefix))
keep_last = max(keep_last or 1, 1)
keep_multiple = max(keep_steps_multiple_of or 0, 0)
super().__init__(
every_steps=every_steps,
every_secs=every_secs,
on_steps=on_steps,
callback_fn=self._make_callback_fn(
prefix, state_axis_resources, mesh, num_shards, wait_seconds,
keep_last, keep_multiple, execute_async, self._thread_pool,
report_progress, report_progress_name),
# Note: save_checkpoint() is still asynchronous. This just means that
# we wait until the callback_fn returns.
execute_async=False,
pass_step_and_time=True)
def __del__(self):
if self._async_result:
self._block_async_result(self._wait_seconds)
self._thread_pool.close()
@classmethod
def _makedirs(cls, workdir: str):
# Process 0 creates the workdir if it doesn't exist. All processes wait
# until this is done.
if jax.process_index() == 0 and not os.path.exists(workdir):
checkpoints_base.gfile.makedirs(workdir)
multihost_utils.sync_devices(f'checkpoints:mkdir:{workdir}')
@classmethod
def _remove_old_checkpoints(cls, prefix: str, keep_last: int,
keep_multiple: int, thread_pool: ThreadPool):
def _parse_step_from_filepath(filepath):
m = checkpoints_base.CHECKPOINT_REGEX.fullmatch(filepath)
step_str = m.group(2) if m else None
return int(step_str[1:]) if step_str else None
def _find_step_numbers(filepaths):
for step in map(_parse_step_from_filepath, filepaths):
if step is not None:
yield step
def _remove():
# Find step number of pending shards.
workdir = os.path.dirname(prefix)
basename = os.path.basename(prefix)
prefix_tmp = os.path.join(workdir, f'.tmp.{basename}') + '*'
checkpoints_tmp = checkpoints_base.gfile.glob(prefix_tmp)
pending_steps = set(_find_step_numbers(checkpoints_tmp))
# Find all completed shards.
checkpoints = checkpoints_base.gfile.glob(prefix + '*')
completed_steps = set(_find_step_numbers(checkpoints))
# Keep `keep_last` completed steps.
keep_steps = set(sorted(completed_steps - pending_steps)[-keep_last:])
# Keep steps multiple of `keep_multiple`.
if keep_multiple > 0:
keep_steps.update([
step for step in completed_steps if step % keep_multiple == 0])
# Always keep pending steps.
keep_steps.update(pending_steps)
# Remove checkpoints.
def match_remove_fn(filepath):
# Returns True (to remove) if the step is not in `keep_steps`.
step = _parse_step_from_filepath(filepath)
return (step not in keep_steps) if step is not None else False
checkpoints_base.remove_checkpoints(
checkpoints, match_remove_fn, thread_pool=thread_pool)
# Only process 0 removes files. All processes wait until this is done.
if jax.process_index() == 0:
_remove()
multihost_utils.sync_devices(f'checkpoints:remove:{prefix}')
def _block_async_result(self, wait_seconds: Optional[int]):
try:
self._async_result.get(wait_seconds)
self._async_result = None
except TimeoutError:
raise TimeoutError('Timeout while writing checkpoint files after '
f'{wait_seconds} seconds.')
def _make_callback_fn(self, prefix, state_axis_resources, mesh, num_shards,
wait_seconds, keep_last, keep_multiple, execute_async,
thread_pool, report_progress, report_progress_name):
def callback_fn(step: int, t: float, state: PyTree):
del t # Unused.
# Wait up to `wait_seconds` seconds for the previous checkpoint to complete
# before starting to write a new one. If the timeout expires, an exception
# is raised. This avoids keeping multiple copies of the model in CPU memory.
if self._async_result:
self._block_async_result(wait_seconds)
multihost_utils.sync_devices(f'checkpoints:sync_pending:{prefix}')
# Remove outdated checkpoints before starting to write new ones.
self._remove_old_checkpoints(
prefix, keep_last, keep_multiple, thread_pool)
# Save new checkpoint.
self._async_result = checkpoints_partitioned.save_checkpoint(
prefix=f'{prefix}_{step}',
# Note: saving is faster if we transfer the data from device to CPU
# in one go.
tree=jax.tree_map(np.asarray, state),
axis_resources=state_axis_resources,
mesh=mesh,
num_shards=num_shards,
thread_pool=thread_pool,
makedirs=False,
overwrite=True)
# Optionally, wait up to `wait_seconds` for the checkpointing to finish, or
# raise an exception if writing doesn't finish in time.
if not execute_async:
self._block_async_result(wait_seconds)
multihost_utils.sync_devices(f'checkpoints:no_async:{prefix}')
if report_progress is None:
return callback_fn
else:
return report_progress.timed(
report_progress_name, wait_jax_async_dispatch=False)(callback_fn)
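# Illustrative sketch (not part of the class above): a standalone restatement
# of the retention rule implemented by `_remove_old_checkpoints`. The newest
# `keep_last` completed steps, every step that is a multiple of
# `keep_multiple`, and any step with pending shards are kept; everything else
# is a candidate for removal. The function name and example values below are
# hypothetical and only meant to clarify the rule.
def _select_steps_to_remove(completed_steps, pending_steps, keep_last,
                            keep_multiple):
  completed = set(completed_steps)
  pending = set(pending_steps)
  # Newest `keep_last` steps that are fully written.
  keep = set(sorted(completed - pending)[-keep_last:])
  # Also keep periodic milestones.
  if keep_multiple > 0:
    keep.update(step for step in completed if step % keep_multiple == 0)
  # Never remove steps that are still being written.
  keep.update(pending)
  return sorted(completed - keep)

# Example: with keep_last=2 and keep_multiple=1000, only step 1500 is removed.
assert _select_steps_to_remove(
    completed_steps=[1000, 1500, 2000, 2500, 3000],
    pending_steps=[3500],
    keep_last=2, keep_multiple=1000) == [1500]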
# Copyright 2011 OpenStack Foundation
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""RequestContext: context for requests that persist through all of nova."""
import copy
from keystoneclient import auth
from keystoneclient import service_catalog
from oslo_context import context
from oslo_db.sqlalchemy import enginefacade
from oslo_log import log as logging
from oslo_utils import timeutils
import six
from nova import exception
from nova.i18n import _, _LW
from nova import policy
from nova import utils
LOG = logging.getLogger(__name__)
class _ContextAuthPlugin(auth.BaseAuthPlugin):
"""A keystoneclient auth plugin that uses the values from the Context.
Ideally we would use the plugin provided by auth_token middleware however
this plugin isn't serialized yet so we construct one from the serialized
auth data.
"""
def __init__(self, auth_token, sc):
super(_ContextAuthPlugin, self).__init__()
self.auth_token = auth_token
sc = {'serviceCatalog': sc}
self.service_catalog = service_catalog.ServiceCatalogV2(sc)
def get_token(self, *args, **kwargs):
return self.auth_token
def get_endpoint(self, session, service_type=None, interface=None,
region_name=None, service_name=None, **kwargs):
return self.service_catalog.url_for(service_type=service_type,
service_name=service_name,
endpoint_type=interface,
region_name=region_name)
@enginefacade.transaction_context_provider
class RequestContext(context.RequestContext):
"""Security context and request information.
Represents the user taking a given action within the system.
"""
def __init__(self, user_id=None, project_id=None,
is_admin=None, read_deleted="no",
roles=None, remote_address=None, timestamp=None,
request_id=None, auth_token=None, overwrite=True,
quota_class=None, user_name=None, project_name=None,
service_catalog=None, instance_lock_checked=False,
user_auth_plugin=None, **kwargs):
""":param read_deleted: 'no' indicates deleted records are hidden,
'yes' indicates deleted records are visible,
'only' indicates that *only* deleted records are visible.
:param overwrite: Set to False to ensure that the greenthread local
copy of the index is not overwritten.
:param user_auth_plugin: The auth plugin for the current request's
authentication data.
:param kwargs: Extra arguments that might be present but are ignored,
because they possibly came in from older rpc messages.
"""
user = kwargs.pop('user', None)
tenant = kwargs.pop('tenant', None)
super(RequestContext, self).__init__(
auth_token=auth_token,
user=user_id or user,
tenant=project_id or tenant,
domain=kwargs.pop('domain', None),
user_domain=kwargs.pop('user_domain', None),
project_domain=kwargs.pop('project_domain', None),
is_admin=is_admin,
read_only=kwargs.pop('read_only', False),
show_deleted=kwargs.pop('show_deleted', False),
request_id=request_id,
resource_uuid=kwargs.pop('resource_uuid', None),
overwrite=overwrite)
# oslo_context's RequestContext.to_dict() generates this field, we can
# safely ignore this as we don't use it.
kwargs.pop('user_identity', None)
if kwargs:
LOG.warning(_LW('Arguments dropped when creating context: %s') %
str(kwargs))
# FIXME(dims): user_id and project_id duplicate information that is
# already present in the oslo_context's RequestContext. We need to
# get rid of them.
self.user_id = user_id
self.project_id = project_id
self.roles = roles or []
self.read_deleted = read_deleted
self.remote_address = remote_address
if not timestamp:
timestamp = timeutils.utcnow()
if isinstance(timestamp, six.string_types):
timestamp = timeutils.parse_strtime(timestamp)
self.timestamp = timestamp
if service_catalog:
# Only include required parts of service_catalog
self.service_catalog = [s for s in service_catalog
if s.get('type') in ('volume', 'volumev2', 'key-manager')]
else:
# If the list is empty or None.
self.service_catalog = []
self.instance_lock_checked = instance_lock_checked
# NOTE(markmc): this attribute is currently only used by the
# rs_limits turnstile pre-processor.
# See https://lists.launchpad.net/openstack/msg12200.html
self.quota_class = quota_class
self.user_name = user_name
self.project_name = project_name
self.is_admin = is_admin
self.user_auth_plugin = user_auth_plugin
if self.is_admin is None:
self.is_admin = policy.check_is_admin(self)
def get_auth_plugin(self):
if self.user_auth_plugin:
return self.user_auth_plugin
else:
return _ContextAuthPlugin(self.auth_token, self.service_catalog)
def _get_read_deleted(self):
return self._read_deleted
def _set_read_deleted(self, read_deleted):
if read_deleted not in ('no', 'yes', 'only'):
raise ValueError(_("read_deleted can only be one of 'no', "
"'yes' or 'only', not %r") % read_deleted)
self._read_deleted = read_deleted
def _del_read_deleted(self):
del self._read_deleted
read_deleted = property(_get_read_deleted, _set_read_deleted,
_del_read_deleted)
def to_dict(self):
values = super(RequestContext, self).to_dict()
# FIXME(dims): defensive hasattr() checks need to be
# removed once we figure out why we are seeing stack
# traces
values.update({
'user_id': getattr(self, 'user_id', None),
'project_id': getattr(self, 'project_id', None),
'is_admin': getattr(self, 'is_admin', None),
'read_deleted': getattr(self, 'read_deleted', 'no'),
'roles': getattr(self, 'roles', None),
'remote_address': getattr(self, 'remote_address', None),
'timestamp': utils.strtime(self.timestamp) if hasattr(
self, 'timestamp') else None,
'request_id': getattr(self, 'request_id', None),
'quota_class': getattr(self, 'quota_class', None),
'user_name': getattr(self, 'user_name', None),
'service_catalog': getattr(self, 'service_catalog', None),
'project_name': getattr(self, 'project_name', None),
'instance_lock_checked': getattr(self, 'instance_lock_checked',
False)
})
return values
@classmethod
def from_dict(cls, values):
return cls(**values)
def elevated(self, read_deleted=None):
"""Return a version of this context with admin flag set."""
context = copy.deepcopy(self)
context.is_admin = True
if 'admin' not in context.roles:
context.roles.append('admin')
if read_deleted is not None:
context.read_deleted = read_deleted
return context
def __str__(self):
return "<Context %s>" % self.to_dict()
def get_admin_context(read_deleted="no"):
return RequestContext(user_id=None,
project_id=None,
is_admin=True,
read_deleted=read_deleted,
overwrite=False)
def is_user_context(context):
"""Indicates if the request context is a normal user."""
if not context:
return False
if context.is_admin:
return False
if not context.user_id or not context.project_id:
return False
return True
def require_admin_context(ctxt):
"""Raise exception.AdminRequired() if context is not an admin context."""
if not ctxt.is_admin:
raise exception.AdminRequired()
def require_context(ctxt):
"""Raise exception.Forbidden() if context is not a user or an
admin context.
"""
if not ctxt.is_admin and not is_user_context(ctxt):
raise exception.Forbidden()
def authorize_project_context(context, project_id):
"""Ensures a request has permission to access the given project."""
if is_user_context(context):
if not context.project_id:
raise exception.Forbidden()
elif context.project_id != project_id:
raise exception.Forbidden()
def authorize_user_context(context, user_id):
"""Ensures a request has permission to access the given user."""
if is_user_context(context):
if not context.user_id:
raise exception.Forbidden()
elif context.user_id != user_id:
raise exception.Forbidden()
def authorize_quota_class_context(context, class_name):
"""Ensures a request has permission to access the given quota class."""
if is_user_context(context):
if not context.quota_class:
raise exception.Forbidden()
elif context.quota_class != class_name:
raise exception.Forbidden()
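# Illustrative usage sketch (not part of the upstream module): shows how the
# helpers above fit together. It is wrapped in a function so nothing runs at
# import time. `is_admin` is passed explicitly so that no policy rules need to
# be configured; the user, project and role values are hypothetical examples.
def _example_context_usage():
    user_ctx = RequestContext(
        user_id='demo-user', project_id='demo-project',
        roles=['member'], is_admin=False)
    assert is_user_context(user_ctx)
    # elevated() returns an admin copy; the original context is unchanged.
    admin_ctx = user_ctx.elevated()
    assert admin_ctx.is_admin and 'admin' in admin_ctx.roles
    require_admin_context(admin_ctx)  # does not raise
    # Admin contexts bypass the project ownership check.
    authorize_project_context(admin_ctx, 'any-project')
    # read_deleted only accepts 'no', 'yes' or 'only'.
    try:
        user_ctx.read_deleted = 'maybe'
    except ValueError:
        pass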