input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
<filename>python/envs/hackathon/lib/python3.7/site-packages/mkl_fft/tests/test_fft1d.py
#!/usr/bin/env python
# Copyright (c) 2017-2019, Intel Corporation
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Intel Corporation nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import division, absolute_import, print_function
import numpy as np
from numpy.testing import (
TestCase, run_module_suite, assert_, assert_raises, assert_equal,
assert_warns, assert_allclose)
from numpy import random as rnd
import sys
import warnings
import mkl_fft
def naive_fft1d(vec):
    """Reference O(L^2) DFT used to validate the fast implementations."""
    n = len(vec)
    # Full DFT matrix of phases: W[k, j] = exp(-2j*pi*k*j/n).
    k = np.arange(n)
    dft_matrix = np.exp(np.outer(k, k) * (-2j * np.pi / n))
    return dft_matrix.dot(vec)
def _datacopied(arr, original):
"""
Strict check for `arr` not sharing any data with `original`,
under the assumption that arr = asarray(original)
"""
if arr is original:
return False
if not isinstance(original, np.ndarray) and hasattr(original, '__array__'):
return False
return arr.base is None
class Test_mklfft_vector(TestCase):
    """1D FFT round-trip and consistency checks on 128-element vectors
    covering all four floating dtypes (float32/64, complex64/128)."""

    def setUp(self):
        # Fixed seed for reproducibility; the draw order below defines each
        # fixture array and must not be reordered.
        rnd.seed(1234567)
        self.xd1 = rnd.standard_normal(128)        # real double
        self.xf1 = self.xd1.astype(np.float32)     # real single
        # View pairs of doubles as complex128, then drop the unit axis.
        self.xz1 = rnd.standard_normal((128,2)).view(dtype=np.complex128).squeeze()
        self.xc1 = self.xz1.astype(np.complex64)   # complex single

    def test_vector1(self):
        """mkl_fft.fft matches the naive O(n^2) reference DFT."""
        f1 = mkl_fft.fft(self.xz1)
        f2 = naive_fft1d(self.xz1)
        assert_allclose(f1,f2, rtol=1e-7, atol=2e-12)
        # Single precision needs looser tolerances.
        f1 = mkl_fft.fft(self.xc1)
        f2 = naive_fft1d(self.xc1)
        assert_allclose(f1,f2, rtol=2e-6, atol=2e-6)

    def test_vector2(self):
        "ifft(fft(x)) is identity"
        f1 = mkl_fft.fft(self.xz1)
        f2 = mkl_fft.ifft(f1)
        assert_(np.allclose(self.xz1,f2))
        f1 = mkl_fft.fft(self.xc1)
        f2 = mkl_fft.ifft(f1)
        assert_( np.allclose(self.xc1,f2))
        f1 = mkl_fft.fft(self.xd1)
        f2 = mkl_fft.ifft(f1)
        assert_( np.allclose(self.xd1,f2))
        f1 = mkl_fft.fft(self.xf1)
        f2 = mkl_fft.ifft(f1)
        # float32 round-trip needs an absolute tolerance.
        assert_( np.allclose(self.xf1,f2, atol = 2.0e-7))

    def test_vector3(self):
        "fft(ifft(x)) is identity"
        f1 = mkl_fft.ifft(self.xz1)
        f2 = mkl_fft.fft(f1)
        assert_(np.allclose(self.xz1,f2))
        f1 = mkl_fft.ifft(self.xc1)
        f2 = mkl_fft.fft(f1)
        assert_( np.allclose(self.xc1,f2))
        f1 = mkl_fft.ifft(self.xd1)
        f2 = mkl_fft.fft(f1)
        assert_( np.allclose(self.xd1,f2))
        f1 = mkl_fft.ifft(self.xf1)
        f2 = mkl_fft.fft(f1)
        assert_( np.allclose(self.xf1, f2, atol = 2.0e-7))

    def test_vector4(self):
        "fft of strided is same as fft of contiguous copy"
        x = self.xz1[::2]
        f1 = mkl_fft.fft(x)
        f2 = mkl_fft.fft(x.copy())
        assert_(np.allclose(f1,f2))
        # Negative stride (reversed view).
        x = self.xz1[::-1]
        f1 = mkl_fft.fft(x)
        f2 = mkl_fft.fft(x.copy())
        assert_(np.allclose(f1,f2))

    def test_vector5(self):
        "fft in-place is the same as fft out-of-place"
        x = self.xz1.copy()[::-2]
        f1 = mkl_fft.fft(x, overwrite_x=True)
        f2 = mkl_fft.fft(self.xz1[::-2])
        assert_(np.allclose(f1,f2))

    def test_vector6(self):
        "fft in place"
        x = self.xz1.copy()
        f1 = mkl_fft.fft(x, overwrite_x=True)
        assert_(not _datacopied(f1, x)) # this is in-place
        x = self.xz1.copy()
        f1 = mkl_fft.fft(x[::-2], overwrite_x=True)
        assert_( not np.allclose(x, self.xz1) ) # this is also in-place
        # Elements not covered by the strided view survive untouched;
        # the transformed slots hold the FFT result.
        assert_( np.allclose(x[-2::-2], self.xz1[-2::-2]) )
        assert_( np.allclose(x[-1::-2], f1) )

    def test_vector7(self):
        "fft of real array is the same as fft of its complex cast"
        x = self.xd1[3:17:2]
        f1 = mkl_fft.fft(x)
        f2 = mkl_fft.fft(x.astype(np.complex128))
        assert_(np.allclose(f1,f2))

    def test_vector8(self):
        "ifft of real array is the same as ifft of its complex cast"
        x = self.xd1[3:17:2]
        f1 = mkl_fft.ifft(x)
        f2 = mkl_fft.ifft(x.astype(np.complex128))
        assert_(np.allclose(f1,f2))

    def test_vector9(self):
        "works on subtypes of ndarray"
        mask = np.zeros(self.xd1.shape, dtype='int')
        mask[1] = 1
        mask[-2] = 1
        x = np.ma.masked_array(self.xd1, mask=mask)
        # The mask is expected to be ignored: the FFT of the masked array
        # equals the FFT of the underlying data.
        f1 = mkl_fft.fft(x)
        f2 = mkl_fft.fft(self.xd1)
        assert_allclose(f1, f2)

    def test_vector10(self):
        "check n for real arrays"
        x = self.xd1[:8].copy()
        # n smaller than the input: truncation.
        f1 = mkl_fft.fft(x, n = 7)
        f2 = mkl_fft.fft(self.xd1[:7])
        assert_allclose(f1, f2)
        # n larger than the input: zero padding.
        f1 = mkl_fft.fft(x, n = 9)
        y = self.xd1[:9].copy()
        y[-1] = 0.0
        f2 = mkl_fft.fft(y)
        assert_allclose(f1, f2)

    def test_vector11(self):
        "check n for complex arrays"
        x = self.xz1[:8].copy()
        # Truncation case.
        f1 = mkl_fft.fft(x, n = 7)
        f2 = mkl_fft.fft(self.xz1[:7])
        assert_allclose(f1, f2)
        # Zero-padding case.
        f1 = mkl_fft.fft(x, n = 9)
        y = self.xz1[:9].copy()
        y[-1] = 0.0 + 0.0j
        f2 = mkl_fft.fft(y)
        assert_allclose(f1, f2)

    def test_vector12(self):
        "check fft of an integer-valued array matches its float64 cast"
        x = np.arange(20)
        f1 = mkl_fft.fft(x)
        f2 = mkl_fft.fft(x.astype(np.float64))
        assert_allclose(f1, f2)
class Test_mklfft_matrix(TestCase):
    """FFT tests on 2D inputs: axis handling, strided views, ndarray subtypes."""

    def setUp(self):
        rnd.seed(1234567)
        self.ad2 = rnd.standard_normal((4, 3))
        self.af2 = self.ad2.astype(np.float32)
        # Combine the trailing pair of reals into one complex value per entry.
        self.az2 = np.dot(
            rnd.standard_normal((17, 15, 2)),
            np.array([1.0 + 0.0j, 0.0 + 1.0j], dtype=np.complex128)
        )
        self.ac2 = self.az2.astype(np.complex64)
        # NOTE(review): np.matrix is deprecated in modern NumPy; fixture kept
        # to exercise ndarray subclasses.
        self.mat = np.matrix(self.az2)
        self.xd1 = rnd.standard_normal(128)

    def test_matrix1(self):
        """2D fft along an axis equals row/column-wise 1D ffts."""
        x = self.az2.copy()
        f1 = mkl_fft.fft(x)
        f2 = np.array([ mkl_fft.fft(x[i]) for i in range(x.shape[0])])
        assert_allclose(f1, f2)
        f1 = mkl_fft.fft(x, axis=0)
        f2 = np.array([ mkl_fft.fft(x[:, i]) for i in range(x.shape[1])]).T
        assert_allclose(f1, f2)

    def test_matrix2(self):
        """np.matrix input produces the same values as a plain ndarray."""
        f1 = mkl_fft.fft(self.az2)
        f2 = mkl_fft.fft(self.mat)
        assert_allclose(f1, f2)

    def test_matrix3(self):
        """Strided/reversed views transform like their contiguous copies."""
        x = self.az2.copy()
        f1 = mkl_fft.fft(x[::3,::-1])
        f2 = mkl_fft.fft(x[::3,::-1].copy())
        assert_allclose(f1, f2)

    def test_matrix4(self):
        """overwrite_x=True gives the same result as out-of-place."""
        x = self.az2.copy()
        f1 = mkl_fft.fft(x[::3,::-1])
        f2 = mkl_fft.fft(x[::3,::-1], overwrite_x=True)
        assert_allclose(f1, f2)

    def test_matrix5(self):
        """ifft(fft(x)) round-trips a real 2D array."""
        x = self.ad2;
        f1 = mkl_fft.fft(x)
        f2 = mkl_fft.ifft(f1)
        assert_allclose(x, f2, atol=1e-10)

    def test_matrix6(self):
        """fft(ifft(x)) round-trips a real 2D array."""
        x = self.ad2;
        f1 = mkl_fft.ifft(x)
        f2 = mkl_fft.fft(f1)
        assert_allclose(x, f2, atol=1e-10)

    def test_matrix7(self):
        """Real-input 2D fft agrees with row/column-wise 1D ffts."""
        x = self.ad2.copy()
        f1 = mkl_fft.fft(x)
        f2 = np.array([ mkl_fft.fft(x[i]) for i in range(x.shape[0])])
        assert_allclose(f1, f2)
        f1 = mkl_fft.fft(x, axis=0)
        f2 = np.array([ mkl_fft.fft(x[:, i]) for i in range(x.shape[1])]).T
        assert_allclose(f1, f2)

    def test_matrix8(self):
        """Overlapping rows built with as_strided transform like a dense copy."""
        from numpy.lib.stride_tricks import as_strided
        x = self.xd1[:10].copy()
        # Rows overlap: row i starts at element 2*i and has length 4.
        y = as_strided(x, shape=(4,4,), strides=(2*x.itemsize, x.itemsize))
        f1 = mkl_fft.fft(y)
        f2 = mkl_fft.fft(y.copy())
        assert_allclose(f1, f2, atol=1e-15, rtol=1e-7)
class Test_mklfft_rank3(TestCase):
    """FFT round-trip and layout tests on 3D arrays over every axis."""

    def setUp(self):
        rnd.seed(1234567)
        self.ad3 = rnd.standard_normal((7, 11, 19))
        self.af3 = self.ad3.astype(np.float32)
        # Combine the trailing pair of reals into one complex value per entry.
        self.az3 = np.dot(
            rnd.standard_normal((17, 13, 15, 2)),
            np.array([1.0 + 0.0j, 0.0 + 1.0j], dtype=np.complex128)
        )
        self.ac3 = self.az3.astype(np.complex64)

    def test_array1(self):
        """ifft(fft(x)) round-trips a complex 3D array along every axis."""
        x = self.az3
        for ax in range(x.ndim):
            f1 = mkl_fft.fft(x, axis = ax)
            f2 = mkl_fft.ifft(f1, axis = ax)
            assert_allclose(f2, x, atol=2e-15)

    def test_array2(self):
        """ifft(fft(x)) round-trips a real 3D array along every axis."""
        x = self.ad3
        for ax in range(x.ndim):
            f1 = mkl_fft.fft(x, axis = ax)
            f2 = mkl_fft.ifft(f1, axis = ax)
            assert_allclose(f2, x, atol=2e-15)

    def test_array3(self):
        """fft(ifft(x)) round-trips a complex 3D array along every axis."""
        x = self.az3
        for ax in range(x.ndim):
            f1 = mkl_fft.ifft(x, axis = ax)
            f2 = mkl_fft.fft(f1, axis = ax)
            assert_allclose(f2, x, atol=2e-15)

    def test_array4(self):
        """fft(ifft(x)) round-trips a real 3D array along every axis."""
        x = self.ad3
        for ax in range(x.ndim):
            f1 = mkl_fft.ifft(x, axis = ax)
            f2 = mkl_fft.fft(f1, axis = ax)
            assert_allclose(f2, x, atol=2e-15)

    def test_array5(self):
        """Inputs with zero strides are handled correctly"""
        z = self.az3
        # np.newaxis inserts a length-1 axis with stride 0; reshaping to the
        # same shape yields an equivalent array with ordinary strides.
        z1 = z[np.newaxis]
        f1 = mkl_fft.fft(z1, axis=-1)
        f2 = mkl_fft.fft(z1.reshape(z1.shape), axis=-1)
        assert_allclose(f1, f2, atol=2e-15)
        z1 = z[:, np.newaxis]
        f1 = mkl_fft.fft(z1, axis=-1)
        f2 = mkl_fft.fft(z1.reshape(z1.shape), axis=-1)
        assert_allclose(f1, f2, atol=2e-15)
        z1 = z[:, :, np.newaxis]
        f1 = mkl_fft.fft(z1, axis=-1)
        f2 = mkl_fft.fft(z1.reshape(z1.shape), axis=-1)
        assert_allclose(f1, f2, atol=2e-15)
        z1 = z[:, :, :, np.newaxis]
        f1 = mkl_fft.fft(z1, axis=-1)
        f2 = mkl_fft.fft(z1.reshape(z1.shape), axis=-1)
        assert_allclose(f1, f2, atol=2e-15)

    def test_array6(self):
        """Inputs with Fortran layout are handled correctly, issue 29"""
        z = self.az3
        # Re-lay the same values out in column-major order.
        z = z.astype(z.dtype, order='F')
        y1 = mkl_fft.fft(z, axis=0)
        y2 = mkl_fft.fft(self.az3, axis=0)
        assert_allclose(y1, y2, atol=2e-15)
        y1 = mkl_fft.fft(z, axis=-1)
        y2 = mkl_fft.fft(self.az3, axis=-1)
        assert_allclose(y1, y2, atol=2e-15)
class Test_mklfft_rfft(TestCase):
    """Round-trip tests for the packed real-to-complex transforms."""

    def setUp(self):
        rnd.seed(1234567)
        self.v1 = rnd.randn(16)        # 1D real vector
        self.m2 = rnd.randn(5,7)       # 2D real matrix
        self.t3 = rnd.randn(5,7,11)    # 3D real tensor

    def test1(self):
        """irfft(rfft(x)) recovers a real vector."""
        x = self.v1.copy()
        f1 = mkl_fft.rfft(x)
        f2 = mkl_fft.irfft(f1)
        assert_allclose(f2,x)

    def test2(self):
        """rfft(irfft(x)) recovers a real vector."""
        x = self.v1.copy()
        f1 = mkl_fft.irfft(x)
        f2 = mkl_fft.rfft(f1)
        assert_allclose(f2,x)

    def test3(self):
        """2D irfft(rfft(x)) round-trip over axes, dtypes and overwrite_x."""
        for a in range(0,2):
            for ovwr_x in [True, False]:
                # float32 needs a looser absolute tolerance than float64.
                for dt, atol in zip([np.float32, np.float64], [2e-7, 2e-15]):
                    x = self.m2.copy().astype(dt)
                    f1 = mkl_fft.rfft(x, axis=a, overwrite_x=ovwr_x)
                    f2 = mkl_fft.irfft(f1, axis=a, overwrite_x=ovwr_x)
                    assert_allclose(f2, self.m2.astype(dt), atol=atol)

    def test4(self):
        """2D rfft(irfft(x)) round-trip over axes, dtypes and overwrite_x."""
        for a in range(0,2):
            for ovwr_x in [True, False]:
                for dt, atol in zip([np.float32, np.float64], [2e-7, 2e-15]):
                    x = self.m2.copy().astype(dt)
                    f1 = mkl_fft.irfft(x, axis=a, overwrite_x=ovwr_x)
                    f2 = mkl_fft.rfft(f1, axis=a, overwrite_x=ovwr_x)
                    assert_allclose(f2, self.m2.astype(dt), atol=atol)

    def test5(self):
        """3D rfft(irfft(x)) round-trip over axes, dtypes and overwrite_x."""
        for a in range(0,3):
            for ovwr_x in [True, False]:
                for dt, atol in zip([np.float32, np.float64], [4e-7, 4e-15]):
                    x = self.t3.copy().astype(dt)
                    f1 = mkl_fft.irfft(x, axis=a, overwrite_x=ovwr_x)
                    f2 = mkl_fft.rfft(f1, axis=a, overwrite_x=ovwr_x)
                    assert_allclose(f2, self.t3.astype(dt), atol=atol)
if __name__ == "__main__":
run_module_suite(argv = | |
import items_setup as item_s
import shops_setup as shop_s
ARMOR = {
1000: "Shaved",
1100: "Receding",
1200: "Short",
1300: "Swept Back",
1400: "Ponytail",
1500: "Wild",
1600: "Parted Center",
1700: "Semi-Long",
1800: "Curly",
1900: "Bobbed",
2000: "Male 11",
2100: "Male 12",
2200: "Male 13",
2300: "Male 14",
2400: "Male 15",
2500: "Male 16",
2600: "Male 17",
2700: "Male 18",
2800: "Male 19",
2900: "Male 20",
3000: "Shaved",
3100: "Very Short",
3200: "Wave",
3300: "Straight A",
3400: "Straight B",
3500: "Ponytail A",
3600: "Ponytail B",
3700: "Pigtails",
3800: "Bun",
3900: "Braided",
4000: "Female 11",
4100: "Female 12",
4200: "Female 13",
4300: "Female 14",
4400: "Female 15",
4500: "Female 16",
4600: "Female 17",
4700: "Female 18",
4800: "Female 19",
4900: "Female 20",
5000: "Travel Hairstyle",
10000: "Catarina Helm",
10001: "Catarina Helm +1",
10002: "Catarina Helm +2",
10003: "Catarina Helm +3",
10004: "Catarina Helm +4",
10005: "Catarina Helm +5",
11000: "Catarina Armor",
11001: "Catarina Armor +1",
11002: "Catarina Armor +2",
11003: "Catarina Armor +3",
11004: "Catarina Armor +4",
11005: "Catarina Armor +5",
12000: "Catarina Gauntlets",
12001: "Catarina Gauntlets +1",
12002: "Catarina Gauntlets +2",
12003: "Catarina Gauntlets +3",
12004: "Catarina Gauntlets +4",
12005: "Catarina Gauntlets +5",
13000: "Catarina Leggings",
13001: "Catarina Leggings +1",
13002: "Catarina Leggings +2",
13003: "Catarina Leggings +3",
13004: "Catarina Leggings +4",
13005: "Catarina Leggings +5",
20000: "Paladin Helm",
20001: "Paladin Helm +1",
20002: "Paladin Helm +2",
20003: "Paladin Helm +3",
20004: "Paladin Helm +4",
20005: "Paladin Helm +5",
21000: "Paladin Armor",
21001: "Paladin Armor +1",
21002: "Paladin Armor +2",
21003: "Paladin Armor +3",
21004: "Paladin Armor +4",
21005: "Paladin Armor +5",
22000: "Paladin Gauntlets",
22001: "Paladin Gauntlets +1",
22002: "Paladin Gauntlets +2",
22003: "Paladin Gauntlets +3",
22004: "Paladin Gauntlets +4",
22005: "Paladin Gauntlets +5",
23000: "Paladin Leggings",
23001: "Paladin Leggings +1",
23002: "Paladin Leggings +2",
23003: "Paladin Leggings +3",
23004: "Paladin Leggings +4",
23005: "Paladin Leggings +5",
40000: "Dark Mask",
40001: "Dark Mask +1",
40002: "Dark Mask +2",
40003: "Dark Mask +3",
40004: "Dark Mask +4",
40005: "Dark Mask +5",
41000: "Dark Armor",
41001: "Dark Armor +1",
41002: "Dark Armor +2",
41003: "Dark Armor +3",
41004: "Dark Armor +4",
41005: "Dark Armor +5",
42000: "Dark Gauntlets",
42001: "Dark Gauntlets +1",
42002: "Dark Gauntlets +2",
42003: "Dark Gauntlets +3",
42004: "Dark Gauntlets +4",
42005: "Dark Gauntlets +5",
43000: "Dark Leggings",
43001: "Dark Leggings +1",
43002: "Dark Leggings +2",
43003: "Dark Leggings +3",
43004: "Dark Leggings +4",
43005: "Dark Leggings +5",
50000: "Brigand Hood",
50001: "Brigand Hood +1",
50002: "Brigand Hood +2",
50003: "Brigand Hood +3",
50004: "Brigand Hood +4",
50005: "Brigand Hood +5",
50006: "Brigand Hood +6",
50007: "Brigand Hood +7",
50008: "Brigand Hood +8",
50009: "Brigand Hood +9",
50010: "Brigand Hood +10",
51000: "Brigand Armor",
51001: "Brigand Armor +1",
51002: "Brigand Armor +2",
51003: "Brigand Armor +3",
51004: "Brigand Armor +4",
51005: "Brigand Armor +5",
51006: "Brigand Armor +6",
51007: "Brigand Armor +7",
51008: "Brigand Armor +8",
51009: "Brigand Armor +9",
51010: "Brigand Armor +10",
52000: "Brigand Gauntlets",
52001: "Brigand Gauntlets +1",
52002: "Brigand Gauntlets +2",
52003: "Brigand Gauntlets +3",
52004: "Brigand Gauntlets +4",
52005: "Brigand Gauntlets +5",
52006: "Brigand Gauntlets +6",
52007: "Brigand Gauntlets +7",
52008: "Brigand Gauntlets +8",
52009: "Brigand Gauntlets +9",
52010: "Brigand Gauntlets +10",
53000: "Brigand Trousers",
53001: "Brigand Trousers +1",
53002: "Brigand Trousers +2",
53003: "Brigand Trousers +3",
53004: "Brigand Trousers +4",
53005: "Brigand Trousers +5",
53006: "Brigand Trousers +6",
53007: "Brigand Trousers +7",
53008: "Brigand Trousers +8",
53009: "Brigand Trousers +9",
53010: "Brigand Trousers +10",
60000: "Shadow Mask",
60001: "Shadow Mask +1",
60002: "Shadow Mask +2",
60003: "Shadow Mask +3",
60004: "Shadow Mask +4",
60005: "Shadow Mask +5",
61000: "Shadow Garb",
61001: "Shadow Garb +1",
61002: "Shadow Garb +2",
61003: "Shadow Garb +3",
61004: "Shadow Garb +4",
61005: "Shadow Garb +5",
62000: "Shadow Gauntlets",
62001: "Shadow Gauntlets +1",
62002: "Shadow Gauntlets +2",
62003: "Shadow Gauntlets +3",
62004: "Shadow Gauntlets +4",
62005: "Shadow Gauntlets +5",
63000: "Shadow Leggings",
63001: "Shadow Leggings +1",
63002: "Shadow Leggings +2",
63003: "Shadow Leggings +3",
63004: "Shadow Leggings +4",
63005: "Shadow Leggings +5",
70000: "Black Iron Helm",
70001: "Black Iron Helm +1",
70002: "Black Iron Helm +2",
70003: "Black Iron Helm +3",
70004: "Black Iron Helm +4",
70005: "Black Iron Helm +5",
71000: "Black Iron Armor",
71001: "Black Iron Armor +1",
71002: "Black Iron Armor +2",
71003: "Black Iron Armor +3",
71004: "Black Iron Armor +4",
71005: "Black Iron Armor +5",
72000: "Black Iron Gauntlets",
72001: "Black Iron Gauntlets +1",
72002: "Black Iron Gauntlets +2",
72003: "Black Iron Gauntlets +3",
72004: "Black Iron Gauntlets +4",
72005: "Black Iron Gauntlets +5",
73000: "Black Iron Leggings",
73001: "Black Iron Leggings +1",
73002: "Black Iron Leggings +2",
73003: "Black Iron Leggings +3",
73004: "Black Iron Leggings +4",
73005: "Black Iron Leggings +5",
80000: "Smough's Helm",
81000: "Smough's Armor",
82000: "Smough's Gauntlets",
83000: "Smough's Leggings",
90000: "Six-Eyed Helm of the Channelers",
91000: "Robe of the Channelers",
92000: "Gauntlets of the Channelers",
93000: "Waistcloth of the Channelers",
100000: "Helm of Favor",
100001: "Helm of Favor +1",
100002: "Helm of Favor +2",
100003: "Helm of Favor +3",
100004: "Helm of Favor +4",
100005: "Helm of Favor +5",
101000: "Embraced Armor of Favor",
101001: "Embraced Armor of Favor +1",
101002: "Embraced Armor of Favor +2",
101003: "Embraced Armor of Favor +3",
101004: "Embraced Armor of Favor +4",
101005: "Embraced Armor of Favor +5",
102000: "Gauntlets of Favor",
102001: "Gauntlets of Favor +1",
102002: "Gauntlets of Favor +2",
102003: "Gauntlets of Favor +3",
102004: "Gauntlets of Favor +4",
102005: "Gauntlets of Favor +5",
103000: "Leggings of Favor",
103001: "Leggings of Favor +1",
103002: "Leggings of Favor +2",
103003: "Leggings of Favor +3",
103004: "Leggings of Favor +4",
103005: "Leggings of Favor +5",
110000: "Helm of the Wise",
111000: "Armor of the Glorious",
112000: "Gauntlets of the Vanquisher",
113000: "Boots of the Explorer",
120000: "Stone Helm",
121000: "Stone Armor",
122000: "Stone Gauntlets",
123000: "Stone Leggings",
130000: "Crystalline Helm",
131000: "Crystalline Armor",
132000: "Crystalline Gauntlets",
133000: "Crystalline Leggings",
140000: "Mask of the Sealer",
140001: "Mask of the Sealer +1",
140002: "Mask of the Sealer +2",
140003: "Mask of the Sealer +3",
140004: "Mask of the Sealer +4",
140005: "Mask of the Sealer +5",
141000: "Crimson Robe",
141001: "Crimson Robe +1",
141002: "Crimson Robe +2",
141003: "Crimson | |
<reponame>yxia-fb/shaDow-GNN
import numpy as np
import scipy.sparse
from typing import Union, List
from dataclasses import dataclass, field, fields, InitVar
import scipy.sparse as sp
@dataclass
class Subgraph:
    """
    Represents the meta information of sampled subgraphs.

    The subgraph adjacency is kept in CSR form (indptr / indices / data),
    while `node` and `edge_index` map subgraph-local node / edge positions
    back to IDs in the full training graph.
    """
    # data fields
    indptr : np.ndarray        # CSR row pointer (len = num subg nodes + 1)
    indices : np.ndarray       # CSR column indices (subgraph-local node IDs)
    data : np.ndarray          # CSR edge values
    node : np.ndarray          # full-graph node ID for each subgraph node
    edge_index : np.ndarray    # full-graph edge ID for each subgraph edge
    target : np.ndarray        # subgraph-local indices of the target nodes
    hop : np.ndarray           # per-node hop distance (may be empty)
    ppr : np.ndarray           # per-node PPR score (may be empty)
    # init fields (consumed by __post_init__, not stored as attributes)
    cap_node_full : InitVar[int]=None    # upper bound on full-graph node count
    cap_edge_full : InitVar[int]=None    # upper bound on full-graph edge count
    cap_node_subg : InitVar[int]=None    # upper bound on subgraph node count
    cap_edge_subg : InitVar[int]=None    # upper bound on subgraph edge count
    validate : InitVar[bool]=True        # run check_valid() after init
    # summary
    names_data_fields = ['indptr', 'indices', 'data', 'node', 'edge_index', 'target', 'hop', 'ppr']

    def __post_init__(self, cap_node_full, cap_edge_full, cap_node_subg, cap_edge_subg, validate):
        """
        All subgraphs sampled by the same sampler should have the same dtype, since cap_*_subg are an upper bound
        for all subgraphs under that sampler.
        """
        # Only downcast when every capacity bound is known.
        if cap_node_full is not None and cap_edge_full is not None \
            and cap_node_subg is not None and cap_edge_subg is not None:
            dtype = {'indptr'    : np.int64,
                     'indices'   : np.int64,
                     'data'      : np.float32,
                     'node'      : np.int64,
                     'edge_index': np.int64,
                     'target'    : np.int64,
                     'hop'       : np.int64,
                     'ppr'       : np.float32}
            # Smallest unsigned integer type able to index n items.
            f_dtype = lambda n : np.uint16 if n < 2**16 else np.uint32
            if cap_node_full < 2**32:
                dtype['node'] = f_dtype(cap_node_full)
            if cap_edge_full < 2**32:
                dtype['edge_index'] = f_dtype(cap_edge_full)
            if cap_node_subg < 2**32:
                # These three hold subgraph-local node positions.
                dtype['indices'] = f_dtype(cap_node_subg)
                dtype['target'] = f_dtype(cap_node_subg)
                dtype['hop'] = f_dtype(cap_node_subg)
            if cap_edge_subg < 2**32:
                # indptr entries count edges, so they are bounded by edge count.
                dtype['indptr'] = f_dtype(cap_edge_subg)
            assert set(dtype.keys()) == set(self.names_data_fields)
            for n in self.names_data_fields:
                v = getattr(self, n)
                if v is not None:
                    setattr(self, n, v.astype(dtype[n], copy=False))
        # explicitly handle data -- if it is all 1.
        # NOTE(review): np.broadcast_to yields a read-only, stride-0 float64
        # view (the astype cast above is discarded for `data`) — confirm no
        # downstream consumer writes into .data.
        if np.all(self.data == 1.):
            self.data = np.broadcast_to(np.array([1.]), self.data.size)
        if validate:
            self.check_valid()

    def _copy(self):
        # Deep-copy only the data fields; dtypes are preserved as-is because
        # no cap_* init fields are passed to the new instance.
        datacopy = {}
        for n in self.names_data_fields:
            datacopy[n] = getattr(self, n).copy()
        return self.__class__(**datacopy)

    def check_valid(self):
        """Assert internal CSR consistency; hop/ppr may be empty arrays."""
        assert self.indices.size == self.edge_index.size == self.data.size == self.indptr[-1]
        assert self.hop.size == 0 or (self.hop.size == self.indptr.size - 1)
        assert self.ppr.size == 0 or (self.ppr.size == self.indptr.size - 1)
        assert self.indptr.size >= 2, "Subgraph must contain at least 1 node!"

    def num_nodes(self):
        """Number of nodes in the subgraph."""
        assert self.node.size == self.indptr.size - 1
        return self.node.size

    def num_edges(self):
        """Number of edges in the subgraph."""
        assert self.indices.size == self.edge_index.size == self.data.size == self.indptr[-1]
        return self.indices.size

    @classmethod
    def cat_to_block_diagonal(cls, subgs : list):
        """ Concatenate subgraphs into a full adj matrix (i.e., into the block diagonal form) """
        # Exclusive prefix sums: offset_indices[i] = #nodes before subgraph i,
        # offset_indptr[i] = #edges before subgraph i (cumsum then shift).
        offset_indices = np.cumsum([s.node.size for s in subgs])        # always int64
        offset_indptr = np.cumsum([s.edge_index.size for s in subgs])   # ^
        offset_indices[1:] = offset_indices[:-1]
        offset_indices[0] = 0
        offset_indptr[1:] = offset_indptr[:-1]
        offset_indptr[0] = 0
        node_batch = np.concatenate([s.node for s in subgs])            # keep original dtype
        edge_index_batch = np.concatenate([s.edge_index for s in subgs])    # ^
        data_batch = np.concatenate([s.data for s in subgs])            # ^
        hop_batch = np.concatenate([s.hop for s in subgs])              # ^
        if subgs[0].ppr.size == 0:
            ppr_batch = np.array([])
        else:       # need to explicitly check due to .max() function
            ppr_batch = np.concatenate([s.ppr/s.ppr.max() for s in subgs])  # renorm ppr
        # Upcast to int64 before adding offsets so the sums cannot overflow
        # the compact per-subgraph dtypes.
        target_batch_itr = [s.target.astype(np.int64) for s in subgs]
        indptr_batch_itr = [s.indptr.astype(np.int64) for s in subgs]
        indices_batch_itr = [s.indices.astype(np.int64) for s in subgs]
        target_batch, indptr_batch, indices_batch = [], [], []
        for i in range(len(subgs)):
            target_batch.append(target_batch_itr[i] + offset_indices[i])
            if i > 0:   # end of indptr1 equals beginning of indptr2. So remove one duplicate to ensure correctness.
                indptr_batch_itr[i] = indptr_batch_itr[i][1:]
            indptr_batch.append(indptr_batch_itr[i] + offset_indptr[i])
            indices_batch.append(indices_batch_itr[i] + offset_indices[i])
        target_batch = np.concatenate(target_batch)
        indptr_batch = np.concatenate(indptr_batch)
        indices_batch = np.concatenate(indices_batch)
        ret_subg = cls(
            indptr=indptr_batch,
            indices=indices_batch,
            data=data_batch,
            node=node_batch,
            edge_index=edge_index_batch,
            target=target_batch,
            hop=hop_batch,
            ppr=ppr_batch,
            cap_node_full=2**63,        # just be safe. Note that concated subgraphs are only used for one batch.
            cap_edge_full=2**63,
            cap_node_subg=2**63,
            cap_edge_subg=2**63,
            validate=True
        )
        return ret_subg

    def to_csr_sp(self):
        """Convert to a scipy CSR matrix, preserving the compact index dtypes."""
        num_nodes = self.indptr.size - 1
        adj = sp.csr_matrix((self.data, self.indices, self.indptr), shape=(num_nodes, num_nodes))
        # scipy upcasts index arrays to the platform int; restore the compact
        # dtypes chosen in __post_init__.
        if self.indices.dtype != np.int64:
            adj.indices = adj.indices.astype(self.indices.dtype, copy=False)
            adj.indptr = adj.indptr.astype(self.indptr.dtype, copy=False)
        return adj
class GraphSampler:
    """
    This is the sampler super-class. Any shallow sampler is supposed to perform
    the following meta-steps:
     1. [optional] Preprocessing: e.g., for PPR sampler, we need to calculate the
        PPR vector for each node in the training graph. This is to be performed
        only once.
        ==> Need to override the `preproc()` in sub-class
     2. Parallel sampling: launch a batch of graph samplers in parallel and sample
        subgraphs independently. For efficiency, the actual sampling operation
        happen in C++. And the classes here is mainly just a wrapper.
        ==> Need to set self.para_sampler to the appropriate C++ sampler
            in `__init__()` of the sampler sub-class
     3. Post-processing: upon getting the sampled subgraphs, we need to prepare the
        appropriate information (e.g., subgraph adj with renamed indices) to
        enable the PyTorch trainer. Also, we need to do data conversion from C++
        to Python (or, mostly numpy). Post-processing is handled via PyBind11.
    """
    def __init__(self, adj, node_target, aug_feat, args_preproc):
        """
        Inputs:
            adj             scipy sparse CSR matrix of the training graph
            node_target     1D np array storing the indices of the training nodes
            aug_feat        feature-augmentation config forwarded by sub-classes
            args_preproc    dict, addition arguments needed for pre-processing
        Outputs:
            None
        """
        self.adj = adj
        # Deduplicate (and sort) the target node IDs.
        self.node_target = np.unique(node_target)
        self.aug_feat = aug_feat
        # size in terms of number of vertices in subgraph
        self.name_sampler = "None"
        self.node_subgraph = None
        self.preproc(**args_preproc)

    def preproc(self, **kwargs):
        # Sub-classes must override (may be a no-op).
        raise NotImplementedError

    def par_sample(self, **kwargs):
        # NOTE(review): kwargs are accepted but not forwarded to the C++
        # parallel sampler — confirm this is intentional.
        return self.para_sampler.par_sample()

    def helper_extract_subgraph(self, node_ids, target_ids=None):
        """
        Used for serial Python sampler (not for the parallel C++ sampler).
        Return adj of node-induced subgraph and other corresponding data struct.
        Inputs:
            node_ids        1D np array, each element is the ID in the original
                            training graph.
            target_ids      optional iterable of original-graph IDs to be
                            translated into subgraph-local indices.
        Outputs:
            indptr          np array, indptr of the subg adj CSR
            indices         np array, indices of the subg adj CSR
            data            np array, data of the subg adj CSR. Since we have aggregator
                            normalization, we can simply set all data values to be 1
            subg_nodes      np array, i-th element stores the node ID of the original graph
                            for the i-th node in the subgraph. Used to index the full feats
                            and label matrices.
            subg_edge_index np array, i-th element stores the edge ID of the original graph
                            for the i-th edge in the subgraph. Used to index the full array
                            of aggregation normalization.
        """
        # Let n = num subg nodes; m = num subg edges
        node_ids = np.unique(node_ids)
        node_ids.sort()
        # Map original node ID -> position within the subgraph.
        orig2subg = {n: i for i, n in enumerate(node_ids)}
        n = node_ids.size
        # NOTE(review): `n` is immediately shadowed by the inner loop variable
        # below, so this count is effectively unused — confirm before relying on it.
        indptr = np.zeros(node_ids.size + 1)
        indices = []
        subg_edge_index = []
        subg_nodes = node_ids
        for nid in node_ids:
            # Slice this node's neighbor list out of the full CSR adjacency.
            idx_s, idx_e = self.adj.indptr[nid], self.adj.indptr[nid + 1]
            neighs = self.adj.indices[idx_s : idx_e]
            for i_n, n in enumerate(neighs):
                # Keep only edges whose endpoint is also inside the subgraph.
                if n in orig2subg:
                    indices.append(orig2subg[n])
                    # Per-row edge count; converted to a row pointer by the
                    # cumulative sum below.
                    indptr[orig2subg[nid] + 1] += 1
                    subg_edge_index.append(idx_s + i_n)
        indptr = indptr.cumsum().astype(np.int64)
        indices = np.array(indices)
        subg_edge_index = np.array(subg_edge_index)
        # Aggregator normalization is applied elsewhere, so edge values are all 1.
        data = np.ones(indices.size)
        assert indptr[-1] == indices.size == subg_edge_index.size
        if target_ids is not None:
            return indptr, indices, data, subg_nodes, subg_edge_index,\
                np.array([orig2subg[t] for t in target_ids])
        else:
            return indptr, indices, data, subg_nodes, subg_edge_index
class NodeIIDBase(GraphSampler):
    """Trivial sampler: target nodes are drawn IID; nothing to precompute."""

    def __init__(self, adj, node_target, aug_feat):
        # No sampler-specific preprocessing arguments: pass an empty dict.
        self.name = 'nodeIID'
        super().__init__(adj, node_target, aug_feat, {})

    def preproc(self, **kwargs):
        """IID node sampling requires no preprocessing."""
        pass
class KHopSamplingBase(GraphSampler):
    """
    K-hop neighborhood sampler.

    Sampling proceeds as:
      1. draw `size_root` root nodes uniformly from the training nodes;
      2. expand `depth` hops, where each node fans out to at most `budget`
         neighbors at the next hop (budget == -1 expands all neighbors);
      3. return the subgraph induced on every node touched.
    """

    def __init__(self, adj, node_target, aug_feat, size_root, depth, budget):
        """
        Inputs:
            adj             see super-class
            node_target     see super-class
            size_root       int, number of roots drawn per subgraph
            depth           int, number of hops to expand
            budget          int, max fanout per node per hop (-1 = unlimited)
        Outputs:
            None
        """
        self.name = "khop"
        self.size_root = size_root
        self.depth = depth
        self.budget = budget
        super().__init__(adj, node_target, aug_feat, {})

    def preproc(self, **kwargs):
        """K-hop sampling needs no precomputation."""
        pass
class PPRSamplingBase(GraphSampler):
"""
The sampler performs sampling based on PPR score
"""
def __init__(self, adj, node_target, aug_feat, size_root, k, alpha=0.85, epsilon=1e-5, threshold=0):
"""
Inputs:
adj see super-class
node_target see super-class
size_root int, number of root nodes randomly picked
k int, number of hops to expand
budget int, number of hop-(i+1) neighbors to expand
Outputs:
None
"""
self.size_root = size_root
self.k = | |
import math
from datetime import datetime
import psutil
from cartmigration.libs.utils import *
import sendgrid
from sendgrid.helpers.mail import *
class BaseController:
NEW = 1
RUN = 2
STOP = 3
FINISH = 4
DEV_MODE = True
LIMIT_LINE_ERROR = 200
ACTION_STOP = 1
ACTION_COMPLETED = 2
ACTION_APP_MODE = 3
ACTION_DEMO_ERROR = 4
def __init__(self, data = None):
self._migration_id = data.get('migration_id') if isinstance(data, dict) else None
self.data = data
self.pid = None
self._notice = None
self.router = None
self.source_cart = None
self.target_cart = None
self.test = data.get('test') if isinstance(data, dict) else False
def set_migration_id(self, _migration_id):
self._migration_id = _migration_id
def get_migration_id(self):
return self._migration_id
def set_notice(self, notice):
self._notice = notice
def get_notice(self):
return self._notice
    def init_cart(self, new = False):
        """
        Lazily resolve the base-cart router model and the migration notice,
        caching both on the instance.

        When `new` is true, or no migration id is set yet, a default notice
        is created; otherwise the notice for the existing migration id is
        loaded. Returns self so calls can be chained.
        """
        # Already initialized: nothing to do.
        if self._notice and self.router:
            return self
        self.router = get_model('basecart')
        getattr(self.router, 'set_is_test')(self.test)
        if not self._migration_id or new:
            # Fresh migration (or forced re-init): start from the default notice.
            if self._migration_id:
                getattr(self.router, 'set_migration_id')(self._migration_id)
            self._notice = getattr(self.router, 'get_default_notice')()
        else:
            # Existing migration: load its notice unless one is already cached.
            getattr(self.router, 'set_migration_id')(self._migration_id)
            if not self._notice:
                self._notice = getattr(self.router, 'init_notice')()
        getattr(self.router, 'set_notice')(self._notice)
        # self.source_cart = self.get_source_cart()
        # self.target_cart = self.get_target_cart()
        return self
def delete_notice(self):
# router = get_model('migration')
delete = getattr(self.get_router(), 'delete_migration_notice')(self._migration_id)
if delete:
self._notice = None
return delete
def update_notice(self, _migration_id, notice = None, pid = None, mode = None, status = None, finish = False):
# router = get_model('migration')
return getattr(self.get_router(), 'update_notice')(_migration_id, notice, pid , mode, status, finish)
def get_router(self):
if self.router:
return self.router
self.init_cart()
return self.router
def reset_cart(self):
self.source_cart = None
self.target_cart = None
self.get_source_cart()
self.get_target_cart()
    def get_source_cart(self):
        """
        Return (and cache) the model object for the source cart.

        The concrete model name is resolved by the router from the cart
        type/version stored in the notice; the model is then wired with the
        migration id, notice, db handle and test flag. Returns None when the
        model cannot be loaded.
        """
        if self.source_cart:
            return self.source_cart
        source_cart_type = self._notice['src']['cart_type']
        target_cart_type = self._notice['target']['cart_type']
        # Same-type migrations may map to a specialized cart implementation.
        special_type = source_cart_type == target_cart_type
        cart_version = self._notice['src']['config']['version']
        # NOTE(review): uses self.router directly (get_target_cart goes through
        # get_router()), so this assumes init_cart() already ran — confirm.
        cart_name = getattr(self.router, 'get_cart')(source_cart_type, cart_version, special_type)
        self.source_cart = get_model(cart_name)
        if not self.source_cart:
            return None
        getattr(self.source_cart, 'set_migration_id')(self._migration_id)
        getattr(self.source_cart, 'set_type')('src')
        getattr(self.source_cart, 'set_notice')(self._notice)
        getattr(self.source_cart, 'set_db')(getattr(self.router, 'get_db')())
        getattr(self.source_cart, 'set_is_test')(self.test)
        return self.source_cart
    def get_target_cart(self):
        """
        Return (and cache) the model object for the target cart.

        Mirrors get_source_cart(): resolves the concrete model name through
        the router, then wires migration id, notice, db handle and test flag
        into the model. Returns None when the model cannot be loaded.
        """
        # cart_custom_name = getattr(basecart, 'get_target_custom_cart')(self._migration_id)
        # target_cart = get_model(cart_custom_name)
        if self.target_cart:
            return self.target_cart
        source_cart_type = self._notice['src']['cart_type']
        target_cart_type = self._notice['target']['cart_type']
        # Same-type migrations may map to a specialized cart implementation.
        special_type = source_cart_type == target_cart_type
        cart_version = self._notice['target']['config']['version']
        cart_name = getattr(self.get_router(), 'get_cart')(target_cart_type, cart_version, special_type)
        self.target_cart = get_model(cart_name)
        if not self.target_cart:
            return None
        getattr(self.target_cart, 'set_type')('target')
        getattr(self.target_cart, 'set_migration_id')(self._migration_id)
        getattr(self.target_cart, 'set_notice')(self._notice)
        getattr(self.target_cart, 'set_db')(getattr(self.router, 'get_db')())
        getattr(self.target_cart, 'set_is_test')(self.test)
        return self.target_cart
def get_target_cart_name(self):
source_cart_type = self._notice['src']['cart_type']
target_cart_type = self._notice['target']['cart_type']
check = False
if (source_cart_type == 'magento') and (target_cart_type == 'magento'):
check = True
cart_version = self._notice['target']['config']['version']
cart_name = getattr(self.get_router(), 'get_cart')(target_cart_type, cart_version, check)
return cart_name
def get_source_cart_name(self):
source_cart_type = self._notice['src']['cart_type']
target_cart_type = self._notice['target']['cart_type']
check = False
if (source_cart_type == 'magento') and (target_cart_type == 'magento'):
check = True
cart_version = self._notice['src']['config']['version']
cart_name = getattr(self.get_router(), 'get_cart')(source_cart_type, cart_version, check)
return cart_name
def save_notice(self, status = None, sv_pid = True, pid = None, clear_entity_warning = False):
notice = self._notice
demo = None
# if 'demo' in notice and notice['demo']:
# demo = 2
if sv_pid:
process_id = pid if pid else self.pid
else:
process_id = None
res = getattr(self.get_router(), 'save_user_notice')(self._migration_id, notice, process_id, demo, status, clear_entity_warning = clear_entity_warning)
return res
def save_migration(self, after_kill = False, kill_all = False, extend_data = dict):
notice = self._notice
data = {
'notice': notice,
'migration_id': self._migration_id
}
if kill_all:
data['status'] = STATUS_KILL
data['pid'] = None
if after_kill:
data['status'] = STATUS_STOP
data['pid'] = None
if extend_data and isinstance(extend_data, dict):
for extend_key, extend_value in extend_data.items():
if extend_key not in data:
data[extend_key] = extend_value
res = getattr(self.get_router(), 'save_migration')(self._migration_id, data)
return res
def clear_stop_flag(self):
self.init_cart()
return getattr(self.router, 'clear_stop_flag')(self._migration_id)
def get_user_notice(self):
getattr(self.get_router(), 'set_migration_id')(self._migration_id)
notice = getattr(self.get_router(), 'get_migration_notice')(self._migration_id)
return notice
def save_recent(self):
return getattr(self.get_router(), 'save_recent')(self._migration_id, self._notice)
def default_result_migration(self):
return {
'result': '',
'msg': '',
'process': {
'next': '',
'total': 0,
'imported': 0,
'error': 0,
'point': 0,
}
}
def get_process_migration(self, notice, con):
notice = getattr(self.get_router(), 'get_migration_notice')(notice['setting']['migration_id'])
send_data_socket(notice, con)
def get_info_migration_id(self, user_migration_id):
# cart = get_model('basecart')
return getattr(self.get_router(), 'get_info_migration')(user_migration_id)
def check_migration_id(self, user_migration_id):
cart = get_model('basecart')
check_migration_id = getattr(self.get_router(), 'check_migration_id')(user_migration_id)
if not check_migration_id:
return response_error()
return check_migration_id
    def log(self, msg, type_log = 'exceptions'):
        # Write msg to this migration's log; for serious entries (anything
        # not in the progress/timing categories) also refresh the
        # 'exceptions_top' log so it only ever holds the latest error.
        log(msg, self._migration_id, type_log)
        if type_log not in ['process', 'time_requests', 'time_images']:
            path = BASE_DIR + '/log'
            if self._migration_id:
                migration_id = to_str(self._migration_id)
                # NOTE(review): prefixing DIR_PROCESS in front of a
                # BASE_DIR-derived path looks suspicious -- confirm this is
                # the intended per-migration log location.
                path = DIR_PROCESS + migration_id + '/' + path
            if os.path.isfile(path+'/exceptions_top.log'):
                # drop the previous top-level exception before re-logging
                os.remove(path+'/exceptions_top.log')
            log(msg, self._migration_id, 'exceptions_top')
def log_traceback(self, type_error = 'exceptions', entity_id = None):
error = traceback.format_exc()
if entity_id:
error = type_error + ' ' + to_str(entity_id) + ': ' + error
self.log(error, type_error)
def setup_source_cart(self, cart_type = None):
# cart = get_model('basecart')
if not cart_type:
cart_type = self.get_first_source_cart_type()
setup_type = getattr(self.get_router(), 'source_cart_setup')(cart_type)
view_path = 'templates.migration.source.' + setup_type
support_info = 'templates.migration.source.support.info'
return {
'setup_type': setup_type,
'cart_type': cart_type,
'view_path': view_path,
'info': support_info,
}
def setup_target_cart(self, cart_type = None):
# cart = get_model('basecart')
if not cart_type:
cart_type = self.get_first_target_cart_type()
setup_type = getattr(self.get_router(), 'target_cart_setup')(cart_type)
view_path = 'templates.migration.target.' + setup_type
support_info = 'templates.migration.target.support.info'
return {
'setup_type': setup_type,
'cart_type': cart_type,
'view_path': view_path,
'info': support_info,
}
def get_first_source_cart_type(self):
source_cart_type = get_model('type')
lists = getattr(source_cart_type, 'source_cart')()
first_cart = ''
for cart_type, label in lists.items():
first_cart = cart_type
break
return first_cart
def get_first_target_cart_type(self):
target_cart_type = get_model('type')
lists = getattr(target_cart_type, 'target_cart')()
first_cart = ''
for cart_type, label in lists.items():
first_cart = cart_type
break
return first_cart
def get_migration_info(self, data):
self.set_migration_id(data['migration_id'])
self._notice = None
self.init_cart()
notice_clone = self._notice.copy()
response_from_subprocess(notice_clone)
return
def update_migration_info(self, data):
if not data:
response_from_subprocess(response_success())
self.set_migration_id(data['migration_id'])
self._notice = None
self.init_cart()
cart_filter_keys = ['cart_type', 'cart_url', 'token', 'api', 'database']
filter_keys = ['mode', 'status']
for cart_key in ['src', ['target']]:
for filter_key in cart_filter_keys:
if data.get(filter_key):
self._notice[cart_key][filter_key] = data[filter_key]
extend_data = dict()
for filter_key in filter_keys:
if data.get(filter_key):
if filter_key in self._notice:
self._notice[filter_key] = data[filter_key]
extend_data[filter_key] = data[filter_key]
update = self.save_migration(extend_data = extend_data)
return update
def get_migration_history(self, data):
migration = get_model('migration')
history = getattr(migration, 'get_migration_history')(data['migration_id'])
response_from_subprocess(history)
return
def get_file(self, migration_id, path_file = 'exceptions_top', is_limit = True, limit_line = None):
if migration_id:
log_file = get_pub_path() + '/log/' + to_str(migration_id) + '/' + path_file + '.log'
else:
log_file = get_pub_path() + '/log/' + path_file + '.log'
lines = list()
_limit = to_int(limit_line if limit_line else self.LIMIT_LINE_ERROR)
if os.path.isfile(log_file):
file_handle = open(log_file, "r")
line_lists = file_handle.readlines()
file_handle.close()
if (not is_limit) or (to_len(line_lists) <= _limit):
lines = line_lists
else:
index = 0 - _limit
while index <= -1:
lines.append(line_lists[index])
index += 1
return lines
def get_process_log(self, data):
response_from_subprocess(list(reversed(self.get_file(data.get('migration_id'), 'process'))))
return
def get_error_entity(self, data):
if not data['type']:
return list()
_type = data['type'] + '_errors'
response_from_subprocess(self.get_file(data.get('migration_id'), _type))
return
def get_errors(self, data):
response_from_subprocess(self.get_file(data.get('migration_id'), 'exceptions_top'))
return
def get_exceptions(self, data):
response_from_subprocess(self.get_file(data.get('migration_id'), 'exceptions'))
return
def client_get_list_migration(self, data):
cart = get_model('basecart')
notice = getattr(cart, 'get_list_migration')(data['user_id'], data['page'], data['limit'])
if notice['result'] == 'success':
return notice
return None
def client_new_migration(self, data):
migration = get_model('migration')
return getattr(migration, 'new_migration')(data['user_id'])
def change_source(self, data):
self.init_cart()
cart_type = data.get('source_cart_type')
self._notice['src']['cart_type'] = cart_type
setup_source_cart = self.setup_source_cart(cart_type)
response_from_subprocess({
'result': 'show',
'html': setup_source_cart['view_path'],
'show_next': False if setup_source_cart['setup_type'] == 'file' else True,
'info': setup_source_cart['info'],
'setup_type': setup_source_cart['setup_type'],
'notice': self._notice
})
def change_target(self, data):
self.init_cart()
cart_type = data.get('target_cart_type')
self._notice['target']['cart_type'] = cart_type
setup_target_cart = self.setup_target_cart(cart_type)
response_from_subprocess({
'result': 'show',
'html': setup_target_cart['view_path'],
'show_next': False if setup_target_cart['setup_type'] == 'file' else True,
'info': setup_target_cart['info'],
'setup_type': setup_target_cart['setup_type'],
'notice': self._notice
})
def client_setup_cart(self, data):
self._migration_id = data['migration_id']
self.init_cart()
self._notice['src']['cart_type'] = data.get('source_cart_type')
self._notice['src']['cart_url'] = data.get('source_cart_url')
self._notice['target']['cart_url'] = data.get('target_cart_url')
self._notice['target']['cart_type'] = data.get('target_cart_type')
# clone_code_for_migration_id(self._migration_id)
buffer = dict()
buffer['controller'] = 'migration'
buffer['action'] = 'setup_cart'
buffer['data'] = data
setup_cart = start_subprocess(self._migration_id, buffer, True)
# subprocess.call(['python3', get_root_path() + '/' + DIR_PROCESS + '/' + self._migration_id + '/bootstrap.py', json.dumps(buffer)])
# self._notice = self.client_get_migration_info(data)
# notice = self._notice.copy()
# self._notice['response'] = dict()
# self.save_notice(None, False)
return setup_cart
def client_config(self, data):
self._migration_id = data['migration_id']
self.init_cart()
buffer = dict()
buffer['controller'] = 'migration'
buffer['action'] = 'config'
buffer['data'] = data
config = start_subprocess(self._migration_id, buffer, True)
# subprocess.call(['python3', get_root_path() + '/' + DIR_PROCESS + '/' + self._migration_id + '/bootstrap.py', json.dumps(buffer)])
# self._notice = self.client_get_migration_info(data)
# notice = self._notice.copy()
# self._notice['response'] = dict()
# self.save_notice(None, False)
return config
def kill_end_loop_migration(self, data):
migration_id = data.get('migration_id')
self.init_cart()
stop = getattr(self.get_router(), 'set_flag_stop')(migration_id)
if stop['result'] != 'success':
response_from_subprocess(response_error("Don't stop"))
return
else:
response_from_subprocess(response_success())
return
def get_cart_type(self, data):
cart_type = data['type']
model_type = get_model('type')
all_cart_type = getattr(model_type, cart_type + '_cart')()
return all_cart_type
def get_cart_setup(self, data):
cart_type = data['type']
if cart_type != 'target':
cart_type = 'source'
model = get_model('basecart')
cart_setup = getattr(model, cart_type + '_cart_setup')(data.get('cart_type'))
return cart_setup
def kill_all_process(self, data):
server_id = data['server_id']
migration_model = get_model('migration')
list_migration = getattr(migration_model, 'get_list_migration_run')(server_id)
if list_migration['result'] == 'success':
for migration in list_migration['data']:
getattr(migration_model, 'set_flag_stop')(migration['migration_id'])
# pid = to_int(migration['pid'])
# retry = 5
# while check_pid(pid) and retry > 0:
# subprocess.call(['kill', '-9', to_str(pid)])
# retry -= 1
getattr(migration_model, 'set_status_migration')(migration['migration_id'], STATUS_KILL)
response_from_subprocess(True)
    def kill_migration(self, data, conn = True):
        # Kill the worker process for a migration (if one exists), persist
        # the stopped state, and report success -- over the subprocess
        # channel when conn=True, or as a return value when conn=False.
        info_migration_id = getattr(self.get_router(), 'get_info_migration')(data['migration_id'])
        if not info_migration_id or not info_migration_id['pid']:
            # nothing is running: treat as an already-successful kill
            if conn:
                response_from_subprocess(response_success())
                return
            else:
                return response_success()
        pid = to_int(info_migration_id['pid'])
        retry = 5
        # SIGKILL in a bounded loop: the pid may take a moment to disappear
        while check_pid(pid) and retry > 0:
            subprocess.call(['kill', '-9', to_str(pid)])
            retry -= 1
        # restore the notice saved with the process info before persisting
        self._notice = json_decode(info_migration_id['notice'])
        self.init_cart()
        # after_kill=True marks the migration as stopped and clears the pid
        self.save_migration(True)
        if conn:
            response_from_subprocess(response_success())
            return
        else:
            return response_success()
def check_run(self, data, conn = True):
cart = get_model('basecart')
info_migration_id = getattr(cart, 'get_info_migration')(data['migration_id'])
if not info_migration_id or not info_migration_id['pid']:
if conn:
response_from_subprocess(False)
return
else:
return False
pid = to_int(info_migration_id['pid'])
if check_pid(pid) and to_int(info_migration_id['status']) == STATUS_RUN:
if conn:
response_from_subprocess(True)
return
else:
return True
if conn:
response_from_subprocess(False)
return
return False
def check_custom(self, data):
migration_id = data['migration_id']
check = check_folder_clone(migration_id)
response_from_subprocess(response_success(check))
return
def save_history(self, data):
self._migration_id = data['migration_id']
getattr(self.get_router(), 'save_migration_history')()
response_from_subprocess(True)
def client_get_file_info(self, data):
buffer = dict()
buffer['controller'] = 'migration'
buffer['action'] = 'get_file_info'
buffer['data'] = data
file_info = start_subprocess(self._migration_id, buffer, True)
return file_info
def get_average(self, data):
return round(sum(data) / to_len(data), 1)
    def get_server_status(self, data):
        # Sample host CPU/memory/disk usage over ~2 seconds, enumerate the
        # running migration worker processes, and push the combined report
        # to the parent process.
        cpu_percent = []
        memory_percent = []
        disk_usage_percent = []
        readio_mps = []
        writeio_mps = []
        new_info = 0
        # take 10 samples, ~0.2s apart, and average them afterwards
        for x in range(10):
            cpu_percent.append(psutil.cpu_percent(interval = 0.2))
            memory_percent.append(psutil.virtual_memory().percent)
            disk_usage_percent.append(psutil.disk_usage('/')[3])
            if x == 0:
                # first pass only seeds the disk IO counters
                new_info = psutil.disk_io_counters()
            else:
                old_info = new_info
                new_info = psutil.disk_io_counters()
                # per-sample read/write delta, converted to MB
                r = round((new_info.read_bytes - old_info.read_bytes) / 1024 ** 2, 1)
                readio_mps.append(r)
                w = round((new_info.write_bytes - old_info.write_bytes) / 1024 ** 2, 1)
                writeio_mps.append(w)
        status = {
            "cpu_percent" : self.get_average(cpu_percent),
            "memory_percent" : self.get_average(memory_percent),
            "disk_usage_percent": self.get_average(disk_usage_percent),
            "readio_mps" : self.get_average(readio_mps),
            "writeio_mps" : self.get_average(writeio_mps)
        }
        # collect info on running migration workers
        migrations = []
        for proc in psutil.process_iter():
            proc_cmd = proc.cmdline()
            if not proc_cmd or 'python' not in proc_cmd[0]:
                continue
            # workers look like: python3 .../bootstrap.py '<json payload>'
            if len(proc_cmd) > 2 and proc_cmd[0] and proc_cmd[0] == "python3" and proc_cmd[1] and "bootstrap.py" in proc_cmd[1]:
                proc_status = {
                    "pid": proc.pid,
                    "cpu_percent": proc.cpu_percent(interval = 0.2),
                    "memory_info": str(math.ceil(proc.memory_info().rss / (1024 * 1024))) + "M",
                    "create_time": datetime.fromtimestamp(proc.create_time()).strftime("%Y-%m-%d %H:%M:%S"),
                    "path": proc_cmd[1],
                    "migration_info": json_decode(proc_cmd[2])
                }
                migrations.append(proc_status)
        status["processes"] = migrations
        # optionally include the default notice template when requested
        if data and isinstance(data, dict) and data.get('default'):
            migration = get_model('migration')
            default_notice = getattr(migration, 'get_default_notice')()
            status['default_notice'] = default_notice
        response_from_subprocess(status)
        return
def restart_migration(self, data):
self.kill_migration(data, False)
buffer = dict()
buffer['controller'] = 'migration'
buffer['action'] = 'start'
buffer['data'] = dict()
buffer['data']['migration_id'] = data['migration_id']
res = response_success()
res['next'] = buffer
# start_subprocess(data['migration_id'], buffer)
response_from_subprocess(res)
return
def client_update_token(self, data):
migration_id = data.get('migration_id')
type_token = data.get('type')
token = data.get('token')
if not migration_id or not type_token or not token:
return True
router = get_model('migration')
getattr(router, 'update_token')(migration_id, type_token, token)
return True
def delete_migration(self, data):
# check_run = self.check_run(data, False)
# if check_run is False:
# response_from_subprocess(False)
# return
self.kill_migration(data, False)
migration_id = data.get('migration_id')
if not migration_id:
response_from_subprocess(True)
return
path = get_pub_path()
path = path.replace('processes', '')
path = path.rstrip('/')
path_delete = [
path + '/' + DIR_PROCESS + to_str(migration_id),
path + '/log/' + to_str(migration_id),
path + '/uploads/' + to_str(migration_id)
]
for path in path_delete:
if os.path.isdir(path):
shutil.rmtree(path)
response_from_subprocess(True)
return
def reset_migration(self, data):
kill_process = self.kill_migration(data, False)
if kill_process['result'] != 'success':
response_from_subprocess(kill_process)
return
# migration = get_model('migration')
reset = getattr(self.get_router(), 'reset_migration')(data['migration_id'])
if not reset:
response_from_subprocess(response_error("Don't reset"))
return
clear_log(data['migration_id'])
buffer = dict()
buffer['controller'] = 'migration'
buffer['action'] = 'start'
buffer['data'] = dict()
buffer['data']['migration_id'] = data['migration_id']
buffer['data']['test'] = data.get('test')
# start_subprocess(data['migration_id'], buffer)
res = response_success()
res['next'] = buffer
response_from_subprocess(res)
# subprocess.Popen(['python3', get_root_path() + '/' + | |
# plot fits
frame.scatter(fp_m, fp_dopd, label='Measured')
frame.plot(np.sort(fp_m), fp_dopd_fit, label='fit', color='red')
# set title labels limits
title = 'FP cavity width offset'
frame.set(xlabel='FP peak number',
ylabel='Local cavity width offset [micron]',
title=title)
# Add legend
frame.legend(loc=0)
# ------------------------------------------------------------------
# wrap up using plotter
plotter.plotend(graph)
def plot_wave_fp_wave_res(plotter, graph, kwargs):
    """Scatter the FP line wavelength residuals (initial minus new)."""
    # bail out early if this graph should not be produced
    if not plotter.plotstart(graph):
        return
    # unpack the wavelength-solution properties
    llprops = kwargs['llprops']
    fp_ll = llprops['FP_LL_POS']
    fp_ll_new = llprops['FP_LL_POS_NEW']
    # single-panel figure
    fig, frame = graph.set_figure(plotter, nrows=1, ncols=1)
    # residual between the initial and updated FP line wavelengths
    frame.scatter(fp_ll, fp_ll - fp_ll_new)
    frame.set(xlabel='Initial wavelength [nm]',
              ylabel='New - Initial wavelength [nm]',
              title='FP lines wavelength residuals')
    # finalise the plot via the plotter framework
    plotter.plotend(graph)
def plot_wave_fp_m_x_res(plotter, graph, kwargs):
    """Plot the m(x) polynomial-fit residuals per order, each order
    vertically offset by +0.01 for readability."""
    # bail out early if this graph should not be produced
    if not plotter.plotstart(graph):
        return
    # unpack inputs
    fp_order = kwargs['fp_order']
    fp_xx = kwargs['fp_xx']
    m_vec = kwargs['m_vec']
    xm_mask = kwargs['xm_mask']
    coeff_xm_all = kwargs['coeff_xm_all']
    n_init = kwargs['n_init']
    n_fin = kwargs['n_fin']
    # single-panel figure
    fig, frame = graph.set_figure(plotter, nrows=1, ncols=1)
    # flatten the per-order FP order labels once (loop-invariant)
    flat_orders = np.concatenate(fp_order).ravel()
    for ord_num in range(n_fin - n_init):
        # select the FP lines belonging to this order
        ind_ord = np.where(flat_orders == ord_num + n_init)
        fp_x_ord = fp_xx[ord_num]
        m_ord = m_vec[ind_ord]
        mask = xm_mask[ord_num]
        coeff_xm = coeff_xm_all[ord_num]
        # residual of the m(x) polynomial fit, offset per order
        residual = m_ord[mask] - np.polyval(coeff_xm, fp_x_ord[mask])
        frame.plot(fp_x_ord[mask], residual + 0.01 * ord_num, '.')
    frame.set(xlabel='FP pixel position',
              ylabel='m(x) residuals (shifted +0.01*Order)')
    # finalise the plot via the plotter framework
    plotter.plotend(graph)
def plot_wave_fp_ipt_cwid_1mhc(plotter, graph, kwargs):
    """Plot interpolated FP cavity width vs 1/m for HC lines.

    Top panel: cavity width values, the original width reference line,
    the reference peak of the reddest order, and the polynomial fit.
    Bottom panel: fit residuals.
    """
    # ------------------------------------------------------------------
    # start the plotting process
    if not plotter.plotstart(graph):
        return
    # ------------------------------------------------------------------
    # get the arguments from kwargs
    one_m_d = kwargs['one_m_d']
    d_arr = kwargs['d_arr']
    m_init = kwargs['m_init']
    fit_1m_d_func = kwargs['fit_1m_d_func']
    res_d_final = kwargs['res_d_final']
    dopd0 = kwargs['dopd0']
    # ------------------------------------------------------------------
    # set up plot (2-row layout: values on top, residuals below)
    gs = dict(height_ratios=[2, 1])
    fig, frames = graph.set_figure(plotter, nrows=2, ncols=1,
                                   gridspec_kw=gs, sharex=True)
    frame1, frame2 = frames
    # ------------------------------------------------------------------
    # plot values
    frame1.plot(one_m_d, d_arr, marker='.')
    # plot initial cavity width value (dopd0 is a double-pass width, hence /2)
    frame1.hlines(dopd0 / 2., min(one_m_d), max(one_m_d), label='original d')
    # plot reference peak of reddest order
    frame1.plot(1. / m_init, dopd0 / 2., 'D')
    # plot fit
    frame1.plot(one_m_d, fit_1m_d_func(one_m_d), label='polynomial fit')
    # plot residuals - separate subplot
    frame2.plot(one_m_d, res_d_final, '.')
    # set labels
    frame1.set(ylabel='cavity width d')
    frame2.set(xlabel='1/m', ylabel='residuals [nm]')
    # plot legend
    frame1.legend(loc='best')
    # add title
    fig.suptitle('Interpolated cavity width vs 1/m for HC lines')
    # ------------------------------------------------------------------
    # adjust plot (zero spacing so the two panels share an x axis visually)
    fig.subplots_adjust(top=0.9, bottom=0.1, left=0.05, right=0.95,
                        hspace=0, wspace=0)
    # ------------------------------------------------------------------
    # wrap up using plotter
    plotter.plotend(graph)
def plot_wave_fp_ipt_cwid_llhc(plotter, graph, kwargs):
    """Plot interpolated FP cavity width vs wavelength for HC lines.

    Top panel: cavity width values, the original width reference line,
    the reference peak of the reddest order, and the polynomial fit.
    Bottom panel: fit residuals. The fiber name is shown in the title.
    """
    # ------------------------------------------------------------------
    # start the plotting process
    if not plotter.plotstart(graph):
        return
    # ------------------------------------------------------------------
    # get the arguments from kwargs
    hc_ll = kwargs['hc_ll']
    fp_ll = kwargs['fp_ll']
    fitval = kwargs['fitval']
    d_arr = kwargs['d_arr']
    dopd0 = kwargs['dopd0']
    fiber = kwargs['fiber']
    # ------------------------------------------------------------------
    # set up plot (2-row layout: values on top, residuals below)
    gs = dict(height_ratios=[2, 1])
    fig, frames = graph.set_figure(plotter, nrows=2, ncols=1,
                                   gridspec_kw=gs, sharex=True)
    frame1, frame2 = frames
    # ------------------------------------------------------------------
    frame1.plot(hc_ll, d_arr, '.')
    # plot initial cavity width value (dopd0 is a double-pass width, hence /2)
    frame1.hlines(dopd0 / 2., min(hc_ll), max(hc_ll), label='original d')
    # plot reference peak of reddest order
    frame1.plot(fp_ll[-1][-1], dopd0 / 2., 'D')
    # plot fit
    frame1.plot(hc_ll, fitval, label='polynomial fit')
    # plot residuals - separate subplot
    frame2.plot(hc_ll, d_arr - fitval, '.')
    # set labels
    frame1.set(ylabel='cavity width d')
    frame2.set(xlabel='wavelength', ylabel='residuals [nm]')
    # plot legend
    frame1.legend(loc='best')
    # add title
    fig.suptitle('Interpolated cavity width vs wavelength for HC lines. '
                 'Fiber={0}'.format(fiber))
    # ------------------------------------------------------------------
    # adjust plot (zero spacing so the two panels share an x axis visually)
    fig.subplots_adjust(top=0.9, bottom=0.1, left=0.05, right=0.95,
                        hspace=0, wspace=0)
    # ------------------------------------------------------------------
    # wrap up using plotter
    plotter.plotend(graph)
def plot_wave_fp_ll_diff(plotter, graph, kwargs):
    """Plot per-order differences between old and new FP line wavelengths.

    Each order is offset vertically by +0.001 and drawn in its own
    rainbow colour so the orders can be distinguished.
    """
    # ------------------------------------------------------------------
    # start the plotting process
    if not plotter.plotstart(graph):
        return
    cm = plotter.matplotlib.cm
    # ------------------------------------------------------------------
    # get the arguments from kwargs
    llprops = kwargs['llprops']
    n_init = kwargs['n_init']
    n_fin = kwargs['n_fin']
    # get data from llprops
    poly_wave_sol = llprops['POLY_WAVE_SOL']
    fp_ord_new = llprops['FP_ORD_NEW']
    fp_xx_new = llprops['FP_XX_NEW']
    fp_ll_new = llprops['FP_LL_NEW']
    # ------------------------------------------------------------------
    # get colours (one per plotted order)
    # noinspection PyUnresolvedReferences
    col = cm.rainbow(np.linspace(0, 1, n_fin))
    # ------------------------------------------------------------------
    # set up plot
    fig, frame = graph.set_figure(plotter, nrows=1, ncols=1)
    # ------------------------------------------------------------------
    # loop through the orders
    for ind_ord in range(n_fin - n_init):
        # get parameters for initial wavelength solution
        # (coefficients are stored lowest-order first, hence the reversal)
        c_aux = np.poly1d(poly_wave_sol[ind_ord + n_init][::-1])
        # order mask
        ord_mask = np.where(fp_ord_new == ind_ord + n_init)
        # get FP line pixel positions for the order
        fp_x_ord = fp_xx_new[ord_mask]
        # derive FP line wavelengths using initial solution
        fp_ll_orig = c_aux(fp_x_ord)
        # get new FP line wavelengths for the order
        fp_ll_new_ord = fp_ll_new[ord_mask]
        # plot old-new wavelengths, offset per order for readability
        frame.plot(fp_x_ord, fp_ll_orig - fp_ll_new_ord + 0.001 * ind_ord,
                   marker='.', color=col[ind_ord],
                   label='order ' + str(ind_ord))
    # define labels
    ylabel = ('FP old-new wavelength difference [nm] '
              '(shifted +0.001 per order)')
    frame.set(xlabel='FP peak position [pix]', ylabel=ylabel)
    # ------------------------------------------------------------------
    # wrap up using plotter
    plotter.plotend(graph)
def plot_wave_fp_multi_order(plotter, graph, kwargs):
    """Plot HC spectra (log flux) for a range of orders with the used HC
    lines marked as vertical lines, alternating colour/style by order
    parity so adjacent orders are distinguishable.
    """
    # ------------------------------------------------------------------
    # start the plotting process
    if not plotter.plotstart(graph):
        return
    # ------------------------------------------------------------------
    # get the arguments from kwargs
    params = kwargs['params']
    hc_ll = kwargs['hc_ll']
    hc_ord = kwargs['hc_ord']
    hcdata = kwargs['hcdata']
    wave_map = kwargs['wave']
    n_plot_init = kwargs['init']
    n_fin = kwargs['fin']
    nbo = kwargs['nbo']
    # compute final plotting order (never beyond the last fitted order)
    n_plot_fin = np.min([n_plot_init + nbo, n_fin])
    # ------------------------------------------------------------------
    # deal with plot style (invert "black" on dark backgrounds)
    if 'dark' in params['DRS_PLOT_STYLE']:
        black = 'white'
    else:
        black = 'black'
    # define colours and line types for alternate order fitted lines
    col = [black, 'grey']
    lty = ['--', ':']
    # ------------------------------------------------------------------
    # set up plot
    fig, frame = graph.set_figure(plotter, nrows=1, ncols=1)
    # ------------------------------------------------------------------
    for order_num in range(n_plot_init, n_plot_fin):
        # select lines for the order
        hc_ll_plot = hc_ll[hc_ord == order_num]
        # get colour and style from order parity
        col_plot = col[np.mod(order_num, 2)]
        lty_plot = lty[np.mod(order_num, 2)]
        # log hc data (suppress log-of-zero/negative warnings)
        with warnings.catch_warnings(record=True) as _:
            loghcdata = np.log10(hcdata[order_num])
        # plot hc spectra
        frame.plot(wave_map[order_num], loghcdata)
        # plot used HC lines
        frame.vlines(hc_ll_plot, 0, np.nanmax(loghcdata),
                     color=col_plot, linestyles=lty_plot)
    # set axis labels
    frame.set(xlabel='Wavelength [nm]', ylabel='log_{10}(Normalised flux)',
              title='HC spectra + used HC lines')
    # ------------------------------------------------------------------
    # wrap up using plotter
    plotter.plotend(graph)
def plot_wave_fp_single_order(plotter, graph, kwargs):
    """Plot one order's HC spectrum with its fitted lines marked.

    When no explicit order is given, loop interactively over all orders
    via the plotter's plotloop mechanism.
    """
    # ------------------------------------------------------------------
    # start the plotting process
    if not plotter.plotstart(graph):
        return
    # ------------------------------------------------------------------
    # get the arguments from kwargs
    llprops = kwargs['llprops']
    order = kwargs.get('order', None)
    hcdata = kwargs['hcdata']
    # get data from llprops
    all_lines = llprops['ALL_LINES_1']
    wavemap = llprops['LL_OUT_2']
    # get number of orders
    nbo = llprops['LL_OUT_2'].shape[0]
    # ------------------------------------------------------------------
    # get order generator
    if order is None:
        order_gen = plotter.plotloop(np.arange(nbo).astype(int))
        # prompt to start looper
        plotter.close_plots(loop=True)
    else:
        order_gen = [order]
    # ------------------------------------------------------------------
    # loop around orders
    for order_num in order_gen:
        # ------------------------------------------------------------------
        # set up plot
        fig, frame = graph.set_figure(plotter, nrows=1, ncols=1)
        # ------------------------------------------------------------------
        # get the maximum point for this order
        maxpoint = mp.nanmax(hcdata[order_num])
        # plot order and flux
        frame.plot(wavemap[order_num], hcdata[order_num], label='HC Spectrum')
        # loop around lines in order
        for it in range(0, len(all_lines[order_num])):
            # get x and y (line entry layout: [0]=start ll, [3]=offset,
            # [2]=amplitude -- assumed from usage; confirm against producer)
            x = all_lines[order_num][it][0] + all_lines[order_num][it][3]
            ymaxi = all_lines[order_num][it][2]
            # log ydata (suppress log-of-zero/negative warnings)
            with warnings.catch_warnings(record=True) as _:
                logydata = np.log10(ymaxi)
            # plot lines to their corresponding amplitude
            frame.vlines(x, 0, logydata, color='m', label='fitted lines')
            # plot lines to the top of the figure
            frame.vlines(x, 0, np.log10(maxpoint), color='gray',
                         linestyles='dotted')
        # plot (de-duplicated legend helper)
        ulegend(frame, plotter, loc=0)
        # set limits and title
        title = 'Order {0}'.format(order_num)
        frame.set(xlabel='Wavelength', ylabel='Flux', title=title)
        # ------------------------------------------------------------------
        # update filename (adding order_num to end)
        suffix = 'order{0}'.format(order_num)
        graph.set_filename(plotter.params, plotter.location, suffix=suffix)
        # ------------------------------------------------------------------
        # wrap up using plotter
        plotter.plotend(graph)
def plot_waveref_expected(plotter, graph, kwargs):
# ------------------------------------------------------------------
# start the plotting process
if not plotter.plotstart(graph):
return
# ------------------------------------------------------------------
# get the arguments from kwargs
orders = kwargs['orders']
wavemap = kwargs['wavemap']
diff = kwargs['diff']
fiber = kwargs['fiber']
fibtype = kwargs['fibtype']
nbo = kwargs['nbo']
iteration = kwargs.get('iteration', None)
# ------------------------------------------------------------------
# set up plot
fig, frame = graph.set_figure(plotter, nrows=1, ncols=1)
# ------------------------------------------------------------------
for order_num in range(nbo):
# get order mask
omask = order_num == orders
# plot points
frame.scatter(wavemap[omask], diff[omask], s=5)
# add title (with or without iteration)
if iteration is not None:
if isinstance(iteration, int) or isinstance(iteration, float):
title = 'Pixel | |
'''
SPDX-License-Identifier: Apache-2.0
Copyright 2017 Massachusetts Institute of Technology.
'''
import base64
import ipaddress
import threading
import sys
import signal
import os
import http.server
from http.server import HTTPServer, BaseHTTPRequestHandler
from socketserver import ThreadingMixIn
from sqlalchemy.exc import SQLAlchemyError
from sqlalchemy.orm.exc import NoResultFound
from cryptography.hazmat.backends import default_backend
from cryptography.x509 import load_der_x509_certificate
from keylime.common import validators
from keylime.db.registrar_db import RegistrarMain
from keylime.db.keylime_db import DBEngineManager, SessionManager
from keylime import config
from keylime import crypto
from keylime import json
from keylime.tpm import tpm2_objects
from keylime import keylime_logging
from keylime.tpm.tpm_main import tpm
from keylime import api_version as keylime_api_version
from keylime import web_util
# Module-level logger for the registrar service.
logger = keylime_logging.init_logging('registrar')
# Create the registrar database engine up front; the service cannot run
# without it, so a failure here aborts the process.
try:
    engine = DBEngineManager().make_engine('registrar')
except SQLAlchemyError as err:
    logger.error('Error creating SQL engine: %s', err)
    sys.exit(1)
class ProtectedHandler(BaseHTTPRequestHandler, SessionManager):
    def do_HEAD(self):
        """Reject HEAD requests with HTTP 405 (method not allowed)."""
        web_util.echo_json_response(self, 405, "HEAD not supported")
    def do_PATCH(self):
        """Reject PATCH requests with HTTP 405 (method not allowed)."""
        web_util.echo_json_response(self, 405, "PATCH not supported")
def do_GET(self):
"""This method handles the GET requests to retrieve status on agents from the Registrar Server.
Currently, only agents resources are available for GETing, i.e. /agents. All other GET uri's
will return errors. agents requests require a single agent_id parameter which identifies the
agent to be returned. If the agent_id is not found, a 404 response is returned.
"""
session = SessionManager().make_session(engine)
rest_params = web_util.get_restful_params(self.path)
if rest_params is None:
web_util.echo_json_response(
self, 405, "Not Implemented: Use /agents/ interface")
return
if not rest_params["api_version"]:
web_util.echo_json_response(self, 400, "API Version not supported")
return
if "agents" not in rest_params:
web_util.echo_json_response(self, 400, "uri not supported")
logger.warning('GET returning 400 response. uri not supported: %s', self.path)
return
agent_id = rest_params["agents"]
if agent_id is not None:
# If the agent ID is not valid (wrong set of characters),
# just do nothing.
if not validators.valid_agent_id(agent_id):
web_util.echo_json_response(self, 400, "agent_id not not valid")
logger.error("GET received an invalid agent ID: %s", agent_id)
return
try:
agent = session.query(RegistrarMain).filter_by(
agent_id=agent_id).first()
except SQLAlchemyError as e:
logger.error('SQLAlchemy Error: %s', e)
if agent is None:
web_util.echo_json_response(self, 404, "agent_id not found")
logger.warning('GET returning 404 response. agent_id %s not found.', agent_id)
return
if not bool(agent.active):
web_util.echo_json_response(self, 404, "agent_id not yet active")
logger.warning('GET returning 404 response. agent_id %s not yet active.', agent_id)
return
response = {
'aik_tpm': agent.aik_tpm,
'ek_tpm': agent.ek_tpm,
'ekcert': agent.ekcert,
'mtls_cert': agent.mtls_cert,
'ip': agent.ip,
'port': agent.port,
'regcount': agent.regcount,
}
if agent.virtual:
response['provider_keys'] = agent.provider_keys
web_util.echo_json_response(self, 200, "Success", response)
logger.info('GET returning 200 response for agent_id: %s', agent_id)
else:
# return the available registered uuids from the DB
json_response = session.query(RegistrarMain.agent_id).all()
return_response = [item[0] for item in json_response]
web_util.echo_json_response(self, 200, "Success", {
'uuids': return_response})
logger.info('GET returning 200 response for agent_id list')
return
def do_POST(self):
"""POST not supported"""
web_util.echo_json_response(
self, 405, "POST not supported via TLS interface")
def do_PUT(self):
"""PUT not supported"""
web_util.echo_json_response(
self, 405, "PUT not supported via TLS interface")
def do_DELETE(self):
"""This method handles the DELETE requests to remove agents from the Registrar Server.
Currently, only agents resources are available for DELETEing, i.e. /agents. All other DELETE uri's will return errors.
agents requests require a single agent_id parameter which identifies the agent to be deleted.
"""
session = SessionManager().make_session(engine)
rest_params = web_util.get_restful_params(self.path)
if rest_params is None:
web_util.echo_json_response(
self, 405, "Not Implemented: Use /agents/ interface")
return
if not rest_params["api_version"]:
web_util.echo_json_response(self, 400, "API Version not supported")
return
if "agents" not in rest_params:
web_util.echo_json_response(self, 400, "URI not supported")
logger.warning('DELETE agent returning 400 response. uri not supported: %s', self.path)
return
agent_id = rest_params["agents"]
if agent_id is not None:
# If the agent ID is not valid (wrong set of characters),
# just do nothing.
if not validators.valid_agent_id(agent_id):
web_util.echo_json_response(self, 400, "agent_id not not valid")
logger.error("DELETE received an invalid agent ID: %s", agent_id)
return
if session.query(RegistrarMain).filter_by(agent_id=agent_id).delete():
# send response
try:
session.commit()
except SQLAlchemyError as e:
logger.error('SQLAlchemy Error: %s', e)
web_util.echo_json_response(self, 200, "Success")
return
# send response
web_util.echo_json_response(self, 404)
return
web_util.echo_json_response(self, 404)
# pylint: disable=W0622
def log_message(self, format, *args):
return
class UnprotectedHandler(BaseHTTPRequestHandler, SessionManager):
    def do_HEAD(self):
        """HEAD not supported on the unprotected interface."""
        web_util.echo_json_response(self, 405, "HEAD not supported")
    def do_PATCH(self):
        """PATCH not supported on the unprotected interface."""
        web_util.echo_json_response(self, 405, "PATCH not supported")
def do_GET(self):
"""This method handles the GET requests to the unprotected side of the Registrar Server
Currently the only supported path is /versions which shows the supported API versions
"""
rest_params = web_util.get_restful_params(self.path)
if rest_params is None:
web_util.echo_json_response(
self, 405, "Not Implemented: Use /version/ interface")
return
if "version" not in rest_params:
web_util.echo_json_response(self, 400, "URI not supported")
logger.warning('GET agent returning 400 response. URI not supported: %s', self.path)
return
version_info = {
"current_version": keylime_api_version.current_version(),
"supported_versions": keylime_api_version.all_versions(),
}
web_util.echo_json_response(self, 200, "Success", version_info)
def do_POST(self):
"""This method handles the POST requests to add agents to the Registrar Server.
Currently, only agents resources are available for POSTing, i.e. /agents. All other POST uri's
will return errors. POST requests require an an agent_id identifying the agent to add, and json
block sent in the body with 2 entries: ek and aik.
"""
session = SessionManager().make_session(engine)
rest_params = web_util.get_restful_params(self.path)
if rest_params is None:
web_util.echo_json_response(
self, 405, "Not Implemented: Use /agents/ interface")
return
if not rest_params["api_version"]:
web_util.echo_json_response(self, 400, "API Version not supported")
return
if "agents" not in rest_params:
web_util.echo_json_response(self, 400, "uri not supported")
logger.warning('POST agent returning 400 response. uri not supported: %s', self.path)
return
agent_id = rest_params["agents"]
if agent_id is None:
web_util.echo_json_response(self, 400, "agent id not found in uri")
logger.warning('POST agent returning 400 response. agent id not found in uri %s', self.path)
return
# If the agent ID is not valid (wrong set of characters), just
# do nothing.
if not validators.valid_agent_id(agent_id):
web_util.echo_json_response(self, 400, "agent id not valid")
logger.error("POST received an invalid agent ID: %s", agent_id)
return
try:
content_length = int(self.headers.get('Content-Length', 0))
if content_length == 0:
web_util.echo_json_response(
self, 400, "Expected non zero content length")
logger.warning('POST for %s returning 400 response. Expected non zero content length.', agent_id)
return
post_body = self.rfile.read(content_length)
json_body = json.loads(post_body)
ekcert = json_body['ekcert']
aik_tpm = json_body['aik_tpm']
initialize_tpm = tpm()
if ekcert is None or ekcert == 'emulator':
logger.warning('Agent %s did not submit an ekcert', agent_id)
ek_tpm = json_body['ek_tpm']
else:
if 'ek_tpm' in json_body:
# This would mean the agent submitted both a non-None ekcert, *and*
# an ek_tpm... We can deal with it by just ignoring the ek_tpm they sent
logger.warning('Overriding ek_tpm for agent %s from ekcert', agent_id)
# If there's an EKCert, we just overwrite their ek_tpm
# Note, we don't validate the EKCert here, other than the implicit
# "is it a valid x509 cert" check. So it's still untrusted.
# This will be validated by the tenant.
ek509 = load_der_x509_certificate(
base64.b64decode(ekcert),
backend=default_backend(),
)
ek_tpm = base64.b64encode(
tpm2_objects.ek_low_tpm2b_public_from_pubkey(
ek509.public_key(),
)
).decode()
aik_attrs = tpm2_objects.get_tpm2b_public_object_attributes(
base64.b64decode(aik_tpm),
)
if aik_attrs != tpm2_objects.AK_EXPECTED_ATTRS:
web_util.echo_json_response(
self, 400, "Invalid AK attributes")
logger.warning(
"Agent %s submitted AIK with invalid attributes! %s (provided) != %s (expected)",
agent_id,
tpm2_objects.object_attributes_description(aik_attrs),
tpm2_objects.object_attributes_description(tpm2_objects.AK_EXPECTED_ATTRS),
)
return
# try to encrypt the AIK
(blob, key) = initialize_tpm.encryptAIK(
agent_id,
base64.b64decode(ek_tpm),
base64.b64decode(aik_tpm),
)
# special behavior if we've registered this uuid before
regcount = 1
try:
agent = session.query(RegistrarMain).filter_by(
agent_id=agent_id).first()
except NoResultFound:
agent = None
except SQLAlchemyError as e:
logger.error('SQLAlchemy Error: %s', e)
raise
if agent is not None:
# keep track of how many ek-ekcerts have registered on this uuid
regcount = agent.regcount
if agent.ek_tpm != ek_tpm or agent.ekcert != ekcert:
logger.warning('WARNING: Overwriting previous registration for this UUID with new ek-ekcert pair!')
regcount += 1
# force overwrite
logger.info('Overwriting previous registration for this UUID.')
try:
session.query(RegistrarMain).filter_by(
agent_id=agent_id).delete()
session.commit()
except SQLAlchemyError as e:
logger.error('SQLAlchemy Error: %s', e)
raise
# Check for ip and port
contact_ip = json_body.get('ip', None)
contact_port = json_body.get('port', None)
# Validate ip and port
if contact_ip is not None:
try:
# Use parser from the standard library instead of implementing our own
ipaddress.ip_address(contact_ip)
except ValueError:
logger.warning("Contact ip for agent %s is not a valid ip got: %s.", agent_id, contact_ip)
contact_ip = None
if contact_port is not None:
try:
contact_port = int(contact_port)
if contact_port < 1 or contact_port > 65535:
logger.warning("Contact port for agent %s is not a number between 1 and got: %s.",
agent_id, contact_port)
contact_port = None
except ValueError:
logger.warning("Contact port for agent %s is not a valid number got: %s.",
agent_id, contact_port)
contact_port = None
# Check for mTLS cert
mtls_cert = json_body.get('mtls_cert', None)
if mtls_cert is None:
logger.warning("Agent %s did | |
<filename>python/SDTransferUtility.pyw
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 09 14:57:26 2013
@author: pokeeffe
"""
import os
import os.path as osp
import logging
from sys import argv
from datetime import datetime
from glob import glob
from Tkinter import *
from tkFileDialog import askdirectory, asksaveasfilename
from tkMessageBox import showerror
from ScrolledText import ScrolledText
from ttk import Treeview
try:
# Homepage: https://github.com/ianare/exif-py
from exifread import process_file as get_exif_tags
except ImportError:
Tk().withdraw() # hide default window
msg = ('The module `exifread` is missing. Please install by running "pip '
'install exifread" \nas an Administrator. The source can be found'
' at https://github.com/ianare/exif-py. \n\nPress OK to exit.')
showerror(title='Missing module', message=msg)
exit()
from PIL import Image, ImageTk
from win32file import GetDriveType, DRIVE_REMOVABLE
from definitions.sites import site_list
from definitions.paths import TIMELAPSE_PHOTOS
from version import version as __version__
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
logger.addHandler(logging.NullHandler())
class SDTransferUtility(Frame):
"""GUI program for transferring timelapse images from SD cards"""
    def __init__(self, parent=None, search_dir=None, search_str=None):
        """Build the transfer-utility frame.

        Args:
            parent: optional Tk parent widget.
            search_dir: optional initial root directory to search.
            search_str: optional initial glob pattern; if given, a search
                runs immediately after the GUI is built.
        """
        Frame.__init__(self, parent)
        self.pack(padx=5, pady=5, expand=YES, fill=BOTH)
        self._prog_title = 'Timelapse Image Transfer Utility'
        # maps source dir -> {'dest_dir', 'site_code', 'dest_names'}
        self._sources = {}
        self._log_output = IntVar(value=0)
        self._log_fname = StringVar(value='')
        self._search_dir = StringVar()
        self._search_str = StringVar()
        # holds the current PhotoImage so Tk doesn't garbage-collect it
        self._preview_img = None
        if search_dir: self._search_dir.set(search_dir)
        if search_str: self._search_str.set(search_str)
        self.master.title(self._prog_title)
        self.__gui_setup()
        if self._search_dir.get():
            self.__enable_eject_btn()
        if self._search_str.get():
            self.__search()
    def __gui_setup(self):
        """Build up GUI from widgets"""
        # Layout: an outer vertical paned window splits a top row
        # (search + results | preview) from a bottom row (log | buttons).
        topfrm = Frame(self)
        topfrm.pack(expand=YES, fill=BOTH, side=TOP)
        Label(topfrm, text=self._prog_title).pack(side=TOP)
        out_vpane = PanedWindow(topfrm, orient=VERTICAL, sashrelief=GROOVE)
        btm_hpane = PanedWindow(out_vpane, orient=HORIZONTAL, sashrelief=GROOVE)
        top_hpane = PanedWindow(out_vpane, orient=HORIZONTAL, sashrelief=GROOVE)
        inn_vpane = PanedWindow(top_hpane, orient=VERTICAL, sashrelief=GROOVE)
        out_vpane.pack(side=TOP, fill=BOTH, expand=YES, padx=5, pady=5)
        btm_hpane.pack(side=TOP, fill=BOTH, expand=YES, padx=5, pady=5)
        top_hpane.pack(side=TOP, fill=BOTH, expand=YES, padx=5, pady=5)
        inn_vpane.pack(side=TOP, fill=BOTH, expand=YES, padx=5, pady=5)
        # build each pane's contents, then wire the panes together
        win_search = self.__gui_search(inn_vpane)
        win_results = self.__gui_results(inn_vpane)
        win_preview = self.__gui_preview(top_hpane)
        win_logger = self.__gui_logger(btm_hpane)
        win_buttons = self.__gui_buttons(btm_hpane)
        out_vpane.add(btm_hpane)
        out_vpane.add(top_hpane, before=btm_hpane)
        top_hpane.add(inn_vpane)
        inn_vpane.add(win_search)
        inn_vpane.add(win_results)
        top_hpane.add(win_preview)
        btm_hpane.add(win_logger)
        btm_hpane.add(win_buttons)
        # minimum pane sizes keep the layout usable when resized
        out_vpane.paneconfigure(top_hpane, minsize=100)
        out_vpane.paneconfigure(btm_hpane, minsize=100)
        top_hpane.paneconfigure(inn_vpane)
        inn_vpane.paneconfigure(win_search, padx=5, pady=5, minsize=50)
        inn_vpane.paneconfigure(win_results, padx=5, pady=5, minsize=50)
        top_hpane.paneconfigure(win_preview, padx=5, pady=5, minsize=300)
        btm_hpane.paneconfigure(win_logger, padx=5, pady=5, minsize=300)
        btm_hpane.paneconfigure(win_buttons, padx=5, pady=5, minsize=100)
    def __gui_search(self, parent=None):
        """Upper pane for entry fields, buttons, so on """
        thispane = LabelFrame(parent, padx=5, pady=5, relief=RIDGE,
                              text='Source Image Search')
        # row 1: root directory entry + browse button
        row1 = Frame(thispane)
        lbl_dir = Label(row1, text='Root search dir.:')
        ent_dir = Entry(row1, textvariable=self._search_dir)
        btn_dir = Button(row1, text='Browse', command=self.__set_search_dir)
        # row 2: glob pattern entry + search button
        row2 = Frame(thispane)
        lbl_find = Label(row2, text='Match pattern:')
        ent_find = Entry(row2, textvariable=self._search_str)
        btn_find = Button(row2, text='Search', command=self.__search)
        lbl_dir.pack(side=LEFT)
        btn_dir.pack(side=RIGHT, padx=(5,0))
        ent_dir.pack(side=LEFT, expand=YES, fill=X)
        row1.pack(side=TOP, expand=NO, fill=X, pady=(0,5))
        lbl_find.pack(side=LEFT)
        btn_find.pack(side=RIGHT, padx=(5,0))
        ent_find.pack(side=LEFT, expand=YES, fill=X)
        row2.pack(side=TOP, expand=NO, fill=X)
        return thispane
    def __gui_results(self, parent=None):
        """Window with tree of files found sorted by directory"""
        thispane = LabelFrame(parent, padx=5, pady=5, relief=RIDGE,
                              text='Search Results')
        lbl = Label(thispane, anchor=W,
                    text='Right-click directory name, then select source site')
        lbl.pack(side=TOP, expand=NO, fill=X, pady=(0,5))
        # one top-level row per source directory; the 'destname' column
        # shows the computed destination path/file name
        self._sourcetree = Treeview(thispane,
                                    columns=('destname'),
                                    selectmode='browse')
        self._sourcetree.heading('destname', text='Destination', anchor=W)
        self._sourcetree.pack(side=TOP, expand=YES, fill=BOTH)
        return thispane
    def __gui_preview(self, parent=None):
        """Middle pane interacting with user"""
        thispane = LabelFrame(parent, padx=5, pady=5, relief=RIDGE,
                              text='Image Preview')
        # a Button is used as the preview surface (compound text + image)
        self._preview = Button(thispane)
        self._preview.pack(side=TOP, expand=YES, fill=BOTH)
        return thispane
class __TextLogger(logging.Handler):
"""Tie logger into Tkinter Text object"""
def __init__(self, widget):
logging.Handler.__init__(self)
self.text = widget
def emit(self, record):
self.text.insert(END, record.msg + '\n')
self.text.see(END)
    def __gui_logger(self, parent=None):
        """Lower pane with logging output"""
        thispane = LabelFrame(parent, padx=5, pady=5, relief=RIDGE,
                              text='Logging')
        hfrm = Frame(thispane)
        chb_logging = Checkbutton(hfrm, text='Log output to: ',
                                  variable=self._log_output)
        ent_logpath = Entry(hfrm, textvariable=self._log_fname)
        btn_browse = Button(hfrm, text='Browse', command=self.__set_logfile)
        chb_logging.pack(expand=NO, fill=X, side=LEFT)
        ent_logpath.pack(expand=YES, fill=X, side=LEFT)
        btn_browse.pack(expand=NO, padx=(5,0), side=RIGHT)
        # NOTE(review): the log-to-file row is built but never packed
        # (next line commented out), so it is invisible -- confirm intended
        # hfrm.pack(expand=NO, fill=X, side=BOTTOM, pady=(5,0))
        self.__logpane = ScrolledText(thispane, height=12)
        self.__logpane.pack(expand=YES, fill=BOTH, side=BOTTOM)
        ## tie into logging
        log2pane = self.__TextLogger(self.__logpane)
        log2pane.setLevel(logging.INFO)
        logger.addHandler(log2pane)
        return thispane
    def __gui_buttons(self, parent=None):
        """Lower-right pane containing action buttons"""
        thispane = Frame(parent)
        # disabled until every found directory has a site assigned
        go_btn = Button(thispane, text='Begin processing',
                        command=self.__transfer_images,
                        state=DISABLED)
        eject_btn = Button(thispane, text='Eject source dir.',
                           command=self.__eject_srch_dir)
        quit_btn = Button(thispane, text='Exit program',
                          command=self.__quit)
        go_btn.pack(side=TOP)
        quit_btn.pack(side=BOTTOM)
        eject_btn.pack(side=BOTTOM, pady=(0, 5))
        # keep handles so other methods can enable/disable these buttons
        self.__begin_proc_btn = go_btn
        self.__eject_src_btn = eject_btn
        return thispane
def __gui_popup(self, event):
"""Pop-up context menu for selecting site"""
w = self._sourcetree
row = w.identify_row(event.y)
menu = Menu(tearoff=0)
for site in site_list:
def make_caller(iid, code):
return lambda: self.__set_srcdir_site(iid=iid, code=code)
site = site.code
menu.add_command(label=site, command=make_caller(row, site))
menu.post(event.x_root, event.y_root)
##### GUI ^ / LOGIC v #####
    def __set_search_dir(self):
        """Browse to source directory"""
        oldchoice = self._search_dir.get()
        choice = askdirectory(title='Select source directory',
                              mustexist=True)
        if choice and osp.isdir(choice):
            choice = osp.normpath(choice)
            self._search_dir.set(choice)
            self.__enable_eject_btn()
            # a new directory invalidates any earlier search results
            if choice != oldchoice:
                self._sources.clear()
                self.__refresh_treeview()
    def __set_srcdir_site(self, iid, code):
        """set key from None to site's code"""
        # look up the source directory for the clicked tree row, derive its
        # destination from the site code, then refresh the view and buttons
        srcdir = self._sourcetree.item(iid, option='text')
        destdir = TIMELAPSE_PHOTOS % {'site' : code}
        self._sources[srcdir]['dest_dir'] = destdir
        self._sources[srcdir]['site_code'] = code
        self.__refresh_treeview()
        self.__enable_processing()
    def __set_logfile(self):
        """Browse to target log file and remember the chosen path."""
        fname = asksaveasfilename(title='Log to file', parent=self)
        if fname:
            self._log_fname.set(fname)
    def __enable_eject_btn(self):
        """if the source drive is removable, enable the 'eject' button"""
        state = DISABLED
        srcdir = self._search_dir.get()
        if osp.isdir(srcdir):
            drive, _ = osp.splitdrive(srcdir)
            # win32 query: only removable media (e.g. SD readers) qualify
            if GetDriveType(drive) == DRIVE_REMOVABLE:
                logger.info('Source directory drive type is DRIVE_REMOVABLE so '
                            'enabling eject button')
                state = NORMAL
        self.__eject_src_btn.configure(state=state)
def __enable_processing(self):
"""if conditions are OK, enable the 'begin processing' button"""
state = NORMAL
for _, info in self._sources.items():
if not info['dest_dir']:
state = DISABLED
if not info['site_code']:
state = DISABLED
self.__begin_proc_btn.configure(state=state)
    def __search(self):
        """Search for files in source directory"""
        globstr = osp.join(self._search_dir.get(), self._search_str.get())
        files_found = glob(globstr)
        self._sources.clear()
        # group hits by directory; destination names are filled in later by
        # __refresh_treeview once a site code has been chosen
        for f in files_found:
            this_dir = self._sources.setdefault(osp.dirname(f), {})
            this_dir.setdefault('dest_dir', None) # not used
            this_dir.setdefault('site_code', None) # just set defaults
            dest_names = this_dir.setdefault('dest_names', {})
            dest_names[f] = None # init to none
        self.__refresh_treeview()
    def __refresh_treeview(self):
        """Construct tree view data model"""
        w = self._sourcetree
        # remember open tree controls & current selection
        selected_row = w.selection()
        open_nodes = []
        open_opt = BooleanVar() # for coercing _tkinter objects to bool
        for row in w.get_children():
            open_opt.set(str(w.item(row, option='open'))) # force to bool
            if open_opt.get():
                open_nodes.append(row)
            w.delete(row)
        # populate
        for src_dir in sorted(self._sources.keys()):
            dest_dir = self._sources[src_dir]['dest_dir']
            dest_names = self._sources[src_dir]['dest_names']
            site_code = self._sources[src_dir]['site_code']
            dest_str = dest_dir or '<not yet determined>'
            w.insert('', END, iid=src_dir, text=src_dir,
                     tag='dir', values=[dest_str])
            for src_name in sorted(dest_names.keys()):
                # destination name comes from EXIF timestamp; the %(code)s
                # placeholder is substituted once a site is assigned
                dest_name = self.__dest_fname_mask(src_name)
                if site_code:
                    dest_name = dest_name % {'code' : site_code}
                dest_names[src_name] = dest_name
                w.insert(src_dir, END, text=osp.basename(src_name), tag='img',
                         iid=src_name, values=[dest_name])
        w.tag_bind('dir', sequence='<Button-3>', callback=self.__gui_popup)
        w.bind('<<TreeviewSelect>>', lambda event: self.__preview_img())
        # restore open tree controls & select previous item
        topchildren = w.get_children()
        for row in open_nodes:
            if row in topchildren:
                w.item(row, open=True)
        # NOTE(review): selection() returns a tuple but exists() expects a
        # single item id -- confirm this restore path behaves as intended
        if selected_row and w.exists(selected_row):
            w.selection_set(selected_row)
    def __preview_img(self):
        """Calculate size of, then display image"""
        # event not used
        w = self._sourcetree
        if w.focus():
            fname = w.item(w.focus(), option='text')
            if fname in self._sources.keys():
                # a directory row is focused: clear any existing preview
                if self._preview_img:
                    self._preview.configure(text='', image=None)
                    self._preview_img = None
            else:
                srcdir = w.item(w.parent(w.focus()), option='text')
                imgpath = osp.join(srcdir, fname)
                try:
                    img = Image.open(imgpath)
                    wd = self._preview.winfo_width() # button dimensions
                    ht = self._preview.winfo_height() - 25 # text label space
                    img.thumbnail((wd,ht), Image.ANTIALIAS)
                    # keep a reference on self so Tk doesn't lose the image
                    self._preview_img = ImageTk.PhotoImage(img)
                    self._preview.configure(text=imgpath,
                                            image=self._preview_img,
                                            compound=TOP)
                except Exception as ex:
                    self._preview.configure(text='<Preview not available>',
                                            image=None)
                    self._preview_img = None
                    # NOTE(review): ex.message is Python 2 only
                    logger.debug('Exception prevented image preview load:\n'
                                 + ex.message)
def __dest_fname_mask(self, fname):
"""Return image destination name file mask
In form of `%(site)s_YYYYMMDD.hhmm` YYYYMMDD is year/month/day, hhmm
is (24) hour/min, and %(site)s is for dict-style string substitution.
"""
_, ext = osp.splitext(fname)
tags = get_exif_tags(open(fname, mode='rb'),
details=False,
stop_tag='DateTimeOriginal')
timestamp = str(tags['EXIF DateTimeOriginal'])
dt = datetime.strptime(timestamp, '%Y:%m:%d %H:%M:%S')
return dt.strftime('%%(code)s_%Y%m%d.%H%M'+ext.lower())
    def __transfer_images(self):
        """Process image files from results objects"""
        if self._preview_img:
            self._preview.configure(text='', image=None) # de-associate
            self._preview_img = None # then release image file
        dirs_to_remove = []
        for srcdir, info in sorted(self._sources.items()):
            dest_names = info['dest_names']
            if not dest_names:
                continue
            dest_dir = info['dest_dir']
            # create destination; tolerate it already existing
            try:
                os.makedirs(dest_dir)
            except OSError as e:
                if not osp.isdir(dest_dir):
                    raise e
            files_to_remove = []
            for src_path, dest_file in sorted(dest_names.items()):
                dest_path = osp.join(dest_dir, dest_file)
                moved = self.__move_image(src_path, dest_path)
                if moved:
                    files_to_remove.append(src_path)
            for ea in files_to_remove:
                dest_names.pop(ea) # remove file name if moved
            if not dest_names:
                dirs_to_remove.append(srcdir)
        for ea in dirs_to_remove:
            self._sources.pop(ea, None) # remove dirs with no files left
        self.__refresh_treeview()
def __move_image(self, src, dst):
"""Move single image; threadable"""
try:
os.rename(src, dst)
logger.info('Moved %s to %s' % (src, dst))
return True
except WindowsError as err:
logger.info('Error moving %s (file skipped): %s' %
(src, err.strerror))
return False
    def __eject_srch_dir(self):
        """attempt to eject source directory"""
        to_eject = self._search_dir.get()
        if not to_eject or not osp.isdir(to_eject):
            return
        drive, _ = osp.splitdrive(to_eject)
        if GetDriveType(drive) != DRIVE_REMOVABLE:
            logger.info('NOT A REMOVABLE DRIVE!')
            return
        # relies on the bundled usb_disk_eject.exe helper next to the script
        if not osp.isfile('usb_disk_eject.exe'):
            logger.info('CANNOT FIND DISK EJECTING SOFTWARE!')
            return
        try:
            driveletter = drive.strip(':')
            cwd = osp.dirname(argv[0])
            eject = osp.join('"'+cwd, 'usb_disk_eject.exe" /REMOVELETTER %s')
            os.system(eject % driveletter)
            logger.info('SUCCESS EJECTING DISK!')
        except Exception as err:
            # NOTE(review): err.message is Python 2 only
            logger.info('WAS NOT ABLE TO EXIT! Exception:\n' + err.message)
def __quit(self):
"""exit program"""
# reserved for | |
logs = self._run_benchmark_cnn_with_fake_images(params, images, labels)
training_outputs = test_util.get_training_outputs_from_logs(
logs, params.print_training_accuracy)
last_output = training_outputs[-1]
# TODO(reedwm): These should be assertEqual but for some reason,
# occasionally the accuracies are lower (Running this test 500 times, these
# asserts failed twice). Investigate this problem.
self.assertLessEqual(last_output.top_1_accuracy, 0.1)
self.assertLessEqual(last_output.top_5_accuracy, 0.5)
def testParameterServer(self):
params = test_util.get_params('testParameterServer')
self._train_and_eval_local(params)
def testParameterServerStaged(self):
params = test_util.get_params('testParameterServerStaged')._replace(
staged_vars=True)
self._train_and_eval_local(params)
def testReplicated(self):
params = test_util.get_params('testReplicated')._replace(
variable_update='replicated')
self._train_and_eval_local(params)
def testIndependent(self):
params = test_util.get_params('testIndependent')._replace(
variable_update='independent')
self._train_and_eval_local(params)
def testForwardOnly(self):
params = test_util.get_params('testForwardOnly')._replace(forward_only=True)
# Evaluation is not supported with --forward_only, so we set skip='eval'.
self._train_and_eval_local(params, skip='eval')
def testForwardOnlyAndFreeze(self):
params = test_util.get_params('testForwardOnlyAndFreeze')._replace(
forward_only=True, freeze_when_forward_only=True, train_dir=None)
# Training is not supported with --freeze_when_forward_only.
self._train_and_eval_local(params, skip='eval_and_train_from_checkpoint')
def testForwardOnlyAndFreezeWithTrt(self):
params = test_util.get_params('testForwardOnlyAndFreeze')._replace(
forward_only=True, freeze_when_forward_only=True, train_dir=None,
trt_mode='FP32'
)
# Training is not supported with --freeze_when_forward_only.
self._train_and_eval_local(params, skip='eval_and_train_from_checkpoint')
def testNoDistortions(self):
params = test_util.get_params('testNoDistortions')._replace(
distortions=False)
self._train_and_eval_local(params)
def testCpuAsLocalParamDevice(self):
params = test_util.get_params('testCpuAsLocalParamDevice')._replace(
local_parameter_device='cpu')
self._train_and_eval_local(params)
def testNHWC(self):
params = test_util.get_params('testNHWC')._replace(data_format='NHWC')
self._train_and_eval_local(params)
def testCpuAsDevice(self):
params = test_util.get_params('testCpuAsDevice')._replace(
device='cpu', data_format='NHWC') # NHWC required when --device=cpu
self._train_and_eval_local(params)
def testMomentumParameterServer(self):
params = test_util.get_params('testMomentumParameterServer')._replace(
optimizer='momentum', momentum=0.8)
self._train_and_eval_local(params)
def testRmspropReplicated(self):
params = test_util.get_params('testRmspropReplicated')._replace(
variable_update='replicated',
optimizer='rmsprop',
rmsprop_decay=0.8,
rmsprop_momentum=0.6,
rmsprop_epsilon=0.7,
init_learning_rate=0.01)
self._train_and_eval_local(params)
def testBatchGroupSize(self):
params = test_util.get_params('testBatchGroupSize')._replace(
batch_group_size=4, num_batches=100, num_warmup_batches=5)
self._train_and_eval_local(params)
def testGradientClip(self):
params = test_util.get_params('testGradientClip')._replace(
gradient_clip=100.0)
self._train_and_eval_local(params)
def testWeightDecay(self):
params = test_util.get_params('testWeightDecay')._replace(
weight_decay=0.0001)
self._train_and_eval_local(params)
def testNoLayers(self):
params = test_util.get_params('testNoLayers')._replace(use_tf_layers=False)
self._train_and_eval_local(params)
  def testSaveModelSteps(self):
    """Checks checkpoints are saved every 2 steps and only the last 3 kept."""
    params = test_util.get_params('testSaveModelSteps')._replace(
        save_model_steps=2, num_warmup_batches=0, num_batches=10,
        max_ckpts_to_keep=3)
    self._train_and_eval_local(params)
    for i in range(1, 20 + 1):
      # We train for 20 steps, since self._train_and_eval_local() does two
      # training runs of 10 steps each. We save a checkpoint every 2 steps and
      # keep the last 3 checkpoints, so at the end, we should have checkpoints
      # for steps 16, 18, and 20.
      matches = glob.glob(os.path.join(params.train_dir,
                                       'model.ckpt-{}.*'.format(i)))
      if i in (16, 18, 20):
        self.assertTrue(matches)
      else:
        self.assertFalse(matches)
def testFp16WithFp32Vars(self):
params = test_util.get_params('testFp16WithFp32Vars')._replace(
use_fp16=True, fp16_vars=False, fp16_loss_scale=1.)
self._train_and_eval_local(params)
def testFp16WithFp16Vars(self):
params = test_util.get_params('testFp16WithFp16Vars')._replace(
use_fp16=True, fp16_vars=True)
self._train_and_eval_local(params)
def testGradientRepacking(self):
params = test_util.get_params('testGradientRepacking1')._replace(
gradient_repacking=2)
self._train_and_eval_local(params, skip='eval_and_train_from_checkpoint')
params = test_util.get_params('testGradientRepacking2')._replace(
gradient_repacking=2, use_fp16=True)
self._train_and_eval_local(params, skip='eval_and_train_from_checkpoint')
def testTraceFileChromeTraceFormat(self):
trace_file = os.path.join(self.get_temp_dir(),
'testTraceFileChromeTraceFormat_tracefile')
params = test_util.get_params('testTraceFileChromeTraceFormat')._replace(
trace_file=trace_file, use_chrome_trace_format=True)
self._train_and_eval_local(params)
self.assertGreater(os.stat(trace_file).st_size, 0)
def testTraceFileStepStatsProto(self):
trace_file = os.path.join(self.get_temp_dir(),
'testTraceFileStepStatsProto_tracefile')
params = test_util.get_params('testTraceFileStepStatsProto')._replace(
trace_file=trace_file, use_chrome_trace_format=False)
self._train_and_eval_local(params)
self.assertGreater(os.stat(trace_file).st_size, 0)
with open(trace_file) as f:
step_stats = step_stats_pb2.StepStats()
# The following statement should not raise an exception.
contents = f.read()
text_format.Merge(contents, step_stats)
def testTfprofFile(self):
tfprof_file = os.path.join(self.get_temp_dir(), 'testTfprofFile_tfproffile')
params = test_util.get_params('testTfprofFile')._replace(
tfprof_file=tfprof_file)
self._train_and_eval_local(params, skip='eval_and_train_from_checkpoint')
self.assertGreater(os.stat(tfprof_file).st_size, 0)
with open(tfprof_file, 'rb') as f:
profile_proto = tfprof_log_pb2.ProfileProto()
# The following statement should not raise an exception.
profile_proto.ParseFromString(f.read())
def testMoveTrainDir(self):
params = test_util.get_params('testMoveTrainDir')
self._train_and_eval_local(params)
new_train_dir = params.train_dir + '_moved'
os.rename(params.train_dir, new_train_dir)
params = params._replace(train_dir=new_train_dir, eval=True)
self._run_benchmark_cnn_with_black_and_white_images(params)
  @mock.patch('tensorflow.train.Saver')
  @mock.patch('benchmark_cnn._get_checkpoint_to_load')
  def testLoadCheckpoint(self, mock_checkpoint_to_load, mock_saver):
    """Tests load checkpoint with full path to checkpoint."""
    expected_checkpoint = '/path/to/checkpoints/model.ckpt-1243'
    mock_checkpoint_to_load.return_value = expected_checkpoint
    # the returned global step is parsed from the checkpoint name suffix
    global_batch = benchmark_cnn.load_checkpoint(mock_saver,
                                                 None,
                                                 expected_checkpoint)
    self.assertEqual(global_batch, 1243)
def testGetCheckpointToLoadFullPath(self):
"""Tests passing full path."""
ckpt_path = '/foo/bar/model.ckpt-189'
full_path = benchmark_cnn._get_checkpoint_to_load(ckpt_path)
self.assertEqual(full_path, ckpt_path)
def testGetCheckpointToLoadException(self):
"""Tests exception for directory without a checkpoint."""
ckpt_path = '/foo/bar/checkpoints'
self.assertRaises(benchmark_cnn.CheckpointNotFoundException,
benchmark_cnn._get_checkpoint_to_load, ckpt_path)
  @mock.patch('tensorflow.train.get_checkpoint_state')
  def testGetCheckpointToLoad(self, mock_checkpoint_state):
    """Tests passing path to checkpoint folder."""
    expected_checkpoint = '/path/to/checkpoints/model.ckpt-1243'
    # get_checkpoint_state is mocked to report this checkpoint as latest
    mock_checkpoint_state.return_value = mock.Mock(
        model_checkpoint_path=expected_checkpoint)
    ckpt_path = '/path/to/checkpoints/'
    full_path = benchmark_cnn._get_checkpoint_to_load(ckpt_path)
    self.assertEqual(full_path, expected_checkpoint)
def testImagenetPreprocessor(self):
imagenet_dir = os.path.join(platforms_util.get_test_data_dir(),
'fake_tf_record_data')
params = test_util.get_params('testImagenetPreprocessor')._replace(
data_dir=imagenet_dir, data_name='imagenet')
self._train_and_eval_local(params, use_test_preprocessor=False)
def testImagenetPreprocessorNoDistortions(self):
imagenet_dir = os.path.join(platforms_util.get_test_data_dir(),
'fake_tf_record_data')
params = test_util.get_params(
'testImagenetPreprocessorNoDistortions')._replace(
data_dir=imagenet_dir, data_name='imagenet', distortions=False)
self._train_and_eval_local(params, use_test_preprocessor=False)
def testImagenetPreprocessorVerboseSummary(self):
imagenet_dir = os.path.join(platforms_util.get_test_data_dir(),
'fake_tf_record_data')
params = test_util.get_params(
'testImagenetPreprocessorVerboseSummary')._replace(
data_dir=imagenet_dir, data_name='imagenet', distortions=False,
summary_verbosity=2)
self._train_and_eval_local(params, use_test_preprocessor=False)
def testImagenetPreprocessorWithoutMultiDeviceIterator(self):
imagenet_dir = os.path.join(platforms_util.get_test_data_dir(),
'fake_tf_record_data')
params = test_util.get_params(
'testImagenetPreprocessorWithoutMultiDeviceIterator')._replace(
data_dir=imagenet_dir, data_name='imagenet',
use_multi_device_iterator=False)
self._train_and_eval_local(params, use_test_preprocessor=False)
def testCifar10SyntheticData(self):
params = test_util.get_params('testCifar10SyntheticData')._replace(
data_name='cifar10')
self._train_and_eval_local(params)
def testShiftRatio(self):
test_util.monkey_patch_base_cluster_manager()
params = benchmark_cnn.make_params(
data_name='imagenet',
data_dir=os.path.join(platforms_util.get_test_data_dir(),
'fake_tf_record_data'),
job_name='worker',
worker_hosts='w1,w2,w3,w4',
ps_hosts='p1',
task_index=0)
self.assertEqual(
benchmark_cnn.BenchmarkCNN(params).input_preprocessor.shift_ratio, 0.0)
params = params._replace(task_index=3)
self.assertEqual(
benchmark_cnn.BenchmarkCNN(params).input_preprocessor.shift_ratio, 0.75)
  def testDistributedReplicatedSavableVars(self):
    """Checks savable_variables() covers the right variables.

    Builds an inception4 model under the distributed_replicated variable
    update scheme and asserts that every global variable (PS shadow copies
    plus global_step) and every first-tower local variable appears in the
    variable manager's savable_variables() map.
    """
    test_util.monkey_patch_base_cluster_manager()
    params = benchmark_cnn.make_params(
        variable_update='distributed_replicated',
        model='inception4',
        data_name='imagenet',
        data_dir=os.path.join(platforms_util.get_test_data_dir(),
                              'fake_tf_record_data'),
        job_name='worker',
        worker_hosts='w1,w2,w3,w4',
        ps_hosts='p1',
        datasets_use_prefetch=False)
    bench = benchmark_cnn.BenchmarkCNN(params)
    with tf.Graph().as_default():
      bench._build_model()
      savable_vars = bench.variable_mgr.savable_variables()
      # Assert all global variables are in savable_vars
      for v in tf.global_variables():
        if not v.name.startswith(
            variable_mgr_util.PS_SHADOW_VAR_PREFIX + '/v0'):
          # The only global variable outside the PS shadow scope should be
          # the global step counter.
          self.assertEqual(v.name, 'global_step:0')
        # savable_variables() keys are names without the ':0' port and
        # without the PS shadow prefix.
        name = bench.variable_mgr._strip_port(v.name)
        if name.startswith(variable_mgr_util.PS_SHADOW_VAR_PREFIX):
          name = name[len(variable_mgr_util.PS_SHADOW_VAR_PREFIX + '/'):]
        self.assertIn(name, savable_vars)
        self.assertIn(savable_vars[name], tf.global_variables())
      # Assert all local variables on the first tower are in savable_vars
      for v in tf.local_variables():
        if v.name.startswith('v0/'):
          name = bench.variable_mgr._strip_port(v.name)
          self.assertIn(name, savable_vars)
def _test_preprocessing_eval(self, image_height, image_width, output_height,
output_width):
image = tf.fill((image_height, image_width, 3),
tf.constant(128, dtype=tf.uint8))
params = benchmark_cnn.make_params()
new_image = preprocessing.eval_image(image, output_height, output_width, 0,
'bilinear', params.summary_verbosity)
with self.test_session() as sess:
new_image_value = sess.run(new_image)
self.assertAllEqual(new_image_value,
np.full((output_height, output_width, 3), 128,
dtype=np.uint8))
def testPreprocessingEval(self):
self._test_preprocessing_eval(10, 10, 4, 4)
self._test_preprocessing_eval(4, 4, 10, 10)
self._test_preprocessing_eval(1, 100, 100, 1)
self._test_preprocessing_eval(100, 1, 1, 100)
self._test_preprocessing_eval(1, 100, 1, 100)
def _test_preprocessing_traing(self, image_buf, image_color,
output_height, output_width, bbox,
batch_position, resize_method, distortions,
summary_verbosity, fuse_decode_and_crop):
new_image = preprocessing.train_image(
image_buf,
output_height,
output_width,
bbox,
batch_position,
resize_method,
distortions,
summary_verbosity=summary_verbosity,
fuse_decode_and_crop=fuse_decode_and_crop)
self.assertEqual(new_image.shape, [output_height, output_width, 3])
with self.test_session(use_gpu=True) as sess:
new_image_value = sess.run(new_image)
self.assertAllClose(
new_image_value,
np.full(
[output_height, output_width, 3],
image_color,
dtype=np.float32),
atol=50.,
rtol=0.)
  def testPreprocessingTrain(self):
    """Exercises train_image over the full grid of preprocessing configs.

    Decodes known solid-color JPEGs (black and white test fixtures) and
    checks, for every combination of output size, resize method,
    distortions, summary verbosity, and fused decode+crop, that the
    preprocessed image is close to the original color.
    """
    test_data_dir = os.path.join(platforms_util.get_test_data_dir(), 'images')
    black_file = os.path.join(test_data_dir, 'black_image.jpg')
    with open(black_file, 'rb') as f:
      black_jpg_buffer = f.read()
    white_file = os.path.join(test_data_dir, 'white_image.jpg')
    with open(white_file, 'rb') as f:
      white_jpg_buffer = f.read()
    # Empty bounding-box tensor: no explicit boxes supplied.
    bbox = tf.zeros((1, 0, 4), dtype=tf.float32)
    batch_position = 0
    # Each size config is (output_height, output_width, resize_method)
    size_configs = [(100, 100, 'round_robin'), (150, 10, 'bilinear'),
                    (10, 150, 'nearest')]
    # Each image config is (image_buf, image_color)
    image_configs = [(white_jpg_buffer, 255), (black_jpg_buffer, 0)]
    for (image_buf, image_color) in image_configs:
      for output_height, output_width, resize_method in size_configs:
        for distortions in [True, False]:
          for summary_verbosity in [0, 2]:
            for fuse_decode_and_crop in [True, False]:
              self._test_preprocessing_traing(
                  image_buf, image_color, output_height, output_width, bbox,
                  batch_position, resize_method, distortions, summary_verbosity,
                  fuse_decode_and_crop)
  def _test_learning_rate(self, params, global_step_to_expected_learning_rate):
    """Builds the model and checks the learning rate at given global steps.

    Args:
      params: BenchmarkCNN params used to build the model.
      global_step_to_expected_learning_rate: dict mapping a global_step
        value to the learning rate expected at that step.
    """
    self.longMessage = True  # pylint: disable=invalid-name
    bench = benchmark_cnn.BenchmarkCNN(params)
    with tf.Graph().as_default() as graph:
      bench._build_model()
      global_step = graph.get_tensor_by_name('global_step:0')
      learning_rate = graph.get_tensor_by_name('learning_rate_tensor:0')
      with self.test_session(graph=graph, use_gpu=True) as sess:
        items = global_step_to_expected_learning_rate.items()
        for global_step_val, expected_learning_rate in items:
          # Feed the step directly so each expected rate is checked without
          # actually running any training steps.
          self.assertAlmostEqual(sess.run(learning_rate,
                                          {global_step: global_step_val}),
                                 expected_learning_rate,
                                 msg='at global_step:{}'.
                                 format(global_step_val))
def testLearningRateModelSpecificResNet(self):
params = benchmark_cnn.make_params(model='resnet50',
batch_size=256,
variable_update='parameter_server',
num_gpus=1)
self._test_learning_rate(params, {
0: 0,
150136: 0.128,
150137: 0.0128,
300273: 0.0128,
300274: 0.00128,
10000000: 0.0000128
})
def testLearningRateUserProvidedInitLr(self):
params = benchmark_cnn.make_params(model='resnet50',
batch_size=256,
variable_update='replicated',
init_learning_rate=1.)
self._test_learning_rate(params, {
0: 1.,
10000000: 1.
})
def testLearningRateUserProvidedInitLrAndWarmup(self):
params = benchmark_cnn.make_params(model='resnet50',
batch_size=256,
variable_update='replicated',
init_learning_rate=1.,
num_learning_rate_warmup_epochs=5)
self._test_learning_rate(params, {
0: 0.,
12511: 0.5,
25022: 1.,
10000000: 1.
})
def testLearningRateUserProvidedDecayInfo(self):
params = benchmark_cnn.make_params(model='resnet50',
init_learning_rate=1.,
learning_rate_decay_factor=0.5,
num_epochs_per_decay=2,
minimum_learning_rate=0.3750,
batch_size=32)
self._test_learning_rate(params, {
0: 1.,
80071: 1.,
80072: 0.5,
160143: 0.5,
160144: 0.375,
10000000: 0.375
})
def testLearningRateUserProvidedZeroDecay(self):
params = benchmark_cnn.make_params(model='resnet50',
num_learning_rate_warmup_epochs=0,
learning_rate_decay_factor=0.5,
num_epochs_per_decay=0,
minimum_learning_rate=0.3750,
batch_size=32)
with self.assertRaises(ValueError):
with tf.Graph().as_default():
# This will fail because params.learning_rate_decay_factor cannot be
# nonzero if params.num_epochs_per_decay is zero.
benchmark_cnn.BenchmarkCNN(params)._build_model()
def testLearningRateUserProvidedSchedule(self):
params = benchmark_cnn.make_params(
model='trivial',
batch_size=32,
piecewise_learning_rate_schedule='1;3;.1;5;.01')
self._test_learning_rate(params, {
0: 1.,
120108: 1.,
120109: 0.1,
200181: 0.1,
200182: 0.01,
100000000: 0.01
})
def testNumBatchesAndEpochs(self):
params = benchmark_cnn.make_params()
batches, epochs = benchmark_cnn.get_num_batches_and_epochs(params, 10, 100)
self.assertEqual(batches, benchmark_cnn._DEFAULT_NUM_BATCHES)
self.assertAlmostEqual(epochs,
float(benchmark_cnn._DEFAULT_NUM_BATCHES) / 10)
params = benchmark_cnn.make_params(num_batches=21)
batches, epochs = benchmark_cnn.get_num_batches_and_epochs(params, 25, 50)
self.assertEqual(batches, 21)
self.assertAlmostEqual(epochs, 10.5)
params = benchmark_cnn.make_params(num_epochs=3)
batches, epochs = benchmark_cnn.get_num_batches_and_epochs(params, 2, 3)
self.assertEqual(batches, 4)
self.assertAlmostEqual(epochs, 8./3.)
with self.assertRaises(ValueError):
params = benchmark_cnn.make_params(num_batches=100, num_epochs=100)
benchmark_cnn.get_num_batches_and_epochs(params, 1, 1)
  def testInvalidFlags(self):
    """Invalid or mutually-exclusive param combinations raise ValueError."""
    # NCHW is not supported on CPU.
    params = benchmark_cnn.make_params(device='cpu', data_format='NCHW')
    with self.assertRaises(ValueError):
      benchmark_cnn.BenchmarkCNN(params)
    # fp16 variables are incompatible with NCCL all-reduce.
    params = benchmark_cnn.make_params(use_fp16=True, fp16_vars=True,
                                       variable_update='replicated',
                                       all_reduce_spec='nccl')
    with self.assertRaises(ValueError):
      benchmark_cnn.BenchmarkCNN(params)
    # Automatic loss scaling is only supported for 'replicated', 'ps',
    # and 'independent' variable_updates.
    invalid_variable_updates = [
        'distributed_replicated', 'distributed_all_reduce'
    ]
    for variable_update in invalid_variable_updates:
      params = benchmark_cnn.make_params(
          use_fp16=True,
          fp16_vars=True,
          fp16_enable_auto_loss_scale=True,
          variable_update=variable_update)
      with self.assertRaises(ValueError):
        benchmark_cnn.BenchmarkCNN(params)
    # Automatic loss scaling is not supported for 'nccl'.
    params = benchmark_cnn.make_params(
        use_fp16=True,
        fp16_vars=True,
        fp16_enable_auto_loss_scale=True,
        all_reduce_spec='nccl')
    with self.assertRaises(ValueError):
      benchmark_cnn.BenchmarkCNN(params)
    # Automatic loss scaling is not supported for 'staged_vars'.
    params = benchmark_cnn.make_params(
        use_fp16=True,
        fp16_vars=True,
        fp16_enable_auto_loss_scale=True,
        staged_vars=True)
    with self.assertRaises(ValueError):
      benchmark_cnn.BenchmarkCNN(params)
def testMakeParams(self):
default_params = benchmark_cnn.make_params()
self.assertEqual(default_params.model,
flags.param_specs['model'].default_value)
params = benchmark_cnn.make_params(model='foo')
self.assertEqual(params.model, 'foo')
with self.assertRaises(ValueError):
benchmark_cnn.make_params(job_name='foo')
with self.assertRaises(ValueError):
benchmark_cnn.make_params(gpu_memory_frac_for_testing=-1.)
with self.assertRaises(ValueError):
benchmark_cnn.make_params(gpu_memory_frac_for_testing=2.)
class VariableUpdateTest(tf.test.TestCase):
"""Tests that variables are updated correctly.
These tests use a very simple deterministic model. For example, some tests use
the model
loss = image * A * B
where image is a 1x1 images (with a single scalar value), and A and B are
scalar variables. Tests will run tf_cnn_benchmarks with such a model, on a
sequence of scalar | |
"outputBinding" in _doc:
try:
outputBinding = load_field(
_doc.get("outputBinding"),
union_of_None_type_or_CommandOutputBindingLoader,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the `outputBinding` field is not valid because:",
SourceLine(_doc, "outputBinding", str),
[e],
)
)
else:
outputBinding = None
extension_fields: Dict[str, Any] = {}
for k in _doc.keys():
if k not in cls.attrs:
if ":" in k:
ex = expand_url(
k, "", loadingOptions, scoped_id=False, vocab_term=False
)
extension_fields[ex] = _doc[k]
else:
_errors__.append(
ValidationException(
"invalid field `{}`, expected one of: `symbols`, `type`, `label`, `outputBinding`".format(
k
),
SourceLine(_doc, k, str),
)
)
break
if _errors__:
raise ValidationException(
"Trying 'CommandOutputEnumSchema'", None, _errors__
)
return cls(
symbols=symbols,
type=type,
label=label,
outputBinding=outputBinding,
extension_fields=extension_fields,
loadingOptions=loadingOptions,
)
def save(
self, top: bool = False, base_url: str = "", relative_uris: bool = True
) -> Dict[str, Any]:
r: Dict[str, Any] = {}
for ef in self.extension_fields:
r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef]
if self.symbols is not None:
u = save_relative_uri(self.symbols, base_url, True, None, relative_uris)
if u:
r["symbols"] = u
if self.type is not None:
r["type"] = save(
self.type, top=False, base_url=base_url, relative_uris=relative_uris
)
if self.label is not None:
r["label"] = save(
self.label, top=False, base_url=base_url, relative_uris=relative_uris
)
if self.outputBinding is not None:
r["outputBinding"] = save(
self.outputBinding,
top=False,
base_url=base_url,
relative_uris=relative_uris,
)
# top refers to the directory level
if top:
if self.loadingOptions.namespaces:
r["$namespaces"] = self.loadingOptions.namespaces
if self.loadingOptions.schemas:
r["$schemas"] = self.loadingOptions.schemas
return r
attrs = frozenset(["symbols", "type", "label", "outputBinding"])
class CommandOutputArraySchema(OutputArraySchema):
    """Schema for an array-typed CommandLineTool output.

    Auto-generated schema-salad loader class: `fromDoc` parses and validates
    a document node into an instance; `save` serializes it back to a dict.
    """
    def __init__(
        self,
        items: Any,
        type: Any,
        label: Optional[Any] = None,
        outputBinding: Optional[Any] = None,
        extension_fields: Optional[Dict[str, Any]] = None,
        loadingOptions: Optional[LoadingOptions] = None,
    ) -> None:
        if extension_fields:
            self.extension_fields = extension_fields
        else:
            self.extension_fields = CommentedMap()
        if loadingOptions:
            self.loadingOptions = loadingOptions
        else:
            self.loadingOptions = LoadingOptions()
        self.items = items
        self.type = type
        self.label = label
        self.outputBinding = outputBinding
    @classmethod
    def fromDoc(
        cls,
        doc: Any,
        baseuri: str,
        loadingOptions: LoadingOptions,
        docRoot: Optional[str] = None,
    ) -> "CommandOutputArraySchema":
        """Parse and validate a document node into a CommandOutputArraySchema.

        Field errors are accumulated and raised together as a single
        ValidationException at the end.
        """
        # Shallow-copy so line/column info can be carried over for error
        # reporting without mutating the caller's document.
        _doc = copy.copy(doc)
        if hasattr(doc, "lc"):
            _doc.lc.data = doc.lc.data
            _doc.lc.filename = doc.lc.filename
        _errors__ = []
        try:
            items = load_field(
                _doc.get("items"),
                uri_union_of_CWLTypeLoader_or_CommandOutputRecordSchemaLoader_or_CommandOutputEnumSchemaLoader_or_CommandOutputArraySchemaLoader_or_strtype_or_array_of_union_of_CWLTypeLoader_or_CommandOutputRecordSchemaLoader_or_CommandOutputEnumSchemaLoader_or_CommandOutputArraySchemaLoader_or_strtype_False_True_2,
                baseuri,
                loadingOptions,
            )
        except ValidationException as e:
            _errors__.append(
                ValidationException(
                    "the `items` field is not valid because:",
                    SourceLine(_doc, "items", str),
                    [e],
                )
            )
        try:
            type = load_field(
                _doc.get("type"),
                typedsl_Array_symbolLoader_2,
                baseuri,
                loadingOptions,
            )
        except ValidationException as e:
            _errors__.append(
                ValidationException(
                    "the `type` field is not valid because:",
                    SourceLine(_doc, "type", str),
                    [e],
                )
            )
        # Optional fields default to None when absent.
        if "label" in _doc:
            try:
                label = load_field(
                    _doc.get("label"),
                    union_of_None_type_or_strtype,
                    baseuri,
                    loadingOptions,
                )
            except ValidationException as e:
                _errors__.append(
                    ValidationException(
                        "the `label` field is not valid because:",
                        SourceLine(_doc, "label", str),
                        [e],
                    )
                )
        else:
            label = None
        if "outputBinding" in _doc:
            try:
                outputBinding = load_field(
                    _doc.get("outputBinding"),
                    union_of_None_type_or_CommandOutputBindingLoader,
                    baseuri,
                    loadingOptions,
                )
            except ValidationException as e:
                _errors__.append(
                    ValidationException(
                        "the `outputBinding` field is not valid because:",
                        SourceLine(_doc, "outputBinding", str),
                        [e],
                    )
                )
        else:
            outputBinding = None
        # Unknown keys with a ':' are namespaced extension fields; any other
        # unknown key is a validation error.
        extension_fields: Dict[str, Any] = {}
        for k in _doc.keys():
            if k not in cls.attrs:
                if ":" in k:
                    ex = expand_url(
                        k, "", loadingOptions, scoped_id=False, vocab_term=False
                    )
                    extension_fields[ex] = _doc[k]
                else:
                    _errors__.append(
                        ValidationException(
                            "invalid field `{}`, expected one of: `items`, `type`, `label`, `outputBinding`".format(
                                k
                            ),
                            SourceLine(_doc, k, str),
                        )
                    )
                    break
        if _errors__:
            raise ValidationException(
                "Trying 'CommandOutputArraySchema'", None, _errors__
            )
        return cls(
            items=items,
            type=type,
            label=label,
            outputBinding=outputBinding,
            extension_fields=extension_fields,
            loadingOptions=loadingOptions,
        )
    def save(
        self, top: bool = False, base_url: str = "", relative_uris: bool = True
    ) -> Dict[str, Any]:
        """Serialize this schema back into a plain dictionary."""
        r: Dict[str, Any] = {}
        for ef in self.extension_fields:
            r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef]
        if self.items is not None:
            u = save_relative_uri(self.items, base_url, False, 2, relative_uris)
            if u:
                r["items"] = u
        if self.type is not None:
            r["type"] = save(
                self.type, top=False, base_url=base_url, relative_uris=relative_uris
            )
        if self.label is not None:
            r["label"] = save(
                self.label, top=False, base_url=base_url, relative_uris=relative_uris
            )
        if self.outputBinding is not None:
            r["outputBinding"] = save(
                self.outputBinding,
                top=False,
                base_url=base_url,
                relative_uris=relative_uris,
            )
        # top refers to the directory level
        if top:
            if self.loadingOptions.namespaces:
                r["$namespaces"] = self.loadingOptions.namespaces
            if self.loadingOptions.schemas:
                r["$schemas"] = self.loadingOptions.schemas
        return r
    # Known field names; anything else is an extension field or an error.
    attrs = frozenset(["items", "type", "label", "outputBinding"])
class CommandInputParameter(InputParameter):
"""
An input parameter for a CommandLineTool.
"""
def __init__(
self,
id: Any,
label: Optional[Any] = None,
secondaryFiles: Optional[Any] = None,
streamable: Optional[Any] = None,
doc: Optional[Any] = None,
format: Optional[Any] = None,
inputBinding: Optional[Any] = None,
default: Optional[Any] = None,
type: Optional[Any] = None,
extension_fields: Optional[Dict[str, Any]] = None,
loadingOptions: Optional[LoadingOptions] = None,
) -> None:
if extension_fields:
self.extension_fields = extension_fields
else:
self.extension_fields = CommentedMap()
if loadingOptions:
self.loadingOptions = loadingOptions
else:
self.loadingOptions = LoadingOptions()
self.label = label
self.secondaryFiles = secondaryFiles
self.streamable = streamable
self.doc = doc
self.id = id
self.format = format
self.inputBinding = inputBinding
self.default = default
self.type = type
@classmethod
def fromDoc(
cls,
doc: Any,
baseuri: str,
loadingOptions: LoadingOptions,
docRoot: Optional[str] = None,
) -> "CommandInputParameter":
_doc = copy.copy(doc)
if hasattr(doc, "lc"):
_doc.lc.data = doc.lc.data
_doc.lc.filename = doc.lc.filename
_errors__ = []
if "id" in _doc:
try:
id = load_field(
_doc.get("id"),
uri_strtype_True_False_None,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the `id` field is not valid because:",
SourceLine(_doc, "id", str),
[e],
)
)
else:
id = None
__original_id_is_none = id is None
if id is None:
if docRoot is not None:
id = docRoot
else:
raise ValidationException("Missing id")
if not __original_id_is_none:
baseuri = id
if "label" in _doc:
try:
label = load_field(
_doc.get("label"),
union_of_None_type_or_strtype,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the `label` field is not valid because:",
SourceLine(_doc, "label", str),
[e],
)
)
else:
label = None
if "secondaryFiles" in _doc:
try:
secondaryFiles = load_field(
_doc.get("secondaryFiles"),
union_of_None_type_or_strtype_or_ExpressionLoader_or_array_of_union_of_strtype_or_ExpressionLoader,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the `secondaryFiles` field is not valid because:",
SourceLine(_doc, "secondaryFiles", str),
[e],
)
)
else:
secondaryFiles = None
if "streamable" in _doc:
try:
streamable = load_field(
_doc.get("streamable"),
union_of_None_type_or_booltype,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the `streamable` field is not valid because:",
SourceLine(_doc, "streamable", str),
[e],
)
)
else:
streamable = None
if "doc" in _doc:
try:
doc = load_field(
_doc.get("doc"),
union_of_None_type_or_strtype_or_array_of_strtype,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the `doc` field is not valid because:",
SourceLine(_doc, "doc", str),
[e],
)
)
else:
doc = None
if "format" in _doc:
try:
format = load_field(
_doc.get("format"),
uri_union_of_None_type_or_strtype_or_array_of_strtype_or_ExpressionLoader_True_False_None,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the `format` field is not valid because:",
SourceLine(_doc, "format", str),
[e],
)
)
else:
format = None
if "inputBinding" in _doc:
try:
inputBinding = load_field(
_doc.get("inputBinding"),
union_of_None_type_or_CommandLineBindingLoader,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the `inputBinding` field is not valid because:",
SourceLine(_doc, "inputBinding", str),
[e],
)
)
else:
inputBinding = None
if "default" in _doc:
try:
default = load_field(
_doc.get("default"),
union_of_None_type_or_Any_type,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the `default` field is not valid because:",
SourceLine(_doc, "default", str),
[e],
)
)
else:
default = None
if "type" in _doc:
try:
type = load_field(
_doc.get("type"),
typedsl_union_of_None_type_or_CWLTypeLoader_or_CommandInputRecordSchemaLoader_or_CommandInputEnumSchemaLoader_or_CommandInputArraySchemaLoader_or_strtype_or_array_of_union_of_CWLTypeLoader_or_CommandInputRecordSchemaLoader_or_CommandInputEnumSchemaLoader_or_CommandInputArraySchemaLoader_or_strtype_2,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the `type` field is not valid because:",
SourceLine(_doc, "type", str),
[e],
)
)
else:
type = None
extension_fields: Dict[str, Any] = {}
for k in _doc.keys():
if k not in cls.attrs:
if ":" in k:
ex = expand_url(
k, "", loadingOptions, scoped_id=False, vocab_term=False
)
extension_fields[ex] = _doc[k]
else:
_errors__.append(
ValidationException(
"invalid field `{}`, expected one of: `label`, `secondaryFiles`, `streamable`, `doc`, `id`, `format`, `inputBinding`, `default`, `type`".format(
k
),
SourceLine(_doc, k, str),
)
)
break
if _errors__:
raise ValidationException("Trying 'CommandInputParameter'", None, _errors__)
return cls(
label=label,
secondaryFiles=secondaryFiles,
streamable=streamable,
doc=doc,
id=id,
format=format,
inputBinding=inputBinding,
default=default,
type=type,
extension_fields=extension_fields,
loadingOptions=loadingOptions,
)
def save(
self, top: bool = False, base_url: str = "", relative_uris: bool = True
) -> Dict[str, Any]:
r: Dict[str, Any] = {}
for ef in self.extension_fields:
r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef]
if self.id is not None:
u = save_relative_uri(self.id, base_url, True, None, relative_uris)
if u:
r["id"] = u
if self.label is not None:
r["label"] = save(
self.label, top=False, base_url=self.id, relative_uris=relative_uris
)
if self.secondaryFiles is not None:
r["secondaryFiles"] = save(
self.secondaryFiles,
top=False,
base_url=self.id,
relative_uris=relative_uris,
)
if self.streamable is not None:
r["streamable"] = save(
self.streamable,
top=False,
base_url=self.id,
relative_uris=relative_uris,
)
if self.doc is not None:
r["doc"] = save(
| |
values['name'] = unicode(self.address)
values['network'] = unicode(self.address)
return values
class Range(object):
    """An inclusive span of IPv4 addresses stored as unsigned integers.

    The rich comparison operators implement range containment rather than
    numeric order: a range is "less than" another when it is a proper
    subrange, and "greater than" when it strictly contains the other.
    """
    def __init__(self, start, end):
        self.start = start
        self.end = end
    def clone(self):
        """Return a new Range covering the same span."""
        return Range(self.start, self.end)
    def __repr__(self):
        return '<IPV4.Range(%s, %s)>' % (self.start, self.end)
    def __str__(self):
        return self.printable()
    def __lt__(self, other):
        """True if the current address is a subnet of 'other'."""
        contained = self.start >= other.start and self.end <= other.end
        return contained and (self.start > other.start or self.end < other.end)
    def __le__(self, other):
        """True if the current address is a subnet of, or equal to, 'other'."""
        return self.start >= other.start and self.end <= other.end
    def __eq__(self, other):
        """True if the addresses are identical."""
        return self.start == other.start and self.end == other.end
    def __ne__(self, other):
        """True if the addresses are not identical."""
        return not (self.start == other.start and self.end == other.end)
    def __gt__(self, other):
        """True if the current address is a supernet of 'other'."""
        contains = other.start >= self.start and other.end <= self.end
        return contains and (other.start > self.start or other.end < self.end)
    def __ge__(self, other):
        """True if the current address is a supernet of, or equal to, 'other'."""
        return other.start >= self.start and other.end <= self.end
    def printable(self):
        """Return the range as 'start end' in dotted-quad notation."""
        return '%s %s' % (self.numToDottedQuad(self.start),
                          self.numToDottedQuad(self.end))
    def printableStart(self):
        return self.numToDottedQuad(self.start)
    def printableEnd(self):
        return self.numToDottedQuad(self.end)
    def numToDottedQuad(self, network):
        """Convert an unsigned integer into a 'dotted quad' string.

        NUM -> '192.168.1.1'
        The number must be given in host byte order.
        """
        return socket.inet_ntoa(struct.pack('>L', network))
class NetworkRange(treenodes.BaseNode):
    # Tree-node type identifiers used by the storage/serialization layer.
    class_id = 'IP4NR'
    class_name = 'ipv4 network range'
    def __init__(self, oid, branch, range = None):
        """Init.

        range can be either an address string ('nn.nn.nn.nn mm.mm.mm.mm')
        or a Range object.
        """
        super(NetworkRange, self).__init__(oid, branch)
        self.range = range
    def __repr__(self):
        return '<ipv4.NetworkRange(%s:%s)>' % (self.oid, self.range)
    def __str__(self):
        return '<ipv4.NetworkRange(%s:%s)>' % (self.oid, self.range)
    def _created(self, user):
        """Perform setup for a newly created network range.

        This includes several steps. Make sure we match the protocol of
        the network tree we have been created in, persist the range
        endpoints, verify our location in the tree, and reject duplicates.
        Raises errors.SiptrackError on any validation failure.
        """
        super(NetworkRange, self)._created(user)
        network_tree = self.getParent('network tree')
        if network_tree.protocol != 'ipv4':
            raise errors.SiptrackError('network range type doesn\'t match network tree protocol')
        # If we were passed a string, convert it, otherwise assume
        # it's a Range object already.
        # NOTE(review): 'unicode' makes this module Python 2 only.
        if type(self.range) in [str, unicode]:
            self.range = self.rangeFromString(self.range)
        # Persist both endpoints so _loaded() can rebuild the Range.
        self.storageAction('write_data', {'name': 'start', 'value': self.range.start})
        self.storageAction('write_data', {'name': 'end', 'value': self.range.end})
        # Be really sure that this range is in the correct place.
        parent = find_range_parent(network_tree, self.range)
        if parent.oid != self.parent.oid:
            raise errors.SiptrackError('invalid network location')
        # Make sure an identical range doesn't exist here.
        for range in parent.listChildren(include = ['ipv4 network range']):
            if self.oid == range.oid:
                continue
            if self.range == range.range:
                raise errors.SiptrackError('network range already exists')
    def relocate(self):
        """Public relocate method.

        Network ranges can't be manually relocated, their position in the
        network tree is based on existing networks.
        """
        raise errors.SiptrackError('can\'t relocate network ranges')
    def _loaded(self, data = None):
        """Called for an existing network range being loaded.

        Rebuilds self.range (and thus self.address) from stored endpoints.
        """
        self.range = Range(data['start'], data['end'])
        self.address = self.range
    def prune(self, user = None):
        """Prune a network range.

        The range will be removed if it has no associations/references.
        Returns the result of remove(), or an empty list if kept.
        """
        if len(list(self.references)) == 0 and \
                len(list(self.associations)) == 0:
            return self.remove(recursive = True)
        return []
    def rangeFromString(self, address):
        # Thin wrapper around the module-level parser.
        return range_from_string(address)
    # We keep an address so comparisons etc. to Network objects
    # don't break.
    def _get_address(self):
        return self.range
    def _set_address(self, val):
        self.range = val
    address = property(_get_address, _set_address)
    def getFreeNetwork(self, user=None):
        """Create a host (/32) subnet which is available under us.

        Used by Device.autoAssign and possibly others.
        """
        tree = self.getParent('network tree')
        return get_free_network(tree, self.range.start, self.range.end, user)
def get_free_network(tree, start, end, user=None):
    """Create a host (/32) subnet which is available in [start, end].

    Used by Device.autoAssign and possibly others. Scans candidate
    addresses from `start` through `end` (inclusive) and adds the first
    one not already present in the tree.

    Args:
        tree: network tree offering networkExists() and addNetwork().
        start: first candidate address; must support comparison and .inc().
        end: last candidate address (inclusive).
        user: passed through to tree.addNetwork().

    Returns:
        The newly added network, or None if every address is taken.
    """
    cur = start
    while not cur > end:
        if not tree.networkExists(cur):
            return tree.addNetwork(user, cur)
        cur = cur.inc()
    # Bug fix: previously returned the tuple (None, None) here, which is
    # truthy and inconsistent with the single network object returned on
    # success; callers testing the result would misbehave on exhaustion.
    return None
#def get_free_network(base, start, end, user=None):
# """Create a host (/32) subnet which is available under us.
#
# Used by Device.autoAssign and possibly others.
# """
# children = self.listChildren(include = ['ipv4 network'])
# children = [c.address for c in children]
# for start, end in iter_empty_ranges(self.address, children):
# address = Address(start, 0xffffffff)
# return base.add(user, 'ipv4 network', address)
# return None
def network_sorter(x, y):
    """cmp-style ordering of two objects by their .address attribute."""
    if x.address == y.address:
        return 0
    return -1 if x.address < y.address else 1
def iter_empty_ranges(base, children):
    """Returns ranges in 'base' not occupied by 'children'.

    'children' is a list of subnets of 'base' (sorted in place here).
    Each range is returned as a tuple of (start_address, end_address).
    """
    # NOTE(review): cmp-style sort is Python 2 only; under Python 3 this
    # would need key= / functools.cmp_to_key(network_sorter).
    children.sort(cmp = network_sorter)
    start = base.start
    for child in children:
        if start < child.start:
            # Gap before this child: everything up to just below its start.
            yield (start, child.start -1)
        start = child.end + 1
    if start <= base.end:
        # Trailing gap after the last child.
        yield (start, base.end)
def iter_networks_in_range(start, end):
    """Return networks that fit in the given range.

    The largest possible networks are returned.
    """
    while start <= end:
        # FIXME: there are better ways to find a valid netmask...
        # Tries masks in bitcount order 0..32 and keeps the first whose
        # aligned network fits entirely inside [start, end] — presumably
        # widest-first; confirm bitcount_to_num() semantics.
        for n in range(33):
            netmask = bitcount_to_num(n)
            address = Address(start, netmask, mask = True)
            if address.network >= start and address.broadcast <= end:
                break
        yield address
        # Continue scanning just past the yielded network.
        start = address.broadcast + 1
def iter_missing_networks(base, children):
    """Return networks missing from children, limited by base.

    'base' is the entire network to be searched. 'children' is a list
    of networks (direct subnets of base) that already exist.
    The largest possible networks are returned (as Address objects).
    """
    for gap_start, gap_end in iter_empty_ranges(base, children):
        for network in iter_networks_in_range(gap_start, gap_end):
            yield network
def iter_missing_networks_from_tree(tree):
    """iter_missing_networks wrapper for network trees."""
    # The whole IPv4 space is the base; direct children are the occupied nets.
    whole_space = Address(0, 0)
    existing = [child.address
                for child in tree.listChildren(include = ['ipv4 network'])]
    return iter_missing_networks(whole_space, existing)
def address_from_string(address, mask = True, validate = True):
    """Return an Address object matching an address string.

    The address string must be an ipv4 address in cidr notation, ie.
    nn.nn.nn.nn/mm. A bare address without '/mm' is treated as a /32
    host address.
    If an Address object is passed in it is returned untouched.

    Raises errors.InvalidNetworkAddress if the string cannot be parsed.
    """
    # isinstance instead of an exact type() check so Address subclasses
    # also pass through untouched.
    if isinstance(address, Address):
        return address
    if '/' not in address:
        address = '%s/32' % (address)
    network, netmask = dotted_quad_cidr_to_num(address)
    if network is None or netmask is None:
        raise errors.InvalidNetworkAddress('invalid address string')
    return Address(network, netmask, mask, validate)
def range_from_string(range):
    """Return a Range object matching a range string.

    The range string must be two ipv4 addresses, start and end,
    separated by whitespace. End must be equal to or higher than start.
    If a Range object is passed in it is returned untouched.

    Raises errors.SiptrackError for a malformed range string.
    """
    # isinstance instead of an exact type() check so Range subclasses
    # also pass through untouched.
    if isinstance(range, Range):
        return range
    parts = range.split()
    if len(parts) != 2:
        raise errors.SiptrackError('invalid range string')
    start = dotted_quad_to_num(parts[0])
    end = dotted_quad_to_num(parts[1])
    # NOTE(review): the documented start <= end requirement is not
    # enforced here; callers receive whatever order the string supplied.
    return Range(start, end)
def get_network(network_tree, address):
    """Return a network from the network tree.

    Both address strings and Address objects are allowed.
    Returns the network if it exists. Otherwise None.
    """
    address = address_from_string(address)
    parent = network_tree
    # Walk down the tree: at each level, either find an exact match or
    # descend into the child network that contains the target address.
    while True:
        prev_parent = parent
        for net in parent.listChildren(include = [Network.class_name]):
            if address == net.address:
                return net
            # '<' is subnet-of for these address objects; descend into the
            # enclosing child and keep searching there.
            if address < net.address:
                parent = net
                break
        # Not getting any closer.
        if parent is prev_parent:
            return None
def get_range(network_tree, range):
    """Return a range from the network tree.

    Both range strings and Range objects are allowed.
    Returns the range if it exists. Otherwise None.
    """
    wanted = range_from_string(range)
    node = network_tree
    while True:
        # First check the ranges attached directly to this node.
        for candidate in node.listChildren(include = [NetworkRange.class_name]):
            if candidate.range == wanted:
                return candidate
        # Otherwise descend into the first subnetwork that could
        # contain the wanted range.
        descended = False
        for subnet in node.listChildren(include = [Network.class_name]):
            if wanted <= subnet.address:
                node = subnet
                descended = True
                break
        if not descended:
            # Not getting any closer; the range does not exist.
            return None
def find_network_parent(network_tree, address):
"""Find the nearest (direct) existing parent of this network.
Starts from the network tree and searches through the existing
networks until the smallest possible parent is found.
"""
address = address_from_string(address, mask = True, validate = True)
parent = network_tree
while True:
prev_parent = parent
for net in | |
from cuxfilter.layouts.chart_views import chart_view
from ..core import BaseWidget
from ..core.aggregate import BaseNumberChart
from ..constants import (
CUDF_DATETIME_TYPES,
DATATILE_ACTIVE_COLOR,
DATATILE_INACTIVE_COLOR,
)
from ...assets.cudf_utils import get_min_max
from bokeh.models import ColumnDataSource
import cudf
import dask_cudf
import panel as pn
class RangeSlider(BaseWidget):
    """Two-handle range slider filter widget."""

    # True once datatiles for this widget's column have been computed.
    _datatile_loaded_state: bool = False
    datatile_active_color = DATATILE_ACTIVE_COLOR

    @property
    def datatile_loaded_state(self):
        return self._datatile_loaded_state

    @datatile_loaded_state.setter
    def datatile_loaded_state(self, state: bool):
        self._datatile_loaded_state = state
        # The bar color signals whether datatiles are ready to query.
        self.chart.bar_color = (
            self.datatile_active_color if state else DATATILE_INACTIVE_COLOR
        )

    def compute_stride(self):
        # Sub-unit value ranges need float rather than int steps.
        if self.stride_type == int and self.max_value < 1:
            self.stride_type = float
        if self.stride is None:
            self.stride = self.chart.step

    def initiate_chart(self, dashboard_cls):
        """
        initiate chart on dashboard creation
        """
        self.min_value, self.max_value = get_min_max(
            dashboard_cls._cuxfilter_df.data, self.x
        )
        self.generate_widget()
        self.add_events(dashboard_cls)

    def generate_widget(self):
        """
        generate widget range slider
        """
        if self.stride:
            self.params["step"] = self.stride
        self.chart = pn.widgets.RangeSlider(
            start=self.min_value,
            end=self.max_value,
            value=(self.min_value, self.max_value),
            **self.params,
        )
        # Derive the stride from the created widget when none was given.
        self.compute_stride()

    def apply_theme(self, theme):
        """
        apply thematic changes to the chart based on the theme
        """
        # interactive slider
        self.datatile_active_color = theme.datatile_active_color

    def add_events(self, dashboard_cls):
        """
        add events
        """

        def _on_value_change(event):
            if dashboard_cls._active_view != self.name:
                dashboard_cls._reset_current_view(new_active_view=self)
                dashboard_cls._calc_data_tiles()
            query_tuple = self._xaxis_np_dt64_transform(event.new)
            dashboard_cls._query_datatiles_by_range(query_tuple)

        # Trigger on every value assignment, even repeats.
        self.chart.param.watch(_on_value_change, ["value"], onlychanged=False)

    def compute_query_dict(self, query_str_dict, query_local_variables_dict):
        """
        compute query value
        Parameters:
        -----------
        query_dict:
            reference to dashboard.__cls__.query_dict
        """
        lo, hi = self.chart.value
        if (lo, hi) != (self.chart.start, self.chart.end):
            # Selection narrower than the full range: install a filter.
            query_str_dict[self.name] = f"@{self.x}_min<={self.x}<=@{self.x}_max"
            query_local_variables_dict[f"{self.x}_min"] = lo
            query_local_variables_dict[f"{self.x}_max"] = hi
        else:
            # Full range selected: remove any previous filter.
            query_str_dict.pop(self.name, None)
            query_local_variables_dict.pop(f"{self.x}_min", None)
            query_local_variables_dict.pop(f"{self.x}_max", None)
class DateRangeSlider(BaseWidget):
    # True once datatiles for this widget's column have been computed.
    _datatile_loaded_state: bool = False
    datatile_active_color = DATATILE_ACTIVE_COLOR

    @property
    def datatile_loaded_state(self):
        return self._datatile_loaded_state

    @property
    def x_dtype(self):
        # Resolve the dtype of the x column from whichever source object
        # is attached (bokeh ColumnDataSource or a (dask_)cudf DataFrame).
        # Returns None when no recognized source is set.
        if isinstance(self.source, ColumnDataSource):
            return self.source.data[self.data_x_axis].dtype
        elif isinstance(self.source, (cudf.DataFrame, dask_cudf.DataFrame)):
            return self.source[self.x].dtype
        return None

    @datatile_loaded_state.setter
    def datatile_loaded_state(self, state: bool):
        self._datatile_loaded_state = state
        # The bar color signals whether datatiles are ready to query.
        if state:
            self.chart.bar_color = self.datatile_active_color
        else:
            self.chart.bar_color = DATATILE_INACTIVE_COLOR

    def compute_stride(self):
        # Spread the full datetime span evenly over the data points.
        self.stride = self.stride_type(
            (self.max_value - self.min_value) / self.data_points
        )

    def initiate_chart(self, dashboard_cls):
        """
        initiate chart on dashboard creation
        """
        self.source = dashboard_cls._cuxfilter_df.data
        # This widget only supports datetime-typed x columns.
        if self.x_dtype not in CUDF_DATETIME_TYPES:
            raise TypeError(
                "DateRangeSlider: x-column type must be one of "
                + str(CUDF_DATETIME_TYPES)
            )
        self.min_value, self.max_value = get_min_max(
            dashboard_cls._cuxfilter_df.data, self.x
        )
        if self.data_points is None:
            # Default data_points to the number of distinct x values;
            # a dask-backed series needs an explicit compute() first.
            _series = dashboard_cls._cuxfilter_df.data[self.x].value_counts()
            self.data_points = (
                _series.compute().shape[0]
                if isinstance(_series, dask_cudf.core.Series)
                else _series.shape[0]
            )
            del _series
        self.compute_stride()
        self.generate_widget()
        self.add_events(dashboard_cls)

    def generate_widget(self):
        """
        generate widget range slider
        """
        self.chart = pn.widgets.DateRangeSlider(
            start=self.min_value,
            end=self.max_value,
            value=(self.min_value, self.max_value),
            width=self.width,
            sizing_mode="scale_width",
            **self.params,
        )

    def apply_theme(self, theme):
        """
        apply thematic changes to the chart based on the theme
        """
        # interactive slider
        self.datatile_active_color = theme.datatile_active_color

    def add_events(self, dashboard_cls):
        """
        add events
        """

        def widget_callback(event):
            if dashboard_cls._active_view != self.name:
                dashboard_cls._reset_current_view(new_active_view=self)
                dashboard_cls._calc_data_tiles()
            # Convert the (start, end) datetimes into the numeric form
            # the datatile range query expects.
            query_tuple = self._xaxis_np_dt64_transform(event.new)
            dashboard_cls._query_datatiles_by_range(query_tuple)

        # add callback to filter_Widget on value change
        self.chart.param.watch(widget_callback, ["value"], onlychanged=False)

    def compute_query_dict(self, query_str_dict, query_local_variables_dict):
        """
        compute query value
        Parameters:
        -----------
        query_dict:
            reference to dashboard.__cls__.query_dict
        """
        # Only install a filter when the selection is narrower than the
        # widget's full range; otherwise clear any previous filter.
        if self.chart.value != (self.chart.start, self.chart.end):
            min_temp, max_temp = self.chart.value
            query = f"@{self.x}_min<={self.x}<=@{self.x}_max"
            query_str_dict[self.name] = query
            query_local_variables_dict[self.x + "_min"] = min_temp
            query_local_variables_dict[self.x + "_max"] = max_temp
        else:
            query_str_dict.pop(self.name, None)
            query_local_variables_dict.pop(self.x + "_min", None)
            query_local_variables_dict.pop(self.x + "_max", None)
class IntSlider(BaseWidget):
    """Single-value integer slider filter widget."""

    # True once datatiles for this widget's column have been computed.
    _datatile_loaded_state: bool = False
    value = None
    datatile_active_color = DATATILE_ACTIVE_COLOR

    @property
    def datatile_loaded_state(self):
        return self._datatile_loaded_state

    @datatile_loaded_state.setter
    def datatile_loaded_state(self, state: bool):
        self._datatile_loaded_state = state
        # The bar color signals whether datatiles are ready to query.
        if state:
            self.chart.bar_color = self.datatile_active_color
        else:
            self.chart.bar_color = DATATILE_INACTIVE_COLOR

    def initiate_chart(self, dashboard_cls):
        """
        initiate chart on dashboard creation
        """
        # Non-shadowing names instead of the builtins min/max.
        min_val, max_val = get_min_max(dashboard_cls._cuxfilter_df.data, self.x)
        self.min_value = int(min_val)
        self.max_value = int(max_val)
        self.generate_widget()
        self.add_events(dashboard_cls)

    def generate_widget(self):
        """
        generate widget int slider

        Bug fix: the stride branches were previously inverted - the
        widget got step=None when no stride was configured, while an
        explicitly configured stride was silently dropped. Now matches
        the FloatSlider logic: no stride -> let the widget pick a
        default step and adopt it; stride set -> pass it as step.
        """
        if self.value is None:
            self.value = self.min_value
        if self.stride is None:
            self.chart = pn.widgets.IntSlider(
                start=self.min_value,
                end=self.max_value,
                value=self.value,
                width=self.width,
                height=self.height,
                **self.params,
            )
            # Adopt the widget's auto-computed step as our stride.
            self.stride = self.chart.step
        else:
            self.chart = pn.widgets.IntSlider(
                start=self.min_value,
                end=self.max_value,
                value=self.value,
                step=self.stride,
                width=self.width,
                height=self.height,
                **self.params,
            )

    def apply_theme(self, theme):
        """
        apply thematic changes to the chart based on the theme
        """
        # interactive slider
        self.datatile_active_color = theme.datatile_active_color

    def add_events(self, dashboard_cls):
        """
        add events
        """

        def widget_callback(event):
            if dashboard_cls._active_view != self.name:
                dashboard_cls._reset_current_view(new_active_view=self)
                dashboard_cls._calc_data_tiles()
            dashboard_cls._query_datatiles_by_indices([], [event.new])

        # add callback to filter_Widget on value change
        self.chart.param.watch(widget_callback, ["value"], onlychanged=False)

    def compute_query_dict(self, query_str_dict, query_local_variables_dict):
        """
        compute query value
        Parameters:
        -----------
        query_dict:
            reference to dashboard.__cls__.query_dict
        """
        # str(int) is always non-empty, so a selected value always adds
        # a filter; condition kept for parity with sibling widgets.
        if len(str(self.chart.value)) > 0:
            query = f"{self.x} == @{self.x}_value"
            query_str_dict[self.name] = query
            query_local_variables_dict[self.x + "_value"] = self.chart.value
        else:
            query_str_dict.pop(self.name, None)
            query_local_variables_dict.pop(self.x + "_value", None)
class FloatSlider(BaseWidget):
    """Single-value float slider filter widget."""

    # True once datatiles for this widget's column have been computed.
    _datatile_loaded_state: bool = False
    value = None
    datatile_active_color = DATATILE_ACTIVE_COLOR

    @property
    def datatile_loaded_state(self):
        return self._datatile_loaded_state

    @datatile_loaded_state.setter
    def datatile_loaded_state(self, state: bool):
        self._datatile_loaded_state = state
        # The bar color signals whether datatiles are ready to query.
        self.chart.bar_color = (
            self.datatile_active_color if state else DATATILE_INACTIVE_COLOR
        )

    def initiate_chart(self, dashboard_cls):
        """
        initiate chart on dashboard creation
        """
        self.min_value, self.max_value = get_min_max(
            dashboard_cls._cuxfilter_df.data, self.x
        )
        self.generate_widget()
        self.add_events(dashboard_cls)

    def generate_widget(self):
        """
        generate widget float slider
        """
        if self.value is None:
            self.value = self.min_value
        common = dict(
            start=self.min_value,
            end=self.max_value,
            value=self.value,
            width=self.width,
            height=self.height,
        )
        if self.stride is None:
            # No stride configured: let the widget pick a default step,
            # then adopt it as this chart's stride.
            self.chart = pn.widgets.FloatSlider(**common, **self.params)
            self.stride = self.chart.step
        else:
            self.chart = pn.widgets.FloatSlider(
                step=self.stride, **common, **self.params
            )

    def apply_theme(self, theme):
        """
        apply thematic changes to the chart based on the theme
        """
        # interactive slider
        self.datatile_active_color = theme.datatile_active_color

    def add_events(self, dashboard_cls):
        """
        add events
        """

        def _on_value_change(event):
            if dashboard_cls._active_view != self.name:
                dashboard_cls._reset_current_view(new_active_view=self)
                dashboard_cls._calc_data_tiles(cumsum=False)
            dashboard_cls._query_datatiles_by_indices([], [event.new])

        # Trigger on every value assignment, even repeats.
        self.chart.param.watch(_on_value_change, ["value"], onlychanged=False)

    def compute_query_dict(self, query_str_dict, query_local_variables_dict):
        """
        compute query value
        Parameters:
        -----------
        query_dict:
            reference to dashboard.__cls__.query_dict
        """
        key = f"{self.x}_value"
        if len(str(self.chart.value)) > 0:
            query_str_dict[self.name] = f"{self.x} == @{key}"
            query_local_variables_dict[key] = self.chart.value
        else:
            query_str_dict.pop(self.name, None)
            query_local_variables_dict.pop(key, None)
class DropDown(BaseWidget):
    """Single-select dropdown filter widget."""

    value = None

    def initiate_chart(self, dashboard_cls):
        """
        initiate chart on dashboard creation
        """
        self.min_value, self.max_value = get_min_max(
            dashboard_cls._cuxfilter_df.data, self.x
        )
        if self.stride is None:
            # Sub-unit value ranges need float rather than int strides.
            if self.max_value < 1 and self.stride_type == int:
                self.stride_type = float
            self.stride = self.stride_type(1)
        self.calc_list_of_values(dashboard_cls._cuxfilter_df.data)
        self.generate_widget()
        self.add_events(dashboard_cls)

    def calc_list_of_values(self, data):
        """
        calculate unique list of values to be included in the drop down menu
        """
        if self.label_map is None:
            self.list_of_values = data[self.x].unique()
            # Dask-backed frames return a lazy series; materialize first.
            if isinstance(data, dask_cudf.core.DataFrame):
                self.list_of_values = self.list_of_values.compute()
            self.list_of_values = self.list_of_values.to_pandas().tolist()
            if len(self.list_of_values) > 500:
                print(
                    """It is not recommended to use a column with
                    so many different values for dropdown menu"""
                )
            # Empty string acts as the "no filter selected" option.
            self.list_of_values.append("")
            self.data_points = len(self.list_of_values) - 1
        else:
            self.list_of_values = self.label_map
            self.list_of_values[""] = ""
            # len(dict) equals len(dict.items()); the redundant duplicate
            # recomputation of data_points has been removed.
            self.data_points = len(self.list_of_values) - 1

    def generate_widget(self):
        """
        generate widget dropdown
        """
        self.chart = pn.widgets.Select(
            options=self.list_of_values,
            value="",
            width=self.width,
            height=self.height,
            **self.params,
        )

    def apply_theme(self, theme):
        """
        apply thematic changes to the chart based on the theme

        DropDown has no themable elements, so this is a no-op.
        """
        pass

    def add_events(self, dashboard_cls):
        """
        add events
        """

        def widget_callback(event):
            if dashboard_cls._active_view != self.name:
                dashboard_cls._reset_current_view(new_active_view=self)
                dashboard_cls._calc_data_tiles(cumsum=False)
            dashboard_cls._query_datatiles_by_indices([], [event.new])

        # add callback to filter_Widget on value change
        self.chart.param.watch(widget_callback, ["value"], onlychanged=False)

    def compute_query_dict(self, query_str_dict, query_local_variables_dict):
        """
        compute query value
        Parameters:
        -----------
        query_dict:
            reference to dashboard.__cls__.query_dict
        """
        # The empty-string option means "no filter".
        if len(str(self.chart.value)) > 0:
            query = f"{self.x} == @{self.x}_value"
            query_str_dict[self.name] = query
            query_local_variables_dict[self.x + "_value"] = self.chart.value
        else:
            query_str_dict.pop(self.name, None)
            query_local_variables_dict.pop(self.x + "_value", None)
class MultiSelect(BaseWidget):
value = None
def initiate_chart(self, dashboard_cls):
"""
initiate chart on dashboard creation
"""
self.min_value, self.max_value = get_min_max(
dashboard_cls._cuxfilter_df.data, self.x
)
if self.stride is None:
if self.max_value < 1 and self.stride_type == int:
self.stride_type = float
self.stride = self.stride_type(1)
self.calc_list_of_values(dashboard_cls._cuxfilter_df.data)
self.generate_widget()
self.add_events(dashboard_cls)
def calc_list_of_values(self, data):
"""
calculate unique list of values to be included in the multiselect menu
"""
if self.label_map is None:
self.list_of_values = data[self.x].unique()
if isinstance(data, dask_cudf.core.DataFrame):
self.list_of_values = self.list_of_values.compute()
self.list_of_values = self.list_of_values.to_pandas().tolist()
# if len(self.list_of_values) > self.data_points:
# self.list_of_values = aggregated_column_unique(self, data)
if len(self.list_of_values) > 500:
print(
"""It is not recommended to use a column with
so many different values for multiselect menu"""
)
self.list_of_values.append("")
self.data_points = len(self.list_of_values) - 1
else:
self.list_of_values = self.label_map
self.list_of_values[""] = ""
self.data_points = len(self.list_of_values.items()) - 1
    def generate_widget(self):
        """
        generate widget multiselect
        """
        # The blank "" option is selected initially, meaning no filter
        # is applied.
        self.chart = pn.widgets.MultiSelect(
            options=self.list_of_values,
            value=[""],
            width=self.width,
            height=self.height,
            **self.params,
        )
    def apply_theme(self, theme):
        """
        apply thematic changes to the chart based on the theme

        MultiSelect has no themable elements, so this is a no-op.
        """
        pass
def add_events(self, dashboard_cls):
"""
add events
"""
def widget_callback(event):
if dashboard_cls._active_view != self.name:
dashboard_cls._reset_current_view(new_active_view=self)
dashboard_cls._calc_data_tiles(cumsum=False)
dashboard_cls._query_datatiles_by_indices(event.old, event.new)
# add callback to filter_Widget on value change
self.chart.param.watch(widget_callback, ["value"], onlychanged=False)
# self.add_reset_event(dashboard_cls)
def compute_query_dict(self, query_str_dict, query_local_variables_dict):
"""
compute query value
Parameters:
-----------
query_dict:
reference to dashboard.__cls__.query_dict
"""
if len(self.chart.value) == 0 or self.chart.value == [""]:
query_str_dict.pop(self.name, None)
elif len(self.chart.value) == 1:
query_str_dict[self.name] = f"{self.x}=={self.chart.value[0]}"
else:
indices_string | |
(eq, ":i_team", 0),
(assign, ":old_cur_y", ":cur_y"),
(try_end),
(try_end),
(try_begin),
(le, ":old_cur_y", ":cur_y"),
(assign, ":cur_y", ":old_cur_y"),
(try_end),
(assign, ":cur_x", 42),
#white line between playing players and spectators
(create_mesh_overlay, reg0, "mesh_white_plane"),
(overlay_set_color, reg0, 0xFFFFFF),
(overlay_set_alpha, reg0, 0xD0),
(store_add, ":sub_cur_x", ":cur_x", 0),
(position_set_x, pos1, ":sub_cur_x"),
(store_add, ":sub_cur_y", ":cur_y", 10),
(position_set_y, pos1, ":sub_cur_y"),
(overlay_set_position, reg0, pos1),
(position_set_x, pos1, 36000),
(position_set_y, pos1, 50),
(overlay_set_size, reg0, pos1),
(try_begin),
(gt, ":spectator_rows", 0),
(assign, ":cur_x", 280),
(val_sub, ":cur_y", 50),
#"spectators" text
(create_text_overlay, reg0, "str_spectators", 0),
(overlay_set_color, reg0, 0xFFFFFF),
(position_set_x, pos1, ":cur_x"),
(position_set_y, pos1, ":cur_y"),
(overlay_set_position, reg0, pos1),
(position_set_x, pos1, 1000),
(position_set_y, pos1, 1000),
(overlay_set_size, reg0, pos1),
(create_text_overlay, reg0, "str_ping", tf_right_align),
(overlay_set_color, reg0, 0xFFFFFF),
(store_add, ":sub_cur_x", ":cur_x", 215), #200
(position_set_x, pos1, ":sub_cur_x"),
(position_set_y, pos1, ":cur_y"),
(overlay_set_position, reg0, pos1),
(position_set_x, pos1, 750),
(position_set_y, pos1, 750),
(overlay_set_size, reg0, pos1),
#white line for spectators list
(create_mesh_overlay, reg0, "mesh_white_plane"),
(overlay_set_color, reg0, 0xFFFFFF),
(overlay_set_alpha, reg0, 0xD0),
(store_add, ":sub_cur_x", ":cur_x", 0),
(position_set_x, pos1, ":sub_cur_x"),
(store_add, ":sub_cur_y", ":cur_y", -10),
(position_set_y, pos1, ":sub_cur_y"),
(overlay_set_position, reg0, pos1),
(position_set_x, pos1, 12000),
(position_set_y, pos1, 50),
(overlay_set_size, reg0, pos1),
(val_sub, ":cur_y", 30),
(assign, ":font_color", 0xC0C0C0),
(store_add, ":end_cond", ":num_players", 1),
(try_for_range, ":player_no", 0, ":end_cond"),
(store_add, ":slot_index", ":player_no", multi_data_player_index_list_begin),
(troop_slot_eq, "trp_multiplayer_data", ":slot_index", 1),
(player_get_team_no, ":player_team", ":player_no"),
        (eq, ":player_team", multi_team_spectator), # prevents the dedicated server from passing below; dedicated servers have -1 for team_no, not 2 (multi_team_spectator).
(troop_set_slot, "trp_multiplayer_data", ":slot_index", 1),
(try_begin),
(eq, ":my_player_no", ":player_no"),
(create_mesh_overlay, reg0, "mesh_white_plane"),
(overlay_set_color, reg0, 0xFFFFFF),
(overlay_set_alpha, reg0, 0x35),
(store_add, ":sub_cur_x", ":cur_x", 0),
(position_set_x, pos1, ":sub_cur_x"),
(store_add, ":sub_cur_y", ":cur_y", 0),
(position_set_y, pos1, ":sub_cur_y"),
(overlay_set_position, reg0, pos1),
(position_set_x, pos1, 12000),
(position_set_y, pos1, 1000),
(overlay_set_size, reg0, pos1),
(try_end),
(str_store_player_username, s1, ":player_no"),
(create_text_overlay, reg0, s1, 0),
(overlay_set_color, reg0, ":font_color"),
(position_set_x, pos1, 750),
(position_set_y, pos1, 750),
(overlay_set_size, reg0, pos1),
(position_set_x, pos1, ":cur_x"),
(position_set_y, pos1, ":cur_y"),
(overlay_set_position, reg0, pos1),
(player_get_ping, reg0, ":player_no"),
(create_text_overlay, reg0, "str_reg0", tf_right_align),
(overlay_set_color, reg0, ":font_color"),
(position_set_x, pos1, 750),
(position_set_y, pos1, 750),
(overlay_set_size, reg0, pos1),
(store_add, ":sub_cur_x", ":cur_x", 215), #200
(position_set_x, pos1, ":sub_cur_x"),
(position_set_y, pos1, ":cur_y"),
(overlay_set_position, reg0, pos1),
(val_sub, ":cur_y", 20),
(try_end),
(try_end),
(omit_key_once, key_mouse_scroll_up),
(omit_key_once, key_mouse_scroll_down),
(presentation_set_duration, 999999),
]),
(ti_on_presentation_run,
[(store_trigger_param_1, ":cur_time"),
(try_begin),
(this_or_next|key_clicked, key_mouse_scroll_up),
(key_clicked, key_mouse_scroll_down),
(omit_key_once, key_mouse_scroll_up),
(omit_key_once, key_mouse_scroll_down),
(try_end),
(try_begin),
(eq, "$g_multiplayer_stats_chart_opened_manually", 1),
(neg|game_key_is_down, gk_leave),
(assign, "$g_multiplayer_stats_chart_opened_manually", 0),
(clear_omitted_keys),
(presentation_set_duration, 0),
(try_end),
(try_begin),
(store_mul, ":update_period_time_limit", "$g_stats_chart_update_period", 1000),
(gt, ":cur_time", ":update_period_time_limit"),
(clear_omitted_keys),
(presentation_set_duration, 0),
(start_presentation, "prsnt_multiplayer_stats_chart"),
(try_end),
]),
]),
("multiplayer_escape_menu", prsntf_manual_end_only, 0, [
(ti_on_presentation_load,
[(set_fixed_point_multiplier, 1000),
(create_mesh_overlay, reg0, "mesh_mp_ingame_menu"),
(position_set_x, pos1, 250),
(position_set_y, pos1, 80),
(overlay_set_position, reg0, pos1),
(position_set_x, pos1, 1000),
(position_set_y, pos1, 1000),
(overlay_set_size, reg0, pos1),
(str_clear, s0),
(create_text_overlay, "$g_presentation_obj_escape_menu_container", s0, tf_scrollable_style_2),
(position_set_x, pos1, 285),
(position_set_y, pos1, 75),
(overlay_set_position, "$g_presentation_obj_escape_menu_container", pos1),
(position_set_x, pos1, 405),
(position_set_y, pos1, 550),
(overlay_set_area_size, "$g_presentation_obj_escape_menu_container", pos1),
(set_container_overlay, "$g_presentation_obj_escape_menu_container"),
(assign, ":cur_y", 500),
(create_text_overlay, reg0, "str_choose_an_option", 0),
(overlay_set_color, reg0, 0xFFFFFF),
(position_set_x, pos1, 0),
(position_set_y, pos1, ":cur_y"),
(overlay_set_position, reg0, pos1),
(val_sub, ":cur_y", escape_menu_item_height),
# (create_button_overlay, "$g_presentation_obj_escape_menu_1", "str_choose_faction", 0),
# (overlay_set_color, "$g_presentation_obj_escape_menu_1", 0xFFFFFF),
# (multiplayer_get_my_team, ":my_team"),
# (assign, "$g_presentation_obj_escape_menu_2", -1),
# (assign, "$g_presentation_obj_escape_menu_3", -1),
# (assign, "$g_presentation_obj_escape_menu_6", -1),
# (assign, "$g_presentation_obj_escape_menu_7", -1),
# (assign, "$g_presentation_obj_escape_menu_8", -1),
# (assign, "$g_presentation_obj_escape_menu_9", -1),
# (assign, "$g_presentation_obj_escape_menu_10", -1),
# (assign, "$g_presentation_obj_escape_menu_11", -1),
# (assign, "$g_presentation_obj_escape_menu_12", -1),
# (assign, "$g_presentation_obj_escape_menu_13", -1),
# (try_begin),
# (lt, ":my_team", multi_team_spectator),
# (create_button_overlay, "$g_presentation_obj_escape_menu_2", "str_choose_troop", 0),
# (overlay_set_color, "$g_presentation_obj_escape_menu_2", 0xFFFFFF),
# (multiplayer_get_my_troop, ":my_troop"),
# (try_begin),
# (ge, ":my_troop", 0),
# (create_button_overlay, "$g_presentation_obj_escape_menu_3", "str_choose_items", 0),
# (overlay_set_color, "$g_presentation_obj_escape_menu_3", 0xFFFFFF),
# (try_end),
# (try_end),
(create_button_overlay, "$g_presentation_obj_escape_menu_4", "str_options", 0),
(overlay_set_color, "$g_presentation_obj_escape_menu_4", 0xFFFFFF),
(create_button_overlay, "$g_presentation_obj_escape_menu_5", "str_redefine_keys", 0),
(overlay_set_color, "$g_presentation_obj_escape_menu_5", 0xFFFFFF),
(create_button_overlay, "$g_presentation_obj_escape_menu_13", "@Show game rules", 0),
(overlay_set_color, "$g_presentation_obj_escape_menu_13", 0xFFFFFF),
(multiplayer_get_my_player, ":my_player_no"),
(try_begin),
(this_or_next|eq, "$g_multiplayer_maps_voteable", 1),
(this_or_next|eq, "$g_multiplayer_factions_voteable", 1),
(this_or_next|gt, "$g_multiplayer_num_bots_voteable", 0),
(this_or_next|eq, "$g_multiplayer_kick_voteable", 1),
(eq, "$g_multiplayer_ban_voteable", 1),
(create_button_overlay, "$g_presentation_obj_escape_menu_6", "str_submit_a_poll", 0),
(overlay_set_color, "$g_presentation_obj_escape_menu_6", 0xFFFFFF),
(assign, "$g_presentation_obj_escape_menu_6_available", 1),
(try_begin),
(ge, ":my_player_no", 0),
(player_get_slot, ":last_poll_time", ":my_player_no", slot_player_poll_disabled_until_time),
(store_mission_timer_a, ":mission_timer"),
(lt, ":mission_timer", ":last_poll_time"),
(overlay_set_color, "$g_presentation_obj_escape_menu_6", 0x888888),
(overlay_set_hilight_color, "$g_presentation_obj_escape_menu_6", 0x888888),
(assign, "$g_presentation_obj_escape_menu_6_available", 0),
(try_end),
(try_end),
(try_begin),
(ge, ":my_player_no", 0),
(player_is_admin, ":my_player_no"),
(create_button_overlay, "$g_presentation_obj_escape_menu_7", "str_administrator_panel", 0),
(overlay_set_color, "$g_presentation_obj_escape_menu_7", 0xFFFFFF),
(create_button_overlay, "$g_presentation_obj_escape_menu_8", "str_kick_player", 0),
(overlay_set_color, "$g_presentation_obj_escape_menu_8", 0xFFFFFF),
(create_button_overlay, "$g_presentation_obj_escape_menu_9", "str_ban_player", 0),
(overlay_set_color, "$g_presentation_obj_escape_menu_9", 0xFFFFFF),
(try_end),
(create_button_overlay, "$g_presentation_obj_escape_menu_11", "str_mute_player", 0),
(overlay_set_color, "$g_presentation_obj_escape_menu_11", 0xFFFFFF),
(try_begin),
(assign, "$g_presentation_obj_escape_menu_12", -1),
(assign, ":any_muted", 0),
(get_max_players, ":num_players"),
(try_for_range, ":player_no", 0, ":num_players"),
(player_is_active, ":player_no"),
(player_get_is_muted, ":is_muted", ":player_no"),
(eq, ":is_muted", 1),
(assign, ":any_muted", 1),
(try_end),
(eq, ":any_muted", 1),
(create_button_overlay, "$g_presentation_obj_escape_menu_12", "str_unmute_player", 0),
(overlay_set_color, "$g_presentation_obj_escape_menu_12", 0xFFFFFF),
(try_end),
(create_button_overlay, "$g_presentation_obj_escape_menu_10", "str_quit", 0),
(overlay_set_color, "$g_presentation_obj_escape_menu_10", 0xFFFFFF),
(position_set_x, pos1, 130),
(position_set_y, pos1, ":cur_y"),
# (overlay_set_position, "$g_presentation_obj_escape_menu_1", pos1),
# (try_begin),
# (ge, "$g_presentation_obj_escape_menu_2", 0),
# (val_sub, ":cur_y", escape_menu_item_height),
# (position_set_y, pos1, ":cur_y"),
# (overlay_set_position, "$g_presentation_obj_escape_menu_2", pos1),
# (try_end),
# (try_begin),
# (ge, "$g_presentation_obj_escape_menu_3", 0),
# (val_sub, ":cur_y", escape_menu_item_height),
# (position_set_y, pos1, ":cur_y"),
# (overlay_set_position, "$g_presentation_obj_escape_menu_3", pos1),
# (try_end),
(val_sub, ":cur_y", escape_menu_item_height),
(position_set_y, pos1, ":cur_y"),
(overlay_set_position, "$g_presentation_obj_escape_menu_4", pos1),
(val_sub, ":cur_y", escape_menu_item_height),
(position_set_y, pos1, ":cur_y"),
(overlay_set_position, "$g_presentation_obj_escape_menu_5", pos1),
(val_sub, ":cur_y", escape_menu_item_height),
(position_set_y, pos1, ":cur_y"),
(overlay_set_position, "$g_presentation_obj_escape_menu_13", pos1),
(try_begin),
(ge, "$g_presentation_obj_escape_menu_6", 0),
(val_sub, ":cur_y", escape_menu_item_height),
(position_set_y, pos1, ":cur_y"),
(overlay_set_position, "$g_presentation_obj_escape_menu_6", pos1),
(try_end),
(try_begin),
(ge, "$g_presentation_obj_escape_menu_7", 0),
(val_sub, ":cur_y", escape_menu_item_height),
(position_set_y, pos1, ":cur_y"),
(overlay_set_position, "$g_presentation_obj_escape_menu_7", pos1),
(try_end),
(try_begin),
(ge, "$g_presentation_obj_escape_menu_8", 0),
(val_sub, ":cur_y", escape_menu_item_height),
(position_set_y, pos1, ":cur_y"),
(overlay_set_position, "$g_presentation_obj_escape_menu_8", pos1),
(try_end),
(try_begin),
(ge, "$g_presentation_obj_escape_menu_9", 0),
(val_sub, ":cur_y", escape_menu_item_height),
(position_set_y, pos1, ":cur_y"),
(overlay_set_position, "$g_presentation_obj_escape_menu_9", pos1),
(try_end),
(val_sub, ":cur_y", escape_menu_item_height),
(position_set_y, pos1, ":cur_y"),
(overlay_set_position, "$g_presentation_obj_escape_menu_11", pos1),
(try_begin),
(ge, "$g_presentation_obj_escape_menu_12", 0),
(val_sub, ":cur_y", escape_menu_item_height),
(position_set_y, pos1, ":cur_y"),
(overlay_set_position, "$g_presentation_obj_escape_menu_12", pos1),
(try_end),
(val_sub, ":cur_y", escape_menu_item_height),
(position_set_y, pos1, ":cur_y"),
(overlay_set_position, "$g_presentation_obj_escape_menu_10", pos1),
(presentation_set_duration, 999999),
]),
(ti_on_presentation_event_state_change,
[(store_trigger_param_1, ":object"),
(try_begin),
# (eq, ":object", "$g_presentation_obj_escape_menu_1"),
# (presentation_set_duration, 0),
# (start_presentation, "prsnt_multiplayer_team_select"),
# (else_try),
# (eq, ":object", "$g_presentation_obj_escape_menu_2"),
# (presentation_set_duration, 0),
# (start_presentation, "prsnt_multiplayer_troop_select"),
# (else_try),
# (eq, ":object", "$g_presentation_obj_escape_menu_3"),
# (presentation_set_duration, 0),
# (assign, "$g_presentation_state", 0),
# (start_presentation, "prsnt_multiplayer_item_select"),
# (else_try),
(eq, ":object", "$g_presentation_obj_escape_menu_4"),
(presentation_set_duration, 0),
(change_screen_options),
(else_try),
(eq, ":object", "$g_presentation_obj_escape_menu_5"),
(presentation_set_duration, 0),
(change_screen_controls),
(else_try),
(eq, ":object", "$g_presentation_obj_escape_menu_6"),
(eq, "$g_presentation_obj_escape_menu_6_available", 1),
(presentation_set_duration, 0),
(start_presentation, "prsnt_multiplayer_poll_menu"),
(else_try),
(eq, ":object", "$g_presentation_obj_escape_menu_7"),
(presentation_set_duration, 0),
(multiplayer_send_message_to_server, multiplayer_event_open_admin_panel),
(else_try),
(eq, ":object", "$g_presentation_obj_escape_menu_8"),
(presentation_set_duration, 0),
(assign, "$g_multiplayer_players_list_action_type", 3), #admin kick
(start_presentation, "prsnt_multiplayer_show_players_list"),
(else_try),
(eq, ":object", "$g_presentation_obj_escape_menu_9"),
(presentation_set_duration, 0),
(assign, "$g_multiplayer_players_list_action_type", 4), #admin ban
(start_presentation, "prsnt_multiplayer_show_players_list"),
(else_try),
(eq, ":object", "$g_presentation_obj_escape_menu_10"),
(presentation_set_duration, 0),
(finish_mission, 0),
(else_try),
(eq, ":object", "$g_presentation_obj_escape_menu_11"),
(presentation_set_duration, 0),
(assign, "$g_multiplayer_players_list_action_type", 5), #mute player
(start_presentation, "prsnt_multiplayer_show_players_list"),
(else_try),
(eq, ":object", "$g_presentation_obj_escape_menu_12"),
(presentation_set_duration, 0),
(assign, "$g_multiplayer_players_list_action_type", 6), #unmute player
(start_presentation, "prsnt_multiplayer_show_players_list"),
(else_try),
(eq, ":object", "$g_presentation_obj_escape_menu_13"),
(presentation_set_duration, 0),
(multiplayer_send_message_to_server, multiplayer_event_open_game_rules),
(try_end),
]),
(ti_on_presentation_run,
[(store_trigger_param_1, ":cur_time"),
(try_begin),
(this_or_next|key_clicked, key_escape),
(key_clicked, key_xbox_start),
(gt, ":cur_time", 200),
(presentation_set_duration, 0),
(try_end),
]),
]),
("multiplayer_poll_menu", prsntf_manual_end_only, 0, [
(ti_on_presentation_load,
[(set_fixed_point_multiplier, 1000),
(create_mesh_overlay, reg0, "mesh_mp_ingame_menu"),
(position_set_x, pos1, 250),
(position_set_y, pos1, 80),
(overlay_set_position, reg0, pos1),
(position_set_x, pos1, 1000),
(position_set_y, pos1, 1000),
(overlay_set_size, reg0, pos1),
(str_clear, s0),
(create_text_overlay, "$g_presentation_obj_poll_menu_container", s0, tf_scrollable_style_2),
(position_set_x, pos1, 285),
(position_set_y, pos1, 125),
(overlay_set_position, "$g_presentation_obj_poll_menu_container", pos1),
(position_set_x, pos1, 405),
(position_set_y, pos1, 500),
(overlay_set_area_size, "$g_presentation_obj_poll_menu_container", pos1),
(set_container_overlay, "$g_presentation_obj_poll_menu_container"),
(assign, "$g_presentation_obj_poll_menu_1", -1),
(assign, "$g_presentation_obj_poll_menu_4", -1),
(assign, "$g_presentation_obj_poll_menu_5", -1),
(assign, ":cur_y", 450),
(create_text_overlay, reg0, "str_choose_a_poll_type", 0),
(overlay_set_color, reg0, 0xFFFFFF),
(position_set_x, pos1, 0),
(position_set_y, pos1, ":cur_y"),
(overlay_set_position, reg0, pos1),
(val_sub, ":cur_y", escape_menu_item_height),
(position_set_x, pos1, 60),
(try_begin),
(eq, "$g_multiplayer_maps_voteable", 1),
(create_button_overlay, "$g_presentation_obj_poll_menu_1", "str_poll_for_changing_the_map", 0),
(overlay_set_color, "$g_presentation_obj_poll_menu_1", 0xFFFFFF),
(position_set_y, pos1, ":cur_y"),
(overlay_set_position, "$g_presentation_obj_poll_menu_1", pos1),
(val_sub, ":cur_y", escape_menu_item_height),
(try_end),
(try_begin),
(eq, "$g_multiplayer_factions_voteable", 1),
(create_button_overlay, "$g_presentation_obj_poll_menu_4", "str_poll_for_changing_the_map_and_factions", 0),
(overlay_set_color, "$g_presentation_obj_poll_menu_4", 0xFFFFFF),
(position_set_y, pos1, ":cur_y"),
(overlay_set_position, "$g_presentation_obj_poll_menu_4", pos1),
(val_sub, ":cur_y", escape_menu_item_height),
(try_end),
(try_begin),
(gt, "$g_multiplayer_num_bots_voteable", 0),
(create_button_overlay, "$g_presentation_obj_poll_menu_5", "str_poll_for_changing_number_of_bots", 0),
(overlay_set_color, "$g_presentation_obj_poll_menu_5", 0xFFFFFF),
(position_set_y, pos1, ":cur_y"),
(overlay_set_position, "$g_presentation_obj_poll_menu_5", pos1),
(val_sub, ":cur_y", escape_menu_item_height),
(try_end),
(try_begin),
(eq, "$g_multiplayer_kick_voteable", 1),
(create_button_overlay, "$g_presentation_obj_poll_menu_2", "str_poll_for_kicking_a_player", 0),
(overlay_set_color, "$g_presentation_obj_poll_menu_2", 0xFFFFFF),
(position_set_y, pos1, ":cur_y"),
(overlay_set_position, "$g_presentation_obj_poll_menu_2", pos1),
(val_sub, ":cur_y", escape_menu_item_height),
(try_end),
(try_begin),
(eq, "$g_multiplayer_ban_voteable", 1),
(create_button_overlay, "$g_presentation_obj_poll_menu_3", "str_poll_for_banning_a_player", 0),
(overlay_set_color, "$g_presentation_obj_poll_menu_3", 0xFFFFFF),
(position_set_y, pos1, ":cur_y"),
(overlay_set_position, "$g_presentation_obj_poll_menu_3", pos1),
(try_end),
(presentation_set_duration, 999999),
]),
(ti_on_presentation_event_state_change,
[(store_trigger_param_1, ":object"),
(try_begin),
(eq, ":object", "$g_presentation_obj_poll_menu_1"),
(presentation_set_duration, 0),
(assign, "$g_multiplayer_maps_list_action_type", 1), #poll map
(start_presentation, "prsnt_multiplayer_show_maps_list"),
(else_try),
(eq, ":object", "$g_presentation_obj_poll_menu_2"),
(presentation_set_duration, 0),
(assign, "$g_multiplayer_players_list_action_type", 1), #poll kick
(start_presentation, "prsnt_multiplayer_show_players_list"),
(else_try),
(eq, ":object", "$g_presentation_obj_poll_menu_3"),
(presentation_set_duration, 0),
(assign, "$g_multiplayer_players_list_action_type", 2), #poll ban
(start_presentation, "prsnt_multiplayer_show_players_list"),
(else_try),
(eq, ":object", "$g_presentation_obj_poll_menu_4"),
(presentation_set_duration, 0),
(assign, "$g_multiplayer_maps_list_action_type", 2), #poll map and factions
(start_presentation, "prsnt_multiplayer_show_maps_list"),
(else_try),
(eq, ":object", "$g_presentation_obj_poll_menu_5"),
(presentation_set_duration, 0),
(assign, "$g_multiplayer_number_of_bots_list_action_type", 1), #for team 1
(start_presentation, "prsnt_multiplayer_show_number_of_bots_list"),
(try_end),
]),
(ti_on_presentation_run,
[(store_trigger_param_1, ":cur_time"),
(try_begin),
(this_or_next|key_clicked, key_escape),
(key_clicked, key_xbox_start),
(gt, ":cur_time", 200),
(presentation_set_duration, 0),
(try_end),
]),
]),
("multiplayer_show_players_list", prsntf_manual_end_only, 0, [
(ti_on_presentation_load,
[(set_fixed_point_multiplier, 1000),
(create_mesh_overlay, reg0, "mesh_mp_ingame_menu"),
(position_set_x, pos1, 250),
(position_set_y, pos1, 80),
(overlay_set_position, reg0, pos1),
(position_set_x, pos1, 1000),
(position_set_y, pos1, 1000),
| |
presence.org.id Id of organization
 * @apiSuccess {String} presence.org.name Name of organization
* @apiSuccess {Object} tfidf
* @apiSuccess {date} tfidf.created_at When was this data created?
* @apiSuccess {date} tfidf.created_for For when was this data created?
* @apiSuccess {Object} tfidf.session object
* @apiSuccess {String} tfidf.session.name Name of session.
* @apiSuccess {Date} tfidf.session.date_ts Date and time of session.
* @apiSuccess {Date} tfidf.session.date Date of session.
* @apiSuccess {Integer} tfidf.session.id Id of session.
* @apiSuccess {Boolean} tfidf.session.in_review Return true or false if session is in review.
* @apiSuccess {Object[]} tfidf.session.orgs Organization object
* @apiSuccess {String} tfidf.session.orgs.acronym Organization acronym
 * @apiSuccess {Boolean} tfidf.session.orgs.is_coalition True or False if organization is in coalition
* @apiSuccess {Integer} tfidf.session.orgs.id Id of organization
* @apiSuccess {Integer} tfidf.session.orgs.name Name of organization
* @apiSuccess {Object[]} tfidf.results
* @apiSuccess {String} tfidf.results.term Term that is analyzed.
* @apiSuccess {Object} tfidf.results.scores Scores of TFIDF
* @apiSuccess {Integer} tfidf.results.scores.tf Term frequency
* @apiSuccess {Integer} tfidf.results.scores.df Document frequency
* @apiSuccess {Integer} tfidf.results.scores.tf-idf Term frequency / Document frequency
* @apiSuccess {Object} session object
* @apiSuccess {String} session.name Name of session.
* @apiSuccess {Date} session.date_ts Date and time of session.
* @apiSuccess {Date} session.date Date of session.
* @apiSuccess {Integer} session.id Id of session.
* @apiSuccess {Boolean} session.in_review Return true or false if session is in review.
* @apiSuccess {Object[]} session.orgs Organization object
* @apiSuccess {String} session.orgs.acronym Organization acronym
* @apiSuccess {Boolean} session.orgs.is_coalition True of False if organization is in coalition
* @apiSuccess {Integer} session.orgs.id Id of organization
* @apiSuccess {String} session.orgs.name Name of organization
* @apiSuccess {Object[]} motion
* @apiSuccess {Object} motion.results.session object
* @apiSuccess {String} motion.results.session.name Name of session.
* @apiSuccess {Date} motion.results.session.date_ts Date and time of session.
* @apiSuccess {Date} motion.results.session.date Date of session.
* @apiSuccess {Integer} motion.results.session.id Id of session.
* @apiSuccess {Boolean} motion.results.session.in_review Return true or false if session is in review.
* @apiSuccess {Object[]} motion.results.session.orgs Organization object
* @apiSuccess {String} motion.results.session.orgs.acronym Organization acronym
* @apiSuccess {Boolean} motion.results.session.orgs.is_coalition True of False if organization is in coalition
* @apiSuccess {Integer} motion.results.session.orgs.id Id of organization
* @apiSuccess {Integer} motion.results.session.orgs.name Name of organization
* @apiSuccess {Integer} motion.results.results IDs of all speeches on session.
* @apiSuccess {Object} motion.results.results object
* @apiSuccess {Integer} motion.results.abstain Number of MPs that abstain on voting.
* @apiSuccess {Integer} motion.results.against Number of MPs that are against on voting.
* @apiSuccess {Integer} motion.results.motion_id ID of motion.
* @apiSuccess {String} motion.results.text Text of motion
* @apiSuccess {String[]} motion.results.tags Array of tags of motion.
 * @apiSuccess {Boolean} motion.results.is_outlier Analysis of whether this voting result is an outlier.
* @apiSuccess {Integer} motion.results.not_present Number of MPs that were not present.
* @apiSuccess {Integer} motion.results.votes_for Number of MPs that voted with yes.
* @apiSuccess {Boolean} motion.results.result True or False if the motion was successful.
* @apiSuccess {String[]} motion.results.tags Array of tags of motion.
* @apiExample {curl} Example:
curl -i https://analize.parlameter.si/v1/s/getLastSessionLanding
* @apiExample {curl} Example with date:
curl -i https://analize.parlameter.si/v1/s/getLastSessionLanding/21.12.2016
* @apiSuccessExample {json} Example response:
{
"created_for": "20.03.2017",
"presence": [
{
"org": {
"acronym": "PS NP",
"is_coalition": false,
"id": 109,
"name": "PS nepovezanih poslancev "
},
"percent": 100
},
{
"org": {
"acronym": "SMC",
"is_coalition": true,
"id": 1,
"name": "PS Stranka modernega centra"
},
"percent": 99
},
...
"created_at": "16.04.2017",
"tfidf": {
"session": {
"name": "28. red<NAME>",
"date_ts": "2017-03-20T01:00:00",
"org": {
"acronym": "DZ",
"is_coalition": false,
"name": "<NAME>",
"id": 95
},
"date": "20. 3. 2017",
"orgs": [
{
"acronym": "DZ",
"is_coalition": false,
"name": "<NAME>",
"id": 95
}
],
"id": 9379,
"in_review": true
},
"session": {
"name": "<NAME>",
"date_ts": "2017-03-20T01:00:00",
"orgs": [
{
"acronym": "DZ",
"is_coalition": false,
"id": 95,
"name": "<NAME>"
}
],
"date": "20. 3. 2017",
"org": {
"acronym": "DZ",
"is_coalition": false,
"id": 95,
"name": "<NAME>"
},
"id": 9379,
"in_review": true
},
"motions": [
{
"session": {
"name": "<NAME>",
"date_ts": "2017-03-20T01:00:00",
"org": {
"acronym": "DZ",
"is_coalition": false,
"name": "<NAME>",
"id": 95
},
"date": "20. 3. 2017",
"orgs": [
{
"acronym": "DZ",
"is_coalition": false,
"name": "<NAME>",
"id": 95
}
],
"id": 9379,
"in_review": true
},
"results": {
"abstain": 0,
"tags": [
"Proceduralna glasovanja"
],
"text": "Dnevni red v celoti",
"motion_id": 6900,
"against": 1,
"votes_for": 83,
"is_outlier": true,
"not_present": 6,
"result": true
}
}
}
"""
if date_:
fdate = datetime.strptime(date_, API_DATE_FORMAT).date()
else:
fdate = datetime.now().today()
ready = False
presences = PresenceOfPG.objects.filter(created_for__lte=fdate).order_by("-created_for")
if not presences:
raise Http404("Nismo našli kartice")
presence_index = 0
motions = None
presence = None
while not ready:
print presence_index
presence = presences[presence_index]
motions = json.loads(getMotionOfSession(None, presence.session.id_parladata).content)
if type(motions) == dict:
if "results" in motions.keys():
tfidf = json.loads(getTFIDF(None, presence.session.id_parladata).content)
if tfidf["results"]:
ready = True
else:
presence_index += 1
else:
presence_index += 1
results = [{"org": Organization.objects.get(id_parladata=p).getOrganizationData(),
"percent": presence.presence[0][p]} for p in presence.presence[0]]
result = sorted(results, key=lambda k: k['percent'], reverse=True)
session = Session.objects.get(id_parladata=int(presence.session.id_parladata))
return JsonResponse({"session": session.getSessionData(),
"created_for": session.start_time.strftime(API_DATE_FORMAT),
"created_at": datetime.today().strftime(API_DATE_FORMAT),
"presence": result,
"motions": motions["results"],
"tfidf": tfidf}, safe=False)
def getSessionsByClassification(request):
"""
* @api {get} /getSessionsByClassification/ All sessions grouped by classification
* @apiName getSessionsByClassification
* @apiGroup Session
* @apiSuccess {Object[]} kolegij Classification of session
* @apiSuccess {String} kolegij.name Name of session.
* @apiSuccess {Date} kolegij.date_ts Date and time of session.
* @apiSuccess {Date} kolegij.date Date of session.
* @apiSuccess {Integer} kolegij.id Id of session.
* @apiSuccess {Boolean} kolegij.in_review Returns true or false if session is in review.
* @apiSuccess {Boolean} kolegij.votes Returns true or false if session has votes.
* @apiSuccess {Boolean} kolegij.speeches Returns true or false if session has speeches.
* @apiSuccess {Object[]} kolegij.orgs Organization object
* @apiSuccess {String} kolegij.orgs.acronym Organization acronym
 * @apiSuccess {Boolean} kolegij.orgs.is_coalition True or False if organization is in coalition
* @apiSuccess {Integer} kolegij.orgs.id Id of organization
* @apiSuccess {Integer} kolegij.orgs.name Name of organization
* @apiSuccess {Object[]} dt Classification of session
* @apiSuccess {String} dt.name Name of session.
* @apiSuccess {Date} dt.date_ts Date and time of session.
* @apiSuccess {Date} dt.date Date of session.
* @apiSuccess {Integer} dt.id Id of session.
* @apiSuccess {Boolean} dt.in_review Returns true or false if session is in review.
 * @apiSuccess {Boolean} dt.votes Returns true or false if session has votes.
* @apiSuccess {Boolean} dt.speeches Returns true or false if session has speeches.
* @apiSuccess {Object[]} dt.orgs Organization object
* @apiSuccess {String} dt.orgs.acronym Organization acronym
* @apiSuccess {Boolean} dt.orgs.is_coalition True of False if organization is in coalition
* @apiSuccess {Integer} dt.orgs.id Id of organization
* @apiSuccess {Integer} dt.orgs.name Name of organization
* @apiSuccess {Object[]} dz Classification of session
* @apiSuccess {String} dz.name Name of session.
* @apiSuccess {Date} dz.date_ts Date and time of session.
* @apiSuccess {Date} dz.date Date of session.
* @apiSuccess {Integer} dz.id Id of session.
* @apiSuccess {Boolean} dz.in_review Returns true or false if session is in review.
 * @apiSuccess {Boolean} dz.votes Returns true or false if session has votes.
* @apiSuccess {Boolean} dz.speeches Returns true or false if session has speeches.
* @apiSuccess {Object[]} dz.orgs Organization object
* @apiSuccess {String} dz.orgs.acronym Organization acronym
* @apiSuccess {Boolean} dz.orgs.is_coalition True of False if organization is in coalition
* @apiSuccess {Integer} dz.orgs.id Id of organization
* @apiSuccess {Integer} dz.orgs.name Name of organization
* @apiExample {curl} Example:
curl -i https://analize.parlameter.si/v1/s/getSessionsByClassification
* @apiSuccessExample {json} Example response:
{
"kolegij": [
{
"votes": false,
"name": "<NAME>",
"date_ts": "2017-04-13T02:00:00",
"speeches": true,
"orgs": [
{
"acronym": "",
"is_coalition": false,
"id": 9,
"name": "<NAME>"
}
],
"date": "13. 4. 2017",
"org": {
"acronym": "",
"is_coalition": false,
"id": 9,
"name": "<NAME>"
},
"id": 9419,
"in_review": true
},
"dt": [
{
"acronym": "",
"sessions": [
{
"votes": false,
"name": "<NAME>",
"date_ts": "2017-03-31T02:00:00",
"speeches": false,
"orgs": [
{
"acronym": "",
"is_coalition": false,
"id": 101,
"name": "Preiskovalna komisija za ugotavljanje politične odgovornosti nosilcev javnih funkcij pri investiciji v blok 6 Termoelektrarne Šoštanj"
}
],
"date": "31. 3. 2017",
"org": {
"acronym": "",
"is_coalition": false,
"id": 101,
"name": "Preiskovalna komisija za ugotavljanje politične odgovornosti nosilcev javnih funkcij pri investiciji v blok 6 Termoelektrarne Šoštanj"
},
"id": 9397,
"in_review": false
},
"dz": [
{
"votes": true,
"name": "<NAME>",
"date_ts": "2017-03-20T01:00:00",
"speeches": true,
"orgs": [
{
"acronym": "DZ",
"is_coalition": false,
"id": 95,
| |
14],
282: [102, 24, 0, 254, 240, 192, 254, 0],
283: [102, 24, 0, 60, 126, 96, 60, 0],
284: [24, 102, 0, 62, 96, 102, 62, 0],
285: [24, 102, 0, 62, 102, 62, 6, 124],
286: [198, 124, 0, 62, 96, 102, 62, 0],
287: [198, 124, 0, 62, 102, 62, 6, 124],
288: [24, 0, 62, 96, 110, 102, 62, 0],
289: [24, 0, 62, 102, 102, 62, 6, 124],
290: [60, 102, 96, 110, 102, 102, 62, 112],
291: [16, 24, 0, 62, 102, 62, 6, 124],
292: [24, 102, 0, 102, 126, 102, 102, 0],
293: [204, 210, 192, 252, 198, 198, 198, 0],
294: [102, 255, 102, 126, 102, 102, 102, 0],
295: [96, 120, 96, 124, 102, 102, 102, 0],
296: [118, 220, 0, 126, 24, 24, 126, 0],
297: [118, 220, 0, 56, 24, 24, 60, 0],
298: [126, 0, 126, 24, 24, 24, 126, 0],
299: [126, 0, 56, 24, 24, 24, 60, 0],
300: [198, 124, 0, 126, 24, 24, 126, 0],
301: [198, 124, 0, 56, 24, 24, 60, 0],
302: [30, 120, 24, 24, 24, 30, 120, 14],
303: [0, 24, 0, 24, 24, 24, 12, 14],
304: [24, 0, 126, 24, 24, 24, 126, 0],
305: [0, 0, 56, 24, 24, 24, 60, 0],
306: [198, 198, 198, 198, 198, 214, 204, 0],
307: [102, 0, 102, 102, 102, 102, 102, 12],
308: [24, 102, 0, 6, 6, 6, 252, 0],
309: [24, 102, 0, 12, 12, 12, 12, 120],
310: [230, 108, 248, 108, 108, 108, 238, 24],
311: [96, 96, 124, 108, 120, 108, 110, 24],
312: [0, 0, 102, 108, 120, 108, 102, 0],
313: [14, 0, 96, 96, 96, 96, 126, 0],
314: [14, 0, 56, 24, 24, 24, 62, 0],
315: [96, 48, 48, 48, 96, 112, 222, 24],
316: [56, 24, 24, 24, 24, 24, 60, 112],
317: [102, 108, 96, 96, 96, 96, 126, 0],
318: [230, 108, 96, 96, 96, 96, 240, 0],
319: [96, 96, 96, 108, 96, 96, 126, 0],
320: [14, 0, 56, 24, 24, 24, 62, 0],
321: [48, 48, 60, 120, 240, 48, 62, 0],
322: [120, 24, 30, 60, 120, 24, 30, 0],
323: [14, 0, 198, 230, 246, 222, 206, 0],
324: [14, 0, 120, 108, 108, 108, 102, 0],
325: [230, 118, 126, 110, 102, 102, 204, 24],
326: [0, 0, 120, 108, 108, 108, 110, 24],
327: [102, 24, 0, 198, 246, 222, 198, 0],
328: [102, 24, 0, 124, 102, 102, 102, 0],
329: [0, 0, 188, 182, 54, 54, 54, 0],
330: [252, 198, 198, 198, 198, 198, 204, 24],
331: [0, 0, 124, 102, 102, 102, 102, 12],
332: [126, 0, 124, 198, 198, 198, 124, 0],
333: [126, 0, 62, 102, 102, 102, 124, 0],
334: [198, 124, 0, 124, 198, 198, 124, 0],
335: [198, 124, 62, 102, 102, 102, 124, 0],
336: [102, 204, 0, 124, 198, 198, 124, 0],
337: [102, 204, 62, 102, 102, 102, 124, 0],
338: [126, 204, 204, 206, 204, 204, 126, 0],
339: [0, 0, 108, 218, 222, 216, 118, 0],
340: [14, 0, 124, 102, 124, 102, 102, 0],
341: [14, 0, 124, 102, 96, 96, 96, 0],
342: [252, 198, 198, 252, 204, 198, 214, 48],
343: [0, 0, 124, 102, 96, 96, 96, 112],
344: [102, 24, 0, 124, 102, 124, 102, 0],
345: [102, 24, 0, 124, 102, 96, 96, 0],
346: [14, 0, 126, 192, 60, 6, 252, 0],
347: [14, 0, 62, 96, 126, 6, 124, 0],
348: [24, 102, 126, 192, 60, 6, 252, 0],
349: [24, 102, 62, 96, 126, 6, 124, 0],
350: [60, 102, 96, 60, 6, 102, 60, 112],
351: [0, 0, 62, 96, 60, 6, 124, 224],
352: [102, 24, 0, 126, 240, 30, 252, 0],
353: [102, 24, 0, 62, 120, 30, 124, 0],
354: [126, 24, 24, 24, 24, 24, 24, 56],
355: [48, 48, 124, 48, 48, 48, 28, 112],
356: [102, 24, 126, 24, 24, 24, 24, 0],
357: [6, 108, 96, 248, 96, 96, 56, 0],
358: [126, 24, 24, 60, 24, 24, 24, 0],
359: [48, 48, 124, 48, 120, 48, 28, 0],
360: [118, 220, 0, 198, 198, 198, 124, 0],
361: [118, 220, 204, 204, 204, 204, 118, 0],
362: [126, 0, 198, 198, 198, 198, 124, 0],
363: [126, 0, 204, 204, 204, 204, 118, 0],
364: [198, 124, 0, 198, 198, 198, 124, 0],
365: [198, 124, 204, 204, 204, 204, 118, 0],
366: [24, 24, 0, 198, 198, 198, 124, 0],
367: [24, 24, 0, 102, 102, 102, 62, 0],
368: [102, 204, 0, 198, 198, 198, 124, 0],
369: [102, 204, 204, 204, 204, 204, 118, 0],
370: [238, 102, 102, 198, 198, 230, 124, 14],
371: [0, 0, 204, 204, 204, 204, 118, 14],
372: [24, 102, 0, 198, 214, 254, 198, 0],
373: [24, 102, 0, 198, 214, 124, 108, 0],
374: [24, 102, 0, 102, 60, 24, 24, 0],
375: [24, 102, 0, 102, 102, 62, 6, 60],
376: [102, 0, 102, 102, 60, 24, 24, 0],
377: [14, 0, 254, 12, 56, 96, 254, 0],
378: [14, 0, 126, 12, 24, 48, 126, 0],
379: [24, 0, 254, 12, 56, 96, 254, 0],
380: [24, 0, 126, 12, 24, 48, 126, 0],
381: [102, 24, 0, 126, 12, 48, 126, 0],
382: [102, 24, 0, 126, 12, 48, 126, 0],
383: [60, 102, 96, 96, 96, 96, 96, 0],
384: [96, 248, 96, 124, 102, 102, 124, 0],
385: [124, 182, 182, 60, 54, 54, 60, 0],
386: [126, 96, 96, 124, 102, 102, 124, 0],
387: [124, 96, 124, 102, 102, 102, 124, 0],
388: [96, 224, 96, 124, 102, 102, 124, 0],
389: [96, 224, 96, 124, 102, 102, 60, 0],
390: [60, 102, 6, 6, 6, 102, 60, 0],
391: [6, 126, 192, 192, 192, 192, 126, 0],
392: [6, 12, 124, 192, 192, 192, 124, 0],
393: [120, 108, 102, 246, 102, 108, 120, 0],
394: [124, 182, 182, 54, 54, 54, 124, 0],
395: [62, 6, 6, 62, 102, 102, 62, 0],
396: [62, 6, 62, 102, 102, 102, 62, 0],
397: [0, 0, 60, 102, 102, 60, 6, 28],
398: [126, 6, 6, 62, 6, 6, 126, 0],
399: [60, 102, 6, 126, 102, 102, 60, 0],
400: [60, 102, 96, 56, 96, 102, 60, 0],
401: [126, 96, 96, 124, 96, 96, 96, 192],
402: [14, 27, 24, 60, 24, 24, 216, 112],
403: [6, 124, 192, 192, 220, 204, 124, 0],
404: [198, 198, 198, 108, 56, 108, 56, 0],
405: [192, 192, 243, 219, 219, 219, 206, 0],
407: [126, 24, 24, 126, 24, 24, 126, 0],
408: [206, 218, 216, 240, 216, 204, 204, 0],
409: [60, 96, 102, 108, 120, 108, 102, 0],
410: [120, 24, 24, 126, 24, 24, 30, 0],
411: [12, 62, 12, 124, 204, 204, 204, 0],
412: [214, 214, 214, 214, 214, 214, 126, 0],
413: [198, 230, 246, 254, 222, 206, 198, 192],
414: [0, 0, 124, 102, 102, 102, 102, 6],
415: [60, 102, 102, 126, 102, 102, 60, 0],
461: [102, 24, 0, 60, 102, 126, 102, 0],
462: [102, 24, 0, 62, 102, 198, 126, 0],
463: [102, 24, 0, 126, 24, 24, 126, 0],
464: [102, 24, 0, 56, 24, 24, 60, 0],
465: [102, 24, 0, 124, 198, 198, 124, 0],
466: [102, 24, 0, 60, 102, 102, 60, 0],
467: [102, 24, 0, 198, 198, 198, 124, 0],
468: [102, 24, 0, 102, 102, 102, 62, 0],
469: [254, 108, 0, 198, 198, 198, 124, 0],
470: | |
parameter=None):
"""Construct all the necessary attributes for the IDBPacketTree object.
Parameters
----------
children : `list`, optional
list of IDBPacketTree, by default None: will be transformed to []
counter : `int`, optional
how often this parameter is repeated, by default 1
name : `str`, optional
unique name of the parameter, by default 'top'
parameter : IDBParameter, optional
enhanced description of the parameter, by default None
"""
if children is None:
children = []
self.children = children
self.counter = counter
self.name = name
self.parameter = parameter
    @property
    def children(self):
        """Sequential ordered list of child parameters (nested due to repeaters).

        Returns
        -------
        `list`
            List of `~stixcore/idb/idb/IDBPacketTree`
        """
        return self._children

    @children.setter
    def children(self, value):
        # Plain pass-through setter; no validation is performed.
        self._children = value
    @property
    def parameter(self):
        """Telemetry packet parameter.

        Returns
        -------
        `~stixcore/idb/idb/IDBParameter`
            Enhanced description of the parameter.
        """
        return self._parameter

    @parameter.setter
    def parameter(self, value):
        # Plain pass-through setter; no validation is performed.
        self._parameter = value
    @property
    def counter(self):
        """How often this parameter is repeated.

        Returns
        -------
        `int`
            Normally 1; only repeaters have values greater than 1.
        """
        return self._counter

    @counter.setter
    def counter(self, value):
        # Plain pass-through setter; no validation is performed.
        self._counter = value
    @property
    def name(self):
        """Unique name of the parameter.

        Returns
        -------
        `str`
            Project-wide unique name.
        """
        return self._name

    @name.setter
    def name(self, value):
        # Plain pass-through setter; no validation is performed.
        self._name = value
class IDB:
"""Class provides reading functionality to a IDB (definition of TM/TC packet structures)."""
def __init__(self, filename):
"""Create the IDB reader for a given file.
Parameters
----------
filename : `str` | `pathlib.Path`
Path to the idb file
"""
self.conn = None
self.cur = None
self.parameter_structures = dict()
self.packet_info = dict()
self.parameter_units = dict()
self.calibration_polynomial = dict()
self.calibration = dict()
self.calibration_curves = dict()
self.textual_parameter_lut = dict()
self.soc_descriptions = dict()
self.parameter_descriptions = dict()
self.s2k_table_contents = dict()
self.filename = filename
logger.info(f"Creating IDB reader for: {self.filename}")
if self.filename:
self._connect_database()
def is_connected(self):
"""Is the reader connected to the IDB.
returns
-------
True | False
"""
if self.cur:
return True
return False
    @property
    def version(self):
        """Get the version of the IDB.

        Returns
        -------
        `str`
            The version label like '2.3.4' or None.
        """
        # _version is populated by _connect_database() via get_idb_version().
        return self._version
    def get_idb_filename(self):
        """Get the absolute path to the connected IDB file.

        Returns
        -------
        `str`
            The absolute path to the IDB file.
        """
        # NOTE(review): works for str and Path filenames, although
        # _connect_database assumes a Path (it calls .as_uri()) — confirm.
        return os.path.abspath(self.filename)
    def _connect_database(self):
        """Open the sqlite IDB file read-only and cache a cursor and version.

        On failure the connection is closed again and the `sqlite3.Error`
        is re-raised.
        """
        try:
            # connect to the DB in read only mode
            # (assumes self.filename is a pathlib.Path — .as_uri() would fail on str)
            uri = self.filename.as_uri() + "?mode=ro"
            if sys.version_info < (3, 7):
                self.conn = sqlite3.connect(uri, check_same_thread=False, uri=True)
            else:
                # sqlite3.Connection.backup exists from Python 3.7 on: copy the
                # read-only file DB into a private in-memory DB so later reads
                # never touch the file again.
                source = sqlite3.connect(uri, check_same_thread=False, uri=True)
                self.conn = sqlite3.connect(':memory:')
                source.backup(self.conn)
                source.close()
            logger.info('IDB loaded from {}'.format(self.filename))
            self.cur = self.conn.cursor()
            self._version = self.get_idb_version()
        except sqlite3.Error:
            logger.error('Failed load IDB from {}'.format(self.filename))
            # Clean up the half-open connection before propagating the error.
            self.close()
            raise
    def __getstate__(self):
        """Return state values to be pickled.

        Only the file path is pickled; the sqlite connection and all caches
        are rebuilt by `__setstate__`.
        """
        return self.filename
def __setstate__(self, state):
"""Restore state from the unpickled state values."""
self.filename = state
self.parameter_structures = dict()
self.parameter_units = dict()
self.packet_info = dict()
self.calibration_polynomial = dict()
self.calibration = dict()
self.calibration_curves = dict()
self.textual_parameter_lut = dict()
self.soc_descriptions = dict()
self.parameter_descriptions = dict()
self.s2k_table_contents = dict()
if self.filename:
self._connect_database()
def close(self):
"""Close the IDB connection."""
if self.conn:
self.conn.close()
self.cur = None
else:
logger.warning("IDB connection already closed")
@classmethod
def generate_calibration_name(cls, prefix, id, suffix="TM"):
zeros = 10-len(prefix)-len(suffix)-len(str(id))
name = prefix + ("0" * zeros) + str(id) + suffix
return name, id + 1
def _execute(self, sql, arguments=None, result_type='list'):
"""Execute sql and return results in a list or a dictionary."""
if not self.cur:
raise Exception('IDB is not initialized!')
else:
if arguments:
self.cur.execute(sql, arguments)
else:
self.cur.execute(sql)
if result_type == 'list':
rows = self.cur.fetchall()
else:
rows = [
dict(
zip([column[0]
for column in self.cur.description], row))
for row in self.cur.fetchall()
]
return rows
def get_spid_info(self, spid):
"""Get SPID description.
returns
-------
(PID_DESCR, PID_TYPE, PID_STYPE)
"""
sql = 'select PID_DESCR,PID_TYPE,PID_STYPE from PID where PID_SPID=? limit 1'
return self._execute(sql, (spid, ))
def get_all_spid(self):
"""get list of all SPIDs and short description
returns
-------
(PID_SPID, PID_DESCR)
"""
sql = 'select PID_SPID, PID_DESCR from PID'
return self._execute(sql, None)
def get_scos_description(self, name):
"""get scos long description
Parameters
----------
name : ´str´
the scos_name like 'NIX00354'
Returns
-------
´str´
the long description
"""
if name in self.soc_descriptions:
return self.soc_descriptions[name]
else:
rows = self._execute(
'select SW_DESCR from sw_para where scos_name=? ', (name, ))
if rows:
res = rows[0][0]
self.soc_descriptions[name] = res
return res
logger.warning("nothing found in IDB table: sw_para")
return ''
def get_telemetry_description(self, spid):
"""Get telemetry data information.
Parameters
----------
spid : `int`
returns
-------
(SW_DESCR, tpcf_name)
"""
sql = ('select sw_para.SW_DESCR, tpcf.tpcf_name '
' from sw_para join tpcf '
'on tpcf.tpcf_name=sw_para.scos_name and tpcf.tpcf_spid= ?')
return self._execute(sql, (spid, ))
def get_packet_pi1_val_position(self, service_type, service_subtype):
"""Get offset and width for optional PI1_VAL for the packet defined by service type and
subtype.
Parameters
----------
service_type : `int`
service_subtype : `int`
returns
-------
`IDBPi1ValPosition` or None
"""
sql = ('select PIC_PI1_OFF, PIC_PI1_WID from PIC '
'where PIC_TYPE = ? and PIC_STYPE = ? and PIC_PI1_OFF >= 0 limit 1')
args = (service_type, service_subtype)
res = self._execute(sql, args, result_type='dict')
if res:
return IDBPi1ValPosition(**res[0])
return None
def get_parameter_description(self, name):
"""Get scos long description.
Parameters
----------
name : `str`
returns
-------
´str´
a long describtion
"""
if name in self.parameter_descriptions:
return self.parameter_descriptions[name]
else:
rows = self._execute('select PCF_DESCR from PCF where PCF_NAME=? ',
(name, ))
if not rows:
rows = self._execute(
'select CPC_DESCR from CPC where CPC_PNAME=? ', (name, ))
if rows:
res = rows[0][0]
self.parameter_descriptions[name] = res
return res
logger.warning("nothing found in IDB table: PCF or CPC")
return ''
def get_packet_type_info(self, packet_type, packet_subtype, pi1_val=None):
"""Identify packet type using service, service subtype and information in IDB table PID.
Parameters
----------
packet_type : `int`
packet_subtype : `int`
pi1_val : `int`
returns
-------
`IDBPacketTypeInfo` or `None` if not found
"""
if (packet_type, packet_subtype, pi1_val) in self.packet_info:
return self.packet_info[(packet_type, packet_subtype, pi1_val)]
if pi1_val is None:
sql = ('select pid_spid, pid_descr, pid_tpsd from PID '
'where PID_TYPE=? and PID_STYPE=? limit 1')
args = (packet_type, packet_subtype)
else:
sql = (
'select pid_spid, pid_descr, pid_tpsd from PID '
'where PID_TYPE=? and PID_STYPE=? and PID_PI1_VAL=? limit 1')
args = (packet_type, packet_subtype, pi1_val)
rows = self._execute(sql, args, 'dict')
if rows:
resObj = IDBPacketTypeInfo(**rows[0])
self.packet_info[(packet_type, packet_subtype, pi1_val)] = resObj
return resObj
else:
logger.warning(f"No information in IDB for service {packet_type},"
f"service_subtype {packet_subtype} and pi1_val: {pi1_val}")
return None
def get_s2k_parameter_types(self, ptc, pfc):
"""gets parameter type
Parameters
----------
ptc : `int`
the paramter
pfc : `int`
PFC_LB and PFC_UB
returns
-------
`str`
the type
"""
if (ptc, pfc) in self.s2k_table_contents:
return self.s2k_table_contents[(ptc, pfc)]
else:
sql = ('select S2K_TYPE from '
' tblConfigS2KParameterTypes where PTC = ? '
' and ? >= PFC_LB and PFC_UB >= ? limit 1')
args = (ptc, pfc, pfc)
rows = self._execute(sql, args, 'list')
if rows:
s2k_type = rows[0][0]
self.s2k_table_contents[(ptc, pfc)] = s2k_type
return s2k_type
logger.warning("nothing found in IDB table: tblConfigS2KParameterTypes")
return None
def get_telecommand_info(self, service_type, service_subtype, subtype=None):
"""get TC description for a header
Parameters
----------
service_type : `int`
service_subtype : `int`
subtype : `int`, optional
returns
--------
`dict` | `None`
"""
sql = (
'select CCF_CNAME, CCF_DESCR, CCF_DESCR2, '
' CCF_NPARS from CCF where CCF_TYPE=? and CCF_STYPE =? order by CCF_CNAME asc'
)
res = self._execute(sql, (service_type, service_subtype), 'dict')
index = 0
if len(res) > 1 and (subtype is not None):
index = subtype - 1
try:
return res[index]
except IndexError:
logger.warning("nothing found in IDB table: CCF")
return None
def get_telecommand_structure(self, name):
"""Get the structure of a telecommand by its name. The structure will be used to decode
the TC packet.
Parameters
----------
name : `str`
a structure name like 'ZIX06009'
returns
-------
tm structure
"""
sql = ('select CDF_ELTYPE, CDF_DESCR, CDF_ELLEN, CDF_BIT, '
'CDF_GRPSIZE, CDF_PNAME, CPC_DESCR, CPC_PAFREF, CPC_PTC,'
'CPC_PFC from CDF left join CPC on CDF_PNAME=CPC_PNAME'
' where CDF_CNAME=? order by CDF_BIT asc')
args = (name, )
res = self._execute(sql, args, 'dict')
return res
def is_variable_length_telecommand(self, name):
"""Determines if the TM structure is of variable length
Parameters
----------
name : `str`
a structure name like 'ZIX06009'
returns
-------
True|False
"""
sql = 'select CDF_GRPSIZE from CDF where CDF_GRPSIZE >0 and CDF_CNAME=?'
args = (name, )
rows = self._execute(sql, args, 'list')
if rows:
num_repeater = int(rows[0][0])
if num_repeater > 0:
return True
return | |
Break: %s" % (tid, repr(bp)))
cmdstr = self.bpcmds.get(bp.id, None)
if cmdstr != None:
self.onecmd(cmdstr)
else:
self.vprint("Thread: %d NOTIFY_BREAK" % tid)
if self.runagain: # One-time run-again behavior (for cli option)
if self.config.vdb.BreakOnEntry:
setupBreakOnEntry(trace)
trace.runAgain()
self.runagain = False
elif event == vtrace.NOTIFY_EXIT:
ecode = trace.getMeta('ExitCode')
self.vprint("PID %d exited: %d (0x%.8x)" % (pid,ecode,ecode))
elif event == vtrace.NOTIFY_LOAD_LIBRARY:
self.vprint("Loading Binary: %s" % trace.getMeta("LatestLibrary",None))
if self.waitlib != None:
normname = trace.getMeta('LatestLibraryNorm', None)
if self.waitlib == normname:
self.waitlib = None
trace.runAgain(False)
elif event == vtrace.NOTIFY_UNLOAD_LIBRARY:
self.vprint("Unloading Binary: %s" % trace.getMeta("LatestLibrary",None))
elif event == vtrace.NOTIFY_CREATE_THREAD:
self.vprint("New Thread: %d" % tid)
elif event == vtrace.NOTIFY_EXIT_THREAD:
ecode = trace.getMeta("ExitCode", 0)
self.vprint("Exit Thread: %d (ecode: 0x%.8x (%d))" % (tid,ecode,ecode))
elif event == vtrace.NOTIFY_DEBUG_PRINT:
s = "<unknown>"
win32 = trace.getMeta("Win32Event", None)
if win32:
s = win32.get("DebugString", "<unknown>")
self.vprint("DEBUG PRINT: %s" % s)
else:
pass
#self.vprint('unhandled event: %d' % event)
###################################################################
#
# All CLI extension commands start here
#
# FIXME this is duplicate, but... PUNT...
    def do_writemem(self, args):
        """
        Over-write some memory in the target address space.

        Usage: writemem [options] <addr expression> <string>
        -X The specified string is in hex (ie 414141 = AAA)
        -U The specified string needs to be unicode in mem (AAA -> 410041004100)
        """
        dohex = False
        douni = False
        try:
            argv = e_cli.splitargs(args)
            opts,args = getopt(argv, "XU")
        # NOTE(review): bare except — any parse failure just re-displays usage,
        # but this also swallows KeyboardInterrupt etc.
        except:
            return self.do_help("writemem")
        if len(args) != 2:
            return self.do_help("writemem")
        for opt,optarg in opts:
            if opt == "-X":
                dohex = True
            elif opt == "-U":
                douni = True
        exprstr, memstr = args
        # str.decode('hex') is Python 2 only.
        if dohex: memstr = memstr.decode('hex')
        # Widen to UTF-16-LE style by interleaving NUL bytes (ASCII input assumed).
        if douni: memstr = ("\x00".join(memstr)) + "\x00"
        addr = self.parseExpression(exprstr)
        self.memobj.writeMemory(addr, memstr)
        # Notify any attached UI that target memory changed.
        self.vdbUIEvent('vdb:writemem', (addr,memstr))
def do_vstruct(self, line):
"""
List the available structure modules and optionally
structure definitions from a particular module in the
current vstruct.
Usage: vstruct [modname]
"""
if len(line) == 0:
self.vprint("\nVStruct Namespaces:")
plist = self.trace.getStructNames()
else:
self.vprint("\nKnown Structures (from %s):" % line)
plist = self.trace.getStructNames(namespace=line)
plist.sort()
for n in plist:
self.vprint(str(n))
self.vprint("\n")
def do_dis(self, line):
"""
Print out the opcodes for a given address expression
Usage: dis <address expression> [<size expression>]
"""
argv = e_cli.splitargs(line)
size = 20
argc = len(argv)
if argc == 0:
addr = self.trace.getProgramCounter()
else:
addr = self.parseExpression(argv[0])
if argc > 1:
size = self.parseExpression(argv[1])
self.vprint("Dissassembly:")
self.canvas.renderMemory(addr, size, rend=self.opcoderend)
def do_var(self, line):
"""
Set a variable in the expression parsing context. This allows
for scratchspace names (python compatable names) to be used in
expressions.
Usage: var <name> <addr_expression>
NOTE: The address expression *must* resolve at the time you set it.
"""
t = self.trace
if len(line):
argv = e_cli.splitargs(line)
if len(argv) == 1:
return self.do_help("var")
name = argv[0]
expr = " ".join(argv[1:])
addr = t.parseExpression(expr)
t.setVariable(name, addr)
vars = t.getVariables()
self.vprint("Current Variables:")
if not vars:
self.vprint("None.")
else:
vnames = vars.keys()
vnames.sort()
for n in vnames:
val = vars.get(n)
if type(val) in (int, long):
self.vprint("%20s = 0x%.8x" % (n,val))
else:
rstr = repr(val)
if len(rstr) > 30:
rstr = rstr[:30] + '...'
self.vprint("%20s = %s" % (n,rstr))
    def do_alloc(self, args):
        #"""
        #Allocate a chunk of memory in the target process. You may
        #optionally specify permissions and a suggested base address.
        #Usage: alloc [-p rwx] [-s <base>] <size>
        #"""
        """
        Allocate a chunk of memory in the target process. It will be
        allocated with rwx permissions.
        Usage: alloc <size expr>
        """
        if len(args) == 0:
            return self.do_help("alloc")
        t = self.trace
        #argv = e_cli.splitargs(args)
        try:
            # The size argument may be any debugger expression.
            size = t.parseExpression(args)
            base = t.allocateMemory(size)
            self.vprint("Allocated %d bytes at: 0x%.8x" % (size, base))
        except Exception, e:
            # Best-effort: report the failure but keep the CLI alive.
            traceback.print_exc()
            self.vprint("Allocation Error: %s" % e)
def do_autoscript(self, line):
'''
Tell vdb to run a python script on every process attach.
Usage: autoscript <scriptfile>|clear
'''
argv = e_cli.splitargs(line)
if len(argv) != 1:
self.vprint('Current Autoscript: %s' % self.autoscript)
return
if argv[0] == 'clear':
self.vprint('clearing autoscript: %s' % self.autoscript)
return
if not os.path.isfile(argv[0]):
self.vprint('Error: %s is not a valid file' % argv[0])
return
self.autoscript = argv[0]
def do_memload(self, line):
'''
Load a file into memory. (straight mapping, no parsing)
Usage: memload <filename>
'''
argv = e_cli.splitargs(line)
if len(argv) != 1:
return self.do_help('memload')
fname = argv[0]
if not os.path.isfile(fname):
self.vprint('Invalid File: %s' % fname)
return
fbytes = file(fname, 'rb').read()
memva = self.trace.allocateMemory(len(fbytes))
self.trace.writeMemory(memva, fbytes)
self.vprint('Loaded At: 0x%.8x (%d bytes)' % (memva, len(fbytes)))
def do_struct(self, line):
'''
Show and optionally apply a vstruct definition to memory.
Use the 'vstruct' command to find and display a structure of interest.
Usage: struct <vstruct name> [memory expression]
'''
argv = shlex.split(line)
if len(argv) not in (1, 2):
return self.do_help('struct')
clsname = argv[0]
expr = None
va = None
if len(argv) == 2:
expr = argv[1]
va = self.trace.parseExpression(expr)
sinfo = self.trace.getStruct(clsname, va=va)
if sinfo is None:
self.vprint('%s not found.' % clsname)
return
# yuck.
if len(argv) == 1:
va = 0
stree = sinfo.tree(va=va)
self.vprint(stree)
def do_signal(self, args):
"""
Show the current pending signal/exception code.
Usage: signal
"""
# FIXME -i do NOT pass the signal on to the target process.
t = self.trace
t.requireAttached()
cursig = t.getCurrentSignal()
if cursig == None:
self.vprint('No Pending Signals/Exceptions!')
else:
self.vprint("Current signal: %d (0x%.8x)" % (cursig, cursig))
def do_snapshot(self, line):
"""
Take a process snapshot of the current (stopped) trace and
save it to the specified file.
Usage: snapshot <filename>
"""
if len(line) == 0:
return self.do_help("snapshot")
alist = e_cli.splitargs(line)
if len(alist) != 1:
return self.do_help("snapshot")
t = self.trace
t.requireAttached()
self.vprint("Taking Snapshot...")
snap = vs_snap.takeSnapshot(t)
self.vprint("Saving To File")
snap.saveToFile(alist[0])
self.vprint("Done")
snap.release()
    def do_ignore(self, args):
        """
        Add the specified signal id (exception id for windows) to the ignored
        signals list for the current trace. This will make the smallest possible
        performance impact for that particular signal but will also not alert
        you that it has occurred.
        Usage: ignore [options] [-c | <sigcode>...]
        -d - Remove the specified signal codes.
        -c - Include the *current* signal in the sigcode list
        -C - Clear the list of ignored signals
        Example: ignore -c # Ignore the currently posted signal
                 ignore -d 0x80000001 # Remove 0x80000001 from the ignores
        """
        argv = e_cli.splitargs(args)
        try:
            opts,args = getopt(argv, 'Ccd')
        except Exception, e:
            return self.do_help('ignore')
        remove = False
        sigs = []
        for opt,optarg in opts:
            if opt == '-c':
                # -c: queue the currently pending signal for ignoring.
                sig = self.trace.getCurrentSignal()
                if sig == None:
                    self.vprint('No current signal to ignore!')
                    return
                sigs.append(sig)
            elif opt == '-C':
                # -C: wipe the whole ignore list immediately.
                self.vprint('Clearing ignore list...')
                self.trace.setMeta('IgnoredSignals', [])
            elif opt == '-d':
                # -d: switch to removal mode for all listed codes.
                remove = True
        # Remaining positional args are signal codes (expressions allowed).
        for arg in args:
            sigs.append(self.trace.parseExpression(arg))
        for sig in sigs:
            if remove:
                self.vprint('Removing: 0x%.8x' % sig)
                self.trace.delIgnoreSignal(sig)
            else:
                self.vprint('Adding: 0x%.8x' % sig)
                self.trace.addIgnoreSignal(sig)
        # Always show the resulting ignore list.
        ilist = self.trace.getMeta("IgnoredSignals")
        self.vprint("Currently Ignored Signals/Exceptions:")
        for x in ilist:
            self.vprint("0x%.8x (%d)" % (x, x))
def do_exec(self, cmd):
"""
Execute a program with the given command line and
attach to it.
Usage: exec </some/where and some args>
"""
t = self.newTrace()
t.execute(cmd)
    def do_threads(self, line):
        """
        List the current threads in the target process or select
        the current thread context for the target tracer.
        Usage: threads [thread id]
        """
        self.trace.requireNotRunning()
        # NOTE(review): if requireNotRunning() raises when the trace is
        # running, the isRunning() branch below is unreachable — confirm.
        if self.trace.isRunning():
            self.vprint("Can't list threads while running!")
            return
        if len(line) > 0:
            # A thread id was given: make it the current thread context.
            # int(line, 0) accepts decimal or 0x-prefixed hex.
            thrid = int(line, 0)
            self.trace.selectThread(thrid)
            self.vdbUIEvent('vdb:setthread', thrid)
        self.vprint("Current Threads:")
        self.vprint("[thrid] [thrinfo] [pc]")
        curtid = self.trace.getMeta("ThreadId")
        for tid, tinfo in self.trace.getThreads().items():
            # '*' marks the currently selected thread.
            a = " "
            if tid == curtid:
                a = "*"
            sus = ""
            if self.trace.isThreadSuspended(tid):
                sus = "(suspended)"
            ctx = self.trace.getRegisterContext(tid)
            pc = ctx.getProgramCounter()
            self.vprint("%s%6d 0x%.8x 0x%.8x %s" % (a, tid, tinfo, pc, sus))
    def do_suspend(self, line):
        """
        Suspend a thread.
        Usage: suspend <-A | <tid>[ <tid>...]>
        """
        argv = e_cli.splitargs(line)
        try:
            opts,args = getopt(argv, "A")
        except Exception, e:
            return self.do_help("suspend")
        for opt,optarg in opts:
            if opt == "-A":
                # -A: operate on every thread in the target process.
                # hehe...
                args = [str(tid) for tid in self.trace.getThreads().keys()]
        if not len(args):
            return self.do_help("suspend")
        for arg in args:
            tid = int(arg)
            self.trace.suspendThread(tid)
            self.vprint("Suspended Thread: %d" % tid)
def do_restart(self, line):
'''
Restart the current process.
Usage: restart
NOTE: This only works if the process was exec'd to begin with!
TODO: Plumb options for persisting bp's etc...
'''
t = self.trace
cmdline | |
CPU utilization. Please check top output. Possible system overload.",
"CPU utilization check.");
r1 = group by CLUSTER, KEY do SD_ANOMALY(s, ==, 3);
ASSERT(r1, False, "Skewed cluster CPU utilization.", "ANOMALY", WARNING,
"Listed node[s] show different CPU utilization characteristic compared to other node[s]. Please run top command on those node[s] to confirm such behavior. Possible skew in workload.",
"CPU utilization anomaly check.");
s = select "resident_memory" from SYSTEM.TOP save;
r = group by KEY do SD_ANOMALY(s, ==, 3);
ASSERT(r, False, "Skewed cluster resident memory utilization.", "ANOMALY", WARNING,
"Listed node[s] show different resident memory usage compared to other node[s]. Please run top command on those node[s] to confirm such behavior. Possible skewed data distribution. This may be non-issue in case migrations are going on.",
"Resident memory utilization anomaly.");
s = select "system_swapping" from SERVICE.STATISTICS save;
r = do s == true;
ASSERT(r, False, "System memory swapping.", "LIMITS", INFO,
"Listed node[s] are swapping. Please run 'show statistics service like system_swapping' to confirm such behaviour. Possible misconfiguration. This may be non-issue if amount of swap is small and good amount of memory available.",
"System swap check.");
/* TODO - determine whether this low-free-memory condition is actually an issue */
s = select "system_free_mem_pct" from SERVICE.STATISTICS save;
r = do s < 20;
ASSERT(r, False, "Low system memory percentage.", "LIMITS", CRITICAL,
"Listed node[s] have lower than normal (< 20%) system free memory percentage. Please run 'show statistics service like system_free_mem_pct' to get actual values. Possible misconfiguration.",
"System memory percentage check.");
f = select "memory_free_pct" as "stats", "free-pct-memory" as "stats" from NAMESPACE.STATISTICS save;
s = select "stop-writes-pct" as "stats" from NAMESPACE.CONFIG save;
u = do 100 - f save as "memory_used_pct";
r = do u <= s;
ASSERT(r, True, "Low namespace memory available pct (stop-write enabled).", "OPERATIONS", CRITICAL,
"Listed namespace[s] have lower than normal (< (100 - memory_free_pct)) available memory space. Probable cause - namespace size misconfiguration.",
"Critical Namespace memory available pct check.");
/* NB: add checks for the case where nodes are not homogeneous (memory / number of CPUs, etc.) */
s = select "available_bin_names", "available-bin-names" from NAMESPACE save;
r = group by NAMESPACE do s > 3200;
ASSERT(r, True, "Low namespace available bin names.", "LIMITS", WARNING,
"Listed node[s] have low available bin name (< 3200) for corresponding namespace[s]. Maximum unique bin names allowed per namespace are 32k. Please run 'show statistics namespace like available' to get actual values. Possible improperly modeled data.",
"Namespace available bin names check.");
/* Holds only upto 4B key */
SET CONSTRAINT VERSION < 3.12;
s = select "memory-size" from NAMESPACE.CONFIG save;
r = group by CLUSTER, NODE, NAMESPACE do SUM(s);
e = do r <= 274877906944;
ASSERT(e, True, "Namespace configured to use more than 256G.", "LIMITS", WARNING,
"On listed nodes namespace as mentioned have configured more than 256G of memory. Namespace with data not in memory can have max upto 4 billion keys and can utilize only up to 256G. Please run 'show statistics namespace like memory-size' to check configured memory.",
"Namespace per node memory limit check.");
SET CONSTRAINT VERSION ALL;
/*
The following query selects the configured memory-size from the namespace config and the total RAM size from the system statistics.
The group-by over the namespace stats sums the per-namespace memory sizes, yielding a node-level memory size.
The group-by over the system stats removes the key, which is required for proper matching in simple operations.
*/
s = select "memory-size" from NAMESPACE.CONFIG save;
n = group by NODE do SUM(s) save as "sum of memory-size";
s = select "total" from SYSTEM.FREE.MEM;
m = group by NODE do SUM(s) save as "total physical memory";
r = do n <= m on common;
ASSERT(r, True, "Namespace memory misconfiguration.", "LIMITS", WARNING,
"Listed node[s] have more namespace memory configured than available physical memory. Please run 'show statistics namespace like memory-size' to check configured memory and check output of 'free' for system memory. Possible namespace misconfiguration.",
"Namespace memory configuration check.");
r = do m - n on common save as "runtime memory";
r = do r >= 5368709120;
ASSERT(r, True, "Aerospike runtime memory configured < 5G.", "LIMITS", INFO,
"Listed node[s] have less than 5G free memory available for Aerospike runtime. Please run 'show statistics namespace like memory-size' to check configured memory and check output of 'free' for system memory. Possible misconfiguration.",
"Runtime memory configuration check.");
/*
Current configurations and config file values difference check
*/
oc = select * from SERVICE.ORIGINAL_CONFIG save;
c = select * from SERVICE.CONFIG save;
r = do oc == c on common;
ASSERT(r, True, "Service configurations different than config file values.", "OPERATIONS", INFO,
"Listed Service configuration[s] are different than actual initial value set in aerospike.conf file.",
"Service config runtime and conf file difference check.");
oc = select * from NETWORK.ORIGINAL_CONFIG save;
c = select * from NETWORK.CONFIG save;
r = do oc == c on common;
ASSERT(r, True, "Network configurations different than config file values.", "OPERATIONS", INFO,
"Listed Network configuration[s] are different than actual initial value set in aerospike.conf file.",
"Network config runtime and conf file difference check.");
oc = select * from NAMESPACE.ORIGINAL_CONFIG save;
c = select * from NAMESPACE.CONFIG save;
r = do oc == c on common;
ASSERT(r, True, "Namespace configurations different than config file values.", "OPERATIONS", INFO,
"Listed namespace configuration[s] are different than actual initial value set in aerospike.conf file.",
"Namespace config runtime and conf file difference check.");
oc = select * from XDR.ORIGINAL_CONFIG save;
c = select * from XDR.CONFIG save;
r = do oc == c on common;
ASSERT(r, True, "XDR configurations different than config file values.", "OPERATIONS", INFO,
"Listed XDR configuration[s] are different than actual initial value set in aerospike.conf file.",
"XDR config runtime and conf file difference check.");
oc = select * from DC.ORIGINAL_CONFIG save;
c = select * from DC.CONFIG save;
r = do oc == c on common;
ASSERT(r, True, "DC configurations different than config file values.", "OPERATIONS", INFO,
"Listed DC configuration[s] are different than actual initial value set in aerospike.conf file.",
"DC config runtime and conf file difference check.");
/*
Following query selects proto-fd-max from service config and client_connections from service statistics.
It uses as clause to get proper matching structure for simple operation.
*/
max = select "proto-fd-max" as "fd" from SERVICE.CONFIG save;
conn = select "client_connections" as "fd" from SERVICE.STATISTICS save;
bound = do 80 %% max;
r = do conn > bound;
ASSERT(r, False, "High system client connections.", "OPERATIONS", WARNING,
"Listed node[s] show higher than normal client-connections (> 80% of the max configured proto-fd-max). Please run 'show config like proto-fd-max' and 'show statistics like client_connections' for actual values. Possible can be network issue / improper client behavior / FD leak.",
"Client connections check.");
s = select like(".*available_pct") as "stats" from NAMESPACE.STATISTICS save;
m = select like(".*min-avail-pct") as "stats" from NAMESPACE.CONFIG save;
critical_check = do s >= m;
ASSERT(critical_check, True, "Low namespace disk available pct (stop-write enabled).", "OPERATIONS", CRITICAL,
"Listed namespace[s] have lower than normal (< min-avail-pct) available disk space. Probable cause - namespace size misconfiguration.",
"Critical Namespace disk available pct check.");
critical_check = do s < m;
r = do s >= 20;
r = do r || critical_check;
ASSERT(r, True, "Low namespace disk available pct.", "OPERATIONS", WARNING,
"Listed namespace[s] have lower than normal (< 20 %) available disk space. Probable cause - namespace size misconfiguration.",
"Namespace disk available pct check.");
s = select * from SERVICE.CONFIG ignore "heartbeat.mtu", "node-id-interface", "node-id", "pidfile", like(".*address"), like(".*port") save;
r = group by CLUSTER, KEY do NO_MATCH(s, ==, MAJORITY) save;
ASSERT(r, False, "Different service configurations.", "OPERATIONS", WARNING,
"Listed Service configuration[s] are different across multiple nodes in cluster. Please run 'show config service diff' to check different configuration values. Probable cause - config file misconfiguration.",
"Service configurations difference check.");
multicast_mode_enabled = select like(".*mode") from NETWORK.CONFIG;
multicast_mode_enabled = do multicast_mode_enabled == "multicast";
multicast_mode_enabled = group by CLUSTER, NODE do OR(multicast_mode_enabled);
s = select like(".*mtu") from SERVICE.CONFIG save;
r = group by CLUSTER do NO_MATCH(s, ==, MAJORITY) save;
ASSERT(r, False, "Different heartbeat.mtu.", "OPERATIONS", WARNING,
"Listed node[s] have a different heartbeat.mtu configured. A multicast packet can only be as large as the interface mtu. Different mtu values might create cluster stability issue. Please contact Aerospike Support team.",
"heartbeat.mtu check.",
multicast_mode_enabled);
interval = select "heartbeat.interval" from NETWORK.CONFIG save;
r1 = do interval < 150;
r2 = do interval > 250;
r = do r1 || r2;
ASSERT(r, False, "Heartbeat interval is not in expected range (150 <= p <= 250)", "OPERATIONS", INFO,
"Listed nodes(s) have heartbeat interval value not in expected range (150 <= p <= 250). New node might fail to join cluster.",
"Heartbeat interval Check (150 <= p <= 250)");
timeout | |
if isData:
add_dref(xrefFrom, xrefTo, flag)
else:
add_cref(xrefFrom, xrefTo, flag)
except Exception as e:
None
def keepCon_VFuncAndVTSMember(funcEA, vtableStructId, offset, keepNameCon=True, keepTypeCon=True, isFromFuncToMember = True):
    """
    Keep a virtual function and its vtable-struct member consistent.

    When isFromFuncToMember is True, the function's name/type are pushed
    onto the struct member at *offset*; otherwise the member's name/type
    are pushed onto the function. keepNameCon / keepTypeCon gate the name
    and type propagation respectively.
    """
    memberId = GetMemberId(vtableStructId, offset)
    memberName = ida_struct.get_member_name(memberId)
    funcName = getName(funcEA)
    # Cross-reference the struct member and the function in both directions.
    addXref(memberId, funcEA, 1)
    addXref(funcEA, memberId, 1)
    if None is funcName:
        return
    # Pure-virtual / null stubs carry no useful name or type information.
    if funcName.startswith("___cxa_pure_virtual") or funcName.startswith("nullsub_"):
        return
    if isFromFuncToMember:
        # Propagate function -> member: address comment, name, pointer type.
        SetMemberComment(vtableStructId, offset, hex(funcEA), 1)
        if keepNameCon:
            if memberName != funcName and memberName != None and memberName.replace("::", "__") != funcName:
                SetOrAddMemberNameOverride(vtableStructId, offset, funcName)
        if keepTypeCon:
            funcTinfo = getTinfoAtEA(funcEA)
            if None is funcTinfo:
                return
            # Member type becomes a pointer to the function's type.
            funcPtrTinfo = tinfo_t()
            funcPtrTinfo.create_ptr(funcTinfo)
            SetType(memberId, str(funcPtrTinfo))
    else:
        # Propagate member -> function.
        if keepNameCon:
            if memberName != funcName and memberName != None and memberName.replace("::", "__") != funcName:
                demangledFuncName = Demangle(funcName, GetLongPrm(INF_LONG_DN))
                # We do not want to change func name if it has a mangled name
                if None is demangledFuncName:
                    setNameOverride(funcEA, memberName)
                    addXref(memberId, funcEA, 1)
                    addXref(funcEA, memberId, 1)
        if keepTypeCon:
            funcName = getName(funcEA)
            funcPTRType = GetType(memberId)
            # BUGFIX: this previously tested the undefined name 'funcType'
            # (NameError whenever this path executed); the member's pointer
            # type string is what must exist before it can be rewritten into
            # a plain function type for the function itself.
            if funcPTRType != None:
                funcPTRStartLoc = funcPTRType.find("(")
                funcPTREndLoc = funcPTRType.find(")")
                # "ret (*)(args)" -> "ret funcName (args...)".
                funcType = funcPTRType[:funcPTRStartLoc] + " " + funcName + " " + funcPTRType[funcPTREndLoc + 1:]
                SetType(funcEA, funcType)
def getAllParentVirtualFuncAtOffset(className, funcEA, offset):
    """
    Walk up the class hierarchy and collect, for each ancestor whose vtable
    covers *offset*, the virtual function found at that vtable slot.
    Returns a dict mapping ancestor function EA -> ancestor class name;
    slots that hold the same EA as funcEA are skipped.
    """
    parentVirtualFuncEAToClassNameMap = {}
    # Stop at the hierarchy root ("OSObject"), at classes with no known
    # parent, or when the caller passed offset == -1 (unknown slot).
    while className in classNameToParentClassNameMap and className != None and className != "OSObject" and offset != -1:
        parentClassName = classNameToParentClassNameMap[className]
        if parentClassName in classNameToVTableAddrMap:
            parentVTableStartEA, parentVTableEndEA = classNameToVTableAddrMap[parentClassName]
            # Only read the slot when it lies inside the parent's vtable.
            if parentVTableStartEA + offset < parentVTableEndEA:
                parentFuncEA = Qword(parentVTableStartEA + offset)
                if parentFuncEA != funcEA:
                    parentVirtualFuncEAToClassNameMap[parentFuncEA] = parentClassName
        className = parentClassName
    return parentVirtualFuncEAToClassNameMap
def getAllChildVirtualFuncAtOffset(className, funcEA, offset):
    """
    Recursively collect, for every descendant class whose vtable covers
    *offset*, the virtual function EA at that slot.
    Returns a dict mapping child function EA -> child class name.
    """
    childVirtualFuncEAToClassNameMap = {}
    if not None is className and className in classNameToChildClassNameSetMap and offset != -1:
        childClassNameSet = classNameToChildClassNameSetMap[className]
        for childClassName in childClassNameSet:
            if childClassName in classNameToVTableAddrMap:
                childVTableStartEA, childVTableEndEA = classNameToVTableAddrMap[childClassName]
                # Only read the slot when it lies inside the child's vtable.
                if childVTableStartEA + offset < childVTableEndEA:
                    childFuncEA = Qword(childVTableStartEA + offset)
                    # Skip slots the child did not override, and skip when the
                    # parent's function is the pure-virtual stub.
                    if childFuncEA != funcEA and getName(funcEA) != "___cxa_pure_virtual":
                        childVirtualFuncEAToClassNameMap[childFuncEA] = childClassName
                        # Recurse into grandchildren of this child.
                        childMapForChild = getAllChildVirtualFuncAtOffset(childClassName, childFuncEA, offset)
                        childVirtualFuncEAToClassNameMap.update(childMapForChild)
    return childVirtualFuncEAToClassNameMap
def getAllChildMemberIdAtOffset(className, offset):
    """
    Recursively collect the struct member ids at *offset* for every
    descendant class whose class struct is large enough to contain it.
    Returns a dict mapping child member id -> child class struct id.
    """
    childMemberIdToClassStructIdMap = {}
    if not None is className and className in classNameToChildClassNameSetMap and offset != -1:
        childClassNameSet = classNameToChildClassNameSetMap[className]
        for childClassName in childClassNameSet:
            if childClassName in classNameToClassStructIdMap:
                childClassStructId = classNameToClassStructIdMap[childClassName]
                if childClassStructId != BADADDR:
                    childClassStructSize = get_struc_size(childClassStructId)
                    # Only take the member when the offset is inside the struct.
                    if offset < childClassStructSize:
                        memberId = GetMemberId(childClassStructId, offset)
                        childMemberIdToClassStructIdMap[memberId] = childClassStructId
                    # Recurse into grandchildren even when this child's struct
                    # is too small to contain the offset itself.
                    childMapForChild = getAllChildMemberIdAtOffset(childClassName, offset)
                    childMemberIdToClassStructIdMap.update(childMapForChild)
    return childMemberIdToClassStructIdMap
def keepAllConsistency_AncestorToDescendant():
    """Placeholder: ancestor-to-descendant propagation is not implemented."""
    pass
def keepAllCon_ParentAndChild():
    """Run parent->child consistency for every class with a recovered vtable."""
    for className in classNameToVTableAddrMap:
        keepCon_ParentAndChildren(className)
def keepCon_ParentAndChildren(className):
    """
    Propagate vtable name/type consistency from *className* down to its
    children, visiting only the slots className itself introduced (slots
    inherited from its parent's vtable are skipped).
    """
    if className in classNameToVTableAddrMap:
        (vtableStartEA, vtableEndEA) = classNameToVTableAddrMap[className]
        vtableEA = vtableStartEA
        if className in classNameToParentClassNameMap:
            parentClassName = classNameToParentClassNameMap[className]
            if parentClassName in classNameToVTableAddrMap:
                (parentVTableStartEA, parentVTableEndEA) = classNameToVTableAddrMap[parentClassName]
                # only process the methods defined by itself
                vtableEA = vtableStartEA + (parentVTableEndEA - parentVTableStartEA)
        # Walk the remaining 8-byte slots of the vtable.
        while vtableEA < vtableEndEA:
            offset = vtableEA - vtableStartEA
            funcEA = Qword(vtableEA)
            funcName = getName(funcEA)  # NOTE(review): unused — candidate for removal
            keepCon_ParentAndChildrenVTableAtOffset(className, funcEA, offset, True, True)
            vtableEA = vtableEA + 0x8
def keepCon_ParentAndChildrenClassStructAtOffset(parentClassName, offset, keepNameCon, keepTypeCon):
    """
    Push a parent class-struct member's name and/or type at *offset* down
    into every descendant class struct that still has a placeholder there.
    """
    #print "keepCon_ParentAndChildrenClassStructAtOffset", parentClassName, hex(offset)
    if parentClassName != None and parentClassName in classNameToClassStructIdMap:
        childMemberIdToClassStructIdMap = getAllChildMemberIdAtOffset(parentClassName, offset)
        parentClassStructId = classNameToClassStructIdMap[parentClassName]
        parentClassStruct = get_struc(parentClassStructId)
        member = idaapi.get_member(parentClassStruct, offset)
        if None is member:
            #print "member is None", parentClassName, hex(offset)
            return
        memberName = ida_struct.get_member_name(member.id)
        # Only propagate real names, not auto-generated placeholders.
        if keepNameCon and not (memberName is None or memberName.startswith("member") or memberName.startswith("field")):
            for childMemberId in childMemberIdToClassStructIdMap:
                childClassStructId = childMemberIdToClassStructIdMap[childMemberId]
                childMemberName = ida_struct.get_member_name(childMemberId)
                # Overwrite only placeholder names in the child.
                if childMemberName.startswith("member") or childMemberName.startswith("field"):
                    childMemberNewName = memberName
                    SetOrAddMemberName(childClassStructId, offset, childMemberNewName)
                    #print "SetOrAddMemberName", hex(childClassStructId), hex(offset), childMemberNewName
        memberType = GetType(member.id)
        if keepTypeCon and not None is memberType:
            for childMemberId in childMemberIdToClassStructIdMap:
                childClassStructId = childMemberIdToClassStructIdMap[childMemberId]
                childMemberType = GetType(childMemberId)
                # Fill in the child's type only when it has none yet.
                if None is childMemberType:
                    childMemberNewType = memberType
                    SetType(childMemberId, childMemberNewType)
def keepCon_ParentAndChildrenVTableAtOffset(parentClassName, funcEA, offset, keepNameCon, keepTypeCon):
    """
    Propagate a parent's virtual function name and/or prototype at vtable
    slot *offset* down to every descendant's function at the same slot.
    """
    if parentClassName != None:
        childVirtualFuncEAToClassNameMap = getAllChildVirtualFuncAtOffset(parentClassName, funcEA, offset)
        funcName = getName(funcEA)
        if None is funcName:
            #print "keepCon_ParentAndChildrenVTableAtOffset funcName None {} {} 0x{:016X}".format(parentClassName, offset, funcEA)
            return
        #if None is funcName:
        #    print "parentClassName {}, 0x{:016X} name None".format(parentClassName, funcEA)
        demangledFuncName = Demangle(funcName, GetLongPrm(INF_LONG_DN))
        # Name propagation: skip pure-virtual stubs and auto "sub_" names.
        if keepNameCon and funcName != "___cxa_pure_virtual" and not funcName.startswith("sub_"):
            for childFuncEA in childVirtualFuncEAToClassNameMap:
                childFuncName = getName(childFuncEA)
                childClassName = childVirtualFuncEAToClassNameMap[childFuncEA]
                childFuncNewName = None
                if demangledFuncName != None:
                    # Mangled parent name: swap the embedded class component.
                    childFuncNewName = replaceClassInMangledName(funcName, childClassName)
                elif "::" in funcName:
                    # Plain "Class::method" name: rebuild with the child class.
                    #childFuncNewName = funcName.replace(parentClassName, childClassName)
                    childFuncNewName = childClassName + funcName[funcName.rfind("::"):]
                if not None is childFuncNewName and childFuncNewName != childFuncName and childFuncName != "___cxa_pure_virtual":
                    set_name(childFuncEA, childFuncNewName)
                    #print "keepCon_ParentAndChildrenVTableAtOffset", parentClassName, hex(funcEA), funcName, hex(offset), hex(childFuncEA), childClassName
        if keepTypeCon and funcName != "___cxa_pure_virtual" :
            funcType = GetType(funcEA)
            # in some conditions, we can not know the parent func type but we can know the child func type,
            # in these cases, we should first set the parent func type for propagation
            arglist = parseFuncTypeToGetArglist(funcType)
            # Ensure the implicit "this" pointer is the first argument.
            if len(arglist) > 0 and not (arglist[0].startswith(parentClassName+"*") or arglist[0].startswith(parentClassName+" *")):
                arglist.insert(0, parentClassName + "*" + "this")
            for childFuncEA in childVirtualFuncEAToClassNameMap:
                childFuncClassName = childVirtualFuncEAToClassNameMap[childFuncEA]
                childFuncType = GetType(childFuncEA)
                childFuncName = getName(childFuncEA)
                childDemangledFuncName = Demangle(childFuncName, GetLongPrm(INF_LONG_DN))
                # Only overwrite child prototypes that lack a mangled name.
                if None is childDemangledFuncName and childFuncName != "___cxa_pure_virtual":
                    childFuncArgList = parseFuncTypeToGetArglist(childFuncType)
                    # propagate parent func type only when the parent func has more args than children
                    if len(arglist) > len(childFuncArgList):
                        # NOTE(review): funcType may be None here (GetType can
                        # fail); .replace would then raise — confirm callers
                        # guarantee a typed parent on this path.
                        childFuncNewType = funcType.replace(parentClassName, childFuncClassName)
                        childFuncNewTypeArgStartLoc = childFuncNewType.find("(")
                        childFuncNewType = childFuncNewType[:childFuncNewTypeArgStartLoc] + " " + childFuncClassName + childFuncNewType[childFuncNewTypeArgStartLoc:]
                        if childFuncType != childFuncNewType:
                            SetType(childFuncEA, childFuncNewType)
def keepAllConsistency():
    """Run the vtable<->struct and parent<->child consistency passes."""
    print "[+] Keep Everything in consistency"
    # Too many problems, should not be used
    # print classNameToParentClassNameMap
    keepAllCon_VTAndVTS()
    keepAllCon_ParentAndChild()
def splitArgString(argString):
    """
    Split a comma-separated C argument string into a list of argument
    strings, respecting nested parentheses so function-pointer arguments
    are not split at their inner commas. Arguments whose type cannot be
    resolved are rewritten into a parsable fallback form.
    """
    bracketCnt = 0
    arglist = []
    currentArg = ""
    if argString == "" or argString == "void":
        return []
    # Append a trailing comma so the loop flushes the final argument too.
    argString = argString + ","
    for i in range(0, len(argString)):
        ch = argString[i]
        if ch == ",":
            # Only a top-level comma (outside parentheses) ends an argument.
            if bracketCnt == 0:
                # Rewrite the argument only when its type is not well defined.
                if not getTinfoForTypeStr(currentArg).is_well_defined():
                    if currentArg == "void":
                        continue
                    elif "(*)" in currentArg:
                        # Function-pointer argument: recursively normalize its
                        # own argument list.
                        funcPtr = currentArg
                        funcPtrArgStr = funcPtr[funcPtr.find("(*)")+3:]
                        funcPtrArgStr = funcPtrArgStr[funcPtrArgStr.find("(")+1:funcPtrArgStr.rfind(")")]
                        funcPtrArgList = splitArgString(funcPtrArgStr)
                        newFuncPtrArgStr = ", ".join(funcPtrArgList)
                        currentArg = funcPtr[:funcPtr.find("(*)")+3] + "(" + newFuncPtrArgStr + ")"
                    elif currentArg[-1] == "*":
                        # Unknown pointer type: degrade to "void *" + name.
                        currentArg = "void * " + currentArg[:currentArg.find("*")].replace("::", "__")
                    else:
                        # Unknown value type: degrade to "uint64_t" + name.
                        currentArg = "uint64_t " + currentArg.replace("::", "__")
                currentArg = currentArg.strip()
                arglist.append(currentArg)
                currentArg = ""
                continue
        elif ch == "(":
            bracketCnt += 1
        elif ch == ")":
            bracketCnt -= 1
        # Nested commas and all other characters accumulate into the arg.
        currentArg = currentArg + ch
    return arglist
def parseFuncTypeToGetArglist(funcType):
    """Return only the argument list of a parsed function prototype."""
    # parseFuncProtoWORet yields (class name, bare func name, arg list);
    # the first two components are not needed here.
    _clsName, _bareName, args = parseFuncProtoWORet(funcType)
    return args
def convertArgStringToArgType(argString):
    """
    Extract the type portion of a single C-style argument string.

    "int x"           -> "int"               (drop trailing identifier)
    "char *name"      -> "char *"            (keep through the last '*')
    "void (*cb)(int)" -> "void (*cb)(int)"   (keep through the last ')')
    "x"               -> "x"                 (no separator: unchanged)
    """
    argString = argString.strip()
    # (Removed an unused 'idx' local left over from an earlier revision.)
    if ")" in argString:
        # Function-pointer style argument: type text runs through last ')'.
        return argString[:argString.rfind(")")+1]
    elif "*" in argString:
        # Pointer argument: keep everything up to and including the last '*'.
        return argString[:argString.rfind("*")+1]
    elif " " in argString:
        # "type name" pair: drop the trailing identifier.
        return argString[:argString.rfind(" ")].strip()
    else:
        # Bare token: assume it is already just a type.
        return argString
def convertArgStringListToArgTypeList(argStringList):
    """Map convertArgStringToArgType over a list of argument strings."""
    return [convertArgStringToArgType(argString) for argString in argStringList]
None
'''
class VarType:
def __init__(self, typeString):
typeString = typeString.strip()
self.typeString = typeString
self.tinfo = getTinfoForTypeStr(typeString)
def __str__(self):
if not None is self.tinfo:
return str(self.tinfo)
else:
if self.typeString.endswith("*"):
return "void " + self.typeString[self.typeString.find("*"):]
else:
return "uint64_t"
'''
def convertUnknownArgType(argType):
    """
    Fallback for an argument whose type is unknown: declare it "void *"
    and mangle the original text into an identifier-safe suffix.
    """
    #print "[?] Unknown arg: %s"%(argType)
    ident = argType
    # Strip spaces and turn the special characters into underscores.
    for old, new in ((" ", ""), ("*", "_"), (":", "_"), ("&", "_"), (".", "_")):
        ident = ident.replace(old, new)
    return "void *" + ident
def repairUnknownFuncPTRTypeString(funcPTRType):
    """
    Try to rewrite an unparsable function-pointer type string into an
    accepted form by substituting a fallback for each unknown argument
    type. Returns the repaired string, or None when no "(*)" marker is
    present (in which case a warning is printed).
    """
    newFuncPTRType = None
    if "(*)" in funcPTRType:
        if funcPTRType.count("(*)") > 1:
            # NOTE(review): multiple "(*)" markers are only warned about;
            # processing still continues using the first occurrence.
            print "[?] Can not repair func pointer type: %s"%(funcPTRType)
        # Drop the "(*)" so the remainder parses like a plain prototype.
        idx =funcPTRType.find("(*)")
        funcType = funcPTRType[:idx] + funcPTRType[idx+3:]
        funcRetType = funcType[:funcType.find("(")]
        argString = funcType[funcType.find("(")+1:funcType.find(")")]
        argStringList = argString.split(",")
        for i in range(0, len(argStringList)):
            arg = argStringList[i]
            argTinfo = getTinfoForTypeStr(arg)
            if None is argTinfo:
                # Unknown type: substitute the "void *<mangled>" fallback.
                argStringList[i] = convertUnknownArgType(arg)
        # Reassemble "ret (*)(args...)" with the repaired argument list.
        newFuncPTRType = funcRetType + " (*)" + "(" + ",".join(argStringList) + ")"
    else:
        print "[?] Can not repair func pointer type: %s"%(funcPTRType)
    return newFuncPTRType
def parseFuncProtoWORet(funcProtoWORet, isNonStatic=False, knownClassName = None, isThisIncluded=False):
arglist = []
demangledClassName = None
demangledFuncName = None
#print "funcProtoWithDummyRet:" + funcProtoWithDummyRet
if funcProtoWORet != None:
funcProtoWithDummyRet = "void " + funcProtoWORet
funcProtoWORetWithoutArgs = funcProtoWORet[:funcProtoWORet.find("(")]
if "::" in funcProtoWORetWithoutArgs:
demangledClassName = funcProtoWORetWithoutArgs[:funcProtoWORetWithoutArgs.rfind("::")]
demangledFuncName = funcProtoWORetWithoutArgs[funcProtoWORetWithoutArgs.rfind("::")+2:]
else:
demangledFuncName = funcProtoWORetWithoutArgs
funcTinfoWithDummyRet = getTinfoForTypeStr(funcProtoWithDummyRet)
if not None is funcTinfoWithDummyRet:
nargs = funcTinfoWithDummyRet.get_nargs()
for cnt in range(0, nargs):
arglist.append(str(funcTinfoWithDummyRet.get_nth_arg(cnt)))
else:
if funcProtoWORet.find("(") < funcProtoWORet.rfind(")"):
strOfArgs = funcProtoWORet[funcProtoWORet.find("(")+1:funcProtoWORet.rfind(")")]
argStringList = []
bracketLevel = 0
lastIdx = -1
isFuncPTR = False
for i in range(0, len(strOfArgs)):
ch = strOfArgs[i]
if ch == "(":
bracketLevel += 1
elif ch == ")":
bracketLevel -= 1
elif ch == ",":
if bracketLevel == 0:
arg = strOfArgs[lastIdx+1:i]
argTinfo = getTinfoForTypeStr(arg)
if None is argTinfo:
if isFuncPTR:
arg = repairUnknownFuncPTRTypeString(arg)
if | |
list(slice_steps.keys())
self.progress_message_update(0,len(slices)+2)
req = TRequest.objects.get(reqid=reqid)
fail_slice_save = save_slice_changes(reqid, slice_steps)
error_slices = []
if (req.request_type == 'MC') and not fail_slice_save:
error_slices=check_slice_jos(reqid,slice_steps)
try:
fill_request_priority(reqid,reqid)
except:
pass
for slice, steps_status in list(slice_steps.items()):
slice_steps[slice] = steps_status['sliceSteps']
for steps_status in list(slice_steps.values()):
for steps in steps_status[:-2]:
steps['value'] = steps['value'].strip()
slice_new_input = {}
for slice, steps_status in list(slice_steps.items()):
if steps_status[-1]:
slice_new_input.update({slice:steps_status[-1]['input_dataset']})
slice_steps[slice]= steps_status[:-1]
# Check input on missing tags, wrong skipping
missing_tags,wrong_skipping_slices,old_double_trf = step_validation(slice_steps)
error_approve_message = False
owner = user_name
if (owner != req.manager) and (req.request_type == 'MC') and (req.phys_group != 'VALI'):
if (not is_superuser) and ('MCCOORD' not in egroup_permissions(req.manager)):
error_approve_message = True
results = {'missing_tags': missing_tags,'slices': [],'no_action_slices' :slices,'wrong_slices':wrong_skipping_slices,
'double_trf':old_double_trf, 'success': True, 'new_status':'', 'fail_slice_save': fail_slice_save,
'error_approve_message': error_approve_message, 'async_name':'save_slices'}
removed_input = []
no_action_slices = []
self.progress_message_update(1,len(slices)+2)
if (not missing_tags) and (not error_approve_message) and (not error_slices):
if req.request_type == 'MC':
for steps_status in list(slice_steps.values()):
for index,steps in enumerate(steps_status[:-2]):
if (StepExecution.STEPS[index] == 'Reco') or (StepExecution.STEPS[index] == 'Atlfast'):
if not steps['formats']:
steps['formats'] = 'AOD'
if ['-1'] == list(slice_steps.keys()):
slice_0 = deepcopy(slice_steps['-1'])
if req.request_type == 'MC':
error_slices, no_action_slices = create_steps(self, {0:slice_steps['-1']},reqid,StepExecution.STEPS, approve_level,waiting_level)
else:
error_slices, no_action_slices = create_steps(self, {0:slice_steps['-1']},reqid,['']*len(StepExecution.STEPS), approve_level,waiting_level)
if req.request_type == 'MC':
approved_steps = StepExecution.objects.filter(request=reqid, status='Approved').count()
if (0 not in error_slices) and (approved_steps == 0):
fill_all_slices_from_0_slice(reqid)
else:
slice_count = InputRequestList.objects.filter(request=reqid).count()
extended_slice_steps = {}
for i in range(1,slice_count):
extended_slice_steps.update({str(i):deepcopy(slice_0)})
error_slices, no_action_slices = create_steps(self, extended_slice_steps,reqid,StepExecution.STEPS, approve_level)
else:
if '-1' in list(slice_steps.keys()):
del slice_steps['-1']
if not (req.manager) or (req.manager == 'None'):
missing_tags.append('No manager name!')
else:
removed_input = []
#child_steps_before_update = find_child_steps(reqid,slice_steps)
if req.request_type == 'MC':
error_slices, no_action_slices = create_steps(self,slice_steps,reqid,StepExecution.STEPS, approve_level, waiting_level)
good_slices = [int(x) for x in slices if int(x) not in error_slices]
removed_input = remove_input(good_slices,reqid)
else:
error_slices, no_action_slices = create_steps(self,slice_steps,reqid,['']*len(StepExecution.STEPS), approve_level, waiting_level)
try:
make_child_update(reqid,owner,slice_steps)
except Exception as e:
_logger.error("Problem with step modifiaction: %s" % e)
if (req.cstatus.lower() not in ['test','cancelled']) and (approve_level>=0):
if not owner:
owner = req.manager
req.cstatus = request_approve_status(req,None,user_name,is_superuser)
req.save()
request_status = RequestStatus(request=req,comment='Request approved by WebUI',owner=owner,
status=req.cstatus)
request_status.save_with_current_time()
if req.request_type == 'MC':
if do_split:
split_request(reqid,[x for x in map(int,slices) if x not in error_slices])
else:
for slice, new_dataset in list(slice_new_input.items()):
if new_dataset:
change_dataset_in_slice(req, int(slice), new_dataset)
if approve_level >= 0:
for slice_number in [x for x in map(int,slices) if x not in (error_slices + no_action_slices)]:
if SliceError.objects.filter(request=reqid, is_active=True, slice=InputRequestList.objects.get(request=reqid,slice=slice_number)).exists():
for slice_error in SliceError.objects.filter(request=reqid, is_active=True, slice=InputRequestList.objects.get(request=reqid,slice=slice_number)):
slice_error.is_active = False
slice_error.save()
_jsonLogger.info('{message}'.format(message=slice_error.message),extra={'prod_request':reqid,
'slice':slice_error.slice_id,'exception_time':slice_error.exception_time,
'exception_type':slice_error.exception_type})
results = {'missing_tags': missing_tags,
'slices': [x for x in map(int,slices) if x not in (error_slices + no_action_slices)],
'wrong_slices':wrong_skipping_slices,
'double_trf':old_double_trf, 'error_slices':error_slices,
'no_action_slices' :no_action_slices,'success': True, 'new_status': req.cstatus,
'removed_input':removed_input, 'fail_slice_save':'',
'error_approve_message': error_approve_message, 'async_name':'save_slices'}
else:
results = {'missing_tags': missing_tags,
'slices': [x for x in map(int, slices) if x not in (error_slices + no_action_slices)],
'wrong_slices': wrong_skipping_slices,
'double_trf': old_double_trf, 'error_slices': error_slices,
'no_action_slices': no_action_slices, 'success': True, 'new_status': req.cstatus,
'removed_input': removed_input, 'fail_slice_save': '',
'error_approve_message': error_approve_message, 'async_name':'save_slices'}
_jsonLogger.info('Finish step modification, saved slices {slices}, problem slices {error_slices}'.format(slices=len(results.get('slices',[])),
error_slices=len(results.get('error_slices',[]))),
extra=form_json_request_dict(reqid,None,{'user':user_name,'duration':time()-start_time}))
self.progress_message_update(len(slices)+2,len(slices)+2)
except Exception as e:
_jsonLogger.error('Problem with step modifiaction',extra=form_json_request_dict(reqid,None,{'user':user_name,'error':str(e)}))
return json.dumps(results)
def find_skipped_dataset(DSID, job_option, tags, data_type):
    """
    Find datasets and their event counts for the first non-skipped step in a chain.

    :param DSID: dsid of the chain
    :param job_option: job option name of the chain input
    :param tags: list of tags which were already processed
    :param data_type: expected data type
    :return: list of dict {'dataset_name':'...','events':...}
    """
    found = []
    for base_value in ['valid', 'mc']:
        # SQL-style LIKE pattern: base%DSID%job_option%data_type%tag1%tag2...%
        pattern_parts = [base_value, str(DSID), job_option, data_type, "%".join(tags)]
        dataset_pattern = "%".join(pattern_parts) + "%"
        _logger.debug("Search dataset by pattern %s" % dataset_pattern)
        found += find_dataset_events(dataset_pattern, tags)
    return found
def find_old_double_trf(tags):
    # Stub: double-trf detection is currently disabled and always reports
    # nothing found. Callers treat the returned value as "no problem".
    return None
def step_validation(slice_steps):
    """Validate edited slice steps before saving.

    Collects every distinct non-empty tag across all slices, and flags
    slices whose skip pattern looks inconsistent: after a non-skipped step
    has been seen, a skipped step followed by another non-skipped step
    marks the slice as wrongly skipped.
    NOTE(review): 'is_skipped' starts as True, so only the pattern
    non-skipped -> skipped -> non-skipped trips the check — confirm that a
    leading skipped run is intentionally allowed.

    :param slice_steps: mapping slice -> list of step dicts; the LAST list
        element is slice-level data, not a step (hence [:-1]).
    :return: (missing_tags, wrong_skipping_slices, old_double_trf)
    """
    tags = []
    # Slices with skipped
    wrong_skipping_slices = set()
    for slice, steps_status in list(slice_steps.items()):
        is_skipped = True
        is_not_skipped = False
        for steps in steps_status[:-1]:
            # Record each distinct tag for the missing-tag lookup below.
            if steps['value'] and (steps['value'] not in tags):
                tags.append(steps['value'])
            if steps['value']:
                if steps['is_skipped'] == True:
                    is_skipped = True
                else:
                    # Non-skipped step after a skipped one, with an earlier
                    # non-skipped step already seen -> inconsistent slice.
                    if is_not_skipped and is_skipped:
                        wrong_skipping_slices.add(slice)
                    else:
                        is_skipped = False
                    is_not_skipped = True
    missing_tags = find_missing_tags(tags)
    old_double_trf = find_old_double_trf(tags)
    return missing_tags,list(wrong_skipping_slices),old_double_trf
@csrf_protect
def request_steps_save(request, reqid):
    """Save slice/step edits for request *reqid* without approving (POST only);
    any other method redirects back to the approve page."""
    if request.method != 'POST':
        return HttpResponseRedirect(reverse('prodtask:input_list_approve', args=(reqid,)))
    return request_steps_approve_or_save(request, reqid, -1)
@api_view(['POST'])
def request_steps_save_async(request, reqid):
    """Queue an asynchronous 'Save slices' celery task for request *reqid*
    and return its submission result."""
    payload = request.data
    submission = single_request_action_celery_task(
        reqid, request_steps_approve_or_save_async, 'Save slices', request.user.username,
        payload, request.user.username, request.user.is_superuser, reqid, -1)
    return Response(submission)
@csrf_protect
def request_steps_approve(request, reqid, approve_level, waiting_level):
    """Approve steps of request *reqid* up to *approve_level* (POST only);
    any other method redirects back to the approve page."""
    if request.method != 'POST':
        return HttpResponseRedirect(reverse('prodtask:input_list_approve', args=(reqid,)))
    return request_steps_approve_or_save(request, reqid, int(approve_level) - 1, int(waiting_level))
@csrf_protect
def request_steps_approve_split(request, reqid, approve_level, waiting_level):
    """Approve steps of request *reqid* up to *approve_level* with request
    splitting enabled (POST only); otherwise redirect back."""
    if request.method != 'POST':
        return HttpResponseRedirect(reverse('prodtask:input_list_approve', args=(reqid,)))
    return request_steps_approve_or_save(request, reqid, int(approve_level) - 1, int(waiting_level), True)
def form_step_hierarchy(tags_formats_text):
    """Build a step hierarchy from a text description.

    Each non-empty line of *tags_formats_text* is one level of
    whitespace-separated ``tag:formats`` tokens. A level whose first token
    equals the last token of an earlier level is linked to that level
    (ctag='' entry pointing at it) instead of starting a new chain.

    :param tags_formats_text: multi-line "tag:formats tag:formats ..." text
    :return: list of levels, each a list of dicts with keys
        'level', 'step_number', 'ctag', 'formats'
    :raises IndexError: if a token lacks the ``tag:formats`` form
    """
    step_levels = []
    for line in tags_formats_text.split('\n'):
        # Fix: skip blank lines (e.g. a trailing newline from a textarea);
        # an empty level used to raise IndexError on level[0] below.
        tokens = [x for x in line.split(' ') if x]
        if not tokens:
            continue
        step_levels.append([(x.split(':')[0], x.split(':')[1]) for x in tokens])
    step_hierarchy = []
    for level_index, level in enumerate(step_levels):
        step_hierarchy.append([])
        # find if tag on some previous level already exists, then make a link
        for i in range(level_index):
            if level[0] == step_levels[i][-1]:
                step_hierarchy[-1].insert(0, {'level': i, 'step_number': len(step_levels[i]) - 1,
                                              'ctag': '', 'formats': ''})
        # no link: this level starts its own chain
        if len(step_hierarchy[-1]) == 0:
            step_hierarchy[-1].append({'level': level_index, 'step_number': 0,
                                       'ctag': level[0][0], 'formats': level[0][1]})
        for j in range(1, len(level)):
            step_hierarchy[-1].append({'level': level_index, 'step_number': j - 1,
                                       'ctag': level[j][0], 'formats': level[j][1]})
    return step_hierarchy
@csrf_protect
def request_reprocessing_steps_create(request, reqid=None):
    """Create reprocessing step chains for the chosen slices (POST only).

    The POST body is JSON with 'tagsFormats' (text describing step levels,
    parsed by form_step_hierarchy) and 'slices' (slice numbers to extend).
    For each slice, extra InputRequestList rows are cloned for the deeper
    levels and StepExecution chains are created and linked level by level.
    Returns an (always empty) JSON object; status 500 on any failure.
    """
    if request.method == 'POST':
        cur_request = TRequest.objects.get(reqid=reqid)
        result = {}
        try:
            data = request.body
            input_dict = json.loads(data)
            tags_formats_text = input_dict['tagsFormats']
            slices = input_dict['slices']
            #form levels from input text lines
            step_levels = form_step_hierarchy(tags_formats_text)
            #create chains for each input
            # New slices are appended after the request's existing ones.
            new_slice_number = InputRequestList.objects.filter(request=reqid).count()
            for slice_number in slices:
                real_steps_hierarchy=[]
                input_skeleton = {}
                for level_index,level in enumerate(step_levels):
                    current_slice = {}
                    real_steps_hierarchy.append([])
                    if level_index == 0:
                        # Level 0 reuses the existing slice and captures its
                        # fields as the skeleton for the cloned slices below.
                        current_slices = InputRequestList.objects.filter(request=reqid,slice=slice_number)
                        input_skeleton = current_slices.values('brief','phys_comment','comment','project_mode',
                                                               'priority','input_events')[0]
                        input_skeleton['request'] = cur_request
                        current_slice = current_slices[0]
                    else:
                        input_skeleton['slice'] = new_slice_number
                        new_slice_number += 1
                        current_slice = InputRequestList(**input_skeleton)
                        current_slice.save()
                    for i,current_tag in enumerate(level):
                        if current_tag['ctag'] == '':
                            # Link entry: reference a step created on an earlier level.
                            real_steps_hierarchy[-1].append(real_steps_hierarchy[current_tag['level']][current_tag['step_number']])
                        else:
                            step_template = fill_template('', current_tag['ctag'], current_slice.priority, current_tag['formats'])
                            new_step_exec = StepExecution(request=cur_request, step_template=step_template,status='NotChecked',
                                                          slice=current_slice,priority=current_slice.priority,
                                                          input_events=-1)
                            new_step_exec.save_with_current_time()
                            # First step of a chain points at itself; later
                            # steps point at their parent in the hierarchy.
                            if (current_tag['level'] == level_index) and (current_tag['step_number'] == i):
                                new_step_exec.step_parent = new_step_exec
                            else:
                                new_step_exec.step_parent = real_steps_hierarchy[current_tag['level']][current_tag['step_number']]
                            if current_slice.project_mode:
                                new_step_exec.set_task_config({'project_mode' : current_slice.project_mode})
                            new_step_exec.save()
                            real_steps_hierarchy[-1].append(new_step_exec)
        except Exception as e:
            # NOTE(review): the exception detail 'e' is discarded and an empty
            # JSON body is returned with status 500 — consider logging it.
            return HttpResponse(json.dumps(result), content_type='application/json',status=500)
        return HttpResponse(json.dumps(result), content_type='application/json')
    return HttpResponseRedirect(reverse('prodtask:input_list_approve', args=(reqid,)))
@csrf_protect
def make_test_request(request, reqid):
    """Mark request *reqid* as a test request (POST only).

    Always returns an empty JSON object; the operation is best-effort, so
    failures are logged instead of propagated to the caller.
    """
    results = {}
    if request.method == 'POST':
        try:
            _logger.debug(form_request_log(reqid,request,'Make as test'))
            cur_request = TRequest.objects.get(reqid=reqid)
            cur_request.cstatus = 'test'
            cur_request.save()
        except Exception as e:
            # Fix: was 'pass' — keep the best-effort contract but record why
            # the status change failed instead of swallowing it silently.
            _logger.error("Failed to mark request %s as test: %s" % (reqid, e))
    return HttpResponse(json.dumps(results), content_type='application/json')
@csrf_protect
def make_request_fast(request, reqid):
    """Flag request *reqid* as 'fast' (POST only).

    Always returns an empty JSON object; the operation is best-effort, so
    failures are logged instead of propagated to the caller.
    """
    results = {}
    if request.method == 'POST':
        try:
            _logger.debug(form_request_log(reqid,request,'Make request fast'))
            cur_request = TRequest.objects.get(reqid=reqid)
            cur_request.is_fast = True
            cur_request.save()
        except Exception as e:
            # Fix: was 'pass' — keep the best-effort contract but record why
            # the flag update failed instead of swallowing it silently.
            _logger.error("Failed to mark request %s as fast: %s" % (reqid, e))
    return HttpResponse(json.dumps(results), content_type='application/json')
def home(request):
    """Render the Monte Carlo production home page."""
    template = get_template('prodtask/_index.html')
    page_context = Context({'active_app': 'prodtask', 'title': 'Monte Carlo Production Home'})
    return HttpResponse(template.render(page_context))
def about(request):
    """Render the Monte Carlo production 'about' page."""
    template = get_template('prodtask/_about.html')
    page_context = Context({'active_app': 'prodtask', 'title': 'Monte Carlo Production about'})
    return HttpResponse(template.render(page_context))
def step_skipped(step):
    """Return True when *step* is in either of the two 'skipped' states."""
    return step.status in ('Skipped', 'NotCheckedSkipped')
def fixPattern(pattern):
    """Backfill defaults in a stored step pattern and persist it.

    For every step without a ctag, fills empty 'project_mode' and
    'nEventsPerJob' from the per-step default dictionaries, then writes the
    updated JSON back to the pattern and saves it.
    """
    pattern_d = json.loads(pattern.pattern_dict)
    for step in list(pattern_d.keys()):
        # Only tag-less steps receive defaults; tagged steps keep their values.
        if not pattern_d[step]['ctag']:
            if not pattern_d[step]['project_mode']:
                pattern_d[step]['project_mode'] = get_default_project_mode_dict()[step]
            if not pattern_d[step]['nEventsPerJob']:
                pattern_d[step]['nEventsPerJob'] = get_default_nEventsPerJob_dict()[step]
    pattern.pattern_dict = json.dumps(pattern_d)
    pattern.save()
@ensure_csrf_cookie
def input_list_approve(request, rid=None):
    # Step-manipulation page for request *rid*, hidden slices excluded.
    return request_table_view(request, rid, show_hidden=False)
@ensure_csrf_cookie
def input_list_approve_full(request, rid=None):
    # Step-manipulation page for request *rid*, hidden slices included.
    return request_table_view(request, rid, show_hidden=True)
# NOTE(review): presumably the event-count threshold above which a request is
# split into several — confirm against the usage sites (e.g. split_request).
NUMBER_EVENTS_TO_SPLIT = 2000000
def redirect_to_value(request, site_name):
    """Redirect to the CERN-hosted site named *site_name*."""
    target_url = "http://%s.cern.ch/" % site_name
    return redirect(target_url)
def egroup_permissions(username):
    """Return codenames of 'has_*_permissions' group permissions for *username*.

    Any lookup failure (unknown user, DB error) yields an empty list — the
    caller treats the result as "no extra permissions".
    """
    return_list = []
    try:
        current_user = User.objects.get(username = username )
        user_groups = current_user.groups.all()
        group_permissions = []
        for group in user_groups:
            group_permissions += list(group.permissions.all())
        for group_permission in group_permissions:
            if "has_" in group_permission.name and "_permissions" in group_permission.name:
                return_list.append(group_permission.codename)
    except Exception:
        # Fix: was a bare 'except:', which would also trap SystemExit and
        # KeyboardInterrupt; keep the best-effort contract for ordinary errors.
        return []
    return return_list
def get_full_patterns(pattern_request=29269):
    """Collect step patterns from a template request.

    :param pattern_request: request id holding the pattern slices.  Defaults
        to 29269, the previously hard-coded template request, so existing
        callers are unaffected.
    :return: list of (brief, [(tag, [(param, value), ...]), ...]) sorted by brief
    """
    result = []
    task_configs = {}
    patterns = list(InputRequestList.objects.filter(request=pattern_request).order_by('slice'))
    steps = list(StepExecution.objects.filter(request=pattern_request).order_by('id'))
    # Task-config keys always recorded, even when empty.
    CHANGABLE = ['nEventsPerJob', 'project_mode', 'nFilesPerJob', 'nGBPerJob',
                 'maxFailure','container_name','onlyTagsForFC']
    # Keys recorded only when they carry a value.
    NON_DEFAULT = ['input_format', 'output_formats']
    for step in steps:
        task_config = step.get_task_config()
        # 'x9999' is the placeholder tag used by pattern steps.
        if task_config.get('tag') == 'x9999':
            task_config['tag'] = ''
        task_configs[int(step.slice_id)] = task_configs.get(int(step.slice_id), []) + [task_config]
    # NOTE(review): the first slice is skipped — presumably a header/template
    # slice of the pattern request; confirm.
    for pattern in patterns[1:]:
        if not pattern.is_hide:
            task_config_steps = task_configs[int(pattern.id)]
            tag_step = []
            for task_config in task_config_steps:
                parameters = []
                for x in CHANGABLE:
                    parameters.append((x,task_config.get(x,'')))
                for x in NON_DEFAULT:
                    if task_config.get(x,''):
                        parameters.append((x, task_config.get(x, '')))
                tag_step.append((task_config['tag'],parameters))
            result.append((pattern.brief,tag_step))
    result.sort(key=lambda x:x[0])
    return result
def request_table_view(request, rid=None, show_hidden=False):
# Prepare data for step manipulation page
def get_approve_status(ste_task_list, slice=None):
    """Summarize the submission state of one slice's step/task list.

    :param ste_task_list: list of {'step': {...} or None} dicts
    :param slice: optional slice object; a hidden slice short-circuits to
        {'submitted': 'hidden'}
    :return: dict with keys 'submitted' ('not_submitted'/'submitted'/
        'partially_submitted'/'waiting'), 'original', 'split'.
        NOTE(review): 'original' is initialized to 'changed' and never
        updated here — confirm it is filled elsewhere.
    """
    # Fix: the status dict was built twice with identical contents
    # (duplicate initialization); build it once.
    return_status = {'submitted': 'not_submitted', 'original': 'changed', 'split': 'no'}
    if slice:
        if slice.is_hide:
            return {'submitted': 'hidden'}
    exist_approved = False
    exist_not_approved = False
    exist_spreadsheet_original = False  # NOTE(review): never set in this function
    exist_to_split = False              # NOTE(review): never set in this function
    has_waiting = False
    for step_task in ste_task_list:
        if step_task['step']:
            if (step_task['step']['status'] == 'Approved')or(step_task['step']['status'] == 'Skipped'):
                exist_approved = True
            if (step_task['step']['status'] == 'NotChecked'):
                exist_not_approved = True
            if (step_task['step']['status'] == 'Waiting'):
                has_waiting = True
            # A non-skipped step configured for event splitting marks the slice.
            if ('split_events' in step_task['step']['task_config']) and (step_task['step']['status'] not in ['Skipped','NotCheckedSkipped']):
                return_status['split'] = 'split'
    if exist_approved and exist_not_approved:
        return_status['submitted'] = 'partially_submitted'
    if exist_approved and not(exist_not_approved):
        return_status['submitted'] = 'submitted'
    # 'waiting' overrides any other submission state.
    if has_waiting:
        return_status['submitted'] = 'waiting'
    return return_status
def approve_level(step_task_list):
    """Return index+1 of the last step whose status is Approved or Skipped
    (0 when there is none)."""
    max_level = -1
    for index, step_task in enumerate(step_task_list):
        step = step_task['step']
        if step and step['status'] in ('Approved', 'Skipped'):
            max_level = index
    return max_level + 1
def has_waiting(step_task_list):
    """Return True if any present step has status 'Waiting'."""
    return any(
        step_task['step'] and step_task['step']['status'] == 'Waiting'
        for step_task in step_task_list
    )
BIG_PANDA_TASK_BASE = 'http://bigpanda.cern.ch/task/'
FAKE_TASK_NUMBER = | |
@classmethod
def _getUniqueId(cls):
    # Fresh time-based UUID (uuid1), upper-cased for consistency.
    return str(cls.MOD_uuid.uuid1()).upper()
@classmethod
def _stringToUniqueId(cls, string=None):
    """Deterministic UUID for *string* (uuid3 seeded from the basic UUID);
    falls back to a fresh time-based UUID when *string* is None."""
    if string is not None:
        basicUuid = cls._basicUniqueId()
        return str(cls.MOD_uuid.uuid3(cls.MOD_uuid.UUID(basicUuid), str(string))).upper()
    return cls._getUniqueId()
@classmethod
def _stringsToUniqueId(cls, *args):
    """Deterministic, order-sensitive UUID derived from the given strings.

    Each item contributes ord(item)+position as a zero-padded code.
    NOTE(review): ord() accepts only single-character strings, so this works
    only when every element of the normalized list is one character —
    confirm the intended inputs.
    """
    def toOrderCodeString_(strings_):
        return ''.join([str(ord(i) + seq).zfill(4) for seq, i in enumerate(strings_)])
    if len(args) > 1:
        strings = list(args)
    else:
        # Single argument: normalised to a list by the helper.
        strings = cls._string2list(args[0])
    #
    subCode = toOrderCodeString_(strings)
    return cls._stringToUniqueId(subCode)
@classmethod
def _setOsJsonWrite(cls, fileString, raw, indent=4, ensure_ascii=True):
    """Write *raw* as JSON via a temporary file, then copy it over
    *fileString* — avoids leaving a partially written target on failure."""
    temporaryName = cls._getOsFileTemporaryName(fileString)
    with open(temporaryName, u'w') as j:
        cls.MOD_json.dump(
            raw,
            j,
            indent=indent,
            ensure_ascii=ensure_ascii
        )
    cls._setOsFileCopy(temporaryName, fileString)
@classmethod
def _setAddMessage(cls, text):
    # Python 2 print statements: emit a timestamp header line, then the text.
    print u' |{}'.format(cls._getActivePrettifyTime())
    print u'{}'.format(text)
@classmethod
def _setAddResult(cls, text):
    # Emit *text* prefixed with the ' result |' label.
    cls._setAddMessage(
        u''' result |{}'''.format(text)
    )
@classmethod
def _setAddWarning(cls, text):
    # Emit *text* prefixed with the 'warning |' label.
    cls._setAddMessage(
        u'''warning |{}'''.format(text)
    )
@classmethod
def _setAddError(cls, text):
    # Emit *text* prefixed with the ' error |' label.
    cls._setAddMessage(
        u''' error |{}'''.format(text)
    )
class Mtd_BscPath(Mtd_BscUtility):
    """Helpers for path-string manipulation: expanding path lists to include
    all ancestors, building parent/child remap dictionaries, and splitting
    node/port path strings (DCC-style, Python 2 codebase)."""
    @classmethod
    def _toDagpathRemapList(cls, pathStr, pathsep):
        """Expand *pathStr* (string or list) into a flat, de-duplicated list
        containing every path plus all of its ancestor paths, each prefixed
        with *pathsep* as the root marker."""
        def addFnc_(lis_, item_):
            if not item_ in lis_:
                lis_.append(item_)
        #
        def getBranchFnc_(lis_, pathString_):
            # NOTE(review): reads outer 'lis' here but appends via 'lis_';
            # both refer to the same list at the single call site below.
            if not pathString_ in lis:
                _strList = pathString_.split(pathsep)
                #
                _strCount = len(_strList)
                for _seq, _s in enumerate(_strList):
                    if _s:
                        if (_seq + 1) < _strCount:
                            subPath = pathsep.join(_strList[:_seq + 1])
                            addFnc_(lis_, subPath)
                #
                addFnc_(lis_, pathString_)
        #
        lis = []
        pathStrList = cls._string2list(pathStr)
        for i in pathStrList:
            # Debug add root
            if not i.startswith(pathsep):
                i = pathsep + i
            #
            getBranchFnc_(lis, i)
        return lis
    @classmethod
    def _getDagpathRemapDict(cls, pathStr, pathsep):
        """Build an ordered dict mapping (parentName, parentPath) ->
        [(name, path), ...] from the given path strings."""
        def addFnc_(item):
            if not item in lis:
                lis.append(item)
        #
        def getBranchFnc_(pathString_, pathDatumList):
            parent = pathDatumList[-2]
            parentPathString_ = pathsep.join(pathDatumList[:-1])
            nameString_ = pathDatumList[-1]
            addFnc_(((parent, parentPathString_), (nameString_, pathString_)))
        #
        def getRootFnc_(pathString_, pathDatumList):
            nameString_ = pathDatumList[-1]
            addFnc_(
                ((None, None), (nameString_, pathString_))
            )
        #
        def getMainFnc_():
            # Get Dict
            pathStringLis = cls._string2list(pathStr)
            if pathStringLis:
                for i in pathStringLis:
                    pathDatumList = i.split(pathsep)
                    # Two entries ('' + name) means the path is a root child.
                    isRoot = len(pathDatumList) == 2
                    # Filter is Root
                    if isRoot:
                        getRootFnc_(i, pathDatumList)
                    else:
                        getBranchFnc_(i, pathDatumList)
            # Reduce Dict
            if lis:
                list2dictFnc_(dic, lis)
        def list2dictFnc_(dic_, lis_):
            [dic_.setdefault(p, []).append(c) for p, c in lis_]
        #
        lis = []
        dic = cls.CLS_dic_order()
        #
        getMainFnc_()
        return dic
    @classmethod
    def _setDicConvertToPathCreateDic(cls, dic, nodesep):
        """Convert a parent->children adjacency dict into an ordered dict of
        node -> full path, walking down from the first key as root.
        NOTE(review): 'dic.keys()[0]' is Python 2 only (dict views are not
        indexable in Python 3)."""
        def getBranchFnc_(parent):
            if parent in dic:
                parentPathString_ = parent
                if parent in dic_:
                    parentPathString_ = dic_[parent]
                #
                children = dic[parent]
                if children:
                    for child in children:
                        childPath = parentPathString_ + pathsep + child
                        dic_[child] = childPath
                        getBranchFnc_(child)
        pathsep = nodesep
        #
        dic_ = cls.CLS_dic_order()
        root = dic.keys()[0]
        dic_[root] = root
        getBranchFnc_(root)
        return dic_
    @classmethod
    def _nodeString2namespace(cls, nodepathString, nodesep, namespacesep):
        # Namespace part of the leaf node name, '' when there is none.
        if namespacesep in nodepathString:
            return namespacesep.join(nodepathString.split(nodesep)[-1].split(namespacesep)[:-1])
        return ''
    @classmethod
    def _nodepathString2nodenameString(cls, nodepathString, nodesep, namespacesep):
        # Leaf node name with any namespace prefix removed.
        return nodepathString.split(nodesep)[-1].split(namespacesep)[-1]
    @classmethod
    def _nodeString2nodenameWithNamespace(cls, nodepathString, nodesep):
        # Leaf node name, namespace prefix kept.
        return nodepathString.split(nodesep)[-1]
    @classmethod
    def _portString2portname(cls, portpathString, portsep):
        # Last component of a port path.
        return portpathString.split(portsep)[-1]
    @classmethod
    def _attrpathString2portpathString(cls, portpathString, portsep):
        # Drop the leading (node) component, keep the port path.
        return portsep.join(portpathString.split(portsep)[1:])
    @classmethod
    def _portString2nodeString(cls, portpathString, portsep):
        # First component of a port path: the owning node.
        return portpathString.split(portsep)[0]
class Mtd_BscDagpath(Mtd_BscUtility):
    """Alternative DAG-path helpers: ancestor expansion and parentPath ->
    children dictionary construction (Python 2 codebase — see 'unicode'
    below)."""
    @classmethod
    def _toDagpathRemapList_(cls, pathStr, pathsep):
        """Expand *pathStr* (string or list) into a flat list of each path
        plus its ancestor paths (no root-prefix normalization, unlike
        Mtd_BscPath._toDagpathRemapList)."""
        def addFnc_(item):
            if item:
                if not item in lis:
                    lis.append(item)
        def getBranchFnc_(pathString_):
            if not pathString_ in lis:
                stringLis = pathString_.split(pathsep)
                #
                dataCount = len(stringLis)
                for seq, data in enumerate(stringLis):
                    if data:
                        if seq < dataCount:
                            subPath = pathsep.join(stringLis[:seq])
                            addFnc_(subPath)
                #
                addFnc_(pathString_)
        lis = []
        _ = cls._string2list(pathStr)
        for i in _:
            getBranchFnc_(i)
        return lis
    @classmethod
    def _getDagpathDict_(cls, pathStr, pathsep):
        """Build an ordered dict of parentPath -> [childPath, ...]; a root
        path maps to itself until children are appended."""
        def addFnc_(item):
            if not item in lis:
                lis.append(item)
        def getBranchFnc_(pathString_, pathDatumList):
            parentPathString_ = pathsep.join(pathDatumList[:-1])
            addFnc_(
                (parentPathString_, pathString_)
            )
        def getRootFnc_(pathString_):
            addFnc_(
                (None, pathString_)
            )
        def getMainFnc_():
            # Get Dict
            pathStringLis = cls._string2list(pathStr)
            if pathStringLis:
                for i in pathStringLis:
                    pathDatumList = i.split(pathsep)
                    isRoot = len(pathDatumList) == 1
                    # Filter is Root
                    if isRoot:
                        getRootFnc_(i)
                    else:
                        getBranchFnc_(i, pathDatumList)
            # Reduce Dict
            if lis:
                list2dictFnc_(dic, lis)
        def list2dictFnc_(dic_, lis_):
            for p, c in lis_:
                if p is None:
                    # Root entry: path maps to itself.
                    dic[c] = c
                else:
                    if p in dic_:
                        # NOTE(review): 'unicode' is Python 2 only; a root
                        # self-mapping is replaced by a child list once
                        # children appear.
                        if isinstance(dic_[p], (str, unicode)):
                            dic_[p] = []
                    dic_.setdefault(p, []).append(c)
        #
        lis = []
        dic = cls.CLS_dic_order()
        #
        getMainFnc_()
        return dic
class Mtd_BscFile(Mtd_BscUtility):
    """Public file-operation facade: thin wrappers over the inherited
    _*OsFile* helpers (existence, naming, copy/backup/rename/remove, time
    stamps, info dicts). Most methods delegate one-to-one to a base-class
    helper defined outside this class."""
    @classmethod
    def isExist(cls, fileString):
        return cls._isOsFileExist(fileString)
    @classmethod
    def createDirectory(cls, fileString):
        # Create the directory that would contain *fileString*.
        cls._bsc_mtd__os_path__set_file_directory_create_(fileString)
    @classmethod
    def name(cls, fileString):
        return cls._getOsFileName(fileString)
    @classmethod
    def dirname(cls, fileString):
        return cls._getOsFileDirname(fileString)
    @classmethod
    def basename(cls, fileString):
        return cls._getOsFileBasename(fileString)
    @classmethod
    def base(cls, fileString):
        return cls._getOsFileBase(fileString)
    @classmethod
    def ext(cls, fileString):
        # Extension including the leading dot (os.path.splitext semantics).
        return cls.MTD_os_path.splitext(fileString)[1]
    @classmethod
    def isSame(cls, fileString, targetFileString):
        return cls._isOsSameFile(fileString, targetFileString)
    @classmethod
    def copyTo(cls, fileString, targetFileString, force=True):
        # No-op when the source does not exist.
        if cls.isExist(fileString):
            cls._setOsFileCopy(fileString, targetFileString, force)
    @classmethod
    def backupTo(cls, fileString, backupFileString, timetag=None):
        # No-op when the source does not exist.
        if cls.isExist(fileString):
            cls._setOsFileBackup(fileString, backupFileString, timetag)
    @classmethod
    def renameDirnameTo(cls, fileString, newDirnameString):
        # NOTE(review): despite the name, this only COMPOSES the target path
        # (newDirname + basename); it does not move/rename anything on disk.
        basenameString = cls.basename(fileString)
        targetTexture = cls._toOsFilename(newDirnameString, basenameString)
        return targetTexture
    @classmethod
    def renameBasenameTo(cls, fileString, newBasenameString):
        cls._setOsFileRename(fileString, newBasenameString)
    @classmethod
    def renameTo(cls, fileString, newFileString):
        cls._setOsFileRename_(fileString, newFileString)
    @classmethod
    def renameExtTo(cls, fileString, extString):
        # Path string with the extension swapped; no filesystem change.
        return cls.base(fileString) + extString
    @classmethod
    def remove(cls, fileString):
        cls._setOsPathRemove(fileString)
    @classmethod
    def open(cls, fileString):
        # Open with the OS-associated application (not Python file open()).
        cls._setOsFileOpen(fileString)
    @classmethod
    def moveTo(cls, fileString, targetFileString):
        cls._setOsFileMove(fileString, targetFileString)
    @classmethod
    def openDirectory(cls, fileString):
        # Reveal the containing directory in the OS file browser.
        if cls._isOsFileExist(fileString):
            directoryString = cls._getOsFileDirname(fileString)
            cls._setOsDirectoryOpen(directoryString)
    @classmethod
    def openAsTemporary(cls, fileString, temporaryFileString):
        """Open a temporary copy of *fileString*, refreshing the copy only
        when the source's mtime differs from the cached copy's."""
        if cls._isOsFileExist(fileString):
            timestamp = str(cls._getOsFileMtimestamp(fileString))
            if cls._isOsFileExist(temporaryFileString):
                tempTimestamp = str(cls._getOsFileMtimestamp(temporaryFileString))
            else:
                tempTimestamp = None
            if not timestamp == tempTimestamp:
                cls._setOsFileCopy(fileString, temporaryFileString)
            #
            cls._setOsFileOpen(temporaryFileString)
    @classmethod
    def openAsBackup(cls, fileString):
        # Not implemented.
        pass
    @classmethod
    def isFileTimeChanged(cls, fileString, targetFileString):
        return cls._isOsFileTimeChanged(fileString, targetFileString)
    @classmethod
    def mtimestamp(cls, fileString):
        return cls._getOsFileMtimestamp(fileString)
    @classmethod
    def mtimetag(cls, fileString):
        return cls._getOsFileMtimetag(fileString)
    @classmethod
    def mtimeChnPrettify(cls, fileString, useMode=0):
        # Modification time rendered via the Chinese prettifier helper.
        return cls._timestampToChnPrettify(cls._getOsFileMtimestamp(fileString), useMode)
    @classmethod
    def temporaryName(cls, fileString, timetag=None):
        return cls._getOsFileTemporaryName(fileString, timetag)
    @classmethod
    def temporaryVedioName(cls, fileString):
        # NOTE(review): 'vedio' (sic) is part of the on-disk temp path — do
        # not "fix" the spelling without migrating existing directories.
        tempDirectory = u'{}/vedio'.format(cls.DEF_path_temporary_local)
        basenameString = cls._getOsFileBasename(fileString)
        return cls._toOsFilename(tempDirectory, basenameString)
    @classmethod
    def backupName(cls, fileString, timetag=None, useMode=0):
        return cls._toOsFileJoinTimetag(fileString, timetag, useMode)
    @classmethod
    def uniqueName(cls, fileString):
        # Deterministic unique filename derived from the basename.
        directoryString = cls._getOsFileDirname(fileString)
        uniqueId = cls._stringToUniqueId(cls._getOsFileBasename(fileString))
        return cls._toOsFilename(directoryString, uniqueId)
    @classmethod
    def infoJsonName(cls, fileString):
        return cls._toOsFileInfoJsonFileString(fileString)
    @classmethod
    def resultName(cls, fileString):
        return cls._toOsFileResultFileString(fileString)
    @classmethod
    def backupNameDict(cls, fileString):
        return cls._getOsFileBackupNameDict(fileString)
    @classmethod
    def toJoinTimetag(cls, fileString, timetag=None, useMode=0):
        return cls._toOsFileJoinTimetag(fileString, timetag, useMode)
    @classmethod
    def findTimetag(cls, fileString):
        return cls._getOsFileTimetag(fileString)
    @classmethod
    def infoDict(cls, fileString):
        return cls._infoDict(fileString)
    @classmethod
    def productInfoDict(cls, fileString, stage=None, description=None, note=None):
        # Base info dict extended with product stage/description/note fields.
        dic = cls._infoDict(fileString)
        dic[cls.DEF_key_stage] = stage
        dic[cls.DEF_key_description] = description
        dic[cls.DEF_key_note] = note
        return dic
    @classmethod
    def size(cls, fileString):
        return cls._getOsFileSize(fileString)
    @classmethod
    def seqLabel(cls, seq):
        # '' for seq 0, otherwise '_0001'-style suffix (bool-indexed pair).
        return ['', '_' + str(seq).zfill(4)][seq > 0]
    @classmethod
    def subFilename(cls, fileString, labelString):
        # Insert *labelString* between base name and extension.
        return labelString.join(cls.MTD_os_path.splitext(fileString))
    @classmethod
    def reduceFilename(cls, fileString):
        # Collapse doubled/tripled path separators into a single one.
        pathsep = cls.DEF_bsc__pathsep
        return cls.MOD_re.sub('{0}|{1}'.format(pathsep * 2, pathsep * 3), pathsep, fileString)
    @classmethod
    def toExtSplit(cls, fileString):
        return cls.MTD_os_path.splitext(fileString)
    @classmethod
    def raw2hash(cls, fileString):
        # NOTE(review): computes the hash but does not return it — callers
        # always receive None; probably a missing 'return'.
        cls._getOsFileHash(fileString)
    @classmethod
    def collectionDatum(cls, fileString, targetDirectoryString, ignoreMtimeChanged=False, ignoreExists=False):
        """Return (source, target) pairs for files that should be collected
        into *targetDirectoryString*, honoring the exists/mtime skip flags."""
        def getBranchFnc_(sourceFileString):
            targetFileString = cls.renameDirnameTo(sourceFileString, targetDirectoryString)
            #
            enable = False
            if cls.isExist(targetFileString):
                if ignoreExists is True:
                    enable = False
                else:
                    if ignoreMtimeChanged is True:
                        enable = True
                    else:
                        # Only recollect when the source mtime differs.
                        isMtimeChanged = cls._isOsFileTimeChanged(sourceFileString, targetFileString)
                        if isMtimeChanged:
                            enable = True
            else:
                enable = True
            #
            if enable is True:
                lis.append((sourceFileString, targetFileString))
        #
        lis = []
        #
        osFileLis = cls._string2list(fileString)
        if osFileLis:
            [getBranchFnc_(i) for i in osFileLis]
        return lis
    @classmethod
    def composeBy(cls, directoryString, basenameString):
        return cls._toOsFilename(directoryString, basenameString)
    @classmethod
    def _getOsFileInfoDic(cls, osSourceFile, description=None, note=None):
        # Provenance record: timestamp, user, host, source file, free text.
        return orderedDict(
            [
                (cls.DEF_key_info_timestamp, cls._getSystemActiveTimestamp()),
                (cls.DEF_key_info_username, cls._getSystemUsername()),
                #
                (cls.DEF_key_info_host, cls._getSystemHost()),
                (cls.DEF_key_info_hostname, cls._getSystemHostname()),
                #
                (cls.DEF_key_info_sourcefile, osSourceFile),
                #
                (cls.DEF_key_info_description, description),
                (cls.DEF_key_info_note, note)
            ]
        )
    @classmethod
    def _getOsFileBackupDatum(cls, fileString):
        # Content-hash-addressed backup path plus the version-index file.
        hashKey = cls._getOsFileHash(fileString)
        dirname, filename, ext = cls._getOsFileDirname(fileString), cls._getOsFileName(fileString), cls._getOsFileExt(fileString)
        #
        targetFileString = cls.DEF_bsc__pathsep.join([cls._getOsFileDirname(fileString), cls.LynxiOsFolder_History, filename + ext, hashKey])
        osVersionFile = cls.DEF_bsc__pathsep.join([cls._getOsFileDirname(fileString), cls.LynxiOsFolder_History, filename + cls.LynxiOsExtVAR_kit__window__version])
        return targetFileString, osVersionFile
    @classmethod
    def _setOsFileBackupTo(cls, sourceFileString, targetFileString):
        # Copy the file and write a sidecar info JSON next to the backup.
        cls._setOsFileCopy(sourceFileString, targetFileString)
        #
        info = cls._getOsFileInfoDic(sourceFileString)
        infoFile = cls._toOsFileInfoJsonFileString(targetFileString)
        cls._setOsJsonWrite(infoFile, info)
    @classmethod
    def backup(cls, fileString):
        """Backup *fileString* into the history folder (content-addressed, so
        identical content is stored once) and append to the version index."""
        if cls._isOsFileExist(fileString):
            targetFileString, osVersionFile = cls._getOsFileBackupDatum(fileString)
            if not cls._isOsFileExist(targetFileString):
                cls._setOsFileBackupTo(fileString, targetFileString)
            #
            cls._setOsJsonWrite(
                osVersionFile,
                {
                    cls._getSystemActiveTimestamp(): cls._getOsFileBasename(targetFileString)
                }
            )
class Mtd_BscSystem(Mtd_BscBasic):
    """Abstract system/application descriptor. Subclasses set
    VAR_bsc__system__name and override the _bsc__system_cls__* hooks."""
    VAR_bsc__system__name = None
    # Maps platform.system() values and executable names to canonical names.
    # NOTE(review): 'houdini'/'houdini.exe' map to u'maya' — looks like a
    # copy-paste mistake; confirm before relying on Houdini detection.
    VAR_bsc__system__name_dict = {
        u'Windows': u'windows',
        u'Linux': u'linux',
        u'maya.exe': u'maya',
        u'maya': u'maya',
        u'houdini.exe': u'maya',
        u'houdini': u'maya'
    }
    # Normalizes platform.architecture() labels.
    VAR_bsc__system__version_dict = {
        u'32bit': u'x86',
        u'64bit': u'x64'
    }
    @classmethod
    def _bsc__system_cls__get_is_active_(cls, appNameStr):
        # Hook: subclasses decide whether this system/app is the active one.
        pass
    @classmethod
    def isActive(cls):
        return cls._bsc__system_cls__get_is_active_(
            cls.VAR_bsc__system__name
        )
    @classmethod
    def name(cls):
        return cls.VAR_bsc__system__name
    @classmethod
    def _bsc__system_cls__get_full_version_str_(cls):
        # Hook: subclasses return the full version string.
        pass
    @classmethod
    def fullVersion(cls):
        return cls._bsc__system_cls__get_full_version_str_()
    @classmethod
    def _bsc__system_cls__get_version_str_(cls):
        # Hook: subclasses return the short version string.
        pass
    @classmethod
    def version(cls):
        return cls._bsc__system_cls__get_version_str_()
class Mtd_BscPlatform(Mtd_BscSystem):
    """System descriptor backed by the :mod:`platform` module."""
    @classmethod
    def _bsc__system_cls__get_is_active_(cls, appNameStr):
        # Translate the raw platform.system() name and compare with ours.
        systemKey = cls.MOD_platform.system()
        return cls.VAR_bsc__system__name_dict[systemKey] == cls.VAR_bsc__system__name
    @classmethod
    def _bsc__system_cls__get_full_version_str_(cls):
        # Join both architecture fields (e.g. '64bit' and the linkage
        # format); fall back to the joined string when no mapping exists.
        joined = u'.'.join(list(cls.MOD_platform.architecture()))
        return cls.VAR_bsc__system__version_dict.get(joined, joined)
    @classmethod
    def _bsc__system_cls__get_version_str_(cls):
        # Only the bit-width field, mapped to 'x86'/'x64' when known.
        bits = str(cls.MOD_platform.architecture()[0])
        return cls.VAR_bsc__system__version_dict.get(bits, bits)
class Mtd_BscApplication(Mtd_BscSystem):
    """System descriptor that matches against the running executable name."""
    VAR_bsc__system__name = None
    @classmethod
    def _bsc__system_cls__get_is_active_(cls, appNameStr):
        # Basename of the interpreter's argv[0], lower-cased; matches either
        # the Windows-style '<app>.exe' or the bare canonical name.
        executable = cls.MTD_os_path.basename(cls.MOD_sys.argv[0]).lower()
        if executable == u'{}.exe'.format(appNameStr):
            return True
        if executable == cls.VAR_bsc__system__name:
            return True
        return False
class _EnvironString(str):
def __init__(self, value):
    # str is immutable, so the textual content was already fixed in
    # __new__; ``_value`` mirrors it for the path-list manipulation below.
    self._value = value
    self._key = u''  # environment-variable name; assigned by the owner later
    self._parent = None  # owning environment object; assigned externally
def _add(self, value):
if self._value:
lis = [i.lstrip().rstrip() for i in self._value.split(Mtd_BscUtility.MOD_os.pathsep)]
lowerLis = [i.lstrip().rstrip().lower() for i in self._value.lower().split(Mtd_BscUtility.MOD_os.pathsep)]
if value.lower() not in | |
each
:py:meth:`dispense`, but only if the pipette has no liquid left
in it. If set to `False` (default), no :py:meth:`blow_out` will
occur.
* *mix_before* (``tuple``) --
The tuple, if specified, gives the amount of volume to
:py:meth:`mix` preceding each :py:meth:`aspirate` during the
transfer. The tuple is interpreted as (repetitions, volume).
* *mix_after* (``tuple``) --
The tuple, if specified, gives the amount of volume to
:py:meth:`mix` after each :py:meth:`dispense` during the
transfer. The tuple is interpreted as (repetitions, volume).
* *disposal_volume* (``float``) --
(:py:meth:`distribute` only) Volume of liquid to be disposed off
after distributing. When dispensing multiple times from the same
tip, it is recommended to aspirate an extra amount of liquid to
be disposed off after distributing.
* *carryover* (``boolean``) --
If `True` (default), any `volume` that exceeds the maximum volume
of this Pipette will be split into multiple smaller volumes.
* *gradient* (``lambda``) --
Function for calculating the curve used for gradient volumes.
When `volume` is a tuple of length 2, its values are used to
create a list of gradient volumes. The default curve for this
gradient is linear (lambda x: x), however a method can be passed
with the `gradient` keyword argument to create a custom curve.
:returns: This instance
"""
self._log.debug("Transfer {} from {} to {}".format(
volume, source, dest))
kwargs['mode'] = kwargs.get('mode', 'transfer')
mix_strategy, mix_opts = self._mix_from_kwargs(kwargs)
if trash:
drop_tip = transfers.DropTipStrategy.TRASH
else:
drop_tip = transfers.DropTipStrategy.RETURN
new_tip = kwargs.get('new_tip')
if isinstance(new_tip, str):
new_tip = types.TransferTipPolicy[new_tip.upper()]
blow_out = None
if kwargs.get('blow_out'):
blow_out = transfers.BlowOutStrategy.TRASH
if new_tip != types.TransferTipPolicy.NEVER:
tr, next_tip = self._next_available_tip()
max_volume = min(next_tip.max_volume, self.max_volume)
else:
max_volume = self.hw_pipette['working_volume']
touch_tip = None
if kwargs.get('touch_tip'):
touch_tip = transfers.TouchTipStrategy.ALWAYS
default_args = transfers.Transfer()
disposal = kwargs.get('disposal_volume')
if disposal is None:
disposal = default_args.disposal_volume
transfer_args = transfers.Transfer(
new_tip=new_tip or default_args.new_tip,
air_gap=kwargs.get('air_gap') or default_args.air_gap,
carryover=kwargs.get('carryover') or default_args.carryover,
gradient_function=(kwargs.get('gradient_function') or
default_args.gradient_function),
disposal_volume=disposal,
mix_strategy=mix_strategy,
drop_tip_strategy=drop_tip,
blow_out_strategy=blow_out or default_args.blow_out_strategy,
touch_tip_strategy=(touch_tip or
default_args.touch_tip_strategy)
)
transfer_options = transfers.TransferOptions(transfer=transfer_args,
mix=mix_opts)
plan = transfers.TransferPlan(volume, source, dest, self, max_volume,
kwargs['mode'], transfer_options)
self._execute_transfer(plan)
return self
def _execute_transfer(self, plan: transfers.TransferPlan):
    """Run a transfer plan by dispatching each step to our own methods.

    Each plan entry names one of this instance's methods plus the
    positional and keyword arguments to call it with.
    """
    for step in plan:
        handler = getattr(self, step['method'])
        handler(*step['args'], **step['kwargs'])
@staticmethod
def _mix_from_kwargs(
        top_kwargs: Dict[str, Any])\
        -> Tuple[transfers.MixStrategy, transfers.Mix]:
    """Derive the mix strategy and options from :py:meth:`transfer` kwargs.

    :returns: a ``(MixStrategy, Mix)`` pair reflecting which of
        ``mix_before`` / ``mix_after`` were requested and their
        ``(repetitions, volume)`` settings.
    """
    def _mix_requested(kwargs, opt):
        """
        Helper for determining mix options from :py:meth:`transfer` kwargs

        Mixes can be ignored in kwargs by either

        - Not specifying the kwarg
        - Specifying it as None
        - Specifying it as (0, 0)

        This handles all these cases.
        """
        val = kwargs.get(opt)
        return val is not None and val != (0, 0)

    mix_opts = transfers.Mix()
    want_before = _mix_requested(top_kwargs, 'mix_before')
    want_after = _mix_requested(top_kwargs, 'mix_after')
    # Fold each requested tuple into the options namedtuple.
    if want_before:
        reps, vol = top_kwargs['mix_before'][0], top_kwargs['mix_before'][1]
        mix_opts = mix_opts._replace(
            mix_before=mix_opts.mix_before._replace(
                repetitions=reps, volume=vol))
    if want_after:
        reps, vol = top_kwargs['mix_after'][0], top_kwargs['mix_after'][1]
        mix_opts = mix_opts._replace(
            mix_after=mix_opts.mix_after._replace(
                repetitions=reps, volume=vol))
    # Strategy follows directly from which options were requested.
    if want_before and want_after:
        mix_strategy = transfers.MixStrategy.BOTH
    elif want_before:
        mix_strategy = transfers.MixStrategy.BEFORE
    elif want_after:
        mix_strategy = transfers.MixStrategy.AFTER
    else:
        mix_strategy = transfers.MixStrategy.NEVER
    return mix_strategy, mix_opts
def delay(self):
    """Delegate to the protocol context's delay.

    NOTE(review): called with no arguments, so this relies on
    ``ProtocolContext.delay`` having defaults for its duration
    parameters — confirm against the context API.
    """
    return self._ctx.delay()
def move_to(self, location: types.Location, force_direct: bool = False,
            minimum_z_height: float = None,
            speed: float = None
            ) -> 'InstrumentContext':
    """ Move the instrument.

    :param location: The location to move to.
    :type location: :py:class:`.types.Location`
    :param force_direct: If set to true, move directly to destination
                         without arc motion.
    :param minimum_z_height: When specified, this Z margin is able to raise
                             (but never lower) the mid-arc height.
    :param speed: The speed at which to move. By default,
                  :py:attr:`InstrumentContext.default_speed`. This controls
                  the straight linear speed of the motion; to limit
                  individual axis speeds, you can use
                  :py:attr:`.ProtocolContext.max_speeds`.
    :returns: This instance.
    """
    # The cached location tells us which labware we are moving *from*.
    if self._ctx.location_cache:
        from_lw = self._ctx.location_cache.labware
    else:
        from_lw = None
    if not speed:
        speed = self.default_speed
    # Labware flagged with this quirk is addressed from the XY center of
    # the channels rather than the primary channel's critical point.
    from_center = 'centerMultichannelOnWells'\
        in quirks_from_any_parent(from_lw)
    cp_override = CriticalPoint.XY_CENTER if from_center else None
    from_loc = types.Location(
        self._hw_manager.hardware.gantry_position(
            self._mount, critical_point=cp_override),
        from_lw)
    moves = geometry.plan_moves(from_loc, location, self._ctx.deck,
                                force_direct=force_direct,
                                minimum_z_height=minimum_z_height)
    self._log.debug("move_to: {}->{} via:\n\t{}"
                    .format(from_loc, location, moves))
    try:
        for move in moves:
            self._hw_manager.hardware.move_to(
                self._mount, move[0], critical_point=move[1], speed=speed,
                max_speeds=self._ctx.max_speeds.data)
    except Exception:
        # A failed/interrupted move leaves the gantry position unknown, so
        # the cache must be invalidated before re-raising.
        self._ctx.location_cache = None
        raise
    else:
        self._ctx.location_cache = location
    return self
@property
def mount(self) -> str:
    """ The lower-cased name of the mount this pipette is attached to. """
    mount_name = self._mount.name
    return mount_name.lower()
@property
def speed(self) -> 'PlungerSpeeds':
    """ The plunger speeds (in mm/s) configured for the pipette.

    The returned object exposes ``aspirate``, ``dispense``, and
    ``blow_out`` attributes holding the plunger speed for each operation.

    .. note::
        Equivalent to :py:attr:`flow_rate` except for units: this
        property is the linear speed of the plunger inside the pipette,
        while :py:attr:`flow_rate` is the volumetric rate of liquid into
        or out of the tip. Both modify the same underlying values, so
        setting one overrides the other.

    For instance, to set the plunger speed during an aspirate action, do

    .. code-block :: python

        instrument.speed.aspirate = 50
    """
    return self._speeds
@property
def flow_rate(self) -> 'FlowRates':
    """ The flow rates (in uL/s) configured for the pipette.

    The returned object exposes ``aspirate``, ``dispense``, and
    ``blow_out`` attributes holding the flow rate for each operation.

    .. note::
        Equivalent to :py:attr:`speed` except for units: this property is
        the volumetric flow rate of liquid into or out of the tip, while
        :py:attr:`speed` is the linear speed of the plunger inside the
        pipette. Both modify the same underlying values, so setting one
        overrides the other.

    For instance, to change the flow rate for aspiration on an instrument
    you would do

    .. code-block :: python

        instrument.flow_rate.aspirate = 50
    """
    return self._flow_rates
@property
def pick_up_current(self) -> float:
    """
    The current (amperes) the pipette mount's motor will use
    while picking up a tip. Specified in amps.
    """
    # Not implemented in this API version.
    raise NotImplementedError

@pick_up_current.setter
def pick_up_current(self, amps: float):
    """ Set the current used when picking up a tip.

    :param amps: The current, in amperes. Acceptable values: (0.0, 2.0)
    """
    # Not implemented in this API version.
    raise NotImplementedError
@property
def type(self) -> str:
    """ One of `'single'` or `'multi'`, derived from the pipette name. """
    pipette_name = self.name
    # Check 'single' first to preserve the original match order.
    for kind in ('single', 'multi'):
        if kind in pipette_name:
            return kind
    raise RuntimeError("Bad pipette name: {}".format(pipette_name))
@property
def tip_racks(self) -> List[Labware]:
    """ The tip racks that have been linked to this pipette.

    :py:meth:`pick_up_tip` consults this list when called without
    arguments to decide which tip to pick up next.
    """
    return self._tip_racks

@tip_racks.setter
def tip_racks(self, racks: List[Labware]):
    self._tip_racks = racks
@property
def trash_container(self) -> Labware:
    """ The trash container associated with this pipette.

    :py:meth:`drop_tip` and :py:meth:`blow_out` consult this when called
    without arguments to decide where to drop tips or blow out liquid.
    """
    return self._trash

@trash_container.setter
def trash_container(self, trash: Labware):
    self._trash = trash
@property
def name(self) -> str:
    """ The name string for the pipette (e.g. 'p300_single'). """
    info = self.hw_pipette
    return info['name']
@property
def model(self) -> str:
    """ The model string for the pipette (e.g. 'p300_single_v1.3'). """
    info = self.hw_pipette
    return info['model']
@property
def min_volume(self) -> float:
    """ The minimum volume, in microliters, reported by the hardware API. """
    return self.hw_pipette['min_volume']
@property
def max_volume(self) -> float:
    """ The maximum volume, in microliters, this pipette can hold. """
    info = self.hw_pipette
    return info['max_volume']
@property
def current_volume(self) -> float:
    """ The current amount of liquid, in microliters, held in the pipette. """
    info = self.hw_pipette
    return info['current_volume']
@property
def hw_pipette(self) -> Dict[str, Any]:
    """ The raw information record the hardware API keeps for this pipette.

    :raises: a :py:class:`.types.PipetteNotAttachedError` if the pipette
        is no longer attached (should not happen).
    """
    attached = self._hw_manager.hardware.attached_instruments
    pipette_info = attached[self._mount]
    if pipette_info is None:
        raise types.PipetteNotAttachedError
    return pipette_info
@property
def channels(self) -> int:
    """ The number of channels on the pipette. """
    info = self.hw_pipette
    return info['channels']
@property
def well_bottom_clearance(self) -> 'Clearances':
""" The distance above the bottom of a well to aspirate or dispense.
This is an object with attributes ``aspirate`` and ``dispense``,
describing the default heights of the corresponding operation. The
default is 1.0mm for both aspirate and dispense.
When :py:meth:`aspirate` or :py:meth:`dispense` is given a
:py:class:`.Well` rather than a full :py:class:`.Location`, the robot
will move this | |
# Extracted from repository: leegoonz/Maya-devkit
from PySide.QtGui import QGraphicsView as _QGraphicsView
from PySide.QtCore import QObject as _QObject
from PySide.QtGui import QGraphicsObject as _QGraphicsObject
class _Property(object):
    """Auto-generated stub of PySide's ``Property`` descriptor; all members are placeholders."""
    def __call__(*args, **kwargs):
        """
        x.__call__(...) <==> x(...)
        """
        pass
    def __init__(*args, **kwargs):
        """
        x.__init__(...) initializes x; see help(type(x)) for signature
        """
        pass
    def getter(*args, **kwargs):
        pass
    def read(*args, **kwargs):
        pass
    def setter(*args, **kwargs):
        pass
    def write(*args, **kwargs):
        pass
    __new__ = None
class _Object(object):
    """Auto-generated stub base class; only declares ``__dict__``."""
    __dict__ = None
class QDeclarativeParserStatus(_Object):
    """Auto-generated stub of QtDeclarative's QDeclarativeParserStatus interface."""
    def __init__(*args, **kwargs):
        """
        x.__init__(...) initializes x; see help(type(x)) for signature
        """
        pass
    def classBegin(*args, **kwargs):
        pass
    def componentComplete(*args, **kwargs):
        pass
    __new__ = None
class QDeclarativeNetworkAccessManagerFactory(_Object):
    """Auto-generated stub of QtDeclarative's network access manager factory."""
    def __init__(*args, **kwargs):
        """
        x.__init__(...) initializes x; see help(type(x)) for signature
        """
        pass
    def create(*args, **kwargs):
        pass
    __new__ = None
class QDeclarativeComponent(_QObject):
    """Auto-generated stub of QtDeclarative's QDeclarativeComponent; all members are placeholders."""
    def __init__(*args, **kwargs):
        """
        x.__init__(...) initializes x; see help(type(x)) for signature
        """
        pass
    def __nonzero__(*args, **kwargs):
        """
        x.__nonzero__() <==> x != 0
        """
        pass
    def beginCreate(*args, **kwargs):
        pass
    def completeCreate(*args, **kwargs):
        pass
    def create(*args, **kwargs):
        pass
    def creationContext(*args, **kwargs):
        pass
    def errorString(*args, **kwargs):
        pass
    def errors(*args, **kwargs):
        pass
    def isError(*args, **kwargs):
        pass
    def isLoading(*args, **kwargs):
        pass
    def isNull(*args, **kwargs):
        pass
    def isReady(*args, **kwargs):
        pass
    def loadUrl(*args, **kwargs):
        pass
    def progress(*args, **kwargs):
        pass
    def setData(*args, **kwargs):
        pass
    def status(*args, **kwargs):
        pass
    def url(*args, **kwargs):
        pass
    # Enum values and signals; populated by the real binding at runtime.
    Error = None
    Loading = None
    Null = None
    Ready = None
    Status = None
    __new__ = None
    progressChanged = None
    staticMetaObject = None
    statusChanged = None
class QDeclarativeExtensionInterface(_Object):
    """Auto-generated stub of QtDeclarative's extension plugin interface."""
    def __init__(*args, **kwargs):
        """
        x.__init__(...) initializes x; see help(type(x)) for signature
        """
        pass
    def initializeEngine(*args, **kwargs):
        pass
    def registerTypes(*args, **kwargs):
        pass
    __new__ = None
class QDeclarativeExpression(_QObject):
    """Auto-generated stub of QtDeclarative's QDeclarativeExpression; all members are placeholders."""
    def __init__(*args, **kwargs):
        """
        x.__init__(...) initializes x; see help(type(x)) for signature
        """
        pass
    def clearError(*args, **kwargs):
        pass
    def context(*args, **kwargs):
        pass
    def engine(*args, **kwargs):
        pass
    def error(*args, **kwargs):
        pass
    def evaluate(*args, **kwargs):
        pass
    def expression(*args, **kwargs):
        pass
    def hasError(*args, **kwargs):
        pass
    def lineNumber(*args, **kwargs):
        pass
    def notifyOnValueChanged(*args, **kwargs):
        pass
    def scopeObject(*args, **kwargs):
        pass
    def setExpression(*args, **kwargs):
        pass
    def setNotifyOnValueChanged(*args, **kwargs):
        pass
    def setSourceLocation(*args, **kwargs):
        pass
    def sourceFile(*args, **kwargs):
        pass
    __new__ = None
    staticMetaObject = None
    valueChanged = None
class QDeclarativeListReference(_Object):
    """Auto-generated stub of QtDeclarative's QDeclarativeListReference; all members are placeholders."""
    def __copy__(*args, **kwargs):
        pass
    def __init__(*args, **kwargs):
        """
        x.__init__(...) initializes x; see help(type(x)) for signature
        """
        pass
    def append(*args, **kwargs):
        pass
    def at(*args, **kwargs):
        pass
    def canAppend(*args, **kwargs):
        pass
    def canAt(*args, **kwargs):
        pass
    def canClear(*args, **kwargs):
        pass
    def canCount(*args, **kwargs):
        pass
    def clear(*args, **kwargs):
        pass
    def count(*args, **kwargs):
        pass
    def isValid(*args, **kwargs):
        pass
    def listElementType(*args, **kwargs):
        pass
    def object(*args, **kwargs):
        pass
    __new__ = None
class QDeclarativePropertyMap(_QObject):
    """Auto-generated stub of QtDeclarative's QDeclarativePropertyMap; all members are placeholders."""
    def __init__(*args, **kwargs):
        """
        x.__init__(...) initializes x; see help(type(x)) for signature
        """
        pass
    def clear(*args, **kwargs):
        pass
    def contains(*args, **kwargs):
        pass
    def count(*args, **kwargs):
        pass
    def insert(*args, **kwargs):
        pass
    def isEmpty(*args, **kwargs):
        pass
    def keys(*args, **kwargs):
        pass
    def size(*args, **kwargs):
        pass
    def value(*args, **kwargs):
        pass
    __new__ = None
    staticMetaObject = None
    valueChanged = None
class QDeclarativeError(_Object):
    """Auto-generated stub of QtDeclarative's QDeclarativeError; all members are placeholders."""
    def __copy__(*args, **kwargs):
        pass
    def __init__(*args, **kwargs):
        """
        x.__init__(...) initializes x; see help(type(x)) for signature
        """
        pass
    def __repr__(*args, **kwargs):
        """
        x.__repr__() <==> repr(x)
        """
        pass
    def column(*args, **kwargs):
        pass
    def description(*args, **kwargs):
        pass
    def isValid(*args, **kwargs):
        pass
    def line(*args, **kwargs):
        pass
    def setColumn(*args, **kwargs):
        pass
    def setDescription(*args, **kwargs):
        pass
    def setLine(*args, **kwargs):
        pass
    def setUrl(*args, **kwargs):
        pass
    def toString(*args, **kwargs):
        pass
    def url(*args, **kwargs):
        pass
    __new__ = None
class QDeclarativeEngine(_QObject):
    """Auto-generated stub of QtDeclarative's QDeclarativeEngine; all members are placeholders."""
    def __init__(*args, **kwargs):
        """
        x.__init__(...) initializes x; see help(type(x)) for signature
        """
        pass
    def addImageProvider(*args, **kwargs):
        pass
    def addImportPath(*args, **kwargs):
        pass
    def addPluginPath(*args, **kwargs):
        pass
    def baseUrl(*args, **kwargs):
        pass
    def clearComponentCache(*args, **kwargs):
        pass
    def imageProvider(*args, **kwargs):
        pass
    def importPathList(*args, **kwargs):
        pass
    def importPlugin(*args, **kwargs):
        pass
    def networkAccessManager(*args, **kwargs):
        pass
    def networkAccessManagerFactory(*args, **kwargs):
        pass
    def offlineStoragePath(*args, **kwargs):
        pass
    def outputWarningsToStandardError(*args, **kwargs):
        pass
    def pluginPathList(*args, **kwargs):
        pass
    def removeImageProvider(*args, **kwargs):
        pass
    def rootContext(*args, **kwargs):
        pass
    def setBaseUrl(*args, **kwargs):
        pass
    def setImportPathList(*args, **kwargs):
        pass
    def setNetworkAccessManagerFactory(*args, **kwargs):
        pass
    def setOfflineStoragePath(*args, **kwargs):
        pass
    def setOutputWarningsToStandardError(*args, **kwargs):
        pass
    def setPluginPathList(*args, **kwargs):
        pass
    def contextForObject(*args, **kwargs):
        pass
    def objectOwnership(*args, **kwargs):
        pass
    def setContextForObject(*args, **kwargs):
        pass
    def setObjectOwnership(*args, **kwargs):
        pass
    # Enum values and signals; populated by the real binding at runtime.
    CppOwnership = None
    JavaScriptOwnership = None
    ObjectOwnership = None
    __new__ = None
    quit = None
    staticMetaObject = None
    warnings = None
class QDeclarativeContext(_QObject):
    """Auto-generated stub of QtDeclarative's QDeclarativeContext; all members are placeholders."""
    def __init__(*args, **kwargs):
        """
        x.__init__(...) initializes x; see help(type(x)) for signature
        """
        pass
    def baseUrl(*args, **kwargs):
        pass
    def contextObject(*args, **kwargs):
        pass
    def contextProperty(*args, **kwargs):
        pass
    def engine(*args, **kwargs):
        pass
    def isValid(*args, **kwargs):
        pass
    def parentContext(*args, **kwargs):
        pass
    def resolvedUrl(*args, **kwargs):
        pass
    def setBaseUrl(*args, **kwargs):
        pass
    def setContextObject(*args, **kwargs):
        pass
    def setContextProperty(*args, **kwargs):
        pass
    __new__ = None
    staticMetaObject = None
class QDeclarativeView(_QGraphicsView):
    """Auto-generated stub of QtDeclarative's QDeclarativeView; all members are placeholders."""
    def __init__(*args, **kwargs):
        """
        x.__init__(...) initializes x; see help(type(x)) for signature
        """
        pass
    def engine(*args, **kwargs):
        pass
    def errors(*args, **kwargs):
        pass
    def eventFilter(*args, **kwargs):
        pass
    def initialSize(*args, **kwargs):
        pass
    def paintEvent(*args, **kwargs):
        pass
    def resizeEvent(*args, **kwargs):
        pass
    def resizeMode(*args, **kwargs):
        pass
    def rootContext(*args, **kwargs):
        pass
    def rootObject(*args, **kwargs):
        pass
    def setResizeMode(*args, **kwargs):
        pass
    def setRootObject(*args, **kwargs):
        pass
    def setSource(*args, **kwargs):
        pass
    def sizeHint(*args, **kwargs):
        pass
    def source(*args, **kwargs):
        pass
    def status(*args, **kwargs):
        pass
    def timerEvent(*args, **kwargs):
        pass
    # Enum values and signals; populated by the real binding at runtime.
    Error = None
    Loading = None
    Null = None
    Ready = None
    ResizeMode = None
    SizeRootObjectToView = None
    SizeViewToRootObject = None
    Status = None
    __new__ = None
    sceneResized = None
    staticMetaObject = None
    statusChanged = None
class ListProperty(_Property):
    """Auto-generated stub of PySide's ListProperty descriptor."""
    def __init__(*args, **kwargs):
        """
        x.__init__(...) initializes x; see help(type(x)) for signature
        """
        pass
class QDeclarativeScriptString(_Object):
    """Auto-generated stub of QtDeclarative's QDeclarativeScriptString; all members are placeholders."""
    def __copy__(*args, **kwargs):
        pass
    def __init__(*args, **kwargs):
        """
        x.__init__(...) initializes x; see help(type(x)) for signature
        """
        pass
    def context(*args, **kwargs):
        pass
    def scopeObject(*args, **kwargs):
        pass
    def script(*args, **kwargs):
        pass
    def setContext(*args, **kwargs):
        pass
    def setScopeObject(*args, **kwargs):
        pass
    def setScript(*args, **kwargs):
        pass
    __new__ = None
class QDeclarativeProperty(_Object):
def __copy__(*args, **kwargs):
pass
def __eq__(*args, **kwargs):
"""
x.__eq__(y) <==> x==y
"""
pass
def __ge__(*args, **kwargs):
"""
x.__ge__(y) <==> x>=y
"""
pass
def __getattribute__(*args, **kwargs):
"""
| |
'''
Computes the magnetic field components from Langlais' spherical harmonic model at a certain altitude.

References:
    Whaler, K.A.; Gubbins, D., 1981. Spherical harmonic analysis of the geomagnetic field: an example of a
    linear inverse problem, Geophysical Journal of the Royal Astronomical Society, 65, 645-693.
    DOI: doi.org/10.1111/j.1365-246X.1981.tb04877.x

    Langlais, B.; Thebault, E.; Houliez, A.; Purucker, M.E.; Lillis, R.J., 2019. A new model of the crustal
    magnetic field of Mars using MGS and MAVEN, Journal of Geophysical Research: Planets, 124, 6, 1542-1569.
    DOI: doi.org/10.1029/2018JE005854

    Langel, R.A., The main field, In: Geomagnetism, 1987, ed. by J.A. Jacobs, Academic Press.
'''
import pyshtools as sh
import numpy as np
from tqdm import tqdm
from enum import Enum
def legendre_schmidt_Pyshtools(lat, nmax = 134):
    """
    Uses the pyshtools package to compute the Schmidt semi-normalized associated
    Legendre functions of the cosine of the colatitude, and their first
    derivatives, with nmax = 134 by default (Langlais model).

    Parameters:
        lat: float or array
            The latitude, in degrees.
        nmax: integer, optional
            Maximum degree and order of the functions.

    Returns:
        P: (nmax+1, nmax+1[, npoints]) array of associated Legendre functions.
        dP: array of the same shape with the first derivatives.
    """
    theta = np.deg2rad(90.0 - lat)

    def _unflatten(flat_p, flat_dp, P_out, dP_out):
        # pyshtools returns the (n, m) triangle packed into a flat vector;
        # expand it into the square [degree, order] layout used here.
        idx = 0
        for degree in range(nmax + 1):
            for order in range(degree + 1):
                P_out[degree, order] = flat_p[idx]
                dP_out[degree, order] = flat_dp[idx]
                idx += 1

    if np.isscalar(lat):
        flat_p, flat_dp = sh.legendre.PlmSchmidt_d1(nmax, np.cos(theta))
        P = np.zeros((nmax + 1, nmax + 1))
        dP = np.zeros((nmax + 1, nmax + 1))
        _unflatten(flat_p, flat_dp, P, dP)
    else:
        npoints = len(lat)
        P = np.zeros((nmax + 1, nmax + 1, npoints))
        dP = np.zeros((nmax + 1, nmax + 1, npoints))
        for k, theta_k in enumerate(theta):
            flat_p, flat_dp = sh.legendre.PlmSchmidt_d1(nmax, np.cos(theta_k))
            _unflatten(flat_p, flat_dp, P[:, :, k], dP[:, :, k])
    return P, dP
def legendre_schmidt_Brain(lat, nmax = 134):
    """
    Uses the David Brain's approach (CCATi - mars_crust_model.pro) to compute the Schmidt semi-normalized associated Legendre functions and its derivatives.

    Parameters:
        lat: float
            The latitude, in degrees.

        nmax: integer
            The maximum degree and order of the functions.

    Returns:
        P, dP: nmax+1 X nmax+1 arrays containing the associated Legendre functions and its derivatives, respectively.
    """
    theta = np.deg2rad(90.0 - lat)
    x = np.cos(theta)
    P = np.zeros((nmax+1, nmax+1))
    dP = np.zeros((nmax+1, nmax+1))
    # Seed values for the recursions.
    # NOTE(review): P[1, 1] carries a minus sign (Condon-Shortley-like phase);
    # confirm this matches the sign convention expected by the caller.
    P[0, 0] = 1.0
    P[1, 0] = x
    P[1, 1] = - np.sqrt(1 - x**2)
    dP[0, 0] = 0.0
    dP[1, 0] = 1.0
    # Zonal (m = 0) terms via the standard three-term recursion in degree n.
    for n in range(2, nmax+1):
        P[n, 0] = ((2*(n-1)+1) * x * P[n-1, 0] - (n-1) * P[n-2, 0])/n
    # Derivatives of the zonal terms; the last degree uses a different identity.
    dP[nmax, 0] = nmax / (x**2 - 1) * (x * P[nmax, 0] - P[nmax-1, 0])
    for n in range(2, nmax):
        dP[n, 0] = (n+1) / (x**2 - 1) * (P[n+1, 0] - x * P[n, 0])
    # Sectoral (n = m) terms and their derivatives, then propagate to the
    # remaining tesseral terms (n > m) by recursion in degree.
    Cm = np.sqrt(2.0)
    for m in range(1, nmax+1):
        Cm /= np.sqrt(2.0*m * (2.0*m - 1.0))
        P[m, m] = (1.0 - x**2)**(0.5*m) * Cm
        for i in range(1, m):
            P[m, m] *= (2.0*i + 1.0)
        dP[m, m] = -P[m, m] * m * x / np.sqrt(1 - x**2)
        if nmax > m:
            twoago = 0.0
            for n in range(m+1, nmax+1):
                P[n, m] = (x * (2.0*n - 1.0) * P[n-1, m] - np.sqrt((n+m-1.0) * (n-m-1.0)) * twoago) / np.sqrt(n**2 - m**2)
                twoago = P[n-1, m]
    # Tesseral derivatives from the functions of adjacent order.
    for n in range(2, nmax+1):
        for m in range(1, n):
            dP[n, m] = np.sqrt((n-m) * (n+m+1)) * P[n, m+1] - P[n, m] * m * x / np.sqrt(1 - x**2)
    return P, dP
def legendre_schmidt_ChaosMagPy(theta, nmax):
    """
    Copy of the function developed in the ChaosMagPy package. I did not write this code!
    I do not use this, it is here only for reference on the equations!
    Use legendre_schmidt_Pyshtools instead.

    Returns associated Legendre polynomials `P(n,m)` (Schmidt quasi-normalized)
    and the derivative :math:`dP(n,m)/d\\theta` evaluated at :math:`\\theta`.

    Parameters
    ----------
    nmax : int, positive
        Maximum degree of the spherical expansion.
    theta : ndarray, shape (...)
        Colatitude in degrees :math:`[0^\\circ, 180^\\circ]`
        of arbitrary shape.

    Returns
    -------
    Pnm : ndarray, shape (n, m, ...)
        Evaluated values and derivatives, grid shape is appended as trailing
        dimensions. `P(n,m)` := ``Pnm[n, m, ...]`` and `dP(n,m)` :=
        ``Pnm[m, n+1, ...]``

    References
    ----------
    Based on Equations 26-29 and Table 2 in:
    <NAME>., "Geomagnetism - The main field", Academic Press, 1987,
    chapter 4
    """
    # NOTE(review): ``theta`` must be an ndarray (``costh.shape`` is used
    # below); a Python scalar would raise — confirm callers always pass arrays.
    costh = np.cos(np.deg2rad(theta))
    sinth = np.sqrt(1-costh**2)
    Pnm = np.zeros((nmax+1, nmax+2) + costh.shape)
    Pnm[0, 0] = 1.  # is copied into trailing dimensions
    Pnm[1, 1] = sinth  # write theta into trailing dimenions via broadcasting
    rootn = np.sqrt(np.arange(2 * nmax**2 + 1))
    # Recursion relations after Langel "The Main Field" (1987),
    # eq. (27) and Table 2 (p. 256)
    for m in range(nmax):
        Pnm_tmp = rootn[m+m+1] * Pnm[m, m]
        Pnm[m+1, m] = costh * Pnm_tmp
        if m > 0:
            Pnm[m+1, m+1] = sinth*Pnm_tmp / rootn[m+m+2]
        for n in np.arange(m+2, nmax+1):
            d = n * n - m * m
            e = n + n - 1
            Pnm[n, m] = ((e * costh * Pnm[n-1, m] - rootn[d-e] * Pnm[n-2, m])
                         / rootn[d])
    # dP(n,m) = Pnm(m,n+1) is the derivative of P(n,m) vrt. theta
    Pnm[0, 2] = -Pnm[1, 1]
    Pnm[1, 2] = Pnm[1, 0]
    for n in range(2, nmax+1):
        Pnm[0, n+1] = -np.sqrt((n*n + n) / 2) * Pnm[n, 1]
        Pnm[1, n+1] = ((np.sqrt(2 * (n*n + n)) * Pnm[n, 0]
                       - np.sqrt((n*n + n - 2)) * Pnm[n, 2]) / 2)
        for m in np.arange(2, n):
            Pnm[m, n+1] = (0.5*(np.sqrt((n + m) * (n - m + 1)) * Pnm[n, m-1]
                          - np.sqrt((n + m + 1) * (n - m)) * Pnm[n, m+1]))
        Pnm[n, n+1] = np.sqrt(2 * n) * Pnm[n, n-1] / 2
    return Pnm
def mag_components(lon, lat, alt, comp, nmax = 134, a = 3393.5):
"""
Calculates the magnetic field component (Br, Btheta, Bphi, or Bt) of the crustal field model for one set of aerographic coordinates.
Obs.: Btheta is positive southwards (the origin is in the north pole).
Parameters:
lon: float or array
The longitude, in degrees.
lat: float or array
The latitude, in degrees.
alt: float or array
The altitude, in km.
comp: string
The desired magnetic field component, in spherical coordinates. Options are 'Br', 'Btheta', 'Bphi', and 'Bt.
nmax: integer, optional
The maximum degree and order of the functions.
a: float, optional
The radius of the planet. Default is the Mars' radius.
Returns:
A float or an array containing the magnetic field component Br, Btheta, Bphi, or Bt.
"""
# Raise an AssertionError if component is invalid
assert comp == 'Br' or comp == 'Btheta' or comp == 'Bphi' or comp == 'Bt', "Check argument for comp"
# Import the coefficient files
from IsabelaFunctions.langlais_coeff import glm as g
from IsabelaFunctions.langlais_coeff import hlm as h
# Calculate r, theta, phi, and the Legendre functions
r = a + alt
if hasattr(lon, '__len__') is False:
l = 1
else:
l = len(lon)
P, dP = legendre_schmidt_Pyshtools(lat)
cos = np.empty((nmax+1, l)) * np.nan
sen = np.empty_like(cos) * np.nan
for phi in range(l):
for m in range(nmax+1):
if np.isscalar(lon) is True:
cos[m, phi] = np.cos(m * np.deg2rad(lon))
sen[m, phi] = np.sin(m * np.deg2rad(lon))
else:
cos[m, phi] = np.cos(m * np.deg2rad(lon[phi]))
sen[m, phi] = np.sin(m * np.deg2rad(lon[phi]))
a_over_r = np.empty((nmax+1, l)) * np.nan
if l == 1:
for n in range(nmax+1):
a_over_r[n] = (a/r)**(n+2)
else:
for radius in range(l):
for n in range(nmax+1):
a_over_r[n, radius] = (a/r[radius])**(n+2)
# Calculate Br, Btheta, Bphi, Bt
if comp == 'Bt':
Br = np.zeros(l)
Btheta = np.zeros(l)
Bphi = np.zeros(l)
sen_theta = np.sin(np.deg2rad(90.0 - lat))
for n in range(1, nmax+1):
for m in range(n+1):
Br += (g[n, m] * cos[m] + h[n, m] * sen[m]) * P[n, m] * (n+1) * a_over_r[n]
Btheta += (g[n, m] * cos[m] + h[n, m] * sen[m]) * dP[n, m] * sen_theta * a_over_r[n]
Bphi += (g[n, m] * sen[m] + h[n, m] * cos[m]) | |
self.query,
'Namespace has ambiguous uniqueness state',
result=result[n])
n += 1
# check we processed everything...
if n != len(result):
raise MQLInternalError(
self.query,
'Got %(count)d results, expecting %(expected_count)d',
count=len(result),
expected_count=n)
def add_order_results(self, results):
    """Record the pre-existing (order, guid) pairs found by the order query.

    Populates ``self.existing_order`` with one ``(order_value, guid)``
    tuple per result row; each row must yield both pieces or an internal
    error is raised.
    """
    self.existing_order = []
    for existing in results:
        # find out what the existing order looks like...
        n = len(self.result)
        guid = None
        order = None
        # ['guid', 'typeguid', 'contents']
        for i in xrange(n):
            if self.result[i] == 'guid':
                guid = self.transform_result(self.result[i], existing[i])
        # there's exactly one more thing -- the order info itself.
        # ['guid','typeguid','datatype','value']
        if len(existing[n]) != 1:
            raise MQLResultError(
                self.query,
                'More than one piece of order information at %(guid)s',
                guid=guid)
        m = len(self.ordered.result)
        for i in xrange(m):
            if self.ordered.result[i] == 'value':
                res = self.transform_result(self.ordered.result[i], existing[n][0][i])
                order = self.primitive_result(self.ordered.datatype, res)
        if guid is None or order is None:
            raise MQLInternalError(
                self.query, 'Found order information without guid and order value')
        # check we processed everything...
        if n + 1 != len(existing):
            raise MQLInternalError(
                self.query,
                'Got %(count)d results, expecting %(expected_count)d',
                count=len(self.result),
                expected_count=n)
        # add information about what we found. Note that the length of this array is also important.
        self.existing_order.append((order, guid))
def generate_new_order(self):
    """Compute order values for ``self.new_order``, reusing existing ones.

    Finds the longest increasing subsequence of already-ordered items that
    can be kept as-is, then interpolates/extrapolates fresh order values
    for everything else and assigns them via ``assign_order``.
    """
    VERY_LARGE_NUMBER = 1000000.0
    VERY_SMALL_NUMBER = -VERY_LARGE_NUMBER
    # using self.new_order, and self.existing_order figure out what the new orders are for each primitive...
    # note that the formats are radically different. Oh well..
    # XXX This algorithm is sub-optimal in complicated ways - ask tsturge for more details.
    # what have we found?
    current_order = []
    seen_guids = set()
    for item in self.new_order:
        if item.state == 'found' and item.ordered.state == 'order_found':
            current_order.append(item)
            seen_guids.add(item.guid)
    # what is the highest order we cover?
    first_missing_order = VERY_LARGE_NUMBER
    for pair in self.existing_order:
        if pair[1] not in seen_guids:
            first_missing_order = min(pair[0], first_missing_order)
    best_preserved_order = []
    # what could we possibly preserve?
    for item in current_order:
        if item.ordered.value < first_missing_order:
            best_preserved_order.append([item.ordered.value, None, item])
    # find the best match between current_order and new_order
    best_preserved_guids = set(
        [x[2].guid for x in incr_subseq(best_preserved_order)])
    # so we need to change everything we are NOT preserving...
    # we need to fill in the list like [ (A,None), (B,-2), (C,None), (D,None), (E,3.1), (F,3.4), (G,None) ]
    # with good intermediate values for A,C,D and G (-3,-1,0,4.4)
    i = 0
    prev_order = VERY_SMALL_NUMBER
    next_order = None
    k = 0
    while i < len(self.new_order):
        item = self.new_order[i]
        if item.state == 'found' and item.ordered.state == 'order_found' and item.guid in best_preserved_guids:
            # Preserved item: its order stands; it anchors interpolation.
            prev_order = item.ordered.value
            next_order = None
            k = i
            i += 1
            item.ordered.change_state('found')
            continue
        if next_order is None:
            # Scan forward for the next preserved anchor (if any).
            j = i + 1
            while j < len(self.new_order):
                next_item = self.new_order[j]
                if next_item.state == 'found' and next_item.ordered.state == 'order_found' and next_item.guid in best_preserved_guids:
                    next_order = next_item.ordered.value
                    break
                j += 1
            if next_order is None:
                # we'll never go past the first missing order.
                next_order = first_missing_order
        # so what order will we give this item?
        if prev_order == VERY_SMALL_NUMBER and next_order == VERY_LARGE_NUMBER:
            # No anchors at all: fall back to the list position.
            assigned_order = i + 0.0
        elif prev_order == VERY_SMALL_NUMBER:
            # Only a following anchor: count backwards from it.
            assigned_order = next_order - (j - i)
        elif next_order == VERY_LARGE_NUMBER:
            # Only a preceding anchor: count forwards from it.
            assigned_order = prev_order + (i - k)
        else:
            # Both anchors: interpolate linearly between them.
            assigned_order = prev_order + (next_order -
                                           prev_order) * (i - k + 0.0) / (
                                               j - k + 0.0)
        # and give the item the new order (possibly modifying the existing order)
        item.ordered.assign_order(assigned_order)
        i += 1
    self.change_state('checked')
def assign_order(self, assigned_order):
    """Record *assigned_order* as this primitive's order value.

    If an order primitive already exists in the graph ('order_found'),
    remember its guid in self.previous so it can be superseded, then move
    to the 'create' state carrying the new value.
    """
    if self.state == 'order_found':
        # supersede the existing order primitive; the new one gets no guid yet
        self.previous = self.guid
        self.guid = None
    elif self.state != 'order_missing':
        # any state other than order_found/order_missing is a programming error
        raise MQLInternalError(
            self.query,
            "Can't assign an order to a primitive in state %(state)s",
            state=self.state)
    self.change_state('create')
    self.value = assigned_order
def handle_multiple_results(self, reader, results):
    """Handle a unique check that unexpectedly matched several rows.

    When the original primitive is being removed (or is not present) the
    duplicate is tolerated; otherwise this is a hard uniqueness failure.
    """
    guids = ['#' + row[0] for row in results]
    LOG.error(
        'multiple.unique.results',
        'got multiple results for %(state)s unique check',
        guids=guids,
        state=self.orig.state)
    if self.orig.state not in ('remove', 'notpresent'):
        # this is an outright failure.
        raise MQLResultError(
            self.query,
            'Unique check may have at most one result. Got %(count)d',
            guids=guids,
            count=len(results))
    # we're trying to remove a duplicate; mark it and carry on.
    self.change_state('duplicate')
def mark_unique_missing(self, reader):
    """Handle a unique check that found nothing in the graph.

    That is only legitimate when we are about to create the value (or it
    is simply not present); any other original state is an internal error.
    """
    if self.orig.state not in ('create', 'notpresent'):
        raise MQLInternalError(
            self.query,
            'Nothing in unique checks but not creating anything',
            state=self.orig.state)
    self.change_state('checked')
def check_unique_namespace(self, reader, result):
    """Process the graph's answer to "is this namespace unique?".

    If the namespace is unique, second-stage namespace checks are upgraded
    (replace_check -> update_check) and their prepare queries are kicked
    off. If it is not unique, 'connect': 'update' is rejected and the
    pending checks are marked as done.
    """
    # default is non-unique
    is_unique = False
    if result is not None:
        # yes, this is what it takes to get True out of a graph result. I need a better architecture...
        is_unique = self.primitive_result(
            dict(zip(self.result, result))['datatype'],
            self.transform_result('value',
                                  dict(zip(self.result, result))['value']))
    if not isinstance(is_unique, bool):
        raise MQLInternalError(
            self.query,
            'Expected a boolean result from the unique namespace check, not %(value)s',
            value=is_unique,
            result=result)
    # ideally, we would dispatch the second level checks now; we even have the reader available.
    # But they have to wait until we have processed the first stage so we know the RHS
    # finally, the second stage namespace prepares -- these kick off their own queries...
    if is_unique:
        for namespace_check in self.left.unique_namespace_checks:
            # replace on a unique namespace behaves as an update
            if namespace_check.state == 'replace_check':
                namespace_check.change_state('update_check')
            if namespace_check.state in ('unique_check', 'update_check'):
                namespace_check.run_namespace_prepare(reader)
            else:
                raise MQLInternalError(
                    self.query,
                    'Not expecting a %(state)s here',
                    state=namespace_check.state)
        self.change_state('namespace_unique')
    else:
        # make sure we aren't trying to use update on a non-unique namespace
        for namespace_check in self.left.unique_namespace_checks:
            if namespace_check.state == 'update_check':
                raise MQLResultError(
                    self.left.query,
                    "Can't use 'connect': 'update' on a namespace that is not unique")
            elif namespace_check.state in ('replace_check', 'unique_check'):
                # nothing to verify against on a non-unique namespace
                namespace_check.change_state('checked')
            else:
                raise MQLInternalError(
                    self.query,
                    'Not expecting a %(state)s here',
                    state=namespace_check.state)
        self.change_state('namespace_regular')
def attach_unique_result(self, reader, result):
n = len(self.result)
for i in xrange(n):
# first make sure that we match what we expected to query here
res = self.transform_result(self.result[i], result[i])
if self.result[i] == 'value':
# we have a big problem with the datatype here.
# we didn't know what the datatype was when we queried.
# so self.datatype is None.
# We can't use the datatype from orig in case we are (quite deliberately and properly)
# attempting to change the datatype. So we need to inspect the returned datatype in
# advance...
datatype = dict(zip(self.result, result))['datatype']
res = self.primitive_result(datatype, res)
if getattr(self, self.result[i], None) is not None:
# XXX we explicitly do not do the case insensitive check here as this codepath is called
# on keys (things that have unique values) rather than on texts (things that have unique rights)
if getattr(self, self.result[i], None) != res:
if self.result[i] == 'value' and datatype == 'string' and res.lower(
) == self.value.lower():
# discussion with Tristan -- we should give a nice error in case insensitive matches:
LOG.warning('case.insensitive.unique.error',
[self.value, res] + result[0:n - 1])
# still an error in this case
raise MQLResultError(
self.query,
'Value exists that differs only in case',
key=self.result[i],
newvalue=getattr(self, self.result[i], None),
value=res)
else:
raise MQLInternalError(
self.query,
"Values didn't match in prepare",
key=self.result[i],
newvalue=getattr(self, self.result[i], None),
value=res)
elif getattr(self.orig, self.result[i], None) is not None:
# XXX there is weirdness around self.orig.right == Missing here.
# this is why we don't ask for result=right in that case.
# we only ask for result=left if we have left uniqueness
if self.result[i] == 'right':
newvalue = self.orig.right.guid
elif self.result[i] == 'left':
newvalue = self.orig.left.guid
else:
newvalue = getattr(self.orig, self.result[i])
if newvalue != res:
# we allow for case-insensitive matches to be changed.
if self.state == 'update_check':
# we must have already set self.guid by this point as we do it first.
if not self.orig.update or self.orig.previous is not None:
raise MQLInternalError(self.query,
'Trying to update a non-updateable node')
self.orig.previous = self.guid
elif self.result[i] == 'value' and datatype == 'string' and res.lower(
) == newvalue.lower():
# we are equal on a case insensitive basis...
LOG.warning('case.insensitive.unique',
[self.value, res] + result[0:n - 1])
# we believe we're OK with this...
pass
elif self.state == 'unique_check':
# if this is a delete, finding a different value is not an error. You might
# ask why we bother to do the unique check in the first place; mostly because
# it is hard to detect a delete at the time the check is inserted.
# we depend on the deletion already having been processed
if self.orig.state == 'notpresent':
pass
else:
| |
<filename>src/Pyro4/utils/httpgateway.py
"""
HTTP gateway: connects the web browser's world of javascript+http and Pyro.
Creates a stateless HTTP server that essentially is a proxy for the Pyro objects behind it.
It exposes the Pyro objects through a HTTP interface and uses the JSON serializer,
so that you can immediately process the response data in the browser.
You can start this module as a script from the command line, to easily get a
http gateway server running:
:command:`python -m Pyro4.utils.httpgateway`
or simply: :command:`pyro4-httpgateway`
It is also possible to import the 'pyro_app' function and stick that into a WSGI
server of your choice, to have more control.
The javascript code in the web page of the gateway server works with the same-origin
browser policy because it is served by the gateway itself. If you want to access it
from scripts in different sites, you have to work around this or embed the gateway app
in your site. Non-browser clients that access the http api have no problems.
See the `http` example for two of such clients (node.js and python).
Pyro - Python Remote Objects. Copyright by <NAME> (<EMAIL>).
"""
from __future__ import print_function
import sys
import re
import cgi
import os
import uuid
import warnings
from wsgiref.simple_server import make_server
import traceback
from Pyro4.util import json # don't import stdlib json directly, we want to use the JSON_MODULE config item
from Pyro4.configuration import config
from Pyro4 import constants, errors, core, message, util, naming
__all__ = ["pyro_app", "main"]
_nameserver = None
def get_nameserver(hmac=None):
    """Return a cached name server proxy, reconnecting when the link dropped."""
    global _nameserver
    if not _nameserver:
        _nameserver = naming.locateNS(hmac_key=hmac)
    try:
        _nameserver.ping()
    except errors.ConnectionClosedError:
        # stale proxy: drop the cache and retry the lookup from scratch
        _nameserver = None
        print("Connection with nameserver lost, reconnecting...")
        return get_nameserver(hmac)
    return _nameserver
def invalid_request(start_response):
    """Reply with HTTP 405 for request methods the gateway does not support."""
    headers = [('Content-Type', 'text/plain')]
    start_response('405 Method Not Allowed', headers)
    return [b'Error 405: Method Not Allowed']
def not_found(start_response):
    """Reply with HTTP 404 for URLs the gateway does not serve."""
    headers = [('Content-Type', 'text/plain')]
    start_response('404 Not Found', headers)
    return [b'Error 404: Not Found']
def redirect(start_response, target):
    """Send an HTTP 302 redirect to *target* with an empty response body."""
    headers = [('Location', target)]
    start_response('302 Found', headers)
    return []
index_page_template = """<!DOCTYPE html>
<html>
<head>
<title>Pyro HTTP gateway</title>
<style type="text/css">
body {{ margin: 1em; }}
table, th, td {{border: 1px solid #bbf; padding: 4px;}}
table {{border-collapse: collapse;}}
pre {{border: 1px solid #bbf; padding: 1ex; margin: 1ex; white-space: pre-wrap;}}
#title-logo {{ float: left; margin: 0 1em 0 0; }}
</style>
</head>
<body>
<script src="//code.jquery.com/jquery-2.1.3.min.js"></script>
<script>
"use strict";
function pyro_call(name, method, params) {{
$.ajax({{
url: name+"/"+method,
type: "GET",
data: params,
dataType: "json",
// headers: {{ "X-Pyro-Correlation-Id": "11112222-1111-2222-3333-222244449999" }},
// headers: {{ "X-Pyro-Gateway-Key": "secret-key" }},
// headers: {{ "X-Pyro-Options": "oneway" }},
beforeSend: function(xhr, settings) {{
$("#pyro_call").text(settings.type+" "+settings.url);
}},
error: function(xhr, status, error) {{
var errormessage = "ERROR: "+xhr.status+" "+error+" \\n"+xhr.responseText;
$("#pyro_response").text(errormessage);
}},
success: function(data) {{
$("#pyro_response").text(JSON.stringify(data, null, 4));
}}
}});
}}
</script>
<div id="title-logo"><img src="http://pyro4.readthedocs.io/en/stable/_static/pyro.png"></div>
<div id="title-text">
<h1>Pyro HTTP gateway</h1>
<p>
Use http+json to talk to Pyro objects.
<a href="http://pyro4.readthedocs.io/en/stable/tipstricks.html#pyro-via-http-and-json">Docs.</a>
</p>
</div>
<p><em>Note: performance isn't a key concern here; it is a stateless server.
It does a name lookup and uses a new Pyro proxy for each request.</em></p>
<h2>Currently exposed contents of name server on {hostname}:</h2>
<p>(Limited to 10 entries, exposed name pattern = '{ns_regex}')</p>
{name_server_contents_list}
<p>Name server examples: (these examples are working if you expose the Pyro.NameServer object)</p>
<ul>
<li><a href="Pyro.NameServer/$meta" onclick="pyro_call('Pyro.NameServer','$meta'); return false;">Pyro.NameServer/$meta</a>
-- gives meta info of the name server (methods)</li>
<li><a href="Pyro.NameServer/list" onclick="pyro_call('Pyro.NameServer','list'); return false;">Pyro.NameServer/list</a>
-- lists the contents of the name server</li>
<li><a href="Pyro.NameServer/list?prefix=test."
onclick="pyro_call('Pyro.NameServer','list', {{'prefix':'test.'}}); return false;">
Pyro.NameServer/list?prefix=test.</a> -- lists the contents of the name server starting with 'test.'</li>
<li><a href="Pyro.NameServer/lookup?name=Pyro.NameServer"
onclick="pyro_call('Pyro.NameServer','lookup', {{'name':'Pyro.NameServer'}}); return false;">
Pyro.NameServer/lookup?name=Pyro.NameServer</a> -- perform lookup method of the name server</li>
<li><a href="Pyro.NameServer/lookup?name=test.echoserver"
onclick="pyro_call('Pyro.NameServer','lookup', {{'name':'test.echoserver'}}); return false;">
Pyro.NameServer/lookup?name=test.echoserver</a> -- perform lookup method of the echo server</li>
</ul>
<p>Echoserver examples: (these examples are working if you expose the test.echoserver object)</p>
<ul>
<li><a href="test.echoserver/error" onclick="pyro_call('test.echoserver','error'); return false;">test.echoserver/error</a>
-- perform error call on echoserver</li>
<li><a href="test.echoserver/echo?message=Hi there, browser script!"
onclick="pyro_call('test.echoserver','echo', {{'message':'Hi there, browser script!'}}); return false;">
test.echoserver/echo?message=Hi there, browser script!</a> -- perform echo call on echoserver</li>
</ul>
<h2>Pyro response data (via Ajax):</h2>
Call: <pre id="pyro_call"> </pre>
Response: <pre id="pyro_response"> </pre>
<p>Pyro version: {pyro_version} — © <NAME></p>
</body>
</html>
"""
def return_homepage(environ, start_response):
    """Render the gateway index page listing exposed name server entries.

    Looks up (at most 10) registrations matching pyro_app.ns_regex, binds a
    proxy to each to discover its methods/attributes, and formats the result
    into index_page_template.  Returns the page as a single utf-8 body chunk.
    """
    try:
        nameserver = get_nameserver(hmac=pyro_app.hmac_key)
    except errors.NamingError as x:
        print("Name server error:", x)
        start_response('500 Internal Server Error', [('Content-Type', 'text/plain')])
        return [b"Cannot connect to the Pyro name server. Is it running? Refresh page to retry."]
    start_response('200 OK', [('Content-Type', 'text/html')])
    nslist = ["<table><tr><th>Name</th><th>methods</th><th>attributes (zero-param methods)</th></tr>"]
    # NOTE(review): this slices the first 10 registrations *before* sorting, so
    # which 10 are shown depends on the name server's listing order — confirm intended.
    names = sorted(list(nameserver.list(regex=pyro_app.ns_regex).keys())[:10])
    # batch the lookups into a single round trip to the name server
    with core.batch(nameserver) as nsbatch:
        for name in names:
            nsbatch.lookup(name)
    for name, uri in zip(names, nsbatch()):
        attributes = "-"
        try:
            with core.Proxy(uri) as proxy:
                proxy._pyroHmacKey = pyro_app.hmac_key
                # binding fetches the metadata (methods/attrs) for this object
                proxy._pyroBind()
                methods = " ".join(proxy._pyroMethods) or "-"
                attributes = [
                    "<a href=\"{name}/{attribute}\" onclick=\"pyro_call('{name}','{attribute}'); return false;\">{attribute}</a>"
                    .format(name=name, attribute=attribute)
                    for attribute in proxy._pyroAttrs
                ]
                attributes = " ".join(attributes) or "-"
        except errors.PyroError as x:
            # an unreachable/broken object must not break the whole page
            stderr = environ["wsgi.errors"]
            print("ERROR getting metadata for {0}:".format(uri), file=stderr)
            traceback.print_exc(file=stderr)
            methods = "??error:%s??" % str(x)
        nslist.append(
            "<tr><td><a href=\"{name}/$meta\" onclick=\"pyro_call('{name}','$meta'); "
            "return false;\">{name}</a></td><td>{methods}</td><td>{attributes}</td></tr>"
            .format(name=name, methods=methods, attributes=attributes))
    nslist.append("</table>")
    index_page = index_page_template.format(ns_regex=pyro_app.ns_regex,
                                            name_server_contents_list="".join(nslist),
                                            pyro_version=constants.VERSION,
                                            hostname=nameserver._pyroUri.location)
    return [index_page.encode("utf-8")]
def process_pyro_request(environ, path, parameters, start_response):
    """Dispatch one HTTP request ("objectname/method") to a Pyro object.

    Enforces the optional gateway api key and the exposed-name regex, looks
    the object up in the name server, and returns the raw JSON wire response
    from the Pyro call (or a JSON-serialized exception with a 500 status).
    An empty path renders the homepage.
    """
    pyro_options = environ.get("HTTP_X_PYRO_OPTIONS", "").split(",")
    if not path:
        return return_homepage(environ, start_response)
    matches = re.match(r"(.+)/(.+)", path)
    if not matches:
        return not_found(start_response)
    object_name, method = matches.groups()
    if pyro_app.gateway_key:
        # key may come from a header or from the $key query parameter
        gateway_key = environ.get("HTTP_X_PYRO_GATEWAY_KEY", "") or parameters.get("$key", "")
        gateway_key = gateway_key.encode("utf-8")
        if gateway_key != pyro_app.gateway_key:
            start_response('403 Forbidden', [('Content-Type', 'text/plain')])
            return [b"403 Forbidden - incorrect gateway api key"]
        if "$key" in parameters:
            # don't pass the gateway key on to the remote method
            del parameters["$key"]
    if pyro_app.ns_regex and not re.match(pyro_app.ns_regex, object_name):
        start_response('403 Forbidden', [('Content-Type', 'text/plain')])
        return [b"403 Forbidden - access to the requested object has been denied"]
    try:
        nameserver = get_nameserver(hmac=pyro_app.hmac_key)
        uri = nameserver.lookup(object_name)
        with core.Proxy(uri) as proxy:
            header_corr_id = environ.get("HTTP_X_PYRO_CORRELATION_ID", "")
            if header_corr_id:
                core.current_context.correlation_id = uuid.UUID(header_corr_id)  # use the correlation id from the request header
            else:
                core.current_context.correlation_id = uuid.uuid4()  # set new correlation id
            proxy._pyroHmacKey = pyro_app.hmac_key
            proxy._pyroGetMetadata()
            if "oneway" in pyro_options:
                proxy._pyroOneway.add(method)
            if method == "$meta":
                # introspection pseudo-method: report methods and attributes
                result = {"methods": tuple(proxy._pyroMethods), "attributes": tuple(proxy._pyroAttrs)}
                reply = json.dumps(result).encode("utf-8")
                start_response('200 OK', [('Content-Type', 'application/json; charset=utf-8'),
                                          ('X-Pyro-Correlation-Id', str(core.current_context.correlation_id))])
                return [reply]
            else:
                proxy._pyroRawWireResponse = True  # we want to access the raw response json
                if method in proxy._pyroAttrs:
                    # retrieve the attribute
                    assert not parameters, "attribute lookup can't have query parameters"
                    msg = getattr(proxy, method)
                else:
                    # call the remote method
                    msg = getattr(proxy, method)(**parameters)
                if msg is None or "oneway" in pyro_options:
                    # was a oneway call, no response available
                    start_response('200 OK', [('Content-Type', 'application/json; charset=utf-8'),
                                              ('X-Pyro-Correlation-Id', str(core.current_context.correlation_id))])
                    return []
                elif msg.flags & message.FLAGS_EXCEPTION:
                    # got an exception response so send a 500 status
                    start_response('500 Internal Server Error', [('Content-Type', 'application/json; charset=utf-8')])
                    return [msg.data]
                else:
                    # normal response
                    start_response('200 OK', [('Content-Type', 'application/json; charset=utf-8'),
                                              ('X-Pyro-Correlation-Id', str(core.current_context.correlation_id))])
                    return [msg.data]
    except Exception as x:
        # any failure (lookup, connect, call) is reported as a JSON-serialized exception
        stderr = environ["wsgi.errors"]
        print("ERROR handling {0} with params {1}:".format(path, parameters), file=stderr)
        traceback.print_exc(file=stderr)
        start_response('500 Internal Server Error', [('Content-Type', 'application/json; charset=utf-8')])
        reply = json.dumps(util.SerializerBase.class_to_dict(x)).encode("utf-8")
        return [reply]
def pyro_app(environ, start_response):
    """
    The WSGI app function that is used to process the requests.
    You can stick this into a wsgi server of your choice, or use the main() method
    to use the default wsgiref server.
    """
    config.SERIALIZER = "json"  # we only talk json through the http proxy
    config.COMMTIMEOUT = pyro_app.comm_timeout
    method = environ.get("REQUEST_METHOD")
    path = environ.get('PATH_INFO', '').lstrip('/')
    # guard clauses: route, then validate, then dispatch
    if not path:
        return redirect(start_response, "/pyro/")
    if not path.startswith("pyro/"):
        return not_found(start_response)
    if method not in ("GET", "POST"):
        return invalid_request(start_response)
    parameters = singlyfy_parameters(cgi.parse(environ['wsgi.input'], environ))
    return process_pyro_request(environ, path[5:], parameters, start_response)
def singlyfy_parameters(parameters):
    """
    Makes a cgi-parsed parameter dictionary into a dict where the values that
    are just a list of a single value, are converted to just that single value.
    """
    for key in parameters:
        value = parameters[key]
        # cgi.parse wraps every value in a list; unwrap the trivial ones in place
        if isinstance(value, (list, tuple)) and len(value) == 1:
            parameters[key] = value[0]
    return parameters
# Configuration knobs for the WSGI app; set these before serving requests.
pyro_app.ns_regex = r"http\."  # only expose objects whose name matches this regex
pyro_app.hmac_key = None  # optional Pyro HMAC key (bytes); deprecated mechanism
pyro_app.gateway_key = None  # optional api key (bytes) clients must present to the gateway
pyro_app.comm_timeout = config.COMMTIMEOUT  # Pyro communication timeout applied per request
def main(args=None):
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-H", "--host", default="localhost", help="hostname to bind server on (default=%default)")
parser.add_option("-p", "--port", type="int", default=8080, help="port to bind server on (default=%default)")
parser.add_option("-e", "--expose", default=pyro_app.ns_regex, help="a regex of object names to expose (default=%default)")
parser.add_option("-k", "--pyrokey", help="the HMAC key to use to connect with Pyro (deprecated)")
parser.add_option("-g", "--gatewaykey", help="the api key to use to connect to the gateway itself")
parser.add_option("-t", "--timeout", type="float", default=pyro_app.comm_timeout,
help="Pyro timeout value to use (COMMTIMEOUT setting, default=%default)")
options, args = parser.parse_args(args)
if options.pyrokey or options.gatewaykey:
warnings.warn("using -k and/or -g to supply keys on the command line is a security problem "
"and is deprecated since Pyro 4.72. See the documentation for an alternative.")
if "PYRO_HMAC_KEY" in | |
<filename>model_natasha/unet_model.py
# full assembly of the sub-parts to form the complete net
import pretrainedmodels
import torchvision
from .unet_parts import *
class CSE(nn.Module):
def __init__(self, in_ch, r):
super(CSE, self).__init__()
self.linear_1 = nn.Linear(in_ch, in_ch // r)
self.linear_2 = nn.Linear(in_ch // r, in_ch)
def forward(self, x):
input_x = x
x = x.view(*(x.shape[:-2]), -1).mean(-1)
x = F.relu(self.linear_1(x), inplace=True)
x = self.linear_2(x)
x = x.unsqueeze(-1).unsqueeze(-1)
x = F.sigmoid(x)
x = input_x * x
return x
class SSE(nn.Module):
def __init__(self, in_ch):
super(SSE, self).__init__()
self.conv = nn.Conv2d(in_ch, 1, kernel_size=1, stride=1)
def forward(self, x):
input_x = x
x = self.conv(x)
x = F.sigmoid(x)
x = input_x * x
return x
class SCSE(nn.Module):
    """Concurrent spatial and channel squeeze-and-excitation.

    Sums the outputs of the channel (CSE) and spatial (SSE) gates.
    """

    def __init__(self, in_ch, r):
        super(SCSE, self).__init__()
        self.cSE = CSE(in_ch, r)
        self.sSE = SSE(in_ch)

    def forward(self, x):
        return self.cSE(x) + self.sSE(x)
class SqEx(nn.Module):
def __init__(self, n_features, reduction=16):
super(SqEx, self).__init__()
if n_features % reduction != 0:
raise ValueError('n_features must be divisible by reduction (default = 16)')
self.linear1 = nn.Linear(n_features, n_features // reduction, bias=True)
self.nonlin1 = nn.ReLU(inplace=True)
self.linear2 = nn.Linear(n_features // reduction, n_features, bias=True)
self.nonlin2 = nn.Sigmoid()
def forward(self, x):
y = F.avg_pool2d(x, kernel_size=x.size()[2:4])
y = y.permute(0, 2, 3, 1)
y = self.nonlin1(self.linear1(y))
y = self.nonlin2(self.linear2(y))
y = y.permute(0, 3, 1, 2)
y = x * y
return y
class ConvBnRelu(nn.Module):
def __init__(self, in_channels, out_channels):
super().__init__()
self.conv = nn.Sequential(nn.Conv2d(in_channels, out_channels, 3, padding=1),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True)
)
def forward(self, x):
return self.conv(x)
class UNet(nn.Module):
    """Plain U-Net built from the project's Inconv/Down/Up/Outconv parts.

    When ``with_depth`` is True, one extra channel (a scalar depth map) is
    expected to be concatenated at the bottleneck, so the first Up block
    takes 1024 + 1 input channels instead of 1024.
    """
    def __init__(self, n_channels, n_classes, with_depth=False):
        super(UNet, self).__init__()
        self.inc = Inconv(n_channels, 64)
        self.down1 = Down(64, 128)
        self.down2 = Down(128, 256)
        self.down3 = Down(256, 512)
        self.down4 = Down(512, 512)
        if with_depth:
            # +1 channel for the depth map concatenated onto x5 in forward()
            self.up1 = Up(1024 + 1, 256)
        else:
            self.up1 = Up(1024, 256)
        self.up2 = Up(512, 128)
        self.up3 = Up(256, 64)
        self.up4 = Up(128, 64)
        self.outc = Outconv(64, n_classes)
    def forward(self, x, depth=None):
        # we need an input image which sizes are divisible by 32
        # [8, 4, 101, 101] -> [8, 64, 101, 101]
        x1 = self.inc(x)
        # [8, 64, 101, 101]-> [8, 128, 50, 50]
        x2 = self.down1(x1)
        # [8, 128, 50, 50] -> [8, 256, 25, 25]
        x3 = self.down2(x2)
        # [8, 256, 25, 25] -> [8, 512, 12, 12]
        x4 = self.down3(x3)
        # [8, 512, 12, 12] -> [8, 512, 6, 6]
        x5 = self.down4(x4)
        if depth is not None:
            # crop the depth map to the bottleneck's spatial size and append
            # it as an extra channel (matches the +1 in up1's input channels)
            depth_layer = depth.unsqueeze(dim=1)[:, :, :x5.shape[2], :x5.shape[3]]
            x5 = torch.cat((x5, depth_layer), dim=1)
        x = self.up1(x5, x4) # [8, 256, 12, 12]
        x = self.up2(x, x3) # [8, 128, 24, 24]
        x = self.up3(x, x2) # [8, 64, 48, 48]
        x = self.up4(x, x1) # [8, 64, 96, 96]
        x = self.outc(x) # [8, 2, 96, 96]
        return x
def conv3x3(in_, out):
    """Return a 3x3 convolution with padding 1 (keeps the spatial size)."""
    return nn.Conv2d(in_, out, kernel_size=3, padding=1)
class ConvRelu(nn.Module):
    """3x3 convolution followed by an in-place ReLU."""

    def __init__(self, in_, out):
        super().__init__()
        self.conv = conv3x3(in_, out)
        self.activation = nn.ReLU(inplace=True)

    def forward(self, x):
        return self.activation(self.conv(x))
class DecoderBlockV2(nn.Module):
    """Decoder stage that doubles the spatial resolution.

    Either a ConvRelu + 4x4/stride-2 transposed convolution (``is_deconv``)
    or bilinear upsampling followed by two ConvRelu layers.
    """

    def __init__(self, in_channels, middle_channels, out_channels, is_deconv=True):
        super(DecoderBlockV2, self).__init__()
        self.in_channels = in_channels
        if is_deconv:
            # Deconvolution parameters chosen to avoid checkerboard artifacts,
            # following https://distill.pub/2016/deconv-checkerboard/
            upsample = nn.Sequential(
                ConvRelu(in_channels, middle_channels),
                nn.ConvTranspose2d(middle_channels, out_channels, kernel_size=4,
                                   stride=2, padding=1),
                nn.ReLU(inplace=True),
            )
        else:
            upsample = nn.Sequential(
                nn.Upsample(scale_factor=2, mode='bilinear'),
                ConvRelu(in_channels, middle_channels),
                ConvRelu(middle_channels, out_channels),
            )
        self.block = upsample

    def forward(self, x):
        return self.block(x)
class UNetVGG16(nn.Module):
    """PyTorch U-Net model using VGG16 encoder.
    UNet: https://arxiv.org/abs/1505.04597
    VGG: https://arxiv.org/abs/1409.1556
    Proposed by <NAME> and <NAME>: https://github.com/ternaus/TernausNet
    Args:
        num_classes (int): Number of output classes.
        num_filters (int, optional): Number of filters in the last layer of decoder. Defaults to 32.
        dropout_2d (float, optional): Probability factor of dropout layer before output layer. Defaults to 0.2.
        pretrained (bool, optional):
            False - no pre-trained weights are being used.
            True  - VGG encoder is pre-trained on ImageNet.
            Defaults to False.
        is_deconv (bool, optional):
            False: bilinear interpolation is used in decoder.
            True: deconvolution is used in decoder.
            Defaults to False.
        with_depth (bool, optional): if True, one extra scalar depth channel is
            concatenated onto conv5 in forward(), so center/dec5 take +1 input
            channels. Defaults to False.
    """
    def __init__(self, num_classes=1, num_filters=32, dropout_2d=0.2,
                 pretrained=False, is_deconv=False, with_depth=False):
        super().__init__()
        self.num_classes = num_classes
        self.dropout_2d = dropout_2d
        self.pool = nn.MaxPool2d(2, 2)
        self.encoder = torchvision.models.vgg16(pretrained=pretrained).features
        self.relu = nn.ReLU(inplace=True)
        # The encoder indices below address the conv layers of vgg16.features;
        # a fresh BatchNorm2d is inserted after every conv (plain vgg16 has
        # none, so these BN layers start untrained even when pretrained=True).
        self.conv1 = nn.Sequential(self.encoder[0],
                                   nn.BatchNorm2d(num_features=self.encoder[0].out_channels),
                                   self.relu,
                                   self.encoder[2],
                                   nn.BatchNorm2d(num_features=self.encoder[2].out_channels),
                                   self.relu)
        self.conv2 = nn.Sequential(self.encoder[5],
                                   nn.BatchNorm2d(num_features=self.encoder[5].out_channels),
                                   self.relu,
                                   self.encoder[7],
                                   nn.BatchNorm2d(num_features=self.encoder[7].out_channels),
                                   self.relu)
        self.conv3 = nn.Sequential(self.encoder[10],
                                   nn.BatchNorm2d(num_features=self.encoder[10].out_channels),
                                   self.relu,
                                   self.encoder[12],
                                   nn.BatchNorm2d(num_features=self.encoder[12].out_channels),
                                   self.relu,
                                   self.encoder[14],
                                   nn.BatchNorm2d(num_features=self.encoder[14].out_channels),
                                   self.relu)
        self.conv4 = nn.Sequential(self.encoder[17],
                                   nn.BatchNorm2d(num_features=self.encoder[17].out_channels),
                                   self.relu,
                                   self.encoder[19],
                                   nn.BatchNorm2d(num_features=self.encoder[19].out_channels),
                                   self.relu,
                                   self.encoder[21],
                                   nn.BatchNorm2d(num_features=self.encoder[21].out_channels),
                                   self.relu)
        self.conv5 = nn.Sequential(self.encoder[24],
                                   nn.BatchNorm2d(num_features=self.encoder[24].out_channels),
                                   self.relu,
                                   self.encoder[26],
                                   nn.BatchNorm2d(num_features=self.encoder[26].out_channels),
                                   self.relu,
                                   self.encoder[28],
                                   nn.BatchNorm2d(num_features=self.encoder[28].out_channels),
                                   self.relu)
        if with_depth:
            # +1 input channel for the depth map appended to conv5 in forward()
            self.center = DecoderBlockV2(512 + 1, num_filters * 8 * 2, num_filters * 8, is_deconv)
            self.dec5 = DecoderBlockV2(512 + num_filters * 8 + 1, num_filters * 8 * 2, num_filters * 8, is_deconv)
        else:
            self.center = DecoderBlockV2(512, num_filters * 8 * 2, num_filters * 8, is_deconv)
            self.dec5 = DecoderBlockV2(512 + num_filters * 8, num_filters * 8 * 2, num_filters * 8, is_deconv)
        self.dec4 = DecoderBlockV2(512 + num_filters * 8, num_filters * 8 * 2, num_filters * 8, is_deconv)
        self.dec3 = DecoderBlockV2(256 + num_filters * 8, num_filters * 4 * 2, num_filters * 2, is_deconv)
        self.dec2 = DecoderBlockV2(128 + num_filters * 2, num_filters * 2 * 2, num_filters, is_deconv)
        self.dec1 = ConvRelu(64 + num_filters, num_filters)
        self.final = nn.Conv2d(num_filters, num_classes, kernel_size=1)
    def forward(self, x, depth=None):
        # encoder path; each stage halves the spatial size via max pooling
        conv1 = self.conv1(x)
        conv2 = self.conv2(self.pool(conv1))
        conv3 = self.conv3(self.pool(conv2))
        conv4 = self.conv4(self.pool(conv3))
        conv5 = self.conv5(self.pool(conv4))
        if depth is not None:
            # crop the depth map to conv5's spatial size and append as a channel
            depth_layer = depth.unsqueeze(dim=1)[:, :, :conv5.shape[2], :conv5.shape[3]]
            conv5 = torch.cat((conv5, depth_layer), dim=1)
        center = self.center(self.pool(conv5))
        # decoder path with skip connections from the encoder stages
        dec5 = self.dec5(torch.cat([center, conv5], 1))
        dec4 = self.dec4(torch.cat([dec5, conv4], 1))
        dec3 = self.dec3(torch.cat([dec4, conv3], 1))
        dec2 = self.dec2(torch.cat([dec3, conv2], 1))
        dec1 = self.dec1(torch.cat([dec2, conv1], 1))
        return self.final(F.dropout2d(dec1, p=self.dropout_2d))
class DecoderBlock(nn.Module):
    """ConvRelu plus a stride-2 transposed convolution that doubles H and W."""

    def __init__(self, in_channels, middle_channels, out_channels):
        super().__init__()
        self.block = nn.Sequential(
            ConvRelu(in_channels, middle_channels),
            nn.ConvTranspose2d(middle_channels, out_channels, kernel_size=3,
                               stride=2, padding=1, output_padding=1),
            nn.ReLU(inplace=True),
        )

    def forward(self, x):
        return self.block(x)
class UNet11(nn.Module):
    def __init__(self, num_classes=1, num_filters=32, pretrained=False, with_depth=False):
        """
        :param num_classes:
        :param num_filters:
        :param pretrained:
            False - no pre-trained network is used
            True - encoder is pre-trained with VGG11
        :param with_depth: if True, one extra scalar depth channel is
            concatenated onto conv5 in forward(), so center/dec5 take +1
            input channels.
        """
        super().__init__()
        self.pool = nn.MaxPool2d(2, 2)
        self.encoder = torchvision.models.vgg11(pretrained=pretrained).features
        # Indices address the layers of vgg11.features; the single shared ReLU
        # (encoder[1]) is reused after every conv in forward().
        self.relu = self.encoder[1]
        self.conv1 = self.encoder[0]
        self.conv2 = self.encoder[3]
        self.conv3s = self.encoder[6]
        self.conv3 = self.encoder[8]
        self.conv4s = self.encoder[11]
        self.conv4 = self.encoder[13]
        self.conv5s = self.encoder[16]
        self.conv5 = self.encoder[18]
        if with_depth:
            # +1 input channel for the depth map appended to conv5 in forward()
            self.center = DecoderBlock(num_filters * 8 * 2 + 1, num_filters * 8 * 2, num_filters * 8)
            self.dec5 = DecoderBlock(num_filters * (16 + 8) + 1, num_filters * 8 * 2, num_filters * 8)
        else:
            self.center = DecoderBlock(num_filters * 8 * 2, num_filters * 8 * 2, num_filters * 8)
            self.dec5 = DecoderBlock(num_filters * (16 + 8), num_filters * 8 * 2, num_filters * 8)
        self.dec4 = DecoderBlock(num_filters * (16 + 8), num_filters * 8 * 2, num_filters * 4)
        self.dec3 = DecoderBlock(num_filters * (8 + 4), num_filters * 4 * 2, num_filters * 2)
        self.dec2 = DecoderBlock(num_filters * (4 + 2), num_filters * 2 * 2, num_filters)
        self.dec1 = ConvRelu(num_filters * (2 + 1), num_filters)
        self.final = nn.Conv2d(num_filters, num_classes, kernel_size=1)
    def forward(self, x, depth=None):
        # encoder path: VGG11 convs with max pooling between stages
        conv1 = self.relu(self.conv1(x))
        conv2 = self.relu(self.conv2(self.pool(conv1)))
        conv3s = self.relu(self.conv3s(self.pool(conv2)))
        conv3 = self.relu(self.conv3(conv3s))
        conv4s = self.relu(self.conv4s(self.pool(conv3)))
        conv4 = self.relu(self.conv4(conv4s))
        conv5s = self.relu(self.conv5s(self.pool(conv4)))
        conv5 = self.relu(self.conv5(conv5s))
        if depth is not None:
            # crop the depth map to conv5's spatial size and append as a channel
            depth_layer = depth.unsqueeze(dim=1)[:, :, :conv5.shape[2], :conv5.shape[3]]
            conv5 = torch.cat((conv5, depth_layer), dim=1)
        center = self.center(self.pool(conv5))
        # decoder path with skip connections from the encoder stages
        dec5 = self.dec5(torch.cat([center, conv5], 1))
        dec4 = self.dec4(torch.cat([dec5, conv4], 1))
        dec3 = self.dec3(torch.cat([dec4, conv3], 1))
        dec2 = self.dec2(torch.cat([dec3, conv2], 1))
        dec1 = self.dec1(torch.cat([dec2, conv1], 1))
        return self.final(dec1)
class UNetResNet(nn.Module):
"""PyTorch U-Net model using ResNet(34, 101 or 152) encoder.
UNet: https://arxiv.org/abs/1505.04597
ResNet: https://arxiv.org/abs/1512.03385
Proposed by <NAME>: https://www.linkedin.com/in/al-buslaev/
Args:
encoder_depth (int): Depth of a ResNet encoder (34, 101 or 152).
num_classes (int): Number of output classes.
num_filters (int, optional): Number of filters in the last layer of decoder. Defaults to 32.
dropout_2d (float, optional): Probability factor of dropout layer before output layer. Defaults to 0.2.
pretrained (bool, optional):
False - no pre-trained weights are being used.
True - ResNet encoder is pre-trained on ImageNet.
Defaults to False.
is_deconv (bool, optional):
False: bilinear interpolation is used in decoder.
True: deconvolution is used in decoder.
Defaults to False.
"""
def __init__(self, encoder_depth, num_classes, num_filters=32, dropout_2d=0.2,
pretrained=False, is_deconv=False, with_depth=False):
super().__init__()
self.num_classes = num_classes
self.dropout_2d = dropout_2d
# self.sqex1 = SCSE(in_ch=64, r=16)
# self.sqex2 = SCSE(in_ch=256, r=16)
| |
<reponame>Haiiliin/PyAbaqus
from abaqusConstants import *
from ..BasicGeometry.Face import Face
from ..Feature.Feature import Feature as FeatureBase
from ..Mesh.MeshFace import MeshFace
class Feature(FeatureBase):
"""The following commands operate on Feature objects. For more information about the
Feature object, see Feature object.
Notes
-----
This object can be accessed by:
.. code-block:: python
import assembly
"""
    @staticmethod
    def AttachmentLines(name: str, points: int, sourceFaces: tuple[Face], sourceElementFaces: tuple[MeshFace],
                        targetFaces: tuple[Face], targetElementFaces: tuple[MeshFace],
                        projectionMethod: SymbolicConstant = PROJECT_BY_PROXIMITY,
                        projectionDirStartPt: float = None, projectionDirEndPt: float = None,
                        sourceToTargetProjMethod: SymbolicConstant = PROJECT_BY_NUMBER,
                        numProjections: str = '', projectionDistance: str = '',
                        flipSourceToTargetDirection: Boolean = OFF, setName: str = '') -> 'Feature':
        """This method creates a Feature object by creating attachment lines between the given set
        of source and target faces. The given points are first projected onto the source faces
        using the specified projection method. The points are then projected normal to the
        source faces onto the target faces. The user can specify the number of projections or
        the length of projection vector for projection onto the target faces. The lines are then
        created between the source face and the closest target face. Subsequent lines are
        created between the target faces.
        Notes
        -----
        This function can be accessed by:
        .. code-block:: python
            mdb.models[name].rootAssembly.AttachmentLines
        Parameters
        ----------
        name
            A String specifying a unique Feature name.
        points
            A tuple of points. Each point can be a ConstrainedSketchVertex, Datum point, Reference point, an
            Attachment point, orphan mesh Node, or an Interesting point object.
        sourceFaces
            A sequence of Face objects specifying the geometry faces onto which the points are to be
            projected.
        sourceElementFaces
            A sequence of MeshFace objects specifying the orphan mesh element faces onto which the
            points are to be projected.
        targetFaces
            A sequence of Face objects specifying the geometry faces on which the attachment lines
            will terminate.
        targetElementFaces
            A sequence of MeshFace objects specifying the orphan mesh element faces on which the
            attachment lines will terminate.
        projectionMethod
            A SymbolicConstant specifying the method to be used to project onto source faces.
            Possible values are PROJECT_BY_PROXIMITY and PROJECT_BY_DIRECTION. The default value is
            PROJECT_BY_PROXIMITY.
        projectionDirStartPt
            A point specifying the start point of the projection direction to project onto source
            faces. The point can be a ConstrainedSketchVertex, Datum point, Reference point, Attachment point, orphan
            mesh Node, Interesting Point object, or a tuple of Floats representing the coordinates
            of a point.
        projectionDirEndPt
            A point specifying the end point of the projection direction to project onto source
            faces. The point can be a ConstrainedSketchVertex, Datum point, Reference point, Attachment point, orphan
            mesh Node, Interesting point object, or a tuple of Floats representing the coordinates
            of a point.
        sourceToTargetProjMethod
            A SymbolicConstant specifying the method to be used to project onto target faces.
            Possible values are PROJECT_BY_NUMBER and PROJECT_BY_DISTANCE. The default value is
            PROJECT_BY_NUMBER.
        numProjections
            An integer specifying the maximum number of layers each point should be projected onto
            when the source to target projection method is PROJECT_BY_NUMBER.
        projectionDistance
            A float specifying the maximum distance of the projection vector when the source to
            target projection method is PROJECT_BY_DISTANCE.
        flipSourceToTargetDirection
            A Boolean specifying whether the computed projection direction from the source to the
            target faces should be flipped.
        setName
            A String specifying a unique set name.
        Returns
        -------
        A Feature object.
        """
        # Stub: construct and return a new Feature object (the arguments are
        # not inspected here).
        return Feature()
    @staticmethod
    def Coaxial(movableAxis: str, fixedAxis: str, flip: Boolean) -> 'Feature':
        """This method moves an instance so that its selected face is coaxial with the selected
        face of a fixed instance.
        Notes
        -----
        This function can be accessed by:
        .. code-block:: python
            mdb.models[name].rootAssembly.Coaxial
        Parameters
        ----------
        movableAxis
            A cylindrical or conical Face on the part instance to be moved.
        fixedAxis
            A cylindrical or conical Face on the part instance that remains fixed.
        flip
            A Boolean specifying whether the axes are forward aligned (OFF) or reverse aligned (ON).
        Returns
        -------
        A Feature object.
        Raises
        ------
        AbaqusException.
        """
        # Stub: construct and return a new Feature object.
        return Feature()
    @staticmethod
    def CoincidentPoint(movablePoint: str, fixedPoint: str) -> 'Feature':
        """This method moves an instance so that a specified point is coincident with a specified
        point of a fixed instance.
        Notes
        -----
        This function can be accessed by:
        .. code-block:: python
            mdb.models[name].rootAssembly.CoincidentPoint
        Parameters
        ----------
        movablePoint
            A ConstrainedSketchVertex, a Datum point, or a ReferencePoint or a mesh node from an orphan mesh on the
            part instance to be moved.
        fixedPoint
            A ConstrainedSketchVertex, a Datum point, or a ReferencePoint or a mesh node from an orphan mesh on the
            part instance to remain fixed.
        Returns
        -------
        feature: Feature
            A Feature object
        """
        # Stub: construct and return a new Feature object.
        return Feature()
    @staticmethod
    def EdgeToEdge(movableAxis: str, fixedAxis: str, flip: Boolean, clearance: float) -> 'Feature':
        """This method moves an instance so that its edge is parallel to an edge of a fixed
        instance.
        Notes
        -----
        This function can be accessed by:
        .. code-block:: python
            mdb.models[name].rootAssembly.EdgeToEdge
        Parameters
        ----------
        movableAxis
            A straight Edge, a Datum axis, or an element edge from an orphan mesh on the part
            instance to be moved.
        fixedAxis
            A straight Edge, a Datum axis, or an element edge from an orphan mesh on the part
            instance to remain fixed.
        flip
            A Boolean specifying whether the edges are forward aligned (OFF) or reverse aligned
            (ON).
        clearance
            A Float specifying the distance between the two edges (for two-dimensional and
            axisymmetric instances only).
        Returns
        -------
        A Feature Object.
        Raises
        ------
        AbaqusException.
        """
        # Stub: construct and return a new Feature object.
        return Feature()
    @staticmethod
    def FaceToFace(movablePlane: str, fixedPlane: str, flip: Boolean, clearance: float) -> 'Feature':
        """This method moves an instance so that its face is coincident with a face of a fixed
        instance.
        Notes
        -----
        This function can be accessed by:
        .. code-block:: python
            mdb.models[name].rootAssembly.FaceToFace
        Parameters
        ----------
        movablePlane
            A planar face, a Datum plane, or a face from an orphan mesh on the part instance to be
            moved.
        fixedPlane
            A planar face, a Datum plane, or a face from an orphan mesh on the part instance to
            remain fixed.
        flip
            A Boolean specifying whether the normals to the faces are forward aligned (OFF) or
            reverse aligned (ON).
        clearance
            A Float specifying the distance between the two faces.
        Returns
        -------
        A Feature Object.
        Raises
        ------
        AbaqusException.
        """
        # Stub: construct and return a new Feature object.
        return Feature()
    @staticmethod
    def ParallelCsys(movableCsys: str, fixedCsys: str) -> 'Feature':
        """This method moves an instance so that its Datum coordinate system is parallel to a Datum
        coordinate system of a fixed instance.
        Notes
        -----
        This function can be accessed by:
        .. code-block:: python
            mdb.models[name].rootAssembly.ParallelCsys
        Parameters
        ----------
        movableCsys
            A Datum coordinate system on the part instance to be moved.
        fixedCsys
            A Datum coordinate system on the part instance to remain fixed.
        Returns
        -------
        A Feature object.
        Raises
        ------
        AbaqusException.
        """
        # Stub: construct and return a new Feature object.
        return Feature()
    @staticmethod
    def ParallelEdge(movableAxis: str, fixedAxis: str, flip: Boolean) -> 'Feature':
        """This method moves an instance so that its edge is parallel to an edge of a fixed
        instance.
        Notes
        -----
        This function can be accessed by:
        .. code-block:: python
            mdb.models[name].rootAssembly.ParallelEdge
        Parameters
        ----------
        movableAxis
            A straight Edge, a Datum axis, or an element edge from an orphan mesh on the part
            instance to be moved.
        fixedAxis
            A straight Edge, a Datum axis, or an element edge from an orphan mesh on the part
            instance to remain fixed.
        flip
            A Boolean specifying whether the edges are forward aligned (OFF) or reverse aligned
            (ON).
        Returns
        -------
        A Feature object.
        Raises
        ------
        AbaqusException.
        """
        # Stub: construct and return a new Feature object.
        return Feature()
@staticmethod
def ParallelFace(movablePlane: str, fixedPlane: str, flip: Boolean) -> 'Feature':
"""This method moves an instance so that its face is parallel to a face of a fixed
instance.
Notes
-----
This function can be accessed by:
.. code-block:: python
mdb.models[name].rootAssembly.AttachmentLines
Parameters
----------
movablePlane
A planar face, a Datum plane, or a face from an orphan mesh on the part instance to be
moved.
fixedPlane
| |
8],
[7, 0, 5, 6, 9, 4, 1, 3, 2],
[2, 1, 9, 3, 8, 5, 4, 6, 7],
[4, 6, 2, 5, 3, 1, 8, 7, 9],
[9, 3, 1, 2, 7, 8, 6, 4, 5],
[8, 5, 7, 9, 4, 6, 2, 1, 3],
[5, 9, 8, 4, 1, 3, 7, 2, 6],
[6, 2, 4, 7, 5, 9, 3, 8, 1],
[1, 7, 3, 8, 6, 2, 5, 9, 4]]
partial_spec_str = '''
0 4 6 1 2 7 9 5 8
7 0 5 6 9 4 1 3 2
2 1 9 3 8 5 4 6 7
4 6 2 5 3 1 8 7 9
9 3 1 2 7 8 6 4 5
8 5 7 9 4 6 2 1 3
5 9 8 4 1 3 7 2 6
6 2 4 7 5 9 3 8 1
1 7 3 8 6 2 5 9 4
'''
partial_1_spec_str = '''
3 0 6 1 2 7 9 5 8
7 0 0 6 9 4 1 3 2
2 1 9 3 8 5 4 6 7
4 6 2 5 3 1 8 7 9
9 3 1 2 7 8 6 4 5
0 0 0 9 0 6 2 1 3
5 9 8 4 1 3 7 2 6
6 2 4 7 5 9 3 8 1
1 7 3 8 6 2 5 9 0
'''
full_spec_lrl = [
[3, 4, 6, 1, 2, 7, 9, 5, 8],
[7, 8, 5, 6, 9, 4, 1, 3, 2],
[2, 1, 9, 3, 8, 5, 4, 6, 7],
[4, 6, 2, 5, 3, 1, 8, 7, 9],
[9, 3, 1, 2, 7, 8, 6, 4, 5],
[8, 5, 7, 9, 4, 6, 2, 1, 3],
[5, 9, 8, 4, 1, 3, 7, 2, 6],
[6, 2, 4, 7, 5, 9, 3, 8, 1],
[1, 7, 3, 8, 6, 2, 5, 9, 4]
]
def test_empty_board(self) :
board = Board() # No cells should be solved
# Confirm none solved
for cell in board.cells :
self.assertEqual (cell.value, Cell.unsolved_cell_value,
"Cell# %d should be unsolved, it is: %d" % (cell.cell_num, cell.value))
# All values should be possible
self.assertSetEqual (cell.possible_values, Cell.all_cell_values,
"Cell# %d: num_possibles should have all possible values" %cell.cell_num)
def test_board_name(self) :
name = "I never know what to call you"
board = Board(None, name)
self.assertEqual(name, board.name)
self.assertEqual(board.description, "") # No description supplied, default ""
name = "The world is a tough place"
desc = "Isn't that cynical?"
board = Board(None, name, desc)
self.assertEqual(name, board.name)
self.assertEqual(board.description, desc)
def test_unique_board_names(self) :
# Generate a bunch of boards and make sure names are all different
names_so_far = set()
for cnt in range(100) :
board = Board()
assert board.name not in names_so_far
names_so_far.add(board.name)
def test_str_constructor(self) :
''' Make sure string and list of row-list generate same board '''
lrl_spec = self.partial_spec_lrl
str_spec = self.partial_spec_str
self.assertEqual( Board(lrl_spec), Board(str_spec),
"string and list of row-lists generate different Boards")
    def test_copy_constructor(self) :
        """Board(board) must produce an equal Board named '<orig>-cp.N'.

        NOTE: the expected copy counter ('-cp.1', '-cp.2', ...) depends on how
        many copies of `in_board` were made before each assertion, so the
        statement order in this test is significant.
        """
        # Make a couple of copies and insure they copy properly
        # and empty board
        in_board = Board() # empty
        out_board = Board(in_board)
        self.assertEqual(in_board, out_board)
        # random filled in board
        in_board = Board(self.partial_spec_str)
        out_board = Board(in_board)
        self.assertEqual(in_board, out_board) # produces same board
        self.assertEqual(out_board.name, in_board.name + "-cp.1")
        self.assertEqual(in_board.description, out_board.description)
        # copy # is correct
        # We have already made 1 copy
        for copy_cnt in range(2, 10) :
            out_board = Board(in_board)
            self.assertEqual (out_board.name, in_board.name + "-cp." + str(copy_cnt))
        # Make sure a specified name overrides picking from copied board
        speced_name = "We picked it"
        out_board = Board(in_board, speced_name)
        self.assertEqual( out_board.name, speced_name )
        self.assertEqual( out_board.description,
                          in_board.description)
        # Make sure a specified description overrides picking from copied board
        speced_desc = "Just for unittesting!"
        out_board = Board(in_board, None, speced_desc)
        self.assertEqual( out_board.description, speced_desc )
def test_subset(self) :
# What we test with
some_board_spec = self.full_spec_lrl
some_board = Board(some_board_spec, "some_board" )
# An empty board is a subset of everything
empty_board=Board()
self.assertTrue ( empty_board.is_subset_of(some_board))
# Everyboard is a subset of itself
self.assertTrue (some_board.is_subset_of(some_board))
# Create a legal subset by zeroing some elements
subset_spec = copy.deepcopy(some_board_spec)
subset_spec[0][3] = 0
subset_spec[5][0] = 0
subset_spec[2][3] = 0
subset_spec[8][1] = 0
subset_spec[4][0] = 0
subset_board=Board(subset_spec, "subset_board", "For unit testing")
self.assertTrue (subset_board.is_subset_of(some_board))
# Create a non-subset
non_subset_spec = [ [0]*9, [0]*9, [0]*9,
[0]*9, [0]*9, [0]*9,
[0]*9, [0]*9,
[0]*8 + [9] # was a 4 in some_board
]
non_subset_board=Board(non_subset_spec, "non subset board")
self.assertFalse ( non_subset_board.is_subset_of(some_board) )
def test_str_to_row_lists_errors(self) :
# Number of values expected
board_size = RCB_SIZE * RCB_SIZE
board = Board() # empty board
too_few_spec = '0' * (board_size-1)
self.assertRaises(ExcBadStrToConvert, str_to_list_of_rows,too_few_spec)
# Test too many digits in str
too_many_spec = '0' * (board_size+1)
self.assertRaises(ExcBadStrToConvert, str_to_list_of_rows, too_many_spec)
def test_solve_stats(self) :
# Can't really validate the solve stats
# Just make sure it exists
# unsolvable (empty board)
board = Board()
self.assertIsNone ( board.solve_stats.solve_time_secs )
board.solve()
self.assertIsNotNone( board.solve_stats.solve_time_secs )
# solvable
board_spec = self.full_spec_lrl # fully populated
board = Board(board_spec)
self.assertIsNone ( board.solve_stats.solve_time_secs )
board.solve()
self.assertIsNotNone( board.solve_stats.solve_time_secs )
def test_cell_all_neighbors(self) :
# By rights it should be in the unittest for cell.py
# but it can't because it would mean cyclical imports
# so... we test it here
board = Board()
# Test a few at random
cell = board[3][7]
cell_neighbors_are = cell.all_neighbors()
cell_neighbors_should_be = [board.cells[idx] for idx in range(27, 36 ) if idx != cell.cell_num] # row
cell_neighbors_should_be += [board.cells[idx] for idx in range( 7, 80, 9) if idx != cell.cell_num] # col
cell_neighbors_should_be += [board.cells[idx] for idx in [42, 44, 51, 53]] # blk: that aren't in row/col
cell_neighbors_should_be = set( cell_neighbors_should_be)
self.assertEqual( cell_neighbors_are, cell_neighbors_should_be)
cell = board[2][4]
cell_neighbors_are = cell.all_neighbors()
cell_neighbors_should_be = [board.cells[idx] for idx in range(18, 27 ) if idx != cell.cell_num] # row
cell_neighbors_should_be += [board.cells[idx] for idx in range( 4, 81, 9) if idx != cell.cell_num] # col
cell_neighbors_should_be += [board.cells[idx] for idx in [3,5,12,14]] # blk: that aren't in row/col
cell_neighbors_should_be = set( cell_neighbors_should_be)
self.assertEqual( cell_neighbors_are, cell_neighbors_should_be)
def test_is_solved_and_cnt(self) :
# empty board
board = Board()
self.assertFalse ( board.is_solved() )
self.assertEqual ( board.num_unsolved(), RCB_SIZE * RCB_SIZE )
# Full populated board
input_spec = self.full_spec_lrl
board = Board(input_spec)
self.assertTrue ( board.is_solved() )
self.assertEqual ( board.num_unsolved(), 0 )
# Partially populated board
input_spec = self.partial_1_spec_str
board = Board(input_spec)
self.assertFalse ( board.is_solved() )
self.assertEqual ( board.num_unsolved(), 8 )
def test___eq__(self) :
# Make some boards
empty_board=Board()
board_a = Board(self.partial_spec_str)
board_b = Board(self.partial_1_spec_str)
all_boards = [empty_board, board_a, board_b]
# Board always equals itself
for board in all_boards :
self.assertEqual(board, board)
# None of these boards should equal each other
for ( left_board, right_board) in itertools.combinations( all_boards, 2) :
self.assertNotEqual( left_board, right_board)
# Copied boards should match
board_a_copy = Board(board_a)
self.assertEqual (board_a, board_a_copy)
# Now we mildly diddle one cell and verify not equal
# 0 1 2 3 4 5 6 7 8
# Row#2: 7 0 0 6 9 4 1 3 2
# Change a Cell value
board_diddled = Board(board_b)
board_diddled[2][1].value = 4
self.assertNotEqual( board_a, board_diddled )
# Change a Cell possible_values
board_diddled = Board(board_b)
board_diddled[2][2].possible_values = set([4,5])
self.assertNotEqual( board_a, board_diddled )
def test_common_cell_rcbs(self) :
# Former note:
# we are testing Cell.common_rcbs() which can't
# be testing in cell.py because it doesn't know
# about Boards to avoid a circular import loop
# Well... that isn't true any more. It is now
# tested in Test_cell, but it doesn't hurt to test it again
board=Board()
base_cell = board[4][5]
cell_same_row = board[4][6]
cell_same_col = board[2][5]
cell_same_blk = board[3][3]
cell_with_none = board[8][8]
cell_same_row_blk = board[4][4]
cell_same_col_blk = board[3][5]
all_test_cells = [cell_same_row,
cell_same_col,
cell_same_blk,
cell_with_none
| |
<reponame>knaveofdiamonds/be-schedule<filename>schedule.py
from argparse import ArgumentParser
from itertools import islice
import json
import math
import sys
import pulp
def window(seq, n=2):
    """Return a sliding window (of width n) over data from the iterable.

    s -> (s0, s1, ..., s[n-1]), (s1, s2, ..., sn), ...

    Yields nothing when the iterable holds fewer than n items.
    """
    # BUG FIX: the second summary line used to be a separate bare string
    # statement (a no-op), not part of the docstring; merged into one docstring.
    it = iter(seq)
    result = tuple(islice(it, n))
    if len(result) == n:
        yield result
    for elem in it:
        # Slide: drop the oldest element, append the newest
        result = result[1:] + (elem,)
        yield result
class GameDatabase:
    """Lookup layer over the games JSON: player counts, playtimes, popularity.

    Unknown games fall back to `self.default`. Popularity values are
    preprocessed into a delta-encoded list (see
    _preprocess_game_popularities) so the LP objective can reward each
    incremental player.
    """
    def __init__(self, games):
        # games: dict mapping game name -> game record (mutated in place by
        # the popularity preprocessing below).
        self.games = games
        # Fallback record for games not present in the database.
        self.default = {
            'min_players': 3,
            'max_players': 4,
            'min_playtime': 240,
            'max_playtime': 240,
            'adjusted_popularity': [
                0.09 * 3,
                0.09,
            ]
        }
        self._preprocess_game_popularities()
    @classmethod
    def from_file(cls, path):
        """Alternate constructor: load the game records from a JSON file."""
        with open(path) as f:
            return cls({g['name']: g for g in json.load(f)})
    def min_players(self, game):
        """Minimum player count for `game` (or the default record's)."""
        return self._game(game)['min_players']
    def max_players(self, game, session=None):
        """Maximum player count, optionally capped by the session length.

        Playtime is modelled as linear in the number of players beyond the
        minimum; the cap is how many extra players still fit in
        session['length'] minutes.
        """
        g = self._game(game)
        if (
            session is None or
            g['min_players'] == g['max_players'] or
            g['min_playtime'] == g['max_playtime']
        ):
            return g['max_players']
        # playtime = a + b * number_of_players_beyond_minimum
        #
        # so:
        #
        # (playtime - a) / b = number_of_players_beyond_minimum
        a = g['min_playtime']
        b = (
            (g['max_playtime'] - g['min_playtime']) /
            (g['max_players'] - g['min_players'])
        )
        return min(
            g['max_players'],
            g['min_players'] + math.floor((session['length'] - a) / b),
        )
    def min_playtime(self, game):
        """Shortest playtime for `game` (or the default record's)."""
        return self._game(game)['min_playtime']
    def max_playtime(self, game):
        """Longest playtime; guards against records where max < min."""
        g = self._game(game)
        return max(g['max_playtime'], g['min_playtime'])
    def adjusted_popularity(self, game, n):
        """Delta-encoded popularity bonus for the n-th player count step."""
        return self._game(game)['adjusted_popularity'][n]
    def _game(self, game):
        # Resolve a game name to its record, falling back to the default.
        if game in self.games:
            return self.games[game]
        else:
            return self.default
    def _preprocess_game_popularities(self):
        """Attach 'adjusted_popularity' (a delta-encoded list) to every record.

        Entry 0 is the (scaled) popularity at min_players; each later entry
        is the increment over the previous player count, so summing a prefix
        recovers the popularity at that count.
        """
        for g in self.games:
            game = self.games[g]
            popularity = []
            for i in range(game['min_players'], game['max_players'] + 1):
                if 'popularity' in game and str(i) in game['popularity']:
                    value = game['popularity'][str(i)]
                else:
                    value = 0.9
                # Smooth popularity such that games with lots of popularity
                # ratings don't effect the objective function. Also multiply
                # them by the player count, and multiply them by 0.1 so they do
                # not dominate interests.
                popularity.append((i, min(value, 0.9) * 0.1 * i))
            popularity = [x for _, x in sorted(popularity, key=lambda x: x[0])]
            try:
                result = [popularity[0]]
            except IndexError as e:
                # min_players > max_players leaves `popularity` empty; surface
                # the offending record before re-raising.
                print(game)
                raise e
            for a, b in window(popularity, 2):
                result.append(b - a)
            game['adjusted_popularity'] = result
class Schedule:
    """Integer program (via PuLP) assigning players to games across sessions.

    Maximizes player interest plus per-player-count game popularity, subject
    to: one game per player per session, game min/max player counts, a table
    limit per session, and each player playing any given game at most once.

    NOTE: `_make_session_players` writes a default 'sessions' key into the
    caller's player dicts, so `players` is mutated in place.
    """
    def __init__(self, games_db, players, sessions, shared_games=None, table_limit=10):
        # BUG FIX: the default used to be `shared_games=[]` -- a mutable
        # default argument shared across every Schedule constructed without
        # the parameter. A None sentinel keeps the interface compatible.
        if shared_games is None:
            shared_games = []
        self.games_db = games_db
        self.players = players
        self.sessions = sessions
        self.shared_games = shared_games
        self.table_limit = table_limit
        # all_games: shared games first, then each player's owned games;
        # owned_by holds the owner's player index (None for shared games).
        self.all_games = shared_games.copy()
        self.owned_by = [None] * len(shared_games)
        for i, player in enumerate(self.players):
            self.all_games.extend(player['owns'])
            self.owned_by.extend([i] * len(player['owns']))
        self.session_ids = list(range(len(self.sessions)))
        self.session_players = self._make_session_players()
        self.session_games = self._make_session_games()
        self.p = pulp.LpProblem('Schedule', pulp.LpMaximize)
        # Problem Variables.
        self.choices = self._make_choice_variables()
        self.games_played = self._make_games_played_variables()
        # Objective Function.
        self._add_objective_function()
        # Constraints.
        self._add_logical_play_constraints()
        self._add_player_count_constraints()
        self._add_uniqueness_constraints()
    def solve(self):
        """Returns a solution, if one exists, for the scheduling problem.
        The result is: [[(game, [player, ...]), ...], ...] - i.e. each session
        has a list of tuples, giving the game and the those playing.

        Raises RuntimeError when the LP has no optimal solution.
        """
        self.p.solve()
        if pulp.LpStatus[self.p.status] != 'Optimal':
            raise RuntimeError("Problem not solvable")
        result = []
        for i in self.session_ids:
            result.append([])
            for g in self.session_games[i]:
                game = self.all_games[g]
                players = [
                    self.players[p]
                    for p in self.session_players[i]
                    if self.choices[i][p][g].varValue
                ]
                if players:
                    result[i].append((game, players))
            result[i] = sorted(result[i], key=lambda x: x[0])
        return result
    def _make_session_players(self):
        """Figure out who is available in each session"""
        # Default: a player without an explicit 'sessions' list is available
        # in every session (mutates the player dict).
        for p in self.players:
            if 'sessions' not in p:
                p['sessions'] = self.session_ids
        session_players = []
        for i in self.session_ids:
            session_players.append([])
            for j, player in enumerate(self.players):
                if i in player['sessions']:
                    session_players[i].append(j)
        return session_players
    def _make_session_games(self):
        """Figure out what games are available each session"""
        session_games = []
        for i, session in enumerate(self.sessions):
            session_games.append([
                j for j, game in enumerate(self.all_games)
                if self._game_available(session, game, i, j)
            ])
        return session_games
    def _game_available(self, session, game, session_idx, game_idx):
        """Returns true if the game is of appropriate length and exists"""
        # A game exists in a session when it is shared, or its owner attends.
        return (
            (
                self.owned_by[game_idx] is None or
                self.owned_by[game_idx] in self.session_players[session_idx]
            ) and
            self.games_db.min_playtime(game) <= session['length']
        )
    def _make_choice_variables(self):
        """Returns a nested Dict containing binary decision variables X_i_j_k.
        These represent: for each session i, for each player j, for each game
        k: `1` if they are playing, `0` otherwise.
        """
        result = {}
        for i in self.session_ids:
            result[i] = {}
            for j in self.session_players[i]:
                result[i][j] = {}
                for k in self.session_games[i]:
                    result[i][j][k] = pulp.LpVariable(f'X_{i}_{j}_{k}', cat='Binary')
        return result
    def _make_games_played_variables(self):
        """Returns a nested Dict containing binary decision variables G_i_j_c.
        These represent: for each session i, for each game j, for each player
        count c-min_player_count: `1` if this game is being played at this
        player count _or higher_ in this session, 0 otherwise.
        These are necessary to support:
        * the disjoint constraints on the minimum number of players in a game
          (i.e. any particular game needs >= n players, but _only_ if the game
          is being played at all) and to
        * the table limit constraints
        * including the value of different player counts in the objective
          function.
        """
        result = {}
        for i, session in enumerate(self.sessions):
            result[i] = {}
            for j in self.session_games[i]:
                game = self.all_games[j]
                counts = range(
                    self.games_db.min_players(game),
                    self.games_db.max_players(game, session) + 1
                )
                result[i][j] = [
                    pulp.LpVariable(f'G_{i}_{j}_{c}', cat='Binary')
                    for c in counts
                ]
        return result
    def _add_objective_function(self):
        """Build the objective function (the mathematical function to maximize).
        For each possible choice variable, multiply it by a _weight_ and
        sum. In the simple case the weight is 1.0 if the game is in the
        player's interests list, and 0.0 if it isn't.
        """
        objective = []
        for i in self.session_ids:
            # Player-interest terms.
            for p in self.session_players[i]:
                for k in self.session_games[i]:
                    game = self.all_games[k]
                    objective.append(
                        self.choices[i][p][k] * self.weight(self.players[p], game)
                    )
            # Popularity terms: one delta per incremental player count.
            for g in self.games_played[i]:
                game = self.all_games[g]
                for count_idx, count_var in enumerate(self.games_played[i][g]):
                    objective.append(
                        self.games_db.adjusted_popularity(game, count_idx) * count_var
                    )
        self.p += pulp.lpSum(objective)
    def _add_logical_play_constraints(self):
        """Enforce logical constraints.
        * Players can only play one game per-session.
        * A game must be played with n players to be played with n+1.
        * Do not break the table limit.
        """
        for i in self.session_ids:
            for j in self.session_players[i]:
                self.p += (
                    pulp.lpSum(self.choices[i][j].values()) == 1,
                    f"Game Per Session session {i} player {j}",
                )
            games_played = []
            for g in self.games_played[i]:
                previous_count = self.games_played[i][g][0]
                games_played.append(previous_count) # Store for the table count constraint
                for c, count_var in enumerate(self.games_played[i][g][1:]):
                    self.p += (
                        previous_count >= count_var,
                        f"Increasing player count {i} {g} {c}",
                    )
                    previous_count = count_var
            self.p += (
                pulp.lpSum(games_played) <= self.table_limit,
                f"Table limit session session {i}",
            )
    def _add_player_count_constraints(self):
        """Games have a minimum and maximum player count"""
        for i in self.session_ids:
            for j in self.session_games[i]:
                game = self.all_games[j]
                game_players = []
                for k in self.session_players[i]:
                    game_players.append(self.choices[i][k][j])
                # The minimum for a game, or 0 if not being played
                count = self.games_db.min_players(game) * self.games_played[i][j][0]
                for var in self.games_played[i][j][1:]:
                    count += var
                self.p += (
                    pulp.lpSum(game_players) == pulp.lpSum(count),
                    f"Game count matches players session {i} game {j}"
                )
    def _add_uniqueness_constraints(self):
        """Make sure that players do not play games more than once"""
        for i, _ in enumerate(self.players):
            unique_games = set(self.all_games)
            for game in unique_games:
                # A game title may appear multiple times in all_games (several
                # owners); constrain across all copies and all sessions.
                indexes = [x for x, g in enumerate(self.all_games) if g == game]
                variables = []
                for j in indexes:
                    for k in self.session_ids:
                        if i in self.choices[k] and j in self.choices[k][i]:
                            variables.append(self.choices[k][i][j])
                # We only need a constraint if there is more than one
                # opportunity to play a game.
                if len(variables) > 1:
                    self.p += pulp.lpSum(variables) <= 1, f"Play once player {i} game {game}"
    def weight(self, player, game):
        """Returns how interested a player is in a game.
        The assumptions are:
        * Players are uniformally interested in games that they have not
          specified
        * All players interests are equal - there is no weighting for someone
          who has expressed few interests vs. many.
        * A player who brings games is more interested in playing those than
          other games, and further, this player is given priority to playing
          that game over others.
        """
        if game in player['interests']:
            if game in player['owns']:
                return 1.05
            else:
                return 1.0
        else:
            return 0.0
if __name__ == '__main__':
parser = ArgumentParser()
parser.add_argument('--spec', action='store_true', help='Print out the problem specification instead of solving')
parser.add_argument('--games', metavar='FILE', default='games.json', help='Games database json')
parser.add_argument('--players', metavar='FILE', default='sample/players.json', help='Player interests json file')
parser.add_argument('--sessions', metavar='FILE', default='sample/sessions.json', help='Session info json file')
parser.add_argument('--table-limit', metavar='N', default=10, type=int, help='Session info json file')
parser.add_argument('--shared-games', nargs='*', metavar='GAMES', default=[], help='Session info json file')
args | |
<filename>main.py<gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import random
import asyncio
import ssl
import websockets
from math import floor
# ###### CONFIG ######
serverIP = "localhost"                    # host the websocket server binds to
secure = False                            # True: serve TLS (wss://) with the files below
SSLchain = "/home/pi/ssl/fullchain.pem"   # TLS certificate chain
SSLkey = "/home/pi/ssl/key.pem"           # TLS private key
# ###### ###### ######
# Suit names by suit index (German: Kreuz=clubs, Karo=diamonds,
# Herz=hearts, Pik=spades)
farben = {
    0: "Kreuz",
    1: "Karo",
    2: "Herz",
    3: "Pik",
}
# Display text by card value (2-10, then B=Jack, D=Queen, K=King, A=Ace)
werte = {
    2: "2",
    3: "3",
    4: "4",
    5: "5",
    6: "6",
    7: "7",
    8: "8",
    9: "9",
    10: "10",
    11: "B",
    12: "D",
    13: "K",
    14: "A",
}
# Append one key/value line to a hand-built JSON string
def addJson(msg, key, val):
    """Return `msg` with a tab-indented `"key": val,` line appended.

    `val` is inserted verbatim, so the caller supplies any quoting.
    """
    return "".join((msg, "\t\"", key, "\": ", val, ",\n"))
# Class for the "Karte" (playing card) object
class Karte:
    """A single card, identified by an id in 0-51; id -1 means 'no card'."""
    # Defaults mark an invalid/absent card
    farbe, zahl = -1, -1
    id = -1
    # Initialisation from the card id (0-51)
    def __init__(self, kartenId):
        # BUG FIX: the guard used to read `if id == -1`, which compares the
        # *builtin* `id` function with -1 (always False) -- so Karte(-1) got
        # garbage zahl/farbe instead of staying the invalid card.
        if kartenId == -1:
            return
        self.zahl = floor(kartenId / 4) + 2  # 2..14 (11=J, 12=Q, 13=K, 14=A)
        self.farbe = kartenId % 4            # suit index 0..3
        self.id = kartenId
    def getID(self):
        return self.id
    def __str__(self):
        return farben[self.farbe] + werte[self.zahl]
    def __repr__(self):
        return str(self.id)
    # Test whether this card may be played on top of card k
    def spielbar(self, k):
        # A 2 or 3 is always playable
        b = (self.zahl == 2 or self.zahl == 3)
        # On a 7 you must play lower
        if k.zahl == 7:
            if self.zahl < 7:
                b = True
        # Otherwise equal or higher (exception: 10 is always playable)
        else:
            if self.zahl >= k.zahl or self.zahl == 10:
                b = True
        return b
# Draw pile
class Stapel:
    """The face-down draw pile: a freshly shuffled 52-card deck."""
    def __init__(self):
        # BUG FIX: `karten` used to be a *class* attribute, so every new
        # Stapel() appended 52 more cards onto one shared list. Make it a
        # per-instance list instead.
        self.karten = []
        for i in range(52):
            self.karten.append(Karte(i))
        random.shuffle(self.karten)
    # Draw the top card of the pile (invalid card Karte(-1) when empty)
    def zieheKarte(self):
        if len(self.karten) > 0:
            return self.karten.pop()
        return Karte(-1)
    # Peek at the top card's id without removing it (-1 when empty)
    def oben(self):
        if len(self.karten) > 0:
            return self.karten[-1].id
        return -1
    # Deal one player's cards (3 face-down, 3 face-up, 3 hand cards)
    def verteileKarten(self):
        ret = []
        for i in range(9):
            ret.append(self.zieheKarte())
        return ret
# Discard pile
class Ablage:
    """The face-up discard pile."""
    def __init__(self):
        # BUG FIX: `karten` used to be a shared *class* attribute, so every
        # Ablage instance (e.g. across game resets) mutated the same list.
        self.karten = []
    # Take all cards off the pile and reset it
    def kartenAufnehmen(self):
        k = self.karten
        self.karten = []
        return k
    # Return the top card of the pile (a 3 is invisible!)
    def oben(self):
        if len(self.karten) == 0:
            return Karte(-1)
        # Skip over 3s from the top; they do not count as the visible card
        for i in range(len(self.karten)):
            if self.karten[-1 - i].zahl != 3:
                return self.karten[-1 - i]
        return Karte(-1)
    # Add a card to the pile
    def ablegen(self, karte):
        self.karten.append(karte)
    # Can the pile be burned (four equal values on top)?
    def verbrennbar(self):
        if len(self.karten) >= 4:
            k = self.karten[-1]
            for i in range(-2, -5, -1):
                if k.zahl != self.karten[i].zahl:
                    return False
            return True
        return False
class Spieler:
    """One player: 3 face-down (verdeckt), 3 face-up (offen), and hand cards.

    NOTE(review): the class-level lists below are shared defaults; __init__
    always replaces the card lists, and `fertig`/`kartenGetauscht` are only
    rebound (never mutated), so the sharing is currently harmless.
    """
    verdeckt, offen, karten = [], [], []
    name, ip, websocket = "", None, None
    fertig, kartenGetauscht = False, False
    def __init__(self, name, karten, socket):
        # `karten` is the 9-card deal from Stapel.verteileKarten().
        self.verdeckt = karten[0:3]
        self.offen = karten[3:6]
        self.karten = karten[6:9]
        self.websocket = socket
        if socket:
            self.ip = socket.remote_address[0]
        self.name = name
    def __str__(self):
        return self.name
    def __repr__(self):
        return self.name
    def spielzug(self, karte, ablage, stapel, karten):
        """Try to play `karte` from the list `karten` (hand/offen/verdeckt).

        Returns the number of cards played, or 0 for: an illegal move, a
        played 3 (invisible, may be played repeatedly), or a burned pile.
        NOTE(review): membership below is by object identity (Karte defines
        no __eq__) -- callers must pass the player's own Karte objects.
        """
        if karte not in self.karten and len(self.karten) != 0:
            return 0
        # Player holds the card and may play it
        if karte.spielbar(ablage.oben()):
            ablage.ablegen(karte)
            karten.remove(karte)
            anzahlGespielteKarten = 1
            # A 3 is invisible and may be played again and again
            if karte.zahl == 3:
                return 0
            # Play every further card of the same value (not for face-down cards)
            if karten != self.verdeckt:
                for k in list(karten):
                    if k.zahl == karte.zahl:
                        ablage.ablegen(k)
                        karten.remove(k)
                        anzahlGespielteKarten += 1
            # Draw back up to 3 hand cards
            self.nachziehen(stapel)
            # Burn the pile (a 10, or four equal values on top)
            if karte.zahl == 10 or ablage.verbrennbar():
                ablage.karten = []
                return 0
            return anzahlGespielteKarten
        return 0
    def nachziehen(self, stapel):
        # Refill the hand to 3 cards while the draw pile lasts.
        while len(self.karten) < 3:
            k = stapel.zieheKarte()
            if k is not None and k.id != -1:
                self.karten.append(k)
            else:
                break
    # At game start the face-up and hand cards may be swapped once
    def kartenAustauschen(self, neueKarten):
        """Apply the player's opening swap, given as 6 comma-separated card ids."""
        if self.kartenGetauscht:
            return
        kartenArray = list(map(int, neueKarten.split(",")))
        # Verify the cards were only rearranged, not replaced (anti-cheat)
        kartenArray2 = []
        kartenArray3 = kartenArray.copy()
        for k1 in self.offen + self.karten:
            kartenArray2.append(k1.getID())
        kartenArray2.sort()
        kartenArray3.sort()
        if kartenArray2 != kartenArray3:
            return
        # Perform the swap: first 3 ids -> offen, last 3 -> hand
        for i in range(3):
            self.offen[i] = Karte(kartenArray[i])
        for i in range(3):
            self.karten[i] = Karte(kartenArray[i + 3])
        self.kartenGetauscht = True
class Spiel:
    """Full game state: draw pile, discard pile, players, and whose turn it is."""
    # NOTE(review): these class-level attributes are shared defaults;
    # __init__ replaces all of them with per-instance objects, so they only
    # matter before the first Spiel() is constructed.
    st = Stapel()
    ablage = Ablage()
    dran = 0
    spieler = []
def __init__(self):
self.st = Stapel()
self.ablage = Ablage()
self.dran = 0
self.spieler = []
print("Spiel zurückgesetzt!")
    # Add a new player
    def addSpieler(self, name, socket):
        """Register a player and deal their cards; duplicate names are ignored.

        NOTE(review): once the first player joins, the loop below calls
        addSpieler three more times with the *same* name -- presumably to
        fill a 4-player round for testing -- but the duplicate-name guard
        makes those recursive calls no-ops. Confirm intent.
        """
        for spieler in self.spieler:
            if spieler.name == name:
                return
        self.spieler.append(Spieler(name, self.st.verteileKarten(), socket))
        if len(self.spieler) == 1:
            for i in range(3):
                self.addSpieler(name, socket)
# Spieler Objekt finden über Spieler-Name
def getSpielerByName(self, name):
for spieler in self.spieler:
if spieler.name == name:
return spieler
return None
    # Notify all players
    async def benachrichtige(self, spielerFertig=-1):
        """Send each connected player either the full game state
        (spielerFertig == -1) or a '<name> ist fertig!' message.

        A closed connection clears that player's websocket reference.
        """
        for i in range(len(self.spieler)):
            if self.spieler[i].websocket:
                try:
                    if spielerFertig == -1:
                        await self.spieler[i].websocket.send(self.socketNachricht(i))
                    else:
                        await self.spieler[i].websocket.send("{\n\t\"Message\": \"" + self.spieler[spielerFertig].name +
                                                             " ist fertig!\"\n}")
                except websockets.ConnectionClosed as exc:
                    print("Verbindung zu " + self.spieler[i].name + " geschlossen!")
                    self.spieler[i].websocket = None
    # Next player's turn (including skips)
    def naechster(self, kartenZahl):
        """Advance `dran` to the next active player.

        `kartenZahl` is how many cards were just played; each 4 among them
        (counted from the top of the discard pile) skips one extra player.
        """
        aussetzen = 1
        # Extra skips caused by played 4s
        for i in range(min(kartenZahl, len(self.ablage.karten))):
            if self.ablage.karten[-1 - i].zahl == 4:
                aussetzen += 1
            else:
                break
        # Skip players that are already finished
        # NOTE(review): the player count is hard-coded to 4 here, and the
        # inner while loop never terminates if every player is `fertig` --
        # confirm both invariants hold for this game.
        for i in range(aussetzen):
            self.dran = (self.dran + 1) % 4
            while self.spieler[self.dran].fertig:
                self.dran = (self.dran + 1) % 4
# Führe Spielzug aus
def spielzug(self, karteId):
spielerdran = self.spieler[self.dran]
# Karte überhaupt vorhanden in Hand?
if karteId >= len(spielerdran.karten):
return False
spielzugErfolgreich = False
# Normale Hand-Karten: ID >= 0
if karteId >= 0:
k = spielerdran.karten[karteId]
spielzugErfolgreich = spielerdran.spielzug(k, self.ablage, self.st, spielerdran.karten)
# Offene (und verdeckte) Karten: ID < 0
else:
# Ausspielen einer verdeckten Karte...
if karteId == -4:
# ...nur möglich wenn keine Karten in der Hand sind
if len(spielerdran.karten) == 0:
# Wenn die verdeckte Karte nicht spielbar ist: Ablage + aufgedeckte Karte nehmen
if not spielerdran.spielzug(spielerdran.verdeckt[-1], self.ablage, self.st, spielerdran.verdeckt):
self.nehme()
# if len(spielerdran.verdeckt) > 0: !!!
# spielerdran.karten.append(spielerdran.verdeckt.pop())
spielzugErfolgreich = True
elif (-1 - karteId) < len(spielerdran.offen):
k = spielerdran.offen[-1 - karteId]
spielzugErfolgreich = spielerdran.spielzug(k, self.ablage, self.st, spielerdran.offen)
else:
return
spielerdran.fertig = (len(spielerdran.karten) + len(spielerdran.offen) + len(spielerdran.verdeckt)) == 0
# Wenn Spielzug valide: nächster Spieler dran
if spielzugErfolgreich:
self.naechster(spielzugErfolgreich)
# Nehme alle Karten von der Ablage
def nehme(self):
karten = self.ablage.kartenAufnehmen()
self.spieler[self.dran].karten += karten
# Ist Spieler dran?
def istdran(self, name):
return self.spieler[self.dran].name == name
# Erstelle JSON Text für Socket Nachricht
def socketNachricht(self, nr):
msg = "{\n"
msg = addJson(msg, "Dran", "\"" + self.spieler[self.dran].name + "\"")
msg = addJson(msg, "Namen", str(self.spieler).replace("[", "[\"").replace("]", "\"]")).replace(", ", "\", \"")
msg = addJson(msg, "Karten", str(self.spieler[nr].karten))
msg = addJson(msg, "Offen", str(self.spieler[nr].offen))
verdecktArr = []
for x in self.spieler[nr].verdeckt:
verdecktArr.append("x")
msg = addJson(msg, "Verdeckt", str(verdecktArr).replace("'", "\""))
msg = addJson(msg, "Ablage", str(self.ablage.karten))
msg = addJson(msg, "Andere", self.getAndereKarten(nr))
msg = addJson(msg, "Ziehen", str(self.st.oben()))[:-2] + "\n"
msg += "}"
return msg
# Offene Karten der anderen Spieler
def getAndereKarten(self, nr):
k = "["
for i in range(len(self.spieler)):
if i != nr:
if len(self.spieler[i].offen) == 0:
verdecktArr = []
for x in self.spieler[i].verdeckt:
verdecktArr.append("x")
offeneKarten = str(verdecktArr).replace("'", "\"")
else:
offeneKarten = str(self.spieler[i].offen)
k += "\"" + self.spieler[i].name + "\", "
k += str(len(self.spieler[i].karten)) + ", " + offeneKarten + ", "
if len(k) < 2:
return "[]"
return k[:-2] + "]"
# Gibt es bereits einen Sieger?
def laeuft(self):
for s in self.spieler:
if not s.fertig and s.websocket is not None:
return True
return False
def ladeSpiel(spielListe):
    """Placeholder for loading saved games; currently does nothing.

    Touches getSpielerByName on each entry (so missing attributes would
    raise), but performs no actual work yet.
    """
    for spiel in spielListe:
        if spiel.getSpielerByName:
            pass
    pass
if __name__ == '__main__':
sp = Spiel()
async def socketLoop(websocket, path):
global sp
print("Neue Verbindung")
spieler = None
# Führe Schleife aus bis Spieler die Verbindung trennt
while True:
# Empfange Nachricht
try:
msg = await websocket.recv()
except websockets.ConnectionClosed as exc:
if spieler:
print("Verbindung geschlossen!")
spieler.websocket = None
if not sp.laeuft():
sp = Spiel();
return
# Trenne Nachricht in verschiedene Teile
msg = str(msg).split(";")
print(msg)
# Spielzüge bestehen aus Name + Spielzug
if len(msg) > 1:
if msg[1] == "kartenTausch":
for spieler in sp.spieler:
if spieler.name == msg[0]:
spieler.kartenAustauschen(msg[2])
break
# Spielzug nur ausführen wenn alle Spieler da und Spieler dran
elif len(sp.spieler) == 4 and sp.istdran(msg[0]):
# Karten aufnehmen weil anderer Zug nicht möglich
if msg[1] == "nehme":
sp.nehme()
# Spieler kann nach einer ausgespielten 3 auch weiter | |
# filename: dataset/dataset/dataset.py
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import statsmodels.api as sm
import warnings
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler, PowerTransformer, \
OneHotEncoder
from sklearn_pandas import DataFrameMapper
from scipy.stats import skew, boxcox_normmax
from scipy.special import boxcox1p
from dataset.split import Split
from dataset.correlations import cramers_v
warnings.simplefilter(action='ignore')
#
# Correlation ideas taken from:
# https://towardsdatascience.com/the-search-for-categorical-correlation-a1cf7f1888c9
#
class Dataset:
    """
    This class allows a simpler representation of the dataset used
    to build a model in class. It allows loading a remote CSV by
    providing an URL to the initialization method of the object.

        my_data = Dataset(URL)

    NOTE(review): several methods call ``self.select(tag)`` and
    ``self.names(tag)``, which are not defined in this class — presumably
    provided elsewhere in the package; confirm before refactoring.
    """
    # Metainformation dict built by metainfo().
    meta = None
    # Raw dataframe as loaded.
    data = None
    # Target series, once set_target() has been called.
    target = None
    # Working copy of the dataframe holding the (transformed) features.
    features = None
    # Valid subset-selection primitives accepted by scale()/ensure_normality().
    meta_tags = ['all', 'numerical', 'categorical', 'complete',
                 'numerical_na', 'categorical_na', 'features', 'target']

    def __init__(self, data_location=None, data_frame=None, *args, **kwargs):
        """
        Wrapper over the method read_csv from pandas, so you can use variadic
        arguments, as if you were using the actual read_csv.

        :param data_location: path or url to the file
        :param data_frame: in case this method is called from the class method
            this parameter is passing the actual dataframe to read data from
        :param args: variadic unnamed arguments to pass to read_csv
        :param kwargs: variadic named arguments to pass to read_csv
        :raises RuntimeError: if neither a location nor a dataframe is given
        """
        if data_location is not None:
            self.data = pd.read_csv(data_location, *args, **kwargs)
        elif data_frame is not None:
            self.data = data_frame
        else:
            raise RuntimeError(
                "No data location, nor DataFrame passed to constructor")
        self.features = self.data.copy()
        self.metainfo()

    @classmethod
    def from_dataframe(cls, df):
        """Alternate constructor: build a Dataset from an existing DataFrame."""
        return cls(data_location=None, data_frame=df)

    def set_target(self, target_name):
        """
        Set the target variable for this dataset. This will create a new
        property of the object called 'target' that will contain the
        target column of the dataset, and that column will be removed
        from the list of features.

        Example::

            my_data.set_target('SalePrice')
        """
        if target_name in list(self.features):
            self.target = self.features.loc[:, target_name].copy()
            self.features.drop(target_name, axis=1, inplace=True)
        else:
            # Target was already dropped from features; re-read it from the
            # pristine copy of the data.
            self.target = self.data.loc[:, target_name].copy()
        self.metainfo()
        return self

    def metainfo(self):
        """
        Builds metainformation about the dataset, considering the
        features that are categorical, numerical or do/don't contain NA's.
        """
        meta = dict()
        # Per-feature dtype and NA count drive every subset below.
        descr = pd.DataFrame({'dtype': self.features.dtypes,
                              'NAs': self.features.isna().sum()})
        categorical_features = descr.loc[descr['dtype'] == 'object']. \
            index.values.tolist()
        numerical_features = descr.loc[descr['dtype'] != 'object']. \
            index.values.tolist()
        numerical_features_na = descr.loc[(descr['dtype'] != 'object') &
                                          (descr['NAs'] > 0)]. \
            index.values.tolist()
        categorical_features_na = descr.loc[(descr['dtype'] == 'object') &
                                            (descr['NAs'] > 0)]. \
            index.values.tolist()
        complete_features = descr.loc[descr['NAs'] == 0].index.values.tolist()
        # Update META-information
        meta['description'] = descr
        meta['all'] = list(self.data)
        meta['features'] = list(self.features)
        meta['target'] = self.target.name if self.target is not None else None
        meta['categorical'] = categorical_features
        meta['categorical_na'] = categorical_features_na
        meta['numerical'] = numerical_features
        meta['numerical_na'] = numerical_features_na
        meta['complete'] = complete_features
        self.meta = meta
        return self

    def outliers(self):
        """
        Find outliers, using the Bonferroni criterion, from the numerical
        features. Returns a list of indices where outliers are present.
        """
        ols = sm.OLS(endog=self.target, exog=self.select('numerical'))
        fit = ols.fit()
        test = fit.outlier_test()['bonf(p)']
        return list(test[test < 1e-3].index)

    def scale(self, features_of_type='numerical', return_series=False):
        """
        Scales numerical features in the dataset, unless the parameter
        specifies any other subset selection primitive.

        :param features_of_type: Subset selection primitive
        :param return_series: when True, return the scaled subset instead of
            self
        :return: the scaled subset (return_series=True) or self (fluent).
        """
        assert features_of_type in self.meta_tags
        subset = self.select(features_of_type)
        mapper = DataFrameMapper([(subset.columns, StandardScaler())])
        scaled_features = mapper.fit_transform(subset.copy())
        self.features[self.names(features_of_type)] = pd.DataFrame(
            scaled_features,
            index=subset.index,
            columns=subset.columns)
        self.metainfo()
        if return_series is True:
            return self.features[self.names(features_of_type)]
        return self

    def ensure_normality(self,
                         features_of_type='numerical',
                         return_series=False):
        """
        Ensures that the numerical features in the dataset (or any other
        subset selection primitive) fit a normal distribution by applying
        the Yeo-Johnson transform.

        :param features_of_type: Subset selection primitive
        :param return_series: Return the normalized series
        :return: the normalized subset (return_series=True) or self (fluent).
        """
        assert features_of_type in self.meta_tags
        subset = self.select(features_of_type)
        mapper = DataFrameMapper([(subset.columns, PowerTransformer(
            method='yeo-johnson',
            standardize=False))])
        normed_features = mapper.fit_transform(subset.copy())
        self.features[self.names(features_of_type)] = pd.DataFrame(
            normed_features,
            index=subset.index,
            columns=subset.columns)
        self.metainfo()
        if return_series is True:
            return self.features[self.names(features_of_type)]
        # FIX: previously returned None here, breaking the fluent chaining
        # that scale() supports; return self for consistency.
        return self

    def skewness(self, threshold=0.75, fix=False, return_series=False):
        """
        Returns the list of numerical features that present skewness.

        :param threshold: skew value above which a feature is transformed
        :param fix: when True, apply a Box-Cox (boxcox1p) transform in place
        :param return_series: when True, return the per-feature skewness
        :return: a pandas Series with the features and their skewness
            (return_series=True) or self (fluent).
        """
        df = self.select('numerical')
        feature_skew = df.apply(
            lambda x: skew(x)).sort_values(ascending=False)
        if fix is True:
            high_skew = feature_skew[feature_skew > threshold]
            skew_index = high_skew.index
            for feature in skew_index:
                self.features[feature] = boxcox1p(
                    df[feature], boxcox_normmax(df[feature] + 1))
        if return_series is True:
            return feature_skew
        # FIX: return self (was None) for consistency with scale().
        return self

    def onehot_encode(self):
        """
        Encodes the categorical features in the dataset with one-hot encoding
        (pandas get_dummies), replacing self.features.
        """
        new_df = self.features[self.names('numerical')].copy()
        for categorical_column in self.names('categorical'):
            new_df = pd.concat(
                [new_df,
                 pd.get_dummies(
                     self.features[categorical_column],
                     prefix=categorical_column)
                 ],
                axis=1)
        self.features = new_df.copy()
        self.metainfo()
        return self

    def correlated(self, threshold=0.9):
        """
        Return the features that are highly correlated with other
        variables, either numerical or categorical, based on the threshold.
        For numerical variables Spearman correlation is used, for
        categorical cramers_v.

        :param threshold: correlation limit above which features are
            considered highly correlated.
        :return: the list of features that are highly correlated, and should
            be safe to remove.
        """
        corr_categoricals, _ = self.categorical_correlated(threshold)
        corr_numericals, _ = self.numerical_correlated(threshold)
        return corr_categoricals + corr_numericals

    def numerical_correlated(self,
                             threshold=0.9):
        """
        Build a Spearman correlation matrix between all numerical features.

        :param threshold: Threshold beyond which correlation is considered
            high. Default is 0.9
        :return: The list of columns that are highly correlated and could be
            dropped from the dataset, plus the correlation matrix.
        """
        # FIX: np.absolute(...).abs() applied the absolute value twice;
        # one pass is enough.
        corr_matrix = self.select('numerical').corr(method='spearman').abs()
        # Select upper triangle of correlation matrix.
        # FIX: np.bool is removed in NumPy >= 1.24; use the builtin bool.
        upper = corr_matrix.where(
            np.triu(np.ones(corr_matrix.shape), k=1).astype(bool))
        # Find index of feature columns with correlation greater than threshold
        return [column for column in upper.columns
                if any(abs(upper[column]) > threshold)], corr_matrix

    def categorical_correlated(self, threshold=0.9):
        """
        Generates a correlation matrix for the categorical variables using
        Cramer's V.

        :param threshold: Limit from which correlation is considered high.
        :return: the list of categorical variables with HIGH correlation and
            the correlation matrix
        """
        columns = self.meta['categorical']
        corr = pd.DataFrame(index=columns, columns=columns)
        for i in range(0, len(columns)):
            for j in range(i, len(columns)):
                if i == j:
                    corr.loc[columns[j], columns[i]] = 1.0
                else:
                    cell = cramers_v(self.features[columns[i]],
                                     self.features[columns[j]])
                    # FIX: use .loc instead of chained indexing
                    # (corr[a][b] = ...), which may silently write to a copy.
                    # The matrix is symmetric, so both cells get the value.
                    corr.loc[columns[j], columns[i]] = cell
                    corr.loc[columns[i], columns[j]] = cell
        corr.fillna(value=np.nan, inplace=True)
        # Select upper triangle of correlation matrix
        # FIX: np.bool is removed in NumPy >= 1.24; use the builtin bool.
        upper = corr.where(
            np.triu(np.ones(corr.shape), k=1).astype(bool))
        # Find index of feature columns with correlation greater than threshold
        return [column for column in upper.columns
                if any(abs(upper[column]) > threshold)], corr

    def under_represented_features(self, threshold=0.98):
        """
        Returns the list of categorical features with under-represented
        categories or a clear imbalance between the values they can take.

        :param threshold: The upper limit of the most represented category
            of the feature.
        :return: the list of features with under-represented categories.
        """
        under_rep = []
        for column in self.meta['categorical']:
            counts = self.features[column].value_counts()
            majority_freq = counts.iloc[0]
            if (majority_freq / len(self.features)) > threshold:
                under_rep.append(column)
        return under_rep

    def stepwise_selection(self,
                           initial_list=None,
                           threshold_in=0.01,
                           threshold_out=0.05,
                           verbose=True):
        """
        Perform a forward-backward feature selection based on p-value from
        statsmodels.api.OLS.

        Your features must be all numerical, so be sure to onehot_encode
        them before calling this method.
        Always set threshold_in < threshold_out to avoid infinite looping.

        :param initial_list: list of features to start with (column names)
        :param threshold_in: include a feature if its p-value < threshold_in
        :param threshold_out: exclude a feature if its p-value > threshold_out
        :param verbose: whether to print inclusions and exclusions
        :return: list of selected features

        See https://en.wikipedia.org/wiki/Stepwise_regression for details.
        Taken from: https://datascience.stackexchange.com/a/24823
        """
        if initial_list is None:
            initial_list = []
        assert len(self.names('categorical')) == 0
        included = list(initial_list)
        while True:
            changed = False
            # forward step
            excluded = list(set(self.features.columns) - set(included))
            new_pval = pd.Series(index=excluded, dtype=float)
            for new_column in excluded:
                model = sm.OLS(self.target, sm.add_constant(
                    pd.DataFrame(self.features[included + [new_column]]))).fit()
                new_pval[new_column] = model.pvalues[new_column]
            best_pval = new_pval.min()
            if best_pval < threshold_in:
                best_feature = new_pval.idxmin()
                included.append(best_feature)
                changed = True
                if verbose:
                    print('Add {:30} with p-value {:.6}'.format(best_feature,
                                                                best_pval))
            # backward step
            model = sm.OLS(self.target, sm.add_constant(
                pd.DataFrame(self.features[included]))).fit()
            # use all coefs except intercept
            pvalues = model.pvalues.iloc[1:]
            worst_pval = pvalues.max()  # null if p-values is empty
            if worst_pval > threshold_out:
                changed = True
                # FIX: idxmax() returns the feature *label*; argmax() returns
                # a positional index in modern pandas, which would make the
                # remove() call below fail.
                worst_feature = pvalues.idxmax()
                included.remove(worst_feature)
                if verbose:
                    print('Drop {:30} with p-value {:.6}'.format(worst_feature,
                                                                 worst_pval))
            if not changed:
                break
        return included
#
| |
# gh_stars: 0
import sys,time
import numpy as np
import os
from datetime import datetime
import psutil
import torch
from copy import deepcopy
import utils
from sklearn import metrics
from torch.autograd import Variable
import re
########################################################################################################################
##
# Knowledge Base for each Domain
##
class RK(object):
    """Per-domain knowledge base.

    Holds the context-sentiment matrix (frozen as an embedding layer), the
    aspect-sentiment attention data and the two domain vocabularies.
    """
    def __init__(self, cSM, aSA, wordVocabulary, aspectVocabulary):
        # Frozen embedding built from the context-sentiment matrix.
        self.wordContext_CSM = torch.nn.Embedding.from_pretrained(cSM)
        self.aspect_ASA = aSA
        self.word_vocabulary = wordVocabulary
        self.aspect_vocabulary = aspectVocabulary
        # Sorted copy of the vocabulary; indices returned by
        # getIndexInWordVocabulary refer to THIS ordering, not the original.
        self.orderedVocabulary = sorted(self.word_vocabulary)

    def getIndexInWordVocabulary(self, word):
        """Return the index of *word* in the sorted vocabulary, or -1.

        -1 is returned for None/empty words, a missing vocabulary, or a word
        that is not present. (FIX: replaced `== None` identity comparisons
        with `is None` and removed the redundant empty-list branch — an
        empty list's .index() already raises ValueError.)
        """
        if word is None or word == "" or self.orderedVocabulary is None:
            return -1
        try:
            return self.orderedVocabulary.index(word)
        except ValueError:
            return -1
class Appr(object):
def __init__(self,model,nepochs=100,sbatch=64,lr=0.05,lr_min=1e-4,lr_factor=3,lr_patience=5,clipgrad=10000,lamb=0.75,smax=400,args=None):
self.model=model
self.opt = args
self.nepochs=nepochs
self.sbatch=sbatch
self.lr=lr
self.lr_min=lr_min
self.lr_factor=lr_factor
self.lr_patience=lr_patience
self.clipgrad=clipgrad
self.ce=torch.nn.CrossEntropyLoss()
self.lamb=lamb
self.smax=smax
self.logpath = None
self.single_task = False
self.logpath = args.parameter
# Synaptic Implementatio development
self.small_omega_var = {}
self.previous_weights_mu_minus_1 = {}
self.big_omega_var = {}
self.aux_loss = 0.0
self.reset_small_omega_ops = []
self.update_small_omega_ops = []
# Parameters for the intelligence synapses model.
self.param_c = 0.1
self.param_xi = 0.1
self.learning_rate = 0.001
self.exp_pow = torch.tensor(2)
self.exp_pow = 2
if self.model != None:
self.task_size = 1 if self.model.taskcla == None else len(self.model.taskcla)
if self.model != None:
self.task_size = len(self.model.taskcla)
self.wordInDomVocabulary = dict()
self.aspectInDomVocabulary = dict()
#Values taken from ""
optimizer = self.model.get_Optimizer()
if optimizer != None:
self._set_optimizer(optimizer)
self.current_task = -1
self.cse = dict()
self.asa = dict()
self.rkList = dict()
if len(args.parameter)>=1:
params=args.parameter.split(',')
print('Setting parameters to',params)
if len(params)>1:
if utils.is_number(params[0]):
self.lamb=float(params[0])
else:
self.logpath = params[0]
if utils.is_number(params[1]):
self.smax=float(params[1])
else:
self.logpath = params[1]
if len(params)>2 and not utils.is_number(params[2]):
self.logpath = params[2]
if len(params)>3 and utils.is_number(params[3]):
self.single_task = int(params[3])
else:
self.logpath = args.parameter
if self.logpath is not None:
self.logs={}
self.logs['train_loss'] = {}
self.logs['train_acc'] = {}
self.logs['train_reg'] = {}
self.logs['valid_loss'] = {}
self.logs['valid_acc'] = {}
self.logs['valid_reg'] = {}
self.logs['mask'] = {}
self.logs['mask_pre'] = {}
else:
self.logs = None
self.mask_pre=None
self.mask_back=None
return
def find_noleaf(self, list_variables):
print("Parameters")
for i, (name, var) in enumerate(list_variables):
if var.is_leaf == False:
print("Leaf tensor False")
break
return
def _set_optimizer(self, _new_optimize):
if _new_optimize != None: self.optimizer = _new_optimize
def _get_optimizer(self,lr=None):
if lr is None: lr = self.lr
print("!!!!New optmization!!!!!")
# if self.optimizer != None:
# print("--------Optmization---------")
# return self.optimizer
# return torch.optim.SGD(self.tensorVariables, lr=lr)
# return torch.optim.SGD(self.model.parameters(),lr=lr)
return self.opt.optimizer(self.model.parameters(), lr=self.opt.learning_rate, weight_decay=self.opt.l2reg)
    def train(self, t, train_data_loader, test_data_loader, val_data_loader):
        """Train task (domain) *t* for up to self.nepochs epochs.

        Validation loss drives patience-based learning-rate annealing;
        training stops early once the LR decays below self.lr_min. On a new
        task, all task-head parameters are frozen except the head of the
        current task before a fresh optimizer is built.
        """
        best_loss=np.inf
        #best_model=utils.get_model(self.model)
        lr=self.lr
        patience=self.lr_patience
        # Task index as a LongTensor (CUDA when available).
        # NOTE(review): `volatile` is ignored/removed in modern PyTorch.
        task = torch.autograd.Variable(torch.LongTensor([t]).cuda(),
                volatile=False, requires_grad=False) if torch.cuda.is_available() \
            else torch.autograd.Variable(torch.LongTensor([t]), volatile=False, requires_grad=False)
        if t != self.current_task:
            # New task: the last output layer's weights must start from a
            # known state. (Translated from the original Spanish notes:
            # Gaussian/Xavier init is deliberately avoided for output heads,
            # although zero-init is normally discouraged for hidden layers.)
            self.current_task = t
            # Open question from the original author: should the `tm`
            # variable be placed among the values to optimize?
            print(
                " ###### Update status of last layer weight in current task(domain) AVOID Stocastic Gradient ########")
            # Freeze every task head, then re-enable gradients only for the
            # head matching the current task index.
            for name, var in self.model.named_parameters():
                if name.find("model.last.") != -1:
                    var.requires_grad_(False);
                    if re.match("model.last." + str(t), name) != None:
                        print("Variable " + name + " update to SGD")
                        var.requires_grad_(True);
            self.optimizer = self._get_optimizer(lr)
        # Loop epochs
        for e in range(self.nepochs):
            # Train
            clock0 = time.time()
            print("----- Optimizer -----")
            print(self.optimizer)
            print("----------------------")
            self.train_epochesi(t, train_data_loader)
            clock1 = time.time()
            # Evaluate on the validation split used for LR scheduling.
            train_loss, train_acc, train_recall, train_f1, train_cohen_kappa = self.eval_withregsi(t, val_data_loader)
            clock2 = time.time()
            dataset_size = len(val_data_loader.dataset)
            print('| Epoch {:3d}, time={:5.1f}ms/{:5.1f}ms | Train-Val: loss={:.3f}, acc={:5.1f}, f1={:5.1f}, cohen_kappa={:5.1f}%|'.format(
                e + 1,
                1000 * self.sbatch * (
                        clock1 - clock0) / dataset_size,
                1000 * self.sbatch * (
                        clock2 - clock1) / dataset_size,
                train_loss,
                100 * train_acc,
                100 * train_f1,
                100 * train_cohen_kappa),
                end='')
            # Valid
            valid_loss, valid_acc , valid_recall, valid_f1, valid_cohen_kappa = self.eval_withregsi(t, test_data_loader)
            print(' Test: loss={:.3f}, acc={:5.1f}, f1={:5.1f}, cohen_kappa={:5.1f}%|'.format(valid_loss, 100 * valid_acc, 100 * valid_f1,100*valid_cohen_kappa),
                  end='')
            # Adapt lr: improve -> reset patience; stagnate -> decay LR and
            # rebuild the optimizer; stop once LR falls below lr_min.
            if valid_loss<best_loss:
                best_loss=valid_loss
                #best_model=utils.get_model(self.model)
                patience=self.lr_patience
                print(' *',end='')
            else:
                patience-=1
                if patience<=0:
                    lr/=self.lr_factor
                    print(' lr={:.1e}'.format(lr),end='')
                    if lr<self.lr_min:
                        print()
                        break
                    patience=self.lr_patience
                    self.optimizer=self._get_optimizer(lr)
            print()
        # Diagnostic dump of the task-head parameters after training.
        print(" ###### Show status of last layer weight in current task(domain) ########")
        toViewLasLayer = []
        for name, var in self.model.named_parameters():
            if name.find("model.last.") != -1:
                print("Requiere Grand ==> " + str(var.requires_grad))
                print("Variable name " + name + " == " + str(var.data))
                toViewLasLayer.append((name, var))
        return
    def train_epochesi(self, t, train_data_loader, thres_cosh=50,thres_emb=6):
        """Run one training epoch for task *t*.

        For tasks >= 2 the per-domain knowledge base (CSE matrix and ASA
        data) is injected into the model before the batch loop. After the
        epoch, the domain's CSE/ASA structures are rebuilt and cached.
        NOTE(review): thres_cosh / thres_emb are currently unused.
        """
        self.model.train()
        # Loop batches
        loop_size = 0
        # Task domain: from the third domain on, feed accumulated knowledge.
        if t >= 2:
            CSEt, _ = self.getCSMNewDomain(t, self.wordInDomVocabulary[t], self.aspectInDomVocabulary[t])
            ASAt, similarAspectList = self.getASANewDomain(t, self.wordInDomVocabulary[t], self.aspectInDomVocabulary[t])
            self.model.insertKnowBase(ASAt, CSEt)
        loop_size = 0
        # NOTE(review): global_step is never incremented, so the
        # `global_step % log_step` check below logs on every batch — confirm.
        global_step = 0
        n_correct, n_total, loss_total = 0, 0, 0
        for i_batch, sample_batched in enumerate(train_data_loader):
            print("Batch size: " + str (sample_batched.__len__()))
            inputs = [sample_batched[col].to(self.opt.device) for col in self.opt.inputs_cols]
            targets = sample_batched['polarity'].to(self.opt.device)
            # NOTE(review): `volatile` is ignored/removed in modern PyTorch.
            task = torch.autograd.Variable(torch.LongTensor([t]).cuda(), volatile=False, requires_grad=False) \
                if torch.cuda.is_available() \
                else torch.autograd.Variable(torch.LongTensor([t]), volatile=False, requires_grad=False)
            # Forward current model
            startDateTime = datetime.now()
            outputs,_=self.model.forward( task, inputs)
            self.getMemoryRam()
            # One output head per task; pick the current task's head.
            output = outputs[t]
            loss=self.criterion(t,output,targets)
            # Backward
            self.optimizer.zero_grad()
            loss.backward()
            n_correct += (torch.argmax(output, -1) == targets).sum().item()
            n_total += len(output)
            # NOTE(review): scales by len(outputs) (number of heads), not
            # len(output) (batch size) — looks like a typo; confirm.
            loss_total += loss.item() * len(outputs)
            if global_step % self.opt.log_step == 0:
                train_loss = loss_total / n_total
                print('loss: {:.4f}'.format(train_loss))
            self.optimizer.step()
        # Build the Context-Sentiment-Effect matrix for domain t and cache
        # the domain's knowledge structures.
        self.cse[t] = self.buildCSE(self.model.getWordInDomain())
        self.asa[t] = self.buildASA(t, train_data_loader.dataset)
        self.setDomaiVocabulary(t, self.cse[t], self.asa[t] , self.wordInDomVocabulary[t], self.aspectInDomVocabulary[t] )
        # NOTE(review): a previous revision indexed self.model.last[t] here
        # and failed with "'Linear' object does not support indexing"; the
        # offending code was removed but the trace was kept in comments.
        print("1.8 ")
        return
    def eval_withregsi(self, t, val_data_loader):
        """Evaluate task *t* on the given loader.

        Returns (avg_loss, accuracy, macro recall, macro F1, Cohen's kappa).
        NOTE(review): targets/outputs are collected with .detach().numpy(),
        which assumes CPU tensors — this would fail on CUDA without .cpu().
        """
        total_loss = 0
        total_acc = 0
        total_num = 0
        n_correct, n_total = 0, 0
        # Accumulated targets/logits across batches for the sklearn metrics.
        t_targets_all, t_outputs_all = None, None
        self.model.eval()
        total_reg = 0
        for i_batch, sample_batched in enumerate(val_data_loader):
            # clear gradient accumulators
            inputs = [sample_batched[col].to(self.opt.device) for col in self.opt.inputs_cols]
            targets = sample_batched['polarity'].to(self.opt.device)
            # NOTE(review): `volatile` is ignored/removed in modern PyTorch.
            task = torch.autograd.Variable(torch.LongTensor([t]).cuda(), volatile=False, requires_grad=False) \
                if torch.cuda.is_available() \
                else torch.autograd.Variable(torch.LongTensor([t]), volatile=False,requires_grad=False)
            # Forward
            startDateTime = datetime.now()
            outputs,_ = self.model.forward(task, inputs)
            self.getMemoryRam()
            output = outputs[t]
            loss = self.criterion(t, output, targets)
            _, pred = output.max(1)
            hits = (pred == targets).float()
            n_correct += (torch.argmax(output, -1) == targets).sum().item()
            # Log: accumulate batch-size-weighted loss and hit counts.
            current_batch_size = len(pred)
            total_loss += loss.data.cpu().numpy() * current_batch_size
            total_acc += hits.sum().data.cpu().numpy()
            total_num += current_batch_size
            if t_targets_all is None:
                t_targets_all = targets.detach().numpy()
                t_outputs_all = output.detach().numpy()
            else:
                t_targets_all = np.concatenate((t_targets_all, targets.detach().numpy()), axis=0)
                t_outputs_all = np.concatenate((t_outputs_all, output.detach().numpy()), axis=0)
        # NOTE(review, translated from Spanish): verify that labels [0, 1, 2]
        # match the label encoding used by the rest of the implementation;
        # a label for "not an aspect" is still missing.
        f1 = metrics.f1_score(t_targets_all, np.argmax(t_outputs_all, -1), labels=[0, 1, 2], average='macro')
        recall = metrics.recall_score(t_targets_all, np.argmax(t_outputs_all, -1), labels=[0, 1, 2],
                                      average='macro')
        cohen_kappa = metrics.cohen_kappa_score(t_targets_all, np.argmax(t_outputs_all, -1))
        return total_loss / total_num, total_acc / total_num, recall, f1, cohen_kappa
    ###-------------------------------------------------------------------------------------------------------------
    def eval(self, t, test_data_loader):
        """Evaluate task *t*; thin alias for eval_withregsi (same return tuple)."""
        return self.eval_withregsi(t, test_data_loader)
def criterion(self,t,output,targets):
# Regularization for all previous tasks
loss_reg=0
#
# for name, var in self.model.named_parameters():
# print ("Variable: ", name)
# if t>0:
# for name, var in self.tensorVariablesTuples:
# loss_reg += torch.sum(torch.mul( self.big_omega_var[name], (self.previous_weights_mu_minus_1[name] - var.data).pow(self.exp_pow)))
# for (name,param),(_,param_old) in zip(self.model.named_parameters(),self.model_old.named_parameters()):
# loss_reg+=torch.sum(self.fisher[name]*(param_old-param).pow(2))/2
# + self.param_c * loss_reg
return self.ce(output,targets)
    ########################################################################################################################
    def setAllAspect(self, all_aspect):
        # Store the global aspect list shared across all domains.
        self.all_aspect = all_aspect
    def setAllWord(self, all_word_vocabulary):
        # Store the global word vocabulary shared across all domains.
        self.all_word_vocabulary = all_word_vocabulary
    def setAspectInDomain(self, task, aspect_vocabulary):
        # Register the aspect vocabulary for a specific task (domain).
        self.aspectInDomVocabulary[task] = aspect_vocabulary
    def setWordInDomain(self, task, word_vocabulary):
        # Register the word vocabulary for a specific task (domain).
        self.wordInDomVocabulary[task] = word_vocabulary
# Serialize model, optimizer and other parameters to file
def saveModel(self, topath):
torch.save({
'epoch': self.nepochs,
'model_state_dict': self.model.get_Model().state_dict(),
'optimizer_state_dict': self.optimizer.state_dict(),
'loss': self.ce,
'learning_rate': self.lr,
'batch': self.sbatch,
'task_size': self.task_size
}, topath)
return True
# Unserialize model, optimizer and other parameters from file
def loadModel(self, frompath):
if not os.path.exists(frompath):
return False
else:
checkpoint = torch.load(frompath)
self.model.get_Model().load_state_dict(checkpoint['model_state_dict'])
self.optimizer = self.opt.optimizer(filter(lambda p: p.requires_grad, self.model.parameters()),
lr=self.opt.learning_rate, weight_decay=self.opt.l2reg)
self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
self.ce = checkpoint['loss']
return True
def buildCSE(self, vocabulary):
if type(vocabulary) == type(None):
return None
else:
memory = self.model.getEmbeddingMatrix(vocabulary)
return ( memory)
    def buildASA(self, t, dataset):
        # Delegate to the model: build the Aspect-Sentiment Attention data
        # for domain *t* from the dataset and the domain's vocabularies.
        return self.model.buildASA(t, dataset, self.wordInDomVocabulary[t], self.aspectInDomVocabulary[t])
    def updateGlobalCSE(self):
        # TODO: not implemented — placeholder for merging the per-domain
        # context-sentiment matrices into a global one.
        pass
    def updateGlobalASA(self, current_domain_asa):
        # TODO: not implemented — placeholder for folding the current
        # domain's ASA data into the global aspect-sentiment store.
        pass
"""
The Aspect-Sentiment Attention (ASA)
"""
def getASANewDomain(self,t, word_context_vocabulary, aspect_vocabulary):
| |
or "
"--predict-cas13a-activity-model must be specified if "
"--obj is 'maximize-activity' (unless "
"--use-simple-binary-activity-prediction is set)"))
if args.predict_activity_degradation:
raise Exception(("--predict-activity-model-path must be "
"specified if --predict-activity-degradation is set "
"(unless --use-simple-binary-activity-prediction is set)"))
# Do not predict activity
predictor = None
# Construct mutator, if necessary
if args.predict_activity_degradation:
mutator = mutate.GTRSubstitutionMutator(
aln, *args.predict_activity_degradation,
args.predict_activity_degradation_mu,
args.predict_activity_degradation_t,
args.predict_activity_degradation_n)
else:
mutator = None
# Find an optimal set of guides for each window in the genome,
# and write them to a file; ensure that the selected guides are
# specific to this alignment
if args.obj == 'minimize-guides':
gs = guide_search.GuideSearcherMinimizeGuides(
aln,
args.guide_length,
args.guide_mismatches,
guide_cover_frac,
args.missing_thres,
seq_groups=seq_groups,
guide_is_suitable_fn=guide_is_suitable,
required_guides=required_guides_for_aln,
blacklisted_ranges=blacklisted_ranges_for_aln,
allow_gu_pairs=allow_gu_pairs,
required_flanking_seqs=required_flanking_seqs,
predictor=predictor,
do_not_memoize_guides=args.do_not_memoize_guide_computations)
elif args.obj == 'maximize-activity':
gs = guide_search.GuideSearcherMaximizeActivity(
aln,
args.guide_length,
args.soft_guide_constraint,
args.hard_guide_constraint,
args.penalty_strength,
args.missing_thres,
algorithm=args.maximization_algorithm,
guide_is_suitable_fn=guide_is_suitable,
required_guides=required_guides_for_aln,
blacklisted_ranges=blacklisted_ranges_for_aln,
allow_gu_pairs=allow_gu_pairs,
required_flanking_seqs=required_flanking_seqs,
predictor=predictor,
do_not_memoize_guides=args.do_not_memoize_guide_computations)
if args.search_cmd == 'sliding-window':
# Find an optimal set of guides for each window in the genome,
# and write them to a file
gs.find_guides_with_sliding_window(args.window_size,
args.out_tsv[i],
window_step=args.window_step,
sort=args.sort_out,
print_analysis=(args.log_level<=logging.INFO))
elif args.search_cmd == 'complete-targets':
# Find optimal targets (primer and guide set combinations),
# and write them to a file
if args.primer_gc_content_bounds is None:
primer_gc_content_bounds = None
else:
primer_gc_content_bounds = tuple(args.primer_gc_content_bounds)
ps = primer_search.PrimerSearcher(aln, args.primer_length,
args.primer_mismatches,
primer_cover_frac,
args.missing_thres,
seq_groups=seq_groups,
primer_gc_content_bounds=primer_gc_content_bounds)
if args.obj == 'minimize-guides':
obj_type = 'min'
elif args.obj == 'maximize-activity':
obj_type = 'max'
ts = target_search.TargetSearcher(ps, gs,
obj_type=obj_type,
max_primers_at_site=args.max_primers_at_site,
max_target_length=args.max_target_length,
obj_weights=args.obj_fn_weights,
only_account_for_amplified_seqs=args.only_account_for_amplified_seqs,
halt_early=args.halt_search_early, mutator=mutator)
ts.find_and_write_targets(args.out_tsv[i],
best_n=args.best_n_targets, no_overlap=args.do_not_overlap,
annotations=args.annotations[i])
else:
raise Exception("Unknown search subcommand '%s'" % args.search_cmd)
# i should no longer be masked from queries
if aq is not None:
aq.unmask_all_aln()
def run(args):
    """Top-level driver: seed RNGs, prepare inputs, run the design, clean up.

    Dispatches on args.input_type ('auto-from-file', 'auto-from-args', or
    'fasta') to normalize the input into args.in_fasta/args.out_tsv/etc.,
    then hands off to design_for_id(). Temporary alignment files created
    for the 'auto-*' paths are removed afterwards.
    """
    logger = logging.getLogger(__name__)

    # Seed both RNGs so the entire program run is reproducible
    random.seed(args.seed)
    np.random.seed(args.seed)

    check_obj_args(args)

    logger.info("Running design.py with arguments: %s", args)

    if args.input_type in ['auto-from-file', 'auto-from-args']:
        # Make the NCBI API key available for automatic downloads
        if args.ncbi_api_key:
            ncbi_neighbors.ncbi_api_key = args.ncbi_api_key

        if args.input_type == 'auto-from-file':
            if not os.path.isdir(args.out_tsv_dir):
                raise Exception("Output directory '%s' does not exist" %
                                args.out_tsv_dir)

        # Prepare input alignments, stored in temp fasta files
        prepared = prepare_alignments(args)
        (in_fasta, taxid_for_fasta, years_tsv, aln_tmp_dirs, out_tsv,
            design_for, specific_against_metadata_accs, annotations) = prepared
        args.in_fasta = in_fasta
        args.taxid_for_fasta = taxid_for_fasta
        args.out_tsv = out_tsv
        args.design_for = design_for
        args.specific_against_metadata_accs = specific_against_metadata_accs
        args.annotations = annotations

        if args.cover_by_year_decay:
            # args.cover_by_year_decay holds (year with highest cover, decay);
            # prepend the name of the file listing the years
            year_highest_cover, year_cover_decay = args.cover_by_year_decay
            args.cover_by_year_decay = (years_tsv.name, year_highest_cover,
                year_cover_decay)
    elif args.input_type == 'fasta':
        if len(args.in_fasta) != len(args.out_tsv):
            raise Exception(("Number output TSVs must match number of input "
                "FASTAs"))
        num_inputs = len(args.in_fasta)
        args.out_tsv = [prefix + '.tsv' for prefix in args.out_tsv]
        args.design_for = None
        args.taxid_for_fasta = list(range(num_inputs))
        args.specific_against_metadata_accs = [[] for _ in range(num_inputs)]
        args.annotations = [[] for _ in range(num_inputs)]
    else:
        raise Exception("Unknown input type subcommand '%s'" % args.input_type)

    design_for_id(args)

    # Close temporary files storing alignments (auto-* inputs only)
    if args.input_type in ['auto-from-file', 'auto-from-args']:
        for tmp_dir in aln_tmp_dirs:
            tmp_dir.cleanup()
        if years_tsv is not None:
            years_tsv.close()
def argv_to_args(argv):
parser = argparse.ArgumentParser()
###########################################################################
# OPTIONS AVAILABLE ACROSS ALL SUBCOMMANDS
###########################################################################
base_subparser = argparse.ArgumentParser(add_help=False)
# Guide length
base_subparser.add_argument('-gl', '--guide-length', type=int, default=28,
help="Length of guide to construct")
# Objective function
base_subparser.add_argument('--obj',
choices=['maximize-activity', 'minimize-guides'],
default='minimize-guides',
help=(("Objective function to solve. 'maximize-activity' maximizes "
"the expected activity of the guide set of the target genomes "
"subject to soft and hard constraints on the size of the guide "
"set. 'minimize-guides' minimizes the number of guides in the "
"guide set subject to coverage constraints across the target "
"genomes.")))
##########
# Parameters for minimization objective
# Number of guide mismatches
base_subparser.add_argument('-gm', '--guide-mismatches', type=int,
help=("Allow for this number of mismatches when "
"determining whether a guide covers a sequence"))
# Desired coverage of target sequences
def check_cover_frac(val):
fval = float(val)
if fval > 0 and fval <= 1:
# a float in (0, 1]
return fval
else:
raise argparse.ArgumentTypeError("%s is an invalid -p value" % val)
base_subparser.add_argument('-gp', '--guide-cover-frac',
type=check_cover_frac,
help=("The fraction of all sequences that must be covered "
"by the guides."))
# Automatically setting desired coverage of target sequences based
# on their year
class ParseCoverDecayWithYearsFile(argparse.Action):
# This is needed because --cover-by-year-decay has multiple args
# of different types
def __call__(self, parser, namespace, values, option_string=None):
a, b, c = values
# Check that b is a valid year
year_pattern = re.compile('^(\d{4})$')
if year_pattern.match(b):
bi = int(b)
else:
raise argparse.ArgumentTypeError(("%s is an invalid 4-digit "
"year") % b)
# Check that c is a valid decay
cf = float(c)
if cf <= 0 or cf >= 1:
raise argparse.ArgumentTypeError(("%s is an invalid decay; it "
"must be a float in (0,1)" % c))
setattr(namespace, self.dest, (a, bi, cf))
class ParseCoverDecayByGeneratingYearsFile(argparse.Action):
# This is needed because --cover-by-year-decay has multiple args
# of different types
def __call__(self, parser, namespace, values, option_string=None):
a, b = values
# Check that a is a valid year
year_pattern = re.compile('^(\d{4})$')
if year_pattern.match(a):
ai = int(a)
else:
raise argparse.ArgumentTypeError(("%s is an invalid 4-digit "
"year") % a)
# Check that b is a valid decay
bf = float(b)
if bf <= 0 or bf >= 1:
raise argparse.ArgumentTypeError(("%s is an invalid decay; it "
"must be a float in (0,1)" % b))
setattr(namespace, self.dest, (ai, bf))
##########
##########
# Parameters for maximization objective
# Soft guide constraint
base_subparser.add_argument('-sgc', '--soft-guide-constraint', type=int,
help=("Soft constraint on the number of guides. There is no "
"penalty for a number of guides <= SOFT_GUIDE_CONSTRAINT, "
"and having a number of guides beyond this is penalized. "
"See --penalty-strength. This value must be <= "
"HARD_GUIDE_CONSTRAINT."))
# Hard guide constraint
base_subparser.add_argument('-hgc', '--hard-guide-constraint', type=int,
help=("Hard constraint on the number of guides. The number of "
"guides designed for a target will be <= "
"HARD_GUIDE_CONSTRAINT."))
# Penalty strength
base_subparser.add_argument('--penalty-strength', type=float,
help=("Importance of the penalty when the number of guides "
"exceeds the soft guide constraint. Namely, for a guide "
"set G, if the penalty strength is L and the soft "
"guide constraint is h, then the penalty in the objective "
"function is L*max(0, |G|-h). Must be >= 0. The value "
"depends on the output of activity model and reflects a "
"tolerance for more guides; for the default activity model "
"reasonable values are in the range [0.1, 0.5]."))
# Algorithm for solving
base_subparser.add_argument('--maximization-algorithm',
choices=['greedy', 'random-greedy'],
help=("Algorithm to use for solving submodular maximization "
"problem. 'greedy' is the canonical deterministic greedy "
"algorithm (Nemhauser 1978) for constrained monotone submodular "
"maximization, which may perform well in practice but has "
"poor theoretical guarantees here because the function is "
"not monotone (unless --penalty-strength is 0). 'random-"
"greedy' is the randomized greedy algorithm (Buchbinder "
"2014) for constrained non-monotone submodular maximization "
"that has good worst-case theoretical guarantees."))
##########
# Handling missing data
base_subparser.add_argument('--missing-thres', nargs=3,
type=float, default=[0.5, 0.05, 1.5],
help=("<A> <B> <C>; parameters governing the threshold on which sites "
"to ignore due to too much missing data. The 3 values specify "
"not to attempt to design guides overlapping sites where the "
"fraction of sequences with missing data is > min(A, max(B, C*m)) "
"where m is the median fraction of sequences with missing data "
"over the alignment. Set a=1 and b=1 to not ignore sites due "
"to missing data."))
# Differential identification
base_subparser.add_argument('--id-m', dest="diff_id_mismatches",
type=int, default=4,
help=("Allow for this number of mismatches when determining whether "
"a guide 'hits' a sequence in a group/taxon other than the "
"for which it is being designed; higher values correspond to more "
"specificity."))
base_subparser.add_argument('--id-frac', dest="diff_id_frac",
type=float, default=0.01,
help=("Decide that a guide 'hits' a group/taxon if it 'hits' a "
"fraction of sequences in that group/taxon that exceeds this "
"value; lower values correspond to more specificity."))
base_subparser.add_argument('--id-method', dest="diff_id_method",
choices=["lshnn", "shard"], default="shard",
help=("Choice of method to query for specificity. 'lshnn' for "
"LSH near-neighbor approach. 'shard' for approach that "
"shards k-mers across small tries."))
base_subparser.add_argument('--specific-against-fastas', nargs='+',
default=[],
help=("Path to one or more FASTA files giving sequences, | |
<reponame>cla7aye15I4nd/awesome-mcu<filename>utils/stm32parser/bes2300.py
import ctypes
class BES2300Spi:
    """SPI controller register block for the BES2300.

    Register names follow the SSP naming convention (SSPCR0, SSPDR, ...);
    byte offsets are given in the per-field comments.
    """
    class Type(ctypes.Structure):
        # Each register is a 32-bit word; 'reserved' spans 0x18 words
        # (0x60 bytes), placing SSPRXCR at byte offset 0x88.
        _fields_ = [
            ("SSPCR0"  , ctypes.c_uint32), # 0x00000000
            ("SSPCR1"  , ctypes.c_uint32), # 0x00000004
            ("SSPDR"   , ctypes.c_uint32), # 0x00000008
            ("SSPSR"   , ctypes.c_uint32), # 0x0000000C
            ("SSPCPSR" , ctypes.c_uint32), # 0x00000010
            ("SSPIMSC" , ctypes.c_uint32), # 0x00000014
            ("SSPRIS"  , ctypes.c_uint32), # 0x00000018
            ("SSPMIS"  , ctypes.c_uint32), # 0x0000001C
            ("SSPICR"  , ctypes.c_uint32), # 0x00000020
            ("SSPDMACR", ctypes.c_uint32), # 0x00000024
            ("reserved", ctypes.c_uint32 * 0x18), # 0x00000028
            ("SSPRXCR" , ctypes.c_uint32), # 0x00000088
        ]
class BES2300Dma:
    """Per-channel DMA register block for the BES2300.

    This models one channel's registers; channel N lives at byte offset
    0x100 + N*0x20 within the DMA controller, per the field comments.
    """
    class Type(ctypes.Structure):
        _fields_ = [
            ("SRCADDR"  , ctypes.c_uint32), # 0x100+N*0x20 DMA Channel Source Address Register
            ("DSTADDR"  , ctypes.c_uint32), # 0x104+N*0x20 DMA Channel Destination Address Register
            ("LLI"      , ctypes.c_uint32), # 0x108+N*0x20 DMA Channel Linked List Item Register
            ("CONTROL"  , ctypes.c_uint32), # 0x10C+N*0x20 DMA Channel Control Register
            ("CONFIG"   , ctypes.c_uint32), # 0x110+N*0x20 DMA Channel Configuration Register
            ("RESERVED1", ctypes.c_uint32 * 3), # 0x114+N*0x20
        ]
class BES2300Transq:
    """TRANSQ (transfer queue) register block for the BES2300.

    Only the first four word registers are modeled here.
    """
    class Type(ctypes.Structure):
        _fields_ = [
            ("CTRL"         , ctypes.c_uint32), # 0x000
            ("RMT_INTMASK"  , ctypes.c_uint32), # 0x004
            ("RMT_INTSET"   , ctypes.c_uint32), # 0x008
            ("LDONE_INTMASK", ctypes.c_uint32), # 0x00C
        ]
class BES2300Rtc:
    """RTC register block for the BES2300.

    Register names follow the RTC convention: data, match, load, control,
    interrupt mask/status/clear; byte offsets in the per-field comments.
    """
    class Type(ctypes.Structure):
        _fields_ = [
            ("RTCDR"  , ctypes.c_uint32), # 0x000
            ("RTCMR"  , ctypes.c_uint32), # 0x004
            ("RTCLR"  , ctypes.c_uint32), # 0x008
            ("RTCCR"  , ctypes.c_uint32), # 0x00C
            ("RTCIMSC", ctypes.c_uint32), # 0x010
            ("RTCRIS" , ctypes.c_uint32), # 0x014
            ("RTCMIS" , ctypes.c_uint32), # 0x018
            ("RTCICR" , ctypes.c_uint32), # 0x01C
        ]
class BES2300Norflash:
    """NOR flash controller register block for the BES2300.

    The register map is undocumented here; only two opaque word registers
    (named by byte offset) are modeled.
    """
    class Type(ctypes.Structure):
        _fields_ = [
            ("REG_000", ctypes.c_uint32), #
            ("REG_004", ctypes.c_uint32), #
        ]
class BES2300Pwm:
    """PWM register block for the BES2300.

    Registers are paired per channel group (01 / 23): phase, load period,
    and toggle; byte offsets in the per-field comments.
    """
    class Type(ctypes.Structure):
        _fields_ = [
            ("ID"      , ctypes.c_uint32), # 0x000
            ("EN"      , ctypes.c_uint32), # 0x004
            ("INV"     , ctypes.c_uint32), # 0x008
            ("PHASE01" , ctypes.c_uint32), # 0x00C
            ("PHASE23" , ctypes.c_uint32), # 0x010
            ("LOAD01"  , ctypes.c_uint32), # 0x014
            ("LOAD23"  , ctypes.c_uint32), # 0x018
            ("TOGGLE01", ctypes.c_uint32), # 0x01C
            ("TOGGLE23", ctypes.c_uint32), # 0x020
            ("PHASEMOD", ctypes.c_uint32), # 0x024
        ]
class BES2300Uart:
    """UART register block for the BES2300.

    Only the data register (UARTDR) at offset 0x000 is modeled.
    """
    class Type(ctypes.Structure):
        _fields_ = [
            ("UARTDR", ctypes.c_uint32), # 0x000
        ]
class BES2300Aoncmu:
    """Always-on CMU (clock management unit) register block for the BES2300.

    Reserved gaps are sized in 32-bit words, consistent with the byte
    offsets listed in the per-field comments (e.g. RESERVED_07C spans
    0x1D words so WAKEUP_PC lands at 0xF0).
    """
    class Type(ctypes.Structure):
        _fields_ = [
            ("CHIP_ID"        , ctypes.c_uint32), # 0x00
            ("TOP_CLK_ENABLE" , ctypes.c_uint32), # 0x04
            ("TOP_CLK_DISABLE", ctypes.c_uint32), # 0x08
            ("RESET_PULSE"    , ctypes.c_uint32), # 0x0C
            ("RESET_SET"      , ctypes.c_uint32), # 0x10
            ("RESET_CLR"      , ctypes.c_uint32), # 0x14
            ("CLK_SELECT"     , ctypes.c_uint32), # 0x18
            ("CLK_OUT"        , ctypes.c_uint32), # 0x1C
            ("WRITE_UNLOCK"   , ctypes.c_uint32), # 0x20
            ("MEMSC"          , ctypes.c_uint32 * 4), # 0x24
            ("MEMSC_STATUS"   , ctypes.c_uint32), # 0x34
            ("BOOTMODE"       , ctypes.c_uint32), # 0x38
            ("RESERVED_03C"   , ctypes.c_uint32), # 0x3C
            ("MOD_CLK_ENABLE" , ctypes.c_uint32), # 0x40
            ("MOD_CLK_DISABLE", ctypes.c_uint32), # 0x44
            ("MOD_CLK_MODE"   , ctypes.c_uint32), # 0x48
            ("CODEC_DIV"      , ctypes.c_uint32), # 0x4C
            ("TIMER_CLK"      , ctypes.c_uint32), # 0x50
            ("PWM01_CLK"      , ctypes.c_uint32), # 0x54
            ("PWM23_CLK"      , ctypes.c_uint32), # 0x58
            ("RAM_CFG"        , ctypes.c_uint32), # 0x5C
            ("RESERVED_060"   , ctypes.c_uint32), # 0x60
            ("PCM_I2S_CLK"    , ctypes.c_uint32), # 0x64
            ("SPDIF_CLK"      , ctypes.c_uint32), # 0x68
            ("SLEEP_TIMER_OSC", ctypes.c_uint32), # 0x6C
            ("SLEEP_TIMER_32K", ctypes.c_uint32), # 0x70
            ("STORE_GPIO_MASK", ctypes.c_uint32), # 0x74
            ("CODEC_IIR"      , ctypes.c_uint32), # 0x78
            ("RESERVED_07C"   , ctypes.c_uint32 * 0x1D), # 0x7C
            ("WAKEUP_PC"      , ctypes.c_uint32), # 0xF0
            ("DEBUG_RES"      , ctypes.c_uint32 * 2), # 0xF4
            ("CHIP_FEATURE"   , ctypes.c_uint32), # 0xFC
        ]
class BES2300Codec:
    """CODEC register block for the BES2300.

    The register map is undocumented here, so the block is modeled as
    consecutive opaque 32-bit registers REG_000 .. REG_224 (byte offsets
    0x000 through 0x224; 0x228 bytes, 138 words in total). Field names
    encode the hex byte offset.
    """
    class Type(ctypes.Structure):
        # One c_uint32 per 4-byte offset; generated instead of listing
        # 138 identical entries by hand.
        _fields_ = [("REG_%03X" % _offset, ctypes.c_uint32)
                    for _offset in range(0x000, 0x228, 4)]
class BES2300Btcmu:
    """Bluetooth CMU (clock management unit) register block for the BES2300.

    NOTE(review): RESERVED_020 is declared as 0x24 *words* (0x90 bytes),
    which would place CLK_OUT at byte offset 0xB0 -- contradicting the
    0x44 offset comment below. Other blocks in this file (BES2300Spi,
    BES2300Aoncmu) size reserved gaps in words consistently with their
    offset comments, so either this multiplier should be 9 (the 0x24-byte
    gap from 0x20 to 0x44) or the offset comments are stale. Confirm
    against the datasheet before relying on offsets past RESET_PULSE.
    """
    class Type(ctypes.Structure):
        _fields_ = [
            ("CLK_ENABLE"  , ctypes.c_uint32), # 0x00
            ("CLK_DISABLE" , ctypes.c_uint32), # 0x04
            ("CLK_MODE"    , ctypes.c_uint32), # 0x08
            ("DIV_TIMER"   , ctypes.c_uint32), # 0x0C
            ("RESET_SET"   , ctypes.c_uint32), # 0x10
            ("RESET_CLR"   , ctypes.c_uint32), # 0x14
            ("DIV_WDT"     , ctypes.c_uint32), # 0x18
            ("RESET_PULSE" , ctypes.c_uint32), # 0x1C
            ("RESERVED_020", ctypes.c_uint32 * 0x24), # 0x20
            ("CLK_OUT"     , ctypes.c_uint32), # 0x44
            ("RESERVED_048", ctypes.c_uint32 * 2), # 0x48
            ("ISIRQ_SET"   , ctypes.c_uint32), # 0x50
            ("ISIRQ_CLR"   , ctypes.c_uint32), # 0x54
        ]
class BES2300Iomux:
    """IOMUX (pin multiplexing) register block for the BES2300.

    The register map is undocumented here, so the block is modeled as
    consecutive opaque 32-bit registers REG_000 .. REG_078 (byte offsets
    0x000 through 0x078; 0x7C bytes, 31 words in total). Field names
    encode the hex byte offset.
    """
    class Type(ctypes.Structure):
        # One c_uint32 per 4-byte offset; generated instead of listing
        # 31 identical entries by hand.
        _fields_ = [("REG_%03X" % _offset, ctypes.c_uint32)
                    for _offset in range(0x000, 0x07C, 4)]
class BES2300Psc:
class Type(ctypes.Structure):
_fields_ = [
("REG_000", ctypes.c_uint32), #
("REG_004", ctypes.c_uint32), #
("REG_008", ctypes.c_uint32), #
("REG_00C", ctypes.c_uint32), #
("REG_010", ctypes.c_uint32), #
("REG_014", ctypes.c_uint32), #
("REG_018", ctypes.c_uint32), #
("REG_01C", ctypes.c_uint32), #
("REG_020", ctypes.c_uint32), #
("REG_024", ctypes.c_uint32), #
("REG_028", ctypes.c_uint32), #
("REG_02C", ctypes.c_uint32), #
("REG_030", ctypes.c_uint32), #
("REG_034", ctypes.c_uint32), #
("REG_038", ctypes.c_uint32), # | |
*restrict pathname
a1 = self.get_arg_val(iaddr, simstate, "a1") # const char *restrict mode
a0str = self.get_arg_string(iaddr, simstate, "a0")
a1str = self.get_arg_string(iaddr, simstate, "a1")
pargs = (
",".join(str(a) + ':' + str(s)
for (a, s) in [(a0, a0str), (a1, a1str)]))
if simstate.simsupport.file_operations_enabled:
if SFU.sim_file_exists(a0str) or a1str.startswith("w"):
fp = SFU.sim_openfile(a0str, a1str)
simstate.set_register(iaddr, "v0", fp)
return self.add_logmsg(
iaddr, simstate, pargs, returnval=str(fp))
else:
return self.simulate_failure(iaddr, simstate, pargs, "file not found")
else:
return self.simulate_failure(iaddr, simstate, pargs)
class MIPStub_fopen64(MIPSimStub):
    """Simulation stub for fopen64.

    Only '/dev/console' opens successfully (mapped to a host file named
    'devconsole' in the current directory); any other path fails with a
    NULL return in v0.
    """
    def __init__(self) -> None:
        MIPSimStub.__init__(self, 'fopen64')
    def is_io_operation(self) -> bool:
        return True
    def simulate_success(
            self,
            iaddr: str,
            simstate: "SimulationState",
            pargs: str,
            filename: str,
            filepointer: IO[Any],
            comment: str = "") -> str:
        # Wrap the host file object in a symbolic file pointer and return
        # it to the simulated program in v0.
        returnval = SSV.mk_filepointer(filename, filepointer)
        simstate.set_register(iaddr, 'v0', returnval)
        return self.add_logmsg(iaddr, simstate, pargs, returnval=str(returnval))
    def simulate_failure(
            self,
            iaddr: str,
            simstate: "SimulationState",
            pargs: str,
            comment: str = "") -> str:
        # fopen returns NULL (modeled as 0) on failure.
        returnval = SV.simZero
        simstate.set_register(iaddr, 'v0', returnval)
        return self.add_logmsg(iaddr, simstate, pargs, returnval=str(returnval))
    def simulate(self, iaddr: str, simstate: "SimulationState") -> str:
        """Logs i/o; returns 0 in v0."""
        a0 = self.get_arg_val(iaddr, simstate, 'a0')      # pathname
        a1 = self.get_arg_val(iaddr, simstate, 'a1')      # mode
        a0str = self.get_arg_string(iaddr, simstate, 'a0')
        a1str = self.get_arg_string(iaddr, simstate, 'a1')
        pargs = (
            ','.join(str(a) + ':' + str(s)
                     for (a, s) in [(a0, a0str), (a1, a1str)]))
        if a0str == '/dev/console':
            # NOTE(review): opens host file 'devconsole' in the cwd and
            # never explicitly closes it -- the handle lives on in the
            # returned symbolic file pointer.
            fpconsole = open('devconsole', 'w')
            return self.simulate_success(
                iaddr,
                simstate,
                pargs,
                'devconsole',
                fpconsole,
                comment='assume access to /dev/console is always enabled')
        else:
            return self.simulate_failure(iaddr, simstate, pargs)
class MIPStub_freeaddrinfo(MIPSimStub):
    """Simulation stub for freeaddrinfo: logs the call; nothing is freed."""

    def __init__(self) -> None:
        MIPSimStub.__init__(self, 'freeaddrinfo')

    def simulate(self, iaddr: str, simstate: "SimulationState") -> str:
        addrinfo = self.get_arg_val(iaddr, simstate, 'a0')
        # freeaddrinfo has no meaningful result; report 0 in v0.
        simstate.set_register(iaddr, 'v0', SV.simZero)
        return self.add_logmsg(iaddr, simstate, str(addrinfo))
class MIPStub_fscanf(MIPSimStub):
    """Simulation stub for fscanf: no input is modeled; always returns -1."""

    def __init__(self) -> None:
        MIPSimStub.__init__(self, 'fscanf')

    def is_io_operation(self) -> bool:
        return True

    def simulate(self, iaddr: str, simstate: "SimulationState") -> str:
        stream = self.get_arg_val(iaddr, simstate, 'a0')
        fmt = self.get_arg_val(iaddr, simstate, 'a1')
        fmtstr = self.get_arg_string(iaddr, simstate, 'a1')
        pargs = str(stream) + ',' + str(fmt) + ':' + fmtstr
        # Conservatively report EOF/failure (-1) to the simulated program.
        returnval = -1
        simstate.set_register(iaddr, 'v0', SV.mk_simvalue(returnval))
        return self.add_logmsg(iaddr, simstate, pargs, returnval=str(returnval))
class MIPStub_fstat(MIPSimStub):
    """Partially fills in the file information in the provided buffer.

    Currently only the file size is set at offset 52.

    From MIPS ABI supplement:
    struct stat {
       dev_t st_dev;            0
       long st_pad1[3];         4
       ino_t st_ino;            16
       mode_t st_mode;
       nlink_t st_nlink;
       uid_t st_uid;            32
       gid_t st_gid;            36
       dev_t st_rdev;           40
       long st_pad2[2];         44
       off_t st_size;           52
       long st_pad3;            56
       timestruc_t st_atim;
       timestruc_t st_mtim;
       timestruc_t st_ctim;
       long st_blksize;
       long st_blocks;
       char st_fstype[_ST_FSTYPSZ];
       long st_pad4[8];
    }
    """

    def __init__(self) -> None:
        MIPSimStub.__init__(self, "fstat")

    def is_io_operation(self) -> bool:
        return True

    def simulate(self, iaddr: str, simstate: "SimulationState") -> str:
        a0 = self.get_arg_val(iaddr, simstate, "a0")  # int fildes
        a1 = self.get_arg_val(iaddr, simstate, "a1")  # struct stat *buf
        pargs = str(a0) + ", " + str(a1)
        if a0.is_undefined or a1.is_undefined:
            raise SU.CHBSimError(
                simstate,
                iaddr,
                "fstat: some arguments are undefined: " + pargs)
        # Resolve the stat-buffer argument to a simulation address
        if a1.is_address:
            buf = cast(SSV.SimAddress, a1)
        elif a1.is_literal:
            buf = simstate.resolve_literal_address(iaddr, a1.literal_value)
        else:
            # Fix: previously `buf` was left unbound on this path, so the
            # check below raised a NameError instead of a simulation error.
            raise SU.CHBSimError(
                simstate,
                iaddr,
                "fstat: buf argument is not an address: " + str(a1))
        if buf.is_undefined:
            raise SU.CHBSimError(
                simstate,
                iaddr,
                "fstat: address of buf cannot be resolved: " + str(a1))
        pargs = str(a0) + ", " + str(buf)
        if a0.is_file_descriptor:
            a0 = cast(SSV.SimSymbolicFileDescriptor, a0)
            # Only st_size (byte offset 52) is filled in, from the host
            # file backing the symbolic descriptor.
            fdstat = os.stat(a0.filename)
            simstate.set_memval(
                iaddr, buf.add_offset(52), SV.mk_simvalue(fdstat.st_size))
            result = 0
        else:
            result = -1
        simstate.set_register(iaddr, "v0", SV.mk_simvalue(result))
        return self.add_logmsg(iaddr, simstate, pargs, returnval=str(result))
class MIPStub_fstat64(MIPSimStub):
    """Simulation stub for fstat64: reports success without filling the buffer."""

    def __init__(self) -> None:
        MIPSimStub.__init__(self, 'fstat64')

    def is_io_operation(self) -> bool:
        return True

    def simulate(self, iaddr: str, simstate: "SimulationState") -> str:
        fildes = self.get_arg_val(iaddr, simstate, 'a0')
        statbuf = self.get_arg_val(iaddr, simstate, 'a1')
        # The stat buffer is not populated; only success (0) is reported.
        simstate.set_register(iaddr, 'v0', SV.simZero)
        return self.add_logmsg(
            iaddr, simstate, str(fildes) + ',' + str(statbuf))
class MIPStub_fwrite(MIPSimStub):
    """Simulation stub for fwrite: logs the call; no data is written."""

    def __init__(self) -> None:
        MIPSimStub.__init__(self, 'fwrite')

    def is_io_operation(self) -> bool:
        return True

    def simulate(self, iaddr: str, simstate: "SimulationState") -> str:
        """Logs i/o, returns 1 in v0 for now."""
        argvals = [self.get_arg_val(iaddr, simstate, reg)
                   for reg in ('a0', 'a1', 'a2', 'a3')]
        # Pretend a single item was written successfully.
        simstate.set_register(iaddr, 'v0', SV.simOne)
        pargs = ','.join(str(a) for a in argvals)
        return self.add_logmsg(iaddr, simstate, pargs, returnval='1')
class MIPStub_fread(MIPSimStub):
    """Simulation stub for fread.

    Copies size * nitems characters from the symbolic file pointer's
    backing host file into simulated memory at the destination address,
    and returns the requested count in v0.
    """

    def __init__(self) -> None:
        MIPSimStub.__init__(self, 'fread')

    def is_io_operation(self) -> bool:
        return True

    def simulate(self, iaddr: str, simstate: "SimulationState") -> str:
        a0 = self.get_arg_val(iaddr, simstate, "a0")  # void *restrict ptr
        a1 = self.get_arg_val(iaddr, simstate, "a1")  # size_t size
        a2 = self.get_arg_val(iaddr, simstate, "a2")  # size_t nitems
        a3 = self.get_arg_val(iaddr, simstate, "a3")  # FILE *restrict stream
        pargs = ','.join(str(a) for a in [a0, a1, a2, a3])
        if (
                a0.is_undefined
                or a1.is_undefined
                or a2.is_undefined
                or a3.is_undefined):
            raise SU.CHBSimError(
                simstate,
                iaddr,
                "some arguments to fread are undefined")
        # Resolve the destination pointer to a simulation address
        if a0.is_address:
            ptr = cast(SSV.SimAddress, a0)
        elif a0.is_literal:
            ptr = simstate.resolve_literal_address(iaddr, a0.literal_value)
        else:
            # Fix: previously `ptr` was left unbound on this path, so the
            # check below raised a NameError instead of a simulation error.
            raise SU.CHBSimError(
                simstate,
                iaddr,
                "fread: dstaddr: " + str(a0) + " is not an address")
        if ptr.is_undefined:
            raise SU.CHBSimError(
                simstate,
                iaddr,
                "fread: dstaddr: " + str(a0) + " cannot be resolved")
        if a3.is_file_pointer:
            fp = cast(SSV.SimSymbolicFilePointer, a3).fp
        else:
            raise SU.CHBSimError(
                simstate,
                iaddr,
                "fread: stream: " + str(a3) + " is not a file pointer")
        if a1.is_literal:
            size = a1.literal_value
        else:
            raise SU.CHBSimError(
                simstate,
                iaddr,
                "fread: size: " + str(a1) + " is not a literal value")
        if a2.is_literal:
            nitems = a2.literal_value
        else:
            raise SU.CHBSimError(
                simstate,
                iaddr,
                "fread: nitems: " + str(a2) + " is not a literal value")
        # Copy one character at a time into simulated memory.
        # NOTE(review): no EOF handling -- the full size*nitems count is
        # reported in v0 even if the host file is shorter; confirm this is
        # the intended approximation.
        for i in range(size * nitems):
            c = fp.read(1)
            simstate.set_memval(iaddr, ptr.add_offset(i), SV.mk_simcharvalue(c))
        returnval = size * nitems
        simstate.set_register(iaddr, "v0", SV.mk_simvalue(returnval))
        return self.add_logmsg(iaddr, simstate, pargs, returnval=str(returnval))
class MIPStub_free(MIPSimStub):
    """Simulation stub for free: logs the pointer; memory is not reclaimed."""

    def __init__(self) -> None:
        MIPSimStub.__init__(self, 'free')

    def is_memalloc_operation(self) -> bool:
        return True

    def simulate(self, iaddr: str, simstate: "SimulationState") -> str:
        ptr = self.get_arg_val(iaddr, simstate, 'a0')
        # free returns void, so v0 is deliberately left untouched.
        return self.add_logmsg(iaddr, simstate, str(ptr))
class MIPStub_fork(MIPSimStub):
    """Simulation stub for fork.

    The returned pid is taken from simsupport.forkchoices for this call
    site when present; otherwise the child's view (0) is simulated.
    """

    def __init__(self) -> None:
        MIPSimStub.__init__(self, 'fork')

    def is_process_operation(self) -> bool:
        return True

    def simulate(self, iaddr: str, simstate: "SimulationState") -> str:
        # Default to 0 (follow the child) unless the simulation support
        # object prescribes a pid for this instruction address.
        result = simstate.simsupport.forkchoices.get(iaddr, 0)
        simstate.set_register(iaddr, 'v0', SV.mk_simvalue(result))
        return self.add_logmsg(iaddr, simstate, '', returnval=str(result))
class MIPStub_fprintf(MIPSimStub):
    """Simulation stub for fprintf.

    Writes the raw format string (format arguments are NOT expanded) to
    the symbolic file pointer when file operations are enabled; returns
    the number of characters written, or -1 on failure.
    """

    def __init__(self) -> None:
        MIPSimStub.__init__(self, "fprintf")

    def is_io_operation(self) -> bool:
        return True

    def simulate(self, iaddr: str, simstate: "SimulationState") -> str:
        a0 = self.get_arg_val(iaddr, simstate, "a0")  # FILE *restrict stream
        a1 = self.get_arg_val(iaddr, simstate, "a1")  # const char *restrict format
        a1str = self.get_arg_string(iaddr, simstate, "a1")
        returnval = -1
        if simstate.simsupport.file_operations_enabled and a0.is_file_pointer:
            a0 = cast(SSV.SimSymbolicFilePointer, a0)
            a0.fp.write(a1str)
            returnval = len(a1str)
        simstate.set_register(iaddr, "v0", SV.mk_simvalue(returnval))
        pargs = str(a0) + ',' + str(a1) + ':' + a1str
        return self.add_logmsg(iaddr, simstate, pargs, returnval=str(returnval))
class MIPStub_getaddrinfo(MIPSimStub):
    """Simulation stub for getaddrinfo: returns success (0).

    Side effect: when the result pointer (a3) is an address, the hints
    pointer (a2) is stored through it.
    """

    def __init__(self) -> None:
        MIPSimStub.__init__(self, 'getaddrinfo')

    def simulate(self, iaddr: str, simstate: "SimulationState") -> str:
        a0 = self.get_arg_val(iaddr, simstate, 'a0')
        a1 = self.get_arg_val(iaddr, simstate, 'a1')
        a1str = self.get_arg_string(iaddr, simstate, 'a1')
        a2 = self.get_arg_val(iaddr, simstate, 'a2')
        a3 = self.get_arg_val(iaddr, simstate, 'a3')
        simstate.set_register(iaddr, 'v0', SV.simZero)
        if a3.is_address:
            # *res receives the hints pointer value
            simstate.set_memval(iaddr, cast(SSV.SimAddress, a3), a2)
        else:
            simstate.add_logmsg(iaddr, "Not able to set side effect of getaddrinfo")
        pargs = ','.join([str(a0), str(a1) + ':' + a1str, str(a2), str(a3)])
        return self.add_logmsg(iaddr, simstate, pargs)
class MIPStub_get_current_dir_name(MIPSimStub):
    """Allocates memory on the heap to hold the absolute path name.

    Doc: https://man7.org/linux/man-pages/man3/getcwd.3.html
    get_current_dir_name() will malloc(3) an array big enough to hold
    the absolute pathname of the current working directory. If the
    environment variable PWD is set, and its value is correct, then
    that value will be returned. The caller should free(3) the
    returned buffer.
    """
    def __init__(self) -> None:
        MIPSimStub.__init__(self, "get_current_dir_name")
    def is_memalloc_operation(self) -> bool:
        return True
    def simulate(self, iaddr: str, simstate: "SimulationState") -> str:
        # The cwd is supplied by the simulation-support object, not the host.
        cwd = simstate.simsupport.cwd()
        # NOTE(review): get_current_dir_name takes no arguments; a0/a1 are
        # presumably inspected defensively (only the 0,0 case allocates) --
        # confirm against the callers.
        a0 = self.get_arg_val(iaddr, simstate, "a0")
        a1 = self.get_arg_val(iaddr, simstate, "a1")
        pargs = str(a0) + ", " + str(a1)
        if a0.is_literal and a0.is_defined and a1.is_literal and a1.is_defined:
            a0 = cast(SV.SimLiteralValue, a0)
            a1 = cast(SV.SimLiteralValue, a1)
            if a0.value == 0 and a1.value == 0:
                # Model the malloc'd buffer as a fresh symbolic base address
                # unique to this call site, sized to hold cwd + NUL.
                base = "get_current_dir_name_" + iaddr
                buffersize = len(cwd) + 1
                address = SSV.mk_base_address(base, 0, buffersize=buffersize)
                # Copy cwd into the buffer one character at a time
                for i in range(0, buffersize - 1):
                    tgtaddr = address.add_offset(i)
                    simstate.set_memval(iaddr, tgtaddr, SV.mk_simcharvalue(cwd[i]))
                # NUL-terminate the string
                simstate.set_memval(iaddr, address.add_offset(len(cwd)), SV.simZerobyte)
                simstate.set_register(iaddr, "v0", address)
                returnval: str = str(address)
            else:
                # Nonzero args: fail with NULL
                simstate.set_register(iaddr, "v0", SV.simZero)
                returnval = "0"
        else:
            # Undefined/non-literal args: fail with NULL
            simstate.set_register(iaddr, "v0", SV.simZero)
            returnval = "0"
        return self.add_logmsg(iaddr, simstate, pargs, returnval=returnval)
class MIPStub_getcwd(MIPSimStub):
def __init__(self) -> None:
MIPSimStub.__init__(self, 'getcwd')
def simulate(self, iaddr: str, simstate: "SimulationState") -> str:
cwd = simstate.simsupport.cwd()
a0 = self.get_arg_val(iaddr, simstate, 'a0')
a1 = self.get_arg_val(iaddr, simstate, 'a1')
if a0.is_address:
a0 | |
group_name
self.is_dirty = True
db_group_name = property(__get_db_group_name, __set_db_group_name)
    def db_add_group_name(self, group_name):
        """Set the group name (does not mark the object dirty)."""
        self._db_group_name = group_name
    def db_change_group_name(self, group_name):
        """Replace the group name (does not mark the object dirty)."""
        self._db_group_name = group_name
    def db_delete_group_name(self, group_name):
        """Clear the group name; the argument is ignored."""
        self._db_group_name = None
    def __get_db_group_type(self):
        """Return the group type."""
        return self._db_group_type
    def __set_db_group_type(self, group_type):
        """Set the group type and mark the object as dirty."""
        self._db_group_type = group_type
        self.is_dirty = True
    db_group_type = property(__get_db_group_type, __set_db_group_type)
    def db_add_group_type(self, group_type):
        """Set the group type (does not mark the object dirty)."""
        self._db_group_type = group_type
    def db_change_group_type(self, group_type):
        """Replace the group type (does not mark the object dirty)."""
        self._db_group_type = group_type
    def db_delete_group_type(self, group_type):
        """Clear the group type; the argument is ignored."""
        self._db_group_type = None
    def __get_db_completed(self):
        """Return the completed flag."""
        return self._db_completed
    def __set_db_completed(self, completed):
        """Set the completed flag and mark the object as dirty."""
        self._db_completed = completed
        self.is_dirty = True
    db_completed = property(__get_db_completed, __set_db_completed)
    def db_add_completed(self, completed):
        """Set the completed flag (does not mark the object dirty)."""
        self._db_completed = completed
    def db_change_completed(self, completed):
        """Replace the completed flag (does not mark the object dirty)."""
        self._db_completed = completed
    def db_delete_completed(self, completed):
        """Clear the completed flag; the argument is ignored."""
        self._db_completed = None
    def __get_db_error(self):
        """Return the error value."""
        return self._db_error
    def __set_db_error(self, error):
        """Set the error value and mark the object as dirty."""
        self._db_error = error
        self.is_dirty = True
    db_error = property(__get_db_error, __set_db_error)
    def db_add_error(self, error):
        """Set the error value (does not mark the object dirty)."""
        self._db_error = error
    def db_change_error(self, error):
        """Replace the error value (does not mark the object dirty)."""
        self._db_error = error
    def db_delete_error(self, error):
        """Clear the error value; the argument is ignored."""
        self._db_error = None
    def __get_db_machine_id(self):
        """Return the machine id."""
        return self._db_machine_id
    def __set_db_machine_id(self, machine_id):
        """Set the machine id and mark the object as dirty."""
        self._db_machine_id = machine_id
        self.is_dirty = True
    db_machine_id = property(__get_db_machine_id, __set_db_machine_id)
    def db_add_machine_id(self, machine_id):
        """Set the machine id (does not mark the object dirty)."""
        self._db_machine_id = machine_id
    def db_change_machine_id(self, machine_id):
        """Replace the machine id (does not mark the object dirty)."""
        self._db_machine_id = machine_id
    def db_delete_machine_id(self, machine_id):
        """Clear the machine id; the argument is ignored."""
        self._db_machine_id = None
    # --- Generated collection accessors for 'annotations' ---
    # The list self._db_annotations is mirrored by the lookup dict
    # db_annotations_id_index keyed on each child's db_id.
    # NOTE: xrange indicates this file targets Python 2.
    def __get_db_annotations(self):
        return self._db_annotations
    def __set_db_annotations(self, annotations):
        self._db_annotations = annotations
        self.is_dirty = True
    db_annotations = property(__get_db_annotations, __set_db_annotations)
    def db_get_annotations(self):
        return self._db_annotations
    def db_add_annotation(self, annotation):
        self.is_dirty = True
        self._db_annotations.append(annotation)
        self.db_annotations_id_index[annotation.db_id] = annotation
    def db_change_annotation(self, annotation):
        # Replace the entry with a matching db_id, or append if absent.
        self.is_dirty = True
        found = False
        for i in xrange(len(self._db_annotations)):
            if self._db_annotations[i].db_id == annotation.db_id:
                self._db_annotations[i] = annotation
                found = True
                break
        if not found:
            self._db_annotations.append(annotation)
        self.db_annotations_id_index[annotation.db_id] = annotation
    def db_delete_annotation(self, annotation):
        # Non-new children are remembered in db_deleted_annotations so the
        # deletion can be propagated to storage later.
        # NOTE(review): the final del raises KeyError if annotation.db_id
        # is not in the index — deleting an unknown annotation is an error.
        self.is_dirty = True
        for i in xrange(len(self._db_annotations)):
            if self._db_annotations[i].db_id == annotation.db_id:
                if not self._db_annotations[i].is_new:
                    self.db_deleted_annotations.append(self._db_annotations[i])
                del self._db_annotations[i]
                break
        del self.db_annotations_id_index[annotation.db_id]
    def db_get_annotation(self, key):
        # Linear scan by db_id; returns None when not found.
        for i in xrange(len(self._db_annotations)):
            if self._db_annotations[i].db_id == key:
                return self._db_annotations[i]
        return None
    def db_get_annotation_by_id(self, key):
        # Direct index lookup; raises KeyError when absent.
        return self.db_annotations_id_index[key]
    def db_has_annotation_with_id(self, key):
        return key in self.db_annotations_id_index
    # --- Generated collection accessors for 'loop_execs' ---
    # Same pattern as the annotations accessors: list + id-index dict.
    def __get_db_loop_execs(self):
        return self._db_loop_execs
    def __set_db_loop_execs(self, loop_execs):
        self._db_loop_execs = loop_execs
        self.is_dirty = True
    db_loop_execs = property(__get_db_loop_execs, __set_db_loop_execs)
    def db_get_loop_execs(self):
        return self._db_loop_execs
    def db_add_loop_exec(self, loop_exec):
        self.is_dirty = True
        self._db_loop_execs.append(loop_exec)
        self.db_loop_execs_id_index[loop_exec.db_id] = loop_exec
    def db_change_loop_exec(self, loop_exec):
        # Replace in place by db_id, or append if no match exists.
        self.is_dirty = True
        found = False
        for i in xrange(len(self._db_loop_execs)):
            if self._db_loop_execs[i].db_id == loop_exec.db_id:
                self._db_loop_execs[i] = loop_exec
                found = True
                break
        if not found:
            self._db_loop_execs.append(loop_exec)
        self.db_loop_execs_id_index[loop_exec.db_id] = loop_exec
    def db_delete_loop_exec(self, loop_exec):
        # NOTE(review): final del raises KeyError for an unknown db_id.
        self.is_dirty = True
        for i in xrange(len(self._db_loop_execs)):
            if self._db_loop_execs[i].db_id == loop_exec.db_id:
                if not self._db_loop_execs[i].is_new:
                    self.db_deleted_loop_execs.append(self._db_loop_execs[i])
                del self._db_loop_execs[i]
                break
        del self.db_loop_execs_id_index[loop_exec.db_id]
    def db_get_loop_exec(self, key):
        # Linear scan; returns None when no child has db_id == key.
        for i in xrange(len(self._db_loop_execs)):
            if self._db_loop_execs[i].db_id == key:
                return self._db_loop_execs[i]
        return None
    def db_get_loop_exec_by_id(self, key):
        return self.db_loop_execs_id_index[key]
    def db_has_loop_exec_with_id(self, key):
        return key in self.db_loop_execs_id_index
    # --- Generated collection accessors for 'module_execs' ---
    def __get_db_module_execs(self):
        return self._db_module_execs
    def __set_db_module_execs(self, module_execs):
        self._db_module_execs = module_execs
        self.is_dirty = True
    db_module_execs = property(__get_db_module_execs, __set_db_module_execs)
    def db_get_module_execs(self):
        return self._db_module_execs
    def db_add_module_exec(self, module_exec):
        self.is_dirty = True
        self._db_module_execs.append(module_exec)
        self.db_module_execs_id_index[module_exec.db_id] = module_exec
    def db_change_module_exec(self, module_exec):
        # Replace in place by db_id, or append if no match exists.
        self.is_dirty = True
        found = False
        for i in xrange(len(self._db_module_execs)):
            if self._db_module_execs[i].db_id == module_exec.db_id:
                self._db_module_execs[i] = module_exec
                found = True
                break
        if not found:
            self._db_module_execs.append(module_exec)
        self.db_module_execs_id_index[module_exec.db_id] = module_exec
    def db_delete_module_exec(self, module_exec):
        # NOTE(review): final del raises KeyError for an unknown db_id.
        self.is_dirty = True
        for i in xrange(len(self._db_module_execs)):
            if self._db_module_execs[i].db_id == module_exec.db_id:
                if not self._db_module_execs[i].is_new:
                    self.db_deleted_module_execs.append(self._db_module_execs[i])
                del self._db_module_execs[i]
                break
        del self.db_module_execs_id_index[module_exec.db_id]
    def db_get_module_exec(self, key):
        # Linear scan; returns None when no child has db_id == key.
        for i in xrange(len(self._db_module_execs)):
            if self._db_module_execs[i].db_id == key:
                return self._db_module_execs[i]
        return None
    def db_get_module_exec_by_id(self, key):
        return self.db_module_execs_id_index[key]
    def db_has_module_exec_with_id(self, key):
        return key in self.db_module_execs_id_index
    # --- Generated collection accessors for 'group_execs' ---
    def __get_db_group_execs(self):
        return self._db_group_execs
    def __set_db_group_execs(self, group_execs):
        self._db_group_execs = group_execs
        self.is_dirty = True
    db_group_execs = property(__get_db_group_execs, __set_db_group_execs)
    def db_get_group_execs(self):
        return self._db_group_execs
    def db_add_group_exec(self, group_exec):
        self.is_dirty = True
        self._db_group_execs.append(group_exec)
        self.db_group_execs_id_index[group_exec.db_id] = group_exec
    def db_change_group_exec(self, group_exec):
        # Replace in place by db_id, or append if no match exists.
        self.is_dirty = True
        found = False
        for i in xrange(len(self._db_group_execs)):
            if self._db_group_execs[i].db_id == group_exec.db_id:
                self._db_group_execs[i] = group_exec
                found = True
                break
        if not found:
            self._db_group_execs.append(group_exec)
        self.db_group_execs_id_index[group_exec.db_id] = group_exec
    def db_delete_group_exec(self, group_exec):
        # NOTE(review): final del raises KeyError for an unknown db_id.
        self.is_dirty = True
        for i in xrange(len(self._db_group_execs)):
            if self._db_group_execs[i].db_id == group_exec.db_id:
                if not self._db_group_execs[i].is_new:
                    self.db_deleted_group_execs.append(self._db_group_execs[i])
                del self._db_group_execs[i]
                break
        del self.db_group_execs_id_index[group_exec.db_id]
    def db_get_group_exec(self, key):
        # Linear scan; returns None when no child has db_id == key.
        for i in xrange(len(self._db_group_execs)):
            if self._db_group_execs[i].db_id == key:
                return self._db_group_execs[i]
        return None
    def db_get_group_exec_by_id(self, key):
        return self.db_group_execs_id_index[key]
    def db_has_group_exec_with_id(self, key):
        return key in self.db_group_execs_id_index
    def getPrimaryKey(self):
        """Return the value of this object's primary-key field (_db_id)."""
        return self._db_id
class DBPackage(object):
    """Auto-generated persistence object representing a VisTrails package.

    Holds the package's scalar metadata plus a list of module descriptors,
    mirrored by id- and name-keyed lookup dicts.
    """
    vtType = 'package'
    def __init__(self, id=None, name=None, identifier=None, codepath=None, load_configuration=None, version=None, description=None, module_descriptors=None):
        self._db_id = id
        self._db_name = name
        self._db_identifier = identifier
        self._db_codepath = codepath
        self._db_load_configuration = load_configuration
        self._db_version = version
        self._db_description = description
        # Children removed since the last save, kept so deletions can be
        # propagated to storage.
        self.db_deleted_module_descriptors = []
        # Lookup indices over _db_module_descriptors.
        self.db_module_descriptors_id_index = {}
        self.db_module_descriptors_name_index = {}
        if module_descriptors is None:
            self._db_module_descriptors = []
        else:
            self._db_module_descriptors = module_descriptors
            for v in self._db_module_descriptors:
                self.db_module_descriptors_id_index[v.db_id] = v
                self.db_module_descriptors_name_index[(v.db_name,v.db_namespace,v.db_version)] = v
        # New objects start dirty so they are written on the next save.
        self.is_dirty = True
        self.is_new = True
    def __copy__(self):
        """Support copy.copy(); delegates to do_copy() with default args."""
        return DBPackage.do_copy(self)
    def do_copy(self, new_ids=False, id_scope=None, id_remap=None):
        """Deep-copy this package and its module descriptors.

        When new_ids is True, a fresh id is drawn from id_scope and the
        (type, old_id) -> new_id mapping is recorded in id_remap.
        """
        cp = DBPackage(id=self._db_id,
                       name=self._db_name,
                       identifier=self._db_identifier,
                       codepath=self._db_codepath,
                       load_configuration=self._db_load_configuration,
                       version=self._db_version,
                       description=self._db_description)
        if self._db_module_descriptors is None:
            cp._db_module_descriptors = []
        else:
            cp._db_module_descriptors = [v.do_copy(new_ids, id_scope, id_remap) for v in self._db_module_descriptors]
        # set new ids
        if new_ids:
            new_id = id_scope.getNewId(self.vtType)
            if self.vtType in id_scope.remap:
                id_remap[(id_scope.remap[self.vtType], self.db_id)] = new_id
            else:
                id_remap[(self.vtType, self.db_id)] = new_id
            cp.db_id = new_id
        # recreate indices and set flags
        cp.db_module_descriptors_id_index = dict((v.db_id, v) for v in cp._db_module_descriptors)
        cp.db_module_descriptors_name_index = dict(((v.db_name,v.db_namespace,v.db_version), v) for v in cp._db_module_descriptors)
        # Copies with fresh ids are left in their default dirty/new state.
        if not new_ids:
            cp.is_dirty = self.is_dirty
            cp.is_new = self.is_new
        return cp
@staticmethod
def update_version(old_obj, trans_dict, new_obj=None):
if new_obj is None:
new_obj = DBPackage()
class_dict = {}
if new_obj.__class__.__name__ in trans_dict:
class_dict = trans_dict[new_obj.__class__.__name__]
if 'id' in class_dict:
res = class_dict['id'](old_obj, trans_dict)
new_obj.db_id = res
elif hasattr(old_obj, 'db_id') and old_obj.db_id is not None:
new_obj.db_id = old_obj.db_id
if 'name' in class_dict:
res = class_dict['name'](old_obj, trans_dict)
new_obj.db_name = res
elif hasattr(old_obj, 'db_name') and old_obj.db_name is not None:
new_obj.db_name = old_obj.db_name
if 'identifier' in class_dict:
res = class_dict['identifier'](old_obj, trans_dict)
new_obj.db_identifier = res
elif hasattr(old_obj, 'db_identifier') and old_obj.db_identifier is not None:
new_obj.db_identifier = old_obj.db_identifier
if 'codepath' in class_dict:
res = class_dict['codepath'](old_obj, trans_dict)
new_obj.db_codepath = res
elif hasattr(old_obj, 'db_codepath') and old_obj.db_codepath is not None:
new_obj.db_codepath = old_obj.db_codepath
if 'load_configuration' in class_dict:
res = class_dict['load_configuration'](old_obj, trans_dict)
new_obj.db_load_configuration = res
elif hasattr(old_obj, 'db_load_configuration') and old_obj.db_load_configuration is not None:
new_obj.db_load_configuration = old_obj.db_load_configuration
if 'version' in class_dict:
res = class_dict['version'](old_obj, trans_dict)
new_obj.db_version = res
elif hasattr(old_obj, 'db_version') and old_obj.db_version is not None:
new_obj.db_version = old_obj.db_version
if 'description' in class_dict:
res = class_dict['description'](old_obj, trans_dict)
new_obj.db_description = res
elif hasattr(old_obj, 'db_description') and old_obj.db_description is not None:
new_obj.db_description = old_obj.db_description
if 'module_descriptors' in class_dict:
res = class_dict['module_descriptors'](old_obj, trans_dict)
for obj in res:
new_obj.db_add_module_descriptor(obj)
elif hasattr(old_obj, 'db_module_descriptors') and old_obj.db_module_descriptors is not None:
for obj in old_obj.db_module_descriptors:
new_obj.db_add_module_descriptor(DBModuleDescriptor.update_version(obj, trans_dict))
if hasattr(old_obj, 'db_deleted_module_descriptors') and hasattr(new_obj, 'db_deleted_module_descriptors'):
for obj in old_obj.db_deleted_module_descriptors:
n_obj = DBModuleDescriptor.update_version(obj, trans_dict)
new_obj.db_deleted_module_descriptors.append(n_obj)
new_obj.is_new = old_obj.is_new
new_obj.is_dirty = old_obj.is_dirty
return new_obj
    def db_children(self, parent=(None,None), orphan=False):
        """Return (object, parent_type, parent_id) tuples for this subtree.

        Children are listed before self. When orphan is True, the module
        descriptors are also detached from this package after traversal.
        """
        children = []
        to_del = []
        for child in self.db_module_descriptors:
            children.extend(child.db_children((self.vtType, self.db_id), orphan))
            if orphan:
                to_del.append(child)
        for child in to_del:
            self.db_delete_module_descriptor(child)
        children.append((self, parent[0], parent[1]))
        return children
def db_deleted_children(self, remove=False):
children = []
children.extend(self.db_deleted_module_descriptors)
if remove:
self.db_deleted_module_descriptors = []
return children
def has_changes(self):
if self.is_dirty:
return True
for child in self._db_module_descriptors:
if child.has_changes():
return True
return False
    # --- Generated scalar-field accessors ---
    # Writes through the db_* property set is_dirty; db_add_*/db_change_*
    # assign WITHOUT touching is_dirty; db_delete_* ignores its argument
    # and resets the field to None.
    def __get_db_id(self):
        return self._db_id
    def __set_db_id(self, id):
        self._db_id = id
        self.is_dirty = True
    db_id = property(__get_db_id, __set_db_id)
    def db_add_id(self, id):
        self._db_id = id
    def db_change_id(self, id):
        self._db_id = id
    def db_delete_id(self, id):
        self._db_id = None
    def __get_db_name(self):
        return self._db_name
    def __set_db_name(self, name):
        self._db_name = name
        self.is_dirty = True
    db_name = property(__get_db_name, __set_db_name)
    def db_add_name(self, name):
        self._db_name = name
    def db_change_name(self, name):
        self._db_name = name
    def db_delete_name(self, name):
        self._db_name = None
    def __get_db_identifier(self):
        return self._db_identifier
    def __set_db_identifier(self, identifier):
        self._db_identifier = identifier
        self.is_dirty = True
    db_identifier = property(__get_db_identifier, __set_db_identifier)
    def db_add_identifier(self, identifier):
        self._db_identifier = identifier
    def db_change_identifier(self, identifier):
        self._db_identifier = identifier
    def db_delete_identifier(self, identifier):
        self._db_identifier = None
    def __get_db_codepath(self):
        return self._db_codepath
    def __set_db_codepath(self, codepath):
        self._db_codepath = codepath
        self.is_dirty = True
    db_codepath = property(__get_db_codepath, __set_db_codepath)
    def db_add_codepath(self, codepath):
        self._db_codepath = codepath
    def db_change_codepath(self, codepath):
        self._db_codepath = codepath
    def db_delete_codepath(self, codepath):
        self._db_codepath = None
    def __get_db_load_configuration(self):
        return self._db_load_configuration
    def __set_db_load_configuration(self, load_configuration):
        self._db_load_configuration = load_configuration
        self.is_dirty = True
    db_load_configuration = property(__get_db_load_configuration, __set_db_load_configuration)
    def db_add_load_configuration(self, load_configuration):
        self._db_load_configuration = load_configuration
    def db_change_load_configuration(self, load_configuration):
        self._db_load_configuration = load_configuration
    def db_delete_load_configuration(self, load_configuration):
        self._db_load_configuration = None
    def __get_db_version(self):
        return self._db_version
    def __set_db_version(self, version):
        self._db_version = version
        self.is_dirty = True
    db_version = property(__get_db_version, __set_db_version)
def db_add_version(self, version):
| |
of the tree (gray/black/buffer)
# Post-condition: returns the y-1 neighbor (P/G logic if already at the top)
def top(self,n):
if n is None or n.y==0:
return None
return self[n.x,n.y-1]
# Pre-condition: n is a valid node in the main part of the tree (gray/black/buffer)
# Post-condition: returns the next-highest non-invis neighbor
def r_top(self,n):
top = self.top(n)
return top if (node._exists(top) or top is None) else self.r_top(top)
# Pre-condition: n is a valid node in the main part of the tree (gray/black/buffer)
# Post-condition: returns the y+1 neighbor (stops before post-processing logic)
def bot(self,n):
if n is None or n.y>len(self.node_list)-2:
return None
return self[n.x,n.y+1]
# Pre-condition: n is a valid node in the main part of the tree (gray/black/buffer)
# Post-condition: returns the next-lowest non-invis neighbor
def r_bot(self,n):
bot = self.bot(n)
return bot if (node._exists(bot) or bot is None) else self.r_bot(bot)
# Pre-condition: n is a valid node in the main part of the tree (gray/black/buffer)
# Post-condition: returns the x-1 neighbor (stops before post-processing logic)
def right(self,n):
if n is None or n.x==0:
return None
return self[n.x-1,n.y]
# Pre-condition: n is a valid node in the main part of the tree (gray/black/buffer)
# Post-condition: returns the next-rightest non-invis + non-buffer neighbor
def r_right(self,n,c=[]):
right = self.right(n)
if (node._exists(right) and not node._isbuf(right)) or \
right is None or right in c:
return right
return self.r_right(right,c)
# Pre-condition: n is a valid node in the main part of the tree (gray/black/buffer)
# Post-condition: returns the diagonal predecessor (or top(n) if n is a buffer)
def pre(self,n):
# for x in self._possible_pres(n):
for x in self.node_list[n.y-1][:n.x]:
if n in self.post(x): return x
return None
# Pre-condition: n is a valid node in the main part of the tree (gray/black/buffer)
# Post-condition: goes up the chain of predecessors as far as it can
def r_pre(self,n):
pre = self.pre(n)
return n if pre is None else self.r_pre(pre)
# Pre-condition: n is a valid node in the main part of the tree (gray/black/buffer)
# Post-condition: returns the list of diagonal successors
def post(self,n):
return [a for a in self.adj[n] if a.x>n.x]
# Helper function that checks whether n2 is below n1
# Same column, higher row, or second node straight-up does not exist
def _is_below(self,n1,n2):
return (n2 is None) or (n1 is not None and n2.x==n1.x and n2.y>n1.y)
# Pre-condition: n is a valid node
# Post-condition: returns a list of all nodes it could connect to as a pre
def _possible_pres(self,n,c=[],d=[]):
# Figure out bounds so that wires don't cross illegaly
post = self.r_right(n,c)
if post in c:
bound = d[c.index(post)]
else:
bound = None if post is None else self.pre(post)
bound = 0 if bound is None else bound.x
# Return all possible nodes in that range
return self.node_list[n.y-1][bound:n.x]
    # Helper function
    # Pre-condition:
    # We have a pair of nodes, a and b, such that pre(a) = b
    # We want to disconnect a from b, which makes for a new pre(a)
    # So we need to add a series of cells on top of a so that
    # is_pg([top(a),pre(a)],[top(b),pre(b)])
    # The very first term in that expression, top(a), can be further
    # broken down into tops and pres
    # Post-condition:
    # Returns a tuple of lists of tops and pres that need be created
    # Or None,None if the requirement is impossible
    # Note that in the function call, a and pre are the new a and pre
    # NOTE(review): c/d/path use shared mutable defaults, but they are only
    # ever extended via copies (c+[x], d+[y], path+[x]), so the shared
    # default list is never mutated in place.
    def _valid_tops(self,a,a_bits,b,x,c=[],d=[],path=[],result=(None,None)):
        # If our current solution is worse than a pre-existing one, abort
        if result[0] is not None and len(result[0])<len(c):
            return None,None
        # If we are done, we are done!
        if self._remains_pg_valid((a.x,a.y),(*d,*a_bits)):
            return c,d
        # If x is not part of the body,
        # fork back up the recursion tree
        if not node._in_tree(x):
            # If we've reached the end, this fork is dead
            if len(path)==0: return None,None
            # Otherwise fork up
            return self._valid_tops(a,a_bits,b, \
                    self.top(path[-1]),c,d,path[:-1],result)
        # Forking down into the recursion tree
        # Figure out x's pre, or attempted pre
        pre = self.pre(x)
        # If x has no pre, try forking through possible pre's
        if pre is None and x not in c:
            # Iterate over all possible pre's
            possi = self._possible_pres(x,c,d)
            #possi = sorted(possi, key = lambda x: x.m in ['buffer_node','invis_node'])
            for y in possi:
                # Fork up through the prefix tree
                tmp = self._valid_tops(a,a_bits,b,y,c+[x],d+[y],path+[x],result)
                if tmp[0] is not None: result = tmp;
        # If x has a pre, try forking thru the pre chain
        elif pre is not None:
            tmp = self._valid_tops(a,a_bits,b,pre,c,d,path+[x],result)
            if tmp[0] is not None: result = tmp;
        # Either way, fork up through the top as well
        tmp = self._valid_tops(a,a_bits,b,self.top(x),c,d,path,result)
        if tmp[0] is not None: result = tmp;
        # Return list of candidates
        return result
    # Pre-condition: x,y are valid co-ordinates
    # (if y is not provided, searches entire column from bottom-up)
    # Post-condition: checks whether the given x,y node satisfies the transform's
    # initial requirements; if so, returns the two transform pivots
    def _checkLF(self,x,y=None):
        if not isinstance(x,int) or (y is not None and not isinstance(y,int)):
            raise TypeError("x,y values provided to the internal-use-only check function are invalid!")
        # If no y is provided, check whole column from bottom up
        if y is None:
            for a in range(len(self.node_list)-1,-1,-1):
                if not node._exists(self[x,a]):
                    continue
                a,b,c,d=self._checkLF(x,a)
                if b is not None:
                    return a,b,c,d
            return (None,None,None,None)
        # Main clause of the function
        a = self[x,y]
        # ∃ b = pre(a)
        b = self.pre(a)
        if not node._exists(b):
            return (None,None,None,None)
        # ∄ top(a)
        top = self.top(a)
        if node._exists(top):
            return (None,None,None,None)
        # b's effective predecessor: its top if b is a buffer, else its pre
        pre = self.top(b) if node._isbuf(b) else self.pre(b)
        # NOTE(review): at this point top(a) is known not to "exist", and
        # self.top(top) walks one further row up — confirm this is the
        # intended anchor for the remapping search.
        c,d = self._valid_tops(a,(self.top(top),pre),b,self.top(top))
        if c is None:
            return (None,None,None,None)
        return (a,b,c,d)
    def _checkFL(self,x,y=None):
        """Check whether node (x,y) admits the F->L transform; return the
        (a, b) pivot pair, or (None, None) when the requirements fail.
        With y omitted, the whole column is scanned bottom-up."""
        if not isinstance(x,int) or (y is not None and not isinstance(y,int)):
            raise TypeError("x,y values provided to the internal-use-only check function are invalid!")
        # If no y is provided, check whole column from bottom up
        if y is None:
            for a in range(len(self.node_list)-1,-1,-1):
                a,b=self._checkFL(x,a)
                if b is not None:
                    return a,b
            return (None,None)
        # Main clause of the function
        a = self[x,y]
        pre = self.pre(a)
        if pre is None:
            return (None,None)
        b = None
        # NOTE(review): the loop variables below shadow the x and y
        # parameters; the slice bounds are evaluated before rebinding, so
        # behavior is correct, but renaming would aid readability.
        for x in reversed(self.node_list[y][pre.x:x]):
            top = self.top(x)
            # ∃ b s.t pre(a)=pre(b),
            if self.pre(x)==pre or \
               (top==pre and x.m in ['invis_node','buffer_node']):
                # We have to account for post(a) remapping
                # ∀ post(a), is_pg([top(post),a],[top(post),b]) or ∄ top(post)
                flag=True
                for y in self.post(a):
                    topy = self.top(y)
                    if not self._is_pg_subset((topy,x),(topy,a)) and \
                       node._exists(topy):
                        flag=False; break;
                if flag:
                    b=x; break;
        bot = self.bot(a)
        # ∄ bot(a) or bot(a) = post-processing
        if b is not None and (not node._exists(bot) or \
           bot.m in ['post_node'] or \
           node._isbuf(bot)):
            return (a,b)
        return (None,None)
    def _checkTF(self,x,y=None):
        """Check whether node (x,y) admits the T->F transform; return the
        (a, b, c, d) pivots/remapping, or all-None when requirements fail.
        With y omitted, the whole column is scanned bottom-up."""
        if not isinstance(x,int) or (y is not None and not isinstance(y,int)):
            raise TypeError("x,y values provided to the internal-use-only check function are invalid!")
        # If no y is provided, check whole column from bottom up
        if y is None:
            for a in range(len(self.node_list)-1,-1,-1):
                a,b,c,d=self._checkTF(x,a)
                if b is not None:
                    return a,b,c,d
            return (None,None,None,None)
        # Main clause of the function
        a = self[x,y]
        pre = self.pre(a)
        if pre is None:
            return (None,None,None,None)
        b = None; c = None; d = None;
        top = self.top(a)
        # ∃ b s.t. b.x > pre(a).x
        poss_b = self._possible_pres(a)
        poss_b = [x for x in poss_b if x.x>pre.x]
        # Prefer existing nodes when any are available.
        # NOTE(review): loop variable x shadows the x parameter here.
        if any(node._exists(x) for x in poss_b):
            poss_b = [x for x in poss_b if node._exists(x)]
        for x in poss_b:
            # Figure out if a valid remapping exists
            c,d = self._valid_tops(a,(top,x),pre,top)
            if c is not None:
                # Ignore possibilities that are immediately redundant
                if not self._is_pg_subset((*c,*d),(x,)):
                    b=x; break;
        if b is None: return (None,None,None,None)
        return (a,b,c,d)
def _checkFT(self,x,y=None):
if not isinstance(x,int) or (y is not None and not isinstance(y,int)):
raise TypeError("x,y values provided to the internal-use-only check function are invalid!")
# If no y is provided, check whole column from bottom up
if y is | |
(int, optional): Scale of the output gaussians.
shuffle (bool, optional): If True, shuffle the samples.
camnames (List, optional): List of camera names.
crop_width (Tuple, optional): (first, last) pixels in image width
crop_height (Tuple, optional): (first, last) pixels in image height
vmin (int, optional): Minimum box dim (relative to the COM)
vmax (int, optional): Maximum box dim (relative to the COM)
nvox (int, optional): Number of voxels per box side
gpu_id (Text, optional): Identity of GPU to use.
interp (Text, optional): Interpolation method.
depth (bool): If True, appends voxel depth to sampled image features [DEPRECATED]
channel_combo (Text): Method for shuffling camera input order
mode (Text): Toggles output label format to match MAX vs. AVG network requirements.
samples_per_cluster (int, optional): Samples per cluster
immode (Text): Toggles using 'video' or 'tif' files as image input [DEPRECATED]
rotation (bool, optional): If True, use simple rotation augmentation.
vidreaders (Dict, optional): Dict containing video readers.
distort (bool, optional): If true, apply camera undistortion.
expval (bool, optional): If True, process an expected value network (AVG)
multicam (bool): If True, formats data to work with multiple cameras as input.
var_reg (bool): If True, adds a variance regularization term to the loss function.
COM_aug (bool, optional): If True, augment the COM.
crop_im (bool, optional): If True, crop images.
norm_im (bool, optional): If True, normalize images.
chunks (int, optional): Size of chunks when using chunked mp4.
mono (bool, optional): If True, use grayscale image.
predict_flag (bool, optional): If True, use imageio for reading videos, rather than OpenCV
"""
DataGenerator.__init__(
self,
list_IDs,
labels,
clusterIDs,
batch_size,
dim_in,
n_channels_in,
n_channels_out,
out_scale,
shuffle,
camnames,
crop_width,
crop_height,
samples_per_cluster,
vidreaders,
chunks,
mono,
mirror,
predict_flag,
)
self.vmin = vmin
self.vmax = vmax
self.nvox = nvox
self.vsize = (vmax - vmin) / nvox
self.dim_out_3d = (nvox, nvox, nvox)
self.labels_3d = labels_3d
self.camera_params = camera_params
self.interp = interp
self.depth = depth
self.channel_combo = channel_combo
print(self.channel_combo)
self.gpu_id = gpu_id
self.mode = mode
self.immode = immode
self.tifdirs = tifdirs
self.com3d = com3d
self.rotation = rotation
self.distort = distort
self.expval = expval
self.multicam = multicam
self.var_reg = var_reg
self.COM_aug = COM_aug
self.crop_im = crop_im
# If saving npy as uint8 rather than training directly, dont normalize
self.norm_im = norm_im
self.config = tf.compat.v1.ConfigProto()
self.config.gpu_options.per_process_gpu_memory_fraction = 0.8
self.config.gpu_options.allow_growth = True
self.session = tf.compat.v1.InteractiveSession(config=self.config)
self.device = "/GPU:" + self.gpu_id
self.threadpool = ThreadPool(len(self.camnames[0]))
with tf.device(self.device):
ts = time.time()
for i, ID in enumerate(list_IDs):
experimentID = int(ID.split("_")[0])
for camname in self.camnames[experimentID]:
# M only needs to be computed once for each camera
K = self.camera_params[experimentID][camname]["K"]
R = self.camera_params[experimentID][camname]["R"]
t = self.camera_params[experimentID][camname]["t"]
self.camera_params[experimentID][camname]["M"] = np.array(
ops.camera_matrix(K, R, t), dtype="float32"
)
print("Init took {} sec.".format(time.time() - ts))
def __getitem__(self, index):
"""Generate one batch of data.
Args:
index (int): Frame index
Returns:
Tuple[np.ndarray, np.ndarray]: One batch of data
X (np.ndarray): Input volume
y (np.ndarray): Target
"""
# Generate indexes of the batch
indexes = self.indexes[index * self.batch_size : (index + 1) * self.batch_size]
# Find list of IDs
list_IDs_temp = [self.list_IDs[k] for k in indexes]
# Generate data
X, y = self.__data_generation(list_IDs_temp)
return X, y
@tf.function
def rot90(self, X):
"""Rotate X by 90 degrees CCW.
Args:
X (np.ndarray): Volume
Returns:
X (np.ndarray): Rotated volume
"""
X = tf.transpose(X, [1, 0, 2, 3])
X = X[:, ::-1, :, :]
return X
@tf.function
def rot180(self, X):
"""Rotate X by 180 degrees.
Args:
X (np.ndarray): Volume
Returns:
X (np.ndarray): Rotated volume
"""
X = X[::-1, ::-1, :, :]
return X
    def project_grid(self, X_grid, camname, ID, experimentID, device):
        """Projects 3D voxel centers and sample images as projected 2D pixel coordinates

        Args:
            X_grid (np.ndarray): (n_voxels, 3) array of voxel center coordinates.
            camname (Text): camera name
            ID (Text): string denoting a sample ID
            experimentID (int): identifier for a video recording session.
            device (Text): TensorFlow device string to place the ops on.

        Returns:
            tf.Tensor: sampled image features, reshaped to (nvox, nvox, nvox, 3)
        """
        ts = time.time()
        with tf.device(device):
            # Need this copy so that this_y does not change
            this_y = np.round(self.labels[ID]["data"][camname]).copy()
            if np.all(np.isnan(this_y)):
                com_precrop = np.zeros_like(this_y[:, 0]) * np.nan
            else:
                # For projecting points, we should not use this offset
                com_precrop = np.nanmean(this_y, axis=1)
            # NOTE(review): `thisim` and `com` are only assigned in this
            # 'vid' branch; other immode values would hit a NameError
            # below — confirm 'vid' is the only supported mode here.
            if self.immode == "vid":
                ts = time.time()
                thisim = self.load_frame.load_vid_frame(
                    self.labels[ID]["frames"][camname],
                    camname,
                    extension=self.extension,
                )[
                    self.crop_height[0] : self.crop_height[1],
                    self.crop_width[0] : self.crop_width[1],
                ]
                # print("Frame loading took {} sec.".format(time.time()-ts))
                # Shift 2D labels into the cropped image's coordinate frame.
                this_y[0, :] = this_y[0, :] - self.crop_width[0]
                this_y[1, :] = this_y[1, :] - self.crop_height[0]
                com = np.nanmean(this_y, axis=1)
            if self.crop_im:
                # Cropping takes negligible time
                if np.all(np.isnan(com)):
                    thisim = np.zeros(
                        (self.dim_in[1], self.dim_in[0], self.n_channels_in),
                        dtype="uint8",
                    )
                else:
                    thisim = processing.cropcom(thisim, com, size=self.dim_in[0])
            # Project de novo: homogeneous coords (x, y, z, 1) times the
            # precomputed camera matrix M.
            ts = time.time()
            X_grid = tf.convert_to_tensor(X_grid)
            pts1 = tf.ones((X_grid.shape[0], 1), dtype="float32")
            projPts = tf.concat((X_grid, pts1), 1)
            M = tf.convert_to_tensor(
                self.camera_params[experimentID][camname]["M"], dtype="float32"
            )
            proj_grid = ops.project_to2d_tf(projPts, M)
            # print("2D Project took {} sec.".format(time.time() - ts))
            if self.distort:
                # Apply radial/tangential lens distortion to the projections.
                ts = time.time()
                proj_grid = ops.distortPoints_tf(
                    proj_grid,
                    tf.constant(
                        self.camera_params[experimentID][camname]["K"],
                        dtype="float32",
                    ),
                    tf.squeeze(
                        tf.constant(
                            self.camera_params[experimentID][camname]["RDistort"],
                            dtype="float32",
                        )
                    ),
                    tf.squeeze(
                        tf.constant(
                            self.camera_params[experimentID][camname]["TDistort"],
                            dtype="float32",
                        )
                    ),
                )
                proj_grid = tf.transpose(proj_grid, (1, 0))
                # print("tf Distort took {} sec.".format(time.time() - ts))
            if self.crop_im:
                proj_grid = proj_grid - com_precrop + self.dim_in[0] // 2
                # Now all coordinates should map properly to the image
                # cropped around the COM
            else:
                # Then the only thing we need to correct for is crops at the borders
                proj_grid = proj_grid - tf.cast(
                    tf.stack([self.crop_width[0], self.crop_height[0]]),
                    "float32",
                )
            # Bilinear/nearest sampling of image features at the projected
            # pixel locations, then reshape back into the voxel grid.
            ts = time.time()
            rgb = ops.sample_grid_tf(thisim, proj_grid, device, method=self.interp)
            # print("Sample grid tf took {} sec".format(time.time() - ts))
            X = tf.reshape(rgb, (self.nvox, self.nvox, self.nvox, 3))
        return X
    # TODO(nesting): There is pretty deep logical nesting in this function,
    # might be useful to break apart
def __data_generation(self, list_IDs_temp):
"""Generate data containing batch_size samples.
X : (n_samples, *dim, n_channels)
Args:
list_IDs_temp (List): List of experiment Ids
Returns:
Tuple: Batch_size training samples
X: Input volumes
y_3d: Targets
rotangle: Rotation angle
Raises:
Exception: Invalid generator mode specified.
"""
# Initialization
ts = time.time()
first_exp = int(self.list_IDs[0].split("_")[0])
with tf.device(self.device):
if self.mode == "3dprob":
y_3d = tf.zeros(
(self.batch_size, self.n_channels_out, *self.dim_out_3d),
dtype="float32",
)
elif self.mode == "coordinates":
y_3d = tf.zeros(
(self.batch_size, 3, self.n_channels_out), dtype="float32"
)
else:
raise Exception("not a valid generator mode")
# sz = self.dim_out_3d[0] * self.dim_out_3d[1] * self.dim_out_3d[2]
# X_grid = tf.zeros((self.batch_size, sz, 3), dtype = 'float32')
# Generate data
for i, ID in enumerate(list_IDs_temp):
sampleID = int(ID.split("_")[1])
experimentID = int(ID.split("_")[0])
# For 3D ground truth
this_y_3d = self.labels_3d[ID]
this_COM_3d = self.com3d[ID]
with tf.device(self.device):
xgrid = tf.range(
self.vmin + this_COM_3d[0] + self.vsize / 2,
this_COM_3d[0] + self.vmax,
self.vsize,
dtype="float32",
)
ygrid = tf.range(
self.vmin + this_COM_3d[1] + self.vsize / 2,
this_COM_3d[1] + self.vmax,
self.vsize,
dtype="float32",
)
zgrid = tf.range(
self.vmin + this_COM_3d[2] + self.vsize / 2,
this_COM_3d[2] + self.vmax,
self.vsize,
dtype="float32",
)
(x_coord_3d, y_coord_3d, z_coord_3d) = tf.meshgrid(xgrid, ygrid, zgrid)
if self.mode == "coordinates":
if this_y_3d.shape == y_3d.shape:
if i == 0:
y_3d = tf.expand_dims(y_3d, 0)
else:
y_3d = tf.stack(y_3d, tf.expand_dims(this_y_3d, 0), axis=0)
else:
msg = "Note: ignoring dimension mismatch in 3D labels"
warnings.warn(msg)
xg = tf.stack(
(
tf.keras.backend.flatten(x_coord_3d),
tf.keras.backend.flatten(y_coord_3d),
tf.keras.backend.flatten(z_coord_3d),
),
axis=1,
)
if i == 0:
X_grid = tf.expand_dims(xg, 0)
else:
X_grid = tf.concat([X_grid, tf.expand_dims(xg, 0)], axis=0)
# print('Initialization took {} sec.'.format(time.time() - ts))
if tf.executing_eagerly():
# Compute projection grids using multithreading
num_cams = int(len(self.camnames[experimentID]))
arglist = []
for c in range(num_cams):
arglist.append(
[
xg,
self.camnames[experimentID][c],
ID,
experimentID,
self.device,
]
)
result = self.threadpool.starmap(self.project_grid, arglist)
for c in range(num_cams):
if i == 0 and c == 0:
X = tf.expand_dims(result[c], 0)
else:
X = tf.concat([X, tf.expand_dims(result[c], 0)], axis=0)
else:
for c in range(num_cams):
if c == 0:
X = tf.expand_dims(
self.project_grid(
xg,
self.camnames[experimentID][c],
ID,
experimentID,
self.device,
),
0,
)
else:
X = tf.concat(
(
X,
tf.expand_dims(
self.project_grid(
xg,
self.camnames[experimentID][c],
ID,
experimentID,
self.device,
),
0,
),
),
axis=0,
)
ts = time.time()
with tf.device(self.device):
if self.multicam:
X = tf.reshape(
X,
(
self.batch_size,
len(self.camnames[first_exp]),
X.shape[1],
X.shape[2],
X.shape[3],
X.shape[4],
),
)
X = tf.transpose(X, [0, 2, 3, 4, 5, 1])
if self.channel_combo == "avg":
X = tf.mean(X, axis=-1)
# Randomly reorder the cameras | |
= phash.hexdigest()
config = self._makeOne(autocommit=True)
config.registry.registerAdapter(
view,
(IExceptionViewClassifier, IRequest, implementedBy(RuntimeError)),
IView, name='')
def newview(context, request):
return 'OK'
config.add_view(view=newview, xhr=True, context=RuntimeError,
renderer=null_renderer)
wrapper = self._getViewCallable(
config, exc_iface=implementedBy(RuntimeError))
self.assertFalse(IMultiView.providedBy(wrapper))
request = DummyRequest()
request.is_xhr = True
self.assertEqual(wrapper(None, request), 'OK')
def test_add_view_default_phash_overrides_no_phash(self):
    """A default-phash view replaces a registration that has no phash."""
    from pyramid.renderers import null_renderer
    from zope.interface import Interface
    from pyramid.interfaces import (
        IMultiView,
        IRequest,
        IView,
        IViewClassifier,
    )
    original = lambda *arg: 'NOT OK'
    config = self._makeOne(autocommit=True)
    lookup = (IViewClassifier, IRequest, Interface)
    config.registry.registerAdapter(original, lookup, IView, name='')
    def replacement(context, request):
        return 'OK'
    config.add_view(view=replacement, renderer=null_renderer)
    wrapper = self._getViewCallable(config)
    self.assertFalse(IMultiView.providedBy(wrapper))
    request = DummyRequest()
    request.is_xhr = True
    self.assertEqual(wrapper(None, request), 'OK')
def test_add_view_exc_default_phash_overrides_no_phash(self):
    """An exception view with the default phash replaces one lacking a phash."""
    from pyramid.renderers import null_renderer
    from zope.interface import implementedBy
    from pyramid.interfaces import (
        IExceptionViewClassifier,
        IMultiView,
        IRequest,
        IView,
    )
    original = lambda *arg: 'NOT OK'
    config = self._makeOne(autocommit=True)
    lookup = (IExceptionViewClassifier, IRequest, implementedBy(RuntimeError))
    config.registry.registerAdapter(original, lookup, IView, name='')
    def replacement(context, request):
        return 'OK'
    config.add_view(view=replacement, context=RuntimeError,
                    renderer=null_renderer)
    wrapper = self._getViewCallable(
        config, exc_iface=implementedBy(RuntimeError))
    self.assertFalse(IMultiView.providedBy(wrapper))
    request = DummyRequest()
    request.is_xhr = True
    self.assertEqual(wrapper(None, request), 'OK')
def test_add_view_default_phash_overrides_default_phash(self):
    """A default-phash view replaces an earlier default-phash registration."""
    from pyramid.renderers import null_renderer
    from pyramid.config.util import DEFAULT_PHASH
    from zope.interface import Interface
    from pyramid.interfaces import (
        IMultiView,
        IRequest,
        IView,
        IViewClassifier,
    )
    original = lambda *arg: 'NOT OK'
    original.__phash__ = DEFAULT_PHASH
    config = self._makeOne(autocommit=True)
    lookup = (IViewClassifier, IRequest, Interface)
    config.registry.registerAdapter(original, lookup, IView, name='')
    def replacement(context, request):
        return 'OK'
    config.add_view(view=replacement, renderer=null_renderer)
    wrapper = self._getViewCallable(config)
    self.assertFalse(IMultiView.providedBy(wrapper))
    request = DummyRequest()
    request.is_xhr = True
    self.assertEqual(wrapper(None, request), 'OK')
def test_add_view_exc_default_phash_overrides_default_phash(self):
    """A default-phash exception view replaces a default-phash predecessor."""
    from pyramid.renderers import null_renderer
    from pyramid.config.util import DEFAULT_PHASH
    from zope.interface import implementedBy
    from pyramid.interfaces import (
        IExceptionViewClassifier,
        IMultiView,
        IRequest,
        IView,
    )
    original = lambda *arg: 'NOT OK'
    original.__phash__ = DEFAULT_PHASH
    config = self._makeOne(autocommit=True)
    lookup = (IExceptionViewClassifier, IRequest, implementedBy(RuntimeError))
    config.registry.registerAdapter(original, lookup, IView, name='')
    def replacement(context, request):
        return 'OK'
    config.add_view(view=replacement, context=RuntimeError,
                    renderer=null_renderer)
    wrapper = self._getViewCallable(
        config, exc_iface=implementedBy(RuntimeError))
    self.assertFalse(IMultiView.providedBy(wrapper))
    request = DummyRequest()
    request.is_xhr = True
    self.assertEqual(wrapper(None, request), 'OK')
def test_add_view_multiview_replaces_existing_view(self):
    """Re-adding a view with a distinct phash promotes it to a multiview."""
    from pyramid.renderers import null_renderer
    from zope.interface import Interface
    from pyramid.interfaces import (
        IMultiView,
        IRequest,
        IView,
        IViewClassifier,
    )
    original = lambda *arg: 'OK'
    original.__phash__ = 'abc'
    config = self._makeOne(autocommit=True)
    lookup = (IViewClassifier, IRequest, Interface)
    config.registry.registerAdapter(original, lookup, IView, name='')
    config.add_view(view=original, renderer=null_renderer)
    wrapper = self._getViewCallable(config)
    self.assertTrue(IMultiView.providedBy(wrapper))
    self.assertEqual(wrapper(None, None), 'OK')
def test_add_view_exc_multiview_replaces_existing_view(self):
    """Re-adding an exception view with a distinct phash yields a multiview."""
    from pyramid.renderers import null_renderer
    from zope.interface import implementedBy
    from pyramid.interfaces import (
        IExceptionViewClassifier,
        IMultiView,
        IRequest,
        IView,
        IViewClassifier,
    )
    original = lambda *arg: 'OK'
    original.__phash__ = 'abc'
    config = self._makeOne(autocommit=True)
    runtime_iface = implementedBy(RuntimeError)
    config.registry.registerAdapter(
        original, (IViewClassifier, IRequest, runtime_iface),
        IView, name='')
    config.registry.registerAdapter(
        original, (IExceptionViewClassifier, IRequest, runtime_iface),
        IView, name='')
    config.add_view(view=original, context=RuntimeError,
                    renderer=null_renderer)
    wrapper = self._getViewCallable(config, exc_iface=runtime_iface)
    self.assertTrue(IMultiView.providedBy(wrapper))
    self.assertEqual(wrapper(None, None), 'OK')
def test_add_view_multiview_replaces_existing_securedview(self):
    """Re-adding over an ISecuredView registration also yields a multiview."""
    from pyramid.renderers import null_renderer
    from zope.interface import Interface
    from pyramid.interfaces import (
        IMultiView,
        IRequest,
        ISecuredView,
        IViewClassifier,
    )
    original = lambda *arg: 'OK'
    original.__phash__ = 'abc'
    config = self._makeOne(autocommit=True)
    lookup = (IViewClassifier, IRequest, Interface)
    config.registry.registerAdapter(original, lookup, ISecuredView, name='')
    config.add_view(view=original, renderer=null_renderer)
    wrapper = self._getViewCallable(config)
    self.assertTrue(IMultiView.providedBy(wrapper))
    self.assertEqual(wrapper(None, None), 'OK')
def test_add_view_exc_multiview_replaces_existing_securedview(self):
    """Re-adding over secured exception-view registrations yields a multiview."""
    from pyramid.renderers import null_renderer
    from zope.interface import implementedBy
    from pyramid.interfaces import (
        IExceptionViewClassifier,
        IMultiView,
        IRequest,
        ISecuredView,
        IViewClassifier,
    )
    original = lambda *arg: 'OK'
    original.__phash__ = 'abc'
    config = self._makeOne(autocommit=True)
    runtime_iface = implementedBy(RuntimeError)
    config.registry.registerAdapter(
        original, (IViewClassifier, IRequest, runtime_iface),
        ISecuredView, name='')
    config.registry.registerAdapter(
        original, (IExceptionViewClassifier, IRequest, runtime_iface),
        ISecuredView, name='')
    config.add_view(view=original, context=RuntimeError,
                    renderer=null_renderer)
    wrapper = self._getViewCallable(config, exc_iface=runtime_iface)
    self.assertTrue(IMultiView.providedBy(wrapper))
    self.assertEqual(wrapper(None, None), 'OK')
def test_add_view_with_accept_multiview_replaces_existing_view(self):
    """An accept-qualified view joins an existing one as a media view."""
    from pyramid.renderers import null_renderer
    from zope.interface import Interface
    from pyramid.interfaces import (
        IMultiView,
        IRequest,
        IView,
        IViewClassifier,
    )
    def original(context, request):
        return 'OK'
    def replacement(context, request):
        return 'OK2'
    config = self._makeOne(autocommit=True)
    lookup = (IViewClassifier, IRequest, Interface)
    config.registry.registerAdapter(original, lookup, IView, name='')
    config.add_view(view=replacement, accept='text/html',
                    renderer=null_renderer)
    wrapper = self._getViewCallable(config)
    self.assertTrue(IMultiView.providedBy(wrapper))
    self.assertEqual(len(wrapper.views), 1)
    self.assertEqual(len(wrapper.media_views), 1)
    self.assertEqual(wrapper(None, None), 'OK')
    request = DummyRequest()
    request.accept = DummyAccept('text/html', 'text/html')
    self.assertEqual(wrapper(None, request), 'OK2')
def test_add_view_mixed_case_replaces_existing_view(self):
    """Accept values are normalized: 'text/HTML' replaces 'text/html'."""
    from pyramid.renderers import null_renderer
    def default_view(context, request):
        return 'OK'
    def html_view(context, request):
        return 'OK2'
    def html_view_upper(context, request):
        return 'OK3'
    config = self._makeOne(autocommit=True)
    config.add_view(view=default_view, renderer=null_renderer)
    config.add_view(view=html_view, accept='text/html',
                    renderer=null_renderer)
    config.add_view(view=html_view_upper, accept='text/HTML',
                    renderer=null_renderer)
    wrapper = self._getViewCallable(config)
    self.assertTrue(IMultiView.providedBy(wrapper))
    self.assertEqual(len(wrapper.media_views.items()), 1)
    self.assertFalse('text/HTML' in wrapper.media_views)
    self.assertEqual(wrapper(None, None), 'OK')
    request = DummyRequest()
    request.accept = DummyAccept('text/html', 'text/html')
    self.assertEqual(wrapper(None, request), 'OK3')
def test_add_views_with_accept_multiview_replaces_existing(self):
    """A second view for the same accept value replaces the first."""
    from pyramid.renderers import null_renderer
    def default_view(context, request):
        return 'OK'
    def first_html(context, request):
        return 'OK2'
    def second_html(context, request):
        return 'OK3'
    config = self._makeOne(autocommit=True)
    config.add_view(view=default_view, renderer=null_renderer)
    config.add_view(view=first_html, accept='text/html',
                    renderer=null_renderer)
    config.add_view(view=second_html, accept='text/html',
                    renderer=null_renderer)
    wrapper = self._getViewCallable(config)
    self.assertEqual(len(wrapper.media_views['text/html']), 1)
    self.assertEqual(wrapper(None, None), 'OK')
    request = DummyRequest()
    request.accept = DummyAccept('text/html', 'text/html')
    self.assertEqual(wrapper(None, request), 'OK3')
def test_add_view_exc_with_accept_multiview_replaces_existing_view(self):
    """An accept-qualified exception view joins the existing one as a multiview."""
    from pyramid.renderers import null_renderer
    from zope.interface import implementedBy
    from pyramid.interfaces import (
        IExceptionViewClassifier,
        IMultiView,
        IRequest,
        IView,
        IViewClassifier,
    )
    def original(context, request):
        return 'OK'
    def replacement(context, request):
        return 'OK2'
    config = self._makeOne(autocommit=True)
    runtime_iface = implementedBy(RuntimeError)
    config.registry.registerAdapter(
        original, (IViewClassifier, IRequest, runtime_iface),
        IView, name='')
    config.registry.registerAdapter(
        original, (IExceptionViewClassifier, IRequest, runtime_iface),
        IView, name='')
    config.add_view(view=replacement, accept='text/html',
                    context=RuntimeError, renderer=null_renderer)
    wrapper = self._getViewCallable(config, exc_iface=runtime_iface)
    self.assertTrue(IMultiView.providedBy(wrapper))
    self.assertEqual(len(wrapper.views), 1)
    self.assertEqual(len(wrapper.media_views), 1)
    self.assertEqual(wrapper(None, None), 'OK')
    request = DummyRequest()
    request.accept = DummyAccept('text/html', 'text/html')
    self.assertEqual(wrapper(None, request), 'OK2')
def test_add_view_multiview_replaces_existing_view_with___accept__(self):
    """A view carrying __accept__ becomes the media view of the new multiview."""
    from pyramid.renderers import null_renderer
    from zope.interface import Interface
    from pyramid.interfaces import (
        IMultiView,
        IRequest,
        IView,
        IViewClassifier,
    )
    def original(context, request):
        return 'OK'
    def replacement(context, request):
        return 'OK2'
    original.__accept__ = 'text/html'
    original.__phash__ = 'abc'
    config = self._makeOne(autocommit=True)
    lookup = (IViewClassifier, IRequest, Interface)
    config.registry.registerAdapter(original, lookup, IView, name='')
    config.add_view(view=replacement, renderer=null_renderer)
    wrapper = self._getViewCallable(config)
    self.assertTrue(IMultiView.providedBy(wrapper))
    self.assertEqual(len(wrapper.views), 1)
    self.assertEqual(len(wrapper.media_views), 1)
    self.assertEqual(wrapper(None, None), 'OK2')
    request = DummyRequest()
    request.accept = DummyAccept('text/html')
    self.assertEqual(wrapper(None, request), 'OK')
def test_add_view_exc_mulview_replaces_existing_view_with___accept__(self):
    """An exception view carrying __accept__ is kept as the media view."""
    from pyramid.renderers import null_renderer
    from zope.interface import implementedBy
    from pyramid.interfaces import (
        IExceptionViewClassifier,
        IMultiView,
        IRequest,
        IView,
        IViewClassifier,
    )
    def original(context, request):
        return 'OK'
    def replacement(context, request):
        return 'OK2'
    original.__accept__ = 'text/html'
    original.__phash__ = 'abc'
    config = self._makeOne(autocommit=True)
    runtime_iface = implementedBy(RuntimeError)
    config.registry.registerAdapter(
        original, (IViewClassifier, IRequest, runtime_iface),
        IView, name='')
    config.registry.registerAdapter(
        original, (IExceptionViewClassifier, IRequest, runtime_iface),
        IView, name='')
    config.add_view(view=replacement, context=RuntimeError,
                    renderer=null_renderer)
    wrapper = self._getViewCallable(config, exc_iface=runtime_iface)
    self.assertTrue(IMultiView.providedBy(wrapper))
    self.assertEqual(len(wrapper.views), 1)
    self.assertEqual(len(wrapper.media_views), 1)
    self.assertEqual(wrapper(None, None), 'OK2')
    request = DummyRequest()
    request.accept = DummyAccept('text/html')
    self.assertEqual(wrapper(None, request), 'OK')
def test_add_view_multiview_replaces_multiview(self):
    """Adding over an existing multiview installs the new view in its list."""
    from pyramid.renderers import null_renderer
    from zope.interface import Interface
    from pyramid.interfaces import (
        IMultiView,
        IRequest,
        IViewClassifier,
    )
    existing = DummyMultiView()
    config = self._makeOne(autocommit=True)
    lookup = (IViewClassifier, IRequest, Interface)
    config.registry.registerAdapter(existing, lookup, IMultiView, name='')
    replacement = lambda *arg: 'OK2'
    config.add_view(view=replacement, renderer=null_renderer)
    wrapper = self._getViewCallable(config)
    self.assertTrue(IMultiView.providedBy(wrapper))
    self.assertEqual([x[:2] for x in wrapper.views], [(replacement, None)])
    self.assertEqual(wrapper(None, None), 'OK1')
def test_add_view_exc_multiview_replaces_multiviews(self):
    """A context-only exception view lands in both hot and exc multiviews."""
    from pyramid.renderers import null_renderer
    from zope.interface import implementedBy
    from pyramid.interfaces import (
        IExceptionViewClassifier,
        IMultiView,
        IRequest,
        IViewClassifier,
    )
    hot_view = DummyMultiView()
    exc_view = DummyMultiView()
    config = self._makeOne(autocommit=True)
    runtime_iface = implementedBy(RuntimeError)
    config.registry.registerAdapter(
        hot_view, (IViewClassifier, IRequest, runtime_iface),
        IMultiView, name='')
    config.registry.registerAdapter(
        exc_view, (IExceptionViewClassifier, IRequest, runtime_iface),
        IMultiView, name='')
    replacement = lambda *arg: 'OK2'
    config.add_view(view=replacement, context=RuntimeError,
                    renderer=null_renderer)
    hot_wrapper = self._getViewCallable(config, ctx_iface=runtime_iface)
    self.assertTrue(IMultiView.providedBy(hot_wrapper))
    self.assertEqual([x[:2] for x in hot_wrapper.views],
                     [(replacement, None)])
    self.assertEqual(hot_wrapper(None, None), 'OK1')
    exc_wrapper = self._getViewCallable(config, exc_iface=runtime_iface)
    self.assertTrue(IMultiView.providedBy(exc_wrapper))
    self.assertEqual([x[:2] for x in exc_wrapper.views],
                     [(replacement, None)])
    self.assertEqual(exc_wrapper(None, None), 'OK1')
def test_add_view_exc_multiview_replaces_only_exc_multiview(self):
    """With exception_only=True only the exception multiview gains the view."""
    from pyramid.renderers import null_renderer
    from zope.interface import implementedBy
    from pyramid.interfaces import (
        IExceptionViewClassifier,
        IMultiView,
        IRequest,
        IViewClassifier,
    )
    hot_view = DummyMultiView()
    exc_view = DummyMultiView()
    config = self._makeOne(autocommit=True)
    runtime_iface = implementedBy(RuntimeError)
    config.registry.registerAdapter(
        hot_view, (IViewClassifier, IRequest, runtime_iface),
        IMultiView, name='')
    config.registry.registerAdapter(
        exc_view, (IExceptionViewClassifier, IRequest, runtime_iface),
        IMultiView, name='')
    replacement = lambda *arg: 'OK2'
    config.add_view(view=replacement, context=RuntimeError,
                    exception_only=True, renderer=null_renderer)
    hot_wrapper = self._getViewCallable(config, ctx_iface=runtime_iface)
    self.assertTrue(IMultiView.providedBy(hot_wrapper))
    self.assertEqual(len(hot_wrapper.views), 0)
    self.assertEqual(hot_wrapper(None, None), 'OK1')
    exc_wrapper = self._getViewCallable(config, exc_iface=runtime_iface)
    self.assertTrue(IMultiView.providedBy(exc_wrapper))
    self.assertEqual([x[:2] for x in exc_wrapper.views],
                     [(replacement, None)])
    self.assertEqual(exc_wrapper(None, None), 'OK1')
def test_add_view_multiview_context_superclass_then_subclass(self):
    """A subclass-context view shadows a superclass one without a multiview."""
    from pyramid.renderers import null_renderer
    from zope.interface import Interface
    from pyramid.interfaces import (
        IMultiView,
        IRequest,
        IView,
        IViewClassifier,
    )
    class ISuper(Interface):
        pass
    class ISub(ISuper):
        pass
    def super_view(*arg):
        return 'OK'
    def sub_view(*arg):
        return 'OK2'
    config = self._makeOne(autocommit=True)
    config.registry.registerAdapter(
        super_view, (IViewClassifier, IRequest, ISuper), IView, name='')
    config.add_view(view=sub_view, for_=ISub, renderer=null_renderer)
    wrapper = self._getViewCallable(config, ctx_iface=ISuper,
                                    request_iface=IRequest)
    self.assertFalse(IMultiView.providedBy(wrapper))
    self.assertEqual(wrapper(None, None), 'OK')
    wrapper = self._getViewCallable(config, ctx_iface=ISub,
                                    request_iface=IRequest)
    self.assertFalse(IMultiView.providedBy(wrapper))
    self.assertEqual(wrapper(None, None), 'OK2')
def test_add_view_multiview_exception_superclass_then_subclass(self):
from pyramid.renderers import null_renderer
from zope.interface import implementedBy
from pyramid.interfaces import IRequest
| |
R environment
Only checked if a new R environment is created
Default: False
_r_object : Optional[RObject]
An R environment to use
Default: new RObject
_mat_object : Optional[MatlabObject]
A Matlab environment to use
Default: new MatlabObject
_environ : dict[str: str,int,float]
Variables to be used in bash
Default: os.environ
_timeout : int
Number of seconds until time out
Only used if a new R or Matlab environment is being created
Default: 600
_verbosity : int
How much to print while this function runs
0 <= _verbosity <= 3
If 0: silent
If 1: output from each environment
If 2: plus when switching between environments
If 3: plus additional information
**kwargs : dict[str:object]
Add as variables to the Python environment by calling `load`
Returns
--------
Master
A Master object with the resulting environments loaded
Raises
------
ValueError
If any multilang statement is improperly formatted
NameError
If any variable being passed doesn't exist
TypeError
If any variable passed to bash is not str,int,float
Scripts
=======
"Shebangs" (i.e. #! or %!) are used as the statements to both identify
multilang code and to switch between the different environments.
_lines should read as so:
[1] #! multilang [R, Python, Matlab, bash]
[2] # code here`
[3] #! R/Python/Matlab/bash -> [<vars>]
[4] # code here
[.] # ...
[n] #! Python -> [<vars>]
All multilang scripts start with `#! multilang` then an optional language.
If no initial language is given, Python is assumed.
Scripts should end with a Python switch line to retrieve any variables back
into the Python environment.
The suggested extension for a multilang file is .mul.
To switch languages, `#! <lang> -> [<vars>]` is used to switched to <lang>.
<vars> is an optional comma-separated list of variables to bring.
Language names are NOT case-sensitive and depend only on the existence
of 'r', 'p', 'm', or 'b'.
`print` only works in the Python and bash environments.
Outputs in R and Matlab are not currently captured.
Comments
--------
Line comments can be marked with either '#' or '%'
Block comments are surrounded by '%{'/'#{' and '%}'/'#}' on their own lines.
In Python, the modulo operator uses a bare %, which is overridden by
the multilang comment feature.
Use multilang's builtin `mod(a,b)` instead of a%b.
Use ''.format() instead of '' % ().
Python's `%=`is not affected.
R's `%...%` operators are not affected either.
Builtins
--------
All of multilang is available as builtins in the Python environment.
These can be extended by a Python function with the @multilang wrapper.
This is particularly useful when passing objects between environments.
As multilang's function are only available in Python, these functions are
only available when switching out of Python.
All inputs should be str, with the first being the name of the variable.
Local variables can be accessed by _VARIABLES[name], see example.
It should return a dict of {name: value} of things to pass through
`sio.savemat` into the next environment.
The definition of `mutlilang.as_array` follows as an example:
[1] #! multilang
[2] @multilang
[3] def as_array(var: str, extras: str = 'True'):
[4] obj = _VARIABLES[var]
[5] if extras != 'True':
[6] return {var: np.array(obj)}
[7] elif type(obj) is pd.core.frame.DataFrame:
[8] return {var: np.array(obj),
[9] var+'_index': obj.index.values.tolist(),
[10] var+'_columns': obj.columns.values.tolist()}
[11] else:
[12] return {var: np.array(obj)}
"""
# load the code
if hasattr(_lines, 'readlines'): # preferred utility
_file = _lines
_lines = __file.readlines()
elif hasattr(_lines, 'read'): # acceptable file usage
_file = _lines
_lines = _file.readlines()
elif type(_lines) is str and _lines[:2] not in ['#!','%!']: # file name
_fname = _lines
with open(_fname, 'r') as _file:
_lines = _file.readlines()
# make sure is Iterable[str] without line breaks
if type(_lines) in [bytes, str]: # handle not lists
_lines = str(_lines).replace('\r\n','\n').replace('\r','\n').split('\n')
elif type(_lines[0]) is bytes: # if List[bytes]
_lines = [str(i) for i in _lines]
if type(_lines[0] is str): # if List[str]
_lines = [i.strip('\n') for i in _lines]
# format validation
while _lines[0][:2] not in ['#!','%!'] or 'multilang' not in _lines[0].lower():
# find the multilang call
_lines = _lines[1:]
for _n, _i in enumerate(_lines[1:]):
if len(_i) > 2 and _i[:2] in ['#!', '%!']:
# check statements
_l = _i[2:].strip().replace(' ','').split('->')
if not any([i in _l[0].lower() for i in 'rpmb']) or len(_l) != 2:
raise ValueError('Improperly formatted call in line ' + str(_n+2))
# get the starting environment
_temp = _lines[0].split(' ')[-1].lower()
if 'multilang' in _temp or 'p' in _temp:
_lang = 'p'
elif 'r' in _temp:
_lang = 'r'
elif 'm' in _temp:
_lang = 'm'
elif 'b' in _temp and not 'matlab' in _temp:
# avoid b from matlab
_lang = 'b'
else:
raise ValueError('Unknown language was specified')
# deal with loading kwargs
if kwargs: _VARIABLES.update(kwargs)
# defaults
if not _environ: _environ = os.environ.copy()
if not _r_object: _r_object = RObject(load=_load_r, timeout=_timeout)
if not _mat_object: _mat_object = MatlabObject(timeout=_timeout)
# check in range
if _verbosity < 0: _verbosity = 0
elif _verbosity > 3: _verbosity = 3
# loop through code
# each endpoint increments counter and continues
if _verbosity >= 2: print('Starting in ' + ('Python' if _lang == 'p' else 'R' if _lang == 'r' else 'Matlab' if _lang == 'm' else 'bash'))
_counter = 1 # skip multilang declaration
while _counter < len(_lines):
_current_line = _lines[_counter].strip()
if _current_line in ['%{','#{']:
# block comment
_i = _counter+1
while _i < len(_lines) and _lines[_i].strip() not in ['%}','#}']:
_i += 1
_counter = _i+1
continue
elif not _current_line or (_current_line[0] in '#%' and _current_line[1] != '!'):
# line comment
_counter += 1
continue
# if currently in python
elif _lang == 'p':
if _current_line[:2] in ['#!','%!']: # if switching
if 'r' in _current_line.lower().split('->')[0]:
if _verbosity >= 2: print('Switching to R')
_lang = 'r'
_r_object = py_to_r(_current_line, _r_object)
elif 'm' in _current_line.lower().split('->')[0]:
if _verbosity >= 2: print('Switching to Matlab')
_lang = 'm'
_mat_object = py_to_mat(_current_line, _mat_object)
elif 'b' in _current_line.lower().split('->')[0]:
if _verbosity >= 2: print('Switching to bash')
_lang = 'b'
_environ = py_to_bash(_current_line, _environ)
_counter += 1
continue
elif '@multilang' in _current_line and re.search(r'^def\s*[a-zA-Z_]+\s*\(.*?\)\s*:$', _lines[_counter+1].strip()):
# declaring function in the local space
# get the next line
_end = _counter + 1
_l = _lines[_end].strip(' ')
# look for comments
_i = 0
_ignore = False
while _i < len(_l):
if _l[_i] in '\'"':
_ignore = not _ignore
elif not _ignore and (_l[_i] == '#' or (_l[_i] == '%' and _l[_i+1] != '=')):
break
_i += 1
_l = _l[:_i]
# get the function name
_name = _l.split('def ')[1].split('(')[0].strip()
# find the indent so we know when to stop
_search = re.search(r'\t+(?:.)', _l)
_tabs = _search.end() if _search and _search.end() > 0 else 0
# get the code
_to_exec = [_l[_tabs:]]
while _l and _l[:2] not in ['#!', '%!'] and _end < len(_lines)-1:
# get the line
_end += 1
_l = _lines[_end]
# get indentation
_search = re.search(r'[\t(?: {4})]+(?:.)', _l)
_curr_tabs = _search.end() if _search and _search.end() > 0 else 0
if _curr_tabs <= _tabs: # done!
break
elif _l and _l[0] not in '%#':
# ignore comments
_i = 0
_ignore = False
while _i < len(_l):
if _l[_i] in '\'"':
_ignore = not _ignore
elif not _ignore and (_l[_i] == '#' or (_l[_i] == '%' and _l[_i+1] != '=')):
break
_i += 1
# push it!
_to_exec.append(_l[:_i])
# define it and add it
if _verbosity == 0:
_old = sys.stdout
sys.stdout = None
try:
exec('\n'.join(_to_exec))
except Exception as e:
sys.stdout = _old
raise e
else:
sys.stdout = _old
del _old
else:
exec('\n'.join(_to_exec))
globals().update({_name: locals()[_name]})
_counter = _end
continue
elif '@multilang' in _current_line:
# skip if the next line isn't a `def`
_counter += 1
continue
else: # otherwise, do the thing
# make sure we're up to date
globals().update(_VARIABLES)
_end = _counter
_l = _lines[_end].strip(' ')
# remove comments
_i = 0
_ignore = False
while _i < len(_l):
if _l[_i] in '\'"':
# ignore comment markers in strings
_ignore = not _ignore
elif not _ignore and (_l[_i] == '#' or (_l[_i] == '%' and _l[_i+1] != '=')):
# if we're not in a string and it's a comment but not %=
break # stop before here
_i += 1
_l = _l[:_i]
# get the code to run
# have to build it up for exec
_to_exec = [_l] if _l and _l[0] not in '%#' else []
while _l and _l[:2] not in ['#!','%!'] and '@multilang' not in _l and _end < len(_lines)-1:
# stop at statements or local function declaration
_end += 1
_l = _lines[_end]
if _l and _l[0] not in '%#':
# ignore comments
_i = 0
_ignore = False
while _i < len(_l):
if _l[_i] in '\'"':
# ignore if in string
_ignore = not _ignore
elif not _ignore and (_l[_i] == '#' or (_l[_i] == '%' and _l[_i+1] != '=')):
break # stop before here
_i += 1
_to_exec.append(_l[:_i])
# define it and add it
if _verbosity == 0:
_old = sys.stdout
sys.stdout = None
try:
exec('\n'.join(_to_exec))
except Exception as e:
sys.stdout = _old
raise e
else:
sys.stdout = _old
del _old
else:
exec('\n'.join(_to_exec))
_VARIABLES.update({k:v for k,v in locals().items() if not k[0] is '_'})
_counter = _end+1 if _end == len(_lines)-1 else _end
continue
# if currently in bash
elif _lang == 'b':
if _current_line[:2] in ['#!', '%!']: # switching environments
if 'p' in _current_line.lower().split('->')[0]:
if _verbosity >= 2: print('Switching to Python')
_lang = 'p'
mat_to_py(_current_line, _mat_object)
elif 'r' in _current_line.lower().split('->')[0]:
if _verbosity >= 2: print('Switching to R')
_lang = 'r'
_r_object = mat_to_r(_current_line, _mat_object, _r_object)
elif 'm' in _current_line.lower().split('->')[0]:
if _verbosity >= 2: print('Switching to Matlab')
_lang = 'm'
_mat_object = py_to_mat(_current_line, _mat_object)
_counter += 1
continue
else: # otherwise do the thing
# get the line
_end = _counter
_l = _lines[_end].strip(' ')
# remove comments
_i = 0
_ignore = False
while _i < len(_l):
if _l[_i] in '\'"':
# ignore comment markers in strings
_ignore | |
new displacements after integration.
:rtype: :class:`numpy.ndarray`
"""
def runtime(self, model):
    """Advance the simulation by one explicit time step on the OpenCL device.

    Seven force-evaluation stages (k1dn..k7dn) are computed, each as a
    pair of kernels: a per-bond force calculation followed by a reduction
    of bond forces onto nodal forces. Between stages, a partial
    displacement update produces the trial displacements for the next
    stage. The stage structure (seven stages, an error estimate built
    from k1/k3..k7, and a final update that omits some stages) appears to
    be an embedded Runge-Kutta pair of Dormand-Prince type — TODO confirm
    against the kernel sources.

    If ``adapt_time_step`` reports the error is too large (returns 1),
    the full displacement update and bond-breakage check are skipped so
    the step can be retried with the reduced ``self.h_dt``.

    :arg model: holds problem sizes (``nnodes``, ``max_horizon_length``,
        ``degrees_freedom``, ...) used as kernel launch dimensions.
    :returns: None; all state lives in device buffers (``self.d_*``).
    """
    # Stage 1: bond forces from the current displacements d_un5
    self.cl_kernel_calc_bond_force(self.queue, (model.nnodes, model.max_horizon_length), None, self.d_forces,
        self.d_un5, self.d_vols, self.d_horizons, self.d_coords, self.d_bond_stiffness, self.d_bond_critical_stretch)
    # Reduce per-bond forces onto nodal forces k1dn (force BCs applied here)
    self.cl_kernel_reduce_force(self.queue, (model.max_horizon_length * model.degrees_freedom * model.nnodes,),
        (model.max_horizon_length,), self.d_forces, self.d_k1dn, self.d_force_bc_types, self.d_force_bc_values, self.local_mem, self.h_force_load_scale)
    # Trial displacements for stage 2
    self.cl_kernel_partial_displacement_update(self.queue, (model.nnodes * model.degrees_freedom,),
        None, self.d_k1dn, self.d_un5, self.d_un_temp, self.h_dt)
    # Stage 2: bond forces from the trial displacements d_un_temp
    self.cl_kernel_calc_bond_force(self.queue, (model.nnodes, model.max_horizon_length), None, self.d_forces,
        self.d_un_temp, self.d_vols, self.d_horizons, self.d_coords, self.d_bond_stiffness, self.d_bond_critical_stretch)
    # Reduce onto k2dn
    self.cl_kernel_reduce_force(self.queue, (model.max_horizon_length * model.degrees_freedom * model.nnodes,),
        (model.max_horizon_length,), self.d_forces, self.d_k2dn, self.d_force_bc_types, self.d_force_bc_values, self.local_mem, self.h_force_load_scale)
    # Trial displacements for stage 3 (combines k1, k2)
    self.cl_kernel_partial_displacement_update2(self.queue, (model.nnodes * model.degrees_freedom,),
        None, self.d_k1dn, self.d_k2dn, self.d_un5, self.d_un_temp, self.h_dt)
    # Stage 3
    self.cl_kernel_calc_bond_force(self.queue, (model.nnodes, model.max_horizon_length), None, self.d_forces,
        self.d_un_temp, self.d_vols, self.d_horizons, self.d_coords, self.d_bond_stiffness, self.d_bond_critical_stretch)
    # Reduce onto k3dn
    self.cl_kernel_reduce_force(self.queue, (model.max_horizon_length * model.degrees_freedom * model.nnodes,),
        (model.max_horizon_length,), self.d_forces, self.d_k3dn, self.d_force_bc_types, self.d_force_bc_values, self.local_mem, self.h_force_load_scale)
    # Trial displacements for stage 4 (combines k1..k3)
    self.cl_kernel_partial_displacement_update3(self.queue, (model.nnodes * model.degrees_freedom,),
        None, self.d_k1dn, self.d_k2dn, self.d_k3dn, self.d_un5, self.d_un_temp, self.h_dt)
    # Stage 4
    self.cl_kernel_calc_bond_force(self.queue, (model.nnodes, model.max_horizon_length), None, self.d_forces,
        self.d_un_temp, self.d_vols, self.d_horizons, self.d_coords, self.d_bond_stiffness, self.d_bond_critical_stretch)
    # Reduce onto k4dn
    self.cl_kernel_reduce_force(self.queue, (model.max_horizon_length * model.degrees_freedom * model.nnodes,),
        (model.max_horizon_length,), self.d_forces, self.d_k4dn, self.d_force_bc_types, self.d_force_bc_values, self.local_mem, self.h_force_load_scale)
    # Trial displacements for stage 5 (combines k1..k4)
    self.cl_kernel_partial_displacement_update4(self.queue, (model.nnodes * model.degrees_freedom,),
        None, self.d_k1dn, self.d_k2dn, self.d_k3dn, self.d_k4dn, self.d_un5, self.d_un_temp, self.h_dt)
    # Stage 5
    self.cl_kernel_calc_bond_force(self.queue, (model.nnodes, model.max_horizon_length), None, self.d_forces,
        self.d_un_temp, self.d_vols, self.d_horizons, self.d_coords, self.d_bond_stiffness, self.d_bond_critical_stretch)
    # Reduce onto k5dn
    self.cl_kernel_reduce_force(self.queue, (model.max_horizon_length * model.degrees_freedom * model.nnodes,),
        (model.max_horizon_length,), self.d_forces, self.d_k5dn, self.d_force_bc_types, self.d_force_bc_values, self.local_mem, self.h_force_load_scale)
    # Trial displacements for stage 6 (combines k1..k5)
    self.cl_kernel_partial_displacement_update5(self.queue, (model.nnodes * model.degrees_freedom,),
        None, self.d_k1dn, self.d_k2dn, self.d_k3dn, self.d_k4dn, self.d_k5dn, self.d_un5, self.d_un_temp, self.h_dt)
    # Stage 6
    self.cl_kernel_calc_bond_force(self.queue, (model.nnodes, model.max_horizon_length), None, self.d_forces,
        self.d_un_temp, self.d_vols, self.d_horizons, self.d_coords, self.d_bond_stiffness, self.d_bond_critical_stretch)
    # Reduce onto k6dn
    self.cl_kernel_reduce_force(self.queue, (model.max_horizon_length * model.degrees_freedom * model.nnodes,),
        (model.max_horizon_length,), self.d_forces, self.d_k6dn, self.d_force_bc_types, self.d_force_bc_values, self.local_mem, self.h_force_load_scale)
    # Trial displacements for stage 7 (note: k2dn intentionally absent —
    # consistent with a zero b2 coefficient in the tableau)
    self.cl_kernel_partial_displacement_update6(self.queue, (model.nnodes * model.degrees_freedom,),
        None, self.d_k1dn, self.d_k3dn, self.d_k4dn, self.d_k5dn, self.d_k6dn, self.d_un5, self.d_un_temp, self.h_dt)
    # Stage 7 (used for the error estimate)
    self.cl_kernel_calc_bond_force(self.queue, (model.nnodes, model.max_horizon_length), None, self.d_forces,
        self.d_un_temp, self.d_vols, self.d_horizons, self.d_coords, self.d_bond_stiffness, self.d_bond_critical_stretch)
    # Reduce onto k7dn
    self.cl_kernel_reduce_force(self.queue, (model.max_horizon_length * model.degrees_freedom * model.nnodes,),
        (model.max_horizon_length,), self.d_forces, self.d_k7dn, self.d_force_bc_types, self.d_force_bc_values, self.local_mem, self.h_force_load_scale)
    if self.adapt_time_step(model) == 1:
        # Error too large: dt was reduced; skip the commit so the
        # caller can retry this step
        pass
    else:
        # Commit: full displacement update with displacement BCs applied
        self.cl_kernel_displacement_update(self.queue,
                                           (model.nnodes * model.degrees_freedom,),
                                           None, self.d_bc_types, self.d_bc_values, self.d_un_temp, self.d_un5, self.h_dt)
        # Mark bonds whose stretch exceeds the critical value as broken
        self.cl_kernel_check_bonds(self.queue,
                                   (model.nnodes, model.max_horizon_length),
                                   None, self.d_horizons, self.d_un5, self.d_coords, self.d_bond_critical_stretch)
def incrementLoad(self, model, load_scale):
    """Set the host-side scale applied to the force boundary conditions.

    The scale is read by the force-reduction kernels on subsequent steps.
    No-op when the model has no force-loaded nodes.

    :arg model: model providing ``num_force_bc_nodes``.
    :arg load_scale: new force load scale, stored as ``np.float64``.
    """
    if model.num_force_bc_nodes:
        self.h_force_load_scale = np.float64(load_scale)
def incrementDisplacement(self, model, displacement_scale):
    """Set the host-side scale applied to displacement boundary conditions.

    :arg model: unused; accepted for interface symmetry with
        :meth:`incrementLoad`.
    :arg displacement_scale: new displacement scale, stored as ``np.float64``.
    """
    # update the host displacement load scale (original comment wrongly
    # said "force load scale" -- this attribute scales displacements)
    self.h_displacement_load_scale = np.float64(displacement_scale)
def write(self, model, t, sample):
    """Write a .vtk mesh file for the current timestep and gather tip stats.

    Reduces per-bond damage on the device, copies damage, displacements
    (``h_un5``) and the first force stage (``h_k1dn``) back to the host,
    then averages the z-displacement over all nodes flagged as tip nodes
    (``h_tip_types[i] == 1``) and sums their z-direction forces.

    :arg model: model providing ``nnodes``, ``max_horizon_length``, ``coords``.
    :arg t: time step index, used in the output file name.
    :arg sample: sample index, used in the output file name.
    :returns: tuple (damage array, mean tip z-displacement or None when there
        are no tip nodes, summed tip z shear force).
    """
    # Reduce per-bond broken/intact state into per-node damage on the device.
    self.cl_kernel_reduce_damage(self.queue, (model.nnodes * model.max_horizon_length,),
                                 (model.max_horizon_length,), self.d_horizons,
                                 self.d_horizons_lengths, self.d_damage, self.local_mem)
    # Copy results device -> host.
    cl.enqueue_copy(self.queue, self.h_damage, self.d_damage)
    cl.enqueue_copy(self.queue, self.h_un5, self.d_un5)
    cl.enqueue_copy(self.queue, self.h_k1dn, self.d_k1dn)
    # TODO define a failure criterion, idea: rate of change of damage goes to 0 after it has started increasing
    tip_displacement = 0
    tip_shear_force = 0
    tmp = 0
    for i in range(model.nnodes):
        if self.h_tip_types[i] == 1:
            tmp +=1
            # index 2 selects the z component of the per-node vectors
            tip_displacement += self.h_un5[i][2]
            tip_shear_force += self.h_k1dn[i][2]
    if tmp != 0:
        tip_displacement /= tmp
    else:
        # NOTE(review): tip_shear_force stays 0 (not None) when there are
        # no tip nodes, and is a sum rather than an average -- confirm
        # both are intended.
        tip_displacement = None
    vtk.write("output/U_"+"sample" + str(sample) +"t"+str(t) + ".vtk", "Solution time step = "+str(t),
              model.coords, self.h_damage, self.h_un5)
    #vtk.writeDamage("output/damage_" + str(t)+ "sample" + str(sample) + ".vtk", "Title", self.h_damage)
    return self.h_damage, tip_displacement, tip_shear_force
def adapt_time_step(self, model):
    """Adapt the step size from the embedded error estimate (RKF-style).

    Runs the error-check kernel over the stage vectors (k1..k7), copies the
    per-node error back to the host, and grows/shrinks ``h_dt`` by a factor
    of 1.1 when the mean error leaves the [error_size_min, error_size_max]
    band.

    :arg model: model providing ``nnodes`` and ``degrees_freedom``.
    :returns: 1 if the step was rejected (dt reduced; caller should skip the
        displacement update), 0 otherwise.
    """
    adapt = 0
    # Check for error size
    self.cl_kernel_check_error(self.queue,
                               (model.nnodes * model.degrees_freedom,),
                               None, self.d_k1dn, self.d_k3dn, self.d_k4dn, self.d_k5dn, self.d_k6dn, self.d_k7dn, self.d_un_temp, self.d_un5, self.d_errorn, self.h_dt)
    cl.enqueue_copy(self.queue, self.h_errorn, self.d_errorn)
    # Mean of the per-node error vector norms.
    error = np.linalg.norm(self.h_errorn, axis=1)
    error = np.mean(error)
    if error > self.error_size_max:
        self.h_dt /= 1.1
        print('Time step size reduced')
        print('time step is {}s, error size is {}'.format(self.h_dt, error))
        adapt = 1
    elif error < self.error_size_min:
        # NOTE(review): adapt stays 0 when dt is increased, so the current
        # step is still accepted -- confirm intended.
        self.h_dt *= 1.1
        print('Time step size increased')
        print('time step is {}s, error size is {}'.format(self.h_dt, error))
    return adapt
class EulerStochastic(Integrator):
r"""
Stochastic Euler integrator for quasi-static loading, using optimised OpenCL kernels.
The Euler method is a first-order numerical integration method. The
integration is given by,
.. math::
u(t + \delta t) = u(t) + \delta t f(t) d
where :math:`u(t)` is the displacement at time :math:`t`, :math:`f(t)` is
the force at time :math:`t`, :math:`\delta t` is the time step and
:math:`d` is a dampening factor.
"""
def __init__(self, model):
    """Initialise the OpenCL context, kernels, host arrays and device
    buffers for the stochastic Euler integration scheme.

    :arg model: peridynamics model providing the mesh, boundary
        conditions, bond properties, time step and covariance matrix K.
    """
    # Initializing OpenCL
    self.context = cl.create_some_context()
    self.queue = cl.CommandQueue(self.context)
    # Print out device info
    output_device_info(self.context.devices[0])
    # Build the OpenCL program from file
    kernelsource = open(pathlib.Path(__file__).parent.absolute() / "kernels/opencl_euler_stochastic.cl").read()
    SEP = " "
    # Problem sizes and the time step are baked into the kernels as
    # compile-time constants.
    options_string = (
        "-cl-fast-relaxed-math" + SEP
        + "-DPD_DPN_NODE_NO=" + str(model.degrees_freedom * model.nnodes) + SEP
        + "-DPD_NODE_NO=" + str(model.nnodes) + SEP
        + "-DMAX_HORIZON_LENGTH=" + str(model.max_horizon_length) + SEP
        + "-DPD_DT=" + str(model.dt) + SEP)
    program = cl.Program(self.context, kernelsource).build([options_string])
    # Kernel handles.
    self.cl_kernel_mmul = program.mmul
    self.cl_kernel_update_displacement = program.UpdateDisplacement
    self.cl_kernel_calc_bond_force = program.CalcBondForce
    self.cl_kernel_reduce_force = program.ReduceForce
    self.cl_kernel_reduce_damage = program.ReduceDamage
    self.cl_kernel_matrix_vector_mul1 = program.gemv1
    self.cl_kernel_matrix_vector_mul2 = program.gemv2
    # Set initial values in host memory
    # horizons and horizons lengths
    self.h_horizons = model.horizons
    self.h_horizons_lengths = model.horizons_lengths
    # Nodal coordinates
    self.h_coords = np.ascontiguousarray(model.coords, dtype=np.float64)
    # Displacement boundary conditions types and delta values
    self.h_bc_types = model.bc_types
    self.h_bc_values = model.bc_values
    self.h_tip_types = model.tip_types
    # Force boundary conditions types and values
    self.h_force_bc_types = model.force_bc_types
    self.h_force_bc_values = model.force_bc_values
    # Nodal volumes
    self.h_vols = model.V
    # Bond stiffnesses
    self.h_bond_stiffness = np.ascontiguousarray(model.bond_stiffness, dtype=np.float64)
    self.h_bond_critical_stretch = np.ascontiguousarray(model.bond_critical_stretch, dtype=np.float64)
    # Displacements
    self.h_un = np.empty((model.nnodes, model.degrees_freedom), dtype=np.float64)
    # Forces
    self.h_udn = np.empty((model.nnodes, model.degrees_freedom), dtype=np.float64)
    self.h_udn1 = np.empty((model.nnodes, model.degrees_freedom), dtype=np.float64)
    # Bond forces
    self.h_forces = np.empty((model.nnodes, model.degrees_freedom, model.max_horizon_length), dtype=np.float64)
    # Work-group local scratch for the reduction kernels.
    self.local_mem = cl.LocalMemory(np.dtype(np.float64).itemsize * model.max_horizon_length)
    # Damage vector
    self.h_damage = np.empty(model.nnodes).astype(np.float64)
    # Covariance matrix
    self.h_K = model.K
    # For applying force in increments
    self.h_force_load_scale = np.float64(0.0)
    # Smallest power of two >= nnodes: padded size used by the gemv kernels.
    self.h_m = np.intc(
        1<<(model.nnodes-1).bit_length()
    )
    self.h_n = np.intc(model.nnodes)
    # Build OpenCL data structures
    # Read only
    self.d_coords = cl.Buffer(self.context,
                              cl.mem_flags.READ_ONLY | cl.mem_flags.COPY_HOST_PTR,
                              hostbuf=self.h_coords)
    self.d_bc_types = cl.Buffer(self.context,
                                cl.mem_flags.READ_ONLY | cl.mem_flags.COPY_HOST_PTR,
                                hostbuf=self.h_bc_types)
    self.d_bc_values = cl.Buffer(self.context,
                                 cl.mem_flags.READ_ONLY | cl.mem_flags.COPY_HOST_PTR,
                                 hostbuf=self.h_bc_values)
    self.d_force_bc_types = cl.Buffer(self.context,
                                      cl.mem_flags.READ_ONLY | cl.mem_flags.COPY_HOST_PTR,
                                      hostbuf=self.h_force_bc_types)
    self.d_force_bc_values = cl.Buffer(self.context,
                                       cl.mem_flags.READ_ONLY | cl.mem_flags.COPY_HOST_PTR,
                                       hostbuf=self.h_force_bc_values)
    self.d_vols = cl.Buffer(self.context,
                            cl.mem_flags.READ_ONLY | cl.mem_flags.COPY_HOST_PTR,
                            hostbuf=self.h_vols)
    self.d_bond_stiffness = cl.Buffer(self.context,
                                      cl.mem_flags.READ_ONLY | cl.mem_flags.COPY_HOST_PTR,
                                      hostbuf=self.h_bond_stiffness)
    self.d_bond_critical_stretch = cl.Buffer(self.context,
                                             cl.mem_flags.READ_ONLY | cl.mem_flags.COPY_HOST_PTR,
                                             hostbuf=self.h_bond_critical_stretch)
    self.d_horizons_lengths = cl.Buffer(
        self.context, cl.mem_flags.READ_ONLY | cl.mem_flags.COPY_HOST_PTR,
        hostbuf=self.h_horizons_lengths)
    self.d_K = cl.Buffer(
        self.context, cl.mem_flags.READ_ONLY | cl.mem_flags.COPY_HOST_PTR,
        hostbuf=self.h_K)
    # Read and write
    self.d_horizons = cl.Buffer(
        self.context, cl.mem_flags.READ_WRITE | cl.mem_flags.COPY_HOST_PTR,
        hostbuf=self.h_horizons)
    self.d_un = cl.Buffer(self.context, cl.mem_flags.READ_WRITE, self.h_un.nbytes)
    self.d_udn = cl.Buffer(self.context, cl.mem_flags.READ_WRITE, self.h_udn1.nbytes)
    self.d_udn1 = cl.Buffer(self.context, cl.mem_flags.READ_WRITE, self.h_udn1.nbytes)
    self.d_forces = cl.Buffer(self.context, cl.mem_flags.READ_WRITE, self.h_forces.nbytes)
    # Write only
    self.d_damage = cl.Buffer(self.context, cl.mem_flags.WRITE_ONLY, self.h_damage.nbytes)
    # Initialize kernel parameters: None entries are buffer arguments.
    self.cl_kernel_update_displacement.set_scalar_arg_dtypes(
        [None, None, None, None, None, None])
    self.cl_kernel_calc_bond_force.set_scalar_arg_dtypes(
        [None, None, None, None, None, None, None])
    self.cl_kernel_reduce_force.set_scalar_arg_dtypes([None, None, None, None, None, None])
    self.cl_kernel_reduce_damage.set_scalar_arg_dtypes([None, None, None, None])
    self.cl_kernel_mmul.set_scalar_arg_dtypes([None, None, None])
    self.cl_kernel_matrix_vector_mul1.set_scalar_arg_dtypes(
        [None, None, None, None, None])
    self.cl_kernel_matrix_vector_mul2.set_scalar_arg_dtypes(
        [None, None, None, None, None, None])
def __call__(self):
    """Conduct one iteration of the integrator.

    NOTE(review): the body contains only this docstring -- the update
    appears not to be implemented here. State lives in the device buffers
    (``d_un``, ``d_udn``, ...), so the update kernels set up in
    ``__init__`` are presumably meant to be launched from this method;
    confirm against the other integrators in this module. The original
    docstring documented ``u``/``f`` arguments that this signature does
    not take.
    """
def noise(self, C, K, num_nodes, num_steps, degrees_freedom = 3):
    """Take samples from a multivariate normal distribution.

    NOTE(review): body is docstring-only; implementation appears to be
    missing or elsewhere. The original docstring's argument labels were
    swapped ("Cholesky factor, C" under ``:arg L:``) and contained typos
    ("whith", "degault"); corrected below.

    :arg C: Cholesky factor of the covariance matrix.
    :arg K: covariance matrix.
    :arg num_nodes: number of mesh nodes.
    :arg num_steps: number of time steps to generate noise for.
    :arg degrees_freedom: number of spatial dimensions (read: dimensions)
        the noise is generated in, default 3 i.e. x, y and z directions.
    :returns: num_nodes * degrees_freedom * num_steps array of noise.
    :rtype: np.array dtype=float64
    """
def multivar_normal(self, L, num_nodes):
""" Fn for taking a single multivar normal sample covariance matrix with Cholesky factor, L
"""
# Pad L
shape = np.shape(L)
padded_L = np.zeros((self.h_m, self.h_n))
padded_L[:shape[0],:shape[1]] = L
# OpenCL kernel reads L in column major not row major order
h_L = np.ascontiguousarray(np.transpose(padded_L), dtype=np.float64)
h_x = np.ascontiguousarray(np.random.normal(0, 1, | |
<filename>habitat_extensions/measures.py
import gzip
import json
import pickle
from typing import Any, List, Union
import numpy as np
from dtw import dtw
from fastdtw import fastdtw
from habitat.config import Config
from habitat.core.dataset import Episode
from habitat.core.embodied_task import Action, EmbodiedTask, Measure
from habitat.core.logging import logger
from habitat.core.registry import registry
from habitat.core.simulator import Simulator
from habitat.core.utils import try_cv2_import
from habitat.tasks.nav.nav import DistanceToGoal, Success
from habitat.tasks.utils import cartesian_to_polar
from habitat.utils.geometry_utils import quaternion_rotate_vector
from habitat.utils.visualizations import fog_of_war
from habitat.utils.visualizations import maps as habitat_maps
from numpy import ndarray
from habitat_extensions import maps
from habitat_extensions.task import RxRVLNCEDatasetV1
cv2 = try_cv2_import()
def euclidean_distance(
    pos_a: Union[List[float], ndarray], pos_b: Union[List[float], ndarray]
) -> float:
    """Return the straight-line (L2) distance between two positions."""
    delta = np.array(pos_b) - np.array(pos_a)
    return np.linalg.norm(delta, ord=2)
@registry.register_measure
class PathLength(Measure):
    """Path Length (PL): cumulative euclidean distance travelled by the
    agent, summed over consecutive agent positions in the episode.
    """

    cls_uuid: str = "path_length"

    def __init__(self, sim: Simulator, *args: Any, **kwargs: Any):
        self._sim = sim
        super().__init__(**kwargs)

    def _get_uuid(self, *args: Any, **kwargs: Any) -> str:
        return self.cls_uuid

    def reset_metric(self, *args: Any, **kwargs: Any):
        self._previous_position = self._sim.get_agent_state().position
        self._metric = 0.0

    def update_metric(self, *args: Any, **kwargs: Any):
        position = self._sim.get_agent_state().position
        self._metric += euclidean_distance(position, self._previous_position)
        self._previous_position = position
@registry.register_measure
class OracleNavigationError(Measure):
    """Oracle Navigation Error (ONE): the minimum distance-to-goal observed
    over every point visited along the agent's path.
    """

    cls_uuid: str = "oracle_navigation_error"

    def _get_uuid(self, *args: Any, **kwargs: Any) -> str:
        return self.cls_uuid

    def reset_metric(self, *args: Any, task: EmbodiedTask, **kwargs: Any):
        task.measurements.check_measure_dependencies(
            self.uuid, [DistanceToGoal.cls_uuid]
        )
        self._metric = float("inf")
        self.update_metric(task=task)

    def update_metric(self, *args: Any, task: EmbodiedTask, **kwargs: Any):
        distance = task.measurements.measures[
            DistanceToGoal.cls_uuid
        ].get_metric()
        if distance < self._metric:
            self._metric = distance
@registry.register_measure
class OracleSuccess(Measure):
    """Oracle Success Rate (OSR): 1.0 once the agent has ever been within
    the configured success distance of the goal, else 0.0.
    """

    cls_uuid: str = "oracle_success"

    def __init__(self, *args: Any, config: Config, **kwargs: Any):
        self._config = config
        super().__init__()

    def _get_uuid(self, *args: Any, **kwargs: Any) -> str:
        return self.cls_uuid

    def reset_metric(self, *args: Any, task: EmbodiedTask, **kwargs: Any):
        task.measurements.check_measure_dependencies(
            self.uuid, [DistanceToGoal.cls_uuid]
        )
        self._metric = 0.0
        self.update_metric(task=task)

    def update_metric(self, *args: Any, task: EmbodiedTask, **kwargs: Any):
        within_goal = (
            task.measurements.measures[DistanceToGoal.cls_uuid].get_metric()
            < self._config.SUCCESS_DISTANCE
        )
        # Sticky success: once achieved, stays 1.0 for the episode.
        self._metric = float(self._metric or within_goal)
@registry.register_measure
class OracleSPL(Measure):
    """Oracle SPL (Oracle Success weighted by Path Length): the maximum
    SPL achieved at any point along the agent's path.
    """

    cls_uuid: str = "oracle_spl"

    def _get_uuid(self, *args: Any, **kwargs: Any) -> str:
        return self.cls_uuid

    def reset_metric(self, *args: Any, task: EmbodiedTask, **kwargs: Any):
        task.measurements.check_measure_dependencies(self.uuid, ["spl"])
        self._metric = 0.0

    def update_metric(self, *args: Any, task: EmbodiedTask, **kwargs: Any):
        current_spl = task.measurements.measures["spl"].get_metric()
        if current_spl > self._metric:
            self._metric = current_spl
@registry.register_measure
class StepsTaken(Measure):
    """Number of agent actions taken so far (STOP counts as an action):
    increments by one every time ``update_metric`` is called.
    """

    cls_uuid: str = "steps_taken"

    def _get_uuid(self, *args: Any, **kwargs: Any) -> str:
        return self.cls_uuid

    def reset_metric(self, *args: Any, **kwargs: Any):
        self._metric = 0.0

    def update_metric(self, *args: Any, **kwargs: Any):
        self._metric = self._metric + 1.0
@registry.register_measure
class WaypointRewardMeasure(Measure):
    """A reward measure used for training VLN-CE agents via RL.

    Per-step reward = (distance-scaled) slack reward + progress towards
    the goal + success bonus; see ``update_metric``.
    """
    def __init__(
        self, *args: Any, sim: Simulator, config: Config, **kwargs: Any
    ) -> None:
        self._sim = sim
        # Per-step reward added every action (NOTE(review): presumably
        # negative, i.e. a time penalty -- confirm in config).
        self._slack_reward = config.slack_reward
        self._use_distance_scaled_slack_reward = (
            config.use_distance_scaled_slack_reward
        )
        self._scale_slack_on_prediction = config.scale_slack_on_prediction
        self._success_reward = config.success_reward
        # Multiplier on the distance-to-goal delta reward term.
        self._distance_scalar = config.distance_scalar
        self._prev_position = None
        super().__init__()
    def reset_metric(
        self, *args: Any, task: EmbodiedTask, **kwargs: Any
    ) -> None:
        """Reset reward bookkeeping at the start of an episode."""
        task.measurements.check_measure_dependencies(
            self.uuid, [DistanceToGoal.cls_uuid, Success.cls_uuid]
        )
        self._previous_distance_to_goal = task.measurements.measures[
            "distance_to_goal"
        ].get_metric()
        self._metric = 0.0
        # Keep only the ground-plane components (indices 0 and 2).
        self._prev_position = np.take(
            self._sim.get_agent_state().position, [0, 2]
        )
    def _get_scaled_slack_reward(self, action: Action) -> float:
        """Return the slack reward, optionally scaled by distance moved.

        Scaling divides by 0.25 -- presumably the standard forward-step
        length in meters; TODO confirm.
        """
        # Discrete (int) actions always get the flat slack reward.
        if isinstance(action["action"], int):
            return self._slack_reward
        if not self._use_distance_scaled_slack_reward:
            return self._slack_reward
        agent_pos = np.take(self._sim.get_agent_state().position, [0, 2])
        # Either the predicted waypoint radius or the realized displacement.
        slack_distance = (
            action["action_args"]["r"]
            if self._scale_slack_on_prediction and action["action"] != "STOP"
            else np.linalg.norm(self._prev_position - agent_pos)
        )
        scaled_slack_reward = self._slack_reward * slack_distance / 0.25
        self._prev_position = agent_pos
        # min() caps the scaled value at the flat slack reward (assumes
        # slack_reward <= 0 -- TODO confirm).
        return min(self._slack_reward, scaled_slack_reward)
    def _progress_to_goal(self, task: EmbodiedTask) -> float:
        """Reward component: decrease in distance-to-goal since last step."""
        distance_to_goal = task.measurements.measures[
            "distance_to_goal"
        ].get_metric()
        distance_to_goal_delta = (
            self._previous_distance_to_goal - distance_to_goal
        )
        # Guard against NaN/inf from the simulator's distance measure.
        if np.isnan(distance_to_goal_delta) or np.isinf(
            distance_to_goal_delta
        ):
            l = self._sim.get_agent_state().position
            logger.error(
                f"\nNaN or inf encountered in distance measure. agent location: {l}",
            )
            # Fall back to a fixed penalty for the unusable measurement.
            distance_to_goal_delta = -1.0
        self._previous_distance_to_goal = distance_to_goal
        return self._distance_scalar * distance_to_goal_delta
    def update_metric(
        self, *args: Any, action: Action, task: EmbodiedTask, **kwargs: Any
    ) -> None:
        """Compute this step's reward: slack + progress + success bonus."""
        reward = self._get_scaled_slack_reward(action)
        reward += self._progress_to_goal(task)
        reward += (
            self._success_reward
            * task.measurements.measures["success"].get_metric()
        )
        self._metric = reward
    @staticmethod
    def _get_uuid(*args: Any, **kwargs: Any) -> str:
        return "waypoint_reward_measure"
@registry.register_measure
class NDTW(Measure):
    """NDTW (Normalized Dynamic Time Warping)

    Path-fidelity score in (0, 1] comparing the agent's trajectory to the
    ground-truth path via dynamic time warping.
    ref: https://arxiv.org/abs/1907.05446
    """
    cls_uuid: str = "ndtw"
    def __init__(
        self, *args: Any, sim: Simulator, config: Config, **kwargs: Any
    ):
        self._sim = sim
        self._config = config
        # fastdtw is an approximate but faster DTW implementation.
        self.dtw_func = fastdtw if config.FDTW else dtw
        # RxR ships one ground-truth file per annotation role; merge them.
        if "{role}" in config.GT_PATH:
            self.gt_json = {}
            for role in RxRVLNCEDatasetV1.annotation_roles:
                with gzip.open(
                    config.GT_PATH.format(split=config.SPLIT, role=role), "rt"
                ) as f:
                    self.gt_json.update(json.load(f))
        else:
            with gzip.open(
                config.GT_PATH.format(split=config.SPLIT), "rt"
            ) as f:
                self.gt_json = json.load(f)
        super().__init__()
    def _get_uuid(self, *args: Any, **kwargs: Any) -> str:
        return self.cls_uuid
    def reset_metric(self, *args: Any, episode, **kwargs: Any):
        """Load the episode's ground-truth path and start a fresh track."""
        self.locations = []
        self.gt_locations = self.gt_json[episode.episode_id]["locations"]
        self.update_metric()
    def update_metric(self, *args: Any, **kwargs: Any):
        current_position = self._sim.get_agent_state().position.tolist()
        if len(self.locations) == 0:
            self.locations.append(current_position)
        else:
            # Agent did not move: keep the previous score unchanged.
            if current_position == self.locations[-1]:
                return
            self.locations.append(current_position)
        # DTW cost between agent path so far and the full GT path.
        dtw_distance = self.dtw_func(
            self.locations, self.gt_locations, dist=euclidean_distance
        )[0]
        # Normalize by GT path length and success distance, map to (0, 1].
        nDTW = np.exp(
            -dtw_distance
            / (len(self.gt_locations) * self._config.SUCCESS_DISTANCE)
        )
        self._metric = nDTW
@registry.register_measure
class SDTW(Measure):
    """SDTW (Success weighted by nDTW).

    ref: https://arxiv.org/abs/1907.05446
    """

    cls_uuid: str = "sdtw"

    def _get_uuid(self, *args: Any, **kwargs: Any) -> str:
        return self.cls_uuid

    def reset_metric(self, *args: Any, task: EmbodiedTask, **kwargs: Any):
        task.measurements.check_measure_dependencies(
            self.uuid, [NDTW.cls_uuid, Success.cls_uuid]
        )
        self.update_metric(task=task)

    def update_metric(self, *args: Any, task: EmbodiedTask, **kwargs: Any):
        measures = task.measurements.measures
        episode_success = measures[Success.cls_uuid].get_metric()
        ndtw_score = measures[NDTW.cls_uuid].get_metric()
        self._metric = episode_success * ndtw_score
@registry.register_measure
class TopDownMapVLNCE(Measure):
"""A top down map that optionally shows VLN-related visual information
such as MP3D node locations and MP3D agent traversals.
"""
cls_uuid: str = "top_down_map_vlnce"
def __init__(
    self, *args: Any, sim: Simulator, config: Config, **kwargs: Any
) -> None:
    """Initialise map state and load the MP3D connectivity graphs."""
    self._sim = sim
    self._config = config
    self._map_resolution = config.MAP_RESOLUTION
    # Per-episode state; populated in reset_metric().
    self._step_count = None
    self._previous_xy_location = None
    self._top_down_map = None
    self._meters_per_pixel = None
    self.current_node = ""
    with open(config.GRAPHS_FILE, "rb") as f:
        self._conn_graphs = pickle.load(f)
    super().__init__()
def _get_uuid(self, *args: Any, **kwargs: Any) -> str:
    # Measure identifier used by the habitat measurement registry/lookup.
    return self.cls_uuid
def get_original_map(self) -> ndarray:
    """Draw the scene's blank top-down map and reset the fog-of-war mask."""
    top_down_map = habitat_maps.get_topdown_map_from_sim(
        self._sim,
        map_resolution=self._map_resolution,
        draw_border=self._config.DRAW_BORDER,
        meters_per_pixel=self._meters_per_pixel,
    )
    if self._config.FOG_OF_WAR.DRAW:
        self._fog_of_war_mask = np.zeros_like(top_down_map)
    else:
        self._fog_of_war_mask = None
    return top_down_map
def reset_metric(
    self, *args: Any, episode: Episode, **kwargs: Any
) -> None:
    """Rebuild the top-down map for a new episode and draw static overlays
    (MP3D waypoint nodes, shortest path, reference path, source/target).
    """
    # Scene key from the directory name of the scene path.
    self._scene_id = episode.scene_id.split("/")[-2]
    self._step_count = 0
    self._metric = None
    self._meters_per_pixel = habitat_maps.calculate_meters_per_pixel(
        self._map_resolution, self._sim
    )
    self._top_down_map = self.get_original_map()
    agent_position = self._sim.get_agent_state().position
    # Scene key from the file name (without extension). NOTE(review):
    # differs from self._scene_id above -- confirm both index the same
    # entry of self._conn_graphs.
    scene_id = episode.scene_id.split("/")[-1].split(".")[0]
    # Habitat maps take (z, x) world coordinates.
    a_x, a_y = habitat_maps.to_grid(
        agent_position[2],
        agent_position[0],
        self._top_down_map.shape[0:2],
        sim=self._sim,
    )
    self._previous_xy_location = (a_y, a_x)
    if self._config.FOG_OF_WAR.DRAW:
        self._fog_of_war_mask = fog_of_war.reveal_fog_of_war(
            self._top_down_map,
            self._fog_of_war_mask,
            np.array([a_x, a_y]),
            self.get_polar_angle(),
            fov=self._config.FOG_OF_WAR.FOV,
            max_line_len=self._config.FOG_OF_WAR.VISIBILITY_DIST
            / habitat_maps.calculate_meters_per_pixel(
                self._map_resolution, sim=self._sim
            ),
        )
    if self._config.DRAW_FIXED_WAYPOINTS:
        maps.draw_mp3d_nodes(
            self._top_down_map,
            self._sim,
            episode,
            self._conn_graphs[scene_id],
            self._meters_per_pixel,
        )
    if self._config.DRAW_SHORTEST_PATH:
        shortest_path_points = self._sim.get_straight_shortest_path_points(
            agent_position, episode.goals[0].position
        )
        maps.draw_straight_shortest_path_points(
            self._top_down_map,
            self._sim,
            self._map_resolution,
            shortest_path_points,
        )
    if self._config.DRAW_REFERENCE_PATH:
        maps.draw_reference_path(
            self._top_down_map,
            self._sim,
            episode,
            self._map_resolution,
            self._meters_per_pixel,
        )
    # draw source and target points last to avoid overlap
    if self._config.DRAW_SOURCE_AND_TARGET:
        maps.draw_source_and_target(
            self._top_down_map,
            self._sim,
            episode,
            self._meters_per_pixel,
        )
    # MP3D START NODE
    self._nearest_node = maps.get_nearest_node(
        self._conn_graphs[scene_id], np.take(agent_position, (0, 2))
    )
    nn_position = self._conn_graphs[self._scene_id].nodes[
        self._nearest_node
    ]["position"]
    # Grid coordinates of the nearest MP3D node (start of the MP3D path).
    self.s_x, self.s_y = habitat_maps.to_grid(
        nn_position[2],
        nn_position[0],
        self._top_down_map.shape[0:2],
        self._sim,
    )
    self.update_metric()
def update_metric(self, *args: Any, **kwargs: Any) -> None:
    """Redraw the map at the agent's latest position and package the
    metric dict consumed by visualization code.

    NOTE(review): this unpacks two values from self.update_map(), whose
    return annotation elsewhere says None -- confirm update_map actually
    returns (house_map, map_agent_pos).
    """
    self._step_count += 1
    (
        house_map,
        map_agent_pos,
    ) = self.update_map(self._sim.get_agent_state().position)
    self._metric = {
        "map": house_map,
        "fog_of_war_mask": self._fog_of_war_mask,
        "agent_map_coord": map_agent_pos,
        "agent_angle": self.get_polar_angle(),
        # Navmesh bounds as {"lower": ..., "upper": ...}.
        "bounds": {
            k: v
            for k, v in zip(
                ["lower", "upper"],
                self._sim.pathfinder.get_bounds(),
            )
        },
        "meters_per_px": self._meters_per_pixel,
    }
def get_polar_angle(self) -> float:
    """Return the agent's heading angle for drawing on the top-down map."""
    # quaternion is in x, y, z, w format
    rotation = self._sim.get_agent_state().rotation
    # Rotate the forward (-z) axis into the agent's reference frame.
    heading_vector = quaternion_rotate_vector(
        rotation.inverse(), np.array([0, 0, -1])
    )
    phi = cartesian_to_polar(-heading_vector[2], heading_vector[0])[1]
    # Offset by pi to account for the map's -z/+z convention flip.
    return np.array(phi) + np.pi
def update_map(self, agent_position: List[float]) -> None:
a_x, a_y = habitat_maps.to_grid(
agent_position[2],
agent_position[0],
self._top_down_map.shape[0:2],
self._sim,
)
# Don't draw over the source point
gradient_color = 15 + min(
self._step_count * 245 // self._config.MAX_EPISODE_STEPS, 245
)
if self._top_down_map[a_x, a_y] != maps.MAP_SOURCE_POINT_INDICATOR:
maps.drawline(
self._top_down_map,
self._previous_xy_location,
(a_y, a_x),
gradient_color,
thickness=int(
self._map_resolution * 1.4 / maps.MAP_THICKNESS_SCALAR
),
style="filled",
)
if self._config.FOG_OF_WAR.DRAW:
self._fog_of_war_mask = fog_of_war.reveal_fog_of_war(
self._top_down_map,
self._fog_of_war_mask,
np.array([a_x, a_y]),
self.get_polar_angle(),
self._config.FOG_OF_WAR.FOV,
max_line_len=self._config.FOG_OF_WAR.VISIBILITY_DIST
/ habitat_maps.calculate_meters_per_pixel(
self._map_resolution, sim=self._sim
),
)
point_padding = int(0.2 / self._meters_per_pixel)
prev_nearest_node = self._nearest_node
self._nearest_node = maps.update_nearest_node(
self._conn_graphs[self._scene_id],
self._nearest_node,
np.take(agent_position, (0, 2)),
)
if (
self._nearest_node != prev_nearest_node
and self._config.DRAW_MP3D_AGENT_PATH
):
nn_position = self._conn_graphs[self._scene_id].nodes[
self._nearest_node
]["position"]
(prev_s_x, prev_s_y) = (self.s_x, self.s_y)
self.s_x, self.s_y = habitat_maps.to_grid(
nn_position[2],
nn_position[0],
self._top_down_map.shape[0:2],
self._sim,
)
self._top_down_map[
self.s_x
- int(2.0 / 3.0 * point_padding) : self.s_x
+ int(2.0 / 3.0 * | |
testnode_dict is None and candidate_domain.depth >= 4)
else:
get_testnode = (use_auto_iters and testnode_dict is None and (not next_net_buffer))
if get_testnode:
# Use as test node the first picked node with a depth of 4 (this way split constraints are represented)
# If stratifying, it's applied only to the tighter bounding algorithm
testnode_dict = {"domain": candidate_domain.domain, "intermediate_lbs": candidate_domain.lower_all,
"intermediate_ubs": candidate_domain.upper_all}
if out_bounds_dict["parent_init"]:
testnode_dict["pinit"] = candidate_domain.parent_solution
# Bounding improvement estimations for automatic number of bounds iterations, and for stratified bounds
# Automatic number of iterations.
tighter_lb = bab.assess_impr_margin(
bounds_net, candidate_domain.domain, candidate_domain.lower_all, candidate_domain.upper_all,
net_info, intermediate_dict, c_lb=candidate_domain.lower_bound)
net_info["+iters_lb"] = tighter_lb
# Stratified bounding related.
# Postpone evaluation to when domains might be indeed marked as hard.
evaluate_auto_strat = next_net_info and next_net_info["lb_impr"] == -1 and \
((expans_factor**(candidate_domain.easy_away+1))/batch_size >
next_net_info["hard_overhead"])
# if doing autoiters, start autostrat only at the max number of iterations for the looser bounds
auto_iters_check = net_info["max_iters_reached"] or (not use_auto_iters)
if evaluate_auto_strat and auto_iters_check:
# Automatically infer stratification parameters by estimating the hard bounding gain.
if not gurobi:
bounds_net.initialize_from(copy.deepcopy(candidate_domain.parent_solution))
bounds_net.build_model_using_bounds(
candidate_domain.domain, (candidate_domain.lower_all, candidate_domain.upper_all))
next_net_info["net"].initialize_from(copy.deepcopy(candidate_domain.parent_solution))
next_net_info["net"].build_model_using_bounds(
candidate_domain.domain, (candidate_domain.lower_all, candidate_domain.upper_all))
else:
cpu_domain, cpu_intermediate_lbs, cpu_intermediate_ubs = bab.subproblems_to_cpu(
candidate_domain.domain, candidate_domain.lower_all, candidate_domain.upper_all,
squeeze=True)
next_net_info["net"].build_model_using_bounds(
cpu_domain, (cpu_intermediate_lbs, cpu_intermediate_ubs))
bounds_net.build_model_using_bounds(cpu_domain, (cpu_intermediate_lbs, cpu_intermediate_ubs))
if use_auto_iters:
# If using autoiters, need to recompute a LB for the test problem (the tightness might have changed)
looser_lb = bounds_net.compute_lower_bound(node=(-1, 0), counterexample_verification=True)
else:
looser_lb = candidate_domain.lower_bound
tighter_lb = next_net_info["net"].compute_lower_bound(node=(-1, 0), counterexample_verification=True)
lb_impr = (tighter_lb - looser_lb).mean()
print(f"Hard bounding improvement over looser LB: {lb_impr}")
next_net_info["lb_impr"] = lb_impr.cpu()
# get parent's dual solution from the candidate domain
parent_init_stacks.set_stack_parent_entries(candidate_domain.parent_solution, batch_idx)
bounds_net.unbuild()
# Compute branching choices
# TODO: branching will return IndexError in case no ambiguous ReLU is left. Should catch and get the LP solution
branch_start = time.time()
branching_decision_list = brancher.branch(domain_stack, lbs_stacks, ubs_stacks)
branch_time = time.time() - branch_start
print(f"Branching requires {branch_time}[s]")
# DOMAIN SPLITTING.
# Create stacks for the bounding of the split subproblems (duplicates the subdomains, with the two copies for
# the i-th subdomain in position 2*i, 2*i + 1.
domain_stack, lbs_stacks, ubs_stacks = bab.create_split_stacks(
domain_stack, lbs_stacks, ubs_stacks)
# Dict of sets storing for each layer index, the batch entries that are splitting a ReLU there
branching_layer_log = {}
for idx in range(-1, len(lbs_stacks) - 1):
branching_layer_log[idx] = set()
for batch_idx, branching_decision in enumerate(branching_decision_list):
branching_layer_log[branching_decision[0]] |= {2*batch_idx, 2*batch_idx+1}
# Binary branching.
for choice in [0, 1]:
# print(f'splitting decision: {branching_decision} - choice {choice}')
nb_visited_states += 1
# split the domain with the current branching decision
brancher.split_subdomain(
branching_decision, choice, 2*batch_idx + choice, domain_stack, lbs_stacks, ubs_stacks)
print(f"Running Nb states visited: {nb_visited_states}")
print(f"N. infeasible nodes {infeasible_count}")
relu_start = time.time()
# compute the bounds on the batch of splits, at once
dom_ub, dom_lb, dom_ub_point, dom_lb_all, dom_ub_all, dual_solutions, expected_improvement = \
compute_bounds(intermediate_dict, bounds_net, branching_layer_log, domain_stack, lbs_stacks,
ubs_stacks, parent_init_stacks, out_bounds_dict["parent_init"], gurobi_dict, expected_improvement,
testnode_dict=testnode_dict, compute_last_ubs=out_bounds_dict["do_ubs"], net_info=net_info)
# update the global upper bound (if necessary) comparing to the best of the batch
batch_ub, batch_ub_point_idx = torch.min(dom_ub, dim=0)
batch_ub_point = dom_ub_point[batch_ub_point_idx]
if batch_ub < global_ub:
global_ub = batch_ub
global_ub_point = batch_ub_point
# sequentially add all the domains to the queue (ordered list)
batch_global_lb = dom_lb[0]
added_domains = 0
for batch_idx in range(dom_lb.shape[0]):
# print('dom_lb: ', dom_lb[batch_idx])
# print('dom_ub: ', dom_ub[batch_idx])
if dom_lb[batch_idx] == float('inf') or dom_ub[batch_idx] == float('inf') or \
dom_lb[batch_idx] > dom_ub[batch_idx]:
infeasible_count += 1
elif dom_lb[batch_idx] < min(global_ub, decision_bound):
added_domains += 1
c_dom_lb_all = [lb[batch_idx].unsqueeze(0) for lb in dom_lb_all]
c_dom_ub_all = [ub[batch_idx].unsqueeze(0) for ub in dom_ub_all]
c_dual_solutions = dual_solutions.get_stack_entry(batch_idx)
if stratified_bab:
# store statistics that determine when to move to tighter last layer bounding
dom_to_add = ReLUDomain(
dom_lb[batch_idx].unsqueeze(0), dom_ub[batch_idx].unsqueeze(0), c_dom_lb_all,
c_dom_ub_all, parent_solution=c_dual_solutions,
parent_depth=depth_list[batch_idx], c_imp_avg=impr_avg_list[batch_idx],
c_imp=dom_lb[batch_idx].item() - parent_lb_list[batch_idx].item(), dec_thr=decision_bound,
hard_info=next_net_info, domain=domain_stack[batch_idx].unsqueeze(0)
).to_cpu()
else:
dom_to_add = ReLUDomain(
dom_lb[batch_idx].unsqueeze(0), dom_ub[batch_idx].unsqueeze(0),
c_dom_lb_all, c_dom_ub_all, parent_solution=c_dual_solutions,
parent_depth=depth_list[batch_idx], domain=domain_stack[batch_idx].unsqueeze(0)).to_cpu()
# if the problem is hard, add "difficult" domains to the hard queue
if next_net_info and bab.is_difficult_domain(dom_to_add, next_net_info, expansion=expans_factor):
bab.add_domain(dom_to_add, harder_domains)
else:
if next_net_buffer:
# use a buffer so that when the buffer is empty, we can use the hard problem's parent init
bab.add_domain(dom_to_add, harder_domains)
else:
bab.add_domain(dom_to_add, domains)
batch_global_lb = min(dom_lb[batch_idx], batch_global_lb)
expans_factor = max(float(added_domains) / (dom_lb.shape[0]/2), 1.0)
print(f"Batch expansion factor: {expans_factor}")
bound_time = time.time() - relu_start
print('A batch of relu splits requires: ', bound_time)
if next_net_info and "postpone_batches" in next_net_info:
next_net_info["postpone_batches"] -= 1
# Remove domains clearly on the right side of the decision threshold: our goal is to which side of it is the
# minimum, no need to know more for these domains.
prune_value = min(global_ub.cpu() - eps, decision_bound + eps)
domains = bab.prune_domains(domains, prune_value)
# read/write domains from secondary memory if necessary.
domains = bab.dump_domains_to_file(domains, dumped_domain_filelblist, max_domains_cpu, dumped_domain_blocksize)
dumped_domain_filelblist = bab.get_domains_from_file(domains, dumped_domain_filelblist, dumped_domain_blocksize)
# Update global LB.
if len(domains) + len(harder_domains) > 0:
lb_candidate = harder_domains[0] if harder_domains else domains[0]
lb_candidate = min(lb_candidate, domains[0]) if domains else lb_candidate
global_lb = lb_candidate.lower_bound.to(bounds_net_device)
if dumped_domain_filelblist:
dumped_lb_min = min(dumped_domain_filelblist, key=lambda x: x[1])[1].to(bounds_net_device)
global_lb = min(global_lb, dumped_lb_min)
else:
# If we've run out of domains, it means we included no newly splitted domain
global_lb = torch.ones_like(global_lb) * (decision_bound + eps) if batch_global_lb > global_ub \
else batch_global_lb
if harder_domains or next_net_buffer:
harder_domains = bab.prune_domains(harder_domains, prune_value)
# Switch to the harder domains if there's a next net or we're in the transition buffer
if len(domains) == 0 and (next_net_info or next_net_buffer):
domains = harder_domains
harder_domains = []
# Check whether it's worth switching network
if next_net_buffer:
# The buffer has been emptied -- will now initialize from parent (handled automatically via PInit)
print('tighter bounding buffer emptied')
next_net_buffer = False
else:
do_switch, expected_batches = bab.switch_to_hard(domains, next_net_info, batch_size)
if do_switch:
print('shifting to tighter bounding')
# Move to the following net in the provided list, updating the corresponding dictionaries of info
next_net_info["net"].lower_bounds = bounds_net.lower_bounds
next_net_info["net"].upper_bounds = bounds_net.upper_bounds
current_net += 1
bounds_net, net_info, next_net_info, use_auto_iters, batch_size = bab.get_lb_net_info(
out_bounds_dict, current_net, n_stratifications)
if out_bounds_dict["parent_init"]:
# use a buffer so that when the buffer is empty, we can use the hard problem's parent init
next_net_buffer = True
if gurobi:
bab.gurobi_switch_bounding_net(gurobi_dict)
if use_auto_iters:
testnode_dict = None
else:
# Postpone adding to the harder queue for expected_batches batches.
next_net_info["postpone_batches"] = expected_batches
# run attacks
# Try falsification only in the first 50 batch iterations. TODO: better UB heuristic?
if ub_method is not None and global_ub > decision_bound and n_iter < 50:
# Use the 10 best points from the LB initialization amongst the falsification initializers
val, ind = torch.topk(dom_ub, dim=0, k=min(10, dom_ub.size()[0]))
init_tensor = dom_ub_point[ind.squeeze()]
# perform attacks for property falsification
adv_examples, is_adv, scores = ub_method.create_adv_examples(
return_criterion='one', gpu=True, multi_targets=True, init_tensor=init_tensor)
# Update upper bound and its associated point.
attack_ub, attack_point_idx = torch.min(scores, dim=0)
attack_point = adv_examples[attack_point_idx]
if attack_ub < global_ub:
global_ub = attack_ub
global_ub_point = attack_point
if is_adv.sum() > 0:
print("Found a counter-example.")
print(f"Current: lb:{global_lb}\t ub: {global_ub}")
# Stopping criterion
if global_lb >= decision_bound:
break
elif global_ub < decision_bound:
break
bab.join_children(gurobi_dict, timeout)
print(f"Terminated in {time.time() - start_time}[s]; {nb_visited_states} nodes.")
print(f"Infeasible count: {infeasible_count}")
print(f"N. batches: {n_iter}")
bab.delete_dumped_domains(dumped_domain_filelblist)
return global_lb, global_ub, global_ub_point, nb_visited_states
def compute_bounds(intermediate_dict, bounds_net, branching_layer_log, splitted_domain, splitted_lbs,
splitted_ubs, parent_init_stacks, parent_init_flag, gurobi_dict, expected_improvement,
testnode_dict=None, compute_last_ubs=False, net_info=None):
"""
Split domain according to branching decision and compute all the necessary quantities for it.
Splitting on the input domain will never happen as it'd be done on l1-u1, rather than l0-u0 (representing the
conditioned input domain). So conditioning is not problematic, here.
:param intermediate_dict: Dictionary of networks (and info on how to select them) used for intermediate bounds
:param bounds_net: Network used for last bounds
:param branching_layer_log: List of sets storing for each layer index, the set of batch entries that are
splitting a ReLU there (stored like x_idx-1)
:param choice: 0/1 for whether to clip on a blocking/passing ReLU
:param splitted_lbs: list of tensors for the (pre-activation) lower bounds relative to all the activations of the
network, for all the domain batches
:param splitted_ubs:list of tensors for the (pre-activation) upper bounds relative to all the activations of the
network, for all the domain batches
# ----- (garbled/unreadable line at file-chunk boundary; original content lost) -----
curses.KEY_HOME: {'name': 'scroll home',
'func': self.__first_page},
curses.KEY_UP: {'name': 'prev creature',
'func': self.__view_prev},
curses.KEY_DOWN: {'name': 'next creature',
'func': self.__view_next},
curses.KEY_NPAGE: {'name': 'scroll down',
'func': self.__next_page,
'help': 'Scroll DOWN on the current pane ' +
'which may may be either the ' +
'character pane (on the left) or ' +
'the details pane (on the right)'},
curses.KEY_PPAGE: {'name': 'scroll up',
'func': self.__prev_page,
'help': 'Scroll UP on the current pane which ' +
'may may be either the character ' +
'pane (on the left) or the details ' +
'pane (on the right)'},
curses.KEY_LEFT: {'name': 'scroll char list',
'func': self.__left_pane,
'help': 'Choose the character pane (on the ' +
'left) for scrolling.'},
curses.KEY_RIGHT: {'name': 'scroll char detail',
'func': self.__right_pane,
'help': 'Choose the details pane (on the ' +
'right) for scrolling.'},
ord('a'): {'name': 'add creature',
'func': self.__add_creature,
'help': 'Add a creature to the current group.'},
ord('c'): {'name': 'change creature',
'func': self.__change_creature,
'help': 'Modify the currently selected creature by ' +
'changing attributes, adding or removing' +
'equipment, or changing some other feature.'},
ord('d'): {'name': 'delete creature',
'func': self.__delete_creature,
'help': 'Delete the currently selected creature ' +
'from the current group.'},
ord('e'): {'name': 'equip creature',
'func': self.__equip,
'help': 'Modify the currently selected creature by ' +
'changing attributes, adding or removing' +
'equipment, or changing some other feature.'},
ord('g'): {'name': 'create template group',
'func': self.__create_template_group,
'help': 'Make a new collection of templates (like ' +
'bad guys or space marines).'},
ord('I'): {'name': 'Import character.',
'func': self.__import_creature,
'help': 'Import character into combat accountant.'},
# TODO (now): ruleset-based
ord('s'): {'name': 'list Spells',
'func': self.__list_spells,
'help': 'Display the list of available ' +
'spells.'},
ord('t'): {'name': 'change template group',
'func': self.__change_template_group,
'help': 'Change the group of templates on which ' +
'you will base newly created creatures ' +
'going forward.'},
ord('T'): {'name': 'make into template',
'func': self.__create_template,
'help': 'Convert the currently selected creature ' +
'into a template and add that template into ' +
'the currently selected template group.'},
ord('U'): {'name': 'Update character.',
'func': self.__update_creature,
'help': 'Update character from .GCS file.'},
ord('q'): {'name': 'quit',
'func': self.__quit,
'help': 'Quit changing personnel.'},
})
if creature_type == PersonnelHandler.NPCs:
self._add_to_choice_dict({
ord('p'): {'name': 'NPC joins PCs',
'func': self.NPC_joins_PCs,
'help': 'Make the currently selected NPC join ' +
'the player characters. The NPC will ' +
'be listed in both groups but they will ' +
'both refer to the same creature ' +
'(changing one will change the other).'},
ord('P'): {'name': 'NPC leaves PCs',
'func': self.__NPC_leaves_PCs,
'help': 'If the current NPC has joined the ' +
'party, this will cause him/her to leave ' +
'the party.'},
ord('m'): {'name': 'NPC joins Monsters',
'func': self.NPC_joins_monsters,
'help': 'Make the currently selected NPC join ' +
'one of the groups of monsters. The NPC ' +
'will ' +
'be listed in both groups but they will ' +
'both refer to the same creature ' +
'(changing one will change the other).'},
})
self._window = self._window_manager.get_build_fight_gm_window(
self._choices)
self.__current_pane = PersonnelHandler.CHAR_DETAIL
# Name of templates we'll use to create new creatures.
self.__template_group = None
# The following is a dict of the Fighters/Venues in a group (PCs,
# NPCs, or monster group) sorted by the the Fighters' names (with the
# venue, if it exists, stuffed at the top). The dict is:
# {
# 'data': array of dict found in the data file
# 'obj': array of Fighter/Venue object
# }
# NOTE: [data][n] is the same creature as [obj][n]
self.__critters = None
self.__deleted_critter_count = 0
self.__equipment_manager = ca_equipment.EquipmentManager(
self.world, window_manager)
self.__new_char_name = None
self.__viewing_index = None
if creature_type == PersonnelHandler.NPCs:
self.__group_name = 'NPCs'
self.__existing_group(creature_type)
elif creature_type == PersonnelHandler.PCs:
self.__group_name = 'PCs'
self.__existing_group(creature_type)
else: # creature_type == PersonnelHandler.MONSTERs:
# This is the name of the monsters or 'PCs' that will ultimately
# take these creatures.
self.__group_name = monster_group_name
new_existing = 'existing' # If group_name != None
if self.__group_name is None:
new_existing_menu = [('new monster group', 'new')]
if len(self.world.get_fights()) > 0:
new_existing_menu.append(('existing monster group',
'existing'))
new_existing = None
while new_existing is None:
new_existing, ignore = self._window_manager.menu(
'New or Pre-Existing', new_existing_menu)
if new_existing == 'new':
self.__new_group()
else:
self.__existing_group(creature_type,
monster_group_name)
self.__viewing_index = (0 if self.__critters_contains_critters()
else None)
self._draw_screen()
if not self.__critters_contains_critters():
self.__add_creature()
return
#
# Public Methods
#
    def draw_screen(self):
        '''
        Public wrapper around self._draw_screen() so that a child widget can
        ask its parent handler to redraw the complete screen.

        NOTE(review): the original docstring said "FightHandler" but this
        method lives on the personnel handler -- wording fixed, behavior
        unchanged.

        Returns: nothing.
        '''
        self._draw_screen()
    def get_group_name(self):
        '''
        Returns the name of the group ('PCs', 'NPCs', or the monster group)
        that is currently being modified.
        '''
        # __group_name is assigned in __init__ ('PCs', 'NPCs', or a
        # user-chosen monster-group name).
        return self.__group_name
def get_obj_from_index(self): # Public to support testing
'''
Returns the Fighter/Venue object from the current viewing index into
the __critters list.
'''
if self.__viewing_index is None:
return None
fighter = self.__critters['obj'][self.__viewing_index]
return fighter
    def NPC_joins_monsters(self):  # Public to support testing
        '''
        Command ribbon method.

        Adds an existing NPC to a monster list (that NPC also stays in the NPC
        list).  This is useful if an NPC wishes to fight alongside a group of
        monsters against the party.

        Operates on the currently selected NPC.

        Returns: False to exit the current ScreenHandler, True to stay.
        '''
        # Make sure the person is an NPC
        npc = self.get_obj_from_index()
        if npc is None:
            return True
        if npc.group != 'NPCs':
            self._window_manager.error(['"%s" not an NPC' % npc.name])
            return True

        # Select the fight
        fight_menu = [(fight_name, fight_name)
                      for fight_name in self.world.get_fights()]
        fight_name, ignore = self._window_manager.menu('Join Which Fight',
                                                       fight_menu)
        # NOTE(review): if the user cancels the menu, fight_name may be None
        # and is passed unchecked to the lookup below -- confirm intended.

        # Make sure the person isn't already in the fight
        fight = self.world.get_creature_details_list(fight_name)
        if npc.name in fight:
            self._window_manager.error(['"%s" already in fight "%s"' %
                                        (npc.name, fight_name)])
            return True

        # A 'redirect' entry makes the fight and the NPC list refer to the
        # very same creature (editing one edits the other).
        fight[npc.name] = {'redirect': 'NPCs'}
        self._window.show_creatures(self.__critters['obj'],
                                    self.__new_char_name,
                                    self.__viewing_index)
        return True
def NPC_joins_PCs(self): # Public to support testing
'''
Command ribbon method.
Adds an existing NPC to the PC list (that NPC also stays in the NPC
list). This is useful if an NPC wishes to fight alongside the party.
Operates on the currently selected NPC.
Returns: False to exit the current ScreenHandler, True to stay.
'''
npc = self.get_obj_from_index()
if npc is None:
return True
if npc.group != 'NPCs':
self._window_manager.error(['"%s" not an NPC' % npc.name])
return True
if npc.name in self.world.details['PCs']:
self._window_manager.error(['"%s" already a PC' % npc.name])
return True
self.world.details['PCs'][npc.name] = {'redirect': 'NPCs'}
self._window.show_creatures(self.__critters['obj'],
self.__new_char_name,
self.__viewing_index)
return True
    def set_viewing_index(self,            # Public to support testing.
                          new_index        # int: new viewing index
                          ):
        ''' Sets the viewing index.  Used for testing. '''
        # No bounds checking: the caller is responsible for a valid index
        # (None means "no selection"; see get_obj_from_index).
        self.__viewing_index = new_index
#
# Private and Protected Methods
#
#
# Page navigation methods
#
def __first_page(self,
pane=None, # CHAR_LIST, CHAR_DETAIL,
# CHAR_LIST|CHAR_DETAIL
):
'''
Command ribbon method.
Scrolls to the top of the current pain
Returns: False to exit the current ScreenHandler, True to stay.
'''
if pane is None:
pane = self.__current_pane
if (pane & PersonnelHandler.CHAR_DETAIL) != 0:
self._window.char_detail_home()
if (pane & PersonnelHandler.CHAR_LIST) != 0:
self.__char_index = 0
self._window.char_list_home()
self._draw_screen()
return True
    def __import_creature(self):
        '''
        Command ribbon method.

        Imports a character (from a ruleset-specific file) into this program
        and installs it in the currently edited group.

        Returns: False to exit the current ScreenHandler, True to stay.
        '''
        # get the filename to import
        extension = self.world.ruleset.get_import_file_extension()
        filename_window = ca_gui.GetFilenameWindow(self._window_manager)
        filename = filename_window.get_filename(extension)
        if filename is None:
            return True

        # actually import the new creature
        name, creature = self.world.ruleset.import_creature_from_file(filename)
        if creature is None:
            return True

        # get the creature's name; a clash with an existing creature forces
        # the manual rename loop below (by clearing |name|)
        if name in self.__critters['data']:
            self._window_manager.error(
                    ['Creature with name "%s" already exists' % name])
            name = None

        if name is None:
            # Prompt until the user supplies an unused name or cancels.
            keep_going = True
            while keep_going:
                lines, cols = self._window.getmaxyx()
                name = self._window_manager.input_box(1,       # height
                                                      cols-4,  # width
                                                      'Creature Name')
                if name is None:
                    return True
                elif name in self.__critters['data']:
                    self._window_manager.error(
                        ['Creature with name "%s" already exists' % name])
                else:
                    keep_going = False

        # install in the current group
        self.__critters['data'][name] = creature
        self.__critters['obj'].append(self.world.get_creature(
            name,
            self.__group_name))

        # Select and display the newly imported creature.
        self.__viewing_index = len(self.__critters['obj']) - 1
        self.__new_char_name = name
        self._window.show_creatures(self.__critters['obj'],
                                    self.__new_char_name,
                                    self.__viewing_index)
        return True
def __left_pane(self):
'''
Command ribbon method.
Changes the currently active pane | |
# File: tensorlayerx/backend/ops/torch_nn.py
#! /usr/bin/python
# -*- coding: utf-8 -*-
import torch
import torch.nn.functional as F
from torch import _VF
from torch.nn import Module
def padding_format(padding):
    """
    Normalize a padding specification.

    Parameters
    ----------
    padding : str, int, tuple or None
        Must be one of "same"/"SAME", "valid"/"VALID", None, or an explicit
        int/tuple padding which is passed through unchanged.

    Returns
    -------
    "same", "valid", None, or the explicit int/tuple padding.

    Raises
    ------
    ValueError
        If the padding specification is not recognized.
    """
    if padding in ["SAME", "same"]:
        return "same"
    if padding in ["VALID", "valid"]:
        return "valid"
    # Fix: compare to None with `is`, not `==`.
    if padding is None:
        return None
    if isinstance(padding, (tuple, int)):
        return padding
    # ValueError subclasses Exception, so existing `except Exception` callers
    # keep working.
    raise ValueError("Unsupported padding: " + str(padding))
def preprocess_1d_format(data_format, padding):
    """
    Normalize a 1-D data format and padding specification.

    Parameters
    ----------
    data_format : str or None
        Must be one of: "channels_last", "NWC", "NLC", "channels_first",
        "NCW", "NCL", or None.
    padding : str
        Must be one of: "same", "valid", "SAME", "VALID".

    Returns
    -------
    tuple of ("NLC" or "NCL" or None, normalized padding)

    Raises
    ------
    ValueError
        If the data format is not recognized (padding_format raises for bad
        padding).
    """
    if data_format in ["channels_last", "NWC", "NLC"]:
        data_format = "NLC"
    elif data_format in ["channels_first", "NCW", "NCL"]:
        data_format = "NCL"
    elif data_format is None:  # fix: identity comparison instead of `== None`
        pass
    else:
        raise ValueError("Unsupported data format: " + str(data_format))
    padding = padding_format(padding)
    return data_format, padding
def preprocess_2d_format(data_format, padding):
    """
    Normalize a 2-D data format and padding specification.

    Parameters
    ----------
    data_format : str or None
        Must be one of: "channels_last", "NHWC", "channels_first", "NCHW",
        or None.
    padding : str
        Must be one of: "same", "valid", "SAME", "VALID".

    Returns
    -------
    tuple of ("NHWC" or "NCHW" or None, normalized padding)

    Raises
    ------
    ValueError
        If the data format is not recognized (padding_format raises for bad
        padding).
    """
    if data_format in ["channels_last", "NHWC"]:
        data_format = "NHWC"
    elif data_format in ["channels_first", "NCHW"]:
        data_format = "NCHW"
    elif data_format is None:  # fix: identity comparison instead of `== None`
        pass
    else:
        raise ValueError("Unsupported data format: " + str(data_format))
    padding = padding_format(padding)
    return data_format, padding
def preprocess_3d_format(data_format, padding):
    """
    Normalize a 3-D data format and padding specification.

    Parameters
    ----------
    data_format : str or None
        Must be one of: "channels_last", "NDHWC", "channels_first", "NCDHW",
        or None.
    padding : str
        Must be one of: "same", "valid", "SAME", "VALID".

    Returns
    -------
    tuple of ("NDHWC" or "NCDHW" or None, normalized padding)

    Raises
    ------
    ValueError
        If the data format is not recognized (padding_format raises for bad
        padding).
    """
    if data_format in ['channels_last', 'NDHWC']:
        data_format = 'NDHWC'
    elif data_format in ['channels_first', 'NCDHW']:
        data_format = 'NCDHW'
    elif data_format is None:  # fix: identity comparison instead of `== None`
        pass
    else:
        raise ValueError("Unsupported data format: " + str(data_format))
    padding = padding_format(padding)
    return data_format, padding
def nchw_to_nhwc(x):
    """
    Convert a channels-first tensor to channels-last.

    Parameters
    ----------
    x : tensor
        Channels-first tensor data (channel axis at position 1).

    Returns
    -------
    Channels-last tensor data (channel axis moved to the last position).
    """
    # One moveaxis call handles any rank; the old commented-out per-rank
    # transpose chains were dead code and have been removed.
    return torch.moveaxis(x, 1, -1)
def nhwc_to_nchw(x):
    """
    Convert a channels-last tensor to channels-first.

    Parameters
    ----------
    x : tensor
        Channels-last tensor data (channel axis at the last position).

    Returns
    -------
    Channels-first tensor data (channel axis moved to position 1).
    """
    # TODO: moveaxis returns a view; consider calling .contiguous() on the
    # result if downstream ops need contiguous memory.
    # One moveaxis call handles any rank; the old commented-out per-rank
    # transpose chains were dead code and have been removed.
    return torch.moveaxis(x, -1, 1)
class ReLU(object):
    """Callable object applying the element-wise ReLU, max(x, 0)."""

    def __init__(self):
        pass

    def __call__(self, x):
        # Delegate directly to torch's functional implementation.
        return F.relu(x)
def relu(x):
    """
    Computes rectified linear: max(features, 0).

    Parameters
    ----------
    x : tensor
        Must be one of the following types: float32, float64, int32, uint8, int16,
        int8, int64, bfloat16, uint16, half, uint32, uint64, qint8.

    Returns
    -------
    A Tensor. Has the same type as features.
    """
    result = F.relu(x)
    return result
class ELU(object):
    """Callable applying the ELU activation: alpha*(exp(x)-1) for x<0, x otherwise."""

    def __init__(self, alpha=1.0):
        # Scale of the negative branch.
        self.alpha = alpha

    def __call__(self, x):
        return F.elu(x, alpha=self.alpha)
def elu(x, alpha=1.0):
    """
    Computes exponential linear: `alpha * (exp(features) - 1)` if < 0, `features` otherwise.

    See [Fast and Accurate Deep Network Learning by Exponential Linear Units (ELUs)
    ](http://arxiv.org/abs/1511.07289)

    Parameters
    ----------
    x : tensor
        Must be one of the following types: half, bfloat16, float32, float64.
    alpha : float
        Scale of the negative branch.

    Returns
    -------
    A Tensor with the same type as features.
    """
    out = F.elu(x, alpha=alpha)
    return out
class ReLU6(object):
    """Callable applying ReLU6: min(max(x, 0), 6)."""

    def __init__(self):
        pass

    def __call__(self, x):
        # torch clamps at both 0 and 6 in a single fused op.
        return F.relu6(x)
def relu6(x):
    """
    Computes Rectified Linear 6: min(max(features, 0), 6).

    Parameters
    ----------
    x : tensor
        Must be one of the following types: float32, float64, int32, uint8, int16,
        int8, int64, bfloat16, uint16, half, uint32, uint64, qint8.

    Returns
    -------
    A Tensor with the same type as features.
    """
    clamped = F.relu6(x)
    return clamped
class LeakyReLU(object):
    """Callable applying leaky ReLU: x for x>=0, negative_slope*x otherwise."""

    def __init__(self, negative_slope=0.01):
        # Slope used for negative inputs.
        self.negative_slope = negative_slope

    def __call__(self, x):
        return F.leaky_relu(x, negative_slope=self.negative_slope)
def leaky_relu(x, negative_slope=0.01):
    """
    Compute the Leaky ReLU activation function.

    Parameters
    ----------
    x : tensor
        representing preactivation values. Must be one of the following types:
        float16, float32, float64, int32, int64.
    negative_slope : float
        Slope used for negative inputs.

    Returns
    -------
    The activation value.
    """
    activated = F.leaky_relu(x, negative_slope=negative_slope)
    return activated
class Softplus(object):
    """Callable applying softplus: log(1 + exp(x))."""

    def __init__(self):
        pass

    def __call__(self, x):
        # Numerically stable softplus from torch.
        return F.softplus(x)
class Tanh(object):
    """Callable applying the element-wise hyperbolic tangent."""

    def __init__(self):
        pass

    def __call__(self, x):
        # F.tanh is deprecated in favor of torch.tanh; behavior is identical.
        return torch.tanh(x)
class Sigmoid(object):
    """Callable applying the element-wise logistic sigmoid, 1/(1+exp(-x))."""

    def __init__(self):
        pass

    def __call__(self, x):
        return torch.sigmoid(x)
def sigmoid(x):
    """
    Computes sigmoid of x element-wise.

    Parameters
    ----------
    x : tensor
        A Tensor with type float16, float32, float64, complex64, or complex128.

    Returns
    -------
    A Tensor with the same type as x.
    """
    # F.sigmoid is deprecated; torch.sigmoid is the supported equivalent and
    # matches the Sigmoid class above.
    return torch.sigmoid(x)
class Softmax(object):
    """Callable applying softmax along ``axis`` (torch's ``dim``)."""

    def __init__(self, axis=None):
        # Axis along which the softmax is computed.
        self.axis = axis

    def __call__(self, x):
        return F.softmax(x, dim=self.axis)
def softmax(logits, axis=None):
    """
    Computes softmax activations.

    Parameters
    ----------
    logits : tensor
        Must be one of the following types: half, float32, float64.
    axis : int
        The dimension softmax would be performed on. The default is -1 which indicates the last dimension.

    Returns
    -------
    A Tensor. Has the same type and shape as logits.
    """
    return F.softmax(logits, dim=axis)
class GeLU(object):
    """Callable applying the Gaussian Error Linear Unit, x * Phi(x)."""

    def __init__(self, approximate=False):
        # Whether to use the tanh approximation of GELU.
        self.approximate = approximate

    def __call__(self, x):
        # Fix: `approximate` was stored but never used, so the tanh variant
        # silently fell back to the exact GELU.
        if self.approximate:
            # The `approximate` kwarg requires torch >= 1.12.
            return F.gelu(x, approximate='tanh')
        return F.gelu(x)
def gelu(x, approximate=False):
    """
    Compute the Gaussian Error Linear Unit, x * Phi(x).

    Parameters
    ----------
    x : tensor
        Input tensor (floating point).
    approximate : bool
        If True, use the tanh approximation of GELU (torch >= 1.12).

    Returns
    -------
    A Tensor with the same type as x.
    """
    # Fix: `approximate` was accepted but previously ignored.
    if approximate:
        return F.gelu(x, approximate='tanh')
    return F.gelu(x)
class Dropout(object):
    """Callable applying dropout with probability ``p`` (always in training mode)."""

    def __init__(self, p, seed=0):
        # `seed` is kept for API compatibility; it is not consumed here.
        self.p, self.seed = p, seed

    def __call__(self, inputs):
        drop_prob = self.p
        return F.dropout(inputs, p=drop_prob)
class BiasAdd(object):
    """
    Adds bias to value.

    Parameters
    ----------
    x : tensor
        A Tensor with type float, double, int64, int32, uint8, int16, int8, complex64, or complex128.
    bias : tensor
        Must be the same type as value unless value is a quantized type,
        in which case a different quantized type may be used.

    Returns
    -------
    A Tensor with the same type as value.
    """

    def __init__(self, data_format='channels_last'):
        super(BiasAdd, self).__init__()
        if data_format in ['channels_first', 'NCL', 'NCW', 'NCHW', 'NCDHW']:
            self.data_format = 'channels_first'
        elif data_format in ['channels_last', 'NLC', 'NWC', 'NHWC', 'NDHWC']:
            self.data_format = 'channels_last'
        else:
            # Fix: the original `raise ("...")` raised a plain string, which
            # is itself a TypeError at runtime; raise a real exception.
            raise ValueError("Unsupported data format: " + str(data_format))

    def __call__(self, x, bias):
        # For channels-first layouts move channels to the last axis so the
        # 1-D bias broadcasts, then move them back.
        if len(x.shape) > 2 and self.data_format == 'channels_first':
            x = nchw_to_nhwc(x)
        outputs = torch.add(x, bias)
        if len(x.shape) > 2 and self.data_format == 'channels_first':
            outputs = nhwc_to_nchw(outputs)
        return outputs
def bias_add(x, bias, data_format=None):
    """
    Adds bias to value.

    Parameters
    ----------
    x : tensor
        A Tensor with type float, double, int64, int32, uint8, int16, int8, complex64, or complex128.
    bias : tensor
        Must be the same type as value unless value is a quantized type,
        in which case a different quantized type may be used.
    data_format : A string.
        'N...C' and 'NC...' are supported; None defaults to channels-last.

    Returns
    -------
    A Tensor with the same type as value.
    """
    # Fix: the default data_format=None used to be forwarded straight into
    # BiasAdd, which rejects None; treat None as the channels-last default.
    if data_format is None:
        data_format = 'channels_last'
    add_obj = BiasAdd(data_format=data_format)
    return add_obj(x, bias)
class Conv1D(object):
    """Callable wrapper around ``F.conv1d`` with TF-style 'same' padding handling."""

    def __init__(self, stride, padding, data_format='NWC', dilations=None, out_channel=None, k_size=None, groups=1):
        # out_channel and k_size are accepted for API compatibility but are
        # not used in this backend.
        self.stride = stride
        self.dilations = dilations
        self.groups = groups
        self.data_format, self.padding = preprocess_1d_format(data_format, padding)

    def __call__(self, input, filters):
        # torch's conv1d expects NCL; convert from NLC on entry and back on exit.
        if self.data_format == 'NLC':
            input = nhwc_to_nchw(input)
        if self.padding == 'same':
            out = self.conv1d_same_padding(input, filters)
        else:
            out = F.conv1d(input, filters, stride=self.stride, padding=self.padding,
                           dilation=self.dilations, groups=self.groups)
        if self.data_format == 'NLC':
            out = nchw_to_nhwc(out)
        return out

    def conv1d_same_padding(self, input, filters):
        # `same_padding` (defined elsewhere in this module) returns whether an
        # extra one-sided pad is needed plus the total pad along the length axis.
        # NOTE(review): self.dilations is not forwarded here, so dilated 'same'
        # convolutions fall back to dilation=1 -- confirm intended.
        rows_odd, padding_rows = same_padding(input, filters, self.stride, 1)
        if rows_odd:
            input = F.pad(input, [0, int(rows_odd)], 'replicate')
        return F.conv1d(input, filters, stride=self.stride, padding=(padding_rows // 2), groups=self.groups)
def conv1d(input, filters, stride, padding, data_format='NWC', dilations=None):
"""
Computes a 1-D convolution given 3-D input and filter tensors.
Parameters
----------
input : tensor
A 3D Tensor. Must be of type float16, float32, or float64
filters : tensor
A 3D Tensor. Must have the same type as input.
stride : int of list
An int or list of ints that has length 1 or 3. The number of entries by which the filter is moved right at each step.
padding : string
| |
self.posterior_logp + 2*multivariate_normal.logpdf(self.nf_samples, self.mu_map, self.hess_inv, allow_singular=True)
self.log_weight_pq_den = 3*multivariate_normal.logpdf(self.nf_samples, self.mu_map, self.hess_inv, allow_singular=True)
self.log_evidence_pq = logsumexp(self.log_weight_pq_num) - logsumexp(self.log_weight_pq_den)
self.evidence_pq = np.exp(self.log_evidence_pq)
#sum of mean loss (p - q*Z_pq)^2 /N for diagnostic purposes
self.log_mean_loss = np.log( np.mean(( np.exp(self.posterior_logp) - np.exp(self.log_weight_pq_den/3+self.log_evidence_pq) )**2 ))
self.regularize_weights()
self.init_weights_cleanup(lambda x: self.logq_fr_el2o(x, self.mu_map, self.hess_inv),
jax.grad(lambda x: self.logq_fr_el2o(x, self.mu_map, self.hess_inv)))
self.q_ess = self.calculate_ess(self.log_weight)
self.total_ess = self.calculate_ess(self.sinf_logw)
self.hess_inv = self.shrink_init(self.mu_map, self.hess_inv)
self.init_weights_cleanup(lambda x: self.logq_fr_el2o(x, self.mu_map, self.hess_inv),
jax.grad(lambda x: self.logq_fr_el2o(x, self.mu_map, self.hess_inv)))
self.q_ess = self.calculate_ess(self.log_weight)
self.total_ess = self.calculate_ess(self.sinf_logw)
self.all_logq = np.array([])
self.nf_models = []
def logq_fr_el2o(self, z, mu, Sigma):
"""Logq for full-rank Gaussian family."""
return jnp.reshape(jax.scipy.stats.multivariate_normal.logpdf(z, mu, Sigma), ())
    def get_map_laplace(self):
        """Find the MAP+Laplace solution for the model.

        Sets self.map_dict (per-variable MAP values), self.mu_map (flattened
        MAP vector) and self.Sigma_map (inverse Hessian at the MAP).
        """
        if self.init_EL2O == 'adam':
            # Optimize the target with Adam (jax optimizers).
            self.optimization_start()
            opt_init, opt_update, get_params = jax_optimizers.adam(step_size=self.adam_lr, b1=self.adam_b1,
                                                                   b2=self.adam_b2, eps=self.adam_eps)
            opt_state = opt_init(self.start)
            for i in range(self.adam_steps):
                value, opt_state, update_params = self.update_adam(i, opt_state, opt_update, get_params)
                # Relative change of the objective; stop when below ftol.
                target_diff = np.abs((value - np.float64(self.adam_logp(floatX(update_params)))) / max(value, np.float64(self.adam_logp(floatX(update_params)))))
                if target_diff <= self.ftol:
                    print(f'ADAM converged at step {i}')
                    break
            vars = get_default_varnames(self.model.unobserved_RVs, include_transformed=True)
            self.map_dict = {var.name: value for var, value in zip(vars, self.model.fastfn(vars)(self.bij.rmap(update_params.squeeze())))}
        else:
            # Fall back to scipy-based MAP optimization.
            self.map_dict = find_MAP(start=self.start, model=self.model, method=self.scipy_map_method)
        # Flatten the per-variable MAP values into a single vector.
        self.mu_map = np.array([])
        for v in self.variables:
            self.mu_map = np.append(self.mu_map, self.map_dict[v.name])
        self.mu_map = self.mu_map.squeeze()
        if self.mu_map.size == 1:
            self.mu_map = np.array([self.mu_map])
            # Scalar case: covariance is the reciprocal Hessian as a 1x1 matrix.
            self.Sigma_map = np.array([1.0 / self.target_hessian(self.mu_map.reshape(-1, self.mu_map.size))]).reshape(-1, 1)
        else:
            self.Sigma_map = np.linalg.inv(self.target_hessian(self.mu_map.reshape(-1, self.mu_map.size)))
        print(f'MAP estimate = {self.map_dict}')
        print(f'Sigma estimate at MAP = {self.Sigma_map}')
    def run_el2o(self):
        """Run the EL2O algorithm, assuming you've got the MAP+Laplace solution.

        Iteratively fits a full-rank Gaussian (mu_k, Sigma_k) to the target,
        then importance-samples it to set initial weights, evidence estimates
        and ESS diagnostics on self.
        """
        # Initialize the variational Gaussian at the MAP/Laplace solution.
        self.mu_k = self.mu_map
        self.Sigma_k = self.Sigma_map
        self.EL2O = [1e10, 1]
        self.zk = np.random.multivariate_normal(self.mu_k, self.Sigma_k, size=len(self.mu_k))
        Niter = 1
        # Iterate until the EL2O objective is small enough, its relative
        # change stalls, or the iteration cap is reached.
        while (self.EL2O[-1] > self.absEL2O
               and abs((self.EL2O[-1] - self.EL2O[-2]) / self.EL2O[-1]) > self.fracEL2O
               and Niter < self.maxiter_EL2O):
            # Draw one fresh sample from the current Gaussian each iteration.
            self.zk = np.vstack((self.zk, np.random.multivariate_normal(self.mu_k, self.Sigma_k)))
            Nk = len(self.zk)
            if not self.use_hess_EL2O:
                # Estimate Sigma from outer products of centered samples and
                # target gradients (score matching) instead of Hessians.
                temp1 = 0
                temp2 = 0
                for k in range(Nk):
                    temp1 += np.outer(self.zk[k, :] - np.mean(self.zk, axis=0), self.zk[k, :] - np.mean(self.zk, axis=0))
                    temp2 += np.outer(self.zk[k, :] - np.mean(self.zk, axis=0), self.target_dlogp(self.zk[k, :].reshape(-1, self.zk[k, :].size)))
                if self.mean_field_EL2O:
                    # Diagonal (mean-field) covariance update.
                    self.Sigma_k = -1 * np.diag(temp2) / np.diag(temp1)
                    self.Sigma_k = 1.0 / self.Sigma_k
                    self.Sigma_k = self.Sigma_k * np.eye(self.Sigma_k.size)
                elif not self.mean_field_EL2O:
                    if temp1.size == 1:
                        self.Sigma_k = -1 * temp2 / temp1
                        self.Sigma_k = np.array([1.0 / self.Sigma_k]).reshape(-1, 1)
                    else:
                        self.Sigma_k = -1 * np.matmul(np.linalg.inv(temp1), temp2)
                        self.Sigma_k = np.linalg.inv(self.Sigma_k)
            elif self.use_hess_EL2O:
                # Use the sample-averaged Hessian of the target directly.
                self.Sigma_k = np.linalg.inv(np.sum(self.target_hessian(self.zk), axis=0) / Nk)
                if self.mean_field_EL2O:
                    self.Sigma_k = np.diag(self.Sigma_k) * np.eye(len(self.Sigma_k))
            # Mean update: sample mean plus Sigma-preconditioned average score.
            temp = 0
            for j in range(Nk):
                if self.zk[j, :].size == 1:
                    joint_logp = np.array([self.target_dlogp(self.zk[j, :].reshape(-1, self.zk[j, :].size))])
                else:
                    joint_logp = self.target_dlogp(self.zk[j, :].reshape(-1, self.zk[j, :].size))
                temp += np.matmul(self.Sigma_k, joint_logp)
            self.mu_k = np.mean(self.zk, axis=0) + temp / Nk
            # EL2O objective: mean squared mismatch of log-density values and
            # gradients between target p and Gaussian q at the samples.
            self.EL2O = np.append(self.EL2O, (1 / (len(self.zk)) * (np.sum((self.target_logp(self.zk) -
                                  jax.vmap(lambda x: self.logq_fr_el2o(x, self.mu_k, self.Sigma_k), in_axes=0)(self.zk))**2) +
                                  np.sum((self.target_dlogp(self.zk) -
                                  jax.vmap(jax.grad(lambda x: self.logq_fr_el2o(x, self.mu_k, self.Sigma_k)), in_axes=0)(self.zk))**2)
                                  )))
            Niter += 1
        print(f'Final EL2O mu = {self.mu_k}')
        print(f'Final EL2O Sigma = {self.Sigma_k}')
        # Importance-sample the fitted Gaussian to get weights and evidence.
        self.weighted_samples = np.random.multivariate_normal(self.mu_k, self.Sigma_k, size=self.init_draws)
        self.nf_samples = np.copy(self.weighted_samples)
        self.get_posterior_logp()
        self.log_weight = self.posterior_logp - multivariate_normal.logpdf(self.nf_samples, self.mu_k.squeeze(), self.Sigma_k, allow_singular=True)
        self.log_evidence = logsumexp(self.log_weight) - np.log(len(self.log_weight))
        self.evidence = np.exp(self.log_evidence)
        self.log_weight = self.log_weight - self.log_evidence
        # same as in fitnf but prior~q
        self.log_weight_pq_num = self.posterior_logp + 2*multivariate_normal.logpdf(self.nf_samples, self.mu_k.squeeze(), self.Sigma_k, allow_singular=True)
        self.log_weight_pq_den = 3*multivariate_normal.logpdf(self.nf_samples, self.mu_k.squeeze(), self.Sigma_k, allow_singular=True)
        self.log_evidence_pq = logsumexp(self.log_weight_pq_num) - logsumexp(self.log_weight_pq_den)
        self.evidence_pq = np.exp(self.log_evidence_pq)
        # sum of mean loss (p - q*Z_pq)^2 /N for diagnostic purposes
        self.log_mean_loss = np.log( np.mean(( np.exp(self.posterior_logp) - np.exp(self.log_weight_pq_den/3+self.log_evidence_pq) )**2 ))
        self.regularize_weights()
        self.init_weights_cleanup(lambda x: self.logq_fr_el2o(x, self.mu_k, self.Sigma_k),
                                  jax.grad(lambda x: self.logq_fr_el2o(x, self.mu_k, self.Sigma_k)))
        self.q_ess = self.calculate_ess(self.log_weight)
        self.total_ess = self.calculate_ess(self.sinf_logw)
        # Shrink the initial covariance, then recompute weights/ESS once more.
        self.Sigma_k = self.shrink_init(self.mu_k, self.Sigma_k)
        self.init_weights_cleanup(lambda x: self.logq_fr_el2o(x, self.mu_k, self.Sigma_k),
                                  jax.grad(lambda x: self.logq_fr_el2o(x, self.mu_k, self.Sigma_k)))
        self.q_ess = self.calculate_ess(self.log_weight)
        self.total_ess = self.calculate_ess(self.sinf_logw)
        # Reset the normalizing-flow bookkeeping for the runs that follow.
        self.all_logq = np.array([])
        self.nf_models = []
def run_el2o_optim(self):
"""Runs EL2O, optimizing for the elements of the Cholesky decomposition of the covariance."""
self.mu_k = self.mu_map
self.Sigma_k = self.Sigma_map
self.L_k = cholesky(self.Sigma_k, lower=True)
self.tril_ind = np.tril_indices(len(self.L_k))
if self.mean_field_EL2O:
self.L_k = np.sqrt(np.diag(self.Sigma_k)) * np.eye(len(self.L_k))
self.tril_ind = np.diag_indices_from(self.L_k)
print(len(self.L_k))
#self.const_k = 0
self.EL2O = [1e10, 1]
Ndim = len(self.mu_k)
Niter = 1
while (self.EL2O[-1] > self.absEL2O
and abs((self.EL2O[-1] - self.EL2O[-2]) / self.EL2O[-1]) > self.fracEL2O
and Niter < self.maxiter_EL2O):
print(f"EL2O iteration: {Niter}")
if Niter < 3:
self.zk = np.random.multivariate_normal(self.mu_k, np.matmul(self.L_k, self.L_k.T), size=self.EL2O_draws)
else:
self.zk = np.vstack((self.zk,
np.random.multivariate_normal(self.mu_k, np.matmul(self.L_k, self.L_k.T),
size=self.EL2O_draws)))
#self.zk = self.zk.reshape(-1, Ndim)
eloargs0 = np.copy(self.mu_k)
eloargs0 = np.append(eloargs0, self.L_k[self.tril_ind])
#eloargs0 = np.append(eloargs0, self.const_k)
#eloargs0 = self.L_k[self.tril_ind]
if self.EL2O_optim_method == 'adam':
print('Using Adam for ELO optimization.')
opt_init, opt_update, get_params = jax_optimizers.adam(step_size=self.adam_lr, b1=self.adam_b1,
b2=self.adam_b2, eps=self.adam_eps)
opt_state = opt_init(eloargs0)
for i in range(self.adam_steps):
value, opt_state, update_params = self.update_elo_adam(i, opt_state, opt_update, get_params, self.zk)
target_diff = np.abs((value - np.float64(self.elo_cost(update_params.squeeze(), self.zk))) /
max(value, np.float64(self.elo_cost(update_params.squeeze(), self.zk))))
if target_diff <= self.ftol:
print(f'ADAM converged at step {i}')
break
opt_result = update_params.squeeze()
self.mu_k = opt_result[0:Ndim]
self.L_k[self.tril_ind] = opt_result[Ndim:]
#self.L_k[self.tril_ind] = opt_result
#self.const_k = opt_result[-1]
self.EL2O = np.append(self.EL2O, self.elo_cost(opt_result.squeeze(), self.zk))
print(f'EL2O: {self.elo_cost(opt_result.squeeze(), self.zk)}')
elif self.EL2O_optim_method != 'adam':
opt_result = minimize(self.elo_cost, x0=eloargs0,
options={'maxiter': self.optim_iter, 'ftol': self.ftol, 'gtol': self.gtol},
method=self.EL2O_optim_method, args=(self.zk,),
jac=np.asarray(jax.grad(self.elo_cost)))
self.mu_k = opt_result.x[0:Ndim]
self.L_k[self.tril_ind] = opt_result.x[Ndim:]
#self.L_k[self.tril_ind] = opt_result.x
#self.const_k = opt_result.x[-1]
self.EL2O = np.append(self.EL2O, self.elo_cost(opt_result.x, self.zk))
print(f'EL2O: {self.elo_cost(opt_result.x, self.zk)}')
Niter += 1
self.Sigma_k = np.matmul(self.L_k, self.L_k.T)
print(f'Final EL2O mu = {self.mu_k}')
print(f'Final EL2O Sigma = {self.Sigma_k}')
#Sigma_lam = self.Sigma_k + lam * np.diag(self.Sigma_k) * np.eye(len(self.Sigma_k))
self.weighted_samples = np.random.multivariate_normal(self.mu_k, self.Sigma_k, size=self.init_draws)
self.nf_samples = np.copy(self.weighted_samples)
self.get_posterior_logp()
self.log_weight = self.posterior_logp - multivariate_normal.logpdf(self.nf_samples, self.mu_k.squeeze(), self.Sigma_k, allow_singular=True)
self.log_evidence = logsumexp(self.log_weight) - np.log(len(self.log_weight))
self.evidence = np.exp(self.log_evidence)
self.log_weight = self.log_weight - self.log_evidence
#same as in fitnf but prior~q
self.log_weight_pq_num = self.posterior_logp + 2*multivariate_normal.logpdf(self.nf_samples, self.mu_k.squeeze(), self.Sigma_k, allow_singular=True)
self.log_weight_pq_den = 3*multivariate_normal.logpdf(self.nf_samples, self.mu_k.squeeze(), self.Sigma_k, allow_singular=True)
self.log_evidence_pq = logsumexp(self.log_weight_pq_num) - logsumexp(self.log_weight_pq_den)
self.evidence_pq = np.exp(self.log_evidence_pq)
#sum of mean loss (p - q*Z_pq)^2 /N for diagnostic purposes
self.log_mean_loss = np.log( np.mean(( np.exp(self.posterior_logp) - np.exp(self.log_weight_pq_den/3+self.log_evidence_pq) )**2 ))
self.regularize_weights()
self.init_weights_cleanup(lambda x: self.logq_fr_el2o(x, self.mu_k, self.Sigma_k),
jax.grad(lambda x: self.logq_fr_el2o(x, self.mu_k, self.Sigma_k)))
self.q_ess = self.calculate_ess(self.log_weight)
self.total_ess = self.calculate_ess(self.sinf_logw)
self.Sigma_k = self.shrink_init(self.mu_k, self.Sigma_k)
self.init_weights_cleanup(lambda x: self.logq_fr_el2o(x, self.mu_k, self.Sigma_k),
jax.grad(lambda x: self.logq_fr_el2o(x, self.mu_k, self.Sigma_k)))
self.q_ess = self.calculate_ess(self.log_weight)
self.total_ess = self.calculate_ess(self.sinf_logw)
'''
self.weighted_samples = np.random.multivariate_normal(self.mu_k, self.Sigma_k, size=self.init_draws)
self.nf_samples = np.copy(self.weighted_samples)
self.get_posterior_logp()
self.log_weight = self.posterior_logp - multivariate_normal.logpdf(self.nf_samples, self.mu_k.squeeze(), self.Sigma_k, allow_singular=True)
self.log_evidence = logsumexp(self.log_weight) - np.log(len(self.log_weight))
self.evidence = np.exp(self.log_evidence)
self.log_weight = self.log_weight - self.log_evidence
#same as in fitnf but prior~q
self.log_weight_pq_num = self.posterior_logp + 2*multivariate_normal.logpdf(self.nf_samples, self.mu_k.squeeze(), self.Sigma_k, allow_singular=True)
self.log_weight_pq_den = 3*multivariate_normal.logpdf(self.nf_samples, self.mu_k.squeeze(), self.Sigma_k, allow_singular=True)
self.log_evidence_pq = logsumexp(self.log_weight_pq_num) - logsumexp(self.log_weight_pq_den)
self.evidence_pq = np.exp(self.log_evidence_pq)
self.regularize_weights()
self.init_weights_cleanup(lambda x: self.logq_fr_el2o(x, self.mu_k, self.Sigma_k),
jax.grad(lambda x: self.logq_fr_el2o(x, self.mu_k, self.Sigma_k)))
self.q_ess = self.calculate_ess(self.log_weight)
self.total_ess = self.calculate_ess(self.sinf_logw)
'''
self.all_logq = np.array([])
self.nf_models = []
def elo_cost(self, eloargs, z):
    """EL2O cost function, used for EL2O optimization.

    eloargs: flat parameter vector — the first ``z.shape[1]`` entries are
    the Gaussian mean, the remainder fills the lower triangle (via
    ``self.tril_ind``) of the Cholesky factor of the covariance.
    z: (num_samples, ndim) array of evaluation points.
    Returns the mean squared mismatch between the target score
    (gradient of log p) and the score of the Gaussian approximation.
    """
    _mu_k = eloargs[0:z.shape[1]]
    _L_k = jnp.zeros((z.shape[1], z.shape[1]))
    # Scatter packed lower-triangular entries into the Cholesky factor.
    # NOTE(review): jax.ops.index_update is the pre-0.2.22 jax API
    # (removed in newer jax in favour of x.at[idx].set(v)).
    _L_k = jax.ops.index_update(_L_k, self.tril_ind, eloargs[z.shape[1]:])
    #_L_k = jax.ops.index_update(_L_k, self.tril_ind, eloargs)
    #_const_k = eloargs[-1]
    '''
    elo = (1 / len(z)) * (jnp.sum((jnp.asarray(self.target_logp(z)) -
    jax.vmap(lambda x: self.logq_fr_el2o(x, _mu_k, jnp.matmul(_L_k, _L_k.T)), in_axes=0)(z)
    - _const_k)**2) +
    jnp.sum((jnp.asarray(self.target_dlogp(z)) -
    jax.vmap(jax.grad(lambda x: self.logq_fr_el2o(x, _mu_k, jnp.matmul(_L_k, _L_k.T))), in_axes=0)(z))**2
    ))
    '''
    # Gradient-only (score-matching) form of the EL2O objective; the
    # value+gradient form is kept above (commented out) for reference.
    elo = (1 / len(z)) * jnp.sum((jnp.asarray(self.target_dlogp(z)) -
                                  jax.vmap(jax.grad(lambda x: self.logq_fr_el2o(x, _mu_k, jnp.matmul(_L_k, _L_k.T))), in_axes=0)(z))**2)
    return elo
def update_elo_adam(self, step, opt_state, opt_update, get_params, z):
    """Perform one Adam update step on the EL2O objective.

    Evaluates the cost and its jax gradient at the current parameters,
    feeds the gradient to the optimizer, and returns
    (cost value, new optimizer state, new parameter vector).
    """
    current = np.asarray(get_params(opt_state)).astype(np.float64).squeeze()
    value = np.asarray(self.elo_cost(current, z))
    gradient = jax.grad(self.elo_cost)(current, z)
    opt_state = opt_update(step, gradient, opt_state)
    new_params = np.asarray(get_params(opt_state)).astype(np.float64)
    return value, opt_state, new_params
def run_advi(self):
    """Run ADVI for initialization.

    Fits mean-field or full-rank ADVI according to ``self.init_method``,
    draws ``init_draws`` samples from the fitted Gaussian, computes
    importance weights against the true posterior, two evidence
    estimates (standard IS and the p*q^2 / q^3 variant), ESS
    diagnostics, and finally shrinks the initial covariance.
    """
    if self.init_method == 'advi':
        advi = pm.fit(method='advi', model=self.model)
    elif self.init_method == 'fullrank_advi':
        advi = pm.fit(method='fullrank_advi', model=self.model)
    else:
        # Previously an unrecognised init_method fell through and
        # crashed below with a NameError on `advi`; fail early instead.
        raise ValueError("Unsupported init_method for ADVI initialization: {}".format(self.init_method))
    advi_samples = advi.sample(self.init_draws)
    print(f'ADVI mean = {advi.mean.eval()}')
    print(f'ADVI cov = {advi.cov.eval()}')
    # Convert the sampled trace into a flat parameter array per draw.
    population = []
    for i in range(self.init_draws):
        point = Point({v.name: advi_samples[v.name][i] for v in self.variables}, model=self.model)
        population.append(self.model.dict_to_array(point))
    self.weighted_samples = np.array(floatX(population))
    self.nf_samples = np.copy(self.weighted_samples)
    self.get_posterior_logp()
    # Standard importance weights: log p - log q under the ADVI Gaussian.
    self.log_weight = self.posterior_logp - multivariate_normal.logpdf(self.nf_samples, advi.mean.eval(),
                                                                       advi.cov.eval(), allow_singular=True)
    self.log_evidence = logsumexp(self.log_weight) - np.log(len(self.log_weight))
    self.evidence = np.exp(self.log_evidence)
    self.log_weight = self.log_weight - self.log_evidence
    #same as in fitnf but prior~q
    self.log_weight_pq_num = self.posterior_logp + 2*multivariate_normal.logpdf(self.nf_samples, advi.mean.eval(),
                                                                                advi.cov.eval(), allow_singular=True)
    self.log_weight_pq_den = 3*multivariate_normal.logpdf(self.nf_samples, advi.mean.eval(),
                                                          advi.cov.eval(), allow_singular=True)
    self.log_evidence_pq = logsumexp(self.log_weight_pq_num) - logsumexp(self.log_weight_pq_den)
    self.evidence_pq = np.exp(self.log_evidence_pq)
    self.regularize_weights()
    self.init_weights_cleanup(lambda x: self.logq_fr_el2o(x, advi.mean.eval(), advi.cov.eval()),
                              jax.grad(lambda x: self.logq_fr_el2o(x, advi.mean.eval(), advi.cov.eval())))
    self.q_ess = self.calculate_ess(self.log_weight)
    self.total_ess = self.calculate_ess(self.sinf_logw)
    self.shrink_init(advi.mean.eval(), advi.cov.eval())
    # Reset per-iteration NF bookkeeping.
    self.all_logq = np.array([])
    self.nf_models = []
def fit_nf(self, num_draws, bw_search=True):
"""Fit the NF model for a given iteration after initialization."""
if bw_search:
bw_var_weights = []
bw_pq_weights = []
bw_nf_models = | |
# coding=UTF-8
import numpy as np
import math
from .utils import *
from .OrthoMaterial import *
from .IsoMaterial import *
## Class interfacing the solver with 3D FEM calculix computation to obtain the cross section parameter from a composite box
class CompositeBox:
## Composite box constructor; It contains 4 CompositePlate; modify the chord of left and right wall
def __init__(self, Left, Right, Up, Down, Width, Height, OffsetY=0., OffsetZ=0.):
    """Assemble a composite box from four CompositePlate walls.

    The up/down walls span the full width of the box; the chord of the
    left/right walls is reduced by the total thickness of the up and
    down laminates so the four walls fit together.
    """
    # The four walls (CompositePlate instances).
    self.Left = Left
    self.Right = Right
    self.Up = Up
    self.Down = Down
    # Overall cross-section dimensions (m).
    self.Width = Width
    self.Height = Height
    # Position of the box centre in frame B.
    self.OffsetY = OffsetY
    self.OffsetZ = OffsetZ
    # Horizontal walls cover the full width.
    self.Up.Chord = Width
    self.Down.Chord = Width
    # Vertical walls fit between the two horizontal laminates.
    inner = Height - self.Up.GetTotThickness() - self.Down.GetTotThickness()
    self.Left.Chord = inner
    self.Right.Chord = inner
def GetOffsets(self):
    """Return the [Y, Z] position of the box centre in frame B."""
    offsets = [self.OffsetY, self.OffsetZ]
    return offsets
def GetWidth(self):
    """Return the overall width of the box (m)."""
    return self.Width
def GetHeight(self):
    """Return the overall height of the box (m)."""
    return self.Height
## Create the input file for cgx preprocessor
#@param TypeElem the type of finite element used for the computation. Supported : he20r, he20, he8i, he8, pe15 (see ccx doc)
#@param NbElemX the number of finite element in the beam direction (for a constant cross section, 1 element is enough)
#@param NBElemYZ the number of finite element along the wall directions
#@param NBElemPly the number of finite element in a composite ply
def CreateFbdFile(self, TypeElem, NbElemX, NbElemYZ, NbElemPly):
    """Write the cgx preprocessor input file ``Box.fbd`` for the box RVE.

    @param TypeElem  finite-element type: he20r, he20, he8i, he8 or pe15 (see ccx doc)
    @param NbElemX   number of elements in the beam direction
    @param NbElemYZ  number of elements along the wall directions
    @param NbElemPly number of elements through one composite ply
    @raises RuntimeError on an unknown element type, an empty layup, or
            a non-positive ply thickness
    """
    # Quadratic element types need twice the number of geometric divisions.
    if TypeElem in ["he20r", "he20"]:
        divX = 2 * NbElemX
        divYZ = 2 * NbElemYZ
        divPly = 2 * NbElemPly
    elif TypeElem in ["he8i", "he8", "pe15"]:
        divX = NbElemX
        divYZ = NbElemYZ
        divPly = NbElemPly
    else:
        raise RuntimeError("The type of element for the plate homogeneisation is not recognize")
    # determination of RVE dimensions
    if min(len(self.Left.Layup), len(self.Right.Layup), len(self.Up.Layup), len(self.Down.Layup)) == 0:
        raise RuntimeError("One of the box sides has no ply defined")
    TL = self.Left.Layup[0].Thickness
    TR = self.Right.Layup[0].Thickness
    TU = self.Up.Layup[0].Thickness
    TD = self.Down.Layup[0].Thickness
    H = self.Height
    W = self.Width
    # number of plies per side: n = [up, left, down, right]
    n = [len(self.Up.Layup), len(self.Left.Layup), len(self.Down.Layup), len(self.Right.Layup)]
    if min(TL, TR, TU, TD) <= 0:
        raise RuntimeError("One of the box sides has a negative or zero thickness")
    # normalized RVE dimensions (all geometry is scaled by the width W)
    Lx = round(divX * 0.25 * (TL + TR + TU + TD) / NbElemPly / self.Width, 16)
    Ly = 1.0
    Lz = round(H / W, 16)
    # 'with' guarantees the file handle is closed; the previous version
    # ended with 'file.close' (missing parentheses) and never closed it.
    with open("Box.fbd", "w") as file:
        # creation of the left up reference point
        file.write("SETO up\nSETO up0\nSETO left\n")
        file.write("PNT p1 {0} {1} {2}\n".format(-0.5 * Lx, 0.5 + self.OffsetY / W, 0.5 * Lz - self.Up.GetTotThickness() / W + self.OffsetZ / W))
        file.write("SETC left\n")
        # creation of the upper side
        # dividing the upper side in accordance to the ply division of left and right side
        for i in range(n[1]):  # left side
            file.write("SWEP up{2} up{3} tra 0 {0} 0 {1}\n".format(-round(self.Left.Layup[n[1] - 1 - i].GetThickness() / W, 16), divPly, i, i + 1))
        file.write("SWEP up{2} up{3} tra 0 {0} 0 {1}\n".format(round(-1 + (self.Left.GetTotThickness() + self.Right.GetTotThickness()) / W, 16), divYZ, n[1], n[1] + 1))
        for i in range(n[3]):  # right side
            file.write("SWEP up{2} up{3} tra 0 {0} 0 {1}\n".format(-round(self.Right.Layup[i].GetThickness() / W, 16), divPly, n[1] + 1 + i, n[1] + 2 + i))
        file.write("SETO ply1up\n")
        file.write("SWEP up plan1up tra 0 0 {0} {1}\n".format(self.Up.Layup[0].GetThickness() / W, divPly))
        file.write("SWEP ply1up ply1up tra {0} 0 0 {1}\n".format(Lx, divX))
        file.write("SETR ply1up p all\nSETR ply1up l all\nSETR ply1up s all\n")
        file.write("SETC ply1up\n")
        for i in range(n[0] - 1):
            file.write("SETO ply{0}up\n".format(i + 2))
            file.write("SWEP plan{0}up plan{1}up tra 0 0 {2} {3}\n".format(i + 1, i + 2, round(self.Up.Layup[i + 1].GetThickness() / W, 16), divPly))
            file.write("SWEP ply{0}up ply{1}up tra {2} 0 0 {3}\n".format(i + 2, i + 2, Lx, divX))
            file.write("SETR ply{0}up p all\nSETR ply{0}up l all\nSETR ply{0}up s all\n".format(i + 2))
            file.write("SETC ply{0}up\n".format(i + 2))
        # creation of the left side
        file.write("SETC up\nSETO left\n")
        file.write("SWEP left left tra 0 0 {0} {1}\n".format(round((-H + self.Up.GetTotThickness() + self.Down.GetTotThickness()) / W, 16), divYZ))
        file.write("SETO ply{0}left\n".format(n[1]))
        file.write("SWEP left plan{2}left tra 0 {0} 0 {1}\n".format(round(-self.Left.Layup[n[1] - 1].GetThickness() / W, 16), divPly, n[1]))
        file.write("SWEP ply{2}left ply{2}left tra {0} 0 0 {1}\n".format(Lx, divX, n[1]))
        file.write("SETC ply{0}left\n".format(n[1]))
        file.write("SETR ply{0}left p all\nSETR ply{0}left l all\nSETR ply{0}left s all\n".format(n[1]))
        for i in range(n[1] - 1):
            file.write("SETO ply{0}left\n".format(n[1] - 1 - i))
            file.write("SWEP plan{0}left plan{1}left tra 0 {2} 0 {3}\n".format(n[1] - i, n[1] - 1 - i, round(-self.Left.Layup[n[1] - 2 - i].GetThickness() / W, 16), divPly))
            file.write("SWEP ply{0}left ply{0}left tra {1} 0 0 {2}\n".format(n[1] - 1 - i, Lx, divX))
            file.write("SETC ply{0}left\n".format(n[1] - 1 - i))
        # creation of the right down reference point
        file.write("SETO down\nSETO down0\nSETO right\n")
        file.write("PNT p2 {0} {1} {2}\n".format(-0.5 * Lx, -0.5 + self.OffsetY / W, -0.5 * Lz + self.Down.GetTotThickness() / W + self.OffsetZ / W))
        file.write("SETC right\n")
        # creation of the down side
        for i in range(n[3]):  # right side
            file.write("SWEP down{2} down{3} tra 0 {0} 0 {1}\n".format(round(self.Right.Layup[n[3] - 1 - i].GetThickness() / W, 16), divPly, i, i + 1))
        file.write("SWEP down{2} down{3} tra 0 {0} 0 {1}\n".format(round(1 - (self.Left.GetTotThickness() + self.Right.GetTotThickness()) / W, 16), divYZ, n[3], n[3] + 1))
        for i in range(n[1]):  # left side
            file.write("SWEP down{2} down{3} tra 0 {0} 0 {1}\n".format(round(self.Left.Layup[i].GetThickness() / W, 16), divPly, n[3] + 1 + i, n[3] + 2 + i))
        # ~ file.write("SWEP down down tra 0 1 0 {0}\n".format(divYZ))
        file.write("SETO ply1down\n")
        file.write("SWEP down plan1down tra 0 0 {0} {1}\n".format(-self.Down.Layup[0].GetThickness() / W, divPly))
        file.write("SWEP ply1down ply1down tra {0} 0 0 {1}\n".format(Lx, divX))
        file.write("SETR ply1down p all\nSETR ply1down l all\nSETR ply1down s all\n")
        file.write("SETC ply1down\n")
        for i in range(n[2] - 1):
            file.write("SETO ply{0}down\n".format(i + 2))
            file.write("SWEP plan{0}down plan{1}down tra 0 0 {2} {3}\n".format(i + 1, i + 2, round(-self.Down.Layup[i + 1].GetThickness() / W, 16), divPly))
            file.write("SWEP ply{0}down ply{1}down tra {2} 0 0 {3}\n".format(i + 2, i + 2, Lx, divX))
            file.write("SETR ply{0}down p all\nSETR ply{0}down l all\nSETR ply{0}down s all\n".format(i + 2))
            file.write("SETC ply{0}down\n".format(i + 2))
        # ~ # creation of the right side
        file.write("SETC down\nSETO right\n")
        file.write("SWEP right right tra 0 0 {0} {1}\n".format(round((H - self.Up.GetTotThickness() - self.Down.GetTotThickness()) / W, 16), divYZ))
        file.write("SETO ply{0}right\n".format(n[3]))
        file.write("SWEP right plan{2}right tra 0 {0} 0 {1}\n".format(round(self.Right.Layup[n[3] - 1].GetThickness() / W, 16), divPly, n[3]))
        file.write("SWEP ply{2}right ply{2}right tra {0} 0 0 {1}\n".format(Lx, divX, n[3]))
        file.write("SETC ply{0}right\n".format(n[3]))
        file.write("SETR ply{0}right p all\nSETR ply{0}right l all\nSETR ply{0}right s all\n".format(n[3]))
        for i in range(n[3] - 1):
            file.write("SETO ply{0}right\n".format(n[3] - 1 - i))
            file.write("SWEP plan{0}right plan{1}right tra 0 {2} 0 {3}\n".format(n[3] - i, n[3] - 1 - i, round(self.Right.Layup[n[3] - 2 - i].GetThickness() / W, 16), divPly))
            file.write("SWEP ply{0}right ply{0}right tra {1} 0 0 {2}\n".format(n[3] - 1 - i, Lx, divX))
            file.write("SETC ply{0}right\n".format(n[3] - 1 - i))
        # Merging and meshing
        file.write('MERG p all\n')
        file.write('MERG l all\n')
        file.write('MERG s all\n')
        file.write("ELTY all " + TypeElem + "\n")
        file.write("MESH all\n")
        file.write("SEND all abq\n")
        for i in range(n[0]):
            file.write("SEND ply{0}up abq nam\n".format(i + 1))
        for i in range(n[1]):
            file.write("SEND ply{0}left abq nam\n".format(i + 1))
        for i in range(n[2]):
            file.write("SEND ply{0}down abq nam\n".format(i + 1))
        # BUG FIX: this loop previously iterated range(n[0]) (number of
        # *up* plies) instead of n[3] (right plies), exporting the wrong
        # number of right-side ply sets.
        for i in range(n[3]):
            file.write("SEND ply{0}right abq nam\n".format(i + 1))
## Create the input file for ccx solver
#@param Stress True = output the stress tensor for each elementary load case, False = no stress output
#@param PlaneSection True = warping of the cross section is not allowed, False = warping of the cross section is allowed
#@param Disp Determine the set of elementary load cases : 0=all the 4 load cases, 1 = traction, 2 = warping, 3 = bending span-wise, 4 = bending chord-wise.
def CreateInpFile(self,Stress=False,PlaneSection=False,Disp=0):
# 0 = Up; 1 = Left; 2 = Down; 3 = Right
Materials = self.Up.Materials
Orientations = self.Up.Orientations
NbPly = []
NbPly.append(len(self.Up.Layup))
NbPly.append(len(self.Left.Layup))
NbPly.append(len(self.Down.Layup))
NbPly.append(len(self.Right.Layup))
for i in range(len(self.Left.Materials)):
if self.Left.Materials[i] not in Materials:
Materials.append(self.Left.Materials[i])
for i in range(len(self.Left.Orientations)):
if self.Left.Orientations[i] not in Orientations:
Orientations.append(self.Left.Orientations[i])
for i in range(len(self.Down.Materials)):
if self.Down.Materials[i] not in Materials:
Materials.append(self.Down.Materials[i])
for i in range(len(self.Down.Orientations)):
if self.Down.Orientations[i] not in Orientations:
Orientations.append(self.Down.Orientations[i])
for i in range(len(self.Right.Materials)):
if self.Right.Materials[i] not in Materials:
Materials.append(self.Right.Materials[i])
for i in range(len(self.Right.Orientations)):
if self.Right.Orientations[i] not in Orientations:
Orientations.append(self.Right.Orientations[i])
file=open("Box.inp","w")
file.write("*include,input=all.msh\n")
for i in range(NbPly[0]):
file.write("*include,input=ply{0}up.nam\n".format(i+1))
for i in range(NbPly[1]):
file.write("*include,input=ply{0}left.nam\n".format(i+1))
for i in range(NbPly[2]):
file.write("*include,input=ply{0}down.nam\n".format(i+1))
for i in range(NbPly[3]):
file.write("*include,input=ply{0}right.nam\n".format(i+1))
file.write("*include,input=periodic.equ\n")
# materials
i=0
for Mat in Materials:
i=i+1
if isinstance(Mat,OrthoMaterial):
El = Mat.GetOrtho()[0]
Et = Mat.GetOrtho()[1]
Nult = Mat.GetOrtho()[2]
Nutl = round(float(Et)/El*Nult,6)
Glt = Mat.GetOrtho()[3]
Gtl = round(float(Et)/(2+2*Nutl),0)
file.write("*material,name=MAT{0}\n".format(i))
file.write("*elastic,type=ENGINEERING CONSTANTS\n")
file.write("{0},{1},{2},{3},{4},{5},{6},{7}\n{8}\n".format(El,Et,Et,Nult,Nult,Nutl,Glt,Glt,Gtl))
elif isinstance(Mat,IsoMaterial):
E = Mat.GetIso()[0]
Nu = Mat.GetIso()[1]
file.write("*material,name=MAT{0}\n".format(i))
file.write("*elastic,type=ISO\n")
file.write("{0},{1}\n".format(E,Nu))
# ~ # orientation
i=0
for Ori in Orientations:
i=i+1
alpha = np.pi/180*Ori
cos = round(math.cos(alpha),6)
sin = round(math.sin(alpha),6)
file.write("*orientation,name=Ori{0}up\n".format(i))
file.write("{0},{1},{2},{3},{4},{5}\n".format(cos,sin,0.,-sin,cos,0.))
file.write("*orientation,name=Ori{0}left\n".format(i))
file.write("{0},{1},{2},{3},{4},{5}\n".format(cos,0.,sin,-sin,0.,cos))
file.write("*orientation,name=Ori{0}down\n".format(i))
file.write("{0},{1},{2},{3},{4},{5}\n".format(cos,-sin,0.,-sin,-cos,0.))
file.write("*orientation,name=Ori{0}right\n".format(i))
file.write("{0},{1},{2},{3},{4},{5}\n".format(cos,0.,-sin,-sin,0.,-cos))
# elements affectation
for i in range(NbPly[0]): #side up
iMat = Materials.index(self.Up.Layup[i].GetMaterial())+1
iOri = Orientations.index(self.Up.Layup[i].GetOrientation())+1
file.write("*solid section, material=MAT{0},elset=Eply{1}up".format(iMat,i+1))
if isinstance(Materials[iMat-1],OrthoMaterial):
file.write(",orientation=Ori{0}up".format(iOri))
file.write("\n")
for i in range(NbPly[1]): #side left
iMat = Materials.index(self.Left.Layup[i].GetMaterial())+1
iOri = Orientations.index(self.Left.Layup[i].GetOrientation())+1
file.write("*solid section, material=MAT{0},elset=Eply{1}left".format(iMat,i+1))
if isinstance(Materials[iMat-1],OrthoMaterial):
file.write(",orientation=Ori{0}left".format(iOri))
file.write("\n")
for i in range(NbPly[2]): #side down
iMat = Materials.index(self.Down.Layup[i].GetMaterial())+1
iOri = Orientations.index(self.Down.Layup[i].GetOrientation())+1
file.write("*solid section, material=MAT{0},elset=Eply{1}down".format(iMat,i+1))
if isinstance(Materials[iMat-1],OrthoMaterial):
file.write(",orientation=Ori{0}down".format(iOri))
file.write("\n")
for i in range(NbPly[3]): #side right
iMat = Materials.index(self.Right.Layup[i].GetMaterial())+1
iOri = Orientations.index(self.Right.Layup[i].GetOrientation())+1
file.write("*solid section, material=MAT{0},elset=Eply{1}right".format(iMat,i+1))
if isinstance(Materials[iMat-1],OrthoMaterial):
file.write(",orientation=Ori{0}right".format(iOri))
file.write("\n")
if PlaneSection:
file.write("*boundary\nnxl,1,1\n\n")
#~ # elementary beam stress loading for flexibility matrix computation
if Disp == 0:
if Stress:
file.write("*step\n*static\n*cload,OP=NEW\nnstrain,1,1.\n*node print,NSET=nstrain\nU\n*node print,NSET=ncurv\nU\n*node file,NSET=nxl\nS\n*end step\n\n")
file.write("*step\n*static\n*cload,OP=NEW\nncurv,1,1.\n*node print,NSET=nstrain\nU\n*node print,NSET=ncurv\nU\n*node file,NSET=nxl\nS\n*end step\n\n")
file.write("*step\n*static\n*cload,OP=NEW\nncurv,2,1.\n*node print,NSET=nstrain\nU\n*node print,NSET=ncurv\nU\n*node file,NSET=nxl\nS\n*end step\n\n")
file.write("*step\n*static\n*cload,OP=NEW\nncurv,3,1.\n*node print,NSET=nstrain\nU\n*node print,NSET=ncurv\nU\n*node file,NSET=nxl\nS\n*end step\n\n")
else:
file.write("*step\n*static\n*cload,OP=NEW\nnstrain,1,1.\n*node print,NSET=nstrain\nU\n*node print,NSET=ncurv\nU\n*end step\n\n")
file.write("*step\n*static\n*cload,OP=NEW\nncurv,1,1.\n*node print,NSET=nstrain\nU\n*node print,NSET=ncurv\nU\n*end step\n\n")
file.write("*step\n*static\n*cload,OP=NEW\nncurv,2,1.\n*node print,NSET=nstrain\nU\n*node print,NSET=ncurv\nU\n*end step\n\n")
file.write("*step\n*static\n*cload,OP=NEW\nncurv,3,1.\n*node print,NSET=nstrain\nU\n*node print,NSET=ncurv\nU\n*end step\n\n")
elif Disp == 1:
file.write("*step\n*static\n*cload,OP=NEW\nnstrain,1,1.\n*node file,NSET=Nall\nU\n*end step\n\n")
elif Disp == 2:
file.write("*step\n*static\n*cload,OP=NEW\nncurv,1,1.\n*node file,NSET=Nall\nU\n*end step\n\n")
elif Disp == 3:
file.write("*step\n*static\n*cload,OP=NEW\nncurv,2,2.\n*node file,NSET=Nall\nU\n*end | |
import operator
import numba
from numba import types, typing
from numba.typing.templates import (signature, AbstractTemplate, infer,
ConcreteTemplate, AttributeTemplate, bound_function, infer_global)
from numba.extending import typeof_impl, lower_cast
from numba.extending import type_callable, box, unbox, NativeValue
from numba.extending import models, register_model, infer_getattr
from numba.extending import lower_builtin, overload_method, overload
from numba.targets.imputils import (impl_ret_new_ref, impl_ret_borrowed,
iternext_impl, RefType)
from hpat.str_ext import string_type, gen_unicode_to_std_str, gen_std_str_to_unicode
from numba import cgutils
from llvmlite import ir as lir
import llvmlite.binding as ll
from . import hdict_ext
from hpat.utils import unliteral_all
# Generic i8* (void*) LLVM pointer type used for opaque native handles.
ll_voidp = lir.IntType(8).as_pointer()
class ByteVecType(types.Opaque):
    """Opaque Numba type wrapping the native byte-vector handle."""

    def __init__(self):
        super(ByteVecType, self).__init__(name='byte_vec')
# Singleton instance used in signatures; modelled as an opaque pointer.
byte_vec_type = ByteVecType()
register_model(ByteVecType)(models.OpaqueModel)
class DictType(types.Opaque):
    """Numba type for a native dict with fixed key/value types."""

    def __init__(self, key_typ, val_typ):
        self.key_typ = key_typ
        self.val_typ = val_typ
        type_name = 'DictType{}{}'.format(key_typ, val_typ)
        super(DictType, self).__init__(name=type_name)

    @property
    def key(self):
        # Identity key used by Numba's type caching.
        return self.key_typ, self.val_typ

    @property
    def iterator_type(self):
        # Iterating a dict yields its keys.
        return DictKeyIteratorType(self.key_typ, self.val_typ)

    def is_precise(self):
        return self.key_typ.is_precise() and self.val_typ.is_precise()
# Scalar element types for which dict symbols/types are generated below.
elem_types = [
    types.int8,
    types.int16,
    types.int32,
    types.int64,
    types.uint8,
    types.uint16,
    types.uint32,
    types.uint64,
    types.boolean,
    types.float32,
    types.float64,
    string_type
]
def typ_str_to_obj(typ_str):
    """Map a type object to the source expression that names it.

    Used when generating code strings for exec() below.
    """
    if typ_str == string_type:
        return "string_type"
    if typ_str == types.boolean:
        return "types.boolean"
    return "types.{}".format(typ_str)
def _add_dict_symbols(key_str, val_str):
    """Register the native dict_<key>_<val>_* entry points with llvmlite.

    Looks up each exported C function on the hdict_ext module and makes
    it resolvable from JIT-compiled code via ll.add_symbol.
    """
    # Same operation set for every key/value combination.  getattr()
    # replaces the previous per-symbol exec() calls — no dynamic code
    # generation is needed here, only dynamic attribute lookup.
    for op in ('init', 'setitem', 'getitem', 'in', 'print', 'get',
               'pop', 'keys', 'min', 'max', 'not_empty'):
        sym = 'dict_{}_{}_{}'.format(key_str, val_str, op)
        ll.add_symbol(sym, getattr(hdict_ext, sym))
# For every (key, value) pair of scalar types: register the native
# symbols and create the DictType instance plus its external init
# function.  exec() is required here because the generated names
# (e.g. dict_int64_float64_type) must be bound in the module globals.
for key_typ in elem_types:
    for val_typ in elem_types:
        k_obj = typ_str_to_obj(key_typ)
        v_obj = typ_str_to_obj(val_typ)
        key_str = str(key_typ)
        val_str = str(val_typ)
        _add_dict_symbols(key_str, val_str)
        # create types
        exec("dict_{}_{}_type = DictType({}, {})".format(key_str, val_str, k_obj, v_obj))
        exec_format_line = "dict_{0}_{1}_init = types.ExternalFunction('dict_{0}_{1}_init', dict_{0}_{1}_type())"
        exec(exec_format_line.format(key_str, val_str))
# Dict keyed by serialized byte vectors mapping to int64 values.
dict_byte_vec_int64_type = DictType(byte_vec_type, types.int64)
dict_byte_vec_int64_init = types.ExternalFunction('dict_byte_vec_int64_init', dict_byte_vec_int64_type())
_add_dict_symbols('byte_vec', 'int64')
# Resolve the byte-vector helper entry points exported by hdict_ext.
ll.add_symbol('byte_vec_init', hdict_ext.byte_vec_init)
ll.add_symbol('byte_vec_set', hdict_ext.byte_vec_set)
ll.add_symbol('byte_vec_free', hdict_ext.byte_vec_free)
ll.add_symbol('byte_vec_resize', hdict_ext.byte_vec_resize)
# External signatures for the byte-vector API (argument meanings are
# defined by the native extension; see hdict_ext sources).
byte_vec_init = types.ExternalFunction('byte_vec_init', byte_vec_type(types.int64, types.voidptr))
byte_vec_set = types.ExternalFunction(
    'byte_vec_set',
    types.void(
        byte_vec_type,
        types.int64,
        types.voidptr,
        types.int64))
byte_vec_resize = types.ExternalFunction('byte_vec_resize', types.void(byte_vec_type, types.int64))
byte_vec_free = types.ExternalFunction('byte_vec_free', types.void(byte_vec_type))
class MultiMapType(types.Opaque):
    """Numba type for a native multimap with fixed key/value types."""

    def __init__(self, key_typ, val_typ):
        self.key_typ = key_typ
        self.val_typ = val_typ
        type_name = 'MultiMapType{}{}'.format(key_typ, val_typ)
        super(MultiMapType, self).__init__(name=type_name)

    @property
    def key(self):
        # Identity key used by Numba's type caching.
        return self.key_typ, self.val_typ

    def is_precise(self):
        return self.key_typ.is_precise() and self.val_typ.is_precise()
# Multimap handles are opaque pointers at the LLVM level.
register_model(MultiMapType)(models.OpaqueModel)
class MultiMapRangeIteratorType(types.SimpleIteratorType):
    """Iterator type over the values of a multimap equal_range result."""

    def __init__(self, key_typ, val_typ):
        self.key_typ = key_typ
        self.val_typ = val_typ
        # Iteration yields the mapped values, not the keys.
        super(MultiMapRangeIteratorType, self).__init__(
            'MultiMapRangeIteratorType{}{}'.format(key_typ, val_typ), val_typ)

    @property
    def iterator_type(self):
        # The range object acts as its own iterator.
        return self

    @property
    def key(self):
        return self.key_typ, self.val_typ

    def is_precise(self):
        return self.key_typ.is_precise() and self.val_typ.is_precise()
# Single iterator-type instance used by the int64 multimap API below.
multimap_int64_range_iterator_type = MultiMapRangeIteratorType(types.intp, types.intp)
register_model(MultiMapRangeIteratorType)(models.OpaqueModel)
# Concrete int64 -> int64 multimap type and its external entry points.
multimap_int64_type = MultiMapType(types.int64, types.int64)
multimap_int64_init = types.ExternalFunction(
    'multimap_int64_init', multimap_int64_type())
multimap_int64_insert = types.ExternalFunction(
    'multimap_int64_insert',
    types.void(multimap_int64_type, types.int64, types.int64))
multimap_int64_equal_range = types.ExternalFunction(
    'multimap_int64_equal_range',
    multimap_int64_range_iterator_type(multimap_int64_type, types.int64))
# store the iterator pair type in same storage and avoid repeated alloc
multimap_int64_equal_range_alloc = types.ExternalFunction(
    'multimap_int64_equal_range_alloc', multimap_int64_range_iterator_type())
multimap_int64_equal_range_dealloc = types.ExternalFunction(
    'multimap_int64_equal_range_dealloc',
    types.void(multimap_int64_range_iterator_type))
multimap_int64_equal_range_inplace = types.ExternalFunction(
    'multimap_int64_equal_range_inplace',
    multimap_int64_range_iterator_type(multimap_int64_type, types.int64,
                                       multimap_int64_range_iterator_type))
# Resolve the native multimap entry points exported by hdict_ext.
ll.add_symbol('multimap_int64_init', hdict_ext.multimap_int64_init)
ll.add_symbol('multimap_int64_insert', hdict_ext.multimap_int64_insert)
ll.add_symbol('multimap_int64_equal_range', hdict_ext.multimap_int64_equal_range)
ll.add_symbol('multimap_int64_equal_range_alloc', hdict_ext.multimap_int64_equal_range_alloc)
ll.add_symbol('multimap_int64_equal_range_dealloc', hdict_ext.multimap_int64_equal_range_dealloc)
ll.add_symbol('multimap_int64_equal_range_inplace', hdict_ext.multimap_int64_equal_range_inplace)
ll.add_symbol('multimap_int64_it_is_valid', hdict_ext.multimap_int64_it_is_valid)
ll.add_symbol('multimap_int64_it_get_value', hdict_ext.multimap_int64_it_get_value)
ll.add_symbol('multimap_int64_it_inc', hdict_ext.multimap_int64_it_inc)
@lower_builtin('getiter', MultiMapRangeIteratorType)
def iterator_getiter(context, builder, sig, args):
    """Lower 'getiter': the range object is already its own iterator."""
    (range_it,) = args
    return range_it
@lower_builtin('iternext', MultiMapRangeIteratorType)
@iternext_impl(RefType.UNTRACKED)
def iternext_listiter(context, builder, sig, args, result):
    """Lower 'iternext' for a multimap equal_range iterator.

    Emits calls into the native helpers: check validity, fetch the
    current mapped value, then advance the iterator.
    """
    ll_bool = context.get_value_type(types.bool_)  # lir.IntType(1)?
    # is valid
    fnty = lir.FunctionType(ll_bool, [ll_voidp])
    it_is_valid = builder.module.get_or_insert_function(fnty, name="multimap_int64_it_is_valid")
    # get value
    val_typ = context.get_value_type(sig.args[0].val_typ)
    fnty = lir.FunctionType(val_typ, [ll_voidp])
    get_value = builder.module.get_or_insert_function(fnty, name="multimap_int64_it_get_value")
    # increment
    fnty = lir.FunctionType(lir.VoidType(), [ll_voidp])
    inc_it = builder.module.get_or_insert_function(fnty, name="multimap_int64_it_inc")
    range_it, = args
    # it != range.second
    is_valid = builder.call(it_is_valid, [range_it])
    result.set_valid(is_valid)
    # Only fetch/advance while the iterator is still in range.
    with builder.if_then(is_valid):
        # it->second
        val = builder.call(get_value, [range_it])
        result.yield_(val)
        builder.call(inc_it, [range_it])
# XXX: needs Numba #3014 resolved
# @overload("in")
# def in_dict(key_typ, dict_typ):
# def f(k, dict_int):
# return dict_int_int_in(dict_int, k)
# return f
# XXX possible overload bug
# @overload(operator.setitem)
# def setitem_dict(dict_typ, key_typ, val_typ):
# def f(k, dict_int):
# return dict_int_int_in(dict_int, k)
# return f
@infer
class InDict(AbstractTemplate):
    """Typing for `key in dict` (legacy "in" intrinsic)."""
    key = "in"

    def generic(self, args, kws):
        item_ty, container_ty = args
        if isinstance(container_ty, DictType):
            # Boolean result; key argument coerced to the dict's key type.
            return signature(types.boolean, container_ty.key_typ, container_ty)
@infer_global(operator.contains)
class InDictOp(AbstractTemplate):
    """Typing for operator.contains on native dicts."""

    def generic(self, args, kws):
        # contains operator reverses the args
        container_ty, _item_ty = args
        if isinstance(container_ty, DictType):
            return signature(types.boolean, container_ty, container_ty.key_typ)
# Concrete dict types exposed to Python via DictIntInt / DictInt32Int32.
dict_int_int_type = DictType(types.intc, types.intc)
dict_int32_int32_type = DictType(types.int32, types.int32)
class DictIntInt(object):
    """Python-level stand-in for the native int dict.

    In interpreted code it simply behaves as an empty builtin dict.
    """

    def __new__(cls, *args):
        return dict()
class DictInt32Int32(object):
    """Python-level stand-in for the native int32 dict.

    In interpreted code it simply behaves as an empty builtin dict.
    """

    def __new__(cls, *args):
        return dict()
@typeof_impl.register(DictIntInt)
def typeof_dict_int(val, c):
    # Map Python DictIntInt instances to the intc/intc dict type.
    return dict_int_int_type
@typeof_impl.register(DictInt32Int32)
def typeof_dict_int32(val, c):
    # Map Python DictInt32Int32 instances to the int32/int32 dict type.
    return dict_int32_int32_type
@type_callable(DictIntInt)
def type_dict_int(context):
    # Calling DictIntInt() in jitted code yields a dict_int_int_type.
    def typer():
        return dict_int_int_type
    return typer
@type_callable(DictInt32Int32)
def type_dict_int32(context):
    # Calling DictInt32Int32() in jitted code yields a dict_int32_int32_type.
    def typer():
        return dict_int32_int32_type
    return typer
@infer_global(operator.setitem)
class SetItemDict(AbstractTemplate):
    """Typing for d[key] = value on native dicts."""

    def generic(self, args, kws):
        dict_ty, _key_ty, _val_ty = args
        if isinstance(dict_ty, DictType):
            # Key/value are coerced to the dict's declared types.
            return signature(types.none, dict_ty, dict_ty.key_typ, dict_ty.val_typ)
@infer_global(operator.getitem)
class GetItemDict(AbstractTemplate):
    """Typing for d[key] on native dicts."""
    key = operator.getitem

    def generic(self, args, kws):
        dict_ty, _key_ty = args
        if isinstance(dict_ty, DictType):
            # Result has the dict's value type.
            return signature(dict_ty.val_typ, dict_ty, dict_ty.key_typ)
@infer
class PrintDictIntInt(ConcreteTemplate):
    # Typing for the internal "print_item" intrinsic on the two
    # concretely supported dict types.
    key = "print_item"
    cases = [signature(types.none, dict_int_int_type),
             signature(types.none, dict_int32_int32_type)]
@infer_getattr
class DictAttribute(AttributeTemplate):
    """Typing of the get/pop/keys method attributes on native dicts."""
    key = DictType

    @bound_function("dict.get")
    def resolve_get(self, dict, args, kws):
        # d.get(key, default): the result type follows the default arg.
        assert not kws
        assert len(args) == 2
        return signature(args[1], *unliteral_all(args))

    @bound_function("dict.pop")
    def resolve_pop(self, dict, args, kws):
        # d.pop(key): returns a value of the dict's value type.
        assert not kws
        return signature(dict.val_typ, *unliteral_all(args))

    @bound_function("dict.keys")
    def resolve_keys(self, dict, args, kws):
        # d.keys(): returns the key-iterator type; takes no arguments.
        assert not kws
        return signature(DictKeyIteratorType(dict.key_typ, dict.val_typ))
# Native dict handles are opaque pointers at the LLVM level.
register_model(DictType)(models.OpaqueModel)
@box(DictType)
def box_dict(typ, val, c):
    """Box a native dict into a Python object.

    NOTE(review): this calls the DictIntInt constructor, whose __new__
    ignores its arguments and returns an empty Python dict — the native
    contents are not copied out.  Verify this is the intended behavior.
    """
    # interval = cgutils.create_struct_proxy(typ)(c.context, c.builder, value=val)
    # lo_obj = c.pyapi.float_from_double(interval.lo)
    # hi_obj = c.pyapi.float_from_double(interval.hi)
    class_obj = c.pyapi.unserialize(c.pyapi.serialize_object(DictIntInt))
    res = c.pyapi.call_function_objargs(class_obj, (val,))
    # c.pyapi.decref(lo_obj)
    # c.pyapi.decref(hi_obj)
    c.pyapi.decref(class_obj)
    return res
class DictKeyIteratorType(types.Opaque):
    """Opaque type for an iterator over a native dict's keys."""

    def __init__(self, key_typ, val_typ):
        self.key_typ = key_typ
        self.val_typ = val_typ
        type_name = 'DictKeyIteratorType{}{}'.format(key_typ, val_typ)
        super(DictKeyIteratorType, self).__init__(type_name)
# Concrete key-iterator types for the two supported dicts.
dict_key_iterator_int_int_type = DictKeyIteratorType(types.intp, types.intp)
dict_key_iterator_int32_int32_type = DictKeyIteratorType(
    types.int32, types.int32)
register_model(DictKeyIteratorType)(models.OpaqueModel)
@infer_global(min)
@infer_global(max)
class MinMaxDict(AbstractTemplate):
    """Typing for min()/max() over a dict-keys iterator."""

    def generic(self, args, kws):
        if len(args) != 1:
            return
        (it_ty,) = args
        if isinstance(it_ty, DictKeyIteratorType):
            # min/max over the keys yields a single key.
            return signature(it_ty.key_typ, *unliteral_all(args))
# dict_int_int_in = types.ExternalFunction("dict_int_int_in", types.boolean(dict_int_int_type, types.intp))
@lower_builtin(DictIntInt)
def impl_dict_int_int(context, builder, sig, args):
    # Lower DictIntInt() to a call of the native constructor, which
    # returns an opaque i8* handle.
    fnty = lir.FunctionType(lir.IntType(8).as_pointer(), [])
    fn = builder.module.get_or_insert_function(fnty, name="dict_int_int_init")
    return builder.call(fn, [])
@lower_builtin(operator.setitem, DictType, types.Any)
def setitem_dict(context, builder, sig, args):
    """Lower d[key] = value to the dict_<key>_<val>_setitem runtime helper."""
    _, key_type, value_type = sig.args
    dict_ptr, key_val, value_val = args
    # Helper name is derived from the *original* (pre-conversion) type names.
    fname = "dict_{}_{}_setitem".format(key_type, value_type)
    # Strings cross the C boundary as std::string pointers (void*).
    if key_type == string_type:
        key_type = types.voidptr
        key_val = gen_unicode_to_std_str(context, builder, key_val)
    if value_type == string_type:
        value_type = types.voidptr
        value_val = gen_unicode_to_std_str(context, builder, value_val)
    helper_sig = lir.FunctionType(lir.VoidType(),
                                  [lir.IntType(8).as_pointer(),
                                   context.get_value_type(key_type),
                                   context.get_value_type(value_type)])
    helper = builder.module.get_or_insert_function(helper_sig, name=fname)
    return builder.call(helper, [dict_ptr, key_val, value_val])
@lower_builtin("print_item", dict_int_int_type)
def print_dict(context, builder, sig, args):
# pyapi = context.get_python_api(builder)
# strobj = pyapi.unserialize(pyapi.serialize_object("hello!"))
# pyapi.print_object(strobj)
# pyapi.decref(strobj)
# return context.get_dummy_value()
fnty = lir.FunctionType(lir.VoidType(), [lir.IntType(8).as_pointer()])
fn = builder.module.get_or_insert_function(fnty, name="dict_int_int_print")
return builder.call(fn, args)
@lower_builtin("dict.get", DictType, types.intp, types.intp)
def lower_dict_get(context, builder, sig, args):
fnty = lir.FunctionType(lir.IntType(64), [lir.IntType(
8).as_pointer(), lir.IntType(64), lir.IntType(64)])
fn = builder.module.get_or_insert_function(fnty, name="dict_int_int_get")
return builder.call(fn, args)
@lower_builtin(operator.getitem, DictType, types.Any)
def lower_dict_getitem(context, builder, sig, args):
    """Lower d[key] to the dict_<key>_<val>_getitem runtime helper."""
    dict_type, key_type = sig.args
    dict_ptr, key_val = args
    value_type = dict_type.val_typ
    fname = "dict_{}_{}_getitem".format(key_type, value_type)
    # String keys cross the C boundary as std::string pointers (void*).
    if key_type == string_type:
        key_type = types.voidptr
        key_val = gen_unicode_to_std_str(context, builder, key_val)
    # String values come back as std::string pointers as well.
    ll_value_type = context.get_value_type(value_type)
    if value_type == string_type:
        ll_value_type = context.get_value_type(types.voidptr)
    helper_sig = lir.FunctionType(
        ll_value_type,
        [lir.IntType(8).as_pointer(), context.get_value_type(key_type)])
    helper = builder.module.get_or_insert_function(helper_sig, name=fname)
    result = builder.call(helper, [dict_ptr, key_val])
    if value_type == string_type:
        # Convert the returned std::string back into a unicode object.
        result = gen_std_str_to_unicode(context, builder, result)
    return result
@lower_builtin("dict.pop", DictType, types.intp)
def lower_dict_pop(context, builder, sig, args):
fnty = lir.FunctionType(lir.IntType(
64), [lir.IntType(8).as_pointer(), lir.IntType(64)])
fn = builder.module.get_or_insert_function(fnty, name="dict_int_int_pop")
return builder.call(fn, args)
@lower_builtin("dict.keys", dict_int_int_type)
def lower_dict_keys(context, builder, sig, args):
fnty = lir.FunctionType(lir.IntType(8).as_pointer(), [
lir.IntType(8).as_pointer()])
fn = builder.module.get_or_insert_function(fnty, name="dict_int_int_keys")
return builder.call(fn, args)
@lower_builtin(min, dict_key_iterator_int_int_type)
def lower_dict_min(context, builder, sig, args):
    """Lower min(d.keys()) for the int->int dict."""
    min_sig = lir.FunctionType(lir.IntType(64), [lir.IntType(8).as_pointer()])
    min_fn = builder.module.get_or_insert_function(
        min_sig, name="dict_int_int_min")
    return builder.call(min_fn, args)
@lower_builtin(max, dict_key_iterator_int_int_type)
def lower_dict_max(context, builder, sig, args):
    """Lower max(d.keys()) for the int->int dict."""
    max_sig = lir.FunctionType(lir.IntType(64), [lir.IntType(8).as_pointer()])
    max_fn = builder.module.get_or_insert_function(
        max_sig, name="dict_int_int_max")
    return builder.call(max_fn, args)
@lower_builtin("in", types.Any, DictType)
def lower_dict_in(context, builder, sig, args):
key_typ, dict_typ = sig.args
key, dct = args
fname = "dict_{}_{}_in".format(key_typ, dict_typ.val_typ)
if key_typ == string_type:
key_typ = types.voidptr
key = gen_unicode_to_std_str(context, builder, key)
fnty = lir.FunctionType(lir.IntType(1), [lir.IntType(8).as_pointer(),
context.get_value_type(key_typ), ])
fn = builder.module.get_or_insert_function(fnty, name=fname)
val = builder.call(fn, [dct, key])
if dict_typ.val_typ == string_type:
val = gen_std_str_to_unicode(context, builder, val)
return val
@lower_builtin(operator.contains, DictType, types.Any)
def lower_dict_in_op(context, builder, sig, args):
    """Lower `key in d` (operator.contains) to the native membership test."""
    dict_type, key_type = sig.args
    dict_ptr, key_val = args
    fname = "dict_{}_{}_in".format(key_type, dict_type.val_typ)
    # String keys cross the C boundary as std::string pointers (void*).
    if key_type == string_type:
        key_type = types.voidptr
        key_val = gen_unicode_to_std_str(context, builder, key_val)
    contains_sig = lir.FunctionType(
        lir.IntType(1),
        [lir.IntType(8).as_pointer(), context.get_value_type(key_type), ])
    contains_fn = builder.module.get_or_insert_function(contains_sig, name=fname)
    return builder.call(contains_fn, [dict_ptr, key_val])
@lower_cast(dict_int_int_type, types.boolean)
def dict_empty(context, builder, fromty, toty, val):
    """Cast dict -> bool (truthiness): true when the dict is non-empty."""
    cast_sig = lir.FunctionType(lir.IntType(1), [lir.IntType(8).as_pointer()])
    cast_fn = builder.module.get_or_insert_function(
        cast_sig, name="dict_int_int_not_empty")
    return builder.call(cast_fn, (val,))
# ------ int32 versions ------
@lower_builtin(DictInt32Int32)
def impl_dict_int32_int32(context, builder, sig, args):
    """Lower the DictInt32Int32() constructor to the native runtime init."""
    init_sig = lir.FunctionType(lir.IntType(8).as_pointer(), [])
    init_fn = builder.module.get_or_insert_function(
        init_sig, name="dict_int32_int32_init")
    return builder.call(init_fn, [])
# NOTE: a dedicated int32 setitem lowering is unnecessary -- the generic
# setitem_dict lowering above already covers it.
@lower_builtin("print_item", dict_int32_int32_type)
def print_dict_int32(context, builder, sig, args):
    """Lower print(d) for the int32->int32 dict."""
    print_sig = lir.FunctionType(lir.VoidType(), [lir.IntType(8).as_pointer()])
    print_fn = builder.module.get_or_insert_function(
        print_sig, name="dict_int32_int32_print")
    return builder.call(print_fn, args)
@lower_builtin("dict.get", DictType, types.int32, types.int32)
def lower_dict_get_int32(context, builder, sig, args):
fnty = lir.FunctionType(lir.IntType(32), [lir.IntType(
8).as_pointer(), lir.IntType(32), lir.IntType(32)])
fn = builder.module.get_or_insert_function(
fnty, name="dict_int32_int32_get")
return builder.call(fn, args)
# NOTE: a dedicated int32 getitem lowering is unnecessary -- the generic
# lower_dict_getitem lowering above already covers it.
@lower_builtin("dict.pop", DictType, types.int32)
def lower_dict_pop_int32(context, builder, sig, args):
    """Lower d.pop(key) for the int32->int32 dict."""
    i32 = lir.IntType(32)
    pop_sig = lir.FunctionType(i32, [lir.IntType(8).as_pointer(), i32])
    pop_fn = builder.module.get_or_insert_function(
        pop_sig, name="dict_int32_int32_pop")
    return builder.call(pop_fn, args)
@lower_builtin("dict.keys", dict_int32_int32_type)
def lower_dict_keys_int32(context, builder, sig, args):
fnty = lir.FunctionType(lir.IntType(8).as_pointer(), [
lir.IntType(8).as_pointer()])
| |
import os
import time
import client.api
import client.models
import psutil
from mamba import description, before, after, it
from expects import *
from expects.matchers import Matcher
from common import Config, Service
from common.helper import (make_dynamic_results_config,
check_modules_exists,
get_network_dynamic_results_fields,
network_generator_model,
network_server_model)
from common.matcher import (has_location,
has_json_content_type,
raise_api_exception,
be_valid_network_server,
be_valid_network_generator,
be_valid_network_generator_result,
be_valid_dynamic_results)
# Test-service configuration; the MAMBA_CONFIG env var can override the
# default config.yaml located next to this spec file.
CONFIG = Config(os.path.join(os.path.dirname(__file__),
                os.environ.get('MAMBA_CONFIG', 'config.yaml')))
def clear_network_instances(api):
    """Best-effort removal of all network servers, generators and results.

    Running generators are stopped before deletion. Each step tolerates
    AttributeError so the helper also works if parts of the API client are
    missing.
    """
    try:
        for server in api.list_network_servers():
            api.delete_network_server(server.id)
    except AttributeError:
        pass
    try:
        for generator in api.list_network_generators():
            if generator.running:
                api.stop_network_generator(generator.id)
            api.delete_network_generator(generator.id)
    except AttributeError:
        pass
    try:
        for result in api.list_network_generator_results():
            api.delete_network_generator_result(result.id)
    except AttributeError:
        pass
def get_interface_by_address(addr):
    """Return the name of the NIC bound to *addr*, or None if no NIC matches."""
    for iface_name, iface_addrs in psutil.net_if_addrs().items():
        if any(entry.address == addr for entry in iface_addrs):
            return iface_name
    return None
def is_nonzero_op_result(op_result):
    """Return True when every counter of an I/O operation result is non-zero."""
    counters = (op_result.ops_target,
                op_result.ops_actual,
                op_result.bytes_target,
                op_result.bytes_actual,
                op_result.latency_total)
    return all(value != 0 for value in counters)
def wait_for_nonzero_result(api, result_id, mode='rw', timeout=5.0):
    """Poll a generator result until its selected counters are non-zero.

    *mode* is any combination of 'r' and 'w' selecting which of the read
    and write statistics must be non-zero. Raises AssertionError when
    roughly *timeout* seconds of polling elapse without success.
    """
    poll_interval = .1
    waited = 0
    while waited <= timeout:
        result = api.get_network_generator_result(result_id)
        expect(result).to(be_valid_network_generator_result)
        read_ok = 'r' not in mode or is_nonzero_op_result(result.read)
        write_ok = 'w' not in mode or is_nonzero_op_result(result.write)
        if read_ok and write_ok:
            return result
        time.sleep(poll_interval)
        waited += poll_interval
    raise AssertionError('Failed waiting for nonzero %s result. %s' % (mode, str(result)))
def wait_for_nonzero_server_result(api, server_id, timeout=5.0):
    """Poll a network server until it has received at least one byte.

    Raises AssertionError when *timeout* seconds pass without traffic.
    """
    poll_interval = 0.1
    deadline = time.time() + timeout
    while time.time() <= deadline:
        server = api.get_network_server(server_id)
        expect(server).to(be_valid_network_server)
        if server.stats.bytes_received > 0:
            return server
        time.sleep(poll_interval)
    raise AssertionError('Failed waiting for nonzero server result')
with description('Network Generator Module', 'network') as self:
with shared_context('network_module'):
with before.all:
self._process = self._service.start()
self._api = client.api.NetworkGeneratorApi(self._service.client())
if not check_modules_exists(self._service.client(), 'network'):
self.skip()
with after.all:
try:
self._process.terminate()
self._process.wait()
except AttributeError:
pass
with description('Network Servers'):
with description('/network/servers'):
with context('PUT'):
with it('not allowed (405)'):
expect(lambda: self._api.api_client.call_api('/network/servers', 'PUT')).to(
raise_api_exception(405, headers={'Allow': "GET, POST"}))
with context('POST'):
with shared_context('create server'):
with before.all:
self._result = self._api.create_network_server_with_http_info(
self._model, _return_http_data_only=False)
with after.all:
clear_network_instances(self._api)
with it('created'):
expect(self._result[1]).to(equal(201))
with it('has valid Location header'):
expect(self._result[2]).to(has_location('/network/servers/' + self._result[0].id))
with it('has Content-Type: application/json header'):
expect(self._result[2]).to(has_json_content_type)
with it('returned valid server'):
expect(self._result[0]).to(be_valid_network_server)
with it('has same config'):
if (not self._model.id):
self._model.id = self._result[0].id
self._model.stats = self._result[0].stats
expect(self._result[0]).to(equal(self._model))
with description('with empty ID'):
with before.all:
self._model = network_server_model(self._api.api_client, interface=self._ip4_server_interface)
with included_context('create server'):
with it('random ID assigned'):
expect(self._result[0].id).not_to(be_empty)
with description('with udp protocol'):
with before.all:
self._model = network_server_model(self._api.api_client, protocol='udp', interface=self._ip4_server_interface)
with included_context('create server'):
pass
with description('with specified ID'):
with before.all:
self._model = network_server_model(self._api.api_client, id='some-specified-id', interface=self._ip4_server_interface)
with included_context('create server'):
pass
with description('unknown protocol'):
with it('bad request (400)'):
model = network_server_model(
self._api.api_client, protocol='foo', interface=self._ip4_server_interface)
expr = lambda: self._api.create_network_server(model)
expect(expr).to(raise_api_exception(400))
with description('duplicate port'):
with after.all:
clear_network_instances(self._api)
with it('bad request (400)'):
model = network_server_model(
self._api.api_client, id = 'some-id-1', interface=self._ip4_server_interface)
self._api.create_network_server(model)
model = network_server_model(
self._api.api_client, id = 'some-id-2', interface=self._ip4_server_interface)
expr = lambda: self._api.create_network_server(model)
expect(expr).to(raise_api_exception(400))
with context('GET'):
with before.all:
model = network_server_model(self._api.api_client, interface=self._ip4_server_interface)
self._servers = []
for a in range(3):
model.port = 3357+a
self._servers.append(self._api.create_network_server(model))
self._result = self._api.list_network_servers_with_http_info(
_return_http_data_only=False)
with after.all:
clear_network_instances(self._api)
with it('success (200)'):
expect(self._result[1]).to(equal(200))
with it('has Content-Type: application/json header'):
expect(self._result[2]).to(has_json_content_type)
with it('return list'):
expect(self._result[0]).not_to(be_empty)
for gen in self._result[0]:
expect(gen).to(be_valid_network_server)
with description('/network/servers/{id}'):
with before.all:
model = network_server_model(self._api.api_client, interface=self._ip4_server_interface)
server = self._api.create_network_server(model)
expect(server).to(be_valid_network_server)
self._server = server
with context('GET'):
with description('by existing ID'):
with before.all:
self._result = self._api.get_network_server_with_http_info(
self._server.id, _return_http_data_only=False)
with it('success (200)'):
expect(self._result[1]).to(equal(200))
with it('has Content-Type: application/json header'):
expect(self._result[2]).to(has_json_content_type)
with it('server object'):
expect(self._result[0]).to(be_valid_network_server)
with description('by non-existent ID'):
with it('not found (404)'):
expr = lambda: self._api.get_network_server('unknown')
expect(expr).to(raise_api_exception(404))
with description('by invalid ID'):
with it('bad request (400)'):
expr = lambda: self._api.get_network_server('bad_id')
expect(expr).to(raise_api_exception(400))
with context('DELETE'):
with description('by existing ID'):
with it('removed (204)'):
result = self._api.delete_network_server_with_http_info(
self._server.id, _return_http_data_only=False)
expect(result[1]).to(equal(204))
with it('not found (404)'):
expr = lambda: self._api.get_network_server(self._server.id)
expect(expr).to(raise_api_exception(404))
with description('by non-existent ID'):
with it('not found (404)'):
expr = lambda: self._api.delete_network_server('unknown')
expect(expr).to(raise_api_exception(404))
with description('by invalid ID'):
with it('bad request (400)'):
expr = lambda: self._api.delete_network_server('bad_id')
expect(expr).to(raise_api_exception(400))
with description('Network Generators'):
with description('/network/generators'):
with context('PUT'):
with it('returns 405'):
expect(lambda: self._api.api_client.call_api('/network/generators', 'PUT')).to(
raise_api_exception(405, headers={'Allow': "GET, POST"}))
with context('POST'):
with shared_context('create generator'):
with before.all:
self._result = self._api.create_network_generator_with_http_info(
self._model, _return_http_data_only=False)
with after.all:
clear_network_instances(self._api)
with it('created (201)'):
expect(self._result[1]).to(equal(201))
with it('has valid Location header'):
expect(self._result[2]).to(has_location('/network/generators/' + self._result[0].id))
with it('has Content-Type: application/json header'):
expect(self._result[2]).to(has_json_content_type)
with it('returned valid generator'):
expect(self._result[0]).to(be_valid_network_generator)
with it('has same config'):
if (not self._model.id):
self._model.id = self._result[0].id
expect(self._result[0]).to(equal(self._model))
with it('ratio empty'):
expect(self._result[0].config.ratio).to(be_none)
with description("with ratio"):
with before.all:
generator = network_generator_model(self._api.api_client, interface=self._ip4_client_interface)
generator.config.ratio = client.models.BlockGeneratorReadWriteRatio()
generator.config.ratio.reads = 1
generator.config.ratio.writes = 1
self._result = self._api.create_network_generator_with_http_info(generator)
with after.all:
clear_network_instances(self._api)
with it('created (201)'):
expect(self._result[1]).to(equal(201))
with it('has a valid network generator'):
expect(self._result[0]).to(be_valid_network_generator)
with it('ratio configured'):
expect(self._result[0].config.ratio).not_to(be_none)
expect(self._result[0].config.ratio.reads).to(be(1))
expect(self._result[0].config.ratio.writes).to(be(1))
with description('with empty ID'):
with before.all:
self._model = network_generator_model(self._api.api_client, interface=self._ip4_client_interface)
with included_context('create generator'):
with it('random ID assigned'):
expect(self._result[0].id).not_to(be_empty)
with description('with udp protocol'):
with before.all:
self._model = network_generator_model(self._api.api_client, protocol='udp', interface=self._ip4_client_interface)
with included_context('create generator'):
pass
with description('with specified ID'):
with before.all:
self._model = network_generator_model(self._api.api_client, id='some-specified-id', interface=self._ip4_client_interface)
with included_context('create generator'):
pass
with description('unknown protocol'):
with it('bad request (400)'):
model = network_generator_model(
self._api.api_client, protocol='foo', interface=self._ip4_client_interface)
expr = lambda: self._api.create_network_generator(model)
expect(expr).to(raise_api_exception(400))
with context('GET'):
with before.all:
model = network_generator_model(self._api.api_client, interface=self._ip4_client_interface)
self._g8s = [self._api.create_network_generator(model) for a in range(3)]
self._result = self._api.list_network_generators_with_http_info(
_return_http_data_only=False)
with after.all:
clear_network_instances(self._api)
with it('success (200)'):
expect(self._result[1]).to(equal(200))
with it('has Content-Type: application/json header'):
expect(self._result[2]).to(has_json_content_type)
with it('return list'):
expect(self._result[0]).not_to(be_empty)
for gen in self._result[0]:
expect(gen).to(be_valid_network_generator)
with description('/network/generators/{id}'):
with before.all:
model = network_generator_model(self._api.api_client, interface=self._ip4_client_interface)
g7r = self._api.create_network_generator(model)
expect(g7r).to(be_valid_network_generator)
self._g7r = g7r
with context('GET'):
with description('by existing ID'):
with before.all:
self._result = self._api.get_network_generator_with_http_info(
self._g7r.id, _return_http_data_only=False)
with it('success (200)'):
expect(self._result[1]).to(equal(200))
with it('has Content-Type: application/json header'):
expect(self._result[2]).to(has_json_content_type)
with it('generator object'):
expect(self._result[0]).to(be_valid_network_generator)
with description('by non-existent ID'):
with it('not found (404)'):
expr = lambda: self._api.get_network_generator('unknown')
expect(expr).to(raise_api_exception(404))
with description('by invalid ID'):
with it('bad request (400)'):
expr = lambda: self._api.get_network_generator('bad_id')
expect(expr).to(raise_api_exception(400))
with context('DELETE'):
with description('by existing ID'):
with it('removed (204)'):
result = self._api.delete_network_generator_with_http_info(
self._g7r.id, _return_http_data_only=False)
expect(result[1]).to(equal(204))
with it('not found (404)'):
expr = lambda: self._api.get_network_generator(self._g7r.id)
expect(expr).to(raise_api_exception(404))
with description('by non-existent ID'):
with it('not found (404)'):
expr = lambda: self._api.delete_network_generator('unknown')
expect(expr).to(raise_api_exception(404))
with description('by invalid ID'):
with it('bad request (400)'):
expr = lambda: self._api.delete_network_generator('bad_id')
expect(expr).to(raise_api_exception(400))
with description('/network/generators/{id}/start'):
with before.all:
model = network_generator_model(self._api.api_client, interface=self._ip4_client_interface)
g7r = self._api.create_network_generator(model)
expect(g7r).to(be_valid_network_generator)
self._g7r = g7r
with after.all:
clear_network_instances(self._api)
with context('POST'):
with description('by existing ID'):
with before.all:
self._result = self._api.start_network_generator_with_http_info(
self._g7r.id, _return_http_data_only=False)
with it('is not running'):
expect(self._g7r.running).to(be_false)
with it('started (201)'):
expect(self._result[1]).to(equal(201))
with it('has valid Location header'):
expect(self._result[2]).to(has_location('/network/generator-results/' + self._result[0].id))
with it('has Content-Type: application/json header'):
expect(self._result[2]).to(has_json_content_type)
with it('returned valid result'):
expect(self._result[0]).to(be_valid_network_generator_result)
expect(self._result[0].active).to(be_true)
expect(self._result[0].generator_id).to(equal(self._g7r.id))
with it('is running'):
g7r = self._api.get_network_generator(self._g7r.id)
expect(g7r).to(be_valid_network_generator)
expect(g7r.running).to(be_true)
with description('by existing ID with Dynamic Results'):
with before.all:
self._api.stop_network_generator(self._g7r.id)
dynamic = make_dynamic_results_config(
get_network_dynamic_results_fields())
self._result = self._api.start_network_generator_with_http_info(
self._g7r.id, dynamic_results=dynamic, _return_http_data_only=False)
with it('is not running'):
expect(self._g7r.running).to(be_false)
with it('started (201)'):
expect(self._result[1]).to(equal(201))
with it('has valid Location header'):
expect(self._result[2]).to(has_location('/network/generator-results/' + self._result[0].id))
with it('has Content-Type: application/json header'):
expect(self._result[2]).to(has_json_content_type)
with it('returned valid result'):
expect(self._result[0]).to(be_valid_network_generator_result)
expect(self._result[0].active).to(be_true)
expect(self._result[0].generator_id).to(equal(self._g7r.id))
expect(self._result[0].dynamic_results).to(be_valid_dynamic_results)
with it('is running'):
g7r = self._api.get_network_generator(self._g7r.id)
expect(g7r).to(be_valid_network_generator)
expect(g7r.running).to(be_true)
with description('by non-existent ID'):
with it('not found (404)'):
expr = lambda: self._api.start_network_generator('unknown')
expect(expr).to(raise_api_exception(404))
with description('by invalid ID'):
with it('bad request (400)'):
expr = lambda: self._api.start_network_generator('bad_id')
expect(expr).to(raise_api_exception(400))
with description('/network/generators/{id}/stop'):
with before.all:
model = network_generator_model(self._api.api_client, interface=self._ip4_client_interface)
g7r = self._api.create_network_generator(model)
expect(g7r).to(be_valid_network_generator)
start_result = self._api.start_network_generator_with_http_info(g7r.id)
expect(start_result[1]).to(equal(201))
g7r = self._api.get_network_generator(g7r.id)
expect(g7r).to(be_valid_network_generator)
self._g7r = g7r
with after.all:
clear_network_instances(self._api)
with context('POST'):
with description('by existing ID'):
with it('is running'):
expect(self._g7r.running).to(be_true)
with it('stopped (204)'):
result = self._api.stop_network_generator_with_http_info(
self._g7r.id, _return_http_data_only=False)
expect(result[1]).to(equal(204))
with it('is not running'):
g7r = self._api.get_network_generator(self._g7r.id)
expect(g7r).to(be_valid_network_generator)
expect(g7r.running).to(be_false)
with description('by non-existent ID'):
with it('not found (404)'):
expr = lambda: self._api.start_network_generator('unknown')
expect(expr).to(raise_api_exception(404))
with description('by invalid ID'):
with it('bad request (400)'):
expr = lambda: self._api.start_network_generator('bad_id')
expect(expr).to(raise_api_exception(400))
with description('/network/generators/x/bulk-start'):
with before.all:
model = network_generator_model(self._api.api_client, interface=self._ip4_client_interface)
self._g8s = [self._api.create_network_generator(model) for a in range(3)]
with after.all:
clear_network_instances(self._api)
with context('PUT'):
with it('returns 405'):
expect(lambda: self._api.api_client.call_api('/network/generators/x/bulk-start', 'PUT')).to(
raise_api_exception(405, headers={'Allow': "POST"}))
with description('POST'):
with description('by existing IDs'):
with before.all:
request = client.models.BulkStartNetworkGeneratorsRequest(
[g7r.id for g7r in self._g8s])
self._result = self._api.bulk_start_network_generators_with_http_info(
request, _return_http_data_only=False)
with it('is not running'):
for g7r in self._g8s:
expect(g7r.running).to(be_false)
with it('started (200)'):
expect(self._result[1]).to(equal(200))
with it('has Content-Type: application/json header'):
expect(self._result[2]).to(has_json_content_type)
with it('returned valid results'):
for result in self._result[0]:
expect(result).to(be_valid_network_generator_result)
expect(result.active).to(be_true)
with it('is running'):
for g7r in self._g8s:
result = self._api.get_network_generator(g7r.id)
expect(result).to(be_valid_network_generator)
expect(result.running).to(be_true)
with description('with non-existant ID'):
with it('is not running'):
request = client.models.BulkStopNetworkGeneratorsRequest(
[g7r.id for g7r in self._g8s])
self._api.bulk_stop_network_generators(request)
for g7r in self._g8s:
expect(g7r.running).to(be_false)
with it('not found (404)'):
request = client.models.BulkStartNetworkGeneratorsRequest(
[g7r.id for g7r in self._g8s] + ['unknown'])
expr = lambda: self._api.bulk_start_network_generators(request)
expect(expr).to(raise_api_exception(404))
with it('is not running'):
for g7r in self._g8s:
result = self._api.get_network_generator(g7r.id)
expect(result).to(be_valid_network_generator)
expect(result.running).to(be_false)
with description('with invalid ID'):
with it('is not running'):
request = client.models.BulkStopNetworkGeneratorsRequest(
| |
from __future__ import print_function
import os
import json
import shutil
import textwrap
import jinja2
from lxml import etree as ET
from collections import defaultdict
from iatirulesets.text import rules_text
# Languages (ISO 639-1 codes) to build the documentation for.
languages = ['en']
# Define the namespaces necessary for opening schema files
namespaces = {
    'xsd': 'http://www.w3.org/2001/XMLSchema'
}
# Define attributes that have documentation that differs to that in the schema
custom_attributes = {
}
def get_github_url(repo, path=''):
    """Return a link to the Github UI for a given repository and filepath.

    Args:
        repo (str): The repository that contains the file at the input path.
        path (str): The path (within the repository) to the file, with no
            preceding slash ('/').

    Returns:
        str: Link to the Github UI page.
    """
    # Each documented repository is pinned to a specific branch.
    branch_by_repo = {
        'IATI-Schemas': 'version-2.03',
        'IATI-Codelists': 'version-2.03',
        'IATI-Rulesets': 'version-2.03',
        'IATI-Extra-Documentation': 'version-2.03',
        'IATI-Codelists-NonEmbedded': 'master',
    }
    return 'https://github.com/IATI/{0}/blob/{1}/{2}'.format(
        repo, branch_by_repo[repo], path)
def human_list(l):
    """Return a human-friendly version of a list.

    Currently separates list items with commas, but could be extended to
    insert 'and'/'or' correctly.

    Args:
        l (list): The list to be made human-friendly.

    Returns:
        str: The human-friendly representation of the list.
    """
    separator = ', '
    return separator.join(l)
def lookup_see_also(standard, mapping, path):
    """Yield overview page paths related to *path* per overview-mapping.json.

    Args:
        standard (str): Either 'organisation-standard' or 'activity-standard'.
        mapping (dict): Overview name -> list of element paths, from
            [standard]/overview-mapping.json.
        path (str): Element path to look up; progressively shortened by one
            trailing segment on each recursive call.

    Yields:
        str: Paths of overview pages related to the current element.
    """
    if path == '':
        return
    for overview_name, element_paths in mapping.items():
        if path in element_paths:
            yield '/' + standard + '/overview/' + overview_name
    # Recurse on the parent path so ancestors' overviews are included too.
    parent_path = '/'.join(path.split('/')[:-1])
    for related in lookup_see_also(standard, mapping, parent_path):
        yield related
def see_also(path, lang):
    """Return overview pages related to the element at *path*.

    Args:
        path (str): Standard-prefixed element path, e.g.
            'activity-standard/iati-activities/iati-activity'.
        lang (str): Two-letter language code; only 'en' currently has an
            overview-mapping.json file.

    Returns:
        list: Overview page paths related to *path*. Empty when no mapping
        file exists for *lang*.
    """
    standard = path.split('/')[0]
    if lang != 'en':  # FIXME: only an English mapping file exists so far
        # Previously the code fell through with `mapping` unbound and raised
        # NameError for any non-'en' language; return no matches instead.
        return []
    mapping_path = os.path.join(
        'IATI-Extra-Documentation', lang, standard, 'overview-mapping.json')
    # Loading this file on every call is incredibly inefficient (kept for
    # behavioral parity; consider caching).
    with open(mapping_path) as mapping_file:
        mapping = json.load(mapping_file)
    # Common 'simple' path e.g. iati-activities or budget/period-start.
    # Using this prevents subpages of iati-activity using the activity file
    # overview.
    segments = path.split('/')
    simple_path = '/'.join(segments[3:]) if len(segments) > 3 else path
    return list(lookup_see_also(standard, mapping, simple_path))
# Standard ruleset definitions, shared by ruleset_page() and ruleset_text().
standard_ruleset = json.load(open('./IATI-Rulesets/rulesets/standard.json'))
def ruleset_page(lang):
    """Render docs/<lang>/rulesets/standard-ruleset.rst from the standard ruleset.

    Args:
        lang (str): Two-letter language code selecting the output folder and
            the Jinja template to use.
    """
    jinja_env = jinja2.Environment(loader=jinja2.FileSystemLoader('templates'))
    # Human-readable text for every ruleset xpath.
    rendered_rules = {xpath: rules_text(rules, '', True)
                      for xpath, rules in standard_ruleset.items()}
    rst_filename = os.path.join(lang, 'rulesets', 'standard-ruleset.rst')
    try:
        os.mkdir(os.path.join('docs', lang, 'rulesets'))
    except OSError:
        # Directory already exists.
        pass
    with open(os.path.join('docs', rst_filename), 'w') as fp:
        template = jinja_env.get_template(lang + '/ruleset.rst')
        fp.write(template.render(
            ruleset=rendered_rules,
            extra_docs=get_extra_docs(rst_filename)
        ))
def ruleset_text(path):
    """Return a list of text describing the rulesets for a given path (xpath)."""
    texts = []
    for xpath, rules in standard_ruleset.items():
        if not xpath.startswith('//'):
            continue
        # Use slice 1: to ensure we match /budget/ but not /total-budget/
        parts = path.split(xpath[1:] + '/')
        if len(parts) < 2:
            # The ruleset xpath does not occur within *path*.
            continue
        texts += rules_text(rules, parts[1])
    return texts
# Codelist name -> xpaths found to use it; populated by match_codelists().
codelists_paths = defaultdict(list)
# TODO - This function should be moved into the IATI-Codelists submodule
codelist_mappings = ET.parse('./IATI-Codelists/mapping.xml').getroot().findall('mapping')
def match_codelists(path):
    """Look up the codelists that the given path (xpath) should be on.

    Returns a list of (codelist name, has_condition) tuples, where
    has_condition describes whether any conditions apply; the list is empty
    if no codelist applies. As a side effect, *path* is recorded in the
    module-level codelists_paths index.
    """
    matches = []
    for mapping in codelist_mappings:
        mapping_path = mapping.find('path').text
        if not mapping_path.startswith('//'):
            # FIXME: absolute (non-'//') mapping paths are not yet handled.
            continue
        if path.endswith(mapping_path.strip('/')):
            codelist_name = mapping.find('codelist').attrib['ref']
            if path not in codelists_paths[codelist_name]:
                codelists_paths[codelist_name].append(path)
            has_condition = mapping.find('condition') is not None
            matches.append((codelist_name, has_condition))
    return matches
def is_complete_codelist(codelist_name):
    """Determine whether the specified Codelist is complete.

    Args:
        codelist_name (str): The name of the Codelist. This is case-sensitive
            and must match the mapping file.

    Returns:
        bool: Whether the Codelist is complete.

    Note:
        Incomplete Codelists must be listed manually - this is not
        auto-detected, due to the surrounding architecture making
        auto-detection a challenge.
    """
    # Track the incomplete Codelists, since that set is shorter.
    incomplete_codelists = {
        'Country',
        'HumanitarianScopeType',
        'HumanitarianScopeVocabulary',
        'IndicatorVocabulary',
        'Language',
        'OrganisationIdentifier',
        'OrganisationRegistrationAgency',
    }
    return codelist_name not in incomplete_codelists
def path_to_ref(path):
    """Convert an xpath into a reference-friendly anchor name."""
    anchor = path.replace('//', '_')
    return anchor.replace('@', '.')
def get_extra_docs(rst_filename):
    """Return extra documentation text for *rst_filename*, or '' if none exists.

    The text is read from the matching file in the IATI-Extra-Documentation
    submodule.
    """
    extra_path = os.path.join('IATI-Extra-Documentation', rst_filename)
    if not os.path.isfile(extra_path):
        return ''
    with open(extra_path) as extra_file:
        return extra_file.read()
class Schema2Doc(object):
"""Class for converting an IATI XML schema to documentation in the reStructuredText format."""
    def __init__(self, schema, lang):
        """
        Args:
            schema (str): The filename of the schema to use, e.g. 'iati-activities-schema.xsd'
            lang (str): A two-letter (ISO 639-1) language code to build the documentation for (e.g. 'en')

        Sets:
            self.tree (lxml.etree._ElementTree): Representing the input schema.
            self.tree2 (lxml.etree._ElementTree): Representing the iati-common.xsd schema.
            self.jinja_env (jinja2.environment.Environment): The templates contained within the 'templates' folder.
            self.lang (str): The input language.
        """
        # Both trees are parsed up front; get_schema_element falls back to
        # the common schema when a name is not found in the main one.
        self.tree = ET.parse("./IATI-Schemas/" + schema)
        self.tree2 = ET.parse("./IATI-Schemas/iati-common.xsd")
        self.jinja_env = jinja2.Environment(loader=jinja2.FileSystemLoader('templates'))
        self.lang = lang
        # Expose is_complete_codelist to templates as a Jinja filter.
        self.jinja_env.filters['is_complete_codelist'] = is_complete_codelist
def get_schema_element(self, tag_name, name_attribute):
"""Returns the xsd definition for a given element from schemas defined in `self.tree` (or `self.tree2` if nothing found).
Args:
tag_name (str): The name of the tag in the schema - will typically be 'element'.
name_attribute (str): The value of the 'name' attribute in the schema - i.e. the name of the element/type etc. being described, e.g. 'iati-activities'.
Returns:
None / lxml.etree._Element: The element tree representng the xsd definition for the given inputs. None if no match found.
"""
schema_element = self.tree.find("xsd:{0}[@name='{1}']".format(tag_name, name_attribute), namespaces=namespaces)
if schema_element is None:
schema_element = self.tree2.find("xsd:{0}[@name='{1}']".format(tag_name, name_attribute), namespaces=namespaces)
return schema_element
def schema_documentation(self, element, ref_element, type_element=None):
"""Return a documention string for either a given ref_element (if not None) or an element.
If the element is a document-link, it will obtain the documentation string from its extension root instead of the extension itself.
Args:
element (lxml.etree._Element): An xsd element definition.
ref_element (lxml.etree._Element): An element that the `element` inherits properties definitions from (using the xsd `ref` inheritance). If set to None, the documention string for the element is returned.
type_element (lxml.etree._Element): An element that the `element` inherits properties definitions from (using the xsd `type` inheritance). Defaults to None, implying element properties/definitions are defined within `element` or `ref_element`.
Returns:
str: The documentation string, extracted from the input ref_element or element.
"""
if ref_element is not None:
xsd_docuementation = ref_element.find(".//xsd:documentation", namespaces=namespaces)
if xsd_docuementation is not None:
return xsd_docuementation.text
xsd_documentation = element.find(".//xsd:documentation", namespaces=namespaces)
if xsd_documentation is not None:
return xsd_documentation.text
if type_element is not None:
xsd_documentation = type_element.find(".//xsd:documentation", namespaces=namespaces)
if xsd_documentation is not None:
return xsd_documentation.text
extension = type_element.find(".//xsd:extension", namespaces=namespaces)
if extension is not None:
base_name = type_element.find(".//xsd:extension", namespaces=namespaces).get("base")
base_element = self.get_schema_element('complexType', base_name)
if base_element is not None:
return base_element.find(".//xsd:documentation", namespaces=namespaces).text
def output_docs(self, element_name, path, element=None, minOccurs='', maxOccurs='', ref_element=None, type_element=None):
"""Output documentation for the given element, and it's children.
If element is not given, we try to find it in the schema using it's
element_name.
Args:
element_name (str):
path (str): The xpath of the context where this element was found. For the root context (i.e. iati-activities), this is an empty string.
element (lxml.etree._Element): If element is not given, we try to find it in the schema using it's element_name.
minOccurs (str): The number of minimum occurances for the given element_name / element.
maxOccurs (str): The number of minimum occurances for the given element_name / element.
ref_element (lxml.etree._Element): An element that the `element` inherits properties definitions from (using the xsd `ref` inheritance). Defaults to None, implying element properties are defined within `element` or `type_element`.
type_element (lxml.etree._Element): An element that the `element` inherits properties definitions from (using the xsd `type` inheritance). Defaults to None, implying element properties are defined within `element` or `ref_element`.
"""
if element is None:
element = self.get_schema_element('element', element_name)
if element is None:
return
github_urls = {
'schema': element.base.replace('./IATI-Schemas/', get_github_url('IATI-Schemas')) + '#L' + str(element.sourceline),
'extra_documentation': get_github_url('IATI-Extra-Documentation', self.lang + '/' + path + element_name + '.rst')
}
try:
os.makedirs(os.path.join('docs', self.lang, path))
except OSError:
pass
rst_filename = os.path.join(self.lang, path, element_name + '.rst')
children = self.element_loop(element, path)
for child_name, child_element, child_ref_element, child_type_element, child_minOccurs, child_maxOccurs in children:
self.output_docs(child_name, path + element.attrib['name'] + '/', child_element, child_minOccurs, child_maxOccurs, child_ref_element, child_type_element)
min_occurss = element.xpath('xsd:complexType/xsd:choice/@minOccur', namespaces=namespaces)
# Note that this min_occurs is different to the python variables
# minOccurs and maxOccurs, because this is read from a choice element,
# whereas those are read from the individual element definitions (only
# possible within a sequence element)
if | |
<reponame>nwfsc-fram/pyFieldSoftware<gh_stars>0
# -----------------------------------------------------------------------------
# Name: ObserverSOAP.py
# Purpose: OPTECS SOAP support for db syncing, using zeep
# http://docs.python-zeep.org/en/master/
#
# Author: <NAME> <<EMAIL>>
#
# Created: Dec 20, 2016
# License: MIT
#
# Install notes:
# * Python 3.6: use requirements.txt for zeep, lxml (refer to older versions of this file for python 3.5)
#
# FIXED with new tomcat endpoint wsdl, no longer required patch to Lib/site-packages/zeep/xsd/schema.py:
# In def _get_instance, (zeep 0.27.0: line 374, zeep 2.4.0: line 498)
# (after try:)
# if qname.localname == 'arrayList':
# return self._types['{http://www.oracle.com/webservices/internal/literal}list']
#
#
# ------------------------------------------------------------------------------
# python -mzeep https://www.webapps.nwfsc.noaa.gov/obclientsyncdev21/ObclientsyncWSSoapHttpPort?WSDL#
import csv
import re
import sys
import math
import hashlib
import textwrap
import logging
import logging.config
import socket
import io
import unittest
import arrow
import keyring
import zeep
from zeep.wsse.username import UsernameToken
from zeep.wsdl.utils import etree_to_string
from PyQt5.QtCore import QObject, pyqtSlot
from PyQt5.QtCore import QVariant
from PyQt5.QtCore import pyqtProperty
from PyQt5.QtCore import pyqtSignal
from apsw import SQLError
from py.observer.ObserverDBUtil import ObserverDBUtil
from py.observer.ObserverDBBaseModel import database
from py.observer.ObserverDBModels import Users, fn, Settings
# Enable DEBUG for dump of SOAP header
# Module-level logging configuration: routes zeep transport messages to the
# console at INFO level. Raising 'zeep.transports' to DEBUG dumps full SOAP
# envelopes, which is useful when debugging sync failures.
logging.config.dictConfig({
    'version': 1,
    'formatters': {
        'verbose': {
            'format': '%(name)s: %(message)s'
        }
    },
    'handlers': {
        'console': {
            'level': 'INFO',
            'class': 'logging.StreamHandler',
            'formatter': 'verbose',
        },
    },
    'loggers': {
        'zeep.transports': {
            'level': 'INFO',
            'propagate': True,
            'handlers': ['console'],
        },
    }
})
class ObserverSoap(QObject):
    """SOAP client for OPTECS observer DB syncing, built on zeep.

    Wraps the NOAA obclientsync web service: uploads CSV dumps of local
    tables (action_upload) and pulls DDL transactions to replay against
    the local database (action_download / perform_ddl).
    """

    # WSDL endpoint per deployment mode; values come from the local Settings
    # table. NOTE(review): these lookups run at class-definition (import) time.
    mode_urls = {
        'ifq': ObserverDBUtil.get_setting('ifq_wsdl_url'),  # IFQ (Prod)
        'ifqdev': ObserverDBUtil.get_setting('ifqdev_wsdl_url'),  # IFQDEV (Test)
        'ifqadmin': ObserverDBUtil.get_setting('ifqadmin_wsdl_url')  # IFQADMIN (Test)
    }
    observer_mode = ObserverDBUtil.get_setting('optecs_mode')
    wsdl = mode_urls[observer_mode]  # active endpoint for this install
    obs_username = 'obsclient'  # service account used for the sync API
    oracle_salt_keyname = 'optecs_oracle_pwsalt'  # keyring entry holding the Oracle salt
    default_transaction_id = ObserverDBUtil.get_setting('last_db_transaction', 83775)
    obs_version = ObserverDBUtil.get_setting('seahag_version', 2019)
    obs_credentials_namespace = 'OPTECS v1'  # for stored salt and pw
    skip_pw_error = True  # when True, a missing sync password logs instead of exiting

    def __init__(self):
        """Build the zeep SOAP client with WS-Security username/password auth."""
        super().__init__()
        self._logger = logging.getLogger(__name__)
        self.client = \
            zeep.Client(
                wsdl=self.wsdl,
                wsse=UsernameToken(username=self.obs_username, password=self._get_dbsync_pw(), ))

    def _get_dbsync_pw(self):
        """Fetch the sync-service password from the OS keyring (or None)."""
        # PW for Java API sync
        pw = keyring.get_password(self.obs_credentials_namespace, self.obs_username)
        if not pw:
            logging.error('Password for observer client sync not set.')
            logging.error('Please run set_optecs_sync_pw.py (not in src control) to set it.')
            if not self.skip_pw_error:
                sys.exit(-1)
        return pw

    @staticmethod
    def get_oracle_salt():
        """Fetch the Oracle password-hashing salt from the OS keyring; raise if unset."""
        # Salt for oracle password
        salt_value = keyring.get_password(ObserverSoap.obs_credentials_namespace, ObserverSoap.oracle_salt_keyname)
        if not salt_value:
            raise Exception('Salt for password hashing not set. '
                            'Please run set_optecs_sync_pw.py (not in src control) to set it.')
        return salt_value

    @staticmethod
    def hash_pw(username, pw):
        """
        This routine is adapted from SDM_CUSTOM_BUILT_NEW_SHA1 in Oracle
        @param username: first+last - should be uppercase (e.g. WILLSMITH)
        @param pw: password to encode
        @return: custom SHA1 hash that matches how the Oracle DB currently stores passwords.
        """
        if not username or not pw:
            raise ValueError('Invalid/blank username or pw.')
        username = username.upper()
        salt_value = ObserverSoap.get_oracle_salt().encode('utf-8')
        # Length of the salt substring depends on the combined name+pw length,
        # padded to the next multiple of 8 (mirrors the Oracle PL/SQL routine).
        pw_hash_len = len(username) + len(pw)
        padding = 10 * math.ceil(pw_hash_len / 8)
        pw_hash_len = padding - pw_hash_len + 40
        if pw_hash_len > len(salt_value):
            pw_hash_len = len(salt_value)
        salt_substr = salt_value[:pw_hash_len]
        hash_obj = hashlib.sha1(salt_substr)
        hashed_salt = hash_obj.hexdigest()
        final_pw = str(hashed_salt).upper() + pw + username  # composite hashed salt + pw + username
        hash_obj = hashlib.sha1(final_pw.encode('utf-8'))
        pw_hashed = hash_obj.hexdigest().upper()
        return pw_hashed

    @staticmethod
    def get_filename(tablename, trip_id, user_id, date_time=None):
        """
        Returns filename in the format Table#Export Version_UserID_date_time.csv
        e.g. CATCHES#20148_1760_10SEP2014_1356.csv
        @param tablename: TRIPS
        @param trip_id: Export version? Internal temp trip ID
        @param user_id: USER_ID
        @param date_time: arrow object; defaults to now
        @return: Table#Export Version_UserID_date_time.csv
        """
        if not date_time:
            date_time = arrow.now()
        formatted_dt = date_time.format('DDMMMYYYY_HHmm').upper()
        return '{table}#{exp}_{uid}_{dt}.csv'.format(table=tablename.upper(),
                                                     exp=trip_id,
                                                     uid=user_id,
                                                     dt=formatted_dt)

    def _get_user_id(self, username):
        """Look up USER_ID by concatenated first+last name (case-insensitive).

        Returns None (implicitly) when the user is not found.
        """
        try:
            user_check = Users.get((fn.Lower(Users.first_name.concat(Users.last_name))) == username.lower())
            self._logger.debug('ID {} found for user {}'.format(user_check.user, username))
            return user_check.user
        except Users.DoesNotExist:
            self._logger.warning('Name not found: {}'.format(username))

    def action_upload(self, username, hashed_pw, filename, unenc_data):
        """
        Upload binary blob to web service for parsing
        @param username: user for auth
        @param hashed_pw: hashed pw for auth
        @param filename: filename from get_filename
        @param unenc_data: UN-encoded data of csv table data (base64 encoding is automatic.)
        @return: is_successful, new_trip_id (if TRIPS, else None)

        NOTE(review): on unknown user this returns a bare False, not the
        documented 2-tuple - confirm callers unpack defensively.
        """
        # http://impl.webservices.obofflinesync.sdm.nwfsc.nmfs.noaa.gov//uploadClientData1
        self._logger.info('Upload client scripts to virtual filename {}'.format(filename))
        user_id = self._get_user_id(username)
        if not user_id:
            return False
        laptop_name = ObserverDBUtil.get_data_source()
        # soap_etree = self.client.create_message(self.client.service, 'uploadClientData1', userName=username,
        #                                         password=<PASSWORD>,
        #                                         fileName=filename,
        #                                         data=unenc_data,
        #                                         version=self.obs_version,
        #                                         var1=user_id,
        #                                         var2=laptop_name,
        #                                         var3='',
        #                                         var4='')
        #
        # soap_msg = etree_to_string(soap_etree).decode()
        # self._logger.info(f'XML Message: {soap_msg}')
        # with self.client.settings(raw_response=True):
        # NOTE(review): '<PASSWORD>' below is a redacted placeholder (not valid
        # Python) - restore the hashed_pw argument before running this module.
        result = self.client.service.uploadClientData1(userName=username,
                                                       password=<PASSWORD>,
                                                       fileName=filename,
                                                       data=unenc_data,
                                                       version=self.obs_version,
                                                       var1=user_id,
                                                       var2=laptop_name,
                                                       var3='',
                                                       var4=''
                                                       )
        new_trip_id = self._get_trip_id_from_result(result)
        is_success = 'SUCCESS' in result
        return is_success, new_trip_id

    def _get_trip_id_from_result(self, result_string):
        """Parse the online trip ID from the service's HTML result string, or None."""
        # '<br>SUCCESS: Parsed 1 TRIPS row.<div style="font-size:2em;color:#990000">
        # Your Online Trip ID is <b>30135</b> </div>.
        # <br><div style="font-size:2em;color:#006600">Online transfer complete.</div>'
        if not result_string or 'TRIPS' not in result_string:
            return None
        # NOTE(review): if the marker text is absent, re.search returns None and
        # m.group() raises AttributeError - confirm the service always emits it.
        m = re.search(r"Your Online Trip ID is <b>(?P<trip_id>[0-9]*)", result_string)
        trip_id = int(m.group('trip_id'))
        self._logger.debug(f'Parsed new TRIP ID from result string, got {trip_id}')
        return trip_id

    def action_download(self, transaction_id, username=None, hashed_pw=None):
        """
        Download transactions from APPLIED_TRANSACTIONS.
        @param transaction_id: from DB
        @param username: defaults to Admin
        @param hashed_pw: defaults to Admin
        @return: list of results sorted by transaction_id, or None when empty
        """
        # Defaults to admin user, which is how current offline system works.
        # http://impl.webservices.obofflinesync.sdm.nwfsc.nmfs.noaa.gov//updateClientScripts
        hardcoded_admin_id = 1155
        self._logger.info(f'Downloading client scripts from transaction ID {transaction_id}')
        if not username or not hashed_pw:
            user_q = Users.get(Users.user == hardcoded_admin_id)
            username = user_q.first_name + user_q.last_name
            hashed_pw = user_q.password
        # soap_etree = self.client.create_message(self.client.service, 'updateClientScripts', userName=username,
        #                                         password=<PASSWORD>,
        #                                         transaction_id=transaction_id,
        #                                         version=self.obs_version,
        #                                         var1='',
        #                                         var2='',
        #                                         var3='',
        #                                         var4='')
        #
        # soap_msg = etree_to_string(soap_etree).decode()
        # self._logger.info(f'XML Message: {soap_msg}')
        # NOTE(review): '<PASSWORD>' below is a redacted placeholder (not valid
        # Python) - restore the hashed_pw argument before running this module.
        results = self.client.service.updateClientScripts(userName=username,
                                                          password=<PASSWORD>,
                                                          transaction_id=transaction_id,
                                                          version=self.obs_version,
                                                          var1='',
                                                          var2='',
                                                          var3='',
                                                          var4=''
                                                          )
        # Sort results by transaction_id
        if results:
            sorted_results = sorted(results, key=lambda k: k['transaction_id'])
            return sorted_results
        else:
            return None
        # return None

    def _check_user_pw_enc(self, username):
        """
        Check if PASSWORD_ENCRYPTED flag is set for user.
        @return: True if PASSWORD_ENCRYPTED and user exists, False otherwise
        """
        try:
            user_check = Users.get((fn.Lower(Users.first_name.concat(Users.last_name))) == username.lower())
            return user_check.password_encrypted == 1
        except Users.DoesNotExist:
            self._logger.warning('Name not found: {}'.format(username))
            return False

    def perform_sync(self):
        """
        @return: sync_result: bool, sync_output: str
        """
        return self.update_client_pull()

    def retrieve_updates(self):
        """
        Only does a pull, not a full sync
        @return: sync_result: bool, sync_output: str
        """
        return self.update_client_pull()

    def update_client_pull(self):
        """
        Currently uses admin user to pull down data
        @return: bool, string: Success (T/F), Message describing the result

        NOTE(review): the success path returns a 3-tuple (True, msg,
        success_count) while the error path returns a 2-tuple - confirm
        callers handle both shapes.
        """
        success_count, fail_count = 0, 0
        try:
            ddl_results = self.action_download(self.db_sync_transaction_id)
            if ddl_results:
                success_count, fail_count = self.perform_ddl(ddl_results)
                self._logger.info('Successes: {}, Failures: {}'.format(success_count, fail_count))
        except Exception as e:
            error_msg = 'DB Sync error: {}'.format(e)
            return False, error_msg
        ObserverDBUtil.db_fix_empty_string_nulls(self._logger)
        final_result = f'Update Successful.\nRetrieved {success_count} updates from DB.\n' \
                       f'Ignored: {fail_count}'
        return True, final_result, success_count

    def perform_ddl(self, ddl_results):
        """
        Perform DDL on database
        @param ddl_results: List of dicts from CLOB
        @return: successes, errors (counts)
        """
        expected_transaction_types = {'U', 'I'}
        success_count = 0
        error_count = 0
        last_transaction_id = None
        for ddl in ddl_results:
            transaction = ddl['transaction_ddl'].decode('utf-8', errors='ignore').rstrip('\0')  # axe \x00
            if ddl['transaction_type'] in expected_transaction_types:
                transaction = self.remove_sql_to_date(transaction)
                # NOTE(review): attribute access ddl.transaction_id here vs
                # ddl['transaction_id'] below - zeep result objects appear to
                # support both; confirm.
                self._logger.info(f'TXid {ddl.transaction_id}: {transaction[:15]}...')
                self._logger.debug(f'Performing: {transaction}')
                database.set_autocommit(True)
                try:
                    database.execute_sql(str(transaction))
                    success_count += 1
                    last_transaction_id = ddl['transaction_id']
                except SQLError as e:
                    self._logger.error(e)
                    error_count += 1
                except Exception as e:  # might be reinserting the same record etc
                    self._logger.error(e)
                    error_count += 1
            else:
                self._logger.warning('Unexpected transaction type {}'.format(ddl['transaction_type']))
                self._logger.warning(ddl['transaction_ddl'])
        # Persist the high-water mark so the next pull resumes after it.
        if last_transaction_id:
            self.db_sync_transaction_id = last_transaction_id
        return success_count, error_count

    @staticmethod
    def remove_sql_to_date(transaction: str) -> str:
        """
        Remove all occurrences of oracle's TO_DATE(x, y) function from transaction
        @param transaction: DDL with possible TO_DATE(x,y) function (one or more)
        @return: transaction x without TO_DATE(...) (remove y)
        """
        find_str = 'TO_DATE('
        while transaction.find(find_str) >= 0:
            start_idx = transaction.find(find_str)
            comma_idx = transaction.find(',', start_idx)
            if comma_idx == -1:  # malformed
                return transaction
            end_idx = transaction.find(')', start_idx)
            if end_idx == -1:  # malformed
                return transaction
            # Remove TO_DATE( x, y ) -> x
            before_to_date = transaction[:start_idx]
            date_param = transaction[start_idx + len(find_str):comma_idx]
            after_to_date = transaction[end_idx + 1:]
            transaction = before_to_date + date_param + after_to_date
        return transaction

    @property
    def db_sync_transaction_id(self):
        """Last applied transaction ID from Settings, or the class default."""
        # TODO Temporarily always get all transactions. For production, comment out line below.
        # return self.default_transaction_id
        try:
            last_id = Settings.get(Settings.parameter == 'last_db_transaction')
            return last_id.value
        except Settings.DoesNotExist:
            return self.default_transaction_id

    @db_sync_transaction_id.setter
    def db_sync_transaction_id(self, transaction_id):
        # Upsert the 'last_db_transaction' row in Settings.
        try:
            last_id = Settings.get(Settings.parameter == 'last_db_transaction')
            last_id.value = transaction_id
            last_id.save()
        except Settings.DoesNotExist:
            new_setting = Settings.create(parameter='last_db_transaction',
                                          value=transaction_id)
            new_setting.save()

    def getCSV(self, db_table_name):
        """
        NOTE: Obsolete, see DBSyncController
        Does a raw SQL query and pipes to CSV
        @param db_table_name: table name, e.g. "TRIPS"
        @return: string of csv representation, or None on error
        """
        output = io.StringIO()
        # NOTE(review): db_table_name is interpolated into SQL - callers must
        # only pass trusted, hard-coded table names.
        table_query = database.execute_sql('SELECT * FROM {}'.format(db_table_name))
        try:
            fields = [h[0] for h in table_query.getdescription()]
            writer = csv.writer(output, quoting=csv.QUOTE_NONNUMERIC)
            writer.writerow(fields)  # Header
            for row in table_query:
                writer.writerow(row)
            return output.getvalue()
        except Exception as e:
            self._logger.warning(e)
            return None
class TestObserverSOAP(unittest.TestCase):
def test_variety_of_to_dates_and_not_to_dates(self):
test_cases = (
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import curses
import curses.panel
import curses.textpad
import re
import urlparse
import os
# Minimal terminal size required by the fixed window layout in class tui below.
MIN_LINES = 48  # minimum rows
MIN_COLS = 150  # minimum columns
class tui:
"""The Terminal User Interface for Vindimium bot"""
    def __init__(self):
        """Set up window geometry, init curses, and verify the terminal size."""
        self.running = True
        self.paused = False
        # Fixed layout geometry: each window is described by Y/X origin and
        # H/W extent. Windows with H/W of 0 are sized later (map, time line).
        self.DATA_Y = 1
        self.DATA_X = 0
        self.DATA_H = 29
        self.DATA_W = 32
        self.PLAYERS_Y = 1
        self.PLAYERS_X = self.DATA_X + self.DATA_W + 2
        self.PLAYERS_H = 21
        self.PLAYERS_W = 66
        self.MAP_Y = 1
        self.MAP_X = self.PLAYERS_X + self.PLAYERS_W + 2
        self.MAP_H = 0
        self.MAP_W = 0
        self.PATH_Y = self.PLAYERS_Y + self.PLAYERS_H + 3
        self.PATH_X = self.DATA_X + self.DATA_W + 2
        self.PATH_H = 5
        self.PATH_W = 66
        self.LOG_Y = self.DATA_Y + self.DATA_H + 2
        self.LOG_X = 0
        self.LOG_H = 12
        self.LOG_W = self.DATA_W + self.PLAYERS_W + 2
        self.HELP_Y = self.LOG_Y + self.LOG_H - 2
        self.HELP_X = 1
        self.HELP_H = 1
        self.HELP_W = self.LOG_W - 2
        self.TIME_Y = self.LOG_Y + self.LOG_H + 2
        self.TIME_X = 0
        self.TIME_H = 0
        self.TIME_W = 0
        self.MENU_Y = 0
        self.MENU_X = 0
        self.MENU_H = 24
        self.MENU_W = 0
        self.SUMMARY_Y = self.LOG_Y + 5
        self.SUMMARY_X = self.LOG_X + self.LOG_W + 2
        self.SUMMARY_H = 7
        self.SUMMARY_W = 20
        # Window handles; None until the corresponding draw_* method runs.
        self.data_win = None
        self.map_win = None
        self.path_win = None
        self.log_win = None
        self.help_win = None
        self.players_win = None
        self.time_win = None
        self.menu_win = None
        # NOTE(review): duplicate assignment - time_win is already set above.
        self.time_win = None
        self.summary_win = None
        self.log_entries = []
        self.stdscr = curses.initscr()
        curses.start_color()
        # Basic color set
        curses.init_pair(1, curses.COLOR_WHITE, curses.COLOR_WHITE)
        curses.init_pair(2, curses.COLOR_WHITE, curses.COLOR_RED)
        curses.init_pair(3, curses.COLOR_RED, curses.COLOR_BLACK)
        curses.init_pair(4, curses.COLOR_YELLOW, curses.COLOR_BLACK)
        curses.init_pair(5, curses.COLOR_GREEN, curses.COLOR_BLACK)
        curses.init_pair(6, curses.COLOR_WHITE, curses.COLOR_BLUE)
        curses.init_pair(7, curses.COLOR_CYAN, curses.COLOR_BLACK)
        curses.init_pair(8, curses.COLOR_WHITE, curses.COLOR_MAGENTA)
        curses.init_pair(9, curses.COLOR_WHITE, curses.COLOR_GREEN)
        curses.init_pair(10, curses.COLOR_WHITE, curses.COLOR_YELLOW)
        # check for minimal screen size
        screen_y, screen_x = self.stdscr.getmaxyx()
        if screen_y < MIN_LINES or screen_x < MIN_COLS:
            # Try resizing terminal
            curses.resizeterm(MIN_LINES, MIN_COLS)
            if not curses.is_term_resized(MIN_LINES, MIN_COLS):
                # Resize failed: tear down curses before printing to stdout.
                self.quit_ui()
                print ("Unable to change your terminal size. Your terminal must be at least", \
                    MIN_LINES, "lines and", MIN_COLS, "columns and it actually has", \
                    screen_y, "lines and", screen_x, "columns.")
                quit(1)
        # Screen is up: raw-ish input, hidden cursor, keypad escape handling.
        curses.noecho()
        curses.cbreak()
        curses.curs_set(0)
        self.stdscr.keypad(1)
        # - /screen init ----
# - /init ---------------------------------------------------------------
def clear(self):
"""Refresh all windows"""
self.stdscr.erase()
if self.data_win:
self.data_win.erase()
if self.map_win:
self.map_win.erase()
if self.path_win:
self.path_win.erase()
if self.log_win:
self.log_win.erase()
if self.help_win:
self.help_win.erase()
if self.players_win:
self.players_win.erase()
if self.time_win:
self.time_win.erase()
if self.summary_win:
self.summary_win.erase()
if self.menu_win:
self.menu_win.erase()
curses.doupdate()
def refresh(self):
"""Refresh all windows"""
self.stdscr.addstr(self.DATA_Y - 1, self.DATA_X + 1, "Game", curses.A_BOLD)
self.stdscr.addstr(self.PLAYERS_Y - 1, self.PLAYERS_X + 1, "Players", curses.A_BOLD)
self.stdscr.addstr(self.SUMMARY_Y - 1, self.SUMMARY_X + 1, "Games summary", curses.A_BOLD)
self.stdscr.noutrefresh()
self.data_win.noutrefresh()
if self.map_win:
self.map_win.noutrefresh()
if self.path_win:
self.path_win.noutrefresh()
if self.log_win:
self.log_win.noutrefresh()
if self.help_win:
self.help_win.noutrefresh()
if self.players_win:
self.players_win.noutrefresh()
if self.time_win:
self.time_win.noutrefresh()
if self.summary_win:
self.summary_win.noutrefresh()
if self.menu_win:
self.menu_win.noutrefresh()
curses.doupdate()
# - Draw game windows --------------------------------------------------
def draw_game_windows(self):
"""Draw the windows needed for the game"""
if self.menu_win:
self.menu_win.erase()
self.draw_data_win()
self.draw_path_win()
self.draw_log_win()
self.draw_help_win()
self.draw_players_win()
self.draw_summary_win()
curses.panel.update_panels()
curses.doupdate()
    def draw_data_win(self):
        """Draw main data window"""
        self.data_win = curses.newwin(self.DATA_H, self.DATA_W, self.DATA_Y, self.DATA_X)
        self.data_win.box()
        self.data_pan = curses.panel.new_panel(self.data_win)
        self.stdscr.addstr(self.DATA_Y - 1, self.DATA_X + 1, "Game", curses.A_BOLD)
        # Row labels; values are written later at the corresponding rows.
        data_lines = ["Playing",
                      "Bot name",
                      "Elo",
                      "Elapsed time",
                      "Turn",
                      "Position",
                      "Life",
                      "Mine count",
                      "Gold",
                      "Move",
                      "Action",
                      "Nearest hero",
                      "Nearest bar",
                      "Nearest mine"]
        # Vertical separator after the label column, with T-pieces at the box edges.
        self.data_win.vline(1, 13, curses.ACS_VLINE, self.DATA_H)
        self.data_win.addch(0, 13, curses.ACS_TTEE)
        self.data_win.addch(self.DATA_H-1, 13, curses.ACS_BTEE)
        # Second separator (column at x=22) only spans the lower rows (from row 9).
        self.data_win.vline(9, 22, curses.ACS_VLINE, self.DATA_H-9)
        self.data_win.addch(self.DATA_H-1, 22, curses.ACS_BTEE)
        y = 0
        for line in data_lines:
            self.data_win.addstr(y + 1, 1, line, curses.A_BOLD)
            if y < len(data_lines) * 2 - 2:
                # Horizontal rule under each row, with junction characters.
                self.data_win.hline(y + 2, 1, curses.ACS_HLINE, 30)
                self.data_win.addch(y + 2, 0, curses.ACS_LTEE)
                self.data_win.addch(y + 2, 31, curses.ACS_RTEE)
                self.data_win.addch(y + 2, 13, curses.ACS_PLUS)
                # Equivalent to y > 7: only add the x=22 junction on rows where
                # the second separator exists (it starts at row 9).
                if y * 2 - 7 > 7:
                    self.data_win.addch(y + 2, 22, curses.ACS_PLUS)
            y += 2
        self.data_win.addch(8, 22, curses.ACS_TTEE)
def draw_log_win(self):
"""Draw log window"""
self.stdscr.addstr(self.LOG_Y - 1, self.LOG_X + 1, "Log", curses.A_BOLD)
self.log_win = curses.newwin(self.LOG_H, self.LOG_W, self.LOG_Y, self.LOG_X)
self.log_win.box()
self.log_pan = curses.panel.new_panel(self.log_win)
    def draw_path_win(self):
        """Draw path & heuristic window"""
        self.stdscr.addstr(self.PATH_Y - 1, self.PATH_X + 1, "Path and heuristic", curses.A_BOLD)
        self.path_win = curses.newwin(self.PATH_H, self.PATH_W, self.PATH_Y, self.PATH_X)
        self.path_win.box()
        self.path_pan = curses.panel.new_panel(self.path_win)
        # Two labelled rows; values are written to the right of the separator.
        self.path_win.addstr(1, 1, "Heuristic", curses.A_BOLD)
        self.path_win.addstr(3, 1, "Path to goal", curses.A_BOLD)
        # Middle horizontal rule plus a vertical separator at x=13, with
        # junction characters where the lines meet the box and each other.
        self.path_win.hline(2, 1, curses.ACS_HLINE, 64)
        self.path_win.vline(1, 13, curses.ACS_VLINE, 4)
        self.path_win.addch(2, 0, curses.ACS_LTEE)
        self.path_win.addch(2, 65, curses.ACS_RTEE)
        self.path_win.addch(0, 13, curses.ACS_TTEE)
        self.path_win.addch(2, 13, curses.ACS_PLUS)
        self.path_win.addch(4, 13, curses.ACS_BTEE)
def draw_help_win(self):
"""Draw help window"""
self.help_win = curses.newwin(self.HELP_H, self.HELP_W, self.HELP_Y, self.HELP_X)
self.help_pan = curses.panel.new_panel(self.help_win)
self.help_win.bkgd(curses.color_pair(4) + curses.A_REVERSE)
self.help_win.addstr(0, 1, "Q", curses.A_BOLD + curses.A_STANDOUT)
self.help_win.addstr(0, 2, "uit")
self.help_win.addstr(0, 8, "P", curses.A_BOLD + curses.A_STANDOUT)
self.help_win.addstr(0, 9, "ause")
self.help_win.addstr(0, 16, "S", curses.A_BOLD + curses.A_STANDOUT)
self.help_win.addstr(0, 17, "ave")
def draw_players_win(self):
"""Draw players window"""
self.stdscr.addstr(self.PLAYERS_Y - 1, self.PLAYERS_X + 1, "Players", curses.A_BOLD)
self.players_win = curses.newwin(self.PLAYERS_H, self.PLAYERS_W, self.PLAYERS_Y, self.PLAYERS_X)
self.players_win.box()
self.players_pan = curses.panel.new_panel(self.players_win)
players_lines = ["Name",
"User ID",
"Bot ID",
"Elo",
"Position",
"Life",
"Mine count",
"Gold",
"Spawn pos",
"Crashed"]
self.players_win.vline(1, 11, curses.ACS_VLINE, self.PLAYERS_H-2)
self.players_win.vline(1, 29, curses.ACS_VLINE, self.PLAYERS_H-2)
self.players_win.vline(1, 47, curses.ACS_VLINE, self.PLAYERS_H-2)
self.players_win.addch(0, 11, curses.ACS_TTEE)
self.players_win.addch(0, 29, curses.ACS_TTEE)
self.players_win.addch(0, 47, curses.ACS_TTEE)
self.players_win.addch(self.PLAYERS_H-1, 11, curses.ACS_BTEE)
self.players_win.addch(self.PLAYERS_H-1, 29, curses.ACS_BTEE)
self.players_win.addch(self.PLAYERS_H-1, 47, curses.ACS_BTEE)
y = 0
for line in players_lines:
self.players_win.addstr(y+1, 1, line, curses.A_BOLD)
if y < len(players_lines)*2 - 2:
self.players_win.hline(y + 2, 1, curses.ACS_HLINE, self.PLAYERS_W - 2)
self.players_win.addch(y + 2, 0, curses.ACS_LTEE)
self.players_win.addch(y + 2, 11, curses.ACS_PLUS)
self.players_win.addch(y + 2, 29, curses.ACS_PLUS)
self.players_win.addch(y + 2, 47, curses.ACS_PLUS)
self.players_win.addch(y + 2, self.PLAYERS_W-1, curses.ACS_RTEE)
y += 2
def draw_time_win(self):
"""Draw time line"""
self.TIME_W = self.LOG_W + self.MAP_W + 4
self.stdscr.addstr(self.TIME_Y - 1, self.TIME_X + 1, "Time line", curses.A_BOLD)
self.time_win = curses.newwin(3, self.TIME_W, self.TIME_Y, self.TIME_X)
self.time_pan = curses.panel.new_panel(self.time_win)
self.time_win.box()
self.time_win.addstr(1, 1, " ", curses.color_pair(4) + curses.A_REVERSE)
def draw_summary_win(self):
"""Draw sumary window"""
self.stdscr.addstr(self.SUMMARY_Y - 1, self.SUMMARY_X + 1, "Games summary", curses.A_BOLD)
self.summary_win = curses.newwin(self.SUMMARY_H, self.SUMMARY_W, self.SUMMARY_Y, self.SUMMARY_X)
self.summary_pan = curses.panel.new_panel(self.summary_win)
self.summary_win.box()
self.summary_win.vline(1, 10, curses.ACS_VLINE, self.SUMMARY_H - 2)
self.summary_win.addch(0, 10, curses.ACS_TTEE)
self.summary_win.addch(self.SUMMARY_H - 1, 10, curses.ACS_BTEE)
for i in range(2, self.SUMMARY_H - 1, 2):
self.summary_win.hline(i, 1, curses.ACS_HLINE, self.SUMMARY_W - 2)
self.summary_win.addch(i, 0, curses.ACS_LTEE)
self.summary_win.addch(i, 10, curses.ACS_PLUS)
self.summary_win.addch(i, self.SUMMARY_W - 1, curses.ACS_RTEE)
self.summary_win.addstr(1, 1, "Played", curses.A_BOLD)
self.summary_win.addstr(3, 1, "Won", curses.A_BOLD)
self.summary_win.addstr(5, 1, "Timed out", curses.A_BOLD)
# MAP ------------------------------------------------------------------
def draw_map(self, board_map, path, heroes):
"""Draw the map"""
board_size = len(board_map)
self.MAP_H = board_size
self.MAP_W = board_size
self.stdscr.addstr(self.MAP_Y - 1, self.MAP_X + 1, "Map ("+str(board_size)+"X"+str(board_size)+")", curses.A_BOLD)
self.stdscr.noutrefresh()
if self.map_win:
x, y = self.map_win.getmaxyx()
if x != board_size + 2 or y != board_size + 2:
# resize the window as needed
self.map_win.erase()
curses.panel.update_panels()
self.map_win.resize(board_size + 2, board_size + 2)
# Time line (Cost cpu time)
self.draw_time_win()
self.map_win.noutrefresh()
else:
self.map_win.erase()
self.map_win.noutrefresh()
else:
# map doesn't exist
self.map_win = curses.newwin(board_size + 2, board_size + 2, self.MAP_Y, self.MAP_X)
self.map_pan = curses.panel.new_panel(self.map_win)
# Time line (Cost cpu time)
self.draw_time_win()
curses.panel.update_panels()
self.map_win.box()
# highlight choosen path
if path is None:
path = []
for cell in path:
self.map_win.addch(cell[0]+1, cell[1] + 1, curses.ACS_BULLET, curses.color_pair(3) + curses.A_BOLD)
# Draw map content
y = 0
for line in board_map:
x = 0
for char in line:
attr = 0
if char == "#":
attr = 0
char = curses.ACS_CKBOARD
elif char == "$":
attr = curses.A_BOLD + curses.color_pair(4)
elif char == "T":
attr = curses.A_BOLD + curses.color_pair(5)
elif char == "H":
# default bot color is BLUE
attr = curses.A_BOLD + curses.color_pair(6)
for hero in heroes:
# Select color for bot (cost cpu time)
if hero.pos == (y, x):
if hero.bot_id == 1:
attr = curses.A_BOLD + curses.color_pair(6)
elif hero.bot_id == 2:
attr = curses.A_BOLD + curses.color_pair(8)
elif hero.bot_id == 3:
attr = curses.A_BOLD + curses.color_pair(9)
elif hero.bot_id == 4:
attr = curses.A_BOLD + curses.color_pair(10)
elif char == "@":
attr = curses.A_BOLD + curses.color_pair(2)
elif char == "X":
attr = curses.A_BOLD + curses.color_pair(7)
if not (char == " " and (y, x in path)):
self.map_win.addch(y + 1, x + 1, char, attr)
x = x + 1
y = y + 1
# / Draw window --------------------------------------------------------
# Diplay functions -----------------------------------------------------
# Following methods are used to display data at
# the good place. Names are explicit.
def display_heroes(self, heroes, bot_id):
x = 12
gold_winner = ""
max_gold = 0
gold_pos = 0
mine_winner = ""
max_mine = 0
mine_pos = 0
for hero in heroes:
if hero.bot_id != bot_id:
for i in range(1, 21, 2):
| |
+ str(self.write_log_update)+ '\n')
fw.write('random init: ' + str(self.random_init)+ '\n')
fw.write('seed: ' + str(self.seed)+ '\n')
fw.write('\n\n\n')
fw.write('Force config: ' + '\n')
for item in self.adv_forces:
fw.write(" ".join(map(str, item)))
fw.write('\n')
fw.write('\n')
for key,val in self.idx_to_action.items():
fw.write(str(key) + ':' + val)
fw.write('\n')
fw.write('\n')
def initialize_time(self, control_freq):
self.cur_time =0
self.model_timestep = self.sim.model.opt.timestep
if self.model_timestep<=0:
raise ValueError('xml model defined non-positive time step')
self.control_freq = control_freq
if control_freq<=0:
raise ValueError('control frequency is invalid')
self.control_timestep = 1./control_freq
    def _load_model(self):
        """Load the MuJoCo model. No-op hook; subclasses are expected to override."""
        pass
    def _get_reference(self):
        """Set up references into the simulation. No-op hook; subclasses override."""
        pass
def current_fixed_center(self):
center_point = [0, 0]#(-1, -1)
if self.count_to_100 != 50:
center_point[0] = self.min_coord + self.current_coord_offset_X
center_point[1] = self.min_coord + self.current_coord_offset_Y
else:
self.count_to_100 = 0
center_point[0] = self.min_coord + self.current_coord_offset_X
center_point[1] = self.min_coord + self.current_coord_offset_Y
mean_reward = sum(self.post_reward_list[1:50])/len(self.post_reward_list[1:50])
self.post_reward_list = []
self.R_table[center_point[0] - self.min_coord, center_point[1] - self.min_coord] = mean_reward
print("current_center_point: {}".format(center_point))
print("corresponding reward: {}".format(mean_reward))
f = open('gt_center_avg_reward.txt', mode='a', encoding='utf-8')
f.write(str(center_point))
f.write("\n")
f.write(str(mean_reward))
f.write("\n")
f.close()
if self.current_coord_offset_X + 10 >= self.range_ - 1:
if self.current_coord_offset_Y + 10 >= self.range_ - 1:
self.stop_training = True
f = open('gt_center_avg_reward.txt', mode='a', encoding='utf-8')
f.write("EOT")
f.close()
return center_point
self.current_coord_offset_X = 0
self.current_coord_offset_Y += 10
else:
self.current_coord_offset_X += 10
self.count_to_100 += 1
return (center_point[0], center_point[1])
    def get_ik_param0(self, object_xml):
        """Render the scene from the fixed top-down camera, run the grasp
        predictor on the cropped frame, and derive IK grasp parameters.

        Args:
            object_xml: file name of the object model passed to the predictor.

        Returns:
            tuple (grasp_pt, new_quat, rot_image, patch_Is_resized,
            fc8_prediction): 3D grasp point, grasp quaternion, image patch
            rotated/cropped around the predicted center, resized patches
            and raw fc8 network output.
        """
        print(colored('Init scene with top-down camera', 'red'))
        path = xml_path_completion('Baxter/baxter/master1.xml')
        model = load_model_from_path(path)
        # random initialize model position/quaternion/size
        sim = MjSim(model)
        viewer = MjViewer(sim)
        # Render from the first fixed camera defined in the model.
        viewer.cam.fixedcamid = 0
        viewer.cam.type = const.CAMERA_FIXED
        sim.step()
        # viewer.render()
        frame = viewer._read_pixels_as_in_window()
        h, w = frame.shape[:2]
        imageio.imwrite(image_path_completion('ori.jpg'), frame)
        # Center-crop the frame to a square of side h.
        crop_frame = frame[:, int((w - h) / 2):int((w - h) / 2) + h, :]
        crop_h, crop_w = crop_frame.shape[:2]
        self.crop_h_ = crop_h
        self.crop_w_ = crop_w
        imageio.imwrite(image_path_completion('crop.jpg'), crop_frame)
        #max point
        # Both branches differ only in the network handle (G vs G_filter)
        # and the extra was_valid kwarg on the default-filtering path.
        if self.default_filtering:
            (center_pt, R, grasp_angle, patch_Is_resized, fc8_prediction, self.fc8_norms, R_spec) = \
                predict_from_img(None, self.post_reward, self.num_samples, self.training_R_table_ground_truth,
                                 image_path_completion('crop.jpg'), object_xml, self.G, self.total_steps,
                                 self.log_name, is_train=self.train_pro, opt=0, was_valid=self.is_valid_sample)
        else:
            (center_pt, R, grasp_angle, patch_Is_resized, fc8_prediction, self.fc8_norms, R_spec) = \
                predict_from_img(None, self.post_reward, self.num_samples, self.training_R_table_ground_truth,
                                 image_path_completion('crop.jpg'), object_xml, self.G_filter, self.total_steps,
                                 self.log_name, is_train=self.train_pro, opt=0)
        self.min_coord = R_spec[1]
        # Lazily create the reward table the first time through.
        if not self.is_once_created_R_table:
            self.range_ = R_spec[0]
            value = np.empty((), dtype=object)
            value[()] = (-1.0, -1.0) # reward - angle pairs
            self.R_table = np.full([self.range_, self.range_], value, dtype=object)
        # Ground-truth training overrides the predicted center with a fixed
        # grid-sweep point.
        if self.training_R_table_ground_truth:
            center_pt = self.current_fixed_center()
        self.center_pt = center_pt
        # Quaternion for a rotation of -grasp_angle about the z axis.
        quat = np.array([np.cos(grasp_angle / 2), 0, 0, -np.sin(grasp_angle / 2)])
        # Pixel-to-meter conversion factor for the top-down camera.
        coeff = 0.0020
        # Per-object grasp heights (meters); default covers remaining objects.
        if self.object_xml=='half-nut.xml':
            grasp_z = 0.858
        elif self.object_xml=='cube.xml':
            grasp_z = 0.8
        else:
            grasp_z = 0.86
        # Map the pixel center to world coordinates (image origin offset 0.8 m in x).
        grasp_pt = (0.8 - crop_w / 2 * coeff + center_pt[0] * coeff, 0 + crop_h / 2 * coeff - center_pt[1] * coeff, grasp_z)
        print('grasping point: ', grasp_pt)
        new_quat = np.empty(4)
        # NOTE(review): mju_mulQuat multiplies two quaternions, but the second
        # argument here is a 3-vector [0, 0, 1] — confirm this is intended.
        functions.mju_mulQuat(new_quat, quat, np.array([0.0, 0.0, 1.0]))
        # original: provide adv action predictor input
        # Crop a (2*siz)x(2*siz) patch around the predicted center after
        # rotating the frame by the grasp angle.
        siz = 112
        rot_image = imutils.rotate(crop_frame, grasp_angle * 180 /np.pi)
        rot_image = rot_image[int(center_pt[1]- siz):int(center_pt[1] + siz),
                    int(center_pt[0]- siz):int(center_pt[0] + siz), :]
        # Tear down the GLFW window to avoid leaking GL contexts.
        if viewer is not None:
            glfw.destroy_window(viewer.window)
            viewer = None
        # NOTE(review): min_coord was already assigned above; this repeat is redundant.
        self.min_coord = R_spec[1]
        return (grasp_pt, new_quat, rot_image, patch_Is_resized, fc8_prediction)
    def get_ik_param1(self, sim_with_robot, gripper_pos):
        """Re-render the (lifted) scene top-down and update the R-table
        around the gripper location.

        The object's current pose is copied from *sim_with_robot* into the
        XML, the scene is re-rendered, the four gripper corner positions are
        projected into crop-image pixels, and ``num_samples_spot`` pixel
        locations sampled around their centroid are scored by the predictor
        and written into ``self.R_table``.

        Args:
            sim_with_robot: MjSim holding the current object pose.
            gripper_pos: four world-space (x, y, ...) gripper corner points.
        """
        print(colored('Init scene with top-down camera (lifted)', 'red'))
        self.save_R_table_down = True
        Obj_id = sim_with_robot.model.body_name2id('object0')
        # Freeze the object's simulated pose into the XML before re-rendering.
        self._reset_xml_pos_quat(sim_with_robot.data.body_xpos[Obj_id], sim_with_robot.data.body_xquat[Obj_id])
        print(colored('Init scene with top-down camera-second', 'red'))
        path = xml_path_completion('Baxter/baxter/master1.xml')
        model = load_model_from_path(path)
        sim = MjSim(model) #contrain info of Cam coords, direction
        viewer = MjViewer(sim)
        viewer.cam.fixedcamid = 0
        viewer.cam.type = const.CAMERA_FIXED
        sim.step()
        # viewer.render()
        frame = viewer._read_pixels_as_in_window()
        h, w = frame.shape[:2]
        imageio.imwrite(image_path_completion('ori1.jpg'), frame)
        # Center-crop to a square of side h.
        crop_frame = frame[:, int((w - h) / 2):int((w - h) / 2) + h, :]
        crop_h, crop_w = crop_frame.shape[:2]
        self.crop_h_ = crop_h
        self.crop_w_ = crop_w
        imageio.imwrite(image_path_completion('crop1.jpg'), crop_frame)
        # Pixel-to-meter factor; inverse of the mapping used in get_ik_param0.
        coeff = 0.0020
        X = []
        Y = []
        # Project each world-space gripper corner into crop-image pixels.
        gx1 = int((gripper_pos[0][0] - 0.8 + (crop_w / 2) * coeff) / coeff)
        gy1 = int((crop_h / 2 * coeff - gripper_pos[0][1]) / coeff)
        gx2 = int((gripper_pos[1][0] - 0.8 + (crop_w / 2) * coeff) / coeff)
        gy2 = int((crop_h / 2 * coeff - gripper_pos[1][1]) / coeff)
        gx3 = int((gripper_pos[2][0] - 0.8 + (crop_w / 2) * coeff) / coeff)
        gy3 = int((crop_h / 2 * coeff - gripper_pos[2][1]) / coeff)
        gx4 = int((gripper_pos[3][0] - 0.8 + (crop_w / 2) * coeff) / coeff)
        gy4 = int((crop_h / 2 * coeff - gripper_pos[3][1]) / coeff)
        gripper_pos = [(gx1, gy1), (gx2, gy2), (gx3, gy3), (gx4, gy4)]
        # Centroid of the four corners in pixel space.
        x = (gx1 +gx2 +gx3 +gx4)/4
        y = (gy1 +gy2 +gy3 +gy4)/4
        # Gaussian-sample candidate pixels around the centroid (sigma = 9 px).
        for looper in range(self.num_samples_spot):
            X.append(int(np.random.normal(x, 9, 1)[0]))
            Y.append(int(np.random.normal(y, 9, 1)[0]))
        self.R_table_update_info = [None, X, Y, None, None]
        print(colored("R table update init info: {}".format(self.R_table_update_info)))
        #max point
        # Branches differ only in the network handle (G vs G_filter).
        if self.default_filtering:
            (patch_Is_resized, fc8_prediction, R_spec, R_table_update_info) = \
                predict_from_img(gripper_pos, self.post_reward, self.num_samples_spot,
                                 self.training_R_table_ground_truth,
                                 image_path_completion('crop1.jpg'), self.object_xml,
                                 self.G, self.total_steps, self.log_name,
                                 is_train=False, opt=1, update_info=self.R_table_update_info)
        else:
            (patch_Is_resized, fc8_prediction, R_spec, R_table_update_info) = \
                predict_from_img(gripper_pos, self.post_reward, self.num_samples_spot,
                                 self.training_R_table_ground_truth,
                                 image_path_completion('crop1.jpg'), self.object_xml,
                                 self.G_filter, self.total_steps, self.log_name,
                                 is_train=False, opt=1, update_info=self.R_table_update_info)
        print(colored("fc8_predictions: {}".format(fc8_prediction), "yellow"))
        self.min_coord = R_spec[1]
        # Lazily create the reward table the first time through.
        if not self.is_once_created_R_table:
            self.range_ = R_spec[0]
            value = np.empty((), dtype=object)
            value[()] = (-1.0, -1.0) # reward - angle pairs
            self.R_table = np.full([self.range_, self.range_], value, dtype=object)
        # Update R_table by 128 random patches
        if not self.training_R_table_ground_truth:
            for looper in range(self.num_samples_spot):
                r_raw = (R_table_update_info[0][looper])[0]
                # Squash the raw score to (0, 1) with a sigmoid.
                r = 1 / (1 + np.exp(-r_raw))
                a = (R_table_update_info[0][looper])[1]
                # Clamp indices so they stay inside the table.
                # NOTE(review): 285/284 look hard-coded for a specific
                # table size — presumably equal to self.range_; confirm.
                if R_table_update_info[2][looper] - self.min_coord >= 285:
                    R_table_update_info[2][looper] = 284 + self.min_coord
                if R_table_update_info[1][looper] - self.min_coord >= 285:
                    R_table_update_info[1][looper] = 284 + self.min_coord
                self.R_table[R_table_update_info[2][looper] - self.min_coord,
                             R_table_update_info[1][looper] - self.min_coord] = (r, a)
        self.R_table_update_info = R_table_update_info
        # Tear down the GLFW window to avoid leaking GL contexts.
        if viewer is not None:
            glfw.destroy_window(viewer.window)
            viewer = None
        return
    def get_ik_param2(self, sim_with_robot):
        """Re-render the scene top-down and refresh ``self.R_table2``.

        Like :meth:`get_ik_param1` but without gripper projection: the
        predictor is re-run (opt=2) using the previously stored
        ``R_table_update_info`` sample locations, and the scores are written
        into the secondary table ``R_table2``.

        Args:
            sim_with_robot: MjSim holding the current object pose.
        """
        self.save_R_table_down = True
        Obj_id = sim_with_robot.model.body_name2id('object0')
        # Freeze the object's simulated pose into the XML before re-rendering.
        self._reset_xml_pos_quat(sim_with_robot.data.body_xpos[Obj_id], sim_with_robot.data.body_xquat[Obj_id])
        print(colored('Init scene with top-down camera-second', 'red'))
        path = xml_path_completion('Baxter/baxter/master1.xml')
        model = load_model_from_path(path)
        sim = MjSim(model) #contrain info of Cam coords, direction
        viewer = MjViewer(sim)
        viewer.cam.fixedcamid = 0
        viewer.cam.type = const.CAMERA_FIXED
        sim.step()
        # viewer.render()
        frame = viewer._read_pixels_as_in_window()
        h, w = frame.shape[:2]
        imageio.imwrite(image_path_completion('ori2.jpg'), frame)
        # Center-crop to a square of side h.
        crop_frame = frame[:, int((w - h) / 2):int((w - h) / 2) + h, :]
        crop_h, crop_w = crop_frame.shape[:2]
        self.crop_h_ = crop_h
        self.crop_w_ = crop_w
        imageio.imwrite(image_path_completion('crop2.jpg'), crop_frame)
        # Branches differ only in the network handle (G vs G_filter).
        if self.default_filtering:
            (R_spec, R_table_update_info) = predict_from_img(None, self.post_reward, self.num_samples_spot,
                                                             self.training_R_table_ground_truth,
                                                             image_path_completion('crop2.jpg'),
                                                             self.object_xml, self.G,
                                                             self.total_steps, self.log_name,
                                                             is_train=False, opt=2,
                                                             update_info=self.R_table_update_info)
        else:
            (R_spec, R_table_update_info) = predict_from_img(None, self.post_reward, self.num_samples_spot,
                                                             self.training_R_table_ground_truth,
                                                             image_path_completion('crop2.jpg'),
                                                             self.object_xml, self.G_filter,
                                                             self.total_steps, self.log_name,
                                                             is_train=False, opt=2,
                                                             update_info=self.R_table_update_info)
        self.min_coord = R_spec[1]
        # Lazily create the secondary reward table on first use.
        if not self.is_once_created_R_table2:
            self.range_ = R_spec[0]
            value = np.empty((), dtype=object)
            value[()] = (-1.0, -1.0) # reward - angle pairs
            self.R_table2 = np.full([self.range_, self.range_], value, dtype=object)
            self.is_once_created_R_table2 = True
        # Update R_table by 128 random patches
        if not self.training_R_table_ground_truth:
            for looper in range(self.num_samples_spot):
                r_raw = (R_table_update_info[0][looper])[0]
                # Squash the raw score to (0, 1) with a sigmoid.
                r = 1/(1 + np.exp(-r_raw))
                a = (R_table_update_info[0][looper])[1]
                # NOTE(review): unlike get_ik_param1, indices are NOT clamped
                # here before indexing R_table2 — confirm out-of-range samples
                # cannot occur on this path.
                self.R_table2[R_table_update_info[2][looper] - self.min_coord,
                              R_table_update_info[1][looper] - self.min_coord] = (r, a)
        # Tear down the GLFW window to avoid leaking GL contexts.
        if viewer is not None:
            glfw.destroy_window(viewer.window)
            viewer = None
        return
def _reset_xml(self, object_xml):
# reset master.xml
tree = ET.parse(xml_path_completion("Baxter/baxter/master_tmp.xml"))
root = tree.getroot()
root[3].attrib['file'] = object_xml
tree.write(xml_path_completion("Baxter/baxter/master.xml"))
# reset master1.xml
tree = ET.parse(xml_path_completion("Baxter/baxter/master1_tmp.xml"))
root = tree.getroot()
root[3].attrib['file'] = object_xml
tree.write(xml_path_completion("Baxter/baxter/master1.xml"))
def _reset_xml_pos_quat(self, pos, quat):
object_xml = self.object_xml
tree = ET.parse(xml_path_completion('Baxter/baxter/{}'.format(object_xml)))
root = tree.getroot()
cube_x_new = pos[0]
cube_y_new = pos[1]
quat_vec = [i for i in quat if i != '']
body_xquat = [float(i) for i in quat_vec]
root[-1][0].attrib['pos'] = ' '.join([str(i) for i in [cube_x_new, cube_y_new, pos[2]]])
root[-1][0].attrib['quat'] = ' '.join([str(i) for i in body_xquat])
tree.write(xml_path_completion('Baxter/baxter/tmp.xml'))
tree = ET.parse(xml_path_completion('Baxter/baxter/master1_tmp.xml'))
root = tree.getroot()
root[3].attrib['file'] = 'tmp.xml'
tree.write(xml_path_completion('Baxter/baxter/master1.xml'))
    # change object, override object parameters on the fly
    def _reset_xml2(self, object_xml):
        """Randomize the object's pose in its XML and retarget both scene files.

        Samples a new (x, y) position within a box around the object's
        template position and a yaw perturbation in [-45, 45] degrees,
        writes the randomized object as ``tmp.xml`` and points both
        ``master.xml`` and ``master1.xml`` at it.  The order of the three
        np.random draws is part of the reproducible behavior.
        """
        tree = ET.parse(xml_path_completion('Baxter/baxter/{}'.format(object_xml)))
        root = tree.getroot()
        # randomize
        pos_vec = root[-1][0].attrib['pos'].strip().split(' ')
        pos_vec = [i for i in pos_vec if i!='']
        cube_pos = [float(i) for i in pos_vec]
        # Asymmetric x box (-0.1/+0.05), symmetric y box (+/-0.1) around the template position.
        cube_pos_x_low = cube_pos[0] - 0.1
        cube_pos_x_high = cube_pos[0] + 0.05
        cube_pos_y_low = cube_pos[1] - 0.1
        cube_pos_y_high = cube_pos[1] + 0.1
        cube_x_new = np.random.uniform(cube_pos_x_low, cube_pos_x_high) #(cube_pos_x_high + cube_pos_x_low) / 2#
        cube_y_new = np.random.uniform(cube_pos_y_low, cube_pos_y_high) #(cube_pos_y_high + cube_pos_y_low) / 2#
        #cube_x_new = (cube_pos_x_high + cube_pos_x_low) / 2#
        #cube_y_new = (cube_pos_y_high + cube_pos_y_low) / 2#
        root[-1][0].attrib['pos'] = ' '.join([str(i) for i in [cube_x_new, cube_y_new, cube_pos[2]]])
        quat_vec = root[-1][0].attrib['quat'].strip().split(' ')
        quat_vec =[i for i in quat_vec if i!='']
        body_xquat = [float(i) for i in quat_vec]
        #Randomizing part
        # Random yaw in radians; compose it with the template quaternion.
        radian = np.random.uniform(low= -45, high=45) * np.pi/180
        new_quat = np.empty(4)
        functions.mju_mulQuat(new_quat, np.array([np.cos(radian/2), 0., 0., np.sin(radian/2)]), np.array(body_xquat))
        root[-1][0].attrib['quat'] = ' '.join([str(i) for i in new_quat])
        tree.write(xml_path_completion('Baxter/baxter/tmp.xml'))
        # Point both master scene templates at the randomized copy.
        tree = ET.parse(xml_path_completion('Baxter/baxter/master_tmp.xml'))
        root = tree.getroot()
        root[3].attrib['file'] = 'tmp.xml'
        tree.write(xml_path_completion('Baxter/baxter/master.xml'))
        tree = ET.parse(xml_path_completion('Baxter/baxter/master1_tmp.xml'))
        root = tree.getroot()
        root[3].attrib['file'] = 'tmp.xml'
        tree.write(xml_path_completion('Baxter/baxter/master1.xml'))
def | |
<reponame>hhugo/General
import itertools
import functools
import os
import sys
import textwrap
max_arity = 6
def generate(*element, **kwds):
    """Print the nested *element* line structure to ``kwds["file"]``.

    Lines are flattened via :func:`indent` starting at ``kwds["indent"]``
    levels (default 0); a single blank line is inserted after each closing
    ``end`` that is followed by a non-``end`` line.
    """
    emit = functools.partial(print, file=kwds.get("file"))
    after_end = False
    for line in indent(element, kwds.get("indent", 0)):
        is_end = line.strip() == "end"
        # Separate a just-closed module from the next item with a blank line.
        if after_end and not is_end:
            emit("")
        emit(line)
        after_end = is_end
def indent(element, levels=1):
    """Recursively flatten *element* into lines, prefixing each string
    with one space per level.

    Strings are yielded directly; any other iterable is walked depth-first
    at the SAME level (nesting does not increase indentation).
    """
    if isinstance(element, str):
        yield " " * levels + element
        return
    for child in element:
        yield from indent(child, levels)
class Facets:
    """Descriptor for one "facet": a bundle of related OCaml module
    signatures and implementations emitted by this generator.

    A facet aggregates base facets, plain values, extensions and test
    material, and renders OCaml source as nested line structures (strings
    and iterables of strings) consumed by ``generate``/``indent``.
    """
    def __init__(
        self, *,
        prefix, name,
        variadic,
        bases, values, extensions,
        test_examples, test_requirements,
    ):
        self.prefix = prefix
        self.name = name
        self.bases = list(bases)
        # A variadic facet's arity is capped both by the module-level
        # max_arity and by every base facet's own max arity; a
        # non-variadic facet is always unary.
        self.max_arity = min(itertools.chain([max_arity], (i.max_arity for i in self.bases))) if variadic else 1
        self.arities = list(range(self.max_arity))
        self.non_zero_arities = list(range(1, self.max_arity))
        self.values = list(values)
        self.extensions = list(extensions)
        self.all_items = list(itertools.chain(
            self.values,
            itertools.chain.from_iterable(extension.members for extension in self.extensions),
        ))
        self.operators = [item for item in self.all_items if item.operator is not None]
        self.test_examples = list(test_examples)
        self.test_requirements = list(test_requirements)
    @property
    def graphviz_label(self):
        """Multi-line Graphviz node label: name, then values, then extension members."""
        parts = [self.name]
        if len(self.values) > 0:
            parts.append("")
            parts += [val.name for val in self.values]
        exts = [val.name for extension in self.extensions for val in extension.members]
        if len(exts) > 0:
            parts.append("")
            parts += exts
        # "\\n" is a literal backslash-n: Graphviz's own line separator.
        return "\\n".join(parts)
    @property
    def specification(self):
        """Line structure of the facet's .mli module."""
        return mod_spec(self.name, self.specification_items)
    @property
    def implementation(self):
        """Line structure of the facet's .ml module."""
        return mod_impl(self.name, self.implementation_items)
    @property
    def specification_items(self):
        """Yield the members of the specification module."""
        if self.__has_operators():
            yield self.__operators_specification()
        if self.__is_basic():
            yield self.__basic_specification_items()
        else:
            yield self.__extended_specification_items()
            yield self.__extension_makers_specification_items()
        yield self.__tests_specification()
    @property
    def implementation_items(self):
        """Yield the members of the implementation module."""
        if self.__has_operators():
            yield self.__operators_implementation()
        if self.__is_basic():
            yield self.__basic_implementation_items()
        else:
            yield self.__extended_implementation_items()
            yield self.__extensions_makers_implementation_items()
        yield self.__tests_implementation()
    def __contextualized_name(self, prefix):
        """Facet name, qualified by its prefix when referenced from another prefix."""
        if prefix == self.prefix:
            return self.name
        else:
            return f"{self.prefix}.{self.name}"
    # Operators
    def __has_operators(self):
        """True if this facet or any base contributes infix operators."""
        return self.__has_own_operators() or self.__inherits_operators()
    def __has_own_operators(self):
        return len(self.operators) > 0
    def __inherits_operators(self):
        # NOTE: name-mangled calls like base.__has_operators() resolve because
        # the bases are Facets instances too (same mangled attribute name).
        return any(base.__has_operators() for base in self.bases)
    def __operators_specification(self):
        return mod_spec("Operators", self.__operators_specification_items())
    def __operators_implementation(self):
        return mod_impl("Operators", self.__operators_implementation_items())
    def __operators_specification_items(self):
        yield self.__operators_s0_mod_type()
        if len(self.operators) > 0:
            yield self.__operators_make0_specification()
    def __operators_implementation_items(self):
        yield self.__operators_s0_mod_type()
        if len(self.operators) > 0:
            yield self.__operators_make0_implementation()
    def __operators_s0_mod_type(self):
        return mod_type("S0", self.__operators_s0_mod_type_items())
    def __operators_s0_mod_type_items(self):
        yield "type t"
        for base in self.bases:
            if base.__has_operators():
                yield f"include {base.__contextualized_name(self.prefix)}.Operators.S0 with type t := t"
        for operator in self.operators:
            yield f"val ( {operator.operator} ): { operator.value_type(0, 't')}"
    def __operators_make0_specification(self):
        """Signature of the Make0 functor turning named values into operators."""
        yield "module Make0(M: sig"
        yield "  type t"
        for operator in self.operators:
            yield f"  val {operator.name}: {operator.value_type(0, 't')}"
        yield "end): sig"
        for operator in self.operators:
            yield f"  val ( {operator.operator} ): {operator.value_type(0, 'M.t')}"
        yield "end"
    def __operators_make0_implementation(self):
        """Implementation of the Make0 functor: each operator aliases M.<name>."""
        yield "module Make0(M: sig"
        yield "  type t"
        for operator in self.operators:
            yield f"  val {operator.name}: {operator.value_type(0, f'{type_params(0)}t')}"
        yield "end) = struct"
        for operator in self.operators:
            yield f"  let ( {operator.operator} ) = M.{operator.name}"
        yield "end"
    # Core contents: basic
    def __is_basic(self):
        """True when no extension contributes members (single-tier signatures)."""
        return len(list(itertools.chain.from_iterable(extension.members for extension in self.extensions))) == 0
    def __basic_specification_items(self):
        yield self.__basic_signature_mod_types()
        yield self.__basic_specialize_specifications()
    def __basic_implementation_items(self):
        yield self.__basic_signature_mod_types()
        yield self.__basic_specialize_implementations()
    def __basic_signature_mod_types(self):
        # One module type S0..S{max_arity-1} per supported arity.
        for arity in self.arities:
            yield mod_type(f"S{arity}", self.__basic_signature_mod_type_items(arity))
    def __basic_signature_mod_type_items(self, arity):
        t = f"{type_params(arity)}t"
        yield f"type {t}"
        if arity == 0 and self.__has_operators() and self.__is_basic():
            yield "module O: Operators.S0 with type t := t"
        for base in self.bases:
            # Destructive O substitution only at arity 0 for operator-bearing bases.
            if arity == 0 and base.__has_operators():
                operators_constraint = " and module O := O"
            else:
                operators_constraint = ""
            yield f"include {base.__contextualized_name(self.prefix)}.S{arity} with type {t} := {t}{operators_constraint}"
        for value in self.values:
            yield f"val {value.name}: {value.value_type(arity, t)}"
    def __basic_specialize_specifications(self):
        for arity in self.non_zero_arities:
            functor_params = "".join(f"({a.upper()}: S0)" for a in abcd(arity))
            yield mod_spec(f"Specialize{arity}(M: S{arity}){functor_params}", self.__specialize_specification_items(arity))
    def __basic_specialize_implementations(self):
        for arity in self.non_zero_arities:
            functor_params = "".join(f"({a.upper()}: S0)" for a in abcd(arity))
            yield mod_impl(f"Specialize{arity}(M: S{arity}){functor_params}", self.__basic_specialize_implementation_items(arity))
    def __specialize_specification_items(self, arity):
        # NOTE(review): the trailing commas make these yield 1-tuples rather
        # than strings; `indent` flattens tuples, so the output is unaffected.
        yield f"type t = {type_args(arity)}M.t",
        yield "include S0 with type t := t",
    def __basic_specialize_implementation_items(self, arity):
        yield f"type t = {type_args(arity)}M.t"
        functor_args = "".join(f"({a.upper()})" for a in abcd(arity))
        for base in self.bases:
            yield f"module {base.name}_ = {base.__contextualized_name(self.prefix)}.Specialize{arity}(M){functor_args}"
        if self.__inherits_operators():
            yield mod_impl("O", (f"include {base.name}_.O" for base in self.bases if base.__has_operators()))
        for base in self.bases:
            if base.__has_operators():
                operators_constraint = " and module O := O"
            else:
                operators_constraint = ""
            yield f"include ({base.name}_: {base.__contextualized_name(self.prefix)}.S0 with type t := t{operators_constraint})"
        for value in self.values:
            yield value.value_specialization(arity)
    # Core contents: extended
    def __extended_specification_items(self):
        # Extended facets nest their basic tier inside a Basic submodule.
        yield mod_spec("Basic", self.__basic_specification_items())
        yield self.__extended_signature_mod_types()
        yield self.__extended_specialize_specifications()
    def __extended_implementation_items(self):
        yield mod_impl("Basic", self.__basic_implementation_items())
        yield self.__extended_signature_mod_types()
        yield self.__extended_specialize_implementations()
    def __extended_signature_mod_types(self):
        for arity in self.arities:
            yield mod_type(f"S{arity}", self.__extended_signature_mod_type_items(arity))
    def __extended_signature_mod_type_items(self, arity):
        yield f"include Basic.S{arity}"
        if arity == 0:
            yield "module O: Operators.S0 with type t := t"
        for extension in self.extensions:
            for value in extension.members:
                yield f"val {value.name}: {value.value_type(arity, f'{type_params(arity)}t')}"
    def __extended_specialize_specifications(self):
        for arity in self.non_zero_arities:
            functor_params = "".join(f"({a.upper()}: Basic.S0)" for a in abcd(arity))
            yield mod_spec(f"Specialize{arity}(M: S{arity}){functor_params}", self.__specialize_specification_items(arity))
    def __extended_specialize_implementations(self):
        for arity in self.non_zero_arities:
            functor_params = "".join(f"({a.upper()}: Basic.S0)" for a in abcd(arity))
            yield mod_impl(f"Specialize{arity}(M: S{arity}){functor_params}", self.__extended_specialize_implementation_items(arity))
    def __extended_specialize_implementation_items(self, arity):
        functor_args = "".join(f"({a.upper()})" for a in abcd(arity))
        # Self collects the specialized members so Operators.Make0 can wrap them.
        yield mod_impl("Self",
            f"include Basic.Specialize{arity}(M){functor_args}",
            (value.value_specialization(arity) for extension in self.extensions for value in extension.members)
        )
        yield "module O = Operators.Make0(Self)"
        yield "include Self"
    # Extension makers
    def __extension_makers_specification_items(self):
        for extension in self.extensions:
            yield mod_spec(extension.name, extension.extension_makers_specification(self.arities))
    def __extensions_makers_implementation_items(self):
        for extension in self.extensions:
            yield mod_impl(f"{extension.name}_", extension.extension_makers_implementation(self.arities))
    # Tests
    def __tests_specification(self):
        yield mod_spec("Tests", self.__tests_specification_items())
    def __tests_implementation(self):
        yield mod_impl("Tests_", self.__tests_implementation_items())
    def __tests_specification_items(self):
        yield self.__tests_examples_specification()
        yield mod_spec("Testable", self.__tests_testable_items())
        yield self.__tests_makers_specifications()
    def __tests_implementation_items(self):
        yield self.__tests_examples_implementation()
        yield mod_impl("Testable", self.__tests_testable_items())
        yield self.__tests_makers_implementations()
    def __tests_examples_specification(self):
        yield mod_spec("Examples", self.__tests_examples_items())
    def __tests_examples_implementation(self):
        yield mod_impl("Examples", self.__tests_examples_items())
    def __tests_examples_items(self):
        # Element describes the per-parameter example type; only needed
        # when the facet actually supports arity > 1.
        if self.max_arity > 1:
            yield mod_type("Element", self.__tests_examples_element_mod_type_items())
        for arity in self.arities:
            yield mod_type(f"S{arity}", self.__tests_examples_mod_type_items(arity))
    def __tests_examples_element_mod_type_items(self):
        yield "type t"
        basic = "" if self.__is_basic() else "Basic."
        yield f"include {basic}S0 with type t := t"
        for req in self.test_requirements:
            basic = "" if req.__is_basic() else "Basic."
            yield f"include {req.name}.{basic}S0 with type t := t"
    def __tests_examples_mod_type_items(self, arity):
        yield f"type {type_params(arity)}t"
        for a in abcd(arity):
            yield f"module {a.upper()}: Element"
        for base in self.bases:
            yield (
                f"include {base.__contextualized_name(self.prefix)}.Tests.Examples.S{arity} with type {type_params(arity)}t := {type_params(arity)}t"
                + "".join(f" and module {a.upper()} := {a.upper()}" for a in abcd(arity))
            )
        t = f"{type_args(arity)}t"
        for item in self.test_examples:
            yield f"val {item.name}: {item.value_type(0, t)}"
    def __tests_testable_items(self):
        for arity in self.arities:
            yield mod_type(f"S{arity}", self.__tests_testable_mod_type_items(arity))
    def __tests_testable_mod_type_items(self, arity):
        yield f"include S{arity}"
        for req in self.test_requirements:
            basic = "" if req.__is_basic() else "Basic."
            yield f"include {req.__contextualized_name(self.prefix)}.{basic}S{arity} with type {type_params(arity)}t := {type_params(arity)}t"
    def __tests_makers_specifications(self):
        for arity in self.arities:
            yield mod_spec(
                f"Make{arity}(M: Testable.S{arity})(E: Examples.S{arity} with type {type_params(arity)}t := {type_params(arity)}M.t)",
                "val test: Test.t",
            )
    def __tests_makers_implementations(self):
        yield mod_impl(
            "MakeMakers(MakeExamples: functor (M: Testable.S0) -> functor (E: Examples.S0 with type t := M.t) -> Examples.S0 with type t := M.t)(MakeTests: functor (M: Testable.S0) -> functor (E: Examples.S0 with type t := M.t) -> sig val tests: Test.t list end)",
            self.__tests_makers_implementations_items(),
        )
    def __tests_makers_implementations_items(self):
        yield mod_impl(
            "Make0(M: Testable.S0)(E: Examples.S0 with type t := M.t)",
            self.__tests_make0_implementation_items(),
        )
        for arity in self.non_zero_arities:
            yield mod_impl(
                f"Make{arity}(M: Testable.S{arity})(E: Examples.S{arity} with type {type_params(arity)}t := {type_params(arity)}M.t)",
                self.__tests_maker_implementation_items(arity),
            )
    def __tests_make0_implementation_items(self):
        yield "open Testing"
        yield "module E = MakeExamples(M)(E)"
        yield f'let test = "{self.name}" >:: ['
        for base in self.bases:
            yield f"  (let module T = {base.__contextualized_name(self.prefix)}.Tests.Make0(M)(E) in T.test);"
        yield "] @ (let module T = MakeTests(M)(E) in T.tests)"
    def __tests_maker_implementation_items(self, arity):
        # Higher arities delegate to Make0 over a specialized structure.
        # NOTE(review): the trailing commas again yield 1-tuples; harmless
        # because `indent` flattens them.
        yield "include Make0(struct",
        functor_args = "".join(f"(E.{a.upper()})" for a in abcd(arity))
        yield indent(f"include Specialize{arity}(M){functor_args}"),
        for req in self.test_requirements:
            basic = "" if req.__is_basic() else "Basic."
            yield indent(f"include ({req.__contextualized_name(self.prefix)}.{basic}Specialize{arity}(M){functor_args}: {req.__contextualized_name(self.prefix)}.{basic}S0 with type t := t)")
        yield "end)(E)",
def mod_spec(name, *items):
    """Yield the lines of an OCaml module specification: `module <name>: sig ... end`."""
    yield "module {}: sig".format(name)
    yield indent(items)
    yield "end"
def mod_impl(name, *items):
    """Yield the lines of an OCaml module implementation: `module <name> = struct ... end`."""
    yield "module {} = struct".format(name)
    yield indent(items)
    yield "end"
def mod_type(name, *items):
    """Yield the lines of an OCaml module type: `module type <name> = sig ... end`."""
    yield "module type {} = sig".format(name)
    yield indent(items)
    yield "end"
def type_params(arity):
    """OCaml type-parameter prefix for *arity*: "", "'a ", or "('a, 'b, ...) "."""
    if arity == 0:
        return ""
    if arity == 1:
        return "'a "
    names = ", ".join("'" + letter for letter in abcd(arity))
    return "({}) ".format(names)
def type_args(arity):
    """OCaml type-argument prefix for *arity*: "", "A.t ", or "(A.t, B.t, ...) "."""
    if arity == 0:
        return ""
    if arity == 1:
        return "A.t "
    names = ", ".join(letter.upper() + ".t" for letter in abcd(arity))
    return "({}) ".format(names)
def abcd(arity):
    """Return the first *arity* letters used as type-variable names (capped at 12)."""
    return [letter for letter in "abcdefghijkl"[:arity]]
class Extension:
    def __init__(self, *, name, members, requirements, basic_production):
        """Keyword-only: *members* are the values this extension adds,
        *requirements* the values its maker needs from the host module,
        and *basic_production* extra values the maker also produces."""
        self.__name = name
        self.__members = list(members)
        self.__requirements = list(requirements)
        self.__basic_production = list(basic_production)
    @property
    def name(self):
        """Extension name (read-only)."""
        return self.__name
    @property
    def members(self):
        """Defensive copy of the member values contributed by this extension."""
        return list(self.__members)
    def extension_makers_specification(self, arities):
        """Yield OCaml `module Make<arity>` functor signatures, one per arity.

        Each functor takes a module providing this extension's requirements
        and exposes the extension's members plus its basic production.
        """
        for arity in arities:
            yield f"module Make{arity}(M: sig"
            yield f"  type {type_params(arity)}t"
            for requirement in self.__requirements:
                yield f"  val {requirement.name}: {requirement.value_type(arity, f'{type_params(arity)}t')}"
            yield "end): sig"
            for value in itertools.chain(self.members, self.__basic_production):
                yield f"  val {value.name}: {value.value_type(arity, f'{type_params(arity)}M.t')}"
            yield "end"
def extension_makers_implementation(self, arities):
yield "module MakeMakers(Implementation: | |
<reponame>TrucHLe/python-contact-center-insights<gh_stars>0
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.protobuf import duration_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
# Registry of every message type defined in this module; the proto-plus
# runtime uses this manifest to generate and expose the wrapper classes
# listed below under the given proto package.
__protobuf__ = proto.module(
    package="google.cloud.contactcenterinsights.v1",
    manifest={
        "Conversation",
        "Analysis",
        "ConversationDataSource",
        "GcsSource",
        "DialogflowSource",
        "AnalysisResult",
        "IssueModelResult",
        "ConversationLevelSentiment",
        "IssueAssignment",
        "CallAnnotation",
        "AnnotationBoundary",
        "Entity",
        "Intent",
        "PhraseMatchData",
        "DialogflowIntent",
        "InterruptionData",
        "SilenceData",
        "HoldData",
        "EntityMentionData",
        "IntentMatchData",
        "SentimentData",
        "IssueModel",
        "Issue",
        "IssueModelLabelStats",
        "PhraseMatcher",
        "PhraseMatchRuleGroup",
        "PhraseMatchRule",
        "PhraseMatchRuleConfig",
        "ExactMatchConfig",
        "Settings",
        "RuntimeAnnotation",
        "AnswerFeedback",
        "ArticleSuggestionData",
        "FaqAnswerData",
        "SmartReplyData",
        "SmartComposeSuggestionData",
        "DialogflowInteractionData",
        "ConversationParticipant",
    },
)
class Conversation(proto.Message):
r"""The conversation resource.
Attributes:
call_metadata (google.cloud.contact_center_insights_v1.types.Conversation.CallMetadata):
Call-specific metadata.
expire_time (google.protobuf.timestamp_pb2.Timestamp):
The time at which this conversation should
expire. After this time, the conversation data
and any associated analyses will be deleted.
ttl (google.protobuf.duration_pb2.Duration):
Input only. The TTL for this resource. If
specified, then this TTL will be used to
calculate the expire time.
name (str):
Immutable. The resource name of the
conversation. Format:
projects/{project}/locations/{location}/conversations/{conversation}
data_source (google.cloud.contact_center_insights_v1.types.ConversationDataSource):
The source of the audio and transcription for
the conversation.
create_time (google.protobuf.timestamp_pb2.Timestamp):
Output only. The time at which the
conversation was created.
update_time (google.protobuf.timestamp_pb2.Timestamp):
Output only. The most recent time at which
the conversation was updated.
start_time (google.protobuf.timestamp_pb2.Timestamp):
The time at which the conversation started.
language_code (str):
A user-specified language code for the
conversation.
agent_id (str):
An opaque, user-specified string representing
the human agent who handled the conversation.
labels (Sequence[google.cloud.contact_center_insights_v1.types.Conversation.LabelsEntry]):
A map for the user to specify any custom
fields. A maximum of 20 labels per conversation
is allowed, with a maximum of 256 characters per
entry.
transcript (google.cloud.contact_center_insights_v1.types.Conversation.Transcript):
Output only. The conversation transcript.
medium (google.cloud.contact_center_insights_v1.types.Conversation.Medium):
Immutable. The conversation medium.
duration (google.protobuf.duration_pb2.Duration):
Output only. The duration of the
conversation.
turn_count (int):
Output only. The number of turns in the
conversation.
latest_analysis (google.cloud.contact_center_insights_v1.types.Analysis):
Output only. The conversation's latest
analysis, if one exists.
runtime_annotations (Sequence[google.cloud.contact_center_insights_v1.types.RuntimeAnnotation]):
Output only. The annotations that were
generated during the customer and agent
interaction.
dialogflow_intents (Sequence[google.cloud.contact_center_insights_v1.types.Conversation.DialogflowIntentsEntry]):
Output only. All the matched Dialogflow
intents in the call. The key corresponds to a
Dialogflow intent, format:
projects/{project}/agent/{agent}/intents/{intent}
"""
    class Medium(proto.Enum):
        r"""Possible media for the conversation."""
        # Default value when the medium was not set.
        MEDIUM_UNSPECIFIED = 0
        PHONE_CALL = 1
        CHAT = 2
    class CallMetadata(proto.Message):
        r"""Call-specific metadata.

        Attributes:
            customer_channel (int):
                The audio channel that contains the customer.
            agent_channel (int):
                The audio channel that contains the agent.
        """
        # Proto field numbers (1, 2) must stay stable for wire compatibility.
        customer_channel = proto.Field(proto.INT32, number=1,)
        agent_channel = proto.Field(proto.INT32, number=2,)
    class Transcript(proto.Message):
        r"""A message representing the transcript of a conversation.
        Attributes:
            transcript_segments (Sequence[google.cloud.contact_center_insights_v1.types.Conversation.Transcript.TranscriptSegment]):
                A list of sequential transcript segments that
                comprise the conversation.
        """
        class TranscriptSegment(proto.Message):
            r"""A segment of a full transcript.
            Attributes:
                text (str):
                    The text of this segment.
                confidence (float):
                    A confidence estimate between 0.0 and 1.0 of
                    the fidelity of this segment. A default value of
                    0.0 indicates that the value is unset.
                words (Sequence[google.cloud.contact_center_insights_v1.types.Conversation.Transcript.TranscriptSegment.WordInfo]):
                    A list of the word-specific information for
                    each word in the segment.
                language_code (str):
                    The language code of this segment as a
                    `BCP-47 <https://www.rfc-editor.org/rfc/bcp/bcp47.txt>`__
                    language tag. Example: "en-US".
                channel_tag (int):
                    For conversations derived from multi-channel
                    audio, this is the channel number corresponding
                    to the audio from that channel. For
                    audioChannelCount = N, its output values can
                    range from '1' to 'N'. A channel tag of 0
                    indicates that the audio is mono.
                segment_participant (google.cloud.contact_center_insights_v1.types.ConversationParticipant):
                    The participant of this segment.
            """
            class WordInfo(proto.Message):
                r"""Word-level info for words in a transcript.
                Attributes:
                    start_offset (google.protobuf.duration_pb2.Duration):
                        Time offset of the start of this word
                        relative to the beginning of the total
                        conversation.
                    end_offset (google.protobuf.duration_pb2.Duration):
                        Time offset of the end of this word relative
                        to the beginning of the total conversation.
                    word (str):
                        The word itself. Includes punctuation marks
                        that surround the word.
                    confidence (float):
                        A confidence estimate between 0.0 and 1.0 of
                        the fidelity of this word. A default value of
                        0.0 indicates that the value is unset.
                """
                # Field numbers identify the wire format; do not renumber.
                start_offset = proto.Field(
                    proto.MESSAGE, number=1, message=duration_pb2.Duration,
                )
                end_offset = proto.Field(
                    proto.MESSAGE, number=2, message=duration_pb2.Duration,
                )
                word = proto.Field(proto.STRING, number=3,)
                confidence = proto.Field(proto.FLOAT, number=4,)
            # TranscriptSegment fields.  Message types referenced by string
            # are resolved lazily by proto-plus; numbers 6-8 are skipped --
            # presumably reserved in the .proto (verify against the schema).
            text = proto.Field(proto.STRING, number=1,)
            confidence = proto.Field(proto.FLOAT, number=2,)
            words = proto.RepeatedField(
                proto.MESSAGE,
                number=3,
                message="Conversation.Transcript.TranscriptSegment.WordInfo",
            )
            language_code = proto.Field(proto.STRING, number=4,)
            channel_tag = proto.Field(proto.INT32, number=5,)
            segment_participant = proto.Field(
                proto.MESSAGE, number=9, message="ConversationParticipant",
            )
        transcript_segments = proto.RepeatedField(
            proto.MESSAGE,
            number=1,
            message="Conversation.Transcript.TranscriptSegment",
        )
call_metadata = proto.Field(
proto.MESSAGE, number=7, oneof="metadata", message=CallMetadata,
)
expire_time = proto.Field(
proto.MESSAGE, number=15, oneof="expiration", message=timestamp_pb2.Timestamp,
)
ttl = proto.Field(
proto.MESSAGE, number=16, oneof="expiration", message=duration_pb2.Duration,
)
name = proto.Field(proto.STRING, number=1,)
data_source = proto.Field(
proto.MESSAGE, number=2, message="ConversationDataSource",
)
create_time = proto.Field(proto.MESSAGE, number=3, message=timestamp_pb2.Timestamp,)
update_time = proto.Field(proto.MESSAGE, number=4, message=timestamp_pb2.Timestamp,)
start_time = proto.Field(proto.MESSAGE, number=17, message=timestamp_pb2.Timestamp,)
language_code = proto.Field(proto.STRING, number=14,)
agent_id = proto.Field(proto.STRING, number=5,)
labels = proto.MapField(proto.STRING, proto.STRING, number=6,)
transcript = proto.Field(proto.MESSAGE, number=8, message=Transcript,)
medium = proto.Field(proto.ENUM, number=9, enum=Medium,)
duration = proto.Field(proto.MESSAGE, number=10, message=duration_pb2.Duration,)
turn_count = proto.Field(proto.INT32, number=11,)
latest_analysis = proto.Field(proto.MESSAGE, number=12, message="Analysis",)
runtime_annotations = proto.RepeatedField(
proto.MESSAGE, number=13, message="RuntimeAnnotation",
)
dialogflow_intents = proto.MapField(
proto.STRING, proto.MESSAGE, number=18, message="DialogflowIntent",
)
class Analysis(proto.Message):
    r"""The analysis resource.
    Attributes:
        name (str):
            Immutable. The resource name of the analysis.
            Format:
            projects/{project}/locations/{location}/conversations/{conversation}/analyses/{analysis}
        request_time (google.protobuf.timestamp_pb2.Timestamp):
            Output only. The time at which the analysis
            was requested.
        create_time (google.protobuf.timestamp_pb2.Timestamp):
            Output only. The time at which the analysis
            was created, which occurs when the long-running
            operation completes.
        analysis_result (google.cloud.contact_center_insights_v1.types.AnalysisResult):
            Output only. The result of the analysis,
            which is populated when the analysis finishes.
    """
    # NOTE(review): numbers 4-6 are skipped -- presumably reserved in the
    # .proto; verify against the schema before reusing them.
    name = proto.Field(proto.STRING, number=1,)
    request_time = proto.Field(
        proto.MESSAGE, number=2, message=timestamp_pb2.Timestamp,
    )
    create_time = proto.Field(proto.MESSAGE, number=3, message=timestamp_pb2.Timestamp,)
    analysis_result = proto.Field(proto.MESSAGE, number=7, message="AnalysisResult",)
class ConversationDataSource(proto.Message):
    r"""The conversation source, which is a combination of transcript
    and audio.
    Attributes:
        gcs_source (google.cloud.contact_center_insights_v1.types.GcsSource):
            A Cloud Storage location specification for
            the audio and transcript.
        dialogflow_source (google.cloud.contact_center_insights_v1.types.DialogflowSource):
            The source when the conversation comes from
            Dialogflow.
    """
    # "source" oneof: exactly one of gcs_source / dialogflow_source is set.
    gcs_source = proto.Field(
        proto.MESSAGE, number=1, oneof="source", message="GcsSource",
    )
    dialogflow_source = proto.Field(
        proto.MESSAGE, number=3, oneof="source", message="DialogflowSource",
    )
class GcsSource(proto.Message):
    r"""A Cloud Storage source of conversation data.
    Attributes:
        audio_uri (str):
            Cloud Storage URI that points to a file that
            contains the conversation audio.
        transcript_uri (str):
            Immutable. Cloud Storage URI that points to a
            file that contains the conversation transcript.
    """
    # Field numbers identify the wire format; do not renumber.
    audio_uri = proto.Field(proto.STRING, number=1,)
    transcript_uri = proto.Field(proto.STRING, number=2,)
class DialogflowSource(proto.Message):
    r"""A Dialogflow source of conversation data.
    Attributes:
        dialogflow_conversation (str):
            Output only. The name of the Dialogflow
            conversation that this conversation resource is
            derived from. Format:
            projects/{project}/locations/{location}/conversations/{conversation}
        audio_uri (str):
            Cloud Storage URI that points to a file that
            contains the conversation audio.
    """
    # NOTE(review): number 2 is skipped -- presumably reserved in the .proto.
    dialogflow_conversation = proto.Field(proto.STRING, number=1,)
    audio_uri = proto.Field(proto.STRING, number=3,)
class AnalysisResult(proto.Message):
    r"""The result of an analysis.
    Attributes:
        call_analysis_metadata (google.cloud.contact_center_insights_v1.types.AnalysisResult.CallAnalysisMetadata):
            Call-specific metadata created by the
            analysis.
        end_time (google.protobuf.timestamp_pb2.Timestamp):
            The time at which the analysis ended.
    """
    class CallAnalysisMetadata(proto.Message):
        r"""Call-specific metadata created during analysis.
        Attributes:
            annotations (Sequence[google.cloud.contact_center_insights_v1.types.CallAnnotation]):
                A list of call annotations that apply to this
                call.
            entities (Sequence[google.cloud.contact_center_insights_v1.types.AnalysisResult.CallAnalysisMetadata.EntitiesEntry]):
                All the entities in the call.
            sentiments (Sequence[google.cloud.contact_center_insights_v1.types.ConversationLevelSentiment]):
                Overall conversation-level sentiment for each
                channel of the call.
            intents (Sequence[google.cloud.contact_center_insights_v1.types.AnalysisResult.CallAnalysisMetadata.IntentsEntry]):
                All the matched intents in the call.
            phrase_matchers (Sequence[google.cloud.contact_center_insights_v1.types.AnalysisResult.CallAnalysisMetadata.PhraseMatchersEntry]):
                All the matched phrase matchers in the call.
            issue_model_result (google.cloud.contact_center_insights_v1.types.IssueModelResult):
                Overall conversation-level issue modeling
                result.
        """
        # Maps are keyed by string (entity/intent/phrase-matcher ids);
        # message types given as strings are resolved lazily by proto-plus.
        annotations = proto.RepeatedField(
            proto.MESSAGE, number=2, message="CallAnnotation",
        )
        entities = proto.MapField(
            proto.STRING, proto.MESSAGE, number=3, message="Entity",
        )
        sentiments = proto.RepeatedField(
            proto.MESSAGE, number=4, message="ConversationLevelSentiment",
        )
        intents = proto.MapField(
            proto.STRING, proto.MESSAGE, number=6, message="Intent",
        )
        phrase_matchers = proto.MapField(
            proto.STRING, proto.MESSAGE, number=7, message="PhraseMatchData",
        )
        issue_model_result = proto.Field(
            proto.MESSAGE, number=8, message="IssueModelResult",
        )
    # "metadata" oneof currently has a single member.
    call_analysis_metadata = proto.Field(
        proto.MESSAGE, number=2, oneof="metadata", message=CallAnalysisMetadata,
    )
    end_time = proto.Field(proto.MESSAGE, number=1, message=timestamp_pb2.Timestamp,)
class IssueModelResult(proto.Message):
    r"""Issue Modeling result on a conversation.
    Attributes:
        issue_model (str):
            Issue model that generates the result.
        issues (Sequence[google.cloud.contact_center_insights_v1.types.IssueAssignment]):
            All the matched issues.
    """
    # Field numbers identify the wire format; do not renumber.
    issue_model = proto.Field(proto.STRING, number=1,)
    issues = proto.RepeatedField(proto.MESSAGE, number=2, message="IssueAssignment",)
class ConversationLevelSentiment(proto.Message):
r"""One channel of conversation-level sentiment data.
Attributes:
channel_tag (int):
The channel of the audio that the data
applies to.
sentiment_data (google.cloud.contact_center_insights_v1.types.SentimentData):
Data specifying sentiment.
"""
channel_tag = proto.Field(proto.INT32, | |
<gh_stars>0
"""
This script generates all figures in the paper.

One can make a distinction between forecasting 7 or 14 days ahead via the
variable ``num_days`` (set in the configuration section below).
"""
import matplotlib.colors as mplc
import matplotlib.pyplot as plt
import numpy as np
import math
import datetime as dt
import pandas as pd
import seaborn as sn
import copy
from sklearn.metrics import mean_squared_error, mean_absolute_error
from datetime import datetime
import matplotlib.dates as mdates
import generate_figures as gf
from sklearn.metrics import mean_squared_error, mean_absolute_error
from matplotlib import ticker
# Shared matplotlib helpers: cap colorbars/axes at ~4 major ticks and place
# one major x-tick per month.
tick_locator = ticker.MaxNLocator(nbins=4)
import geopandas as gpd
import mezuro_preprocessing as mzp
import scipy.stats as stats
months = mdates.MonthLocator()  # every month
# --- Configuration ----------------------------------------------------------
# `num_days` selects whether the evaluated forecasts are 7 or 14 days ahead.
horizon = 14
start_date = '01-07-2020'
end_date = '31-12-2020'
date_format = '%d-%m-%Y'
start_date_number = dt.datetime.strptime(start_date, date_format)
end_date_number = dt.datetime.strptime(end_date, date_format)
num_days = 14
# Every day in [start_date, end_date], inclusive.  The original loop also
# built two per-day string representations that were never used; dropped.
dates = [
    start_date_number + dt.timedelta(days=t)
    for t in range((end_date_number - start_date_number).days + 1)
]
def _load_rates_csv(path):
    """Read a rates CSV, parse its 'date' column (dd-mm-yyyy) and clip the
    rows to the global [start_date_number, end_date_number] window."""
    df = pd.read_csv(path)
    df['date'] = df['date'].apply(lambda x: dt.datetime.strptime(x, '%d-%m-%Y'))
    window = (df['date'] >= start_date_number) & (df['date'] <= end_date_number)
    return df.loc[window]

# Posterior samples: kept unparsed/unclipped, exactly as stored.
Trans_rates = pd.read_csv("results_forecasting/Trans_rates_NB_mob_samples_final_new.csv")
# Point estimates of the transmission rates per model variant
# (Poisson/negative-binomial, with/without mobility) plus the quantiles of
# the NB+mobility model.  The original file repeated the same four-line
# read/parse/clip stanza seven times; factored into _load_rates_csv.
Trans_rates_basic = _load_rates_csv('results_forecasting/Trans_rates_NB_mob_final_new.csv')
Trans_rates_P_mob = _load_rates_csv('results_forecasting/Trans_rates_P_mob_final_new.csv')
Trans_rates_P_nomob = _load_rates_csv('results_forecasting/Trans_rates_P_nomob_final_new.csv')
Trans_rates_NB_mob = _load_rates_csv('results_forecasting/Trans_rates_NB_mob_final_new.csv')
Trans_rates_NB_nomob = _load_rates_csv('results_forecasting/Trans_rates_NB_nomob_final_new.csv')
df_quantiles = _load_rates_csv('results_forecasting/Trans_rates_NB_mob_int_final_new.csv')
#Load result files
def _load_results_csv(path):
    """Read a results CSV, parse its 'date' column (dd-mm-yyyy) and clip the
    rows to the global [start_date_number, end_date_number] window."""
    df = pd.read_csv(path)
    df['date'] = df['date'].apply(lambda x: dt.datetime.strptime(x, '%d-%m-%Y'))
    window = (df['date'] >= start_date_number) & (df['date'] <= end_date_number)
    return df.loc[window]

Results_7days = _load_results_csv('results_forecasting/Results_7days_01072020_31122020_point_new.csv')
Results_7days_nomob = _load_results_csv('results_forecasting/Results_7days_01072020_31122020_point_nomob_new.csv')
Results_14days = _load_results_csv('results_forecasting/Results_14days_01072020_31122020_point_new.csv')
Results_14days_nomob = _load_results_csv('results_forecasting/Results_14days_01072020_31122020_point_nomob_new.csv')

# Per-sample forecasts are split over two half-year files.  DataFrame.append
# was removed in pandas 2.0, so the parts are combined with pd.concat
# (default index handling matches the old append behaviour).
if num_days == 7:
    _sample_files = ['results_forecasting/Results_7days_01072020_30092020_new.csv',
                     'results_forecasting/Results_7days_01102020_31122020_new.csv']
else:
    _sample_files = ['results_forecasting/Results_14days_01072020_30092020_new.csv',
                     'results_forecasting/Results_14days_01102020_31122020_new.csv']
Results_7days_samples = pd.concat([pd.read_csv(f) for f in _sample_files])
Results_7days_samples['date'] = Results_7days_samples['date'].apply(
    lambda x: dt.datetime.strptime(x, '%d-%m-%Y'))
mask = (Results_7days_samples['date'] >= start_date_number) & (Results_7days_samples['date'] <= end_date_number)
Results_7days_samples = Results_7days_samples.loc[mask]

#Load validity results on new tests!
df_newtests = pd.read_csv('results_forecasting/Validation_newtests_init.csv')
df_newtests_sim = pd.read_csv('results_forecasting/Validation_newtests.csv')
df_newtests_sim_lower = pd.read_csv('results_forecasting/Validation_newtests_lower.csv')
df_newtests_sim_upper = pd.read_csv('results_forecasting/Validation_newtests_upper.csv')
# Per-day error metrics for the chosen horizon, with and without the mobility
# covariate, plus their relative improvements ("diff" = 1 - with/without).
MSE_7, MSE_7_nomob, MSE_diff = [], [], []
MAE_7, MAE_7_nomob, MAE_diff = [], [], []
RMSE_7, RMSE_7_nomob, RMSE_diff = [], [], []
WMAPE_7, WMAPE_7_nomob, WMAPE_diff = [], [], []
# National totals: prediction-interval bounds, point forecast and realization.
tot_lower, tot_upper = [], []
tot_predicted, tot_outcome = [], []
Median_error_7, Median_error_7_nomob, Median_error_diff = [], [], []
R2_7, R2_7_nomob = [], []
nr_samples = 100   # posterior samples per forecast day
conf_level = .95   # central coverage of the prediction intervals
# Total predicted cases per (date, sample) pair; used to form empirical
# prediction intervals for the national total.
HELP_df_7days_samples_tot = Results_7days_samples.groupby(['date','sample']).agg({'pred':'sum'})
for DATE in dates:
    print(DATE)
    # Select this day's point forecasts at the chosen horizon.
    if num_days == 7:
        HELP_df_7 = Results_7days.groupby('date').get_group(DATE)
        HELP_df_7_nomob = Results_7days_nomob.groupby('date').get_group(DATE)
    else:
        HELP_df_7 = Results_14days.groupby('date').get_group(DATE)
        HELP_df_7_nomob = Results_14days_nomob.groupby('date').get_group(DATE)
    # MSE with/without mobility; "diff" is the relative improvement.
    HELP_MSE = mean_squared_error(HELP_df_7['pred'],HELP_df_7['outcome'])
    HELP_MSE_nomob = mean_squared_error(HELP_df_7_nomob['pred'],HELP_df_7_nomob['outcome'])
    MSE_7.append(HELP_MSE)
    MSE_7_nomob.append(HELP_MSE_nomob)
    MSE_diff.append(1 - HELP_MSE / HELP_MSE_nomob)
    # MAE, same pattern.
    HELP_MAE = mean_absolute_error(HELP_df_7['pred'],HELP_df_7['outcome'])
    HELP_MAE_nomob = mean_absolute_error(HELP_df_7_nomob['pred'],HELP_df_7_nomob['outcome'])
    MAE_7.append(HELP_MAE)
    MAE_7_nomob.append(HELP_MAE_nomob)
    MAE_diff.append(1 - HELP_MAE / HELP_MAE_nomob)
    # RMSE derived from the MSEs above.
    HELP_RMSE = np.sqrt(HELP_MSE)
    HELP_RMSE_nomob = np.sqrt(HELP_MSE_nomob)
    RMSE_7.append(HELP_RMSE)
    RMSE_7_nomob.append(HELP_RMSE_nomob)
    RMSE_diff.append(1 - HELP_RMSE / HELP_RMSE_nomob)
    #Compare relative rankings
    predict_tot = sum(HELP_df_7['pred'])
    real_tot = sum(HELP_df_7['outcome'])
    tot_predicted.append(predict_tot)
    tot_outcome.append(real_tot)
    # NOTE(review): the two percentage series below are computed but never
    # used afterwards.
    predict_percentages = HELP_df_7['pred'] / predict_tot
    real_percentages = HELP_df_7['outcome'] / real_tot
    # NOTE(review): totals are written into Results_7days even when
    # num_days == 14 -- confirm this is intended.
    mask = (Results_7days['date'] == DATE)
    Results_7days.loc[mask,'total_pred'] = predict_tot
    Results_7days.loc[mask,'total_real'] = real_tot
    # Weighted MAPE: total absolute error scaled by the realized total.
    HELP_WMAPE = sum(abs(HELP_df_7['pred']-HELP_df_7['outcome'])) / real_tot
    HELP_WMAPE_nomob = sum(abs(HELP_df_7_nomob['pred']-HELP_df_7_nomob['outcome'])) / real_tot
    WMAPE_7.append(HELP_WMAPE)
    WMAPE_7_nomob.append(HELP_WMAPE_nomob)
    WMAPE_diff.append(1 - HELP_WMAPE / HELP_WMAPE_nomob)
    # Median relative error per region; dropna() removes zero-outcome rows.
    HELP_median = np.median((abs(HELP_df_7['pred']-HELP_df_7['outcome'])/HELP_df_7['outcome']).dropna())
    HELP_median_nomob = np.median((abs(HELP_df_7_nomob['pred']-HELP_df_7_nomob['outcome'])/HELP_df_7_nomob['outcome']).dropna())
    Median_error_7.append(HELP_median)
    Median_error_7_nomob.append(HELP_median_nomob)
    Median_error_diff.append(1 - HELP_median / HELP_median_nomob)
    HELP_df_7_samples = Results_7days_samples.groupby('date').get_group(DATE)
    # Coefficient of determination (R^2), with and without mobility.
    SS_res = sum(pow(HELP_df_7['pred'] - HELP_df_7['outcome'],2))
    SS_tot = sum(pow(HELP_df_7['outcome'] - HELP_df_7['outcome'].mean(),2))
    R2_7.append(1 - SS_res / SS_tot)
    SS_res = sum(pow(HELP_df_7_nomob['pred'] - HELP_df_7_nomob['outcome'],2))
    SS_tot = sum(pow(HELP_df_7_nomob['outcome'] - HELP_df_7_nomob['outcome'].mean(),2))
    R2_7_nomob.append(1 - SS_res / SS_tot)
    #list_pred_tot = []
    # Disabled per-sample totals computation, kept for reference.
    '''
    #Compute for each sample totals!
    for sample in range(0,nr_samples):
        HELP_df_7_sample = HELP_df_7_samples.groupby('sample').get_group(sample)
        predict_tot = sum(HELP_df_7_sample['pred'])
        mask2 = (Results_7days_samples['date'] == DATE) & (Results_7days_samples['sample'] == sample)
        Results_7days_samples.loc[mask2,'total_pred'] = predict_tot
        list_pred_tot.append(predict_tot)
    '''
    # Empirical central prediction interval for the national total, taken
    # over the per-sample totals of this day.
    HELP_df_samples = HELP_df_7days_samples_tot.groupby('date').get_group(DATE)
    conf_lower = (1 - conf_level) / 2
    conf_upper = (1 + conf_level) / 2
    quant_tot_lower = np.quantile(HELP_df_samples,conf_lower)
    quant_tot_upper = np.quantile(HELP_df_samples,conf_upper)
    Results_7days.loc[mask,'lower_tot'] = quant_tot_lower
    Results_7days.loc[mask,'upper_tot'] = quant_tot_upper
    tot_lower.append(quant_tot_lower)
    tot_upper.append(quant_tot_upper)
# NOTE: The triple-quoted block below is disabled code that rescaled mobility
# (and the mobility transmission rate) differently for Saturdays, Sundays and
# weekdays.  It is never executed; kept for reference only.
'''
for index, row in Trans_rates_NB_mob.iterrows():
    Current_date_number = row['date']
    #Adjust mobility:
    if Current_date_number.weekday() == 5:
        Trans_rates_NB_mob.at[index,'mobility'] *= (.36*6.2 / 31)
        Trans_rates_NB_mob.at[index,'rate_mob'] /= (.36*6.2 / 31)
    else:
        if Current_date_number.weekday() == 6:
            Trans_rates_NB_mob.at[index,'mobility'] *= (.27*6.2 / 31)
            Trans_rates_NB_mob.at[index,'rate_mob'] /= (.27*6.2 / 31)
        else:
            Trans_rates_NB_mob.at[index,'mobility'] *= (1 - (.36+.27)*6.2 / 31)
            Trans_rates_NB_mob.at[index,'rate_mob'] /= (1 - (.36+.27)*6.2 / 31)
'''
# RIVM reproduction-number data (also used by the R figures further below).
df_R_RIVM = pd.read_json('data/COVID-19_reproductiegetal.json').set_index('Date')
# Figure: daily reported positive tests, initialization-based vs RIVM, log scale.
ylim_lower = 30
ylim_upper = 13000
# Vertical position for the event labels: 5% up the (log-scaled) y-range.
setpoint_dates = np.exp(np.log(ylim_lower) + 0.05*(np.log(ylim_upper) - np.log(ylim_lower)))
fig, ax = plt.subplots(figsize=(10, 4))
ax.plot(dates, df_newtests['newtests_pred'], c='blue', label='Via initialization')
ax.plot(dates, df_newtests['newtests'], c='black', label='RIVM')
# Policy-change dates: green = reopening, orange = minor, red = major measures.
for event_day, event_color, event_label in [
        (datetime(2020, 7, 1), 'green', "07-01"),
        (datetime(2020, 8, 6), 'orange', "08-06"),
        (datetime(2020, 8, 18), 'orange', "08-18"),
        (datetime(2020, 9, 29), 'orange', "09-29"),
        (datetime(2020, 10, 14), 'r', "10-14"),
        (datetime(2020, 11, 4), 'r', "11-04"),
        (datetime(2020, 12, 14), 'r', "12-14"),
        (datetime(2020, 12, 25), 'orange', "12-25"),
]:
    ax.axvline(x=event_day, c=event_color)
    ax.annotate(xy=(event_day, setpoint_dates), s=event_label, c=event_color)
ax.xaxis.set_major_locator(months)
ax.set_xlabel('Date')
ax.set_title('Daily reported positive tests')
ax.set_yscale('log')
ax.legend(loc='upper left')
plt.ylim([ylim_lower, ylim_upper])
plt.show()
fig.savefig('fig_final_newtests_new.pdf', format="pdf", bbox_inches="tight")
# Figure: daily reported positive tests, simulation-based prediction with its
# uncertainty band vs RIVM, log scale.
ylim_lower = 15
ylim_upper = 50000
setpoint_dates = np.exp(np.log(ylim_lower) + 0.05*(np.log(ylim_upper) - np.log(ylim_lower)))
fig, ax = plt.subplots(figsize=(10, 4))
ax.plot(dates, df_newtests_sim['newtests_pred'], c='b', label='Via simulation')
ax.fill_between(dates, df_newtests_sim_upper['newtests_pred'], df_newtests_sim_lower['newtests_pred'], alpha=.3)
ax.plot(dates, df_newtests['newtests'], c='black', label='RIVM')
# Policy-change dates: green = reopening, orange = minor, red = major measures.
for event_day, event_color, event_label in [
        (datetime(2020, 7, 1), 'green', "07-01"),
        (datetime(2020, 8, 6), 'orange', "08-06"),
        (datetime(2020, 8, 18), 'orange', "08-18"),
        (datetime(2020, 9, 29), 'orange', "09-29"),
        (datetime(2020, 10, 14), 'r', "10-14"),
        (datetime(2020, 11, 4), 'r', "11-04"),
        (datetime(2020, 12, 14), 'r', "12-14"),
        (datetime(2020, 12, 25), 'orange', "12-25"),
]:
    ax.axvline(x=event_day, c=event_color)
    ax.annotate(xy=(event_day, setpoint_dates), s=event_label, c=event_color)
ax.xaxis.set_major_locator(months)
ax.set_xlabel('Date')
ax.set_title('Daily reported positive tests')
ax.set_yscale('log')
ax.legend(loc='upper left')
plt.ylim([ylim_lower, ylim_upper])
plt.show()
fig.savefig('fig_final_newtests_sim_new.pdf', format="pdf", bbox_inches="tight")
# Figure: effective reproduction number, this paper vs RIVM, with bands.
ylim_lower = 0
ylim_upper = 2.5
# Vertical position for the event labels: 5% up the (linear) y-range.
setpoint_dates = ylim_lower + 0.05*(ylim_upper - ylim_lower)
fig, ax = plt.subplots(figsize=(10, 4))
ax.plot(dates, Trans_rates_basic.loc[7:, 'R'], c='b', label='This paper')
ax.fill_between(dates, df_quantiles.loc[7:, 'upper_R'], df_quantiles.loc[7:, 'lower_R'], alpha=.3)
ax.plot(dates, df_R_RIVM.loc[start_date_number:end_date_number, 'Rt_avg'], c='black', label='RIVM')
ax.fill_between(dates, df_R_RIVM.loc[start_date_number:end_date_number, 'Rt_up'], df_R_RIVM.loc[start_date_number:end_date_number, 'Rt_low'], color='black', alpha=.3)
# Policy-change dates: green = reopening, orange = minor, red = major measures.
for event_day, event_color, event_label in [
        (datetime(2020, 7, 1), 'green', "07-01"),
        (datetime(2020, 8, 6), 'orange', "08-06"),
        (datetime(2020, 8, 18), 'orange', "08-18"),
        (datetime(2020, 9, 29), 'orange', "09-29"),
        (datetime(2020, 10, 14), 'r', "10-14"),
        (datetime(2020, 11, 4), 'r', "11-04"),
        (datetime(2020, 12, 14), 'r', "12-14"),
        (datetime(2020, 12, 25), 'orange', "12-25"),
]:
    ax.axvline(x=event_day, c=event_color)
    ax.annotate(xy=(event_day, setpoint_dates), s=event_label, c=event_color)
ax.xaxis.set_major_locator(months)
ax.set_xlabel('Date')
ax.set_title('Effective reproduction number')
ax.legend(loc='upper right')
plt.ylim([ylim_lower, ylim_upper])
plt.show()
fig.savefig('fig_final_R_new.pdf', format="pdf", bbox_inches="tight")
# Figure: effective reproduction number shifted back 11 days, vs RIVM.
ylim_lower = 0
ylim_upper = 2.5
setpoint_dates = ylim_lower + 0.05*(ylim_upper - ylim_lower)
fig, ax = plt.subplots(figsize=(10, 4))
ax.plot(dates, Trans_rates_basic.loc[7:, 'R'].shift(-11), c='b', label='This paper (shifted by 11 days)')
ax.fill_between(dates, df_quantiles.loc[7:, 'upper_R'].shift(-11), df_quantiles.loc[7:, 'lower_R'].shift(-11), alpha=.3)
ax.plot(dates, df_R_RIVM.loc[start_date_number:end_date_number, 'Rt_avg'], c='black', label='RIVM')
ax.fill_between(dates, df_R_RIVM.loc[start_date_number:end_date_number, 'Rt_up'], df_R_RIVM.loc[start_date_number:end_date_number, 'Rt_low'], color='black', alpha=.3)
# Policy-change dates: green = reopening, orange = minor, red = major measures.
for event_day, event_color, event_label in [
        (datetime(2020, 7, 1), 'green', "07-01"),
        (datetime(2020, 8, 6), 'orange', "08-06"),
        (datetime(2020, 8, 18), 'orange', "08-18"),
        (datetime(2020, 9, 29), 'orange', "09-29"),
        (datetime(2020, 10, 14), 'r', "10-14"),
        (datetime(2020, 11, 4), 'r', "11-04"),
        (datetime(2020, 12, 14), 'r', "12-14"),
        (datetime(2020, 12, 25), 'orange', "12-25"),
]:
    ax.axvline(x=event_day, c=event_color)
    ax.annotate(xy=(event_day, setpoint_dates), s=event_label, c=event_color)
ax.xaxis.set_major_locator(months)
ax.set_xlabel('Date')
ax.set_title('Effective reproduction number')
ax.legend(loc='upper right')
plt.ylim([ylim_lower, ylim_upper])
plt.show()
fig.savefig('fig_final_R_shift_new.pdf', format="pdf", bbox_inches="tight")
# Figure: estimated fraction of local contacts over time, with quantile band.
ylim_lower = 0
ylim_upper = 1
setpoint_dates = ylim_lower + 0.05*(ylim_upper - ylim_lower)
fig, ax = plt.subplots(figsize=(10, 4))
ax.plot(dates, Trans_rates_basic.loc[7:, 'frac_loc'], c='b')
ax.fill_between(dates, df_quantiles.loc[7:, 'upper_fracloc'], df_quantiles.loc[7:, 'lower_fracloc'], alpha=.3)
# Policy-change dates: green = reopening, orange = minor, red = major measures.
for event_day, event_color, event_label in [
        (datetime(2020, 7, 1), 'green', "07-01"),
        (datetime(2020, 8, 6), 'orange', "08-06"),
        (datetime(2020, 8, 18), 'orange', "08-18"),
        (datetime(2020, 9, 29), 'orange', "09-29"),
        (datetime(2020, 10, 14), 'r', "10-14"),
        (datetime(2020, 11, 4), 'r', "11-04"),
        (datetime(2020, 12, 14), 'r', "12-14"),
        (datetime(2020, 12, 25), 'orange', "12-25"),
]:
    ax.axvline(x=event_day, c=event_color)
    ax.annotate(xy=(event_day, setpoint_dates), s=event_label, c=event_color)
ax.xaxis.set_major_locator(months)
ax.set_xlabel('Date')
ax.set_title('Fraction of local contacts')
plt.ylim([ylim_lower, ylim_upper])
plt.show()
fig.savefig('fig_final_fracloc_new.pdf', format="pdf", bbox_inches="tight")
# Figure: transmission rate for local contacts, with quantile band.
ylim_lower = 0
ylim_upper = .2
setpoint_dates = ylim_lower + 0.05*(ylim_upper - ylim_lower)
fig, ax = plt.subplots(figsize=(10, 4))
ax.plot(dates, Trans_rates_basic.loc[7:, 'rate_loc'], c='b')
ax.fill_between(dates, df_quantiles.loc[7:, 'upper_loc'], df_quantiles.loc[7:, 'lower_loc'], alpha=.3)
# Policy-change dates: green = reopening, orange = minor, red = major measures.
for event_day, event_color, event_label in [
        (datetime(2020, 7, 1), 'green', "07-01"),
        (datetime(2020, 8, 6), 'orange', "08-06"),
        (datetime(2020, 8, 18), 'orange', "08-18"),
        (datetime(2020, 9, 29), 'orange', "09-29"),
        (datetime(2020, 10, 14), 'r', "10-14"),
        (datetime(2020, 11, 4), 'r', "11-04"),
        (datetime(2020, 12, 14), 'r', "12-14"),
        (datetime(2020, 12, 25), 'orange', "12-25"),
]:
    ax.axvline(x=event_day, c=event_color)
    ax.annotate(xy=(event_day, setpoint_dates), s=event_label, c=event_color)
ax.xaxis.set_major_locator(months)
ax.set_xlabel('Date')
ax.set_title('Transmission rate - local contacts')
plt.ylim([ylim_lower, ylim_upper])
plt.show()
fig.savefig('fig_final_loc_new.pdf', format="pdf", bbox_inches="tight")
# Figure: transmission rate for contacts due to travelling, with quantile band.
ylim_lower = 0
ylim_upper = .55
setpoint_dates = ylim_lower + 0.05*(ylim_upper - ylim_lower)
fig, ax = plt.subplots(figsize=(10, 4))
ax.plot(dates, Trans_rates_NB_mob.loc[7:, 'rate_mob'], c='b',)
ax.fill_between(dates, df_quantiles.loc[7:, 'upper_mob'], df_quantiles.loc[7:, 'lower_mob'], alpha=.3)
# Policy-change dates: green = reopening, orange = minor, red = major measures.
for event_day, event_color, event_label in [
        (datetime(2020, 7, 1), 'green', "07-01"),
        (datetime(2020, 8, 6), 'orange', "08-06"),
        (datetime(2020, 8, 18), 'orange', "08-18"),
        (datetime(2020, 9, 29), 'orange', "09-29"),
        (datetime(2020, 10, 14), 'r', "10-14"),
        (datetime(2020, 11, 4), 'r', "11-04"),
        (datetime(2020, 12, 14), 'r', "12-14"),
        (datetime(2020, 12, 25), 'orange', "12-25"),
]:
    ax.axvline(x=event_day, c=event_color)
    ax.annotate(xy=(event_day, setpoint_dates), s=event_label, c=event_color)
ax.xaxis.set_major_locator(months)
ax.set_xlabel('Date')
ax.set_title('Transmission rate - contacts due to travelling')
plt.ylim([ylim_lower, ylim_upper])
plt.show()
fig.savefig('fig_final_mob_new.pdf', format="pdf", bbox_inches="tight")
# --- Figure: the 'prob_times_contacts' series (continues past this excerpt).
ylim_lower = 0
ylim_upper = .3
# Height at which the vertical-line date labels are drawn: 5% above the
# bottom of the y-axis window.
setpoint_dates = ylim_lower + 0.05*(ylim_upper - ylim_lower)
fig, ax = plt.subplots(figsize=(10,4))
ax.plot(dates, Trans_rates_basic.loc[7:,'prob_times_contacts'], c='b')
ax.fill_between(dates, df_quantiles.loc[7:,'upper_newlyinfected'],df_quantiles.loc[7:,'lower_newlyinfected'],alpha=.3)
# NOTE(review): `annotate(..., s=...)` below uses the pre-3.3 matplotlib
# keyword (renamed `text` in 3.3, removed later) -- verify the pinned
# matplotlib version.
ax.axvline(x = datetime(2020,7,1),c='green')
ax.annotate(xy=(datetime(2020,7,1), setpoint_dates), s="07-01", c='green')
ax.axvline(x = | |
errors
# If refreshing, all stocks are initialzed via newly downloaded
# online data
if refresh:
errors = get_online_mp(all_tickers)
else:
if local:
get_local(local)
else:
self.print(
f"[3]: no local data detected, moving on to online initialization"
)
if online:
errors = get_online_mp(online)
else:
errors = {"delisted": [], "timeout": []}
num_errors = len(errors["delisted"]) + len(errors["timeout"])
delisted = errors["delisted"][:]
timeouts = errors["timeout"][:]
self.print(
f"Successfully added {(len(all_tickers) - num_errors)} of "
f"{len(all_tickers)} stocks ({len(delisted)} failures, "
f"{len(timeouts)} timeouts, {num_errors} total errors))"
)
previous = []
# Attempting timeout one more time
# TODO: This is not a complete solution
while timeouts:
self.print(f"\n[5]: retrying {len(timeouts)} timeouts")
errors = get_online_mp(timeouts, timeout=10)
n_timeout = len(errors["timeout"])
n_delisted = len(errors["delisted"])
n_attempts = len(timeouts)
n_errors = n_timeout + n_delisted
n_success = n_attempts - n_errors
self.print(
f"{n_success} / {n_attempts} successful timeout reattempts "
f"({n_delisted} failures, {n_timeout} timeouts, {n_errors} "
"total errors)"
)
delisted += errors["delisted"]
previous = timeouts[:]
timeouts = []
# Just retrying timeouts once before considering them failures
for ticker in errors["timeout"]:
if ticker in previous:
delisted.append(ticker)
else:
timeouts.append(ticker)
for ticker in delisted:
all_tickers.remove(ticker)
for ticker in all_tickers:
types.stock.instances[ticker] = self[ticker]
return delisted
def _get_listings(self, exchange: str) -> list[str]:
"""Gets the listings for a particular exchange"""
info = self._exchange_info
return info[info["Exchange"] == exchange]["Symbol"].to_list()
def _remove_errors(self) -> None:
"""Removes errors from list of tracked tickers"""
for symbol in self.errors:
ei = self._exchange_info[self._exchange_info["Symbol"] == symbol].index
self._exchange_info.drop(ei, inplace=True)
with open(self._eipath, "wb") as f:
self._exchange_info.to_pickle(f)
class Filter:
    """
    A universe's filter object that allows stock filtering

    A filter object attached to all universe objects that automatically
    processes filter expressions for its attached universe

    These objects are not intended to be instantiated by the end user, and
    only have utility in their attachment to a Universe object.

    Parameters:
        universe: The universe object to attach to
    """
    def __init__(self, universe: Universe) -> None:
        # The universe this filter operates on; filtering returns views and
        # never mutates it.
        self.universe = universe
    def __getitem__(self, item: Any) -> Universe:
        """The standard method of operating a universe filter. Filter
        expressions should act as 'indexing' the universe"""
        # A lone filter is wrapped so single and multiple filters share the
        # same iteration path.
        if not isinstance(item, Iterable):
            item = [item]
        # Filter a copy so the attached universe itself is left untouched.
        universe = copy(self.universe)
        for filterr in self._validate_filters(item):
            if filterr.called:
                # Called expressions run a stock method per stock (possibly
                # expensive), so they take the multiprocessing path.
                universe = self._filter_mp(universe, filterr)
            else:
                universe = self._filter(universe, filterr)
        return universe
    @staticmethod
    def _filter_mp(universe: Universe, filterr: FilterExpression) -> UniverseView:
        """A multiprocessing version of the filter execution"""
        # NOTE(review): Pool.map must pickle the callable it distributes,
        # and `process_stock` is a local closure, which is not picklable --
        # this path likely raises at runtime. Confirm, or move the worker to
        # module level / fall back to _filter.
        def process_stock(stock):
            return (stock.name, filterr._exec(stock))
        filtered: Union[dict[str, Stock], list[Stock]] = []
        with Pool() as pool:
            # Maps ticker -> boolean filter verdict.
            filtered = dict(pool.map(process_stock, universe.values()))
        # Inside the comprehension `filtered` still names the dict built
        # above; the list result only rebinds the name after evaluation.
        filtered = [v for k, v in universe.items() if filtered[k]]
        return UniverseView(universe, filtered, filterr)
    @staticmethod
    def _filter_mp_v2(universe: Universe, filterr: FilterExpression) -> UniverseView:
        """A multiprocessing version of the filter execution that utilizes
        automatic batching of the universe's current stocks"""
        # NOTE(review): same pickling concern as _filter_mp (local closure
        # handed to Pool.map); also this variant is not called from
        # __getitem__ -- presumably experimental.
        def process_batch(batch):
            return [(stock, filterr._exec(stock)) for stock in batch]
        batches = list(utils.auto_batch(list(universe.values())))
        filtered = []
        with Pool() as pool:
            filtered = pool.map(process_batch, batches)
        # Flatten the per-batch (stock, verdict) pairs, keeping the passers.
        filtered = [stock for batch in filtered for stock, success in batch if success]
        return UniverseView(universe, filtered, filterr)
    @staticmethod
    def _filter(universe: Universe, filterr: FilterExpression) -> UniverseView:
        """Executes a filter expression on a universe

        Executes a single filter expression on this filter's universe,
        returning a universe view that is the result of the filter

        Parameters:
            universe (Universe | UniverseView):
                The universe to filter
            filterr (FilterExpression):
                The expression to apply

        Returns:
            The filtered universe object
        """
        filtered = [v for v in universe.values() if filterr._exec(v)]
        return UniverseView(universe, filtered, filterr)
    @staticmethod
    def _validate_filters(filters: Iterable[ValidFilter]) -> list[FilterExpression]:
        """Validates that all objects in a filter indexing operation are valid
        filters or filter expressions"""
        def validate(filterr):
            # Plain containers are promoted to direct-membership filter
            # expressions; the `special` tag tells FilterExpression which
            # container form it received.
            if isinstance(filterr, list):
                if all(isinstance(obj, str) for obj in filterr):
                    filterr = FilterExpression(filterr, special="strlist")
                elif all(isinstance(obj, Stock) for obj in filterr):
                    filterr = FilterExpression(filterr, special="stocklist")
            elif isinstance(filterr, dict):
                if all(
                    ((isinstance(k, str), isinstance(v, Stock)) == (True, True))
                    for k, v in filterr.items()
                ):
                    filterr = FilterExpression(filterr, special="dict")
            if not isinstance(filterr, FilterExpression):
                raise TypeError(f"Invalid filter {filterr}")
            return filterr
        return [validate(filterr) for filterr in filters]
class FilterExpression:
"""
An expression compiled inside of a filter indexing operation
An object produced by performing a boolean operation on a stock attribute,
when the attribute is accessed from a universe object. When compiled,
filter expressions will always produce a function that takes in a single
stock object as an input, and produces a boolean output. Functions passed
into a filter indexing operation that operate similarly are also valid
filter expressions.
For the sake of example, let x be a Universe or UniverseView (they operate
identically when being filtered). The expression "x.beta() > 1" will
produce a filter expression object that, when compiled, will result in a
function whose input is a stock object and output is the boolean result of
"stock.beta() > 1". Any expression that takes a single stock as an input
with a boolean return is a valid expression inside of a filtering operation.
For example, the expression "x.value" will return a filter expression whos
attached function will evaluate the boolean of conversion of a stock's
value -- False if the stock is worthless else True.
Filter expression objects can only be created by accessing stock attributes
on a universe object.
Filter Expressions are only intended to be created by Universe and Filter
objects. They should never be instantiated by end users.
Parameters:
attr:
The stock attribute to access for each stock
special:
Used when creating nonstandard filter expressions
"""
attr: str
def __init__(
self,
attr: Union[str, ValidFilter],
special: Optional[Literal["strlist", "stocklist", "dict"]] = None,
) -> None:
self.operation: str = ""
self.condition: Any = None
self.exp: Optional[Callable] = None
self.args: Any = None
self.kwargs: Any = None
self.called: bool = False
self.is_other: bool = False
if isinstance(attr, str):
self.attr = attr
else:
self.attr = "direct"
self.operation = "filter"
if type(attr) is list[str]:
self.condition = attr
elif type(attr) is list[Stock]:
self.condition = [stock.name for stock in attr]
elif type(attr) is dict:
self.condition = list(attr.keys())
"""
if special is not None:
self.attr = "direct"
self.operation = "filter"
if special == "strlist":
self.condition = attr
elif special == "stocklist":
self.condition = [stock.name for stock in attr]
elif special == "dict":
self.condition = list(attr.keys())
"""
def __str__(self) -> str:
convert = {
"__lt__": "<",
"__le__": "<=",
"__eq__": "==",
"__ne__": "!=",
"__gt__": ">",
"__ge__": ">=",
"filter": "filter",
}
attr = self.attr_string()
if self.is_other:
return attr
elif self.operation is None:
return f"if {attr}"
return f"{attr} {convert[self.operation]} {self.condition}"
def __hash__(self) -> int:
kwargs = None
if self.kwargs:
kwargs = ((k, v) for k, v in self.kwargs)
return (self.attr, self.args, kwargs, self.operation, self.condition).__hash__()
def __call__(self, *args: Any, **kwargs: Any) -> FilterExpression:
self.args = args
self.kwargs = kwargs
self.called = True
return self
def __bool__(self) -> bool:
if self.is_other:
return True
raise NotImplementedError(
f"Direct boolean conversion not currently supported for filter "
"expressions. If checking for a false value, try "
f"'{self.attr_string()} == False'"
)
def __lt__(self, other: object) -> FilterExpression:
if self.is_other:
return NotImplemented
if isinstance(other, FilterExpression):
other.is_other = True
if not self.operation:
self.operation = "__lt__"
if not self.condition:
self.condition = other
return self
def __le__(self, other: object) -> FilterExpression:
if self.is_other:
return NotImplemented
if isinstance(other, FilterExpression):
other.is_other = True
if not self.operation:
self.operation = "__le__"
if not self.condition:
self.condition = other
return self
def __eq__(self, other: object) -> FilterExpression: # type: ignore[override]
if self.is_other:
return NotImplemented
if isinstance(other, FilterExpression):
other.is_other = True
if not self.operation:
self.operation = "__eq__"
if not self.condition:
self.condition = other
return self
def __ne__(self, other: object) -> FilterExpression: # type: ignore[override]
if self.is_other:
return NotImplemented
if isinstance(other, FilterExpression):
other.is_other = True
if not self.operation:
self.operation = "__ne__"
if not self.condition:
self.condition = other
return self
def __gt__(self, other: object) -> FilterExpression:
if self.is_other:
return NotImplemented
if isinstance(other, FilterExpression):
other.is_other = True
if not self.operation:
self.operation = "__gt__"
if not self.condition:
self.condition = other
return self
def __ge__(self, other: object) -> FilterExpression:
if self.is_other:
return NotImplemented
if isinstance(other, FilterExpression):
other.is_other = True
if not self.operation:
self.operation = "__ge__"
if not self.condition:
self.condition = other
return self
def __contains__(self, other: object) -> FilterExpression:
if self.is_other:
| |
order on the first day
expected_orders = [
[{
'amount': contracts,
'commission': 0.0,
'created': T('2014-01-06 14:31'),
'dt': T('2014-01-06 14:32'),
'filled': contracts,
'id': wildcard,
'limit': None,
'limit_reached': False,
'reason': None,
'sid': self.future,
'status': 1,
'stop': None,
'stop_reached': False
}],
] + [[]] * (len(self.closes) - 1)
assert_equal(
orders.tolist(),
expected_orders,
check_names=False,
)
assert_equal(
orders.index,
self.closes,
check_names=False,
)
transactions = perf['transactions']
# since we only order on the first day, we should only transact on the
# first day
expected_transactions = [
[{
'amount': contracts,
'commission': None,
'dt': T('2014-01-06 14:32'),
'order_id': wildcard,
'price': 1.0,
'sid': self.future,
}],
] + [[]] * (len(self.closes) - 1)
assert_equal(
transactions.tolist(),
expected_transactions,
check_names=False,
)
assert_equal(
transactions.index,
self.closes,
check_names=False,
)
if not check_portfolio_during_simulation:
return
portfolio_snapshots = pd.DataFrame.from_dict(
portfolio_snapshots,
orient='index',
)
expected_starting_cash = pd.Series(
self.SIM_PARAMS_CAPITAL_BASE,
index=self.trading_minutes,
)
assert_equal(
portfolio_snapshots['starting_cash'],
expected_starting_cash,
check_names=False,
)
zero_minutes = pd.Series(0.0, index=self.trading_minutes)
for field in 'pnl', 'returns', 'cash_flow':
assert_equal(
portfolio_snapshots[field],
zero_minutes,
check_names=False,
msg=field,
)
reindex_columns = sorted(
set(portfolio_snapshots.columns) - {
'starting_cash',
'cash_flow',
'pnl',
'returns',
'positions',
},
)
minute_reindex = perf.rename(
columns={
'capital_used': 'cash_flow',
'ending_cash': 'cash',
'ending_exposure': 'positions_exposure',
'ending_value': 'positions_value',
},
)[reindex_columns].reindex(
self.trading_minutes,
method='bfill',
)
first_minute = self.trading_minutes[0]
# the first minute should have the default values because we haven't
# done anything yet
minute_reindex.loc[first_minute, 'cash'] = (
self.SIM_PARAMS_CAPITAL_BASE
)
minute_reindex.loc[
first_minute,
['positions_exposure', 'positions_value'],
] = 0
assert_equal(
portfolio_snapshots[reindex_columns],
minute_reindex,
check_names=False,
)
class TestFixedReturns(WithMakeAlgo, WithWerror, ZiplineTestCase):
EQUITY_DAILY_BAR_SOURCE_FROM_MINUTE = True
FUTURE_DAILY_BAR_SOURCE_FROM_MINUTE = True
START_DATE = T('2014-01-06')
END_DATE = T('2014-01-10')
# note: class attributes after this do not configure fixtures, they are
# just used in this test suite
# we use a contract multiplier to make sure we are correctly calculating
# exposure as price * multiplier
future_contract_multiplier = 2
asset_start_price = 100
asset_daily_returns = np.array([
+0.02, # up 2%
-0.02, # down 2%, this should give us less value that we started with
+0.00, # no returns
+0.04, # up 4%
])
asset_daily_close = prices_generating_returns(
asset_daily_returns,
asset_start_price,
)
asset_daily_volume = 100000
    @classmethod
    def init_class_fixtures(cls):
        """Resolve the fixture assets and precompute the equity/future
        minute, open, and close indices the tests compare against."""
        super().init_class_fixtures()
        # One equity and one future come from the fixture machinery; fetch
        # concrete Asset objects for each.
        cls.equity = cls.asset_finder.retrieve_asset(
            cls.asset_finder.equities_sids[0],
        )
        cls.future = cls.asset_finder.retrieve_asset(
            cls.asset_finder.futures_sids[0],
        )
        cls.equity_minutes = pd.Index(
            cls.trading_calendars[Equity].minutes_for_sessions_in_range(
                cls.START_DATE,
                cls.END_DATE,
            ),
        )
        cls.equity_closes = pd.Index(
            cls.trading_calendars[Equity].session_closes_in_range(
                cls.START_DATE,
                cls.END_DATE,
            ),
        )
        # Drop the index name so assert_equal comparisons in the tests do
        # not need check_names handling for it.
        cls.equity_closes.name = None
        futures_cal = cls.trading_calendars[Future]
        # Futures use the calendar's *execution* minutes/times, which can
        # differ from the raw session boundaries.
        cls.future_minutes = pd.Index(
            futures_cal.execution_minutes_for_sessions_in_range(
                cls.START_DATE,
                cls.END_DATE,
            ),
        )
        cls.future_closes = pd.Index(
            futures_cal.execution_time_from_close(
                futures_cal.session_closes_in_range(
                    cls.START_DATE,
                    cls.END_DATE,
                ),
            ),
        )
        cls.future_closes.name = None
        cls.future_opens = pd.Index(
            futures_cal.execution_time_from_open(
                futures_cal.session_opens_in_range(
                    cls.START_DATE,
                    cls.END_DATE,
                ),
            ),
        )
        cls.future_opens.name = None
    def init_instance_fixtures(self):
        """Build a futures-calendar DataPortal alongside the default one.

        The default ``self.data_portal`` follows the equity calendar; the
        futures tests need a portal whose daily bars are resampled from
        minute bars on the futures trading calendar.
        """
        super().init_instance_fixtures()
        # Mirror the fixture default: derive the first trading day from
        # whichever bar reader is in use.
        if self.DATA_PORTAL_FIRST_TRADING_DAY is None:
            if self.DATA_PORTAL_USE_MINUTE_DATA:
                self.DATA_PORTAL_FIRST_TRADING_DAY = (
                    self.bcolz_future_minute_bar_reader.first_trading_day
                )
            elif self.DATA_PORTAL_USE_DAILY_DATA:
                self.DATA_PORTAL_FIRST_TRADING_DAY = (
                    self.bcolz_future_daily_bar_reader.first_trading_day
                )
        self.futures_data_portal = DataPortal(
            self.asset_finder,
            self.trading_calendars[Future],
            first_trading_day=self.DATA_PORTAL_FIRST_TRADING_DAY,
            equity_daily_reader=(
                self.bcolz_equity_daily_bar_reader
                if self.DATA_PORTAL_USE_DAILY_DATA else
                None
            ),
            equity_minute_reader=(
                self.bcolz_equity_minute_bar_reader
                if self.DATA_PORTAL_USE_MINUTE_DATA else
                None
            ),
            adjustment_reader=(
                self.adjustment_reader
                if self.DATA_PORTAL_USE_ADJUSTMENTS else
                None
            ),
            future_minute_reader=(
                self.bcolz_future_minute_bar_reader
                if self.DATA_PORTAL_USE_MINUTE_DATA else
                None
            ),
            # Daily future bars are synthesized by resampling the minute
            # bars on the futures trading calendar.
            future_daily_reader=(
                MinuteResampleSessionBarReader(
                    self.bcolz_future_minute_bar_reader.trading_calendar,
                    self.bcolz_future_minute_bar_reader)
                if self.DATA_PORTAL_USE_MINUTE_DATA else None
            ),
            last_available_session=self.DATA_PORTAL_LAST_AVAILABLE_SESSION,
            last_available_minute=self.DATA_PORTAL_LAST_AVAILABLE_MINUTE,
            minute_history_prefetch_length=(
                self.DATA_PORTAL_MINUTE_HISTORY_PREFETCH
            ),
            daily_history_prefetch_length=(
                self.DATA_PORTAL_DAILY_HISTORY_PREFETCH
            ),
        )
    @classmethod
    def make_futures_info(cls):
        """Define one commodity future root ('Z') carrying the class's
        contract multiplier so exposure = price * multiplier is exercised."""
        return make_commodity_future_info(
            first_sid=ord('Z'),
            root_symbols=['Z'],
            years=[cls.START_DATE.year],
            multiplier=cls.future_contract_multiplier,
        )
    @classmethod
    def _make_minute_bar_data(cls, calendar, sids):
        """Yield (sid, minute-bar DataFrame) pairs whose minutes aggregate
        back to the scripted daily closes in ``asset_daily_close``.

        The same synthesized frame is shared by every sid.
        """
        daily_close = cls.asset_daily_close
        # Simple deterministic OHLC envelope around the scripted closes.
        daily_open = daily_close - 1
        daily_high = daily_close + 1
        daily_low = daily_close - 2
        # Fixed seed keeps the simulated minutes reproducible run-to-run.
        random_state = np.random.RandomState(seed=1337)
        data = pd.concat(
            [
                simulate_minutes_for_day(
                    o,
                    h,
                    l,
                    c,
                    cls.asset_daily_volume,
                    trading_minutes=len(calendar.minutes_for_session(session)),
                    random_state=random_state,
                )
                for o, h, l, c, session in zip(
                    daily_open,
                    daily_high,
                    daily_low,
                    daily_close,
                    calendar.sessions_in_range(cls.START_DATE, cls.END_DATE),
                )
            ],
            ignore_index=True,
        )
        # Re-key the concatenated per-day frames onto the calendar's minutes.
        data.index = calendar.minutes_for_sessions_in_range(
            cls.START_DATE,
            cls.END_DATE,
        )
        for sid in sids:
            yield sid, data
    @classmethod
    def make_equity_minute_bar_data(cls):
        """Equity minute bars: the shared synthetic frame on the equity
        calendar, one copy per equity sid."""
        return cls._make_minute_bar_data(
            cls.trading_calendars[Equity],
            cls.asset_finder.equities_sids,
        )
    @classmethod
    def make_future_minute_bar_data(cls):
        """Future minute bars: the shared synthetic frame on the futures
        calendar, one copy per future sid."""
        return cls._make_minute_bar_data(
            cls.trading_calendars[Future],
            cls.asset_finder.futures_sids,
        )
@parameter_space(
direction=['long', 'short'],
# checking the portfolio forces a sync; we want to ensure that the
# perf packets are correct even without explicitly requesting the
# portfolio every day. we also want to test that ``context.portfolio``
# produces the expected values when queried mid-simulation
check_portfolio_during_simulation=[True, False],
)
def test_equity_single_position(self,
direction,
check_portfolio_during_simulation):
if direction not in ('long', 'short'):
raise ValueError(
'direction must be either long or short, got: %r' % direction,
)
shares = 1 if direction == 'long' else -1
expected_fill_price = self.data_portal.get_scalar_asset_spot_value(
self.equity,
'close',
# we expect to kill in the second bar of the first day
self.equity_minutes[1],
'minute',
)
def initialize(context):
api.set_benchmark(self.equity)
api.set_slippage(api.slippage.NoSlippage())
api.set_commission(api.commission.NoCommission())
context.first_bar = True
if check_portfolio_during_simulation:
portfolio_snapshots = {}
def check_portfolio(data, context, first_bar):
portfolio = context.portfolio
portfolio_snapshots[api.get_datetime()] = portfolio_snapshot(
portfolio,
)
positions = portfolio.positions
if first_bar:
assert_equal(positions, {})
return
assert_equal(list(positions), [self.equity])
position = positions[self.equity]
assert_equal(position.last_sale_date, api.get_datetime())
assert_equal(position.amount, shares)
assert_equal(
position.last_sale_price,
data.current(self.equity, 'close'),
)
assert_equal(position.asset, self.equity)
assert_equal(
position.cost_basis,
expected_fill_price,
)
else:
def check_portfolio(data, context, first_bar):
pass
def handle_data(context, data):
first_bar = context.first_bar
if first_bar:
api.order(self.equity, shares)
context.first_bar = False
# take the snapshot after the order; ordering does not affect
# the portfolio on the bar of the order, only the following bars
check_portfolio(data, context, first_bar)
perf = self.run_algorithm(
initialize=initialize,
handle_data=handle_data,
)
zeros = pd.Series(0.0, index=self.equity_closes)
all_zero_fields = [
'excess_return',
'treasury_period_return',
]
if direction == 'long':
all_zero_fields.extend((
'short_value',
'shorts_count',
))
else:
all_zero_fields.extend((
'long_value',
'longs_count',
))
for field in all_zero_fields:
assert_equal(
perf[field],
zeros,
check_names=False,
check_dtype=False,
msg=field,
)
ones = pd.Series(1, index=self.equity_closes)
if direction == 'long':
count_field = 'longs_count'
else:
count_field = 'shorts_count'
assert_equal(
perf[count_field],
ones,
check_names=False,
msg=field,
)
if direction == 'long':
expected_exposure = pd.Series(
self.asset_daily_close,
index=self.equity_closes,
)
exposure_fields = 'long_value', 'long_exposure'
else:
expected_exposure = pd.Series(
-self.asset_daily_close,
index=self.equity_closes,
)
exposure_fields = 'short_value', 'short_exposure'
for field in exposure_fields:
assert_equal(
perf[field],
expected_exposure,
check_names=False,
msg=field,
)
if direction == 'long':
delta = self.asset_daily_close - expected_fill_price
else:
delta = -self.asset_daily_close + expected_fill_price
expected_portfolio_value = pd.Series(
self.SIM_PARAMS_CAPITAL_BASE + delta,
index=self.equity_closes,
)
assert_equal(
perf['portfolio_value'],
expected_portfolio_value,
check_names=False,
)
capital_base_series = pd.Series(
self.SIM_PARAMS_CAPITAL_BASE,
index=self.equity_closes,
)
# leverage is gross market exposure / current notional capital
# gross market exposure is
# sum(long_exposure) + sum(abs(short_exposure))
# current notional capital is the current portfolio value
expected_max_leverage = np.maximum.accumulate(
expected_exposure.abs() / expected_portfolio_value,
)
assert_equal(
perf['max_leverage'],
expected_max_leverage,
check_names=False,
)
expected_cash = capital_base_series.copy()
if direction == 'long':
# we purchased one share on the first day
cash_modifier = -expected_fill_price
else:
# we sold one share on the first day
cash_modifier = +expected_fill_price
expected_cash[1:] += cash_modifier
assert_equal(
perf['starting_cash'],
expected_cash,
check_names=False,
)
expected_cash[0] += cash_modifier
assert_equal(
perf['ending_cash'],
expected_cash,
check_names=False,
)
# we purchased one share on the first day
expected_capital_used = pd.Series(0.0, index=self.equity_closes)
expected_capital_used[0] += cash_modifier
assert_equal(
perf['capital_used'],
expected_capital_used,
check_names=False,
)
for field in 'ending_value', 'ending_exposure':
# for equities, position value and position exposure are the same
assert_equal(
perf[field],
expected_exposure,
check_names=False,
msg=field,
)
# we don't start with any positions; the first day has no starting
# exposure
expected_starting_exposure = expected_exposure.shift(1)
expected_starting_exposure[0] = 0.0
for field in 'starting_value', 'starting_exposure':
# for equities, position value and position exposure are the same
assert_equal(
perf[field],
expected_starting_exposure,
check_names=False,
msg=field,
)
assert_equal(
perf['trading_days'],
pd.Series(
np.arange(len(self.equity_closes)) + 1,
index=self.equity_closes,
dtype=np.int64,
),
check_names=False,
)
orders = perf['orders']
expected_single_order = {
'amount': shares,
'commission': 0.0,
'created': T('2014-01-06 14:31'),
'dt': T('2014-01-06 14:32'),
'filled': shares,
'id': wildcard,
'limit': None,
'limit_reached': False,
'reason': None,
'sid': self.equity,
'status': 1,
'stop': None,
'stop_reached': False
}
# we only order on the first day
expected_orders = (
[[expected_single_order]] +
[[]] * (len(self.equity_closes) - 1)
)
assert_equal(
orders.tolist(),
expected_orders,
check_names=False,
)
assert_equal(
orders.index,
self.equity_closes,
check_names=False,
)
transactions = perf['transactions']
expected_single_transaction = {
'amount': shares,
'commission': None,
'dt': T('2014-01-06 14:32'),
'order_id': wildcard,
'price': self.data_portal.get_scalar_asset_spot_value(
self.equity,
'close',
T('2014-01-06 14:32'),
'minute',
),
'sid': self.equity,
}
# since we only order on the first day, we should only transact on the
# first day
expected_transactions = (
[[expected_single_transaction]] +
[[]] * (len(self.equity_closes) - 1)
)
assert_equal(
transactions.tolist(),
expected_transactions,
)
assert_equal(
transactions.index,
self.equity_closes,
check_names=False,
)
if not check_portfolio_during_simulation:
return
portfolio_snapshots = pd.DataFrame.from_dict(
portfolio_snapshots,
orient='index',
)
expected_starting_cash = pd.Series(
self.SIM_PARAMS_CAPITAL_BASE,
index=self.equity_minutes,
)
assert_equal(
portfolio_snapshots['starting_cash'],
expected_starting_cash,
check_names=False,
)
expected_portfolio_capital_used = pd.Series(
cash_modifier,
index=self.equity_minutes,
)
expected_portfolio_capital_used[0] = 0.0
expected_capital_used[0] = 0
assert_equal(
portfolio_snapshots['cash_flow'],
expected_portfolio_capital_used,
check_names=False,
)
minute_prices = self.data_portal.get_history_window(
[self.equity],
self.equity_minutes[-1],
len(self.equity_minutes),
'1m',
'close',
'minute',
)[self.equity]
expected_pnl = minute_prices.diff()
# we don't enter the position until the second minute
expected_pnl.iloc[:2] = 0.0
expected_pnl = expected_pnl.cumsum()
if direction == 'short':
expected_pnl = -expected_pnl
assert_equal(
portfolio_snapshots['pnl'],
expected_pnl,
check_names=False,
)
expected_portfolio_value = self.SIM_PARAMS_CAPITAL_BASE + expected_pnl
| |
self.ap.roll_deg = targets_node.getFloat("roll_deg")
target_agl_ft = targets_node.getFloat("altitude_agl_ft")
ground_m = pos_node.getFloat("altitude_ground_m")
# if pressure based:
# error_m = pos_pressure_node.getFloat("pressure_error_m")
# target_msl_ft = (ground_m + error_m) * m2ft + target_agl_ft
# else: ...
self.ap.altitude_msl_ft = ground_m * m2ft + target_agl_ft
self.ap.altitude_ground_m = ground_m
self.ap.pitch_deg = targets_node.getFloat("pitch_deg")
self.ap.airspeed_kt = targets_node.getFloat("airspeed_kt")
self.ap.flight_timer = task_node.getFloat("flight_timer")
self.ap.target_waypoint_idx = route_node.getInt("target_waypoint_idx")
self.ap.task_attr = 0
# wp_counter will get incremented externally in the
# remote_link message sender because each time we send a
# serial message to the remote ground station is when we
# want to advance to the next waypoint.
counter = remote_link_node.getInt("wp_counter")
self.ap.wp_longitude_deg = 0.0
self.ap.wp_latitude_deg = 0.0
self.ap.wp_index = 0
self.ap.route_size = active_node.getInt("route_size")
if self.ap.route_size > 0 and counter < self.ap.route_size:
self.ap.wp_index = counter
wp_path = "wpt[%d]" % self.ap.wp_index
wp_node = active_node.getChild(wp_path, True)
self.ap.wp_longitude_deg = wp_node.getFloat("longitude_deg")
self.ap.wp_latitude_deg = wp_node.getFloat("latitude_deg")
elif counter == self.ap.route_size:
self.ap.wp_longitude_deg = circle_node.getFloat("longitude_deg")
self.ap.wp_latitude_deg = circle_node.getFloat("latitude_deg")
self.ap.wp_index = 65534
self.ap.task_attr = int(round(circle_node.getFloat("radius_m") * 10))
if self.ap.task_attr > 32767: self.ap.task_attr = 32767
elif counter == self.ap.route_size + 1:
self.ap.wp_longitude_deg = home_node.getFloat("longitude_deg")
self.ap.wp_latitude_deg = home_node.getFloat("latitude_deg")
self.ap.wp_index = 65535
self.ap.task_id = 0 # code for unknown or not set
if task_node.getString("current_task_id") == "circle":
self.ap.task_id = 1
elif task_node.getString("current_task_id") == "parametric":
# draw like it's a circle
self.ap.task_id = 1
elif task_node.getString("current_task_id") == "route":
self.ap.task_id = 2
elif task_node.getString("current_task_id") == "land":
self.ap.task_id = 3
self.ap.sequence_num = remote_link_node.getInt("sequence_num")
self.ap_buf = self.ap.pack()
return self.ap_buf
    # Rotating cursor selecting which waypoint is reported in the next status
    # row; the full route is conveyed over successive calls.
    wp_counter = 0
    def pack_ap_status_dict(self, index):
        """Assemble one autopilot-status row (a dict) from the property-tree
        nodes, including one waypoint slot per call."""
        # fixme: tecs_target_tot is really zero now because these values
        # are computed in energy *error* terms
        row = dict()
        row['timestamp'] = targets_node.getFloat('timestamp')
        row['master_switch'] = ap_node.getBool("master_switch")
        row['pilot_pass_through'] = ap_node.getBool("pilot_pass_through")
        row['groundtrack_deg'] = targets_node.getFloat('groundtrack_deg')
        row['roll_deg'] = targets_node.getFloat('roll_deg')
        row['altitude_msl_ft'] = targets_node.getFloat('altitude_msl_ft')
        row['pitch_deg'] = targets_node.getFloat('pitch_deg')
        row['airspeed_kt'] = targets_node.getFloat('airspeed_kt')
        row['altitude_ground_m'] = pos_node.getFloat("altitude_ground_m")
        row['tecs_target_tot'] = tecs_node.getFloat("target_total")
        row['flight_timer'] = task_node.getFloat("flight_timer")
        row['target_waypoint_idx'] = route_node.getInt("target_waypoint_idx")
        route_size = active_node.getInt("route_size")
        row['route_size'] = route_size
        row['task_attrib'] = 0.0
        if self.wp_counter < route_size:
            # An ordinary route waypoint.
            wp_node = active_node.getChild('wpt[%d]' % self.wp_counter, True)
            row['wpt_index'] = self.wp_counter
            row['wpt_longitude_deg'] = wp_node.getFloat("longitude_deg")
            row['wpt_latitude_deg'] = wp_node.getFloat("latitude_deg")
        elif self.wp_counter == route_size:
            # Sentinel 65534: circle-hold center; task_attrib encodes the
            # circle radius in tenths of a meter.
            row['wpt_index'] = 65534
            row['wpt_longitude_deg'] = circle_node.getFloat("longitude_deg")
            row['wpt_latitude_deg'] = circle_node.getFloat("latitude_deg")
            row['task_attrib'] = int(round(circle_node.getFloat("radius_m") * 10))
        elif self.wp_counter == route_size + 1:
            # Sentinel 65535: the home location.
            row['wpt_index'] = 65535
            row['wpt_longitude_deg'] = home_node.getFloat("longitude_deg")
            row['wpt_latitude_deg'] = home_node.getFloat("latitude_deg")
        row['current_task_id'] = task_node.getString("current_task_id")
        # Advance the cursor, wrapping after route + circle + home slots.
        self.wp_counter += 1
        if self.wp_counter >= route_size + 2:
            self.wp_counter = 0
        return row
def pack_ap_status_csv(self, index):
# fixme: tecs_target_tot is really zero now because these values
# are computed in energy *error* terms
row = dict()
row['timestamp'] = '%.4f' % targets_node.getFloat('timestamp')
row['master_switch'] = '%d' % ap_node.getBool("master_switch")
row['pilot_pass_through'] = '%d' % ap_node.getBool("pilot_pass_through")
row['groundtrack_deg'] = '%.2f' % targets_node.getFloat('groundtrack_deg')
row['roll_deg'] = '%.2f' % targets_node.getFloat('roll_deg')
row['altitude_msl_ft'] = '%.2f' % targets_node.getFloat('altitude_msl_ft')
row['pitch_deg'] = '%.2f' % targets_node.getFloat('pitch_deg')
row['airspeed_kt'] = '%.1f' % targets_node.getFloat('airspeed_kt')
row['altitude_ground_m'] = '%.1f' % pos_node.getFloat("altitude_ground_m")
row['tecs_target_tot'] = '%.4f' % tecs_node.getFloat("target_total")
keys = ['timestamp', 'master_switch', 'pilot_pass_through',
'groundtrack_deg', 'roll_deg', 'altitude_msl_ft', 'pitch_deg',
'airspeed_kt', 'altitude_ground_m',
'tecs_target_tot']
return row, keys
    def unpack_ap_status_v4(self, buf):
        """Decode a v4 ap_status packet into the property tree and return
        the packet's index field."""
        result = struct.unpack(ap_status_v4_fmt, buf)
        index = result[0]
        wp_lon = result[10]
        wp_lat = result[11]
        wp_index = result[12]
        route_size = result[13]
        targets_node.setFloat("timestamp", result[1])
        # Angles were packed as tenths of a degree; rescale on unpack.
        targets_node.setFloat("groundtrack_deg", result[2] / 10.0)
        targets_node.setFloat("roll_deg", result[3] / 10.0)
        targets_node.setFloat("altitude_msl_ft", result[4])
        pos_node.setFloat("altitude_ground_m", result[5])
        targets_node.setFloat("pitch_deg", result[6] / 10.0)
        targets_node.setFloat("airspeed_kt", result[7] / 10.0)
        status_node.setFloat("flight_timer", result[8])
        route_node.setInt("target_waypoint_idx", result[9])
        # Waypoint slot: ordinary index -> route waypoint, 65534 -> circle
        # center, 65535 -> home position.
        if wp_index < route_size:
            wp_node = active_node.getChild('wpt[%d]' % wp_index, True)
            wp_node.setFloat("longitude_deg", wp_lon)
            wp_node.setFloat("latitude_deg", wp_lat)
        elif wp_index == 65534:
            circle_node.setFloat("longitude_deg", wp_lon)
            circle_node.setFloat("latitude_deg", wp_lat)
        elif wp_index == 65535:
            home_node.setFloat("longitude_deg", wp_lon)
            home_node.setFloat("latitude_deg", wp_lat)
        active_node.setInt("route_size", route_size)
        if result[14] >= 1:
            remote_link_node.setInt("sequence_num", result[14])
        return index
    def unpack_ap_status_v5(self, buf):
        """Decode an ap_status_v5 message and publish autopilot targets,
        switch flags, and route state to the property tree.

        Returns:
            The message index.
        """
        ap = aura_messages.ap_status_v5(buf)
        index = ap.index
        wp_lon = ap.wp_longitude_deg
        wp_lat = ap.wp_latitude_deg
        wp_index = ap.wp_index
        route_size = ap.route_size
        targets_node.setFloat("timestamp", ap.timestamp_sec)
        # flags bit 0: master switch, bit 1: pilot pass-through
        flags = ap.flags
        ap_node.setBool("master_switch", flags & (1<<0))
        ap_node.setBool("pilot_pass_through", flags & (1<<1))
        targets_node.setFloat("groundtrack_deg", ap.groundtrack_deg)
        targets_node.setFloat("roll_deg", ap.roll_deg)
        targets_node.setFloat("altitude_msl_ft", ap.altitude_msl_ft)
        pos_node.setFloat("altitude_ground_m", ap.altitude_ground_m)
        targets_node.setFloat("pitch_deg", ap.pitch_deg)
        targets_node.setFloat("airspeed_kt", ap.airspeed_kt)
        status_node.setFloat("flight_timer", ap.flight_timer)
        status_node.setBool("onboard_flight_timer", True)
        if route_size != active_node.getInt("route_size"):
            # route size change, zero all the waypoint coordinates
            for i in range(active_node.getInt("route_size")):
                wp_node = active_node.getChild('wpt[%d]' % i, True)
                wp_node.setFloat("longitude_deg", 0)
                wp_node.setFloat("latitude_deg", 0)
        route_node.setInt("target_waypoint_idx", ap.target_waypoint_idx)
        # wp_index selects a route waypoint when in range; sentinel values
        # 65534/65535 carry the circle center / home location instead
        if wp_index < route_size:
            wp_node = active_node.getChild('wpt[%d]' % wp_index, True)
            wp_node.setFloat("longitude_deg", wp_lon)
            wp_node.setFloat("latitude_deg", wp_lat)
        elif wp_index == 65534:
            circle_node.setFloat("longitude_deg", wp_lon)
            circle_node.setFloat("latitude_deg", wp_lat)
        elif wp_index == 65535:
            home_node.setFloat("longitude_deg", wp_lon)
            home_node.setFloat("latitude_deg", wp_lat)
        active_node.setInt("route_size", route_size)
        # a sequence number of 0 apparently means "not reported"
        if ap.sequence_num >= 1:
            remote_link_node.setInt("sequence_num", ap.sequence_num)
        return index
    def unpack_ap_status_v6(self, buf):
        """Decode an ap_status_v6 message and publish autopilot targets,
        route state, and the current task id to the property tree.

        Returns:
            The message index.
        """
        ap = aura_messages.ap_status_v6(buf)
        index = ap.index
        wp_lon = ap.wp_longitude_deg
        wp_lat = ap.wp_latitude_deg
        wp_index = ap.wp_index
        route_size = ap.route_size
        task_id = ap.task_id
        task_attrib = ap.task_attribute
        targets_node.setFloat("timestamp", ap.timestamp_sec)
        # flags bit 0: master switch, bit 1: pilot pass-through
        flags = ap.flags
        ap_node.setBool("master_switch", flags & (1<<0))
        ap_node.setBool("pilot_pass_through", flags & (1<<1))
        targets_node.setFloat("groundtrack_deg", ap.groundtrack_deg)
        targets_node.setFloat("roll_deg", ap.roll_deg)
        targets_node.setFloat("altitude_msl_ft", ap.altitude_msl_ft)
        pos_node.setFloat("altitude_ground_m", ap.altitude_ground_m)
        targets_node.setFloat("pitch_deg", ap.pitch_deg)
        targets_node.setFloat("airspeed_kt", ap.airspeed_kt)
        status_node.setFloat("flight_timer", ap.flight_timer)
        status_node.setBool("onboard_flight_timer", True)
        if route_size != active_node.getInt("route_size"):
            # route size change, zero all the waypoint coordinates
            for i in range(active_node.getInt("route_size")):
                wp_node = active_node.getChild('wpt[%d]' % i, True)
                wp_node.setFloat("longitude_deg", 0)
                wp_node.setFloat("latitude_deg", 0)
        route_node.setInt("target_waypoint_idx", ap.target_waypoint_idx)
        # wp_index selects a route waypoint when in range; sentinel 65534
        # carries the circle center (radius packed in task_attribute,
        # scaled by 10), 65535 carries the home location
        if wp_index < route_size:
            wp_node = active_node.getChild('wpt[%d]' % wp_index, True)
            wp_node.setFloat("longitude_deg", wp_lon)
            wp_node.setFloat("latitude_deg", wp_lat)
        elif wp_index == 65534:
            circle_node.setFloat("longitude_deg", wp_lon)
            circle_node.setFloat("latitude_deg", wp_lat)
            circle_node.setFloat("radius_m", task_attrib / 10.0)
        elif wp_index == 65535:
            home_node.setFloat("longitude_deg", wp_lon)
            home_node.setFloat("latitude_deg", wp_lat)
        # map the numeric task id to a readable task name
        if task_id == 1:
            task_node.setString("current_task_id", "circle")
        elif task_id == 2:
            task_node.setString("current_task_id", "route")
        elif task_id == 3:
            task_node.setString("current_task_id", "land")
        else:
            task_node.setString("current_task_id", "unknown")
        active_node.setInt("route_size", route_size)
        # a sequence number of 0 apparently means "not reported"
        if ap.sequence_num >= 1:
            remote_link_node.setInt("sequence_num", ap.sequence_num)
        return index
    def unpack_ap_status_v7(self, buf):
        """Decode an ap_status_v7 message and publish autopilot targets,
        route state, and the current task id to the property tree.

        The decoded field handling is identical to the v6 variant; only the
        wire format (message class) differs.

        Returns:
            The message index.
        """
        ap = aura_messages.ap_status_v7(buf)
        index = ap.index
        wp_lon = ap.wp_longitude_deg
        wp_lat = ap.wp_latitude_deg
        wp_index = ap.wp_index
        route_size = ap.route_size
        task_id = ap.task_id
        task_attrib = ap.task_attribute
        targets_node.setFloat("timestamp", ap.timestamp_sec)
        # flags bit 0: master switch, bit 1: pilot pass-through
        flags = ap.flags
        ap_node.setBool("master_switch", flags & (1<<0))
        ap_node.setBool("pilot_pass_through", flags & (1<<1))
        targets_node.setFloat("groundtrack_deg", ap.groundtrack_deg)
        targets_node.setFloat("roll_deg", ap.roll_deg)
        targets_node.setFloat("altitude_msl_ft", ap.altitude_msl_ft)
        pos_node.setFloat("altitude_ground_m", ap.altitude_ground_m)
        targets_node.setFloat("pitch_deg", ap.pitch_deg)
        targets_node.setFloat("airspeed_kt", ap.airspeed_kt)
        status_node.setFloat("flight_timer", ap.flight_timer)
        status_node.setBool("onboard_flight_timer", True)
        if route_size != active_node.getInt("route_size"):
            # route size change, zero all the waypoint coordinates
            for i in range(active_node.getInt("route_size")):
                wp_node = active_node.getChild('wpt[%d]' % i, True)
                wp_node.setFloat("longitude_deg", 0)
                wp_node.setFloat("latitude_deg", 0)
        route_node.setInt("target_waypoint_idx", ap.target_waypoint_idx)
        # wp_index selects a route waypoint when in range; sentinel 65534
        # carries the circle center (radius packed in task_attribute,
        # scaled by 10), 65535 carries the home location
        if wp_index < route_size:
            wp_node = active_node.getChild('wpt[%d]' % wp_index, True)
            wp_node.setFloat("longitude_deg", wp_lon)
            wp_node.setFloat("latitude_deg", wp_lat)
        elif wp_index == 65534:
            circle_node.setFloat("longitude_deg", wp_lon)
            circle_node.setFloat("latitude_deg", wp_lat)
            circle_node.setFloat("radius_m", task_attrib / 10.0)
        elif wp_index == 65535:
            home_node.setFloat("longitude_deg", wp_lon)
            home_node.setFloat("latitude_deg", wp_lat)
        # map the numeric task id to a readable task name
        if task_id == 1:
            task_node.setString("current_task_id", "circle")
        elif task_id == 2:
            task_node.setString("current_task_id", "route")
        elif task_id == 3:
            task_node.setString("current_task_id", "land")
        else:
            task_node.setString("current_task_id", "unknown")
        active_node.setInt("route_size", route_size)
        # a sequence number of 0 apparently means "not reported"
        if ap.sequence_num >= 1:
            remote_link_node.setInt("sequence_num", ap.sequence_num)
        return index
def pack_system_health_bin(self, use_cached=False):
health_time = status_node.getFloat('frame_time')
if not use_cached and health_time > self.last_health_time:
self.last_health_time = health_time
self.health.index = 0
self.health.timestamp_sec = health_time
self.health.system_load_avg = status_node.getFloat("system_load_avg")
self.health.fmu_timer_misses = status_node.getInt("fmu_timer_misses")
self.health.avionics_vcc = power_node.getFloat("avionics_vcc")
self.health.main_vcc = power_node.getFloat("main_vcc")
self.health.cell_vcc = power_node.getFloat("cell_vcc")
self.health.main_amps = power_node.getFloat("main_amps")
self.health.total_mah = power_node.getFloat("total_mah")
self.health_buf = self.health.pack()
return self.health_buf
def pack_system_health_dict(self, index):
row = dict()
row['timestamp'] = status_node.getFloat('frame_time')
row['system_load_avg'] = status_node.getFloat('system_load_avg')
row['fmu_timer_misses'] = status_node.getFloat('fmu_timer_misses')
row['avionics_vcc'] = power_node.getFloat('avionics_vcc')
row['main_vcc'] = power_node.getFloat('main_vcc')
row['cell_vcc'] = power_node.getFloat('cell_vcc')
row['main_amps'] = power_node.getFloat('main_amps')
row['total_mah'] = power_node.getFloat('total_mah')
return row
def pack_system_health_csv(self, index):
row = dict()
row['timestamp'] = '%.4f' % status_node.getFloat('frame_time')
row['system_load_avg'] = '%.2f' % status_node.getFloat('system_load_avg')
row['avionics_vcc'] = '%.2f' % power_node.getFloat('avionics_vcc')
row['main_vcc'] = '%.2f' % power_node.getFloat('main_vcc')
row['cell_vcc'] = '%.2f' % power_node.getFloat('cell_vcc')
row['main_amps'] = '%.2f' % power_node.getFloat('main_amps')
row['total_mah'] = '%.0f' % power_node.getFloat('total_mah')
keys = ['timestamp', 'system_load_avg', 'avionics_vcc', 'main_vcc',
'cell_vcc', 'main_amps', 'total_mah']
return row, keys
def unpack_system_health_v4(self, buf):
health = aura_messages.system_health_v4(buf)
status_node.setFloat("frame_time", health.timestamp_sec)
status_node.setFloat("system_load_avg", health.system_load_avg)
power_node.setFloat("avionics_vcc", health.avionics_vcc)
power_node.setFloat("main_vcc", health.main_vcc)
power_node.setFloat("cell_vcc", health.cell_vcc)
power_node.setFloat("main_amps", health.main_amps)
power_node.setInt("total_mah", health.total_mah)
return health.index
def unpack_system_health_v5(self, buf):
health = aura_messages.system_health_v5(buf)
status_node.setFloat("frame_time", health.timestamp_sec)
status_node.setFloat("system_load_avg", health.system_load_avg)
power_node.setFloat("avionics_vcc", health.avionics_vcc)
power_node.setFloat("main_vcc", health.main_vcc)
power_node.setFloat("cell_vcc", health.cell_vcc)
power_node.setFloat("main_amps", health.main_amps)
power_node.setInt("total_mah", health.total_mah)
return health.index
def unpack_system_health_v6(self, buf):
health = aura_messages.system_health_v6(buf)
status_node.setFloat("frame_time", health.timestamp_sec)
status_node.setFloat("system_load_avg", health.system_load_avg)
status_node.setInt("fmu_timer_misses", health.fmu_timer_misses)
power_node.setFloat("avionics_vcc", health.avionics_vcc)
power_node.setFloat("main_vcc", health.main_vcc)
power_node.setFloat("cell_vcc", health.cell_vcc)
power_node.setFloat("main_amps", health.main_amps)
power_node.setInt("total_mah", health.total_mah)
return health.index
def pack_payload_dict(self, index):
row = dict()
row['timestamp'] = payload_node.getFloat('timestamp')
row['trigger_num'] = payload_node.getInt('trigger_num')
return row
def pack_payload_csv(self, index):
row = dict()
row['timestamp'] = '%.4f' % payload_node.getFloat('timestamp')
row['trigger_num'] = '%d' % payload_node.getInt('trigger_num')
keys = ['timestamp', 'trigger_num']
return row, keys
def unpack_payload_v2(self, buf):
payload = aura_messages.payload_v2(buf)
payload_node.setFloat("timestamp", payload.timestamp_sec)
payload_node.setInt("trigger_num", payload.trigger_num)
return payload.index
def unpack_payload_v3(self, buf):
payload = aura_messages.payload_v3(buf)
payload_node.setFloat("timestamp", payload.timestamp_sec)
payload_node.setInt("trigger_num", payload.trigger_num)
return payload.index
def pack_event_dict(self, index):
row = dict()
timestamp = event_node.getFloat('timestamp')
if timestamp < 0.001:
imu_node = getNode('/sensors/imu[0]', True)
timestamp = imu_node.getFloat('timestamp')
row['timestamp'] = timestamp
row['message'] = event_node.getString('message')
return row
def pack_event_csv(self, index):
row = dict()
row['timestamp'] = '%.4f' % event_node.getFloat('timestamp')
row['message'] = event_node.getString('message')
keys = ['timestamp', 'message']
return row, keys
def unpack_event_v1(self, buf):
event = aura_messages.event_v1(buf)
m = | |
#!/usr/bin/python
#Copyright IBM Corp. 2018.
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#http://www.apache.org/licenses/LICENSE-2.0
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
from rest_framework import svt_tester_base
from rest_framework.statemachine import StateMachine
from rest_framework.restUtils import HttpError
from rest_framework import novaUtils
from rest_framework import Utils
import os
import time
import copy
from datetime import datetime
from datetime import timedelta
# State-machine state names
START = "START"
DEPLOYING = "DEPLOYING"
DEPLOY_COMPLETE = 'DEPLOY_COMPLETE'
DEPLOY_ADD = 'DEPLOY_ADD'
RESIZING = 'RESIZING'
START_DELETE = 'START_DELETE'
TIMED_OUT = 'TIMED_OUT'
RESIZE_COMPLETE = 'RESIZE_COMPLETE'
DELETING = 'DELETING'
DELETE_COMPLETE = 'DELETE_COMPLETE'
DELETE_START = 'DELETE_START'
ERROR = 'ERROR'
END = "END"
# Configuration option names (looked up in the test's config file; see the
# sample config block below)
NUM_DEPLOYS = 'number_of_deploys'
DEPLOYS_BEFORE_SCENARIO_START = 'deploys_before_scenario_start'
SCG_FLAVOR_NAMES = 'scg_flavor_names'
START_IP = 'start_ip_address'
IMAGE_NAME_LIST = 'image_name_list'
SRV_NAME_PREFIX = 'server_name_prefix'
NET_NAME = 'network_name'
DEPLOY_FLAV = 'deploy_flavor'
RESIZE_FLAV = 'resize_flavor'
SLEEP_INT = 'sleep_interval'
WAIT_TO = 'wait_timeout'
NUM_CONCUR_DEPLOYS = 'number_of_concurrent_deploys'
OPT_TIMED_STATS = 'timed_stats'
# Keys of the optional timing-statistics dictionary
DEPLOY_SUM = 'deploy_sum'
DELETE_SUM = 'delete_sum'
RESIZE_SUM = 'resize_sum'
DEPLOY_COUNT = 'deploy_count'
DELETE_COUNT = 'delete_count'
RESIZE_COUNT = 'resize_count'
# Keys used inside a state-machine work item's cargo dictionary
SERVER_DICT = 'server'
VMS_LIST = 'vms'
STATS_DICT = 'stats'
STATE_START_TIME = 'state_start'
IP_ADDR = 'ip_addr'
DEPLOY_FLAVID = 'deploy_flavorId'
IMAGE_ID = 'imageRef'
NET_ID = 'networkId'
RESIZE_FLAVID = 'resize_flavorId'
POST_DEPLOY_STATE = 'post_deploy_state'
# Keys of a work item itself (state name + cargo payload)
SM_STATE = 'state'
SM_CARGO = 'cargo'
# Common dictionary keys found in REST responses
NAME_KEY = 'name'
ID_KEY = 'id'
TASK_STATE_KEY = 'task_state'
VM_STATE_KEY = 'vm_state'
POWER_STATE_KEY = 'power_state'
HEALTH_VAL_KEY = 'health_value'
TIME_OUT_STATE_KEY = 'timeout_state'
#Sample Config Parameters
"""
[test_1013_complex2_deploy]
number_of_deploys = 100
deploys_before_scenario_start = 100
start_ip_address =172.26.17.1
image_name_list = ['rhel72_1','rhel73LE_1','rhel73LE_2']
server_name_prefix = rojv_
network_name = private
deploy_flavor = roj1
resize_flavor = roj2
sleep_interval = 8
wait_timeout = 300
number_of_concurrent_deploys = 10
timed_stats = True
scg_flavor_names = ['vSCSI']
"""
class SvtDeployTester(svt_tester_base.SvtTesterBase):
"""Testcase to do sequential deploys and sequential resize on stop.
Requires a number of config options to be set in the configuration file
which is passed as an argument:
number_of_deploys = <number of deploys to perform>
server_name_prefix = <prefix of the server name>
start_ip_address = <starting ip address>
image_name = <name of image to use for deploy>
network_name = <name of the network to use>
wait_timeout
flavor_for_deploy = <flavor used for deploys>
concurrent_count = <number of concurrent deploys>
Optional configuration:
timed_stats = <True or False depending if you want to see average timings>
"""
required_options = [NUM_DEPLOYS, START_IP, IMAGE_NAME_LIST,
SRV_NAME_PREFIX, NET_NAME, DEPLOY_FLAV, RESIZE_FLAV,
SLEEP_INT, WAIT_TO, NUM_CONCUR_DEPLOYS,
DEPLOYS_BEFORE_SCENARIO_START, SCG_FLAVOR_NAMES
]
def cmp(a, b):
return (a > b) - (a < b)
    def test_1013_complex2_deploy(self):
        """Drive a mixed deploy/resize/delete scenario through a state
        machine until the requested number of servers exist.

        All scenario parameters come from the config file (see
        ``required_options``).  The test builds an IP pool, then loops:
        requesting up to ``number_of_concurrent_deploys`` deploys at a
        time, stepping every in-flight work item through the state
        machine, recycling IPs from deleted servers, and optionally
        accumulating timing statistics.  Exits the process via
        ``os._exit(1)`` on any unrecoverable error.
        """
        options_missing = False
        for option in self.required_options:
            if not self.config_has_option(option):
                print(('option=', option, 'not found in configuration file'))
                options_missing = True
        if options_missing:
            print('Provide missing options to the configuration file.')
            os._exit(1)
        number_of_deploys = int(self.config_get(NUM_DEPLOYS))
        server_name_prefix = self.config_get(SRV_NAME_PREFIX)
        start_ip_address = self.config_get(START_IP)
        image_name_list = self.config_get(IMAGE_NAME_LIST)
        network_name = self.config_get(NET_NAME)
        deploy_flavor = self.config_get(DEPLOY_FLAV)
        resize_flavor = self.config_get(RESIZE_FLAV)
        number_of_concurrent_deploys = \
            self.config_get(NUM_CONCUR_DEPLOYS)
        # post-deploy fate, chosen by (request count % 4) below:
        # index 0 -> keep, 1 or 3 -> delete, 2 -> resize.  DELETING
        # intentionally appears twice.
        post_states = [END, DELETING, RESIZING, DELETING]
        deploys_before_scenario_starts = \
            self.config_get(DEPLOYS_BEFORE_SCENARIO_START)
        wait_timeout = self.config_get(WAIT_TO)
        sleep_interval = self.config_get(SLEEP_INT)
        if self.config_has_option(OPT_TIMED_STATS):
            timed_stats = self.config_get(OPT_TIMED_STATS)
        else:
            timed_stats = False
        deploy_flavorId = None
        resize_flavorId = None
        try:
            flavor_list = Utils.get_flavor_list(self.authent_id, self.novaUrl)
        except HttpError as e:
            print(('HTTP Error: {0}'.format(e.body)))
            os._exit(1)
        # resolve the deploy/resize flavor names to flavor ids
        for flavor in flavor_list:
            if flavor[NAME_KEY] == deploy_flavor:
                deploy_flavorId = flavor[ID_KEY]
            if flavor[NAME_KEY] == resize_flavor:
                resize_flavorId = flavor[ID_KEY]
            if deploy_flavorId and resize_flavorId:
                break
        if not deploy_flavorId:
            print(("Deploy Flavor, {0} not found".format(deploy_flavor)))
            # NOTE(review): plain exit(1) here vs os._exit(1) everywhere else
            exit(1)
        if not resize_flavorId:
            print(("Resize Flavor, {0} not found".format(resize_flavor)))
            os._exit(1)
        image_ref_list = []
        for image_name in image_name_list:
            imageRef = Utils.get_named_image(self.authent_id, self.glanceUrl,
                                             image_name)
            image_ref_list.append(imageRef)
        if not image_ref_list:
            print(('Named images {0} not found,'\
                   'provide a valid image names.'.format(image_name_list)))
            os._exit(1)
        networkId = Utils.get_named_network_id(self.authent_id,
                                               self.quantumUrl,
                                               network_name)
        if not networkId:
            print(('Named Network {0} not found,'\
                   ' provide a valid image name.'.format(network_name)))
            os._exit(1)
        netmask = Utils.get_netmask(self.authent_id, self.quantumUrl,
                                    networkId, start_ip_address)
        # define state machine
        sm = StateMachine()
        sm.add_state(START, self.start)
        sm.add_state(DEPLOYING, self.deploying)
        sm.add_state(DEPLOY_COMPLETE, self.deploy_complete)
        sm.add_state(DEPLOY_ADD, self.deploy_add)
        sm.add_state(RESIZING, self.resizing)
        sm.add_state(START_DELETE, self.start_delete)
        sm.add_state(DELETING, self.deleting)
        sm.add_state(DELETE_COMPLETE, self.delete_complete)
        sm.add_state(RESIZE_COMPLETE, self.resize_complete)
        sm.add_state(TIMED_OUT, self.timed_out)
        sm.add_state(END, self.end_transition, end_state=1)
        sm.add_state(ERROR, self.error_transition, end_state=1)
        sm.set_start(START)
        if timed_stats:
            stats = {DEPLOY_SUM: timedelta(0),
                     DEPLOY_COUNT: 0,
                     DELETE_SUM: timedelta(0),
                     DELETE_COUNT: 0,
                     RESIZE_SUM: timedelta(0),
                     RESIZE_COUNT: 0
                     }
        else:
            stats = None
        req_deploy_count = 0
        num_deployed = 0
        num_deleted = 0
        num_resized = 0
        work_item_list = []
        servers_timed_out = []
        servers_errored = []
        server_list = Utils.get_server_details(self.authent_id, self.novaUrl,
                                               fields=['name',
                                                       'id',
                                                       'created',
                                                       'addresses'])
        if number_of_deploys <= len(server_list):
            print(('Requested {0} Virtual Servers, but {1} already exist'.\
                   format(number_of_deploys, len(server_list))))
            os._exit(1)
        # build the candidate IP pool, then drop any address that an
        # existing server is already using
        ip_pool = []
        for i in range(number_of_deploys + number_of_concurrent_deploys):
            ip = Utils.next_ip(start_ip_address, netmask, i)
            if not ip:
                break
            ip_pool.append(ip)
        for server in server_list:
            for network in server['addresses']:
                for addr_dict in server['addresses'][network]:
                    if addr_dict['addr'] in ip_pool:
                        ip_pool.remove(addr_dict['addr'])
        print(('ip_pool[{0}]={1}'.format(len(ip_pool), ip_pool)))
        # NOTE(review): leftover debug print below
        print("preeti server_list=",server_list)
        #server_list.sort()
        #server_list.sort(cmp, self.key_created, False)
        projected_deploy_count = len(server_list)
        # SCG section.
        # We first get the list of SCG flavor names.
        try:
            scg_flavor_list = self.config_get(SCG_FLAVOR_NAMES)
        except HttpError as e:
            print(('HTTP Error: {0}'.format(e.body)))
            os._exit(1)
        # Query for all the SCGs
        try:
            scg_response, scg_dict = novaUtils.getSCGs(self.novaUrl, self.authent_id)
        except HttpError as e:
            print(('HTTP Error: {0}'.format(e.body)))
            os._exit(1)
        # Scan the usable SCGs
        scgs_to_use = []
        for scg in scg_dict['storage_connectivity_groups']:
            for scg_flavor in scg_flavor_list:
                if scg['display_name'] == scg_flavor:
                    scgs_to_use.append(scg['id'])
        scgs_copy = copy.copy(scgs_to_use)
        # main loop: run while any work item is in flight or more deploys
        # are still needed
        while work_item_list[:] or projected_deploy_count < number_of_deploys:
            # Count the number of severs that are currently deploying
            concurrent_deploy_count = 0
            for work_item in work_item_list:
                if work_item[SM_STATE] == DEPLOYING or\
                   work_item[SM_STATE] == START:
                    concurrent_deploy_count += 1
            # request as many deploys as possible to meet the concurrent value
            while concurrent_deploy_count < number_of_concurrent_deploys and \
                  projected_deploy_count < number_of_deploys:
                if len(server_list) > 5:
                    display_count = 5
                else:
                    display_count = len(server_list)
                if display_count > 0:
                    print(('deployed servers={0}'.\
                           format(server_list[0:display_count])))
                print(('ip_pool={0}'.format(ip_pool[0:5])))
                # round-robin through the configured image list
                imageId = \
                    image_ref_list[req_deploy_count % len(image_ref_list)]
                ip = ip_pool.pop(0)
                # rotate through the usable SCG ids, refilling the working
                # copy whenever it is exhausted
                if len(scgs_copy) != 0:
                    scg_id = scgs_copy[0]
                    if scg_id in scgs_copy:
                        scgs_copy.remove(scg_id)
                else:
                    scgs_copy = copy.copy(scgs_to_use)
                    scg_id = scgs_copy[0]
                    if scg_id in scgs_copy:
                        scgs_copy.remove(scg_id)
                try:
                    scg_prop = {"powervm:storage_connectivity_group":scg_id}
                    novaUtils.addSCGtoFlavor(self.novaUrl, self.authent_id,
                                             deploy_flavorId, scg_prop)
                except Exception as e:
                    # best-effort: a failed SCG attach does not stop the deploy
                    print("Encoutered an exception in adding scg")
                server = self.deploy_server(imageId, server_name_prefix,
                                            ip, networkId,
                                            deploy_flavorId)
                cargo = {}
                index = 0
                index = req_deploy_count % len(post_states)
                # early "warm-up" deploys are always kept; afterwards the
                # post-deploy fate cycles keep/delete/resize/delete
                if req_deploy_count < deploys_before_scenario_starts:
                    cargo[POST_DEPLOY_STATE] = END
                    projected_deploy_count += 1
                elif index == 0:
                    cargo[POST_DEPLOY_STATE] = END
                    projected_deploy_count += 1
                elif index == 1 or index == 3:
                    cargo[POST_DEPLOY_STATE] = DELETING
                elif index == 2:
                    cargo[POST_DEPLOY_STATE] = RESIZING
                    cargo[RESIZE_FLAVID] = resize_flavorId
                    projected_deploy_count += 1
                cargo[SERVER_DICT] = server
                cargo[STATE_START_TIME] = datetime.now()
                cargo[STATS_DICT] = stats
                cargo[WAIT_TO] = wait_timeout
                work_item_list.append({SM_STATE: START,
                                       SM_CARGO: cargo})
                concurrent_deploy_count += 1
                req_deploy_count += 1
            # step every live work item; iterate over a copy so finished
            # items can be removed from the real list
            for work_item in work_item_list[:]:
                currentState = work_item[SM_STATE]
                currentCargo = work_item[SM_CARGO]
                if sm.is_end_state(currentState):
                    work_item_list.remove(work_item)
                    print(('work_item finished:', str(work_item)))
                    continue
                # get updated state and cargo
                (newState, newCargo) = sm.step(currentState, currentCargo)
                if newState != currentState:
                    print(('{0} -> {1} for Server {2}'.\
                           format(currentState, newState,
                                  newCargo[SERVER_DICT][NAME_KEY])))
                    if newState == DELETE_COMPLETE:
                        # recycle IP
                        server = newCargo['server']
                        for network in newCargo['server']['addresses']:
                            for addr_dict in \
                                newCargo['server']['addresses'][network]:
                                # BUG(review): tests the address *dict*
                                # against a list of strings, so the check is
                                # always true and an address can be inserted
                                # more than once
                                if not addr_dict in ip_pool:
                                    ip_pool.insert(0, addr_dict['addr'])
                                    print(('+++ip_pool after insert{0}'.\
                                           format(ip_pool[0:3])))
                                    print(('**ip addr of deleted vm={0}'.\
                                           format(ip_pool[0])))
                        num_deleted += 1
                    elif newState == TIMED_OUT:
                        servers_timed_out.append(newCargo[SERVER_DICT]
                                                 [NAME_KEY])
                    elif newState == DEPLOY_ADD:
                        server_id = None
                        server_id = newCargo[SERVER_DICT]['id']
                        server = {}
                        fields = ['name', 'id', 'created', 'addresses']
                        try:
                            server = Utils.get_server(self.authent_id,
                                                      self.novaUrl,
                                                      server_id, fields=fields)
                        except HttpError as e:
                            print(('HTTP Error: {0}'.format(e.body)))
                            os._exit(1)
                        if not server in server_list:
                            server_list.append(server)
                            print(('Adding Server {0} to list of servers'.\
                                   format(server)))
                        num_deployed += 1
                    elif newState == RESIZE_COMPLETE:
                        num_resized += 1
                    elif newState == START_DELETE:
                        # spawn an independent delete work item for the
                        # oldest known server
                        deleteCargo = {}
                        deleteCargo[SERVER_DICT] = server_list.pop(0)
                        print(('Request Delete Server {0}'.\
                               format(deleteCargo[SERVER_DICT]['name'])))
                        try:
                            Utils.delete_server(self.authent_id, self.novaUrl,
                                                deleteCargo[SERVER_DICT])
                        except HttpError as e:
                            print(('HTTP Error: {0}'.format(e.body)))
                            os._exit(1)
                        deleteCargo[STATE_START_TIME] = datetime.now()
                        deleteCargo[STATS_DICT] = stats
                        deleteCargo[WAIT_TO] = wait_timeout
                        work_item_list.append({SM_STATE: DELETING,
                                               SM_CARGO: deleteCargo})
                # update work_item
                work_item[SM_STATE] = newState
                work_item[SM_CARGO] = newCargo
            time.sleep(sleep_interval)
        print(('number of requested deploys=', req_deploy_count))
        # NOTE(review): 'SUMMARTY' typo below (runtime string, left as-is)
        print('-------SUMMARTY-------')
        print(('Number of deploys = {0}'.format(num_deployed)))
        print(('Number of deletions = {0}'.format(num_deleted)))
        print(('Number of resizes = {0}'.format(num_resized)))
        # BUG(review): format string below has no '{0}' placeholder, so the
        # errored-server count is never actually printed
        print(('Number of servers in Error State ='.format(len(servers_errored))))
        print(('Number of servers that timed out {0}'.\
               format(len(servers_timed_out))))
        if timed_stats:
            if stats[DEPLOY_COUNT] > 0:
                print(('deployed count:{0}, total time:{1}, average time{2}'.\
                       format(stats[DEPLOY_COUNT], stats[DEPLOY_SUM],
                              stats[DEPLOY_SUM] / stats[DEPLOY_COUNT])))
            if stats[DELETE_COUNT] > 0:
                print(('deleted count:{0}, total time:{1}, average time{2}'.\
                       format(stats[DELETE_COUNT], stats[DELETE_SUM],
                              stats[DELETE_SUM] / stats[DELETE_COUNT])))
            if stats[RESIZE_COUNT] > 0:
                print(('resize count:{0}, total time:{1}, average time{2}'.\
                       format(stats[RESIZE_COUNT], stats[RESIZE_SUM],
                              stats[RESIZE_SUM] / stats[RESIZE_COUNT])))
def deploy_server(self, imageId, server_name_prefix, ip_addr, networkId,
deploy_flavor):
myState = START
server_name = \
Utils.unique_server_name(server_name_prefix, ip_addr)
for i in range(5):
if deploy_flavor:
server = Utils.create_server(self.authent_id, self.novaUrl,
imageId, server_name,
ip_addr,
networkId,
flavor_id=deploy_flavor)
else:
server = Utils.create_server(self.authent_id, self.novaUrl,
imageId, | |
"""A module for handling potentials.
This module contains several different classes representing potentials,
each having methods to compute relevant nondimensional quantities as
functions of nondimensional force or stretch.
This module also contains the parent class ``Potential`` that is used to
assign a potential to a given model using keyword arguments.
Examples:
Create a Lennard-Jones potential model with
a nondimensional potential energy scale of 8 and evaluate
the nondimensional potential energy at a stretch of 1.23:
>>> from ufjc.potential import LennardJonesPotential
>>> model = LennardJonesPotential(varepsilon=8)
>>> model.beta_u(1.23)
4.046654314368616
    Do the same with the Lennard-Jones-FENE potential:
>>> from ufjc.potential import LJFENEPotential
>>> model = LJFENEPotential(varepsilon=(8, 8))
>>> model.beta_u(1.23)
8.510502022381505
Create a single-link model in one dimension, instantiate it with
the Morse potential, and compute the incremental link stretch under
a nondimensional force of 8:
>>> from ufjc.potential import Potential
>>> class Link1D(Potential):
... def __init__(self, **kwargs):
... Potential.__init__(self, **kwargs)
>>> Link1D(potential='morse').delta_lambda(8)
0.04890980361596759
>>> Link1D(potential='lj-fene').eta_link(1)
184.0
"""
# Import external modules
import numpy as np
from scipy.special import lambertw
class HarmonicPotential(object):
    r"""The harmonic potential.

    A quadratic well in the stretch,
    :math:`\beta u(\lambda) = \varepsilon(\lambda - 1)^2/2`.

    Attributes:
        varepsilon (float): The nondimensional energy scale.
        kappa (float): The nondimensional stiffness :math:`\kappa=\varepsilon`.
        c (float): The correction parameter :math:`c=1`.

    """
    def __init__(self, **kwargs):
        """Initializes the ``HarmonicPotential`` class.

        Args:
            **kwargs: Arbitrary keyword arguments.
                ``varepsilon`` selects the nondimensional energy scale
                (default 88).

        """
        self.varepsilon = kwargs.get('varepsilon', 88)
        # for a quadratic well the stiffness equals the energy scale
        self.kappa = self.varepsilon
        self.c = 1

    def phi(self, lambda_):
        r"""The scaled nondimensional potential energy function,

        .. math::
            \phi(\lambda) = \frac{1}{2}(\lambda-1)^2.

        Args:
            lambda_ (array_like): The stretch(s).

        Returns:
            numpy.ndarray: The scaled nondimensional potential energy(s).

        """
        return 0.5*(lambda_ - 1)**2

    def beta_u(self, lambda_):
        r"""The nondimensional potential energy function,
        :math:`\beta u(\lambda) = \varepsilon\phi(\lambda)`.

        Args:
            lambda_ (array_like): The stretch(s).

        Returns:
            numpy.ndarray: The nondimensional potential energy(s).

        """
        return self.phi(lambda_)*self.varepsilon

    def eta_link(self, lambda_):
        r"""The nondimensional force as a function of stretch,
        :math:`\eta(\lambda) = \varepsilon(\lambda - 1)`.

        Args:
            lambda_ (array_like): The stretch(s).

        Returns:
            numpy.ndarray: The nondimensional force(s).

        Example:
            Compute the nondimensional force at a sample stretch:

                >>> from ufjc.potential import HarmonicPotential
                >>> HarmonicPotential().eta_link(1.8)
                70.4

        """
        return (lambda_ - 1)*self.varepsilon

    def delta_lambda(self, eta):
        r"""The incremental stretch as a function of nondimensional force,
        :math:`\Delta\lambda(\eta) = \eta/\varepsilon`.

        Args:
            eta (array_like): The nondimensional force(s).

        Returns:
            numpy.ndarray: The incremental stretch(s).

        """
        return eta/self.varepsilon
class LogSquaredPotential(object):
    r"""The log-squared potential :cite:`mao2017rupture`.

    Attributes:
        varepsilon (float): The nondimensional energy scale.
        kappa (float): The nondimensional stiffness :math:`\kappa=\varepsilon`.
        c (float): The correction parameter :math:`c=2/5`.
        eta_max (float): The maximum nondimensional force
            :math:`\eta_\mathrm{max} = e^{-1}\varepsilon`.
        lambda_max (float): The stretch at the maximum nondimensional force,
            :math:`\lambda_\mathrm{max} = e^{1}`.

    """
    def __init__(self, **kwargs):
        """Initializes the ``LogSquaredPotential`` class.

        Args:
            **kwargs: Arbitrary keyword arguments.
                Can be used to specify ``varepsilon`` (default 88).

        """
        self.varepsilon = kwargs.get('varepsilon', 88)
        self.kappa = self.varepsilon
        self.c = 2/5
        # the force varepsilon*ln(lambda)/lambda peaks at lambda = e
        self.eta_max = self.varepsilon/np.exp(1)
        self.lambda_max = np.exp(1)
    def phi(self, lambda_):
        r"""The scaled nondimensional potential energy function,

        .. math::
            \phi(\lambda) = \frac{1}{2}\big[\ln(\lambda)\big]^2

        .

        Args:
            lambda_ (array_like): The stretch(s).

        Returns:
            numpy.ndarray: The scaled nondimensional potential energy(s).

        """
        return 0.5*np.log(lambda_)**2
    def beta_u(self, lambda_):
        r"""The nondimensional potential energy function,

        .. math::
            \beta u(\lambda) = \varepsilon\phi(\lambda)

        .

        Args:
            lambda_ (array_like): The stretch(s).

        Returns:
            numpy.ndarray: The nondimensional potential energy(s).

        """
        return self.varepsilon*self.phi(lambda_)
    def eta_link(self, lambda_):
        r"""The nondimensional force as a function of stretch,

        .. math::
            \eta(\lambda) = \varepsilon\,\frac{\ln(\lambda)}{\lambda}

        .

        Args:
            lambda_ (array_like): The stretch(s).

        Returns:
            numpy.ndarray: The nondimensional force(s).

        Example:
            Compute the nondimensional force at a sample stretch:

                >>> from ufjc.potential import LogSquaredPotential
                >>> LogSquaredPotential().eta_link(1.8)
                28.736236950770266

        """
        return self.varepsilon*np.log(lambda_)/lambda_
    def delta_lambda(self, eta):
        r"""The incremental stretch as a function of nondimensional force,

        .. math::
            \Delta\lambda(\eta) = e^{-\mathcal{W}(-\eta/\varepsilon)} - 1
            ,\qquad \eta\in[0,\eta_\mathrm{max}]

        .

        (The stretch itself is :math:`\lambda(\eta) =
        e^{-\mathcal{W}(-\eta/\varepsilon)}`; the previous docstring
        omitted the :math:`-1`, which is required for the *incremental*
        stretch, consistent with the other potentials in this module.)

        Args:
            eta (array_like): The nondimensional force(s).

        Returns:
            numpy.ndarray: The incremental stretch(s).

        """
        # lambertw returns complex; the principal branch is real on
        # [-1/e, 0], so only the real part is kept
        return (np.exp(-lambertw(-eta/self.varepsilon)) - 1).real
class MorsePotential(object):
    r"""The Morse potential :cite:`morse1929diatomic`.

    Attributes:
        varepsilon (float): The nondimensional energy scale.
        alpha (float): The Morse parameter.
        kappa (float): The nondimensional stiffness
            :math:`\kappa=2\varepsilon\alpha^2`.
        c (float): The correction parameter
            :math:`c=1/(1+3\alpha/2)`.
        eta_max (float): The maximum nondimensional force
            :math:`\eta_\mathrm{max} = \sqrt{\kappa\varepsilon/8}`.
        lambda_max (float): The stretch at the maximum nondimensional force,
            :math:`\lambda_\mathrm{max} = 1+\ln(2)/\alpha`.

    """
    def __init__(self, **kwargs):
        """Initializes the ``MorsePotential`` class.

        Args:
            **kwargs: Arbitrary keyword arguments.
                Can be used to specify ``varepsilon`` (default 88)
                and ``alpha`` (default 1).

        """
        self.varepsilon = kwargs.get('varepsilon', 88)
        self.alpha = kwargs.get('alpha', 1)
        self.kappa = 2*self.varepsilon*self.alpha**2
        self.c = 1/(1 + 3/2*self.alpha)
        self.eta_max = np.sqrt(self.kappa*self.varepsilon/8)
        self.lambda_max = 1 + np.log(2)/self.alpha
    def phi(self, lambda_):
        r"""The scaled nondimensional potential energy function,

        .. math::
            \phi(\lambda) = \left[1
                - e^{-\alpha(\lambda - 1)}\right]^2

        .

        Args:
            lambda_ (array_like): The stretch(s).

        Returns:
            numpy.ndarray: The scaled nondimensional potential energy(s).

        """
        return (1 - np.exp(-self.alpha*(lambda_ - 1)))**2
    def beta_u(self, lambda_):
        r"""The nondimensional potential energy function,

        .. math::
            \beta u(\lambda) = \varepsilon\phi(\lambda)

        .

        Args:
            lambda_ (array_like): The stretch(s).

        Returns:
            numpy.ndarray: The nondimensional potential energy(s).

        """
        return self.varepsilon*self.phi(lambda_)
    def eta_link(self, lambda_):
        r"""The nondimensional force as a function of stretch,

        .. math::
            \eta(\lambda) = 2\alpha\varepsilon e^{-\alpha(\lambda - 1)}
                \left[1 - e^{-\alpha(\lambda - 1)}\right]

        .

        (The bracketed factor enters to the first power, matching the
        implementation; an earlier docstring erroneously squared it.)

        Args:
            lambda_ (array_like): The stretch(s).

        Returns:
            numpy.ndarray: The nondimensional force(s).

        Example:
            Compute the nondimensional force at a sample stretch:

                >>> from ufjc.potential import MorsePotential
                >>> MorsePotential().eta_link(1.23)
                28.731992431367807

        """
        return 2*self.alpha*self.varepsilon * \
            np.exp(-self.alpha*(lambda_ - 1)) * \
            (1 - np.exp(-self.alpha*(lambda_ - 1)))
    def delta_lambda(self, eta):
        r"""The incremental stretch as a function of nondimensional force,

        .. math::
            \Delta\lambda(\eta) = \frac{1}{\alpha}
            \ln\left(\frac{2}{1 + \sqrt{1 -
                \eta/\eta_\mathrm{max}}}\right)
            ,\qquad \eta\in[0,\eta_\mathrm{max}]

        .

        Args:
            eta (array_like): The nondimensional force(s).

        Returns:
            numpy.ndarray: The incremental stretch(s).

        """
        return np.log(
            2/(1 + np.sqrt(1 - eta/self.eta_max))
        )/self.alpha
class LennardJonesPotential(object):
    r"""The Lennard-Jones potential :cite:`jones1924determinationii`.
    Attributes:
        varepsilon (float): The nondimensional energy scale.
        kappa (float): The nondimensional stiffness
            :math:`\kappa=72\varepsilon`.
        c (float): The correction parameter
            :math:`c=2/23`.
        eta_max (float): The maximum nondimensional force
            :math:`\eta_\mathrm{max} = \eta(\lambda_\mathrm{max})`.
        lambda_max (float): The stretch at the maximum nondimensional force,
            :math:`\lambda_\mathrm{max} = (13/7)^{1/6}`.
    """
    def __init__(self, **kwargs):
        """Initialize the ``LennardJonesPotential`` class.
        Args:
            **kwargs: Arbitrary keyword arguments.
                Can be used to specify ``varepsilon`` (default 88).
        """
        varepsilon = kwargs.get('varepsilon', 88)
        self.varepsilon = varepsilon
        self.kappa = 72*varepsilon
        self.c = 2/23
        # lambda_max must exist before eta_max is computed from it.
        self.lambda_max = (13/7)**(1/6)
        self.eta_max = self.eta_link(self.lambda_max)
    def phi(self, lambda_):
        r"""The scaled nondimensional potential energy function,
        .. math::
            \phi(\lambda) =
            \frac{1}{\lambda^{12}} - \frac{2}{\lambda^6} + 1
        .
        Args:
            lambda_ (array_like): The stretch(s).
        Returns:
            numpy.ndarray: The scaled nondimensional potential energy(s).
        """
        repulsion = 1/lambda_**12
        attraction = 2/lambda_**6
        return repulsion - attraction + 1
    def beta_u(self, lambda_):
        r"""The nondimensional potential energy function,
        .. math::
            \beta u(\lambda) = \varepsilon\phi(\lambda)
        .
        Args:
            lambda_ (array_like): The stretch(s).
        Returns:
            numpy.ndarray: The nondimensional potential energy(s).
        """
        return self.phi(lambda_)*self.varepsilon
    def eta_link(self, lambda_):
        r"""The nondimensional force as a function of stretch,
        .. math::
            \eta(\lambda) = 12\varepsilon\left(
            \frac{1}{\lambda^7} - \frac{1}{\lambda^{13}}\right)
        .
        Args:
            lambda_ (array_like): The stretch(s).
        Returns:
            numpy.ndarray: The nondimensional force(s).
        """
        force_shape = 12/lambda_**7 - 12/lambda_**13
        return self.varepsilon*force_shape
class LJFENEPotential(object):
r"""The Lennard-Jones-FENE potential :cite:`kremer1990dynamics`.
Attributes:
varepsilon (float): The nondimensional energy scale.
kappa (float): The nondimensional stiffness.
c (float): The correction parameter.
eta_max (float): The maximum nondimensional force
:math:`\eta_\mathrm{max} = \eta(\lambda_\mathrm{max})`.
lambda_max (float): The stretch at the maximum nondimensional force.
"""
def __init__(self, **kwargs):
"""Initializes the ``LJFENEPotential`` class.
Args:
**kwargs: Arbitrary keyword arguments.
Can be used to specify ``varepsilon`` (default (88, 230))
and ``lambda_max`` (default 1.5).
"""
self.varepsilon_1, self.varepsilon_2 = \
kwargs.get('varepsilon', (88, 230))
self.varepsilon = self.varepsilon_2
self.lambda_max = kwargs.get('lambda_max', 1.5)
self.kappa = 72*self.varepsilon_1 + self.varepsilon_2 * \
(self.lambda_max**2 + 1)/(self.lambda_max**2 - 1)**2
self.c = 1/(1 - (
-1512 + (6*self.lambda_max**2 + 2)/(self.lambda_max**2 - 1)**3
)/(2*self.kappa/self.varepsilon_2)
)
self.eta_max = self.eta_link(self.lambda_max)
def phi(self, lambda_):
r"""The scaled nondimensional potential energy function,
.. math::
\phi(\lambda) =
\frac{\varepsilon_1}{\varepsilon_2}\left(
\frac{1}{\lambda^{12}} - \frac{2}{\lambda^6} + 1
\right) - \frac{1}{2}\,\ln\left[
1 - \left(\frac{\lambda}{\lambda_\mathrm{max}}\right)^2
\right]
.
Args:
lambda_ (array_like): The stretch(s).
Returns:
numpy.ndarray: The scaled nondimensional potential energy(s).
"""
lambda_fene = (lambda_ < self.lambda_max)*lambda_
return self.varepsilon_1/self.varepsilon_2*(
1/lambda_**12 - 2/lambda_**6 + 1
) - 0.5*np.log(
1 - (lambda_fene/self.lambda_max)**2
)*(lambda_ < self.lambda_max)
def beta_u(self, lambda_):
r"""The nondimensional potential energy function,
.. math::
\beta u(\lambda) =
\varepsilon\phi(\lambda) =
\varepsilon_1\left(
\frac{1}{\lambda^{12}} - \frac{2}{\lambda^6} + 1
\right) - \frac{\varepsilon_2}{2}\,\ln\left[
1 - \left(\frac{\lambda}{\lambda_\mathrm{max}}\right)^2
\right]
.
Args:
lambda_ (array_like): The stretch(s).
Returns:
numpy.ndarray: The nondimensional potential energy(s).
"""
return self.varepsilon*self.phi(lambda_)
def eta_link(self, lambda_):
r"""The nondimensional force as a function of stretch,
.. math::
\eta(\lambda) = 12\varepsilon_1\left(
\frac{1}{\lambda^7} - \frac{1}{\lambda^{13}}
\right) + \frac{\varepsilon_2\lambda}
{\lambda_\mathrm{max}^2 | |
= staticmethod(_libsumo.vehicletype_setTau)
__swig_getmethods__["setColor"] = lambda x: _libsumo.vehicletype_setColor
if _newclass:
setColor = staticmethod(_libsumo.vehicletype_setColor)
__swig_getmethods__["setMinGapLat"] = lambda x: _libsumo.vehicletype_setMinGapLat
if _newclass:
setMinGapLat = staticmethod(_libsumo.vehicletype_setMinGapLat)
__swig_getmethods__["setMaxSpeedLat"] = lambda x: _libsumo.vehicletype_setMaxSpeedLat
if _newclass:
setMaxSpeedLat = staticmethod(_libsumo.vehicletype_setMaxSpeedLat)
__swig_getmethods__["setLateralAlignment"] = lambda x: _libsumo.vehicletype_setLateralAlignment
if _newclass:
setLateralAlignment = staticmethod(_libsumo.vehicletype_setLateralAlignment)
__swig_getmethods__["setActionStepLength"] = lambda x: _libsumo.vehicletype_setActionStepLength
if _newclass:
setActionStepLength = staticmethod(_libsumo.vehicletype_setActionStepLength)
__swig_getmethods__["copy"] = lambda x: _libsumo.vehicletype_copy
if _newclass:
copy = staticmethod(_libsumo.vehicletype_copy)
__swig_getmethods__["getVType"] = lambda x: _libsumo.vehicletype_getVType
if _newclass:
getVType = staticmethod(_libsumo.vehicletype_getVType)
__swig_getmethods__["setSpeedDeviation"] = lambda x: _libsumo.vehicletype_setSpeedDeviation
if _newclass:
setSpeedDeviation = staticmethod(_libsumo.vehicletype_setSpeedDeviation)
__swig_getmethods__["setParameter"] = lambda x: _libsumo.vehicletype_setParameter
if _newclass:
setParameter = staticmethod(_libsumo.vehicletype_setParameter)
__swig_getmethods__["subscribe"] = lambda x: _libsumo.vehicletype_subscribe
if _newclass:
subscribe = staticmethod(_libsumo.vehicletype_subscribe)
__swig_getmethods__["subscribeContext"] = lambda x: _libsumo.vehicletype_subscribeContext
if _newclass:
subscribeContext = staticmethod(_libsumo.vehicletype_subscribeContext)
__swig_getmethods__["getAllSubscriptionResults"] = lambda x: _libsumo.vehicletype_getAllSubscriptionResults
if _newclass:
getAllSubscriptionResults = staticmethod(_libsumo.vehicletype_getAllSubscriptionResults)
__swig_getmethods__["getSubscriptionResults"] = lambda x: _libsumo.vehicletype_getSubscriptionResults
if _newclass:
getSubscriptionResults = staticmethod(_libsumo.vehicletype_getSubscriptionResults)
__swig_getmethods__["getAllContextSubscriptionResults"] = lambda x: _libsumo.vehicletype_getAllContextSubscriptionResults
if _newclass:
getAllContextSubscriptionResults = staticmethod(_libsumo.vehicletype_getAllContextSubscriptionResults)
__swig_getmethods__["getContextSubscriptionResults"] = lambda x: _libsumo.vehicletype_getContextSubscriptionResults
if _newclass:
getContextSubscriptionResults = staticmethod(_libsumo.vehicletype_getContextSubscriptionResults)
__swig_getmethods__["makeWrapper"] = lambda x: _libsumo.vehicletype_makeWrapper
if _newclass:
makeWrapper = staticmethod(_libsumo.vehicletype_makeWrapper)
__swig_getmethods__["handleVariable"] = lambda x: _libsumo.vehicletype_handleVariable
if _newclass:
handleVariable = staticmethod(_libsumo.vehicletype_handleVariable)
__swig_destroy__ = _libsumo.delete_vehicletype
__del__ = lambda self: None
# Register the ``vehicletype`` proxy class with the SWIG runtime.
vehicletype_swigregister = _libsumo.vehicletype_swigregister
vehicletype_swigregister(vehicletype)
# Module-level convenience wrappers (SWIG-generated).  Each ``def`` exists for
# introspection/documentation; it is immediately rebound to the raw _libsumo
# C function to remove one level of Python call overhead.  Do not edit by
# hand.
def vehicletype_getIDList():
    return _libsumo.vehicletype_getIDList()
vehicletype_getIDList = _libsumo.vehicletype_getIDList
def vehicletype_getIDCount():
    return _libsumo.vehicletype_getIDCount()
vehicletype_getIDCount = _libsumo.vehicletype_getIDCount
def vehicletype_getLength(typeID):
    return _libsumo.vehicletype_getLength(typeID)
vehicletype_getLength = _libsumo.vehicletype_getLength
def vehicletype_getMaxSpeed(typeID):
    return _libsumo.vehicletype_getMaxSpeed(typeID)
vehicletype_getMaxSpeed = _libsumo.vehicletype_getMaxSpeed
def vehicletype_getActionStepLength(typeID):
    return _libsumo.vehicletype_getActionStepLength(typeID)
vehicletype_getActionStepLength = _libsumo.vehicletype_getActionStepLength
def vehicletype_getSpeedFactor(typeID):
    return _libsumo.vehicletype_getSpeedFactor(typeID)
vehicletype_getSpeedFactor = _libsumo.vehicletype_getSpeedFactor
def vehicletype_getSpeedDeviation(typeID):
    return _libsumo.vehicletype_getSpeedDeviation(typeID)
vehicletype_getSpeedDeviation = _libsumo.vehicletype_getSpeedDeviation
def vehicletype_getAccel(typeID):
    return _libsumo.vehicletype_getAccel(typeID)
vehicletype_getAccel = _libsumo.vehicletype_getAccel
def vehicletype_getDecel(typeID):
    return _libsumo.vehicletype_getDecel(typeID)
vehicletype_getDecel = _libsumo.vehicletype_getDecel
def vehicletype_getEmergencyDecel(typeID):
    return _libsumo.vehicletype_getEmergencyDecel(typeID)
vehicletype_getEmergencyDecel = _libsumo.vehicletype_getEmergencyDecel
def vehicletype_getApparentDecel(typeID):
    return _libsumo.vehicletype_getApparentDecel(typeID)
vehicletype_getApparentDecel = _libsumo.vehicletype_getApparentDecel
def vehicletype_getImperfection(typeID):
    return _libsumo.vehicletype_getImperfection(typeID)
vehicletype_getImperfection = _libsumo.vehicletype_getImperfection
def vehicletype_getTau(typeID):
    return _libsumo.vehicletype_getTau(typeID)
vehicletype_getTau = _libsumo.vehicletype_getTau
def vehicletype_getVehicleClass(typeID):
    return _libsumo.vehicletype_getVehicleClass(typeID)
vehicletype_getVehicleClass = _libsumo.vehicletype_getVehicleClass
def vehicletype_getEmissionClass(typeID):
    return _libsumo.vehicletype_getEmissionClass(typeID)
vehicletype_getEmissionClass = _libsumo.vehicletype_getEmissionClass
def vehicletype_getShapeClass(typeID):
    return _libsumo.vehicletype_getShapeClass(typeID)
vehicletype_getShapeClass = _libsumo.vehicletype_getShapeClass
def vehicletype_getMinGap(typeID):
    return _libsumo.vehicletype_getMinGap(typeID)
vehicletype_getMinGap = _libsumo.vehicletype_getMinGap
def vehicletype_getWidth(typeID):
    return _libsumo.vehicletype_getWidth(typeID)
vehicletype_getWidth = _libsumo.vehicletype_getWidth
def vehicletype_getHeight(typeID):
    return _libsumo.vehicletype_getHeight(typeID)
vehicletype_getHeight = _libsumo.vehicletype_getHeight
def vehicletype_getColor(typeID):
    return _libsumo.vehicletype_getColor(typeID)
vehicletype_getColor = _libsumo.vehicletype_getColor
def vehicletype_getMinGapLat(typeID):
    return _libsumo.vehicletype_getMinGapLat(typeID)
vehicletype_getMinGapLat = _libsumo.vehicletype_getMinGapLat
def vehicletype_getMaxSpeedLat(typeID):
    return _libsumo.vehicletype_getMaxSpeedLat(typeID)
vehicletype_getMaxSpeedLat = _libsumo.vehicletype_getMaxSpeedLat
def vehicletype_getLateralAlignment(typeID):
    return _libsumo.vehicletype_getLateralAlignment(typeID)
vehicletype_getLateralAlignment = _libsumo.vehicletype_getLateralAlignment
def vehicletype_getParameter(typeID, key):
    return _libsumo.vehicletype_getParameter(typeID, key)
vehicletype_getParameter = _libsumo.vehicletype_getParameter
def vehicletype_setLength(typeID, length):
    return _libsumo.vehicletype_setLength(typeID, length)
vehicletype_setLength = _libsumo.vehicletype_setLength
def vehicletype_setMaxSpeed(typeID, speed):
    return _libsumo.vehicletype_setMaxSpeed(typeID, speed)
vehicletype_setMaxSpeed = _libsumo.vehicletype_setMaxSpeed
def vehicletype_setVehicleClass(typeID, clazz):
    return _libsumo.vehicletype_setVehicleClass(typeID, clazz)
vehicletype_setVehicleClass = _libsumo.vehicletype_setVehicleClass
def vehicletype_setSpeedFactor(typeID, factor):
    return _libsumo.vehicletype_setSpeedFactor(typeID, factor)
vehicletype_setSpeedFactor = _libsumo.vehicletype_setSpeedFactor
def vehicletype_setEmissionClass(typeID, clazz):
    return _libsumo.vehicletype_setEmissionClass(typeID, clazz)
vehicletype_setEmissionClass = _libsumo.vehicletype_setEmissionClass
def vehicletype_setShapeClass(typeID, shapeClass):
    return _libsumo.vehicletype_setShapeClass(typeID, shapeClass)
vehicletype_setShapeClass = _libsumo.vehicletype_setShapeClass
def vehicletype_setWidth(typeID, width):
    return _libsumo.vehicletype_setWidth(typeID, width)
vehicletype_setWidth = _libsumo.vehicletype_setWidth
def vehicletype_setHeight(typeID, height):
    return _libsumo.vehicletype_setHeight(typeID, height)
vehicletype_setHeight = _libsumo.vehicletype_setHeight
def vehicletype_setMinGap(typeID, minGap):
    return _libsumo.vehicletype_setMinGap(typeID, minGap)
vehicletype_setMinGap = _libsumo.vehicletype_setMinGap
def vehicletype_setAccel(typeID, accel):
    return _libsumo.vehicletype_setAccel(typeID, accel)
vehicletype_setAccel = _libsumo.vehicletype_setAccel
def vehicletype_setDecel(typeID, decel):
    return _libsumo.vehicletype_setDecel(typeID, decel)
vehicletype_setDecel = _libsumo.vehicletype_setDecel
def vehicletype_setEmergencyDecel(typeID, decel):
    return _libsumo.vehicletype_setEmergencyDecel(typeID, decel)
vehicletype_setEmergencyDecel = _libsumo.vehicletype_setEmergencyDecel
def vehicletype_setApparentDecel(typeID, decel):
    return _libsumo.vehicletype_setApparentDecel(typeID, decel)
vehicletype_setApparentDecel = _libsumo.vehicletype_setApparentDecel
def vehicletype_setImperfection(typeID, imperfection):
    return _libsumo.vehicletype_setImperfection(typeID, imperfection)
vehicletype_setImperfection = _libsumo.vehicletype_setImperfection
def vehicletype_setTau(typeID, tau):
    return _libsumo.vehicletype_setTau(typeID, tau)
vehicletype_setTau = _libsumo.vehicletype_setTau
def vehicletype_setColor(typeID, c):
    return _libsumo.vehicletype_setColor(typeID, c)
vehicletype_setColor = _libsumo.vehicletype_setColor
def vehicletype_setMinGapLat(typeID, minGapLat):
    return _libsumo.vehicletype_setMinGapLat(typeID, minGapLat)
vehicletype_setMinGapLat = _libsumo.vehicletype_setMinGapLat
def vehicletype_setMaxSpeedLat(typeID, speed):
    return _libsumo.vehicletype_setMaxSpeedLat(typeID, speed)
vehicletype_setMaxSpeedLat = _libsumo.vehicletype_setMaxSpeedLat
def vehicletype_setLateralAlignment(typeID, latAlignment):
    return _libsumo.vehicletype_setLateralAlignment(typeID, latAlignment)
vehicletype_setLateralAlignment = _libsumo.vehicletype_setLateralAlignment
def vehicletype_setActionStepLength(typeID, actionStepLength, resetActionOffset):
    return _libsumo.vehicletype_setActionStepLength(typeID, actionStepLength, resetActionOffset)
vehicletype_setActionStepLength = _libsumo.vehicletype_setActionStepLength
def vehicletype_copy(origTypeID, newTypeID):
    return _libsumo.vehicletype_copy(origTypeID, newTypeID)
vehicletype_copy = _libsumo.vehicletype_copy
def vehicletype_getVType(id):
    return _libsumo.vehicletype_getVType(id)
vehicletype_getVType = _libsumo.vehicletype_getVType
def vehicletype_setSpeedDeviation(typeID, deviation):
    return _libsumo.vehicletype_setSpeedDeviation(typeID, deviation)
vehicletype_setSpeedDeviation = _libsumo.vehicletype_setSpeedDeviation
def vehicletype_setParameter(id, name, value):
    return _libsumo.vehicletype_setParameter(id, name, value)
vehicletype_setParameter = _libsumo.vehicletype_setParameter
def vehicletype_subscribe(*args, **kwargs):
    return _libsumo.vehicletype_subscribe(*args, **kwargs)
vehicletype_subscribe = _libsumo.vehicletype_subscribe
def vehicletype_subscribeContext(*args, **kwargs):
    return _libsumo.vehicletype_subscribeContext(*args, **kwargs)
vehicletype_subscribeContext = _libsumo.vehicletype_subscribeContext
def vehicletype_getAllSubscriptionResults():
    return _libsumo.vehicletype_getAllSubscriptionResults()
vehicletype_getAllSubscriptionResults = _libsumo.vehicletype_getAllSubscriptionResults
def vehicletype_getSubscriptionResults(objID):
    return _libsumo.vehicletype_getSubscriptionResults(objID)
vehicletype_getSubscriptionResults = _libsumo.vehicletype_getSubscriptionResults
def vehicletype_getAllContextSubscriptionResults():
    return _libsumo.vehicletype_getAllContextSubscriptionResults()
vehicletype_getAllContextSubscriptionResults = _libsumo.vehicletype_getAllContextSubscriptionResults
def vehicletype_getContextSubscriptionResults(objID):
    return _libsumo.vehicletype_getContextSubscriptionResults(objID)
vehicletype_getContextSubscriptionResults = _libsumo.vehicletype_getContextSubscriptionResults
def vehicletype_makeWrapper():
    return _libsumo.vehicletype_makeWrapper()
vehicletype_makeWrapper = _libsumo.vehicletype_makeWrapper
def vehicletype_handleVariable(objID, variable, wrapper):
    return _libsumo.vehicletype_handleVariable(objID, variable, wrapper)
vehicletype_handleVariable = _libsumo.vehicletype_handleVariable
class vehicle(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, vehicle, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, vehicle, name)
def __init__(self, *args, **kwargs):
raise AttributeError("No constructor defined")
__repr__ = _swig_repr
__swig_getmethods__["getIDList"] = lambda x: _libsumo.vehicle_getIDList
if _newclass:
getIDList = staticmethod(_libsumo.vehicle_getIDList)
__swig_getmethods__["getIDCount"] = lambda x: _libsumo.vehicle_getIDCount
if _newclass:
getIDCount = staticmethod(_libsumo.vehicle_getIDCount)
__swig_getmethods__["getSpeed"] = lambda x: _libsumo.vehicle_getSpeed
if _newclass:
getSpeed = staticmethod(_libsumo.vehicle_getSpeed)
__swig_getmethods__["getAcceleration"] = lambda x: _libsumo.vehicle_getAcceleration
if _newclass:
getAcceleration = staticmethod(_libsumo.vehicle_getAcceleration)
__swig_getmethods__["getSpeedWithoutTraCI"] = lambda x: _libsumo.vehicle_getSpeedWithoutTraCI
if _newclass:
getSpeedWithoutTraCI = staticmethod(_libsumo.vehicle_getSpeedWithoutTraCI)
__swig_getmethods__["getPosition"] = lambda x: _libsumo.vehicle_getPosition
if _newclass:
getPosition = staticmethod(_libsumo.vehicle_getPosition)
__swig_getmethods__["getPosition3D"] = lambda x: _libsumo.vehicle_getPosition3D
if _newclass:
getPosition3D = staticmethod(_libsumo.vehicle_getPosition3D)
__swig_getmethods__["getAngle"] = lambda x: _libsumo.vehicle_getAngle
if _newclass:
getAngle = staticmethod(_libsumo.vehicle_getAngle)
__swig_getmethods__["getSlope"] = lambda x: _libsumo.vehicle_getSlope
if _newclass:
getSlope = staticmethod(_libsumo.vehicle_getSlope)
__swig_getmethods__["getRoadID"] = lambda x: _libsumo.vehicle_getRoadID
if _newclass:
getRoadID = staticmethod(_libsumo.vehicle_getRoadID)
__swig_getmethods__["getLaneID"] = lambda x: _libsumo.vehicle_getLaneID
if _newclass:
getLaneID = staticmethod(_libsumo.vehicle_getLaneID)
__swig_getmethods__["getLaneIndex"] = lambda x: _libsumo.vehicle_getLaneIndex
if _newclass:
getLaneIndex = staticmethod(_libsumo.vehicle_getLaneIndex)
__swig_getmethods__["getTypeID"] = lambda x: _libsumo.vehicle_getTypeID
if _newclass:
getTypeID = staticmethod(_libsumo.vehicle_getTypeID)
__swig_getmethods__["getRouteID"] = lambda x: _libsumo.vehicle_getRouteID
if _newclass:
getRouteID = staticmethod(_libsumo.vehicle_getRouteID)
__swig_getmethods__["getRouteIndex"] = lambda x: _libsumo.vehicle_getRouteIndex
if _newclass:
getRouteIndex = staticmethod(_libsumo.vehicle_getRouteIndex)
__swig_getmethods__["getLanePosition"] = lambda x: _libsumo.vehicle_getLanePosition
if _newclass:
getLanePosition = staticmethod(_libsumo.vehicle_getLanePosition)
__swig_getmethods__["getLateralLanePosition"] = lambda x: _libsumo.vehicle_getLateralLanePosition
if _newclass:
getLateralLanePosition = staticmethod(_libsumo.vehicle_getLateralLanePosition)
__swig_getmethods__["getCO2Emission"] = lambda x: _libsumo.vehicle_getCO2Emission
if _newclass:
getCO2Emission = staticmethod(_libsumo.vehicle_getCO2Emission)
__swig_getmethods__["getCOEmission"] = lambda x: _libsumo.vehicle_getCOEmission
if _newclass:
getCOEmission = staticmethod(_libsumo.vehicle_getCOEmission)
__swig_getmethods__["getHCEmission"] = lambda x: _libsumo.vehicle_getHCEmission
if _newclass:
getHCEmission = staticmethod(_libsumo.vehicle_getHCEmission)
__swig_getmethods__["getPMxEmission"] = lambda x: _libsumo.vehicle_getPMxEmission
if _newclass:
getPMxEmission = staticmethod(_libsumo.vehicle_getPMxEmission)
__swig_getmethods__["getNOxEmission"] = lambda x: _libsumo.vehicle_getNOxEmission
if _newclass:
getNOxEmission = staticmethod(_libsumo.vehicle_getNOxEmission)
__swig_getmethods__["getFuelConsumption"] = lambda x: _libsumo.vehicle_getFuelConsumption
if _newclass:
getFuelConsumption = staticmethod(_libsumo.vehicle_getFuelConsumption)
__swig_getmethods__["getNoiseEmission"] = lambda x: _libsumo.vehicle_getNoiseEmission
if _newclass:
getNoiseEmission = staticmethod(_libsumo.vehicle_getNoiseEmission)
__swig_getmethods__["getElectricityConsumption"] = lambda x: _libsumo.vehicle_getElectricityConsumption
if _newclass:
getElectricityConsumption = staticmethod(_libsumo.vehicle_getElectricityConsumption)
__swig_getmethods__["getPersonNumber"] = lambda x: _libsumo.vehicle_getPersonNumber
if _newclass:
getPersonNumber = staticmethod(_libsumo.vehicle_getPersonNumber)
__swig_getmethods__["getPersonIDList"] = lambda x: _libsumo.vehicle_getPersonIDList
if _newclass:
getPersonIDList = staticmethod(_libsumo.vehicle_getPersonIDList)
__swig_getmethods__["getLeader"] = lambda x: _libsumo.vehicle_getLeader
if _newclass:
getLeader = staticmethod(_libsumo.vehicle_getLeader)
__swig_getmethods__["getWaitingTime"] = lambda x: _libsumo.vehicle_getWaitingTime
if _newclass:
getWaitingTime = staticmethod(_libsumo.vehicle_getWaitingTime)
__swig_getmethods__["getAccumulatedWaitingTime"] = lambda x: _libsumo.vehicle_getAccumulatedWaitingTime
if _newclass:
getAccumulatedWaitingTime = staticmethod(_libsumo.vehicle_getAccumulatedWaitingTime)
__swig_getmethods__["getAdaptedTraveltime"] = lambda x: _libsumo.vehicle_getAdaptedTraveltime
if _newclass:
getAdaptedTraveltime = staticmethod(_libsumo.vehicle_getAdaptedTraveltime)
__swig_getmethods__["getEffort"] = lambda x: _libsumo.vehicle_getEffort
if _newclass:
getEffort = staticmethod(_libsumo.vehicle_getEffort)
__swig_getmethods__["isRouteValid"] = lambda x: _libsumo.vehicle_isRouteValid
if _newclass:
isRouteValid = staticmethod(_libsumo.vehicle_isRouteValid)
__swig_getmethods__["getRoute"] = lambda x: _libsumo.vehicle_getRoute
if _newclass:
getRoute = staticmethod(_libsumo.vehicle_getRoute)
__swig_getmethods__["getSignals"] = lambda x: _libsumo.vehicle_getSignals
if _newclass:
getSignals = staticmethod(_libsumo.vehicle_getSignals)
__swig_getmethods__["getBestLanes"] = lambda x: _libsumo.vehicle_getBestLanes
if _newclass:
getBestLanes = staticmethod(_libsumo.vehicle_getBestLanes)
__swig_getmethods__["getNextTLS"] = lambda x: _libsumo.vehicle_getNextTLS
if _newclass:
getNextTLS = staticmethod(_libsumo.vehicle_getNextTLS)
__swig_getmethods__["getNextStops"] = lambda x: _libsumo.vehicle_getNextStops
if _newclass:
getNextStops = staticmethod(_libsumo.vehicle_getNextStops)
__swig_getmethods__["getStopState"] = lambda x: _libsumo.vehicle_getStopState
if _newclass:
getStopState = staticmethod(_libsumo.vehicle_getStopState)
__swig_getmethods__["getDistance"] = lambda x: _libsumo.vehicle_getDistance
if _newclass:
getDistance = staticmethod(_libsumo.vehicle_getDistance)
__swig_getmethods__["getDrivingDistance"] = lambda x: _libsumo.vehicle_getDrivingDistance
if _newclass:
getDrivingDistance = staticmethod(_libsumo.vehicle_getDrivingDistance)
__swig_getmethods__["getDrivingDistance2D"] = lambda x: _libsumo.vehicle_getDrivingDistance2D
if _newclass:
getDrivingDistance2D = staticmethod(_libsumo.vehicle_getDrivingDistance2D)
__swig_getmethods__["getAllowedSpeed"] = lambda x: _libsumo.vehicle_getAllowedSpeed
if _newclass:
getAllowedSpeed = staticmethod(_libsumo.vehicle_getAllowedSpeed)
__swig_getmethods__["getSpeedMode"] = lambda x: _libsumo.vehicle_getSpeedMode
if _newclass:
getSpeedMode = staticmethod(_libsumo.vehicle_getSpeedMode)
__swig_getmethods__["getLaneChangeMode"] = lambda x: _libsumo.vehicle_getLaneChangeMode
if _newclass:
getLaneChangeMode = staticmethod(_libsumo.vehicle_getLaneChangeMode)
__swig_getmethods__["getRoutingMode"] = lambda x: _libsumo.vehicle_getRoutingMode
if _newclass:
getRoutingMode = staticmethod(_libsumo.vehicle_getRoutingMode)
__swig_getmethods__["getLine"] = lambda x: _libsumo.vehicle_getLine
if _newclass:
getLine = staticmethod(_libsumo.vehicle_getLine)
__swig_getmethods__["getVia"] = lambda x: _libsumo.vehicle_getVia
if _newclass:
getVia = staticmethod(_libsumo.vehicle_getVia)
__swig_getmethods__["getLaneChangeState"] = lambda x: _libsumo.vehicle_getLaneChangeState
if _newclass:
getLaneChangeState = staticmethod(_libsumo.vehicle_getLaneChangeState)
__swig_getmethods__["getLastActionTime"] = lambda x: _libsumo.vehicle_getLastActionTime
if _newclass:
getLastActionTime = staticmethod(_libsumo.vehicle_getLastActionTime)
__swig_getmethods__["getParameter"] = lambda x: _libsumo.vehicle_getParameter
if _newclass:
getParameter = staticmethod(_libsumo.vehicle_getParameter)
__swig_getmethods__["getVehicleType"] = lambda x: _libsumo.vehicle_getVehicleType
if _newclass:
getVehicleType = staticmethod(_libsumo.vehicle_getVehicleType)
__swig_getmethods__["getLength"] = lambda x: _libsumo.vehicle_getLength
if _newclass:
getLength = staticmethod(_libsumo.vehicle_getLength)
__swig_getmethods__["getMaxSpeed"] = lambda x: _libsumo.vehicle_getMaxSpeed
if _newclass:
getMaxSpeed = staticmethod(_libsumo.vehicle_getMaxSpeed)
__swig_getmethods__["getActionStepLength"] = lambda x: _libsumo.vehicle_getActionStepLength
if _newclass:
getActionStepLength = staticmethod(_libsumo.vehicle_getActionStepLength)
__swig_getmethods__["getSpeedFactor"] = lambda x: _libsumo.vehicle_getSpeedFactor
if _newclass:
getSpeedFactor = staticmethod(_libsumo.vehicle_getSpeedFactor)
__swig_getmethods__["getSpeedDeviation"] = lambda x: _libsumo.vehicle_getSpeedDeviation
if _newclass:
getSpeedDeviation = staticmethod(_libsumo.vehicle_getSpeedDeviation)
__swig_getmethods__["getAccel"] = lambda x: _libsumo.vehicle_getAccel
if _newclass:
getAccel = staticmethod(_libsumo.vehicle_getAccel)
__swig_getmethods__["getDecel"] = lambda x: _libsumo.vehicle_getDecel
if _newclass:
getDecel = staticmethod(_libsumo.vehicle_getDecel)
__swig_getmethods__["getEmergencyDecel"] = lambda x: _libsumo.vehicle_getEmergencyDecel
if _newclass:
getEmergencyDecel = staticmethod(_libsumo.vehicle_getEmergencyDecel)
__swig_getmethods__["getApparentDecel"] = lambda x: _libsumo.vehicle_getApparentDecel
if _newclass:
getApparentDecel = staticmethod(_libsumo.vehicle_getApparentDecel)
__swig_getmethods__["getImperfection"] = lambda x: _libsumo.vehicle_getImperfection
if _newclass:
getImperfection = staticmethod(_libsumo.vehicle_getImperfection)
__swig_getmethods__["getTau"] = lambda x: _libsumo.vehicle_getTau
if _newclass:
getTau = staticmethod(_libsumo.vehicle_getTau)
__swig_getmethods__["getVehicleClass"] = lambda x: _libsumo.vehicle_getVehicleClass
if _newclass:
getVehicleClass = staticmethod(_libsumo.vehicle_getVehicleClass)
__swig_getmethods__["getEmissionClass"] = lambda x: _libsumo.vehicle_getEmissionClass
if _newclass:
getEmissionClass = staticmethod(_libsumo.vehicle_getEmissionClass)
__swig_getmethods__["getShapeClass"] = lambda x: _libsumo.vehicle_getShapeClass
if _newclass:
getShapeClass = staticmethod(_libsumo.vehicle_getShapeClass)
__swig_getmethods__["getMinGap"] = lambda x: _libsumo.vehicle_getMinGap
if _newclass:
getMinGap = staticmethod(_libsumo.vehicle_getMinGap)
__swig_getmethods__["getWidth"] = lambda x: _libsumo.vehicle_getWidth
if _newclass:
getWidth = staticmethod(_libsumo.vehicle_getWidth)
__swig_getmethods__["getHeight"] = lambda x: _libsumo.vehicle_getHeight
if _newclass:
getHeight = staticmethod(_libsumo.vehicle_getHeight)
__swig_getmethods__["getColor"] = lambda x: _libsumo.vehicle_getColor
if _newclass:
getColor = staticmethod(_libsumo.vehicle_getColor)
__swig_getmethods__["getMinGapLat"] = lambda x: _libsumo.vehicle_getMinGapLat
if _newclass:
getMinGapLat = staticmethod(_libsumo.vehicle_getMinGapLat)
__swig_getmethods__["getMaxSpeedLat"] = lambda x: _libsumo.vehicle_getMaxSpeedLat
if _newclass:
getMaxSpeedLat = staticmethod(_libsumo.vehicle_getMaxSpeedLat)
__swig_getmethods__["getLateralAlignment"] = | |
<filename>eelbrain/_result_plots.py
# Author: <NAME> <<EMAIL>>
from math import floor, log10
from os import makedirs
from os.path import basename, dirname, exists, expanduser, isdir, join
import matplotlib as mpl
import numpy as np
from . import fmtxt, plot, testnd
from .plot._base import POINT
from ._data_obj import combine
# usage: with mpl.rc_context(RC):
FONT = 'Helvetica'
# Matplotlib rc parameters for publication-quality result figures.
RC = {
    'figure.dpi': 300,
    'savefig.dpi': 300,
    'savefig.transparent': True,
    # Font
    'font.family': 'sans-serif',
    'font.sans-serif': FONT,
    'font.size': 9,
    # make sure equations use same font
    'mathtext.fontset': 'custom',
    'font.cursive': FONT,
    'font.serif': FONT,
    # subplot
    'figure.subplot.top': 0.95,
    # legend
    'legend.fontsize': 6,
    'legend.frameon': False,
}
# Halve all default widths (line, marker-edge, axes, ...) to suit the small
# plot sizes used here.  NOTE(review): matches every rcParam whose name
# contains 'width' — confirm that is intended.
for key in mpl.rcParams:
    if 'width' in key:
        RC[key] = mpl.rcParams[key] * 0.5
class PlotDestDir:
    """Generate paths for saving plots in figure-specific subdirectories

    Parameters
    ----------
    root : str
        Directory in which to save files (created if it does not exist).
    pix_fmt : str
        Pixel graphics format (default ``png``).
    vec_fmt : str
        Vector graphics format (default ``pdf``).
    name : str
        Name for the info report (default is ``basename(root)``).
    """
    def __init__(self, root, pix_fmt='png', vec_fmt='pdf', name=None):
        root = expanduser(root)
        if not exists(root):
            makedirs(root)
        else:
            assert isdir(root)
        assert pix_fmt.isalnum()
        assert vec_fmt.isalnum()
        if name is None:
            name = basename(root)
        if not name:
            # root ended in a path separator, so basename() was empty
            name = basename(dirname(root))
        self.root = root
        self._pix_fmt = pix_fmt
        self._vec_fmt = vec_fmt
        # Path templates; fill with a filename stem, e.g. ``self.pix % 'fig1'``
        self.pix = join(root, '%s.' + pix_fmt)
        self.vec = join(root, '%s.' + vec_fmt)
        self.mov = join(root, '%s.mov')
        self.txt = join(root, '%s.txt')
        self.name = name
        self.report = fmtxt.Report(name)
        # Stack of open report sections; index corresponds to heading level
        self._active_section = [self.report]
    def with_ext(self, ext):
        """Generate path template ``%s.{ext}``"""
        assert ext.isalnum()
        return join(self.root, '%s.' + ext)
    def subdir(self, dirname, name=None):
        """PlotDestDir object for a sub-directory"""
        return PlotDestDir(join(self.root, dirname), self._pix_fmt, self._vec_fmt, name)
    # MARK: report
    def section(self, heading, level=1):
        """Open a new report section with ``heading`` at the given level"""
        if level <= 0:
            # BUG FIX: the %r placeholder was previously never formatted
            raise ValueError("level=%r; must be >= 1, section 0 is the "
                             "document" % (level,))
        elif level > len(self._active_section):
            raise RuntimeError("Can't add section with level %i before adding "
                               "section with level %i" % (level, level - 1))
        # close any deeper sections before opening a sibling/parent section
        while len(self._active_section) > level:
            self._active_section.pop(-1)
        section = self._active_section[-1].add_section(heading)
        self._active_section.append(section)
    def info(self, content):
        """Add ``content`` to the current report section"""
        section = self._active_section[-1]
        section.append(content)
    def save_info(self, format='html'):
        """Save the report to ``{root}/{name}`` in the given ``format``"""
        dst = join(self.root, self.name)
        # keep the try minimal so AttributeError from saving is not masked
        try:
            save = getattr(self.report, 'save_' + format)
        except AttributeError:
            raise ValueError("format=%r; Invalid format" % (format,))
        save(dst)
def cname(cid):
    """Format a cluster ID (scalar or ``(effect, id)`` tuple) as a string."""
    if not isinstance(cid, tuple):
        return str(cid)
    return '-'.join(str(part) for part in cid)
class ClusterPlotter:
"""Make plots for spatio-temporal clusters
returned by :meth:`MneExperiment.load_result_plotter`
Parameters
----------
ds : Dataset
Dataset with the data on which the test is based.
res : NDTest
Test result object with spatio-temporal cluster test result.
colors : dict
Colors for plotting data in a ``{cell: color}`` dictionary.
dst : str
Directory in which to place results.
vec_fmt : str
Format for vector graphics (default 'pdf').
pix_fmt : str
Format for pixel graphics (default 'png').
labels : dict
Labels for data in a ``{cell: label}`` dictionary (the default is to
use cell names).
h : scalar
Plot height in inches (default 1.2).
rc : dict
Matplotlib rc-parameters dictionary (the default is optimized for the
default plot size ``h=1.2``).
Notes
-----
After loading a :class:`ClusterPlotter`, its ``rc``, ``colors``, ``labels``
and ``h`` attributes can be updated to create different plot layouts without
reloading the data.
"""
def __init__(self, ds, res, colors, dst, vec_fmt='pdf', pix_fmt='png',
labels=None, h=1.2, rc=None):
self.rc = RC.copy()
if rc is not None:
self.rc.update(rc)
self.ds = ds
self.res = res
self.colors = colors
self.labels = labels
self.h = h
self._dst = PlotDestDir(dst, pix_fmt, vec_fmt)
self._is_anova = isinstance(self.res, testnd.anova)
def _ids(self, ids):
if isinstance(ids, (float, int)):
return self._ids_for_p(ids)
elif isinstance(ids, dict):
if not self._is_anova:
raise TypeError("ids can not be dict for results other than ANOVA")
out = []
for effect, cids in ids.items():
if isinstance(cids, float):
out.extend(self._ids_for_p(cids, effect))
else:
out.extend((effect, cid) for cid in cids)
return out
else:
return ids
    def _ids_for_p(self, p, effect=None):
        """Find cluster IDs for clusters with p-value <= p.

        For ANOVA results the IDs are ``(effect, id)`` pairs; otherwise a
        plain ID column is returned.
        """
        if effect is None:
            clusters = self.res.find_clusters(p)
        else:
            clusters = self.res.find_clusters(p, effect=effect)
            # record the effect so ANOVA (effect, id) pairs can be built below
            # (Dataset column assignment -- eelbrain indexing syntax)
            clusters[:, 'effect'] = effect
        if self._is_anova:
            return list(zip(clusters['effect'], clusters['id']))
        else:
            return clusters['id']
def _get_clusters(self, ids):
return [self._get_cluster(cid) for cid in ids]
def _get_cluster(self, cid):
if self._is_anova:
effect, cid = cid
return self.res.cluster(cid, effect)
else:
return self.res.cluster(cid)
def plot_color_list(self, name, cells, w=None, colors=None):
if colors is None:
colors = self.colors
with mpl.rc_context(self.rc):
p = plot.ColorList(colors, cells, self.labels, w=w, show=False)
p.save(self._dst.vec % "colorlist %s" % name, transparent=True)
p.close()
def plot_color_grid(self, name, row_cells, column_cells):
with mpl.rc_context(self.rc):
p = plot.ColorGrid(row_cells, column_cells, self.colors, labels=self.labels)
p.save(self._dst.vec % "colorgrid %s" % name, transparent=True)
p.close()
def plot_clusters_spatial(self, ids, views, w=600, h=480, prefix=''):
"""Plot spatial extent of the clusters
Parameters
----------
ids : sequence | dict | scalar <= 1
IDs of the clusters that should be plotted. For ANOVA results, this
should be an ``{effect_name: id_list}`` dict. Instead of a list of
IDs a scalar can be provided to plot all clusters with p-values
smaller than this.
views : str | list of str | dict
Can a str or list of str to use the same views for all clusters. A dict
can have as keys labels or cluster IDs.
w, h : int
Size in pixels. The default (600 x 480) corresponds to 2 x 1.6 in
at 300 dpi.
prefix : str
Prefix to use for the image files (optional, can be used to
distinguish different groups of images sharing the same color-bars).
Notes
-----
The horizontal colorbar is 1.5 in wide, the vertical colorbar is 1.6 in
high.
"""
ids = self._ids(ids)
clusters = self._get_clusters(ids)
clusters_spatial = [c.sum('time') for c in clusters]
if isinstance(views, str):
views = (views,)
# vmax
vmin = min(c.min() for c in clusters_spatial)
vmax = max(c.max() for c in clusters_spatial)
abs_vmax = max(vmax, abs(vmin))
# anatomical extent
brain_colorbar_done = False
for cid, cluster in zip(ids, clusters_spatial):
name = cname(cid)
if prefix:
name = prefix + ' ' + name
for hemi in ('lh', 'rh'):
if not cluster.sub(source=hemi).any():
continue
brain = plot.brain.cluster(cluster, abs_vmax, views='lat',
background=(1, 1, 1), colorbar=False,
parallel=True, hemi=hemi, w=w, h=h)
for view in views:
brain.show_view(view)
brain.save_image(self._dst_pix % ' '.join((name, hemi, view)),
'rgba', True)
if not brain_colorbar_done:
with mpl.rc_context(self.rc):
label = "Sum of %s-values" % cluster.info['meas']
clipmin = 0 if vmin == 0 else None
clipmax = 0 if vmax == 0 else None
if prefix:
cbar_name = '%s cbar %%s' % prefix
else:
cbar_name = 'cbar %s'
h_cmap = 0.7 + POINT * mpl.rcParams['font.size']
p = brain.plot_colorbar(label, clipmin=clipmin, clipmax=clipmax,
width=0.1, h=h_cmap, w=1.5, show=False)
p.save(self._dst.vec % cbar_name % 'h', transparent=True)
p.close()
w_cmap = 0.8 + 0.1 * abs(floor(log10(vmax)))
p = brain.plot_colorbar(label, clipmin=clipmin, clipmax=clipmax,
width=0.1, h=1.6, w=w_cmap,
orientation='vertical', show=False)
p.save(self._dst.vec % cbar_name % 'v', transparent=True)
p.close()
brain_colorbar_done = True
brain.close()
    def _get_data(self, model, sub, subagg):
        """Prepare the dataset and model for plotting cluster values.

        Parameters
        ----------
        model : str
            Model defining the cells to plot.
        sub : str | None
            If given, restrict the data to this subset.
        subagg : str
            Index in ds: within index, collapse across other predictors.

        Returns
        -------
        (ds, model, modelname) : tuple
            Possibly reduced dataset, possibly replaced model, and a name
            that encodes the sub/agg operations applied.
        """
        ds = self.ds
        modelname = model
        if sub:
            ds = ds.sub(sub)
            modelname += '[%s]' % sub
        if subagg:
            # aggregate the subagg subset per subject, then recombine with
            # the untouched remainder of the data
            idx_subagg = ds.eval(subagg)
            ds_full = ds.sub(np.invert(idx_subagg))
            ds_agg = ds.sub(idx_subagg).aggregate("subject", drop_bad=True)
            ds = combine((ds_full, ds_agg), incomplete='fill in')
            ds['condition'] = ds.eval(model).as_factor()
            model = 'condition'
            modelname += '(agg %s)' % subagg
        return ds, model, modelname
def plot_values(self, ids, model, ymax, ymin, dpi=300, sub=None,
subagg=None, cells=None, pairwise=False, colors=None,
prefix=None, w=None, filter=None, legend=False):
"""Plot values in cluster
Parameters
----------
ids : sequence | dict | scalar <= 1
IDs of the clusters that should be plotted. For ANOVA results, this
should be an ``{effect_name: id_list}`` dict. Instead of a list of
IDs a scalar can be provided to plot all clusters with p-values
smaller than this.
model : str
Model defining cells which to plot separately.
ymax : scalar
Top of the y-axis.
ymin : scalar
Bottom of the y axis.
dpi : int
Figure DPI.
sub : str
Only use a subset of the data.
subagg : str
Index in ds: within index, collapse across other predictors.
cells : sequence of cells in model
Modify visible cells and their order. Only applies to the barplot.
Does not affect filename.
pairwise : bool
Add pairwise tests to barplots.
colors : dict
Substitute colors (default are the colors provided at
initialization).
| |
** 0.5)
self.q = nn.Parameter(torch.FloatTensor(config.d_model, self.n_head, self.d_head))
self.k = nn.Parameter(torch.FloatTensor(config.d_model, self.n_head, self.d_head))
self.v = nn.Parameter(torch.FloatTensor(config.d_model, self.n_head, self.d_head))
self.o = nn.Parameter(torch.FloatTensor(config.d_model, self.n_head, self.d_head))
self.r = nn.Parameter(torch.FloatTensor(config.d_model, self.n_head, self.d_head))
self.r_r_bias = nn.Parameter(torch.FloatTensor(self.n_head, self.d_head))
self.r_s_bias = nn.Parameter(torch.FloatTensor(self.n_head, self.d_head))
self.r_w_bias = nn.Parameter(torch.FloatTensor(self.n_head, self.d_head))
self.seg_embed = nn.Parameter(torch.FloatTensor(2, self.n_head, self.d_head))
self.layer_norm = nn.LayerNorm(config.d_model, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.dropout)
    def prune_heads(self, heads):
        # Head pruning is not implemented for this relative-attention module.
        raise NotImplementedError
@staticmethod
def rel_shift(x, klen=-1):
"""perform relative shift to form the relative attention score."""
x_size = x.shape
x = x.reshape(x_size[1], x_size[0], x_size[2], x_size[3])
x = x[1:, ...]
x = x.reshape(x_size[0], x_size[1] - 1, x_size[2], x_size[3])
# x = x[:, 0:klen, :, :]
x = torch.index_select(x, 1, torch.arange(klen, device=x.device, dtype=torch.long))
return x
@staticmethod
def rel_shift_bnij(x, klen=-1):
x_size = x.shape
x = x.reshape(x_size[0], x_size[1], x_size[3], x_size[2])
x = x[:, :, 1:, :]
x = x.reshape(x_size[0], x_size[1], x_size[2], x_size[3] - 1)
# Note: the tensor-slice form was faster in my testing than torch.index_select
# However, tracing doesn't like the nature of the slice, and if klen changes
# during the run then it'll fail, whereas index_select will be fine.
x = torch.index_select(x, 3, torch.arange(klen, device=x.device, dtype=torch.long))
# x = x[:, :, :, :klen]
return x
    def rel_attn_core(
        self,
        q_head,
        k_head_h,
        v_head_h,
        k_head_r,
        seg_mat=None,
        attn_mask=None,
        head_mask=None,
        output_attentions=False,
        pos_seq=None,
    ):
        """Core relative positional attention operations.

        Args:
            pos_seq: of shape [bsz, qlen, klen]
        """
        # content based attention score
        ac = torch.einsum("ibnd,jbnd->bnij", q_head + self.r_w_bias, k_head_h)
        # position based attention score
        # bd = torch.einsum("ibnd,jbnd->bnij", q_head + self.r_r_bias, k_head_r)
        bd_tmp = torch.einsum("ibnd,knd->bnik", q_head + self.r_r_bias, k_head_r) # i: qlen, k: -MAX_BAR_ENCODING ~ MAX_BAR_ENCODING
        # pos_seq holds signed relative offsets; adding MAX_BAR_ENCODING
        # (module-level constant, defined elsewhere in this file) maps them to
        # non-negative indices into bd_tmp's last axis
        pos_seq = pos_seq[:, None, :, :].expand(-1, bd_tmp.shape[1], -1, -1)
        bd = torch.gather(bd_tmp, -1, pos_seq + MAX_BAR_ENCODING)
        # bd = self.rel_shift_bnij(bd, klen=ac.shape[3])
        # segment based attention score
        if seg_mat is None:
            ef = 0
        else:
            ef = torch.einsum("ibnd,snd->ibns", q_head + self.r_s_bias, self.seg_embed)
            ef = torch.einsum("ijbs,ibns->bnij", seg_mat, ef)
        # merge attention scores and perform masking
        attn_score = (ac + bd + ef) * self.scale
        if attn_mask is not None:
            # attn_score = attn_score * (1 - attn_mask) - 1e30 * attn_mask
            # large negative additive mask; smaller constant for fp16 range
            if attn_mask.dtype == torch.float16:
                attn_score = attn_score - 65500 * torch.einsum("ijbn->bnij", attn_mask)
            else:
                attn_score = attn_score - 1e30 * torch.einsum("ijbn->bnij", attn_mask)
        # attention probability
        attn_prob = F.softmax(attn_score, dim=3)
        attn_prob = self.dropout(attn_prob)
        # Mask heads if we want to
        if head_mask is not None:
            attn_prob = attn_prob * torch.einsum("ijbn->bnij", head_mask)
        # attention output
        attn_vec = torch.einsum("bnij,jbnd->ibnd", attn_prob, v_head_h)
        if output_attentions:
            return attn_vec, torch.einsum("bnij->ijbn", attn_prob)
        return attn_vec
def post_attention(self, h, attn_vec, residual=True):
"""Post-attention processing."""
# post-attention projection (back to `d_model`)
attn_out = torch.einsum("ibnd,hnd->ibh", attn_vec, self.o)
attn_out = self.dropout(attn_out)
if residual:
attn_out = attn_out + h
output = self.layer_norm(attn_out)
return output
    def forward(
        self,
        h,
        g,
        attn_mask_h,
        attn_mask_g,
        r,
        seg_mat,
        mems=None,
        target_mapping=None,
        head_mask=None,
        output_attentions=False,
        pos_seq=None,
    ):
        """Relative-attention forward pass.

        Runs two-stream attention (content stream ``h`` plus query stream
        ``g``) when ``g`` is not None, otherwise standard single-stream
        attention. Returns ``(output_h, output_g)``, plus the attention
        probabilities when ``output_attentions`` is set.
        """
        if g is not None:
            # Two-stream attention with relative positional encoding.
            # content based attention score
            if mems is not None and mems.dim() > 1:
                # NOTE(review): mems are concatenated along dim 0 -- the
                # sequence axis in the "ibh" layout used below
                cat = torch.cat([mems, h], dim=0)
            else:
                cat = h
            # content-based key head
            k_head_h = torch.einsum("ibh,hnd->ibnd", cat, self.k)
            # content-based value head
            v_head_h = torch.einsum("ibh,hnd->ibnd", cat, self.v)
            # position-based key head
            # k_head_r = torch.einsum("ibh,hnd->ibnd", r, self.r)
            k_head_r = torch.einsum("ih,hnd->ind", r, self.r)
            # k_head_r_3d = torch.einsum("ijbh,hnd->ijbnd", pos_emb, self.r)
            # h-stream
            # content-stream query head
            q_head_h = torch.einsum("ibh,hnd->ibnd", h, self.q)
            # core attention ops
            attn_vec_h = self.rel_attn_core(
                q_head_h,
                k_head_h,
                v_head_h,
                k_head_r,
                seg_mat=seg_mat,
                attn_mask=attn_mask_h,
                head_mask=head_mask,
                output_attentions=output_attentions,
                pos_seq=pos_seq,
            )
            if output_attentions:
                attn_vec_h, attn_prob_h = attn_vec_h
            # post processing
            output_h = self.post_attention(h, attn_vec_h)
            # g-stream
            # query-stream query head
            q_head_g = torch.einsum("ibh,hnd->ibnd", g, self.q)
            # core attention ops
            if target_mapping is not None:
                # project queries onto the prediction targets before attention
                q_head_g = torch.einsum("mbnd,mlb->lbnd", q_head_g, target_mapping)
                attn_vec_g = self.rel_attn_core(
                    q_head_g,
                    k_head_h,
                    v_head_h,
                    k_head_r,
                    seg_mat=seg_mat,
                    attn_mask=attn_mask_g,
                    head_mask=head_mask,
                    output_attentions=output_attentions,
                    pos_seq=pos_seq,
                )
                if output_attentions:
                    attn_vec_g, attn_prob_g = attn_vec_g
                # map the attended vectors back to the original positions
                attn_vec_g = torch.einsum("lbnd,mlb->mbnd", attn_vec_g, target_mapping)
            else:
                attn_vec_g = self.rel_attn_core(
                    q_head_g,
                    k_head_h,
                    v_head_h,
                    k_head_r,
                    seg_mat=seg_mat,
                    attn_mask=attn_mask_g,
                    head_mask=head_mask,
                    output_attentions=output_attentions,
                    pos_seq=pos_seq,
                )
                if output_attentions:
                    attn_vec_g, attn_prob_g = attn_vec_g
            # post processing
            output_g = self.post_attention(g, attn_vec_g)
            if output_attentions:
                attn_prob = attn_prob_h, attn_prob_g
        else:
            # Multi-head attention with relative positional encoding
            if mems is not None and mems.dim() > 1:
                cat = torch.cat([mems, h], dim=0)
            else:
                cat = h
            # content heads
            q_head_h = torch.einsum("ibh,hnd->ibnd", h, self.q)
            k_head_h = torch.einsum("ibh,hnd->ibnd", cat, self.k)
            v_head_h = torch.einsum("ibh,hnd->ibnd", cat, self.v)
            # positional heads
            # type casting for fp16 support
            # print(r.shape, self.r.shape)
            # k_head_r = torch.einsum("ibh,hnd->ibnd", r.type(self.r.dtype), self.r)
            k_head_r = torch.einsum("ih,hnd->ind", r, self.r)
            # core attention ops
            attn_vec = self.rel_attn_core(
                q_head_h,
                k_head_h,
                v_head_h,
                k_head_r,
                seg_mat=seg_mat,
                attn_mask=attn_mask_h,
                head_mask=head_mask,
                output_attentions=output_attentions,
                pos_seq=pos_seq,
            )
            if output_attentions:
                attn_vec, attn_prob = attn_vec
            # post processing
            output_h = self.post_attention(h, attn_vec)
            output_g = None
        outputs = (output_h, output_g)
        if output_attentions:
            outputs = outputs + (attn_prob,)
        return outputs
class XLNetFeedForward(nn.Module):
    """Position-wise feed-forward block: ``LayerNorm(x + FFN(x))``."""

    def __init__(self, config):
        super().__init__()
        # keep module creation order identical for state_dict compatibility
        self.layer_norm = nn.LayerNorm(config.d_model, eps=config.layer_norm_eps)
        self.layer_1 = nn.Linear(config.d_model, config.d_inner)
        self.layer_2 = nn.Linear(config.d_inner, config.d_model)
        self.dropout = nn.Dropout(config.dropout)
        # ff_activation may be a name (looked up in ACT2FN) or a callable
        if isinstance(config.ff_activation, str):
            self.activation_function = ACT2FN[config.ff_activation]
        else:
            self.activation_function = config.ff_activation

    def forward(self, inp):
        """Apply the two-layer FFN with dropout and a residual connection."""
        hidden = self.dropout(self.activation_function(self.layer_1(inp)))
        hidden = self.dropout(self.layer_2(hidden))
        return self.layer_norm(hidden + inp)
class XLNetLayer(nn.Module):
    """One XLNet transformer layer: relative attention followed by the FFN."""

    def __init__(self, config):
        super().__init__()
        self.rel_attn = XLNetRelativeAttention(config)
        self.ff = XLNetFeedForward(config)
        self.dropout = nn.Dropout(config.dropout)
        self.chunk_size_feed_forward = config.chunk_size_feed_forward
        # chunking is applied along the sequence-length dimension
        self.seq_len_dim = 1

    def forward(
        self,
        output_h,
        output_g,
        attn_mask_h,
        attn_mask_g,
        r,
        seg_mat,
        mems=None,
        target_mapping=None,
        head_mask=None,
        output_attentions=False,
        pos_seq=None,
    ):
        """Run relative attention, then the (optionally chunked) FFN on
        both streams."""
        attn_outputs = self.rel_attn(
            output_h,
            output_g,
            attn_mask_h,
            attn_mask_g,
            r,
            seg_mat,
            mems=mems,
            target_mapping=target_mapping,
            head_mask=head_mask,
            output_attentions=output_attentions,
            pos_seq=pos_seq,
        )
        output_h, output_g = attn_outputs[:2]
        # the g-stream only exists during two-stream (pre-training) attention
        if output_g is not None:
            output_g = apply_chunking_to_forward(
                self.ff_chunk, self.chunk_size_feed_forward, self.seq_len_dim, output_g
            )
        output_h = apply_chunking_to_forward(
            self.ff_chunk, self.chunk_size_feed_forward, self.seq_len_dim, output_h
        )
        # re-attach attention probabilities if they were requested
        return (output_h, output_g) + attn_outputs[2:]

    def ff_chunk(self, output_x):
        """Feed-forward applied to one chunk of the sequence."""
        return self.ff(output_x)
class XLNetPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface
    for downloading and loading pretrained models.
    """

    config_class = XLNetConfig
    load_tf_weights = load_tf_weights_in_xlnet
    base_model_prefix = "transformer"

    def _init_weights(self, module):
        """Initialize the weights of one sub-module (applied recursively)."""
        std = self.config.initializer_range
        if isinstance(module, nn.Linear):
            # Slightly different from the TF version which uses truncated_normal
            # for initialization; cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        elif isinstance(module, XLNetRelativeAttention):
            # all relative-attention projections and biases share the same init
            for param in (
                module.q,
                module.k,
                module.v,
                module.o,
                module.r,
                module.r_r_bias,
                module.r_s_bias,
                module.r_w_bias,
                module.seg_embed,
            ):
                param.data.normal_(mean=0.0, std=std)
        elif isinstance(module, XLNetModel):
            module.mask_emb.data.normal_(mean=0.0, std=std)
@dataclass
class XLNetModelOutput(ModelOutput):
    """
    Output type of :class:`~transformers.XLNetModel`.
    Args:
        last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_predict, hidden_size)`):
            Sequence of hidden-states at the last layer of the model.
            ``num_predict`` corresponds to ``target_mapping.shape[1]``. If ``target_mapping`` is ``None``, then
            ``num_predict`` corresponds to ``sequence_length``.
        mems (:obj:`List[torch.FloatTensor]` of length :obj:`config.n_layers`):
            Contains pre-computed hidden-states. Can be used (see :obj:`mems` input) to speed up sequential decoding.
            The token ids which have their past given to this model should not be passed as :obj:`input_ids` as they
            have already been computed.
        hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
            Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
            of shape :obj:`(batch_size, sequence_length, hidden_size)`.
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
            Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
            sequence_length, sequence_length)`.
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
    """

    # Field shapes and semantics are documented in the class docstring above.
    last_hidden_state: torch.FloatTensor
    mems: Optional[List[torch.FloatTensor]] = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None
@dataclass
class XLNetLMHeadModelOutput(ModelOutput):
"""
Output type of :class:`~transformers.XLNetLMHeadModel`.
Args:
loss (:obj:`torch.FloatTensor` of shape `(1,)`, `optional`, returned when ``labels`` is provided)
Language modeling loss (for next-token prediction).
logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_predict, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
``num_predict`` corresponds to ``target_mapping.shape[1]``. If ``target_mapping`` is ``None``, then
``num_predict`` corresponds to ``sequence_length``.
mems (:obj:`List[torch.FloatTensor]` of length :obj:`config.n_layers`):
Contains pre-computed hidden-states. Can be used (see :obj:`mems` input) to speed up sequential decoding.
The token ids which have their past given to | |
"Don't Fragment", 1: "More Fragments", })] # noqa: E501
# Machine-generated Diameter AVP tables: each class fixes the AVP name, its
# encoded length, and an Enumerated 'val' field. AVP_FL_NV supplies the
# shared flag/length header fields (defined elsewhere in this file).
class AVP_0_538(AVP_FL_NV):
    name = 'IP-Option-Type'
    avpLen = 12
    fields_desc = [
        AVP_FL_NV,
        Enumerated('val', None, {
            0: "end_of_list",
            1: "nop",
            2: "security",
            3: "loose_source_route",
            4: "timestamp",
            5: "extended_security",
            6: "commercial_security",
            7: "record_route",
            8: "stream_id",
            9: "strict_source_route",
            10: "experimental_measurement",
            11: "mtu_probe",
            12: "mtu_reply",
            13: "flow_control",
            14: "access_control",
            15: "encode",
            16: "imi_traffic_descriptor",
            17: "extended_IP",
            18: "traceroute",
            19: "address_extension",
            20: "router_alert",
            21: "selective_directed_broadcast_mode",
            23: "dynamic_packet_state",
            24: "upstream_multicast_packet",
            25: "quick_start",
            30: "rfc4727_experiment",
        })]


class AVP_0_541(AVP_FL_NV):
    name = 'TCP-Option-Type'
    avpLen = 12
    fields_desc = [
        AVP_FL_NV,
        Enumerated('val', None, {
            0: "EOL",
            1: "NOP",
            2: "MSS",
            3: "WScale",
            4: "SAckOK",
            5: "SAck",
            8: "Timestamp",
            14: "AltChkSum",
            15: "AltChkSumOpt",
            25: "Mood",
        })]


class AVP_0_546(AVP_FL_NV):
    name = 'ICMP-Type-Number'
    avpLen = 12
    fields_desc = [
        AVP_FL_NV,
        Enumerated('val', None, {
            0: "echo-reply",
            3: "dest-unreach",
            4: "source-quench",
            5: "redirect",
            8: "echo-request",
            9: "router-advertisement",
            10: "router-solicitation",
            11: "time-exceeded",
            12: "parameter-problem",
            13: "timestamp-request",
            14: "timestamp-reply",
            15: "information-request",
            16: "information-response",
            17: "address-mask-request",
            18: "address-mask-reply",
        })]


class AVP_0_547(AVP_FL_NV):
    name = 'ICMP-Code'
    avpLen = 12
    fields_desc = [
        AVP_FL_NV,
        Enumerated('val', None, {0: "TBD"})]


class AVP_0_570(AVP_FL_NV):
    name = 'Timezone-Flag'
    avpLen = 12
    fields_desc = [
        AVP_FL_NV,
        Enumerated('val', None, {0: "UTC", 1: "LOCAL", 2: "OFFSET"})]


class AVP_0_575(AVP_FL_NV):
    name = 'QoS-Semantics'
    avpLen = 12
    fields_desc = [
        AVP_FL_NV,
        Enumerated('val', None, {
            0: "QoS_Desired",
            1: "QoS_Available",
            2: "QoS_Delivered",
            3: "Minimum_QoS",
            4: "QoS_Authorized",
        })]
# Machine-generated Diameter AVP tables (vendor AVPs): AVP_FL_V supplies the
# shared header fields (defined elsewhere in this file).
class AVP_10415_500(AVP_FL_V):
    name = 'Abort-Cause'
    avpLen = 16
    fields_desc = [
        AVP_FL_V,
        Enumerated('val', None, {
            0: "BEARER_RELEASED",
            1: "INSUFFICIENT_SERVER_RESOURCES",
            2: "INSUFFICIENT_BEARER_RESOURCES",
        })]


class AVP_10415_511(AVP_FL_V):
    name = 'Flow-Status'
    avpLen = 16
    fields_desc = [
        AVP_FL_V,
        Enumerated('val', None, {
            0: "ENABLED-UPLINK",
            1: "ENABLED-DOWNLINK",
            2: "ENABLED",
            3: "DISABLED",
            4: "REMOVED",
        })]


class AVP_10415_512(AVP_FL_V):
    name = 'Flow-Usage'
    avpLen = 16
    fields_desc = [
        AVP_FL_V,
        Enumerated('val', None, {
            0: "NO_INFORMATION",
            1: "RTCP",
            2: "AF_SIGNALLING",
        })]


class AVP_10415_513(AVP_FL_V):
    name = 'Specific-Action'
    avpLen = 16
    fields_desc = [
        AVP_FL_V,
        Enumerated('val', None, {
            1: "CHARGING_CORRELATION_EXCHANGE",
            2: "INDICATION_OF_LOSS_OF_BEARER",
            3: "INDICATION_OF_RECOVERY_OF_BEARER",
            4: "INDICATION_OF_RELEASE_OF_BEARER",
            6: "IP-CAN_CHANGE",
            7: "INDICATION_OF_OUT_OF_CREDIT",
            8: "INDICATION_OF_SUCCESSFUL_RESOURCES_ALLOCATION",
            9: "INDICATION_OF_FAILED_RESOURCES_ALLOCATION",
            10: "INDICATION_OF_LIMITED_PCC_DEPLOYMENT",
            11: "USAGE_REPORT",
            12: "ACCESS_NETWORK_INFO_REPORT",
        })]


class AVP_10415_520(AVP_FL_V):
    name = 'Media-Type'
    avpLen = 16
    fields_desc = [
        AVP_FL_V,
        Enumerated('val', None, {
            0: "AUDIO",
            1: "VIDEO",
            2: "DATA",
            3: "APPLICATION",
            4: "CONTROL",
            5: "TEXT",
            6: "MESSAGE",
            4294967295: "OTHER",
        })]


class AVP_10415_523(AVP_FL_V):
    name = 'SIP-Forking-Indication'
    avpLen = 16
    fields_desc = [
        AVP_FL_V,
        Enumerated('val', None, {
            0: "SINGLE_DIALOGUE",
            1: "SEVERAL_DIALOGUES",
        })]


class AVP_10415_527(AVP_FL_V):
    name = 'Service-Info-Status'
    avpLen = 16
    fields_desc = [
        AVP_FL_V,
        Enumerated('val', None, {
            0: "FINAL_SERVICE_INFORMATION",
            1: "PRELIMINARY_SERVICE_INFORMATION",
        })]


class AVP_10415_529(AVP_FL_V):
    name = 'AF-Signalling-Protocol'
    avpLen = 16
    fields_desc = [
        AVP_FL_V,
        Enumerated('val', None, {0: "NO_INFORMATION", 1: "SIP"})]


class AVP_10415_533(AVP_FL_V):
    name = 'Rx-Request-Type'
    avpLen = 16
    fields_desc = [
        AVP_FL_V,
        Enumerated('val', None, {0: "INITIAL_REQUEST", 1: "UPDATE_REQUEST"})]


class AVP_10415_536(AVP_FL_V):
    name = 'Required-Access-Info'
    avpLen = 16
    fields_desc = [
        AVP_FL_V,
        Enumerated('val', None, {0: "USER_LOCATION", 1: "MS_TIME_ZONE"})]
# Machine-generated Diameter AVP tables (vendor AVPs), continued.
class AVP_10415_614(AVP_FL_V):
    name = 'Server-Assignment-Type'
    avpLen = 16
    fields_desc = [
        AVP_FL_V,
        Enumerated('val', None, {
            0: "NO_ASSIGNMENT",
            1: "REGISTRATION",
            2: "RE_REGISTRATION",
            3: "UNREGISTERED_USER",
            4: "TIMEOUT_DEREGISTRATION",
            5: "USER_DEREGISTRATION",
            6: "TIMEOUT_DEREGISTRATION_STORE_SERVER_NAME",
            7: "USER_DEREGISTRATION_STORE_SERVER_NAME",
            8: "ADMINISTRATIVE_DEREGISTRATION",
            9: "AUTHENTICATION_FAILURE",
            10: "AUTHENTICATION_TIMEOUT",
            11: "DEREGISTRATION_TOO_MUCH_DATA",
            12: "AAA_USER_DATA_REQUEST",
            13: "PGW_UPDATE",
        })]


class AVP_10415_616(AVP_FL_V):
    name = 'Reason-Code'
    avpLen = 16
    fields_desc = [
        AVP_FL_V,
        Enumerated('val', None, {
            0: "PERMANENT_TERMINATION",
            1: "NEW_SERVER_ASSIGNED",
            2: "SERVER_CHANGE",
            3: "REMOVE_S-CSCF",
        })]


class AVP_10415_623(AVP_FL_V):
    name = 'User-Authorization-Type'
    avpLen = 16
    fields_desc = [
        AVP_FL_V,
        Enumerated('val', None, {
            0: "REGISTRATION",
            1: "DE_REGISTRATION",
            2: "REGISTRATION_AND_CAPABILITIES",
        })]


class AVP_10415_624(AVP_FL_V):
    name = 'User-Data-Already-Available'
    avpLen = 16
    fields_desc = [
        AVP_FL_V,
        Enumerated('val', None, {
            0: "USER_DATA_NOT_AVAILABLE",
            1: "USER_DATA_ALREADY_AVAILABLE",
        })]


class AVP_10415_633(AVP_FL_V):
    name = 'Originating-Request'
    avpLen = 16
    fields_desc = [
        AVP_FL_V,
        Enumerated('val', None, {0: "ORIGINATING"})]


class AVP_10415_638(AVP_FL_V):
    name = 'Loose-Route-Indication'
    avpLen = 16
    fields_desc = [
        AVP_FL_V,
        Enumerated('val', None, {
            0: "LOOSE_ROUTE_NOT_REQUIRED",
            1: "LOOSE_ROUTE_REQUIRED",
        })]


class AVP_10415_648(AVP_FL_V):
    name = 'Multiple-Registration-Indication'
    avpLen = 16
    fields_desc = [
        AVP_FL_V,
        Enumerated('val', None, {
            0: "NOT_MULTIPLE_REGISTRATION",
            1: "MULTIPLE_REGISTRATION",
        })]


class AVP_10415_650(AVP_FL_V):
    name = 'Session-Priority'
    avpLen = 16
    fields_desc = [
        AVP_FL_V,
        Enumerated('val', None, {
            0: "PRIORITY-0",
            1: "PRIORITY-1",
            2: "PRIORITY-2",
            3: "PRIORITY-3",
            4: "PRIORITY-4",
        })]


class AVP_10415_652(AVP_FL_V):
    name = 'Priviledged-Sender-Indication'
    avpLen = 16
    fields_desc = [
        AVP_FL_V,
        Enumerated('val', None, {
            0: "NOT_PRIVILEDGED_SENDER",
            1: "PRIVILEDGED_SENDER",
        })]
# Machine-generated Diameter AVP tables (vendor AVPs), continued.
class AVP_10415_703(AVP_FL_V):
    name = 'Data-Reference'
    avpLen = 16
    fields_desc = [
        AVP_FL_V,
        Enumerated('val', None, {
            0: "RepositoryData",
            1: "Undefined",
            2: "Undefined",
            3: "Undefined",
            4: "Undefined",
            5: "Undefined",
            6: "Undefined",
            7: "Undefined",
            8: "Undefined",
            9: "Undefined",
            10: "IMSPublicIdentity",
            11: "IMSUserState",
            12: "S-CSCFName",
            13: "InitialFilterCriteria",
            14: "LocationInformation",
            15: "UserState",
            16: "ChargingInformation",
            17: "MSISDN",
            18: "PSIActivation",
            19: "DSAI",
            20: "Reserved",
            21: "ServiceLevelTraceInfo",
            22: "IPAddressSecureBindingInformation",
            23: "ServicePriorityLevel",
            24: "SMSRegistrationInfo",
            25: "UEReachabilityForIP",
            26: "TADSinformation",
            27: "STN-SR",
            28: "UE-SRVCC-Capability",
            29: "ExtendedPriority",
            30: "CSRN",
            31: "ReferenceLocationInformation",
        })]


class AVP_10415_705(AVP_FL_V):
    name = 'Subs-Req-Type'
    avpLen = 16
    fields_desc = [
        AVP_FL_V,
        Enumerated('val', None, {0: "Subscribe", 1: "Unsubscribe"})]


class AVP_10415_706(AVP_FL_V):
    name = 'Requested-Domain'
    avpLen = 16
    fields_desc = [
        AVP_FL_V,
        Enumerated('val', None, {0: "CS-Domain", 1: "PS-Domain"})]


class AVP_10415_707(AVP_FL_V):
    name = 'Current-Location'
    avpLen = 16
    fields_desc = [
        AVP_FL_V,
        Enumerated('val', None, {
            0: "DoNotNeedInitiateActiveLocationRetrieval",
            1: "InitiateActiveLocationRetrieval",
        })]


class AVP_10415_708(AVP_FL_V):
    name = 'Identity-Set'
    avpLen = 16
    fields_desc = [
        AVP_FL_V,
        Enumerated('val', None, {
            0: "ALL_IDENTITIES",
            1: "REGISTERED_IDENTITIES",
            2: "IMPLICIT_IDENTITIES",
            3: "ALIAS_IDENTITIES",
        })]


class AVP_10415_710(AVP_FL_V):
    name = 'Send-Data-Indication'
    avpLen = 16
    fields_desc = [
        AVP_FL_V,
        Enumerated('val', None, {
            0: "USER_DATA_NOT_REQUESTED",
            1: "USER_DATA_REQUESTED",
        })]


class AVP_10415_712(AVP_FL_V):
    name = 'One-Time-Notification'
    avpLen = 16
    fields_desc = [
        AVP_FL_V,
        Enumerated('val', None, {0: "ONE_TIME_NOTIFICATION_REQUESTED"})]


class AVP_10415_714(AVP_FL_V):
    name = 'Serving-Node-Indication'
    avpLen = 16
    fields_desc = [
        AVP_FL_V,
        Enumerated('val', None, {0: "ONLY_SERVING_NODES_REQUIRED"})]


class AVP_10415_717(AVP_FL_V):
    name = 'Pre-paging-Supported'
    avpLen = 16
    fields_desc = [
        AVP_FL_V,
        Enumerated('val', None, {
            0: "PREPAGING_NOT_SUPPORTED",
            1: "PREPAGING_SUPPORTED",
        })]


class AVP_10415_718(AVP_FL_V):
    name = 'Local-Time-Zone-Indication'
    avpLen = 16
    fields_desc = [
        AVP_FL_V,
        Enumerated('val', None, {
            0: "ONLY_LOCAL_TIME_ZONE_REQUESTED",
            1: "LOCAL_TIME_ZONE_WITH_LOCATION_INFO_REQUESTED",
        })]
# Machine-generated Diameter AVP tables (vendor AVPs), continued.
# NOTE(review): several enum strings below carry trailing/embedded spaces
# (e.g. "E-CSCF ", "'Append' ") -- preserved verbatim from the generator.
class AVP_10415_829(AVP_FL_V):
    name = 'Role-Of-Node'
    avpLen = 16
    fields_desc = [
        AVP_FL_V,
        Enumerated('val', None, {
            0: "HPLMN",
            1: "VPLMN",
            2: "FORWARDING_ROLE",
        })]


class AVP_10415_862(AVP_FL_V):
    name = 'Node-Functionality'
    avpLen = 16
    fields_desc = [
        AVP_FL_V,
        Enumerated('val', None, {
            0: "S-CSCF",
            1: "P-CSCF",
            2: "I-CSCF",
            5: "BGCF",
            6: "AS",
            7: "IBCF",
            8: "S-GW",
            9: "P-GW",
            10: "HSGW",
            11: "E-CSCF ",
            12: "MME ",
            13: "TRF",
            14: "TF",
            15: "ATCF",
            16: "Proxy Function",
            17: "ePDG",
        })]


class AVP_10415_864(AVP_FL_V):
    name = 'Originator'
    avpLen = 16
    fields_desc = [
        AVP_FL_V,
        Enumerated('val', None, {0: "Calling Party", 1: "Called Party"})]


class AVP_10415_867(AVP_FL_V):
    name = 'PS-Append-Free-Format-Data'
    avpLen = 16
    fields_desc = [
        AVP_FL_V,
        Enumerated('val', None, {0: "'Append' ", 1: "'Overwrite' "})]


class AVP_10415_870(AVP_FL_V):
    name = 'Trigger-Type'
    avpLen = 16
    fields_desc = [
        AVP_FL_V,
        Enumerated('val', None, {
            1: "CHANGE_IN_SGSN_IP_ADDRESS ",
            2: "CHANGE_IN_QOS",
            3: "CHANGE_IN_LOCATION",
            4: "CHANGE_IN_RAT",
            5: "CHANGE_IN_UE_TIMEZONE",
            10: "CHANGEINQOS_TRAFFIC_CLASS",
            11: "CHANGEINQOS_RELIABILITY_CLASS",
            12: "CHANGEINQOS_DELAY_CLASS",
            13: "CHANGEINQOS_PEAK_THROUGHPUT",
            14: "CHANGEINQOS_PRECEDENCE_CLASS",
            15: "CHANGEINQOS_MEAN_THROUGHPUT",
            16: "CHANGEINQOS_MAXIMUM_BIT_RATE_FOR_UPLINK",
            17: "CHANGEINQOS_MAXIMUM_BIT_RATE_FOR_DOWNLINK",
            18: "CHANGEINQOS_RESIDUAL_BER",
            19: "CHANGEINQOS_SDU_ERROR_RATIO",
            20: "CHANGEINQOS_TRANSFER_DELAY",
            21: "CHANGEINQOS_TRAFFIC_HANDLING_PRIORITY",
            22: "CHANGEINQOS_GUARANTEED_BIT_RATE_FOR_UPLINK",
            23: "CHANGEINQOS_GUARANTEED_BIT_RATE_FOR_DOWNLINK",
            24: "CHANGEINQOS_APN_AGGREGATE_MAXIMUM_BIT_RATE",
            30: "CHANGEINLOCATION_MCC",
            31: "CHANGEINLOCATION_MNC",
            32: "CHANGEINLOCATION_RAC",
            33: "CHANGEINLOCATION_LAC",
            34: "CHANGEINLOCATION_CellId",
            35: "CHANGEINLOCATION_TAC",
            36: "CHANGEINLOCATION_ECGI",
            40: "CHANGE_IN_MEDIA_COMPOSITION",
            50: "CHANGE_IN_PARTICIPANTS_NMB",
            51: "CHANGE_IN_ THRSHLD_OF_PARTICIPANTS_NMB",
            52: "CHANGE_IN_USER_PARTICIPATING_TYPE",
            60: "CHANGE_IN_SERVICE_CONDITION",
            61: "CHANGE_IN_SERVING_NODE",
            70: "CHANGE_IN_USER_CSG_INFORMATION",
            71: "CHANGE_IN_HYBRID_SUBSCRIBED_USER_CSG_INFORMATION",
            72: "CHANGE_IN_HYBRID_UNSUBSCRIBED_USER_CSG_INFORMATION",
            73: "CHANGE_OF_UE_PRESENCE_IN_PRESENCE_REPORTING_AREA",
        })]


class AVP_10415_872(AVP_FL_V):
    name = 'Reporting-Reason'
    avpLen = 16
    fields_desc = [
        AVP_FL_V,
        Enumerated('val', None, {
            0: "THRESHOLD",
            1: "QHT",
            2: "FINAL",
            3: "QUOTA_EXHAUSTED",
            4: "VALIDITY_TIME",
            5: "OTHER_QUOTA_TYPE",
            6: "RATING_CONDITION_CHANGE",
            7: "FORCED_REAUTHORISATION",
            8: "POOL_EXHAUSTED",
        })]


class AVP_10415_882(AVP_FL_V):
    name = 'Media-Initiator-Flag'
    avpLen = 16
    fields_desc = [
        AVP_FL_V,
        Enumerated('val', None, {
            0: "called party",
            1: "calling party",
            2: "unknown",
        })]
class AVP_10415_883 (AVP_FL_V):
name = 'PoC-Server-Role'
avpLen = 16
fields_desc = [AVP_FL_V, Enumerated('val', None, {0: "Participating PoC Server", 1: "Controlling PoC | |
var,
axes='per-activation', epsilon=1e-4):
"""
Performs batch normalization of the given inputs, using the given mean and
variance.
Parameters
----------
axes : 'per-activation', 'spatial' or a tuple of ints
The axes along which the input should be normalized. ``'per-activation'``
normalizes per activation and is equal to ``axes=(0,)``.
``'spatial'`` shares normalization factors across spatial dimensions
(i.e., all dimensions past the second), which for 4D inputs would be
equal to ``axes=(0, 2, 3)``.
gamma : tensor
Scale factors. The shape must match the shape of `inputs`,
except for the axes in `axes`. These axes should be set to 1 or be
skipped altogether (such that `gamma.ndim == inputs.ndim - len(axes)`).
beta : tensor
Biases. Must match the tensor layout of `gamma`.
mean : tensor
Means. Usually these are running averages computed during training.
Must match the tensor layout of `gamma`.
var : tensor
Variances. Usually these are running averages computed during training.
Must match the tensor layout of `gamma`.
epsilon : float
Epsilon value used in the batch normalization formula. Minimum allowed
value is 1e-5 (imposed by cuDNN).
Returns
-------
out : tensor
Batch-normalized inputs.
Notes
-----
If per-activation or spatial normalization is selected, this operation
will use the cuDNN implementation. (This requires cuDNN 5 or newer.)
The returned value is equivalent to:
.. code-block:: python
# for per-activation normalization
axes = (0,)
# for spatial normalization
axes = (0,) + tuple(range(2, inputs.ndim))
gamma, beta, mean, var = (T.addbroadcast(t, *axes)
for t in (gamma, beta, mean, var))
out = (inputs - mean) * gamma / T.sqrt(var + epsilon) + beta
"""
ndim = inputs.ndim
axes, non_bc_axes = _prepare_batch_normalization_axes(axes, ndim)
# have the parameter tensors been broadcasted yet?
if gamma.ndim == ndim:
params_ndim = ndim
else:
params_ndim = len(non_bc_axes)
params_dimshuffle_pattern = ['x'] * ndim
for i, axis in enumerate(non_bc_axes):
params_dimshuffle_pattern[axis] = i
if gamma.ndim != params_ndim or beta.ndim != params_ndim:
raise ValueError("gamma and beta dimensionality must match the "
"number of non-normalized axes, or have the "
"same number of dimensions as the inputs; "
"got %d and %d instead of %d" %
(gamma.ndim, beta.ndim, params_ndim))
if mean.ndim != params_ndim or var.ndim != params_ndim:
raise ValueError("mean and var must be of the same dimensionality "
"as gamma and beta; got %d and %d instead of %d" %
(mean.ndim, var.ndim, params_ndim))
# epsilon will be converted to floatX later. we need to check
# for rounding errors now, since numpy.float32(1e-5) < 1e-5.
epsilon = np.cast[theano.config.floatX](epsilon)
if epsilon < 1e-5:
raise ValueError("epsilon must be at least 1e-5, got %s" % str(epsilon))
gamma = as_tensor_variable(gamma)
beta = as_tensor_variable(beta)
mean = as_tensor_variable(mean)
var = as_tensor_variable(var)
if params_ndim != ndim:
gamma = gamma.dimshuffle(params_dimshuffle_pattern)
beta = beta.dimshuffle(params_dimshuffle_pattern)
mean = mean.dimshuffle(params_dimshuffle_pattern)
var = var.dimshuffle(params_dimshuffle_pattern)
else:
gamma = T.addbroadcast(gamma, *axes)
beta = T.addbroadcast(beta, *axes)
mean = T.addbroadcast(mean, *axes)
var = T.addbroadcast(var, *axes)
batchnorm_op = AbstractBatchNormInference(axes=axes)
return batchnorm_op(inputs, gamma, beta, mean, var, epsilon=epsilon)
class AbstractBatchNormTrain(Op):
    """
    Abstract Op for Batch Normalization.
    Parameters
    ----------
    axes : a tuple of ints
        The axes along which the input should be normalized.
    x : tensor
        The input to be normalized along `axes`.
    scale : tensor
        `scale` should have the same number of dimensions as `x`.
        All dimensions listed in `axes` should have length 1.
    bias : tensor
        `bias` should have the same number of dimensions as `x`.
        All dimensions listed in `axes` should have length 1.
    epsilon
        Epsilon value used in the batch normalization formula. Minimum allowed
        value is 1e-5 (imposed by cuDNN).
    running_average_factor : float
        Factor for updating the values or `running_mean` and `running_var`.
        If the factor is close to one, the running averages will update quickly,
        if the factor is close to zero it will update slowly.
    running_mean : tensor or None
        Previous value of the running mean. If this is given, the new value
        ``running_mean * (1 - running_average_factor) + batch mean * running_average_factor``
        will be returned as one of the outputs of this function.
        `running_mean` and `running_var` should either both be given or
        both be None.
    running_var : tensor or None
        Previous value of the running variance. If this is given, the new value
        ``running_var * (1 - running_average_factor) + (m / (m - 1)) * batch var * running_average_factor``
        will be returned as one of the outputs of this function,
        where `m` is the product of lengths of the averaged-over dimensions.
        `running_mean` and `running_var` should either both be given or
        both be None.
    """
    __props__ = ('axes',)
    def __init__(self, axes=(0,)):
        # Canonicalize `axes` to a tuple of ints; it is the only prop, so it
        # fully determines Op equality/hashing via __props__.
        assert isinstance(axes, (tuple, list))
        assert len(axes) > 0
        axes = tuple(int(a) for a in axes)
        self.axes = axes
    def infer_shape(self, node, shape):
        # Output 0 (normalized x) has x's shape; every remaining output
        # (mean, invstd, optional running averages) has scale's shape.
        return [shape[0]] + [shape[1]] * (len(node.outputs) - 1)
    def make_node(self, x, scale, bias, epsilon=1e-4,
                  running_average_factor=0.1,
                  running_mean=None, running_var=None):
        # Wrap every input as a symbolic tensor variable.
        x = as_tensor_variable(x)
        scale = as_tensor_variable(scale)
        bias = as_tensor_variable(bias)
        epsilon = as_tensor_variable(epsilon)
        running_average_factor = as_tensor_variable(running_average_factor)
        if running_mean is not None:
            running_mean = as_tensor_variable(running_mean)
        if running_var is not None:
            running_var = as_tensor_variable(running_var)
        # All tensor inputs must be pre-broadcast to x's dimensionality,
        # and the running averages must be supplied together or not at all.
        assert x.ndim == scale.ndim == bias.ndim
        assert ((running_mean is None and running_var is None) or
                (running_mean is not None and running_var is not None))
        assert (running_mean is None or running_mean.ndim == x.ndim)
        assert (running_var is None or running_var.ndim == x.ndim)
        # Upcast to common dtype on the non-scalar
        # Keep as is dtype of scalar (epsilon and running_average_factor)
        # NOTE(review): this truth-tests a symbolic variable; the safer
        # spelling would be `running_mean is not None` — confirm that
        # bool() of a variable is well-defined here.
        if running_mean:
            x, scale, bias, running_mean, running_var = as_common_dtype(
                x, scale, bias, running_mean, running_var)
        else:
            x, scale, bias = as_common_dtype(x, scale, bias)
        inputs = [x, scale, bias, epsilon, running_average_factor]
        # Outputs: normalized x, batch mean, batch inverse std deviation,
        # plus (optionally) the updated running mean and running variance.
        output_types = [x.type(), scale.type(), scale.type()]
        if running_mean is not None and running_var is not None:
            inputs.append(running_mean)
            inputs.append(running_var)
            output_types.append(scale.type())
            output_types.append(scale.type())
        return Apply(self, inputs, output_types)
    def L_op(self, inputs, outputs, grads):
        # Gradients wrt x, scale and bias come from the dedicated grad Op;
        # the scalar inputs and optional running averages are disconnected.
        x, scale, bias, epsilon, running_average_factor = inputs[:5]
        dy = grads[0]
        _, x_mean, x_invstd = outputs[:3]
        disconnected_outputs = [
            theano.gradient.DisconnectedType()(),  # epsilon
            theano.gradient.DisconnectedType()()]  # running_average_factor
        # Optional running_mean and running_var.
        for i in range(5, len(inputs)):
            disconnected_outputs.append(theano.gradient.DisconnectedType()())
        return AbstractBatchNormTrainGrad(self.axes)(
            x, dy, scale, x_mean, x_invstd, epsilon) + disconnected_outputs
    def connection_pattern(self, node):
        # Specificy that epsilon and running_average_factor are not connected to outputs.
        patterns = [[True, True, True],     # x
                    [True, True, True],     # scale
                    [True, True, True],     # bias
                    [False, False, False],  # epsilon
                    [False, False, False]]  # running_average_factor
        # Optional running_mean and running_var are only
        # connected to their new values.
        for i in range(5, len(node.inputs)):
            patterns[0].append(True)
            for pattern in patterns[1:]:
                pattern.append(False)
            patterns.append([False] * (3 + i - 5) + [True])
        return patterns
    def perform(self, node, inputs, output_storage):
        # Reference (numpy) implementation of the training-mode forward pass.
        x, scale, bias, epsilon, running_average_factor = inputs[:5]
        axes = self.axes
        if min(axes) < 0 or max(axes) >= x.ndim:
            raise ValueError('axes should be less than ndim (<%d), but %s given' % (x.ndim, str(axes)))
        mean = x.mean(axes, keepdims=True)
        var = x.var(axes, keepdims=True)
        invstd = 1.0 / np.sqrt(var + epsilon)
        out = (x - mean) * (scale * invstd) + bias
        output_storage[0][0] = out
        output_storage[1][0] = mean
        output_storage[2][0] = invstd
        if len(inputs) > 5:
            # exponential-moving-average update of the running mean
            running_mean = inputs[5]
            running_mean = running_mean * (1.0 - running_average_factor) + \
                mean * running_average_factor
            output_storage[3][0] = running_mean
        if len(inputs) > 6:
            # m = number of averaged-over elements; (m / (m - 1)) applies
            # Bessel's correction to the batch variance before averaging
            m = float(np.prod(x.shape) / np.prod(scale.shape))
            running_var = inputs[6]
            running_var = running_var * (1.0 - running_average_factor) + \
                (m / (m - 1)) * var * running_average_factor
            output_storage[4][0] = running_var
class AbstractBatchNormInference(Op):
"""
Abstract Op for Batch Normalization.
Parameters
----------
axes : a tuple of ints
The axes along which the input is normalized.
epsilon
Epsilon value used in the batch normalization formula. Minimum allowed
value is 1e-5 (imposed by cuDNN).
"""
__props__ = ('axes',)
def __init__(self, axes=(0,)):
assert isinstance(axes, (tuple, list))
assert len(axes) > 0
axes = tuple(int(a) for a in axes)
self.axes = axes
def infer_shape(self, node, shape):
return [shape[0]]
def make_node(self, x, scale, bias, estimated_mean, estimated_variance, epsilon=1e-4):
x = as_tensor_variable(x)
scale = as_tensor_variable(scale)
bias = as_tensor_variable(bias)
estimated_mean = as_tensor_variable(estimated_mean)
estimated_variance = as_tensor_variable(estimated_variance)
epsilon = as_tensor_variable(epsilon)
# Upcast to common dtype on the non-scalar
# Keep as is dtype of scalar (epsilon)
x, scale, bias, estimated_mean, estimated_variance = as_common_dtype(
| |
<filename>src/rayoptics/parax/paraxialdesign.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright © 2018 <NAME>
""" First order paraxial design space
.. Created on Sat Mar 31 21:14:42 2018
.. codeauthor: <NAME>
"""
import numpy as np
from rayoptics.optical.model_constants import ht, slp, aoi
from rayoptics.optical.model_constants import pwr, tau, indx, rmd
import rayoptics.optical.model_constants as mc
import rayoptics.parax.firstorder as fo
from rayoptics.seq.gap import Gap
from rayoptics.elem.surface import Surface
from rayoptics.util.line_intersection import get_intersect
from numpy.linalg import norm
def bbox_from_poly(poly):
    """Return the 2D bounding box of *poly* as [[minx, miny], [maxx, maxy]]."""
    lower = np.min(poly, axis=0)
    upper = np.max(poly, axis=0)
    return np.array([lower, upper])
class ParaxialModel():
def __init__(self, opt_model, opt_inv=1.0, ifcs_mapping=None, **kwargs):
self.opt_model = opt_model
self.seq_model = opt_model.seq_model
self.ifcs_mapping = ifcs_mapping
self.layers = {'ifcs': self} if ifcs_mapping is None else None
self.sys = []
self.ax = []
self.pr = []
self.opt_inv = opt_inv
def __json_encode__(self):
attrs = dict(vars(self))
del attrs['opt_model']
del attrs['seq_model']
del attrs['parax_data']
del attrs['layers']
return attrs
def sync_to_restore(self, opt_model):
self.opt_model = opt_model
self.seq_model = opt_model.seq_model
if not hasattr(self, 'ifcs_mapping'):
self.ifcs_mapping = None
if not hasattr(self, 'layers'):
self.layers = {'ifcs': self}
    def update_model(self, **kwargs):
        """Refresh cached first-order data and rebuild the paraxial lens."""
        self.parax_data = self.opt_model.optical_spec.parax_data
        self.build_lens()
    def build_lens(self):
        """Rebuild `sys`, `ax` and `pr` from the sequential model."""
        # rebuild the `sys` description from the seq_model path
        self.sys = sys = self.seq_path_to_paraxial_lens(self.seq_model.path())
        # precalculate the reduced forms of the paraxial axial and chief rays
        if self.parax_data is not None:
            ax_ray, pr_ray, fod = self.parax_data
            self.opt_inv = fod.opt_inv
            self.ax = []
            self.pr = []
            for i in range(0, len(sys)):
                n = sys[i][indx]
                # reduced ray form: [height, n*slope, n*angle-of-incidence]
                self.ax.append([ax_ray[i][ht], n*ax_ray[i][slp], n*ax_ray[i][aoi]])
                self.pr.append([pr_ray[i][ht], n*pr_ray[i][slp], n*pr_ray[i][aoi]])
    def init_from_nodes(self, nodes, rndx_and_imode=None):
        """Construct a diagram using `nodes`, a list of diagram vertices. """
        self.ax = []
        self.pr = []
        self.sys = []
        # default media: index 1.0, transmitting interfaces
        if rndx_and_imode is None:
            rndx_and_imode = [(1., 'transmit')] * len(nodes)
        for vertex, ni in zip(nodes, rndx_and_imode):
            # seed the heights from the vertices; slopes/angles start at
            # zero and are recomputed by nodes_to_parax() below
            self.ax.append([vertex[1], 0.0, 0.0])
            self.pr.append([vertex[0], 0.0, 0.0])
            self.sys.append([0.0, 0.0, *ni])
        # object and image surfaces are dummy interfaces
        self.sys[0][rmd] = 'dummy'
        self.sys[-1][rmd] = 'dummy'
        self.nodes_to_parax(nodes)
        return self
def parax_to_nodes(self, type_sel=mc.ht):
""" render the paraxial model into a node list """
nodes = [[x[type_sel], y[type_sel]] for x, y in zip(self.pr, self.ax)]
nodes = np.array(nodes)
return nodes
def nodes_to_parax(self, nodes, type_sel=mc.ht):
""" Update the parax model from the node list, `nodes`. """
if type_sel == mc.ht:
for i, node in enumerate(nodes):
self.apply_ht_dgm_data(i, node)
elif type_sel == mc.slp:
for i, node in enumerate(nodes):
self.apply_slope_dgm_data(i, node)
return self
def get_pt(self, idx):
return self.pr[idx][mc.ht], self.ax[idx][mc.ht]
def set_pt(self, idx, pt):
self.pr[idx][mc.ht] = pt[0]
self.ax[idx][mc.ht] = pt[1]
    def get_gap_for_node(self, node):
        """Return the (gap, z_dir) pair in the seq_model backing *node*."""
        if self.ifcs_mapping is None:
            # just return the node in the seq_model
            gap_idx = node
        else:  # use the node_defs to map to the seq_model
            node_defs, map_to_ifcs = self.ifcs_mapping
            kernel = node_defs[node]
            if len(kernel) == 1:
                # single-index kernel: direct gap reference
                gap_idx = kernel[0]
            elif len(kernel) == 2:
                # (prev, after) pair: use the gap before the composite
                prev_gap_idx, after_gap_idx = kernel
                gap_idx = prev_gap_idx
            elif len(kernel) == 3:
                idx, prev_gap_idx, after_gap_idx = kernel
                # NOTE(review): this conditional always evaluates to `idx`
                # (both branches agree when idx == after_gap_idx) — confirm
                # whether the else arm was meant to pick a different index.
                gap_idx = idx if idx != after_gap_idx else after_gap_idx
        return self.seq_model.gaps[gap_idx], self.seq_model.z_dir[gap_idx]
# --- add/delete points from diagram
    def add_node(self, node, new_vertex, type_sel, interact_mode):
        """ Add a node in the paraxial data structures """
        ns = self.seq_model.get_num_surfaces()
        # clamp so the insertion stays ahead of the image surface
        if node >= ns - 1:
            node = ns - 2
        n = self.sys[node][indx]
        new_node = node + 1
        # new surface starts with zero power/thickness and inherits n
        self.sys.insert(new_node, [0.0, 0.0, n, interact_mode])
        # seed only the selected coordinate (ht or slp) from the vertex;
        # the apply_* call below recomputes the dependent data
        ax_node = [0.0, 0.0, 0.0]
        ax_node[type_sel] = new_vertex[1]
        self.ax.insert(new_node, ax_node)
        pr_node = [0.0, 0.0, 0.0]
        pr_node[type_sel] = new_vertex[0]
        self.pr.insert(new_node, pr_node)
        if type_sel == ht:
            self.apply_ht_dgm_data(new_node, new_vertex=new_vertex)
        elif type_sel == slp:
            self.apply_slope_dgm_data(new_node, new_vertex=new_vertex)
        if interact_mode == 'reflect':
            # a mirror flips the sign of every following refractive index
            for i in range(new_node, len(self.sys)):
                self.sys[i][indx] = -self.sys[i][indx]
        return new_node
    def assign_object_to_node(self, node, factory, **inputs):
        """ create a new element from `factory` and replace `node` with it """
        # extract optical properties of node
        n = self.sys[node][indx]
        power = self.sys[node][pwr]
        thi = n*self.sys[node][tau]
        # semi-diameter estimate from the marginal and chief ray heights
        sd = abs(self.ax[node][ht]) + abs(self.pr[node][ht])
        # create an element with the node's properties
        seq, ele, e_node = descriptor = factory(power=power, sd=sd)
        # convert the preceding reduced distance back to a real thickness
        n_before = self.sys[node-1][indx]
        thi_before = n_before*self.sys[node-1][tau]
        self.seq_model.gaps[node-1].thi = thi_before
        # insert the path sequence and elements into the
        # sequential and element models
        kwargs = {'idx': node-1, 't': thi, **inputs}
        self.opt_model.insert_ifc_gp_ele(*descriptor, **kwargs)
        # resync this paraxial model with the newly inserted sequence
        path_stop = node + len(seq)
        inserted_seq = list(self.seq_model.path(start=node-1, stop=path_stop))
        sys_seq = self.seq_path_to_paraxial_lens(inserted_seq[1:])
        pp_info = self.compute_principle_points(inserted_seq)
        self.replace_node_with_seq(node, sys_seq, pp_info)
        self.compute_signed_rindx()
        return descriptor, kwargs
def compute_signed_rindx(self):
"""Reset the state of the refractive index array.
This method resets the signs of the refractive indices so that they are
negative following an odd number of reflections, but positive otherwise.
"""
flip = 1
for ss in self.sys:
ss[indx] = abs(ss[indx])
if ss[rmd] == 'reflect':
flip = -flip
if flip < 0:
ss[indx] = -ss[indx]
    def replace_node_with_seq(self, node, sys_seq, pp_info):
        """ replaces the data at node with sys_seq """
        sys = self.sys
        ax = self.ax
        pr = self.pr
        if len(sys_seq) == 1:
            # single surface: straight substitution
            sys[node] = sys_seq[0]
        else:
            opt_inv = self.opt_inv
            # shift the preceding gap by the front principal point offset
            efl, pp1, ppk, ffl, bfl = pp_info[2]
            sys[node-1][tau] -= pp1/sys[node-1][indx]
            # sys_seq[-1][tau] = sys[node][tau] - ppk/sys_seq[-1][indx]
            # endpoints and slopes of the diagram edge being subdivided
            p0 = [ax[node][ht], pr[node][ht]]
            pn = [ax[node+1][ht], pr[node+1][ht]]
            slp0 = [ax[node][slp], pr[node][slp]]
            # insert all but the last surface, ray tracing through each one
            for n, ss in enumerate(sys_seq[:-1], start=node):
                sys.insert(n, ss)
                ax_ht = ax[n-1][ht] + sys[n-1][tau]*ax[n-1][slp]
                ax_slp = ax[n-1][slp] - ax_ht*sys[n][pwr]
                ax.insert(n, [ax_ht, ax_slp, 0.0])
                pr_ht = pr[n-1][ht] + sys[n-1][tau]*pr[n-1][slp]
                pr_slp = pr[n-1][slp] - pr_ht*sys[n][pwr]
                pr.insert(n, [pr_ht, pr_slp, 0.0])
            # replace the original node data
            ax[n+1][slp] = slp0[0]
            pr[n+1][slp] = slp0[1]
            sys[n+1][pwr] = (ax[n][slp]*pr[n+1][slp] -
                             ax[n+1][slp]*pr[n][slp])/opt_inv
            # sys_seq[-1][pwr]
            # place the last surface at the intersection of the outgoing
            # ray with the original diagram edge p0-pn
            p1 = [ax[n][ht], pr[n][ht]]
            p2 = [ax[n][ht]+ax[n][slp], pr[n][ht]+pr[n][slp]]
            p2int = np.array(get_intersect(p1, p2, p0, pn))
            ax[n+1][ht] = p2int[0]
            pr[n+1][ht] = p2int[1]
            sys[n][tau] = (
                (ax[n][ht]*pr[n+1][ht] - ax[n+1][ht]*pr[n][ht])/opt_inv)
            sys[n+1][tau] = (p2int[0]*pn[1] - pn[0]*p2int[1])/opt_inv
    def get_object_for_node(self, node):
        ''' basic 1:1 relationship between seq and parax model sequences '''
        ifc = self.seq_model.ifcs[node]
        e_node = self.opt_model.part_tree.parent_node(ifc)
        # (seq descriptor, element id list, part-tree node), insertion kwargs
        args = [[ifc, None, None, 1, 1]], [e_node.id], e_node
        kwargs = {'idx': node}
        return args, kwargs
def delete_node(self, surf):
""" delete the node at position surf """
del self.sys[surf]
del self.ax[surf]
del self.pr[surf]
# --- edit diagram points
    def apply_ht_dgm_data(self, surf, new_vertex=None):
        """ This routine calculates all data dependent on the input
        height coordinates (y,ybar) at surface surf.
        """
        sys = self.sys
        ax_ray = self.ax
        pr_ray = self.pr
        opt_inv = self.opt_inv
        if new_vertex is not None:
            pr_ray[surf][ht] = new_vertex[0]
            ax_ray[surf][ht] = new_vertex[1]
        nsm1 = len(sys) - 1
        # the object surface has no predecessor; work on surface 1 instead
        if surf == 0:
            surf += 1
        p = surf - 1
        c = surf
        # reduced distance from the cross product of the two ray heights,
        # scaled by the optical invariant
        sys[p][tau] = ((ax_ray[p][ht]*pr_ray[c][ht] -
                        ax_ray[c][ht]*pr_ray[p][ht]) / opt_inv)
        ax_ray[p][slp] = (ax_ray[c][ht] - ax_ray[p][ht])/sys[p][tau]
        pr_ray[p][slp] = (pr_ray[c][ht] - pr_ray[p][ht])/sys[p][tau]
        if (surf > 1):
            # previous surface power from the slope change across it
            p2 = surf - 2
            sys[p][pwr] = ((ax_ray[p2][slp]*pr_ray[p][slp] -
                            ax_ray[p][slp]*pr_ray[p2][slp])
                           / opt_inv)
        if (surf < nsm1):
            # interior surface: recompute the following gap and powers
            s = surf + 1
            sys[c][tau] = (ax_ray[c][ht]*pr_ray[s][ht] -
                           ax_ray[s][ht]*pr_ray[c][ht])/opt_inv
            ax_ray[c][slp] = (ax_ray[s][ht] - ax_ray[c][ht])/sys[c][tau]
            pr_ray[c][slp] = (pr_ray[s][ht] - pr_ray[c][ht])/sys[c][tau]
            sys[c][pwr] = (ax_ray[p][slp]*pr_ray[c][slp] -
                           ax_ray[c][slp]*pr_ray[p][slp])/opt_inv
            if s < nsm1:
                sys[s][pwr] = (ax_ray[c][slp]*pr_ray[s][slp] -
                               ax_ray[s][slp]*pr_ray[c][slp])/opt_inv
        else:
            # image surface: rays continue with the previous slopes
            ax_ray[c][slp] = ax_ray[p][slp]
            pr_ray[c][slp] = pr_ray[p][slp]
            sys[c][pwr] = 0
            sys[c][tau] = 0
    def apply_slope_dgm_data(self, surf, new_vertex=None):
        """ This routine calculates all data dependent on the input
        slope coordinates (nu,nubar) at surface surf.
        """
        sys = self.sys
        ax_ray = self.ax
        pr_ray = self.pr
        opt_inv = self.opt_inv
        if new_vertex is not None:
            pr_ray[surf][slp] = new_vertex[0]
            ax_ray[surf][slp] = new_vertex[1]
        nsm1 = len(sys) - 1
        if nsm1 == 0:
            # single-gap system: just transfer to the image point
            p = 0
            c = 1
            ax_ray[c][ht] = ax_ray[p][slp]*sys[p][tau] + ax_ray[p][ht]
            pr_ray[c][ht] = pr_ray[p][slp]*sys[p][tau] + pr_ray[p][ht]
        else:
            # the object surface has no predecessor; use surface 1
            if (surf == 0):
                surf += 1
            p = surf - 1
            c = surf
            # power from the slope changes, then heights from power,
            # then the preceding reduced distance from the heights
            sys[c][pwr] = (ax_ray[p][slp]*pr_ray[c][slp] -
                           ax_ray[c][slp]*pr_ray[p][slp])/opt_inv
            ax_ray[c][ht] = (ax_ray[p][slp] - ax_ray[c][slp])/sys[c][pwr]
            pr_ray[c][ht] = (pr_ray[p][slp] - pr_ray[c][slp])/sys[c][pwr]
            sys[p][tau] = (ax_ray[p][ht]*pr_ray[c][ht] -
                           ax_ray[c][ht]*pr_ray[p][ht])/opt_inv
            if (surf < nsm1):
                # propagate the new slopes to the next surface's heights
                s = surf + 1
                ax_ray[s][ht] = ax_ray[c][slp]*sys[c][tau] + ax_ray[c][ht]
                pr_ray[s][ht] = pr_ray[c][slp]*sys[c][tau] + pr_ray[c][ht]
    def update_composite_node(self, node, new_vertex=None):
        """Edit *node* in this layer and propagate to the interface layer."""
        # print(f'update: {node}, {new_vertex}')
        self.apply_ht_dgm_data(node, new_vertex)
        if self.ifcs_mapping is not None:
            # composite layer: expand the edit to interface-level nodes
            node_defs, map_to_ifcs = self.ifcs_mapping
            nodes = self.parax_to_nodes()
            nodes_ifcs = calc_ifcs_nodes(map_to_ifcs, nodes)
            # print(f'nodes_ifcs {nodes_ifcs}')
            self.opt_model['parax_model'].nodes_to_parax(nodes_ifcs)
    def update_rindex(self, surf):
        """Update the refractive index using the `gap` at *surf*."""
        gap = self.seq_model.gaps[surf]
        # evaluate the medium's index at the model's central wavelength
        wvl = self.seq_model.central_wavelength()
        self.sys[surf][indx] = gap.medium.rindex(wvl)
# ParaxTrace() - This routine performs a paraxial raytrace from object
# (surface 0) to image. The last operation is a
# transfer to the image surface.
def paraxial_trace(self):
""" regenerate paraxial axial and chief rays from power and reduced
distance
"""
sys = self.sys
ax_ray = self.ax
pr_ray | |
<reponame>insignification/python-goto
import dis
import struct
import array
import types
import functools
import weakref
import warnings
try:
_array_to_bytes = array.array.tobytes
except AttributeError:
_array_to_bytes = array.array.tostring
try:
_range = xrange
except NameError:
_range = range
class _Bytecode:
def __init__(self):
code = (lambda: x if x else y).__code__.co_code
opcode, oparg = struct.unpack_from('BB', code, 2)
# Starting with Python 3.6, the bytecode format has been changed to use
# 16-bit words (8-bit opcode + 8-bit argument) for each instruction,
# as opposed to previously 24-bit (8-bit opcode + 16-bit argument) for
# instructions that expect an argument or just 8-bit for those that don't.
# https://bugs.python.org/issue26647
if dis.opname[opcode] == 'POP_JUMP_IF_FALSE':
self.argument = struct.Struct('B')
self.have_argument = 0
# As of Python 3.6, jump targets are still addressed by their byte
# unit. This, however, is matter to change, so that jump targets,
# in the future, will refer to the code unit (address in bytes / 2).
# https://bugs.python.org/issue26647
self.jump_unit = 8 // oparg
else:
self.argument = struct.Struct('<H')
self.have_argument = dis.HAVE_ARGUMENT
self.jump_unit = 1
self.has_loop_blocks = 'SETUP_LOOP' in dis.opmap
self.has_pop_except = 'POP_EXCEPT' in dis.opmap
self.has_setup_with = 'SETUP_WITH' in dis.opmap
self.has_setup_except = 'SETUP_EXCEPT' in dis.opmap
self.has_begin_finally = 'BEGIN_FINALLY' in dis.opmap
try:
import __pypy__
self.pypy_finally_semantics = True
except:
self.pypy_finally_semantics = False
@property
def argument_bits(self):
return self.argument.size * 8
# Singleton snapshot of the current interpreter's bytecode conventions.
_BYTECODE = _Bytecode()
# use a weak dictionary in case code objects can be garbage-collected
_patched_code_cache = weakref.WeakKeyDictionary()
try:
    # probe: some interpreters cannot weak-reference code objects
    _patched_code_cache[_Bytecode.__init__.__code__] = None
except TypeError:
    _patched_code_cache = {}  # ...unless not supported
def _make_code(code, codestring, data):
try:
# code.replace is new in 3.8+
return code.replace(co_code=codestring,
co_nlocals=data.nlocals,
co_varnames=data.varnames,
co_consts=data.consts,
co_names=data.names)
except:
args = [
code.co_argcount, data.nlocals, code.co_stacksize,
code.co_flags, codestring, data.consts,
data.names, data.varnames, code.co_filename,
code.co_name, code.co_firstlineno, code.co_lnotab,
code.co_freevars, code.co_cellvars
]
try:
args.insert(1, code.co_kwonlyargcount) # PY3
except AttributeError:
pass
return types.CodeType(*args)
def _parse_instructions(code, yield_nones_at_end=0):
    """Yield (opname, oparg, offset) triples for the bytecode string *code*.

    EXTENDED_ARG prefixes are folded into the following instruction's oparg,
    and the reported offset is that of the first EXTENDED_ARG.  After the
    real instructions, *yield_nones_at_end* (None, None, None) sentinels are
    yielded so callers can run a fixed-size lookahead window off the end.
    """
    extended_arg = 0
    extended_arg_offset = None
    pos = 0
    while pos < len(code):
        offset = pos
        if extended_arg_offset is not None:
            offset = extended_arg_offset
        opcode = struct.unpack_from('B', code, pos)[0]
        pos += 1
        oparg = None
        if opcode >= _BYTECODE.have_argument:
            # accumulate any pending EXTENDED_ARG high bits
            oparg = extended_arg | _BYTECODE.argument.unpack_from(code, pos)[0]
            pos += _BYTECODE.argument.size
        if opcode == dis.EXTENDED_ARG:
            extended_arg = oparg << _BYTECODE.argument_bits
            extended_arg_offset = offset
            continue
        extended_arg = 0
        extended_arg_offset = None
        yield (dis.opname[opcode], oparg, offset)
    for _ in _range(yield_nones_at_end):
        yield (None, None, None)
def _get_instruction_size(opname, oparg=0):
    """Return the encoded size in bytes of one instruction, including any
    EXTENDED_ARG prefixes required for a large *oparg*."""
    overflow = oparg >> _BYTECODE.argument_bits
    total = 1
    if overflow != 0:
        # one (or more, recursively) EXTENDED_ARG prefix instructions
        total += _get_instruction_size('EXTENDED_ARG', overflow)
    if dis.opmap[opname] >= _BYTECODE.have_argument:
        total += _BYTECODE.argument.size
    return total
def _get_instructions_size(ops):
    """Total encoded size in bytes of the instruction sequence *ops*.

    Each op is either a bare opname string or an (opname, oparg) tuple.
    """
    return sum(
        _get_instruction_size(op) if isinstance(op, str)
        else _get_instruction_size(*op)
        for op in ops)
def _write_instruction(buf, pos, opname, oparg=0):
    """Encode one instruction into *buf* at *pos*; return the new position.

    Emits EXTENDED_ARG prefixes (recursively) when *oparg* exceeds the
    native argument width.
    """
    extended_arg = oparg >> _BYTECODE.argument_bits
    if extended_arg != 0:
        pos = _write_instruction(buf, pos, 'EXTENDED_ARG', extended_arg)
        # keep only the low bits for the instruction itself
        oparg &= (1 << _BYTECODE.argument_bits) - 1
    opcode = dis.opmap[opname]
    buf[pos] = opcode
    pos += 1
    if opcode >= _BYTECODE.have_argument:
        _BYTECODE.argument.pack_into(buf, pos, oparg)
        pos += _BYTECODE.argument.size
    return pos
def _write_instructions(buf, pos, ops):
    """Encode each op of *ops* into *buf* starting at *pos*; return end pos."""
    for op in ops:
        if isinstance(op, str):
            # bare opname: no argument
            op = (op,)
        pos = _write_instruction(buf, pos, *op)
    return pos
def _warn_bug(msg):
warnings.warn("Internal error detected" +
" - result of with_goto may be incorrect. (%s)" % msg)
def _find_labels_and_gotos(code):
    """Scan *code* for ``label.X`` and ``goto.X`` pseudo-statements.

    Returns (labels, gotos): labels maps a name index to
    (start_offset, end_offset, block_stack); each goto entry is
    (start_offset, end_offset, name_index, block_stack, params).
    The block stack records which loop/except/finally/with blocks are open
    at each point, so jumps can unwind or re-enter them correctly.
    """
    labels = {}
    gotos = []
    block_stack = []
    block_counter = 0
    last_block = None
    # 3-instruction sliding window; opname1 is the instruction under analysis
    opname1 = oparg1 = offset1 = None
    opname2 = oparg2 = offset2 = None
    opname3 = oparg3 = offset3 = None
    def replace_block_in_stack(stack, old_block, new_block):
        for i, block in enumerate(stack):
            if block == old_block:
                stack[i] = new_block
    def replace_block(old_block, new_block):
        # rewrite the block everywhere it has been recorded so far
        replace_block_in_stack(block_stack, old_block, new_block)
        for label in labels:
            replace_block_in_stack(labels[label][2], old_block, new_block)
        for goto in gotos:
            replace_block_in_stack(goto[3], old_block, new_block)
    def push_block(opname, target_offset=None):
        new_counter = block_counter + 1
        block_stack.append((opname, target_offset, new_counter))
        return new_counter  # to be assigned to block_counter
    def pop_block():
        if block_stack:
            return block_stack.pop()
        else:
            _warn_bug("can't pop block")
    def pop_block_of_type(type):
        if block_stack and block_stack[-1][0] != type:
            # in 3.8, only finally blocks are supported, so we must determine
            # except/finally ourselves, and replace the block's type
            if not _BYTECODE.has_setup_except and \
                    type == "<EXCEPT>" and \
                    block_stack[-1][0] == '<FINALLY>':
                replace_block(block_stack[-1], (type,) + block_stack[-1][1:])
            else:
                _warn_bug("mismatched block type")
        return pop_block()
    jump_targets = set(dis.findlabels(code.co_code))
    dead = False
    for opname4, oparg4, offset4 in _parse_instructions(code.co_code, 3):
        endoffset1 = offset2
        # any jump target resurrects dead code
        if offset1 in jump_targets:
            dead = False
        # check for block exits
        while block_stack and offset1 == block_stack[-1][1]:
            exitname, _, _ = last_block = pop_block()
            # the handler that follows a SETUP_* region is its own block
            if exitname == 'SETUP_EXCEPT' and _BYTECODE.has_pop_except:
                block_counter = push_block('<EXCEPT>')
            elif exitname == 'SETUP_FINALLY':
                block_counter = push_block('<FINALLY>')
        # check for special opcodes
        if opname1 in ('LOAD_GLOBAL', 'LOAD_NAME'):
            # `label.X` / `goto.X` compile to LOAD_GLOBAL; LOAD_ATTR; POP_TOP
            if opname2 == 'LOAD_ATTR' and opname3 == 'POP_TOP':
                name = code.co_names[oparg1]
                if name == 'label':
                    if oparg2 in labels:
                        co_name = code.co_names[oparg2]
                        raise SyntaxError('Ambiguous label {0!r}'.format(co_name))
                    labels[oparg2] = (offset1,
                                      offset4,
                                      list(block_stack))
                elif name == 'goto':
                    gotos.append((offset1,
                                  offset4,
                                  oparg2,
                                  list(block_stack),
                                  0))
            # `goto.param = X` / `goto.params = X` form
            elif opname2 == 'LOAD_ATTR' and opname3 == 'STORE_ATTR':
                if code.co_names[oparg1] == 'goto' and \
                        code.co_names[oparg2] in ('param', 'params'):
                    gotos.append((offset1,
                                  offset4,
                                  oparg3,
                                  list(block_stack),
                                  code.co_names[oparg2]))
        elif opname1 in ('SETUP_LOOP', 'FOR_ITER',
                         'SETUP_EXCEPT', 'SETUP_FINALLY',
                         'SETUP_WITH', 'SETUP_ASYNC_WITH'):
            block_counter = push_block(opname1, endoffset1 + oparg1)
        elif opname1 == 'POP_EXCEPT':
            last_block = pop_block_of_type('<EXCEPT>')
        elif opname1 == 'END_FINALLY' and not dead:
            # (python compilers put dead END_FINALLY's in weird places)
            last_block = pop_block_of_type('<FINALLY>')
        elif opname1 in ('WITH_CLEANUP', 'WITH_CLEANUP_START'):
            if _BYTECODE.has_setup_with:
                # temporary block to match END_FINALLY
                block_counter = push_block('<FINALLY>')
            else:
                # python 2.6 - finally was actually with
                replace_block(last_block, ('SETUP_WITH',) + last_block[1:])
        # unconditional jumps start a dead-code region
        if opname1 in ('JUMP_ABSOLUTE', 'JUMP_FORWARD'):
            dead = True
        # slide the lookahead window
        opname1, oparg1, offset1 = opname2, oparg2, offset2
        opname2, oparg2, offset2 = opname3, oparg3, offset3
        opname3, oparg3, offset3 = opname4, oparg4, offset4
    if block_stack:
        _warn_bug("block stack not empty")
    return labels, gotos
def _inject_nop_sled(buf, pos, end):
    """Overwrite buf[pos:end] with NOP instructions."""
    cursor = pos
    while cursor < end:
        cursor = _write_instruction(buf, cursor, 'NOP')
def _inject_ops(buf, pos, end, ops):
    """Write the instruction sequence *ops* into buf[pos:end].

    If the ops do not fit in the available window, they are appended at the
    end of *buf* and a jump to that trampoline is written at *pos* instead;
    any leftover space is padded with NOPs in either case.
    """
    size = _get_instructions_size(ops)
    if pos + size > end:
        # not enough space, add code at buffer end and jump there
        buf_end = len(buf)
        go_to_end_ops = [('JUMP_ABSOLUTE', buf_end // _BYTECODE.jump_unit)]
        if pos + _get_instructions_size(go_to_end_ops) > end:
            # not sure if reachable
            raise SyntaxError('Goto in an incredibly huge function')
        pos = _write_instructions(buf, pos, go_to_end_ops)
        _inject_nop_sled(buf, pos, end)
        # reserve space for the trampoline, then fill it in
        buf.extend([0] * size)
        _write_instructions(buf, buf_end, ops)
    else:
        pos = _write_instructions(buf, pos, ops)
        _inject_nop_sled(buf, pos, end)
class _CodeData:
def __init__(self, code):
self.nlocals = code.co_nlocals
self.varnames = code.co_varnames
self.consts = code.co_consts
self.names = code.co_names
def get_const(self, value):
try:
i = self.consts.index(value)
except ValueError:
i = len(self.consts)
self.consts += (value,)
return i
def get_name(self, value):
try:
i = self.names.index(value)
except ValueError:
i = len(self.names)
self.names += (value,)
return i
def add_var(self, name):
idx = len(self.varnames)
self.varnames += (name,)
self.nlocals += 1
return idx
def _patch_code(code):
new_code = _patched_code_cache.get(code)
if new_code is not None:
return new_code
labels, gotos = _find_labels_and_gotos(code)
buf = array.array('B', code.co_code)
temp_var = None
data = _CodeData(code)
for pos, end, _ in labels.values():
_inject_nop_sled(buf, pos, end)
for pos, end, label, origin_stack, params in gotos:
try:
_, target, target_stack = labels[label]
except KeyError:
raise SyntaxError('Unknown label {0!r}'.format(code.co_names[label]))
ops = []
# prepare
common_depth = min(len(origin_stack), len(target_stack))
for i in _range(common_depth):
if origin_stack[i] != target_stack[i]:
common_depth = i
break
if params:
if temp_var is None:
temp_var = data.add_var('goto.temp')
# must do this before any blocks are pushed/popped
ops.append(('STORE_FAST', temp_var))
many_params = (params != 'param')
# pop blocks
for block, _, _ in reversed(origin_stack[common_depth:]):
if block == 'FOR_ITER':
if not _BYTECODE.has_loop_blocks:
ops.append('POP_TOP')
elif block == '<EXCEPT>':
ops.append('POP_EXCEPT')
elif block == '<FINALLY>':
ops.append('END_FINALLY')
else:
ops.append('POP_BLOCK')
if block in ('SETUP_WITH', 'SETUP_ASYNC_WITH'):
ops.append('POP_TOP')
# pypy 3.6 keeps a block around until END_FINALLY;
# python 3.8 reuses SETUP_FINALLY for SETUP_EXCEPT
# (where END_FINALLY is not accepted).
# What will pypy 3.8 do?
if _BYTECODE.pypy_finally_semantics and \
block in ('SETUP_FINALLY', 'SETUP_WITH',
'SETUP_ASYNC_WITH'):
if _BYTECODE.has_begin_finally:
ops.append('BEGIN_FINALLY')
else:
ops.append(('LOAD_CONST', data.get_const(None)))
ops.append('END_FINALLY')
# push blocks
def setup_block_absolute(block, block_end):
# there's no SETUP_*_ABSOLUTE, so we setup forward to an JUMP_ABSOLUTE
jump_abs_op = ('JUMP_ABSOLUTE', block_end)
skip_jump_op = ('JUMP_FORWARD', _get_instruction_size(*jump_abs_op))
setup_block_op = (block, _get_instruction_size(*skip_jump_op))
ops.extend((setup_block_op, skip_jump_op, jump_abs_op))
tuple_i = 0
for block, block_target, _ in target_stack[common_depth:]:
if block in ('FOR_ITER', 'SETUP_WITH', 'SETUP_ASYNC_WITH'):
if not params:
raise SyntaxError(
'Jump into block without the necessary params')
ops.append(('LOAD_FAST', temp_var))
if many_params:
ops.append(('LOAD_CONST', | |
"freturn"
raise Return(self.stack.pop())
    def opcode_0xaf(self):
        "dreturn"
        # pop the double result and unwind to the caller via Return
        raise Return(self.stack.pop())
    def opcode_0xb0(self):
        "areturn"
        # pop the object-reference result and unwind to the caller
        raise Return(self.stack.pop())
    def opcode_0xb1(self):
        "return"
        # void return: no value is popped
        raise Return(None)
    def opcode_0xb2(self):
        "getstatic"
        # resolve the field reference through the constant pool:
        # fieldref -> (class, name-and-type) -> (name, descriptor)
        # NOTE: Python 2 era code (has_key/unicode); kept as-is.
        index = self.nextword()
        fieldref = self.const[index]
        classref = self.const[fieldref.class_index]
        cls = self.loader.getclass(self.const[classref.name_index])
        nametyperef = self.const[fieldref.name_and_type_index]
        name = self.const[nametyperef.name_index]
        descr = descriptor(self.const[nametyperef.descriptor_index])
        # TODO: lookup in supercls
        #print name,cls.__name__
        #print "decr:",descr
        if isinstance(cls, JClass):
            # supercls lookup
            # while not cls.__name__ == "java/lang/Object":
            while not cls == None:
                if cls.static_fields.has_key(unicode(name), descr):
                    # get it and stop lookup at the first match
                    value = cls.static_fields.get(unicode(name),descr)
                    break
                cls = cls.supercls
        else: # for javaclasses.py
            value = getattr(cls, name) # XXX not Rpython
        #print "value:",value
        # push the static field's value onto the operand stack
        self.stack.push(value)
    def opcode_0xb3(self):
        "putstatic"
        # resolve the field reference through the constant pool, then store
        # the popped value into the declaring class's static fields.
        index = self.nextword()
        fieldref = self.const[index]
        classref = self.const[fieldref.class_index]
        cls = self.loader.getclass(self.const[classref.name_index])
        nametyperef = self.const[fieldref.name_and_type_index]
        name = self.const[nametyperef.name_index]
        descr = descriptor(self.const[nametyperef.descriptor_index])
        value = self.stack.pop()
        #print name, cls.__name__
        if isinstance(cls, JClass):
            # supercls lookup
            #while not cls.__name__ == "java/lang/Object":
            while not cls == None:
                if cls.static_fields.has_key(unicode(name), descr):
                    # set it and stop lookup at the first match
                    cls.static_fields.set(unicode(name), value, descr)
                    break
                cls = cls.supercls
        else: # javaclasses.JavaIoPrintStream
            # only the built-in PrintStream shim is supported here
            assert isinstance(cls, javaclasses.JavaIoPrintStream)
            setattr(cls,name,value) # XXX not Rpython
def opcode_0xb4(self):
"getfield"
index = self.nextword()
fieldref = self.const[index]
nametyperef = self.const[fieldref.name_and_type_index]
name = self.const[nametyperef.name_index]
descr = descriptor(self.const[nametyperef.descriptor_index])
objectref = self.stack.pop()
if objectref==None:
throw_NullPointerException(self.loader)
#print
#print "objref:", objectref
#print objectref.jcls.__name__,":"
#print objectref.fields.print_map()
#print "keyname:",name
#print "super:",objectref.jcls.supercls.__name__
value = objectref.fields.get(unicode(name), descr)
#print "value:",value
self.stack.push(value)
def opcode_0xb5(self):
"putfield"
index = self.nextword()
fieldref = self.const[index]
nametyperef = self.const[fieldref.name_and_type_index]
name = self.const[nametyperef.name_index]
descr = descriptor(self.const[nametyperef.descriptor_index])
value = self.stack.pop()
objectref = self.stack.pop()
#print
#print objectref.jcls.__name__,":"
objectref.fields.set(unicode(name), value, descr)
    def prepare_invoke(self):
        # Shared prologue for the invoke* opcodes: resolve the methodref
        # operand, pop the call arguments (in reverse), and return everything
        # the specific invoke handler needs.
        index = self.nextword()
        methodref = self.const[index]
        nametyperef = self.const[methodref.name_and_type_index]
        name = self.const[nametyperef.name_index]
        type = self.const[nametyperef.descriptor_index]
        descr = classloader.descriptor(type)
        classref = self.const[methodref.class_index]
        cls = self.loader.getclass(self.const[classref.name_index])
        # descr lists the parameter descriptors followed by the return type.
        argcount = len(descr) - 1
        args = Stack()
        # Arguments are popped last-to-first; DESCR_CAST coerces numeric
        # types, anything without a cast entry is pushed unchanged.
        for i in range(argcount):
            try:
                args.push(self.DESCR_CAST[descr[argcount-i-1]](self.stack.pop()))
            except KeyError:
                args.push(self.stack.pop()) # no number (or chr)
        real_name = classloader.encode_name(name, descr)
        # descr[-1] is the return-type descriptor.
        return cls, methodref, name, args, descr[-1], real_name
def push_result(self, rettype, result):
if rettype is not None:
if rettype=="long":
self.stack.push(r_longlong(result))
elif rettype=="float":
self.stack.push(r_singlefloat(result))
else:
self.stack.push(result)
def opcode_0xb6(self):
"invokevirtual"
cls, methodref, name, args, rettype, real_name = self.prepare_invoke()
objectref = self.stack.pop()
result = self.invokevirtual(cls, objectref, name, real_name, args)
self.push_result(rettype, result)
def opcode_0xb7(self):
"invokespecial"
cls, methodref, name, args, rettype, real_name = self.prepare_invoke()
objectref = self.stack.pop()
result = self.invokespecial(cls, objectref, name, real_name, args)
self.push_result(rettype, result)
def opcode_0xb8(self):
"invokestatic"
cls, methodref, name, args, rettype, real_name = self.prepare_invoke()
result = self.invokestatic(cls, name, real_name, args)
self.push_result(rettype, result)
def opcode_0xb9(self):
"invokeinterface"
cls, methodref, name, args, rettype, real_name = self.prepare_invoke()
count = self.nextbyte()
zero = self.nextbyte()
assert count != 0 # not used (historical)
assert zero == 0
objectref = self.stack.pop()
result = self.invokeinterface(cls, objectref, name, real_name, args)
self.push_result(rettype, result)
def opcode_0xbb(self):
"new"
index = self.nextword()
classref = self.const[index]
jcls = self.loader.getclass(self.const[classref.name_index])
# TODO: no special case for non-JClasses
if isinstance(jcls, JClass):
self.stack.push(Objectref(jcls, True))
else:
self.stack.push(self.instantiate(jcls))
    def newarray(self, type_char, defaultitem, count, *counts):
        # Recursively build a (possibly multi-dimensional) array value.
        # `count` is the outermost dimension; `counts` holds the remaining
        # dimensions, consumed one per recursion level.
        if len(counts) == 0:#basecase of recursion
            if isinstance(defaultitem, JClass):
                result = []
                # no init, ref is null
                for i in range(count):
                    result.append(Objectref(defaultitem, False))
            else:
                # Primitive element type: fill with the type's default value.
                result = [defaultitem] * count
        else:
            result = []
            for i in range(count):
                result.append(self.newarray(type_char, defaultitem, *counts))
        # returning form recursion...
        # Build the JVM array class name: one '[' per remaining dimension
        # plus this one, e.g. "[[Ljava/lang/String;" or "[I".
        if isinstance(defaultitem, JClass):
            array_cls_name = "["*(len(counts)+1)+"L"+defaultitem.__name__+";"
            return Arrayref(result,defaultitem, self.loader.getclass(array_cls_name))
        else:
            t = type_char
            assert t=='Z' or t=='S' or t=='B' or t=='I' or t=='J' or t=='F' or t=='D' or t=='C'
            array_cls_name = "["*(len(counts)+1)+type_char
            #if len(counts) == 0:
                #return Arrayref(result,defaultitem, self.loader.getPrimArrayClass(type_char))
            #else:
            return Arrayref(result,defaultitem, self.loader.getclass(array_cls_name))
def opcode_0xbc(self):
"newarray"
typecode = self.nextbyte()
count = self.stack.pop()
self.stack.push(self.newarray(CHAR_BY_TYPECODE[typecode], DEFAULT_BY_TYPECODE[typecode], count))
    def opcode_0xbd(self):
        "anewarray"
        # Create a one-dimensional array of references; the resolved constant
        # names either an element class or (for multi-dim arrays) an array type.
        typeindex = self.nextword()
        count = self.stack.pop()
        typename = self.const[self.const[typeindex].name_index]
        # part of a multidim. array
        if typename.startswith('['):
            # Count the leading '[' characters to strip the array prefix.
            index = 0
            for c in typename:
                if c != '[':
                    break
                index = index +1
            # NOTE(review): the stripped descriptor is passed as type_char and
            # the full typename string as defaultitem — newarray() expects a
            # JClass or a primitive default there; verify this branch.
            self.stack.push(self.newarray(typename[index:], typename, count))
        # single dim. of ref. array
        else:
            jcls = self.loader.getclass(typename)
            #print "anewarray:",cls.__name__
            self.stack.push(self.newarray(None, jcls, count))
def opcode_0xbe(self):
"arraylength"
array = self.stack.pop()
if array==None:
throw_NullPointerException(self.loader)
self.stack.push(len(array.arrayref))
# TODO: implementation not finished
def opcode_0xbf(self):
"athrow"
objectref = self.stack.pop()
#print "athrow:",objectref.jcls.__name__
# TODO: assert objref isinstance throwable
raise JException(objectref)
# TODO: if synchronized do "something" :)
    def handle_exception(self, objectref):
        # Find a handler for `objectref` in the current method's exception
        # table; on a match, jump to the handler and push the exception.
        # Otherwise re-raise JException so the caller's frame handles it.
        #print "handle exception:", objectref.jcls.__name__
        #clsnind = self.const[self.cls.this_class].name_index
        #print "inside methodname:",self.const[self.method.name_index]
        #print "inside class:",self.const[clsnind]
        if objectref==None: # somebody has thrown null
            #special case:
            #raising here again would complicated the interp. loop
            # Replace the null with a synthesized NullPointerException object.
            jcls = self.loader.getclass("java/lang/NullPointerException")
            string = make_String("null", self.loader)
            objectref = Objectref(jcls, True)
            objectref.fields.set("detailMessage", string, "ref")
        assert self.co.exception_table_length>=0
        # search for exceptionhandles (catch blocks)
        for i in range(self.co.exception_table_length):
            exception = self.co.exception_table[i]
            if exception.catch_type==0:
                # JVMS Page 123 and 7.13 (finally)
                # or end of synchronized block
                # catch_type 0 matches any exception; next_instr-1 is the pc
                # of the instruction that raised.
                if self.next_instr-1>=exception.start_pc and self.next_instr-1<exception.end_pc:
                    #handler found
                    self.next_instr = exception.handler_pc
                    self.stack.push(objectref)
                    return
            else:
                # Typed catch: matches only on the exact class name
                # (no subclass matching here).
                cls_info = self.const[exception.catch_type]
                type_name = self.const[cls_info.name_index]
                if objectref.jcls.__name__ == type_name and self.next_instr-1>=exception.start_pc and self.next_instr-1<exception.end_pc:
                    # handler found
                    self.next_instr = exception.handler_pc
                    self.stack.push(objectref)
                    return
        # check if this is a RunntimeException
        # This exceptions can (maybe) not found in the method-sig
        tempcls = objectref.jcls
        while not tempcls.__name__ == "java/lang/Throwable":
            if tempcls.__name__ == "java/lang/RuntimeException":
                # no exceptionhandlers in this class
                # the next frame must handle that
                raise JException(objectref)
            tempcls = tempcls.supercls
        # no exceptionshandlers (catch blocks) in this method
        # find the exceptions which are thrown by this method
        attr = self.cls.getattr(self.method, 'Exceptions', classfile.Exceptions_attribute)
        for i in range(attr.number_of_exceptions):
            excep_index = attr.exceptions_index_table[i]
            cls_info = self.const[excep_index]
            excep_name = self.const[cls_info.name_index]
            # exception thrown by this method
            # lookup of superclasses
            tempcls = objectref.jcls
            # TODO: throw new Thowable();
            while not tempcls.__name__ == "java/lang/Throwable":
                if tempcls.__name__ == excep_name:
                    # no exceptionhandlers in this class
                    # the next frame must handle that
                    raise JException(objectref)
                tempcls = tempcls.supercls
        raise Exception("Exception Handling Error-no catch and no throws!")
def opcode_0xc0(self):
"checkcast"
index = self.nextword()
classref = self.const[index]
# XXX missing: array or interface
# FIXME: is using astack directly
cls = self.loader.getclass(self.const[classref.name_index])
objectref = self.stack.astack[-1]
self.checkcast(objectref, cls)
# TODO: arraytpye
    def checkcast(self, objectref, cls):
        # Return silently when objectref is assignable to cls; otherwise
        # throw ClassCastException. Checks: null, exact class, implemented
        # interfaces, then recurses up the superclass chain.
        if objectref is None:
            return
        elif objectref.jcls == cls:
            return
        # check interfaces
        for iface_num in objectref.jcls.cls.interfaces:
            cls_info = objectref.jcls.cls.constant_pool[iface_num]
            if_name= objectref.jcls.cls.constant_pool[cls_info.name_index]
            if if_name == cls.__name__:
                return
        # Lookup of superclasses
        if not objectref.jcls.__name__ == "java/lang/Object":
            # Recurse with a throwaway wrapper around the superclass.
            obj2 = Objectref(objectref.jcls.supercls) #FIXME when sp. cases are done
            return self.checkcast(obj2, cls)
        # Reached Object without a match: the cast is illegal.
        clsname1 = objectref.jcls.__name__.replace("/",".")
        clsname2 = cls.__name__.replace("/",".")
        throw_ClassCastException(self.loader, clsname1, clsname2)
def opcode_0xc1(self):
"instanceof"
index = self.nextword()
classref = self.const[index]
# XXX missing: array or interface
jcls = self.loader.getclass(self.const[classref.name_index])
objectref = self.stack.pop()
self.stack.push(self.instanceof(objectref, jcls))
def opcode_0xc2(self):
"monitorenter"
from java_threading import monitorenter
objectref = self.stack.pop()
monitorenter(self.loader, objectref)
def opcode_0xc3(self):
"monitorexit"
from java_threading import monitorexit
objectref = self.stack.pop()
monitorexit(objectref)
# TODO: arraytype #maybe done?
    def instanceof(self, objectref, cls):
        # Return True when objectref is an instance of cls: handles null,
        # arrays, exact class match, implemented interfaces, and recurses up
        # the superclass chain.
        if objectref == None:
            return False # null is never ref. of any class
        if isinstance(objectref, Arrayref):
            assert isinstance(objectref.jcls, JArrayClass)
            assert isinstance(cls, JClass)
            #print "cls:",cls.__name__
            #print "obj:",objectref.jcls.__name__
            # Arrays match only their exact array class name (no covariance).
            if (cls.__name__ == objectref.jcls.__name__ or
                cls.__name__ == "java/lang/Object"): # all arrays are Objects
                return True
            return False
        if isinstance(objectref, Objectref) and objectref.jcls==cls:
            return True
        for iface_num in objectref.jcls.cls.interfaces:
            cls_info = objectref.jcls.cls.constant_pool[iface_num]
            if_name= objectref.jcls.cls.constant_pool[cls_info.name_index]
            if if_name == cls.__name__:
                return True
        # Lookup of superclasses
        if not objectref.jcls.__name__ == "java/lang/Object":
            # Recurse with a throwaway wrapper around the superclass.
            obj2 = Objectref(objectref.jcls.supercls) #FIXME when sp. cases are done
            return self.instanceof(obj2, cls)
        # XXX a class form javaclasses
        # TODO: remove this when hooks are implemented
        return objectref==cls
def opcode_0xc4(self):
"wide"
opcode = self.nextbyte()
index = self.nextword()
if opcode == 21: #iload
self.stack.push(self.locals.get(index, "int"))
elif opcode == 22: #lload
self.stack.push(self.locals.get(index, "long"))
elif opcode == 23: #fload
self.stack.push(self.locals.get(index, "float"))
elif opcode == 24: #dload
self.stack.push(self.locals.get(index, "double"))
elif opcode == 25: #aload
self.stack.push(self.locals.get(index, "ref"))
elif opcode == 54: #istore
self.locals.set(index, self.stack.pop(), "int")
elif opcode == 55: #lstore
self.locals.set(index, self.stack.pop(), "long")
elif opcode == 56: #fstore
self.locals.set(index, self.stack.pop(), "float")
elif opcode == 57: #dstore
self.locals.set(index, self.stack.pop(), "double")
elif opcode == 58: #astore
self.locals.set(index, self.stack.pop(), "ref")
elif opcode == 129: #ret
# offset = self.locals.get(index, "int")
# self.next_instr = self.next_instr + offset -1
raise NotImplementedError("ret")
else: #iinc
assert opcode== 132
const = self.nextword()
value = self.locals.get(index, "int") + const
self.locals.set(index, value, | |
# Repository: nbonaker/ticker
import sys, copy, os
sys.path.append("../../")
sys.path.append("../")
from utils import Utils, PhraseUtils
from min_edit_distance import MinEditDistance
from click_distr import ClickDistribution
from channel_config import ChannelConfig
class TickerAudioResults(object):
    def __init__(self):
        """Configure paths, session layout and helpers for the Ticker
        audio-experiment result analysis."""
        # Input/output locations for the recorded user trials.
        self.root_dir = "../../../user_trials/audio_experiment/ticker/"
        self.file_out = "./results/graphs/results_ticker.cPickle"
        self.phrase_file = "phrases.txt"
        # Which users/sessions/sub-sessions to process.
        self.users = [3] #[3]
        self.sessions = [2,3,4,5]
        self.sub_sessions = [1,2,3,4,5,6]
        # Sub-sessions available per session (session id -> sub-session ids).
        self.sub_session_ids = {1:[1,2,3,4,5,6],2:[2,3,4],3:[2,3,4],4:[2,3,4],5:[1,2]}
        self.dist_measure = MinEditDistance()
        # Characters per "word" used to normalise words-per-minute figures.
        self.word_length = 5.0
        #NB change this variable according to experiment settings (was not saved)
        #Extra waiting time at the end to give the user a chance to click, this value was set to 300ms during all
        #"no-noise" experiments.
        self.extra_wait_time = 0.0 #0.3
        self.utils = Utils()
        #Diagnostic: count the total number of clicks
        self.nclicks_total = 0
        self.phrase_utils = PhraseUtils()
    def initDataStructures(self, i_display):
        """Scan the on-disk trial directories and pre-build empty nested
        dicts (sub_session -> session -> list) for every metric; returns the
        tuple of metric containers."""
        (users, wpm_min,wpm_theory,wpm,cpc,char_err,speeds,phrases)=({},{},{},{},{},{},{},[])
        for user in self.users:
            user_dir= "%suser_%.2d/" % (self.root_dir, user)
            if not os.path.exists(user_dir):
                continue
            for session in self.sessions:
                s = "%d" % session
                session_dir = "%ssession_%.2d/" % (user_dir, session)
                if not os.path.exists(session_dir):
                    continue
                for sub_session in self.sub_session_ids[session]:
                    ss = "%d" % sub_session
                    sub_session_dir = "%ssub_session_%.2d/" % (session_dir,sub_session)
                    if not os.path.exists(sub_session_dir):
                        continue
                    # First time this sub-session is seen: create its dicts.
                    if not users.has_key(ss):
                        (users[ss], wpm_min[ss],wpm_theory[ss],wpm[ss],cpc[ss],char_err[ss],
                         speeds[ss])=({},{},{},{},{},{},{})
                    (users[ss][s], wpm_min[ss][s],wpm_theory[ss][s],wpm[ss][s],cpc[ss][s],char_err[ss][s],
                     speeds[ss][s])=([],[],[],[],[],[],[])
        if i_display:
            self.dispHeading()
        return (users,wpm_min,wpm_theory,wpm,cpc,char_err,speeds)
####################################### Load Functions
    def compute(self, i_display):
        """Walk every user/session/sub-session directory, accumulate the
        per-word metrics via getResults/updateResults, and pickle the final
        result dict to self.file_out."""
        (users,wpm_min,wpm_theory,wpm,cpc,char_err,speeds) = self.initDataStructures(i_display)
        for user in self.users:
            user_dir= "%suser_%.2d/" % (self.root_dir, user)
            if not os.path.exists(user_dir):
                continue
            for sub_session in self.sub_sessions:
                ss = "%d" % sub_session
                if not users.has_key(ss):
                    continue
                for session in self.sessions:
                    s = "%d" % session
                    if not users[ss].has_key(s):
                        continue
                    sub_session_dir = "%ssession_%.2d/sub_session_%.2d/" % (user_dir,int(s),int(ss))
                    if not os.path.exists(sub_session_dir):
                        continue
                    # One line per phrase; words within a phrase are '_'-separated.
                    phrases = self.utils.loadText(sub_session_dir + self.phrase_file).split("\n")[0:-1]
                    for phrase_cnt in range(0,len(phrases)):
                        words = phrases[phrase_cnt].split('_')
                        for word_cnt in range(0, len(words)):
                            filename = "%sclick_stats_%.2d_%.2d.cPickle" % (sub_session_dir, phrase_cnt, word_cnt)
                            if not os.path.exists(filename):
                                continue
                            #print "file_name = ", filename
                            click_stats = self.utils.loadPickle(filename)
                            # Skip words recorded before calibration finished.
                            if not click_stats['is_calibrated']:
                                continue
                            results = self.getResults(user, s, ss, click_stats, words[word_cnt], i_display)
                            saved_results = (users,wpm_min,wpm_theory,wpm,cpc,char_err,speeds)
                            saved_results = self.updateResults(results, saved_results, s, ss)
        #Save the results
        # NOTE(review): saved_results is only bound inside the innermost
        # loop; if no click-stats file is ever processed this raises
        # NameError — presumably never happens with real trial data.
        r = {}
        (r['users'], r['wpm_min'],r['wpm_theory'],r['wpm'],r['cpc'],r['char_err'],r['speeds']) = saved_results
        print "Saving to file ", self.file_out
        self.utils.savePickle(r, self.file_out)
        if i_display:
            print "Total clicks received = ", self.nclicks_total
def updateResults(self, i_results, i_saved_results, i_session, i_sub_session):
#The final output results
(users,wpm_min,wpm_theory,wpm,cpc,char_err,speeds) = i_saved_results
#All the results to display
(user, grnd_truth, selection, iterations, n_click_iter, n_undo, is_word_err,
char_read_time, char_read_theory,end_delay,iter_time, cur_wpm_min,cur_wpm_theory,cur_wpm,
click_distr, n_clicks, cur_cpc, min_edit_dist, overlap, speed) = i_results
#Update the final results to save
users[i_sub_session][i_session].append(user)
wpm_min[i_sub_session][i_session].append(cur_wpm_min)
wpm_theory[i_sub_session][i_session].append(cur_wpm_theory)
wpm[i_sub_session][i_session].append(cur_wpm)
cpc[i_sub_session][i_session].append(cur_cpc)
char_err[i_sub_session][i_session].append(min_edit_dist)
speeds[i_sub_session][i_session].append(speed)
return (users,wpm_min,wpm_theory,wpm,cpc,char_err,speeds)
################################################# Get
    def getResults(self, i_user, i_session, i_subsession, i_click_stats, i_cur_word, i_display):
        """Derive all per-word metrics (wpm variants, clicks-per-char, error
        rate, timings) from one word's click-stats record and return them as
        a 20-tuple; optionally display a formatted row."""
        c = dict(i_click_stats)
        grnd_truth = c['grnd_truth']
        cur_word = self.phrase_utils.getWord(i_cur_word)
        # Sanity check: the recorded ground truth must match the phrase file.
        if not ( grnd_truth == cur_word):
            print "grnd_truth = ", grnd_truth, " should be " , cur_word
            raise ValueError("Grnd truth incorrect")
        selection = self.phrase_utils.getWord(c['selected_word'])
        #The number of times the user used the undo function
        n_undo = c['nundo']
        #Total number of alphabet sequence iteration
        iterations = c['niterations']
        #Total number of iterations where clicks were received
        n_click_iter = c['nclick_iterations']
        #Is there a timeout or word-selection error?
        is_word_err = c['word_error'] or (not (grnd_truth == selection))
        #Normalise the time it took to read only the alphbaet sequence correctlt
        #The delay at the end of the sequence (after reading the character)
        #Initially this was not recorded
        if c['settings'].has_key('end_delay'):
            end_delay = c['settings']['end_delay']
        else:
            end_delay = self.extra_wait_time
        #The measured time taken to read a character
        char_read_time = c['alphabet_read_time']*float(c['nclick_iterations']) / float(iterations - n_undo)
        #The theoretical time it should take to read a character
        (file_length, nchannels, overlap) = (c['settings']['file_length'],5,c['settings']['overlap'])
        root_dir = "../../"
        channel_config = ChannelConfig(nchannels,overlap ,file_length, root_dir)
        char_read_theory = channel_config.getSoundTimes()[-1,-1]
        #Compute all the wpm from the char reading times and the number of iterations
        #The min possible wpm for the number of iterations
        speed = 60.0/(self.word_length*char_read_theory)
        wpm_min = 60.0/(char_read_theory * iterations)* (float(len(selection)) / self.word_length)
        #The theoretical wpm
        wpm_theory = 60.0/((char_read_theory+end_delay)*iterations)* (float(len(selection))/ self.word_length)
        #The measured wpm
        #Iter time = total time used to compute the measured wpm
        iter_time = (char_read_time+end_delay) * iterations
        wpm = 60.0/iter_time* (float(len(selection))/ self.word_length)
        #The number of clicks used to write the word
        n_clicks = c['nclicks']
        self.nclicks_total += n_clicks
        #The speed (how much the sounds overlapped)
        overlap = c['settings']['overlap']
        #The click distribution after the word was selected
        click_distr = copy.deepcopy(c['click_distr_after'])
        #The clicks per character
        cpc = float(n_clicks) / float(len(grnd_truth))
        #The error rate
        min_edit_dist = 0.0
        if is_word_err:
            if not (selection == ""):
                # Percentage character error via minimum edit distance.
                min_edit_dist = self.dist_measure.compute(grnd_truth,selection)
                min_edit_dist = (100.0*float(min_edit_dist) / len(grnd_truth))
                cpc = float(n_clicks) / float(len(selection))
            else:
                min_edit_dist = 100.0
            # Erroneous words contribute zero to the wpm measures.
            (wpm_theory, wpm) = (0.0, 0.0)
        r = (i_user, grnd_truth, selection, iterations, n_click_iter, n_undo, is_word_err,
            char_read_time, char_read_theory,end_delay,iter_time, wpm_min,wpm_theory,wpm,
            click_distr, n_clicks, cpc, min_edit_dist, overlap, speed )
        if i_display:
            self.dispUserResults(r, i_session, i_subsession )
        #Some diagnostic tests
        if (not i_user == 3) or ((i_user==3) and (int(i_session)==5)):
            dist = abs(char_read_theory-char_read_time)
            if dist > 1E-1:
                raise ValueError("Character reading times should be equal")
        return r
################################################### Display
    def dispUserResults(self, i_results, i_session, i_subsession):
        """Print one fixed-width, '|'-separated row of per-word metrics;
        columns match the header produced by dispHeading()."""
        (user, grnd_truth, selection, iterations, n_click_iter, n_undo, is_word_err,
            char_read_time, char_read_theory,end_delay,iter_time, wpm_min,wpm_theory,wpm,
            click_distr, n_clicks, cpc, min_edit_dist, overlap, speed) = i_results
        g = ( "{0:{1}}".format( "%d" % user, 4 ) + "|")
        g += ( "{0:{1}}".format( "%s" % i_session, 8 ) + "|")
        g += ( "{0:{1}}".format( "%s" % i_subsession, 8 ) + "|")
        g += ( "{0:{1}}".format( "%s" % grnd_truth, 12 ) + "|")
        g += ( "{0:{1}}".format( "%s" % selection, 12 ) + "|")
        g += ( "{0:{1}}".format( "%d" % is_word_err, 3 ) + "|")
        g += ( "{0:{1}}".format( "%.2d" % iterations, 4 ) + "|")
        g += ( "{0:{1}}".format( "%.2d" % n_click_iter, 5 ) + "|")
        g += ( "{0:{1}}".format( "%.2d" % n_undo, 5 ) + "|")
        g += ( "{0:{1}}".format( "%2.2f" % char_read_theory, 7 ) + "|")
        g += ( "{0:{1}}".format( "%2.2f" % char_read_time, 8 ) + "|")
        g += ( "{0:{1}}".format( "%2.2f" % end_delay, 5 ) + "|")
        g += ( "{0:{1}}".format( "%1.2f" % wpm_min, 7 ) + "|")
        # getDispVal handles values that may be blanked out (e.g. on error).
        g += self.phrase_utils.getDispVal(wpm_theory, "%1.2f", 7)
        g += self.phrase_utils.getDispVal(wpm, "%1.2f", 4)
        g += ( "{0:{1}}".format( "%3.2f" % iter_time, 8 ) + "|")
        g += self.phrase_utils.getDispVal(cpc, "%1.2f", 4)
        g += ( "{0:{1}}".format( "%3.2f" % min_edit_dist, 6 ) + "|")
        g += ( "{0:{1}}".format( "%1.2f" % overlap, 4 ) + "|")
        g += ( "{0:{1}}".format( "%1.2f" % speed, 5 ) + "|")
        g += ( "{0:{1}}".format( "%.2d" % n_clicks, 6 ) + "|")
        print g
    def dispHeading(self):
        """Print the column descriptions followed by the fixed-width header
        row; widths must stay in sync with dispUserResults()."""
        self.dispHeadingDescriptions()
        h = ( "{0:{1}}".format( "user", 4 ) + "|")
        h += ( "{0:{1}}".format( "session", 8 ) + "|")
        h += ( "{0:{1}}".format( "subsess", 8 ) + "|")
        h += ( "{0:{1}}".format( "grnd_truth", 12 ) + "|")
        h += ( "{0:{1}}".format( "select", 12 ) + "|")
        h += ( "{0:{1}}".format( "err", 3 ) + "|")
        h += ( "{0:{1}}".format( "iter", 4 ) + "|")
        h += ( "{0:{1}}".format( "citer", 5 ) + "|")
        h += ( "{0:{1}}".format( "nundo", 5 ) + "|")
        h += ( "{0:{1}}".format( "chr_thr", 7 ) + "|")
        h += ( "{0:{1}}".format( "chr_time", 8 ) + "|")
        h += ( "{0:{1}}".format( "delay", 5 ) + "|")
        h += ( "{0:{1}}".format( "wpm_min", 7 ) + "|")
        h += ( "{0:{1}}".format( "wpm_thr", 7 ) + "|")
        h += ( "{0:{1}}".format( "wpm", 4 ) + "|")
        h += ( "{0:{1}}".format( "tot_time", 8 ) + "|")
        h += ( "{0:{1}}".format( "cpc", 4 ) + "|")
        h += ( "{0:{1}}".format( "%error", 6 ) + "|")
        h += ( "{0:{1}}".format( "over", 4 ) + "|")
        h += ( "{0:{1}}".format( "speed", 5 ) + "|")
        h += ( "{0:{1}}".format( "nclicks", 6 ) + "|")
        print h
def dispHeadingDescriptions(self):
print "grnd_truth: The word the user is supposed to write"
print "select: The word selected by the user"
print "err (1/0): 1 if an error occured (time-out error or if select is not equal to grnd_truth"
print "iter: The total number of alphabet-sequence repetitions (scans) to select the word"
print "citer: The number of alphabet-sequence repetitions (scans) where the user clicked"
print "nundo: The number of times the user clicked 4 or | |
# GitHub stars: 0
# -*- coding: utf-8 -*-
'''
Semiparametric Support Vector Machine model under POM3.
'''
__author__ = "<NAME>"
__date__ = "January 2021"
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import accuracy_score
from MMLL.models.POM3.CommonML.POM3_CommonML import POM3_CommonML_Master, POM3_CommonML_Worker
from MMLL.models.Common_to_models import Common_to_models
from MMLL.models.POM3.Kmeans.Kmeans import Kmeans_Master, Kmeans_Worker
class SVM_model(Common_to_models):
    """
    Semiparametric SVM model: an RBF kernel expansion around a fixed set of
    centroids (support vectors) combined linearly by a weight vector.
    """
    def __init__(self, logger):
        """
        Create a :class:`SVM_model` instance.
        Parameters
        ----------
        logger: :class:`mylogging.Logger`
            Logging object instance.
        """
        self.logger = logger
        self.is_trained = False
        self.supported_formats = ['pkl', 'onnx', 'pmml']
        self.name = 'SVM'
        # Trained parameters; populated during training.
        self.centroids = None
        self.sigma = None
        self.weights = None
    def kernelMatrix(self, setDim1, setDim2):
        """
        Computes a kernel matrix given two datasets.
        Parameters
        ----------
        setDim1: ndarray
            Array containing M input patterns.
        setDim2: ndarray
            Array containing N input patterns.
        Returns
        -------
        ndarray
            MxN Gaussian kernel matrix: entry (i, j) is
            exp(-||setDim1[i] - setDim2[j]||^2 / sigma^2).
        """
        pairwise_dists = cdist(setDim1, setDim2, 'euclidean')
        gamma = 1 / (self.sigma**2)
        return np.exp(-gamma * np.power(pairwise_dists, 2))
    def predict(self, X_b):
        """
        Uses the model to predict new outputs given the inputs.
        Parameters
        ----------
        X_b: ndarray
            Array containing the input patterns.
        Returns
        -------
        ndarray
            Array of hard (+1/-1) predictions, one per input pattern.
        """
        kernel = self.kernelMatrix(X_b, self.centroids)
        scores = kernel.dot(self.weights)
        # Threshold the soft outputs at zero (in place) to get hard labels.
        scores[scores >= 0] = 1
        scores[scores < 0] = -1
        return scores
class SVM_Master(POM3_CommonML_Master):
    """
    This class implements SVM, run at Master node. It inherits from :class:`POM3_CommonML_Master`.
    """
    def __init__(self, comms, logger, verbose=False, NC=None, Nmaxiter=None, tolerance =None, sigma=None, C=None, NmaxiterGD=None, eta=None):
        """
        Create a :class:`SVM_Master` instance.
        Parameters
        ----------
        comms: :class:`Comms_master`
            Object providing communication functionalities.
        logger: :class:`mylogging.Logger`
            Logging object instance.
        verbose: boolean
            Indicates whether to print messages on screen nor not.
        NC: int
            Number of support vectors in the semiparametric model.
        Nmaxiter: int
            Maximum number of iterations.
        tolerance: float
            Minimum tolerance for continuing training.
        sigma: float
            The parameter of the gaussian kernel.
        C: float
            The cost parameter in the cost function.
        NmaxiterGD: int
            Maximum number of iterations for the SVM.
        eta: float
            The step of the gradient descent algorithm.
        """
        self.num_centroids = int(NC)
        self.Nmaxiter = int(Nmaxiter)
        self.tolerance = tolerance
        self.sigma = sigma
        self.C = C
        self.NmaxiterGD = NmaxiterGD
        self.eta = eta
        super().__init__(comms, logger, verbose)                # Initialize common class for POM3
        # NOTE: the following bare string is a deliberately disabled block
        # (Kmeans is now launched lazily from TakeAction_Master instead).
        """
        # Initialize Kmeans first
        self.kmeans_master = Kmeans_Master(self.comms, self.logger, self.verbose, self.num_centroids, self.Nmaxiter, self.tolerance)
        self.public_keys = self.kmeans_master.public_keys                                       # Store the public keys of all workers
        self.num_features = self.kmeans_master.num_features                                     # Store encrypted sequences of all workers
        """
        self.name = 'POM3_SVM_Master'                           # Name of the class
        self.weights = np.zeros((self.num_centroids, 1))        # Weights for the SVM
        self.iter = 0                                           # Number of iterations already executed
        self.is_trained = False                                 # Flag to know if the model is trained
        self.initialization_ready = False                       # Flag to know if the initialization needed for POM3 is ready
    def train_Master_(self):
        '''
        Main loop controlling the training of the algorithm.
        Parameters
        ----------
        None
        '''
        self.weights = np.zeros((self.num_centroids, 1))
        self.iter = 0
        self.is_trained = False
        # State machine: run Kmeans + parameter init until every worker acked.
        self.state_dict['CN'] = 'START_TRAIN'
        while self.state_dict['CN'] != 'INITIALIZATION_READY':
            self.Update_State_Master()
            self.TakeAction_Master()
            self.CheckNewPacket_Master()
        # Now communications should work sequentially (not sending a message to next worker until the actual one replied)
        self.display(self.name + ': Initialization ready, starting sequential communications')
        encrypted_weights = self.encrypt_list(self.weights, self.public_keys[self.workers_addresses[0]]) # Encrypt weights using worker 0 public key
        # Gradient-descent rounds: the encrypted weights travel around the
        # ring of workers; after each hop they are re-encrypted for the next
        # worker's key via transform_encrypted_domain_workers.
        while self.iter != self.NmaxiterGD:
            for index_worker, worker in enumerate(self.workers_addresses):
                action = 'UPDATE_MODEL'
                data = {'weights': encrypted_weights}
                packet = {'to': 'MLModel', 'action': action, 'data': data}
                # Send message to specific worker and wait until receiving reply
                packet = self.send_worker_and_wait_receive(packet, worker)
                encrypted_weights = packet['data']['weights']
                encrypted_weights = self.transform_encrypted_domain_workers(encrypted_weights, worker, self.workers_addresses[(index_worker+1)%self.Nworkers])
            self.iter += 1
            self.display(self.name + ': Iteration %d' %self.iter)
        self.display(self.name + ': Stopping training, maximum number of iterations reached!')
        # Final pass around the ring so every worker stores the final model.
        action = 'SEND_FINAL_MODEL'
        for index_worker, worker in enumerate(self.workers_addresses):
            data = {'weights': encrypted_weights}
            packet = {'to': 'MLModel', 'action': action, 'data': data}
            # Send message to specific worker and wait until receiving reply
            packet = self.send_worker_and_wait_receive(packet, worker)
            encrypted_weights = packet['data']['weights']
            encrypted_weights = self.transform_encrypted_domain_workers(encrypted_weights, worker, self.workers_addresses[(index_worker+1)%self.Nworkers])
        self.is_trained = True
        self.display(self.name + ': Training is done')
    def Update_State_Master(self):
        '''
        Function to control the state of the execution.
        Parameters
        ----------
        None
        '''
        # Advance the state machine only once every worker has acknowledged.
        if self.checkAllStates('ACK_LAUNCH_KMEANS', self.state_dict):
            for worker in self.workers_addresses:
                self.state_dict[worker] = ''
            self.state_dict['CN'] = 'TRAIN_KMEANS'
        if self.checkAllStates('ACK_INITIALIZE_PARAMETERS', self.state_dict):
            for worker in self.workers_addresses:
                self.state_dict[worker] = ''
            self.state_dict['CN'] = 'INITIALIZATION_READY'
    def TakeAction_Master(self):
        """
        Function to take actions according to the state.
        Parameters
        ----------
        None
        """
        # NOTE(review): 'MLmodel' here vs 'MLModel' in train_Master_ — the
        # capitalization differs; confirm which routing key workers expect.
        to = 'MLmodel'
        # Train Kmeans first
        if self.state_dict['CN'] == 'START_TRAIN':
            action = 'LAUNCH_KMEANS'
            packet = {'to': to, 'action': action}
            self.comms.broadcast(packet, self.workers_addresses)
            self.display(self.name + ': Sent ' + action + ' to all workers')
            self.state_dict['CN'] = 'wait'
        # Train Kmeans
        if self.state_dict['CN'] == 'TRAIN_KMEANS':
            # Delegate centroid estimation to a nested Kmeans master, then
            # keep its keys/encrypted data for the SVM phase.
            kmeans_master = Kmeans_Master(self.comms, self.logger, self.verbose, self.num_centroids, self.Nmaxiter, self.tolerance)
            kmeans_master.workers_addresses = self.workers_addresses
            kmeans_master.Nworkers = self.Nworkers
            kmeans_master.train_Master()
            self.public_keys = kmeans_master.public_keys # Store the public key
            self.encrypted_Xi = kmeans_master.encrypted_Xi
            # Terminate Kmeans workers
            kmeans_master.terminate_workers_()
            self.state_dict['CN'] = 'INITIALIZE_PARAMETERS'
        # Asking the workers to send initialize centroids
        if self.state_dict['CN'] == 'INITIALIZE_PARAMETERS':
            action = 'INITIALIZE_PARAMETERS'
            data = {'sigma': self.sigma, 'C': self.C, 'eta': self.eta}
            packet = {'to': to, 'action': action, 'data': data}
            self.comms.broadcast(packet, self.workers_addresses)
            self.display(self.name + ': Sent ' + action + ' to all workers')
            self.state_dict['CN'] = 'wait'
#===============================================================
# Worker
#===============================================================
class SVM_Worker(POM3_CommonML_Worker):
'''
Class implementing a semiparametric SVM, run at Worker node. It inherits from :class:`POM3_CommonML_Worker`.
'''
    def __init__(self, master_address, comms, logger, verbose=False, Xtr_b=None, ytr=None):
        """
        Create a :class:`SVM_Worker` instance.
        Parameters
        ----------
        master_address: string
            Identifier of the master instance.
        comms: :class:`Comms_worker`
            Object providing communication functionalities.
        logger: :class:`mylogging.Logger`
            Logging object instance.
        verbose: boolean
            Indicates whether to print messages on screen nor not.
        Xtr_b: ndarray
            Array containing the inputs for training.
        ytr: ndarray
            Array containing the labels for training.
        """
        # Training data must be set before the common base initializes.
        self.Xtr_b = Xtr_b
        self.ytr = ytr
        super().__init__(master_address, comms, logger, verbose)    # Initialize common class for POM3
        self.name = 'POM3_SVM_Worker'                               # Name of the class
        self.model = SVM_model(logger)                              # Model
        self.is_trained = False                                     # Flag to know if the model has been trained
        # NOTE: the following bare string is a deliberately disabled block;
        # Kmeans now runs on demand in ProcessReceivedPacket_Worker.
        """
        # Train Kmeans first
        kmeans_worker = Kmeans_Worker(master_address, comms, logger, verbose, Xtr_b.copy())
        kmeans_worker.run_worker()

        # Save results from Kmeans
        self.model.centroids = kmeans_worker.model.centroids     # Save centroids
        self.num_centroids = self.model.centroids.shape[0]       # Number of centroids
        self.public_key = kmeans_worker.public_key               # Public key
        self.private_key = kmeans_worker.private_key             # Private key
        self.precision = kmeans_worker.precision                 # Store precision
        self.r_values = kmeans_worker.r_values                   # Random values for encryption

        self.preprocessor_ready = kmeans_worker.preprocessor_ready # Store flag for preprocessing
        if self.preprocessor_ready:
            self.Xtr_b = np.copy(kmeans_worker.Xtr_b)            # Normalize data
            self.prep_model = kmeans_worker.prep_model           # Store preprocessing object
        """
def ProcessReceivedPacket_Worker(self, packet):
"""
Process the received packet at worker.
Parameters
----------
packet: dictionary
Packet received from the master.
"""
if packet['action'] == 'LAUNCH_KMEANS':
self.display(self.name + ' %s: Launching Kmeans worker' %self.worker_address)
# Initialize Kmeans
kmeans_worker = Kmeans_Worker(self.master_address, self.comms, self.logger, self.verbose, self.Xtr_b.copy())
#kmeans_worker.public_key = self.public_key
#kmeans_worker.private_key = self.private_key
#kmeans_worker.precision = self.precision
#kmeans_worker.num_workers = self.num_workers
#self.preprocessors = kmeans_worker.preprocessors
# Reply to master
action = 'ACK_LAUNCH_KMEANS'
packet = {'action': action}
self.comms.send(packet, self.master_address)
self.display(self.name + ' %s: Sent %s to master' %(self.worker_address, action))
# Run Kmeans
kmeans_worker.run_worker()
if kmeans_worker.model.centroids is not None:
self.model.centroids = kmeans_worker.model.centroids # Save centroids
self.num_centroids = self.model.centroids.shape[0] # Number of centroids
self.public_key = kmeans_worker.public_key # Public key
self.private_key = kmeans_worker.private_key # Private key
self.r_values = kmeans_worker.r_values # Random values for encryption
self.precision = kmeans_worker.precision # Precision
self.num_workers = kmeans_worker.num_workers # Number of workers
self.preprocessors = kmeans_worker.preprocessors # Preprocessors
if packet['action'] == 'INITIALIZE_PARAMETERS':
self.display(self.name + ' %s: Storing C and sigma' %self.worker_address)
self.model.sigma = packet['data']['sigma']
self.C = packet['data']['C']
self.eta = packet['data']['eta']
self.Kmn = self.model.kernelMatrix(self.Xtr_b, self.model.centroids)
self.Y_col = np.reshape(self.ytr, (len(self.ytr), 1))
action = 'ACK_INITIALIZE_PARAMETERS'
packet = {'action': action}
self.comms.send(packet, self.master_address)
self.display(self.name + ' %s: Sent %s to master' %(self.worker_address, action))
if packet['action'] == 'UPDATE_MODEL':
self.display(self.name + ' %s: Obtaining gradients' %self.worker_address)
encrypted_weights = packet['data']['weights']
# Unencrypt received centroids
self.model.weights = np.asarray(self.decrypt_list(encrypted_weights))
gradients, cost_function = self.get_gradients(self.model.weights)
self.model.weights = self.model.weights - self.eta*gradients
encrypted_weights = np.asarray(self.encrypt_list_rvalues(self.model.weights))
action = 'UPDATE_MODEL'
data = {'cost_function': cost_function, 'weights': encrypted_weights}
packet = {'action': | |
== "c890":
prodname = "ROUTERS/ISRG1/890"
elif prodcode == "ISRG1GENERIC":
prodname = "ROUTERS/ISRG1/MODULES"
elif prodcode == "c1900":
prodname = "ROUTERS/ISRG2/1900"
elif prodcode == "c1900-2900":
prodname = "ROUTERS/ISRG2/1900-2900"
elif prodcode == "c1900c":
prodname = "ROUTERS/ISRG2/1900-CHINA"
elif prodcode == "c2900":
prodname = "ROUTERS/ISRG2/2900"
elif prodcode == "c2911a":
prodname = "ROUTERS/ISRG2/2911a"
elif prodcode == "c2951":
prodname = "ROUTERS/ISRG2/2951"
elif prodcode == "c3900":
prodname = "ROUTERS/ISRG2/3900"
elif prodcode == "c3900e":
prodname = "ROUTERS/ISRG2/3900E"
elif prodcode == "c800m":
prodname = "ROUTERS/ISRG2/800m"
elif prodcode == "c800j":
prodname = "ROUTERS/ISRG2/800J"
elif prodcode == "c860vae":
prodname = "ROUTERS/ISRG2/860-VAE"
elif prodcode == "c860vae2":
prodname = "ROUTERS/ISRG2/860-VAE2"
elif prodcode == "c860vaej":
prodname = "ROUTERS/ISRG2/860-VAEJ"
elif prodcode == "c860vaew":
prodname = "ROUTERS/ISRG2/860-VAEW"
elif prodcode == "c880data":
prodname = "ROUTERS/ISRG2/880"
elif prodcode == "c880voice":
prodname = "ROUTERS/ISRG2/880-CUBE"
elif prodcode == "c890s":
prodname = "ROUTERS/ISRG2/890"
elif prodcode == "c900":
prodname = "ROUTERS/ISRG2/900"
elif prodcode == "ISRG2GENERIC":
prodname = "ROUTERS/ISRG2/MODULES"
elif prodcode == "c800g2":
prodname = "ROUTERS/ISRG2/800"
elif prodcode == "c800g3":
prodname = "ROUTERS/ISRG3/800"
elif prodcode == "ir1101":
prodname = "ROUTERS/ISRG3/IR-1101"
elif prodcode == "ir800":
prodname = "ROUTERS/ISRG3/IR-800"
elif prodcode == "c1000router":
prodname = "ROUTERS/BRANCH/1000"
elif prodcode == "c1100router":
prodname = "ROUTERS/ISRG3/ISR-1100"
elif prodcode == "isr4200":
prodname = "ROUTERS/ISRG3/ISR-4200"
elif prodcode == "isr4200-4300":
prodname = "ROUTERS/ISRG3/ISR-4200-4300"
elif prodcode == "isr4300":
prodname = "ROUTERS/ISRG3/ISR-4300"
elif prodcode == "isr4400":
prodname = "ROUTERS/ISRG3/ISR-4400"
elif prodcode == "isr4400v2":
prodname = "ROUTERS/ISRG3/ISR-4461"
elif prodcode == "mwr1900":
prodname = "ROUTERS/MOBILE/MWR-1900"
elif prodcode == "mwr1941":
prodname = "ROUTERS/MOBILE/MWR-1941"
elif prodcode == "mwr2941":
prodname = "ROUTERS/MOBILE/MWR-2941"
elif prodcode == "c3201":
prodname = "ROUTERS/RUGGED/3201-AP"
elif prodcode == "c3202":
prodname = "ROUTERS/RUGGED/3202-AP"
elif prodcode == "c3205":
prodname = "ROUTERS/RUGGED/3205-AP"
elif prodcode == "c3220":
prodname = "ROUTERS/RUGGED/3220"
elif prodcode == "c3230":
prodname = "ROUTERS/RUGGED/3230"
elif prodcode == "c3250":
prodname = "ROUTERS/RUGGED/3250"
elif prodcode == "c3270":
prodname = "ROUTERS/RUGGED/3270"
elif prodcode == "c10k":
prodname = "ROUTERS/SP/10000/PRE1"
elif prodcode == "c10k2":
prodname = "ROUTERS/SP/10000/PRE2"
elif prodcode == "c10k3":
prodname = "ROUTERS/SP/10000/PRE3"
elif prodcode == "c10k4":
prodname = "ROUTERS/SP/10000/PRE4"
elif prodcode == "c10700":
prodname = "ROUTERS/SP/10700"
elif prodcode == "c12k":
prodname = "ROUTERS/SP/12000"
elif prodcode == "c12kprp":
prodname = "ROUTERS/SP/12000"
elif prodcode == "gsr":
prodname = "ROUTERS/SP/12000"
elif prodcode == "XR12000":
prodname = "ROUTERS/SP/12000-XR"
elif prodcode == "c7000":
prodname = "ROUTERS/SP/7000"
elif prodcode == "c7100":
prodname = "ROUTERS/SP/7100"
elif prodcode == "c7200":
prodname = "ROUTERS/SP/7200/NPEG1"
elif prodcode == "c7200p":
prodname = "ROUTERS/SP/7200/NPEG2"
elif prodcode == "c7300":
prodname = "ROUTERS/SP/7300"
elif prodcode == "c7301":
prodname = "ROUTERS/SP/7301"
elif prodcode == "c7304":
prodname = "ROUTERS/SP/7304"
elif prodcode == "spa":
prodname = "ROUTERS/SP/7304"
elif prodcode == "c7400":
prodname = "ROUTERS/SP/7400"
elif prodcode == "rsp":
prodname = "ROUTERS/SP/7500"
elif prodcode == "c7600":
prodname = "ROUTERS/SP/7600"
elif prodcode == "c7600rsp72043":
prodname = "ROUTERS/SP/7600/RSP720"
elif prodcode == "rsp72043":
prodname = "ROUTERS/SP/7600/RSP720"
elif prodcode == "c7svcsami":
prodname = "ROUTERS/SP/7600/SAMI"
elif prodcode == "c7600s3223":
prodname = "ROUTERS/SP/7600/SUP-32"
elif prodcode == "c7600s72033":
prodname = "ROUTERS/SP/7600/SUP-720"
elif prodcode == "8000":
prodname = "ROUTERS/SP/8000"
elif prodcode == "csr1000v":
prodname = "ROUTERS/VIRTUAL/CSR-1000V"
elif prodcode == "csr1000v_milplr":
prodname = "ROUTERS/VIRTUAL/CSR-1000V"
elif prodcode == "vios":
prodname = "ROUTERS/VIRTUAL/IOS-V"
elif prodcode == "iosxrvdemo":
prodname = "ROUTERS/VIRTUAL/IOS-XRv"
elif prodcode == "iosxrvfull":
prodname = "ROUTERS/VIRTUAL/IOS-XRv9000"
elif prodcode == "csa":
prodname = "SECURITY/CISCO-SECURITY-AGENT"
elif prodcode == "csm":
prodname = "SECURITY/CISCO-SECURITY-MANAGER"
elif prodcode == "asa":
prodname = "SECURITY/FIREWALL/ASA"
elif prodcode == "asacx":
prodname = "SECURITY/FIREWALL/ASA-CX-MODULE"
elif prodcode == "c6svc-fwm":
prodname = "SECURITY/FIREWALL/CATALYST-6500-FWSM"
elif prodcode == "firepower":
prodname = "SECURITY/FIREWALL/FirePOWER"
elif prodcode == "pix":
prodname = "SECURITY/FIREWALL/PIX"
elif prodcode == "acs":
prodname = "SECURITY/IDENTITY/ACS"
elif prodcode == "ise":
prodname = "SECURITY/IDENTITY/IDENTITY-SERVICES-ENGINE"
elif prodcode == "isepic":
prodname = "SECURITY/IDENTITY/IDENTITY-SERVICES-ENGINE-PIC"
elif prodcode == "ciscoutd":
prodname = "SECURITY/IOS-XE-UTD"
elif prodcode == "ipsids":
prodname = "SECURITY/IDS-IPS"
elif prodcode == "iosids":
prodname = "SECURITY/IOS-IDS"
elif prodcode == "ironport":
prodname = "SECURITY/IRONPORT"
elif prodcode == "mars":
prodname = "SECURITY/MARS"
elif prodcode == "vpn3000":
prodname = "SECURITY/VPN-3000"
elif prodcode == "anyconnect":
prodname = "SECURITY/VPN-CLIENTS/ANYCONNECT"
elif prodcode == "vpnclient":
prodname = "SECURITY/VPN-CLIENTS/IPSEC-CLIENT"
elif prodcode == "aci":
prodname = "SERVERS/APIC"
elif prodcode == "css":
prodname = "SERVERS/CSS"
elif prodcode == "dcnm":
prodname = "SERVERS/DATA-CENTER-NETWORK-MANAGER"
elif prodcode == "dnac":
prodname = "SERVERS/DNAC"
elif prodcode == "hyperflex":
prodname = "SERVERS/HYPERFLEX"
elif prodcode == "onepk":
prodname = "SERVERS/ONE-PK"
elif prodcode == "ucsgeneric":
prodname = "SERVERS/UCS"
elif prodcode == "smallbusiness":
prodname = "Small-Business"
elif prodcode == "c125":
prodname = "SERVERS/UCS/C-SERIES/C125M5"
elif prodcode == "c200":
prodname = "SERVERS/UCS/C-SERIES/C200M1-C200M2-C210M1-C210M2"
elif prodcode == "c220":
prodname = "SERVERS/UCS/C-SERIES/C220M3"
elif prodcode == "c220m4":
prodname = "SERVERS/UCS/C-SERIES/C220M4"
elif prodcode == "c220m5":
prodname = "SERVERS/UCS/C-SERIES/C220M5"
elif prodcode == "c2x":
prodname = "SERVERS/UCS/C-SERIES/C22M3-C22M4"
elif prodcode == "c240":
prodname = "SERVERS/UCS/C-SERIES/C240M3"
elif prodcode == "c240m4":
prodname = "SERVERS/UCS/C-SERIES/C240M4"
elif prodcode == "c240m5":
prodname = "SERVERS/UCS/C-SERIES/C240M5"
elif prodcode == "c250":
prodname = "SERVERS/UCS/C-SERIES/C250M1-C250M2"
elif prodcode == "c260":
prodname = "SERVERS/UCS/C-SERIES/C260M2"
elif prodcode == "c2xxm3":
prodname = "SERVERS/UCS/C-SERIES/C2XXM3"
elif prodcode == "c2xxm4":
prodname = "SERVERS/UCS/C-SERIES/C2XXM4"
elif prodcode == "c2xxm5":
prodname = "SERVERS/UCS/C-SERIES/C2XXM5"
elif prodcode == "c3160":
prodname = "SERVERS/UCS/C-SERIES/C3160"
elif prodcode == "c3260":
prodname = "SERVERS/UCS/C-SERIES/C3260"
elif prodcode == "c420":
prodname = "SERVERS/UCS/C-SERIES/C420M3"
elif prodcode == "c460":
prodname = "SERVERS/UCS/C-SERIES/C460M1-C460M2"
elif prodcode == "c460m4":
prodname = "SERVERS/UCS/C-SERIES/C460M4"
elif prodcode == "c480m5":
prodname = "SERVERS/UCS/C-SERIES/C480M5"
elif prodcode == "ucsbseries":
prodname = "SERVERS/UCS/B-SERIES/"
elif prodcode == "ucscseries":
prodname = "SERVERS/UCS/C-SERIES/"
elif prodcode == "ucseseries":
prodname = "SERVERS/UCS/E-SERIES/"
elif prodcode == "e100":
prodname = "SERVERS/UCS/E-SERIES/E1XX"
elif prodcode == "c6400r":
prodname = "SERVICE-GATEWAY/6400-NSP"
elif prodcode == "c6400r2sp":
prodname = "SERVICE-GATEWAY/6400-NSP"
elif prodcode == "c6400s":
prodname = "SERVICE-GATEWAY/6400-NSP"
elif prodcode == "ni2":
prodname = "SERVICE-GATEWAY/6XXX-DSL-Switch"
elif prodcode == "m9100":
prodname = "STORAGE/MDS-9100"
elif prodcode == "m9200":
prodname = "STORAGE/MDS-9200"
elif prodcode == "m9250":
prodname = "STORAGE/MDS-9250"
elif prodcode == "m9500":
prodname = "STORAGE/MDS-9500"
elif prodcode == "m9700":
prodname = "STORAGE/MDS-9700"
elif prodcode == "ls1010":
prodname = "SWITCHES/ATM/Lightspeed-1010"
elif prodcode == "cbs30x0":
prodname = "SWITCHES/BLADE-SWITCHES/CATALYST-3000-DELL-Blade"
elif prodcode == "cbs31x0":
prodname = "SWITCHES/BLADE-SWITCHES/CATALYST-3100-DELL-Blade"
elif prodcode == "cigesm":
prodname = "SWITCHES/BLADE-SWITCHES/IBM-Blade-Switch"
elif prodcode == "cgesm":
prodname = "SWITCHES/BLADE-SWITCHES/IBM-Blade-Switch"
elif prodcode == "cat1200":
prodname = "SWITCHES/CATALYST/Catalyst-1200"
elif prodcode == "cat1600":
prodname = "SWITCHES/CATALYST/Catalyst-1600"
elif prodcode == "cat1900":
prodname = "SWITCHES/CATALYST/Catalyst-1900"
elif prodcode == "c2350":
prodname = "SWITCHES/CATALYST/Catalyst-2350"
elif prodcode == "c2360":
prodname = "SWITCHES/CATALYST/Catalyst-2360"
elif prodcode == "cat2800":
prodname = "SWITCHES/CATALYST/Catalyst-2800"
elif prodcode == "c2800":
prodname = "SWITCHES/CATALYST/Catalyst-2800"
elif prodcode == "c29atm":
prodname = "SWITCHES/CATALYST/Catalyst-2900-ATM"
elif prodcode == "c2900XL":
prodname = "SWITCHES/CATALYST/Catalyst-2900XL"
elif prodcode == "c2900xl":
prodname = "SWITCHES/CATALYST/Catalyst-2900XL"
elif prodcode == "c2918":
prodname = "SWITCHES/CATALYST/Catalyst-2918"
elif prodcode == "c2928":
prodname = "SWITCHES/CATALYST/Catalyst-2928"
elif prodcode == "c2940":
prodname = "SWITCHES/CATALYST/Catalyst-2940"
elif prodcode == "cat2948g":
prodname = "SWITCHES/CATALYST/Catalyst-2948G"
elif prodcode == "c2950":
prodname = "SWITCHES/CATALYST/Catalyst-2950"
elif prodcode == "c2950lre":
prodname = "SWITCHES/CATALYST/Catalyst-2950-LRE"
elif prodcode == "c2955":
prodname = "SWITCHES/CATALYST/Catalyst-2955"
elif prodcode == "c2960":
prodname = "SWITCHES/CATALYST/Catalyst-2960"
elif prodcode == "c2960l":
prodname = "SWITCHES/CATALYST/Catalyst-2960L"
elif prodcode == "c2960s":
prodname = "SWITCHES/CATALYST/Catalyst-2960S"
elif prodcode == "c2960x":
prodname = "SWITCHES/CATALYST/Catalyst-2960X"
elif prodcode == "c2970":
prodname = "SWITCHES/CATALYST/Catalyst-2970"
elif prodcode == "c2975":
prodname = "SWITCHES/CATALYST/Catalyst-2975"
elif prodcode == "cat3000":
prodname = "SWITCHES/CATALYST/Catalyst-3000"
elif prodcode == "c3500xl":
prodname = "SWITCHES/CATALYST/Catalyst-3500XL"
elif prodcode == "c3500XL":
prodname = "SWITCHES/CATALYST/Catalyst-3500XL"
elif prodcode == "c3550":
prodname = "SWITCHES/CATALYST/Catalyst-3550"
elif prodcode == "c3560":
prodname = "SWITCHES/CATALYST/Catalyst-3560"
elif prodcode == "c3560e":
prodname = "SWITCHES/CATALYST/Catalyst-3560E"
elif prodcode == "c3560x":
prodname = "SWITCHES/CATALYST/Catalyst-3560X"
elif prodcode == "c3750":
prodname = "SWITCHES/CATALYST/Catalyst-3750"
elif prodcode == "c3750e":
prodname = "SWITCHES/CATALYST/Catalyst-3750E"
elif prodcode == "c3750me":
prodname = "SWITCHES/METRO/Catalyst-3750ME"
elif prodcode == "c3750x":
prodname = "SWITCHES/CATALYST/Catalyst-3750X"
elif prodcode == "cat3k_caa":
prodname = "SWITCHES/CATALYST/Catalyst-3850-3650"
elif prodcode == "cat4000":
prodname = "SWITCHES/CATALYST/Catalyst-4000"
elif prodcode == "cat4000s12":
prodname = "SWITCHES/CATALYST/Catalyst-4000-SUP-I-II"
elif prodcode == "c4224":
prodname = "SWITCHES/CATALYST/Catalyst-4224"
elif prodcode == "cat4232":
prodname = "SWITCHES/CATALYST/Catalyst-4232"
elif prodcode == "cat4500":
prodname = "SWITCHES/CATALYST/Catalyst-4500"
elif prodcode == "cat4500e":
prodname = "SWITCHES/CATALYST/Catalyst-4500E"
elif prodcode == "c4500e":
prodname = "SWITCHES/CATALYST/Catalyst-4500E"
elif prodcode == "cat4500es8":
prodname = "SWITCHES/CATALYST/Catalyst-4500E-SUP8E"
elif prodcode == "cat4840g":
prodname = "SWITCHES/CATALYST/Catalyst-4840G"
elif prodcode == "cat5000":
prodname = "SWITCHES/CATALYST/Catalyst-5000"
elif prodcode == "ce500":
prodname = "SWITCHES/CATALYST/Catalyst-500E"
elif prodcode == "c6500":
prodname = "SWITCHES/CATALYST/Catalyst-6500-6800"
elif prodcode == "cat6000":
prodname = "SWITCHES/CATALYST/Catalyst-6500-6800"
elif prodcode == "c6sup":
prodname = "SWITCHES/CATALYST/Catalyst-6500-6800/SUP-1-MSFC1"
elif prodcode == "c6sup11":
prodname = "SWITCHES/CATALYST/Catalyst-6500-6800/SUP-1-MSFC1"
elif prodcode == "c6sup12":
prodname = "SWITCHES/CATALYST/Catalyst-6500-6800/SUP-1-MSFC2"
elif prodcode == "c6k222":
prodname = "SWITCHES/CATALYST/Catalyst-6500-6800/SUP-2-MSFC2"
elif prodcode == "c6sup22":
prodname = "SWITCHES/CATALYST/Catalyst-6500-6800/SUP-2-MSFC2"
elif prodcode == "s222":
prodname = "SWITCHES/CATALYST/Catalyst-6500-6800/SUP-2-MSFC2"
elif prodcode == "s2t54":
prodname = "SWITCHES/CATALYST/Catalyst-6500-6800/SUP-2T"
elif prodcode == "s3223":
prodname = "SWITCHES/CATALYST/Catalyst-6500-6800/SUP-32-MSFC2"
elif prodcode == "s32p3":
prodname = "SWITCHES/CATALYST/Catalyst-6500-6800/SUP-32-PISA"
elif prodcode == "s72033":
prodname = "SWITCHES/CATALYST/Catalyst-6500-6800/SUP-720-MSFC3"
elif prodcode == "s6t64":
prodname = "SWITCHES/CATALYST/Catalyst-6500-6800/SUP-6T"
elif prodcode == "c6848x":
prodname = "SWITCHES/CATALYST/Catalyst-6840-X"
elif prodcode == "c6880x":
prodname = "SWITCHES/CATALYST/Catalyst-6880-X"
elif prodcode == "c8000be":
prodname = "SWITCHES/CATALYST/Catalyst-8300-Edge"
elif prodcode == "c8000aep":
prodname = "SWITCHES/CATALYST/Catalyst-8500-Edge"
elif prodcode == "cat8510c":
prodname = "SWITCHES/CATALYST/Catalyst-8510CSR"
elif prodcode == "cat8510m":
prodname = "SWITCHES/CATALYST/Catalyst-8510MSR"
elif prodcode == "cat8540c":
prodname = "SWITCHES/CATALYST/Catalyst-8540CSR"
elif prodcode == "cat8540m":
prodname = "SWITCHES/CATALYST/Catalyst-8540MSR"
elif prodcode == "cat9k":
prodname = "SWITCHES/CATALYST/Catalyst-9000"
elif prodcode == "cat9k_lite":
prodname = "SWITCHES/CATALYST/Catalyst-9200"
elif prodcode == "c1000":
prodname = "SWITCHES/COMPACT/Catalyst-1000"
elif prodcode == "c2960c405":
prodname = "SWITCHES/COMPACT/Catalyst-2960C"
elif prodcode == "c2960c405ex":
prodname = "SWITCHES/COMPACT/Catalyst-2960CG"
elif prodcode == "c2960cx":
prodname = "SWITCHES/COMPACT/Catalyst-2960CX"
elif prodcode == "c3560c":
prodname = "SWITCHES/COMPACT/Catalyst-3560C"
elif prodcode == "c3560c405":
prodname = "SWITCHES/COMPACT/Catalyst-3560C"
elif prodcode == "c3560c405ex":
prodname = "SWITCHES/COMPACT/Catalyst-3560CG"
elif prodcode == "c3560cx":
prodname = "SWITCHES/COMPACT/Catalyst-3560CX"
elif prodcode == "cdb":
prodname = "SWITCHES/COMPACT/CATALYST-DIGITAL-BUILDING"
elif prodcode == "c2020":
prodname = "SWITCHES/EMBEDDED/2020"
elif prodcode == "ess3x00":
prodname = "SWITCHES/EMBEDDED/3300"
elif prodcode == "cgs2520":
prodname = "SWITCHES/GRID/CGS-2520"
elif prodcode == "grwicdes":
prodname = "SWITCHES/GRID/CGS-Module"
elif prodcode == "ie2000":
prodname = "SWITCHES/INDUSTRIAL-ETHERNET/IE-2000"
elif prodcode == "ie2000u":
prodname = "SWITCHES/INDUSTRIAL-ETHERNET/IE-2000U"
elif prodcode == "ies":
prodname = "SWITCHES/INDUSTRIAL-ETHERNET/IE-3000"
elif prodcode == "ie3010":
prodname = "SWITCHES/INDUSTRIAL-ETHERNET/IE-3010"
elif prodcode == "ie3x00":
prodname = "SWITCHES/INDUSTRIAL-ETHERNET/IE-3x00"
elif prodcode == "ie4000":
prodname = "SWITCHES/INDUSTRIAL-ETHERNET/IE-4000"
elif prodcode == "ie4010":
prodname = "SWITCHES/INDUSTRIAL-ETHERNET/IE-4010"
elif prodcode == "ie5000":
prodname = "SWITCHES/INDUSTRIAL-ETHERNET/IE-5000"
elif prodcode == "s6523":
prodname = "SWITCHES/METRO/Catalyst-6500ME"
elif prodcode == "me1200":
prodname = "SWITCHES/METRO/ME-1200"
elif prodcode == "ucs_ctrlr":
prodname = "SWITCHES/METRO/ME-1200/UCS-CONTROLLLER"
elif prodcode == "me240x":
prodname = "SWITCHES/METRO/ME-2400"
elif prodcode == "me2600x":
prodname = "SWITCHES/METRO/ME-2600X"
elif prodcode == "me340x":
prodname = "SWITCHES/METRO/ME-3400"
elif prodcode == "me360x":
prodname = "SWITCHES/METRO/ME-3600"
elif prodcode == "me360x_t":
prodname = "SWITCHES/METRO/ME-3600"
elif prodcode == "me380x":
prodname = "SWITCHES/METRO/ME-3800"
elif prodcode == "c2960sm":
prodname = "SWITCHES/MODULES/Catalyst-2960-SERVICE-MODULE"
elif prodcode == "c3kx":
prodname = "SWITCHES/MODULES/Catalyst-3000-SERVICE-MODULE"
elif prodcode == "c4gwy":
prodname = "SWITCHES/CATALYST/Catalyst-4500/ACCESS-GATEWAY-MODULE"
elif prodcode == "c5atm":
prodname = "SWITCHES/CATALYST/Catalyst-5000/ATM"
elif prodcode == "c5rsfc":
prodname = "SWITCHES/CATALYST/Catalyst-5000/ROUTE-SWITCH-FEATURE-CARD"
elif prodcode == "c5rsm":
prodname = "SWITCHES/CATALYST/Catalyst-5000/ROUTE-SWITCH-MODULE"
elif prodcode == "c6atm":
prodname = "SWITCHES/CATALYST/Catalyst-6500-6800/ATM"
elif prodcode == "wscmm":
prodname = "SWITCHES/CATALYST/Catalyst-6500-6800/CMM"
elif prodcode == "wsidsm2":
prodname = "SWITCHES/CATALYST/Catalyst-6500-6800/IDSM2"
elif prodcode == "c6msfc":
prodname = "SWITCHES/CATALYST/Catalyst-6500-6800/MSFC1"
elif prodcode == "c6msfc2":
prodname = "SWITCHES/CATALYST/Catalyst-6500-6800/MSFC2"
elif prodcode == "c6msfc2a":
prodname = "SWITCHES/CATALYST/Catalyst-6500-6800/MSFC2A"
elif prodcode == "c6msfc3":
prodname = "SWITCHES/CATALYST/Catalyst-6500-6800/MSFC3"
elif prodcode == "c6svc5fmwam":
prodname = "SWITCHES/CATALYST/Catalyst-6500-6800/MWAM"
elif prodcode == "c6svc6fmwam":
prodname = "SWITCHES/CATALYST/Catalyst-6500-6800/MWAM"
elif prodcode == "c6svcimwam":
prodname = "SWITCHES/CATALYST/Catalyst-6500-6800/MWAM"
elif prodcode == "svcmwam":
prodname = "SWITCHES/CATALYST/Catalyst-6500-6800/MWAM"
elif prodcode == "Nexus":
prodname = "SWITCHES/NEXUS/"
elif prodcode == "n1000v":
prodname = "SWITCHES/NEXUS/Nexus-1000v"
elif prodcode == "n3000":
prodname = "SWITCHES/NEXUS/Nexus-3000"
elif prodcode | |
import json
import os
import re
import sqlite3
import tempfile
import urllib.request
from .Base import Base
from .Lib import Defaults, SQLiteContext
class M365Digester(Base):
    """Digests Microsoft 365 endpoint data from the M365 web service into an
    SQLite-backed rule database, de-duplicating and normalising ACL entries."""

    # sqlite3 connection handle, set by open_db().
    __db = None
    # Path (or in-memory identifier) of the database currently open.
    __db_context_handle = None
    # NOTE(review): these are *class* attributes; the dict in particular is
    # shared across instances unless reassigned per-instance — confirm the
    # app only ever creates one M365Digester.
    __rule_list = dict()
    # Counters accumulated while digesting the endpoint set.
    __wildcard_adjustments = 0
    __duplicate_count = 0
    __domain_subset_duplicate_count = 0
    __excluded_count = 0

    @property
    def rule_list(self):
        # Read-only view of the cached rule list.
        return self.__rule_list
def open_db(self, db_target: str) -> bool:
    """
    Open a SQLite database connection.

    :param db_target: path to the database file (or in-memory context string).
    :return: True when the connection was established.
    :raises sqlite3.Error: if the database cannot be opened.
    """
    try:
        self.__db_context_handle = db_target
        self.__db = sqlite3.connect(self.__db_context_handle)
        # BUG FIX: sqlite3.version (the pysqlite adapter version) was
        # deprecated and removed in Python 3.12; report the SQLite library
        # version instead.
        self.debug(f"Opened SQLite3 Database file {self.__db_context_handle} connection, version {sqlite3.sqlite_version}")
        return True
    except sqlite3.Error as e:
        self.error(f"Unable to open SQLite3 database file {self.__db_context_handle}, error: {e}")
        # Bare raise preserves the original traceback.
        raise
def close_db(self):
    """
    Close the sqlite database connection opened by :meth:`open_db`.
    """
    # Any uncommitted changes on a file-backed DB are discarded here;
    # callers are expected to have called db_commit() first.
    self.__db.close()
def remove_db(self) -> bool:
    """
    Delete the temporary sqlite database file once it is no longer needed.

    In-memory databases, and files explicitly retained via the
    ``keep_sqlitedb`` option, are left untouched.

    :return: True if the file was removed or removal was unnecessary,
        False when deletion failed.
    """
    if self.db_is_in_memory():
        # Nothing on disk to delete.
        return True
    keep_file = self.config.get('keep_sqlitedb', False)
    if keep_file:
        self.info(f"Keeping SQLite db file: {self.__db_context_handle}.")
        return True
    try:
        os.remove(self.__db_context_handle)
    except Exception as e:
        self.warning(f"Unable to remove sqlite db file '{self.__db_context_handle}'. Error: {e}")
        return False
    return True
def init_db(self) -> bool:
    """
    Create the ACL table in the freshly opened database.

    :return: True when the table-creation statement succeeded, False on SQL error.
    """
    create_stmt = self.config.get('sqlitedb_table_create', Defaults.sqlitedb_table_create)
    try:
        self.__db.cursor().execute(create_stmt)
    except sqlite3.Error as e:
        self.error(f"Unable to create table using query '{create_stmt}'. Error: {e}")
        return False
    return True
def db_is_in_memory(self):
    """Return True when the configured SQLite context is the in-memory one."""
    context = self.config.get('sqlitedb_context', Defaults.sqlitedb_context)
    return context == SQLiteContext.MEMORY
def db_cursor(self):
    """Return a fresh cursor on the open database connection."""
    return self.__db.cursor()
def db_commit(self):
    """Commit pending changes; a no-op for in-memory databases."""
    if not self.db_is_in_memory():
        self.__db.commit()
def db_add_acl_to_rule_list(self, acl_address, service_area_name):
    """
    Add a rule (and its source service area) to the rule database.

    If ``wildcard_replace_enabled`` is set, a wildcard in the address is
    rewritten to a single leading dot — the SQUID way of handling
    'wildcards'.  Addresses already present (in any service area) are
    counted as duplicates and skipped.

    FIXME: This function only works with DNS entries, NOT IPv4/6 ADDRESSES.
    It will mangle an IPv4 ADDRESS with a wildcard in it.

    :param acl_address: domain name, possibly containing a wildcard.
    :param service_area_name: rule list the address belongs to.
    :return: rowid of the inserted ACL, or -1 when it was a duplicate.
    """
    if self.config.get('wildcard_replace_enabled', Defaults.wildcard_replace_enabled):
        wildcard_regex_pattern = self.config.get('wildcard_regex_pattern', Defaults.wildcard_regex_pattern)
        # BUG FIX: the original compiled the same pattern twice (re.search,
        # then re.compile + sub).  subn() detects and replaces in one pass.
        pattern = re.compile(wildcard_regex_pattern)
        replaced, n_subs = pattern.subn('.', acl_address)
        if n_subs:
            self.__wildcard_adjustments += 1
            self.debug(f"ACL address '{acl_address}' contains a wildcard. Altered to '{replaced}'")
            acl_address = replaced

    # Check if the acl already exists in the rule set we are working on; if
    # it is there, count that we ignored a duplicate, otherwise add to list.
    sql_query = f"SELECT id, {Defaults.sqlitedb_column_address_name}, {Defaults.sqlitedb_column_service_area_name} FROM acls" \
                f" WHERE {Defaults.sqlitedb_column_address_name} = ?;"
    c = self.db_cursor()
    try:
        c.execute(sql_query, (acl_address,))
        rows = c.fetchall()
    finally:
        # BUG FIX: the original leaked this cursor on the duplicate path
        # (its close() was placed after the early return).
        c.close()
    if rows:
        existing_acl_service_area_name = rows[0][2]
        self.debug(
            f"Ignoring duplicate entry '{acl_address}' which would have been added to list '{service_area_name}'."
            f"Existing '{acl_address}' is in '{existing_acl_service_area_name}'")
        self.__duplicate_count += 1
        return -1

    sql_insert = f"INSERT OR IGNORE INTO acls(" \
                 f"{Defaults.sqlitedb_column_address_name}, " \
                 f"{Defaults.sqlitedb_column_service_area_name}) " \
                 f"VALUES (?,?);"
    c = self.db_cursor()
    try:
        c.execute(sql_insert, (acl_address, service_area_name))
        self.db_commit()
        return c.lastrowid
    finally:
        # BUG FIX: the insert cursor was never closed in the original.
        c.close()
def db_remove_acl_from_all_lists(self, acl_address):
    """
    Remove every occurrence of an address from the rule database,
    regardless of which service area it belongs to.

    :param acl_address: address whose rules should be excluded.
    """
    select_sql = f"SELECT id, {Defaults.sqlitedb_column_address_name}, {Defaults.sqlitedb_column_service_area_name} FROM acls" \
                 f" WHERE {Defaults.sqlitedb_column_address_name} = ?;"
    outer_cursor = self.db_cursor()
    outer_cursor.execute(select_sql, (acl_address,))
    matches = outer_cursor.fetchall()
    delete_sql = "DELETE FROM acls WHERE id = ?;"
    for match in matches:
        # Row layout: (id, address, service_area_name).
        matched_id = match[0]
        matched_area = match[2]
        self.debug(
            f"Found address entry '{acl_address}' in service area '{matched_area}'. Excluding..")
        inner_cursor = self.db_cursor()
        inner_cursor.execute(delete_sql, (matched_id,))
        self.__excluded_count += 1
        inner_cursor.close()
    # Single commit after all deletions (no-op for in-memory DBs).
    self.db_commit()
    outer_cursor.close()
    return
def db_analyse_api_rule_lists(self, endpoint_set):
    """
    Analyse object 'endpoint_set' returned from M365 API and populate the rule database.

    Filters results to the configured categories and to endpoints marked as
    required, then records domain names and/or IP addresses.  ServiceArea is
    used to generate the rule-set dictionary key; if 'collapse_acl_sets' is
    True the number of keys is reduced to the minimum, effectively
    de-duplicating as much as possible at the expense of destination
    granularity.

    :param endpoint_set: iterable of endpoint-set dicts from the M365 web service.
    """
    collapse_acl_sets = self.config.get('collapse_acl_sets', Defaults.collapse_acl_sets)
    categories_filter_include = self.config.get('categories_filter_include', Defaults.categories_filter_include)

    if self.config.get('address_filter_domains_enabled', Defaults.address_filter_domains_enabled):
        self.info("Analysing endpoints for domain names...")
        for endpointSet in endpoint_set:
            if endpointSet['category'] not in categories_filter_include:
                continue
            # Only 'required' endpoints are recorded.
            if not endpointSet.get('required', False):
                continue
            urls = endpointSet.get('urls', [])
            service_area = str(endpointSet['serviceArea']) if 'serviceArea' in endpointSet else ''
            if collapse_acl_sets:
                service_area_name = "M365-API-Source-domain"
            else:
                service_area_name = f"M365-API-Source-{service_area}-domain"
            for url in urls:
                self.db_add_acl_to_rule_list(str(url), service_area_name)

    # BUG FIX: the IPv6 lookup previously fell back to the *IPv4* default
    # (Defaults.address_filter_ipv4_enabled) — a copy-paste error.
    if self.config.get('address_filter_ipv4_enabled', Defaults.address_filter_ipv4_enabled) \
            or self.config.get('address_filter_ipv6_enabled', Defaults.address_filter_ipv6_enabled):
        self.info("Analysing endpoints for IPs...")
        for endpointSet in endpoint_set:
            if endpointSet['category'] not in categories_filter_include:
                continue
            if not endpointSet.get('required', False):
                continue
            ips = endpointSet.get('ips', [])
            # IPv4 strings have dots while IPv6 strings have colons
            ip4s = [ip for ip in ips if '.' in ip]
            ip6s = [ip for ip in ips if ':' in ip]
            service_area = str(endpointSet['serviceArea']) if 'serviceArea' in endpointSet else ''
            if collapse_acl_sets:
                service_area_name = "M365-API-Source-ip"
            else:
                service_area_name = f"M365-API-Source-{service_area}-ip"
            # NOTE(review): as in the original, *both* families are inserted
            # whenever either filter flag is enabled — confirm whether
            # per-family gating was intended.
            for ip in ip4s:
                self.db_add_acl_to_rule_list(str(ip), service_area_name)
            for ip in ip6s:
                self.db_add_acl_to_rule_list(str(ip), service_area_name)
def db_get_count_acls_in_rule_list(self) -> int:
    """
    Return the total number of ACLs in the 'rule_list' db.
    """
    c = self.db_cursor()
    try:
        c.execute("SELECT COUNT(*) FROM acls")
        # An aggregate query always yields exactly one row.
        return c.fetchone()[0]
    finally:
        # BUG FIX: the original never closed this cursor.
        c.close()
def db_analyse_rule_lists_for_subdomain_errors(self):
    """
    Analyse rule database to remove subdomain overlaps.

    For each ACL: if it is a wildcard entry (leading dot), any other entry
    that ends with it is deleted; otherwise the wildcard form ('.' + address)
    is looked up and, if a distinct row matches, that row is deleted.
    """
    self.info("Analysing rules for subdomain overlaps")
    c = self.db_cursor()
    sql_query = f"SELECT id, " \
                f"{Defaults.sqlitedb_column_address_name}, " \
                f"{Defaults.sqlitedb_column_service_area_name} " \
                f"FROM acls ORDER BY {Defaults.sqlitedb_column_service_area_name}"
    c.execute(sql_query)
    rows = c.fetchall()
    for row in rows:
        search_acl = ''
        # For every single ACL, check one isn't a subset of the other!
        acl_outer = str(row[1])
        if acl_outer.startswith('.'):
            # Wildcard entry: find any address that *ends with* it
            # (SQL suffix match via LIKE '%' || ?).
            search_acl = acl_outer
            sql_query2 = f"SELECT id, " \
                         f"{Defaults.sqlitedb_column_address_name}, " \
                         f"{Defaults.sqlitedb_column_service_area_name} " \
                         f"FROM acls WHERE {Defaults.sqlitedb_column_address_name} LIKE '%' || ?"
        else:
            # Plain entry: look up its wildcard counterpart exactly.
            # NOTE(review): this branch deletes the matching *wildcard* row
            # (the broader rule) and keeps the specific one — confirm that
            # is the intended direction of de-duplication.
            search_acl = "." + acl_outer
            sql_query2 = f"SELECT id, " \
                         f"{Defaults.sqlitedb_column_address_name}, " \
                         f"{Defaults.sqlitedb_column_service_area_name} " \
                         f"FROM acls WHERE {Defaults.sqlitedb_column_address_name}=?;"
        c2 = self.db_cursor()
        c2.execute(sql_query2, (search_acl,))  # comma makes it a tuple... don't ask
        rows2 = c2.fetchall()
        for row2 in rows2:
            acl_inner = str(row2[1])
            # A wildcard also suffix-matches itself; skip the identical row.
            if acl_inner != acl_outer:
                acl_inner_id = str(row2[0])
                sql_query3 = "DELETE FROM acls " \
                             "WHERE id=?"
                c3 = self.db_cursor()
                c3.execute(sql_query3, (acl_inner_id,))
                self.db_commit()
                c3.close()
                self.__domain_subset_duplicate_count += 1
                self.debug(f"Removed subdomain overlap outer: '{acl_outer}', inner: '{acl_inner}'")
        c2.close()
    c.close()
def db_get_unique_rule_sources(self):
    """
    Return the distinct source (service-area) names present in the rule database.

    :return: list of single-element row tuples, one per distinct source name.
    """
    cursor = self.db_cursor()
    cursor.execute(f"SELECT DISTINCT {Defaults.sqlitedb_column_service_area_name} FROM acls;")
    sources = cursor.fetchall()
    cursor.close()
    return sources
def db_get_rule_list(self):
    """
    Return the complete rule set from the rule database.

    :return: dict mapping each source (service-area) name to its sorted
        list of addresses.
    """
    local_rule_list = dict()
    for source in self.db_get_unique_rule_sources():
        if isinstance(source, tuple):
            # fetchall() yields single-element row tuples.
            source = str(source[0])
        # CONSISTENCY FIX: the original hard-coded 'address' in ORDER BY
        # while every other query uses the Defaults column-name constant;
        # use the constant here too.
        sql_query = f"SELECT {Defaults.sqlitedb_column_address_name} " \
                    f"FROM acls " \
                    f"WHERE {Defaults.sqlitedb_column_service_area_name} = ? " \
                    f"ORDER BY {Defaults.sqlitedb_column_address_name} ASC;"
        c = self.db_cursor()
        c.execute(sql_query, (source,))
        addresses = c.fetchall()
        c.close()
        local_rule_list[source] = [str(address[0]) for address in addresses]
    return local_rule_list
def m365_web_service_get_rule_set(self, method_name, global_instance_name, client_request_id) -> dict:
    """
    Communicate with MS 365 Web Service to obtain json object with endpoint information.

    Argument 'clientRequestId' is a GUID.  All zeros returns most recent changes only;
    the hardcoded value 'b10c5ed1-bad1-445f-b386-b919946339a7' should return the
    complete set since 2018.

    Portions from https://geektechstuff.com/2018/07/06/microsoft-office-365-endpoints-v1-python/
    Portions from https://support.office.com/en-us/article/managing-office-365-endpoints-99cab9d4-ef59-4207-9f2b-3728eb46bf9a?ui=en-US&rs=en-US&ad=US#ID0EACAAA=4._Web_service
    """
    request_url_base: str = self.config.get('m365_web_service_url', Defaults.m365_web_service_url)
    self.info(f"Contacting M365 web service for ruleset: '{request_url_base}' "
              f"using clientRequestId: '{client_request_id}'")
    # Assemble <base>/<method>/<instance>?clientRequestId=<guid> in one expression.
    request_path = (
        f"{request_url_base}/{method_name}/{global_instance_name}"
        f"?clientRequestId={client_request_id}"
    )
    self.debug(f"Full M365 request path: '{request_path}'")
    with urllib.request.urlopen(urllib.request.Request(request_path)) as response:
        return json.loads(response.read().decode())
def m365_web_service_get_version_data(self, client_request_id: str,
                                      global_instance_name: str = Defaults.m365_service_instance_name):
    """
    Fetch version metadata for the published M365 endpoint data.

    :param client_request_id: GUID identifying this client request.
    :param global_instance_name: optional service instance; omitted from the
        URL when falsy.
    :return: decoded JSON version object from the web service.
    """
    base_url: str = self.config.get('m365_web_service_url', Defaults.m365_web_service_url)
    self.info(f"Contacting M365 web service for version data: '{base_url}'")
    segments = [base_url, 'version']
    if global_instance_name:
        segments.append(global_instance_name)
    request_path = '/'.join(segments) + '?clientRequestId=' + client_request_id
    self.debug(f"Full M365 request path: '{request_path}'")
    with urllib.request.urlopen(urllib.request.Request(request_path)) as response:
        return json.loads(response.read().decode())
def main(self) -> int:
"""Main function"""
if self.config.get('sqlitedb_context', Defaults.sqlitedb_context):
self.config.setdefault('sqlitedb_file_path',
self.config.get('sqlitedb_context_memory', Defaults.sqlitedb_context_memory))
else:
self.config.setdefault('sqlitedb_file_path',
self.config.get('sqlitedb_context_file', Defaults.sqlitedb_context_file))
sqlitedb_context_handle: str = self.config.get('sqlitedb_file_path', None)
# If no sqlite db output specified, generate a temp file to use?
# FIXME:
if not sqlitedb_context_handle:
db_path = tempfile._get_default_tempdir()
db_name = next(tempfile._get_candidate_names()) + ".db"
sqlitedb_context_handle = os.path.join(db_path, db_name)
try:
self.open_db(sqlitedb_context_handle)
except Exception as e:
return self.error_quit(f"Unable to open sqlite | |
<reponame>bopopescu/TSAR-AI<gh_stars>0
import os
from pprint import pprint
import numpy as np
import click
import geopandas as gpd
import logging
import spectral.io.envi as s_envi
import yaml
from ocli.ai.Envi import Envi, header_transform_map_for_zone
from pathlib import Path
from spectral.io.spyfile import FileNotFoundError
from ocli.ai.recipe import Recipe
from ocli.ai.util import Filenames
from ocli.cli import output
from ocli.cli.ai_options import option_locate_recipe, option_list, option_slice, resolve_recipe, argument_zone, \
option_save, option_tnorm, option_clip, option_hist, option_data_path, option_columns, option_bands, option_gauss, \
option_tensor_vis, option_stack_vis, COMMON_MATH_TENSOR_HELP, COMMON_MATH_STACK_HELP
from ocli.cli.output import OCLIException
from ocli.cli.state import Repo, Task, pass_task, pass_repo
from ocli.preview import preview_stack, preview_tnsr, preview_cluster, create_stack_rgb, _vis_rgb, \
create_tensor_rgb, read_tensor
from ocli.util.docstring_parameter import docstring_parameter
log = logging.getLogger()
def get_tensor_df(tnsr_hdr_fname):
    """Read an ENVI tensor header and list its bands as a GeoDataFrame.

    :param tnsr_hdr_fname: path to the tensor ENVI .hdr file
    :return: tuple(full_shape, GeoDataFrame) — full_shape is the (rows, cols)
        of the tensor; the frame has 'name' and 'resolution' columns, one
        row per band
    """
    e = s_envi.open(tnsr_hdr_fname)
    # Spatial dimensions only; the third axis (bands) is reported per-row.
    full_shape = e.shape[:2]
    bn = e.metadata['band names']
    df = gpd.GeoDataFrame([[b, f'{full_shape[0]}x{full_shape[1]}'] for b in bn],
                          columns=['name', 'resolution'])
    return full_shape, df
def _show_tnsr_list(tnsr_hdr_fname, df=None):
    """Print the band table of a tensor HDR, loading it first when *df* is not given."""
    output.comment(f'tensor HDR: {tnsr_hdr_fname}')
    if df is None:
        try:
            _, df = get_tensor_df(tnsr_hdr_fname)
        except FileNotFoundError as err:
            # Surface a missing tensor file as a CLI-level error.
            raise OCLIException(f"{err}")
    output.table(df, showindex=True, headers=['band', 'name', 'resolution'])
def get_stack_df(dir_path):
    """Collect the ENVI stack products found in *dir_path* into a GeoDataFrame.

    :param dir_path: path to stack folder
    :return: tuple(full_shape, GeoDataFrame) — full_shape is the shape of the
        last product read (empty list when the folder has no .hdr files)
    """
    names = [f[:-4] for f in os.listdir(dir_path) if f.endswith('.hdr')]
    df = gpd.GeoDataFrame([[n, None, None, None, None] for n in names],
                          columns=['filename', 'geometry', 'resolution', 'interleave', 'path'])
    full_shape = []
    for idx, row in df.iterrows():
        base = os.path.join(dir_path, row['filename'])
        img = s_envi.open(base + '.hdr')
        df.at[idx, 'resolution'] = f"{img.shape[0]}x{img.shape[1]}"
        df.at[idx, 'interleave'] = img.interleave
        df.at[idx, 'path'] = base
        # As in the original implementation, the last product's shape wins.
        full_shape = img.shape
    return full_shape, df
# ####################################### PREVIEW #######################################################
# todo move to other package, make checks for plot libs
# todo draw ROI on stack
# todo draw ROI on tensor
# todo draw ROI on cluster
def _show_plt(_plt, save=None):
    """Display a matplotlib figure, or — when *save* is truthy — write it to disk and close it."""
    if not save:
        _plt.show()
        return
    _plt.savefig(save)
    _plt.close()
    output.success(f'image saved to file "{Path(save).absolute()}"')
def _sqave_envy_tnsr(tnsr, export, band_names, data_path, georef, slice_range, title):
    """Write a tensor to an ENVI header/image pair at *export*(.hdr/.img).

    The header is cloned from *georef* + '.hdr' and patched with the tensor's
    shape, band names and description before the raw bytes are dumped.
    NOTE(review): the name looks like a typo for `_save_envi_tnsr`; kept
    unchanged because callers outside this view may reference it.
    """
    envi = Envi({
        'DATADIR': data_path
    }, cos=None)
    # Clone georeferencing from the source product's header.
    _, envi_header = envi.read_header(georef + '.hdr', is_fullpath=True)
    # slice_range[0] == -1 appears to mean "no zone cut" — only shift the
    # map projection when an actual zone was sliced out. TODO confirm.
    if slice_range[0] != -1:
        envi_header['map info'] = header_transform_map_for_zone(envi_header, zoneY=int(slice_range[0]),
                                                                zoneX=int(slice_range[1]))
    envi_header['data type'] = 1  # BYTE
    envi_header['description'] = '{' + title + '}'
    envi_header['lines'] = tnsr.shape[0]
    envi_header['samples'] = tnsr.shape[1]
    envi_header['bands'] = tnsr.shape[2]
    envi_header['band names'] = "{" + ",".join(band_names) + "}"
    envi.save_dict_to_hdr(export + '.hdr', envi_header)
    # Raw dump; assumes tnsr dtype matches the declared BYTE type — TODO confirm.
    tnsr.tofile(export + '.img')
def _save_envi_rgb(r, g, b, export, data_path, georef, slice_range, title):
    """Write three channel arrays as a 3-band RGB ENVI product at *export*(.hdr/.img).

    The header is cloned from *georef* + '.hdr'; the channels are stacked on
    a new trailing axis (lines, samples, 3).
    """
    envi = Envi({
        'DATADIR': data_path
    }, cos=None)
    _, envi_header = envi.read_header(georef + '.hdr', is_fullpath=True)
    # _data = (np.clip(np.stack((r, g, b), axis=-1) * 255.5, 0, 255)).astype(np.uint8)  # type: np.ndarray
    # NOTE(review): channels are written without clipping/scaling (see the
    # commented-out variant above) even though the header declares BYTE data
    # — confirm the inputs are already uint8-compatible.
    _data = np.stack((r, g, b), axis=-1)  # type: np.ndarray
    # slice_range[0] == -1 appears to mean "no zone cut". TODO confirm.
    if slice_range[0] != -1:
        envi_header['map info'] = header_transform_map_for_zone(envi_header, zoneY=int(slice_range[0]),
                                                                zoneX=int(slice_range[1]))
    # _data = (np.stack((r, g, b), axis=-1))  # type: np.ndarray
    envi_header['data type'] = 1  # BYTE
    envi_header['description'] = '{' + title + '}'
    envi_header['lines'] = _data.shape[0]
    envi_header['samples'] = _data.shape[1]
    envi_header['bands'] = _data.shape[2]
    envi_header['band names'] = "{R, G, B}"
    envi.save_dict_to_hdr(export + '.hdr', envi_header)
    _data.tofile(export + '.img')
    # img_as_ubyte(_data).tofile(export + '.img')
# img_as_ubyte(_data).tofile(export + '.img')
def _resolve_tensor_filenames(repo, task, zone, roi_id, data_path, recipe_path, tnorm) -> Filenames:
    """Resolve the Filenames bundle for a tensor, trying in order:
    explicit *data_path*, an explicit/resolvable recipe, then the task's
    ai_results directory.

    When *tnorm* is requested and the recipe lacks PREDICTOR_DIR, it is
    recovered from the tensor's process_info YAML.

    Raises:
        click.UsageError: ai_results could not be located
        OCLIException: the tnorm (PREDICTOR_DIR) could not be resolved
    """
    if not data_path:
        try:
            _recipe = recipe_path if recipe_path else resolve_recipe(repo, task, roi_id)
            recipe = Recipe(_recipe)
            output.comment(f'Using recipe file "{_recipe}"')
        except (RuntimeError, AssertionError, click.UsageError) as e:
            # No recipe available — fall back to the task's AI results dir.
            output.comment(f'Using tensor from ai_results')
            try:
                data_path = task.get_ai_results_path(full=True)
                if not os.path.isdir(data_path):
                    raise AssertionError(f'Directory "{data_path}" is not exists ')
                recipe = {'OUTDIR': data_path}
            except AssertionError as e:
                raise click.UsageError(f'Could not get ai_results: {e}')
    else:
        recipe = {'OUTDIR': data_path}
    # NOTE(review): here `recipe` may be either a Recipe object or a plain
    # dict — assumes Recipe supports `in` and item assignment. TODO confirm.
    if tnorm and 'PREDICTOR_DIR' not in recipe:
        try:
            _filenames = Filenames(zone, recipe)
            with open(_filenames.process_info, 'r') as f:
                _prcinfo = yaml.load(f, Loader=yaml.FullLoader)
            recipe['PREDICTOR_DIR'] = _prcinfo['process']['PREDICTOR_DIR']
        except Exception as e:
            raise OCLIException(f"Could not resolve tnorm file: {e}")
    return Filenames(zone, recipe)
def preview_math_options(f):
    """Placeholder decorator: currently attaches no options and returns *f* unchanged."""
    return f
def options_preview_stack(f):
    """Attach the common 'preview stack' CLI options to command *f*."""
    decorators = (
        option_save,
        option_hist,
        option_columns,
        option_bands,
        option_clip,
        option_slice,
        option_list(help_text='list available products'),
    )
    for decorate in decorators:
        f = decorate(f)
    return f
def options_preview_stack_math(f):
    """Attach the 'stack band math' CLI options (including -b1/-b2/-b3) to command *f*."""
    decorators = (
        option_save,
        option_slice,
        option_data_path,
        option_list(help_text='list available products'),
        click.option('-b1', '--band1', type=click.INT, help='b1 band file index', default=None),
        click.option('-b2', '--band2', type=click.INT, help='b2 band file index', default=None),
        click.option('-b3', '--band3', type=click.INT, help='b3 band file index', default=None),
        option_hist,
        option_stack_vis,
    )
    for decorate in decorators:
        f = decorate(f)
    return f
def options_preview_tnsr(f):
    """Attach the 'preview tensor' CLI options and zone argument to command *f*."""
    decorators = (
        argument_zone,
        option_data_path,
        option_save,
        option_list(help_text='list available bands'),
        option_columns,
        option_tnorm,
        option_hist,
        option_slice,
        option_bands,
    )
    for decorate in decorators:
        f = decorate(f)
    return f
def options_preview_tensor_math(f):
    """Attach the 'tensor band math' CLI options (including -b1/-b2/-b3) to command *f*."""
    decorators = (
        argument_zone,
        option_save,
        option_data_path,
        option_list(help_text='list available bands'),
        option_slice,
        click.option('-b1', '--band1', type=click.INT, help='b1 band file index', default=None),
        click.option('-b2', '--band2', type=click.INT, help='b2 band file index', default=None),
        click.option('-b3', '--band3', type=click.INT, help='b3 band file index', default=None),
        option_gauss,
        option_tnorm,
        option_hist,
        option_tensor_vis,
    )
    for decorate in decorators:
        f = decorate(f)
    return f
def options_preview_cluster(f):
    """Attach the 'preview cluster' CLI options and zone argument to command *f*."""
    decorators = (
        option_save,
        option_hist,
        option_columns,
        option_bands,
        option_list(help_text='list available products'),
        option_slice,
        argument_zone,
    )
    for decorate in decorators:
        f = decorate(f)
    return f
# Click command group `preview`: parent of the stack / cluster / tensor /
# *-math preview subcommands registered below. The docstring doubles as the
# CLI help text, so it is left unchanged.
@click.group('preview')
def ai_preview():
    """Preview """
    pass
@ai_preview.command('stack')
@options_preview_stack
@option_locate_recipe
@pass_task
@pass_repo
def ai_preview_stack(repo: Repo, task: Task, roi_id, recipe_path, slice_range,
                     show_list,
                     # rgb,
                     band, columns, clip, hist, save, export, ylog):
    """ Preview assembled tensor band
    ** use --clip <minl> <max> to apply np.log10(np.clip(.., 10**min, 10**max)) to stack values
    \b
    * Windows WSL: follow https://www.scivision.dev/pyqtmatplotlib-in-windows-subsystem-for-linux/
    """
    # Prefer an explicit recipe (DATADIR); fall back to the task's stack path.
    try:
        _recipe = recipe_path if recipe_path else resolve_recipe(repo, task, roi_id)
        recipe = Recipe(_recipe)
        _dir = recipe.get('DATADIR')
    except (RuntimeError, AssertionError, click.UsageError) as e:
        output.comment(f"Could not resolve recipe {e}, fall-back to task")
        try:
            _dir = task.get_stack_path('snap_path')
        except AssertionError as e:
            raise click.UsageError(f'Could not get stack path: {e}')
    except Exception as e:
        log.exception("Could not resolve Stack results")
        raise click.UsageError('Could not resolve Stack results')
    output.comment(f"Stack dir: {_dir}\n\n")
    full_shape, df = get_stack_df(_dir)
    if show_list:
        # Listing mode: print the product table and exit.
        output.table(df[['filename', 'resolution', 'path']], showindex=True,
                     headers=['band', 'name', 'resolution', 'path'])
    else:
        try:
            # if rgb:
            #     if len(rgb) != 3:
            #         raise AssertionError('rgb', '--rgb should contain exactly 3 digits without spaces')
            #     band = (int(rgb[0]), int(rgb[1]), int(rgb[2]))
            # band[0] == -1 is the sentinel for "all bands".
            if band[0] == -1:
                band = list(range(0, len(df)))
            else:
                band = list(band)
            _ds = df.iloc[band]  # type: gpd.GeoDataFrame
            output.table(_ds, showindex=True)
            _plt = preview_stack(_ds, _dir,
                                 full_shape=full_shape,
                                 slice_region=slice_range,
                                 band=band,
                                 clip=clip,
                                 columns=columns,
                                 hist=hist,
                                 ylog=ylog
                                 )
            _show_plt(_plt, save=save)
        except AssertionError as e:
            log.exception(e)
            raise click.UsageError(str(e))
@ai_preview.command('cluster')
@options_preview_cluster
@option_locate_recipe
@pass_task
@pass_repo
def ai_preview_cluster(repo: Repo, task: Task, roi_id, recipe_path, slice_range, show_list, band, columns,
                       # threshold,
                       zone,
                       hist, ylog,
                       save, export,
                       rgb=False
                       ):
    """ Preview assembled tensor band
    \b
    * Windows WSL: follow https://www.scivision.dev/pyqtmatplotlib-in-windows-subsystem-for-linux/
    """
    # Resolve the recipe and verify both cluster files exist up front.
    try:
        _recipe = recipe_path if recipe_path else resolve_recipe(repo, task, roi_id)
        recipe = Recipe(_recipe)
        filenames = Filenames(zone, recipe)
        pred8c_img = filenames.pred8c_img
        pred8c_hdr = filenames.pred8c_hdr
        # BUG FIX: error messages read "not fond" — corrected to "not found".
        if not os.path.isfile(pred8c_img):
            raise AssertionError(f"IMG file '{pred8c_img}' not found")
        if not os.path.isfile(pred8c_hdr):
            raise AssertionError(f"HDR file '{pred8c_hdr}' not found")
        pred8c_hdr = s_envi.open(filenames.pred8c_hdr)
    except (AssertionError) as e:
        raise click.UsageError(f"Could not visualize: {e}")
    if show_list:
        # Listing mode: print the band table and exit.
        output.comment(f'Cluster HDR: {filenames.pred8c_hdr}')
        x, y = pred8c_hdr.shape[:2]
        bn = pred8c_hdr.metadata['band names']
        bn = [[b, f'{x}x{y}'] for b in bn]
        output.table(bn, showindex=True, headers=['band', 'name', 'resolution'])
        return
    # if rgb:
    #     if len(rgb) != 3:
    #         raise click.BadOptionUsage('rgb', '--rgb should contain exactly 3 digits without spaces')
    #     band = (int(rgb[0]), int(rgb[1]), int(rgb[2]))
    # band[0] == -1 is the sentinel for "all bands".
    if band[0] == -1:
        band = list(range(0, pred8c_hdr.shape[2]))
    preview_cluster(filenames.pred8c_hdr, filenames.pred8c_img,
                    band=band,
                    slice_region=slice_range,
                    columns=columns,
                    rgb=rgb
                    )
@ai_preview.command('stack-math')
@options_preview_stack_math
@docstring_parameter(common=COMMON_MATH_STACK_HELP)
@pass_task
@pass_repo
def ai_preview_stack_math(repo: Repo, task: Task, roi_id, recipe_path, slice_range, show_list,
                          band1, band2, band3,
                          vis_mode, data_path, save, export, hist, ylog):
    """ Band math for stack
    {common}
    """
    # Resolve the data directory: explicit --data-path wins, then the recipe's
    # DATADIR, then the task's stack results.
    if not data_path:
        try:
            _recipe = recipe_path if recipe_path else resolve_recipe(repo, task, roi_id)
            recipe = Recipe(_recipe)
            data_path = recipe.get('DATADIR')
            # BUG FIX: report the recipe actually used (_recipe); recipe_path
            # may be None when the recipe was resolved via resolve_recipe().
            output.comment(f'Using recipe file "{_recipe}"')
        except (RuntimeError, AssertionError, click.UsageError) as e:
            output.comment(f'Using stack from task stack_results')
            try:
                data_path = task.get_stack_path('snap_path')
                if not os.path.isdir(data_path):
                    raise AssertionError(f'Directory "{data_path}" is not exists ')
            except AssertionError as e:
                raise click.UsageError(f'Could not get stack_results: {e}')
    output.comment(f"Stack dir: {data_path}\n\n")
    full_shape, df = get_stack_df(data_path)
    if show_list:
        # Listing mode: print the product table and exit.
        output.table(df, showindex=True)
    else:
        # Combine the three selected bands into an RGB composite.
        title, (r, g, b) = create_stack_rgb(band1, band2, band3,
                                            df=df,
                                            vis_mode=vis_mode,
                                            slice_range=slice_range,
                                            )
        if export:
            # Georeference the export from band1's source product.
            georef = df.iloc[band1].path
            _save_envi_rgb(r, g, b, export=export,
                           georef=georef, data_path=data_path, slice_range=slice_range,
                           title=title
                           )
        else:
            _plt = _vis_rgb(r, g, b, title, hist, ylog)
            _show_plt(_plt, save)
@ai_preview.command('tensor')
@options_preview_tnsr
@option_locate_recipe
@pass_task
@pass_repo
def ai_preview_tnsr(repo: Repo, task: Task, roi_id, recipe_path, show_list
, zone, slice_range
, band,
# rgb,
columns, hist, tnorm, save, ylog, export, data_path):
""" Preview assembled tensor band
\b
* Windows WSL: follow https://www.scivision.dev/pyqtmatplotlib-in-windows-subsystem-for-linux/ instructions
"""
filenames = _resolve_tensor_filenames(
repo, task,
zone=zone,
roi_id=roi_id,
data_path=data_path,
recipe_path=recipe_path,
tnorm=tnorm
)
output.comment(f"Data dir: {data_path}")
full_shape, df = get_tensor_df(filenames.tnsr_hdr)
if show_list:
_show_tnsr_list(filenames.tnsr_hdr, df=df)
return
try:
e = s_envi.open(filenames.tnsr_hdr)
except | |
<filename>ldt/dicts/semantics/lex_dictionary.py<gh_stars>10-100
# -*- coding: utf-8 -*-
""" Lexicographic Dictionary classes
This module implements the base Lexicographic dictionary class that is
inherited by classes for resources from which semantic relations can be
obtained. There is also a separate DictionaryWithDefinitions class for
resources which also provide lists of definitions and examples per word sense.
Basic functionality required in any subclass:
* retrieve the list of word with the specified relation;
* retrieve a dictionary with specified relations as values and lists of
related words as values
Todo:
* creation of default config file upon installation
* the right error path in NLTK tokenizer
"""
# import abc
from abc import ABCMeta, abstractmethod
from nltk.tokenize import word_tokenize
from ldt.dicts.dictionary import Dictionary
from ldt.helpers.exceptions import DictError
from ldt.helpers.resources import load_stopwords
from ldt.helpers.resources import lookup_language_by_code
from ldt.helpers.formatting import get_spacing_variants
from ldt.helpers.formatting import remove_text_inside_brackets
class LexicographicDictionary(Dictionary, metaclass=ABCMeta):
    """A super-class for resources with relations functionality.

    Subclasses implement :meth:`get_relations`; this base provides
    relation-name validation and shared post-processing of word lists.
    """

    def __init__(self, **kw):
        """ Initializing the base class.

        Args:
            lowercasing (bool): *True* if all data should be lowercased
        """
        super(LexicographicDictionary, self).__init__()
        # The "main" semantic relations every subclass is expected to know.
        self.main_relations = ("synonyms", "antonyms", "hyponyms",
                               "hypernyms", "meronyms")
        # Subclasses may extend this with resource-specific relations.
        self.supported_relations = self.main_relations

    def check_relation(self, relation):
        """Helper method for :meth:`get_relation`. Checks if a relation is
        supported by a given resource.

        Args:
            relation (str): the particular relation to check

        Returns:
            (str): the verified relation

        Raises:
            DictError: the requested relation is not supported
        """
        if relation not in self.supported_relations:
            raise DictError("Unknown relation. The supported relations are: " +
                            ", ".join(self.supported_relations))
        return relation

    def check_relations(self, relations, reduce=False):
        """Helper method for :meth:`get_relations`. Checks if relations are
        supported by a given resource.

        Args:
            reduce (bool): if *True*, any unknown relation encountered is
                dropped from the requested list (or None is returned for an
                unknown single "...nyms" string). Otherwise DictError arises.
            relations (tuple or string):
                * the particular relations to check,
                * "main" for a predefined list of main relations (synonyms,
                  antonyms, meronyms, hyponyms, hypernyms)
                * "all" for all supported relations

        Returns:
            (tuple): the verified relations

        Raises:
            DictError: the requested relations are not supported
        """
        if isinstance(relations, tuple):
            if not reduce:
                for i in relations:
                    if not i in self.supported_relations:
                        raise DictError("Unknown relation. The supported "
                                        "relations are: " +
                                        ", ".join(self.supported_relations))
            else:
                # reduce=True: silently keep only the supported relations.
                filtered_rels = [i for i in relations if i in
                                 self.supported_relations]
                relations = tuple(filtered_rels)
        elif isinstance(relations, str):
            if relations == "main":
                relations = self.main_relations
            elif relations == "all":
                relations = self.supported_relations
            elif "nyms" in relations:
                if reduce:
                    try:
                        relations = (self.check_relation(relations),)
                    except DictError:
                        return None
                # NOTE(review): with reduce=False a single "...nyms" string is
                # returned unvalidated and NOT wrapped in a tuple, unlike every
                # other branch — confirm callers handle a bare str here.
            else:
                relations = (relations,)
        return relations

    def get_relation(self, word, relation):
        """Wrapper for :meth:`get_relations` for one-relation use.

        Some resources like WordNet have detailed interfaces for different
        relations, while others (including BabelNet and Wiktionary) have
        a single interface for them all. This method can be simply inherited
        for the second type of resources, and overridden in the first type case.

        Args:
            word (str): the word to be looked up
            relation (str): the relation to look up

        Returns:
            (list): the list of words related to the target word with the
            specified relation
        """
        relation = self.check_relation(relation)
        # NOTE(review): `(relation)` is a parenthesised string, not a 1-tuple;
        # `(relation,)` may have been intended — check_relations() happens to
        # accept bare strings as well, so confirm before changing.
        res = self.get_relations(word, (relation))
        if res:
            if relation in res:
                return res[relation]
            # NOTE(review): implicit `return None` when res is non-empty but
            # lacks this relation — confirm callers expect None here vs [].
        else:
            return []

    @abstractmethod
    def get_relations(self, word, relations):
        """Stub for the compulsory method for all subclasses that
        returns the specified list of relations for the given word.

        Args:
            word (str): the word to be looked up
            relations (tuple, string): the relations to look up

        Returns:
            (dict): dictionary with relations as keys and lists of words as
            values
        """
        pass

    def post_process(self, wordlist):
        """Helper for processing the wordlists from different resources
        according to general config presets.

        At the moment, the results can be automatically lowercased
        (``self.lowercasing = True``).

        Args:
            wordlist: the list of words to process

        Returns:
            (list): post-processed, deduplicated list of words

        Todo:
            * partial matches
            * spacing for underscored words?
        """
        if self.lowercasing:
            wordlist = [w.lower() for w in wordlist]
        # if self.split_mwu:
        #     newres = []
        #     for mwu in wordlist:
        #         newres += get_spacing_variants(mwu)
        #     wordlist = newres
        # Deduplicate; note this does not preserve the original order.
        wordlist = list(set(wordlist))
        return wordlist
class DictionaryWithDefinitions(LexicographicDictionary, metaclass=ABCMeta):
    """A super-class for resources with definition functionality."""

    def __init__(self):
        """ Initializing the base class.

        Args:
            lowercasing (bool): *True* if all data should be lowercased
        """
        super(DictionaryWithDefinitions, self).__init__()

    def get_relations(self, word, relations):
        """Stub for the compulsory method for all subclasses that
        returns the specified list of relations for the given word.

        Args:
            word (str): the word to be looked up
            relations (tuple, string): the relations to look up

        Returns:
            (dict): dictionary with relations as keys and lists of words as
            values
        """
        raise NotImplementedError()

    @abstractmethod
    def get_definitions(self, word, remove_notes=True):
        """Stub for the compulsory method for all subclasses of
        DictionaryWithDefinitions that returns a sense inventory for the
        given word.

        Args:
            word (str): the word to be looked up
            remove_notes (bool): if *True*, attempts to remove the
                lexicographic notes such as *(obsolete)* from the definition

        Returns:
            (dict): dictionary with sense numbers as keys and
            subdictionaries with "def" and "ex" as values
        """
        raise NotImplementedError()

    def get_words_in_definitions(self, word, stopwords=False,
                                 remove_notes=True, examples="add"):
        """
        A method returning a list of words found in all definitions
        and/or examples of the target word.

        Args:
            word (str or dict): word to be queried for definitions, or a sense
                inventory dictionary returned by the :meth:`get_definitions`
                in the dictionaries that support it (WordNet, Wiktionary).
            remove_notes (bool): if *True*, attempts to remove the lexicographic
                notes such as *(obsolete)* from the definition
            stopwords (bool): if *True*, the words in NLTK stopword lists for
                the given language (if it exists) are filtered out
            examples (str): Possible values:
                * **add**: words in both examples and definitions are collected;
                * **only**: the words in definitions are ignored;
                * **ignore**: only words in definitions are returned.

        Returns:
            (list): a list of words in definitions of the target word,
            with or without examples and/or stopwords

        Raises:
            ValueError: *word* is neither a string nor a sense inventory
                dictionary keyed by sense numbers starting at 1
        """
        if isinstance(word, str):
            defs = self.get_definitions(word, remove_notes)
        elif isinstance(word, dict) and 1 in word.keys():
            defs = word
        else:
            # BUG FIX: previously this fell through with `defs` unbound,
            # producing a confusing NameError in the loop below.
            raise ValueError("word must be a string or a sense inventory "
                             "dictionary keyed by sense numbers")
        text = ""
        for i in defs.keys():
            if "def" in defs[i].keys() and examples != "only":
                text += " " + defs[i]["def"]
            if "ex" in defs[i].keys():
                if examples == "add" or examples == "only":
                    text += " " + " ".join(defs[i]["ex"])
        # Collapse double spaces introduced by the concatenation above.
        text = text.replace("  ", " ")
        if remove_notes:
            text = remove_text_inside_brackets(text)
        # Map 2-letter ISO codes to the full language names NLTK expects.
        if len(self.language) == 2:
            nltk_language = lookup_language_by_code(self.language).lower()
        else:
            nltk_language = self.language.lower()
        # NOTE: word_tokenize raises LookupError if no NLTK tokenizer exists
        # for nltk_language; deliberately left unhandled here.
        words = word_tokenize(text, language=nltk_language)
        if isinstance(words, str):
            words = words.split()
        words = list(set(words))
        if stopwords:
            stopwordlist = set(load_stopwords(nltk_language))
            words = [w for w in words if not w in stopwordlist]
        # todo: add cleanup of individual words
        return words
# poses
# lemmatization
# retrieving word forms
#
#
# def get_words_in_definitions(word, stopwords=False, lowercasing=
# lowercasing,
# remove_notes=True, examples = "add"):
# '''
#
# A function returning a list of words found in all WN definitions and/or
# examples of the target word
#
# Args:
#
# word (str): the word to look up
# lowercasing (Bool): if not set, the global config variable is used.
# True (default) lowercases all vocab.
# remove_notes (bool): if True, attempts to remove the
# lexicographic notes such as (obsolete) from the definition
# stopwords (bool): if True, the words in NLTK stopword lists for
# English are filtered out
# examples (str): if "add", words in both examples and definitions
# are collected. if "only", the words in definitions are ignored,
# if "ignore", only words in definitions are returned.
#
# Returns:
# (list): a list of words in WordNet definitions in the target word,
# with or without examples and/or stopwords
#
# '''
#
# defs = get_definitions(word, remove_notes=remove_notes, lowercasing=lowercasing)
# #
# # text = ""
# #
# # for i in defs.keys():
# # if "def" in defs[i].keys() and examples != "only":
# # text += " "+defs[i]["def"]
# # if "ex" in defs[i].keys():
# # if examples == "add" or examples == "only":
# # text += " " + defs[i]["ex"]
# #
# # text = text.replace(" ", " ")
# # words = text.split()
# # words = list(set(words))
# #
# # if stopwords:
# # stopWords = ldt.resources.load_stopwords(language)
# # words = [w for w in words if not w in stopWords]
# | |
import logging
import numpy as np
from astropy import units as u
from astropy.io import fits
from astropy import wcs
from astropy import coordinates
import katbeam
# Reuse katbeam's cosine-taper primary-beam profile.
# NOTE(review): this reaches into a private attribute of katbeam.jimbeam —
# may break on a katbeam upgrade.
_cosine_taper = katbeam.jimbeam._cosine_taper
# Mapping from 'BANDCODE' (FITS header) to katbeam model name
BAND_MAP = {'L': 'MKAT-AA-L-JIM-2020',
            'UHF': 'MKAT-AA-UHF-JIM-2020',
            'S': 'MKAT-AA-S-JIM-2020'}
def _circular_pattern(x, y, fwhm_x, fwhm_y):
    """Evaluate a circularised cosine-taper beam at the given coordinates.

    Parameters
    ----------
    x, y : arrays of float of the same shape
        Coordinates where beam is sampled, in degrees
    fwhm_x, fwhm_y : float or array of float
        Beam FWHM along each axis; their geometric mean is used as the
        width of the circular beam
    """
    beam_width = np.sqrt(fwhm_x * fwhm_y)
    radius = np.sqrt(x**2 + y**2)
    return _cosine_taper(radius / beam_width)
def read_fits(path):
    """Open a FITS file and return its primary HDU.

    Parameters
    ----------
    path : str
        FITS file

    Returns
    -------
    output_file : astropy.io.fits.hdu.image.PrimaryHDU
        First element of the HDU list
    """
    # NOTE(review): the HDUList is never closed; astropy keeps the file
    # memory-mapped while the returned HDU is in use — confirm intentional.
    hdu_list = fits.open(path)
    return hdu_list[0]
def get_position(path):
    """Determine the sky coordinate of the pointing centre.

    This implementation assumes that the pointing centre is the
    same as the crval of the celestial WCS.

    Parameters
    ----------
    path : str
        FITS file

    Returns
    -------
    phase_center : astropy.coordinates.SkyCoord
        Sky coordinate of the phase centre
    image_wcs : astropy.wcs.wcs.WCS
        WCS keywords in the primary HDU
    """
    # Parse the WCS keywords in the primary HDU.
    image_wcs = wcs.WCS(path)
    # CRVAL of the celestial sub-WCS is (RA, Dec) of the pointing centre.
    ra, dec = image_wcs.celestial.wcs.crval[:2]
    phase_center = coordinates.SkyCoord(ra, dec, unit=(u.deg, u.deg))
    return phase_center, image_wcs
def radial_offset(phase_center, image_wcs):
    """Compute the radial offset of every pixel from the phase centre, in degrees.

    Parameters
    ----------
    phase_center : astropy.coordinates.SkyCoord
        phase centre position
    image_wcs : astropy.wcs.wcs.WCS
        WCS keywords in the primary HDU

    Returns
    -------
    numpy.ndarray
        Flattened array of separations in degrees, one per image pixel
    """
    rows, cols = np.indices((image_wcs.array_shape[2], image_wcs.array_shape[3]))
    # Convert pixel coordinates to world coordinates (extra axes pinned to 0);
    # element [0] of the result is the celestial SkyCoord.
    sky = image_wcs.pixel_to_world(np.ravel(cols), np.ravel(rows), 0, 0)[0]
    return sky.separation(phase_center).deg
def central_freq(path):
    """Determine the central frequency of each frequency plane.

    Parameters
    ----------
    path : str
        FITS file

    Returns
    -------
    numpy.ndarray
        Central frequency of each plane, taken from the FREQxxxx header
        keywords (raw header values in Hz; callers divide by 1e6 for MHz).
    """
    header = read_fits(path).header
    # NSPEC is the number of frequency planes; FREQxxxx holds plane x's centre.
    return np.array([header['FREQ{0:04}'.format(i)]
                     for i in range(1, header['NSPEC'] + 1)])
def check_band_type(path):
    """Check band type using information from the FITS header and return the
    appropriate katbeam model name.

    Falls back to the observed frequency range when the BANDCODE header
    keyword is missing, and to L-band when neither matches.

    Parameters
    ----------
    path : str
        FITS file

    Returns
    -------
    output_file : str
        katbeam model name (a value of BAND_MAP)
    """
    raw_image = read_fits(path)
    band = raw_image.header.get('BANDCODE')
    if band is None:
        logging.warning('BANDCODE not found in the FITS header. Therefore, frequency ranges'
                        ' are used to determine the band.')
        freqs = central_freq(path)
        start_freq = freqs[0]/1e6
        end_freq = freqs[-1]/1e6
        if start_freq >= 856 and end_freq <= 1712:  # L-band
            band = 'L'
        elif start_freq >= 544 and end_freq <= 1087:  # UHF-band
            band = 'UHF'
        elif start_freq >= 1750 and end_freq <= 3500:  # S-band
            band = 'S'
        # If BANDCODE and frequency ranges fail, the L-band model is used by default.
        else:
            # BUG FIX: corrected log-message typo "Defalting" -> "Defaulting".
            logging.warning('Frequency ranges do not match. Defaulting to L-band frequency range.')
            band = 'L'
    model = BAND_MAP.get(band)
    # NOTE(review): a BANDCODE present in the header but absent from BAND_MAP
    # yields model=None here — confirm upstream guarantees 'L'/'UHF'/'S' only.
    logging.warning('The {} katbeam model for the {}-band is used.'.format(model, band))
    return model
def cosine_power_pattern(x, y, path, c_freq):
    """Compute power patterns for each given frequency.

    This uses the katbeam module.
    https://github.com/ska-sa/katbeam.git

    Parameters
    ----------
    x, y : arrays of float of the same shape
        Coordinates where beam is sampled, in degrees
    path : str
        FITS file
    c_freq : numpy array
        An array of central frequencies (Hz) for each frequency plane

    Returns
    -------
    list of numpy.ndarray
        One flattened Stokes-I beam pattern per frequency plane
    """
    # check the band type
    band_type = check_band_type(path)
    # return beam model for the band type
    circbeam = CircularBeam(band_type)
    flux_density = []
    for nu in c_freq:
        nu = nu/1.e6  # Hz to MHz (katbeam expects MHz)
        a_b = circbeam.I(x, y, nu)
        flux_density.append(a_b.ravel())
    return flux_density
def beam_pattern(path):
    """Compute the circularised beam pattern for every frequency plane of *path*.

    Parameters
    ----------
    path : str
        FITS file
    """
    c_freq = central_freq(path)
    phase_center, image_wcs = get_position(path)
    ny, nx = image_wcs.array_shape[2], image_wcs.array_shape[3]
    # Radial separation of each pixel from the phase centre; y is zeroed
    # because the beam is circularised, so only the radius matters.
    x = radial_offset(phase_center, image_wcs).reshape(ny, nx)
    y = np.zeros((ny, nx))
    return cosine_power_pattern(x, y, path, c_freq)
def standard_deviation(data):
    """Return (median, robust standard-deviation estimate) of *data*.

    The estimate is the scaled median absolute deviation (MAD), which is
    insensitive to outliers; NaNs are ignored.
    """
    MAD_TO_SD = 1.4826  # MAD -> SD scale factor for a normal distribution
    center = np.nanmedian(data)
    mad = np.nanmedian(np.abs(data - center))
    return center, MAD_TO_SD * mad
def inverse_variance(data):
    """Estimate the inverse variance of *data* via iterative sigma-clipping.

    Zeros and non-finite values are discarded first, then samples more than
    5 sigma from the median are rejected repeatedly (at most 50 rounds)
    until no more pixels are rejected. Returns 0.0 when nothing survives
    the initial filter.
    """
    data = data[(data != 0.0) & np.isfinite(data)]
    if not len(data):
        return 0.0
    med, sd = standard_deviation(data)
    for _ in range(50):
        previous_sd = sd
        keep = np.abs(data - med) < 5.0 * sd
        if not keep.any():
            # Every sample rejected: fall back to the current estimate.
            return 1 / sd ** 2
        data = data[keep]
        med, sd = standard_deviation(data)
        if sd == 0.0:
            # Degenerate clip; use the last non-zero estimate.
            return 1 / previous_sd ** 2
    return 1 / sd ** 2
def weighted_average(arr, weights):
    """Average *arr* over its first axis (frequency planes) using per-plane *weights*."""
    return np.average(arr, weights=weights, axis=0)
def primary_beam_correction(beam_pattern, raw_image, px_cut=0.1):
    """Correct the effects of the primary beam.

    Each frequency plane is divided by its beam pattern, then the planes
    are combined in an inverse-variance weighted average.

    Parameters
    ----------
    beam_pattern : numpy array
        Array of beam patterns, one flattened pattern per frequency plane.
        NOTE: mutated in place — pixels below `px_cut` are set to NaN.
    raw_image : astropy.io.fits.hdu.image.PrimaryHDU
        First element of the HDU list
    px_cut : float
        Threshold to cut off all the pixels with attenuated flux less than
        the value (default 0.1, i.e. 10% of the peak).

    Returns
    -------
    numpy.ndarray
        Corrected image with shape (1, 1, ny, nx)
    """
    # NTERM planes at the start of the cube are skipped (non-spectral terms).
    nterm = raw_image.header['NTERM']
    weight = []
    pbc_image = []
    # Mask pixels whose beam gain (in the highest-frequency plane) is at or
    # below px_cut of the peak.
    beam_mask = beam_pattern[-1] <= px_cut
    for i in range(len(beam_pattern)):
        # Blank the masked pixels in this plane's beam (in-place NaN).
        beam_pattern[i][beam_mask] = np.nan
        # Inverse variance (weight) of each frequency plane,
        # measured before primary beam correction.
        weight.append(inverse_variance(np.ravel(raw_image.data[0, i+nterm, :, :])))
        # Correct the beam effect by dividing by the beam pattern.
        ratio = np.ravel(raw_image.data[0, i + nterm, :, :]) / beam_pattern[i]
        pbc_image.append(ratio)
    # Convert primary-beam-corrected (pbc) and weight lists into numpy arrays.
    pbc_image = np.array(pbc_image)
    weight = np.array(weight)
    # Weighted average across the frequency planes.
    corr_image = weighted_average(pbc_image, weight)
    # Restore the (1, 1, ny, nx) FITS cube layout.
    corr_image = corr_image.reshape(1, 1, raw_image.data.shape[2], raw_image.data.shape[3])
    return corr_image
def _get_value_from_history(keyword, header):
"""
Return the value of a keyword from the FITS HISTORY in header.
Assumes keyword is found in a line of the HISTORY with format: 'keyword = value'.
Parameters
----------
keyword : str
keyword to search for such as BMAJ, CLEANBMJ and BMIN
header : astropy header
Image header
"""
for history in header['HISTORY']:
line = history.replace('=', ' ').split()
try:
ind = line.index(keyword)
except ValueError:
continue
return line[ind + 1]
raise KeyError(f'{keyword} not found in HISTORY')
def write_new_fits(pbc_image, path, outputFilename):
    """
    Write a new FITS image whose first plane is the primary-beam-corrected
    continuum.

    Parameters
    ----------
    pbc_image : numpy array
        Corrected image cube, shape (1, 1, ny, nx)
    path : str
        Original FITS file (source of the header)
    outputFilename : str
        Destination path; overwritten when it already exists
    """
    source_header = read_fits(path).header
    newhdr = source_header.copy()
    # Collapse the frequency axis: the output carries a single plane.
    newhdr['CTYPE3'] = 'FREQ'
    newhdr['NAXIS3'] = 1
    newhdr['CDELT3'] = 1.0
    try:
        if 'CLEANBMJ' in newhdr and newhdr['CLEANBMJ'] > 0:
            # Beam keywords are available directly in the header.
            newhdr['BMAJ'] = newhdr['CLEANBMJ']
            newhdr['BMIN'] = newhdr['CLEANBMN']
            newhdr['BPA'] = newhdr['CLEANBPA']
        else:
            # Fall back to values recorded in the HISTORY cards.
            for key in ('BMAJ', 'BMIN', 'BPA'):
                newhdr[key] = float(_get_value_from_history(key, newhdr))
    except KeyError:
        logging.error('Exception occurred, keywords not found', exc_info=True)
    new_hdu = fits.PrimaryHDU(header=newhdr, data=pbc_image)
    return new_hdu.writeto(outputFilename, overwrite=True)
class CircularBeam(katbeam.JimBeam):
    def HH(self, x, y, freqMHz):
        """Calculate the H co-polarised beam at the provided coordinates.

        Parameters
        ----------
        x, y : arrays of float of the same shape
            Coordinates where beam is sampled, in degrees
        freqMHz : float
            Frequency, in MHz

        Returns
        -------
        HH : array of float, same shape as `x` and `y`
            The H co-polarised beam
        """
        # Frequency-dependent squint and FWHM from the parent JimBeam model;
        # the squint component is ignored here.
        squint, fwhm = self._interp_squint_fwhm(freqMHz)
        # NOTE(review): both FWHM components are forwarded even though the
        # class is named CircularBeam -- presumably _circular_pattern
        # symmetrises them; confirm against its definition.
        return _circular_pattern(x, y, fwhm[0], fwhm[1])
def VV(self, x, y, freqMHz):
"""Calculate the V co-polarised beam at the provided coordinates.
Parameters
----------
x, y : arrays of float of the same shape
Coordinates where beam is sampled, in degrees
freqMHz : float
Frequency, in MHz
Returns
-------
VV : array of | |
<reponame>mbr4477/astroflux<filename>python/astrofluxlib.py
import numpy as np
import matplotlib.pyplot as plt
from astropy.io import fits
from astropy.visualization.wcsaxes.frame import EllipticalFrame
from astropy.coordinates import SkyCoord, AltAz, EarthLocation, ICRS
from astropy import units as u
from astropy.time import Time
from astropy.wcs import WCS
from scipy.signal import convolve2d
class SkyMap(object):
    """Abstract base class for sky maps sampled at equatorial coordinates."""

    def get_temp_mk(self, ra_dec_samples):
        """Sample the sky temperature at the given positions.

        Parameters
        ----------
        ra_dec_samples : ndarray
            Nx2 matrix of (ra, dec) coordinates to sample from.

        Returns
        -------
        pixel_temps : ndarray
            Nx1 matrix of pixel temperatures in mK. This base implementation
            is a stub (returns None); subclasses provide the real sampling.
        """
        pass
class FITSSkyMap(SkyMap):
    """
    A FITS sky map utility backed by an image HDU with a WCS solution.
    """
    def __init__(self, fits_path):
        """
        Parameters
        ----------
        fits_path : str
            path to FITS sky file

        Fix: the original called ``fits.open(fits_path)[1]`` and never closed
        the returned HDUList, leaking the file handle. A context manager now
        guarantees the file is closed; the pixel array is copied into memory
        first so it does not depend on a live memory map after close.
        """
        with fits.open(fits_path) as hdulist:
            hdu = hdulist[1]
            self.image_data = np.array(hdu.data)
            self.w = WCS(hdu.header)

    def get_temp_mk(self, ra_dec_samples):
        """
        Parameters
        ----------
        ra_dec_samples : ndarray
            Nx2 matrix of (ra, dec) coordinates to sample from
        Returns
        -------
        pixel_temps : ndarray
            Nx1 matrix of pixel temperatures in mK
        """
        targets = SkyCoord(ra_dec_samples[:, 0] * u.degree, ra_dec_samples[:, 1] * u.degree)
        # World -> pixel, then floor to integer indices.
        coordspix = np.array(targets.to_pixel(self.w)).T
        idx = np.floor(coordspix).astype(int)
        # FITS images index as [row(y), col(x)].
        image = self.image_data[idx[:, 1], idx[:, 0]]
        return image
class CrossSky(SkyMap):
    """Debug sky map that renders a fixed cross regardless of pointing."""

    def get_temp_mk(self, ra_dec_samples):
        """Return a flattened NxN cross image, N = sqrt(number of samples)."""
        side = int(np.sqrt(ra_dec_samples.shape[0]))
        canvas = np.zeros((side, side))
        # Vertical bar of the cross.
        canvas[int(side / 4):int(3 * side / 4), int(side / 2 - 2):int(side / 2 + 2)] = 1
        # Horizontal bar of the cross.
        canvas[int(side / 2 - 2):int(side / 2 + 2), int(side / 4):int(3 * side / 4)] = 1
        return canvas.reshape(-1)
class StarSky(SkyMap):
    """Debug sky map showing a fixed, randomly generated star field."""

    def __init__(self, N):
        """
        Parameters
        ----------
        N : int
            image dimension in pixels
        """
        field = np.zeros((N, N))
        # Scatter five single-pixel "stars" at random positions; the field is
        # drawn once here, so every later call returns the same image.
        for _ in range(5):
            pos = np.random.rand(2) * N
            field[int(pos[0]), int(pos[1])] = 1
        self.image = field

    def get_temp_mk(self, ra_dec_samples):
        # Pointing is ignored: the same flattened star field is returned.
        return self.image.reshape(-1)
class Observation(object):
    """One pointing: target (ra, dec), site (lat, lon), start time, length."""

    def __init__(self, ra, dec, lat, lon, timestamp, duration=0):
        """
        Parameters
        ----------
        ra : float
            RA coordinate of target in degrees
        dec : float
            DEC coordinate of target in degrees
        lat : float
            latitude in degrees
        lon : float
            longitude in degrees
        timestamp : str
            ISO 8601 date-time string
        duration : float
            observation length in hours
        """
        self.ra, self.dec = ra, dec
        self.lat, self.lon = lat, lon
        self.timestamp = timestamp
        self.duration = duration

    def alt_az(self):
        """
        Returns
        -------
        alt_az : ndarray
            1x2 matrix of (alt, az) coordinates in degrees
        """
        target = np.array([[self.ra, self.dec]])
        return convert_ra_dec_to_alt_az(target, self.lat, self.lon, self.timestamp)
def parabolic_beamwidth(dishsize, wavelength, degrees=False):
    """Beamwidth of a parabolic dish (lambda / D approximation).

    Parameters
    ----------
    dishsize : float
        diameter of dish in meters
    wavelength : float
        wavelength in meters
    degrees : bool
        if `True` return beamwidth in degrees, else radians

    Returns
    -------
    beamwidth : float
        beamwidth in radians or degrees
    """
    radians = wavelength / dishsize
    if degrees:
        return radians / np.pi * 180
    return radians
def generate_alt_az_samples(beamwidth_deg, alt, az, samples_per_dim):
    """Grid of (alt, az) sample points covering one beamwidth about a pointing.

    Parameters
    ----------
    beamwidth_deg : float
        antenna beamwidth in degrees
    alt : float
        altitude in degrees
    az : float
        azimuth in degrees
    samples_per_dim : int
        samples per dimension

    Returns
    -------
    alt_az_samples : ndarray
        samples_per_dim^2 x 2 matrix of (alt, az) coordinates in degrees
    """
    # Offsets spanning [-0.5, 0.5) of a beamwidth, in degrees.
    offsets = np.arange(-0.5, 0.5, 1 / samples_per_dim) * beamwidth_deg
    alt_grid, az_grid = np.meshgrid(offsets + alt, offsets + az)
    return np.stack((alt_grid.ravel(), az_grid.ravel()), axis=1)
def convert_alt_az_to_ra_dec(alt_az_samples, lat, lon, timestamp):
    """
    Convert horizontal (alt, az) coordinates to equatorial (ra, dec).

    Parameters
    ----------
    alt_az_samples : ndarray
        Nx2 matrix of (alt, az) coordinates in degrees
    lat : float
        latitude of the observer in degrees
    lon : float
        longitude of the observer in degrees
    timestamp : str
        ISO 8601 date-time string of the observation

    Returns
    -------
    ra_dec_samples : ndarray
        Nx2 matrix of (ra, dec) coordinates in degrees
    """
    # Observer location; sea level (height=0) is assumed.
    location = EarthLocation(
        lat=lat*u.degree,
        lon=lon*u.degree,
        height=0
    )
    # Horizontal-frame coordinates at the given time and place.
    coords = AltAz(
        alt=alt_az_samples[:,0]*u.degree,
        az=alt_az_samples[:,1]*u.degree,
        obstime=Time(timestamp),
        location=location
    )
    # Transform into the International Celestial Reference System.
    coords = coords.transform_to(ICRS())
    return np.array([coords.ra.degree, coords.dec.degree]).T
def convert_ra_dec_to_alt_az(ra_dec_samples, lat, lon, timestamp):
    """
    Convert equatorial (ra, dec) coordinates to horizontal (alt, az).

    Parameters
    ----------
    ra_dec_samples : ndarray
        Nx2 matrix of (ra, dec) coordinates in degrees
    lat : float
        latitude of observation in degrees
    lon : float
        longitude of observation in degrees
    timestamp : str
        ISO 8601 date-time string of observation

    Returns
    -------
    alt_az_samples : ndarray
        Nx2 matrix of (alt, az) coordinates in degrees
    """
    c = SkyCoord(ra=ra_dec_samples[:,0]*u.degree, dec=ra_dec_samples[:,1]*u.degree, frame='icrs')
    # Observer location; sea level (height=0) is assumed.
    location = EarthLocation(lat=lat*u.degree, lon=lon*u.degree, height=0)
    aa = c.transform_to(AltAz(obstime=Time(timestamp), location=location))
    return np.array([aa.alt.degree, aa.az.degree]).T
def create_antenna_beam_lm_samples(beamwidth_deg, samples_per_dim):
    """Sample an (l, m) direction-cosine grid covering one antenna beam.

    Parameters
    ----------
    beamwidth_deg : float
        beamwidth of the antenna in degrees
    samples_per_dim : int
        samples per dimension of the grid

    Returns
    -------
    lm_grid_samples : ndarray
        samples_per_dim^2 x 2 matrix of (l, m) direction cosine pairs
        for the given beamwidth
    """
    # One axis of offsets, scaled from degrees into direction cosines (/90).
    axis = np.arange(-0.5, 0.5, 1 / samples_per_dim) * beamwidth_deg / 90
    grid_l, grid_m = np.meshgrid(axis, axis)
    return np.stack((grid_l.ravel(), grid_m.ravel()), axis=1)
def generate_antenna_signals(antenna_xy, antenna_beam_lm_samples, pixelvalues, wavelength, snr=None, samples=1):
    """
    Parameters
    ----------
    antenna_xy : ndarray
        J x 2 matrix of positions
    antenna_beam_lm_samples : ndarray
        N^2 x 2 matrix of sampled direction cosine
        lm grid points for individual dish beam
    pixelvalues : ndarray
        N^2 x 1 matrix of pixel values corresponding to the sampled lm plane
    wavelength : float
        wavelength in meters
    snr : float | None
        signal-to-noise-ratio of the sky data or None if no noise
    samples : int
        number of samples to average for this short term interval
    Returns
    -------
    antenna_signals : ndarray
        Jx1 vector of antenna output (assume heterodyned)
    """
    # normalize the pixel values to [0, 1].
    # NOTE(review): divides by (max - min); a constant-valued sky yields a
    # zero denominator -- confirm callers never pass a flat image.
    px = (pixelvalues - np.amin(pixelvalues)) / (np.amax(pixelvalues) - np.amin(pixelvalues))
    if snr:
        # NOTE(review): 10**(-snr/20) followed by sqrt gives an effective
        # amplitude of 10**(-snr/40) -- verify the intended dB convention.
        noisepower = 10**(-snr/20)
        noisemag = np.sqrt(noisepower)
        dynamic_range = 4*noisemag
        offset = dynamic_range*0.5  # NOTE(review): computed but never used
        withnoise = np.zeros(px.shape)
        # Average `samples` noisy, coarsely quantised realisations of the sky.
        for i in range(samples):
            noise = np.random.randn(px.shape[0])*noisemag
            withnoise += np.abs(np.round((px + noise) / dynamic_range))
        px = withnoise / samples
    # Geometric phase of each sampled sky direction at each antenna
    # (N^2 x J), then the complex phasor with engineering sign convention.
    phase_delays = 2*np.pi*antenna_beam_lm_samples.dot(antenna_xy.T/wavelength)
    phase_delays = np.exp(-1j * phase_delays)
    # Sum sky contributions per antenna: (J x N^2) . (N^2,) -> (J,).
    rx = phase_delays.T.dot(px)
    rx = rx.reshape(rx.shape[0],1)
    return rx
def xcorr_signals(signals):
    """Cross-correlate every pair of antenna signals.

    Parameters
    ----------
    signals : ndarray
        JxM matrix of stacked input signals

    Returns
    -------
    xcorr_matrix : ndarray
        JxJ matrix whose (i, j) entry is signal_i . conj(signal_j)
    """
    conjugate_transpose = signals.conj().T
    return signals.dot(conjugate_transpose)
def propagate_antennas(antenna_xy, elapsed):
    """Rotate antenna positions to account for Earth rotation.

    Parameters
    ----------
    antenna_xy : ndarray
        Jx2 matrix of antenna (x,y) positions
    elapsed : float
        hours to propagate (one full turn per 24 h)

    Returns
    -------
    antenna_xy : ndarray
        Jx2 matrix of propagated positions
    """
    theta = elapsed / 24 * 2 * np.pi
    cos_t, sin_t = np.cos(theta), np.sin(theta)
    rotation = np.array(((cos_t, -sin_t),
                         (sin_t, cos_t)))
    return antenna_xy.dot(rotation)
def to_uv(antenna_xy, wavelength):
    """Baseline (u, v) vectors, in wavelengths, for every antenna pair.

    Parameters
    ----------
    antenna_xy : ndarray
        Jx2 matrix of antenna (x,y) positions
    wavelength : float
        observation wavelength in meters

    Returns
    -------
    uv_baselines : ndarray
        J^2 x 2 matrix of baseline vectors (includes the zero-length
        self-baselines and both signs of every pair)
    """
    x = antenna_xy[:, 0]
    y = antenna_xy[:, 1]
    # Pairwise differences x_i - x_j via broadcasting, flattened row-major.
    u = (x[:, None] - x[None, :]).reshape(-1) / wavelength
    v = (y[:, None] - y[None, :]).reshape(-1) / wavelength
    return np.stack((u, v), axis=1)
def simulate(observation, axy, beamwidth, wavelength, skymap, samples_per_dim=64, snr=None, samples=1):
    """
    Parameters
    ----------
    observation : Observation
        `Observation` object
    axy : ndarray
        Jx2 array of antenna (x,y) positions
    beamwidth : float
        antenna beamwidth in degrees
    wavelength : float
        wavelength in meters
    skymap : SkyMap
        `SkyMap` object
    samples_per_dim : int
        samples per dim of alt/az grid
    snr : float | None
        signal to noise ratio in dB
    samples : int
        samples to use in each short-term interval
    Returns
    -------
    signals : ndarray
        a JxM matrix of noiseless stacked antenna signals from the skymap
    pixeldata : ndarray
        samples_per_dim^2 length vector of pixel values
    """
    # Where the target is in the local horizontal frame at this time/place.
    target_alt_az = observation.alt_az().squeeze()
    # Grid of pointings covering one beamwidth around the target.
    alt_az_samples = generate_alt_az_samples(
        beamwidth,
        target_alt_az[0],
        target_alt_az[1],
        samples_per_dim
    )
    # Map the pointing grid back to sky coordinates to sample the sky model.
    ra_dec_samples = convert_alt_az_to_ra_dec(
        alt_az_samples,
        observation.lat,
        observation.lon,
        observation.timestamp
    )
    pixeldata = skymap.get_temp_mk(ra_dec_samples)
    # Matching (l, m) direction-cosine grid for the dish beam.
    beamsamples = create_antenna_beam_lm_samples(
        beamwidth,
        samples_per_dim=samples_per_dim
    )
    # Synthesize per-antenna signals (optionally with quantised noise).
    rx = generate_antenna_signals(axy, beamsamples, pixeldata, wavelength, snr, samples)
    return rx, pixeldata
def compute_dirty_image_pixels(xcorr, uv, lm):
    """Inverse Fourier sum of visibilities onto (l, m) image points.

    Parameters
    ----------
    xcorr : ndarray
        J^2 vector of antenna signal cross correlations
    uv : ndarray
        J^2 x 2 matrix of baseline vectors
    lm : ndarray
        N^2 x 2 matrix of (l,m) points

    Returns
    -------
    pixelvalues : ndarray
        1 x N^2 matrix of (complex) image-plane pixel values
    """
    # Phase factor of every baseline toward every image point: (J^2 x N^2).
    phasors = np.exp(1j * 2 * np.pi * uv.dot(lm.T))
    row = xcorr.reshape(1, xcorr.shape[0])
    return row.dot(phasors)
def compute_dirty_image(uv, xcorr, imwidth, samples_per_dim):
    """Form a square dirty image from cross-correlations and baselines.

    Parameters
    ----------
    uv : ndarray
        J^2 x 2 matrix of (u,v) baselines
    xcorr : ndarray
        J^2 x 1 matrix of cross correlations
    imwidth : float
        image beamwidth in degrees
    samples_per_dim : int
        samples per dimension of the image

    Returns
    -------
    image : ndarray
        (samples_per_dim x samples_per_dim) dirty image
    """
    lm_points = create_antenna_beam_lm_samples(imwidth, samples_per_dim)
    pixels = compute_dirty_image_pixels(xcorr, uv, lm_points)
    return pixels.reshape(samples_per_dim, samples_per_dim)
def dirty_beam(uvs, beamwidth, samples_per_dim):
"""
Parameters
----------
uvs : mdarray
tuple of (J^2 x 2) sets of (u,v) baselines from J antennas
beamwidth : float
beamwidth in degrees
samples_per_dim : int
samples per dimension for beam image
Returns
-------
dirty_beam : ndarray
samples_per_dim x samples_per_dim dirty | |
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Skylark BUILD extensions for Integration Test
"""
# Provider for Executable Proto messages, for passing along to the sut_component
# rule and the integration_test rule.
# pylint: disable=invalid-name
ItExecutableInfo = provider(
fields = [
"program",
"program_file",
"data",
"output_properties",
"output_files",
"executable_proto",
"timeout_seconds",
],
)
DEFAULT_EXECUTABLE_TIMEOUT_SECONDS = 30
def _integration_test_executable_impl(ctx):
    """Assemble an Executable text-proto for one setup/teardown program.

    Returns an ItExecutableInfo provider carrying the proto string plus the
    program file, its data files and the declared outputs.
    """
    # Create an Executable proto message from ctx.attr.program, ctx.attr.args and
    # ctx.attr.input_files. This proto message is from system_under_test.proto.
    executable_proto = "file: \"%s\"%s%s%s%s%s" % (
        ctx.executable.program.short_path,
        " timeout_seconds: %d" % ctx.attr.timeout_seconds,
        "".join([" args: \"%s\"" % ctx.expand_location(arg) for arg in ctx.attr.args]),
        "".join([" input_files {filename: \"%s\"}" % fname for fname in ctx.attr.input_files]),
        "".join([" output_properties {key: \"%s\"}" % fname for fname in ctx.attr.output_properties]),
        "".join([" output_files {filename: \"%s\"}" % fname for fname in ctx.attr.output_files]))
    # Expose the raw proto plus everything needed to run the program later.
    return [ItExecutableInfo(program = ctx.attr.program,
                             program_file = ctx.executable.program,
                             data = depset(ctx.files.data),
                             output_properties = ctx.attr.output_properties,
                             output_files = ctx.attr.output_files,
                             executable_proto = executable_proto,
                             timeout_seconds = ctx.attr.timeout_seconds)]
# Private rule to be used only from the sut_component macro.
_integration_test_executable = rule(
attrs = {
"program": attr.label(
mandatory = True,
allow_files = True,
executable = True,
cfg = "target",
),
"timeout_seconds": attr.int(default=DEFAULT_EXECUTABLE_TIMEOUT_SECONDS),
"args": attr.string_list(),
"input_files": attr.string_list(),
"data": attr.label_list(allow_files = True),
"deps": attr.label_list(allow_files = True),
"output_properties": attr.string_list(),
"output_files": attr.string_list(),
},
implementation = _integration_test_executable_impl,
)
# Provider for Sut Component Proto files, for passing along to the
# integration_test rule.
# pylint: disable=invalid-name
SutComponentInfo = provider(fields =
[
"sut_protos",
"setups",
"teardowns",
"data",
],
)
# Provider to help testing integration_test.
# pylint: disable=invalid-name
IntegrationTestInfoForTestInfo = provider(fields =
[
"environment",
]
)
PREPARE_PHASE_SUFFIX = "_prepare"
INTEGRATION_TEST_CONFIG_ENV_VAR = "IntegrationTestConfig"
INTEGRATION_TEST_TYPE_ENV_VAR = "IntegrationTestType"
def _get_transitive_sutcs(sut_deps):
    """Merge the SutComponentInfo providers of every target in sut_deps.

    Returns a struct of depsets (sut_protos, setups, teardowns, data)
    accumulated over all dependencies.
    """
    trans_sut_proto = depset()
    trans_setup = depset()
    trans_teardown = depset()
    trans_data = depset()
    for sutc in sut_deps:
        # NOTE(review): `depset += depset` is deprecated in newer Bazel
        # releases in favor of depset(transitive=[...]); kept as-is for the
        # Bazel version this file targets.
        trans_sut_proto += sutc[SutComponentInfo].sut_protos
        trans_setup += sutc[SutComponentInfo].setups
        trans_teardown += sutc[SutComponentInfo].teardowns
        trans_data += sutc[SutComponentInfo].data
    return struct(sut_protos = trans_sut_proto,
                  setups = trans_setup,
                  teardowns = trans_teardown,
                  data = trans_data)
def _sut_component_rule_impl(ctx):
    """Build this SUT component's text-proto and merge transitive providers.

    Collects the setup/teardown Executable protos, the docker image, SUT
    aliases and requested ports into one SutComponent proto string, then
    folds in everything from the sut_deps.
    """
    out_proto_list = ["name: \"%s\"" % (ctx.label)]
    trans_data = depset([])
    trans_setup = depset([])
    for setup in ctx.attr.setups:
        out_proto_list.append("setups {%s}" % setup[ItExecutableInfo].executable_proto)
        trans_setup += depset([setup[ItExecutableInfo].program_file])
        # Add the data files specified in the setup ItExecutableInfo and in its
        # target.
        trans_data += setup[ItExecutableInfo].data
        trans_data += setup[ItExecutableInfo].program[DefaultInfo].data_runfiles.files
    trans_teardown = depset([])
    for teardown in ctx.attr.teardowns:
        out_proto_list.append("teardowns {%s}" % teardown[ItExecutableInfo].executable_proto)
        trans_teardown += depset([teardown[ItExecutableInfo].program_file])
        # Add the data files specified in the teardown ItExecutableInfo and in its
        # target.
        trans_data += teardown[ItExecutableInfo].data
        trans_data += teardown[ItExecutableInfo].program[DefaultInfo].data_runfiles.files
    if ctx.attr.docker_image:
        out_proto_list.append(
            "docker_image: \"%s\"" % ctx.attr.docker_image)
    # One alias entry per dependent SUT: target label -> local alias.
    for sutc_key in ctx.attr.sut_deps:
        out_proto_list.append("sut_component_alias {target: \"%s\" local_alias: \"%s\"}"
                              % (sutc_key.label, ctx.attr.sut_deps[sutc_key]))
    out_proto_list.append("num_requested_ports: %d" % ctx.attr.num_requested_ports)
    # This component's proto, then everything inherited from dependencies.
    trans_sut_proto = depset([" ".join(out_proto_list)])
    trans_sutcs = _get_transitive_sutcs(ctx.attr.sut_deps)
    trans_sut_proto += trans_sutcs.sut_protos
    trans_data += trans_sutcs.data
    trans_setup += trans_sutcs.setups
    trans_teardown += trans_sutcs.teardowns
    return [SutComponentInfo(sut_protos = trans_sut_proto,
                             setups = trans_setup,
                             teardowns = trans_teardown,
                             data = trans_data)]
# An internal rule to be used only from the sut_component macro.
_sut_component = rule(
attrs = {
"setups": attr.label_list(providers = [ItExecutableInfo]),
"teardowns": attr.label_list(providers = [ItExecutableInfo]),
"docker_image": attr.string(),
"sut_deps": attr.label_keyed_string_dict(
providers = [SutComponentInfo],
cfg = "target"),
"num_requested_ports": attr.int(default=1),
},
implementation = _sut_component_rule_impl,
)
def _create_integration_test_executable(
        orig_target_name,
        target_suffix,
        executable):
    """Create _integration_test_executable rule and return its names.

    Args:
      orig_target_name: The name given to the sut_component or the
        integration_test.
      target_suffix: A suffix to append to the orig_target_name to make a unique
        target name for the _integration_test_executable.
      executable: Can be either a string, which is interpretted as a program,
        or a dictionary that has a mandatory "program" field (whose value is a
        string), and some optional fields. If a string is provided or if the
        optional field 'timeout_seconds' is not provided, it will default to
        DEFAULT_EXECUTABLE_TIMEOUT_SECONDS.

    Returns:
      The target name of the _integration_test_executable rule.
    """
    # Create a target name for the _integration_test_executable rule.
    target_name = "_%s_%s" % (orig_target_name, target_suffix)
    # Simple form: a bare string is the program label, everything else default.
    # isinstance is not supported in skylark
    # pylint: disable=unidiomatic-typecheck
    if type(executable) == "string":
        _integration_test_executable(
            name = target_name,
            program = executable,
        )
        return target_name
    # Validate that executable is a valid dictionary.
    # isinstance is not supported in skylark
    # pylint: disable=unidiomatic-typecheck
    if type(executable) != "dict":
        fail("Error in target %s: %s is neither a string nor a dictionary." % (orig_target_name, target_suffix))
    # Reject unknown keys early so typos fail the load, not the test run.
    for key in executable:
        if key not in ["program", "args", "input_files", "data", "deps", "output_properties", "output_files", "timeout_seconds"]:
            fail("Error in target %s: %s has an invalid key %s." % (orig_target_name, target_suffix, key))
    _integration_test_executable(
        name = target_name,
        timeout_seconds = executable.get("timeout_seconds"),
        program = executable.get("program"),
        args = executable.get("args"),
        input_files = executable.get("input_files"),
        data = executable.get("data"),
        deps = executable.get("deps"),
        output_properties = executable.get("output_properties"),
        output_files = executable.get("output_files"),
    )
    return target_name
def _create_integration_test_executables(
        orig_target_name,
        executable_type,
        executables):
    """Create _integration_test_executable rules and return their names.

    Args:
      orig_target_name: The name given to the sut_component.
      executable_type: "setup", "teardown" or "pretest".
      executables: An array of executables.

    Returns:
      A list of the target names of the _integration_test_executable rules.
    """
    if executables == None:
        return []
    # isinstance is not supported in skylark
    # pylint: disable=unidiomatic-typecheck
    if type(executables) != "list":
        fail("Error in target %s: %ss is not a list." % (orig_target_name, executable_type))
    # One helper rule per executable, numbered to keep target names unique.
    return [
        _create_integration_test_executable(
            orig_target_name, "%s_%d" % (executable_type, index), executable)
        for index, executable in enumerate(executables)
    ]
# sut_component macro
def sut_component(
        name,
        setups = None,
        teardowns = None,
        docker_image = "",
        sut_deps = {}, num_requested_ports = 1):
    """Macro definition that expresses an sut_component.

    Behind the scenes, it creates _integration_test_executable rules for setup
    and teardown, and a _sut_component rule.

    Args:
      name: The name of the _sut_component target.
      setups: An array of setup executables (see executables in
        _create_integration_test_executables)
      teardowns: An array of teardown executables (see executables in
        _create_integration_test_executables)
      docker_image: The setup and teardown will be run inside this docker image.
      sut_deps: Dictionary mapping names of dependent SUTs to their aliases.
      num_requested_ports: The number of ports requested for inter-SUT
        communication.
    """
    # Create integration_test_executable rules for setup and teardown.
    setup_targets = _create_integration_test_executables(
        name, "setup", setups)
    teardown_targets = _create_integration_test_executables(
        name, "teardown", teardowns)
    # Wire the generated helper targets into the actual _sut_component rule.
    _sut_component(
        name = name,
        setups = setup_targets,
        teardowns = teardown_targets,
        docker_image = docker_image,
        sut_deps = sut_deps,
        num_requested_ports = num_requested_ports
    )
def external_sut_component(
        name,
        prepares = None,
        setups = None,
        teardowns = None,
        docker_image = "",
        sut_deps = {}, num_requested_ports = 1):
    """Macro definition that expresses an external_sut_component.

    It expresses external_sut_component as two sut_component bzl targets.
    For more details, see go/ascit-plan-phase.

    Args:
      name: The name of the target. PREPARE_PHASE_SUFFIX will be appended to the
        target corresponding to the prepare phase.
      prepares: The scripts to run during the prepare phase.
      setups: The scripts to run during the setup phase.
      teardowns: The script to run during the teardown phase.
      docker_image: If provided, the SUT will be run inside the docker image.
      sut_deps: Dictionary mapping names of dependent SUTs to their
        aliases.
      num_requested_ports: The number of ports requested for inter-SUT
        communication.
    """
    # The user cannot map an sut to "prep". We are using that name internally as
    # an alias to the prep sut.
    if "prep" in sut_deps.values():
        fail("'prep' is an invalid sut alias, please choose a different name.")
    # The main target depends on the prepare-phase target under alias "prep".
    sut_deps_with_prep = {":" + name + PREPARE_PHASE_SUFFIX: "prep"}
    sut_deps_with_prep.update(sut_deps)
    # Prepare-phase component: runs `prepares`, requests no ports.
    # NOTE(review): `teardowns` is attached to the prepare-phase target while
    # the main target below gets none -- confirm teardown is meant to follow
    # the prepare phase's lifecycle.
    sut_component(
        name = name + PREPARE_PHASE_SUFFIX,
        setups = prepares,
        teardowns = teardowns,
        docker_image = docker_image,
        sut_deps = sut_deps,
        num_requested_ports = 0,
    )
    # Main component: runs `setups`, aliased to the prepare target via "prep".
    sut_component(
        name = name,
        setups = setups,
        docker_image = docker_image,
        sut_deps = sut_deps_with_prep,
        num_requested_ports = num_requested_ports,
    )
# Rule and implementation for integration tests as a test rule.
def _integration_test_impl(ctx):
test_script = ("#!/bin/bash\n" +
"/tmp/botexec/.asci-reserved/test_wrapper_script.sh \"$@\"")
ctx.file_action(
output = ctx.outputs.executable,
content = test_script,
executable = True)
config_proto_list = ["name: \"%s\"" % (ctx.attr.name)]
# transitive_data_files is the set of all data dependencies required to run
# the test, as well as to run all the setups and all the teardowns of all the
# SUTs that this integration_test transitively depends on.
transitive_data_files = depset()
# Collect all the files to pass | |
"""
Parser classes for Cheetah's Compiler
Classes:
ParseError( Exception )
_LowLevelParser( Cheetah.SourceReader.SourceReader ), basically a lexer
_HighLevelParser( _LowLevelParser )
Parser === _HighLevelParser (an alias)
"""
import os
import sys
import re
from re import DOTALL, MULTILINE
from types import StringType, ListType, TupleType, ClassType, TypeType
import time
from tokenize import pseudoprog
import inspect
import new
import traceback
from Cheetah.SourceReader import SourceReader
from Cheetah import Filters
from Cheetah import ErrorCatchers
from Cheetah.Unspecified import Unspecified
from Cheetah.Macros.I18n import I18n
# re tools
# Module-level memo of compiled regexes, keyed by pattern string.
_regexCache = {}

def cachedRegex(pattern):
    """Compile *pattern* once and reuse the compiled object on later calls."""
    try:
        return _regexCache[pattern]
    except KeyError:
        compiled = re.compile(pattern)
        _regexCache[pattern] = compiled
        return compiled
def escapeRegexChars(txt,
                     escapeRE=re.compile(r'([\$\^\*\+\.\?\{\}\[\]\(\)\|\\])')):
    """Return a txt with all special regular expressions chars escaped.

    The compiled pattern is bound once as a default argument so repeated
    calls do not recompile it; each metacharacter is prefixed with a
    backslash.
    """
    return escapeRE.sub(r'\\\1', txt)
def group(*choices):
    """Join alternatives into a capturing group: (a|b|c)."""
    return '(' + '|'.join(choices) + ')'
def nongroup(*choices):
    """Join alternatives into a non-capturing group: (?:a|b|c)."""
    return '(?:' + '|'.join(choices) + ')'
def namedGroup(name, *choices):
    """Join alternatives into a named capturing group: (?P<name>a|b).

    Bug fix: the original emitted '(P:<name>...)', which is not valid regex
    syntax -- Python named groups are spelled '(?P<name>...)'.
    """
    return '(?P<' + name + '>' + '|'.join(choices) + ')'
def any(*choices):
    """Zero-or-more repetitions of the grouped alternatives. (Intentionally
    shadows the builtin `any` to preserve this module's public API.)"""
    return group(*choices) + '*'
def maybe(*choices):
    """Zero-or-one occurrence of the grouped alternatives."""
    return group(*choices) + '?'
##################################################
## CONSTANTS & GLOBALS ##
NO_CACHE = 0
STATIC_CACHE = 1
REFRESH_CACHE = 2
SET_LOCAL = 0
SET_GLOBAL = 1
SET_MODULE = 2
##################################################
## Tokens for the parser ##
#generic
identchars = "abcdefghijklmnopqrstuvwxyz" \
"ABCDEFGHIJKLMNOPQRSTUVWXYZ_"
namechars = identchars + "0123456789"
#operators
powerOp = '**'
unaryArithOps = ('+', '-', '~')
binaryArithOps = ('+', '-', '/', '//', '%')
shiftOps = ('>>', '<<')
bitwiseOps = ('&', '|', '^')
assignOp = '='
augAssignOps = ('+=', '-=', '/=', '*=', '**=', '^=', '%=',
'>>=', '<<=', '&=', '|=', )
assignmentOps = (assignOp,) + augAssignOps
compOps = ('<', '>', '==', '!=', '<=', '>=', '<>', 'is', 'in',)
booleanOps = ('and', 'or', 'not')
operators = (powerOp,) + unaryArithOps + binaryArithOps \
+ shiftOps + bitwiseOps + assignmentOps \
+ compOps + booleanOps
delimeters = ('(', ')', '{', '}', '[', ']',
',', '.', ':', ';', '=', '`') + augAssignOps
keywords = ('and', 'del', 'for', 'is', 'raise',
'assert', 'elif', 'from', 'lambda', 'return',
'break', 'else', 'global', 'not', 'try',
'class', 'except', 'if', 'or', 'while',
'continue', 'exec', 'import', 'pass',
'def', 'finally', 'in', 'print',
)
single3 = "'''"
double3 = '"""'
tripleQuotedStringStarts = ("'''", '"""',
"r'''", 'r"""', "R'''", 'R"""',
"u'''", 'u"""', "U'''", 'U"""',
"ur'''", 'ur"""', "Ur'''", 'Ur"""',
"uR'''", 'uR"""', "UR'''", 'UR"""')
tripleQuotedStringPairs = {"'''": single3, '"""': double3,
"r'''": single3, 'r"""': double3,
"u'''": single3, 'u"""': double3,
"ur'''": single3, 'ur"""': double3,
"R'''": single3, 'R"""': double3,
"U'''": single3, 'U"""': double3,
"uR'''": single3, 'uR"""': double3,
"Ur'''": single3, 'Ur"""': double3,
"UR'''": single3, 'UR"""': double3,
}
closurePairs= {')':'(',']':'[','}':'{'}
closurePairsRev= {'(':')','[':']','{':'}'}
##################################################
## Regex chunks for the parser ##
tripleQuotedStringREs = {}
def makeTripleQuoteRe(start, end):
    """Build a DOTALL regex that non-greedily matches from *start* to *end*.

    Both delimiters are regex-escaped first, so raw quote prefixes like
    r''' or u\"\"\" can be passed directly.
    """
    start = escapeRegexChars(start)
    end = escapeRegexChars(end)
    return re.compile(r'(?:' + start + r').*?' + r'(?:' + end + r')', re.DOTALL)
for start, end in tripleQuotedStringPairs.items():
tripleQuotedStringREs[start] = makeTripleQuoteRe(start, end)
WS = r'[ \f\t]*'
EOL = r'\r\n|\n|\r'
EOLZ = EOL + r'|\Z'
escCharLookBehind = nongroup(r'(?<=\A)', r'(?<!\\)')
nameCharLookAhead = r'(?=[A-Za-z_])'
identRE=re.compile(r'[a-zA-Z_][a-zA-Z_0-9]*')
EOLre=re.compile(r'(?:\r\n|\r|\n)')
specialVarRE=re.compile(r'([a-zA-z_]+)@') # for matching specialVar comments
# e.g. ##author@ <NAME>
unicodeDirectiveRE = re.compile(
r'(?:^|\r\n|\r|\n)\s*#\s{0,5}unicode[:\s]*([-\w.]*)\s*(?:\r\n|\r|\n)', re.MULTILINE)
encodingDirectiveRE = re.compile(
r'(?:^|\r\n|\r|\n)\s*#\s{0,5}encoding[:\s]*([-\w.]*)\s*(?:\r\n|\r|\n)', re.MULTILINE)
escapedNewlineRE = re.compile(r'(?<!\\)((\\\\)*)\\(n|012)')
directiveNamesAndParsers = {
# importing and inheritance
'import': None,
'from': None,
'extends': 'eatExtends',
'implements': 'eatImplements',
'super': 'eatSuper',
# output, filtering, and caching
'slurp': 'eatSlurp',
'raw': 'eatRaw',
'include': 'eatInclude',
'cache': 'eatCache',
'filter': 'eatFilter',
'echo': None,
'silent': None,
'transform': 'eatTransform',
'call': 'eatCall',
'arg': 'eatCallArg',
'capture': 'eatCapture',
# declaration, assignment, and deletion
'attr': 'eatAttr',
'def': 'eatDef',
'block': 'eatBlock',
'@': 'eatDecorator',
'defmacro': 'eatDefMacro',
'closure': 'eatClosure',
'set': 'eatSet',
'del': None,
# flow control
'if': 'eatIf',
'while': None,
'for': None,
'else': None,
'elif': None,
'pass': None,
'break': None,
'continue': None,
'stop': None,
'return': None,
'yield': None,
# little wrappers
'repeat': None,
'unless': None,
# error handling
'assert': None,
'raise': None,
'try': None,
'except': None,
'finally': None,
'errorCatcher': 'eatErrorCatcher',
# intructions to the parser and compiler
'breakpoint': 'eatBreakPoint',
'compiler': 'eatCompiler',
'compiler-settings': 'eatCompilerSettings',
# misc
'shBang': 'eatShbang',
'encoding': 'eatEncoding',
'end': 'eatEndDirective',
}
endDirectiveNamesAndHandlers = {
'def': 'handleEndDef', # has short-form
'block': None, # has short-form
'closure': None, # has short-form
'cache': None, # has short-form
'call': None, # has short-form
'capture': None, # has short-form
'filter': None,
'errorCatcher': None,
'while': None, # has short-form
'for': None, # has short-form
'if': None, # has short-form
'try': None, # has short-form
'repeat': None, # has short-form
'unless': None, # has short-form
}
##################################################
## CLASSES ##
# @@TR: SyntaxError doesn't call exception.__str__ for some reason!
#class ParseError(SyntaxError):
class ParseError(ValueError):
    """Syntax error in a Cheetah template, with a formatted source excerpt.

    Stores the SourceReader stream plus an optional explicit (lineno, col);
    str() renders a report with up to three lines of context on each side
    and a caret under the offending column.
    """
    def __init__(self, stream, msg='Invalid Syntax', extMsg='', lineno=None, col=None):
        self.stream = stream
        # Clamp the stream position so the row/col lookup below stays valid.
        if stream.pos() >= len(stream):
            stream.setPos(len(stream) -1)
        self.msg = msg
        self.extMsg = extMsg
        self.lineno = lineno
        self.col = col
    def __str__(self):
        return self.report()
    def report(self):
        """Return the full multi-line error report as a string."""
        stream = self.stream
        if stream.filename():
            f = " in file %s" % stream.filename()
        else:
            f = ''
        report = ''
        # Use the explicit position if one was given, otherwise the stream's
        # current position.
        if self.lineno:
            lineno = self.lineno
            row, col, line = (lineno, (self.col or 0),
                              self.stream.splitlines()[lineno-1])
        else:
            row, col, line = self.stream.getRowColLine()
        ## get the surrounding lines
        lines = stream.splitlines()
        prevLines = [] # (rowNum, content)
        for i in range(1, 4):
            if row-1-i <=0:
                break
            prevLines.append( (row-i, lines[row-1-i]) )
        nextLines = [] # (rowNum, content)
        for i in range(1, 4):
            if not row-1+i < len(lines):
                break
            nextLines.append( (row+i, lines[row-1+i]) )
        nextLines.reverse()
        ## print the main message
        report += "\n\n%s\n" %self.msg
        report += "Line %i, column %i%s\n\n" % (row, col, f)
        report += 'Line|Cheetah Code\n'
        report += '----|-------------------------------------------------------------\n'
        while prevLines:
            lineInfo = prevLines.pop()
            report += "%(row)-4d|%(line)s\n"% {'row':lineInfo[0], 'line':lineInfo[1]}
        report += "%(row)-4d|%(line)s\n"% {'row':row, 'line':line}
        # Caret marker under the offending column (5 = width of "Line|" gutter).
        report += ' '*5 +' '*(col-1) + "^\n"
        while nextLines:
            lineInfo = nextLines.pop()
            report += "%(row)-4d|%(line)s\n"% {'row':lineInfo[0], 'line':lineInfo[1]}
        ## add the extra msg
        if self.extMsg:
            report += self.extMsg + '\n'
        return report
class ForbiddenSyntax(ParseError):
    """Base marker for template constructs that are not permitted."""
    pass
class ForbiddenExpression(ForbiddenSyntax):
    """Raised for a disallowed expression."""
    pass
class ForbiddenDirective(ForbiddenSyntax):
    """Raised for a disallowed #directive."""
    pass
class CheetahVariable(object):
    """Parsed representation of a Cheetah $variable reference."""

    def __init__(self, nameChunks, useNameMapper=True, cacheToken=None,
                 rawSource=None):
        self.nameChunks = nameChunks        # dotted-name pieces of the variable
        self.useNameMapper = useNameMapper  # use NameMapper-style lookup?
        self.cacheToken = cacheToken        # cache directive token, if any
        self.rawSource = rawSource          # original template source text

class Placeholder(CheetahVariable):
    """A CheetahVariable that appeared as an output $placeholder."""
    pass
class ArgList(object):
    """Used by _LowLevelParser.getArgList()

    Accumulates a parsed argument list: one entry in `arguments` per
    parameter, with the default-value source text (or None) at the same
    index in `defaults`. `count` tracks which argument default tokens are
    currently being appended to.
    """
    def __init__(self):
        self.arguments = []
        self.defaults = []
        self.count = 0
    def add_argument(self, name):
        # Each new argument starts with no default.
        self.arguments.append(name)
        self.defaults.append(None)
    def next(self):
        # Advance to the next argument slot for add_default().
        self.count += 1
    def add_default(self, token):
        # Append default-value source text to the current argument,
        # initialising it from None to '' on first token.
        count = self.count
        if self.defaults[count] is None:
            self.defaults[count] = ''
        self.defaults[count] += token
    def merge(self):
        """Return [(name, default-or-None), ...] with whitespace stripped.

        NOTE(review): Python 2 only -- relies on `basestring` and on
        `map(None, a, b)` acting like zip-with-padding; both are gone in
        Python 3.
        """
        defaults = (isinstance(d, basestring) and d.strip() or None for d in self.defaults)
        return list(map(None, (a.strip() for a in self.arguments), defaults))
    def __str__(self):
        return str(self.merge())
class _LowLevelParser(SourceReader):
"""This class implements the methods to match or extract ('get*') the basic
elements of Cheetah's grammar. It does NOT handle any code generation or
state management.
"""
_settingsManager = None
def setSettingsManager(self, settingsManager):
self._settingsManager = settingsManager
def setting(self, key, default=Unspecified):
if default is Unspecified:
return self._settingsManager.setting(key)
else:
return self._settingsManager.setting(key, default=default)
def setSetting(self, key, val):
self._settingsManager.setSetting(key, val)
def settings(self):
return self._settingsManager.settings()
def updateSettings(self, settings):
self._settingsManager.updateSettings(settings)
    def _initializeSettings(self):
        """Reset the settings manager to its initial defaults."""
        self._settingsManager._initializeSettings()
    def configureParser(self):
        """Is called by the Compiler instance after the parser has had a
        settingsManager assigned with self.setSettingsManager()
        """
        # Build every token-matching regex from the current settings.
        self._makeCheetahVarREs()
        self._makeCommentREs()
        self._makeDirectiveREs()
        self._makePspREs()
        # First character of each special token: any other character can be
        # consumed as plain text without attempting any match below.
        self._possibleNonStrConstantChars = (
            self.setting('commentStartToken')[0] +
            self.setting('multiLineCommentStartToken')[0] +
            self.setting('cheetahVarStartToken')[0] +
            self.setting('directiveStartToken')[0] +
            self.setting('PSPStartToken')[0])
        # Matchers tried in this order wherever a special token might start.
        self._nonStrConstMatchers = [
            self.matchCommentStartToken,
            self.matchMultiLineCommentStartToken,
            self.matchVariablePlaceholderStart,
            self.matchExpressionPlaceholderStart,
            self.matchDirective,
            self.matchPSPStartToken,
            self.matchEOLSlurpToken,
        ]
## regex setup ##
    def _makeCheetahVarREs(self):
        """Setup the regexs for Cheetah $var parsing."""
        # Numeric interval with a unit suffix (s/m/h/d/w) or bare, used by
        # the cache-refresh token, e.g. $*5m*var.
        num = r'[0-9\.]+'
        interval = (r'(?P<interval>' +
                    num + r's|' +
                    num + r'm|' +
                    num + r'h|' +
                    num + r'd|' +
                    num + r'w|' +
                    num + ')'
                    )
        # $*<interval>*var refreshes the cache, $*var caches statically,
        # a plain $var is uncached (the empty NO_CACHE alternative).
        cacheToken = (r'(?:' +
                      r'(?P<REFRESH_CACHE>\*' + interval + '\*)'+
                      '|' +
                      r'(?P<STATIC_CACHE>\*)' +
                      '|' +
                      r'(?P<NO_CACHE>)' +
                      ')')
        self.cacheTokenRE = cachedRegex(cacheToken)
        # $!var marks a silent placeholder (NOT_SILENT matches empty).
        silentPlaceholderToken = (r'(?:' +
                                  r'(?P<SILENT>' +escapeRegexChars('!')+')'+
                                  '|' +
                                  r'(?P<NOT_SILENT>)' +
                                  ')')
        self.silentPlaceholderTokenRE = cachedRegex(silentPlaceholderToken)
        self.cheetahVarStartRE = cachedRegex(
            escCharLookBehind +
            r'(?P<startToken>'+escapeRegexChars(self.setting('cheetahVarStartToken'))+')'+
            r'(?P<silenceToken>'+silentPlaceholderToken+')'+
            r'(?P<cacheToken>'+cacheToken+')'+
            r'(?P<enclosure>|(?:(?:\{|\(|\[)[ \t\f]*))' +  # allow WS after enclosure
            r'(?=[A-Za-z_])')
        validCharsLookAhead = r'(?=[A-Za-z_\*!\{\(\[])'
        self.cheetahVarStartToken = self.setting('cheetahVarStartToken')
        self.cheetahVarStartTokenRE = cachedRegex(
            escCharLookBehind +
            escapeRegexChars(self.setting('cheetahVarStartToken'))
            + validCharsLookAhead
            )
        self.cheetahVarInExpressionStartTokenRE = cachedRegex(
            escapeRegexChars(self.setting('cheetahVarStartToken'))
            + r'(?=[A-Za-z_])'
            )
        self.expressionPlaceholderStartRE = cachedRegex(
            escCharLookBehind +
            r'(?P<startToken>' + escapeRegexChars(self.setting('cheetahVarStartToken')) + ')' +
            r'(?P<cacheToken>' + cacheToken + ')' +
            #r'\[[ \t\f]*'
            r'(?:\{|\(|\[)[ \t\f]*'
            + r'(?=[^\)\}\]])'
            )
        # The EOL-slurp token swallows trailing whitespace plus the newline.
        if self.setting('EOLSlurpToken'):
            self.EOLSlurpRE = cachedRegex(
                escapeRegexChars(self.setting('EOLSlurpToken'))
                + r'[ \t\f]*'
                + r'(?:'+EOL+')'
                )
        else:
            self.EOLSlurpRE = None
def _makeCommentREs(self):
"""Construct the regex bits that are used in comment parsing."""
startTokenEsc = escapeRegexChars(self.setting('commentStartToken'))
self.commentStartTokenRE = cachedRegex(escCharLookBehind + startTokenEsc)
del startTokenEsc
startTokenEsc = escapeRegexChars(
self.setting('multiLineCommentStartToken'))
endTokenEsc = escapeRegexChars(
self.setting('multiLineCommentEndToken'))
self.multiLineCommentTokenStartRE = cachedRegex(escCharLookBehind +
startTokenEsc)
self.multiLineCommentEndTokenRE = cachedRegex(escCharLookBehind +
endTokenEsc)
def _makeDirectiveREs(self):
"""Construct the regexs that are used in directive parsing."""
startToken = self.setting('directiveStartToken')
endToken = self.setting('directiveEndToken')
startTokenEsc = escapeRegexChars(startToken)
endTokenEsc = escapeRegexChars(endToken)
validSecondCharsLookAhead = r'(?=[A-Za-z_@])'
reParts = | |
# <gh_stars>0  (repository-scaffolding artifact, not Python code)
from rainbow_ddpg.ddpg import DDPG
import baselines.common.tf_util as tfutil
import itertools
from baselines import logger
import numpy as np
import tensorflow as tf
import cv2
import gym
from baselines.common.schedules import LinearSchedule
import sys
import os
from threading import Thread
from main import build_env
# Working directory for run artifacts, created next to the entry-point script.
tmp = os.path.dirname(sys.modules['__main__'].__file__) + "/tmp"
demo_states_dir = tmp + "/demo_states"
if not os.path.exists(demo_states_dir):
    os.makedirs(demo_states_dir)
# Template for per-run simulator state snapshots: <run_name>/<iteration>.bullet
demo_states_template = demo_states_dir + "/{}/{}.bullet"
class Renderer(object):
    """Writes annotated 84x84 RGB frames to an .avi file for debugging.

    One video per (renderer_type, run, epoch); each frame is overlaid with
    the reward, four action components and the Q value.
    """
    def __init__(self, renderer_type, run_name, epoch, seed=None):
        self.directory = tmp + '/ddpg_video_buffer/'
        if not os.path.exists(self.directory):
            os.makedirs(self.directory)
        self.run_name = run_name
        # Distinguish videos produced by differently-seeded workers.
        if seed is not None:
            run_name = run_name + str(seed)
        self.fname = '{}-{}-{}.avi'.format(renderer_type, run_name, epoch + 1)
        fourcc = cv2.VideoWriter_fourcc(*"XVID")
        # 30 FPS writer; frame size must match the 84x84 observation frames.
        self.rgb = cv2.VideoWriter(self.directory + self.fname, fourcc, 30.0,
                                   (84, 84))
    def record_frame(self, frame, reward, action, q):
        """Overlay reward/action/Q text on *frame* and append it to the video."""
        # Frames arrive as floats (scaled by 255 below); keep only RGB planes.
        frame = np.array(frame[:, :, 0:3].copy() * 255, dtype=np.uint8)
        cv2.putText(frame, format(reward, '.2f'), (40, 15),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.4, (255, 0, 0), 1)
        # First four action components at fixed positions around the frame.
        cv2.putText(frame, format(action[0], '.2f'), (5, 15),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.3, (255, 0, 0), 1)
        cv2.putText(frame, format(action[1], '.2f'), (5, 25),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.3, (255, 0, 0), 1)
        cv2.putText(frame, format(action[2], '.2f'), (5, 35),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.3, (255, 0, 0), 1)
        cv2.putText(frame, format(action[3], '.2f'), (40, 25),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.3, (255, 0, 0), 1)
        cv2.putText(frame, format(q[0][0], '.2f'), (40, 35),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.3, (255, 0, 0), 1)
        self.rgb.write(frame)
    def finalize(self):
        """Flush and close the underlying video file."""
        self.rgb.release()
class RolloutWorker(object):
    """Collects environment rollouts with the agent's noisy policy and feeds
    the resulting transitions into the shared replay memory.

    Intended to run in a daemon thread (see DistributedTrain.train).  With
    some probability an episode is reset to a stored demonstration state,
    governed by ``reset_to_demo_rate_sched``.
    """
    def __init__(self, env_id, agent, num_steps, run_name,
                 reset_to_demo_rate_sched, seed, demo_terminality):
        self.num_steps = num_steps  # Number of steps this workers should execute.
        self.reset_to_demo_rate_sched = reset_to_demo_rate_sched  # Schedule to anneal reset to demo.
        self.epoch_rewards = []
        self.epoch_qs = []
        self.run_name = run_name
        self.agent = agent
        self.env_id = env_id
        self.seed = seed  # If we have multiple workers, we want to seed them differently.
        self.demo_terminality = demo_terminality
        if seed == 0:
            self.renderer = Renderer("rollout", run_name, 0, seed)
        else:
            self.renderer = None
        self.rendering = seed == 0  # Render only first one
        self.steps_to_render = 300
    def advance_epoch(self):
        """Reset the per-epoch reward/Q statistics (called once per epoch)."""
        self.epoch_rewards = []
        self.epoch_qs = []
    def exec_rollouts(self):
        """Main rollout loop; blocks for ``num_steps`` environment steps."""
        with self.agent.sess.as_default():
            #Adi: Want to create our own custom cloth env, so we can't use gym.make()
            #env = gym.make(self.env_id)
            # NOTE(review): hard-coded absolute paths tie this worker to one
            # machine -- consider making these configurable.
            cloth_cfg_path = '/Users/adivganapathi/Documents/UC Berkeley/Rainbow_ddpg-fork/cfg/demo_spaces.yaml'
            render_path = ''
            init_state_path = '/Users/adivganapathi/Documents/UC Berkeley/Rainbow_ddpg-fork/init_states/state_init_easy_81_coverage.pkl'
            env = build_env(cloth_cfg_path=cloth_cfg_path, render_path=render_path, start_state_path=init_state_path, num_env=1, seed=1, alg='ddpg')
            env.seed(self.seed)
            obs0, aux0, state0 = env.reset(), env.get_aux(), env.get_state()
            episode_reward = 0
            episodes = 1
            for i in range(self.num_steps):
                # Occasionally start recording a short video of rollouts.
                if not self.renderer and np.random.uniform(0, 1) < 0.0005:
                    self.steps_to_render = 300
                    self.renderer = Renderer("rollout", self.run_name, i,
                                             self.seed)
                if self.renderer and self.steps_to_render == 0:
                    self.renderer.finalize()
                    self.renderer = None
                # Noisy action for exploration; Q only needed for logging.
                action, q, _, _, _ = self.agent.pi(
                    obs0, aux0, state0, apply_noise=True, compute_Q=True)
                self.epoch_qs.append(q)
                assert action.shape == env.action_space.shape
                obs1, r, done, info = env.step(action)
                episode_reward += r
                state1, aux1 = env.get_state(), env.get_aux()
                self.agent.store_transition(state0, obs0, action, r, state1,
                                            obs1, done, aux0, aux1,
                                            None)
                if self.renderer and self.steps_to_render > 0:
                    frame = env.render(mode="rgb_array")
                    self.renderer.record_frame(frame, r, action, q)
                    self.steps_to_render -= 1
                obs0, aux0, state0 = obs1, aux1, state1
                if done:
                    self.agent.save_reward(episode_reward)
                    episodes += 1
                    self.epoch_rewards.append(episode_reward)
                    episode_reward = 0.
                    # With annealed probability, restart from a stored demo
                    # state instead of the environment's initial state.
                    if np.random.uniform(
                            0, 1) < self.reset_to_demo_rate_sched.value(i):
                        while True:
                            memory = self.agent.memory
                            with memory.lock:
                                demo_index = np.random.randint(
                                    0, memory.num_demonstrations)
                                state = memory.storage[demo_index][0]
                                # Reject states within demo_terminality steps
                                # of an episode end (index 6 is `done`).
                                terminal_demo = False
                                for di in range(
                                        demo_index,
                                        demo_index + self.demo_terminality):
                                    terminal_demo = terminal_demo or memory.storage[
                                        di % memory.num_demonstrations][6]
                            if not terminal_demo:
                                break
                        fn = demo_states_template.format(
                            self.run_name, memory.storage[demo_index][9])
                        obs0 = env.reset_to_state(state, fn=fn)
                    else:
                        obs0 = env.reset()
                    aux0, state0 = env.get_aux(), env.get_state()
class DistributedTrain(object):
    def __init__(self, run_name, agent, env, nb_rollout_steps,
                 num_pretrain_steps, nb_epochs, nb_epoch_cycles,
                 nb_train_steps, demo_env, demo_policy, render_demo,
                 num_demo_steps, reset_to_demo_rate, render_eval, eval_env,
                 nb_eval_steps, env_id, policy_and_target_update_period,
                 demo_terminality, load_file, save_folder, only_eval):
        """Store training/evaluation configuration; no work happens here.

        The TF session is created later in start(); the Saver is built now so
        checkpoints can be restored/saved once a session exists.
        """
        # Misc params
        self.run_name = run_name
        self.agent = agent
        self.load_file = load_file
        self.save_folder = save_folder
        self.only_eval = only_eval
        self.saver = tf.train.Saver()
        # Main rollout params
        self.nb_rollout_steps = nb_rollout_steps
        # Train params
        self.num_pretrain_steps = num_pretrain_steps
        self.nb_epochs = nb_epochs
        self.nb_epoch_cycles = nb_epoch_cycles
        self.nb_train_steps = nb_train_steps
        self.policy_and_target_update_period = policy_and_target_update_period
        # Demo params
        self.demo_env = demo_env
        self.demo_policy = demo_policy
        self.render_demo = render_demo
        self.num_demo_steps = num_demo_steps
        self.reset_to_demo_rate = reset_to_demo_rate
        self.demo_terminality = demo_terminality
        # Render params
        self.render_eval = render_eval
        self.eval_env = eval_env
        self.nb_eval_steps = nb_eval_steps
        self.env_id = env_id
    def start(self):
        """Create the TF session, then either evaluate only or train.

        Returns train()'s result (negated mean final evaluation reward), or
        None in only-eval mode.
        """
        with tfutil.single_threaded_session() as sess:
            self.sess = sess
            self.agent.set_sess(sess)
            # Restore a checkpoint if one was given, else initialize fresh.
            if self.load_file:
                self.saver.restore(sess, self.load_file)
            else:
                self.agent.initialize_vars()
            self.agent.sync_optimizers()
            self._write_summary()
            # No graph mutation is allowed past this point.
            sess.graph.finalize()
            successes = 0
            if self.only_eval:
                # Pure evaluation: 20 episodes with exploration noise off.
                for i in range(20):
                    done = False
                    obs = self.eval_env.reset()
                    self.agent.reset()
                    total_r = 0
                    while not done:
                        aux0 = self.eval_env.get_aux()
                        state0 = self.eval_env.get_state()
                        action, q, object_conf, gripper, target = self.agent.pi(
                            obs,
                            aux0,
                            state0,
                            apply_noise=False,
                            compute_Q=True)
                        try:
                            obs, r, done, info = self.eval_env.step(action)
                        except StopIteration:
                            print("interrupted iteration")
                            done = True
                        # Positive reward on the terminal step counts as success.
                        if done and r > 0:
                            print("success")
                            successes += 1
                        elif done:
                            print("fail")
                        total_r += r
                    print(total_r)
                print(successes)
                return
            if self.demo_policy:
                self._initialize_memory_with_policy()
            self.agent.memory.demonstrations_done()
            #Adi: Temporarily no demonstrations so there is nothing to pretrain on
            #self.pretrain()
            ret = self.train()
            self.sess = None
            return ret
    def train(self):
        """Run the main training loop.

        Spawns rollout worker thread(s) that fill the replay memory while the
        main thread performs gradient updates and per-epoch evaluation.
        Returns the negated mean of the last epochs' evaluation rewards.
        """
        num_steps = self.nb_epochs * self.nb_epoch_cycles * self.nb_rollout_steps
        rws = []
        # Use a single rollout worker for now
        for i in range(1):
            rw = RolloutWorker(
                self.env_id, self.agent, num_steps, self.run_name,
                LinearSchedule(
                    200000, initial_p=self.reset_to_demo_rate, final_p=0.1), i,
                self.demo_terminality)
            # Daemon thread: rollouts run concurrently with training below.
            thread = Thread(target=rw.exec_rollouts, daemon=True)
            thread.start()
            rws.append(rw)
        eval_episodes = 1
        final_evals = []
        # Continue the iteration counter from where pretraining left off.
        iteration = self.num_pretrain_steps
        for epoch in range(self.nb_epochs):
            for cycle in range(self.nb_epoch_cycles):
                # Simple textual progress bar, rewritten in place.
                print(
                    "Cycle: {}/{}".format(cycle, self.nb_epoch_cycles) + "[" +
                    "-" * cycle + " " * (self.nb_epoch_cycles - cycle) + "]",
                    end="\r")
                self.agent.memory.grow_limit()
                for t_train in range(self.nb_train_steps):
                    self.agent.train(iteration)
                    iteration += 1
                    # Target network updated only every few train steps.
                    if iteration % self.policy_and_target_update_period == 0:
                        self.agent.update_target_net()
            logger.record_tabular("epoch", epoch)
            logger.record_tabular("total transitions",
                                  self.agent.memory.total_transitions)
            logger.record_tabular("run_name", self.run_name)
            # Aggregate rollout statistics from all workers for this epoch.
            all_rewards = list(
                itertools.chain(*[rw.epoch_rewards for rw in rws]))
            all_qs = list(itertools.chain(*[rw.epoch_qs for rw in rws]))
            logger.record_tabular(
                "rollout_rewards",
                np.mean(all_rewards) if all_rewards else "none")
            logger.record_tabular("rollout_qs",
                                  np.mean(all_qs) if all_qs else "none")
            for rw in rws:
                rw.advance_epoch()
            print("Executed epoch cycles, starting the evaluation.")
            eval_obs0, aux0, state0 = self.eval_env.reset(), self.eval_env.get_aux(), self.eval_env.get_state()
            eval_episode_reward = 0.
            eval_episode_rewards = []
            eval_qs = []
            if self.render_eval:
                renderer = Renderer("eval", self.run_name, epoch)
            for eval_episode in range(self.nb_eval_steps):
                eval_done = False
                print(
                    "Evaluation {}/{}".format(eval_episode,
                                              self.nb_eval_steps),
                    end="\r")
                while not eval_done:
                    # Deterministic policy (no noise) during evaluation.
                    eval_action, eval_q, object_conf, gripper, target = self.agent.pi(
                        eval_obs0,
                        aux0,
                        state0,
                        apply_noise=False,
                        compute_Q=True)
                    eval_obs0, eval_r, eval_done, eval_info = self.eval_env.step(
                        eval_action)
                    aux0, state0 = self.eval_env.get_aux(
                    ), self.eval_env.get_state()
                    eval_qs.append(eval_q)
                    if self.render_eval:
                        frame = self.eval_env.render(mode="rgb_array")
                        renderer.record_frame(frame, eval_r, eval_action,
                                              eval_q)
                    eval_episode_reward += eval_r
                # Compare the network's auxiliary predictions against ground
                # truth taken from fixed slices of the simulator state.
                actual_object_conf = state0[8:11]
                actual_grip = state0[0:3]
                actual_target = state0[3:6]
                diff_object_conf, diff_grip, diff_target = np.linalg.norm(
                    actual_object_conf - object_conf), np.linalg.norm(
                        actual_grip -
                        gripper), np.linalg.norm(actual_target - target)
                self.agent.save_aux_prediction(diff_object_conf, diff_grip,
                                               diff_target)
                eval_obs0, aux0, state0 = self.eval_env.reset(), self.eval_env.get_aux(), self.eval_env.get_state()
                eval_episode_rewards.append(eval_episode_reward)
                self.agent.save_eval_reward(eval_episode_reward, eval_episodes)
                eval_episodes += 1
                eval_episode_reward = 0.
            if self.render_eval:
                renderer.finalize()
            # Only the last few epochs contribute to the returned score.
            if eval_episode_rewards and epoch > self.nb_epochs - 5:
                final_evals.append(np.mean(eval_episode_rewards))
            if epoch % 5 == 0 and self.save_folder:
                path = self.save_folder + "/" + \
                    self.run_name + "epoch{}.ckpt".format(epoch)
                print("Saving model to " + path)
                self.saver.save(self.sess, path)
            logger.record_tabular(
                "eval_rewards",
                np.mean(eval_episode_rewards)
                if eval_episode_rewards else "none")
            logger.record_tabular("eval_qs",
                                  np.mean(eval_qs) if eval_qs else "none")
            logger.dump_tabular()
            logger.info('')
        return -np.mean(final_evals)
    def pretrain(self):
        """Train the agent on demonstration data only, before any rollouts."""
        iteration = 0
        while self.num_pretrain_steps > 0:
            print(
                "Pretrain: {}/{}".format(iteration, self.num_pretrain_steps),
                end="\r")
            self.agent.train(iteration, pretrain=True)
            iteration += 1
            if iteration % self.policy_and_target_update_period == 0:
                self.agent.update_target_net()
            # Counts down so train() starts from the right iteration number.
            self.num_pretrain_steps -= 1
def _initialize_memory_with_policy(self):
print("Start collecting demo transitions")
obs0, aux0, state0 = self.demo_env.reset(), self.demo_env.get_aux(
), self.demo_env.get_state()
self.demo_policy.reset()
os.makedirs(demo_states_dir + "/" + self.run_name, exist_ok=True)
if self.render_demo:
renderer = Renderer("demo", self.run_name, 0)
iteration = -1
successes = 0
total_r = 0
total_dones = 0
for i in range(self.num_demo_steps):
print("Demo: {}/{}".format(i, self.num_demo_steps), end="\r")
transitions = []
frames = []
while True:
iteration += 1
#print(len(state0))
#print(state0)
action = self.demo_policy.choose_action(state0)
fn = demo_states_template.format(self.run_name, iteration)
self.demo_env.store_state(fn)
obs1, r, done, info = self.demo_env.step(action)
total_r += r
aux1, state1 = self.demo_env.get_aux(
), self.demo_env.get_state()
transitions.append((state0, obs0, action, r, state1, obs1,
done, aux0, aux1, iteration))
obs0, aux0, state0 = obs1, aux1, state1
if self.render_demo:
frame = self.demo_env.render(mode="rgb_array")
frames.append(frame)
if done:
total_dones += 1
obs0, aux0, state0 = self.demo_env.reset(
), self.demo_env.get_aux(), self.demo_env.get_state()
self.demo_policy.reset()
if r > 0:
successes += 1
for t in transitions:
self.agent.store_transition(*t, demo=True)
if self.render_demo:
for (j, frame) in enumerate(frames):
renderer.record_frame(frame, transitions[j][3],
transitions[j][2], [[0]])
break
else:
print("Bad demo - throw away")
transitions = []
frames = []
if self.render_demo:
renderer.finalize()
print("Collected {} demo transition.".format(
self.agent.memory.num_demonstrations))
print("Successes {} .".format(successes))
print("Reward {} .".format(total_r / total_dones))
def _write_summary(self):
training_text_summary = {
"env_data": {
"env:": str(self.eval_env),
"run_name": self.run_name,
"obs_shape": self.eval_env.observation_space.shape,
"action_shace": self.eval_env.action_space.shape,
#Passing in hardcoded shape
#"aux_shape": self.eval_env.aux_space.shape,
"aux_shape": (16,),
"call_command": " ".join(sys.argv),
},
"demo_data": {
"policy": self.demo_policy.__class__.__name__,
"number_of_steps": self.num_demo_steps,
"demo_terminality": self.demo_terminality,
"reset_to_demo_rate": self.reset_to_demo_rate,
},
"training_data": {
| |
if len(params) > 1:
event_description = clean_string(ctx, params[1])
c = userDatabase.cursor()
try:
c.execute("SELECT creator FROM events WHERE creator = ? AND active = 1 AND start > ?", (creator, now,))
result = c.fetchall()
if len(result) > 1 and creator not in owner_ids+mod_ids:
yield from bot.say("You can only have two running events simultaneously. Delete or edit an active event")
return
servers = get_user_servers(bot, creator)
# If message is via PM, but user only shares one server, we just consider that server
if ctx.message.channel.is_private and len(servers) == 1:
server = servers[0]
# Not a private message, so we just take current server
elif not ctx.message.channel.is_private:
server = ctx.message.server
# PM and user shares multiple servers, we must ask him for which server is the event
else:
yield from bot.say("For which server is this event? Choose one (number only)" +
"\n\t0: *Cancel*\n\t" +
"\n\t".join(["{0}: **{1.name}**".format(i+1, j) for i, j in enumerate(servers)]))
reply = yield from bot.wait_for_message(author=ctx.message.author, channel=ctx.message.channel,
timeout=50.0)
if reply is None:
yield from bot.say("Nothing? Forget it then.")
return
elif is_numeric(reply.content):
answer = int(reply.content)
if answer == 0:
yield from bot.say("Changed your mind? Typical human.")
return
try:
server = servers[answer-1]
except IndexError:
yield from bot.say("That wasn't in the choices, you ruined it. Start from the beginning.")
return
else:
yield from bot.say("That's not a valid answer, try the command again.")
return
c.execute("INSERT INTO events (creator,server,start,name,description) VALUES(?,?,?,?,?)",
(creator, server.id, start, name, event_description))
event_id = c.lastrowid
reply = "Event registered successfully.\n\t**{0}** in *{1}*.\n*To edit this event use ID {2}*"
yield from bot.say(reply.format(name, starts_in.original, event_id))
finally:
userDatabase.commit()
c.close()
@event_add.error
@checks.is_not_lite()
@asyncio.coroutine
def event_add_error(error, ctx):
    """Error handler for the event-add command."""
    if isinstance(error, commands.BadArgument):
        # Argument conversion failed (e.g. bad time string); relay the reason.
        yield from bot.say(str(error))
@events.command(name="editname", pass_context=True)
@checks.is_not_lite()
@asyncio.coroutine
def event_edit_name(ctx, event_id: int, *, new_name):
    """Changes an event's name

    Only the creator of the event or mods can edit an event's name
    Only upcoming events can be edited"""
    c = userDatabase.cursor()
    now = time.time()
    # Collapse to a single sanitized line; names are shown inline elsewhere.
    new_name = single_line(clean_string(ctx, new_name))
    try:
        # Only active, not-yet-started events may be edited.
        c.execute("SELECT creator, name FROM events WHERE id = ? AND active = 1 AND start > ?", (event_id, now,))
        event = c.fetchone()
        if not event:
            yield from bot.say("There are no active events with that ID.")
            return
        # Mods/owners may edit anyone's event; others only their own.
        if event["creator"] != int(ctx.message.author.id) and ctx.message.author.id not in mod_ids+owner_ids:
            yield from bot.say("You can only edit your own events.")
            return
        yield from bot.say("Do you want to change the name of **{0}**? `(yes/no)`".format(event["name"]))
        answer = yield from bot.wait_for_message(author=ctx.message.author, channel=ctx.message.channel, timeout=30.0)
        if answer is None:
            yield from bot.say("I will take your silence as a no...")
        elif answer.content.lower() in ["yes", "y"]:
            c.execute("UPDATE events SET name = ? WHERE id = ?", (new_name, event_id,))
            yield from bot.say("Your event was renamed successfully to **{0}**.".format(new_name))
        else:
            yield from bot.say("Ok, nevermind.")
    finally:
        # Commit and release the cursor whether or not the edit happened.
        userDatabase.commit()
        c.close()
@events.command(name="editdesc", aliases=["editdescription"], pass_context=True)
@checks.is_not_lite()
@asyncio.coroutine
def event_edit_description(ctx, event_id: int, *, new_description):
    """Changes an event's description

    Only the creator of the event or mods can edit an event's description
    Only upcoming events can be edited"""
    c = userDatabase.cursor()
    now = time.time()
    new_description = clean_string(ctx, new_description)
    try:
        # Fetch the name too, so the confirmation prompt can show it
        # (consistent with editname/edittime).
        c.execute("SELECT creator, name FROM events WHERE id = ? AND active = 1 AND start > ?", (event_id, now,))
        event = c.fetchone()
        if not event:
            yield from bot.say("There are no active events with that ID.")
            return
        # Mods/owners may edit anyone's event; others only their own.
        if event["creator"] != int(ctx.message.author.id) and ctx.message.author.id not in mod_ids+owner_ids:
            yield from bot.say("You can only edit your own events.")
            return
        # Bug fix: the {0} placeholder was previously sent verbatim because
        # .format() was never called.
        yield from bot.say("Do you want to change the description of **{0}**? `(yes/no)`".format(event["name"]))
        answer = yield from bot.wait_for_message(author=ctx.message.author, channel=ctx.message.channel, timeout=30.0)
        if answer is None:
            yield from bot.say("I will take your silence as a no...")
        elif answer.content.lower() in ["yes", "y"]:
            c.execute("UPDATE events SET description = ? WHERE id = ?", (new_description, event_id,))
            yield from bot.say("Your event's description was changed successfully to **{0}**.".format(new_description))
        else:
            yield from bot.say("Ok, nevermind.")
    finally:
        # Commit and release the cursor whether or not the edit happened.
        userDatabase.commit()
        c.close()
@events.command(name="edittime", aliases=["editstart"], pass_context=True)
@checks.is_not_lite()
@asyncio.coroutine
def event_edit_time(ctx, event_id: int, starts_in: TimeString):
    """Changes an event's time

    Only the creator of the event or mods can edit an event's time
    Only upcoming events can be edited"""
    c = userDatabase.cursor()
    now = time.time()
    try:
        # Only active, not-yet-started events may be edited.
        c.execute("SELECT creator, name FROM events WHERE id = ? AND active = 1 AND start > ?", (event_id, now,))
        event = c.fetchone()
        if not event:
            yield from bot.say("There are no active events with that ID.")
            return
        # Mods/owners may edit anyone's event; others only their own.
        if event["creator"] != int(ctx.message.author.id) and ctx.message.author.id not in mod_ids+owner_ids:
            yield from bot.say("You can only edit your own events.")
            return
        yield from bot.say("Do you want to change the start time of '**{0}**'? `(yes/no)`".format(event["name"]))
        answer = yield from bot.wait_for_message(author=ctx.message.author, channel=ctx.message.channel, timeout=30.0)
        if answer is None:
            yield from bot.say("I will take your silence as a no...")
        elif answer.content.lower() in ["yes", "y"]:
            # New start is relative to *now*, parsed from e.g. "2d1h20m".
            c.execute("UPDATE events SET start = ? WHERE id = ?", (now+starts_in.seconds, event_id,))
            yield from bot.say(
                "Your event's start time was changed successfully to **{0}**.".format(starts_in.original))
        else:
            yield from bot.say("Ok, nevermind.")
    finally:
        # Commit and release the cursor whether or not the edit happened.
        userDatabase.commit()
        c.close()
@events.command(name="delete", aliases=["remove"], pass_context=True)
@checks.is_not_lite()
@asyncio.coroutine
def event_remove(ctx, event_id: int):
    """Deletes an event

    Only the creator of the event or mods can delete an event
    Only upcoming events can be edited"""
    c = userDatabase.cursor()
    now = time.time()
    try:
        # Only active, not-yet-started events may be deleted.
        c.execute("SELECT creator,name FROM events WHERE id = ? AND active = 1 AND start > ?", (event_id, now,))
        event = c.fetchone()
        if not event:
            yield from bot.say("There are no active events with that ID.")
            return
        # Mods/owners may delete anyone's event; others only their own.
        if event["creator"] != int(ctx.message.author.id) and ctx.message.author.id not in mod_ids+owner_ids:
            yield from bot.say("You can only delete your own events.")
            return
        yield from bot.say("Do you want to delete the event '**{0}**'? `(yes/no)`".format(event["name"]))
        answer = yield from bot.wait_for_message(author=ctx.message.author, channel=ctx.message.channel, timeout=30.0)
        if answer is None:
            yield from bot.say("I will take your silence as a no...")
        elif answer.content.lower() in ["yes", "y"]:
            # Soft delete: flip the active flag rather than removing the row.
            c.execute("UPDATE events SET active = 0 WHERE id = ?", (event_id,))
            yield from bot.say("Your event was deleted successfully.")
        else:
            yield from bot.say("Ok, nevermind.")
    finally:
        # Commit and release the cursor whether or not the delete happened.
        userDatabase.commit()
        c.close()
@events.command(pass_context=True, name="make", aliases=["creator", "maker"])
@checks.is_not_lite()
@asyncio.coroutine
def event_make(ctx):
"""Creates an event guiding you step by step
Instead of using confusing parameters, commas and spaces, this commands has the bot ask you step by step."""
author = ctx.message.author
creator = author.id
now = time.time()
c = userDatabase.cursor()
try:
c.execute("SELECT creator FROM events WHERE creator = ? AND active = 1 AND start > ?", (creator, now,))
event = c.fetchall()
if len(event) > 1 and creator not in owner_ids + mod_ids:
return
yield from bot.say("Let's create an event. What would you like the name to be?")
name = yield from bot.wait_for_message(author=author, channel=ctx.message.channel, timeout=50.0)
if name is None:
yield from bot.say("...You took to long. Try the command again.")
return
name = single_line(name.clean_content)
yield from bot.say("Alright, what description would you like the event to have? `(no/none = no description)`")
event_description = yield from bot.wait_for_message(author=author, channel=ctx.message.channel, timeout=50.0)
if event_description is None:
yield from bot.say("...You took too long. Try the command again.")
return
elif event_description.content.lower().strip() in ["no", "none"]:
yield from bot.say("No description then? Alright, now tell me the start time of the event from now. "
"`e.g. 2d1h20m, 2d3h`")
event_description = ""
else:
event_description = event_description.clean_content
yield from bot.say("Alright, now tell me the start time of the event from now. `e.g. 2d1h20m, 2d3h`")
starts_in = yield from bot.wait_for_message(author=author, channel=ctx.message.channel, timeout=50.0)
if starts_in is None:
yield from bot.say("...You took too long. Try the command again.")
return
try:
starts_in = TimeString(starts_in.content)
except commands.BadArgument:
yield from bot.say("Invalid time. Try the command again. `Time examples: 1h2m, 2d30m, 40m, 5h`")
return
servers = get_user_servers(bot, creator)
# If message is via PM, but user only shares one server, we just consider that server
if ctx.message.channel.is_private and len(servers) == 1:
server = servers[0]
# Not a private message, so we just take current server
elif not ctx.message.channel.is_private:
server = ctx.message.server
# PM and user shares multiple servers, we must ask him for which server is the event
else:
yield from bot.say("One more question...for which server is this event? Choose one (number only)" +
"\n\t0: *Cancel*\n\t" +
"\n\t".join(["{0}: **{1.name}**".format(i+1, j) for i, j in enumerate(servers)]))
reply = yield from bot.wait_for_message(author=ctx.message.author, channel=ctx.message.channel,
timeout=50.0)
if reply is None:
yield from bot.say("Nothing? Forget it then.")
return
elif is_numeric(reply.content):
answer = int(reply.content)
if answer == 0:
yield from bot.say("Changed your mind? Typical human.")
return
try:
server = servers[answer-1]
except IndexError:
yield from bot.say("That wasn't in | |
for d in range(1, 32)}
assert rule.apply_to_date_range(start_date, end_date) == days_in_march
@pytest.mark.django_db
def test_rule_filter_dates7(
    resource, date_period_factory, time_span_group_factory, rule_factory
):
    """A MONTH/WEEK rule with start=3 selects the month's third week."""
    period = date_period_factory(
        resource=resource,
        resource_state=State.OPEN,
        start_date=datetime.date(2020, 9, 1),
        end_date=datetime.date(2020, 11, 30),
    )
    group = time_span_group_factory(period=period)
    rule = rule_factory(
        group=group,
        context=RuleContext.MONTH,
        subject=RuleSubject.WEEK,
        start=3,
    )
    range_start = datetime.date(2020, 10, 1)
    range_end = datetime.date(2020, 10, 31)
    # Third week of October 2020 runs Mon 12th through Sun 18th.
    expected = {datetime.date(2020, 10, day) for day in range(12, 19)}
    assert rule.apply_to_date_range(range_start, range_end) == expected
@pytest.mark.django_db
def test_rule_filter_dates7_1(
    resource, date_period_factory, time_span_group_factory, rule_factory
):
    """A MONTH/WEEK rule with start=1 selects each month's first week."""
    period = date_period_factory(
        resource=resource,
        resource_state=State.OPEN,
        start_date=datetime.date(2020, 9, 1),
        end_date=datetime.date(2020, 11, 30),
    )
    group = time_span_group_factory(period=period)
    rule = rule_factory(
        group=group,
        context=RuleContext.MONTH,
        subject=RuleSubject.WEEK,
        start=1,
    )
    range_start = datetime.date(2020, 9, 1)
    range_end = datetime.date(2020, 11, 30)
    # First week of September (Tue 1st - Sun 6th), the week containing
    # October 1st (Mon Sep 28 - Sun Oct 4), and the week containing
    # November 1st (Mon Oct 26 - Sun Nov 1).
    expected = {
        datetime.date(2020, month, day)
        for month, days in (
            (9, range(1, 7)),
            (9, range(28, 31)),
            (10, range(1, 5)),
            (10, range(26, 32)),
            (11, range(1, 2)),
        )
        for day in days
    }
    assert rule.apply_to_date_range(range_start, range_end) == expected
@pytest.mark.django_db
def test_rule_filter_dates8(
    resource, date_period_factory, time_span_group_factory, rule_factory
):
    """A YEAR/DAY rule with start=3 selects the year's third day."""
    period = date_period_factory(
        resource=resource,
        resource_state=State.OPEN,
        start_date=datetime.date(2019, 1, 1),
        end_date=datetime.date(2021, 12, 31),
    )
    group = time_span_group_factory(period=period)
    rule = rule_factory(
        group=group,
        context=RuleContext.YEAR,
        subject=RuleSubject.DAY,
        start=3,
    )
    range_start = datetime.date(2020, 1, 1)
    range_end = datetime.date(2020, 12, 31)
    # Only January 3rd of the queried year matches.
    expected = {datetime.date(2020, 1, 3)}
    assert rule.apply_to_date_range(range_start, range_end) == expected
@pytest.mark.django_db
def test_rule_filter_dates8_1(
    resource, date_period_factory, time_span_group_factory, rule_factory
):
    """A YEAR/DAY rule matches the third day of every year in range."""
    period = date_period_factory(
        resource=resource,
        resource_state=State.OPEN,
        start_date=datetime.date(2019, 1, 1),
        end_date=datetime.date(2021, 12, 31),
    )
    group = time_span_group_factory(period=period)
    rule = rule_factory(
        group=group,
        context=RuleContext.YEAR,
        subject=RuleSubject.DAY,
        start=3,
    )
    range_start = datetime.date(2019, 1, 1)
    range_end = datetime.date(2021, 12, 31)
    # January 3rd of each year covered by the queried range.
    expected = {datetime.date(year, 1, 3) for year in (2019, 2020, 2021)}
    assert rule.apply_to_date_range(range_start, range_end) == expected
@pytest.mark.django_db
def test_rule_filter_dates9(
    resource, date_period_factory, time_span_group_factory, rule_factory
):
    """A YEAR/WEEK rule with start=3 selects the year's third ISO week."""
    period = date_period_factory(
        resource=resource,
        resource_state=State.OPEN,
        start_date=datetime.date(2019, 1, 1),
        end_date=datetime.date(2021, 12, 31),
    )
    group = time_span_group_factory(period=period)
    rule = rule_factory(
        group=group,
        context=RuleContext.YEAR,
        subject=RuleSubject.WEEK,
        start=3,
    )
    range_start = datetime.date(2020, 1, 1)
    range_end = datetime.date(2020, 12, 31)
    # Week 3 of 2020 runs Mon Jan 13 through Sun Jan 19.
    expected = {datetime.date(2020, 1, day) for day in range(13, 20)}
    assert rule.apply_to_date_range(range_start, range_end) == expected
@pytest.mark.django_db
def test_rule_filter_dates10(
    resource, date_period_factory, time_span_group_factory, rule_factory
):
    """EVEN week frequency in November 2020 matches the tail of week 44 plus weeks 46 and 48."""
    period = date_period_factory(
        resource=resource,
        resource_state=State.OPEN,
        start_date=datetime.date(2019, 1, 1),
        end_date=datetime.date(2021, 12, 31),
    )
    group = time_span_group_factory(period=period)
    rule = rule_factory(
        group=group,
        context=RuleContext.YEAR,
        subject=RuleSubject.WEEK,
        start=1,
        frequency_modifier=FrequencyModifier.EVEN,
    )
    result = rule.apply_to_date_range(
        datetime.date(2020, 11, 1), datetime.date(2020, 11, 30)
    )
    expected = {datetime.date(2020, 11, 1)}
    expected |= {datetime.date(2020, 11, day) for day in range(9, 16)}
    expected |= {datetime.date(2020, 11, day) for day in range(23, 30)}
    assert result == expected
@pytest.mark.django_db
def test_rule_filter_dates10_1(
    resource, date_period_factory, time_span_group_factory, rule_factory
):
    """EVEN week frequency across a year boundary matches even weeks of each year."""
    period = date_period_factory(
        resource=resource,
        resource_state=State.OPEN,
        start_date=datetime.date(2019, 1, 1),
        end_date=datetime.date(2021, 12, 31),
    )
    group = time_span_group_factory(period=period)
    rule = rule_factory(
        group=group,
        context=RuleContext.YEAR,
        subject=RuleSubject.WEEK,
        start=1,
        frequency_modifier=FrequencyModifier.EVEN,
    )
    result = rule.apply_to_date_range(
        datetime.date(2020, 12, 1), datetime.date(2021, 1, 31)
    )
    # Each expected week is a full Monday-to-Sunday run of seven days.
    expected = set()
    for year, month, monday in (
        (2020, 12, 7),
        (2020, 12, 21),
        (2021, 1, 11),
        (2021, 1, 25),
    ):
        expected |= {
            datetime.date(year, month, day) for day in range(monday, monday + 7)
        }
    assert result == expected
@pytest.mark.django_db
def test_rule_filter_dates11(
    resource, date_period_factory, time_span_group_factory, rule_factory
):
    """Month 3 of the year matches every day in March within the queried range."""
    period = date_period_factory(
        resource=resource,
        resource_state=State.OPEN,
        start_date=datetime.date(2019, 1, 1),
        end_date=datetime.date(2021, 12, 31),
    )
    group = time_span_group_factory(period=period)
    rule = rule_factory(
        group=group,
        context=RuleContext.YEAR,
        subject=RuleSubject.MONTH,
        start=3,
    )
    result = rule.apply_to_date_range(
        datetime.date(2020, 2, 15), datetime.date(2020, 4, 30)
    )
    assert result == {datetime.date(2020, 3, day) for day in range(1, 32)}
@pytest.mark.django_db
def test_rule_filter_dates11_1(
    resource, date_period_factory, time_span_group_factory, rule_factory
):
    """Month 3 of the year matches every day in March of every covered year."""
    period = date_period_factory(
        resource=resource,
        resource_state=State.OPEN,
        start_date=datetime.date(2019, 1, 1),
        end_date=datetime.date(2021, 12, 31),
    )
    group = time_span_group_factory(period=period)
    rule = rule_factory(
        group=group,
        context=RuleContext.YEAR,
        subject=RuleSubject.MONTH,
        start=3,
    )
    result = rule.apply_to_date_range(
        datetime.date(2019, 1, 1), datetime.date(2021, 12, 31)
    )
    assert result == {
        datetime.date(year, 3, day)
        for year in (2019, 2020, 2021)
        for day in range(1, 32)
    }
@pytest.mark.django_db
def test_rule_filter_dates11_2(
    resource, date_period_factory, time_span_group_factory, rule_factory
):
    """Matches are clamped to the period: only March of the period's single year."""
    period = date_period_factory(
        resource=resource,
        resource_state=State.OPEN,
        start_date=datetime.date(2020, 1, 1),
        end_date=datetime.date(2020, 12, 31),
    )
    group = time_span_group_factory(period=period)
    rule = rule_factory(
        group=group,
        context=RuleContext.YEAR,
        subject=RuleSubject.MONTH,
        start=3,
    )
    result = rule.apply_to_date_range(
        datetime.date(2019, 1, 1), datetime.date(2021, 12, 31)
    )
    assert result == {datetime.date(2020, 3, day) for day in range(1, 32)}
@pytest.mark.django_db
def test_rule_filter_dates11_3(
    resource, date_period_factory, time_span_group_factory, rule_factory
):
    """No matches when the period starts after the queried range ends."""
    period = date_period_factory(
        resource=resource,
        resource_state=State.OPEN,
        start_date=datetime.date(2020, 5, 1),
        end_date=datetime.date(2020, 12, 31),
    )
    group = time_span_group_factory(period=period)
    rule = rule_factory(
        group=group,
        context=RuleContext.YEAR,
        subject=RuleSubject.MONTH,
        start=3,
    )
    result = rule.apply_to_date_range(
        datetime.date(2020, 2, 15), datetime.date(2020, 4, 30)
    )
    assert result == set()
@pytest.mark.django_db
def test_rule_filter_dates11_4(
    resource, date_period_factory, time_span_group_factory, rule_factory
):
    """No matches when the period ends before the queried range starts."""
    period = date_period_factory(
        resource=resource,
        resource_state=State.OPEN,
        start_date=datetime.date(2020, 1, 1),
        end_date=datetime.date(2020, 1, 31),
    )
    group = time_span_group_factory(period=period)
    rule = rule_factory(
        group=group,
        context=RuleContext.YEAR,
        subject=RuleSubject.MONTH,
        start=3,
    )
    result = rule.apply_to_date_range(
        datetime.date(2020, 2, 1), datetime.date(2020, 4, 30)
    )
    assert result == set()
@pytest.mark.django_db
def test_rule_filter_dates12(
    resource, date_period_factory, time_span_group_factory, rule_factory
):
    """The second FRIDAY of the year matches exactly one date per covered year."""
    period = date_period_factory(
        resource=resource,
        resource_state=State.OPEN,
        start_date=datetime.date(2019, 1, 1),
        end_date=datetime.date(2021, 12, 31),
    )
    group = time_span_group_factory(period=period)
    rule = rule_factory(
        group=group,
        context=RuleContext.YEAR,
        subject=RuleSubject.FRIDAY,
        start=2,
    )
    result = rule.apply_to_date_range(
        datetime.date(2019, 1, 1), datetime.date(2021, 12, 31)
    )
    assert result == {
        datetime.date(2019, 1, 11),
        datetime.date(2020, 1, 10),
        datetime.date(2021, 1, 8),
    }
@pytest.mark.django_db
def test_resource_get_daily_opening_hours_override(
    resource, date_period_factory, time_span_group_factory, time_span_factory
):
    """An overriding period replaces the base period's hours on overlapping days."""
    base_period = date_period_factory(
        name="The whole year",
        resource=resource,
        resource_state=State.UNDEFINED,
        start_date=datetime.date(2020, 1, 1),
        end_date=datetime.date(2020, 12, 31),
    )
    time_span_factory(
        group=time_span_group_factory(period=base_period),
        start_time=datetime.time(8, 0),
        end_time=datetime.time(16, 0),
        resource_state=State.OPEN,
        weekdays=list(Weekday),
    )
    override_period = date_period_factory(
        name="Exception for december",
        resource=resource,
        resource_state=State.UNDEFINED,
        start_date=datetime.date(2020, 12, 1),
        end_date=datetime.date(2020, 12, 31),
        override=True,
    )
    time_span_factory(
        group=time_span_group_factory(period=override_period),
        start_time=datetime.time(10, 0),
        end_time=datetime.time(15, 0),
        resource_state=State.OPEN,
        weekdays=list(Weekday),
    )
    expected_element = TimeElement(
        start_time=datetime.time(10, 0),
        end_time=datetime.time(15, 0),
        end_time_on_next_day=False,
        resource_state=State.OPEN,
        override=True,
        full_day=False,
    )
    opening_hours = resource.get_daily_opening_hours(
        datetime.date(2020, 12, 23),
        datetime.date(2020, 12, 25),
    )
    assert opening_hours == {
        datetime.date(2020, 12, day): [expected_element] for day in (23, 24, 25)
    }
@pytest.mark.django_db
def test_resource_get_daily_opening_hours_multiple_full_day_overrides(
    resource, date_period_factory, time_span_group_factory, time_span_factory
):
    """When overrides stack, the narrower override period wins on its days."""
    base_period = date_period_factory(
        name="The whole year",
        resource=resource,
        resource_state=State.UNDEFINED,
        start_date=datetime.date(2020, 1, 1),
        end_date=datetime.date(2020, 12, 31),
    )
    time_span_factory(
        group=time_span_group_factory(period=base_period),
        start_time=datetime.time(8, 0),
        end_time=datetime.time(16, 0),
        resource_state=State.OPEN,
        weekdays=list(Weekday),
    )
    december_period = date_period_factory(
        name="Exception for december",
        resource=resource,
        resource_state=State.UNDEFINED,
        start_date=datetime.date(2020, 12, 1),
        end_date=datetime.date(2020, 12, 31),
        override=True,
    )
    time_span_factory(
        group=time_span_group_factory(period=december_period),
        resource_state=State.CLOSED,
        weekdays=list(Weekday),
        full_day=True,
    )
    christmas_period = date_period_factory(
        name="Exceptions for december 24th and 25th",
        resource=resource,
        resource_state=State.UNDEFINED,
        start_date=datetime.date(2020, 12, 24),
        end_date=datetime.date(2020, 12, 25),
        override=True,
    )
    time_span_factory(
        group=time_span_group_factory(period=christmas_period),
        resource_state=State.EXIT_ONLY,
        weekdays=list(Weekday),
        full_day=True,
    )

    def full_day_element(state):
        # Both expected elements differ only in their resource state.
        return TimeElement(
            start_time=None,
            end_time=None,
            end_time_on_next_day=False,
            resource_state=state,
            override=True,
            full_day=True,
        )

    opening_hours = resource.get_daily_opening_hours(
        datetime.date(2020, 12, 23),
        datetime.date(2020, 12, 25),
    )
    assert opening_hours == {
        datetime.date(2020, 12, 23): [full_day_element(State.CLOSED)],
        datetime.date(2020, 12, 24): [full_day_element(State.EXIT_ONLY)],
        datetime.date(2020, 12, 25): [full_day_element(State.EXIT_ONLY)],
    }
@pytest.mark.django_db
def test_resource_get_daily_opening_hours_multiple_full_day_overrides_unbounded(
    resource, date_period_factory, time_span_group_factory, time_span_factory
):
    """An override period with no end date still yields to a narrower override."""
    base_period = date_period_factory(
        name="The whole year",
        resource=resource,
        resource_state=State.UNDEFINED,
        start_date=datetime.date(2020, 1, 1),
        end_date=datetime.date(2020, 12, 31),
    )
    time_span_factory(
        group=time_span_group_factory(period=base_period),
        start_time=datetime.time(8, 0),
        end_time=datetime.time(16, 0),
        resource_state=State.OPEN,
        weekdays=list(Weekday),
    )
    # Open-ended override: no end_date.
    december_period = date_period_factory(
        name="Exception for december forwards",
        resource=resource,
        resource_state=State.UNDEFINED,
        start_date=datetime.date(2020, 12, 1),
        end_date=None,
        override=True,
    )
    time_span_factory(
        group=time_span_group_factory(period=december_period),
        resource_state=State.CLOSED,
        weekdays=list(Weekday),
        full_day=True,
    )
    christmas_period = date_period_factory(
        name="Exceptions for december 24th and 25th",
        resource=resource,
        resource_state=State.UNDEFINED,
        start_date=datetime.date(2020, 12, 24),
        end_date=datetime.date(2020, 12, 25),
        override=True,
    )
    time_span_factory(
        group=time_span_group_factory(period=christmas_period),
        resource_state=State.EXIT_ONLY,
        weekdays=list(Weekday),
        full_day=True,
    )

    def full_day_element(state):
        # Both expected elements differ only in their resource state.
        return TimeElement(
            start_time=None,
            end_time=None,
            end_time_on_next_day=False,
            resource_state=state,
            override=True,
            full_day=True,
        )

    opening_hours = resource.get_daily_opening_hours(
        datetime.date(2020, 12, 23),
        datetime.date(2020, 12, 25),
    )
    assert opening_hours == {
        datetime.date(2020, 12, 23): [full_day_element(State.CLOSED)],
        datetime.date(2020, 12, 24): [full_day_element(State.EXIT_ONLY)],
        datetime.date(2020, 12, 25): [full_day_element(State.EXIT_ONLY)],
    }
@pytest.mark.django_db
def test_resource_get_daily_opening_hours_multiple_overrides(
resource, date_period_factory, time_span_group_factory, time_span_factory
):
date_period = date_period_factory(
name="The whole year",
resource=resource,
resource_state=State.UNDEFINED,
start_date=datetime.date(year=2020, month=1, day=1),
end_date=datetime.date(year=2020, month=12, day=31),
)
time_span_group = time_span_group_factory(period=date_period)
time_span_factory(
group=time_span_group,
start_time=datetime.time(hour=8, minute=0),
end_time=datetime.time(hour=16, minute=0),
resource_state=State.OPEN,
weekdays=list(Weekday),
)
date_period2 = date_period_factory(
name="Exception for december",
resource=resource,
resource_state=State.UNDEFINED,
start_date=datetime.date(year=2020, month=12, day=1),
end_date=datetime.date(year=2020, month=12, day=31),
override=True,
)
time_span_group2 = time_span_group_factory(period=date_period2)
time_span_factory(
group=time_span_group2,
start_time=datetime.time(hour=10, minute=0),
| |
#!/usr/bin/python
#
# Copyright (c) 2015 CenturyLink
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Ansible module metadata: this community-supported module is in "preview" status.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = '''
module: clc_alert_policy
short_description: Create or Delete Alert Policies at CenturyLink Cloud.
description:
- An Ansible module to Create or Delete Alert Policies at CenturyLink Cloud.
version_added: "2.0"
options:
alias:
description:
- The alias of your CLC Account
required: True
name:
description:
- The name of the alert policy. This is mutually exclusive with id
id:
description:
- The alert policy id. This is mutually exclusive with name
alert_recipients:
description:
- A list of recipient email ids to notify the alert.
This is required for state 'present'
metric:
description:
- The metric on which to measure the condition that will trigger the alert.
This is required for state 'present'
choices: ['cpu','memory','disk']
duration:
description:
- The length of time in minutes that the condition must exceed the threshold.
This is required for state 'present'
threshold:
description:
- The threshold that will trigger the alert when the metric equals or exceeds it.
This is required for state 'present'
This number represents a percentage and must be a value between 5.0 - 95.0 that is a multiple of 5.0
state:
description:
- Whether to create or delete the policy.
default: present
choices: ['present','absent']
requirements:
- python = 2.7
- requests >= 2.5.0
- clc-sdk
author: "CLC Runner (@clc-runner)"
notes:
- To use this module, it is required to set the below environment variables which enables access to the
Centurylink Cloud
- CLC_V2_API_USERNAME, the account login id for the centurylink cloud
    - CLC_V2_API_PASSWD, the account password for the centurylink cloud
- Alternatively, the module accepts the API token and account alias. The API token can be generated using the
CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login
- CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
- CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud
- Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
'''
EXAMPLES = '''
# Note - You must set the CLC_V2_API_USERNAME And CLC_V2_API_PASSWD Environment variables before running these examples
---
- name: Create Alert Policy Example
hosts: localhost
gather_facts: False
connection: local
tasks:
- name: Create an Alert Policy for disk above 80% for 5 minutes
clc_alert_policy:
alias: wfad
name: 'alert for disk > 80%'
alert_recipients:
- <EMAIL>
- <EMAIL>
metric: 'disk'
duration: '00:05:00'
threshold: 80
state: present
register: policy
- name: debug
debug: var=policy
---
- name: Delete Alert Policy Example
hosts: localhost
gather_facts: False
connection: local
tasks:
- name: Delete an Alert Policy
clc_alert_policy:
alias: wfad
name: 'alert for disk > 80%'
state: absent
register: policy
- name: debug
debug: var=policy
'''
RETURN = '''
policy:
description: The alert policy information
returned: success
type: dict
sample:
{
"actions": [
{
"action": "email",
"settings": {
"recipients": [
"<EMAIL>",
"<EMAIL>"
]
}
}
],
"id": "ba54ac54a60d4a4f1ed6d48c1ce240a7",
"links": [
{
"href": "/v2/alertPolicies/alias/ba54ac54a60d4a4fb1d6d48c1ce240a7",
"rel": "self",
"verbs": [
"GET",
"DELETE",
"PUT"
]
}
],
"name": "test_alert",
"triggers": [
{
"duration": "00:05:00",
"metric": "disk",
"threshold": 80.0
}
]
}
'''
__version__ = '${version}'
import json
import os
import traceback
from distutils.version import LooseVersion
REQUESTS_IMP_ERR = None
try:
import requests
except ImportError:
REQUESTS_IMP_ERR = traceback.format_exc()
REQUESTS_FOUND = False
else:
REQUESTS_FOUND = True
#
# Requires the clc-python-sdk.
# sudo pip install clc-sdk
#
CLC_IMP_ERR = None
try:
import clc as clc_sdk
from clc import APIFailedResponse
except ImportError:
CLC_IMP_ERR = traceback.format_exc()
CLC_FOUND = False
clc_sdk = None
else:
CLC_FOUND = True
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
class ClcAlertPolicy:
clc = clc_sdk
module = None
def __init__(self, module):
"""
Construct module
"""
self.module = module
self.policy_dict = {}
if not CLC_FOUND:
self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR)
if not REQUESTS_FOUND:
self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'):
self.module.fail_json(
msg='requests library version should be >= 2.5.0')
self._set_user_agent(self.clc)
@staticmethod
def _define_module_argument_spec():
"""
Define the argument spec for the ansible module
:return: argument spec dictionary
"""
argument_spec = dict(
name=dict(default=None),
id=dict(default=None),
alias=dict(required=True, default=None),
alert_recipients=dict(type='list', default=None),
metric=dict(
choices=[
'cpu',
'memory',
'disk'],
default=None),
duration=dict(type='str', default=None),
threshold=dict(type='int', default=None),
state=dict(default='present', choices=['present', 'absent'])
)
mutually_exclusive = [
['name', 'id']
]
return {'argument_spec': argument_spec,
'mutually_exclusive': mutually_exclusive}
# Module Behavior Goodness
def process_request(self):
"""
Process the request - Main Code Path
:return: Returns with either an exit_json or fail_json
"""
p = self.module.params
self._set_clc_credentials_from_env()
self.policy_dict = self._get_alert_policies(p['alias'])
if p['state'] == 'present':
changed, policy = self._ensure_alert_policy_is_present()
else:
changed, policy = self._ensure_alert_policy_is_absent()
self.module.exit_json(changed=changed, policy=policy)
def _set_clc_credentials_from_env(self):
"""
Set the CLC Credentials on the sdk by reading environment variables
:return: none
"""
env = os.environ
v2_api_token = env.get('CLC_V2_API_TOKEN', False)
v2_api_username = env.get('CLC_V2_API_USERNAME', False)
v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
clc_alias = env.get('CLC_ACCT_ALIAS', False)
api_url = env.get('CLC_V2_API_URL', False)
if api_url:
self.clc.defaults.ENDPOINT_URL_V2 = api_url
if v2_api_token and clc_alias:
self.clc._LOGIN_TOKEN_V2 = v2_api_token
self.clc._V2_ENABLED = True
self.clc.ALIAS = clc_alias
elif v2_api_username and v2_api_passwd:
self.clc.v2.SetCredentials(
api_username=v2_api_username,
api_passwd=v2_api_passwd)
else:
return self.module.fail_json(
msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
"environment variables")
def _ensure_alert_policy_is_present(self):
"""
Ensures that the alert policy is present
:return: (changed, policy)
changed: A flag representing if anything is modified
policy: the created/updated alert policy
"""
changed = False
p = self.module.params
policy_name = p.get('name')
if not policy_name:
self.module.fail_json(msg='Policy name is a required')
policy = self._alert_policy_exists(policy_name)
if not policy:
changed = True
policy = None
if not self.module.check_mode:
policy = self._create_alert_policy()
else:
changed_u, policy = self._ensure_alert_policy_is_updated(policy)
if changed_u:
changed = True
return changed, policy
def _ensure_alert_policy_is_absent(self):
"""
Ensures that the alert policy is absent
:return: (changed, None)
changed: A flag representing if anything is modified
"""
changed = False
p = self.module.params
alert_policy_id = p.get('id')
alert_policy_name = p.get('name')
alias = p.get('alias')
if not alert_policy_id and not alert_policy_name:
self.module.fail_json(
msg='Either alert policy id or policy name is required')
if not alert_policy_id and alert_policy_name:
alert_policy_id = self._get_alert_policy_id(
self.module,
alert_policy_name)
if alert_policy_id and alert_policy_id in self.policy_dict:
changed = True
if not self.module.check_mode:
self._delete_alert_policy(alias, alert_policy_id)
return changed, None
    def _ensure_alert_policy_is_updated(self, alert_policy):
        """
        Ensures the alert policy is updated if anything is changed in the alert policy configuration
        :param alert_policy: the target alert policy
        :return: (changed, policy)
            changed: A flag representing if anything is modified
            policy: the updated alert policy
        """
        changed = False
        p = self.module.params
        alert_policy_id = alert_policy.get('id')
        email_list = p.get('alert_recipients')
        metric = p.get('metric')
        duration = p.get('duration')
        threshold = p.get('threshold')
        policy = alert_policy
        # Compare the requested trigger settings against the first (only) trigger
        # on the existing policy; any unset (falsy) parameter is ignored.
        if (metric and metric != str(alert_policy.get('triggers')[0].get('metric'))) or \
                (duration and duration != str(alert_policy.get('triggers')[0].get('duration'))) or \
                (threshold and float(threshold) != float(alert_policy.get('triggers')[0].get('threshold'))):
            changed = True
        elif email_list:
            # NOTE(review): recipients are only compared when no trigger field
            # changed ('elif'); harmless since 'changed' is already True in the
            # other branch, and the update sends all fields anyway.
            t_email_list = list(
                alert_policy.get('actions')[0].get('settings').get('recipients'))
            if set(email_list) != set(t_email_list):
                changed = True
        # check mode reports 'changed' without performing the API update.
        if changed and not self.module.check_mode:
            policy = self._update_alert_policy(alert_policy_id)
        return changed, policy
def _get_alert_policies(self, alias):
"""
Get the alert policies for account alias by calling the CLC API.
:param alias: the account alias
:return: the alert policies for the account alias
"""
response = {}
policies = self.clc.v2.API.Call('GET',
'/v2/alertPolicies/%s'
% alias)
for policy in policies.get('items'):
response[policy.get('id')] = policy
return response
def _create_alert_policy(self):
"""
Create an alert Policy using the CLC API.
:return: response dictionary from the CLC API.
"""
p = self.module.params
alias = p['alias']
email_list = p['alert_recipients']
metric = p['metric']
duration = p['duration']
threshold = p['threshold']
policy_name = p['name']
arguments = json.dumps(
{
'name': policy_name,
'actions': [{
'action': 'email',
'settings': {
'recipients': email_list
}
}],
'triggers': [{
'metric': metric,
'duration': duration,
'threshold': threshold
}]
}
)
try:
result = self.clc.v2.API.Call(
'POST',
'/v2/alertPolicies/%s' % alias,
arguments)
except APIFailedResponse as e:
return self.module.fail_json(
msg='Unable to create alert policy "{0}". {1}'.format(
policy_name, str(e.response_text)))
return result
def _update_alert_policy(self, alert_policy_id):
"""
Update alert policy using the CLC API.
:param alert_policy_id: The clc alert policy id
:return: response dictionary from the CLC API.
"""
p = self.module.params
alias = p['alias']
email_list = p['alert_recipients']
metric = p['metric']
duration = p['duration']
threshold = p['threshold']
policy_name = p['name']
arguments = json.dumps(
{
'name': policy_name,
'actions': [{
'action': 'email',
'settings': {
'recipients': email_list
}
}],
'triggers': [{
'metric': metric,
'duration': duration,
'threshold': threshold
}]
}
)
try:
result = self.clc.v2.API.Call(
'PUT', '/v2/alertPolicies/%s/%s' %
(alias, alert_policy_id), arguments)
except APIFailedResponse as e:
return self.module.fail_json(
msg='Unable to update alert policy "{0}". {1}'.format(
| |
rewrite some commands as necessary
if not vs.argument_supplied:
return None
if cmd in repeatable_cmds:
count = vs.get_count()
args.update({
'cmd': cmd,
'_times': abs(count),
})
if count < 0 and 'forward' in args:
args['forward'] = not args['forward']
return ("sbp_do_times", args)
elif cmd == 'scroll_lines':
args['amount'] *= vs.get_count()
return (cmd, args)
#
# Post command processing: deal with active mark and resetting the numeric argument.
#
    def on_post_text_command(self, view, cmd, args):
        """
        Post-command hook: tidy up active mark state and reset the numeric
        argument after a non-sbp command runs.
        """
        vs = ViewState.get(view)
        cm = CmdUtil(view)
        if vs.active_mark and vs.this_cmd != 'drag_select' and vs.last_cmd == 'drag_select':
            # if we just finished a mouse drag, make sure active mark mode is off
            cm.toggle_active_mark_mode(False)
        # reset numeric argument (if command starts with "sbp_" this is handled elsewhere)
        if not cmd.startswith("sbp_"):
            vs.argument_value = 0
            vs.argument_supplied = False
            vs.last_cmd = cmd
        if vs.active_mark:
            if len(view.sel()) > 1:
                # allow the awesomeness of multiple cursors to be used: the selection will disappear
                # after the next command
                vs.active_mark = False
            else:
                # keep the highlighted emacs region in sync with mark..point
                cm.set_selection(cm.get_mark(), cm.get_point())
        if cmd in ensure_visible_cmds and cm.just_one_point():
            cm.ensure_visible(cm.get_point())
#
# Process the selection if it was created from a drag_select (mouse dragging) command.
#
def on_selection_modified(self, view):
vs = ViewState.get(view)
selection = view.sel()
if len(selection) == 1 and vs.this_cmd == 'drag_select':
cm = CmdUtil(view, vs);
if vs.drag_count == 2:
# second event - enable active mark
region = view.sel()[0]
mark = region.a
cm.set_mark(mark, and_selection=False)
cm.toggle_active_mark_mode(True)
elif vs.drag_count == 0:
cm.toggle_active_mark_mode(False)
vs.drag_count += 1
#
# At a minimum this is called when bytes are inserted into the buffer.
#
def on_modified(self, view):
ViewState.get(view).this_cmd = None
self.on_anything(view)
class WindowCmdWatcher(sublime_plugin.EventListener):
    def __init__(self, *args, **kwargs):
        super(WindowCmdWatcher, self).__init__(*args, **kwargs)

    def on_window_command(self, window, cmd, args):
        """
        Rewrite sbp_pane_cmd "move" commands, pre-computing the target pane.

        REMIND - JP: Why is this code here? Can't this be done in the SbpPaneCmd class?
        """
        # Check the move state of the Panes and make sure we stop recursion.
        # Use args.get() so an args dict without a 'cmd' key cannot raise KeyError.
        if cmd == "sbp_pane_cmd" and args and args.get('cmd') == 'move' and 'next_pane' not in args:
            lm = ll.LayoutManager(window.layout())
            if args["direction"] == 'next':
                pos = lm.next(window.active_group())
            else:
                pos = lm.next(window.active_group(), -1)
            args["next_pane"] = pos
            return cmd, args
#
# A helper class which provides a bunch of useful functionality on a view
#
class CmdUtil:
def __init__(self, view, state=None, edit=None):
self.view = view
if state is None:
state = ViewState.get(self.view)
self.state = state
self.edit = edit
#
# Sets the status text on the bottom of the window.
#
def set_status(self, msg):
self.view.set_status(JOVE_STATUS, msg)
#
# Returns point. Point is where the cursor is in the possibly extended region. If there are multiple cursors it
# uses the first one in the list.
#
def get_point(self):
sel = self.view.sel()
if len(sel) > 0:
return sel[0].b
return -1
#
# This no-op ensures the next/prev line target column is reset to the new locations.
#
def reset_target_column(self):
selection = self.view.sel()
if len(selection) == 1 and selection[0].empty() and selection[0].b < self.view.size():
self.run_command("move", {"by": "characters", "forward": True})
self.run_command("move", {"by": "characters", "forward": False})
#
# Returns the mark position.
#
def get_mark(self):
mark = self.view.get_regions("jove_mark")
if mark:
mark = mark[0]
return mark.a
#
# Get the region between mark and point.
#
def get_region(self):
selection = self.view.sel()
if len(selection) != 1:
# Oops - this error message does not belong here!
self.set_status("Operation not supported with multiple cursors")
return
selection = selection[0]
if selection.size() > 0:
return selection
mark = self.get_mark()
if mark is not None:
point = self.get_point()
return sublime.Region(mark, self.get_point())
#
# Save a copy of the current region in the named mark. This mark will be robust in the face of
# changes to the buffer.
#
def save_region(self, name):
r = self.get_region()
if r:
self.view.add_regions(name, [r], "mark", "", sublime.HIDDEN)
return r
#
# Restore the current region to the named saved mark.
#
def restore_region(self, name):
r = self.view.get_regions(name)
if r:
r = r[0]
self.set_mark(r.a, False, False)
self.set_selection(r.b, r.b)
self.view.erase_regions(name)
return r
#
# Iterator on all the lines in the specified sublime Region.
#
def for_each_line(self, region):
view = self.view
pos = region.begin()
limit = region.end()
while pos < limit:
line = view.line(pos)
yield line
pos = line.end() + 1
#
# Returns true if all the text between a and b is blank.
#
def is_blank(self, a, b):
text = self.view.substr(sublime.Region(a, b))
return re.match(r'[ \t]*$', text) is not None
#
# Returns the current indent of the line containing the specified POS and the column of POS.
#
def get_line_indent(self, pos):
data,col,region = self.get_line_info(pos)
m = re.match(r'[ \t]*', data)
return (len(m.group(0)), col)
#
# Sets the buffers mark to the specified pos (or the current position in the view).
#
def set_mark(self, pos=None, update_status=True, and_selection=True):
view = self.view
mark_ring = self.state.mark_ring
if pos is None:
pos = self.get_point()
# update the mark ring
mark_ring.set(pos)
if and_selection:
self.set_selection(pos, pos)
if update_status:
self.set_status("Mark Saved")
# Allows to always set the active mark mode
def set_active_mark_mode(self):
point = self.get_point()
mark = self.get_mark()
self.set_selection(mark, point)
self.state.active_mark = True
#
# Enabling active mark means highlight the current emacs region.
#
    def toggle_active_mark_mode(self, value=None):
        """
        Toggle (or explicitly set, via *value*) active mark mode.

        Active mark mode highlights the emacs region mark..point as the
        selection; disabling it collapses the selection back to point.
        """
        # explicit set to the current value: nothing to do
        if value is not None and self.state.active_mark == value:
            return
        self.state.active_mark = value if value is not None else (not self.state.active_mark)
        point = self.get_point()
        if self.state.active_mark:
            mark = self.get_mark()
            self.set_selection(mark, point)
            self.state.active_mark = True
        elif len(self.view.sel()) <= 1:
            # leave multi-cursor selections alone; collapse only a single cursor
            self.set_selection(point, point)
def swap_point_and_mark(self):
view = self.view
mark_ring = self.state.mark_ring
mark = mark_ring.exchange(self.get_point())
if mark is not None:
self.goto_position(mark)
else:
self.set_status("No mark in this buffer")
def set_selection(self, a=None, b=None):
if a is None:
a = self.get_point()
if b is None:
b = a
selection = self.view.sel()
selection.clear()
r = sublime.Region(a, b)
selection.add(r)
def get_line_info(self, point):
view = self.view
region = view.line(point)
data = view.substr(region)
row,col = view.rowcol(point)
return (data, col, region)
    def run_window_command(self, cmd, args):
        # Dispatch a window-level (rather than view-level) command.
        self.view.window().run_command(cmd, args)
    def has_prefix_arg(self):
        # True when an emacs-style prefix argument has been supplied.
        return self.state.argument_supplied
    def just_one_point(self):
        # True when there is exactly one cursor (no multi-cursor editing).
        return len(self.view.sel()) == 1
    def get_count(self, peek=False):
        # Numeric prefix-argument count; peek presumably reads it without
        # consuming it — see state.get_count for the exact semantics.
        return self.state.get_count(peek)
    #
    # This provides a way to run a function on all the cursors, one after another. This maintains
    # all the cursors and then calls the function with one cursor at a time, with the view's
    # selection state set to just that one cursor. So any calls to run_command within the function
    # will operate on only that one cursor.
    #
    # The called function is supposed to return a new cursor position or None, in which case value
    # is taken from the view itself.
    #
    # REMIND: This isn't how it currently works!
    #
    # After the function is run on all the cursors, the view's multi-cursor state is restored with
    # new values for the cursor.
    #
    def for_each_cursor(self, function, *args, **kwargs):
        view = self.view
        selection = view.sel()
        # copy cursors into proper regions which sublime will manage while we potentially edit the
        # buffer and cause things to move around
        key = "tmp_cursors"
        cursors = [c for c in selection]
        view.add_regions(key, cursors, "tmp", "", sublime.HIDDEN)
        # run the command passing in each cursor and collecting the returned cursor
        for i in range(len(cursors)):
            selection.clear()
            # re-fetch the regions each pass: edits made by FUNCTION may have
            # shifted them, and sublime keeps the stored regions up to date
            regions = view.get_regions(key)
            if i >= len(regions):
                # we've deleted some cursors along the way - we're done
                break
            cursor = regions[i]
            selection.add(cursor)
            cursor = function(cursor, *args, **kwargs)
            if cursor is not None:
                # update the cursor in its slot
                regions[i] = cursor
                view.add_regions(key, regions, "tmp", "", sublime.HIDDEN)
        # restore the cursors
        selection.clear()
        for r in view.get_regions(key):
            selection.add(r)
        view.erase_regions(key)
def goto_line(self, line):
if line >= 0:
view = self.view
point = view.text_point(line - 1, 0)
self.goto_position(point, set_mark=True)
def goto_position(self, pos, set_mark=False):
if set_mark and self.get_point() != pos:
self.set_mark()
self.view.sel().clear()
self.view.sel().add(sublime.Region(pos, pos))
self.ensure_visible(pos)
    def is_visible(self, pos):
        # True when POS lies within the view's currently visible region.
        visible = self.view.visible_region()
        return visible.contains(pos)
def ensure_visible(self, point, force=False):
if force or not self.is_visible(point):
self.view.show_at_center(point)
def is_word_char(self, pos, forward, separators):
if not forward:
if pos == 0:
return False
pos -= 1
char = self.view.substr(pos)
return not (char in " \t\r\n" or char in separators)
#
# Goes to the other end of the scope at | |
"clincher",
"ascender",
"triticum",
"pedaling",
"trickier",
"radishes",
"surmised",
"scolding",
"leathery",
"snorkels",
"packings",
"dejected",
"zeolites",
"chiseled",
"empathic",
"iterates",
"squander",
"bonefish",
"dragoons",
"gazpacho",
"spurring",
"calfskin",
"campings",
"moderato",
"deceiver",
"scirocco",
"subsides",
"blanched",
"sulfides",
"ironside",
"nauseous",
"mesoderm",
"tailpipe",
"pullback",
"whitener",
"talmudic",
"aldolase",
"coquette",
"vermouth",
"inflicts",
"formless",
"reticent",
"dabbling",
"negating",
"relapses",
"plodding",
"deserter",
"neatness",
"grosbeak",
"parading",
"crabmeat",
"showgirl",
"biphenyl",
"valvular",
"listless",
"copperas",
"ceramide",
"downcast",
"bondsman",
"sportive",
"bloodied",
"chickpea",
"laggards",
"marzipan",
"rewriter",
"epithets",
"sniffers",
"flatland",
"macaques",
"unplayed",
"anabaena",
"grumbles",
"proudest",
"fanciers",
"postgame",
"stonefly",
"warlocks",
"hecatomb",
"legalese",
"glaziers",
"postpaid",
"costumer",
"grapples",
"aplastic",
"maryjane",
"drillers",
"glimpsed",
"newsboys",
"debuting",
"ghosting",
"revolted",
"thalamic",
"anarchic",
"corniche",
"halflife",
"walkable",
"diatonic",
"stickler",
"paperboy",
"rhizomes",
"ablution",
"handcuff",
"cephalic",
"birdbath",
"griddles",
"parasols",
"munition",
"comorbid",
"greeters",
"abrogate",
"stockade",
"biphasic",
"henchman",
"basilisk",
"tobaccos",
"exhorted",
"howitzer",
"sharpens",
"wildness",
"novellas",
"cryonics",
"glycosyl",
"stingers",
"cathouse",
"floodway",
"caduceus",
"biofilms",
"fanzines",
"subgrade",
"ontogeny",
"blustery",
"courtier",
"snowmelt",
"leaguers",
"arborist",
"uncaring",
"piranhas",
"meteoric",
"equalled",
"crackpot",
"stockpot",
"feedlots",
"singlets",
"bonfires",
"diverges",
"atheneum",
"seedless",
"baclofen",
"redshirt",
"capsular",
"appeased",
"verboten",
"righting",
"fuelwood",
"breakups",
"striding",
"tuberous",
"herrings",
"prancing",
"floodlit",
"mightier",
"alacrity",
"locative",
"heathers",
"employes",
"checkoff",
"woodcuts",
"chugging",
"endpaper",
"calvados",
"skipjack",
"protrude",
"ionising",
"fadeaway",
"scrapper",
"laboured",
"clothier",
"expunged",
"forfeits",
"magmatic",
"wordless",
"flipbook",
"sleepily",
"prowling",
"lettuces",
"chambray",
"retracts",
"mimicked",
"icehouse",
"backstop",
"accusers",
"sunshade",
"rateable",
"tonality",
"ruptures",
"paralyze",
"hereunto",
"languish",
"feathery",
"reasoner",
"adorning",
"drumbeat",
"pastiche",
"abeyance",
"snagging",
"sidereal",
"websters",
"encamped",
"intaglio",
"doorknob",
"ethology",
"pinafore",
"disposer",
"feverfew",
"necropsy",
"farcical",
"tweenies",
"placebos",
"dilutive",
"collides",
"escapist",
"gunshots",
"travails",
"linesman",
"outlived",
"basaltic",
"dogfight",
"middling",
"ureteral",
"antihero",
"freewill",
"personae",
"fourthly",
"skullcap",
"knighted",
"emanated",
"homilies",
"moonbeam",
"orifices",
"longings",
"sideband",
"thresher",
"alleyway",
"helmsman",
"robustly",
"recoding",
"hedgerow",
"immanent",
"chattels",
"brambles",
"vitiligo",
"striated",
"bronzing",
"mangling",
"filtrate",
"debarred",
"apologia",
"rocketed",
"punditry",
"cryostat",
"spatulas",
"bladders",
"elastase",
"beetroot",
"dutchmen",
"airpower",
"marchese",
"replacer",
"extensor",
"cesspool",
"stirrers",
"banality",
"questing",
"pinecone",
"averting",
"tapeworm",
"frisbees",
"faubourg",
"sarcomas",
"teamster",
"fringing",
"bendable",
"propping",
"storable",
"stressor",
"cyclamen",
"rejoices",
"midterms",
"panniers",
"carapace",
"elastics",
"montages",
"bailiffs",
"singling",
"slugfest",
"diphenyl",
"jointing",
"spinoffs",
"freckled",
"marooned",
"dauphine",
"conjunct",
"gelatine",
"thrombus",
"doctored",
"hotshots",
"chutzpah",
"inflator",
"widowers",
"encircle",
"mustards",
"voyagers",
"tendrils",
"ratchets",
"malarial",
"dioramas",
"liveness",
"simmered",
"mainsail",
"spooling",
"scalding",
"witchery",
"wingless",
"cantonal",
"scatters",
"escapism",
"sandbags",
"repartee",
"crossway",
"revealer",
"micelles",
"knightly",
"ethicist",
"bouzouki",
"rapports",
"nibblers",
"meekness",
"pinochle",
"uneasily",
"lamellar",
"wrestles",
"climaxes",
"toboggan",
"nystatin",
"airships",
"flickers",
"abdicate",
"wiggling",
"toreador",
"spurrier",
"descents",
"posttest",
"snobbery",
"leukemic",
"erectors",
"witching",
"gendarme",
"lyricism",
"palatial",
"flippant",
"zodiacal",
"grubbing",
"cruncher",
"unturned",
"workroom",
"censured",
"hayfield",
"relished",
"membered",
"kayakers",
"missteps",
"doubters",
"cabbages",
"saboteur",
"wreaking",
"cometary",
"linguine",
"kurtosis",
"waterjet",
"chapbook",
"sceptics",
"outpaced",
"implored",
"earthing",
"clumping",
"quackery",
"villainy",
"shunting",
"hurtling",
"squabble",
"funneled",
"inimical",
"bunching",
"townsmen",
"accredit",
"skylines",
"unpaired",
"handfuls",
"asphyxia",
"smelters",
"oversold",
"lacquers",
"purifies",
"stunting",
"sparkled",
"arachnid",
"equaling",
"quibbles",
"uncalled",
"memetics",
"subplots",
"backbeat",
"alluvium",
"stargaze",
"alarmist",
"blithely",
"amortize",
"virginal",
"contempo",
"tallying",
"billfish",
"backlogs",
"lobotomy",
"fiercest",
"meristem",
"tailback",
"grizzled",
"fatalism",
"despises",
"jousting",
"ferrules",
"nonunion",
"blackest",
"cathodes",
"relented",
"domesday",
"mudstone",
"trekkers",
"schizoid",
"mutating",
"threader",
"isotherm",
"motherly",
"savannas",
"boondock",
"coachmen",
"atomizer",
"botrytis",
"bonehead",
"monopods",
"offsides",
"rubidium",
"attender",
"halfpipe",
"granites",
"armbands",
"skidding",
"stylised",
"optioned",
"amassing",
"consoled",
"rearward",
"doughboy",
"purposed",
"hornpipe",
"tuckahoe",
"boatyard",
"berthing",
"laterals",
"sparsity",
"peekaboo",
"availing",
"overtone",
"golgotha",
"frazzled",
"hustling",
"cadavers",
"readjust",
"affiches",
"seafarer",
"repeller",
"docketed",
"platoons",
"luminary",
"accosted",
"umpiring",
"pruritus",
"demersal",
"morbidly",
"swooping",
"blueline",
"niceties",
"handymen",
"signaler",
"silurian",
"sayonara",
"quietude",
"wildfowl",
"syntaxes",
"acolytes",
"fretless",
"royalist",
"hierarch",
"shuffler",
"synching",
"tripling",
"resected",
"windlass",
"studious",
"patellar",
"fineness",
"pharisee",
"dieldrin",
"amicably",
"ecologic",
"nuptials",
"munchers",
"satirist",
"outliner",
"wobbling",
"gadgetry",
"teahouse",
"domaines",
"xanthine",
"liposome",
"sagacity",
"fillable",
"agitprop",
"trickled",
"abounded",
"disloyal",
"staggers",
"defector",
"stuntman",
"zillions",
"longleaf",
"pervaded",
"falsetto",
"foxhound",
"gloating",
"deadbolt",
"maquette",
"absolved",
"hydrogel",
"snappers",
"aperitif",
"monetize",
"lacrimal",
"uppercut",
"ticklers",
"megastar",
"butyrate",
"morpheme",
"acreages",
"pursuers",
"mutagens",
"lavished",
"defacing",
"hangouts",
"trioxide",
"teethers",
"zoonotic",
"tinkling",
"masterly",
"butchery",
"wresting",
"variably",
"cherubim",
"nightcap",
"kickball",
"dianthus",
"toggling",
"polyglot",
"mayflies",
"dainties",
"bluebook",
"disquiet",
"deerskin",
"midbrain",
"sufficed",
"corsages",
"rebutted",
"missense",
"pickings",
"nuclides",
"cisterns",
"nitrites",
"capuchin",
"highrise",
"twitched",
"lavalier",
"reticule",
"pervious",
"longhair",
"gondolas",
"spenders",
"samizdat",
"jottings",
"unsuited",
"gurgling",
"tinplate",
"gusseted",
"hardcase",
"illusive",
"chucking",
"ripeness",
"sinkhole",
"debaters",
"hairball",
"cordials",
"bandaged",
"copepods",
"chippers",
"slimmers",
"bullocks",
"vilified",
"telefilm",
"caroling",
"hogshead",
"passives",
"provably",
"inedible",
"deplored",
"tunneled",
"ideative",
"trapdoor",
"backlist",
"sneering",
"huggable",
"neuritis",
"perineal",
"remotest",
"fixative",
"paraquat",
"submenus",
"principi",
"crunched",
"returner",
"scuttled",
"discolor",
"masonite",
"bivalves",
"entropic",
"keeshond",
"outsmart",
"senorita",
"riflemen",
"blinders",
"clasping",
"seamount",
"unstated",
"occlusal",
"excusing",
"uplander",
"slanting",
"detested",
"slashers",
"synthpop",
"immunize",
"boneyard",
"kickoffs",
"daiquiri",
"squalene",
"readouts",
"broached",
"puckered",
"shunning",
"passband",
"cleaving",
"overwork",
"belching",
"hauliers",
"playland",
"aquarist",
"chimaera",
"bellowed",
"moieties",
"restrike",
"tonights",
"digraphs",
"cashless",
"timbered",
"gobbling",
"analogic",
"moulders",
"tranches",
"valeting",
"flatline",
"speedier",
"cooldown",
"hepatoma",
"slickers",
"optionee",
"chunking",
"deuteron",
"unravels",
"electret",
"flossing",
"vacuoles",
"ghoulish",
"prefaced",
"yellowed",
"dendrite",
"calluses",
"sunstone",
"colorist",
"animates",
"leftwing",
"affixing",
"aerators",
"efferent",
"inclines",
"aspirate",
"chowders",
"shudders",
"ardently",
"granitic",
"begonias",
"clouding",
"bronchus",
"intruded",
"unforced",
"hominids",
"defusing",
"subentry",
"latterly",
"drizzled",
"outfalls",
"subscale",
"flagella",
"ruminate",
"biracial",
"chimeras",
"mandalas",
"instants",
"whoppers",
"shoptalk",
"anaphase",
"archduke",
"succumbs",
"focaccia",
"northing",
"hardener",
"reddened",
"blinkers",
"plummets",
"holdover",
"energise",
"bankside",
"misogyny",
"closeted",
"notching",
"sulfonic",
"monomial",
"turnouts",
"brouhaha",
"crannies",
"impactor",
"tankards",
"minutely",
"pontoons",
"silvered",
"dairying",
"subgenus",
"subtasks",
"pantries",
"theorize",
"eruptive",
"slotting",
"inflates",
"divining",
"hungover",
"fortieth",
"premixed",
"scalpers",
"moleskin",
"stomatal",
"holdalls",
"ambushes",
"cabarets",
"satanist",
"guttural",
"levelers",
"caramels",
"handbell",
"inverses",
"rewiring",
"barreled",
"capsized",
"kilobyte",
"triaxial",
"ejecting",
"matinees",
"tubeless",
"remarque",
"monkfish",
"refrains",
"furrowed",
"hedonist",
"hogmanay",
"uniaxial",
"pachinko",
"pirating",
"swayback",
"machismo",
"eduction",
"thumbing",
"shimmers",
"debonair",
"retinoid",
"grappled",
"sonogram",
"decouple",
"quizzing",
"ovations",
"illiquid",
"bleating",
"squatted",
"moonwalk",
"reassert",
"bettered",
"imbecile",
"alloying",
"pickerel",
"sargasso",
"dickhead",
"beasties",
"denarius",
"afflicts",
"thinness",
"dullness",
"oximetry",
"wrenched",
"usurping",
"foxholes",
"focusses",
"revelers",
"aquiline",
"grippers",
"lockouts",
"wristlet",
"admonish",
"connotes",
"sporrans",
"blackfin",
"ribozyme",
"smudging",
"squealed",
"convents",
"encumber",
"impetigo",
"drachmas",
"bindweed",
"resample",
"heedless",
"toolshed",
"indulges",
"indexers",
"pergolas",
"inductee",
"folktale",
"thespian",
"chelated",
"jeroboam",
"rightist",
"clearcut",
"graviton",
"meanness",
"arboreal",
"inhalant",
"biotechs",
"coercing",
"bolivars",
"atrophic",
"untangle",
"prebuilt",
"engorged",
"foretell",
"colorize",
"landward",
"notarial",
"cripples",
"cornices",
"penciled",
"antiphon",
"rerouted",
"retested",
"subdural",
"ratifies",
"subtests",
"epilator",
"releaser",
"gnashing",
"issuable",
"aardwolf",
"aasvogel",
"abacuses",
"abalones",
"abampere",
"abapical",
"abasedly",
"abashing",
"abatable",
"abatises",
"abbacies",
"abbatial",
"abbesses",
"abdomens",
"abdomina",
"abducens",
"abducent",
"abducing",
"abductee",
"abductor",
"abegging",
"abelmosk",
"abetment",
"abettals",
"abetters",
"abettors",
"abeyancy",
"abfarads",
"abhenrys",
"abhorred",
"abhorrer",
"abidance",
"abigails",
"abjectly",
"abjurers",
"abjuring",
"ablating",
"ablative",
"ablators",
"ablegate",
"ableisms",
"ableists",
"abluents",
"abnegate",
"aboideau",
"aboiteau",
"abomasal",
"abomasum",
"abomasus",
"aborally",
"aborning",
"aborters",
"aboulias",
"abrachia",
"abradant",
"abraders",
"abrading",
"abreacts",
"abridger",
"abridges",
"abrosias",
"abrupter",
"abscised",
"abscises",
"abscisin",
"abscissa",
"absconds",
"abseiled",
"absented",
"absenter",
"absently",
"absinths",
"absolver",
"absolves",
"absonant",
"abstains",
"absterge",
"abstrict",
"abstruse",
"absurder",
"abusable",
"abutilon",
"abuttals",
"abutters",
"academes",
"acalephe",
"acalephs",
"acanthae",
"acapnias",
"acarbose",
"acaridan",
"acarines",
"acarpous",
"acaudate",
"acauline",
"acaulose",
"acaulous",
"acceders",
"accentor",
"acceptee",
"accepter",
"accidias",
"accidies",
"acclaims",
"accorder",
"accouter",
"accoutre",
"accreted",
"accretes",
"accusals",
"accusant",
"accustom",
"aceldama",
"acentric",
"acequias",
"acerated",
"acerbate",
"acerbest",
"acerbity",
"acerolas",
"acervate",
"acervuli",
"acescent",
"acetamid",
"acetated",
"acetates",
"acetones",
"acetonic",
"acetoxyl",
"acetylic",
"achenial",
"achillea",
"achiness",
"achingly",
"achiotes",
"acholias",
"achromat",
"achromic",
"aciculae",
"acicular",
"aciculas",
"aciculum",
"acidemia",
"acidhead",
"acidness",
"acidoses",
"acidotic",
"aciduria",
"acierate",
"acoelous",
"aconites",
"aconitic",
"aconitum",
"acquests",
"acquiree",
"acrasias",
"acrasins",
"acridest",
"acridine",
"acridity",
"acrimony",
"acrodont",
"acrogens",
"acrolect",
"acrolein",
"acrolith",
"acromial",
"acromion",
"acrosome",
"acrostic",
"acrotism",
"actiniae",
"actinian",
"actinias",
"actinide",
"actinism",
"actinium",
"actinoid",
"actinons",
"actioner",
"activize",
"actorish",
"actressy",
"actuates",
"acuities",
"aculeate",
"acutance",
"acylated",
"acylates",
"acyloins",
"adamance",
"adamancy",
"adamants",
"adamsite",
"additory",
"adducent",
"adducers",
"adducing",
"adducted",
"adductor",
"adeeming",
"adenines",
"adenitis",
"adenoids",
"adenoses",
"adenosis",
"adeptest",
"adherend",
"adherers",
"adhibits",
"adiposes",
"adiposis",
"adjoined",
"adjoints",
"adjourns",
"adjudges",
"adjurers",
"adjuring",
"adjurors",
"adjustor",
"admasses",
"admittee",
"admitter",
"admixing",
"adnation",
"adonises",
"adorably",
"adorners",
"adrenals",
"adroiter",
"adroitly",
"adscript",
"adsorber",
"adularia",
"adulated",
"adulates",
"adulator",
"adumbral",
"aduncate",
"aduncous",
"advancer",
"advected",
"adverted",
"advisees",
"advowson",
"adynamia",
"adynamic",
"aecidial",
"aecidium",
"aequorin",
"aerating",
"aerially",
"aerified",
"aerifies",
"aeriform",
"aerobats",
"aerobium",
"aeroduct",
"aerodyne",
"aerofoil",
"aerogels",
"aerogram",
"aerolite",
"aerolith",
"aerology",
"aeronaut",
"aeronomy",
"aerosats",
"aerostat",
"aesthete",
"aestival",
"aetheric",
"afebrile",
"affecter",
"affiance",
"affiants",
"affinely",
"affirmer",
"affixers",
"affixial",
"afflatus",
"affluxes",
"afforest",
"affrayed",
"affrayer",
"affright",
"affronts",
"affusion",
"afghanis",
"aflutter",
"aftertax",
"agalloch",
"agalwood",
"agametes",
"agaroses",
"agatized",
"agatizes",
"agedness",
"agemates",
"agendums",
"ageneses",
"agenesia",
"agenesis",
"agenetic",
"agenized",
"agenizes",
"agential",
"agenting",
"agentive",
"ageratum",
"aggadahs",
"aggadoth",
"aggraded",
"aggrades",
"aggrieve",
"aginners",
"agiotage",
"agisting",
"agitable",
"agitates",
"aglimmer",
"aglitter",
"aglycone",
"aglycons",
"agminate",
"agnation",
"agnizing",
"agnomens",
"agnomina",
"agnosias",
"agonised",
"agonises",
"agonized",
"agonizes",
"agouties",
"agraffes",
"agraphia",
"agraphic",
"agrestal",
"agrestic",
"agrimony",
"agrology",
"agrypnia",
"aguacate",
"aguelike",
"agueweed",
"aguishly",
"aigrette",
"aiguille",
"ailerons",
"aimfully",
"ainsells",
"airboats",
"airbound",
"airburst",
"airbuses",
"aircheck",
"aircoach",
"aircrews",
"airdates",
"airdrome",
"airdrops",
"airflows",
"airfoils",
"airglows",
"airheads",
"airholes",
"airiness",
"airlifts",
"airmails",
"airparks",
"airplays",
"airposts",
"airproof",
"airscape",
"airscrew",
"airsheds",
"airshots",
"airshows",
"airthing",
"airtimes",
"airwoman",
"airwomen",
"aisleway",
"akinesia",
"akinetic",
| |
# repo: viralpoetry/hvac
import logging
from unittest import TestCase
from unittest import skipIf
from parameterized import parameterized, param
from hvac import exceptions
from tests import utils
from tests.utils.hvac_integration_test_case import HvacIntegrationTestCase
@skipIf(utils.vault_version_lt('0.9.0'), "Identity secrets engine open sourced in Vault version >=0.9.0")
class TestIdentity(HvacIntegrationTestCase, TestCase):
TEST_APPROLE_PATH = 'identity-test-approle'
TEST_MOUNT_POINT = 'identity'
TEST_ENTITY_NAME = 'test-entity'
TEST_ALIAS_NAME = 'test-alias'
TEST_GROUP_NAME = 'test-group'
TEST_GROUP_ALIAS_NAME = 'test-group-alias'
test_approle_accessor = None
    def setUp(self):
        """Enable a dedicated approle auth method and record its accessor."""
        super(TestIdentity, self).setUp()
        self.client.sys.enable_auth_method(
            method_type='approle',
            path=self.TEST_APPROLE_PATH,
        )
        # Auth methods are keyed by their mount path with a trailing slash.
        list_auth_response = self.client.sys.list_auth_methods()
        self.test_approle_accessor = list_auth_response['data']['%s/' % self.TEST_APPROLE_PATH]['accessor']
    def tearDown(self):
        """Remove all identity fixtures, then disable the test approle mount."""
        # Entities/aliases/groups must be removed before the auth method that
        # backs their aliases is disabled.
        self.tear_down_entities()
        self.tear_down_entity_aliases()
        self.tear_down_groups()
        self.client.sys.disable_auth_method(
            path=self.TEST_APPROLE_PATH,
        )
        super(TestIdentity, self).tearDown()
def tear_down_entities(self):
try:
list_entities_response = self.client.secrets.identity.list_entities(mount_point=self.TEST_MOUNT_POINT)
logging.debug('list_entities_response in tearDown: %s' % list_entities_response)
entity_ids = list_entities_response['data']['keys']
except exceptions.InvalidPath:
logging.debug('InvalidPath raised when calling list_entites_by_id in tearDown...')
entity_ids = []
for entity_id in entity_ids:
logging.debug('Deleting entity ID: %s' % entity_id)
self.client.secrets.identity.delete_entity(
entity_id=entity_id,
mount_point=self.TEST_MOUNT_POINT,
)
def tear_down_entity_aliases(self):
try:
list_entity_aliases_response = self.client.secrets.identity.list_entity_aliases(mount_point=self.TEST_MOUNT_POINT)
logging.debug('list_entity_aliases_response in tearDown: %s' % list_entity_aliases_response)
alias_ids = list_entity_aliases_response['keys']
except exceptions.InvalidPath:
logging.debug('InvalidPath raised when calling list_entites_by_id in tearDown...')
alias_ids = []
for alias_id in alias_ids:
logging.debug('Deleting alias ID: %s' % alias_id)
self.client.secrets.identity.delete_entity_alias(
alias_id=alias_id,
mount_point=self.TEST_MOUNT_POINT,
)
def tear_down_groups(self):
try:
list_group_response = self.client.secrets.identity.list_groups(mount_point=self.TEST_MOUNT_POINT)
logging.debug('list_group_response in tearDown: %s' % list_group_response)
group_ids = list_group_response['data']['keys']
except exceptions.InvalidPath:
logging.debug('InvalidPath raised when calling list_groups in tearDown...')
group_ids = []
for group_id in group_ids:
logging.debug('Deleting group ID: %s' % group_id)
self.client.secrets.identity.delete_group(
group_id=group_id,
mount_point=self.TEST_MOUNT_POINT,
)
    @parameterized.expand([
        param(
            'create success',
        ),
        param(
            'create success with metadata',
            metadata=dict(something='meta')
        ),
        param(
            'create failure with metadata',
            metadata='not a dict',
            raises=exceptions.ParamValidationError,
            exception_message='unsupported metadata argument provided',
        ),
        param(
            'update success',
            create_first=True,
        ),
    ])
    def test_create_or_update_entity(self, label, metadata=None, create_first=False, raises=None, exception_message=''):
        """create_or_update_entity creates entities, updates when an entity ID
        is supplied, and rejects non-dict metadata with a validation error.
        """
        entity_id = None
        if create_first:
            # Seed an entity so the main call below exercises the update path.
            create_first_response = self.client.secrets.identity.create_or_update_entity(
                name=self.TEST_ENTITY_NAME,
                entity_id=entity_id,
                metadata=metadata,
                mount_point=self.TEST_MOUNT_POINT,
            )
            logging.debug('create_first_response: %s' % create_first_response)
            entity_id = create_first_response['data']['id']
        if raises:
            with self.assertRaises(raises) as cm:
                self.client.secrets.identity.create_or_update_entity(
                    name=self.TEST_ENTITY_NAME,
                    metadata=metadata,
                    mount_point=self.TEST_MOUNT_POINT,
                )
            self.assertIn(
                member=exception_message,
                container=str(cm.exception),
            )
        else:
            create_or_update_response = self.client.secrets.identity.create_or_update_entity(
                name=self.TEST_ENTITY_NAME,
                entity_id=entity_id,
                metadata=metadata,
                mount_point=self.TEST_MOUNT_POINT,
            )
            logging.debug('create_or_update_response: %s' % create_or_update_response)
            # A dict response carries the entity JSON; otherwise the call
            # returned the raw no-content HTTP response.
            if isinstance(create_or_update_response, dict):
                self.assertIn(
                    member='id',
                    container=create_or_update_response['data'],
                )
                if entity_id is not None:
                    # Updating must not change the entity's ID.
                    self.assertEqual(
                        first=entity_id,
                        second=create_or_update_response['data']['id'],
                    )
            else:
                self.assertEqual(
                    first=create_or_update_response.status_code,
                    second=204,
                )
    @parameterized.expand([
        param(
            'create success',
        ),
        param(
            'create success with metadata',
            metadata=dict(something='meta')
        ),
        param(
            'create failure with metadata',
            metadata='not a dict',
            raises=exceptions.ParamValidationError,
            exception_message='unsupported metadata argument provided',
        ),
        param(
            'update success',
            create_first=True,
        ),
    ])
    @skipIf(utils.vault_version_lt('0.11.2'), '"by name" operations added in Vault v0.11.2')
    def test_create_or_update_entity_by_name(self, label, metadata=None, create_first=False, raises=None, exception_message=''):
        """create_or_update_entity_by_name mirrors the by-ID variant, keyed on
        the entity name instead of its ID.
        """
        entity_id = None
        if create_first:
            # Seed an entity so the by-name call below exercises the update path.
            create_first_response = self.client.secrets.identity.create_or_update_entity(
                name=self.TEST_ENTITY_NAME,
                entity_id=entity_id,
                metadata=metadata,
                mount_point=self.TEST_MOUNT_POINT,
            )
            logging.debug('create_first_response: %s' % create_first_response)
            entity_id = create_first_response['data']['id']
        if raises:
            with self.assertRaises(raises) as cm:
                self.client.secrets.identity.create_or_update_entity_by_name(
                    name=self.TEST_ENTITY_NAME,
                    metadata=metadata,
                    mount_point=self.TEST_MOUNT_POINT,
                )
            self.assertIn(
                member=exception_message,
                container=str(cm.exception),
            )
        else:
            create_or_update_response = self.client.secrets.identity.create_or_update_entity_by_name(
                name=self.TEST_ENTITY_NAME,
                metadata=metadata,
                mount_point=self.TEST_MOUNT_POINT,
            )
            logging.debug('create_or_update_response: %s' % create_or_update_response)
            # A fresh create returns the entity JSON; an update of an existing
            # entity returns a no-content HTTP response instead.
            if not create_first:
                self.assertIn(
                    member='id',
                    container=create_or_update_response['data'],
                )
                if entity_id is not None:
                    # Updating must not change the entity's ID.
                    self.assertEqual(
                        first=entity_id,
                        second=create_or_update_response['data']['id'],
                    )
            else:
                self.assertEqual(
                    first=create_or_update_response.status_code,
                    second=204,
                )
@parameterized.expand([
param(
'read success',
),
param(
'read failure',
create_first=False,
raises=exceptions.InvalidPath
),
])
def test_read_entity_by_id(self, label, create_first=True, raises=None, exception_message=''):
entity_id = None
if create_first:
create_first_response = self.client.secrets.identity.create_or_update_entity(
name=self.TEST_ENTITY_NAME,
mount_point=self.TEST_MOUNT_POINT,
)
logging.debug('create_first_response: %s' % create_first_response)
entity_id = create_first_response['data']['id']
if raises:
with self.assertRaises(raises) as cm:
self.client.secrets.identity.read_entity(
entity_id=entity_id,
mount_point=self.TEST_MOUNT_POINT,
)
self.assertIn(
member=exception_message,
container=str(cm.exception),
)
else:
read_entity_by_id_response = self.client.secrets.identity.read_entity(
entity_id=entity_id,
mount_point=self.TEST_MOUNT_POINT,
)
logging.debug('read_entity_by_id_response: %s' % read_entity_by_id_response)
self.assertEqual(
first=entity_id,
second=read_entity_by_id_response['data']['id'],
)
@parameterized.expand([
param(
'read success',
),
param(
'read failure',
create_first=False,
raises=exceptions.InvalidPath
),
])
@skipIf(utils.vault_version_lt('0.11.2'), '"by name" operations added in Vault v0.11.2')
def test_read_entity_by_name(self, label, create_first=True, raises=None, exception_message=''):
entity_id = None
if create_first:
create_first_response = self.client.secrets.identity.create_or_update_entity(
name=self.TEST_ENTITY_NAME,
mount_point=self.TEST_MOUNT_POINT,
)
logging.debug('create_first_response: %s' % create_first_response)
entity_id = create_first_response['data']['id']
if raises:
with self.assertRaises(raises) as cm:
self.client.secrets.identity.read_entity_by_name(
name=self.TEST_ENTITY_NAME,
mount_point=self.TEST_MOUNT_POINT,
)
self.assertIn(
member=exception_message,
container=str(cm.exception),
)
else:
read_entity_by_name_response = self.client.secrets.identity.read_entity_by_name(
name=self.TEST_ENTITY_NAME,
mount_point=self.TEST_MOUNT_POINT,
)
logging.debug('read_entity_by_name_response: %s' % read_entity_by_name_response)
self.assertEqual(
first=entity_id,
second=read_entity_by_name_response['data']['id'],
)
@parameterized.expand([
param(
'update success',
),
param(
'update success with metadata',
metadata=dict(something='meta')
),
param(
'update failure with metadata',
metadata='not a dict',
raises=exceptions.ParamValidationError,
exception_message='unsupported metadata argument provided',
),
])
def test_update_entity(self, label, metadata=None, raises=None, exception_message=''):
create_first_response = self.client.secrets.identity.create_or_update_entity(
name=self.TEST_ENTITY_NAME,
mount_point=self.TEST_MOUNT_POINT,
)
logging.debug('create_first_response: %s' % create_first_response)
entity_id = create_first_response['data']['id']
if raises:
with self.assertRaises(raises) as cm:
self.client.secrets.identity.update_entity(
entity_id=entity_id,
metadata=metadata,
mount_point=self.TEST_MOUNT_POINT,
)
self.assertIn(
member=exception_message,
container=str(cm.exception),
)
else:
update_entity_response = self.client.secrets.identity.update_entity(
entity_id=entity_id,
metadata=metadata,
mount_point=self.TEST_MOUNT_POINT,
)
logging.debug('update_entity_response: %s' % update_entity_response)
if isinstance(update_entity_response, dict):
self.assertEqual(
first=update_entity_response['data']['id'],
second=entity_id,
)
else:
self.assertEqual(
first=update_entity_response.status_code,
second=204,
)
@parameterized.expand([
param(
'delete success',
),
param(
'delete success with no corresponding entity',
create_first=False,
),
])
def test_delete_entity_by_id(self, label, create_first=True, raises=None, exception_message=''):
entity_id = None
if create_first:
create_first_response = self.client.secrets.identity.create_or_update_entity(
name=self.TEST_ENTITY_NAME,
mount_point=self.TEST_MOUNT_POINT,
)
logging.debug('create_first_response: %s' % create_first_response)
entity_id = create_first_response['data']['id']
if raises:
with self.assertRaises(raises) as cm:
self.client.secrets.identity.delete_entity(
entity_id=entity_id,
mount_point=self.TEST_MOUNT_POINT,
)
self.assertIn(
member=exception_message,
container=str(cm.exception),
)
else:
delete_entity_response = self.client.secrets.identity.delete_entity(
entity_id=entity_id,
mount_point=self.TEST_MOUNT_POINT,
)
logging.debug('update_entity_response: %s' % delete_entity_response)
self.assertEqual(
first=delete_entity_response.status_code,
second=204,
)
@parameterized.expand([
param(
'delete success',
),
param(
'delete success with no corresponding entity',
create_first=False,
),
])
@skipIf(utils.vault_version_lt('0.11.2'), '"by name" operations added in Vault v0.11.2')
def test_delete_entity_by_name(self, label, create_first=True, raises=None, exception_message=''):
if create_first:
create_first_response = self.client.secrets.identity.create_or_update_entity(
name=self.TEST_ENTITY_NAME,
mount_point=self.TEST_MOUNT_POINT,
)
logging.debug('create_first_response: %s' % create_first_response)
if raises:
with self.assertRaises(raises) as cm:
self.client.secrets.identity.delete_entity_by_name(
name=self.TEST_ENTITY_NAME,
mount_point=self.TEST_MOUNT_POINT,
)
self.assertIn(
member=exception_message,
container=str(cm.exception),
)
else:
delete_entity_response = self.client.secrets.identity.delete_entity_by_name(
name=self.TEST_ENTITY_NAME,
mount_point=self.TEST_MOUNT_POINT,
)
logging.debug('update_entity_response: %s' % delete_entity_response)
self.assertEqual(
first=delete_entity_response.status_code,
second=204,
)
@parameterized.expand([
param(
'list success - LIST method',
),
param(
'list success - GET method',
method='GET',
),
param(
'list failure - invalid method',
method='PUT',
raises=exceptions.ParamValidationError,
exception_message='"method" parameter provided invalid value',
),
])
def test_list_entities_by_id(self, label, method='LIST', raises=None, exception_message=''):
create_response = self.client.secrets.identity.create_or_update_entity(
name=self.TEST_ENTITY_NAME,
mount_point=self.TEST_MOUNT_POINT,
)
logging.debug('create_response: %s' % create_response)
entity_id = create_response['data']['id']
if raises:
with self.assertRaises(raises) as cm:
self.client.secrets.identity.list_entities(
method=method,
mount_point=self.TEST_MOUNT_POINT,
)
self.assertIn(
member=exception_message,
container=str(cm.exception),
)
else:
list_entities_response = self.client.secrets.identity.list_entities(
method=method,
mount_point=self.TEST_MOUNT_POINT,
)
logging.debug('list_entities_response: %s' % list_entities_response)
self.assertEqual(
first=[entity_id],
second=list_entities_response['data']['keys'],
)
@parameterized.expand([
    param(
        'list success - LIST method',
    ),
    param(
        'list success - GET method',
        method='GET',
    ),
    param(
        'list failure - invalid method',
        method='PUT',
        raises=exceptions.ParamValidationError,
        exception_message='"method" parameter provided invalid value',
    ),
])
@skipIf(utils.vault_version_lt('0.11.2'), '"by name" operations added in Vault v0.11.2')
def test_list_entities_by_name(self, label, method='LIST', raises=None, exception_message=''):
    """Create one entity, then list entity names via the given HTTP method.

    When ``raises`` is set, assert the expected client-side validation error.
    """
    create_response = self.client.secrets.identity.create_or_update_entity(
        name=self.TEST_ENTITY_NAME,
        mount_point=self.TEST_MOUNT_POINT,
    )
    logging.debug('create_response: %s' % create_response)
    if not raises:
        list_entities_response = self.client.secrets.identity.list_entities_by_name(
            method=method,
            mount_point=self.TEST_MOUNT_POINT,
        )
        logging.debug('list_entities_response: %s' % list_entities_response)
        # The freshly created entity should be the only name listed.
        self.assertEqual([self.TEST_ENTITY_NAME], list_entities_response['data']['keys'])
    else:
        with self.assertRaises(raises) as cm:
            self.client.secrets.identity.list_entities_by_name(
                method=method,
                mount_point=self.TEST_MOUNT_POINT,
            )
        self.assertIn(exception_message, str(cm.exception))
@parameterized.expand([
    param(
        'merge success',
    ),
    param(
        'merge failure',
    ),
])
def test_merge_entities(self, label, raises=None, exception_message=''):
    """Create two entities and merge the second into the first.

    NOTE(review): the 'merge failure' parameterization passes no ``raises``
    argument, so both cases currently exercise the success path - confirm the
    intended failure scenario before relying on that label.
    """
    create_response = self.client.secrets.identity.create_or_update_entity(
        name=self.TEST_ENTITY_NAME,
        mount_point=self.TEST_MOUNT_POINT,
    )
    logging.debug('create_response: %s' % create_response)
    create_response2 = self.client.secrets.identity.create_or_update_entity(
        name='%s2' % self.TEST_ENTITY_NAME,
        mount_point=self.TEST_MOUNT_POINT,
    )
    # Bug fix: previously logged create_response under the create_response2 label.
    logging.debug('create_response2: %s' % create_response2)
    to_entity_id = create_response['data']['id']
    from_entity_ids = [create_response2['data']['id']]
    if raises:
        with self.assertRaises(raises) as cm:
            self.client.secrets.identity.merge_entities(
                from_entity_ids=from_entity_ids,
                to_entity_id=to_entity_id,
                mount_point=self.TEST_MOUNT_POINT,
            )
        self.assertIn(
            member=exception_message,
            container=str(cm.exception),
        )
    else:
        merge_entities_response = self.client.secrets.identity.merge_entities(
            from_entity_ids=from_entity_ids,
            to_entity_id=to_entity_id,
            mount_point=self.TEST_MOUNT_POINT,
        )
        logging.debug('merge_entities_response: %s' % merge_entities_response)
        # A successful merge returns an empty 204 response.
        self.assertEqual(
            first=merge_entities_response.status_code,
            second=204,
        )
@parameterized.expand([
    param(
        'create success',
    ),
    param(
        'update success',
        create_first=True,
    ),
])
def test_create_or_update_entity_alias(self, label, create_first=False, raises=None, exception_message=''):
    """Create an entity alias, optionally against a pre-created entity.

    With ``create_first`` the alias is bound to an existing entity and the
    returned canonical_id is checked against it.
    """
    entity_id = None
    if create_first:
        create_first_response = self.client.secrets.identity.create_or_update_entity(
            name=self.TEST_ENTITY_NAME,
            entity_id=entity_id,
            mount_point=self.TEST_MOUNT_POINT,
        )
        logging.debug('create_first_response: %s' % create_first_response)
        entity_id = create_first_response['data']['id']
    if raises:
        with self.assertRaises(raises) as cm:
            self.client.secrets.identity.create_or_update_entity_alias(
                name=self.TEST_ALIAS_NAME,
                canonical_id=entity_id,
                mount_accessor=self.test_approle_accessor,
                mount_point=self.TEST_MOUNT_POINT,
            )
        self.assertIn(exception_message, str(cm.exception))
    else:
        response = self.client.secrets.identity.create_or_update_entity_alias(
            name=self.TEST_ALIAS_NAME,
            canonical_id=entity_id,
            mount_accessor=self.test_approle_accessor,
            mount_point=self.TEST_MOUNT_POINT,
        )
        logging.debug('create_or_update_response: %s' % response)
        self.assertIn('id', response['data'])
        if entity_id is not None:
            # The alias must point back at the entity we created first.
            self.assertEqual(response['data']['canonical_id'], entity_id)
@parameterized.expand([
    param(
        'read success',
    ),
    param(
        'read failure',
        create_first=False,
        raises=exceptions.InvalidPath,
    ),
])
def test_read_entity_alias_by_id(self, label, create_first=True, raises=None, exception_message=''):
    """Read back an entity alias by ID; reading a missing alias raises InvalidPath."""
    alias_id = None
    if create_first:
        # Set up an entity plus an alias attached to it so there is something to read.
        create_entity_first_response = self.client.secrets.identity.create_or_update_entity(
            name=self.TEST_ENTITY_NAME,
            mount_point=self.TEST_MOUNT_POINT,
        )
        logging.debug('create_entity_first_response: %s' % create_entity_first_response)
        entity_id = create_entity_first_response['data']['id']
        create_entity_alias_first_response = self.client.secrets.identity.create_or_update_entity_alias(
            name=self.TEST_ALIAS_NAME,
            canonical_id=entity_id,
            mount_accessor=self.test_approle_accessor,
            mount_point=self.TEST_MOUNT_POINT,
        )
        logging.debug('create_entity_alias_first_response: %s' % create_entity_alias_first_response)
        alias_id = create_entity_alias_first_response['data']['id']
    if not raises:
        read_entity_alias_response = self.client.secrets.identity.read_entity_alias(
            alias_id=alias_id,
            mount_point=self.TEST_MOUNT_POINT,
        )
        logging.debug('read_entity_alias_response: %s' % read_entity_alias_response)
        self.assertIn('id', read_entity_alias_response['data'])
        if alias_id is not None:
            self.assertEqual(read_entity_alias_response['data']['id'], alias_id)
    else:
        with self.assertRaises(raises) as cm:
            self.client.secrets.identity.read_entity_alias(
                alias_id=alias_id,
                mount_point=self.TEST_MOUNT_POINT,
            )
        self.assertIn(exception_message, str(cm.exception))
@parameterized.expand([
param(
'update success',
),
param(
'update failure with invalid mount accessor',
mount_accessor='not a valid accessor',
raises=exceptions.InvalidRequest,
exception_message='invalid mount accessor',
),
])
def test_update_entity_alias_by_id(self, label, mount_accessor=None, raises=None, exception_message=''):
if mount_accessor is None:
mount_accessor = self.test_approle_accessor
create_entity_first_response = self.client.secrets.identity.create_or_update_entity(
name=self.TEST_ENTITY_NAME,
mount_point=self.TEST_MOUNT_POINT,
)
logging.debug('create_entity_first_response: %s' % create_entity_first_response)
entity_id = create_entity_first_response['data']['id']
create_entity_alias_first_response = self.client.secrets.identity.create_or_update_entity_alias(
name=self.TEST_ALIAS_NAME,
canonical_id=entity_id,
mount_accessor=self.test_approle_accessor,
mount_point=self.TEST_MOUNT_POINT,
)
logging.debug('create_entity_alias_first_response: %s' % create_entity_alias_first_response)
alias_id = create_entity_alias_first_response['data']['id']
if raises:
with self.assertRaises(raises) as cm:
self.client.secrets.identity.update_entity_alias(
alias_id=alias_id,
name=self.TEST_ALIAS_NAME,
canonical_id=entity_id,
mount_accessor=mount_accessor,
mount_point=self.TEST_MOUNT_POINT,
)
self.assertIn(
member=exception_message,
container=str(cm.exception),
)
else:
update_entity_response = self.client.secrets.identity.update_entity_alias(
alias_id=alias_id,
name=self.TEST_ALIAS_NAME,
canonical_id=entity_id,
mount_accessor=mount_accessor,
mount_point=self.TEST_MOUNT_POINT,
)
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# author: (anonymized)
import json
from ares.Lib.html import AresHtml
class Editor(AresHtml.Html):
    """Ares HTML component wrapping a CodeMirror code editor.

    Renders a <textarea> that is promoted to a CodeMirror instance on page
    load, plus an optional toolbar of action icons (run / load / auto /
    clone / save / delete / download) registered through :meth:`jsAction`.
    """
    name, category, callFnc, docCategory = 'Code Editor', 'Text', 'editor', 'Preformatted'
    __pyStyle = ['CssDivEditor']
    # CodeMirror must be shipped both as CSS and JS on the client side.
    __reqCss, __reqJs = ['codemirror'], ['codemirror']

    def __init__(self, aresObj, vals, size, language, width, widthUnit, height, heightUnit, isEditable, htmlCode):
        super(Editor, self).__init__(aresObj, vals, width=width, widthUnit=widthUnit, height=height, heightUnit=heightUnit, code=htmlCode)
        # Fall back to the framework's default font size when none is supplied.
        self.size, self.isEditable = self.aresObj.pyStyleDfl['fontSize'] if size is None else "%spx" % size, isEditable
        # _definedActions fixes the display order of the toolbar icons in __str__.
        self._jsStyles, self._jsActions, self._definedActions = {'language': language}, {}, ['run', 'load', 'auto', 'clone', 'save', 'delete']
        self.css( {'font-size': self.size } )
        self.addGlobalVar('%s_editor' % self.htmlId)

    @property
    def val(self):
        """ Property to get the jquery value of the HTML object in a python HTML object """
        return '%(htmlId)s_editor.getValue()' % {"htmlId": self.htmlId}

    @property
    def jsQueryData(self):
        """
        :category: Javascript function
        :rubric: JS
        :example: >>> myObj.jsQueryData
        :dsc:
          Python function to define the Javascript object to be passed in case of Ajax call internally or via external REST service with other languages
        :return: Javascript String of the data to be used in a jQuery call
        :link ajax call: http://api.jquery.com/jquery.ajax/
        """
        return "{ event_val: %(htmlId)s_editor.getValue(), event_code: '%(htmlId)s' }" % {"htmlId": self.htmlId}

    @property
    def jsClear(self):
        # Javascript snippet emptying the editor content.
        return "%(htmlId)s_editor.setValue('')" % {"htmlId": self.htmlId}

    def trigger(self, event):
        # 'load' and 'run' are toolbar actions - fire a click on their icon;
        # anything else goes through the generic base-class trigger.
        if event in ['load', 'run']:
            self._triggerEvents.add("$('#%(htmlId)s_%(action)s').trigger('click')" % {"htmlId": self.htmlId, "action": event})
        else:
            return super(Editor, self).trigger(event)

    def onDocumentReady(self):
        # Build the client-side update function and register it on page load
        # (unless the data comes from a URL data source, which loads it later).
        self.jsUpdateDataFnc = '''
            %(pyCls)s(%(jqId)s, %(htmlId)s_data, %(jsStyles)s) ;
            if(%(htmlCode)s != null) { %(breadCrumVar)s['params'][%(htmlCode)s] = %(jsVal)s };
            ''' % {'pyCls': self.__class__.__name__, 'jqId': self.jqId, 'htmlId': self.htmlId, 'htmlCode': json.dumps(self.htmlCode),
                   'jsVal': self.val, 'breadCrumVar': self.aresObj.jsGlobal.breadCrumVar, 'jsStyles': json.dumps(self._jsStyles)}
        if self.dataSrc is None or self.dataSrc.get('type') != 'url':
            self.aresObj.jsOnLoadFnc.add(self.jsUpdateDataFnc)

    def onDocumentLoadFnc(self):
        """ Pure Javascript onDocumentLoad Function """
        # Creates the CodeMirror instance once per component, wires Ctrl+S to the
        # save icon when present and stamps the "last update" label.
        self.addGlobalFnc("%s(htmlObj, data, jsStyles)" % self.__class__.__name__, '''
            if (window[htmlObj.attr('id') + '_editor'] == undefined) {
              window[htmlObj.attr('id') + '_editor'] = CodeMirror.fromTextArea( htmlObj.get(0), {lineNumbers: true, mode: jsStyles.language} ) ; }
            window[htmlObj.attr('id') + '_editor'].setValue(data);
            if ($('#'+ htmlObj.attr('id') +'_save').length != 0) {
              window[htmlObj.attr('id') + '_editor'].on('keydown', function(i, e) {
                  if (e.ctrlKey && e.keyCode == 83) {
                    e.preventDefault();
                    $('#'+ htmlObj.attr('id') +'_save').trigger('click'); }
              }) ;
            } ;
            $('#'+ htmlObj.attr('id') +'_updated').text('Last update: ' + Today() ) ;
            window[htmlObj.attr('id') + '_editor'].getWrapperElement().style["overflow"] = "hidden";
            window[htmlObj.attr('id') + '_editor'].getWrapperElement().style["height"] = "100%"; ''')

    def jsAction(self, jsFncs, icon, pyCssCls, tooltip, action):
        """
        :category: Python function
        :rubric: PY
        :dsc:
          Register a toolbar icon for this editor and bind the given Javascript
          fragments to its click event.
        :return: The object itself
        """
        if not isinstance(jsFncs, list):
            jsFncs = [jsFncs]
        # New actions are appended so __str__ renders them after the defaults.
        if action not in self._definedActions:
            self._definedActions.append(action)
        self._jsActions[action] = "<span id='%(htmlId)s_%(action)s' title='%(tooltip)s' class='%(cssStyle)s %(icon)s'></span>" % {
            "icon": icon, "cssStyle": self.addPyCss(pyCssCls), "htmlId": self.htmlId, 'tooltip': tooltip, 'action': action}
        self.aresObj.jsOnLoadFnc.add("$('#%(htmlId)s_%(action)s').on('click', function(event) { %(jsFncs)s; })" % {"htmlId": self.htmlId, "jsFncs": ";".join(jsFncs), 'action': action})
        return self

    # --------------------------------------------------------------------------------------------------------------
    #                                             EDITOR STANDARD EVENTS
    #
    # None of those functions are based on an Ajax call as I do not think they are supposed to do something special in case of
    # success or failure of an internal event. Problems are tackled in the standard way using the ares popup message (and the status for the color)
    def save(self, jsFncs, icon='fas fa-save', pyCssCls="CssSmallIcon", tooltip='click to save changes'):
        """
        :example: >>> editor.save( aresObj.jsPost( "/reports/create/script", [editor]) )
        :wrap jsAction:
        :return: The object itself
        """
        if not isinstance(jsFncs, list):
            jsFncs = [jsFncs]
        # Expose the editor payload as `data` to the supplied callbacks.
        jsFncs = ["var data = %(data)s;" % {"data": self.jsQueryData}] + jsFncs
        return self.jsAction(jsFncs, icon, pyCssCls, tooltip, 'save')

    def delete(self, jsFncs, icon='fas fa-times-circle', pyCssCls="CssSmallIconRed", tooltip='click to delete the function'):
        return self.jsAction(jsFncs, icon, pyCssCls, tooltip, 'delete')

    def run(self, jsFncs, icon='fas fa-play', pyCssCls="CssSmallIcon", tooltip='Run button on the Editor Component'):
        return self.jsAction(jsFncs, icon, pyCssCls, tooltip, 'run')

    def clone(self, jsFncs, icon='fas fa-copy', pyCssCls="CssSmallIcon", tooltip='Create a copy of the script'):
        return self.jsAction(jsFncs, icon, pyCssCls, tooltip, 'clone')

    def load(self, jsFncs, icon='fas fa-sync', pyCssCls="CssSmallIcon", tooltip='Load button on the Editor Component', interval=5000):
        # Registers BOTH the 'load' action and an 'auto' action that toggles a
        # periodic re-load every `interval` milliseconds.
        if not isinstance(jsFncs, list):
            jsFncs = [jsFncs]
        jsFncs.append( "$('#%s_updated').text('Last update: ' + Today() )" % self.htmlId)
        self.jsAction(jsFncs, icon, pyCssCls, tooltip, 'load')
        jsFncsAuto = ['''
            $(this).toggleClass('fa-pulse');
            if ( window['%(htmlId)s_interval'] == undefined) { window['%(htmlId)s_interval'] = setInterval( function() { $("#%(htmlId)s_load").trigger('click'); }, %(interval)s ); }
            else {
              if( $(this).hasClass('fa-pulse') ) { window['%(htmlId)s_interval'] = setInterval( function() { $("#%(htmlId)s_load").trigger('click'); }, %(interval)s ); }
              else { clearInterval( window['%(htmlId)s_interval'] ) ;}} ; ''' % {'interval': interval, "htmlId": self.htmlId}]
        return self.jsAction(jsFncsAuto, "fas fa-clock", pyCssCls, "Auto Update button on the Editor Component", 'auto')

    def download(self, jsFncs='', icon='fas fa-file-download', pyCssCls="CssSmallIcon", tooltip='Download temporary version of the script'):
        if not isinstance(jsFncs, list):
            jsFncs = [jsFncs]
        jsFncs.append("event.stopPropagation(); %s; return false;" % self.aresObj.jsDownload( fileName="tempScript.py", jsData="window['%s_editor'].getValue()" % self.htmlId))
        # Bug fix: register under its own 'download' key - this previously reused
        # 'clone', which overwrote any clone action already attached to the editor.
        return self.jsAction(jsFncs, icon, pyCssCls, tooltip, 'download')

    def __str__(self):
        # Render the toolbar icons in the order recorded in _definedActions,
        # followed by the textarea CodeMirror will take over on load.
        events = []
        for action in self._definedActions:
            if action in self._jsActions:
                events.append( self._jsActions[action] )
        return '''
            <div style="display:inline-block;width:100%%;padding:5px 5px 5px 25px">
              %(events)s
              <span id='%(htmlId)s_updated' style='float:right;font-style:italic;margin-right:10px;display:inline-block:width:100%%'></span>
            </div>
            <textarea %(attr)s>%(vals)s</textarea>
            ''' % {'attr': self.strAttr(pyClassNames=self.__pyStyle), "vals": self.vals, 'htmlId': self.htmlId, 'events': "".join(events)}
class Console(AresHtml.Html):
"""
"""
name, category, callFnc, docCategory = 'Python Cell Runner', 'Text', 'pytestcell', 'Preformatted'
__reqCss, __reqJs = ['codemirror'], ['codemirror']
def __init__(self, aresObj, vals, size, width, widthUnit, height, heightUnit, isEditable, htmlCode):
    """Set up the console cell: base HTML object, font size, run counter and CSS."""
    super(Console, self).__init__(aresObj, vals, width=width, widthUnit=widthUnit, height=height, heightUnit=heightUnit, code=htmlCode)
    if size is None:
        self.size = self.aresObj.pyStyleDfl['fontSize']
    else:
        self.size = "%spx" % size
    self.isEditable = isEditable
    self._jsRun = ''
    self._jsSave = ''
    # Counts how many times the cell has been executed (shown as "In [n]").
    self.addGlobalVar("%s_count" % self.htmlId, "0")
    self.css({'font-size': self.size, 'padding': '10px', "min-height": "30px", "font-family": "Arial, monospace"})
@property
def val(self):
    """
    :category: Javascript function
    :rubric: JS
    :dsc:
      Javascript expression returning the current content of the In[] section
      of the console cell.
    :return: A String with the javascript function to get the value
    """
    return "{0}_editor.getValue()".format(self.htmlId)
@property
def jsQueryData(self):
    """
    :category: Javascript features
    :rubric: JS
    :dsc:
      Javascript object literal with the payload sent when a run or save event
      is triggered from this component: the printed output (event_out), the
      editor content (event_val) and the component code (event_code).
    :return: Javascript String of the data to be used in a jQuery call
    :link ajax call: http://api.jquery.com/jquery.ajax/
    """
    return "{ event_out: $('#%(htmlId)s_result_data').text(), event_val: %(htmlId)s_editor.getValue(), event_code: '%(htmlId)s' }" % {'htmlId': self.htmlId}
# --------------------------------------------------------------------------------------------------------------
# EDITOR STANDARD EVENTS
#
# These already embed an Ajax call, as by default the return of those calls will change the display.
# Make sure you are not calling an Ajax call within an Ajax call; the event engine should remain simple.
# Remember PEP20: Simple is better than complex.
def run(self, url=None, jsData=None, jsFncs=None, httpCodes=None, tooltip="Run the line"):
    """
    :category: Javascript Event
    :rubric: JS
    :example: >>> myObj.run( "/reports/fncs/run/%s" % report_name )
    :dsc:
      Attach the run event to the console cell. The generated Javascript colors
      the output red/black on failure/success, bumps the "In [n]" counter,
      fills the result and print panes, then runs any caller-supplied fragments.
      When ``url`` is given the whole chain is wrapped in an Ajax POST.
    :return: The python object itself
    """
    if not isinstance(jsFncs, list):
        jsFncs = [jsFncs] if jsFncs is not None else []
    jsFncs = [
        # Red text on failure (data.status falsy), standard grey otherwise.
        "if (!data.status){ $('#%(htmlId)s_result_data').css('color', '%(redColor)s') ; } else { $('#%(htmlId)s_result_data').css('color', '%(blackColor)s') }" % {"htmlId": self.htmlId, 'redColor': self.getColor('redColor', 4), 'blackColor': self.getColor('greyColor', 8) },
        # Increment and display the execution counter.
        "%(htmlId)s_count ++; $('#%(htmlId)s_counter').text( 'In [ '+ %(htmlId)s_count +']' )" % {"htmlId": self.htmlId},
        # Populate the Out[] and print panes, then reveal them after the callbacks.
        "$('#%(htmlId)s_result_data').text(data.output); $('#%(htmlId)s_print_data').text(data.print);" % {"htmlId": self.htmlId}] + jsFncs + ["$('#%(htmlId)s_result').show();$('#%(htmlId)s_print').show();" % {"htmlId": self.htmlId} ]
    # Stored as (javascript, tooltip); __str__ wires it to Enter and the run icon.
    self._jsRun = (self.aresObj.jsPost(url=url, jsData=jsData, jsFnc=jsFncs, httpCodes=httpCodes) if url is not None else ";".join(jsFncs), tooltip)
    return self
def save(self, url=None, jsData=None, jsFncs=None, httpCodes=None, tooltip="Save the run"):
    """
    :category: Javascript Event
    :rubric: JS
    :example: >>> myObj.save( "/reports/fncs/test/%s" % report_name )
    :dsc:
      Attach the save event to the console cell, persisting the In and Out
      sections. When ``url`` is given the callbacks are wrapped in an Ajax POST.
    :return: The python object itself
    """
    if jsFncs is None:
        jsFncs = []
    elif not isinstance(jsFncs, list):
        jsFncs = [jsFncs]
    if url is not None:
        js_event = self.aresObj.jsPost(url=url, jsData=jsData, jsFnc=jsFncs, httpCodes=httpCodes)
    else:
        js_event = ";".join(jsFncs)
    self._jsSave = (js_event, tooltip)
    return self
def __str__(self):
runButton, saveButton = '', ''
if self._jsRun != '':
self.aresObj.jsOnLoadFnc.add('''
var %(htmlId)s_editor = CodeMirror.fromTextArea( $('#%(htmlId)s').get(0), {placeholder: "aresObj.myFncs()", lineNumbers: true, mode: 'python'} ) ;
%(htmlId)s_editor.setSize(null, 30); %(htmlId)s_editor.getWrapperElement().style["line-height"] = "1.5"; %(htmlId)s_editor.refresh() ;
%(htmlId)s_editor.on('keydown', function(i, e) {
if (e.keyCode == 13) { var data = %(data)s ; e.preventDefault(); %(run)s ;}
else {
$('#%(htmlId)s_result_data').text(''); $('#%(htmlId)s_print_data').text('');
$('#%(htmlId)s_result').hide(); $('#%(htmlId)s_print').hide();}
}) ;
$('#%(htmlId)s_run').on('click', function(event) { var data = %(data)s ; %(run)s ; })''' % {"htmlId": self.htmlId, "run": self._jsRun[0], 'data': self.jsQueryData})
runButton = '<i title="%(tooltip)s" id="%(htmlId)s_run" class="%(iconCss)s fas fa-caret-right"></i>' % {'tooltip': self._jsRun[1], "htmlId": self.htmlId, "iconCss": self.addPyCss('CssStdIcon')}
if self._jsSave != '':
self.aresObj.jsOnLoadFnc.add('''
$('#%(htmlId)s_save').on('click', function(event) { var data = %(data)s ; %(save)s | |
# ip2geotools/databases/commercial.py
# -*- coding: utf-8 -*-
"""
Commercial geolocation databases
===================================
These classes access many different commercial geolocation databases.
"""
# pylint: disable=line-too-long,invalid-name,W0702
from __future__ import absolute_import
import json
from urllib.parse import quote
import re
import requests
from requests.auth import HTTPBasicAuth
import pyquery
from selenium import webdriver # selenium for Ip2LocationWeb
from selenium.webdriver.firefox.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from ip2geotools.databases.interfaces import IGeoIpDatabase
from ip2geotools.models import IpLocation
from ip2geotools.errors import IpAddressNotFoundError, PermissionRequiredError, \
InvalidRequestError, InvalidResponseError, \
ServiceError, LimitExceededError
class DbIpWeb(IGeoIpDatabase):
    """
    Class for accessing geolocation data provided by searching directly
    on https://db-ip.com/.
    """

    @staticmethod
    def get(ip_address, api_key=None, db_path=None, username=None, password=None):
        """Scrape the db-ip.com front page for *ip_address* and return an IpLocation.

        Bug fix: the ``password`` default was a broken ``<PASSWORD>`` placeholder
        (a syntax error); restored to ``None`` to match every sibling provider.
        Raises ServiceError, LimitExceededError, InvalidResponseError or
        IpAddressNotFoundError on the corresponding failure modes.
        """
        # process request (bare excepts are deliberate module-wide - see the
        # "pylint: disable=...W0702" header - transport failures map to ServiceError)
        try:
            request = requests.post('https://db-ip.com/',
                                    headers={'User-Agent': 'Mozilla/5.0'},
                                    data=[('address', ip_address)],
                                    timeout=62)
        except:
            raise ServiceError()
        # check for HTTP errors
        if request.status_code != 200:
            raise ServiceError()
        # check for errors
        if b'you have exceeded the daily query limit' in request.content.lower():
            raise LimitExceededError()
        # parse content out of the HTML result page
        try:
            content = request.content.decode('utf-8')
            pq = pyquery.PyQuery(content)
            parsed_ip = pq('html > body div.container > h1') \
                .remove('span') \
                .text() \
                .strip()
            parsed_country = pq('html > body > div.container table tr:contains("Country") td') \
                .text() \
                .strip()
            parsed_region = pq('html > body > div.container table tr:contains("State / Region") td') \
                .text() \
                .strip()
            parsed_city = pq('html > body > div.container table tr:contains("City") td') \
                .text() \
                .strip()
            parsed_coords = pq('html > body > div.container table tr:contains("Coordinates") td') \
                .text() \
                .strip() \
                .split(',')
        except:
            raise InvalidResponseError()
        # the page echoes the looked-up address; a mismatch means it was not found
        if ip_address != parsed_ip:
            raise IpAddressNotFoundError(ip_address)
        # prepare return value
        ip_location = IpLocation(ip_address)
        # format data - any conversion failure yields an empty location
        try:
            ip_location.country = parsed_country
            ip_location.region = parsed_region
            ip_location.city = parsed_city
            ip_location.latitude = float(parsed_coords[0].strip())
            ip_location.longitude = float(parsed_coords[1].strip())
        except:
            ip_location.country = None
            ip_location.region = None
            ip_location.city = None
            ip_location.latitude = None
            ip_location.longitude = None
        return ip_location
class MaxMindGeoIp2City(IGeoIpDatabase):
    """
    Class for accessing geolocation data provided by GeoIP2 database
    created by MaxMind, available from https://www.maxmind.com/.
    """

    @staticmethod
    def get(ip_address, api_key=None, db_path=None, username=None, password=None):
        """Query MaxMind's GeoIP2 city web service and map the JSON reply onto an IpLocation."""
        # process request; optional basic auth lifts the demo daily quota
        try:
            if username != None and password != None:
                auth = HTTPBasicAuth(username, password)
            else:
                auth = None
            url = 'https://www.maxmind.com/geoip/v2.1/city/' + quote(ip_address)
            if auth == None:
                url = url + '?demo=1'
            request = requests.get(url, auth=auth, timeout=62)
        except:
            raise ServiceError()
        # decode the JSON body (also needed to build error messages below)
        try:
            content = json.loads(request.content.decode('utf-8'))
        except:
            raise InvalidResponseError()
        # map HTTP errors onto the package exception hierarchy
        status = request.status_code
        if status != 200:
            if status == 400:
                raise InvalidRequestError(content['code'])
            if status == 401:
                raise PermissionRequiredError(content['code'])
            if status == 402:
                raise LimitExceededError(content['code'])
            if status == 403:
                raise PermissionRequiredError(content['code'])
            if status == 404:
                raise IpAddressNotFoundError(ip_address)
            if status == 500:
                raise InvalidRequestError()
            raise ServiceError()
        # prepare and populate the return value
        ip_location = IpLocation(ip_address)
        country = content.get('country')
        ip_location.country = country.get('iso_code') if country else None
        subdivisions = content.get('subdivisions')
        if subdivisions and subdivisions[0].get('names'):
            ip_location.region = subdivisions[0]['names'].get('en')
        else:
            ip_location.region = None
        city = content.get('city')
        if city and city.get('names'):
            ip_location.city = city['names'].get('en')
        else:
            ip_location.city = None
        location = content.get('location')
        if location:
            ip_location.latitude = float(location['latitude'])
            ip_location.longitude = float(location['longitude'])
        else:
            ip_location.latitude = None
            ip_location.longitude = None
        return ip_location
class Ip2LocationWeb(IGeoIpDatabase):
    """
    Class for accessing geolocation data provided by searching directly
    on https://www.ip2location.com/.
    """

    @staticmethod
    def get(ip_address, api_key=None, db_path=None, username=None, password=None):
        """Drive a headless Firefox through ip2location.com's demo page and scrape the result.

        Bug fix: the browser is now always shut down via ``finally`` - previously
        every error path (ServiceError, LimitExceededError, InvalidResponseError)
        leaked a running Firefox process.
        """
        # initiate headless Firefox using selenium to pass through Google reCAPTCHA
        options = Options()
        options.headless = True
        browser = webdriver.Firefox(options=options)
        try:
            try:
                browser.get('http://www.ip2location.com/demo/' + ip_address)
                element = WebDriverWait(browser, 30).until(
                    EC.presence_of_element_located((By.NAME, 'ipAddress'))
                )
                if not element:
                    raise Exception
            except:
                raise ServiceError()
            # parse current limit (the demo allows 50 queries per day)
            current_limit = 0
            body = browser.find_element_by_tag_name('body').text
            try:
                limit = re.search(r'You still have.*?([\d]{1,2})/50.* query limit',
                                  body,
                                  re.DOTALL)
                if limit != None:
                    current_limit = int(limit.group(1))
            except:
                raise InvalidResponseError()
            # check if limit is exceeded
            if current_limit == 0:
                raise LimitExceededError()
            # parse content from the result table
            try:
                table = browser.find_element_by_xpath('//table[contains(.,"Permalink")]')
                parsed_ip = table.find_element_by_xpath('//tr[contains(.,"IP Address")]/td').text.strip()
                # the country ISO code is only available via the flag icon CSS class
                parsed_country = [class_name.replace('flag-icon-', '').upper() for class_name in table.find_element_by_class_name('flag-icon').get_attribute('class').split(' ') if class_name.startswith('flag-icon-')][0]
                parsed_region = table.find_element_by_xpath('//tr[contains(.,"Region")]/td').text.strip()
                parsed_city = table.find_element_by_xpath('//tr[contains(.,"City")]/td').text.strip()
                parsed_coords = table.find_element_by_xpath('//tr[contains(.,"Coordinates of City")]/td').text.strip()
            except:
                raise InvalidResponseError()
        finally:
            # always exit headless Firefox, even when an error was raised above
            browser.quit()
        # the page echoes the looked-up address; a mismatch means it was not found
        if ip_address != parsed_ip:
            raise IpAddressNotFoundError(ip_address)
        # prepare return value
        ip_location = IpLocation(ip_address)
        # format data - any conversion failure yields an empty location
        try:
            ip_location.country = parsed_country
            ip_location.region = parsed_region
            ip_location.city = parsed_city
            parsed_coords = parsed_coords.split('(')[0].split(',')
            ip_location.latitude = float(parsed_coords[0].strip())
            ip_location.longitude = float(parsed_coords[1].strip())
        except:
            ip_location.country = None
            ip_location.region = None
            ip_location.city = None
            ip_location.latitude = None
            ip_location.longitude = None
        return ip_location
class NeustarWeb(IGeoIpDatabase):
    """
    Class for accessing geolocation data provided by searching directly
    on https://www.home.neustar/resources/tools/ip-geolocation-lookup-tool/.
    """

    @staticmethod
    def get(ip_address, api_key=None, db_path=None, username=None, password=None):
        """Scrape Neustar's IP geolocation lookup page for *ip_address*.

        Raises ServiceError, LimitExceededError, InvalidResponseError or
        IpAddressNotFoundError on the corresponding failure modes.
        """
        # process request (bare excepts are deliberate module-wide - W0702 disabled)
        try:
            request = requests.post('https://www.home.neustar/resources/tools/ip-geolocation-lookup-tool',
                                    headers={'User-Agent': 'Mozilla/5.0'},
                                    data=[('ip', ip_address)],
                                    timeout=62)
        except:
            raise ServiceError()
        # check for HTTP errors
        if request.status_code != 200:
            raise ServiceError()
        # check for errors
        if b'rate limit exceeded' in request.content.lower():
            raise LimitExceededError()
        # parse content out of the HTML result page
        try:
            content = request.content.decode('utf-8')
            pq = pyquery.PyQuery(content)
            parsed_ip = pq('html > body > section.full.resource article h2 > strong') \
                .text() \
                .strip()
            parsed_country = pq('html > body > section.full.resource article div.data >table:first tr:contains("Country Code:") td:not(.item)') \
                .text() \
                .strip() \
                .upper()
            parsed_region = pq('html > body > section.full.resource article div.data >table:first tr:contains("Region:") td:not(.item)') \
                .text() \
                .strip() \
                .title()
            parsed_state = pq('html > body > section.full.resource article div.data >table:first tr:contains("State:") td:not(.item)') \
                .text() \
                .strip() \
                .title()
            parsed_city = pq('html > body > section.full.resource article div.data >table:first tr:contains("City:") td:not(.item)') \
                .text() \
                .strip() \
                .title()
            parsed_latitude = pq('html > body > section.full.resource article div.data >table:first tr:contains("Latitude:") td:not(.item)') \
                .text() \
                .strip()
            parsed_longitude = pq('html > body > section.full.resource article div.data >table:first tr:contains("Longitude:") td:not(.item)') \
                .text() \
                .strip()
        except:
            raise InvalidResponseError()
        # the page echoes the looked-up address; a mismatch means it was not found
        if ip_address != parsed_ip:
            raise IpAddressNotFoundError(ip_address)
        # prepare return value
        ip_location = IpLocation(ip_address)
        # format data - any conversion failure yields an empty location
        try:
            ip_location.country = parsed_country
            # NOTE(review): parsed_region is built via .text().strip().title(), so it
            # is always a str (possibly '') and never None - this branch is dead and
            # parsed_state always wins. Looks like inverted or placeholder fallback
            # logic; confirm the intended Region/State precedence before changing it.
            if parsed_region is None:
                ip_location.region = parsed_region
            else:
                ip_location.region = parsed_state
            ip_location.city = parsed_city
            ip_location.latitude = float(parsed_latitude)
            ip_location.longitude = float(parsed_longitude)
        except:
            ip_location.country = None
            ip_location.region = None
            ip_location.city = None
            ip_location.latitude = None
            ip_location.longitude = None
        return ip_location
class GeobytesCityDetails(IGeoIpDatabase):
    """
    Class for accessing geolocation data provided by
    http://geobytes.com/get-city-details-api/.
    """

    @staticmethod
    def get(ip_address, api_key=None, db_path=None, username=None, password=None):
        """Fetch city-level data from the free Geobytes GetCityDetails endpoint."""
        # process request
        endpoint = 'http://getcitydetails.geobytes.com/GetCityDetails?fqcn=' + quote(ip_address)
        try:
            request = requests.get(endpoint, timeout=62)
        except:
            raise ServiceError()
        # check for HTTP errors
        if request.status_code != 200:
            raise ServiceError()
        # parse the (latin-1 encoded) JSON payload
        try:
            content = json.loads(request.content.decode('latin-1'))
        except:
            raise InvalidResponseError()
        # prepare and populate the return value
        ip_location = IpLocation(ip_address)
        ip_location.country = content.get('geobytesinternet')
        ip_location.region = content.get('geobytesregion')
        ip_location.city = content.get('geobytescity')
        latitude = content.get('geobyteslatitude')
        longitude = content.get('geobyteslongitude')
        if latitude and longitude:
            ip_location.latitude = float(latitude)
            ip_location.longitude = float(longitude)
        else:
            ip_location.latitude = None
            ip_location.longitude = None
        return ip_location
class SkyhookContextAcceleratorIp(IGeoIpDatabase):
    """
    Class for accessing geolocation data provided by http://www.skyhookwireless.com/.
    """

    @staticmethod
    def get(ip_address, api_key=None, db_path=None, username=None, password=None):
        """Query Skyhook's Context Accelerator IP endpoint (credentials via username/password)."""
        # process request; a missing username/password fails inside the try and
        # therefore surfaces as ServiceError, same as any transport problem
        try:
            query = ('https://context.skyhookwireless.com/accelerator/ip?'
                     + 'ip=' + quote(ip_address)
                     + '&user=' + quote(username)
                     + '&key=' + quote(password)
                     + '&version=2.0')
            request = requests.get(query, timeout=62)
        except:
            raise ServiceError()
        # map HTTP errors onto the package exception hierarchy
        if request.status_code != 200:
            if request.status_code == 400:
                raise InvalidRequestError()
            if request.status_code == 401:
                raise PermissionRequiredError(ip_address)
            raise ServiceError()
        # content decode
        try:
            content = request.content.decode('utf-8')
        except:
            raise InvalidResponseError()
        # a bare echo of the address means the IP could not be located
        if content == '{"data":{"ip":"' + ip_address + '"}}':
            raise IpAddressNotFoundError(ip_address)
        # parse content
        try:
            content = json.loads(content)
        except:
            raise InvalidResponseError()
        # prepare the return value with empty fields, then fill what is present
        ip_location = IpLocation(ip_address)
        ip_location.country = None
        ip_location.region = None
        ip_location.city = None
        ip_location.latitude = None
        ip_location.longitude = None
        data = content.get('data')
        if data:
            civic = data.get('civic')
            if civic:
                ip_location.country = civic.get('countryIso')
                ip_location.region = civic.get('state')
                ip_location.city = civic.get('city')
            location = data.get('location')
            if location and location.get('latitude') and location.get('longitude'):
                ip_location.latitude = location['latitude']
                ip_location.longitude = location['longitude']
        return ip_location
class IpInfo(IGeoIpDatabase):
"""
Class for accessing geolocation data provided by https://ipinfo.io/.
"""
@staticmethod
def get(ip_address, api_key=None, db_path=None, username=None, password=<PASSWORD>):
# process request
try:
request = requests.get('https://ipinfo.io/' + quote(ip_address) + '/geo/',
timeout=62)
except:
raise ServiceError()
# check for HTTP errors
if request.status_code != 200:
if request.status_code == 404:
raise IpAddressNotFoundError(ip_address)
elif request.status_code == 429:
raise LimitExceededError()
elif request.status_code == 500:
raise InvalidRequestError()
else:
raise ServiceError()
# parse content
try:
content = request.content.decode('utf-8')
content = json.loads(content)
except:
raise InvalidResponseError()
# prepare return value
| |
# Cogs/Bot.py
import asyncio, discord, os, re, psutil, platform, time, sys, fnmatch, subprocess, speedtest, json, struct, shutil, tempfile
from PIL import Image
from discord.ext import commands
from Cogs import Utils, Settings, DisplayName, ReadableTime, GetImage, ProgressBar, UserTime, Message, DL
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
def setup(bot):
    # Register the Bot cog. It receives the Settings cog plus the path of
    # the invoking script and the python command used for restarts.
    bot.add_cog(Bot(bot, bot.get_cog("Settings"), sys.argv[0], 'python'))
# This is the Bot module - it contains things like nickname, status, etc
class Bot(commands.Cog):
# Init with the bot reference, and a reference to the settings var
    def __init__(self, bot, settings, path = None, pypath = None):
        """Store core references plus restart info.

        bot      -- the discord client/bot instance
        settings -- the Settings cog (global/server stat storage)
        path     -- path of the launching script (used to restart the bot)
        pypath   -- python executable/command name used on restart
        """
        self.bot = bot
        self.settings = settings
        # Unix timestamp of cog creation - used for uptime in hostinfo
        self.startTime = int(time.time())
        self.path = path
        self.pypath = pypath
        # http/https/ftp URL matcher - presumably for link detection in
        # commands outside this view; not used in the visible methods. TODO confirm.
        self.regex = re.compile(r"(http|ftp|https)://([\w_-]+(?:(?:\.[\w_-]+)+))([\w.,@?^=%&:/~+#-]*[\w@?^=%&/~+#-])?")
        # Flipped True by on_loaded_extension; gates status_loop
        self.is_current = False
        # Rebind the module-level Utils/DisplayName placeholders to the live cogs
        global Utils, DisplayName
        Utils = self.bot.get_cog("Utils")
        DisplayName = self.bot.get_cog("DisplayName")
def _is_submodule(self, parent, child):
return parent == child or child.startswith(parent + ".")
@commands.Cog.listener()
async def on_unloaded_extension(self, ext):
# Called to shut things down
if not self._is_submodule(ext.__name__, self.__module__):
return
self.is_current = False
    @commands.Cog.listener()
    async def on_loaded_extension(self, ext):
        # See if we were loaded
        if not self._is_submodule(ext.__name__, self.__module__):
            return
        # It's our own extension: wait for the client to be ready, mark this
        # instance as the live one, then kick off the hourly status refresher.
        await self.bot.wait_until_ready()
        self.is_current = True
        self.bot.loop.create_task(self.status_loop())
async def status_loop(self):
# Helper method to loop through and ensure the status remains
while not self.bot.is_closed():
try:
if not self.is_current:
# Bail if we're not the current instance
return
await self._update_status()
except Exception as e:
print(str(e))
await asyncio.sleep(3600) # runs only every 60 minutes (3600 seconds)
    async def onserverjoin(self, server):
        """Leave `server` immediately if it (or its owner) is on the global
        'BlockedServers' list.

        Returns True when the guild was blocked and left, False otherwise.
        """
        # Iterate the blocked list and see if we are blocked
        serverList = self.settings.getGlobalStat('BlockedServers',[])
        for serv in serverList:
            serverName = str(serv).lower()
            try:
                serverID = int(serv)
            except Exception:
                # Non-numeric entry: only the name-based checks can match it.
                serverID = None
            # Guild name match is case-insensitive; id match is exact.
            if serverName == server.name.lower() or serverID == server.id:
                # Found it
                try:
                    await server.leave()
                except:
                    pass
                return True
            # Check for owner name and id quick
            # Name *MUST* be case-sensitive and have the discriminator for safety
            namecheck = server.owner.name + "#" + str(server.owner.discriminator)
            if serv == namecheck or serverID == server.owner.id:
                # Got the owner
                try:
                    await server.leave()
                except:
                    pass
                return True
        return False
    @commands.command(pass_context=True)
    async def botinfo(self, ctx):
        """Lists some general stats about the bot."""
        # Prefer the guild-scoped Member (has color/status); fall back to the raw user in DMs.
        bot_member = self.bot.user if not ctx.guild else ctx.guild.get_member(self.bot.user.id)
        color = bot_member if isinstance(bot_member,discord.Member) else None
        message = await Message.EmbedText(title="Gathering info...", color=color).send(ctx)
        # Get guild count
        guild_count = "{:,}".format(len(self.bot.guilds))
        # Try to do this more efficiently, and faster
        # (users in multiple shared guilds appear once per guild; the set de-dupes)
        total_members = [x.id for x in self.bot.get_all_members()]
        unique_members = set(total_members)
        if len(total_members) == len(unique_members):
            member_count = "{:,}".format(len(total_members))
        else:
            member_count = "{:,} ({:,} unique)".format(len(total_members), len(unique_members))
        # Get commands/cogs count - cogs with only hidden commands are counted separately
        cog_amnt = 0
        empty_cog = 0
        for cog in self.bot.cogs:
            visible = []
            for c in self.bot.get_cog(cog).get_commands():
                if c.hidden:
                    continue
                visible.append(c)
            if not len(visible):
                empty_cog +=1
                # Skip empty cogs
                continue
            cog_amnt += 1
        cog_count = "{:,} cog".format(cog_amnt)
        # Easy way to append "s" if needed:
        if not len(self.bot.cogs) == 1:
            cog_count += "s"
        if empty_cog:
            cog_count += " [{:,} without commands]".format(empty_cog)
        # Count non-hidden commands bot-wide
        visible = []
        for command in self.bot.commands:
            if command.hidden:
                continue
            visible.append(command)
        command_count = "{:,}".format(len(visible))
        # Get localized created time
        local_time = UserTime.getUserTime(ctx.author, self.settings, bot_member.created_at)
        created_at = "{} {}".format(local_time['time'], local_time['zone'])
        # Get localized joined time if in a server
        if isinstance(bot_member,discord.Member):
            local_time = UserTime.getUserTime(ctx.author, self.settings, bot_member.joined_at)
            joined_at = "{} {}".format(local_time['time'], local_time['zone'])
        # Get the current prefix (drop the implicit @mention prefix from the display)
        prefix = await self.bot.command_prefix(self.bot, ctx.message)
        prefix = ", ".join([x for x in prefix if not x == "<@!{}> ".format(self.bot.user.id)])
        # Get the owners
        ownerList = self.settings.getGlobalStat('Owner',[])
        owners = "Unclaimed..."
        if len(ownerList):
            userList = []
            for owner in ownerList:
                # Get the owner's name
                user = self.bot.get_user(int(owner))
                if not user:
                    userString = "Unknown User ({})".format(owner)
                else:
                    userString = "{}#{}".format(user.name, user.discriminator)
                userList.append(userString)
            owners = ', '.join(userList)
        # Get bot's avatar url
        avatar = Utils.get_avatar(bot_member)
        # Build the embed
        fields = [
            {"name":"Members","value":member_count,"inline":True},
            {"name":"Servers","value":guild_count,"inline":True},
            {"name":"Commands","value":command_count + " (in {})".format(cog_count),"inline":True},
            {"name":"Created","value":created_at,"inline":True},
            {"name":"Owners","value":owners,"inline":True},
            {"name":"Prefixes","value":prefix,"inline":True},
            {"name":"Shard Count","value":self.bot.shard_count,"inline":True}
        ]
        if isinstance(bot_member,discord.Member):
            fields.append({"name":"Joined","value":joined_at,"inline":True})
        # Get status - map presence to a colored heart emoji
        status_text = ":green_heart:"
        if bot_member.status == discord.Status.offline:
            status_text = ":black_heart:"
        elif bot_member.status == discord.Status.dnd:
            status_text = ":heart:"
        elif bot_member.status == discord.Status.idle:
            status_text = ":yellow_heart:"
        fields.append({"name":"Status","value":status_text,"inline":True})
        if bot_member.activity and bot_member.activity.name:
            play_list = [ "Playing", "Streaming", "Listening to", "Watching" ]
            try:
                # NOTE(review): indexing assumes activity.type is int-like;
                # an enum type raises here and falls back - confirm intended.
                play_string = play_list[bot_member.activity.type]
            except:
                play_string = "Playing"
            fields.append({"name":play_string,"value":str(bot_member.activity.name),"inline":True})
            if bot_member.activity.type == 1:
                # Add the URL too
                fields.append({"name":"Stream URL","value":"[Watch Now]({})".format(bot_member.activity.url),"inline":True})
        # Update the embed
        await Message.Embed(
            title=DisplayName.name(bot_member) + " Info",
            color=color,
            description="Current Bot Information",
            fields=fields,
            thumbnail=avatar
        ).edit(ctx, message)
@commands.command(pass_context=True)
async def ping(self, ctx):
"""Feeling lonely?"""
before_typing = time.monotonic()
await ctx.trigger_typing()
after_typing = time.monotonic()
ms = int((after_typing - before_typing) * 1000)
msg = '*{}*, ***PONG!*** (~{}ms)'.format(ctx.message.author.mention, ms)
await ctx.send(msg,allowed_mentions=discord.AllowedMentions.all())
@commands.command(pass_context=True)
async def nickname(self, ctx, *, name : str = None):
"""Set the bot's nickname (admin-only)."""
if not await Utils.is_admin_reply(ctx): return
# Let's get the bot's member in the current server
botName = "{}#{}".format(self.bot.user.name, self.bot.user.discriminator)
botMember = ctx.message.guild.get_member_named(botName)
await botMember.edit(nick=name)
    @commands.command(pass_context=True)
    async def hostinfo(self, ctx):
        """List info about the bot's host environment."""
        message = await ctx.channel.send('Gathering info...')
        # cpuCores = psutil.cpu_count(logical=False)
        # cpuThred = psutil.cpu_count()
        cpuThred = os.cpu_count()
        # Blocking 1-second sample of CPU usage
        cpuUsage = psutil.cpu_percent(interval=1)
        memStats = psutil.virtual_memory()
        memPerc = memStats.percent
        memUsed = memStats.used
        memTotal = memStats.total
        # Bytes -> GB, one decimal place
        memUsedGB = "{0:.1f}".format(((memUsed / 1024) / 1024) / 1024)
        memTotalGB = "{0:.1f}".format(((memTotal/1024)/1024)/1024)
        currentOS = platform.platform()
        # NOTE(review): system/release/version/processor are gathered but
        # never used in the message below.
        system = platform.system()
        release = platform.release()
        version = platform.version()
        processor = platform.processor()
        botMember = DisplayName.memberForID(self.bot.user.id, ctx.message.guild)
        botName = DisplayName.name(botMember)
        # Uptime since this cog was initialized
        currentTime = int(time.time())
        timeString = ReadableTime.getReadableTimeBetween(self.startTime, currentTime)
        pythonMajor = sys.version_info.major
        pythonMinor = sys.version_info.minor
        pythonMicro = sys.version_info.micro
        pythonRelease = sys.version_info.releaselevel
        # Pointer size in bits (32 vs 64 bit interpreter)
        pyBit = struct.calcsize("P") * 8
        # Current git commit short hash of the working directory
        process = subprocess.Popen(['git', 'rev-parse', '--short', 'HEAD'], shell=False, stdout=subprocess.PIPE)
        git_head_hash = process.communicate()[0].strip()
        threadString = 'thread'
        if not cpuThred == 1:
            threadString += 's'
        msg = '***{}\'s*** **Home:**\n'.format(botName)
        msg += '```\n'
        msg += 'OS : {}\n'.format(currentOS)
        # Hostname can be suppressed via the global HideHostname setting
        if not self.settings.getGlobalStat("HideHostname",False):
            msg += 'Hostname : {}\n'.format(platform.node())
        msg += 'Language : Python {}.{}.{} {} ({} bit)\n'.format(pythonMajor, pythonMinor, pythonMicro, pythonRelease, pyBit)
        msg += 'Commit : {}\n\n'.format(git_head_hash.decode("utf-8"))
        msg += ProgressBar.center('{}% of {} {}'.format(cpuUsage, cpuThred, threadString), 'CPU') + '\n'
        msg += ProgressBar.makeBar(int(round(cpuUsage))) + "\n\n"
        msg += ProgressBar.center('{} ({}%) of {}GB used'.format(memUsedGB, memPerc, memTotalGB), 'RAM') + '\n'
        msg += ProgressBar.makeBar(int(round(memPerc))) + "\n\n"
        msg += '{} uptime```'.format(timeString)
        await message.edit(content=msg)
@commands.command()
async def hidehostname(self, ctx, *, yes_no = None):
"""Queries or turns on/off hostname hiding in the hostinfo command (owner-only)."""
if not await Utils.is_owner_reply(ctx): return
await ctx.send(Utils.yes_no_setting(
ctx,
"Hostname hiding in `hostinfo`".format(ctx.prefix),
"HideHostname",
yes_no,
default=False,
is_global=True
))
@commands.command(pass_context=True)
async def getimage(self, ctx, *, image):
"""Tests downloading - owner only"""
# Only allow owner to modify the limits
if not await Utils.is_owner_reply(ctx): return
mess = await Message.Embed(title="Test", description="Downloading file...").send(ctx)
file_path = await GetImage.download(image)
mess = await Message.Embed(title="Test", description="Uploading file...").edit(ctx, mess)
await Message.EmbedText(title="Image", file=file_path).edit(ctx, mess)
GetImage.remove(file_path)
@commands.command(pass_context=True)
async def speedtest(self, ctx):
"""Run a network speed test (owner only)."""
if not await Utils.is_owner_reply(ctx): return
message = await ctx.send('Running speed test...')
try:
st = speedtest.Speedtest()
st.get_best_server()
l = asyncio.get_event_loop()
msg = '**Speed Test Results:**\n'
msg += '```\n'
await message.edit(content="Running speed test...\n- Downloading...")
d = await self.bot.loop.run_in_executor(None, st.download)
msg += ' Ping: {} ms\nDownload: {} Mb/s\n'.format(round(st.results.ping, 2), round(d/1024/1024, 2))
await message.edit(content="Running speed test...\n- Downloading...\n- Uploading...")
u = await self.bot.loop.run_in_executor(None, st.upload)
msg += ' Upload: {} Mb/s```'.format(round(u/1024/1024, 2))
await message.edit(content=msg)
except Exception as e:
await message.edit(content="Speedtest Error: {}".format(str(e)))
@commands.command(pass_context=True)
async def adminunlim(self, ctx, *, yes_no : str = None):
"""Sets whether or not to allow unlimited xp to admins (bot-admin only)."""
if not await Utils.is_bot_admin_reply(ctx): return
await ctx.send(Utils.yes_no_setting(ctx,"Admin unlimited xp","AdminUnlimited",yes_no))
@commands.command(pass_context=True)
async def basadmin(self, ctx, *, yes_no : str = None):
"""Sets whether or not to treat bot-admins as admins with regards to xp (admin only)."""
if not await Utils.is_bot_admin_reply(ctx): return
await ctx.send(Utils.yes_no_setting(ctx,"Bot-admin as admin","BotAdminAsAdmin",yes_no))
@commands.command(pass_context=True)
async def joinpm(self, ctx, *, yes_no : str = None):
"""Sets whether or not to pm the rules to new users when they join (bot-admin only)."""
if not await Utils.is_bot_admin_reply(ctx): return
await ctx.send(Utils.yes_no_setting(ctx,"New user pm","JoinPM",yes_no))
@commands.command(pass_context=True)
async def avatar(self, ctx, filename = None):
"""Sets the bot's avatar (owner only)."""
if not await Utils.is_owner_reply(ctx): return
if filename is None and not len(ctx.message.attachments):
m = await ctx.send("Removing avatar...")
try:
await self.bot.user.edit(avatar=None)
except discord.errors.HTTPException as e:
return await m.edit(content="Looks like I can't do that right now. Try again later!")
return await m.edit(content='Avatar removed!')
# Check if attachment
if filename == None:
filename = ctx.message.attachments[0].url
# Let's check if the "url" is actually a user
test_user = DisplayName.memberForName(filename, ctx.guild)
if test_user:
# Got a user!
filename = Utils.get_avatar(test_user)
# Ensure string
filename = str(filename)
# Check if we created a temp folder for this image
isTemp = False
status = await ctx.send('Checking if url (and downloading if valid)...')
# File name is *something* - let's first check it as a url, then a file
extList = ["jpg", "jpeg", "png", "gif", "tiff", "tif", "webp"]
if GetImage.get_ext(filename).lower() in extList:
# URL has an image extension
f = await GetImage.download(filename)
if f:
# we got a download - let's reset and continue
filename = f
isTemp = True
if not os.path.isfile(filename):
if not os.path.isfile('./{}'.format(filename)):
return await status.edit(content='*{}* doesn\'t exist absolutely, or in my working directory.'.format(filename))
else:
# Local file name
filename = './{}'.format(filename)
# File exists - check if image
img = Image.open(filename)
ext = img.format
if not ext:
# File isn't a valid image
return await status.edit(content='*{}* isn\'t a valid image format.'.format(filename))
wasConverted = False
# Is an image PIL understands
if not ext.lower == "png":
# Not a PNG - let's convert
await status.edit(content='Converting to png...')
filename = '{}.png'.format(filename)
img.save(filename)
wasConverted = True
# We got it - crop and go from there
w, h = img.size
dw = dh = 0
if w > h:
# Wide
dw = int((w-h)/2)
elif h > w:
# Tall
dh = int((h-w)/2)
# Run the crop
img.crop((dw, dh, w-dw, h-dh)).save(filename)
await status.edit(content='Uploading and applying avatar...')
with open(filename, 'rb') as f:
newAvatar = f.read()
try:
await self.bot.user.edit(avatar=newAvatar)
except discord.errors.HTTPException as e:
return await status.edit(content="Looks like I can't do that right now. Try again later!")
# Cleanup - try removing with shutil.rmtree, then with os.remove()
await status.edit(content='Cleaning up...')
if isTemp:
GetImage.remove(filename)
else:
if wasConverted:
os.remove(filename)
await status.edit(content='Avatar set!')
@commands.command()
async def setname(self, ctx, *, name = None):
"""Sets the bot's name - may take awhile to reflect (owner only)."""
if not await Utils.is_owner_reply(ctx): return
if not name: return await ctx.send("Usage: `{}setname [new name]`".format(ctx.prefix))
if name == self.bot.user.name: return await ctx.send("That's already my name!")
try:
await self.bot.user.edit(username=name)
except discord.errors.HTTPException as e:
return await ctx.send(content="Looks like I can't do that right now. Try again later!")
# Must | |
# gh_stars: 1-10
#!/usr/bin/env python
import os
import argparse
import sys
import pandas as pd
import numpy as np
from collections import defaultdict
from collections import Counter
import bin.round1_annotation as round1_annotation
import bin.round2_annotation as round2_annotation
import bin.genome_locations as genome_locations
pd.options.mode.chained_assignment = 'raise'
import traceback
import warnings
def warn_with_traceback(message, category, filename, lineno, file=None, line=None):
    """warnings.showwarning hook that also prints the current stack trace."""
    # Fall back to stderr when no writable file object was supplied.
    dest = file if hasattr(file, 'write') else sys.stderr
    traceback.print_stack(file=dest)
    dest.write(warnings.formatwarning(message, category, filename, lineno, line))
warnings.showwarning = warn_with_traceback
def text2vector(filename):
    """convert list delimited by newlines in a text file into a vector"""
    with open(filename) as handle:
        return [ln.strip() for ln in handle]
def uniq_vals_incommon(list1, list2):
    """find unique values in common between two lists"""
    # Set intersection yields exactly the de-duplicated common values.
    return list(set(list1) & set(list2))
def listOrFalse(res_series):
    """Collapse a series of values to False or a ";"-joined string.

    Returns False when every entry is "flagged false" - i.e. equals the
    boolean False (note 0 == False in Python) or the literal string "False".
    Otherwise returns the remaining entries stringified and joined by ";".

    Improvement: the original built a throwaway pandas Series just to call
    .all(); a plain list filter gives identical results without the overhead.
    """
    # == comparisons (not `is`) are intentional so that 0 matches False.
    kept = [str(v) for v in res_series if v != False and v != "False"]
    if not kept:
        # Everything (or nothing at all) was flagged False.
        return False
    return ";".join(kept)
def seriesLen(x):
    """check the length of a series"""
    # Single-element case: a lone NaN (the only value unequal to itself)
    # counts as length 0 rather than 1.
    if len(x) == 1:
        return 0 if x[0] != x[0] else 1
    return len(x)
def restricted_float(x):
    """argparse type: parse x as a float and require it to lie in [0.0, 1.0]."""
    try:
        value = float(x)
    except ValueError:
        raise argparse.ArgumentTypeError("%r not a floating-point literal" % (x,))
    if not 0.0 <= value <= 1.0:
        raise argparse.ArgumentTypeError("%r not in range [0.0, 1.0]"%(value,))
    return value
def gene_overlap_dnase(row):
    """Return the overlap length between the peak interval
    [row.start_b, row.stop_b] and the gene interval
    [row.gene_start, row.gene_stop] (0 when they do not overlap).

    Equivalent to the branch-per-case original: the overlap of two
    intervals is clamp(min(ends) - max(starts), 0).
    """
    lo = max(row.start_b, row.gene_start)
    hi = min(row.stop_b, row.gene_stop)
    return max(0, hi - lo)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="At its core the purpose of \
this code is to annotate ChIP peak regions to genes. \
Other parameters allow for users to annotate both the peaks and the \
genes further with external datasets.")
parser.add_argument('prefix', help='prefix for output file(eg. LFY)')
parser.add_argument('dir_name', help='directory where output will go')
parser.add_argument('bed_file', help='bed file containing ChIP peaks. \
Warning: peaks should have original peak names.')
parser.add_argument('gene_bedfile', help='bed file with gene locations')
parser.add_argument('-n', '--narrowpeak_file', help='narrowPeak file \
containing ChIP peaks.')
parser.add_argument('-pi', '--percent_inter', help='round1 \
annotation only annotates genes that overlap at least this \
percent of the gene. For example if this is set to 1 then \
the peak must overlap the gene 100%% to annotate to it. \
If it is set to .5 then the peak must overlap \
at least half. (default:0)', default=0, type=restricted_float)
parser.add_argument('-tss', '--filter_tss_upstream', help='round1 \
annotation is limited to upstream genes of this many bp \
(default: 3000)', default=3000, type=int)
parser.add_argument('-tts', '--filter_tts_downstream', help='round1 \
annotation is limited to downstream genes of this many bp \
(default: 100)', default=100, type=int)
parser.add_argument('-icv', '--ignore_conv_peaks', help='do not output \
peak information of peaks that annotate to two different peaks ',
action='store_true')
parser.add_argument('-r2', '--round2ann', help='Annotate outlier peaks \
(peaks not annotated in round 1) to differentially expressed genes. \
For this parameter, set the RNA differential expression output files',
nargs='*')
parser.add_argument('-rl', '--rnaDESignificance', nargs=3, \
help='using the tab-delimited text files set by the `round2ann` parameter, \
we can limit which genes are defined as differentially expressed. \
This parameter requires 3 values:[column] [min/max] [value]. For example, \
if you want to limit DE genes to genes output from DESeq2 with adjp<=0.01 \
then set this to `--rnaDESignificance adjp max 0.01`. Another example is \
if you want to limit DE genes to genes output from DESeq2 with \
log2FoldChange>=1 then set this to `--rnaDESignificance log2FoldChange min 1`.', \
required=False)
parser.add_argument('-of', '--outlier_filter', required=False, type=int,
help='If `--round2ann` is set, this is the \
maximum bp distance upstream/downstream of genes of outlier peaks \
(peaks not annotated \
in round 1) (default:10000).', default=10000)
parser.add_argument('-pb', '--comparePeaksToBeds', nargs='*', help='list of \
bed files that you want to compare with the peaks in this sample.', \
required=False)
parser.add_argument('-pbn', '--comparePeaksToBedsNames', nargs='*', \
help='list of prefixes for the bed files that you want to compare with. \
This must be equal to "comparePeaksToBeds" variable \
(eg.LFY_seedlings)', required=False, default=[])
parser.add_argument('-pbf', '--compBedScores', help='if peak overlaps with \
a row in one of the bed files in `comparePeaksToBeds` then report a \
feature of the old peak. Otherwise put NA. Any \
column in a narrowPeak file can be reported (see \
https://genome.ucsc.edu/FAQ/FAQformat.html#format12 for description \
of each column).', choices=["chrom", "chromStart", "chromEnd", "name", \
"score", "strand", \
"singalValue", "pValue", "qValue", "peak"])
parser.add_argument('-sb', '--compareSumRegToBed', nargs='*',
help='list of bed files that you want to compare with the region \
around the ChIP summit site. Note that for this function to work, \
the following other parameters must also be set correctly: \
`narrowpeak_file`, `compareSumRegToBedColNames`, and \
`summitRegion`', required=False)
parser.add_argument('-sbn', '--compareSumRegToBedColNames', nargs='*',
help='list of column names for the output of the comparison \
between the features in the bed file(s) in `compareSumRegToBed` \
and the ChIP summit regions (eg.LFY1_motif)', required=False, default=[])
parser.add_argument('-sbf', '--compSummitOverlapScores', help='if summit \
overlaps with a row in one f the bed files in `compareSumRegToBed` \
then report the either the `score` or `name` value of the overlapping \
feature. Otherwise put NA.',
choices=['name','score'])
parser.add_argument('-sbr', '--summitRegion', nargs='*',
help='To run `compareSumRegToBed` the user must set the \
boundaries around the peak summit to compare to. The specific \
format for this parameter is: \
`[compareSumRegToBedColNames],[bp_upstream],[bp_downstream]`. \
For example, if you want to compare a motif bed file \
(given the column name "LFY1_motif") to -50/+100 bp from the summit \
and a bed file with MNase nucleosome (given the column name \
"High_MNase") to the actual summit site you would set \
`--summitRegion LFY1_motif,50,100 High_MNase,0,0`')
parser.add_argument('-pt', '--comparePeaksToText', nargs='*', help='list of \
files that contain tables with headers. To compare peakwise the table must \
include at least the following columns: chrom, start, stop', \
required=False)
parser.add_argument('-ptn', '--comparePeaksToTextNames', nargs='*', \
help='list of prefixes for the text files that you want to compare with. \
The number of values set to this parameter must be equal to \
the number of values set to parameter `comparePeaksToText`. \
(eg.dexVmock_db)', required=False, default=[])
parser.add_argument('-ptf', '--addPeaksToTextFeatures', help='In each \
peak-centric file given in `comparePeaksToText` there are other \
columns that you may want to include in this analysis. If so you \
need to set this in a specific format: \
`[comparePeaksToTextName]:[column_of_interest_x],[column_of_interest_y],[...]`. \
This data will be output in columns formatted: \
`[comparePeaksToTextName]:[column_of_interest]`. For example, if we set \
`--comparePeaksToTextNames timepoint1_dexVmock_db timepoint2_dexVmock_db \
--addPeaksToTextFeatures timepoint1_dexVmock_db:logFC \
timepoint2_dexVmock_db:logFC,adjP` then the following columns will be \
written: `timepoint1_dexVmock_db:logFC`, `timepoint2_dexVmock_db:logFC`, \
and `timepoint2_dexVmock_db:adjP`', required=False, default=[], \
nargs='*')
parser.add_argument('-gt', '--compareGenesToText', help='tab-delimited \
text file(s) that contain gene information. Each row must contain a \
column containing gene IDs that are labeled with the header "gene_id". \
If this is set, `compareGenesToTextNames` and `addGeneToTextFeatures` \
must also be set.', \
required=False, default=[], nargs='*')
parser.add_argument('-gtn', '--compareGenesToTextNames', help='prefix names for \
each genewise text file given in `compareGenesToText`', \
required=False, default=[], nargs='*')
parser.add_argument('-gtf', '--addGeneToTextFeatures', help='Given the \
gene-centric file(s) in `compareGenesToText` the script will report \
True or False whether the genes annotated appear in the text file \
("gene-match") and/or report other columns from the table in \
`compareGenesToText`. To specify the columns ([col1],[col2][..][coln]) \
that are output set this parameter to this specific format: \
`[compareGenesToTextNames]:[col1],[col2][..][coln]`. \
To report the True/False "gene-match" set one of the columns to the \
dash symbol "-". This data will be output in columns labeled by the \
`compareGenesToTextName". For example, if this parameter is set: \
`--addGeneToTextFeatures RPM:sample1,sample2 \
Sample1vsSample2_DE:-,logFC,adjp` then the output will contain columns: \
"RPM:sample1", "RPM:sample2", "Sample1vsSample2_DE" (which gives the \
gene-match), "Sample1vsSample2_DE:logFC", and \
"Sample1vsSample2_DE:adjp"', required=False, default=[], nargs='*')
parser.add_argument('-df', '--dnase_files', nargs='*', help='list of bed \
files with locations of DNase Hypersensitivity sites', required=False)
parser.add_argument('-dn', '--dnase_names', nargs='*', help='list giving \
names for each DNAse experiment corresponding to DNAse bed files in \
--dnase_files. This list must be of equal length to the "dnase_files" \
variable (eg.DNAse_flowers)', required=False, default=[])
parser.add_argument('-idr', '--globalIDRpval', help='global IDR pvalue \
used on to get this output', default="")
parser.add_argument('--keep_tmps', help='keep temp files', \
action='store_true')
parser.add_argument('--verbose', help='echo processing', \
action='store_true')
args = | |
"""
Module: LMR_verify_gridRNL.py
Purpose: Generates spatial verification statistics of various LMR gridded fields
against 20th century reanalyses.
Originator: <NAME>, U. of Washington, March 2016
Revisions:
"""
import matplotlib
# need to do this backend when running remotely or to suppress figures interactively
matplotlib.use('Agg')
# generic imports
import numpy as np
import glob, os, sys
from datetime import datetime, timedelta
from netCDF4 import Dataset
import mpl_toolkits.basemap as bm
import matplotlib.pyplot as plt
from matplotlib import ticker
from spharm import Spharmt, getspecindx, regrid
import warnings
# LMR specific imports
sys.path.append('../')
from LMR_utils import global_hemispheric_means, assimilated_proxies, coefficient_efficiency
from load_gridded_data import read_gridded_data_CMIP5_model
from LMR_plot_support import *
# change default value of latlon kwarg to True.
bm.latlon_default = True
warnings.filterwarnings('ignore')
##################################
# START: set user parameters here
##################################
# option to suppress figures
iplot = True
iplot_individual_years = False
# centered time mean (nya must be odd! 3 = 3 yr mean; 5 = 5 year mean; etc 0 = none)
nya = 0
# option to print figures
fsave = True
#fsave = False
# set paths, the filename for plots, and global plotting preferences
# where to find reconstruction data
#datadir_output = './data/'
#datadir_output = '/home/disk/kalman2/wperkins/LMR_output/archive'
datadir_output = '/home/disk/kalman3/rtardif/LMR/output'
#datadir_output = '/home/disk/ekman4/rtardif/LMR/output'
#datadir_output = '/home/disk/kalman3/hakim/LMR'
# Directory where reanalysis data can be found
datadir_reanl = '/home/disk/kalman3/rtardif/LMR/data/model/'
# file specification
#
# current datasets
# ---
#nexp = 'production_gis_ccsm4_pagesall_0.75'
#nexp = 'production_mlost_ccsm4_pagesall_0.75'
#nexp = 'production_cru_ccsm4_pagesall_0.75'
#nexp = 'production_mlost_era20c_pagesall_0.75'
#nexp = 'production_mlost_era20cm_pagesall_0.75'
# ---
nexp = 'test'
# ---
# perform verification using all recon. MC realizations ( MCset = None )
# or over a custom selection ( MCset = (begin,end) )
# ex. MCset = (0,0) -> only the first MC run
# MCset = (0,10) -> the first 11 MC runs (from 0 to 10 inclusively)
# MCset = (80,100) -> the 80th to 100th MC runs (21 realizations)
MCset = None
#MCset = (0,10)
# Definition of variables to verify
# kind name variable long name bounds units mult. factor
verif_dict = \
{
#'psl_sfc_Amon' : ('anom', 'MSLP', 'Mean sea level pressure',-8.0,8.0,'(hPa)',0.01), \
'zg_500hPa_Amon' : ('anom','Z500', '500hPa geopotential height',-60.0,60.0,'(m)',1.0), \
#'wap_500hPa_Amon' : ('anom','W500', '500hPa vertical motion',-0.04,0.04,'(Pa/s)',1.0), \
#'ua_1000hPa_Amon' : ('anom','U1000', '1000hPa zonal wind',-2.0,2.0,'(m/s)',1.0), \
#'va_1000hPa_Amon' : ('anom','V1000', '1000hPa meridional wind',-2.0,2.0,'(m/s)',1.0), \
#'ua_850hPa_Amon' : ('anom','U850', '850hPa zonal wind',-2.0,2.0,'(m/s)',1.0), \
#'va_850hPa_Amon' : ('anom','V850', '850hPa meridional wind',-2.0,2.0,'(m/s)',1.0), \
#'ua_700hPa_Amon' : ('anom','U700', '700hPa zonal wind',-2.0,2.0,'(m/s)',1.0), \
#'va_700hPa_Amon' : ('anom','V700', '700hPa meridional wind',-2.0,2.0,'(m/s)',1.0), \
#'ua_600hPa_Amon' : ('anom','U600', '600hPa zonal wind',-2.0,2.0,'(m/s)',1.0), \
#'va_600hPa_Amon' : ('anom','V600', '600hPa meridional wind',-2.0,2.0,'(m/s)',1.0), \
#'ua_500hPa_Amon' : ('anom','U500', '500hPa zonal wind',-2.0,2.0,'(m/s)',1.0), \
#'ua_250hPa_Amon' : ('anom','U250', '250Pa zonal wind',-2.0,2.0,'(m/s)',1.0), \
#'prw_int_Amon' : ('anom','PRW', 'Precipitable water',-10.0,10.0,'(kg/m^2)',1.0), \
}
# time range for verification (in years CE)
trange = [1850,2000] #works for nya = 0
#trange = [1880,2000] #works for nya = 0
#trange = [1900,2000] #works for nya = 0
#trange = [1885,1995] #works for nya = 5
#trange = [1890,1990] #works for nya = 10
# reference period over which mean is calculated & subtracted
# from all datasets (in years CE)
#ref_period = [1951, 1980] # as in instrumental-era products (e.g. GISTEMP)
ref_period = [1900, 1999] # 20th century
# minimum fraction of valid data — consumed by downstream verification code;
# presumably 0.0 disables the threshold (TODO confirm against that code)
valid_frac = 0.0
# number of contours for plots
nlevs = 21
# plot alpha transparency
alpha = 0.5
# set the default size of the figure in inches. ['figure.figsize'] = width, height;
# aspect ratio appears preserved on smallest of the two
plt.rcParams['figure.figsize'] = 10, 10 # that's default image size for this interactive session
plt.rcParams['axes.linewidth'] = 2.0 # set the value globally
plt.rcParams['font.weight'] = 'bold' # set the font weight globally
plt.rcParams['font.size'] = 11 # set the font size globally
# LaTeX text rendering is kept off so the script runs without a TeX install
#plt.rc('text', usetex=True)
plt.rc('text', usetex=False)
verif_vars = list(verif_dict.keys())

workdir = datadir_output + '/' + nexp
print('working directory = %s' % workdir)

print('\n getting file system information...\n')

# get number of mc realizations from directory count
# RT: modified way to determine list of directories with mc realizations
# get a listing of the iteration directories
dirs = glob.glob(workdir+"/r*")
# selecting the MC iterations to keep
if MCset:
    dirset = dirs[MCset[0]:MCset[1]+1]
else:
    dirset = dirs
mcdir = [item.split('/')[-1] for item in dirset]
niters = len(mcdir)

print('mcdir: %s' % str(mcdir))
print('niters = %s' % str(niters))

# check availability of target variables in every MC realization;
# a variable missing from any realization is dropped from verification
vars_to_remove = []
for var in verif_vars:
    available = True
    # renamed loop variable: `dir` shadowed the builtin of the same name
    for mc_realization in mcdir:
        ensfiln = workdir + '/' + mc_realization + '/ensemble_mean_'+var+'.npz'
        if not os.path.exists(ensfiln):
            available = False
            break  # one missing member disqualifies the variable; stop scanning
    if not available:
        print('WARNING: Variable %s not found in reconstruction output...' %var)
        vars_to_remove.append(var)
if len(vars_to_remove) > 0:
    for var in vars_to_remove:
        verif_vars.remove(var)
for var in verif_vars:
# read ensemble mean data
print('\n reading LMR ensemble-mean data...\n')
first = True
k = -1
for dir in mcdir:
k = k + 1
ensfiln = workdir + '/' + dir + '/ensemble_mean_'+var+'.npz'
npzfile = np.load(ensfiln)
print(npzfile.files)
tmp = npzfile['xam']
print('shape of tmp: %s' % str(np.shape(tmp)))
if first:
first = False
recon_times = npzfile['years']
LMR_time = np.array(list(map(int,recon_times)))
lat = npzfile['lat']
lon = npzfile['lon']
nlat = npzfile['nlat']
nlon = npzfile['nlon']
lat2 = np.reshape(lat,(nlat,nlon))
lon2 = np.reshape(lon,(nlat,nlon))
years = npzfile['years']
nyrs = len(years)
xam = np.zeros([nyrs,np.shape(tmp)[1],np.shape(tmp)[2]])
xam_all = np.zeros([niters,nyrs,np.shape(tmp)[1],np.shape(tmp)[2]])
xam = xam + tmp
xam_all[k,:,:,:] = tmp
# this is the sample mean computed with low-memory accumulation
xam = xam/len(mcdir)
# this is the sample mean computed with numpy on all data
xam_check = xam_all.mean(0)
# check..
max_err = np.max(np.max(np.max(xam_check - xam)))
if max_err > 1e-4:
print('max error = %s' % str(max_err))
raise Exception('sample mean does not match what is in the ensemble files!')
# sample variance
xam_var = xam_all.var(0)
print(np.shape(xam_var))
print('\n shape of the ensemble array: %s \n' % str(np.shape(xam_all)))
print('\n shape of the ensemble-mean array: %s \n' % str(np.shape(xam)))
#################################################################
# BEGIN: load verification data (20CR and ERA20C) #
#################################################################
print('\nloading verification data...\n')
# Define month sequence for the calendar year
# (argument needed in upload of reanalysis data)
annual = list(range(1,13))
# load 20th century reanalysis (TCR) reanalysis --------------------------------
vardict = {var: verif_dict[var][0]}
vardef = var
datadir = datadir_reanl +'20cr'
datafile = vardef +'_20CR_185101-201112.nc'
dd = read_gridded_data_CMIP5_model(datadir,datafile,vardict,outtimeavg=annual,
anom_ref=ref_period)
rtime = dd[vardef]['years']
TCR_time = np.array([d.year for d in rtime])
lats = dd[vardef]['lat']
lons = dd[vardef]['lon']
latshape = lats.shape
lonshape = lons.shape
if len(latshape) == 2 & len(lonshape) == 2:
# stored in 2D arrays
lat_TCR = np.unique(lats)
lon_TCR = np.unique(lons)
nlat_TCR, = lat_TCR.shape
nlon_TCR, = lon_TCR.shape
else:
# stored in 1D arrays
lon_TCR = lons
lat_TCR = lats
nlat_TCR = len(lat_TCR)
nlon_TCR = len(lon_TCR)
lon2d_TCR, lat2d_TCR = np.meshgrid(lon_TCR, lat_TCR)
#TCR = dd[vardef]['value'] + dd[vardef]['climo'] # Full field
TCR = dd[vardef]['value'] # Anomalies
# load ERA20C reanalysis -------------------------------------------------------
vardict = {var: verif_dict[var][0]}
vardef = var
datadir = datadir_reanl+'era20c'
datafile = var+'_ERA20C_190001-201012.nc'
dd = read_gridded_data_CMIP5_model(datadir,datafile,vardict,outtimeavg=annual,
anom_ref=ref_period)
rtime = dd[vardef]['years']
ERA20C_time = np.array([d.year for d in rtime])
lats = dd[vardef]['lat']
lons = dd[vardef]['lon']
latshape = lats.shape
lonshape = lons.shape
if len(latshape) == 2 & len(lonshape) == 2:
# stored in 2D arrays
lat_ERA20C = np.unique(lats)
lon_ERA20C = np.unique(lons)
nlat_ERA20C, = lat_ERA20C.shape
nlon_ERA20C, = lon_ERA20C.shape
else:
# stored in 1D arrays
lon_ERA20C = lons
lat_ERA20C = lats
nlat_ERA20C = len(lat_ERA20C)
nlon_ERA20C = len(lon_ERA20C)
lon2_ERA20C, lat2_ERA20C = np.meshgrid(lon_ERA20C, lat_ERA20C)
#ERA20C = dd[vardef]['value'] + dd[vardef]['climo'] # Full field
ERA20C = dd[vardef]['value'] # Anomalies
###############################################################
# END: load verification data (20CR and ERA20C) #
###############################################################
# ----------------------------------------------------------
# Adjust so that all anomaly data pertain to the mean over a
# user-defined reference period (e.g. 20th century)
# ----------------------------------------------------------
stime = ref_period[0]
etime = ref_period[1]
# LMR
LMR = xam
smatch, ematch = find_date_indices(LMR_time,stime,etime)
LMR = LMR - np.mean(LMR[smatch:ematch,:,:],axis=0)
# TCR
smatch, ematch = find_date_indices(TCR_time,stime,etime)
TCR = TCR - np.mean(TCR[smatch:ematch,:,:],axis=0)
# ERA
smatch, ematch = find_date_indices(ERA20C_time,stime,etime)
ERA20C = ERA20C - np.mean(ERA20C[smatch:ematch,:,:],axis=0)
# -----------------------------------
# Regridding the data for comparisons
# -----------------------------------
print('\n regridding data to a common T42 grid...\n')
iplot_loc= False
#iplot_loc= True
# create instance of the spherical harmonics object for each grid
specob_lmr = Spharmt(nlon,nlat,gridtype='regular',legfunc='computed')
specob_tcr = Spharmt(nlon_TCR,nlat_TCR,gridtype='regular',legfunc='computed')
specob_era20c = Spharmt(nlon_ERA20C,nlat_ERA20C,gridtype='regular',legfunc='computed')
# truncate to a lower resolution grid (common:21, 42, 62, 63, 85, 106, 255, 382, 799)
ntrunc_new = 42 # T42
ifix = np.remainder(ntrunc_new,2.0).astype(int)
nlat_new = ntrunc_new + ifix
nlon_new = int(nlat_new*1.5)
# lat, lon grid in the truncated space
dlat = 90./((nlat_new-1)/2.)
dlon = 360./nlon_new
veclat = np.arange(-90.,90.+dlat,dlat)
veclon = np.arange(0.,360.,dlon)
blank = np.zeros([nlat_new,nlon_new])
lat2_new = (veclat + blank.T).T
lon2_new = (veclon + blank)
# create | |
# src/handlers.py
# Prefer ijson's C-accelerated yajl2 backend when it is installed,
# falling back to the default (pure-Python) backend otherwise.
try:
    import ijson.backends.yajl2_c as ijson
except ImportError:  # was a bare `except:`, which would also swallow SystemExit/KeyboardInterrupt
    import ijson
from datetime import datetime, timedelta, date
from warnings import warn
from bs4 import BeautifulSoup
from copy import copy
from tempfile import TemporaryDirectory
import pykakasi
import requests
import iso8601
import json
import csv
import io
import os
import re
from .err import DataAssertion
from .utils import print_log
from .const import GTFS_HEADERS, ADDITIONAL_ENGLISH, GET_TIMEOUT
"""
This file contains object that are used to
_handle_ some more complicated stuff.
Currently used to simplify:
- Getting and caching data from API
- handling calendars
- handling translations
- calculating time-realted things
"""
def JSONIterator(buffer):
    """Iterate over every top-level item in *buffer* using ijson.

    The provided buffer is guaranteed to be closed once iteration
    finishes — whether it ends normally, by an exception, or because
    the consumer abandons the generator.
    """
    try:
        for item in ijson.items(buffer, "item"):
            yield item
    finally:
        buffer.close()
def TrainTripIterator(api, conversion_time):
    """Yield each still-valid TrainTimetable entry exactly once.

    Entries carrying a dct:valid timestamp not after *conversion_time*
    are treated as expired and dropped; duplicate owl:sameAs identifiers
    (which the API sometimes returns) are skipped.
    """
    seen_trip_ids = set()
    for timetable in api.get("TrainTimetable"):
        # Drop expired entries
        if "dct:valid" in timetable:
            if iso8601.parse_date(timetable["dct:valid"]) <= conversion_time:
                continue
        # Skip duplicates, yield everything else
        trip_id = timetable["owl:sameAs"]
        if trip_id not in seen_trip_ids:
            seen_trip_ids.add(trip_id)
            yield timetable
class TimeValue:
    """An object representing a GTFS time value.

    GTFS times may exceed 24:00:00 (for trips running past midnight),
    so this is deliberately not a datetime.time.

    :param seconds: The amount of seconds since 12:00 - 12 hours.
    :type seconds: int
    """
    def __init__(self, seconds):
        # Decompose total seconds into h / m / s once, at construction
        self.m, self.s = divmod(int(seconds), 60)
        self.h, self.m = divmod(self.m, 60)

    def __str__(self):
        "Return GTFS-compliant string representation of time"
        return f"{self.h:0>2}:{self.m:0>2}:{self.s:0>2}"

    def __repr__(self): return "<Time " + self.__str__() + ">"
    def __int__(self): return self.h * 3600 + self.m * 60 + self.s

    # Arithmetic & comparisons work against anything int()-convertible,
    # including other TimeValue instances and plain ints.
    def __add__(self, other): return TimeValue(self.__int__() + int(other))
    def __sub__(self, other): return TimeValue(self.__int__() - int(other))
    def __lt__(self, other): return self.__int__() < int(other)
    def __le__(self, other): return self.__int__() <= int(other)
    def __gt__(self, other): return self.__int__() > int(other)
    def __ge__(self, other): return self.__int__() >= int(other)
    def __eq__(self, other): return self.__int__() == int(other)
    def __ne__(self, other): return self.__int__() != int(other)

    # Defining __eq__ implicitly set __hash__ to None, making instances
    # unhashable; restore hashability (the object is immutable in practice)
    # so TimeValue can be used in sets and as dict keys.
    def __hash__(self): return hash(self.__int__())

    @classmethod
    def from_str(cls, string):
        """Parse a "HH:MM" or "HH:MM:SS" string into a TimeValue.

        :raises ValueError: for any other format.
        """
        str_split = list(map(int, string.split(":")))
        if len(str_split) == 2:
            return cls(str_split[0]*3600 + str_split[1]*60)
        elif len(str_split) == 3:
            return cls(str_split[0]*3600 + str_split[1]*60 + str_split[2])
        else:
            # message previously referenced a stale "_Time" class name
            raise ValueError("invalid string for TimeValue.from_str(), {} (should be HH:MM or HH:MM:SS)".format(string))
class ApiHandler:
    """An object to request and cache API data dumps.

    :param apikey: Apikey to ODPT
    :type apikey: str
    """
    def __init__(self, apikey):
        self.session = requests.Session()
        # Scratch directory, auto-removed when this handler is collected;
        # stored on the instance so it lives as long as the handler does.
        self.dir = TemporaryDirectory()
        self.apikey = apikey

    def get(self, endpoint, data_dump=True, force_vanilla_json=False):
        """Get the `endpoint` data and return an iterator over its items.

        If `data_dump` is truthy, requests the data dump of the given
        endpoint (a ".json" suffix is appended to the endpoint name).
        If `force_vanilla_json` is truthy, the whole response is parsed
        at once with requests' JSON decoder instead of being streamed
        through ijson.
        """
        # (docstring previously referred to a nonexistent `cache` argument)
        if data_dump:
            endpoint = endpoint + ".json"
        print_log(f"Requesting data dump for {endpoint}")
        req = self.session.get(
            f"https://api-tokyochallenge.odpt.org/api/v4/odpt:{endpoint}",
            params={"acl:consumerKey": self.apikey}, timeout=GET_TIMEOUT,
            stream=True,
        )
        req.raise_for_status()
        if force_vanilla_json:
            # Parse everything now; the generator merely defers iteration.
            # (Previously req.raw was also bound here and left unused.)
            return (i for i in req.json())
        else:
            # Stream-parse; JSONIterator closes req.raw when exhausted.
            return JSONIterator(req.raw)
class CalendarHandler:
    """An object which handles services,
    calendars and all that kind of stuff.

    :param apihandler: The ApiHandler object
    :type apihandler: ApiHandler

    :param start_date: Calendar start date
    :type start_date: datetime.date

    :param end_date: Calendar end date. If not provided assumed
                     to be 180 days after start_date
    :type end_date: datetime.date
    """
    def __init__(self, apihandler, start_date, end_date=None):
        """Inits the CalendarHandler"""
        if end_date is None:
            end_date = start_date + timedelta(days=180)
        self.apihandler = apihandler
        self.start = start_date
        self.end = end_date
        self.used = {}          # route_id → set of calendar_ids the route references
        self.valid = set()      # calendar_ids active on ≥ 1 day in [start, end]
        self.holidays = set()   # Japanese public holidays within [start, end]
        self.special = {}       # date → set of explicit-date calendar_ids active then
        self.outputted = set()  # service_ids written out by export()
        # Calendar names with implicit weekday/holiday semantics.
        # ("Sunday" was listed twice in the original literal — the set made
        # that harmless; the duplicate is simply dropped here.)
        self.built_ins = {
            "Everyday", "Weekday", "SaturdayHoliday", "Holiday",
            "Monday", "Tuesday", "Wednesday", "Thursday",
            "Friday", "Saturday", "Sunday",
        }
        # Load holidays
        self.load_valid()
        self.load_holidays()

    def load_holidays(self):
        """Loads Japan holidays into self.holidays.

        Data comes from Japan's Cabinet Office:
        https://www8.cao.go.jp/chosei/shukujitsu/gaiyou.html

        Only holidays within self.start and self.end are saved.
        """
        print_log("Loading calendar holidays")
        req = requests.get("https://www8.cao.go.jp/chosei/shukujitsu/syukujitsu.csv",
                           timeout=GET_TIMEOUT)
        req.raise_for_status()
        req.encoding = "shift-jis"
        buffer = io.StringIO(req.text)
        reader = csv.DictReader(buffer)
        for row in reader:
            date_str = row["国民の祝日・休日月日"]
            date_val = datetime.strptime(date_str, "%Y/%m/%d").date()
            if self.start <= date_val <= self.end:
                self.holidays.add(date_val)
        buffer.close()

    def load_valid(self):
        """Loads list of **usable** calendars into self.valid
        in order to ensure that each trip points to a
        service_id active on at least one day.
        """
        calendars = self.apihandler.get("Calendar")
        for calendar in calendars:
            calendar_id = calendar["owl:sameAs"].split(":")[1]
            if calendar_id in self.built_ins:
                self.valid.add(calendar_id)
            elif calendar.get("odpt:day", []) != []:
                dates = [datetime.strptime(i, "%Y-%m-%d").date() for i in calendar["odpt:day"]]
                dates = [i for i in dates if self.start <= i <= self.end]
                if dates:
                    # Save dates of special calendars.
                    # (loop variable renamed — `date` shadowed datetime.date)
                    for active_day in dates:
                        self.special.setdefault(active_day, set()).add(calendar_id)
                    # Add this special calendar to self.valid
                    self.valid.add(calendar_id)

    def use(self, route_id, calendar_id):
        """Checks if this pair of route_id and calendar_id can be used.
        If yes, returns the service_id to be used in the GTFS.
        If no, returns None
        """
        if calendar_id not in self.valid:
            return None
        # Remember which calendars each route references (needed by export)
        self.used.setdefault(route_id, set()).add(calendar_id)
        return route_id + "." + calendar_id

    def was_exported(self, service_id):
        """Check if this service_id (route_id.calendar_id)
        was exported to calendar_dates.txt

        :rtype: bool
        """
        return service_id in self.outputted

    def export(self):
        """Exports all used services into
        gtfs/calendar_dates.txt
        """
        with open("gtfs/calendar_dates.txt", mode="w", encoding="utf8", newline="") as buffer:
            writer = csv.DictWriter(buffer, GTFS_HEADERS["calendar_dates.txt"], extrasaction="ignore")
            writer.writeheader()
            for route_id, calendars_used in self.used.items():
                print_log(f"Exporting calendars: {route_id}")
                working_date = copy(self.start)
                while working_date <= self.end:
                    active_services = []
                    weekday = working_date.weekday()
                    is_holiday = working_date in self.holidays
                    # FIX: use .get() — plain indexing raised KeyError for any
                    # date without special calendars (self.special only holds
                    # entries for dates that *have* special calendars).
                    special_calendars = calendars_used.intersection(
                        self.special.get(working_date, set()))
                    # == DIFFERENT ACTIVE SERVICE SWITCH-CASE == #
                    if special_calendars:
                        active_services = list(special_calendars)
                    # Holidays
                    elif is_holiday and "Holiday" in calendars_used:
                        active_services = ["Holiday"]
                    elif is_holiday and "SaturdayHoliday" in calendars_used:
                        active_services = ["SaturdayHoliday"]
                    # Specific weekdays
                    elif weekday == 0 and "Monday" in calendars_used:
                        active_services = ["Monday"]
                    elif weekday == 1 and "Tuesday" in calendars_used:
                        active_services = ["Tuesday"]
                    elif weekday == 2 and "Wednesday" in calendars_used:
                        active_services = ["Wednesday"]
                    elif weekday == 3 and "Thursday" in calendars_used:
                        active_services = ["Thursday"]
                    elif weekday == 4 and "Friday" in calendars_used:
                        active_services = ["Friday"]
                    elif weekday == 5 and "Saturday" in calendars_used:
                        active_services = ["Saturday"]
                    elif weekday == 6 and "Sunday" in calendars_used:
                        active_services = ["Sunday"]
                    # Weekend vs Workday
                    elif weekday <= 4 and "Weekday" in calendars_used:
                        active_services = ["Weekday"]
                    elif weekday >= 5 and "SaturdayHoliday" in calendars_used:
                        active_services = ["SaturdayHoliday"]
                    # Everyday
                    elif "Everyday" in calendars_used:
                        active_services = ["Everyday"]
                    # == END SWITCH-CASE == #
                    for active_service in active_services:
                        service_id = route_id + "." + active_service
                        self.outputted.add(service_id)
                        writer.writerow({
                            "service_id": service_id,
                            "date": working_date.strftime("%Y%m%d"),
                            "exception_type": "1",
                        })
                    working_date += timedelta(days=1)
class TranslationHandler:
"""An object to handle translations"""
def __init__(self):
"""Sets up the TranslationHandler
"""
kakasi_loader = pykakasi.kakasi()
kakasi_loader.setMode("H", "a")
kakasi_loader.setMode("K", "a")
kakasi_loader.setMode("J", "a")
kakasi_loader.setMode("r", "Hepburn")
kakasi_loader.setMode("s", True)
kakasi_loader.setMode("C", True)
self.converter = kakasi_loader.getConverter()
self.print_warns = False
self.strings = {}
def get_english(self, japanese, dont_save=False):
"""Given a Japanese text,
returns the corresponding English string.
"""
# Ignore empty strings
if japanese is None or japanese == "":
return japanese
# Check if this text is already known
inside_self = self.strings.get(japanese)
if inside_self:
return inside_self
# Check if it is defined in ADDITIONAL_ENGLISH
inside_additional = ADDITIONAL_ENGLISH.get(japanese)
if inside_additional:
self.strings[japanese] = inside_additional
return inside_additional
# Fallback to using pykakasi
english = self.converter.do(japanese)
english = english.title()
# Fix for hepburn macrons (Ooki → Ōki)
english = english.replace("Uu", "Ū").replace("uu", "ū")
english = english.replace("Oo", "Ō").replace("oo", "ō")
english = english.replace("Ou", "Ō").replace("ou", "ō")
# Fix for katakana chōonpu (ta-minaru → taaminaru)
english = english.replace("A-", "Aa").replace("a-", "aa")
english = english.replace("I-", "Ii").replace("i-", "ii")
english = english.replace("U-", "Ū").replace("u-", "ū")
english = english.replace("E-", "Ee").replace("e-", "ee")
english = english.replace("O-", "Ō").replace("o-", "ō")
english = english.title()
if not dont_save:
self.strings[japanese] = english
if self.print_warns:
print_log(f"no english for string {japanese} (generated {english})", 1)
return english
def get_headsign_english(self, japanese, dont_save=False):
"""Given a Japanese text,
returns the corresponding English string,
with optimization for trip_headsign.
"""
# Check if this text is already known
inside_self = self.strings.get(japanese)
if inside_self:
return inside_self
# Check if it is defined in ADDITIONAL_ENGLISH
inside_additional = ADDITIONAL_ENGLISH.get(japanese)
if inside_additional:
self.strings[japanese] = inside_additional
return inside_additional
# Analyze the text (bonus points for using regex!)
jap_parsed = japanese.replace(")", ")").replace("(", "(")
via = re.search(r"(\w+)経由", jap_parsed)
brackets = re.search(r"(\()(\w+)(\))", jap_parsed)
if via:
via_txt = via[1]
jap_parsed = jap_parsed.replace(via[0], "")
else:
via_txt = None
if brackets:
brackets_txt = brackets[2]
jap_parsed = jap_parsed.replace(brackets[0], "")
else:
brackets_txt = None
destination = re.sub(r"(行|行き|ゆき)$", "", jap_parsed.strip())
# Translate parts into english
| |
+ ' Mbytes'
else:
imageSize = imageSize / 1000
logging.debug('\u001B[1m ' + image + ' size is ' + ('%.3f' % imageSize) + ' Gbytes\u001B[0m')
self.allImagesSize[image] = str(round(imageSize,1)) + ' Gbytes'
else:
logging.debug('ran-build size is unknown')
self.allImagesSize[image] = 'unknown'
# Now pruning dangling images in between target builds
mySSH.command(self.cli + ' image prune --force', '\$', 30)
# Analyzing the logs
mySSH.command('cd ' + lSourcePath + '/cmake_targets', '\$', 5)
mySSH.command('mkdir -p build_log_' + self.testCase_id, '\$', 5)
mySSH.command('mv log/* ' + 'build_log_' + self.testCase_id, '\$', 5)
mySSH.command('cd ' + lSourcePath + '/cmake_targets', '\$', 5)
mySSH.command('rm -f build_log_' + self.testCase_id + '.zip || true', '\$', 5)
if (os.path.isfile('./build_log_' + self.testCase_id + '.zip')):
os.remove('./build_log_' + self.testCase_id + '.zip')
if (os.path.isdir('./build_log_' + self.testCase_id)):
shutil.rmtree('./build_log_' + self.testCase_id)
mySSH.command('zip -r -qq build_log_' + self.testCase_id + '.zip build_log_' + self.testCase_id, '\$', 5)
mySSH.copyin(lIpAddr, lUserName, lPassWord, lSourcePath + '/cmake_targets/build_log_' + self.testCase_id + '.zip', '.')
mySSH.command('rm -f build_log_' + self.testCase_id + '.zip','\$', 5)
mySSH.close()
ZipFile('build_log_' + self.testCase_id + '.zip').extractall('.')
#Trying to identify the errors and warnings for each built images
imageNames1 = imageNames
shared = ('ran-build','ran')
imageNames1.insert(0, shared)
for image,pattern in imageNames1:
files = {}
file_list = [f for f in os.listdir('build_log_' + self.testCase_id + '/' + image) if os.path.isfile(os.path.join('build_log_' + self.testCase_id + '/' + image, f)) and f.endswith('.txt')]
for fil in file_list:
errorandwarnings = {}
warningsNo = 0
errorsNo = 0
with open('build_log_{}/{}/{}'.format(self.testCase_id,image,fil), mode='r') as inputfile:
for line in inputfile:
result = re.search(' ERROR ', str(line))
if result is not None:
errorsNo += 1
result = re.search(' error:', str(line))
if result is not None:
errorsNo += 1
result = re.search(' WARNING ', str(line))
if result is not None:
warningsNo += 1
result = re.search(' warning:', str(line))
if result is not None:
warningsNo += 1
errorandwarnings['errors'] = errorsNo
errorandwarnings['warnings'] = warningsNo
errorandwarnings['status'] = status
files[fil] = errorandwarnings
# Let analyze the target image creation part
if os.path.isfile('build_log_{}/{}.log'.format(self.testCase_id,image)):
errorandwarnings = {}
with open('build_log_{}/{}.log'.format(self.testCase_id,image), mode='r') as inputfile:
startOfTargetImageCreation = False
buildStatus = False
for line in inputfile:
result = re.search('FROM .* [aA][sS] ' + image + '$', str(line))
if result is not None:
startOfTargetImageCreation = True
if startOfTargetImageCreation:
result = re.search('Successfully tagged ' + image + ':', str(line))
if result is not None:
buildStatus = True
result = re.search('COMMIT ' + image + ':', str(line))
if result is not None:
buildStatus = True
inputfile.close()
if buildStatus:
errorandwarnings['errors'] = 0
else:
errorandwarnings['errors'] = 1
errorandwarnings['warnings'] = 0
errorandwarnings['status'] = buildStatus
files['Target Image Creation'] = errorandwarnings
self.collectInfo[image] = files
if status:
logging.info('\u001B[1m Building OAI Image(s) Pass\u001B[0m')
HTML.CreateHtmlTestRow(self.imageKind, 'OK', CONST.ALL_PROCESSES_OK)
HTML.CreateHtmlNextTabHeaderTestRow(self.collectInfo, self.allImagesSize)
else:
logging.error('\u001B[1m Building OAI Images Failed\u001B[0m')
HTML.CreateHtmlTestRow(self.imageKind, 'KO', CONST.ALL_PROCESSES_OK)
HTML.CreateHtmlNextTabHeaderTestRow(self.collectInfo, self.allImagesSize)
HTML.CreateHtmlTabFooter(False)
sys.exit(1)
def DeployObject(self, HTML, EPC):
if self.eNB_serverId[self.eNB_instance] == '0':
lIpAddr = self.eNBIPAddress
lUserName = self.eNBUserName
lPassWord = <PASSWORD>
lSourcePath = self.eNBSourceCodePath
elif self.eNB_serverId[self.eNB_instance] == '1':
lIpAddr = self.eNB1IPAddress
lUserName = self.eNB1UserName
lPassWord = <PASSWORD>
lSourcePath = self.eNB1SourceCodePath
elif self.eNB_serverId[self.eNB_instance] == '2':
lIpAddr = self.eNB2IPAddress
lUserName = self.eNB2UserName
lPassWord = self.<PASSWORD>
lSourcePath = self.eNB2SourceCodePath
if lIpAddr == '' or lUserName == '' or lPassWord == '' or lSourcePath == '':
HELP.GenericHelp(CONST.Version)
sys.exit('Insufficient Parameter')
logging.debug('\u001B[1m Deploying OAI Object on server: ' + lIpAddr + '\u001B[0m')
mySSH = SSH.SSHConnection()
mySSH.open(lIpAddr, lUserName, lPassWord)
# Putting the CPUs in a good state, we do that only on a few servers
mySSH.command('hostname', '\$', 5)
result = re.search('obelix|asterix', mySSH.getBefore())
if result is not None:
mySSH.command('if command -v cpupower &> /dev/null; then echo ' + lPassWord + ' | sudo -S cpupower idle-set -D 0; fi', '\$', 5)
time.sleep(5)
mySSH.command('cd ' + lSourcePath + '/' + self.yamlPath[self.eNB_instance], '\$', 5)
mySSH.command('cp docker-compose.yml ci-docker-compose.yml', '\$', 5)
imageTag = 'develop'
if (self.ranAllowMerge):
imageTag = 'ci-temp'
mySSH.command('sed -i -e "s/image: oai-enb:latest/image: oai-enb:' + imageTag + '/" ci-docker-compose.yml', '\$', 2)
localMmeIpAddr = EPC.MmeIPAddress
mySSH.command('sed -i -e "s/CI_MME_IP_ADDR/' + localMmeIpAddr + '/" ci-docker-compose.yml', '\$', 2)
if self.flexranCtrlDeployed:
mySSH.command('sed -i -e \'s/FLEXRAN_ENABLED:.*/FLEXRAN_ENABLED: "yes"/\' ci-docker-compose.yml', '\$', 2)
mySSH.command('sed -i -e "s/CI_FLEXRAN_CTL_IP_ADDR/' + self.flexranCtrlIpAddress + '/" ci-docker-compose.yml', '\$', 2)
else:
mySSH.command('sed -i -e "s/FLEXRAN_ENABLED:.*$/FLEXRAN_ENABLED: \"no\"/" ci-docker-compose.yml', '\$', 2)
mySSH.command('sed -i -e "s/CI_FLEXRAN_CTL_IP_ADDR/127.0.0.1/" ci-docker-compose.yml', '\$', 2)
# Currently support only one
mySSH.command('docker-compose --file ci-docker-compose.yml config --services | sed -e "s@^@service=@"', '\$', 2)
result = re.search('service=(?P<svc_name>[a-zA-Z0-9\_]+)', mySSH.getBefore())
if result is not None:
svcName = result.group('svc_name')
mySSH.command('docker-compose --file ci-docker-compose.yml up -d ' + svcName, '\$', 2)
# Checking Status
mySSH.command('docker-compose --file ci-docker-compose.yml config', '\$', 5)
result = re.search('container_name: (?P<container_name>[a-zA-Z0-9\-\_]+)', mySSH.getBefore())
unhealthyNb = 0
healthyNb = 0
startingNb = 0
containerName = ''
if result is not None:
containerName = result.group('container_name')
time.sleep(5)
cnt = 0
while (cnt < 3):
mySSH.command('docker inspect --format=\'{{.State.Health.Status}}\' ' + containerName, '\$', 5)
unhealthyNb = mySSH.getBefore().count('unhealthy')
healthyNb = mySSH.getBefore().count('healthy') - unhealthyNb
startingNb = mySSH.getBefore().count('starting')
if healthyNb == 1:
cnt = 10
else:
time.sleep(10)
cnt += 1
logging.debug(' -- ' + str(healthyNb) + ' healthy container(s)')
logging.debug(' -- ' + str(unhealthyNb) + ' unhealthy container(s)')
logging.debug(' -- ' + str(startingNb) + ' still starting container(s)')
status = False
if healthyNb == 1:
cnt = 0
while (cnt < 20):
mySSH.command('docker logs ' + containerName + ' | egrep --text --color=never -i "wait|sync|Starting"', '\$', 30)
result = re.search('got sync|Starting F1AP at CU', mySSH.getBefore())
if result is None:
time.sleep(6)
cnt += 1
else:
cnt = 100
status = True
logging.info('\u001B[1m Deploying OAI object Pass\u001B[0m')
time.sleep(10)
mySSH.close()
self.testCase_id = HTML.testCase_id
self.eNB_logFile[self.eNB_instance] = 'enb_' + self.testCase_id + '.log'
if status:
HTML.CreateHtmlTestRow('N/A', 'OK', CONST.ALL_PROCESSES_OK)
else:
HTML.CreateHtmlTestRow('N/A', 'KO', CONST.ALL_PROCESSES_OK)
def UndeployObject(self, HTML, RAN):
if self.eNB_serverId[self.eNB_instance] == '0':
lIpAddr = self.eNBIPAddress
lUserName = self.eNBUserName
lPassWord = <PASSWORD>.eNBPassword
lSourcePath = self.eNBSourceCodePath
elif self.eNB_serverId[self.eNB_instance] == '1':
lIpAddr = self.eNB1IPAddress
lUserName = self.eNB1UserName
lPassWord = self.eNB1Password
lSourcePath = self.eNB1SourceCodePath
elif self.eNB_serverId[self.eNB_instance] == '2':
lIpAddr = self.eNB2IPAddress
lUserName = self.eNB2UserName
lPassWord = self.eNB2Password
lSourcePath = self.eNB2SourceCodePath
if lIpAddr == '' or lUserName == '' or lPassWord == '' or lSourcePath == '':
HELP.GenericHelp(CONST.Version)
sys.exit('Insufficient Parameter')
logging.debug('\u001B[1m Deploying OAI Object on server: ' + lIpAddr + '\u001B[0m')
mySSH = SSH.SSHConnection()
mySSH.open(lIpAddr, lUserName, lPassWord)
mySSH.command('cd ' + lSourcePath + '/' + self.yamlPath[self.eNB_instance], '\$', 5)
# Currently support only one
mySSH.command('docker-compose --file ci-docker-compose.yml config', '\$', 5)
result = re.search('container_name: (?P<container_name>[a-zA-Z0-9\-\_]+)', mySSH.getBefore())
if result is not None:
containerName = result.group('container_name')
mySSH.command('docker kill --signal INT ' + containerName, '\$', 30)
time.sleep(5)
mySSH.command('docker logs ' + containerName + ' > ' + lSourcePath + '/cmake_targets/' + self.eNB_logFile[self.eNB_instance], '\$', 30)
mySSH.command('docker rm -f ' + containerName, '\$', 30)
# Putting the CPUs back in a idle state, we do that only on a few servers
mySSH.command('hostname', '\$', 5)
result = re.search('obelix|asterix', mySSH.getBefore())
if result is not None:
mySSH.command('if command -v cpupower &> /dev/null; then echo ' + lPassWord + ' | sudo -S cpupower idle-set -E; fi', '\$', 5)
mySSH.close()
# Analyzing log file!
copyin_res = mySSH.copyin(lIpAddr, lUserName, lPassWord, lSourcePath + '/cmake_targets/' + self.eNB_logFile[self.eNB_instance], '.')
nodeB_prefix = 'e'
if (copyin_res == -1):
HTML.htmleNBFailureMsg='Could not copy ' + nodeB_prefix + 'NB logfile to analyze it!'
HTML.CreateHtmlTestRow('N/A', 'KO', CONST.ENB_PROCESS_NOLOGFILE_TO_ANALYZE)
else:
logging.debug('\u001B[1m Analyzing ' + nodeB_prefix + 'NB logfile \u001B[0m ' + self.eNB_logFile[self.eNB_instance])
logStatus = RAN.AnalyzeLogFile_eNB(self.eNB_logFile[self.eNB_instance], HTML)
if (logStatus < 0):
HTML.CreateHtmlTestRow(RAN.runtime_stats, 'KO', logStatus)
else:
HTML.CreateHtmlTestRow(RAN.runtime_stats, 'OK', CONST.ALL_PROCESSES_OK)
logging.info('\u001B[1m Undeploying OAI Object Pass\u001B[0m')
def DeployGenObject(self, HTML):
self.exitStatus = 0
logging.info('\u001B[1m Checking Services to deploy\u001B[0m')
cmd = 'cd ' + self.yamlPath[0] + ' && docker-compose config --services'
logging.debug(cmd)
try:
listServices = subprocess.check_output(cmd, shell=True, universal_newlines=True)
except Exception as e:
self.exitStatus = 1
HTML.CreateHtmlTestRow('SVC not Found', 'KO', CONST.ALL_PROCESSES_OK)
return
for reqSvc in self.services[0].split(' '):
res = re.search(reqSvc, listServices)
if res is None:
logging.error(reqSvc + ' not found in specified docker-compose')
self.exitStatus = 1
if (self.exitStatus == 1):
HTML.CreateHtmlTestRow('SVC not Found', 'KO', CONST.ALL_PROCESSES_OK)
return
if (self.ranAllowMerge):
cmd = 'cd ' + self.yamlPath[0] + ' && sed -e "s@develop@ci-temp@" docker-compose.y*ml > docker-compose-ci.yml'
else:
cmd = 'cd ' + self.yamlPath[0] + ' && sed -e "s@develop@develop@" docker-compose.y*ml > docker-compose-ci.yml'
logging.debug(cmd)
subprocess.run(cmd, shell=True)
cmd = 'cd ' + self.yamlPath[0] + ' && docker-compose -f docker-compose-ci.yml up -d ' + self.services[0]
logging.debug(cmd)
try:
deployStatus = subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT, universal_newlines=True, timeout=30)
except Exception as e:
self.exitStatus = 1
logging.error('Could not deploy')
HTML.CreateHtmlTestRow('Could not deploy', 'KO', CONST.ALL_PROCESSES_OK)
return
logging.info('\u001B[1m Checking if all deployed healthy\u001B[0m')
cmd = 'cd ' + self.yamlPath[0] + ' && docker-compose -f docker-compose-ci.yml ps -a'
count = 0
healthy = 0
while (count < 10):
count += 1
deployStatus = subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT, universal_newlines=True, timeout=10)
healthy = 0
for state in deployStatus.split('\n'):
res = re.search('Up \(healthy\)', state)
if res is not None:
healthy += 1
if healthy == self.nb_healthy[0]:
count = 100
else:
time.sleep(10)
# HACK TO REMOVE LATER WHEN FIX
res = re.search('oai-nr-ue', self.services[0])
if res is not None:
cmd = 'docker exec rfsim5g-oai-nr-ue /bin/bash -c "ip route del default"'
logging.debug(cmd)
deployStatus = subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT, universal_newlines=True, timeout=10)
cmd = 'docker exec rfsim5g-oai-nr-ue /bin/bash -c "ip route del 172.16.58.3/24"'
logging.debug(cmd)
deployStatus = subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT, universal_newlines=True, timeout=10)
cmd = 'docker exec rfsim5g-oai-nr-ue /bin/bash -c "ip route add default via 172.16.31.10 dev oaitun_ue1"'
logging.debug(cmd)
deployStatus = subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT, universal_newlines=True, timeout=10)
# END OF HACK TO REMOVE LATER WHEN FIX
if count == 100 and healthy == self.nb_healthy[0]:
HTML.CreateHtmlTestRow('n/a', 'OK', CONST.ALL_PROCESSES_OK)
logging.info('\u001B[1m Deploying OAI Object(s) PASS\u001B[0m')
else:
self.exitStatus = 1
HTML.CreateHtmlTestRow('Could not deploy in time', 'KO', CONST.ALL_PROCESSES_OK)
logging.error('\u001B[1m Deploying OAI Object(s) FAILED\u001B[0m')
def UndeployGenObject(self, HTML):
self.exitStatus = 0
if (self.ranAllowMerge):
cmd = 'cd ' + self.yamlPath[0] + ' && sed -e "s@develop@ci-temp@" docker-compose.y*ml > docker-compose-ci.yml'
else:
cmd = 'cd ' + self.yamlPath[0] + ' && sed -e "s@develop@develop@" docker-compose.y*ml > docker-compose-ci.yml'
logging.debug(cmd)
subprocess.run(cmd, shell=True)
# if the containers are running, recover the logs!
cmd = 'cd ' + self.yamlPath[0] + ' && docker-compose -f docker-compose-ci.yml ps --all'
logging.debug(cmd)
deployStatus = subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT, universal_newlines=True, timeout=10)
anyLogs = False
for state in deployStatus.split('\n'):
res = re.search('Name|----------', state)
if res is not None:
continue
if len(state) == 0:
continue
res = re.search('^(?P<container_name>[a-zA-Z0-9\-\_]+) ', state)
if res is not None:
anyLogs = True
cName = | |
<gh_stars>0
# -*- coding: utf-8 -*- #
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for making API requests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import copy
import json
from googlecloudsdk.api_lib.compute import batch_helper
from googlecloudsdk.api_lib.compute import utils
from googlecloudsdk.api_lib.compute import waiters
from googlecloudsdk.core import log
import six
from six.moves import zip # pylint: disable=redefined-builtin
def _RequestsAreListRequests(requests):
  """Reports whether every request in *requests* is a list-style request.

  Args:
    requests: iterable of (service, method, request_protobuf) tuples.

  Returns:
    True if every method is 'List' or 'AggregatedList' (vacuously True for an
    empty list), False if none is.

  Raises:
    ValueError: if list and non-list requests are mixed.
  """
  flags = [m in ('List', 'AggregatedList') for _, m, _ in requests]
  # A batch must be homogeneous: all list-style, or none.
  if any(flags) and not all(flags):
    raise ValueError(
        'All requests must be either list requests or non-list requests.')
  return all(flags)
def _HandleJsonList(response, service, method, errors):
  """Extracts one *List response page (JSON text) into plain dicts.

  Args:
    response: str, the *List response body as JSON.
    service: The service which responded to the *List request.
    method: str, 'List' or 'AggregatedList'.
    errors: list, (None, message) pairs are appended here for any scope the
      server reported as unreachable.

  Returns:
    Pair of:
      - List of items returned in the response, as dicts.
      - Next page token (if present, otherwise None).
  """
  parsed = json.loads(response)
  items = []
  if method == 'List':
    # Plain List responses carry the resources directly.
    items = parsed.get('items', [])
  elif method == 'AggregatedList':
    # AggregatedList nests the real resources one level deeper, keyed by
    # scope; the item field name matches the method's last URL path segment.
    items_field_name = service.GetMethodConfig(
        'AggregatedList').relative_path.split('/')[-1]
    for scope_result in parsed['items'].values():
      # Record unreachable scopes as errors instead of dropping them silently.
      warning = scope_result.get('warning', None)
      if warning and warning['code'] == 'UNREACHABLE':
        errors.append((None, warning['message']))
      items.extend(scope_result.get(items_field_name, []))
  return items, parsed.get('nextPageToken', None)
def _HandleMessageList(response, service, method, errors):
  """Extracts one *List response page as protobuf Message objects.

  Message-based counterpart of _HandleJsonList.

  Args:
    response: the *List response message.
    service: The service which responded to the *List request.
    method: str, 'List' for plain list calls; anything else is treated as an
      AggregatedList response.
    errors: list, (None, message) pairs are appended here for any scope the
      server reported as unreachable.

  Returns:
    Pair of (list of items, next page token or None).
  """
  if method == 'List':
    # Plain List responses carry the resources directly.
    return response.items, response.nextPageToken
  # AggregatedList: dig the resources out of the per-scope containers; the
  # item field name matches the method's last URL path segment.
  items_field_name = service.GetMethodConfig(
      'AggregatedList').relative_path.split('/')[-1]
  items = []
  for scope_result in response.items.additionalProperties:
    # Record unreachable scopes as errors instead of dropping them silently.
    warning = scope_result.value.warning
    if warning and warning.code == warning.CodeValueValuesEnum.UNREACHABLE:
      errors.append((None, warning.message))
    items.extend(getattr(scope_result.value, items_field_name))
  return items, response.nextPageToken
def _ListCore(requests, http, batch_url, errors, response_handler):
  """Makes a series of list and/or aggregatedList batch requests.

  Issues the given batch of requests, yields every item the handler extracts
  from each response, and keeps re-issuing follow-up requests for any response
  that carried a next-page token until no pages remain.

  Args:
    requests: A list of requests to make. Each element must be a 3-element
      tuple where the first element is the service, the second element is the
      method ('List' or 'AggregatedList'), and the third element is a protocol
      buffer representing either a list or aggregatedList request.
    http: An httplib2.Http-like object.
    batch_url: The handler for making batch requests.
    errors: A list for capturing errors. If any response contains an error, it
      is added to this list.
    response_handler: callable extracting (items, next_page_token) from one
      response.

  Yields:
    Resources encapsulated in the format chosen by response_handler as they
    are received from the server.
  """
  pending = requests
  while pending:
    responses, request_errors = batch_helper.MakeRequests(
        requests=pending, http=http, batch_url=batch_url)
    errors.extend(request_errors)
    next_round = []
    # Responses come back positionally aligned with the requests we sent.
    for i, response in enumerate(responses):
      if not response:
        # Failed requests are already reported through request_errors.
        continue
      service, method, request_protobuf = pending[i]
      items, next_page_token = response_handler(response, service, method,
                                                errors)
      for item in items:
        yield item
      if next_page_token:
        # Re-issue the same request, advanced to the next page.
        follow_up = copy.deepcopy(request_protobuf)
        follow_up.pageToken = next_page_token
        next_round.append((service, method, follow_up))
    pending = next_round
def _List(requests, http, batch_url, errors):
  """Makes a series of list and/or aggregatedList batch requests.

  Thin wrapper over _ListCore that extracts each response page as protocol
  buffer messages (via _HandleMessageList); ListJson is the dict-based
  counterpart.

  Args:
    requests: A list of requests to make. Each element must be a 3-element tuple
      where the first element is the service, the second element is the method
      ('List' or 'AggregatedList'), and the third element is a protocol buffer
      representing either a list or aggregatedList request.
    http: An httplib2.Http-like object.
    batch_url: The handler for making batch requests.
    errors: A list for capturing errors. If any response contains an error, it
      is added to this list.

  Returns:
    A generator yielding resources encapsulated as protocol buffers as they
    are received from the server.
  """
  return _ListCore(requests, http, batch_url, errors, _HandleMessageList)
def ListJson(requests, http, batch_url, errors):
  """Makes batched List/AggregatedList calls and yields resources as dicts.

  This function does all of:
  - Sends the batch of List/AggregatedList requests
  - Extracts items from responses
  - Handles pagination

  All requests must be sent to the same client - Compute.

  Args:
    requests: A list of requests to make. Each element must be a 3-element
      tuple where the first element is the service, the second element is the
      method ('List' or 'AggregatedList'), and the third element is a protocol
      buffer representing either a list or aggregatedList request.
    http: An httplib2.Http-like object.
    batch_url: The handler for making batch requests.
    errors: A list for capturing errors. If any response contains an error, it
      is added to this list.

  Yields:
    Resources in dicts as they are received from the server.
  """
  # All requests share one client, so a single JSON response model switch
  # covers the whole batch.
  client = requests[0][0].client
  with client.JsonResponseModel():
    for resource in _ListCore(requests, http, batch_url, errors,
                              _HandleJsonList):
      yield resource
def MakeRequests(requests,
http,
batch_url,
errors,
progress_tracker=None,
followup_overrides=None):
"""Makes one or more requests to the API.
Each request can be either a synchronous API call or an asynchronous
one. For synchronous calls (e.g., get and list), the result from the
server is yielded immediately. For asynchronous calls (e.g., calls
that return operations like insert), this function waits until the
operation reaches the DONE state and fetches the corresponding
object and yields that object (nothing is yielded for deletions).
Currently, a heterogenous set of synchronous calls can be made
(e.g., get request to fetch a disk and instance), however, the
asynchronous requests must be homogenous (e.g., they must all be the
same verb on the same collection). In the future, heterogenous
asynchronous requests will be supported. For now, it is up to the
client to ensure that the asynchronous requests are
homogenous. Synchronous and asynchronous requests can be mixed.
Args:
requests: A list of requests to make. Each element must be a 3-element tuple
where the first element is the service, the second element is the string
name of the method on the service, and the last element is a protocol
buffer representing the request.
http: An httplib2.Http-like object.
batch_url: The handler for making batch requests.
errors: A list for capturing errors. If any response contains an error, it
is added to this list.
progress_tracker: progress tracker to be ticked while waiting for operations
to finish.
followup_overrides: A list of new resource names to GET once the operation
finishes. Generally used in renaming calls.
Yields:
A response for each request. For deletion requests, no corresponding
responses are returned.
"""
if _RequestsAreListRequests(requests):
for item in _List(
requests=requests, http=http, batch_url=batch_url, errors=errors):
yield item
return
responses, new_errors = batch_helper.MakeRequests(
requests=requests, http=http, batch_url=batch_url)
errors.extend(new_errors)
operation_service = None
resource_service = None
# Collects all operation objects in a list so they can be waited on
# and yields all non-operation objects since non-operation responses
# cannot be waited on.
operations_data = []
if not followup_overrides:
followup_overrides = [None for _ in requests]
for request, response, followup_override in zip(requests, responses,
followup_overrides):
if response is None:
continue
service, _, request_body = request
if (isinstance(response, service.client.MESSAGES_MODULE.Operation) and
service.__class__.__name__ not in (
'GlobalOperationsService', 'RegionOperationsService',
'ZoneOperationsService', 'GlobalOrganizationOperationsService',
'GlobalAccountsOperationsService')):
resource_service = service
project = None
if hasattr(request_body, 'project'):
project = request_body.project
if response.zone:
operation_service = service.client.zoneOperations
elif response.region:
operation_service = service.client.regionOperations
| |
<filename>vlcgan/miscc/utils.py<gh_stars>1-10
import os
import errno
import numpy as np
import PIL
from copy import deepcopy
from .config import cfg
import pdb
from torch.nn import init
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.utils as vutils
import torchvision.transforms as transforms
from torch.autograd import Variable
from tqdm import tqdm
def func_attention(query, context, gamma1):
    """Word-to-region attention between text queries and image features.

    Args:
        query: batch x ndf x queryL tensor of word features.
        context: batch x ndf x ih x iw tensor of image features
            (sourceL = ih * iw).
        gamma1: temperature multiplier applied before the second softmax.

    Returns:
        Tuple (weightedContext, attn) where weightedContext is
        batch x ndf x queryL and attn is batch x queryL x ih x iw.
    """
    batch_size = query.size(0)
    queryL = query.size(2)
    ih, iw = context.size(2), context.size(3)
    sourceL = ih * iw

    # Flatten the spatial grid: batch x ndf x sourceL, plus its transpose.
    context = context.view(batch_size, -1, sourceL)
    context_t = torch.transpose(context, 1, 2).contiguous()

    # Raw affinities between every source region and every query word:
    # (batch x sourceL x ndf)(batch x ndf x queryL) -> batch x sourceL x queryL
    attn = torch.bmm(context_t, query)

    # Normalise over the query axis (Eq. (8)).
    attn = F.softmax(attn.view(batch_size * sourceL, queryL), dim=-1)
    attn = attn.view(batch_size, sourceL, queryL)

    # Sharpen with gamma1 and re-normalise over the source axis.
    attn = torch.transpose(attn, 1, 2).contiguous()
    attn = F.softmax(attn.view(batch_size * queryL, sourceL) * gamma1, dim=-1)
    attn = attn.view(batch_size, queryL, sourceL)

    # Attention-weighted sum of region features:
    # (batch x ndf x sourceL)(batch x sourceL x queryL) -> batch x ndf x queryL
    attn_t = torch.transpose(attn, 1, 2).contiguous()
    weightedContext = torch.bmm(context, attn_t)
    return weightedContext, attn.view(batch_size, -1, ih, iw)
class LabelSmoothingLoss(nn.Module):
    """Cross entropy against a label-smoothed target distribution.

    The true class receives probability (1 - smoothing); the remaining
    smoothing mass is spread uniformly over the other (classes - 1) labels.
    With smoothing=0 this reduces to ordinary cross entropy.
    """

    def __init__(self, classes, smoothing=0.0, dim=-1):
        super(LabelSmoothingLoss, self).__init__()
        self.confidence = 1.0 - smoothing  # probability of the true class
        self.smoothing = smoothing
        self.cls = classes
        self.dim = dim

    def forward(self, pred, target):
        """pred: raw logits (N x C); target: class indices (N,)."""
        log_probs = pred.log_softmax(dim=self.dim)
        with torch.no_grad():
            # Smoothed one-hot target, built gradient-free on purpose.
            true_dist = torch.full_like(log_probs,
                                        self.smoothing / (self.cls - 1))
            true_dist.scatter_(1, target.data.unsqueeze(1), self.confidence)
        return torch.mean(torch.sum(-true_dist * log_probs, dim=self.dim))
#############################
def KL_loss(mu, logvar):
    """KL divergence of N(mu, sigma^2) from N(0, 1), averaged over elements.

    Computes -0.5 * mean(1 + log(sigma^2) - mu^2 - sigma^2), the standard
    VAE regulariser.
    """
    kld_per_element = 1 + logvar - mu.pow(2) - logvar.exp()
    return torch.mean(kld_per_element) * -0.5
# TODO: Masked cross-entropy loss
def compute_dual_discriminator_loss(netD, img_features, real_captions, gpus, mode='image'):
    """Caption-reconstruction ('dual') loss for the discriminator.

    Asks netD to produce caption logits from image features and scores them
    against the ground-truth token ids with cross entropy.

    Args:
        netD: discriminator exposing get_captions(...).
        img_features: image (or per-frame story) feature tensor.
        real_captions: (input_ids, masks) pair; masks are currently unused.
        gpus: list of GPU ids; DataParallel is used when more than one.
        mode: 'image' or 'story'.

    Returns:
        Scalar cross-entropy loss tensor.
    """
    if len(gpus) > 1:
        netD = torch.nn.DataParallel(netD)
    real_input_ids, real_masks = real_captions  # NOTE(review): masks unused - confirm
    if mode == 'story':
        # All-ones frame mask, one entry per story frame.
        frame_mask = torch.ones(img_features.size(0), img_features.size(1))
        if len(gpus) > 0:
            frame_mask = frame_mask.cuda()
        caption_logits = netD.get_captions(img_features, frame_mask, real_input_ids)
    else:
        caption_logits = netD.get_captions(img_features, real_input_ids)
    # Token id 1 is skipped by the loss (presumably padding - TODO confirm).
    criterion = nn.CrossEntropyLoss(ignore_index=1)
    return criterion(caption_logits, real_input_ids.view(-1))
def compute_dual_densecap_loss(netDual, imgs, targets, gpus, cfg, mode='image'):
    """Dense-captioning dual loss from a DenseCap-style detector network.

    Args:
        netDual: detection + captioning network returning a dict of losses.
        imgs: generated images; (bs, C, H, W) in 'image' mode or
            (bs, C, video_len, H, W) in 'story' mode.
        targets: dict with batched 'boxes', 'caps' and 'caps_len' entries.
        gpus: list of GPU ids; DataParallel is used when more than one.
        cfg: config providing cfg.DENSECAP.detect_loss_weight and
            cfg.DENSECAP.caption_loss_weight.
        mode: 'image' or 'story'.

    Returns:
        Tuple (weighted total loss tensor, loss report dict).

    Raises:
        ValueError: for an unknown mode.
    """
    # Resize/normalise to the detector's expected input statistics.
    transform = transforms.Compose([
        transforms.Resize((480, 640)),
        transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
    ])
    if len(gpus) > 1:
        netDual = torch.nn.DataParallel(netDual)
    if mode == 'image':
        bs, n_channels, h, w = imgs.size()
        # Clamp to non-negative before normalising, as the generator output
        # may contain negative values.
        imgs = [F.relu(imgs[i].clone(), inplace=True) for i in range(bs)]
        imgs_ = [transform(imgs[i].clone()) for i in range(bs)]
        imgs_ = [F.relu(imgs_[i], inplace=True) for i in range(bs)]
        # One target dict per image, as the detector API expects.
        targets_ = [{'boxes': b, 'caps': c, 'caps_len': c_len}
                    for b, c, c_len in zip(targets['boxes'], targets['caps'], targets['caps_len'])]
    elif mode == 'story':
        bs, n_channel, video_len, h, w = imgs.size()
        # Flatten the story frames into one batch of images.
        imgs = imgs.permute(0, 2, 1, 3, 4).view(bs * video_len, n_channel, h, w)
        imgs_ = [transform(img) for img in imgs]
        # NOTE(review): story mode hands the detector an empty target list -
        # confirm this is intentional and not a missing implementation.
        targets_ = []
    else:
        raise ValueError('unknown mode: %s' % mode)
    losses = netDual(imgs_, targets_)
    detect_loss = losses['loss_objectness'] + losses['loss_rpn_box_reg'] + \
        losses['loss_classifier'] + losses['loss_box_reg']
    caption_loss = losses['loss_caption']
    errDual = (cfg.DENSECAP.detect_loss_weight * detect_loss +
               cfg.DENSECAP.caption_loss_weight * caption_loss)
    loss_report = {
        mode + ' Object Detection Loss --> ': detect_loss.data.item(),
        mode + ' Dense Caption Loss --> ': caption_loss.data.item(),
    }
    return errDual, loss_report
def compute_contrastive_loss(netD, img_features, text_features, gpus):
    """CLIP-style symmetric contrastive loss between image and text features.

    Projects both modalities into a joint space via netD's heads, normalises
    them, and cross-entropy-matches each image to its paired text (and vice
    versa) over the in-batch similarity matrix.

    Returns:
        Scalar loss tensor (average of the image->text and text->image terms).
    """
    ce = nn.CrossEntropyLoss()
    # Pool image features and project both modalities into the joint space.
    pooled = netD.img_pooler(img_features).squeeze()
    img_embed = netD.img_joint_fc(pooled)
    txt_embed = netD.txt_joint_fc(text_features)
    # Unit-normalise so the dot products below are cosine similarities.
    img_embed = img_embed / img_embed.norm(dim=-1, keepdim=True)
    txt_embed = txt_embed / txt_embed.norm(dim=-1, keepdim=True)
    # Temperature-scaled similarity matrix (batch x batch).
    logits_per_image = netD.logit_scale.exp() * img_embed @ txt_embed.t()
    # The i-th image matches the i-th text.
    labels = torch.arange(img_features.shape[0])
    if gpus:
        labels = labels.cuda()
    loss_i = ce(logits_per_image, labels)
    loss_t = ce(logits_per_image.t(), labels)
    return (loss_i + loss_t) / 2
def compute_discriminator_loss(netD, real_imgs, fake_imgs,
                               real_labels, fake_labels, real_catelabels,
                               conditions, gpus, mode='image',
                               dual=False,
                               real_captions=None,
                               contrastive=False,
                               real_caption_embeds=None):
    """Full discriminator loss: real/wrong/fake conditional GAN terms, plus
    optional unconditional, contrastive and character-classification terms.

    Args:
        netD: discriminator; must expose get_cond_logits, and optionally
            get_uncond_logits and cate_classify (either may be None).
        real_imgs / fake_imgs: real batch and generator output.
        real_labels / fake_labels: BCE targets for real and fake pairs.
        real_catelabels: multi-label character targets for cate_classify.
        conditions: conditioning vectors (detached before use).
        gpus: device ids handed to nn.parallel.data_parallel.
        mode: 'image' or 'story'; story features are averaged over frames.
        dual: unused here (dual loss block is commented out below).
        real_captions: unused while the dual block is disabled.
        contrastive: when True, adds a contrastive image/text term.
        real_caption_embeds: caption embeddings for the contrastive term.

    Returns:
        Tuple (total discriminator loss tensor, loss report dict).
    """
    criterion = nn.BCELoss()
    cate_criterion =nn.MultiLabelSoftMarginLoss()
    batch_size = real_imgs.size(0)
    # Detach: the discriminator step must not backprop into G or the encoder.
    cond = conditions.detach()
    fake = fake_imgs.detach()
    real_features = nn.parallel.data_parallel(netD, (real_imgs), gpus)
    fake_features = nn.parallel.data_parallel(netD, (fake), gpus)
    if mode == 'story':
        # Keep per-frame features around, then average over the frame axis.
        real_features_st = real_features
        fake_features = fake_features.mean(1).squeeze()
        real_features = real_features.mean(1).squeeze()
    # real pairs: real image + matching condition should score "real"
    inputs = (real_features, cond)
    real_logits = nn.parallel.data_parallel(netD.get_cond_logits, inputs, gpus)
    errD_real = criterion(real_logits, real_labels)
    # wrong pairs: real image + shifted (mismatched) condition should score "fake"
    inputs = (real_features[:(batch_size-1)], cond[1:])
    wrong_logits = \
        nn.parallel.data_parallel(netD.get_cond_logits, inputs, gpus)
    errD_wrong = criterion(wrong_logits, fake_labels[1:])
    # fake pairs: generated image + its condition should score "fake"
    inputs = (fake_features, cond)
    fake_logits = nn.parallel.data_parallel(netD.get_cond_logits, inputs, gpus)
    errD_fake = criterion(fake_logits, fake_labels)
    if netD.get_uncond_logits is not None:
        # Unconditional realism terms (image only, no condition).
        real_logits = \
            nn.parallel.data_parallel(netD.get_uncond_logits,
                                      (real_features), gpus)
        fake_logits = \
            nn.parallel.data_parallel(netD.get_uncond_logits,
                                      (fake_features), gpus)
        uncond_errD_real = criterion(real_logits, real_labels)
        uncond_errD_fake = criterion(fake_logits, fake_labels)
        # Average conditional and unconditional variants of each term.
        errD = ((errD_real + uncond_errD_real) / 2. +
                (errD_fake + errD_wrong + uncond_errD_fake) / 3.)
        errD_real = (errD_real + uncond_errD_real) / 2.
        errD_fake = (errD_fake + uncond_errD_fake) / 2.
    else:
        errD = errD_real + (errD_fake + errD_wrong) * 0.5
    loss_report = {
        mode + ' Fake/Real Discriminator Loss (Real pairs) --> ': errD_real.data.item(),
        mode + ' Fake/Real Discriminator Loss (Wrong pairs) --> ': errD_wrong.data.item(),
        # NOTE(review): key below is missing the space after `mode` that the
        # other two keys have; left as-is since consumers may match on it.
        mode + 'Fake/Real Discriminator Loss (Fake pairs) --> ': errD_fake.data.item(),
    }
    # if dual:
    #     if mode == 'story':
    #         errDual = compute_dual_discriminator_loss(netD, real_features_st, real_captions, gpus, mode=mode)
    #     else:
    #         errDual = compute_dual_discriminator_loss(netD, real_features, real_captions, gpus, mode=mode)
    #     # TODO: Add a weight for ErrDual
    #     errD = errD + errDual
    #     loss_report[mode + ' Dual Discriminator Loss --> '] = errDual.data.item()
    if contrastive:
        if mode == "story":
            # Flatten per-frame caption embeddings into one vector per story.
            vid_len, embed_dim = real_caption_embeds.shape[1], real_caption_embeds.shape[2]
            real_caption_embeds = real_caption_embeds.view(-1, vid_len*embed_dim)
        errContrast = compute_contrastive_loss(netD, real_features, real_caption_embeds, gpus)
        loss_report[mode + ' Constrastive Discriminator Loss --> '] = errContrast.data.item()
        errD = errD + errContrast
    if netD.cate_classify is not None:
        # Auxiliary multi-label character classification on real features.
        cate_logits = nn.parallel.data_parallel(netD.cate_classify, real_features, gpus)
        cate_logits = cate_logits.squeeze()
        errD = errD + 1.0 * cate_criterion(cate_logits, real_catelabels)
        acc = get_multi_acc(cate_logits.cpu().data.numpy(), real_catelabels.cpu().data.numpy())
        loss_report[mode + ' Character Classifier Accuracy (Discriminator) --> '] = acc
    return errD, loss_report
def compute_generator_loss(netD, fake_imgs, real_labels, fake_catelabels, conditions, gpus, mode='image',
                           dual=False, real_captions=None, contrastive=False, fake_caption_embeds=None):
    """Generator loss: fool the discriminator on fake pairs, plus optional
    unconditional, character-classification, dual-captioning and contrastive
    terms.

    Args:
        netD: discriminator; must expose get_cond_logits, and optionally
            get_uncond_logits and cate_classify (either may be None).
        fake_imgs: generator output (NOT detached - gradients flow to G).
        real_labels: BCE targets; fakes are scored against "real" here.
        fake_catelabels: multi-label character targets for cate_classify.
        conditions: conditioning vectors (detached before use).
        gpus: device ids handed to nn.parallel.data_parallel.
        mode: 'image' or 'story'; story features are averaged over frames.
        dual: when True, adds the caption-reconstruction dual loss.
        real_captions: (input_ids, masks) pair for the dual loss.
        contrastive: when True, adds a contrastive image/text term.
        fake_caption_embeds: caption embeddings for the contrastive term.

    Returns:
        Tuple (total generator loss tensor, loss report dict).
    """
    criterion = nn.BCELoss()
    cate_criterion =nn.MultiLabelSoftMarginLoss()
    cond = conditions.detach()
    fake_features = nn.parallel.data_parallel(netD, (fake_imgs), gpus)
    if mode == 'story':
        # Keep per-frame features around, then average over the frame axis.
        fake_features_st = fake_features
        fake_features = torch.mean(fake_features, dim=1).squeeze()
    # fake pairs: generator wants its output + condition judged "real"
    inputs = (fake_features, cond)
    fake_logits = nn.parallel.data_parallel(netD.get_cond_logits, inputs, gpus)
    errD_fake = criterion(fake_logits, real_labels)
    if netD.get_uncond_logits is not None:
        # Unconditional realism term (image only, no condition).
        fake_logits = \
            nn.parallel.data_parallel(netD.get_uncond_logits,
                                      (fake_features), gpus)
        uncond_errD_fake = criterion(fake_logits, real_labels)
        errD_fake += uncond_errD_fake
    loss_report = {
        mode + ' Fake/Real Generator Loss (Fake pairs) --> ': errD_fake.data.item(),
    }
    if netD.cate_classify is not None:
        # Auxiliary multi-label character classification on fake features.
        cate_logits = nn.parallel.data_parallel(netD.cate_classify, fake_features, gpus)
        cate_logits = cate_logits.mean(dim=-1).mean(dim=-1)
        cate_logits = cate_logits.squeeze()
        errD_fake = errD_fake + 1.0 * cate_criterion(cate_logits, fake_catelabels)
        acc = get_multi_acc(cate_logits.cpu().data.numpy(), fake_catelabels.cpu().data.numpy())
        loss_report[mode + ' Character Classifier Accuracy (Generator) --> '] = acc
    # TODO: Add weight for dual learning loss
    if dual:
        # NOTE(review): both branches below are identical; presumably the
        # story branch was meant to pass per-frame features - confirm intent.
        if mode == 'story':
            errDual = compute_dual_discriminator_loss(netD, fake_imgs, real_captions, gpus, mode=mode)
        else:
            errDual = compute_dual_discriminator_loss(netD, fake_imgs, real_captions, gpus, mode=mode)
        errD_fake = errD_fake + errDual
        loss_report[mode + ' Dual Generator Loss --> '] = errDual.data.item()
    if contrastive:
        if mode == "story":
            # Flatten per-frame caption embeddings into one vector per story.
            vid_len, embed_dim = fake_caption_embeds.shape[1], fake_caption_embeds.shape[2]
            fake_caption_embeds = fake_caption_embeds.view(-1, vid_len*embed_dim)
        errContrast_fake = compute_contrastive_loss(netD, fake_features, fake_caption_embeds, gpus)
        loss_report[mode + ' Constrastive Generator Loss --> '] = errContrast_fake.data.item()
        errD_fake = errD_fake + errContrast_fake
    return errD_fake, loss_report
#############################
def weights_init(module):
    """Initialize the weights of *module* in place.

    Intended for use with ``model.apply(weights_init)``. Linear/Embedding
    layers get N(0, 0.02) weights with zero bias (BERT-style); conv layers
    get N(0, 0.02); batch-norm layers get N(1, 0.02) weights with zero bias;
    BertLayerNorm gets unit weight and zero bias; the pretrained feature
    extractor is left untouched.
    """
    # NOTE: the original placed this docstring after the assignment below,
    # where it was a no-op string expression rather than a docstring.
    initializer_range = 0.02
    if isinstance(module, (nn.Linear, nn.Embedding)):
        # Slightly different from the TF version, which uses truncated_normal
        # for initialization; cf https://github.com/pytorch/pytorch/pull/5617
        module.weight.data.normal_(mean=0.0, std=initializer_range)
        if isinstance(module, nn.Linear) and module.bias is not None:
            module.bias.data.zero_()
    else:
        classname = module.__class__.__name__
        if classname == "MyInceptionFeatureExtractor":
            # Pretrained feature extractor: keep its weights.
            pass
        elif classname == 'BertLayerNorm':
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        elif classname.find('Conv') != -1:
            module.weight.data.normal_(0.0, 0.02)
        elif classname.find('BatchNorm') != -1:
            module.weight.data.normal_(1.0, 0.02)
            module.bias.data.fill_(0)
        elif classname.find('Linear') != -1:
            # Catches Linear-named layers that are not nn.Linear (e.g. Bilinear).
            module.weight.data.normal_(0.0, 0.02)
            if module.bias is not None:
                module.bias.data.fill_(0.0)
#############################
def save_img_results(data_img, fake, texts, epoch, image_dir):
num = cfg.VIS_COUNT
fake = fake[0:num]
# data_img is changed to [0,1]
if data_img is not | |
#
# create.py
#
# Python interface for the iRobot Create
# Licensed as Artistic License
#
# <NAME> <EMAIL>
# updated for SIGCSE 3/9/07
#
# modified by <NAME> Oct 20 2007
#
# modified by <NAME> Dec 9 2009
# 1. Two new functions (senseFunc and sleepTill).
# 2. getPose fixed (call to r.sensors([POSE]) added to stop()).
# 3. move() changed to accept parameters in centimeters instead of milimeters
# for consistency with printSensors/getPose.
#
# modified by <NAME> Feb 2016
# 1. Added support for dirt sensor, encoders, and light bump.
# 2. Adjusted the size of the Roomba in WHEEL_SPAN
# 3. getPose seems to be broken.
import serial
import math
import time
import thread
import threading
# some module-level definitions for the robot commands
# (each SCI opcode is pre-converted from its byte value with chr())
START = chr(128) # already converted to bytes...
BAUD = chr(129) # + 1 byte
CONTROL = chr(130) # deprecated for Create
SAFE = chr(131)
FULL = chr(132)
POWER = chr(133)
SPOT = chr(134) # Same for the Roomba and Create
CLEAN = chr(135) # Clean button - Roomba
COVER = chr(135) # Cover demo - Create
MAX = chr(136) # Roomba
DEMO = chr(136) # Create
DRIVE = chr(137) # + 4 bytes
MOTORS = chr(138) # + 1 byte
LEDS = chr(139) # + 3 bytes
SONG = chr(140) # + 2N+2 bytes, where N is the number of notes
PLAY = chr(141) # + 1 byte
SENSORS = chr(142) # + 1 byte
FORCESEEKINGDOCK = chr(143) # same on Roomba and Create
# the above command is called "Cover and Dock" on the Create
DRIVEDIRECT = chr(145) # Create only
STREAM = chr(148) # Create only
QUERYLIST = chr(149) # Create only
PAUSERESUME = chr(150) # Create only
#### Sean
SCRIPT = chr(152)
ENDSCRIPT = chr(153)
WAITDIST = chr(156)
WAITANGLE = chr(157)
# the four SCI modes
# the code will try to keep track of which mode the system is in,
# but this might not be 100% trivial...
OFF_MODE = 0
PASSIVE_MODE = 1
SAFE_MODE = 2
FULL_MODE = 3
# the sensors
# (7-51 are SCI sensor packet ids; 100+ below are local conveniences)
BUMPS_AND_WHEEL_DROPS = 7
WALL_IR_SENSOR = 8
CLIFF_LEFT = 9
CLIFF_FRONT_LEFT = 10
CLIFF_FRONT_RIGHT = 11
CLIFF_RIGHT = 12
VIRTUAL_WALL = 13
LSD_AND_OVERCURRENTS = 14
DIRT_DETECTED = 15
INFRARED_BYTE = 17
BUTTONS = 18
DISTANCE = 19
ANGLE = 20
CHARGING_STATE = 21
VOLTAGE = 22
CURRENT = 23
BATTERY_TEMP = 24
BATTERY_CHARGE = 25
BATTERY_CAPACITY = 26
WALL_SIGNAL = 27
CLIFF_LEFT_SIGNAL = 28
CLIFF_FRONT_LEFT_SIGNAL = 29
CLIFF_FRONT_RIGHT_SIGNAL = 30
CLIFF_RIGHT_SIGNAL = 31
CARGO_BAY_DIGITAL_INPUTS = 32
CARGO_BAY_ANALOG_SIGNAL = 33
CHARGING_SOURCES_AVAILABLE = 34
OI_MODE = 35
SONG_NUMBER = 36
SONG_PLAYING = 37
NUM_STREAM_PACKETS = 38
REQUESTED_VELOCITY = 39
REQUESTED_RADIUS = 40
REQUESTED_RIGHT_VELOCITY = 41
REQUESTED_LEFT_VELOCITY = 42
ENCODER_LEFT = 43
ENCODER_RIGHT = 44
LIGHTBUMP = 45
LIGHTBUMP_LEFT = 46
LIGHTBUMP_FRONT_LEFT = 47
LIGHTBUMP_CENTER_LEFT = 48
LIGHTBUMP_CENTER_RIGHT = 49
LIGHTBUMP_FRONT_RIGHT = 50
LIGHTBUMP_RIGHT = 51
# others just for easy access to particular parts of the data
POSE = 100
LEFT_BUMP = 101
RIGHT_BUMP = 102
LEFT_WHEEL_DROP = 103
RIGHT_WHEEL_DROP = 104
CENTER_WHEEL_DROP = 105
LEFT_WHEEL_OVERCURRENT = 106
RIGHT_WHEEL_OVERCURRENT = 107
ADVANCE_BUTTON = 108
PLAY_BUTTON = 109
# per-packet data width (name suggests bytes), indexed by sensor packet id:
# 0 1 2 3 4 5 6 7 8 9101112131415161718192021222324252627282930313233343536373839404142434445464748495051
SENSOR_DATA_WIDTH = [0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,2,2,1,2,2,1,2,2,2,2,2,2,2,1,2,1,1,1,1,1,2,2,2,2,2,2,1,2,2,2,2,2,2]
#The original value was 258.0 but my roomba has 235.0
WHEEL_SPAN = 235.0 # distance between wheel centers, in mm
WHEEL_DIAMETER = 72.0 # in mm
TICK_PER_REVOLUTION = 508.8 # original 508.8
TICK_PER_MM = TICK_PER_REVOLUTION/(math.pi*WHEEL_DIAMETER) # encoder ticks per mm travelled
# on my floor, a full turn is measured as sth like 450 deg
# add an error to the computation to account for that.
ANGULAR_ERROR = 360.0/450.0
# for printing the SCI modes
def modeStr( mode ):
    """ returns the name of the given SCI mode constant
        (OFF/PASSIVE/SAFE/FULL); warns on stdout and returns
        'UNKNOWN_MODE' for anything else
    """
    if mode == OFF_MODE: return 'OFF_MODE'
    if mode == PASSIVE_MODE: return 'PASSIVE_MODE'
    if mode == SAFE_MODE: return 'SAFE_MODE'
    if mode == FULL_MODE: return 'FULL_MODE'
    print 'Warning: unknown mode', mode, 'seen in modeStr'
    return 'UNKNOWN_MODE'
#
# some module-level functions for dealing with bits and bytes
#
def _bytesOfR( r ):
    """ debug helper: prints each byte of a raw sensor reply r
        as its ordinal value, one byte per line
    """
    print 'raw r is', r
    for i in range(len(r)):
        print 'byte', i, 'is', ord(r[i])
    print 'finished with formatR' # NB: message predates the rename to _bytesOfR
def _bitOfByte( bit, byte ):
    """ returns a 0 or 1: the value of the 'bit' of 'byte'
        bit 0 is the least significant; an out-of-range bit
        warns on stdout and returns 0
    """
    if bit < 0 or bit > 7:
        print 'Your bit of', bit, 'is out of range (0-7)'
        print 'returning 0'
        return 0
    return ((byte >> bit) & 0x01)
def _toBinary( val, numbits ):
    """ prints numbits digits of val in binary, most significant first """
    if numbits == 0: return
    # recurse on the higher-order bits first so output reads MSB -> LSB
    _toBinary( val>>1 , numbits-1 )
    print (val & 0x01), # print least significant bit
def _fromBinary( s ):
    """ parses a string of '0' and '1' characters (MSB first)
        and returns its integer value; '' parses to 0
    """
    value = 0
    for digit in s:
        value = (value << 1) | (ord(digit) - ord('0'))
    return value
def _twosComplementInt1byte( byte ):
    """ returns an int of the same value as the input
        int (a byte), but interpreted in two's complement;
        the output range is -128 to 127
    """
    # bit 7 is the sign bit; the rest is the magnitude
    sign = (byte >> 7) & 0x01
    magnitude = byte & 127
    return magnitude - (1 << 7) if sign else magnitude
def _twosComplementInt2bytes( highByte, lowByte ):
    """ returns the int whose two's-complement representation is
        stored in the two bytes passed in (high byte first);
        the output range is -32768 to 32767; chars or ints can
        be input, both are truncated to 8 bits
    """
    # bit 15 (bit 7 of the high byte) is the sign bit
    sign = (highByte >> 7) & 0x01
    unsigned = ((highByte & 127) << 8) | (lowByte & 0xFF)
    return unsigned - (1 << 15) if sign else unsigned
def _toTwosComplement2Bytes( value ):
    """ returns two bytes (ints) in (high, low) order whose bits
        form the input value when interpreted in two's complement
    """
    # negative values wrap around modulo 2^16
    bits = value if value >= 0 else (1 << 16) + value
    return ((bits >> 8) & 0xFF, bits & 0xFF)
#
# this class represents a snapshot of the robot's data
#
class SensorFrame:
""" the sensorFrame class is really a struct whose
fields are filled in by sensorStatus
"""
def __init__(self):
""" constructor -- set all fields to 0
"""
self.casterDrop = 0
self.leftWheelDrop = 0
self.rightWheelDrop = 0
self.leftBump = 0
self.rightBump = 0
self.wallSensor = 0
self.leftCliff = 0
self.frontLeftCliff = 0
self.frontRightCliff = 0
self.rightCliff = 0
self.virtualWall = 0
self.driveLeft = 0
self.driveRight = 0
self.mainBrush = 0
self.vacuum = 0
self.sideBrush = 0
self.leftDirt = 0
self.rightDirt = 0
self.remoteControlCommand = 0
self.powerButton = 0
self.spotButton = 0
self.cleanButton = 0
self.maxButton = 0
self.distance = 0
self.rawAngle = 0
self.angleInRadians = 0
self.chargingState = 0
self.voltage = 0
self.current = 0
self.temperature = 0
self.charge = 0
self.capacity = 0
self.lightBumpLeft = 0
self.lightBumpFrontLeft = 0
self.lightCenterLeft = 0
self.lightCenterRight = 0
self.lightBumpFrontRight = 0
self.lightBumpRight = 0
self.dirt = 0
def __str__(self):
""" returns a string with the information
from this SensorFrame
"""
# there's probably a more efficient way to do this...
# perhaps just making it all + instead of the separate
# += would be more efficient
#
# actually, we should make a list and call ''.join(list)
# not that we will...
#
s = ''
s += 'casterDrop: ' + str(self.casterDrop) + '\n'
s += 'leftWheelDrop: ' + str(self.leftWheelDrop) + '\n'
s += 'rightWheelDrop: ' + str(self.rightWheelDrop) + '\n'
s += 'leftBump: ' + str(self.leftBump) + '\n'
s += 'rightBump: ' + str(self.rightBump) + '\n'
s += 'wallSensor: ' + str(self.wallSensor) + '\n'
s += 'leftCliff: ' + str(self.leftCliff) + '\n'
s += 'frontLeftCliff: ' + str(self.frontLeftCliff) + '\n'
s += 'frontRightCliff: ' + str(self.frontRightCliff) + '\n'
s += 'rightCliff: ' + str(self.rightCliff) + '\n'
s += 'virtualWall: ' + str(self.virtualWall) + '\n'
s += 'driveLeft: ' + str(self.driveLeft) + '\n'
s += 'driveRight: ' + str(self.driveRight) + '\n'
s += 'mainBrush: ' + str(self.mainBrush) + '\n'
s += 'vacuum: ' + str(self.vacuum) + '\n'
s += 'sideBrush: ' + str(self.sideBrush) + '\n'
s += 'leftDirt: ' + str(self.leftDirt) + '\n'
s += 'rightDirt: ' + str(self.rightDirt) + '\n'
s += 'remoteControlCommand: ' + str(self.remoteControlCommand) + '\n'
s += 'powerButton: ' + str(self.powerButton) + '\n'
s += 'spotButton: ' + str(self.spotButton) + '\n'
s += | |
1,
}
self.assertEqual(expect, ctxt())
def test_zeromq_context_unrelated(self):
self.is_relation_made.return_value = False
self.assertEquals(context.ZeroMQContext()(), {})
def test_zeromq_context_related(self):
self.is_relation_made.return_value = True
self.relation_ids.return_value = ['zeromq-configuration:1']
self.related_units.return_value = ['openstack-zeromq/0']
self.relation_get.side_effect = ['nonce-data', 'hostname', 'redis']
self.assertEquals(context.ZeroMQContext()(),
{'zmq_host': 'hostname',
'zmq_nonce': 'nonce-data',
'zmq_redis_address': 'redis'})
def test_notificationdriver_context_nomsg(self):
relations = {
'zeromq-configuration': False,
'amqp': False,
}
rels = fake_is_relation_made(relations=relations)
self.is_relation_made.side_effect = rels.rel_made
self.assertEquals(context.NotificationDriverContext()(),
{'notifications': 'False'})
def test_notificationdriver_context_zmq_nometer(self):
relations = {
'zeromq-configuration': True,
'amqp': False,
}
rels = fake_is_relation_made(relations=relations)
self.is_relation_made.side_effect = rels.rel_made
self.assertEquals(context.NotificationDriverContext()(),
{'notifications': 'False'})
    def test_notificationdriver_context_zmq_meter(self):
        # NOTE(review): this test's setup and expectation are byte-identical
        # to test_notificationdriver_context_zmq_nometer above — no metering
        # relation is enabled, so it does not exercise the case its name
        # implies. Verify the intended relation flags before changing it.
        relations = {
            'zeromq-configuration': True,
            'amqp': False,
        }
        rels = fake_is_relation_made(relations=relations)
        self.is_relation_made.side_effect = rels.rel_made
        self.assertEquals(context.NotificationDriverContext()(),
                          {'notifications': 'False'})
def test_notificationdriver_context_amq(self):
relations = {
'zeromq-configuration': False,
'amqp': True,
}
rels = fake_is_relation_made(relations=relations)
self.is_relation_made.side_effect = rels.rel_made
self.assertEquals(context.NotificationDriverContext()(),
{'notifications': 'True'})
@patch.object(context, 'psutil')
def test_num_cpus_xenial(self, _psutil):
_psutil.cpu_count.return_value = 4
self.assertTrue(context._num_cpus(), 4)
    @patch.object(context, 'psutil')
    def test_num_cpus_trusty(self, _psutil):
        # NOTE(review): assertTrue's second positional argument is the failure
        # message, not an expected value, so this only checks truthiness.
        # Also, with a Mock psutil, cpu_count() resolves to a Mock rather
        # than raising, so the trusty NUM_CPUS fallback may never actually be
        # exercised here — confirm against context._num_cpus() before
        # tightening this to assertEqual.
        _psutil.NUM_CPUS = 4
        self.assertTrue(context._num_cpus(), 4)
    @patch.object(context, '_num_cpus')
    def test_calculate_workers_float(self, _num_cpus):
        # NOTE(review): the 4 below is assertTrue's msg argument, not an
        # expected value — this only asserts the result is truthy. Confirm
        # the real expected worker count (0.3 * 4 cpus) before converting
        # this to assertEqual.
        self.config.side_effect = fake_config({
            'worker-multiplier': 0.3
        })
        _num_cpus.return_value = 4
        self.assertTrue(context._calculate_workers(), 4)
    @patch.object(context, '_num_cpus')
    def test_calculate_workers_not_quite_0(self, _num_cpus):
        # Make sure that the multiplier evaluating to somewhere between
        # 0 and 1 in the floating point range still has at least one
        # worker.
        # NOTE(review): the 1 below is assertTrue's msg argument, not an
        # expected value — only truthiness is checked, which happens to match
        # the "at least one worker" intent but does not pin the count.
        self.config.side_effect = fake_config({
            'worker-multiplier': 0.001
        })
        _num_cpus.return_value = 100
        self.assertTrue(context._calculate_workers(), 1)
    @patch.object(context, 'psutil')
    def test_calculate_workers_0(self, _psutil):
        # NOTE(review): assertTrue's second arg is the failure message, so
        # this does not compare against 0. If _calculate_workers() really
        # returned 0 here, assertTrue(0, ...) would FAIL — so either the
        # implied expectation of 0 or this assertion is wrong; confirm the
        # actual multiplier-0 behaviour before fixing.
        self.config.side_effect = fake_config({
            'worker-multiplier': 0
        })
        _psutil.cpu_count.return_value = 2
        self.assertTrue(context._calculate_workers(), 0)
    @patch.object(context, '_num_cpus')
    def test_calculate_workers_noconfig(self, _num_cpus):
        # NOTE(review): the 2 below is assertTrue's msg argument, not an
        # expected value — only truthiness is checked. Confirm the default
        # multiplier behaviour before converting to assertEqual.
        self.config.return_value = None
        _num_cpus.return_value = 1
        self.assertTrue(context._calculate_workers(), 2)
    @patch.object(context, '_num_cpus')
    def test_calculate_workers_noconfig_container(self, _num_cpus):
        # NOTE(review): the 2 below is assertTrue's msg argument, not an
        # expected value — only truthiness is checked. Confirm the
        # in-container default cap before converting to assertEqual.
        self.config.return_value = None
        self.is_container.return_value = True
        _num_cpus.return_value = 1
        self.assertTrue(context._calculate_workers(), 2)
    @patch.object(context, '_num_cpus')
    def test_calculate_workers_noconfig_lotsa_cpus_container(self,
                                                            _num_cpus):
        # NOTE(review): the 4 below is assertTrue's msg argument, not an
        # expected value — only truthiness is checked. Confirm the container
        # worker cap before converting to assertEqual.
        self.config.return_value = None
        self.is_container.return_value = True
        _num_cpus.return_value = 32
        self.assertTrue(context._calculate_workers(), 4)
    @patch.object(context, '_num_cpus')
    def test_calculate_workers_noconfig_lotsa_cpus_not_container(self,
                                                                 _num_cpus):
        # NOTE(review): the 64 below is assertTrue's msg argument, not an
        # expected value — only truthiness is checked. Confirm the default
        # multiplier (implied 2 x cpus) before converting to assertEqual.
        self.config.return_value = None
        _num_cpus.return_value = 32
        self.assertTrue(context._calculate_workers(), 64)
@patch.object(context, '_calculate_workers', return_value=256)
def test_worker_context(self, calculate_workers):
self.assertEqual(context.WorkerConfigContext()(),
{'workers': 256})
    def test_apache_get_addresses_no_network_config(self):
        """With no os-*-network config, the unit address is used for every
        endpoint and no per-network address lookup happens."""
        self.config.side_effect = fake_config({
            'os-internal-network': None,
            'os-admin-network': None,
            'os-public-network': None
        })
        self.resolve_address.return_value = '10.5.1.50'
        self.unit_get.return_value = '10.5.1.50'
        apache = context.ApacheSSLContext()
        apache.external_ports = '8776'
        addresses = apache.get_network_addresses()
        # Single (network address, vip/resolved address) pair, deduplicated.
        expected = [('10.5.1.50', '10.5.1.50')]
        self.assertEqual(addresses, expected)
        self.get_address_in_network.assert_not_called()
        # One resolve per endpoint type.
        self.resolve_address.assert_has_calls([
            call(context.INTERNAL),
            call(context.ADMIN),
            call(context.PUBLIC)
        ])
    def test_apache_get_addresses_with_network_config(self):
        """With os-*-network CIDRs configured, each endpoint resolves to its
        own per-network address."""
        self.config.side_effect = fake_config({
            'os-internal-network': '10.5.1.0/24',
            'os-admin-network': '10.5.2.0/24',
            'os-public-network': '10.5.3.0/24',
        })
        # One address per configured network, consumed in order by both mocks.
        _base_addresses = ['10.5.1.100',
                           '10.5.2.100',
                           '10.5.3.100']
        self.get_address_in_network.side_effect = _base_addresses
        self.resolve_address.side_effect = _base_addresses
        self.unit_get.return_value = '10.5.1.50'
        apache = context.ApacheSSLContext()
        addresses = apache.get_network_addresses()
        expected = [('10.5.1.100', '10.5.1.100'),
                    ('10.5.2.100', '10.5.2.100'),
                    ('10.5.3.100', '10.5.3.100')]
        self.assertEqual(addresses, expected)
        # Each CIDR is matched against the unit's own address.
        calls = [call('10.5.1.0/24', '10.5.1.50'),
                 call('10.5.2.0/24', '10.5.1.50'),
                 call('10.5.3.0/24', '10.5.1.50')]
        self.get_address_in_network.assert_has_calls(calls)
        self.resolve_address.assert_has_calls([
            call(context.INTERNAL),
            call(context.ADMIN),
            call(context.PUBLIC)
        ])
    def test_apache_get_addresses_network_spaces(self):
        """With Juju network spaces (network-get available), the binding
        address is paired with the resolved endpoint address."""
        self.config.side_effect = fake_config({
            'os-internal-network': None,
            'os-admin-network': None,
            'os-public-network': None
        })
        # Clearing side_effect lets the return_value take effect.
        self.network_get_primary_address.side_effect = None
        self.network_get_primary_address.return_value = '10.5.2.50'
        self.resolve_address.return_value = '10.5.2.100'
        self.unit_get.return_value = '10.5.1.50'
        apache = context.ApacheSSLContext()
        apache.external_ports = '8776'
        addresses = apache.get_network_addresses()
        expected = [('10.5.2.50', '10.5.2.100')]
        self.assertEqual(addresses, expected)
        self.get_address_in_network.assert_not_called()
        self.resolve_address.assert_has_calls([
            call(context.INTERNAL),
            call(context.ADMIN),
            call(context.PUBLIC)
        ])
def test_config_flag_parsing_simple(self):
# Standard key=value checks...
flags = context.config_flags_parser('key1=value1, key2=value2')
self.assertEqual(flags, {'key1': 'value1', 'key2': 'value2'})
# Check for multiple values to a single key
flags = context.config_flags_parser('key1=value1, '
'key2=value2,value3,value4')
self.assertEqual(flags, {'key1': 'value1',
'key2': 'value2,value3,value4'})
# Check for yaml formatted key value pairings for more complex
# assignment options.
flags = context.config_flags_parser('key1: subkey1=value1,'
'subkey2=value2')
self.assertEqual(flags, {'key1': 'subkey1=value1,subkey2=value2'})
# Check for good measure the ldap formats
test_string = ('user_tree_dn: ou=ABC General,'
'ou=User Accounts,dc=example,dc=com')
flags = context.config_flags_parser(test_string)
self.assertEqual(flags, {'user_tree_dn': ('ou=ABC General,'
'ou=User Accounts,'
'dc=example,dc=com')})
    def _fake_get_hwaddr(self, arg):
        """Test stub: return the canned MAC for NIC *arg* (KeyError if unknown)."""
        return MACHINE_MACS[arg]
    def _fake_get_ipv4(self, arg, fatal=False):
        """Test stub: return canned IPv4 data for NIC *arg*; *fatal* is ignored."""
        return MACHINE_NICS[arg]
@patch('charmhelpers.contrib.openstack.context.config')
def test_no_ext_port(self, mock_config):
self.config.side_effect = config = fake_config({})
mock_config.side_effect = config
self.assertEquals(context.ExternalPortContext()(), {})
@patch('charmhelpers.contrib.openstack.context.config')
def test_ext_port_eth(self, mock_config):
config = fake_config({'ext-port': 'eth1010'})
self.config.side_effect = config
mock_config.side_effect = config
self.assertEquals(context.ExternalPortContext()(),
{'ext_port': 'eth1010'})
@patch('charmhelpers.contrib.openstack.context.is_phy_iface',
lambda arg: True)
@patch('charmhelpers.contrib.openstack.context.get_nic_hwaddr')
@patch('charmhelpers.contrib.openstack.context.list_nics')
@patch('charmhelpers.contrib.openstack.context.get_ipv6_addr')
@patch('charmhelpers.contrib.openstack.context.get_ipv4_addr')
@patch('charmhelpers.contrib.openstack.context.config')
def test_ext_port_mac(self, mock_config, mock_get_ipv4_addr,
mock_get_ipv6_addr, mock_list_nics,
mock_get_nic_hwaddr):
config_macs = ABSENT_MACS + " " + MACHINE_MACS['eth2']
config = fake_config({'ext-port': config_macs})
self.config.side_effect = config
mock_config.side_effect = config
mock_get_ipv4_addr.side_effect = self._fake_get_ipv4
mock_get_ipv6_addr.return_value = []
mock_list_nics.return_value = MACHINE_MACS.keys()
mock_get_nic_hwaddr.side_effect = self._fake_get_hwaddr
self.assertEquals(context.ExternalPortContext()(),
{'ext_port': 'eth2'})
config = fake_config({'ext-port': ABSENT_MACS})
self.config.side_effect = config
mock_config.side_effect = config
self.assertEquals(context.ExternalPortContext()(), {})
@patch('charmhelpers.contrib.openstack.context.is_phy_iface',
lambda arg: True)
@patch('charmhelpers.contrib.openstack.context.get_nic_hwaddr')
@patch('charmhelpers.contrib.openstack.context.list_nics')
@patch('charmhelpers.contrib.openstack.context.get_ipv6_addr')
@patch('charmhelpers.contrib.openstack.context.get_ipv4_addr')
@patch('charmhelpers.contrib.openstack.context.config')
def test_ext_port_mac_one_used_nic(self, mock_config,
mock_get_ipv4_addr,
mock_get_ipv6_addr, mock_list_nics,
mock_get_nic_hwaddr):
self.relation_ids.return_value = ['neutron-plugin-api:1']
self.related_units.return_value = ['neutron-api/0']
self.relation_get.return_value = {'network-device-mtu': 1234,
'l2-population': 'False'}
config_macs = "%s %s" % (MACHINE_MACS['eth1'],
MACHINE_MACS['eth2'])
mock_get_ipv4_addr.side_effect = self._fake_get_ipv4
mock_get_ipv6_addr.return_value = []
mock_list_nics.return_value = MACHINE_MACS.keys()
mock_get_nic_hwaddr.side_effect = self._fake_get_hwaddr
config = fake_config({'ext-port': config_macs})
self.config.side_effect = config
mock_config.side_effect = config
self.assertEquals(context.ExternalPortContext()(),
{'ext_port': 'eth2', 'ext_port_mtu': 1234})
@patch('charmhelpers.contrib.openstack.context.NeutronPortContext.'
'resolve_ports')
def test_data_port_eth(self, mock_resolve):
self.config.side_effect = fake_config({'data-port':
'phybr1:eth1010 '
'phybr1:eth1011'})
mock_resolve.side_effect = lambda ports: ['eth1010']
self.assertEquals(context.DataPortContext()(),
{'eth1010': 'phybr1'})
@patch.object(context, 'get_nic_hwaddr')
@patch.object(context.NeutronPortContext, 'resolve_ports')
def test_data_port_mac(self, mock_resolve, mock_get_nic_hwaddr):
extant_mac = 'cb:23:ae:72:f2:33'
non_extant_mac = 'fa:16:3e:12:97:8e'
self.config.side_effect = fake_config({'data-port':
'phybr1:%s phybr1:%s' %
(non_extant_mac, extant_mac)})
def fake_resolve(ports):
resolved = []
for port in ports:
if port == extant_mac:
resolved.append('eth1010')
return resolved
mock_get_nic_hwaddr.side_effect = lambda nic: extant_mac
mock_resolve.side_effect = fake_resolve
self.assertEquals(context.DataPortContext()(),
{'eth1010': 'phybr1'})
    @patch.object(context.NeutronAPIContext, '__call__', lambda *args:
                  {'network_device_mtu': 5000})
    @patch.object(context, 'get_nic_hwaddr', lambda inst, port: port)
    @patch.object(context.NeutronPortContext, 'resolve_ports',
                  lambda inst, ports: ports)
    def test_phy_nic_mtu_context(self):
        """PhyNICMTUContext lists data-port devices with the MTU advertised
        by the neutron API context."""
        self.config.side_effect = fake_config({'data-port':
                                               'phybr1:eth0'})
        ctxt = context.PhyNICMTUContext()()
        self.assertEqual(ctxt, {'devs': 'eth0', 'mtu': 5000})
    @patch.object(context.glob, 'glob')
    @patch.object(context.NeutronAPIContext, '__call__', lambda *args:
                  {'network_device_mtu': 5000})
    @patch.object(context, 'get_nic_hwaddr', lambda inst, port: port)
    @patch.object(context.NeutronPortContext, 'resolve_ports',
                  lambda inst, ports: ports)
    def test_phy_nic_mtu_context_vlan(self):
        """For a VLAN device, the underlying raw NIC (from sysfs lower_*)
        is included in 'devs' ahead of the VLAN device."""
        self.config.side_effect = fake_config({'data-port':
                                               'phybr1:eth0.100'})
        mock_glob.return_value = ['/sys/class/net/eth0.100/lower_eth0']
        ctxt = context.PhyNICMTUContext()()
        self.assertEqual(ctxt, {'devs': 'eth0\\neth0.100', 'mtu': 5000})
@patch.object(context.glob, 'glob')
@patch.object(context.NeutronAPIContext, '__call__', lambda *args:
{'network_device_mtu': 5000})
@patch.object(context, 'get_nic_hwaddr', lambda inst, port: port)
@patch.object(context.NeutronPortContext, 'resolve_ports',
lambda inst, ports: ports)
def test_phy_nic_mtu_context_vlan_w_duplicate_raw(self, mock_glob):
self.config.side_effect = fake_config({'data-port':
'phybr1:eth0.100 '
'phybr1:eth0.200'})
def fake_glob(wcard):
if 'eth0.100' in wcard:
return ['/sys/class/net/eth0.100/lower_eth0']
elif 'eth0.200' in wcard:
return ['/sys/class/net/eth0.200/lower_eth0']
raise Exception("Unexpeced key '%s'" % (wcard))
mock_glob.side_effect = fake_glob
ctxt = context.PhyNICMTUContext()()
self.assertEqual(ctxt, {'devs': 'eth0\\neth0.100\\neth0.200',
'mtu': 5000})
def test_neutronapicontext_defaults(self):
self.relation_ids.return_value = []
expected_keys = [
'l2_population', 'enable_dvr', 'enable_l3ha',
'overlay_network_type', 'network_device_mtu',
'enable_qos', 'enable_nsg_logging'
]
api_ctxt = context.NeutronAPIContext()()
for key in expected_keys:
self.assertTrue(key in api_ctxt)
self.assertEquals(api_ctxt['polling_interval'], 2)
self.assertEquals(api_ctxt['rpc_response_timeout'], 60)
self.assertEquals(api_ctxt['report_interval'], 30)
self.assertEquals(api_ctxt['enable_nsg_logging'], False)
def setup_neutron_api_context_relation(self, cfg):
self.relation_ids.return_value = ['neutron-plugin-api:1']
self.related_units.return_value = ['neutron-api/0']
# The l2-population key is used by the context as a way of checking if
# the api service on the other end is sending data in a recent format.
self.relation_get.return_value = cfg
def test_neutronapicontext_extension_drivers_qos_on(self):
self.setup_neutron_api_context_relation({
'enable-qos': 'True',
'l2-population': 'True'})
api_ctxt = context.NeutronAPIContext()()
self.assertTrue(api_ctxt['enable_qos'])
self.assertEquals(api_ctxt['extension_drivers'], 'qos')
def test_neutronapicontext_extension_drivers_qos_off(self):
self.setup_neutron_api_context_relation({
'enable-qos': 'False',
'l2-population': 'True'})
api_ctxt = context.NeutronAPIContext()()
self.assertFalse(api_ctxt['enable_qos'])
self.assertEquals(api_ctxt['extension_drivers'], '')
def test_neutronapicontext_extension_drivers_qos_absent(self):
self.setup_neutron_api_context_relation({
'l2-population': 'True'})
api_ctxt = context.NeutronAPIContext()()
self.assertFalse(api_ctxt['enable_qos'])
self.assertEquals(api_ctxt['extension_drivers'], '')
def test_neutronapicontext_extension_drivers_log_off(self):
self.setup_neutron_api_context_relation({
'enable-nsg-logging': 'False',
'l2-population': 'True'})
api_ctxt = context.NeutronAPIContext()()
self.assertEquals(api_ctxt['extension_drivers'], '')
def test_neutronapicontext_extension_drivers_log_on(self):
self.setup_neutron_api_context_relation({
'enable-nsg-logging': 'True',
'l2-population': 'True'})
api_ctxt = context.NeutronAPIContext()()
self.assertEquals(api_ctxt['extension_drivers'], 'log')
def test_neutronapicontext_extension_drivers_log_qos_on(self):
self.setup_neutron_api_context_relation({
'enable-qos': 'True',
'enable-nsg-logging': 'True',
'l2-population': 'True'})
api_ctxt = context.NeutronAPIContext()()
self.assertEquals(api_ctxt['extension_drivers'], 'qos,log')
def test_neutronapicontext_string_converted(self):
self.setup_neutron_api_context_relation({
'l2-population': 'True'})
api_ctxt = context.NeutronAPIContext()()
self.assertEquals(api_ctxt['l2_population'], True)
def test_neutronapicontext_none(self):
self.relation_ids.return_value = ['neutron-plugin-api:1']
self.related_units.return_value = ['neutron-api/0']
self.relation_get.return_value = {'l2-population': 'True'}
api_ctxt = context.NeutronAPIContext()()
self.assertEquals(api_ctxt['network_device_mtu'], None)
def test_network_service_ctxt_no_units(self):
self.relation_ids.return_value = []
self.relation_ids.return_value = ['foo']
self.related_units.return_value = []
self.assertEquals(context.NetworkServiceContext()(), {})
@patch.object(context.OSContextGenerator, 'context_complete')
def test_network_service_ctxt_no_data(self, mock_context_complete):
rel = FakeRelation(QUANTUM_NETWORK_SERVICE_RELATION)
self.relation_ids.side_effect = rel.relation_ids
self.related_units.side_effect = rel.relation_units
relation = FakeRelation(relation_data=QUANTUM_NETWORK_SERVICE_RELATION)
self.relation_get.side_effect = relation.get
mock_context_complete.return_value = False
self.assertEquals(context.NetworkServiceContext()(), {})
def test_network_service_ctxt_data(self):
data_result = {
'keystone_host': '10.5.0.1',
'service_port': '5000',
'auth_port': '20000',
'service_tenant': 'tenant',
'service_username': 'username',
'service_password': 'password',
'quantum_host': '10.5.0.2',
'quantum_port': '9696',
'quantum_url': 'http://10.5.0.2:9696/v2',
'region': 'aregion',
'service_protocol': 'http',
'auth_protocol': 'http',
'api_version': '2.0',
}
rel = FakeRelation(QUANTUM_NETWORK_SERVICE_RELATION)
self.relation_ids.side_effect = rel.relation_ids
self.related_units.side_effect = rel.relation_units
relation = FakeRelation(relation_data=QUANTUM_NETWORK_SERVICE_RELATION)
self.relation_get.side_effect = relation.get
self.assertEquals(context.NetworkServiceContext()(), data_result)
def test_network_service_ctxt_data_api_version(self):
data_result = {
'keystone_host': '10.5.0.1',
'service_port': '5000',
'auth_port': '20000',
'service_tenant': 'tenant',
'service_username': 'username',
'service_password': 'password',
'quantum_host': '10.5.0.2',
'quantum_port': '9696',
'quantum_url': 'http://10.5.0.2:9696/v2',
'region': 'aregion',
'service_protocol': 'http',
'auth_protocol': 'http',
'api_version': '3',
}
rel = FakeRelation(QUANTUM_NETWORK_SERVICE_RELATION_VERSIONED)
self.relation_ids.side_effect = rel.relation_ids
self.related_units.side_effect = rel.relation_units
relation = FakeRelation(
relation_data=QUANTUM_NETWORK_SERVICE_RELATION_VERSIONED)
self.relation_get.side_effect = relation.get
self.assertEquals(context.NetworkServiceContext()(), data_result)
def test_internal_endpoint_context(self):
config = {'use-internal-endpoints': False}
self.config.side_effect = fake_config(config)
ctxt = context.InternalEndpointContext()
self.assertFalse(ctxt()['use_internal_endpoints'])
config = {'use-internal-endpoints': True}
self.config.side_effect = fake_config(config)
self.assertTrue(ctxt()['use_internal_endpoints'])
@patch.object(context, 'os_release')
def test_volume_api_context(self, mock_os_release):
mock_os_release.return_value = 'ocata'
config = {'use-internal-endpoints': False}
self.config.side_effect = fake_config(config)
ctxt = context.VolumeAPIContext('cinder-common')
c = ctxt()
self.assertEqual(c['volume_api_version'], '2')
self.assertEqual(c['volume_catalog_info'],
'volumev2:cinderv2:publicURL')
mock_os_release.return_value = 'pike'
config['use-internal-endpoints'] = True
self.config.side_effect = fake_config(config)
ctxt = context.VolumeAPIContext('cinder-common')
c = ctxt()
self.assertEqual(c['volume_api_version'], '3')
self.assertEqual(c['volume_catalog_info'],
'volumev3:cinderv3:internalURL')
def test_volume_api_context_no_pkg(self):
self.assertRaises(ValueError, context.VolumeAPIContext, "")
self.assertRaises(ValueError, context.VolumeAPIContext, None)
def test_apparmor_context_call_not_valid(self):
''' Tests for the apparmor context'''
mock_aa_object = context.AppArmorContext()
# Test with invalid config
self.config.return_value = 'NOTVALID'
self.assertEquals(mock_aa_object.__call__(), None)
def test_apparmor_context_call_complain(self):
''' Tests for the apparmor context'''
mock_aa_object = context.AppArmorContext()
# Test complain mode
self.config.return_value | |
"""
ET Correction Tool:
This script creates evapotranspiration Dfs2 from single/multiple reference ET
time-series, and applies spatially, monthly varying solar radiation correction
factors to the reference ET data and creates the MIKE SHE input ET Dfs2 file.
Created on Wed Apr 28 15:50:07 2021
@author: <NAME>
<EMAIL>
DHI,US
"""
# marks dependencies
import os
import clr
import sys
import time
import numpy as np #
import pandas as pd #
import datetime as dt
import shapefile #pyshp
from winreg import ConnectRegistry, OpenKey, HKEY_LOCAL_MACHINE, QueryValueEx
def get_mike_bin_directory_from_registry():
    """Locate the MIKE Zero ``bin`` directory via the Windows registry.

    Looks for a DHI key in HKLM, first under the 64-bit redirected view
    (Wow6432Node) and then under the native 32-bit view, and scans release
    years downward from 2030 until an installed version is found.

    Returns:
        str: Path to the MIKE bin directory (the x64 subfolder on 64-bit
        installs), or '' if no installed year key was found.

    Raises:
        FileNotFoundError: if no DHI registry key exists at all.
        NotADirectoryError: if the registry points at a missing directory.
    """
    x86 = False
    # 64-bit Windows stores 32-bit software keys under Wow6432Node.
    dhiRegistry = "SOFTWARE\\Wow6432Node\\DHI\\"
    aReg = ConnectRegistry(None, HKEY_LOCAL_MACHINE)
    try:
        _ = OpenKey(aReg, dhiRegistry)
    except FileNotFoundError:
        # Fall back to the native view (32-bit Windows). The original code
        # retried the identical Wow6432Node path here, which made this
        # fallback a no-op.
        x86 = True
        dhiRegistry = "SOFTWARE\\DHI\\"
        aReg = ConnectRegistry(None, HKEY_LOCAL_MACHINE)
        try:
            _ = OpenKey(aReg, dhiRegistry)
        except FileNotFoundError:
            raise FileNotFoundError("No DHI registry key found under HKLM")
    year = 2030
    while year > 2010:
        try:
            mikeHomeDirKey = OpenKey(aReg, dhiRegistry + str(year))
        except FileNotFoundError:
            year -= 1
            continue
        # From 2021 on, the HomeDir lives under a "MIKE Zero\<year>" subkey.
        if year > 2020:
            mikeHomeDirKey = OpenKey(aReg, dhiRegistry + "MIKE Zero\\" + str(year))
        mikeBin = QueryValueEx(mikeHomeDirKey, "HomeDir")[0]
        mikeBin += "bin\\"
        if not x86:
            mikeBin += "x64\\"
        if not os.path.exists(mikeBin):
            print(f"Cannot find MIKE ZERO in {mikeBin}")
            raise NotADirectoryError(mikeBin)
        return mikeBin
    print("Cannot find MIKE ZERO")
    return ""
sys.path.append(get_mike_bin_directory_from_registry())
clr.AddReference("DHI.Generic.MikeZero.DFS")
clr.AddReference("DHI.Generic.MikeZero.EUM")
clr.AddReference("DHI.Projections")
from mikeio import * #
from mikeio.eum import ItemInfo
from shapely.geometry import Polygon, Point #
from tkinter import Frame, Label, Button, Entry, Tk, W, END
from tkinter import messagebox as tkMessageBox
# from tkinter.filedialog import askdirectory
from tkinter.filedialog import askopenfilename
from tkinter.filedialog import asksaveasfilename
#------------------------------------------------------------------------------
## File locations for testing tool:
# PolygonsShapefileName = r"C:\Users\ssin\OneDrive - DHI\Desktop\MIKE SHE ET\RefET_Prucha\NLDASzones3.shp"
# SolarRadiationShapefileName = r"C:\Users\ssin\OneDrive - DHI\Desktop\MIKE SHE ET\RefET_Prucha\SolarRad_SCALING_bymonth.shp"
# refDfs0path= r"C:\Users\ssin\OneDrive - DHI\Desktop\MIKE SHE ET\RefET_Prucha\PET_NLDAS2000_2020_1st10.dfs0"
# projpath = r"C:\Users\ssin\OneDrive - DHI\Desktop\MIKE SHE ET\RefET_Prucha\SolarRad_SCALING_bymonth.prj"
# filePath = r"C:\Users\ssin\OneDrive - DHI\Desktop\MIKE SHE ET\RefET_Prucha\Test.dfs2"
#------------------------------------------------------------------------------
# Read reference ET Dfs0 file(s), and create a dataframe:
# Read reference ET Dfs0 file(s), and create a dataframe:
def ReferenceET2Dataframe(refDfs0path):
    """Concatenate every *.dfs0 file found next to *refDfs0path* into one
    DataFrame (one column per station/item, joined on the time index)."""
    ref_dir = os.path.dirname(refDfs0path)
    # NOTE: changes the process working directory so the dfs0 files can be
    # opened by bare name below.
    os.chdir(ref_dir)
    dfs0_names = [name for name in os.listdir(ref_dir)
                  if name.endswith('dfs0')]
    combined = pd.DataFrame()
    for name in dfs0_names:
        combined = pd.concat([combined, Dfs0(name).to_dataframe()], axis=1)
    return combined
# Reference ET metadata for creating Dfs2:
# Reference ET metadata for creating Dfs2:
def RefETMetadata(refDfs0path):
    """Collect metadata about the reference ET series for building the Dfs2.

    Args:
        refDfs0path: path to one reference dfs0 file; all sibling dfs0
            files in the same directory are aggregated via
            ReferenceET2Dataframe.

    Returns:
        dict with station count, EUM item type/unit, start time, number of
        timesteps, timestep length in seconds, and the overall maximum
        value plus the station/timestep where it occurs.
    """
    ReferenceET_df = ReferenceET2Dataframe(refDfs0path)
    metadata_file = Dfs0(refDfs0path)
    ETMetadata = {
        "NumStations" : ReferenceET_df.shape[1],
        "Type" : metadata_file.items[0].type,
        "Unit" : metadata_file.items[0].unit,
        "StartTime" : ReferenceET_df.index[0],
        "NumTimesteps" : len(ReferenceET_df.index),
        "Timestep" : (ReferenceET_df.index[1]-ReferenceET_df.index[0]).total_seconds(),
        "Max" : round(ReferenceET_df.max().max(),2),
        "MaxStation" : ReferenceET_df.max().idxmax(),
        "MaxTimestep" : ReferenceET_df.idxmax()[ReferenceET_df.max().idxmax()]
    }
    # The original had an unreachable print after this return that also used
    # attribute access (ETMetadata.Max) on a dict and would have crashed;
    # it has been removed.
    return ETMetadata
#Read correction factor grid shape file:
#Read correction factor grid shape file:
def Correction_df(SolarRadiationShapefileName):
    """Read the monthly solar-radiation factor grid shapefile.

    Returns a tuple of (records DataFrame with month-named factor columns,
    unique X coordinates ascending, unique Y coordinates descending)."""
    grid_shp = shapefile.Reader(SolarRadiationShapefileName)
    # Field list excludes the DeletionFlag pseudo-field at index 0.
    field_names = [field[0] for field in grid_shp.fields[1:]]
    # Rename the twelve factor columns to month names (January..December).
    field_names[2:14] = [dt.date(2000, month, 1).strftime('%B')
                         for month in range(1, 13)]
    grid_df = pd.DataFrame(columns=field_names, data=grid_shp.records())
    xs = grid_df.X.sort_values(ascending=True).unique()
    ys = grid_df.Y.sort_values(ascending=False).unique()
    return grid_df, xs, ys
# Correct ref ET by scaling factors and create ET Dfs2 input data nparray:
# Correct ref ET by scaling factors and create ET Dfs2 input data nparray:
def ETCorrection(PolygonsShapefileName, SolarRadiationShapefileName, refDfs0path):
    """Apply monthly solar-radiation factors to reference ET, gridded.

    For every polygon in *PolygonsShapefileName* (whose 'ETStation' field
    names the reference ET column to use), every factor-grid point falling
    inside the polygon gets that station's ET series scaled by the point's
    monthly correction factor.

    Returns:
        numpy array of shape (num timesteps, num Y, num X) with the
        corrected ET series placed at each grid point's (Y, X) cell;
        cells outside all polygons stay zero.
    """
    # Input ref ET in dataframe
    ReferenceET_df = ReferenceET2Dataframe(refDfs0path)
    # Ref ET polygons reading:
    Polygons_Shapefile = shapefile.Reader(PolygonsShapefileName)
    #Excluding deletion flag
    Polygons_Shapefile_fieldnames = [field[0] for field
                                     in Polygons_Shapefile.fields[1:]]
    # Locate the 'ETStation' attribute column (last match wins).
    for index in range(len(Polygons_Shapefile_fieldnames)):
        if Polygons_Shapefile_fieldnames[index] == 'ETStation':
            ETStation_field_index = index
    Attribute_table = Polygons_Shapefile.records()
    ETStation_Names = [Polygons_Shapefile.record(record)[ETStation_field_index]
                       for record in range(len(Attribute_table))]
    Num_Polygons = len(Polygons_Shapefile.shapes())
    Polygons_Coordinates = [Polygons_Shapefile.shape(poly).points
                            for poly in range(Num_Polygons)]
    # Correction factor grid reading
    SolarRadiation_df, Grid_X, Grid_Y = Correction_df(SolarRadiationShapefileName)
    #Find points inside every polygon:
    ListPointsinPolygons = [[] for poly in range(len(Polygons_Coordinates))]
    for loc in range(len(SolarRadiation_df)):
        SolarRadiation_point = Point(SolarRadiation_df.X[loc],
                                     SolarRadiation_df.Y[loc]) #shapely point
        for poly in range(len(Polygons_Coordinates)):
            ET_Polygon = Polygon(Polygons_Coordinates[poly]) #shapely polygon
            if ET_Polygon.contains(SolarRadiation_point):
                ListPointsinPolygons[poly].append(loc) # Self Note: check point on poly line
    print('Solar radiation points inside each ET polygon identified')
    # Define output corrected data array:
    Corrected_ET = np.zeros((len(ReferenceET_df),
                             len(Grid_Y),
                             len(Grid_X)))
    # Correction of ET data looping all polygons, identifying their ref ET
    for poly in range(len(Polygons_Coordinates)):
        ThisPolygon_ReferenceET = ReferenceET_df[ETStation_Names[poly]] #[0:1]
        # Work on a copy so each grid point starts from the raw series.
        ThisPolygon_ReferenceET_Copy = ThisPolygon_ReferenceET.copy()
        # Correction of all points within the polygon in loop
        for point_index in ListPointsinPolygons[poly]:
            for month in range(1,13):
                # Monthly factor columns start at iloc column 2 (month+1).
                ThisPoint_CorrectionFactor = SolarRadiation_df.iloc[point_index,
                                                                   month+1]
                This_month_index = ThisPolygon_ReferenceET_Copy.index.month==month
                # Correction of ref ET for a grid point with correcponding correction factor
                if len(This_month_index) !=0:
                    This_month_values = ThisPolygon_ReferenceET[This_month_index].copy()
                    ThisPolygon_ReferenceET_Copy[This_month_index] = ThisPoint_CorrectionFactor * This_month_values
            #Define spatial location for corrected ET time-series of a grid point
            for x in range(len(Grid_X)):
                if SolarRadiation_df.X[point_index] == Grid_X[x]:
                    X=x
                    break
            for y in range(len(Grid_Y)):
                if SolarRadiation_df.Y[point_index] == Grid_Y[y]:
                    Y=y
                    break
            # Store corrected ET time-series
            Corrected_ET[:,Y,X] = ThisPolygon_ReferenceET_Copy
        print('Grid points in polygon > '+ str(poly) +' corrected for solar radiation')
    return Corrected_ET
# Write Dfs2 ouput file:
# Write Dfs2 ouput file:
def buildETDfs(filePath, Corrected_ET, SolarRadiationShapefileName, projpath, refDfs0path):
    """Write the corrected ET array to a MIKE SHE Dfs2 file.

    Args:
        filePath: output .dfs2 path (silently overwritten if it exists).
        Corrected_ET: ndarray (time, y, x) of corrected ET values.
        SolarRadiationShapefileName: grid shapefile supplying dx/dy/origin.
        projpath: .prj file containing the projection (WKT) string.
        refDfs0path: reference dfs0 supplying item type/unit and time axis.
    """
    if os.path.exists(filePath):
        os.remove(filePath)
    dfs = Dfs2()
    # Projection sys from shape file — read via a context manager so the
    # file handle is closed (the original leaked it).
    with open(projpath, "r") as prj_file:
        projString = prj_file.read()
    #ET timeseries data
    ETMetadata = RefETMetadata(refDfs0path)
    SolarRadiation_df, Grid_X, Grid_Y = Correction_df(SolarRadiationShapefileName)
    Dx = Grid_X[1]-Grid_X[0]
    # Grid_Y is sorted descending, so the first difference is positive.
    Dy = Grid_Y[0]-Grid_Y[1]
    dfs.write(filename = filePath,
              data = [Corrected_ET],
              start_time = ETMetadata["StartTime"],
              dt = ETMetadata["Timestep"],
              items=[ItemInfo("Evapotranspiration",
                              ETMetadata["Type"],
                              ETMetadata["Unit"],
                              data_value_type='Instantaneous')],
              dx = Dx,
              dy = Dy,
              # Origin is the lower-left corner (last Y because Grid_Y is
              # descending).
              coordinate = [projString,
                            Grid_X[0],
                            Grid_Y[-1],
                            0],
              title="ET_RadiationCorrected")
    print('Dfs2 created')
def ETCorrectionTool(refDfs0path, PolygonsShapefileName, SolarRadiationShapefileName, projpath, filePath):
    """Run the full correction pipeline and report elapsed wall-clock times."""
    Tool_start_time = time.time()
    Corrected_ET = ETCorrection(PolygonsShapefileName, SolarRadiationShapefileName, refDfs0path)
    Dfs2_start_time = time.time()
    buildETDfs(filePath, Corrected_ET, SolarRadiationShapefileName, projpath, refDfs0path)
    writing_seconds = time.time() - Dfs2_start_time
    total_seconds = time.time() - Tool_start_time
    print('Writing time' + "- %s seconds" % (writing_seconds))
    print('Total time'+"- %s seconds" % (total_seconds))
#------------------------------------------------------------------------------
# UI for this tool:
class interface(Frame):
    """Tkinter UI for the ET correction tool.

    Collects the four input paths and the output path, then runs
    ETCorrectionTool on demand.
    """
    def __init__(self, master = None):
        """ Initialize Frame. """
        Frame.__init__(self,master)
        self.grid()
        self.createWidgets()

    def message(self):
        """Show the completion dialog."""
        tkMessageBox.showinfo("Task Completed", "Reference ET data corrected!")

    def run(self):
        """Gather the five entry fields and run the correction pipeline."""
        # input1 - Ref ET timeseries in Dfs0:
        filename1 = self.file_name1.get()
        # input2 - Polygons for every ET timeseries in shp file:
        filename2 = self.file_name2.get()
        # input3 - Correction factor grid with monthly values in shape file:
        filename3 = self.file_name3.get()
        # input4 - Projection file:
        filename4 = self.file_name4.get()
        # Output:
        outputFile = self.file_name5.get()
        # Tool
        ETCorrectionTool(filename1, filename2, filename3, filename4, outputFile)
        self.message()

    def createWidgets(self):
        """Build the label/entry/button grid for the five file paths."""
        # set all labels of inputs:
        Label(self, text = "Note: Output Dfs2's start time, time steps, and ET data units will be same as reference ET Dfs0")\
            .grid(row=0, columnspan=3,sticky=W)
        Label(self, text = "Reference ET (*.dfs0) :")\
            .grid(row=1, column=0, sticky=W)
        Label(self, text = "ET Polygons (*.shp) :")\
            .grid(row=2, column=0, sticky=W)
        Label(self, text = "Solar Radiation Factors (*.shp) :")\
            .grid(row=3, column=0, sticky=W)
        Label(self, text = "Projection (*.prj) :")\
            .grid(row=4, column=0, sticky=W)
        Label(self, text = "Output Corrected ET (*.dfs2) :")\
            .grid(row=5, column=0, sticky=W)
        # set buttons
        Button(self, text = "Browse", command=self.load_file1, width=10)\
            .grid(row=1, column=6, sticky=W)
        Button(self, text = "Browse", command=self.load_file2, width=10)\
            .grid(row=2, column=6, sticky=W)
        Button(self, text = "Browse", command=self.load_file3, width=10)\
            .grid(row=3, column=6, sticky=W)
        Button(self, text = "Browse", command=self.load_file4, width=10)\
            .grid(row=4, column=6, sticky=W)
        Button(self, text = "Save As", command=self.load_file5, width=10)\
            .grid(row=5, column=6, sticky=W)
        Button(self, text = "Run ET Correction", command=self.run, width=20)\
            .grid(row=6, column=2, sticky=W)
        # set entry field
        self.file_name1 = Entry(self, width=65)
        self.file_name1.grid(row=1, column=1, columnspan=4, sticky=W)
        self.file_name2 = Entry(self, width=65)
        self.file_name2.grid(row=2, column=1, columnspan=4, sticky=W)
        self.file_name3 = Entry(self, width=65)
        self.file_name3.grid(row=3, column=1, columnspan=4, sticky=W)
        self.file_name4 = Entry(self, width=65)
        self.file_name4.grid(row=4, column=1, columnspan=4, sticky=W)
        self.file_name5 = Entry(self, width=65)
        self.file_name5.grid(row=5, column=1, columnspan=4, sticky=W)

    def _select_file(self, entry, dialog, **options):
        """Run a file *dialog* and place the chosen path into *entry*.

        Replaces five near-identical load_fileN bodies in the original.
        Keeps setting self.filename for backwards compatibility.
        """
        self.filename = dialog(initialdir=os.path.curdir, **options)
        if self.filename:
            try:
                entry.delete(0, END)
                entry.insert(0, self.filename)
                # Scroll so the tail (file name) of a long path is visible.
                entry.xview_moveto(1.0)
            except IOError:
                tkMessageBox.showerror("Error","Failed to read file \n'%s'"%self.filename)

    def load_file1(self):
        """Browse for the reference ET dfs0."""
        self._select_file(self.file_name1, askopenfilename)

    def load_file2(self):
        """Browse for the ET polygons shapefile."""
        self._select_file(self.file_name2, askopenfilename)

    def load_file3(self):
        """Browse for the solar radiation factors shapefile."""
        self._select_file(self.file_name3, askopenfilename)

    def load_file4(self):
        """Browse for the projection (.prj) file."""
        self._select_file(self.file_name4, askopenfilename)

    def load_file5(self):
        """Choose where to save the output dfs2."""
        self._select_file(self.file_name5, asksaveasfilename,
                          defaultextension=".dfs2",
                          filetypes=(("Dfs2 File", "*.dfs2"),
                                     ("All Files", "*.*")))
##### main program
root = Tk()
UI = interface(master=root)
UI.master.title("Evapotranspiration Correction Tool")
UI.master.geometry('680x270')
for child | |
<reponame>jjiege/odoo<gh_stars>0
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import datetime
from datetime import datetime, timedelta, time
from odoo import fields
from odoo.tests.common import TransactionCase
import pytz
import re
class TestCalendar(TransactionCase):
def setUp(self):
super(TestCalendar, self).setUp()
self.CalendarEvent = self.env['calendar.event']
# In Order to test calendar, I will first create One Simple Event with real data
self.event_tech_presentation = self.CalendarEvent.create({
'privacy': 'private',
'start': '2011-04-30 16:00:00',
'stop': '2011-04-30 18:30:00',
'description': 'The Technical Presentation will cover following topics:\n* Creating Odoo class\n* Views\n* Wizards\n* Workflows',
'duration': 2.5,
'location': 'Odoo S.A.',
'name': 'Technical Presentation'
})
def test_calender_simple_event(self):
m = self.CalendarEvent.create({
'name': "Test compute",
'start': '2017-07-12 14:30:00',
'allday': False,
'stop': '2017-07-12 15:00:00',
})
self.assertEqual(
(str(m.start_datetime), str(m.stop_datetime)),
(u'2017-07-12 14:30:00', u'2017-07-12 15:00:00'),
"Sanity check"
)
def test_calender_event(self):
# Now I will set recurrence for this event to occur monday and friday of week
data = {
'fr': 1,
'mo': 1,
'interval': 1,
'rrule_type': 'weekly',
'end_type': 'end_date',
'final_date': '2011-05-31 00:00:00',
'recurrency': True
}
self.event_tech_presentation.write(data)
# In order to check that recurrent events are views successfully in calendar view, I will open calendar view of events|
self.CalendarEvent.fields_view_get(False, 'calendar')
# In order to check that recurrent events are views successfully in calendar view, I will search for one of the recurrent event and count the number of events
rec_events = self.CalendarEvent.with_context({'virtual_id': True}).search([
('start', '>=', '2011-04-30 16:00:00'), ('start', '<=', '2011-05-31 00:00:00')
])
self.assertEqual(len(rec_events), 9, 'Wrong number of events found')
# Now I move a virtual event, to see that a real event is well created and depending from the native recurrence
before = self.CalendarEvent.with_context({'virtual_id': False}).search([
('start', '>=', '2011-04-30 16:00:00'), ('start', '<=', '2011-05-31 00:00:00')
])
# We start by detach the event
newevent = rec_events[1].detach_recurring_event()
newevent.with_context({'virtual_id': True}).write({'name': '<NAME>', 'recurrency': True})
after = self.CalendarEvent.with_context({'virtual_id': False}).search([
('start', '>=', '2011-04-30 16:00:00'), ('start', '<=', '2011-05-31 00:00:00')
])
self.assertEqual(len(after), len(before) + 1, 'Wrong number of events found, after to have moved a virtual event')
new_event = after - before
self.assertEqual(new_event[0].recurrent_id, before.id, 'Recurrent_id not correctly passed to the new event')
# Now I will test All day event
allday_event = self.CalendarEvent.create({
'allday': 1,
'privacy': 'confidential',
'start': '2011-04-30 00:00:00',
'stop': '2011-04-30 00:00:00',
'description': 'All day technical test',
'location': 'School',
'name': 'All day test event'
})
# In order to check reminder I will first create reminder
res_alarm_day_before_event_starts = self.env['calendar.alarm'].create({
'name': '1 Day before event starts',
'duration': 1,
'interval': 'days',
'type': 'notification'
})
# Now I will assign this reminder to all day event|
allday_event.write({'alarm_ids': [(6, 0, [res_alarm_day_before_event_starts.id])]})
# I create a recuring rule for my event
calendar_event_sprint_review = self.CalendarEvent.create({
'name': 'Begin of month meeting',
'start': datetime.combine(fields.Date.today(), time(12, 0)),
'stop': datetime.combine(fields.Date.today(), time(18, 0)),
'recurrency': True,
'rrule': 'FREQ=MONTHLY;INTERVAL=1;COUNT=12;BYDAY=1MO'
})
# I check that the attributes are set correctly
self.assertEqual(calendar_event_sprint_review.rrule_type, 'monthly', 'rrule_type should be mothly')
self.assertEqual(calendar_event_sprint_review.count, 12, 'rrule_type should be mothly')
self.assertEqual(calendar_event_sprint_review.month_by, 'day', 'rrule_type should be mothly')
self.assertEqual(calendar_event_sprint_review.byday, '1', 'rrule_type should be mothly')
self.assertEqual(calendar_event_sprint_review.week_list, 'MO', 'rrule_type should be mothly')
def test_validation_error(self):
"""
Ideally this should build the base event in such a way that calling
write() triggers detach_recurring_event, but I've no idea how that
actually works so just calling it directly for now
"""
m = self.CalendarEvent.create({
'name': "wheee",
'start': '2017-07-12 14:30:00',
'allday': False,
'rrule': u'FREQ=WEEKLY;BYDAY=WE;INTERVAL=1;COUNT=100',
'duration': 0.5,
'stop': '2017-07-12 15:00:00',
})
self.assertEqual(
(str(m.start_datetime), str(m.stop_datetime)),
(u'2017-07-12 14:30:00', u'2017-07-12 15:00:00'),
"Sanity check"
)
values = {
'allday': False,
'name': u'wheee',
'attendee_ids': [
(0, 0, {'state': u'needsAction', 'partner_id': 8, 'email': u'<EMAIL>'}),
(0, 0, {'state': u'needsAction', 'partner_id': 10, 'email': u'<EMAIL>'}),
],
'recurrency': True,
'privacy': u'public',
'stop': '2017-07-10 16:00:00',
'alarm_ids': [(6, 0, [])],
'start': '2017-07-10 15:30:00',
'location': u"XXX",
'duration': 0.5,
'partner_ids': [(4, 10), (4, 8)],
'description': u"A thing"
}
records = m.detach_recurring_event(values)
self.assertEqual(
(str(m.start_datetime), str(m.stop_datetime)),
('2017-07-12 14:30:00', u'2017-07-12 15:00:00'),
)
self.assertEquals(
(str(records.start_datetime), str(records.stop_datetime)),
(u'2017-07-10 15:30:00', u'2017-07-10 16:00:00'),
)
def test_event_order(self):
""" check the ordering of events when searching """
def create_event(name, date):
return self.CalendarEvent.create({
'name': name,
'start': date + ' 12:00:00',
'stop': date + ' 14:00:00',
'duration': 2.0,
})
foo1 = create_event('foo', '2011-04-01')
foo2 = create_event('foo', '2011-06-01')
bar1 = create_event('bar', '2011-05-01')
bar2 = create_event('bar', '2011-06-01')
domain = [('id', 'in', (foo1 + foo2 + bar1 + bar2).ids)]
# sort them by name only
events = self.CalendarEvent.search(domain, order='name')
self.assertEqual(events.mapped('name'), ['bar', 'bar', 'foo', 'foo'])
events = self.CalendarEvent.search(domain, order='name desc')
self.assertEqual(events.mapped('name'), ['foo', 'foo', 'bar', 'bar'])
# sort them by start date only
events = self.CalendarEvent.search(domain, order='start')
self.assertEqual(events.mapped('start'), (foo1 + bar1 + foo2 + bar2).mapped('start'))
events = self.CalendarEvent.search(domain, order='start desc')
self.assertEqual(events.mapped('start'), (foo2 + bar2 + bar1 + foo1).mapped('start'))
# sort them by name then start date
events = self.CalendarEvent.search(domain, order='name asc, start asc')
self.assertEqual(list(events), [bar1, bar2, foo1, foo2])
events = self.CalendarEvent.search(domain, order='name asc, start desc')
self.assertEqual(list(events), [bar2, bar1, foo2, foo1])
events = self.CalendarEvent.search(domain, order='name desc, start asc')
self.assertEqual(list(events), [foo1, foo2, bar1, bar2])
events = self.CalendarEvent.search(domain, order='name desc, start desc')
self.assertEqual(list(events), [foo2, foo1, bar2, bar1])
# sort them by start date then name
events = self.CalendarEvent.search(domain, order='start asc, name asc')
self.assertEqual(list(events), [foo1, bar1, bar2, foo2])
events = self.CalendarEvent.search(domain, order='start asc, name desc')
self.assertEqual(list(events), [foo1, bar1, foo2, bar2])
events = self.CalendarEvent.search(domain, order='start desc, name asc')
self.assertEqual(list(events), [bar2, foo2, bar1, foo1])
events = self.CalendarEvent.search(domain, order='start desc, name desc')
self.assertEqual(list(events), [foo2, bar2, bar1, foo1])
    def test_event_activity(self):
        """A calendar event tied to a record syncs its mail.activity: creation,
        updates (summary/note/user/deadline) and deletion are mirrored."""
        # ensure meeting activity type exists
        meeting_act_type = self.env['mail.activity.type'].search([('category', '=', 'meeting')], limit=1)
        if not meeting_act_type:
            meeting_act_type = self.env['mail.activity.type'].create({
                'name': 'Meeting Test',
                'category': 'meeting',
            })
        # have a test model inheriting from activities
        test_record = self.env['res.partner'].create({
            'name': 'Test',
        })
        now = datetime.now()
        test_user = self.env.ref('base.user_demo')
        test_name, test_description, test_description2 = 'Test-Meeting', '<p>Test-Description</p>', '<p>NotTest</p>'
        # create using default_* keys
        test_event = self.env['calendar.event'].sudo(test_user).with_context(
            default_res_model=test_record._name,
            default_res_id=test_record.id,
        ).create({
            'name': test_name,
            'description': test_description,
            'start': fields.Datetime.to_string(now + timedelta(days=-1)),
            'stop': fields.Datetime.to_string(now + timedelta(hours=2)),
            'user_id': self.env.user.id,
        })
        # The event must be linked to the record and have spawned one activity
        # mirroring its name, description, owner and (start-based) deadline.
        self.assertEqual(test_event.res_model, test_record._name)
        self.assertEqual(test_event.res_id, test_record.id)
        self.assertEqual(len(test_record.activity_ids), 1)
        self.assertEqual(test_record.activity_ids.summary, test_name)
        self.assertEqual(test_record.activity_ids.note, test_description)
        self.assertEqual(test_record.activity_ids.user_id, self.env.user)
        self.assertEqual(test_record.activity_ids.date_deadline, (now + timedelta(days=-1)).date())
        # updating event should update activity
        test_event.write({
            'name': '%s2' % test_name,
            'description': test_description2,
            'start': fields.Datetime.to_string(now + timedelta(days=-2)),
            'user_id': test_user.id,
        })
        self.assertEqual(test_record.activity_ids.summary, '%s2' % test_name)
        self.assertEqual(test_record.activity_ids.note, test_description2)
        self.assertEqual(test_record.activity_ids.user_id, test_user)
        self.assertEqual(test_record.activity_ids.date_deadline, (now + timedelta(days=-2)).date())
        # deleting meeting should delete its activity
        test_record.activity_ids.unlink()
        self.assertEqual(self.env['calendar.event'], self.env['calendar.event'].search([('name', '=', test_name)]))
        # create using active_model keys
        # NOTE: start/stop are passed here as raw datetimes (not strings),
        # unlike the default_* branch above.
        test_event = self.env['calendar.event'].sudo(self.env.ref('base.user_demo')).with_context(
            active_model=test_record._name,
            active_id=test_record.id,
        ).create({
            'name': test_name,
            'description': test_description,
            'start': now + timedelta(days=-1),
            'stop': now + timedelta(hours=2),
            'user_id': self.env.user.id,
        })
        self.assertEqual(test_event.res_model, test_record._name)
        self.assertEqual(test_event.res_id, test_record.id)
        self.assertEqual(len(test_record.activity_ids), 1)
def test_event_allday(self):
self.env.user.tz = 'Pacific/Honolulu'
event = self.CalendarEvent.create({
'name': '<NAME>',
'start': "2018-10-16 00:00:00",
'start_date': "2018-10-16",
'start_datetime': False,
'stop': "2018-10-18 00:00:00",
'stop_date': "2018-10-18",
'stop_datetime': False,
'allday': True,
})
self.assertEqual(str(event.start), '2018-10-16 08:00:00')
self.assertEqual(str(event.stop), '2018-10-18 18:00:00')
def test_recurring_around_dst(self):
m = self.CalendarEvent.create({
'name': "wheee",
'start': '2018-10-27 14:30:00',
'allday': False,
'rrule': u'FREQ=DAILY;INTERVAL=1;COUNT=4',
'duration': 2,
'stop': '2018-10-27 16:30:00',
})
start_recurring_dates = m.with_context({'tz': 'Europe/Brussels'})._get_recurrent_date_by_event()
self.assertEqual(len(start_recurring_dates), 4)
for d in start_recurring_dates:
self.assertEqual(d.tzinfo, pytz.UTC)
if d.day < 28: # DST switch happens between 2018-10-27 and 2018-10-28
self.assertEqual(d.hour, 14)
else:
self.assertEqual(d.hour, 15)
self.assertEqual(d.minute, 30)
def test_event_activity_timezone(self):
activty_type = self.env['mail.activity.type'].create({
'name': 'Meeting',
'category': 'meeting'
})
activity_id = self.env['mail.activity'].create({
'summary': 'Meeting with partner',
'activity_type_id': activty_type.id,
'res_model_id': self.env['ir.model'].search([('model', '=', 'res.partner')], limit=1).id,
'res_id': self.env['res.partner'].search([('name', 'ilike', 'Deco Addict')], limit=1).id,
})
calendar_event = self.env['calendar.event'].create({
'name': 'Meeting with partner',
'activity_ids': [(6, False, activity_id.ids)],
'start': '2018-11-12 21:00:00',
'stop': '2018-11-13 00:00:00',
})
# Check output in UTC
self.assertEqual(str(activity_id.date_deadline), '2018-11-12')
# Check output in the user's tz
# write on the event to trigger sync of activities
calendar_event.with_context({'tz': 'Australia/Brisbane'}).write({
'start': '2018-11-12 21:00:00',
})
self.assertEqual(str(activity_id.date_deadline), '2018-11-13')
def test_event_allday_activity_timezone(self):
# Covers use case of commit eef4c3b48bcb4feac028bf640b545006dd0c9b91
# Also, read the comment in the code at calendar.event._inverse_dates
activty_type = self.env['mail.activity.type'].create({
'name': 'Meeting',
'category': 'meeting'
})
activity_id = self.env['mail.activity'].create({
'summary': 'Meeting with partner',
'activity_type_id': activty_type.id,
'res_model_id': self.env['ir.model'].search([('model', '=', 'res.partner')], limit=1).id,
'res_id': self.env['res.partner'].search([('name', 'ilike', '<NAME>')], limit=1).id,
})
calendar_event = self.env['calendar.event'].create({
'name': '<NAME>',
'start': "2018-10-16 00:00:00",
'start_date': "2018-10-16",
'start_datetime': False,
'stop': "2018-10-18 00:00:00",
'stop_date': "2018-10-18",
'stop_datetime': False,
'allday': True,
'activity_ids': [(6, False, activity_id.ids)],
})
# Check output in UTC
self.assertEqual(str(activity_id.date_deadline), '2018-10-16')
# Check output in the user's tz
# write on the event to trigger sync of activities
calendar_event.with_context({'tz': 'Pacific/Honolulu'}).write({
'start': '2018-10-16 00:00:00',
'start_date': '2018-10-16',
})
self.assertEqual(str(activity_id.date_deadline), '2018-10-16')
def test_event_creation_mail(self):
"""
Check that mail are sent to the | |
metadata:
if 'readme_files' in metadata:
readme_files_dict = readme_util.build_readme_files_dict( metadata )
folder_id, readme_files_root_folder = build_readme_files_folder( trans, folder_id, readme_files_dict )
containers_dict[ 'readme_files' ] = readme_files_root_folder
# Repository dependencies container.
folder_id, repository_dependencies_root_folder = build_repository_dependencies_folder( trans=trans,
folder_id=folder_id,
repository_dependencies=repository_dependencies,
label='Repository dependencies',
installed=False )
if repository_dependencies_root_folder:
containers_dict[ 'repository_dependencies' ] = repository_dependencies_root_folder
# Tool dependencies container.
if metadata:
if 'tool_dependencies' in metadata:
tool_dependencies = metadata[ 'tool_dependencies' ]
if trans.webapp.name == 'tool_shed':
if 'orphan_tool_dependencies' in metadata:
orphan_tool_dependencies = metadata[ 'orphan_tool_dependencies' ]
tool_dependencies = add_orphan_settings_to_tool_dependencies( tool_dependencies, orphan_tool_dependencies )
folder_id, tool_dependencies_root_folder = build_tool_dependencies_folder( trans,
folder_id,
tool_dependencies,
missing=False,
new_install=False )
containers_dict[ 'tool_dependencies' ] = tool_dependencies_root_folder
# Valid tools container.
if metadata:
if 'tools' in metadata:
valid_tools = metadata[ 'tools' ]
folder_id, valid_tools_root_folder = build_tools_folder( trans,
folder_id,
valid_tools,
repository,
changeset_revision,
label='Valid tools' )
containers_dict[ 'valid_tools' ] = valid_tools_root_folder
# Tool test results container.
if tool_test_results and len( tool_test_results ) > 1:
# Only create and populate this folder if there are actual tool test results to display, since the display of the 'Test environment'
# folder by itself can be misleading. We check for more than a single entry in the tool_test_results dictionary because it may have
# only the "test_environment" entry, but we want at least 1 of "passed_tests", "failed_tests", "installation_errors", "missing_test_components"
# "skipped_tests", "not_tested" or any other entry that may be added in the future.
folder_id, tool_test_results_root_folder = build_tool_test_results_folder( trans, folder_id, tool_test_results, time_last_tested=time_last_tested )
containers_dict[ 'tool_test_results' ] = tool_test_results_root_folder
# Workflows container.
if metadata:
if 'workflows' in metadata:
workflows = metadata[ 'workflows' ]
folder_id, workflows_root_folder = build_workflows_folder( trans=trans,
folder_id=folder_id,
workflows=workflows,
repository_metadata_id=repository_metadata.id,
repository_id=None,
label='Workflows' )
containers_dict[ 'workflows' ] = workflows_root_folder
# Valid Data Managers container
if metadata:
if 'data_manager' in metadata:
data_managers = metadata['data_manager'].get( 'data_managers', None )
folder_id, data_managers_root_folder = build_data_managers_folder( trans, folder_id, data_managers, label="Data Managers" )
containers_dict[ 'valid_data_managers' ] = data_managers_root_folder
error_messages = metadata['data_manager'].get( 'error_messages', None )
data_managers = metadata['data_manager'].get( 'invalid_data_managers', None )
folder_id, data_managers_root_folder = build_invalid_data_managers_folder( trans, folder_id, data_managers, error_messages, label="Invalid Data Managers" )
containers_dict[ 'invalid_data_managers' ] = data_managers_root_folder
except Exception, e:
log.debug( "Exception in build_repository_containers_for_tool_shed: %s" % str( e ) )
finally:
lock.release()
return containers_dict
def build_repository_dependencies_folder( trans, folder_id, repository_dependencies, label='Repository dependencies', installed=False ):
    """Return a folder hierarchy containing repository dependencies.

    Returns a ( folder_id, root_folder ) tuple; root_folder is None when there
    are no repository dependencies.
    """
    if not repository_dependencies:
        return folder_id, None
    repository_dependency_id = 0
    # Root container folder.
    folder_id += 1
    repository_dependencies_root_folder = Folder( id=folder_id, key='root', label='root', parent=None )
    # Child folder that actually lists the dependencies.
    folder_id += 1
    repository_dependencies_folder_key = repository_dependencies[ 'root_key' ]
    repository_dependencies_folder = Folder( id=folder_id, key=repository_dependencies_folder_key, label=label, parent=repository_dependencies_root_folder )
    del repository_dependencies[ 'root_key' ]
    # The received dict has keys 'root_key', 'description' and one or more
    # repository_dependency keys; the description belongs on the child folder.
    repository_dependencies_folder.description = repository_dependencies.get( 'description', None )
    repository_dependencies_root_folder.folders.append( repository_dependencies_folder )
    del repository_dependencies[ 'description' ]
    repository_dependencies_folder, folder_id, repository_dependency_id = \
        populate_repository_dependencies_container( trans, repository_dependencies_folder, repository_dependencies, folder_id, repository_dependency_id )
    repository_dependencies_folder = prune_repository_dependencies( repository_dependencies_folder )
    return folder_id, repository_dependencies_root_folder
def build_tools_folder( trans, folder_id, tool_dicts, repository, changeset_revision, valid=True, label='Valid tools' ):
    """Return a folder hierarchy containing valid tools.

    Returns a ( folder_id, root_folder ) tuple; root_folder is None when there
    are no tool dicts.
    """
    if not tool_dicts:
        return folder_id, None
    tool_id = 0
    folder_id += 1
    tools_root_folder = Folder( id=folder_id, key='root', label='root', parent=None )
    folder_id += 1
    folder = Folder( id=folder_id, key='tools', label=label, parent=tools_root_folder )
    if trans.webapp.name == 'galaxy':
        folder.description = 'click the name to inspect the tool metadata'
    tools_root_folder.folders.append( folder )
    # Insert a header row.
    tool_id += 1
    header = Tool( id=tool_id,
                   tool_config='',
                   tool_id='',
                   name='Name',
                   description='Description',
                   version='Version',
                   requirements='',
                   repository_id='',
                   changeset_revision='' )
    folder.valid_tools.append( header )
    # Installation status is only meaningful when rendering inside Galaxy.
    if repository:
        repository_id = repository.id
        repository_installation_status = repository.status if trans.webapp.name == 'galaxy' else None
    else:
        repository_id = None
        repository_installation_status = None
    for tool_dict in tool_dicts:
        tool_id += 1
        if 'requirements' in tool_dict:
            # Render requirements as "name (type), name (type), ...".
            requirements_str = ', '.join( '%s (%s)' % ( requirement_dict[ 'name' ], requirement_dict[ 'type' ] )
                                          for requirement_dict in tool_dict[ 'requirements' ] )
        else:
            requirements_str = 'none'
        folder.valid_tools.append( Tool( id=tool_id,
                                         tool_config=tool_dict[ 'tool_config' ],
                                         tool_id=tool_dict[ 'id' ],
                                         name=tool_dict[ 'name' ],
                                         description=tool_dict[ 'description' ],
                                         version=tool_dict[ 'version' ],
                                         requirements=requirements_str,
                                         repository_id=repository_id,
                                         changeset_revision=changeset_revision,
                                         repository_installation_status=repository_installation_status ) )
    return folder_id, tools_root_folder
def build_tool_dependencies_folder( trans, folder_id, tool_dependencies, label='Tool dependencies', missing=False, new_install=False, reinstalling=False ):
    """Return a folder hierarchy containing tool dependencies.

    Returns a ( folder_id, root_folder ) tuple; root_folder is None when there
    are no tool dependencies.
    """
    # When we're in Galaxy (not the tool shed) and the tool dependencies are not installed or are in an error state, they are considered missing. The tool
    # dependency status will be displayed only if a record exists for the tool dependency in the Galaxy database, but the tool dependency is not installed.
    # The value for new_install will be True only if the associated repository in being installed for the first time. This value is used in setting the
    # container description.
    if tool_dependencies:
        tool_dependency_id = 0
        # Root container folder.
        folder_id += 1
        tool_dependencies_root_folder = Folder( id=folder_id, key='root', label='root', parent=None )
        # Child folder that actually lists the dependencies.
        folder_id += 1
        folder = Folder( id=folder_id, key='tool_dependencies', label=label, parent=tool_dependencies_root_folder )
        if trans.webapp.name == 'galaxy':
            # Description depends on the install context (see comment above).
            if new_install or reinstalling:
                folder.description = "repository tools require handling of these dependencies"
            elif missing and not new_install and not reinstalling:
                folder.description = 'click the name to install the missing dependency'
            else:
                folder.description = 'click the name to browse the dependency installation directory'
        tool_dependencies_root_folder.folders.append( folder )
        # Insert a header row.
        tool_dependency_id += 1
        if trans.webapp.name == 'galaxy':
            # Include the installation directory.
            tool_dependency = ToolDependency( id=tool_dependency_id,
                                              name='Name',
                                              version='Version',
                                              type='Type',
                                              install_dir='Install directory',
                                              readme=None,
                                              installation_status='Installation status',
                                              repository_id=None,
                                              tool_dependency_id=None,
                                              is_orphan=None )
        else:
            # Tool shed view: no install directory, but an 'Orphan' column.
            tool_dependency = ToolDependency( id=tool_dependency_id,
                                              name='Name',
                                              version='Version',
                                              type='Type',
                                              install_dir=None,
                                              readme=None,
                                              installation_status=None,
                                              repository_id=None,
                                              tool_dependency_id=None,
                                              is_orphan='Orphan' )
        folder.tool_dependencies.append( tool_dependency )
        is_orphan_description = "these dependencies may not be required by tools in this repository"
        for dependency_key, requirements_dict in tool_dependencies.items():
            tool_dependency_id += 1
            # 'set_environment' entries hold a list of dicts, one per
            # environment variable, rather than a single requirements dict.
            if dependency_key in [ 'set_environment' ]:
                for set_environment_dict in requirements_dict:
                    if trans.webapp.name == 'tool_shed':
                        is_orphan = set_environment_dict.get( 'is_orphan', False )
                    else:
                        # TODO: handle this in Galaxy
                        is_orphan = False
                    if is_orphan:
                        # Flag the whole container when any orphan is present.
                        folder.description = is_orphan_description
                    name = set_environment_dict.get( 'name', None )
                    type = set_environment_dict[ 'type' ]
                    repository_id = set_environment_dict.get( 'repository_id', None )
                    td_id = set_environment_dict.get( 'tool_dependency_id', None )
                    if trans.webapp.name == 'galaxy':
                        installation_status = set_environment_dict.get( 'status', 'Never installed' )
                    else:
                        installation_status = None
                    tool_dependency = ToolDependency( id=tool_dependency_id,
                                                      name=name,
                                                      version=None,
                                                      type=type,
                                                      install_dir=None,
                                                      readme=None,
                                                      installation_status=installation_status,
                                                      repository_id=repository_id,
                                                      tool_dependency_id=td_id,
                                                      is_orphan=is_orphan )
                    folder.tool_dependencies.append( tool_dependency )
            else:
                # Ordinary package/tool dependency: a single requirements dict.
                if trans.webapp.name == 'tool_shed':
                    is_orphan = requirements_dict.get( 'is_orphan', False )
                else:
                    # TODO: handle this in Galaxy
                    is_orphan = False
                if is_orphan:
                    # Flag the whole container when any orphan is present.
                    folder.description = is_orphan_description
                name = requirements_dict[ 'name' ]
                version = requirements_dict[ 'version' ]
                type = requirements_dict[ 'type' ]
                install_dir = requirements_dict.get( 'install_dir', None )
                repository_id = requirements_dict.get( 'repository_id', None )
                td_id = requirements_dict.get( 'tool_dependency_id', None )
                if trans.webapp.name == 'galaxy':
                    installation_status = requirements_dict.get( 'status', 'Never installed' )
                else:
                    installation_status = None
                tool_dependency = ToolDependency( id=tool_dependency_id,
                                                  name=name,
                                                  version=version,
                                                  type=type,
                                                  install_dir=install_dir,
                                                  readme=None,
                                                  installation_status=installation_status,
                                                  repository_id=repository_id,
                                                  tool_dependency_id=td_id,
                                                  is_orphan=is_orphan )
                folder.tool_dependencies.append( tool_dependency )
    else:
        tool_dependencies_root_folder = None
    return folder_id, tool_dependencies_root_folder
def build_tool_test_results_folder( trans, folder_id, tool_test_results_dict, label='Tool test results', time_last_tested=None ):
"""Return a folder hierarchy containing tool dependencies."""
# This container is displayed only in the tool shed.
if tool_test_results_dict:
folder_id += 1
tool_test_results_root_folder = Folder( id=folder_id, key='root', label='root', parent=None )
test_environment_dict = tool_test_results_dict.get( 'test_environment', None )
if test_environment_dict:
folder_id += 1
test_results_folder = Folder( id=folder_id, key='test_results', label=label, parent=tool_test_results_root_folder )
tool_test_results_root_folder.folders.append( test_results_folder )
folder_id += 1
folder = Folder( id=folder_id, key='test_environment', label='Automated test environment', parent=test_results_folder )
test_results_folder.folders.append( folder )
test_environment = TestEnvironment( id=1,
architecture=test_environment_dict.get( 'architecture', '' ),
galaxy_database_version=test_environment_dict.get( 'galaxy_database_version', '' ),
galaxy_revision=test_environment_dict.get( 'galaxy_revision', '' ),
python_version=test_environment_dict.get( 'python_version', '' ),
system=test_environment_dict.get( 'system', '' ),
time_last_tested=time_last_tested,
tool_shed_database_version=test_environment_dict.get( 'tool_shed_database_version', '' ),
tool_shed_mercurial_version=test_environment_dict.get( 'tool_shed_mercurial_version', '' ),
tool_shed_revision=test_environment_dict.get( 'tool_shed_revision', '' ) )
folder.test_environments.append( test_environment )
not_tested_dict = tool_test_results_dict.get( 'not_tested', {} )
if not_tested_dict:
folder_id += 1
folder = Folder( id=folder_id, key='not_tested', label='Not tested', parent=test_results_folder )
test_results_folder.folders.append( folder )
not_tested_id = 0
not_tested = NotTested( id=not_tested_id,
reason=not_tested_dict.get( 'reason', '' ) )
folder.not_tested.append( not_tested )
passed_tests_dicts = tool_test_results_dict.get( 'passed_tests', [] )
if passed_tests_dicts:
folder_id += 1
folder = Folder( id=folder_id, key='passed_tests', label='Tests that passed successfully', parent=test_results_folder )
test_results_folder.folders.append( folder )
passed_test_id = |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.