hexsha stringlengths 40 40 | size int64 1 1.03M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 239 | max_stars_repo_name stringlengths 5 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 239 | max_issues_repo_name stringlengths 5 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 239 | max_forks_repo_name stringlengths 5 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.03M | avg_line_length float64 1 958k | max_line_length int64 1 1.03M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
acef00d7c918772e98db37d5facd1d1e4103f36c | 191 | py | Python | __main__.py | chen-charles/sysbd | c30fc85f8f9ef3e8a8a5474001abfd8ad5fb9e4a | [
"MIT"
] | null | null | null | __main__.py | chen-charles/sysbd | c30fc85f8f9ef3e8a8a5474001abfd8ad5fb9e4a | [
"MIT"
] | null | null | null | __main__.py | chen-charles/sysbd | c30fc85f8f9ef3e8a8a5474001abfd8ad5fb9e4a | [
"MIT"
] | null | null | null | import pkg_resources
DEFAULTPYBUILD = pkg_resources.resource_string(__name__, 'pybuild.default')
exec(pkg_resources.resource_string(__name__, 'build.py'), {"DEFAULTPYBUILD": DEFAULTPYBUILD})
| 47.75 | 93 | 0.827225 |
acef017112f8171b15d7aae6b3da959b15f2d42c | 83,273 | py | Python | test/test_sparse_csr.py | nponte/pytorch | 72f7193f4d977c8dd4fc0b99bd06a26b81e7d6cd | [
"Intel"
] | null | null | null | test/test_sparse_csr.py | nponte/pytorch | 72f7193f4d977c8dd4fc0b99bd06a26b81e7d6cd | [
"Intel"
] | null | null | null | test/test_sparse_csr.py | nponte/pytorch | 72f7193f4d977c8dd4fc0b99bd06a26b81e7d6cd | [
"Intel"
] | null | null | null | # Owner(s): ["module: sparse"]
import torch
import random
import itertools
import unittest
from torch.testing import make_tensor
from torch.testing._internal.common_cuda import SM53OrLater, SM80OrLater, TEST_CUSPARSE_GENERIC
from torch.testing._internal.common_utils import \
(TEST_WITH_ROCM, TEST_SCIPY, TEST_MKL, IS_WINDOWS, TestCase, run_tests, load_tests, coalescedonoff, parametrize)
from torch.testing._internal.common_device_type import \
(ops, instantiate_device_type_tests, dtypes, OpDTypes, dtypesIfCUDA, onlyCPU, onlyCUDA, skipCUDAIfNoCusparseGeneric,
precisionOverride, skipMeta, skipCUDAIf, skipCUDAIfRocm, skipCPUIfNoMklSparse)
from torch.testing._internal.common_methods_invocations import \
(op_db, sparse_csr_unary_ufuncs, )
from torch.testing._internal.common_cuda import _get_torch_cuda_version, CUDA11OrLater
from torch.testing._internal.common_dtype import (
floating_types, all_types_and_complex_and, floating_and_complex_types, floating_types_and,
all_types_and_complex, floating_and_complex_types_and
)
from test_sparse import CUSPARSE_SPMM_COMPLEX128_SUPPORTED
# SciPy is an optional test dependency; `sp` is only bound when it is present
# (tests using it are guarded with @unittest.skipIf(not TEST_SCIPY, ...)).
if TEST_SCIPY:
    import scipy.sparse as sp

# load_tests from torch.testing._internal.common_utils is used to automatically filter tests for
# sharding on sandcastle. This line silences flake warnings
load_tests = load_tests

# MKL-backed sparse CPU kernels are unavailable on Windows builds or when
# torch was compiled without MKL support.
no_mkl_sparse = IS_WINDOWS or not TEST_MKL
def _check_cusparse_triangular_solve_available():
    """Return True if the linked CUDA toolkit ships cusparseSpSM.

    cusparseSpSM was introduced in CUDA 11.3.1, but only (major, minor) is
    observable here, so require at least 11.4 to be safe.
    """
    minimum = (11, 4)
    return _get_torch_cuda_version() >= minimum
def _check_cusparse_spgemm_available():
    """Return True if the linked CUDA toolkit ships cusparseSpGEMM (CUDA 11.0+)."""
    minimum = (11, 0)
    return _get_torch_cuda_version() >= minimum
def _check_cusparse_sddmm_available():
    """Return True if the linked CUDA toolkit ships cusparseSDDMM.

    cusparseSDDMM was introduced in CUDA 11.2.1, but only (major, minor) is
    observable here, so require at least 11.3 to be safe.
    """
    minimum = (11, 3)
    return _get_torch_cuda_version() >= minimum
# OpInfos restricted to operators that advertise sparse CSR support.
_sparse_csr_ops = list(filter(lambda op: op.supports_sparse_csr, op_db))
# Binary functions that accept a sparse CSR operand but produce a strided
# (dense) result tensor.
binary_functions_with_dense_output = ['mm', 'mv', ]
binary_ops_with_dense_output = list(filter(lambda op: op.name in binary_functions_with_dense_output, op_db))
# This should be just an import from test_linalg instead of code duplication
# but https://github.com/pytorch/pytorch/pull/63511#discussion_r733989701
def _test_addmm_addmv(
    test_case,
    f,
    t,
    m,
    v,
    *,
    alpha=None,
    beta=None,
    transpose_out=False,
    layout=torch.strided,
    mode=None
):
    """
    Unified test for checking `f(t, m, v, alpha=alpha, beta=beta)` computation,
    where f is `torch.addmv` or `torch.addmm`.
    `transpose_out` controls whether the out argument is in column-major order.
    `layout` controls whether `m` is converted to specified layout or not.
    Custom behaviour is implemented only for torch.sparse_csr layout.

    `mode` selects which operands are converted:
      - "all_sparse": t, m and v are all converted (result densified for comparison)
      - "dense_result": m and v are converted, t stays strided
      - otherwise: only m is converted
    """
    dtype = t.dtype
    numpy_dtype = dtype
    # bfloat16 has no NumPy counterpart; compute the reference in float32.
    if dtype in {torch.bfloat16}:
        numpy_dtype = torch.float
    # Default scalar coefficients; complex dtypes get complex values.
    if dtype.is_complex:
        alpha = 0.9 + 0.3j if alpha is None else alpha
        beta = 0.5 + 0.6j if beta is None else beta
    else:
        alpha = 1.2 if alpha is None else alpha
        beta = 0.8 if beta is None else beta

    def convert_layout(mat):
        # Only conversion to sparse CSR is implemented; any other requested
        # layout must already match the tensor's layout.
        if layout == torch.sparse_csr:
            return mat.to_sparse_csr()
        else:
            assert mat.layout == layout
            return mat

    if mode == "all_sparse":
        res1 = f(*map(convert_layout, (t, m, v)), alpha=alpha, beta=beta)
        res1 = res1.to_dense()
    elif mode == "dense_result":
        res1 = f(t, convert_layout(m), convert_layout(v), alpha=alpha, beta=beta)
    else:
        res1 = f(t, convert_layout(m), v, alpha=alpha, beta=beta)
    # Exercise the out= variant; NaN pre-fill makes unwritten elements visible.
    res2 = torch.full_like(res1, float('nan'))
    if transpose_out:
        res2 = res2.t().clone(memory_format=torch.contiguous_format).t()
    f(t, convert_layout(m), v, alpha=alpha, beta=beta, out=res2)
    # NumPy reference: alpha * (m @ v) + beta * t.
    res3 = alpha * (m.to(numpy_dtype).cpu().numpy() @ v.to(numpy_dtype).cpu().numpy())
    if beta != 0:
        res3 += (beta * t).to(numpy_dtype).cpu().numpy()
    res3 = torch.from_numpy(res3).to(dtype)
    test_case.assertEqual(res1, res2)
    test_case.assertEqual(res1, res3)
class TestSparseCSRSampler(TestCase):
    """Validates the random crow_indices generator used by the CSR test helpers."""

    def test_make_crow_indices(self):
        # Here we test the correctness of the crow_indices algorithm
        # and testing it on CPU and with int32 dtype will be
        # sufficient.
        device = torch.device('cpu')
        index_dtype = torch.int32
        for n_rows in range(1, 10):
            for n_cols in range(1, 10):
                for nnz in range(0, n_rows * n_cols + 1):
                    crow_indices = self._make_crow_indices(
                        n_rows, n_cols, nnz,
                        device=device, dtype=index_dtype)
                    # CSR invariants: one pointer per row plus a terminator,
                    # per-row counts sum to nnz and each fits within a row.
                    self.assertEqual(len(crow_indices), n_rows + 1)
                    counts = crow_indices[1:] - crow_indices[:-1]
                    self.assertEqual(counts.sum(), nnz)
                    self.assertGreaterEqual(counts.min(), 0)
                    self.assertLessEqual(counts.max(), n_cols)
class TestSparseCSR(TestCase):
@onlyCPU
def test_csr_layout(self):
self.assertEqual(str(torch.sparse_csr), 'torch.sparse_csr')
self.assertEqual(type(torch.sparse_csr), torch.layout)
    @dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
    def test_sparse_csr_constructor_shape_inference(self, device, dtype):
        """Without an explicit size, the shape is inferred from the indices."""
        crow_indices = [0, 2, 4]
        col_indices = [0, 1, 0, 1]
        values = [1, 2, 3, 4]
        sparse = torch.sparse_csr_tensor(torch.tensor(crow_indices, dtype=torch.int64),
                                         torch.tensor(col_indices, dtype=torch.int64),
                                         torch.tensor(values), dtype=dtype, device=device)
        self.assertEqual(torch.tensor(crow_indices, dtype=torch.int64), sparse.crow_indices())
        # Inferred shape: rows = len(crow_indices) - 1, cols = max(col_indices) + 1.
        self.assertEqual((len(crow_indices) - 1, max(col_indices) + 1), sparse.shape)
        self.assertEqual(dtype, sparse.dtype)
        self.assertEqual(torch.device(device), sparse.device)
    @dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
    def test_sparse_csr_constructor(self, device, dtype):
        """Explicit-size construction round-trips crow/col indices and values."""
        crow_indices = [0, 2, 4]
        col_indices = [0, 1, 0, 1]
        values = [1, 2, 3, 4]
        # Both supported index dtypes must be preserved by the constructor.
        for index_dtype in [torch.int32, torch.int64]:
            sparse = torch.sparse_csr_tensor(torch.tensor(crow_indices, dtype=index_dtype),
                                             torch.tensor(col_indices, dtype=index_dtype),
                                             torch.tensor(values),
                                             size=(2, 10),
                                             dtype=dtype,
                                             device=device)
            self.assertEqual((2, 10), sparse.shape)
            self.assertEqual(torch.tensor(crow_indices, dtype=index_dtype), sparse.crow_indices())
            self.assertEqual(torch.tensor(col_indices, dtype=index_dtype), sparse.col_indices())
            self.assertEqual(torch.tensor(values, dtype=dtype), sparse.values())
    @dtypes(*all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool))
    def test_sparse_csr_constructor_from_lists(self, device, dtype):
        """Plain Python lists are accepted; indices default to int64."""
        # without size
        sparse = torch.sparse_csr_tensor([0, 2, 4],
                                         [0, 1, 0, 1],
                                         [1, 2, 3, 4],
                                         dtype=dtype,
                                         device=device)
        self.assertEqual((2, 2), sparse.shape)
        self.assertEqual(4, sparse.numel())
        self.assertEqual(torch.tensor([0, 2, 4], dtype=torch.int64, device=device), sparse.crow_indices())
        self.assertEqual(torch.tensor([0, 1, 0, 1], dtype=torch.int64, device=device), sparse.col_indices())
        self.assertEqual(torch.tensor([1, 2, 3, 4], dtype=dtype, device=device), sparse.values())

        # with size
        # The unsafe variant skips invariant validation but must accept the same inputs.
        for sparse_csr_tensor in [torch.sparse_csr_tensor, torch._sparse_csr_tensor_unsafe]:
            sparse = sparse_csr_tensor([0, 2, 4],
                                       [0, 1, 0, 1],
                                       [1, 2, 3, 4],
                                       size=(2, 10),
                                       dtype=dtype,
                                       device=device)
            self.assertEqual((2, 10), sparse.shape)
            self.assertEqual(torch.tensor([0, 2, 4], dtype=torch.int64, device=device), sparse.crow_indices())
            self.assertEqual(torch.tensor([0, 1, 0, 1], dtype=torch.int64, device=device), sparse.col_indices())
            self.assertEqual(torch.tensor([1, 2, 3, 4], dtype=dtype, device=device), sparse.values())
    @skipMeta
    @dtypes(*all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half))
    def test_empty(self, device, dtype):
        """torch.empty with layout=sparse_csr yields a valid zero-nnz CSR tensor."""
        ns = [5, 2, 0]
        for shape in itertools.product(ns, ns):
            result = torch.empty(shape, dtype=dtype, device=device, layout=torch.sparse_csr)
            self.assertEqual(result.shape, shape)
            self.assertEqual(result.dtype, dtype)
            self.assertEqual(result.device, torch.device(device))
            self.assertEqual(result.layout, torch.sparse_csr)
            # An empty CSR tensor still carries rows+1 row pointers but no
            # column indices or values.
            self.assertEqual(result.crow_indices().shape, (shape[0] + 1,))
            self.assertEqual(result.col_indices().shape, (0,))
            self.assertEqual(result.values().shape, (0,))
            self.assertEqual(result._nnz(), 0)
            self.assertEqual(result.crow_indices().device, torch.device(device))
            self.assertEqual(result.col_indices().device, torch.device(device))
            self.assertEqual(result.values().device, torch.device(device))
            self.assertEqual(result.crow_indices().dtype, torch.int64)
            self.assertEqual(result.col_indices().dtype, torch.int64)
            self.assertEqual(result.values().dtype, dtype)
    @skipMeta
    @dtypes(*all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16))
    def test_empty_errors(self, device, dtype):
        """torch.empty rejects non-2D shapes for the sparse CSR layout."""
        with self.assertRaisesRegex(RuntimeError, "torch.empty: Only 2D sparse CSR tensors are supported."):
            torch.empty((5,), dtype=dtype, device=device, layout=torch.sparse_csr)

        with self.assertRaisesRegex(RuntimeError, "torch.empty: Only 2D sparse CSR tensors are supported."):
            torch.empty((2, 3, 4), dtype=dtype, device=device, layout=torch.sparse_csr)
@skipMeta
@dtypes(*all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16))
def test_clone(self, device, dtype):
x = torch.sparse_csr_tensor([0, 2, 4],
[0, 1, 0, 1],
[1, 2, 3, 4],
dtype=dtype,
device=device)
y = x.clone()
self.assertEqual(x, y)
@skipMeta
@dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
def test_copy(self, device, dtype):
def run_test(shape, nnz, index_type):
a = self.genSparseCSRTensor(shape, nnz, dtype=dtype, device=device, index_dtype=index_dtype)
b = self.genSparseCSRTensor(shape, nnz, dtype=dtype, device=device, index_dtype=index_dtype)
a.copy_(b)
self.assertEqual(a, b)
ns = [5, 2, 0]
for shape, index_dtype in zip(itertools.product(ns, ns), [torch.int32, torch.int64]):
run_test(shape, 0, index_dtype)
run_test(shape, shape[0] * shape[1], index_dtype)
    @skipMeta
    @dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
    def test_copy_errors(self, device, dtype):
        """copy_ rejects shape mismatch, layout mismatch, and nnz mismatch."""
        for index_dtype in [torch.int32, torch.int64]:
            shape1 = (2, 3)
            shape2 = (3, 2)
            a = self.genSparseCSRTensor(shape1, 0, dtype=dtype, device=device, index_dtype=index_dtype)
            b = self.genSparseCSRTensor(shape2, 0, dtype=dtype, device=device, index_dtype=index_dtype)

            with self.assertRaisesRegex(RuntimeError, "only same size tensors are supported."):
                a.copy_(b)

            # Copying from a strided tensor into a CSR tensor is unsupported.
            with self.assertRaisesRegex(RuntimeError, "copy between different layouts is not supported."):
                a.copy_(torch.empty(a.shape, dtype=dtype, device=device))

            b = self.genSparseCSRTensor(shape1, 1, dtype=dtype, device=device, index_dtype=index_dtype)
            with self.assertRaisesRegex(RuntimeError, "only tensors with the same number of specified elements are supported."):
                a.copy_(b)
    @skipMeta
    @dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
    def test_resize(self, device, dtype):
        """resize_ grows without adding elements and shrinks by trimming them."""
        for index_dtype in [torch.int32, torch.int64]:
            shape = (2, 3)
            nnz = 6
            a = self.genSparseCSRTensor(shape, nnz, dtype=dtype, device=device, index_dtype=index_dtype)

            new_shape = (4, 5)
            a.resize_(new_shape)

            self.assertEqual(a.shape, new_shape)
            # resize to larger shape doesn't add specified elements
            self.assertEqual(a._nnz(), nnz)

            new_shape = (1, 5)
            a.resize_(new_shape)

            self.assertEqual(a.shape, new_shape)
            # resize to smaller shape trims specified elements
            self.assertEqual(a._nnz(), 5)
    @skipMeta
    @dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
    def test_resize_errors(self, device, dtype):
        """resize_ rejects non-2D targets and column shrinkage."""
        for index_dtype in [torch.int32, torch.int64]:
            shape = (2, 3)
            nnz = 6
            a = self.genSparseCSRTensor(shape, nnz, dtype=dtype, device=device, index_dtype=index_dtype)

            with self.assertRaisesRegex(RuntimeError, "torch.resize_: Only 2D sparse CSR tensors are supported."):
                new_shape = (4,)
                a.resize_(new_shape)

            # resizing of columns to smaller size is not implemented
            with self.assertRaisesRegex(
                RuntimeError,
                "torch.resize_: Resizing columns of sparse CSR tensors to a smaller value is not supported.",
            ):
                new_shape = (2, 2)
                a.resize_(new_shape)
    def test_factory_type_invariants_check(self, device):
        """Constructor rejects mismatched or unsupported index dtypes."""
        # crow_indices and col_indices must share the same dtype.
        with self.assertRaisesRegex(RuntimeError, "both crow_indices and col_indices should have the same type."):
            torch.sparse_csr_tensor(torch.tensor([0, 2, 4], dtype=torch.int64),
                                    torch.tensor([0, 1, 0, 1], dtype=torch.int32),
                                    torch.tensor([1, 2, 3, 4]),
                                    device=device)

        # Only int32/int64 indices are supported; int16 is rejected.
        with self.assertRaisesRegex(RuntimeError, r"\"csr_construct_check\" not implemented for 'Short'"):
            torch.sparse_csr_tensor(torch.tensor([0, 2, 4], dtype=torch.int16),
                                    torch.tensor([0, 1, 0, 1], dtype=torch.int16),
                                    torch.tensor([1, 2, 3, 4]),
                                    device=device)
    def test_factory_layout_invariants_check(self, device):
        """Constructor rejects non-contiguous values/col_indices/crow_indices."""
        # expand() produces a non-contiguous (stride-0) tensor.
        with self.assertRaisesRegex(RuntimeError, "expected values to be a strided and contiguous tensor"):
            values = torch.tensor([1.], device=device).expand(4,)
            torch.sparse_csr_tensor(torch.tensor([0, 2, 4], device=device),
                                    torch.tensor([0, 1, 0, 1], device=device),
                                    values)

        with self.assertRaisesRegex(RuntimeError, "expected col_indices to be a strided and contiguous tensor"):
            col_indices = torch.tensor([0], device=device).expand(4,)
            torch.sparse_csr_tensor(torch.tensor([0, 2, 4]),
                                    col_indices,
                                    torch.tensor([1, 2, 3, 4]))

        # Slicing with a step yields a strided-but-non-contiguous tensor.
        with self.assertRaisesRegex(RuntimeError, "expected crow_indices to be a strided and contiguous tensor"):
            crow_indices = torch.arange(6, device=device)
            torch.sparse_csr_tensor(crow_indices[::2],
                                    torch.tensor([0, 1, 0, 1], device=device),
                                    torch.tensor([1, 2, 3, 4]))
    def test_factory_shape_invariants_check(self, device):
        """Constructor enforces dimensionality constraints on size and components."""
        crow_indices = [0, 2, 4]
        col_indices = [0, 1, 0, 1]
        values = [1, 2, 3, 4]
        size = (2, 10)
        # Baseline: this construction is valid and must not raise.
        torch.sparse_csr_tensor(torch.tensor(crow_indices), torch.tensor(col_indices), torch.tensor(values), size,
                                device=device)

        with self.assertRaisesRegex(RuntimeError, r"size of a CSR tensor must be of length 2, but got: 3"):
            torch.sparse_csr_tensor(torch.tensor(crow_indices), torch.tensor(col_indices), torch.tensor(values),
                                    size=(2, 10, 2),
                                    device=device)

        # Each component tensor must be one-dimensional.
        with self.assertRaisesRegex(RuntimeError, r"crow_indices must have dim\=1 but got crow_indices\.dim\(\)\=2"):
            torch.sparse_csr_tensor(torch.tensor(crow_indices).repeat(2, 1),
                                    torch.tensor(col_indices),
                                    torch.tensor(values),
                                    size,
                                    device=device)

        with self.assertRaisesRegex(RuntimeError, r"col_indices must have dim\=1 but got col_indices\.dim\(\)\=2"):
            torch.sparse_csr_tensor(torch.tensor(crow_indices),
                                    torch.tensor(col_indices).repeat(2, 1),
                                    torch.tensor(values),
                                    size,
                                    device=device)

        with self.assertRaisesRegex(RuntimeError, r"values must have dim\=1 but got values\.dim\(\)\=2"):
            torch.sparse_csr_tensor(torch.tensor(crow_indices),
                                    torch.tensor(col_indices),
                                    torch.tensor(values).repeat(2, 1),
                                    size,
                                    device=device)

        # Cross-component size relationships.
        with self.assertRaisesRegex(RuntimeError,
                                    r"crow_indices\.numel\(\) must be size\(0\) \+ 1, but got: 3"):
            torch.sparse_csr_tensor(torch.tensor(crow_indices), torch.tensor(col_indices), torch.tensor(values), (1, 1),
                                    device=device)

        with self.assertRaisesRegex(RuntimeError,
                                    r"col_indices and values must have equal sizes, " +
                                    r"but got col_indices\.numel\(\): 3, values\.numel\(\): 4"):
            torch.sparse_csr_tensor(torch.tensor(crow_indices), torch.tensor([0, 1, 0]), torch.tensor(values), size,
                                    device=device)
    def test_factory_indices_invariants_check(self, device):
        """Constructor enforces CSR value invariants on the index arrays."""
        crow_indices = [0, 2, 4]
        col_indices = [0, 1, 0, 1]
        values = [1, 2, 3, 4]
        size = (2, 10)
        with self.assertRaisesRegex(RuntimeError, "0th value of crow_indices must be 0."):
            torch.sparse_csr_tensor(torch.tensor([-1, 0, 4]), torch.tensor(col_indices), torch.tensor(values), size,
                                    device=device)

        with self.assertRaisesRegex(RuntimeError,
                                    "last value of crow_indices should be equal to the length of col_indices."):
            torch.sparse_csr_tensor(torch.tensor([0, 2, 5]), torch.tensor(col_indices), torch.tensor(values), size,
                                    device=device)

        # Row pointers must be monotonically non-decreasing.
        with self.assertRaisesRegex(RuntimeError,
                                    r"at position i \= 2," +
                                    r" this condition crow_indices\[i - 1\] <\= crow_indices\[i\] fails"):
            torch.sparse_csr_tensor(torch.tensor([0, 5, 4]), torch.tensor(col_indices), torch.tensor(values), size,
                                    device=device)

        # Column indices must lie within [0, size(1)).
        with self.assertRaisesRegex(RuntimeError, r"col_indices\.min\(\) should be greater or equal to zero"):
            torch.sparse_csr_tensor(torch.tensor(crow_indices), torch.tensor([0, -1, 0, 1]), torch.tensor(values), size,
                                    device=device)

        with self.assertRaisesRegex(RuntimeError, r"size\(1\) should be greater than col_indices\.max\(\)"):
            torch.sparse_csr_tensor(torch.tensor(crow_indices), torch.tensor([0, 11, 0, 1]), torch.tensor(values), size,
                                    device=device)
@onlyCUDA
@dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
def test_factory_device_type_inference(self, device, dtype):
cpu_cuda = ('cpu', 'cuda')
cpu_cuda_none = cpu_cuda + (None,)
for crow_indices_device, col_indices_device, values_device, device in itertools.product(cpu_cuda,
cpu_cuda,
cpu_cuda,
cpu_cuda_none):
for index_dtype in [torch.int32, torch.int64]:
crow_indices = torch.tensor([0, 2, 4], dtype=index_dtype, device=crow_indices_device)
col_indices = torch.tensor([0, 1, 0, 1], dtype=index_dtype, device=col_indices_device)
values = torch.tensor([1, 2, 3, 4], dtype=dtype, device=values_device)
if device is None and (crow_indices_device != col_indices_device or
crow_indices_device != values_device):
with self.assertRaises(RuntimeError):
torch.sparse_csr_tensor(crow_indices,
col_indices,
values,
size=(2, 10),
device=device)
else:
t = torch.sparse_csr_tensor(crow_indices,
col_indices,
values,
size=(2, 10),
device=device)
should_be_cuda = (device == 'cuda' or (device is None and values_device == 'cuda'))
self.assertEqual(should_be_cuda, t.is_cuda)
t.crow_indices().dtype == index_dtype
t.col_indices().dtype == index_dtype
t.values().dtype == dtype
t.crow_indices().device == t.values().device
t.col_indices().device == t.values().device
    def test_sparse_csr_print(self, device):
        """Golden-file test: printed representation of CSR tensors is stable
        across shapes, index dtypes and float dtypes (checked via assertExpected)."""
        orig_maxDiff = self.maxDiff
        # Disable diff truncation so assertExpected failures show full output.
        self.maxDiff = None
        shape_nnz = [
            ((10, 10), 10),
            ((100, 10), 10),
            ((1000, 10), 10)
        ]
        printed = []
        for shape, nnz in shape_nnz:
            values_shape = torch.Size((nnz,))
            col_indices_shape = torch.Size((nnz,))
            crow_indices_shape = torch.Size((shape[0] + 1,))
            printed.append("# shape: {}".format(torch.Size(shape)))
            printed.append("# nnz: {}".format(nnz))
            printed.append("# crow_indices shape: {}".format(crow_indices_shape))
            printed.append("# col_indices shape: {}".format(col_indices_shape))
            printed.append("# values_shape: {}".format(values_shape))
            for index_dtype in [torch.int32, torch.int64]:
                for dtype in floating_types():
                    printed.append("########## {}/{} ##########".format(dtype, index_dtype))
                    x = torch.sparse_csr_tensor(torch.tensor([0, 2, 4], dtype=index_dtype),
                                                torch.tensor([0, 1, 0, 1], dtype=index_dtype),
                                                torch.tensor([1, 2, 3, 4]), dtype=dtype, device=device)
                    printed.append("# sparse tensor")
                    printed.append(str(x))
                    printed.append("# _crow_indices")
                    printed.append(str(x.crow_indices()))
                    printed.append("# _col_indices")
                    printed.append(str(x.col_indices()))
                    printed.append("# _values")
                    printed.append(str(x.values()))
                    printed.append('')
            printed.append('')
        self.assertExpected('\n'.join(printed))
        # Restore the diff-truncation setting for subsequent tests.
        self.maxDiff = orig_maxDiff
    @dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
    def test_sparse_csr_from_dense(self, device, dtype):
        """to_sparse_csr() produces the expected indices/values for hand-built
        sparse, mostly-empty, and fully-dense matrices."""
        dense = torch.tensor([[4, 5, 0], [0, 0, 0], [1, 0, 0]], dtype=dtype, device=device)
        sparse = dense.to_sparse_csr()
        self.assertEqual(torch.tensor([0, 2, 2, 3], dtype=torch.int64), sparse.crow_indices())
        self.assertEqual(torch.tensor([0, 1, 0], dtype=torch.int64), sparse.col_indices())
        self.assertEqual(torch.tensor([4, 5, 1], dtype=dtype), sparse.values())

        dense = torch.tensor([[0, 0, 0], [0, 0, 1], [1, 0, 0]], dtype=dtype, device=device)
        sparse = dense.to_sparse_csr()
        self.assertEqual(torch.tensor([0, 0, 1, 2], dtype=torch.int64), sparse.crow_indices())
        self.assertEqual(torch.tensor([2, 0], dtype=torch.int64), sparse.col_indices())
        self.assertEqual(torch.tensor([1, 1], dtype=dtype), sparse.values())

        # Fully dense input: every element becomes a specified element.
        dense = torch.tensor([[2, 2, 2], [2, 2, 2], [2, 2, 2]], dtype=dtype, device=device)
        sparse = dense.to_sparse_csr()
        self.assertEqual(torch.tensor([0, 3, 6, 9], dtype=torch.int64), sparse.crow_indices())
        self.assertEqual(torch.tensor([0, 1, 2] * 3, dtype=torch.int64), sparse.col_indices())
        self.assertEqual(torch.tensor([2] * 9, dtype=dtype), sparse.values())
    @dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
    def test_sparse_csr_to_dense(self, device, dtype):
        """dense -> CSR -> dense round-trips, including zero-sized dimensions."""
        mn = [5, 2, 0]
        for (m, n) in itertools.product(mn, mn):
            size = (m, n)
            dense = make_tensor(size, dtype=dtype, device=device)
            sparse = dense.to_sparse_csr()
            self.assertEqual(sparse.to_dense(), dense)

        # Hand-built CSR tensor densifies to the expected matrix.
        crow_indices = torch.tensor([0, 3, 5])
        col_indices = torch.tensor([0, 1, 2, 0, 1])
        values = torch.tensor([1, 2, 1, 3, 4], dtype=dtype)
        csr = torch.sparse_csr_tensor(crow_indices, col_indices,
                                      values, dtype=dtype, device=device)
        dense = torch.tensor([[1, 2, 1], [3, 4, 0]], dtype=dtype, device=device)
        self.assertEqual(csr.to_dense(), dense)
    @skipCPUIfNoMklSparse
    @coalescedonoff
    @dtypes(torch.double)
    def test_coo_to_csr_convert(self, device, dtype, coalesced):
        """COO -> CSR conversion preserves values and matmul results; the CSR
        form is row-sorted regardless of COO element order."""
        # The low-level converter only accepts 1-D index input.
        with self.assertRaisesRegex(RuntimeError, "Input is supposed to be a vector"):
            torch._convert_indices_from_coo_to_csr(
                torch.randint(100, (5, 5), device=device),
                size=100)

        size = (5, 5)
        sparse_dim = 2
        nnz = 10
        sparse_coo, _, _ = self.genSparseTensor(size, sparse_dim, nnz, coalesced, device, dtype)
        sparse_csr = sparse_coo.to_sparse_csr()

        self.assertTrue(sparse_csr.is_sparse_csr)
        self.assertEqual(sparse_csr.to_dense(), sparse_coo.to_dense())

        vec = torch.randn((5, 1), dtype=dtype, device=device)
        coo_product = sparse_coo.matmul(vec)
        csr_product = sparse_csr.matmul(vec)

        self.assertEqual(coo_product, csr_product)

        vec = torch.randn((100, 1), dtype=dtype, device=device)
        index = torch.tensor([
            [1, 0, 35, 14, 39, 6, 71, 66, 40, 27],
            [92, 31, 62, 50, 22, 65, 89, 74, 56, 34],
        ], dtype=torch.int32)
        values = torch.tensor([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], dtype=dtype, device=device)
        coo = torch.sparse_coo_tensor(index, values, torch.Size([100, 100]), dtype=dtype, device=device)
        csr = coo.to_sparse_csr()

        self.assertEqual(coo.matmul(vec), csr.matmul(vec))

        # Conversion sorts elements by row, so col_indices/values are permuted
        # relative to the original COO ordering.
        col_indices = torch.tensor([
            31, 92, 65, 50, 34, 62, 22, 56, 74, 89
        ], dtype=torch.int64, device=device)
        self.assertEqual(csr.col_indices(), col_indices)

        values = torch.tensor([2, 1, 6, 4, 10, 3, 5, 9, 8, 7], dtype=dtype, device=device)
        self.assertEqual(csr.values(), values)
    @parametrize("blocksize", [2, 4])
    @parametrize("shape", [(24, 24), (12, 24)])
    @dtypes((torch.double, torch.int32), (torch.double, torch.int64))
    @unittest.skipIf(not TEST_SCIPY, "SciPy not found")
    @skipMeta
    def test_csr_to_block_csr(self, device, dtypes, shape, blocksize):
        """CSR -> BSR conversion matches scipy.sparse.csr_matrix.tobsr."""
        dtype, index_dtype = dtypes
        m, k = shape
        nnz = random.randint(0, m * k)
        t = self.genSparseCSRTensor((m * blocksize, k * blocksize), nnz, dtype=dtype,
                                    device=device, index_dtype=index_dtype)
        # Build the SciPy reference from the same components (CPU copies).
        st = sp.csr_matrix((t.values().cpu(), t.col_indices().cpu(), t.crow_indices().cpu()), shape=tuple(t.size()))
        block_t = torch.sparse._csr_to_block_csr(t, (blocksize, blocksize))
        # Block CSR stores 3-D values: (num_blocks, blocksize, blocksize).
        self.assertEqual(block_t.values().dim(), 3)
        block_st = st.tobsr(blocksize=(blocksize, blocksize))
        self.assertEqual(block_t.values().cpu(), block_st.data)
        self.assertEqual(block_t.col_indices().cpu(), torch.tensor(block_st.indices).to(index_dtype))
        self.assertEqual(block_t.crow_indices().cpu(), torch.tensor(block_st.indptr).to(index_dtype))
    @dtypes(torch.double)
    @unittest.skipIf(not TEST_SCIPY, "SciPy not found")
    def test_csr_to_block_csr_errors(self, device, dtype):
        """CSR -> BSR conversion rejects non-square and non-dividing block sizes."""
        for index_dtype in [torch.int32, torch.int64]:
            nnz = 15
            t = self.genSparseCSRTensor((16, 16), nnz, dtype=dtype,
                                        device=device, index_dtype=index_dtype)
            with self.assertRaisesRegex(RuntimeError, "must be square."):
                block_t = torch.sparse._csr_to_block_csr(t, (2, 3))

            # Block size must evenly divide the matrix dimensions (5 does not divide 16).
            with self.assertRaisesRegex(RuntimeError, r"size \(16, 16\) with block size \(5, 5\)"):
                block_t = torch.sparse._csr_to_block_csr(t, (5, 5))
@dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
def test_sparse_csr_from_dense_convert_error(self, device, dtype):
size = (4, 2, 4)
dense = make_tensor(size, dtype=dtype, device=device)
with self.assertRaisesRegex(RuntimeError, "Only 2D"):
sparse = dense.to_sparse_csr()
# TODO: Support auto generation of device check for sparse tensors
# See: https://github.com/pytorch/pytorch/issues/59058
    @onlyCUDA
    @dtypes(torch.double)
    def test_matmul_device_mismatch(self, device, dtype):
        """addmm raises when its operands live on different devices."""
        cpu = torch.rand((10, 10))
        cuda = cpu.cuda()
        for s, m1, m2 in itertools.product((cpu, cuda), repeat=3):
            # NOTE(review): variable is named `csr` but to_sparse() returns a
            # COO tensor here — confirm whether to_sparse_csr() was intended.
            csr = m1.to_sparse()
            if s.device == csr.device == m2.device:
                torch.addmm(s, csr, m2)
            else:
                with self.assertRaisesRegex(RuntimeError, "Expected all tensors to be on the same device"):
                    torch.addmm(s, csr, m2)
    @skipCPUIfNoMklSparse
    @skipCUDAIfNoCusparseGeneric
    @dtypes(*floating_and_complex_types())
    @dtypesIfCUDA(*floating_and_complex_types_and(
        *[torch.half] if SM53OrLater else [],
        *[torch.bfloat16] if SM80OrLater else []))
    def test_csr_matvec(self, device, dtype):
        """CSR matrix-vector product matches the dense reference; mismatched
        vector length raises."""
        side = 100
        for index_dtype in [torch.int32, torch.int64]:
            csr = self.genSparseCSRTensor((side, side), 1000, device=device, dtype=dtype, index_dtype=index_dtype)
            vec = torch.randn(side, dtype=dtype, device=device)

            res = csr.matmul(vec)
            expected = csr.to_dense().matmul(vec)

            self.assertEqual(res, expected)

            bad_vec = torch.randn(side + 10, dtype=dtype, device=device)
            err_msg = "size mismatch, got"
            with self.assertRaisesRegex(RuntimeError, err_msg):
                csr.matmul(bad_vec)
    @onlyCUDA
    @unittest.skipIf(not CUDA11OrLater, "Only CUDA 11+ is supported")
    @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
    def test_baddbmm(self, device, dtype):
        """baddbmm with a broadcast sparse CSR operand matches per-batch addmm."""
        def run_test(c, a, a_batched, b, op_b=False, op_out=False, *, dtype=None, device=None):
            alpha = complex(random.random(), random.random()) if dtype.is_complex else random.random()
            beta = complex(random.random(), random.random()) if dtype.is_complex else random.random()
            # Optionally conjugate-transpose b / out to exercise non-contiguous paths.
            b = b.mH if (op_b and a.shape == b.shape) else b

            actual = torch.baddbmm(c, a_batched, b, alpha=alpha, beta=beta)

            out = torch.empty_like(c.mH if op_out and a.shape == b.shape else c)
            torch.baddbmm(c, a_batched, b, alpha=alpha, beta=beta, out=out)

            # Reference: apply addmm batch-by-batch with the unbatched CSR tensor.
            expected = [torch.addmm(c[i], a, b[i], alpha=alpha, beta=beta) for i in range(c.shape[0])]
            expected = torch.stack(expected, 0)

            self.assertEqual(actual, out)
            self.assertEqual(actual, expected)

        for index_dtype in [torch.int32, torch.int64]:
            for (m, n, k), batch_size, noncontiguous in zip(itertools.product([1, 5], repeat=3), [1, 3], [True, False]):
                nnz = random.randint(0, m * k)
                a = self.genSparseCSRTensor((m, k), nnz, dtype=dtype, device=device, index_dtype=index_dtype)

                # a_batched is a regular CSR tensor but with a batch dimension in the shape
                a_batched = torch._sparse_csr_tensor_unsafe(
                    a.crow_indices(), a.col_indices(), a.values(), (batch_size, m, k))

                b = make_tensor((batch_size, k, n), dtype=dtype, device=device, noncontiguous=noncontiguous)
                c = make_tensor((batch_size, m, n), dtype=dtype, device=device, noncontiguous=noncontiguous)
                for op_b, op_out in itertools.product([True, False], repeat=2):
                    run_test(c, a, a_batched, b, op_b, op_out, dtype=dtype, device=device)
    @onlyCUDA
    @unittest.skipIf(not CUDA11OrLater, "Only CUDA 11+ is supported")
    @skipCUDAIfNoCusparseGeneric
    @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
    def test_bmm(self, device, dtype):
        """bmm with a broadcast sparse CSR operand matches per-batch mm."""
        def run_test(a, a_batched, b, op_b=False, op_out=False, *, dtype=None, device=None):
            # Optionally conjugate-transpose b / out to exercise non-contiguous paths.
            b = b.mH if (op_b and a.shape == b.shape) else b

            actual = torch.bmm(a_batched, b)

            out = torch.empty_like(actual.mH if op_out and a.shape == b.shape else actual)
            torch.bmm(a_batched, b, out=out)

            # Reference: apply mm batch-by-batch with the unbatched CSR tensor.
            expected = [torch.mm(a, b[i]) for i in range(b.shape[0])]
            expected = torch.stack(expected, 0)

            self.assertEqual(actual, out)
            self.assertEqual(actual, expected)

        for index_dtype in [torch.int32, torch.int64]:
            for (m, n, k), batch_size, noncontiguous in zip(itertools.product([1, 5], repeat=3), [1, 3], [True, False]):
                nnz = random.randint(0, m * k)
                a = self.genSparseCSRTensor((m, k), nnz, dtype=dtype, device=device, index_dtype=index_dtype)

                # a_batched is a regular CSR tensor but with a batch dimension in the shape
                a_batched = torch._sparse_csr_tensor_unsafe(
                    a.crow_indices(), a.col_indices(), a.values(), (batch_size, m, k))

                b = make_tensor((batch_size, k, n), dtype=dtype, device=device, noncontiguous=noncontiguous)
                for op_b, op_out in itertools.product([True, False], repeat=2):
                    run_test(a, a_batched, b, op_b, op_out, dtype=dtype, device=device)
    def run_test_block_addmm_addmv(self, addmv_addmm, c, a, b, op_b=False, op_out=False, *, dtype=None, device=None):
        """Shared driver for block-CSR addmm/addmv: compares the torch result
        against a scipy.sparse.bsr_matrix reference with random alpha/beta."""
        alpha = complex(random.random(), random.random()) if dtype.is_complex else random.random()
        beta = complex(random.random(), random.random()) if dtype.is_complex else random.random()
        # Optionally conjugate-transpose b / out to exercise non-contiguous paths.
        b = b.mH if (op_b and a.shape == b.shape) else b

        actual = addmv_addmm(c, a, b, alpha=alpha, beta=beta)

        out = torch.empty_like(c.mH if op_out and a.shape == b.shape else c)
        addmv_addmm(c, a, b, alpha=alpha, beta=beta, out=out)

        # SciPy BSR reference built from the same (values, col_indices, crow_indices).
        a_bsr = sp.bsr_matrix(
            (
                a.values().cpu().numpy(),
                a.col_indices().cpu().numpy(),
                a.crow_indices().cpu().numpy(),
            ),
            shape=a.shape,
        )
        expected = alpha * (a_bsr * b.cpu().resolve_conj().numpy()) + beta * c.cpu().numpy()
        self.assertEqual(actual, out)
        self.assertEqual(actual, expected)
    @parametrize("block_size", [1, 2, 3])
    @parametrize("index_dtype", [torch.int32, torch.int64])
    @skipCPUIfNoMklSparse
    @unittest.skipIf(not TEST_SCIPY, "SciPy not found")
    @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
    def test_block_addmm(self, device, dtype, index_dtype, block_size):
        """addmm with a block-CSR operand, covering both row-major (converted)
        and column-major (hand-assembled) value blocks."""
        for (m, n, k), noncontiguous in zip(itertools.product([1, 5], repeat=3), [True, False]):
            nnz = random.randint(0, m * k)
            if not noncontiguous:
                # Contiguous path: convert a plain CSR tensor to block CSR.
                a = self.genSparseCSRTensor((m * block_size, k * block_size), nnz,
                                            dtype=dtype, device=device, index_dtype=index_dtype)
                a = torch.sparse._csr_to_block_csr(a, (block_size, block_size))
            else:
                # Non-contiguous path: assemble block values by hand so each
                # block can be made column-major via mT.
                a = self.genSparseCSRTensor((m, k), nnz, dtype=dtype, device=device, index_dtype=index_dtype)
                a_data = make_tensor((nnz, block_size, block_size), dtype=dtype, device=device)
                a_data = a_data.mT if noncontiguous else a_data  # Test column-major blocks
                a = torch._sparse_csr_tensor_unsafe(a.crow_indices(), a.col_indices(),
                                                    a_data, (m * block_size, k * block_size))
            b = make_tensor((k * block_size, n * block_size), dtype=dtype, device=device, noncontiguous=noncontiguous)
            c = make_tensor((m * block_size, n * block_size), dtype=dtype, device=device, noncontiguous=noncontiguous)
            for op_b, op_out in itertools.product([True, False], repeat=2):
                self.run_test_block_addmm_addmv(torch.addmm, c, a, b, op_b, op_out, dtype=dtype, device=device)
    @parametrize("block_size", [2, 3])
    @parametrize("index_dtype", [torch.int32, torch.int64])
    @skipCPUIfNoMklSparse
    @unittest.skipIf(not TEST_SCIPY, "SciPy not found")
    @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
    def test_block_addmv(self, device, dtype, index_dtype, block_size):
        """addmv with a block-CSR matrix operand, checked against a SciPy BSR reference."""
        # TODO: Explicitly disable block size 1 support
        # if (TEST_WITH_ROCM or not TEST_CUSPARSE_GENERIC) and block_size == 1:
        #     return
        for (m, k), noncontiguous in zip(itertools.product([1, 5], repeat=2), [True, False]):
            nnz = random.randint(0, m * k)
            if not noncontiguous:
                # Contiguous blocks: convert a plain CSR tensor to block CSR.
                a = self.genSparseCSRTensor((m * block_size, k * block_size), nnz,
                                            dtype=dtype, device=device, index_dtype=index_dtype)
                a = torch.sparse._csr_to_block_csr(a, (block_size, block_size))
            else:
                # Noncontiguous (column-major) blocks, assembled manually.
                a = self.genSparseCSRTensor((m, k), nnz, dtype=dtype, device=device, index_dtype=index_dtype)
                a_data = make_tensor((nnz, block_size, block_size), dtype=dtype, device=device)
                a_data = a_data.mT if noncontiguous else a_data  # Test column-major blocks
                a = torch._sparse_csr_tensor_unsafe(a.crow_indices(), a.col_indices(),
                                                    a_data, (m * block_size, k * block_size))
            b = make_tensor((k * block_size,), dtype=dtype, device=device, noncontiguous=noncontiguous)
            c = make_tensor((m * block_size,), dtype=dtype, device=device, noncontiguous=noncontiguous)
            self.run_test_block_addmm_addmv(torch.addmv, c, a, b, dtype=dtype, device=device)
@parametrize("block_size", [2, 3])
@parametrize("index_dtype", [torch.int32, torch.int64])
@skipCPUIfNoMklSparse
@skipCUDAIfRocm
@unittest.skipIf(not TEST_SCIPY, "SciPy not found")
@dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
def test_block_triangular_solve(self, device, dtype, index_dtype, block_size):
def run_test(a, b, upper, transpose, unitriangular, op_out):
actual = torch.triangular_solve(b, a, upper=upper, unitriangular=unitriangular, transpose=transpose)
actual_X = actual.solution
actual_A_clone = actual.cloned_coefficient
self.assertTrue(actual_A_clone.numel() == 0)
if a._nnz() == 0:
self.assertTrue(actual_X.isnan().all())
return
# TODO: replace with torch method when implemented to_dense() on block sparse tensor
a_bsr = sp.bsr_matrix(
(
a.values().cpu().numpy(),
a.col_indices().cpu().numpy(),
a.crow_indices().cpu().numpy(),
),
shape=a.shape,
)
expected_X, _ = torch.triangular_solve(
b,
torch.tensor(a_bsr.todense(), device=device),
transpose=transpose,
upper=upper,
unitriangular=unitriangular)
self.assertEqual(actual_X, expected_X)
out = torch.empty_like(b.mH if op_out and a.shape == b.shape else b)
torch.triangular_solve(
b, a,
upper=upper, unitriangular=unitriangular, transpose=transpose, out=(out, actual_A_clone)
)
self.assertEqual(out, actual_X)
self.assertEqual(out, expected_X)
for (m, k), noncontiguous in zip(itertools.product([1, 5], repeat=2), [True, False]):
nnz = random.randint(0, m * m)
if not noncontiguous:
a = self.genSparseCSRTensor((m * block_size, m * block_size), nnz,
dtype=dtype, device=device, index_dtype=index_dtype)
a = torch.sparse._csr_to_block_csr(a, (block_size, block_size))
else:
a = self.genSparseCSRTensor((m, m), nnz, dtype=dtype, device=device, index_dtype=index_dtype)
a_data = make_tensor((nnz, block_size, block_size), dtype=dtype, device=device)
a_data = a_data.mT if noncontiguous else a_data # Test column-major blocks
a = torch._sparse_csr_tensor_unsafe(a.crow_indices(), a.col_indices(),
a_data, (m * block_size, m * block_size))
b = make_tensor((m * block_size, k), dtype=dtype, device=device, noncontiguous=noncontiguous)
for (upper, unitriangular, transpose, op_out) in itertools.product([True, False], repeat=4):
run_test(a, b, upper, unitriangular, transpose, op_out)
@skipCPUIfNoMklSparse
@dtypes(torch.double)
def test_mm(self, device, dtype):
def test_shape(di, dj, dk, nnz):
for index_dtype in [torch.int32, torch.int64]:
x = self.genSparseCSRTensor((di, dj), nnz, device=device, dtype=dtype, index_dtype=index_dtype)
t = torch.randn(di, dk, dtype=dtype, device=device)
y = torch.randn(dj, dk, dtype=dtype, device=device)
alpha = random.random()
beta = random.random()
# res = beta * t + alpha * (x @ y)
res = torch.addmm(t, x, y, beta=beta, alpha=alpha)
expected = torch.addmm(t, x.to_dense(), y, beta=beta, alpha=alpha)
self.assertEqual(res, expected)
res = torch.addmm(t, x, y)
expected = torch.addmm(t, x.to_dense(), y)
self.assertEqual(res, expected)
res = torch.mm(x, y)
expected = torch.mm(x.to_dense(), y)
self.assertEqual(res, expected)
for i in range(2, 5):
for j in range(2, 8):
for k in range(2, 8):
test_shape(i, j, k, i * j // 2)
test_shape(4, 4, 4, 0)
@skipCPUIfNoMklSparse
@dtypes(*floating_and_complex_types())
@dtypesIfCUDA(*floating_and_complex_types_and(
*[torch.half] if SM53OrLater and TEST_CUSPARSE_GENERIC else [],
*[torch.bfloat16] if SM80OrLater and TEST_CUSPARSE_GENERIC else []))
@precisionOverride({torch.bfloat16: 1e-2, torch.float16: 1e-2})
def test_sparse_mm(self, device, dtype):
def test_shape(d1, d2, d3, nnz, transposed, index_dtype):
if transposed:
D = torch.randn(d3, d2, dtype=dtype, device=device).t_()
else:
D = torch.randn(d2, d3, dtype=dtype, device=device)
S = self.genSparseCSRTensor((d1, d2), nnz, device=device, dtype=dtype, index_dtype=index_dtype)
S_dense = S.to_dense()
self.assertEqual(torch.sparse.mm(S, D), torch.mm(S_dense, D))
for index_dtype in [torch.int32, torch.int64]:
test_shape(7, 8, 9, 20, False, index_dtype)
test_shape(7, 8, 9, 20, True, index_dtype)
@dtypes(*floating_and_complex_types())
@dtypesIfCUDA(*floating_and_complex_types_and(
*[torch.half] if SM53OrLater and TEST_CUSPARSE_GENERIC else [],
*[torch.bfloat16] if SM80OrLater and TEST_CUSPARSE_GENERIC else []))
@precisionOverride({torch.bfloat16: 1e-2, torch.float16: 1e-2})
def test_sparse_addmm(self, device, dtype):
def test_shape(m, n, p, nnz, broadcast, index_dtype, alpha_beta=None):
if alpha_beta is None:
alpha = random.random()
beta = random.random()
else:
alpha, beta = alpha_beta
if broadcast:
D1 = make_tensor((), dtype=dtype, device=device)
else:
D1 = make_tensor([n, p], dtype=dtype, device=device)
D2 = make_tensor([m, p], dtype=dtype, device=device)
S = self.genSparseCSRTensor([n, m], nnz, dtype=dtype, device=device, index_dtype=index_dtype)
S_dense = S.to_dense()
Y = torch.sparse.addmm(D1, S, D2, beta=beta, alpha=alpha)
Y_dense = torch.addmm(D1, S_dense, D2, beta=beta, alpha=alpha)
self.assertEqual(Y, Y_dense)
for index_dtype in [torch.int32, torch.int64]:
test_shape(7, 8, 9, 20, False, index_dtype, None)
test_shape(7, 8, 9, 20, True, index_dtype, None)
test_shape(7, 8, 9, 20, False, index_dtype, (1, 0))
test_shape(7, 8, 9, 20, True, index_dtype, (1, 0))
test_shape(7, 8, 9, 20, False, index_dtype, (1, 1))
test_shape(7, 8, 9, 20, True, index_dtype, (1, 1))
    @skipCPUIfNoMklSparse
    @dtypes(*floating_and_complex_types())
    @precisionOverride({torch.double: 1e-8, torch.float: 1e-4, torch.bfloat16: 0.6,
                        torch.half: 1e-1, torch.cfloat: 1e-4, torch.cdouble: 1e-8})
    @dtypesIfCUDA(*floating_types_and(torch.complex64,
                                      *[torch.bfloat16] if SM80OrLater else [],
                                      *[torch.half] if SM53OrLater else [],
                                      *[torch.complex128] if CUSPARSE_SPMM_COMPLEX128_SUPPORTED else []))
    @skipCUDAIf(
        not _check_cusparse_spgemm_available(),
        "cuSparse Generic API SpGEMM is not available"
    )
    def test_addmm_all_sparse_csr(self, device, dtype):
        """addmm where the operands are converted to sparse CSR (mode="all_sparse").

        Delegates the actual checks to _test_addmm_addmv; covers plain inputs,
        0-strided (expanded) inputs, beta=0 with a NaN-filled additive term, and
        every transpose combination.
        """
        M = torch.randn(10, 25, device=device).to(dtype)
        m1 = torch.randn(10, 50, device=device).to(dtype)
        m2 = torch.randn(50, 25, device=device).to(dtype)
        _test_addmm_addmv(self, torch.addmm, M, m1, m2, layout=torch.sparse_csr, mode="all_sparse")
        # Test 0-strided
        M = torch.randn(10, 1, device=device).to(dtype).expand(10, 25)
        m1 = torch.randn(10, 1, device=device).to(dtype).expand(10, 50)
        m2 = torch.randn(50, 25, device=device).to(dtype)
        _test_addmm_addmv(self, torch.addmm, M, m1, m2, layout=torch.sparse_csr, mode="all_sparse")
        # Test beta=0, M=nan: with beta=0 the NaNs in M must not leak into the result.
        M = torch.full((10, 25), float('nan'), device=device).to(dtype)
        m1 = torch.randn(10, 50, device=device).to(dtype)
        m2 = torch.randn(50, 25, device=device).to(dtype)
        _test_addmm_addmv(self, torch.addmm, M, m1, m2, beta=0, layout=torch.sparse_csr, mode="all_sparse")
        # Test transpose
        for t1, t2, t3, t4 in itertools.product([True, False], repeat=4):
            def maybe_transpose(cond, m):
                if not cond:
                    return m
                # Transpose twice through a contiguous clone so the tensor keeps
                # its shape but gets transposed strides.
                return m.t().clone(memory_format=torch.contiguous_format).t()
            M = maybe_transpose(t1, torch.randn(10, 25, device=device).to(dtype))
            m1 = maybe_transpose(t2, torch.randn(10, 50, device=device).to(dtype))
            m2 = maybe_transpose(t3, torch.randn(50, 25, device=device).to(dtype))
            _test_addmm_addmv(self, torch.addmm, M, m1, m2, transpose_out=t4, layout=torch.sparse_csr, mode="all_sparse")
    @onlyCPU
    @skipCPUIfNoMklSparse
    @dtypes(*floating_and_complex_types())
    def test_addmm_dense_result(self, device, dtype):
        """addmm with sparse CSR matrix inputs and a dense result (mode="dense_result").

        Same coverage as test_addmm_all_sparse_csr: plain inputs, 0-strided
        inputs, beta=0 with a NaN additive term, and all transpose combinations.
        """
        M = torch.randn(10, 25, device=device).to(dtype)
        m1 = torch.randn(10, 50, device=device).to(dtype)
        m2 = torch.randn(50, 25, device=device).to(dtype)
        _test_addmm_addmv(self, torch.addmm, M, m1, m2, layout=torch.sparse_csr, mode="dense_result")
        # Test 0-strided
        M = torch.randn(10, 1, device=device).to(dtype).expand(10, 25)
        m1 = torch.randn(10, 1, device=device).to(dtype).expand(10, 50)
        m2 = torch.randn(50, 25, device=device).to(dtype)
        _test_addmm_addmv(self, torch.addmm, M, m1, m2, layout=torch.sparse_csr, mode="dense_result")
        # Test beta=0, M=nan: with beta=0 the NaNs in M must not leak into the result.
        M = torch.full((10, 25), float('nan'), device=device).to(dtype)
        m1 = torch.randn(10, 50, device=device).to(dtype)
        m2 = torch.randn(50, 25, device=device).to(dtype)
        _test_addmm_addmv(self, torch.addmm, M, m1, m2, beta=0, layout=torch.sparse_csr, mode="dense_result")
        # Test transpose
        for t1, t2, t3, t4 in itertools.product([True, False], repeat=4):
            def maybe_transpose(cond, m):
                if not cond:
                    return m
                # Transposed strides with the original shape.
                return m.t().clone(memory_format=torch.contiguous_format).t()
            M = maybe_transpose(t1, torch.randn(10, 25, device=device).to(dtype))
            m1 = maybe_transpose(t2, torch.randn(10, 50, device=device).to(dtype))
            m2 = maybe_transpose(t3, torch.randn(50, 25, device=device).to(dtype))
            _test_addmm_addmv(self, torch.addmm, M, m1, m2, transpose_out=t4, layout=torch.sparse_csr, mode="dense_result")
@skipCPUIfNoMklSparse
@dtypes(*floating_and_complex_types())
@dtypesIfCUDA(*floating_types_and(torch.complex64,
*[torch.bfloat16] if SM80OrLater else [],
*[torch.half] if SM53OrLater else [],
*[torch.complex128] if CUSPARSE_SPMM_COMPLEX128_SUPPORTED else []))
@skipCUDAIf(
not _check_cusparse_spgemm_available(),
"cuSparse Generic API SpGEMM is not available"
)
@precisionOverride({torch.double: 1e-8, torch.float: 1e-4, torch.bfloat16: 0.6,
torch.half: 1e-1, torch.cfloat: 1e-4, torch.cdouble: 1e-8})
def test_addmm_sizes_all_sparse_csr(self, device, dtype):
for m in [0, 1, 25]:
for n in [0, 1, 10]:
for k in [0, 1, 8]:
M = torch.randn(n, m, device=device).to(dtype)
m1 = torch.randn(n, k, device=device).to(dtype)
m2 = torch.randn(k, m, device=device).to(dtype)
_test_addmm_addmv(self, torch.addmm, M, m1, m2, layout=torch.sparse_csr, mode="all_sparse")
M = torch.randn(n, m, device=device).to(dtype).to_sparse_csr()
m1 = torch.randn(n, k + 1, device=device).to(dtype).to_sparse_csr()
m2 = torch.randn(k, m, device=device).to(dtype).to_sparse_csr()
self.assertRaisesRegex(RuntimeError, f"{n}x{k + 1}.*{k}x{m}", lambda: torch.addmm(M, m1, m2))
self.assertRaisesRegex(RuntimeError, f"{n}x{k + 1}.*{k}x{m}", lambda: torch.mm(m1, m2))
@skipCPUIfNoMklSparse
@dtypes(torch.float)
def test_addmm_errors(self, device, dtype):
# test that the errors are the same for dense and sparse versions
import re
def test1(*, is_sparse):
# shapes must be compatible for matrix multiplication
a = make_tensor((2, 3), dtype=dtype, device=device)
if is_sparse:
a_sparse = a.to_sparse_csr()
return torch.addmm(a, a_sparse, a)
else:
return torch.addmm(a, a, a)
def test2(*, is_sparse):
# mat2 must be a matrix
a = make_tensor((2, 3), dtype=dtype, device=device)
if is_sparse:
a_sparse = a.to_sparse_csr()
return torch.addmm(a, a_sparse, a.unsqueeze(0))
else:
return torch.addmm(a, a, a.unsqueeze(0))
def test3(*, is_sparse):
# the first input needs to be 1D or 2D
a = make_tensor((3, 3), dtype=dtype, device=device)
if is_sparse:
a_sparse = a.to_sparse_csr()
return torch.addmm(a.unsqueeze(0), a_sparse, a)
else:
return torch.addmm(a.unsqueeze(0), a, a)
for test in (test1, test2, test3):
try:
test(is_sparse=False)
except RuntimeError as msg:
with self.assertRaisesRegex(RuntimeError, re.escape(str(msg))):
test(is_sparse=True)
@skipCPUIfNoMklSparse
@dtypes(torch.float)
def test_mm_errors(self, device, dtype):
# test that the errors are the same for dense and sparse versions
import re
def test1(*, is_sparse):
# shapes must be compatible for matrix multiplication
a = make_tensor((2, 3), dtype=dtype, device=device)
if is_sparse:
a_sparse = a.to_sparse_csr()
return torch.mm(a_sparse, a)
else:
return torch.mm(a, a)
def test2(*, is_sparse):
# mat2 must be a matrix
a = make_tensor((2, 3), dtype=dtype, device=device)
if is_sparse:
a_sparse = a.to_sparse_csr()
return torch.mm(a_sparse, a.unsqueeze(0))
else:
return torch.mm(a, a.unsqueeze(0))
for test in (test1, test2):
try:
test(is_sparse=False)
except RuntimeError as msg:
with self.assertRaisesRegex(RuntimeError, re.escape(str(msg))):
test(is_sparse=True)
@dtypes(torch.float, torch.double)
def test_add(self, device, dtype):
def _test_spadd_shape(nnz, shape):
x = self.genSparseCSRTensor(shape, nnz, dtype=dtype, device=device, index_dtype=torch.int32)
y = torch.randn(*shape, dtype=dtype, device=device)
r = random.random()
res = torch.add(y, x, alpha=r)
expected = y + r * x.to_dense()
self.assertEqual(res, expected)
# Non contiguous dense tensor
s = list(shape)
s[0] = shape[-1]
s[-1] = shape[0]
y = torch.randn(*s, dtype=torch.double, device=device)
y.transpose_(0, len(s) - 1)
r = random.random()
res = torch.add(y, x, alpha=r)
expected = y + r * x.to_dense()
self.assertEqual(res, expected)
_test_spadd_shape(10, [100, 100])
_test_spadd_shape(0, [100, 100])
_test_spadd_shape(10, [100, 1])
_test_spadd_shape(10, [1, 100])
    @dtypes(torch.float, torch.double)
    def test_mul(self, device, dtype):
        """Elementwise mul of two sparse CSR tensors matches the dense result;
        mixed sparse/dense mul is expected to raise."""
        def _test_spadd_shape(fn, nnz, shape):
            x = self.genSparseCSRTensor(shape, nnz, dtype=dtype, device=device, index_dtype=torch.int32)
            y = self.genSparseCSRTensor(shape, nnz, dtype=dtype, device=device, index_dtype=torch.int32)
            res = fn(y, x)
            # Reference: apply fn to densified operands, then round-trip to CSR.
            expected = fn(y.to_dense(), x.to_dense()).to_sparse_csr()
            self.assertEqual(res, expected)
        _test_spadd_shape(torch.mul, 100, [100, 100])
        _test_spadd_shape(torch.mul, 0, [100, 100])
        _test_spadd_shape(torch.mul, 100, [100, 1])
        _test_spadd_shape(torch.mul, 100, [1, 100])
        s = torch.sparse_coo_tensor([[0], [1]], [5.0], (2, 3), device=device)
        s = s.to_sparse_csr()
        t23 = s.to_dense()
        # mul(sparse_csr, dense) is unsupported in both orders; the error type
        # and message differ between the CPU and CUDA backends.
        if device == 'cpu':
            with self.assertRaisesRegex(RuntimeError, r"mul\(sparse_csr, dense\) is not supported"):
                s * t23
            with self.assertRaisesRegex(RuntimeError, r"mul\(dense, sparse_csr\) is not supported"):
                t23 * s
        elif device == 'cuda':
            with self.assertRaisesRegex(NotImplementedError, "CUDA"):
                s * t23
            with self.assertRaisesRegex(NotImplementedError, "CUDA"):
                t23 * s
    @skipCPUIfNoMklSparse
    @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
    def test_sparse_add(self, device, dtype):
        """torch.add of two sparse CSR tensors (with out=) matches dense addition."""
        def run_test(m, n, index_dtype):
            if TEST_WITH_ROCM and dtype.is_complex:
                self.skipTest("ROCm doesn't work with complex dtype correctly.")
            alpha = random.random()
            nnz1 = random.randint(0, m * n)
            nnz2 = random.randint(0, m * n)
            nnz3 = random.randint(0, m * n)
            if TEST_WITH_ROCM:
                # ROCm fails when nnz = 0
                nnz1, nnz2, nnz3 = max(1, nnz1), max(1, nnz2), max(1, nnz3)
            S1 = self.genSparseCSRTensor([m, n], nnz1, dtype=dtype, device=device, index_dtype=index_dtype)
            S2 = self.genSparseCSRTensor([m, n], nnz2, dtype=dtype, device=device, index_dtype=index_dtype)
            # S3 serves as the out= tensor; its original contents are overwritten.
            S3 = self.genSparseCSRTensor([m, n], nnz3, dtype=dtype, device=device, index_dtype=index_dtype)
            expected = torch.add(S1.to_dense(), S2.to_dense(), alpha=alpha)
            actual = torch.add(S1, S2, alpha=alpha, out=S3)
            self.assertEqual(actual.to_dense(), expected)
            self.assertEqual(S3.to_dense(), expected)
        for index_dtype in [torch.int32, torch.int64]:
            for m, n in itertools.product([3, 5], [3, 5]):
                run_test(m, n, index_dtype)
@dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
def test_sparse_add_errors(self, device, dtype):
def run_test(index_type):
a = self.genSparseCSRTensor((2, 2), 3, dtype=dtype, device=device, index_dtype=index_dtype)
b = self.genSparseCSRTensor((2, 1), 2, dtype=dtype, device=device, index_dtype=index_dtype)
with self.assertRaisesRegex(RuntimeError, "Expected input tensors to have the same shape"):
torch.add(a, b)
for index_dtype in [torch.int32, torch.int64]:
run_test(index_dtype)
    @skipCPUIfNoMklSparse
    @skipCUDAIf(
        not _check_cusparse_triangular_solve_available(),
        "cuSparse Generic API SpSV is not available"
    )
    @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
    @precisionOverride({torch.float32: 1e-3, torch.complex64: 1e-3,
                        torch.float64: 1e-8, torch.complex128: 1e-8})
    def test_sparse_triangular_solve(self, device, dtype):
        """triangular_solve with a sparse CSR coefficient matrix against the
        dense solver, including out= with C-contiguous, F-contiguous and
        discontiguous result strides."""
        def run_test(n, k, upper, unitriangular, transpose, zero):
            triangle_function = torch.triu if upper else torch.tril
            # zero=True builds an all-zero coefficient matrix (nnz == 0 after
            # CSR conversion).
            make_A = torch.zeros if zero else make_tensor
            A = make_A((n, n), dtype=dtype, device=device)
            A = triangle_function(A)
            A_sparse = A.to_sparse_csr()
            B = make_tensor((n, k), dtype=dtype, device=device)
            expected = torch.triangular_solve(B, A, upper=upper, unitriangular=unitriangular, transpose=transpose)
            expected_X = expected.solution
            actual = torch.triangular_solve(B, A_sparse, upper=upper, unitriangular=unitriangular, transpose=transpose)
            actual_X = actual.solution
            actual_A_clone = actual.cloned_coefficient
            # For sparse coefficient matrices the coefficient is never cloned.
            self.assertTrue(actual_A_clone.numel() == 0)
            if A_sparse._nnz() == 0:
                # A structurally empty system is solved to all-NaN.
                self.assertTrue(actual_X.isnan().all())
                return
            self.assertEqual(actual_X, expected_X)
            # test out with C contiguous strides
            out = torch.empty_strided((n, k), (k, 1), dtype=dtype, device=device)
            torch.triangular_solve(
                B, A_sparse,
                upper=upper, unitriangular=unitriangular, transpose=transpose, out=(out, actual_A_clone)
            )
            self.assertEqual(out, expected_X)
            # test out with F contiguous strides
            out = torch.empty_strided((n, k), (1, n), dtype=dtype, device=device)
            torch.triangular_solve(
                B, A_sparse,
                upper=upper, unitriangular=unitriangular, transpose=transpose, out=(out, actual_A_clone)
            )
            self.assertEqual(out, expected_X)
            # The op must not reallocate out: F-contiguous strides are preserved.
            self.assertEqual(out.stride(), (1, n))
            # test out with discontiguous strides
            out = torch.empty_strided((2 * n, k), (1, 2 * n), dtype=dtype, device=device)[::2]
            if n > 0 and k > 0:
                self.assertFalse(out.is_contiguous())
                self.assertFalse(out.t().is_contiguous())
            before_stride = out.stride()
            torch.triangular_solve(
                B, A_sparse,
                upper=upper, unitriangular=unitriangular, transpose=transpose, out=(out, actual_A_clone)
            )
            self.assertEqual(out, expected_X)
            self.assertEqual(out.stride(), before_stride)
        ks = [0, 1, 3]
        ns = [5, 3, 0]
        for (k, n), (upper, unitriangular, transpose, zero) in itertools.product(itertools.product(ks, ns),
                                                                                 itertools.product([True, False], repeat=4)):
            run_test(n, k, upper, unitriangular, transpose, zero)
    @skipCUDAIfRocm
    @onlyCUDA
    @skipCUDAIf(
        not _check_cusparse_sddmm_available(),
        "cuSparse Generic API SDDMM is not available"
    )
    @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
    @precisionOverride({torch.float32: 1e-3, torch.complex64: 1e-3,
                        torch.float64: 1e-8, torch.complex128: 1e-8})
    def test_sampled_addmm(self, device, dtype):
        """torch.sparse.sampled_addmm (SDDMM): the dense product a @ b is
        sampled at the sparsity pattern of c and combined as
        alpha * (a @ b) * spy(c) + beta * c."""
        def run_test(c, a, b, op_a, op_b, *, alpha=None, beta=None):
            if dtype.is_complex:
                alpha = random.random() + 0.3j if alpha is None else alpha
                beta = random.random() + 0.6j if beta is None else beta
            else:
                alpha = random.random() if alpha is None else alpha
                beta = random.random() if beta is None else beta
            # Optionally conjugate-transpose the dense factors (square case only).
            if op_a and a.shape == b.shape:
                a = a.mH
            if op_b and a.shape == b.shape:
                b = b.mH
            actual = torch.sparse.sampled_addmm(c, a, b, alpha=alpha, beta=beta)
            # out= tensor sharing the result's sparsity pattern but with
            # uninitialized values, to check the out= path fills them.
            out = torch.sparse_csr_tensor(
                *map(torch.clone, (actual.crow_indices(), actual.col_indices())),
                torch.empty_like(actual.values()),
                size=c.shape
            )
            torch.sparse.sampled_addmm(c, a, b, alpha=alpha, beta=beta, out=out)
            # spy(c): ones at c's sparsity pattern, used to mask the dense product.
            spy_c = torch.sparse_csr_tensor(c.crow_indices(), c.col_indices(), torch.ones_like(c.values()), size=c.shape)
            expected = alpha * (a @ b) * spy_c.to_dense() + beta * c.to_dense()
            self.assertEqual(actual.to_dense(), out.to_dense())
            self.assertEqual(actual.to_dense(), expected)
        for index_dtype in [torch.int32, torch.int64]:
            for (m, n, k), noncontiguous in zip(itertools.product([1, 5], repeat=3), [True, False]):
                nnz = random.randint(0, m * n)
                c = self.genSparseCSRTensor((m, n), nnz, dtype=dtype, device=device, index_dtype=index_dtype)
                a = make_tensor((m, k), dtype=dtype, device=device, noncontiguous=noncontiguous)
                b = make_tensor((k, n), dtype=dtype, device=device, noncontiguous=noncontiguous)
                for op_a, op_b in itertools.product([True, False], repeat=2):
                    run_test(c, a, b, op_a, op_b)
@skipCUDAIfRocm
@onlyCUDA
@skipCUDAIf(True, "Causes CUDA memory exception, see https://github.com/pytorch/pytorch/issues/72177")
@skipCUDAIf(
not _check_cusparse_sddmm_available(),
"cuSparse Generic API SDDMM is not available"
)
@dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
@precisionOverride({torch.float32: 1e-3, torch.complex64: 1e-3,
torch.float64: 1e-8, torch.complex128: 1e-8})
def test_sampled_addmm_zero_sized(self, device, dtype):
def run_test(c, a, b):
actual = torch.sparse.sampled_addmm(c, a, b)
self.assertEqual(actual.shape, c.shape)
for m, n, k in itertools.product([0, 5], repeat=3):
c = torch.empty(m, n, dtype=dtype, device=device, layout=torch.sparse_csr)
a = make_tensor((m, k), dtype=dtype, device=device)
b = make_tensor((k, n), dtype=dtype, device=device)
run_test(c, a, b)
    @skipCUDAIfRocm
    @onlyCUDA
    @skipCUDAIf(
        not _check_cusparse_sddmm_available(),
        "cuSparse Generic API SDDMM is not available"
    )
    @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
    def test_sampled_addmm_errors(self, device, dtype):
        """Shape and layout validation errors raised by torch.sparse.sampled_addmm."""
        # test that the errors are the same for dense and sparse sampled versions
        # import re
        # shapes must be compatible for matrix multiplication
        a = make_tensor((2, 3), dtype=dtype, device=device)
        a_sparse = a.to_sparse_csr()
        with self.assertRaisesRegex(RuntimeError, r"cannot be multiplied"):
            torch.sparse.sampled_addmm(a_sparse, a, a)
        # mat1 must be a matrix
        with self.assertRaisesRegex(RuntimeError, r"Expected mat1 to be a matrix"):
            torch.sparse.sampled_addmm(a_sparse, a.unsqueeze(0), a)
        # mat2 must be a matrix
        with self.assertRaisesRegex(RuntimeError, r"Expected mat2 to be a matrix"):
            torch.sparse.sampled_addmm(a_sparse, a, a.unsqueeze(0))
        a = make_tensor((2, 2), dtype=dtype, device=device)
        b = make_tensor((3, 3), dtype=dtype, device=device)
        b_sparse = b.to_sparse_csr()
        # self's row count must match mat1's row count
        with self.assertRaisesRegex(RuntimeError, r"self dim 0 must match mat1 dim 0"):
            torch.sparse.sampled_addmm(b_sparse, a, a)
        b = make_tensor((2, 3), dtype=dtype, device=device)
        b_sparse = b.to_sparse_csr()
        # self's column count must match mat2's column count
        with self.assertRaisesRegex(RuntimeError, r"self dim 1 must match mat2 dim 1"):
            torch.sparse.sampled_addmm(b_sparse, a, a)
        a = make_tensor((2, 2), dtype=dtype, device=device)
        a_sparse = a.to_sparse_csr()
        # mat1 and mat2 must be strided (dense), not sparse
        with self.assertRaisesRegex(RuntimeError, r"Expected mat1 to have strided layout"):
            torch.sparse.sampled_addmm(a_sparse, a_sparse, a_sparse)
        with self.assertRaisesRegex(RuntimeError, r"Expected mat2 to have strided layout"):
            torch.sparse.sampled_addmm(a_sparse, a, a_sparse)
@skipMeta
@dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
def test_coo_csr_conversion(self, device, dtype):
for m, n in itertools.product([5, 2, 0], [5, 2, 0]):
size = (m, n)
dense = make_tensor(size, dtype=dtype, device=device)
coo_sparse = dense.to_sparse()
csr_sparse = coo_sparse.to_sparse_csr()
self.assertEqual(csr_sparse.to_dense(), dense)
@skipMeta
@dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
def test_csr_coo_conversion(self, device, dtype):
for m, n in itertools.product([5, 2, 0], [5, 2, 0]):
size = (m, n)
dense = make_tensor(size, dtype=dtype, device=device)
csr_sparse = dense.to_sparse_csr()
coo_sparse = csr_sparse.to_sparse()
self.assertEqual(coo_sparse.to_dense(), dense)
@ops(_sparse_csr_ops)
def test_sparse_csr_consistency(self, device, dtype, op):
samples = list(op.sample_inputs(device, dtype))
# Fail early to prevent silent success with this test
ndims_equals_2d = (s.input.ndim == 2 for s in samples)
if not any(ndims_equals_2d):
raise ValueError("Expected at least one 2D tensor in samples.")
for sample in samples:
assert torch.is_tensor(sample.input)
# Sparse CSR only supports 2D tensors as inputs
if sample.input.ndim != 2:
continue
expected = op(sample.input)
assert torch.is_tensor(expected)
output = op(sample.input.to_sparse_csr())
assert torch.is_tensor(output)
self.assertEqual(output.to_dense(), expected)
    # Currently, there is no rule in PyTorch for filling zeros in the outputs
    # from operations on Sparse CSR tensors. Hence only those operators are supported
    # which have 0->0 correspondence, example: sin(0) = 0, tan(0) = 0 but
    # cos(0) = 1 (and hence it's not supported).
    # Note: here, we do this test only for unary operators
    @ops(sparse_csr_unary_ufuncs)
    def test_zero_to_zero_correspondence_unary(self, device, dtype, op):
        """Verify f(0) == 0 for the supported unary ufuncs, both for implicit
        zeros and for explicitly stored zeros, and that the number of stored
        values (the sparsity pattern) is preserved."""
        zero = torch.zeros((1, 2), dtype=dtype, device=device)
        # A CSR tensor with a single explicitly stored zero value.
        tensor_explicit_zeros = torch.sparse_csr_tensor([0, 1], [1], [0], dtype=dtype, device=device)
        output_zero = op(zero)
        expected_zero = zero.to(output_zero.dtype)
        output_explicit_zeros = op(tensor_explicit_zeros).to_dense()
        expected_explicit_zeros = tensor_explicit_zeros.to_dense().to(output_explicit_zeros.dtype)
        for (output, expected) in [
                (output_zero, expected_zero),
                (output_explicit_zeros, expected_explicit_zeros)
        ]:
            self.assertEqual(output, expected, f"This operator ({op.name}) should not be supported for "
                             "Sparse CSR as it breaks 0->0 correspondence.")
        for inp in [zero.to_sparse_csr(), tensor_explicit_zeros]:
            # The op must not add or remove stored values.
            self.assertEqual(op(inp).values().numel(), inp.values().numel(),
                             f"{op.name} fails to preserve sparsity pattern.")
@ops(sparse_csr_unary_ufuncs)
def test_sparse_csr_unary_out(self, device, dtype, op):
samples = op.sample_inputs(device, dtype)
if not op.supports_out:
self.skipTest("Skipped! Out not supported")
for sample in samples:
assert torch.is_tensor(sample.input)
# Sparse CSR only supports 2D tensors as inputs
# Fail early to prevent silent success with this test
if sample.input.ndim != 2:
raise ValueError("Expected 2D tensor but got tensor with dimension: {sample.input.ndim}.")
sample.input = sample.input.to_sparse_csr()
expect = op(sample.input, *sample.args, **sample.kwargs)
out = self.genSparseCSRTensor(sample.input.size(), sample.input._nnz(),
device=sample.input.device, dtype=expect.dtype,
index_dtype=sample.input.crow_indices().dtype)
op(sample.input, *sample.args, **sample.kwargs, out=out)
self.assertEqual(out, expect)
@ops(sparse_csr_unary_ufuncs)
def test_sparse_csr_unary_inplace(self, device, dtype, op):
samples = op.sample_inputs(device, dtype)
if op.inplace_variant is None:
self.skipTest("Skipped! Inplace variant not supported!")
for sample in samples:
assert torch.is_tensor(sample.input)
# Sparse CSR only supports 2D tensors as inputs
# Fail early to prevent silent success with this test
if sample.input.ndim != 2:
raise ValueError("Expected 2D tensor but got tensor with dimension: {sample.input.ndim}.")
sample.input = sample.input.to_sparse_csr()
expect = op(sample.input, *sample.args, **sample.kwargs)
if not torch.can_cast(expect.dtype, dtype):
with self.assertRaisesRegex(RuntimeError, "result type"):
op.inplace_variant(sample.input, *sample.args, **sample.kwargs)
continue
if sample.input.is_complex() and op.name == "abs":
with self.assertRaisesRegex(RuntimeError, "not supported"):
op.inplace_variant(sample.input, *sample.args, **sample.kwargs)
continue
actual = op.inplace_variant(sample.input, *sample.args, **sample.kwargs)
self.assertIs(actual, sample.input)
self.assertEqual(actual, expect)
    @unittest.expectedFailure
    @ops(sparse_csr_unary_ufuncs, dtypes=OpDTypes.supported, allowed_dtypes=[torch.double, torch.cdouble])
    def test_autograd_sparse_csr_unary(self, device, dtype, op):
        """Autograd through unary ufuncs with sparse CSR input.

        Currently expected to fail: gradcheck raises NotImplementedError while
        computing the numerical Jacobian, and backward rejects the SparseCsr
        layout (see the inline notes below).
        """
        samples = list(op.sample_inputs(device, dtype))
        # Fail early to prevent silent success with this test
        ndims_equals_2d = (s.input.ndim == 2 for s in samples)
        if not any(ndims_equals_2d):
            raise ValueError("Expected at least one 2D tensor in samples.")
        for sample in samples:
            sparse_input = sample.input.to_sparse_csr().requires_grad_(True)
            def fn(input):
                output = op.gradcheck_wrapper(op.get_op(), input, *sample.args, **sample.kwargs)
                # Densify so gradcheck can compare against a strided reference.
                output = output.to_dense()
                if sample.output_process_fn_grad is not None:
                    return sample.output_process_fn_grad(output)
                return output
            # NotImplementedError inside gradcheck when computing numerical Jacobian
            self.assertTrue(torch.autograd.gradcheck(fn, (sparse_input,), fast_mode=False, check_sparse_nnz=True))
            # RuntimeError: Unsupported input layout: SparseCsr
            output = fn(sparse_input)
            output.backward(torch.ones_like(output))
            assert torch.is_tensor(sparse_input.grad)
    @dtypes(torch.float64)
    def test_autograd_dense_output_addmm(self, device, dtype):
        """gradcheck addmm/sparse.addmm w.r.t. the dense inputs, with the
        sparse CSR matrix operand held fixed (detached)."""
        from torch.testing._internal.common_methods_invocations import sample_inputs_addmm
        samples = list(sample_inputs_addmm(None, device, dtype, requires_grad=True))
        # Fail early to prevent silent success with this test
        ndims_equals_2d = (s.args[0].ndim == 2 for s in samples)
        if not any(ndims_equals_2d):
            raise ValueError("Expected at least one 2D tensor in samples to convert to sparse.")
        for sample in samples:
            # TODO: Remove detach once we have autograd support for CSR input
            a = sample.args[0].to_sparse_csr().detach()
            for addmm in [torch.addmm, torch.sparse.addmm]:
                def fn(c, b):
                    output = addmm(c, a, b, **sample.kwargs)
                    if sample.output_process_fn_grad is not None:
                        return sample.output_process_fn_grad(output)
                    return output
                self.assertTrue(torch.autograd.gradcheck(fn, [sample.input, sample.args[1]], fast_mode=True))
                # noncontiguous
                c = make_tensor(sample.input.shape, device=device, dtype=dtype, noncontiguous=True, requires_grad=True)
                b = make_tensor(sample.args[1].shape, device=device, dtype=dtype, noncontiguous=True, requires_grad=True)
                self.assertTrue(torch.autograd.gradcheck(fn, [c, b], fast_mode=True))
    @skipCUDAIfRocm
    @skipCPUIfNoMklSparse
    @dtypes(torch.float64)
    def test_autograd_dense_output_addmv(self, device, dtype):
        """gradcheck addmv w.r.t. the dense inputs, with the sparse CSR matrix
        operand held fixed (detached)."""
        from torch.testing._internal.common_methods_invocations import sample_inputs_addmv
        samples = list(sample_inputs_addmv(None, device, dtype, requires_grad=True))
        # Fail early to prevent silent success with this test
        ndims_equals_2d = (s.args[0].ndim == 2 for s in samples)
        if not any(ndims_equals_2d):
            raise ValueError("Expected at least one 2D tensor in samples to convert to sparse.")
        for sample in samples:
            # TODO: Remove detach once we have autograd support for CSR input
            a = sample.args[0].to_sparse_csr().detach()
            def fn(c, b):
                output = torch.addmv(c, a, b, **sample.kwargs)
                if sample.output_process_fn_grad is not None:
                    return sample.output_process_fn_grad(output)
                return output
            self.assertTrue(torch.autograd.gradcheck(fn, [sample.input, sample.args[1]], fast_mode=True))
            # noncontiguous
            c = make_tensor(sample.input.shape, device=device, dtype=dtype, noncontiguous=True, requires_grad=True)
            b = make_tensor(sample.args[1].shape, device=device, dtype=dtype, noncontiguous=True, requires_grad=True)
            self.assertTrue(torch.autograd.gradcheck(fn, [c, b], fast_mode=True))
    @ops(binary_ops_with_dense_output, dtypes=OpDTypes.supported, allowed_dtypes=[torch.double, ])
    def test_autograd_dense_output(self, device, dtype, op):
        """Gradcheck ops of the form op(sparse_csr, dense) -> dense.

        The sparse first operand is detached (no CSR autograd support yet);
        gradients are checked only w.r.t. the dense trailing args.
        """
        if op.name == "mv" and no_mkl_sparse and self.device_type == 'cpu':
            self.skipTest("MKL Sparse is not available")
        if op.name == "mv" and TEST_WITH_ROCM and self.device_type == 'cuda':
            # mv currently work only on CUDA
            self.skipTest("ROCm is not supported")

        samples = list(op.sample_inputs(device, dtype, requires_grad=True))

        # Fail early to prevent silent success with this test
        ndims_equals_2d = (s.input.ndim == 2 for s in samples)
        if not any(ndims_equals_2d):
            raise ValueError("Expected at least one 2D tensor in samples.")

        # Here we assume that the signature is op(sparse_input, dense_input) -> dense_output
        for sample in samples:
            # TODO: Remove detach once we have autograd support for CSR input
            sparse_input = sample.input.to_sparse_csr().detach()

            def fn(*args):
                output = op.gradcheck_wrapper(op.get_op(), sparse_input, *args, **sample.kwargs)
                if sample.output_process_fn_grad is not None:
                    return sample.output_process_fn_grad(output)
                return output

            self.assertTrue(torch.autograd.gradcheck(fn, sample.args, fast_mode=True))

            # noncontiguous inputs exercise the stride-handling paths
            args = [make_tensor(a.shape, device=device, dtype=dtype, noncontiguous=True, requires_grad=True) for a in sample.args]
            self.assertTrue(torch.autograd.gradcheck(fn, args, fast_mode=True))
@dtypes(*all_types_and_complex())
def test_direct_coo_csr_conversion(self, device, dtype):
for m, n in itertools.product([5, 2, 0], [5, 2, 0]):
size = (m, n)
dense = make_tensor(size, dtype=dtype, device=device)
coo_sparse = dense.to_sparse_coo()
self.assertEqual(coo_sparse.to_sparse_csr().to_sparse_coo(), coo_sparse)
@skipMeta
@dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
def test_sum(self, device, dtype):
def run_test(shape, nnz, index_type):
a = self.genSparseCSRTensor(shape, nnz, dtype=dtype, device=device, index_dtype=index_dtype)
self.assertEqual(a.sum(), a.values().sum())
if dtype in floating_types():
a.requires_grad_(True)
with self.assertRaisesRegex(RuntimeError,
("Function SumBackward0 returned an invalid gradient at " +
"index 0 - expected layout SparseCsr but got Strided")):
a.sum().backward()
for shape, index_dtype in itertools.product(
[(10, 5), (10, 10)],
[torch.int32, torch.int64]):
run_test(shape, 0, index_dtype)
run_test(shape, max(shape), index_dtype)
run_test(shape, shape[0] * shape[1], index_dtype)
@skipMeta
@dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
def test_transpose(self, device, dtype):
def run_test(shape, nnz, index_type, dim0, dim1):
a = self.genSparseCSRTensor(shape, nnz, dtype=dtype, device=device, index_dtype=index_dtype)
t = a.transpose(dim0, dim1)
self.assertEqual(t.to_dense(), a.to_dense().transpose(dim0, dim1))
for shape, index_dtype, (dim0, dim1) in itertools.product(
[(10, 5), (10, 10)],
[torch.int32, torch.int64],
[(0, 0), (0, 1)]):
run_test(shape, 0, index_dtype, dim0, dim1)
run_test(shape, max(shape), index_dtype, dim0, dim1)
run_test(shape, shape[0] * shape[1], index_dtype, dim0, dim1)
# TODO: This is a stopgap for a rigorous extension of our autograd tests
# to test the functionality of detach
@skipMeta
@dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
def test_exercise_detach(self, device, dtype):
shape = (3, 3)
nnz = 4
for index_dtype in [torch.int32, torch.int64]:
inp = self.genSparseCSRTensor(shape, nnz, dtype=dtype, device=device, index_dtype=index_dtype)
detached_inp = inp.detach()
self.assertEqual(inp, detached_inp)
# Generate per-device concrete test classes from the generic TestSparseCSR,
# e.g., TestSparseCSRCPU and TestSparseCSRCUDA
instantiate_device_type_tests(TestSparseCSR, globals())

if __name__ == '__main__':
    run_tests()
| 49.128614 | 130 | 0.595859 |
acef0244a754bc86956df976c3877551a7d58ccb | 18,256 | py | Python | supervisor/dispatchers.py | hellhound/supervisor | 4cd438b4f0815a3b170c4322994ea4b3562e7cf1 | [
"ZPL-2.1"
] | null | null | null | supervisor/dispatchers.py | hellhound/supervisor | 4cd438b4f0815a3b170c4322994ea4b3562e7cf1 | [
"ZPL-2.1"
] | null | null | null | supervisor/dispatchers.py | hellhound/supervisor | 4cd438b4f0815a3b170c4322994ea4b3562e7cf1 | [
"ZPL-2.1"
] | 2 | 2015-08-11T16:59:53.000Z | 2021-01-04T08:33:03.000Z | import warnings
import errno
from supervisor.medusa.asyncore_25 import compact_traceback
from supervisor.events import notify
from supervisor.events import EventRejectedEvent
from supervisor.events import ProcessLogStderrEvent
from supervisor.events import ProcessLogStdoutEvent
from supervisor.states import EventListenerStates
from supervisor import loggers
def find_prefix_at_end(haystack, needle):
    """Return the length of the longest proper prefix of `needle` that
    `haystack` ends with (0 if none). Used to detect a token that may be
    split across two reads of the output pipe."""
    for size in range(len(needle) - 1, 0, -1):
        if haystack.endswith(needle[:size]):
            return size
    # Matches the original fall-through: 0 for a non-empty needle,
    # -1 for the (never-used) empty-needle case.
    return min(len(needle) - 1, 0)
class PDispatcher:
    """ Asyncore dispatcher for mainloop, representing a process channel
    (stdin, stdout, or stderr). This class is abstract. """

    closed = False  # True once close() has been called

    def __repr__(self):
        cls_name = self.__class__.__name__
        return '<%s at %s for %s (%s)>' % (cls_name, id(self),
                                           self.process, self.channel)

    def readable(self):
        # Subclasses decide whether the mainloop should poll for reads.
        raise NotImplementedError

    def writable(self):
        # Subclasses decide whether the mainloop should poll for writes.
        raise NotImplementedError

    def handle_read_event(self):
        raise NotImplementedError

    def handle_write_event(self):
        raise NotImplementedError

    def handle_error(self):
        # Log the traceback of any unhandled exception raised while
        # servicing an event, then stop monitoring this channel.
        nil, exc_type, exc_value, tb_info = compact_traceback()

        message = 'uncaptured python exception, closing channel %s (%s:%s %s)' % (
            repr(self), exc_type, exc_value, tb_info)
        self.process.config.options.logger.critical(message)
        self.close()

    def close(self):
        # Idempotent: only log and flip the flag on the first call.
        if self.closed:
            return
        msg = 'fd %s closed, stopped monitoring %s' % (self.fd, self)
        self.process.config.options.logger.debug(msg)
        self.closed = True

    def flush(self):
        # No-op by default; output/input subclasses override as needed.
        pass
class POutputDispatcher(PDispatcher):
    """
    A Process Output (stdout/stderr) dispatcher. Serves several purposes:

    - capture output sent within <!--XSUPERVISOR:BEGIN--> and
      <!--XSUPERVISOR:END--> tags and signal a ProcessCommunicationEvent
      by calling notify(event).

    - route the output to the appropriate log handlers as specified in the
      config.
    """
    process = None  # process which "owns" this dispatcher
    channel = None  # 'stderr' or 'stdout'
    capturemode = False  # are we currently inside BEGIN/END capture tags
    mainlog = None  # the process' "normal" logger
    capturelog = None  # the logger used while we're in capturemode
    childlog = None  # the current logger (capturelog or mainlog)
    output_buffer = ''  # data read from the pipe but not yet logged

    def __init__(self, process, event_type, fd):
        """
        Initialize the dispatcher.

        `event_type` should be one of ProcessLogStdoutEvent or
        ProcessLogStderrEvent
        """
        self.process = process
        self.event_type = event_type
        self.fd = fd
        # event_type.channel is 'stdout' or 'stderr'
        self.channel = channel = self.event_type.channel

        self._setup_logging(process.config, channel)

        # Capture logging (BEGIN/END tag extraction) is only enabled when
        # the config sets a nonzero <channel>_capture_maxbytes.
        capture_maxbytes = getattr(process.config,
                                   '%s_capture_maxbytes' % channel)
        if capture_maxbytes:
            self.capturelog = loggers.handle_boundIO(
                self.process.config.options.getLogger(),
                fmt='%(message)s',
                maxbytes=capture_maxbytes,
            )

        # start routing output to the normal log
        self.childlog = self.mainlog

        # all code below is purely for minor speedups: precompute the
        # tokens/lengths and config flags consulted on every read
        begintoken = self.event_type.BEGIN_TOKEN
        endtoken = self.event_type.END_TOKEN
        self.begintoken_data = (begintoken, len(begintoken))
        self.endtoken_data = (endtoken, len(endtoken))
        self.mainlog_level = loggers.LevelsByName.DEBG
        config = self.process.config
        self.log_to_mainlog = config.options.loglevel <= self.mainlog_level
        self.stdout_events_enabled = config.stdout_events_enabled
        self.stderr_events_enabled = config.stderr_events_enabled

    def _setup_logging(self, config, channel):
        """
        Configure the main log according to the process' configuration and
        channel. Sets `mainlog` on self. Returns nothing.
        """
        logfile = getattr(config, '%s_logfile' % channel)
        if not logfile:
            return

        maxbytes = getattr(config, '%s_logfile_maxbytes' % channel)
        backups = getattr(config, '%s_logfile_backups' % channel)
        fmt = '%(message)s'
        if logfile == 'syslog':
            # legacy spelling: logfile == 'syslog' routes to syslog with the
            # process name prepended; the <channel>_syslog option replaces it
            warnings.warn("Specifying 'syslog' for filename is deprecated. "
                          "Use %s_syslog instead." % channel, DeprecationWarning)
            fmt = ' '.join((config.name, fmt))
        self.mainlog = loggers.handle_file(
            config.options.getLogger(),
            filename=logfile,
            fmt=fmt,
            rotating=not not maxbytes,  # optimization
            maxbytes=maxbytes,
            backups=backups)

        if getattr(config, '%s_syslog' % channel, False):
            fmt = config.name + ' %(message)s'
            loggers.handle_syslog(self.mainlog, fmt)

    def removelogs(self):
        # Remove and reopen the log files of both loggers (used by the
        # "clear logs" operations).
        for log in (self.mainlog, self.capturelog):
            if log is not None:
                for handler in log.handlers:
                    handler.remove()
                    handler.reopen()

    def reopenlogs(self):
        # Reopen log files (e.g. after external log rotation).
        for log in (self.mainlog, self.capturelog):
            if log is not None:
                for handler in log.handlers:
                    handler.reopen()

    def _log(self, data):
        """Route a chunk of child output to the active child log, the main
        supervisor log (at DEBG), and the PROCESS_LOG_* event listeners."""
        if data:
            config = self.process.config
            if config.options.strip_ansi:
                data = stripEscapes(data)
            if self.childlog:
                self.childlog.info(data)
            if self.log_to_mainlog:
                msg = '%(name)r %(channel)s output:\n%(data)s'
                config.options.logger.log(
                    self.mainlog_level, msg, name=config.name,
                    channel=self.channel, data=data)
            if self.channel == 'stdout':
                if self.stdout_events_enabled:
                    notify(
                        ProcessLogStdoutEvent(self.process,
                                              self.process.pid, data)
                    )
            else:  # channel == stderr
                if self.stderr_events_enabled:
                    notify(
                        ProcessLogStderrEvent(self.process,
                                              self.process.pid, data)
                    )

    def record_output(self):
        """Drain output_buffer: log plain output, and when capture is
        configured, switch capture mode on/off at BEGIN/END tokens."""
        if self.capturelog is None:
            # shortcut trying to find capture data
            data = self.output_buffer
            self.output_buffer = ''
            self._log(data)
            return

        # look for the END token while capturing, the BEGIN token otherwise
        if self.capturemode:
            token, tokenlen = self.endtoken_data
        else:
            token, tokenlen = self.begintoken_data

        if len(self.output_buffer) <= tokenlen:
            return  # not enough data

        data = self.output_buffer
        self.output_buffer = ''

        try:
            before, after = data.split(token, 1)
        except ValueError:
            # no full token in the data; if a prefix of the token ends the
            # chunk, hold it back in case the rest arrives on the next read
            after = None
            index = find_prefix_at_end(data, token)
            if index:
                self.output_buffer = self.output_buffer + data[-index:]
                data = data[:-index]
            self._log(data)
        else:
            # token found: log what preceded it, flip modes, and keep the
            # remainder for another pass
            self._log(before)
            self.toggle_capturemode()
            self.output_buffer = after

        if after:
            # recurse: the remainder may contain the matching token
            self.record_output()

    def toggle_capturemode(self):
        """Flip capture mode. On leaving capture mode, emit the captured
        data as a ProcessCommunication event and reset the capture log."""
        self.capturemode = not self.capturemode

        if self.capturelog is not None:
            if self.capturemode:
                self.childlog = self.capturelog
            else:
                for handler in self.capturelog.handlers:
                    handler.flush()
                data = self.capturelog.getvalue()
                channel = self.channel
                procname = self.process.config.name
                event = self.event_type(self.process, self.process.pid, data)
                notify(event)
                msg = "%(procname)r %(channel)s emitted a comm event"
                self.process.config.options.logger.debug(msg,
                                                         procname=procname,
                                                         channel=channel)
                # clear the capture buffer for the next BEGIN/END pair
                for handler in self.capturelog.handlers:
                    handler.remove()
                    handler.reopen()
                self.childlog = self.mainlog

    def writable(self):
        # output dispatchers never write to the child
        return False

    def readable(self):
        if self.closed:
            return False
        return True

    def handle_read_event(self):
        data = self.process.config.options.readfd(self.fd)
        self.output_buffer += data
        self.record_output()
        if not data:
            # if we get no data back from the pipe, it means that the
            # child process has ended. See
            # mail.python.org/pipermail/python-dev/2004-August/046850.html
            self.close()
class PEventListenerDispatcher(PDispatcher):
    """ An output dispatcher that monitors and changes a process'
    listener_state.

    Implements the supervisor event-listener protocol state machine:
    ACKNOWLEDGED -> (READY token) -> READY -> BUSY -> (RESULT line + payload)
    -> ACKNOWLEDGED, with UNKNOWN as the fatal sink state for protocol
    violations. """
    process = None  # process which "owns" this dispatcher
    channel = None  # 'stderr' or 'stdout'
    childlog = None  # the logger for raw listener output (if configured)
    state_buffer = ''  # data waiting to be reviewed for state changes

    READY_FOR_EVENTS_TOKEN = 'READY\n'
    RESULT_TOKEN_START = 'RESULT '
    READY_FOR_EVENTS_LEN = len(READY_FOR_EVENTS_TOKEN)
    RESULT_TOKEN_START_LEN = len(RESULT_TOKEN_START)

    def __init__(self, process, channel, fd):
        self.process = process
        # the initial state of our listener is ACKNOWLEDGED; this is a
        # "busy" state that implies we're awaiting a READY_FOR_EVENTS_TOKEN
        self.process.listener_state = EventListenerStates.ACKNOWLEDGED
        self.process.event = None
        self.result = ''      # result payload accumulated so far
        self.resultlen = None  # expected payload length (None until parsed)
        self.channel = channel
        self.fd = fd

        logfile = getattr(process.config, '%s_logfile' % channel)

        if logfile:
            maxbytes = getattr(process.config, '%s_logfile_maxbytes' % channel)
            backups = getattr(process.config, '%s_logfile_backups' % channel)
            self.childlog = loggers.handle_file(
                process.config.options.getLogger(),
                logfile,
                '%(message)s',
                rotating=not not maxbytes,  # optimization
                maxbytes=maxbytes,
                backups=backups,
            )

    def removelogs(self):
        # Remove and reopen the raw-output log file (used by "clear logs").
        if self.childlog is not None:
            for handler in self.childlog.handlers:
                handler.remove()
                handler.reopen()

    def reopenlogs(self):
        # Reopen the raw-output log file (e.g. after external log rotation).
        if self.childlog is not None:
            for handler in self.childlog.handlers:
                handler.reopen()

    def writable(self):
        # event listener dispatchers only read from the child
        return False

    def readable(self):
        if self.closed:
            return False
        return True

    def handle_read_event(self):
        data = self.process.config.options.readfd(self.fd)
        if data:
            self.state_buffer += data
            procname = self.process.config.name
            msg = '%r %s output:\n%s' % (procname, self.channel, data)
            self.process.config.options.logger.debug(msg)

            if self.childlog:
                if self.process.config.options.strip_ansi:
                    data = stripEscapes(data)
                self.childlog.info(data)
        else:
            # if we get no data back from the pipe, it means that the
            # child process has ended.  See
            # mail.python.org/pipermail/python-dev/2004-August/046850.html
            self.close()

        # every read (including EOF) may advance the protocol state machine
        self.handle_listener_state_change()

    def handle_listener_state_change(self):
        """Consume state_buffer according to the current listener state,
        recursing while there is still enough buffered data to act on."""
        data = self.state_buffer

        if not data:
            return

        process = self.process
        procname = process.config.name
        state = process.listener_state

        if state == EventListenerStates.UNKNOWN:
            # this is a fatal state: discard everything the listener sends
            self.state_buffer = ''
            return

        if state == EventListenerStates.ACKNOWLEDGED:
            if len(data) < self.READY_FOR_EVENTS_LEN:
                # not enough info to make a decision
                return
            elif data.startswith(self.READY_FOR_EVENTS_TOKEN):
                msg = '%s: ACKNOWLEDGED -> READY' % procname
                process.config.options.logger.debug(msg)
                process.listener_state = EventListenerStates.READY
                tokenlen = self.READY_FOR_EVENTS_LEN
                self.state_buffer = self.state_buffer[tokenlen:]
                process.event = None
            else:
                # anything other than the READY token is a protocol violation
                msg = '%s: ACKNOWLEDGED -> UNKNOWN' % procname
                process.config.options.logger.debug(msg)
                process.listener_state = EventListenerStates.UNKNOWN
                self.state_buffer = ''
                process.event = None
            if self.state_buffer:
                # keep going til its too short
                self.handle_listener_state_change()
            else:
                return

        elif state == EventListenerStates.READY:
            # the process sent some spurious data, be a hardass about it
            msg = '%s: READY -> UNKNOWN' % procname
            process.config.options.logger.debug(msg)
            process.listener_state = EventListenerStates.UNKNOWN
            self.state_buffer = ''
            process.event = None
            return

        elif state == EventListenerStates.BUSY:
            if self.resultlen is None:
                # we haven't begun gathering result data yet: expect a
                # "RESULT <len>\n" header line first
                pos = data.find('\n')
                if pos == -1:
                    # we can't make a determination yet, we dont have a full
                    # results line
                    return

                result_line = self.state_buffer[:pos]
                self.state_buffer = self.state_buffer[pos+1:]  # rid LF
                resultlen = result_line[self.RESULT_TOKEN_START_LEN:]
                try:
                    self.resultlen = int(resultlen)
                except ValueError:
                    # malformed header: reject the in-flight event
                    msg = ('%s: BUSY -> UNKNOWN (bad result line %r)'
                           % (procname, result_line))
                    process.config.options.logger.debug(msg)
                    process.listener_state = EventListenerStates.UNKNOWN
                    self.state_buffer = ''
                    notify(EventRejectedEvent(process, process.event))
                    process.event = None
                    return
            else:
                # accumulate up to resultlen bytes of payload
                needed = self.resultlen - len(self.result)

                if needed:
                    self.result += self.state_buffer[:needed]
                    self.state_buffer = self.state_buffer[needed:]
                    needed = self.resultlen - len(self.result)

                if not needed:
                    # full payload received; dispatch it and reset for the
                    # next event
                    self.handle_result(self.result)
                    self.process.event = None
                    self.result = ''
                    self.resultlen = None

            if self.state_buffer:
                # keep going til its too short
                self.handle_listener_state_change()
            else:
                return

    def handle_result(self, result):
        """Hand the result payload to the group's result handler and move to
        ACKNOWLEDGED (or UNKNOWN if the handler itself blows up)."""
        process = self.process
        procname = process.config.name

        try:
            self.process.group.config.result_handler(process.event, result)
            msg = '%s: BUSY -> ACKNOWLEDGED (processed)' % procname
            process.listener_state = EventListenerStates.ACKNOWLEDGED
        except RejectEvent:
            msg = '%s: BUSY -> ACKNOWLEDGED (rejected)' % procname
            process.listener_state = EventListenerStates.ACKNOWLEDGED
            notify(EventRejectedEvent(process, process.event))
        except:
            # NOTE(review): bare except deliberately catches any handler
            # failure so a broken handler can't wedge the dispatcher, but it
            # also swallows KeyboardInterrupt/SystemExit — consider
            # 'except Exception' here.
            msg = '%s: BUSY -> UNKNOWN' % procname
            process.listener_state = EventListenerStates.UNKNOWN
            notify(EventRejectedEvent(process, process.event))
        process.config.options.logger.debug(msg)
class PInputDispatcher(PDispatcher):
    """ Input (stdin) dispatcher: buffers data destined for the child
    process and writes it when the mainloop reports the fd writable. """
    process = None  # process which "owns" this dispatcher
    channel = None  # 'stdin'
    input_buffer = ''  # data waiting to be sent to the child process

    def __init__(self, process, channel, fd):
        self.process = process
        self.channel = channel
        self.fd = fd
        self.input_buffer = ''

    def writable(self):
        # only ask the mainloop for write events while data is pending
        if self.input_buffer and not self.closed:
            return True
        return False

    def readable(self):
        return False

    def flush(self):
        # other code depends on this raising EPIPE if the pipe is closed
        sent = self.process.config.options.write(self.fd,
                                                 self.input_buffer)
        self.input_buffer = self.input_buffer[sent:]

    def handle_write_event(self):
        if self.input_buffer:
            try:
                self.flush()
            # Fix: 'except OSError, why' is a syntax error on Python 3 and
            # indexing an exception (why[0]) was removed there too; the
            # 'as'/.args[0] forms below behave identically on Python 2.6+.
            except OSError as why:
                if why.args[0] == errno.EPIPE:
                    # child's stdin is gone: discard pending data and stop
                    # monitoring this channel
                    self.input_buffer = ''
                    self.close()
                else:
                    raise
ANSI_ESCAPE_BEGIN = '\x1b['
ANSI_TERMINATORS = ('H', 'f', 'A', 'B', 'C', 'D', 'R', 's', 'u', 'J',
                    'K', 'h', 'l', 'p', 'm')

def stripEscapes(string):
    """
    Remove all ANSI color escapes from the given string.

    Scans for '\\x1b[' sequences and drops every character up to and
    including the first terminator character that follows each one.
    """
    out = ''
    visible = True  # False while skipping an escape sequence
    pos = 0
    length = len(string)
    while pos < length:
        if visible:
            start = string.find(ANSI_ESCAPE_BEGIN, pos)
            if start == -1:
                # no further escapes: keep the rest verbatim
                return out + string[pos:]
            out += string[pos:start]
            pos = start
            visible = False
        elif string[pos] in ANSI_TERMINATORS:
            # end of the escape sequence; resume copying after it
            visible = True
        pos += 1
    return out
class RejectEvent(Exception):
    """ The exception type expected by a dispatcher when a handler wants
    to reject an event """

def default_handler(event, response):
    """Default result handler: accept only the literal 'OK' response and
    reject (via RejectEvent) anything else."""
    if response == 'OK':
        return
    raise RejectEvent(response)
| 34.97318 | 79 | 0.561076 |
acef0249d0d96b035ad112b0741bc72433b96c16 | 4,214 | py | Python | CAN stuff/liveimport.py | pojdrovic/AutoPen | 74b9853d356698a1c5e5b857769eb990d924269f | [
"Apache-2.0"
] | null | null | null | CAN stuff/liveimport.py | pojdrovic/AutoPen | 74b9853d356698a1c5e5b857769eb990d924269f | [
"Apache-2.0"
] | null | null | null | CAN stuff/liveimport.py | pojdrovic/AutoPen | 74b9853d356698a1c5e5b857769eb990d924269f | [
"Apache-2.0"
] | null | null | null | import csv
import matplotlib.pyplot as plt
import numpy as np
import math
from operator import itemgetter
import matplotlib.animation as animation
#class CANdata:
# def __init__(self):
# self.list = []
# def addrow(self,row):
# self.list.append(row)
# return
def parsedata(data, filename, skip_rows=37):
    """Append the CAN rows of `filename` to `data` (mutated in place).

    data: list that receives one list-of-strings per CSV row
    filename: path of the capture CSV file
    skip_rows: number of leading header lines to discard before the CSV
        rows begin (default 37, matching the original hard-coded behaviour)
    """
    with open(filename, newline='') as csvfile:
        canreader = csv.reader(csvfile, delimiter=',', quotechar='|')
        # Discard the header block; the None default keeps a file shorter
        # than skip_rows from raising StopIteration.
        for _ in range(skip_rows):
            next(canreader, None)
        data.extend(canreader)
def finduniques(list):
    """Return the set of distinct ARB IDs (column index 9) in `list`.

    The parameter name shadows the `list` builtin; it is kept unchanged for
    backward compatibility with existing callers. The intermediate list of
    the original implementation is replaced by a direct set comprehension.
    """
    return {row[9] for row in list}
def printArbId(list):
    """Print one line per ARB ID group: the ID (column 9 of the group's
    first row) and the number of captured messages, right-aligned."""
    for messages in list:
        arb_id = messages[0][9]
        print('{:>12} {:>12}'.format(arb_id, len(messages)))
def plotArbIdf(starttime,endtime,list,inc):
    """Live-plot per-bin call frequency for one ARB ID's rows.

    starttime/endtime: capture start/end timestamps (column 1 holds a
        per-row timestamp); rows are assumed time-ordered — TODO confirm
    list: the rows for a single ARB ID
    inc: bin width in seconds

    NOTE(review): `plotlist` is computed but never used. Also, the row that
    first exceeds the current bin is plotted-then-dropped (it is not counted
    in the next bin), and the final partial bin is never plotted — confirm
    whether that is intended.
    """
    plotlist = np.asarray(list)
    plt.ylabel('number of calls')
    plt.xlabel('time (s)')
    plt.ion()  # interactive mode so plt.pause() updates the figure live
#    for i in range(int(starttime),int(endtime)):
    tracker = inc  # right edge of the current time bin
    freq = 0       # calls counted in the current bin
    for x in list:
        if float(x[1])-starttime <= tracker:
            freq += 1
        else:
            plt.scatter(tracker,freq)
            plt.pause(0.005)
            freq = 0
            tracker += inc
#    freqlist = [0] * (int(endtime-starttime)+1)
#    for x in list:
#        timefloor = math.floor(float(x[1])-starttime)
#        freqlist[timefloor] += 1
#    plt.plot(freqlist, 'ro')
#    plt.show()
# plt.show()
def plotArbIdc(starttime,endtime,list,inc):
    """Live-plot the cumulative call count for one ARB ID's rows.

    Unlike plotArbIdf, `freq` is intentionally never reset, so each plotted
    point is the running total up to that bin edge.

    NOTE(review): `plotlist` is unused; the row that first exceeds the
    current bin edge is not counted before the point is plotted, and the
    final partial bin is never plotted — confirm intent.
    """
    plotlist = np.asarray(list)
    plt.ylabel('number of calls')
    plt.xlabel('time (s)')
    plt.ion()  # interactive mode so plt.pause() updates the figure live
#    for i in range(int(starttime),int(endtime)):
    tracker = inc  # right edge of the current time bin
    freq = 0       # cumulative call count
    xvars = []
    yvars = []
    for x in list:
        if float(x[1])-starttime <= tracker:
            freq += 1
        else:
            xvars.append(tracker)
            yvars.append(freq)
            plt.plot(xvars,yvars, 'r-')
            plt.scatter(tracker,freq)
            plt.pause(0.005)
            tracker += inc
def plotArbIdchanges(starttime,endtime,list,inc):
    """Live-plot the change in per-bin call frequency between consecutive
    bins for one ARB ID's rows (freq - prevfreq per bin edge).

    NOTE(review): `plotlist` is unused; the row that first exceeds the
    current bin edge is dropped from both adjacent bins, and the final
    partial bin is never plotted — confirm intent.
    """
    plotlist = np.asarray(list)
    plt.ylabel('number of calls')
    plt.xlabel('time (s)')
    plt.ion()  # interactive mode so plt.pause() updates the figure live
#    for i in range(int(starttime),int(endtime)):
    tracker = inc   # right edge of the current time bin
    freq = 0        # calls counted in the current bin
    prevfreq = 0    # calls counted in the previous bin
    xvars = []
    yvars = []
    for x in list:
        if float(x[1])-starttime <= tracker:
            freq += 1
        else:
            xvars.append(tracker)
            yvars.append(freq-prevfreq)
            plt.plot(xvars,yvars, 'g-')
            plt.scatter(tracker,freq-prevfreq)
            plt.pause(0.005)
            prevfreq = freq
            freq = 0
            tracker += inc
def main():
    """Interactive CLI: load can1.csv, group rows by ARB ID (column 9),
    then loop offering listing/plotting/export commands until interrupted
    (there is no quit command; exit with Ctrl-C)."""
    # data = CANdata();+
    data = []
    # NOTE(review): input file is hard-coded; the commented block below was
    # an interactive multi-file loader.
    parsedata(data,'can1.csv')
#    print ('Input filename (enter n if that is the last of the data): ')
#    userinput = input()
#    while (userinput != 'n'):
#        parsedata(data,userinput)
#        print ('Input filename (enter n if that is the last of the data): ')
#        userinput = input()
#    # print(sorted(data, key=itemgetter(9)))
    # Group rows by ARB ID: one sub-list per distinct value of column 9.
    uniquedata = []
    dataset = finduniques(data)
    for x in dataset:
        templist = [row for row in data if row[9] == x]
        uniquedata.append(templist)
    ##########uniquedata is where the data by ARB ID is stored
#    for x in uniquedata[3]:
#        print (x)
    while (True):
        print ('\n(1) list all ARB IDs')
        print ('(2) print ARB ID calls over time (frequency)')
        print ('(3) print ARB ID calls over time (cumulative)')
        print ('(4) print ARB ID calls over time (changes)')
        print ('(5) write data to file')
        command = input('Input command: ')
        inc = 1  # plotting bin width in seconds
        if (command == '1'):
            printArbId(uniquedata);
        elif (command == '2'):
            arbID = input('ARB ID to search for: ')
            for row in uniquedata:
                if row[0][9] == arbID:
                    # data[0]/data[-1] timestamps bound the capture window
                    plotArbIdf(float(data[0][1]),float(data[len(data)-1][1]), row, inc)
                    break;
        elif (command == '3'):
            arbID = input('ARB ID to search for: ')
            for row in uniquedata:
                if row[0][9] == arbID:
                    plotArbIdc(float(data[0][1]),float(data[len(data)-1][1]), row, inc)
                    break;
        elif (command == '4'):
            arbID = input('ARB ID to search for: ')
            for row in uniquedata:
                if row[0][9] == arbID:
                    plotArbIdchanges(float(data[0][1]),float(data[len(data)-1][1]), row, inc)
                    break;
        elif (command == '5'):
            writeto = input ('Enter file to write to: ')
            print ('Writing to ' + writeto + '.csv...')
            with open(writeto + '.csv', 'w') as csvfile:
                csvwrite = csv.writer(csvfile, quoting=csv.QUOTE_ALL)
                for iid in uniquedata:
                    for row in iid:
                        csvwrite.writerow(row)
            print ('Write complete.')

if __name__ == "__main__":
    main()
acef0302c0fbc11b2436b83da7c390e02631e2e7 | 2,581 | py | Python | sdk/python/pulumi_azure_native/eventgrid/v20200101preview/list_topic_shared_access_keys.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/eventgrid/v20200101preview/list_topic_shared_access_keys.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/eventgrid/v20200101preview/list_topic_shared_access_keys.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = [
'ListTopicSharedAccessKeysResult',
'AwaitableListTopicSharedAccessKeysResult',
'list_topic_shared_access_keys',
]
# NOTE: this file is auto-generated by the Pulumi SDK generator (see the file
# header); keep edits limited to documentation.
@pulumi.output_type
class ListTopicSharedAccessKeysResult:
    """
    Shared access keys of the Topic
    """
    def __init__(__self__, key1=None, key2=None):
        # Generated output types validate each field before storing it via
        # pulumi.set; keys are optional strings.
        if key1 and not isinstance(key1, str):
            raise TypeError("Expected argument 'key1' to be a str")
        pulumi.set(__self__, "key1", key1)
        if key2 and not isinstance(key2, str):
            raise TypeError("Expected argument 'key2' to be a str")
        pulumi.set(__self__, "key2", key2)

    @property
    @pulumi.getter
    def key1(self) -> Optional[str]:
        """
        Shared access key1 for the topic.
        """
        return pulumi.get(self, "key1")

    @property
    @pulumi.getter
    def key2(self) -> Optional[str]:
        """
        Shared access key2 for the topic.
        """
        return pulumi.get(self, "key2")
class AwaitableListTopicSharedAccessKeysResult(ListTopicSharedAccessKeysResult):
    """Awaitable wrapper generated by Pulumi: the dead `if False: yield`
    turns __await__ into a generator so the result can be awaited while
    resolving immediately to a plain ListTopicSharedAccessKeysResult."""
    # pylint: disable=using-constant-test
    def __await__(self):
        if False:
            yield self
        return ListTopicSharedAccessKeysResult(
            key1=self.key1,
            key2=self.key2)
def list_topic_shared_access_keys(resource_group_name: Optional[str] = None,
                                  topic_name: Optional[str] = None,
                                  opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableListTopicSharedAccessKeysResult:
    """
    Shared access keys of the Topic


    :param str resource_group_name: The name of the resource group within the user's subscription.
    :param str topic_name: Name of the topic
    :param pulumi.InvokeOptions opts: Options controlling the invoke call.
    """
    __args__ = dict()
    __args__['resourceGroupName'] = resource_group_name
    __args__['topicName'] = topic_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    # Provider invoke for the 2020-01-01-preview Event Grid API; returns the
    # raw result, re-wrapped below in the awaitable generated type.
    __ret__ = pulumi.runtime.invoke('azure-native:eventgrid/v20200101preview:listTopicSharedAccessKeys', __args__, opts=opts, typ=ListTopicSharedAccessKeysResult).value

    return AwaitableListTopicSharedAccessKeysResult(
        key1=__ret__.key1,
        key2=__ret__.key2)
| 32.670886 | 168 | 0.666021 |
acef03a5a8277cf1656a82082297d94c80faefbc | 10,128 | py | Python | analysis/urls.py | SACGF/variantgrid | 515195e2f03a0da3a3e5f2919d8e0431babfd9c9 | [
"RSA-MD"
] | 5 | 2021-01-14T03:34:42.000Z | 2022-03-07T15:34:18.000Z | analysis/urls.py | SACGF/variantgrid | 515195e2f03a0da3a3e5f2919d8e0431babfd9c9 | [
"RSA-MD"
] | 551 | 2020-10-19T00:02:38.000Z | 2022-03-30T02:18:22.000Z | analysis/urls.py | SACGF/variantgrid | 515195e2f03a0da3a3e5f2919d8e0431babfd9c9 | [
"RSA-MD"
] | null | null | null | from analysis.grids import AnalysesGrid, NodeColumnSummaryGrid, KaromappingAnalysesGrid, AnalysisTemplatesGrid, \
AnalysisNodeIssuesGrid, NodeOntologyGenesGrid
from analysis.views import views, views_json, views_grid, views_karyomapping, views_autocomplete
from library.django_utils.jqgrid_view import JQGridView
from variantgrid.perm_path import perm_path
urlpatterns = [
perm_path('analyses/list/', views.analysis_list, name='analyses'),
perm_path('analysis_templates/', views.analysis_templates, name='analysis_templates'),
perm_path('<int:analysis_id>/', views.view_analysis, name='analysis'),
perm_path('<int:analysis_id>/<int:active_node_id>/', views.view_analysis, name='analysis_node'),
perm_path('clone_analysis/<int:analysis_id>/', views_json.clone_analysis, name='clone_analysis'),
perm_path('create_analysis_from_template/<genome_build_name>', views.create_analysis_from_template, name='create_analysis_from_template'),
perm_path('trio_wizard/<int:cohort_id>/<int:sample1_id>/<int:sample2_id>/<int:sample3_id>/', views.trio_wizard, name='trio_wizard'),
# Templates
perm_path('analysis_template/<pk>/save/', views_json.analysis_template_save, name='analysis_template_save'),
perm_path('analysis_template/<pk>/list/', views.analysis_templates_list, name='analysis_templates_list'),
# Node editor
perm_path('node/view/<int:analysis_version>/<int:node_id>/<int:node_version>/<slug:extra_filters>/', views.node_view, name='node_view'),
perm_path('node_update/<int:node_id>/', views_json.NodeUpdate.as_view(), name='node_update'),
perm_path('node_debug/<int:analysis_version>/<int:node_id>/<int:node_version>/<slug:extra_filters>/', views.node_debug, name='node_debug'),
perm_path('node_doc/<int:node_id>/', views.node_doc, name='node_doc'),
perm_path('node_load/<int:node_id>/', views.node_load, name='node_load'),
perm_path('node_cancel_load/<int:node_id>/', views.node_cancel_load, name='node_cancel_load'),
perm_path('node/column_summary/<int:analysis_version>/<int:node_id>/<int:node_version>/<slug:extra_filters>/<slug:grid_column_name>/<int:significant_figures>/', views.node_column_summary, name='node_column_summary'),
perm_path('node/node_snp_matrix/<int:node_id>/<int:node_version>/<slug:conversion>/<int:significant_figures>/', views.node_snp_matrix, name='node_snp_matrix'),
perm_path('node/graph/<int:analysis_version>/<int:node_id>/<int:node_version>/<slug:graph_type_id>/<slug:cmap>/', views.node_data_graph, name='node_data_graph'),
perm_path('node/cohort_zygosity_filters/<int:cohort_node_id>/<int:cohort_id>/', views.cohort_zygosity_filters, name='cohort_zygosity_filters'),
perm_path('node/vcf_locus_filters/<int:node_id>/<int:vcf_id>/', views.vcf_locus_filters, name='vcf_locus_filters'),
perm_path('node/sample_vcf_locus_filters/<int:node_id>/<int:sample_id>/', views.sample_vcf_locus_filters, name='sample_vcf_locus_filters'),
perm_path('node/cohort_vcf_locus_filters/<int:node_id>/<int:cohort_id>/', views.cohort_vcf_locus_filters, name='cohort_vcf_locus_filters'),
perm_path('node/pedigree_vcf_locus_filters/<int:node_id>/<int:pedigree_id>/', views.pedigree_vcf_locus_filters, name='pedigree_vcf_locus_filters'),
# Node JSON
perm_path('node/<int:node_id>/data/', views_json.node_data, name='node_data'),
perm_path('analysis/<int:analysis_id>/node/create/<node_type>/', views_json.node_create, name='node_create'),
perm_path('analysis/<int:analysis_id>/nodes/copy/', views_json.nodes_copy, name='nodes_copy'),
perm_path('analysis/<int:analysis_id>/nodes/delete/', views_json.nodes_delete, name='nodes_delete'),
perm_path('analysis/<int:analysis_id>/nodes/status/', views_json.nodes_status, name='nodes_status'),
perm_path('create_filter_child/<int:node_id>/', views_json.create_filter_child, name='create_filter_child'),
perm_path('create_extra_filter_child/<int:node_id>/<slug:extra_filters>/', views_json.create_extra_filter_child, name='create_extra_filter_child'),
perm_path('create_selected_child/<int:node_id>/', views_json.create_selected_child, name='create_selected_child'),
perm_path('analysis/<int:analysis_id>/node_versions/', views_json.analysis_node_versions, name='analysis_node_versions'),
perm_path('analysis/<int:analysis_id>/edit_and_grid/', views.analysis_editor_and_grid, name='analysis_editor_and_grid'),
perm_path('analysis/<int:analysis_id>/edit_and_grid/stand_alone/', views.stand_alone_analysis_editor_and_grid, name='standalone_analysis_editor_and_grid'),
perm_path('analysis/<int:analysis_id>/set_panel_size/', views_json.analysis_set_panel_size, name='analysis_set_panel_size'),
perm_path('analysis/<int:node_id>/node_populate_clingen_alleles/', views_json.node_populate_clingen_alleles,
name='node_populate_clingen_alleles'),
perm_path('analysis/<int:analysis_id>/settings/lock', views.analysis_settings_lock, name='analysis_settings_lock'),
perm_path('analysis/<int:analysis_id>/settings/', views.view_analysis_settings, name='analysis_settings'),
perm_path('analysis/<int:analysis_id>/settings_details_tab/', views.analysis_settings_details_tab, name='analysis_settings_details_tab'),
perm_path('analysis/<int:analysis_id>/settings_node_counts_tab/', views.analysis_settings_node_counts_tab, name='analysis_settings_node_counts_tab'),
perm_path('analysis/<int:analysis_id>/settings_template_run_tab/', views.analysis_settings_template_run_tab,
name='analysis_settings_template_run_tab'),
perm_path('analysis/<int:analysis_id>/reload/', views_json.analysis_reload, name='analysis_reload'),
perm_path('analysis/<int:analysis_id>/input_samples/', views.analysis_input_samples, name='analysis_input_samples'),
perm_path('node_graph/<int:node_id>/<int:graph_type_id>/<slug:cmap>/', views.node_graph, name='node_graph'),
perm_path('column_summary_boxplot/<int:node_id>/<label>/<slug:variant_column>/', views.column_summary_boxplot, name='column_summary_boxplot'),
perm_path('analysis/set_variant_tag/<slug:location>/', views_json.set_variant_tag, name='set_variant_tag'),
perm_path('set_variant_selected/<int:node_id>/', views_json.set_variant_selected, name='set_variant_selected'),
perm_path('classification/create_for_variant_tag/<int:variant_tag_id>', views.CreateClassificationForVariantTagView.as_view(),
name='create_classification_for_variant_tag'),
perm_path('create_classification_for_analysis/<int:analysis_id>',
views.create_classification_for_analysis, name='create_classification_for_analysis'),
# Node Data (bottom right window)
perm_path('node_data_grid/cfg/<int:analysis_version>/<int:node_id>/<int:node_version>/<slug:extra_filters>/', views.node_data_grid, name='node_data_grid'),
perm_path('node_async_wait/<int:analysis_version>/<int:node_id>/<int:node_version>/<slug:extra_filters>/', views.node_async_wait, name='node_async_wait'),
perm_path('node_errors/<int:analysis_version>/<int:node_id>/<int:node_version>/<slug:extra_filters>/', views.node_errors, name='node_errors'),
perm_path('node_method_description/<int:node_id>/<int:node_version>', views.node_method_description, name='node_method_description'),
# Analysis templates
perm_path('templates/variable/<int:node_id>/', views_json.analysis_template_variable, name='analysis_template_variable'),
# Grids
perm_path('node_grid/export/', views_grid.node_grid_export, name='node_grid_export'),
perm_path('node_grid/cfg/<int:analysis_version>/<int:node_id>/<int:node_version>/<slug:extra_filters>/', views_grid.NodeGridConfig.as_view(), name='node_grid_config'),
perm_path('node_grid/handler/', views_grid.NodeGridHandler.as_view(), name='node_grid_handler'),
perm_path('node_column_summary/grid/<int:node_id>/<int:node_version>/<slug:extra_filters>/<slug:variant_column>/<int:significant_figures>/<slug:op>/', JQGridView.as_view(grid=NodeColumnSummaryGrid, csv_download=True), name='node_column_summary_grid'),
perm_path('analyses/grid/<slug:op>/', JQGridView.as_view(grid=AnalysesGrid, delete_row=True), name='analyses_grid'),
perm_path('analysis_templates/grid/<slug:op>/', JQGridView.as_view(grid=AnalysisTemplatesGrid, delete_row=True), name='analysis_templates_grid'),
perm_path('analysis_issues/grid/<slug:op>/',
JQGridView.as_view(grid=AnalysisNodeIssuesGrid), name='analysis_node_issues_grid'),
perm_path('node/ontology/genes/grid/<int:node_id>/<int:version>/<slug:op>/',
JQGridView.as_view(grid=NodeOntologyGenesGrid), name='node_ontology_genes_grid'),
perm_path('analysis_issues', views.view_analysis_issues, name='analysis_issues'),
# Mutational Signature
perm_path('view_mutational_signature/<int:pk>/', views.view_mutational_signature, name='view_mutational_signature'),
# karyomapping
perm_path('karyomapping/analyses/', views_karyomapping.karyomapping_analyses, name='karyomapping_analyses'),
perm_path('karyomapping/create_for_trio/<int:trio_id>/', views_karyomapping.create_karyomapping_analysis_for_trio_id, name='create_karyomapping_analysis_for_trio'),
perm_path('karyomapping/view_karyomapping_analysis/<int:pk>/', views_karyomapping.view_karyomapping_analysis, name='view_karyomapping_analysis'),
perm_path('karyomapping/view_karyomapping_gene/<int:pk>/', views_karyomapping.view_karyomapping_gene, name='view_karyomapping_gene'),
perm_path('karyomapping/download_karyomapping_gene_csv/<int:pk>/', views_karyomapping.download_karyomapping_gene_csv, name='download_karyomapping_gene_csv'),
perm_path('karyomapping/analyses/grid/<slug:op>/', JQGridView.as_view(grid=KaromappingAnalysesGrid, delete_row=True), name='karyomapping_analyses_grid'),
# Autocompletes
perm_path('autocomplete/Analysis/', views_autocomplete.AnalysisAutocompleteView.as_view(), name='analysis_autocomplete'),
perm_path('autocomplete/AnalysisTemplate/', views_autocomplete.AnalysisTemplateAutocompleteView.as_view(),
name='analysis_template_autocomplete'),
]
| 88.069565 | 255 | 0.785644 |
acef03ef94f80be128848ff0cf72c4e918f76f87 | 3,181 | py | Python | gibson/data/generate_data.py | rainprob/GibsonEnv | e0d0bc614713c676cb303bf9f11ca6a98713e0e0 | [
"MIT"
] | 731 | 2018-02-26T18:35:05.000Z | 2022-03-23T04:00:09.000Z | gibson/data/generate_data.py | Shubodh/GibsonEnv | 38274874d7c2c2a87efdb6ee529f2b366c5219de | [
"MIT"
] | 111 | 2018-04-19T01:00:22.000Z | 2022-03-18T17:43:50.000Z | gibson/data/generate_data.py | Shubodh/GibsonEnv | 38274874d7c2c2a87efdb6ee529f2b366c5219de | [
"MIT"
] | 153 | 2018-02-27T04:38:40.000Z | 2022-03-28T08:10:39.000Z | import numpy as np
import ctypes as ct
import cv2
import sys
import argparse
from gibson.data.datasets import ViewDataSet3D
import torch
from torchvision import datasets, transforms
from torch.autograd import Variable
import time
from numpy import cos, sin
import matplotlib.pyplot as plt
from PIL import Image
import os
import time
from multiprocessing import Pool, cpu_count
from scipy.signal import convolve2d
from scipy.interpolate import griddata
import scipy
import torch.nn.functional as F
from torchvision import transforms
dll=np.ctypeslib.load_library('../core/render/render_cuda_f','.')
# In[6]:
def render(imgs, depths, pose, poses, tdepth):
    """Reproject the source views into one panorama via the CUDA renderer.

    Args:
        imgs: sequence of square HxWx3 uint8 source images (all same size).
        depths: matching per-view depth maps; flattened before the call.
        pose: unused -- kept so existing callers (which pass poses[0]) keep
            working.
        poses: sequence of 4x4 float pose matrices, one per source view.
        tdepth: target depth map; channel 0 is scaled by 128 to float32 and
            passed to the renderer, which may update it in place.

    Returns:
        (show, target_depth): the H x 2H x 3 uint8 render buffer written by
        the C/CUDA renderer, and the scaled target-depth buffer.
    """
    showsz = imgs[0].shape[0]
    show = np.zeros((showsz, showsz * 2, 3), dtype='uint8')
    target_depth = (128 * tdepth[:, :, 0]).astype(np.float32)
    imgs = np.array(imgs)
    depths = np.array(depths).flatten()
    # Apply a fixed translation offset (+1, +2, +1) to every source pose.
    rpose = np.eye(4).astype(np.float32)
    rpose[0, -1] = 1
    rpose[1, -1] = 2
    rpose[2, -1] = 1
    pose_after = np.array(
        [rpose.dot(poses[i]).astype(np.float32) for i in range(len(imgs))])
    # The renderer fills `show` (and possibly `target_depth`) through the
    # raw pointers passed below.
    dll.render(ct.c_int(len(imgs)),
               ct.c_int(imgs[0].shape[0]),
               ct.c_int(imgs[0].shape[1]),
               ct.c_int(1),
               ct.c_int(1),
               imgs.ctypes.data_as(ct.c_void_p),
               depths.ctypes.data_as(ct.c_void_p),
               pose_after.ctypes.data_as(ct.c_void_p),
               show.ctypes.data_as(ct.c_void_p),
               target_depth.ctypes.data_as(ct.c_void_p))
    return show, target_depth
# In[7]:
def generate_data(args):
    """Render and save one training sample; safe to run under Pool.map.

    Args:
        args: (idx, dataset, outf) tuple -- packed into a single argument so
            the function can be mapped over by multiprocessing.Pool.

    For sample `idx`, writes into `outf`:
        show<idx>.png   -- reprojected (warped) source panorama
        target<idx>.png -- ground-truth target image
        data_<idx>.npz  -- arrays: source (render), depth, target
    Samples whose .npz already exists are skipped, so re-runs are resumable.
    """
    idx, d, outf = args
    print(idx)
    filename = "%s/data_%d.npz" % (outf, idx)
    if os.path.isfile(filename):
        return  # already generated by a previous run
    data = d[idx]  # This operation stalls 95% of the time, CPU heavy
    sources = data[0]
    target = data[1]
    source_depths = data[2]
    target_depth = data[3]
    poses = [item.numpy() for item in data[-1]]
    show, _ = render(sources, source_depths, poses[0], poses, target_depth)
    Image.fromarray(show).save('%s/show%d.png' % (outf, idx))
    Image.fromarray(target).save('%s/target%d.png' % (outf, idx))
    np.savez(file=filename, source=show, depth=target_depth, target=target)
# Command-line interface definition.
parser = argparse.ArgumentParser()
parser.add_argument('--debug', action='store_true', help='debug mode')
parser.add_argument('--dataroot', required=True, help='dataset path')
parser.add_argument('--outf', type=str, default='', help='path of output folder')

if __name__ == '__main__':
    # The __main__ guard is required for multiprocessing on spawn-start
    # platforms (Windows/macOS), where worker processes re-import this
    # module; it also keeps `import generate_data` side-effect free.
    opt = parser.parse_args()
    d = ViewDataSet3D(root=opt.dataroot, transform=np.array,
                      mist_transform=np.array, seqlen=5,
                      off_3d=False, train=False)
    print(len(d))
    # NOTE: pool size of 10 preserved from the original; cpu_count() is
    # already imported above if tuning is wanted.
    p = Pool(10)
    p.map(generate_data, [(idx, d, opt.outf) for idx in range(len(d))])
    # Shut the pool down cleanly instead of leaking worker processes.
    p.close()
    p.join()
| 28.657658 | 128 | 0.634706 |
acef040cb0c934e1fc3f37d939740f07cbe55653 | 1,489 | py | Python | app/core/tests/test_models.py | vurghus-minar/recipe-app-api | 87a245ce0ef3caa5dcff163cfa8fad03ee3be1bf | [
"MIT"
] | null | null | null | app/core/tests/test_models.py | vurghus-minar/recipe-app-api | 87a245ce0ef3caa5dcff163cfa8fad03ee3be1bf | [
"MIT"
] | null | null | null | app/core/tests/test_models.py | vurghus-minar/recipe-app-api | 87a245ce0ef3caa5dcff163cfa8fad03ee3be1bf | [
"MIT"
] | null | null | null | from django.test import TestCase
from django.contrib.auth import get_user_model
class ModelTests(TestCase):
    """Unit tests for the project's custom user model and its manager."""

    # Shared fixtures.  The tests below expect the manager to normalize the
    # email by lower-casing the domain part only: 'Test@TEST.COM' ->
    # 'Test@test.com' (presumably via the manager's normalize_email --
    # confirm against the manager implementation).
    email = 'Test@TEST.COM'
    normalized_email = 'Test@test.com'
    password = 'password123'

    def test_create_user_with_email_successful(self):
        # Test creating a new user with an email is successful
        user = get_user_model().objects.create_user(
            email=ModelTests.email,
            password=ModelTests.password
        )
        self.assertEqual(user.email, ModelTests.normalized_email)
        # check_password verifies against the hashed password.
        self.assertTrue(user.check_password(ModelTests.password))

    def test_new_user_email_normalized(self):
        # Test the email for a new user is normalized
        user = get_user_model().objects.create_user(
            email=ModelTests.email,
            password=ModelTests.password
        )
        self.assertEqual(user.email, ModelTests.normalized_email)

    def test_new_user_invalid_email(self):
        # Test creating user with no email raises error
        with self.assertRaises(ValueError):
            get_user_model().objects.create_user(
                None,
                password=ModelTests.password
            )

    def test_create_new_superuser(self):
        # Test creating new superuser sets both admin flags.
        user = get_user_model().objects.create_superuser(
            email=ModelTests.email,
            password=ModelTests.password
        )
        self.assertTrue(user.is_superuser)
        self.assertTrue(user.is_staff)
| 31.020833 | 65 | 0.664876 |
acef04b8e1d56fce849fbe2031f40e71d58e6c93 | 1,073 | py | Python | invirtualenv_plugins/parsedconfig.py | BenLloydPearson/invirtualenv | 5103eece3a998499fa260413ef7e57baa57555c4 | [
"BSD-3-Clause"
] | 15 | 2017-03-13T03:35:15.000Z | 2021-08-31T09:34:14.000Z | invirtualenv_plugins/parsedconfig.py | BenLloydPearson/invirtualenv | 5103eece3a998499fa260413ef7e57baa57555c4 | [
"BSD-3-Clause"
] | 53 | 2016-04-28T20:49:01.000Z | 2021-06-18T16:40:00.000Z | invirtualenv_plugins/parsedconfig.py | BenLloydPearson/invirtualenv | 5103eece3a998499fa260413ef7e57baa57555c4 | [
"BSD-3-Clause"
] | 10 | 2016-05-10T19:22:18.000Z | 2020-06-30T18:24:58.000Z | import logging
import os
from invirtualenv.config import generate_parsed_config_file
from invirtualenv.plugin_base import InvirtualenvPlugin
logger = logging.getLogger(__name__)
class InvirtualenvParsedConfig(InvirtualenvPlugin):
    """invirtualenv plugin that emits a fully parsed deploy.conf.

    Rather than building a real package, run_package_command() renders the
    deployment configuration with all values expanded and reports the name
    of the generated file.
    """

    package_formats = ['parsed_deploy_conf']
    default_config_filename = 'deploy.conf.parsed'
    package_template = None

    def __init__(self, *args, **kwargs):
        super(InvirtualenvParsedConfig, self).__init__(*args, **kwargs)
        # Cache the raw template text when a configuration file is present.
        if os.path.exists(self.config_file):
            with open(self.config_file) as handle:
                contents = handle.read()
            self.package_template = contents
            logger.debug('Read template %r', self.package_template)

    def run_package_command(self, package_hashes, wheel_dir='wheels'):
        """Generate the parsed configuration file and return its filename."""
        if not os.path.exists(self.config_file):
            raise FileNotFoundError('The invirtualenv configuration file %r was not found' % self.config_file)
        generate_parsed_config_file('deploy.conf', self.default_config_filename)
        return self.default_config_filename
| 37 | 110 | 0.740913 |
acef06519764d60757228874e90ceb634652d3f5 | 8,677 | py | Python | docs/conf.py | georgehristov/data | 1d28e3b5dd8934d075f79a46c6f623398d465453 | [
"MIT"
] | 284 | 2016-05-13T13:00:58.000Z | 2022-03-27T21:13:25.000Z | docs/conf.py | georgehristov/data | 1d28e3b5dd8934d075f79a46c6f623398d465453 | [
"MIT"
] | 629 | 2016-04-25T12:22:33.000Z | 2022-03-21T16:52:04.000Z | docs/conf.py | georgehristov/data | 1d28e3b5dd8934d075f79a46c6f623398d465453 | [
"MIT"
] | 54 | 2016-06-05T11:50:19.000Z | 2021-12-22T12:24:11.000Z | # -*- coding: utf-8 -*-
# Agile Data documentation build configuration file
import sys
import os
import sphinx_rtd_theme
from sphinx.highlighting import lexers
from pygments.lexers.web import PhpLexer
# Register PHP lexers that accept source starting inline (no leading <?php)
# and show line numbers in highlighted blocks.
# NOTE(review): this setup is repeated at the bottom of this file WITHOUT
# linenos=1; the later registration overwrites these entries.
lexers['php'] = PhpLexer(startinline=True, linenos=1)
lexers['php-annotations'] = PhpLexer(startinline=True, linenos=1)
primary_domain = 'php'
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinxcontrib.phpdomain'
#'sphinxcontrib_phpautodoc'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Agile Data'
copyright = u'2016, Agile Toolkit'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.2'
# The full version, including alpha/beta/rc tags.
release = '1.2-latest'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
highlight_language = 'php'
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'AgileDataDoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
#latex_documents = [
#('index', 'DSQL.tex', u'DSQL Documentation',
#u'Agile Toolkit', 'manual'),
#]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'agile-data', u'Agile Data Documentation',
[u'Agile Toolkit'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'AgileData', u'Agile Data Documentation',
u'Agile Toolkit', 'Agile Data', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
# NOTE(review): duplicate of the lexer registration near the top of this
# file.  Because it runs later, these assignments win -- and they omit
# linenos=1, so PHP code blocks render without line numbers.  Consider
# consolidating the two blocks.
from sphinx.highlighting import lexers
from pygments.lexers.web import PhpLexer

lexers['php'] = PhpLexer(startinline=True)
lexers['php-annotations'] = PhpLexer(startinline=True)
primary_domain = "php"  # redundant: already set to 'php' at the top of the file
| 31.552727 | 81 | 0.719719 |
acef06a45a6ca8b7d8d92e203abe4aebbbf8d2b0 | 303 | py | Python | main.py | Rhymmor/create-calendar-event-bot | 30227a9fa3ec9f3069ec389767ddf2b8aeafe968 | [
"MIT"
] | 1 | 2018-02-28T10:05:35.000Z | 2018-02-28T10:05:35.000Z | main.py | Rhymmor/create-calendar-event-bot | 30227a9fa3ec9f3069ec389767ddf2b8aeafe968 | [
"MIT"
] | null | null | null | main.py | Rhymmor/create-calendar-event-bot | 30227a9fa3ec9f3069ec389767ddf2b8aeafe968 | [
"MIT"
] | null | null | null | import logging
from bot import Bot
def set_logging():
    """Install a basic root-logger configuration: timestamped INFO output."""
    log_format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    logging.basicConfig(format=log_format, level=logging.INFO)
def main():
    """Configure logging, then construct and run the bot."""
    set_logging()
    Bot().start()
if __name__ == "__main__":
    # Guard keeps `import main` side-effect free (the bot previously started
    # on import) and is the conventional script entry point.
    try:
        main()
    except KeyboardInterrupt:
        # Ctrl-C is the normal way to stop the bot; exit without a traceback.
        exit()
| 17.823529 | 86 | 0.594059 |
acef081ce82ec88e957caf555085ac226f3b1cad | 3,719 | py | Python | homophonic_cipher.py | himrasmussen/PyCipher | 215b19a2d1c8e1b90fb641d10407670c4fac2660 | [
"MIT"
] | null | null | null | homophonic_cipher.py | himrasmussen/PyCipher | 215b19a2d1c8e1b90fb641d10407670c4fac2660 | [
"MIT"
] | null | null | null | homophonic_cipher.py | himrasmussen/PyCipher | 215b19a2d1c8e1b90fb641d10407670c4fac2660 | [
"MIT"
] | null | null | null | # a homophonic cipher
# needs to have a homophoinic letter substitution table
import copy
from tkinter import Tk
from tkinter.filedialog import askopenfilename
from cryptobase import CryptoBase
from homophonic_table_creator import TableCreator
from letter_distribution import LangData
class HomophonicCipher(CryptoBase):
    """Homophonic substitution cipher.

    Each plaintext letter maps to a pool of number strings; encryption pops
    through the pool so repeated letters get different ciphertext tokens,
    refilling a pool from a pristine backup when it empties.  The table is
    selected or generated interactively (one line per letter:
    '<letter> <num> <num> ...').
    NOTE(review): `msg`, `alphabet`, `new_msg`, `mode` and `done()` are
    presumably provided by CryptoBase -- confirm against that class.
    """

    def __init__(self, *args, **kwargs):
        self.has_table = None   # "yes"/"no" answer from the user prompt
        self.key_backup = {}    # pristine copy of the table, used to refill pools
        super(HomophonicCipher, self).__init__(*args, **kwargs)

    # check if the user has a homophonic substitution table and act accordingly
    def does_user_have_the_table(self):
        choices = ["no", "yes"]
        print("Do you have a homophonic substitution table?")
        for idx, choice in enumerate(choices):
            print("{}: {}".format(idx, choice))
        self.has_table = choices[int(input("Enter a number please: "))]  # yes or no

    # if user has table, use it, else make it
    def get_or_make_table(self):
        # if the user has the table, ask for the absolute path to it
        if self.has_table == "yes":
            # create a file explorer windows for user to select the key file
            print("Select your homophonic substitution table")
            Tk().withdraw()  # remove excess windows
            self.key_file = askopenfilename()  # opens a dialog
        else:
            # Build a table from a language word list, then recurse once to
            # take the "yes" branch and let the user pick the new file.
            print("You now select your language word list")
            langdata = LangData()
            langdata.main()
            TableCreator(langdata.letter_distribution).excecute()
            self.has_table = "yes"
            self.get_or_make_table()

    # import the homophonic letter substitution table
    def import_substitution_table(self):
        with open(self.key_file) as f:
            self.key = {}
            table_data_lines = f.read().splitlines()
            # separate the letter from the numbers on each line
            for line in table_data_lines:
                cur_letter = line[0]  # extract letter
                numbers = line[2:].split()  # extract numbers
                self.key[cur_letter] = numbers
        # make a backup key for use later (deep copy so pops don't touch it)
        self.key_backup = copy.deepcopy(self.key)

    # encrypt the message
    def encrypt(self):
        print(self.msg)
        # encrypt every character in the message
        for char in self.msg:
            if char in self.alphabet:
                # NOTE(review): the ternary's `char in self.alphabet` test is
                # redundant -- already guaranteed by the enclosing if.
                self.new_msg += self.key[char].pop() if char in self.alphabet else char
                # if all the numbers for the letter have been used, reinitialize the numbers
                if not self.key[char]:
                    self.key[char] = self.key_backup[char]
            # Tokens are space-separated so decrypt() can split() them.
            # NOTE(review): indentation reconstructed from a flattened dump;
            # assumed to run once per input character -- confirm.
            self.new_msg += " "

    # decrypt the message
    def decrypt(self):
        for number in self.msg.split():
            # Linear scan of the whole table per token (original comment:
            # "optimer?").  TODO: an inverted number->letter map would make
            # this O(1) per token.  NOTE(review): no break after a match --
            # assumes each number appears under exactly one letter.
            for letter, numbers in self.key.items():
                if number in numbers:
                    self.new_msg += letter

    # excecute the operation (sic -- misspelling kept for backward
    # compatibility; TableCreator.excecute() uses the same spelling)
    def excecute(self, mode):
        if mode == "encrypt":
            self.encrypt()
        elif mode == "decrypt":
            self.decrypt()
        print(self.new_msg)
        self.done()
if __name__ == "__main__":
    # Interactive round-trip demo: encrypt a message, then feed the
    # ciphertext into a second cipher instance to decrypt it.  Both phases
    # prompt the user for (or generate) the substitution table.
    cipher = HomophonicCipher(key="foo", msg="bla bla bla", mode="encrypt")
    cipher.does_user_have_the_table()
    cipher.get_or_make_table()
    cipher.import_substitution_table()
    cipher.excecute(cipher.mode)
    # Reuse the ciphertext produced above as the message to decrypt.
    cipher = HomophonicCipher(key="foo", msg=cipher.new_msg, mode="decrypt")
    cipher.does_user_have_the_table()
    cipher.get_or_make_table()
    cipher.import_substitution_table()
    cipher.excecute(cipher.mode)
| 35.759615 | 96 | 0.607421 |
acef089ca197349029e1081a33ee33b06847262f | 3,590 | py | Python | predict/json/json-video.py | wjbKimberly/AlphaPose-RMPE-wjb | 926ad2781e647cc5fe218e323739469054886c00 | [
"Apache-2.0"
] | 6 | 2018-10-10T18:47:32.000Z | 2019-06-04T07:55:51.000Z | predict/json/json-video.py | XiaoqingWang/AlphaPose | c2a1f51bfe8445662966d4cf62a098d8d3b373c4 | [
"Apache-2.0"
] | null | null | null | predict/json/json-video.py | XiaoqingWang/AlphaPose | c2a1f51bfe8445662966d4cf62a098d8d3b373c4 | [
"Apache-2.0"
] | 2 | 2018-10-09T02:30:29.000Z | 2019-06-06T06:17:54.000Z | # coding: utf-8
import numpy as np
from tqdm import tqdm
import matplotlib.pyplot as plt
import os
from PIL import Image
import json
import shutil
import argparse
def parse_args():
    """Parse input arguments."""
    cli = argparse.ArgumentParser(description='')
    cli.add_argument('--outputpath', dest='outputpath', help='path of output', default="")
    cli.add_argument('--inputpath', dest='inputpath', help='path of inputpath', default="")
    return cli.parse_args()
def display_pose(intputpath, outputpath, imgname):
    """Draw every detected skeleton for `imgname` and save the overlay.

    Reads detections from the module-level `rmpe_results` dict
    (imgname -> list of people, each with a flat 'keypoints' list that is
    reshaped to (n_joints, 3): x, y and a third component used as a score).
    16 joints are drawn with the MPII color/pair tables, 17 with the COCO
    tables.  The rendered figure is written to <outputpath>/RENDER/<basename>.

    NOTE(review): the first parameter is spelled `intputpath` in the
    original; kept as-is since it is positional for existing callers.
    """
    img = Image.open(os.path.join(intputpath, imgname))
    width, height = img.size
    fig = plt.figure(figsize=(width / 10, height / 10), dpi=10)
    plt.imshow(img)
    for pid in range(len(rmpe_results[imgname])):
        # keypoints come flat as [x, y, s, x, y, s, ...]
        pose = np.array(rmpe_results[imgname][pid]['keypoints']).reshape(-1, 3)[:, :3]
        if pose.shape[0] == 16:
            # MPII skeleton (16 joints).  `mpii_part_names` is unused but
            # documents the joint order behind the indices in `pairs`.
            mpii_part_names = ['RAnkle','RKnee','RHip','LHip','LKnee','LAnkle','Pelv','Thrx','Neck','Head','RWrist','RElbow','RShoulder','LShoulder','LElbow','LWrist']
            colors = ['m', 'b', 'b', 'r', 'r', 'b', 'b', 'r', 'r', 'm', 'm', 'm', 'r', 'r','b','b']
            pairs = [[8,9],[11,12],[11,10],[2,1],[1,0],[13,14],[14,15],[3,4],[4,5],[8,7],[7,6],[6,2],[6,3],[8,12],[8,13]]
            colors_skeleton = ['m', 'b', 'b', 'r', 'r', 'b', 'b', 'r', 'r', 'm', 'm', 'r', 'r', 'b','b']
            # Joints: marker size scales with the joint's score component.
            for idx_c, color in enumerate(colors):
                plt.plot(np.clip(pose[idx_c,0],0,width), np.clip(pose[idx_c,1],0,height), marker='o', color=color, ms=40*np.mean(pose[idx_c,2]))
            # Limbs: width and opacity scale with the pair's mean score.
            for idx in range(len(colors_skeleton)):
                plt.plot(np.clip(pose[pairs[idx],0],0,width),np.clip(pose[pairs[idx],1],0,height), 'r-',
                         color=colors_skeleton[idx],linewidth=40*np.mean(pose[pairs[idx],2]), alpha=np.mean(pose[pairs[idx],2]))
        elif pose.shape[0] == 17:
            # COCO skeleton (17 joints); note the smaller marker/linewidth
            # scale (4 vs 40) and the extra 0.12 alpha factor.
            coco_part_names = ['Nose','LEye','REye','LEar','REar','LShoulder','RShoulder','LElbow','RElbow','LWrist','RWrist','LHip','RHip','LKnee','RKnee','LAnkle','RAnkle']
            colors = ['r', 'r', 'r', 'r', 'r', 'y', 'y', 'y', 'y', 'y', 'y', 'g', 'g', 'g','g','g','g']
            pairs = [[0,1],[0,2],[1,3],[2,4],[5,6],[5,7],[7,9],[6,8],[8,10],[11,12],[11,13],[13,15],[12,14],[14,16],[6,12],[5,11]]
            colors_skeleton = ['y', 'y', 'y', 'y', 'b', 'b', 'b', 'b', 'b', 'r', 'r', 'r', 'r', 'r','m','m']
            for idx_c, color in enumerate(colors):
                plt.plot(np.clip(pose[idx_c,0],0,width), np.clip(pose[idx_c,1],0,height), marker='o', color=color, ms=4*np.mean(pose[idx_c,2]))
            for idx in range(len(colors_skeleton)):
                plt.plot(np.clip(pose[pairs[idx],0],0,width),np.clip(pose[pairs[idx],1],0,height),'r-',
                         color=colors_skeleton[idx],linewidth=4*np.mean(pose[pairs[idx],2]), alpha=0.12*np.mean(pose[pairs[idx],2]))
    # Save exactly the image area: no axes, no padding, y axis flipped to
    # image coordinates.
    plt.axis('off')
    ax = plt.gca()
    ax.set_xlim([0,width])
    ax.set_ylim([height,0])
    extent = ax.get_window_extent().transformed(fig.dpi_scale_trans.inverted())
    fig.savefig(os.path.join(outputpath,'RENDER',imgname.split('/')[-1]),pad_inches = 0.0, bbox_inches=extent, dpi=13)
    plt.close()
if __name__ == '__main__':
    # Load the AlphaPose results JSON and render a skeleton overlay for
    # every image referenced in it.
    args = parse_args()
    outputpath = args.outputpath
    inputpath = args.inputpath
    jsonpath = os.path.join(args.outputpath, "POSE/alpha-pose-results-forvis.json")
    with open(jsonpath) as f:
        # Module-level on purpose: display_pose() reads rmpe_results as a global.
        rmpe_results = json.load(f)
    for imgname in tqdm(rmpe_results.keys()):
        display_pose(inputpath, outputpath, imgname)
| 53.58209 | 174 | 0.576323 |
acef093a102b8a033ade7fce82e2b6ce548a1384 | 5,961 | py | Python | bouncer/ext/config.py | meltmedia/bouncer | 34fee7d8651fc2d6a3dc4886e9a0f2bd279eb69e | [
"MIT"
] | null | null | null | bouncer/ext/config.py | meltmedia/bouncer | 34fee7d8651fc2d6a3dc4886e9a0f2bd279eb69e | [
"MIT"
] | 1 | 2015-03-20T22:28:18.000Z | 2015-03-20T22:28:18.000Z | bouncer/ext/config.py | meltmedia/bouncer | 34fee7d8651fc2d6a3dc4886e9a0f2bd279eb69e | [
"MIT"
] | null | null | null | class BaseLoader(object):
    # Abstract loader interface: concrete subclasses (YamlLoader and
    # JsonLoader below) supply the file extension and the parser.
    def extension(self):
        """Return the file suffix (without dot) this loader handles."""
        raise NotImplementedError()

    def load(self, filename):
        """Parse ``filename`` and return the configuration contents."""
        raise NotImplementedError()
class YamlLoader(BaseLoader):
    """Configuration loader for YAML files."""

    def extension(self):
        """File suffix handled by this loader."""
        return 'yaml'

    def load(self, filename):
        """Parse ``filename`` as YAML and return the decoded object.

        Uses ``yaml.safe_load``: the bare ``yaml.load(stream)`` call it
        replaces is deprecated (and raises TypeError on PyYAML >= 6, which
        requires an explicit Loader) and can construct arbitrary Python
        objects via YAML tags.  safe_load builds plain Python values only;
        if a config legitimately needs custom tags, switch deliberately to
        ``yaml.load(f, Loader=yaml.FullLoader)``.
        """
        import yaml
        with open(filename, 'rt') as f:
            return yaml.safe_load(f)
class JsonLoader(BaseLoader):
    """Configuration loader for JSON files."""

    def extension(self):
        """File suffix handled by this loader."""
        return 'json'

    def load(self, filename):
        """Parse ``filename`` as JSON and return the decoded object."""
        import json
        with open(filename, 'rt') as handle:
            return json.load(handle)
class Configuration(object):
    """Layered application configuration.

    Values are assembled, in increasing precedence, from:
      1. packaged defaults (etc/defaults.<ext>, resolved in _load_default),
      2. the standard config files (etc/<basename>.<ext> and ~/.<basename>),
      3. environment variables prefixed with <BASENAME>_.
    Keys are then readable as attributes via __getattr__.

    NOTE(review): the builtins ``basestring`` (parse_defaults) and ``reduce``
    (_get_nested) are used unqualified, so this module targets Python 2.
    """

    def __init__(self, basename, loader=None):
        # Stdlib imports are done per-method throughout this class ("module
        # hack", per the original comments); __init__ follows the pattern.
        import logging
        self.BASE_NAME = basename
        # e.g. basename "myapp" -> environment variables MYAPP_*
        self.ENV_PREFIX = "%s_" % self.BASE_NAME.upper()
        self.loader = loader or YamlLoader()
        self.DEFAULT_CONFIG = None
        # Search order for user configuration files.
        self.DEFAULT_FILES = [
            "etc/%s.%s" % (self.BASE_NAME, self.loader.extension()),
            "~/.%s" % self.BASE_NAME
        ]
        self.data = {}
        self.log = logging.getLogger(__name__)
        self._load_default()

    def __getattr__(self, key):
        # Fall back to the configuration data for unknown attributes so that
        # config.some_key works.  Only invoked when normal lookup fails.
        if key in self.data:
            return self.data[key]
        else:
            raise AttributeError(key)

    def _load_default(self):
        """Load the packaged defaults file(s) into DEFAULT_CONFIG."""
        # these imports are needed due to the module hack used below.
        import os
        # Project root: two directories above this module.
        root = os.path.abspath('%s/../../' % os.path.dirname(__file__))
        filenames = []
        if os.path.exists('etc/defaults.%s' % self.loader.extension()):
            filenames.append('etc/defaults.%s' % self.loader.extension())
        default_path = '%s/etc/defaults.%s' % (root, self.loader.extension())
        if os.path.exists(default_path):
            filenames.append(default_path)
        self.DEFAULT_CONFIG = self.load(
            filenames=filenames,
            base={}, use_default=False)

    def get(self):
        """ Return a copy of the current configuration
        """
        # these imports are needed due to the module hack used below.
        import copy
        return copy.deepcopy(self.data)

    def load(self, filenames=None, base=None, use_default=True):
        """ Load configuration

        Kwargs:
            filenames (array): An array of filenames to load.
                if None use defaults (default None)
            base (dict): Base configuration dictionary to update (default None)
            use_default (bool): Load the default configuration settings first.
                If base is specified use_default will have
                no effect.

        Returns:
            dict. The loaded configuration settings.
        """
        # these imports are needed due to the module hack used below.
        import copy
        import os
        self.data = {}
        if use_default and self.DEFAULT_CONFIG:
            self.data = copy.deepcopy(self.DEFAULT_CONFIG)
        if base:
            # NOTE: `base` REPLACES self.data entirely, discarding any
            # defaults copied just above (hence the docstring's caveat).
            self.data = base
        if filenames is None:
            filenames = self.DEFAULT_FILES
        for filename in filenames:
            if filename.startswith('~/'):
                filename = os.path.expanduser(filename)
            if not os.path.exists(filename):
                self.log.debug('file not found "%s", skipping' % filename)
                continue
            # Later files override earlier ones key by key.
            self.data.update(self.loader.load(filename))
        # populate the defaults as needed
        self.parse_defaults()
        # set any variables from the environment
        self._set_from_env()
        return self.data

    def parse_defaults(self):
        """Apply the templates under the 'defaults' key to matching sections."""
        if not self.data:
            self.log.debug('no data to parse defaults from')
            return
        if 'defaults' not in self.data:
            self.log.debug('no defaults in data to parse')
            return
        for default in self.data['defaults']:
            if default not in self.data:
                continue
            # Can't do this on strings
            if isinstance(self.data[default], basestring):
                continue
            # Check if this is a dictionary
            if isinstance(self.data[default], dict):
                self._apply_dict_defaults(default)
            # Check if this is acts like list
            elif hasattr(self.data[default], "__getitem__") or \
                    hasattr(self.data[default], "__iter__"):
                self._apply_list_defaults(default)

    def _apply_list_defaults(self, default):
        # convert to a set and call union
        result = set(self.data[default])\
            .union(self.data['defaults'][default])
        # set back to a list (NOTE: set union loses ordering and duplicates)
        self.data[default] = list(result)

    def _apply_dict_defaults(self, default):
        # For each named entry, start from a deep copy of the default
        # template and overlay the entry's own values on top.
        import copy
        for name in self.data[default]:
            data = copy.deepcopy(self.data['defaults'][default])
            data.update(self.data[default][name])
            self.data[default][name] = data

    def _set_from_env(self):
        """ Look for the ENV_PREFIX in the environment and set the
            configuration value from that variable split on underscores
            e.g. MYAPP_ACCOUNTS_ONE_SECRET="ONE" becomes accounts.one.secret
        """
        import os
        env_opts = [i for i in os.environ if i.startswith(self.ENV_PREFIX)]
        for opt in env_opts:
            # NOTE(review): splitting on '_' means configuration keys that
            # themselves contain underscores cannot be addressed this way.
            mapList = [p.lower() for p in opt.split('_')[1:]]
            try:
                # get first to prevent adding extra irrelevant data
                self._get_nested(mapList)
                self._set_nested(mapList, os.environ[opt])
            except KeyError as e:
                # silently ignore KeyError
                self.log.debug('No value found for %s in %s.' % (e, mapList))
                continue

    def _get_nested(self, mapList):
        # Walk nested dictionaries along mapList (Python 2 builtin `reduce`).
        return reduce(lambda d, k: d[k], mapList, self.data)

    def _set_nested(self, mapList, value):
        # Assign into the parent mapping of the final key in mapList.
        self._get_nested(mapList[:-1])[mapList[-1]] = value
| 30.106061 | 78 | 0.574904 |
acef094cff47b12be6473d9b3a012b13cbfafd1b | 3,116 | py | Python | src/klein/_decorators.py | JorikSchellekens/klein | 88affef26004e2d167fbdf9cfcf9566e2fbbc683 | [
"MIT"
] | null | null | null | src/klein/_decorators.py | JorikSchellekens/klein | 88affef26004e2d167fbdf9cfcf9566e2fbbc683 | [
"MIT"
] | null | null | null | src/klein/_decorators.py | JorikSchellekens/klein | 88affef26004e2d167fbdf9cfcf9566e2fbbc683 | [
"MIT"
] | null | null | null | from functools import wraps
def bindable(bindable):
    """
    Mark a method as a "bindable" method.

    If a L{Klein.app} resource is found on an instance object (i.e. is
    returned from C{YourObject().app.resource()}), it passes C{self} from
    that instance to all of its routes, giving a 2-argument signature of
    C{self, request}.  Found at module scope (C{app = Klein()} with
    C{@app.route(...)} globally), routes receive only C{request}.
    Decorators that sit between C{@route} and the user's function need one
    consistent signature either way, so a C{@bindable} method is always
    invoked as C{instance, request} -- with C{instance} being C{None}
    whenever the L{Klein} object is unbound.

    @return: its argument, modified to mark it as unconditionally
        requiring an instance argument.
    """
    setattr(bindable, "__klein_bound__", True)
    return bindable
def modified(modification, original, modifier=None):
    """
    Annotate a callable as a modified wrapper of an original callable.

    @param modification: A name for the type of modification, for example
        "form processor" or "request forwarder"; this will be tacked on to
        the name of the resulting function.

    @param modifier: Another decorator which, if given, will be applied to
        the function that decorates this function. Additionally, I{any new
        attributes} set on the decorated function by C{modifier} will be
        I{copied to} C{original}. This allows attributes set by "inner"
        decorators such as L{klein.Form.handler} and L{klein.app.Klein.route}
        to set attributes that will be visible at the top level.

    @return: A new callable; this may have a different argument signature
        or return value, and is only related to C{original} in the sense
        that it likely calls it.
    """
    def decorator(wrapper):
        # Both `named(...)` and `functools.wraps(original)` mutate and
        # return their argument, so `result` here is the *same object* as
        # `wrapper` -- the aliasing below depends on that.
        result = named(modification + " for " + original.__name__)(
            wraps(original)(wrapper)
        )
        result.__original__ = original
        if modifier is not None:
            # Snapshot wrapper's attribute names, run the modifier (which
            # may set new attributes on `result`, i.e. on `wrapper`), then
            # copy any newly-added attributes onto `original` so inner
            # decorators' markers stay visible at the top level.
            before = set(wrapper.__dict__.keys())
            result = modifier(result)
            after = set(wrapper.__dict__.keys())
            for key in after - before:
                setattr(original, key, wrapper.__dict__[key])
        return result
    return decorator
def named(name):
    """
    Decorator factory: rename the decorated callable to the given name.

    Both C{__name__} and C{__qualname__} are overwritten; the callable is
    otherwise returned unchanged.
    """
    label = str(name)

    def rename(target):
        target.__name__ = label
        target.__qualname__ = label
        return target

    return rename
def originalName(function):
    """
    Return the user-specified name of C{function}, following the chain of
    C{__original__} attributes left behind by C{modified} back to the
    innermost wrapped callable.
    """
    current = function
    while True:
        previous = getattr(current, "__original__", None)
        if previous is None:
            return current.__name__
        current = previous
acef097cd4c29080237bf90ede6fadf716cb1a61 | 2,158 | py | Python | sgkit/__init__.py | tomwhite/sgkit | 4297ba58df5ec85085adcfeab2917dcd61d7e11e | [
"Apache-2.0"
] | null | null | null | sgkit/__init__.py | tomwhite/sgkit | 4297ba58df5ec85085adcfeab2917dcd61d7e11e | [
"Apache-2.0"
] | null | null | null | sgkit/__init__.py | tomwhite/sgkit | 4297ba58df5ec85085adcfeab2917dcd61d7e11e | [
"Apache-2.0"
] | null | null | null | from pkg_resources import DistributionNotFound, get_distribution
from .display import display_genotypes
from .distance.api import pairwise_distance
from .io.dataset import load_dataset, save_dataset
from .io.vcfzarr_reader import read_vcfzarr
from .model import (
DIM_ALLELE,
DIM_PLOIDY,
DIM_SAMPLE,
DIM_VARIANT,
create_genotype_call_dataset,
create_genotype_dosage_dataset,
)
from .stats.aggregation import (
count_call_alleles,
count_cohort_alleles,
count_variant_alleles,
individual_heterozygosity,
infer_call_ploidy,
infer_sample_ploidy,
infer_variant_ploidy,
sample_stats,
variant_stats,
)
from .stats.association import gwas_linear_regression
from .stats.conversion import convert_probability_to_call
from .stats.hwe import hardy_weinberg_test
from .stats.pc_relate import pc_relate
from .stats.pca import pca
from .stats.popgen import (
Fst,
Garud_H,
Tajimas_D,
divergence,
diversity,
observed_heterozygosity,
pbs,
)
from .stats.preprocessing import filter_partial_calls
from .stats.regenie import regenie
from .testing import simulate_genotype_call_dataset
from .window import window
# Resolve the installed package version from distribution metadata; fall
# back to "unknown" when the distribution is not installed (e.g. running
# from a plain source checkout).
try:
    __version__ = get_distribution(__name__).version
except DistributionNotFound:
    __version__ = "unknown"

# Public API surface re-exported by `from sgkit import *`.
# NOTE(review): "variables" is listed below but no corresponding import is
# visible in this module -- confirm a `from . import variables` (or
# similar) exists, otherwise star-imports will raise AttributeError.
__all__ = [
    "DIM_ALLELE",
    "DIM_PLOIDY",
    "DIM_SAMPLE",
    "DIM_VARIANT",
    "create_genotype_call_dataset",
    "convert_probability_to_call",
    "count_variant_alleles",
    "count_call_alleles",
    "count_cohort_alleles",
    "create_genotype_dosage_dataset",
    "display_genotypes",
    "filter_partial_calls",
    "gwas_linear_regression",
    "read_vcfzarr",
    "regenie",
    "hardy_weinberg_test",
    "individual_heterozygosity",
    "infer_call_ploidy",
    "infer_sample_ploidy",
    "infer_variant_ploidy",
    "sample_stats",
    "variant_stats",
    "diversity",
    "divergence",
    "Fst",
    "Garud_H",
    "Tajimas_D",
    "pbs",
    "pc_relate",
    "simulate_genotype_call_dataset",
    "variables",
    "observed_heterozygosity",
    "pca",
    "window",
    "load_dataset",
    "save_dataset",
    "pairwise_distance",
]
| 24.247191 | 64 | 0.734476 |
acef09ab7c6a587b08f6c4b310f1dee7750fe731 | 6,873 | py | Python | ENIAC/api/loop_statistics/loop_indicators/kdj_indicator.py | Ahrli/fast_tools | 144d764e4f169d3ab3753dcc6a79db9f9449de59 | [
"Apache-2.0"
] | 1 | 2021-12-11T16:33:47.000Z | 2021-12-11T16:33:47.000Z | ENIAC/api/loop_statistics/loop_indicators/kdj_indicator.py | webclinic017/fast_tools | 144d764e4f169d3ab3753dcc6a79db9f9449de59 | [
"Apache-2.0"
] | null | null | null | ENIAC/api/loop_statistics/loop_indicators/kdj_indicator.py | webclinic017/fast_tools | 144d764e4f169d3ab3753dcc6a79db9f9449de59 | [
"Apache-2.0"
] | 3 | 2021-11-22T09:46:43.000Z | 2022-01-28T22:33:07.000Z | import backtrader.indicators as btind
from . import compare_price as compare
from .base_indicator import iBaseIndicator, iKdj
class iKdjCompare(iBaseIndicator):
'''
因子:kdj比较
传入参数:
rule = {"args": [20], #n日KDj
"logic":{"compare": "eq","byValue": 1,"byMax": 5,}, # 周期结果比较
}
'''
lines = ('kdjc',)
params = dict(rule=list())
def __init__(self):
super(iKdjCompare, self).__init__()
self.kdj = iKdj(self.data, period=self.args[0], a=self.args[1], b=self.args[2])
def next(self):
kc = compare(self.kdj.k[0], self.logic)
dc = compare(self.kdj.d[0], self.logic)
jc = compare(self.kdj.j[0], self.logic)
self.lines.kdjc[0] = kc and dc and jc
@classmethod
def judge(cls, cond):
return int(cond['args'][0])
class iKdjCrossGolden(iBaseIndicator):
    '''
    Factor: golden cross -- the K line crossing up through the D line.

    Input parameters:
        rule = {"args": [20, a, b],  # KDJ period n plus smoothing factors a, b
                "logic": {"compare": "eq", "byValue": 1, "byMax": 5},  # comparison on the K-D gap at the cross
               }
    '''
    lines = ('goldencross',)
    params = dict(rule=list())

    def __init__(self):
        super(iKdjCrossGolden, self).__init__()
        self.kdj = iKdj(self.data, period=self.args[0], a=self.args[1], b=self.args[2])
        # CrossOver is +1 when K crosses above D, -1 when below.
        self.cross = btind.CrossOver(self.kdj.k, self.kdj.d)

    def next(self):
        if self.cross[0] == 1:
            # On an upward cross, additionally test the K-D gap against `logic`.
            self.lines.goldencross[0] = compare(self.kdj.k[0] - self.kdj.d[0], self.logic)
        else:
            self.lines.goldencross[0] = False

    @classmethod
    def judge(cls, cond):
        # Warm-up bars required before this indicator produces valid output.
        return int(cond['args'][0])
class iKdjCrossDie(iBaseIndicator):
    '''
    Factor: death cross -- the K line crossing down through the D line.

    Input parameters:
        rule = {"args": [20, 3, 3],  # KDJ period n plus smoothing factors a, b
                "logic": {"compare": "eq", "byValue": 1, "byMax": 5},  # comparison on the D-K gap at the cross
               }
    '''
    lines = ('goldencross',)
    params = dict(rule=list())

    def __init__(self):
        super(iKdjCrossDie, self).__init__()
        self.kdj = iKdj(self.data, period=self.args[0], a=self.args[1], b=self.args[2])
        # CrossOver is +1 when K crosses above D, -1 when below.
        self.cross = btind.CrossOver(self.kdj.k, self.kdj.d)

    def next(self):
        if self.cross[0] == -1:
            # On a downward cross, additionally test the D-K gap against `logic`.
            self.lines.goldencross[0] = compare(self.kdj.d[0] - self.kdj.k[0], self.logic)
        else:
            self.lines.goldencross[0] = False

    @classmethod
    def judge(cls, cond):
        # Warm-up bars required before this indicator produces valid output.
        return int(cond['args'][0])
class iKdjLong(iBaseIndicator):
    '''
    Factor: bullish alignment -- signals when the KDJ lines are ordered
    and all above a threshold for m consecutive bars.

    Input parameters:
        rule = {"args": [20, a, b, 3, 50],  # KDJ period n, smoothing a, b, lookback m bars, threshold
                "logic": {"compare": "eq", "byValue": 1, "byMax": 5},
               }
    '''
    lines = ('kdjlong', )
    params = dict(rule=list())

    def __init__(self):
        super(iKdjLong, self).__init__()
        self.kdj = iKdj(self.data, period = self.args[0], a=self.args[1], b=self.args[2])

    def next(self):
        # Last args[3] values of each line.
        ds = list(self.kdj.d.get(size=self.args[3]))
        ks = list(self.kdj.k.get(size=self.args[3]))
        js = list(self.kdj.j.get(size=self.args[3]))
        # Bullish when every value exceeds the threshold (args[4]) AND each
        # bar's (j, d, k) tuple is in descending order, i.e. j >= d >= k.
        # NOTE(review): the original comment said "d<k<j", but the tuple
        # order (js, ds, ks) enforces j >= d >= k -- confirm which ordering
        # of K vs D is intended.
        if len(ks) == self.args[3]:
            kdjlong = set(list(map(lambda d: d > self.args[4], ks + ds + js)) + [sorted(s, reverse=True)==s for s in zip(js, ds, ks)])
        else:
            kdjlong = []
        # The set collapses to {True} only if every condition held.
        if len(kdjlong) == 1 and True in kdjlong:
            self.lines.kdjlong[0] = True
        else:
            self.lines.kdjlong[0] = False

    @classmethod
    def judge(cls, cond):
        # Warm-up bars required before output is valid.
        # NOTE(review): adds args[1] (smoothing a) rather than args[3]
        # (lookback bars) -- confirm this is the intended warm-up formula.
        return int(cond['args'][0]) + int(cond['args'][1])
class iKdjShort(iBaseIndicator):
    '''
    Factor: bearish alignment -- signals when the KDJ lines are ordered
    the opposite way for m consecutive bars.

    Input parameters:
        rule = {"args": [20, a, b, 3, 50],  # KDJ period n, smoothing a, b, lookback m bars, threshold
                "logic": {"compare": "eq", "byValue": 1, "byMax": 5},
               }
    '''
    lines = ('kdjshort', )
    params = dict(rule=list())

    def __init__(self):
        super(iKdjShort, self).__init__()
        self.kdj = iKdj(self.data, period = self.args[0], a=self.args[1], b=self.args[2])

    def next(self):
        # Last args[3] values of each line.
        ks = list(self.kdj.k.get(size=self.args[3]))
        ds = list(self.kdj.d.get(size=self.args[3]))
        js = list(self.kdj.j.get(size=self.args[3]))
        # Bearish when every value exceeds the threshold (args[4]) AND each
        # bar's (j, d, k) tuple is in ascending order, i.e. j <= d <= k.
        # NOTE(review): the `> args[4]` threshold test matches iKdjLong; for
        # a short signal one might expect `<` -- confirm intended behaviour.
        if len(ks) == self.args[3]:
            kdjshort = set(list(map(lambda d: d > self.args[4], ks + ds + js)) + [sorted(s) == s for s in zip(js, ds, ks)])
        else:
            kdjshort = []
        # The set collapses to {True} only if every condition held.
        if len(kdjshort) == 1 and True in kdjshort:
            self.lines.kdjshort[0] = True
        else:
            self.lines.kdjshort[0] = False

    @classmethod
    def judge(cls, cond):
        # Warm-up bars required before output is valid (see iKdjLong note).
        return int(cond['args'][0]) + int(cond['args'][1])
class iKdjTop(iBaseIndicator):
    '''
    Factor: the selected KDJ line is at its highest value of the last m bars.

    Input parameters:
        rule = {"args": [20, 3, 3, 30, k],  # KDJ period n, smoothing a, b, lookback m bars, line name
                "logic": {"compare": "eq", "byValue": 1, "byMax": 5},  # unused by this factor
               }
    '''
    # lines = ('ktop', 'dtop', 'jtop',)
    lines = ('kdjtop',)
    params = dict(rule=list())

    def __init__(self):
        super(iKdjTop, self).__init__()
        self.kdj = iKdj(self.data, period=self.args[0], a=self.args[1], b=self.args[2])

    def next(self):
        # Trailing windows and current values for each of the three lines.
        _list = {
            'k': list(self.kdj.k.get(size=self.args[3])),
            'd': list(self.kdj.d.get(size=self.args[3])),
            'j': list(self.kdj.j.get(size=self.args[3])),
        }
        _signal = {
            'k': self.kdj.k[0],
            'd': self.kdj.d[0],
            'j': self.kdj.j[0],
        }
        # kdj_line = self.args[4]
        # kdj_line = self.logic['line']
        # NOTE(review): `sigline` is presumably set from the rule by
        # iBaseIndicator (superseding the commented-out variants above) --
        # confirm against the base class. Also note this class defines no
        # `judge` classmethod while its siblings do -- confirm callers
        # tolerate that.
        kdj_line = self.sigline
        # True when the current value equals the window maximum (a full
        # window is required).
        if len(_list[kdj_line]) == self.args[3] and _signal[kdj_line] == max(_list[kdj_line]):
            self.lines.kdjtop[0] = True
        else:
            self.lines.kdjtop[0] = False
class iKdjBottom(iBaseIndicator):
    '''
    Factor: the selected KDJ line is at its lowest value of the last m bars.

    Input parameters:
        rule = {"args": [20, 3, 3, 30, k],  # KDJ period n, smoothing a, b, lookback m bars, line name
                "logic": {"compare": "eq", "byValue": 1, "byMax": 5},  # unused by this factor
               }
    '''
    lines = ('kdjbottom',)
    params = dict(rule=list())

    def __init__(self):
        super(iKdjBottom, self).__init__()
        self.kdj = iKdj(self.data, period=self.args[0], a=self.args[1], b=self.args[2])

    def next(self):
        # Trailing windows and current values for each of the three lines.
        _list = {
            'k': list(self.kdj.k.get(size=self.args[3])),
            'd': list(self.kdj.d.get(size=self.args[3])),
            'j': list(self.kdj.j.get(size=self.args[3])),
        }
        _signal = {
            'k': self.kdj.k[0],
            'd': self.kdj.d[0],
            'j': self.kdj.j[0],
        }
        # kdj_line = self.args[4]
        # kdj_line = self.logic['line']
        # NOTE(review): `sigline` is presumably set from the rule by
        # iBaseIndicator -- confirm against the base class (see iKdjTop).
        kdj_line = self.sigline
        # True when the current value equals the window minimum (a full
        # window is required).
        if len(_list[kdj_line]) == self.args[3] and _signal[kdj_line] == min(_list[kdj_line]):
            self.lines.kdjbottom[0] = True
        else:
            self.lines.kdjbottom[0] = False
acef0ac89b5ad58160ea136be512e6245568b0b5 | 1,802 | py | Python | src/aks-preview/azext_aks_preview/vendored_sdks/azure_mgmt_aks/models/container_service_service_principal_profile.py | Mannan2812/azure-cli-extensions | e2b34efe23795f6db9c59100534a40f0813c3d95 | [
"MIT"
] | 2 | 2021-06-05T17:51:26.000Z | 2021-11-17T11:17:56.000Z | src/aks-preview/azext_aks_preview/vendored_sdks/azure_mgmt_aks/models/container_service_service_principal_profile.py | Mannan2812/azure-cli-extensions | e2b34efe23795f6db9c59100534a40f0813c3d95 | [
"MIT"
] | 3 | 2020-05-27T20:16:26.000Z | 2020-07-23T19:46:49.000Z | src/aks-preview/azext_aks_preview/vendored_sdks/azure_mgmt_aks/models/container_service_service_principal_profile.py | Mannan2812/azure-cli-extensions | e2b34efe23795f6db9c59100534a40f0813c3d95 | [
"MIT"
] | 5 | 2020-09-08T22:46:48.000Z | 2020-11-08T14:54:35.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ContainerServiceServicePrincipalProfile(Model):
    """Service principal identity the cluster uses to call Azure APIs.

    Either ``secret`` or ``key_vault_secret_ref`` must be specified.

    All required parameters must be populated in order to send to Azure.

    :param client_id: Required. The ID for the service principal.
    :type client_id: str
    :param secret: The secret password associated with the service principal
     in plain text.
    :type secret: str
    :param key_vault_secret_ref: Reference to a secret stored in Azure Key
     Vault.
    :type key_vault_secret_ref:
     ~azure.mgmt.containerservice.models.KeyVaultSecretRef
    """

    _validation = {
        'client_id': {'required': True},
    }

    _attribute_map = {
        'client_id': {'key': 'clientId', 'type': 'str'},
        'secret': {'key': 'secret', 'type': 'str'},
        'key_vault_secret_ref': {'key': 'keyVaultSecretRef', 'type': 'KeyVaultSecretRef'},
    }

    def __init__(self, **kwargs):
        super(ContainerServiceServicePrincipalProfile, self).__init__(**kwargs)
        # dict.get defaults to None, matching the serializer's expectations
        # for unset model attributes.
        self.client_id = kwargs.get('client_id')
        self.secret = kwargs.get('secret')
        self.key_vault_secret_ref = kwargs.get('key_vault_secret_ref')
acef0afa01fe19fcfff9ed405c1d07aad4a3bd15 | 30,737 | py | Python | cohesity_management_sdk/controllers/protection_jobs_controller.py | nick6655/management-sdk-python | 88e792cb83e5c24a22af495b220c145d0c45841d | [
"Apache-2.0"
] | 18 | 2019-09-24T17:35:53.000Z | 2022-03-25T08:08:47.000Z | cohesity_management_sdk/controllers/protection_jobs_controller.py | nick6655/management-sdk-python | 88e792cb83e5c24a22af495b220c145d0c45841d | [
"Apache-2.0"
] | 18 | 2019-03-29T19:32:29.000Z | 2022-01-03T23:16:45.000Z | cohesity_management_sdk/controllers/protection_jobs_controller.py | nick6655/management-sdk-python | 88e792cb83e5c24a22af495b220c145d0c45841d | [
"Apache-2.0"
] | 16 | 2019-02-27T06:54:12.000Z | 2021-11-16T18:10:24.000Z | # -*- coding: utf-8 -*-
# Copyright 2021 Cohesity Inc.
import logging
from cohesity_management_sdk.api_helper import APIHelper
from cohesity_management_sdk.configuration import Configuration
from cohesity_management_sdk.controllers.base_controller import BaseController
from cohesity_management_sdk.http.auth.auth_manager import AuthManager
from cohesity_management_sdk.models.protection_job import ProtectionJob
from cohesity_management_sdk.models.update_protection_jobs_state import UpdateProtectionJobsState
from cohesity_management_sdk.models.protection_job_audit_trail import ProtectionJobAuditTrail
from cohesity_management_sdk.exceptions.request_error_error_exception import RequestErrorErrorException
class ProtectionJobsController(BaseController):
"""A Controller to access Endpoints in the cohesity_management_sdk API."""
def __init__(self, config=None, client=None, call_back=None):
    """Create a controller bound to *config*; HTTP plumbing (*client*,
    *call_back*) is delegated to BaseController."""
    super(ProtectionJobsController, self).__init__(client, call_back)
    self.config = config
    self.logger = logging.getLogger(__name__)
def change_protection_job_state(self, id, body=None):
    """Does a POST request to /public/protectionJobState/{id}.

    If the Protection Job is currently running (not paused) and true is
    passed in, this operation stops any new Runs of this Protection Job
    from starting and executing.
    However, any existing Runs that were already executing will continue
    to run.
    If this Protection Job is paused and false is passed in, this operation
    restores the Job to a running state and new Runs are started as defined
    by the schedule in the Policy associated with the Job.
    Returns success if the paused state is changed.

    Args:
        id (long|int): Specifies a unique id of the Protection Job.
        body (ChangeProtectionJobStateParam, optional): Pause/resume
            parameters for the Job.

    Returns:
        void: Response from the API. No Content

    Raises:
        APIException: When an error occurs while fetching the data from
            the remote API. This exception includes the HTTP Response
            code, an error message, and the HTTP body that was received in
            the request.

    """
    try:
        self.logger.info('change_protection_job_state called.')

        # Validate required parameters
        self.logger.info(
            'Validating required parameters for change_protection_job_state.'
        )
        self.validate_parameters(id=id)

        # Prepare query URL
        self.logger.info(
            'Preparing query URL for change_protection_job_state.')
        _url_path = '/public/protectionJobState/{id}'
        _url_path = APIHelper.append_url_with_template_parameters(
            _url_path, {'id': id})
        _query_builder = self.config.get_base_uri()
        _query_builder += _url_path
        _query_url = APIHelper.clean_url(_query_builder)

        # Prepare headers
        self.logger.info(
            'Preparing headers for change_protection_job_state.')
        _headers = {'content-type': 'application/json; charset=utf-8'}

        # Prepare and execute request
        self.logger.info(
            'Preparing and executing request for change_protection_job_state.'
        )
        _request = self.http_client.post(
            _query_url,
            headers=_headers,
            parameters=APIHelper.json_serialize(body))
        AuthManager.apply(_request, self.config)
        _context = self.execute_request(_request,
                                        name='change_protection_job_state')

        # Endpoint and global error handling using HTTP status codes.
        # NOTE(review): status code 0 appears to be this SDK's marker for a
        # request-level error payload -- confirm against the error model.
        self.logger.info(
            'Validating response for change_protection_job_state.')
        if _context.response.status_code == 0:
            raise RequestErrorErrorException('Error', _context)
        self.validate_response(_context)

    except Exception as e:
        # Log with traceback, then re-raise so callers see the failure.
        self.logger.error(e, exc_info=True)
        raise
def get_protection_jobs(self,
                        ids=None,
                        names=None,
                        policy_ids=None,
                        environments=None,
                        is_active=None,
                        is_deleted=None,
                        only_return_basic_summary=None,
                        include_last_run_and_stats=None,
                        include_rpo_snapshots=None,
                        is_last_run_sla_violated=None,
                        only_return_data_migration_jobs=None,
                        prune_excluded_source_ids=None,
                        tenant_ids=None,
                        all_under_hierarchy=None):
    """Does a GET request to /public/protectionJobs.

    If no parameters are specified, all Protection Jobs currently
    on the Cohesity Cluster are returned.
    Specifying parameters filters the results that are returned.

    Args:
        ids (list of long|int, optional): Filter by a list of Protection
            Job ids.
        names (list of string, optional): Filter by a list of Protection
            Job names.
        policy_ids (list of string, optional): Filter by Policy ids that
            are associated with Protection Jobs. Only Jobs associated with
            the specified Policy ids, are returned.
        environments (list of EnvironmentGetProtectionJobsEnum, optional):
            Filter by environment types such as 'kVMware', 'kView', etc.
            Only Jobs protecting the specified environment types are
            returned. NOTE: 'kPuppeteer' refers to Cohesity's Remote
            Adapter.
        is_active (bool, optional): Filter by Inactive or Active Jobs. If
            not set, all Inactive and Active Jobs are returned. If true,
            only Active Jobs are returned. If false, only Inactive Jobs
            are returned. When you create a Protection Job on a Primary
            Cluster with a replication schedule, the Cluster creates an
            Inactive copy of the Job on the Remote Cluster. In addition,
            when an Active and running Job is deactivated, the Job becomes
            Inactive.
        is_deleted (bool, optional): If true, return only Protection Jobs
            that have been deleted but still have Snapshots associated
            with them. If false, return all Protection Jobs except those
            Jobs that have been deleted and still have Snapshots
            associated with them. A Job that is deleted with all its
            Snapshots is not returned for either of these cases.
        only_return_basic_summary (bool, optional): if true then only job
            descriptions and the most recent run of the job will be
            returned.
        include_last_run_and_stats (bool, optional): If true, return the
            last Protection Run of the Job and the summary stats.
        include_rpo_snapshots (bool, optional): If true, then the
            Protected Objects protected by RPO policies will also be
            returned.
        is_last_run_sla_violated (bool, optional): IsLastRunSlaViolated is
            the parameter to filter the Protection Jobs based on the SLA
            violation status of the last Protection Run.
        only_return_data_migration_jobs (bool, optional):
            OnlyReturnDataMigrationJobs specifies if only data migration
            jobs should be returned. If not set, no data migration job
            will be returned.
        prune_excluded_source_ids (bool, optional): If true, the list of
            exclusion sources will be omitted from the response. This can
            be used to improve performance when the exclusion sources are
            not needed.
        tenant_ids (list of string, optional): TenantIds contains ids of
            the tenants for which objects are to be returned.
        all_under_hierarchy (bool, optional): AllUnderHierarchy specifies
            if objects of all the tenants under the hierarchy of the
            logged in user's organization should be returned.

    Returns:
        list of ProtectionJob: Response from the API. Success

    Raises:
        APIException: When an error occurs while fetching the data from
            the remote API. This exception includes the HTTP Response
            code, an error message, and the HTTP body that was received in
            the request.

    """
    try:
        self.logger.info('get_protection_jobs called.')

        # Prepare query URL
        self.logger.info('Preparing query URL for get_protection_jobs.')
        _url_path = '/public/protectionJobs'
        _query_builder = self.config.get_base_uri()
        _query_builder += _url_path
        # None-valued entries are dropped by the query-parameter helper,
        # so unset filters do not appear in the request URL.
        _query_parameters = {
            'ids': ids,
            'names': names,
            'policyIds': policy_ids,
            'environments': environments,
            'isActive': is_active,
            'isDeleted': is_deleted,
            'onlyReturnBasicSummary': only_return_basic_summary,
            'includeLastRunAndStats': include_last_run_and_stats,
            'includeRpoSnapshots': include_rpo_snapshots,
            'isLastRunSlaViolated': is_last_run_sla_violated,
            'onlyReturnDataMigrationJobs': only_return_data_migration_jobs,
            'pruneExcludedSourceIds': prune_excluded_source_ids,
            'tenantIds': tenant_ids,
            'allUnderHierarchy': all_under_hierarchy
        }
        _query_builder = APIHelper.append_url_with_query_parameters(
            _query_builder, _query_parameters,
            Configuration.array_serialization)
        _query_url = APIHelper.clean_url(_query_builder)

        # Prepare headers
        self.logger.info('Preparing headers for get_protection_jobs.')
        _headers = {'accept': 'application/json'}

        # Prepare and execute request
        self.logger.info(
            'Preparing and executing request for get_protection_jobs.')
        _request = self.http_client.get(_query_url, headers=_headers)
        AuthManager.apply(_request, self.config)
        _context = self.execute_request(_request,
                                        name='get_protection_jobs')

        # Endpoint and global error handling using HTTP status codes.
        self.logger.info('Validating response for get_protection_jobs.')
        if _context.response.status_code == 0:
            raise RequestErrorErrorException('Error', _context)
        self.validate_response(_context)

        # Deserialize the JSON body into a list of ProtectionJob models.
        return APIHelper.json_deserialize(_context.response.raw_body,
                                          ProtectionJob.from_dictionary)

    except Exception as e:
        # Log with traceback, then re-raise so callers see the failure.
        self.logger.error(e, exc_info=True)
        raise
def create_protection_job(self, body):
    """Does a POST request to /public/protectionJobs.

    Returns the created Protection Job.

    Args:
        body (ProtectionJobRequestBody): Request to create a Protection
            Job.

    Returns:
        ProtectionJob: Response from the API. Success

    Raises:
        APIException: When an error occurs while fetching the data from
            the remote API. This exception includes the HTTP Response
            code, an error message, and the HTTP body that was received in
            the request.

    """
    try:
        self.logger.info('create_protection_job called.')

        # Validate required parameters
        self.logger.info(
            'Validating required parameters for create_protection_job.')
        self.validate_parameters(body=body)

        # Prepare query URL
        self.logger.info('Preparing query URL for create_protection_job.')
        _url_path = '/public/protectionJobs'
        _query_builder = self.config.get_base_uri()
        _query_builder += _url_path
        _query_url = APIHelper.clean_url(_query_builder)

        # Prepare headers
        self.logger.info('Preparing headers for create_protection_job.')
        _headers = {
            'accept': 'application/json',
            'content-type': 'application/json; charset=utf-8'
        }

        # Prepare and execute request
        self.logger.info(
            'Preparing and executing request for create_protection_job.')
        _request = self.http_client.post(
            _query_url,
            headers=_headers,
            parameters=APIHelper.json_serialize(body))
        AuthManager.apply(_request, self.config)
        _context = self.execute_request(_request,
                                        name='create_protection_job')

        # Endpoint and global error handling using HTTP status codes.
        self.logger.info('Validating response for create_protection_job.')
        if _context.response.status_code == 0:
            raise RequestErrorErrorException('Error', _context)
        self.validate_response(_context)

        # Deserialize the JSON body into a ProtectionJob model.
        return APIHelper.json_deserialize(_context.response.raw_body,
                                          ProtectionJob.from_dictionary)

    except Exception as e:
        # Log with traceback, then re-raise so callers see the failure.
        self.logger.error(e, exc_info=True)
        raise
def create_run_protection_job(self, id, body):
    """Does a POST request to /public/protectionJobs/run/{id}.

    Immediately execute a single Job Run and ignore the schedule defined
    in the Policy.
    A Protection Policy associated with the Job may define up to three
    backup run types:
    1) Regular (CBT utilized), 2) Full (CBT not utilized) and 3) Log.
    The passed in run type defines what type of backup is done by the Job
    Run.
    The schedule defined in the Policy for the backup run type is ignored
    but other settings such as the snapshot retention and retry settings
    are used.
    Returns success if the Job Run starts.

    Args:
        id (long|int): Specifies a unique id of the Protection Job.
        body (RunProtectionJobParam): Specifies the type of backup. If not
            specified, the 'kRegular' backup is run.

    Returns:
        void: Response from the API. No Content

    Raises:
        APIException: When an error occurs while fetching the data from
            the remote API. This exception includes the HTTP Response
            code, an error message, and the HTTP body that was received in
            the request.

    """
    try:
        self.logger.info('create_run_protection_job called.')

        # Validate required parameters
        self.logger.info(
            'Validating required parameters for create_run_protection_job.'
        )
        self.validate_parameters(id=id, body=body)

        # Prepare query URL
        self.logger.info(
            'Preparing query URL for create_run_protection_job.')
        _url_path = '/public/protectionJobs/run/{id}'
        _url_path = APIHelper.append_url_with_template_parameters(
            _url_path, {'id': id})
        _query_builder = self.config.get_base_uri()
        _query_builder += _url_path
        _query_url = APIHelper.clean_url(_query_builder)

        # Prepare headers
        self.logger.info(
            'Preparing headers for create_run_protection_job.')
        _headers = {'content-type': 'application/json; charset=utf-8'}

        # Prepare and execute request
        self.logger.info(
            'Preparing and executing request for create_run_protection_job.'
        )
        _request = self.http_client.post(
            _query_url,
            headers=_headers,
            parameters=APIHelper.json_serialize(body))
        AuthManager.apply(_request, self.config)
        _context = self.execute_request(_request,
                                        name='create_run_protection_job')

        # Endpoint and global error handling using HTTP status codes.
        self.logger.info(
            'Validating response for create_run_protection_job.')
        if _context.response.status_code == 0:
            raise RequestErrorErrorException('Error', _context)
        self.validate_response(_context)

    except Exception as e:
        # Log with traceback, then re-raise so callers see the failure.
        self.logger.error(e, exc_info=True)
        raise
def update_protection_jobs_state(self, body=None):
    """Does a POST request to /public/protectionJobs/states.

    Note that the pause or resume actions will take effect from the next
    Protection Run. Also, the user can specify only one type of action on
    all the Protection Jobs.
    Deactivate and activate actions are independent of pause and resume
    state.
    Deactivate and activate actions are useful in case of failover
    situations.
    Returns success if the state of all the Protection Jobs is changed
    successfully.

    Args:
        body (UpdateProtectionJobsStateRequestBody, optional): Bulk
            state-change request covering multiple Protection Jobs.

    Returns:
        UpdateProtectionJobsState: Response from the API. Success

    Raises:
        APIException: When an error occurs while fetching the data from
            the remote API. This exception includes the HTTP Response
            code, an error message, and the HTTP body that was received in
            the request.

    """
    try:
        self.logger.info('update_protection_jobs_state called.')

        # Prepare query URL
        self.logger.info(
            'Preparing query URL for update_protection_jobs_state.')
        _url_path = '/public/protectionJobs/states'
        _query_builder = self.config.get_base_uri()
        _query_builder += _url_path
        _query_url = APIHelper.clean_url(_query_builder)

        # Prepare headers
        self.logger.info(
            'Preparing headers for update_protection_jobs_state.')
        _headers = {
            'accept': 'application/json',
            'content-type': 'application/json; charset=utf-8'
        }

        # Prepare and execute request
        self.logger.info(
            'Preparing and executing request for update_protection_jobs_state.'
        )
        _request = self.http_client.post(
            _query_url,
            headers=_headers,
            parameters=APIHelper.json_serialize(body))
        AuthManager.apply(_request, self.config)
        _context = self.execute_request(
            _request, name='update_protection_jobs_state')

        # Endpoint and global error handling using HTTP status codes.
        self.logger.info(
            'Validating response for update_protection_jobs_state.')
        if _context.response.status_code == 0:
            raise RequestErrorErrorException('Error', _context)
        self.validate_response(_context)

        # Deserialize the JSON body into an UpdateProtectionJobsState model.
        return APIHelper.json_deserialize(
            _context.response.raw_body,
            UpdateProtectionJobsState.from_dictionary)

    except Exception as e:
        # Log with traceback, then re-raise so callers see the failure.
        self.logger.error(e, exc_info=True)
        raise
def delete_protection_job(self, id, body=None):
    """Does a DELETE request to /public/protectionJobs/{id}.

    Returns Success if the Protection Job is deleted.

    Args:
        id (long|int): Specifies a unique id of the Protection Job.
        body (DeleteProtectionJobParam, optional): Request to delete a
            protection job.

    Returns:
        void: Response from the API. No Content

    Raises:
        APIException: When an error occurs while fetching the data from
            the remote API. This exception includes the HTTP Response
            code, an error message, and the HTTP body that was received in
            the request.

    """
    try:
        self.logger.info('delete_protection_job called.')

        # Validate required parameters
        self.logger.info(
            'Validating required parameters for delete_protection_job.')
        self.validate_parameters(id=id)

        # Prepare query URL
        self.logger.info('Preparing query URL for delete_protection_job.')
        _url_path = '/public/protectionJobs/{id}'
        _url_path = APIHelper.append_url_with_template_parameters(
            _url_path, {'id': id})
        _query_builder = self.config.get_base_uri()
        _query_builder += _url_path
        _query_url = APIHelper.clean_url(_query_builder)

        # Prepare headers
        self.logger.info('Preparing headers for delete_protection_job.')
        _headers = {'content-type': 'application/json; charset=utf-8'}

        # Prepare and execute request (DELETE with an optional JSON body).
        self.logger.info(
            'Preparing and executing request for delete_protection_job.')
        _request = self.http_client.delete(
            _query_url,
            headers=_headers,
            parameters=APIHelper.json_serialize(body))
        AuthManager.apply(_request, self.config)
        _context = self.execute_request(_request,
                                        name='delete_protection_job')

        # Endpoint and global error handling using HTTP status codes.
        self.logger.info('Validating response for delete_protection_job.')
        if _context.response.status_code == 0:
            raise RequestErrorErrorException('Error', _context)
        self.validate_response(_context)

    except Exception as e:
        # Log with traceback, then re-raise so callers see the failure.
        self.logger.error(e, exc_info=True)
        raise
def get_protection_job_by_id(self, id):
"""Does a GET request to /public/protectionJobs/{id}.
Returns the Protection Job corresponding to the specified Job id.
Args:
id (long|int): Specifies a unique id of the Protection Job.
Returns:
ProtectionJob: Response from the API. Success
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
try:
self.logger.info('get_protection_job_by_id called.')
# Validate required parameters
self.logger.info(
'Validating required parameters for get_protection_job_by_id.')
self.validate_parameters(id=id)
# Prepare query URL
self.logger.info(
'Preparing query URL for get_protection_job_by_id.')
_url_path = '/public/protectionJobs/{id}'
_url_path = APIHelper.append_url_with_template_parameters(
_url_path, {'id': id})
_query_builder = self.config.get_base_uri()
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
self.logger.info('Preparing headers for get_protection_job_by_id.')
_headers = {'accept': 'application/json'}
# Prepare and execute request
self.logger.info(
'Preparing and executing request for get_protection_job_by_id.'
)
_request = self.http_client.get(_query_url, headers=_headers)
AuthManager.apply(_request, self.config)
_context = self.execute_request(_request,
name='get_protection_job_by_id')
# Endpoint and global error handling using HTTP status codes.
self.logger.info(
'Validating response for get_protection_job_by_id.')
if _context.response.status_code == 0:
raise RequestErrorErrorException('Error', _context)
self.validate_response(_context)
# Return appropriate type
return APIHelper.json_deserialize(_context.response.raw_body,
ProtectionJob.from_dictionary)
except Exception as e:
self.logger.error(e, exc_info=True)
raise
def update_protection_job(self, body, id):
"""Does a PUT request to /public/protectionJobs/{id}.
Returns the updated Protection Job.
Args:
body (ProtectionJobRequestBody): Request to update a protection
job.
id (long|int): Specifies a unique id of the Protection Job.
Returns:
ProtectionJob: Response from the API. Success
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
try:
self.logger.info('update_protection_job called.')
# Validate required parameters
self.logger.info(
'Validating required parameters for update_protection_job.')
self.validate_parameters(body=body, id=id)
# Prepare query URL
self.logger.info('Preparing query URL for update_protection_job.')
_url_path = '/public/protectionJobs/{id}'
_url_path = APIHelper.append_url_with_template_parameters(
_url_path, {'id': id})
_query_builder = self.config.get_base_uri()
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
self.logger.info('Preparing headers for update_protection_job.')
_headers = {
'accept': 'application/json',
'content-type': 'application/json; charset=utf-8'
}
# Prepare and execute request
self.logger.info(
'Preparing and executing request for update_protection_job.')
_request = self.http_client.put(
_query_url,
headers=_headers,
parameters=APIHelper.json_serialize(body))
AuthManager.apply(_request, self.config)
_context = self.execute_request(_request,
name='update_protection_job')
# Endpoint and global error handling using HTTP status codes.
self.logger.info('Validating response for update_protection_job.')
if _context.response.status_code == 0:
raise RequestErrorErrorException('Error', _context)
self.validate_response(_context)
# Return appropriate type
return APIHelper.json_deserialize(_context.response.raw_body,
ProtectionJob.from_dictionary)
except Exception as e:
self.logger.error(e, exc_info=True)
raise
def get_protection_job_audit(self, id):
"""Does a GET request to /public/protectionJobs/{id}/auditTrail.
Returns the audit of specific protection job edit history.
Args:
id (long|int): Specifies a unique id of the Protection Job.
Returns:
list of ProtectionJobAuditTrail: Response from the API. Success
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
try:
self.logger.info('get_protection_job_audit called.')
# Validate required parameters
self.logger.info(
'Validating required parameters for get_protection_job_audit.')
self.validate_parameters(id=id)
# Prepare query URL
self.logger.info(
'Preparing query URL for get_protection_job_audit.')
_url_path = '/public/protectionJobs/{id}/auditTrail'
_url_path = APIHelper.append_url_with_template_parameters(
_url_path, {'id': id})
_query_builder = self.config.get_base_uri()
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
self.logger.info('Preparing headers for get_protection_job_audit.')
_headers = {'accept': 'application/json'}
# Prepare and execute request
self.logger.info(
'Preparing and executing request for get_protection_job_audit.'
)
_request = self.http_client.get(_query_url, headers=_headers)
AuthManager.apply(_request, self.config)
_context = self.execute_request(_request,
name='get_protection_job_audit')
# Endpoint and global error handling using HTTP status codes.
self.logger.info(
'Validating response for get_protection_job_audit.')
if _context.response.status_code == 0:
raise RequestErrorErrorException('Error', _context)
self.validate_response(_context)
# Return appropriate type
return APIHelper.json_deserialize(
_context.response.raw_body,
ProtectionJobAuditTrail.from_dictionary)
except Exception as e:
self.logger.error(e, exc_info=True)
raise
| 42.809192 | 103 | 0.605362 |
acef0b3aaef1d0c9277c74f7052d097c7b10a90f | 6,453 | py | Python | tests/test_lockstep.py | Roynecro97/easypy | 9f36732b558477557b8a57cfad2840767eff0d12 | [
"BSD-3-Clause"
] | 7 | 2020-03-23T08:30:29.000Z | 2020-12-05T14:51:49.000Z | tests/test_lockstep.py | Roynecro97/easypy | 9f36732b558477557b8a57cfad2840767eff0d12 | [
"BSD-3-Clause"
] | null | null | null | tests/test_lockstep.py | Roynecro97/easypy | 9f36732b558477557b8a57cfad2840767eff0d12 | [
"BSD-3-Clause"
] | 6 | 2020-04-28T12:20:14.000Z | 2022-02-15T15:01:42.000Z | import pytest
from easypy.lockstep import lockstep, LockstepSyncMismatch
def test_lockstep_side_effects():
    """Each step_next() call runs the generator exactly up to the named yield,
    so side effects become visible one step at a time."""
    calculation_result = 0

    @lockstep
    def simple_calculation(number):
        nonlocal calculation_result
        calculation_result = number
        yield 'SET_NUMBER'
        calculation_result *= 2
        yield 'MULTIPLY_IT_BY_TWO'
        calculation_result += 5
        yield 'ADD_FIVE'

    with simple_calculation.lockstep(5) as calculation:
        calculation.step_next('SET_NUMBER')
        assert calculation_result == 5
        calculation.step_next('MULTIPLY_IT_BY_TWO')
        assert calculation_result == 10
        calculation.step_next('ADD_FIVE')
        assert calculation_result == 15
def test_lockstep_run_as_function():
    """Calling a lockstep-wrapped generator directly runs every step to
    completion, like a plain function call."""
    calculation_result = 0

    @lockstep
    def simple_calculation(number):
        nonlocal calculation_result
        calculation_result = number
        yield 'SET_NUMBER'
        calculation_result *= 2
        yield 'MULTIPLY_IT_BY_TWO'
        calculation_result += 5
        yield 'ADD_FIVE'

    simple_calculation(10)
    # (10 * 2) + 5
    assert calculation_result == 25
def test_lockstep_class_method():
    """@lockstep works on bound methods: both the step-by-step context manager
    and the direct (run-to-completion) call forms."""
    class SimpleCalculation():
        def __init__(self, number):
            self.calculation_result = number

        @lockstep
        def calculation(self):
            self.calculation_result *= 2
            yield 'MULTIPLY_IT_BY_TWO'
            self.calculation_result += 5
            yield 'ADD_FIVE'

    simple_calculation = SimpleCalculation(5)
    with simple_calculation.calculation.lockstep() as calculation:
        assert simple_calculation.calculation_result == 5
        calculation.step_next('MULTIPLY_IT_BY_TWO')
        assert simple_calculation.calculation_result == 10
        calculation.step_next('ADD_FIVE')
        assert simple_calculation.calculation_result == 15
    assert simple_calculation.calculation_result == 15

    # run as function
    simple_calculation2 = SimpleCalculation(10)
    assert simple_calculation2.calculation_result == 10
    simple_calculation2.calculation()
    assert simple_calculation2.calculation_result == 25
def test_lockstep_wrong_step_name():
    """Requesting a step name that differs from the one actually yielded
    raises LockstepSyncMismatch carrying both names."""
    @lockstep
    def process():
        yield 'STEP_1'
        yield 'STEP_2'
        yield 'STEP_3'

    with pytest.raises(LockstepSyncMismatch) as excinfo:
        with process.lockstep() as process:
            process.step_next('STEP_1')
            process.step_next('STEP_TWO')
            process.step_next('STEP_3')

    assert excinfo.value.expected_step == 'STEP_TWO'
    assert excinfo.value.actual_step == 'STEP_2'
def test_lockstep_not_exhausted():
    """Leaving the context before stepping through every yield raises
    LockstepSyncMismatch ('finished' expected, remaining step actual)."""
    @lockstep
    def process():
        yield 'STEP_1'
        yield 'STEP_2'
        yield 'STEP_3'

    with pytest.raises(LockstepSyncMismatch) as excinfo:
        with process.lockstep() as process:
            process.step_next('STEP_1')
            process.step_next('STEP_2')

    assert excinfo.value.expected_step == 'finished'
    assert excinfo.value.actual_step == 'STEP_3'
def test_lockstep_exhausted_prematurely():
    """Requesting a step after the generator has finished raises
    LockstepSyncMismatch (requested step expected, 'finished' actual)."""
    @lockstep
    def process():
        yield 'STEP_1'
        yield 'STEP_2'

    with pytest.raises(LockstepSyncMismatch) as excinfo:
        with process.lockstep() as process:
            process.step_next('STEP_1')
            process.step_next('STEP_2')
            process.step_next('STEP_3')

    assert excinfo.value.expected_step == 'STEP_3'
    assert excinfo.value.actual_step == 'finished'
def test_lockstep_exhaust():
    """step_all() drains every remaining step, running the generator's
    trailing code past the last yield."""
    finished = False

    @lockstep
    def process():
        nonlocal finished
        yield 'STEP_1'
        yield 'STEP_2'
        yield 'STEP_3'
        finished = True

    assert not finished
    with process.lockstep() as process:
        assert not finished
        process.step_all()
        assert finished
    assert finished
def test_lockstep_yielded_values():
    """A (name, value) yield makes step_next() return the value; a bare
    name yield makes it return None."""
    @lockstep
    def process():
        yield 'STEP_1', 1
        yield 'STEP_2'
        yield 'STEP_3', 3

    with process.lockstep() as process:
        assert process.step_next('STEP_1') == 1
        assert process.step_next('STEP_2') is None
        assert process.step_next('STEP_3') == 3
def test_lockstep_nested():
    """An outer lockstep process can `yield from` an inner one, splicing the
    inner step names into the outer sequence."""
    @lockstep
    def internal_process():
        yield 'INTERNAL_1'
        yield 'INTERNAL_2'

    @lockstep
    def external_process():
        yield 'EXTERNAL_1'
        with internal_process.lockstep() as process:
            yield from process
        yield 'EXTERNAL_2'

    with external_process.lockstep() as process:
        process.step_next('EXTERNAL_1')
        process.step_next('INTERNAL_1')
        process.step_next('INTERNAL_2')
        process.step_next('EXTERNAL_2')
def test_lockstep_step_util():
    """step_until() advances through intermediate steps up to the named one."""
    @lockstep
    def process():
        yield 'STEP_1'
        yield 'STEP_2'
        yield 'STEP_3'

    with process.lockstep() as process:
        process.step_until('STEP_3')
def test_lockstep_step_util_wrong_order():
    """step_until() cannot go backwards: asking for an already-passed step
    exhausts the generator and raises LockstepSyncMismatch."""
    @lockstep
    def process():
        yield 'STEP_1'
        yield 'STEP_2'
        yield 'STEP_3'

    with pytest.raises(LockstepSyncMismatch) as excinfo:
        with process.lockstep() as process:
            process.step_until('STEP_2')
            process.step_until('STEP_1')

    assert excinfo.value.expected_step == 'STEP_1'
    assert excinfo.value.actual_step == 'finished'
def test_lockstep_as_static_and_class_methods():
    """@lockstep composes with @classmethod and @staticmethod: all three
    method kinds support both the direct call and the lockstep context."""
    class Foo:
        @lockstep
        def process1(self, out):
            out.append(1)
            yield 'STEP'
            out.append(2)

        @lockstep
        @classmethod
        def process2(cls, out):
            out.append(1)
            yield 'STEP'
            out.append(2)

        @lockstep
        @staticmethod
        def process3(out):
            out.append(1)
            yield 'STEP'
            out.append(2)

    print()

    # Direct call form: runs both halves of the generator.
    def check_method_call(method):
        out = []
        method(out)
        assert out == [1, 2]

    check_method_call(Foo().process1)
    check_method_call(Foo().process2)
    check_method_call(Foo().process3)

    # Lockstep form: side effects appear exactly at the step boundaries.
    def check_method_lockstep(method):
        method.lockstep
        out = []
        with method.lockstep(out) as process:
            assert out == []
            process.step_until('STEP')
            assert out == [1]
        assert out == [1, 2]

    check_method_lockstep(Foo().process1)
    check_method_lockstep(Foo.process2)
    check_method_lockstep(Foo.process3)
acef0c1d69fd0328dcafee2a3d80bb518b0bd8dd | 19,941 | py | Python | gaia_tools/xmatch/__init__.py | npricejones/gaia_tools | 12dcd320ab07386aee816f9b0b14b19cabad29fc | [
"MIT"
] | 1 | 2020-11-20T18:27:11.000Z | 2020-11-20T18:27:11.000Z | gaia_tools/xmatch/__init__.py | npricejones/gaia_tools | 12dcd320ab07386aee816f9b0b14b19cabad29fc | [
"MIT"
] | null | null | null | gaia_tools/xmatch/__init__.py | npricejones/gaia_tools | 12dcd320ab07386aee816f9b0b14b19cabad29fc | [
"MIT"
] | null | null | null | # Tools for cross-matching catalogs
import csv
import sys
import os
import os.path
import platform
import shutil
import subprocess
import tempfile
import warnings
WIN32= platform.system() == 'Windows'
import numpy
import astropy.coordinates as acoords
from astropy.table import Table
from astropy import units as u
from ..load.download import _ERASESTR
def xmatch(cat1,cat2,maxdist=2,
           colRA1='RA',colDec1='DEC',epoch1=None,
           colRA2='RA',colDec2='DEC',epoch2=None,
           colpmRA2='pmra',colpmDec2='pmdec',
           swap=False,
           col_field=None):
    """
    NAME:
       xmatch
    PURPOSE:
       cross-match two catalogs (incl. proper motion in cat2 if epochs are different)
    INPUT:
       cat1 - First catalog
       cat2 - Second catalog
       maxdist= (2) maximum distance in arcsec
       colRA1= ('RA') name of the tag in cat1 with the right ascension in degree in cat1 (assumed to be ICRS)
       colDec1= ('DEC') name of the tag in cat1 with the declination in degree in cat1 (assumed to be ICRS)
       epoch1= (2000.) epoch of the coordinates in cat1
       colRA2= ('RA') name of the tag in cat2 with the right ascension in degree in cat2 (assumed to be ICRS)
       colDec2= ('DEC') name of the tag in cat2 with the declination in degree in cat2 (assumed to be ICRS)
       epoch2= (2000.) epoch of the coordinates in cat2
       colpmRA2= ('pmra') name of the tag in cat2 with the proper motion in right ascension in degree in cat2 (assumed to be ICRS; includes cos(Dec)) [only used when epochs are different]
       colpmDec2= ('pmdec') name of the tag in cat2 with the proper motion in declination in degree in cat2 (assumed to be ICRS) [only used when epochs are different]
       swap= (False) if False, find closest matches in cat2 for each cat1 source, if False do the opposite (important when one of the catalogs has duplicates)
       col_field= (None) if None, simply cross-match on RA and Dec; if a string, then cross-match on RA and Dec with additional matching in the data tag specified by the string
    OUTPUT:
       (index into cat1 of matching objects,
        index into cat2 of matching objects,
        angular separation between matching objects)
    HISTORY:
       2016-09-12 - Written - Bovy (UofT)
       2016-09-21 - Account for Gaia epoch 2015 - Bovy (UofT)
       2019-07-07 - add additional catalog field matching - Leung (UofT)
    """
    # Default the epochs from each catalog's 'ref_epoch' field (Gaia style)
    # when present; otherwise fall back to J2000.
    if epoch1 is None:
        if 'ref_epoch' in cat1.dtype.fields:
            epoch1= cat1['ref_epoch']
        else:
            epoch1= 2000.
    if epoch2 is None:
        if 'ref_epoch' in cat2.dtype.fields:
            epoch2= cat2['ref_epoch']
        else:
            epoch2= 2000.
    # Warn if the epochs look inconsistent with a Gaia catalog.
    _check_epoch(cat1,epoch1)
    _check_epoch(cat2,epoch2)
    depoch= epoch2-epoch1
    if numpy.any(depoch != 0.):
        # Use proper motion to get both catalogs at the same time
        # (pm in mas/yr -> degrees over the epoch difference; pmRA includes
        # cos(Dec), hence the division to get a true RA shift).
        dra=cat2[colpmRA2]/numpy.cos(cat2[colDec2]/180.*numpy.pi)\
            /3600000.*depoch
        ddec= cat2[colpmDec2]/3600000.*depoch
        # Don't shift objects with non-existing proper motion
        dra[numpy.isnan(cat2[colpmRA2])]= 0.
        ddec[numpy.isnan(cat2[colpmDec2])]= 0.
    else:
        dra= 0.
        ddec= 0.
    # cat2 positions are rewound to cat1's epoch before matching.
    mc1= acoords.SkyCoord(cat1[colRA1],cat1[colDec1],
                          unit=(u.degree, u.degree),frame='icrs')
    mc2= acoords.SkyCoord(cat2[colRA2]-dra,cat2[colDec2]-ddec,
                          unit=(u.degree, u.degree),frame='icrs')
    if col_field is not None:
        # Field-constrained match: only allow pairs whose col_field values
        # agree, by matching each value class separately.
        try: # check if the field actually exists in both cat1/cat2
            cat1[col_field]
            cat2[col_field]
        except KeyError: # python 2/3 format string
            raise KeyError("'%s' does not exist in both catalog" % col_field)
        uniques = numpy.unique(cat1[col_field])
        if swap: # times neg one to indicate those indices untouch will be noticed at the end and filtered out
            d2d = numpy.ones(len(cat2)) * -1.
            idx = numpy.zeros(len(cat2), dtype=int)
        else:
            d2d = numpy.ones(len(cat1)) * -1.
            idx = numpy.zeros(len(cat1), dtype=int)
        for unique in uniques: # loop over the class
            idx_1 = numpy.arange(cat1[colRA1].shape[0])[cat1[col_field] == unique]
            idx_2 = numpy.arange(cat2[colRA2].shape[0])[cat2[col_field] == unique]
            if idx_1.shape[0] == 0 or idx_2.shape[0] == 0: # the case where a class only exists in one but not the other
                continue
            if swap:
                temp_idx, temp_d2d, d3d = mc2[idx_2].match_to_catalog_sky(mc1[idx_1])
                m1 = numpy.arange(len(cat2))
                # Map the per-class match indices back to full-catalog indices.
                idx[cat2[col_field] == unique] = idx_1[temp_idx]
                d2d[cat2[col_field] == unique] = temp_d2d
            else:
                temp_idx, temp_d2d, d3d = mc1[idx_1].match_to_catalog_sky(mc2[idx_2])
                m1 = numpy.arange(len(cat1))
                idx[cat1[col_field] == unique] = idx_2[temp_idx]
                d2d[cat1[col_field] == unique] = temp_d2d
        # NOTE(review): if no value of col_field appears in both catalogs, the
        # loop body never runs and temp_d2d/m1 are undefined here (NameError);
        # also, the assignments above store d2d in temp_d2d's unit, which the
        # next line relies on — confirm both with a degenerate-input test.
        d2d = d2d * temp_d2d.unit # make sure finally we have an unit on d2d array s.t. "<" operation can complete
    else:
        if swap:
            idx,d2d,d3d = mc2.match_to_catalog_sky(mc1)
            m1= numpy.arange(len(cat2))
        else:
            idx,d2d,d3d = mc1.match_to_catalog_sky(mc2)
            m1= numpy.arange(len(cat1))
    # to make sure filtering out all neg ones which are untouched
    # (entries left at -1 arcsec never satisfy 0 <= d2d and are dropped)
    mindx= ((d2d < maxdist*u.arcsec) & (0.*u.arcsec <= d2d))
    m1= m1[mindx]
    m2= idx[mindx]
    if swap:
        return (m2,m1,d2d[mindx])
    else:
        return (m1,m2,d2d[mindx])
def cds(cat,xcat='vizier:I/345/gaia2',maxdist=2,colRA='RA',colDec='DEC',
        selection='best',epoch=None,colpmRA='pmra',colpmDec='pmdec',
        savefilename=None,gaia_all_columns=False):
    """
    NAME:
       cds
    PURPOSE:
       Cross-match against a catalog in the CDS archive using the CDS cross-matching service (http://cdsxmatch.u-strasbg.fr/xmatch); uses the curl interface
    INPUT:
       cat - a catalog to cross match, requires 'RA' and 'DEC' keywords (see below)
       xcat= ('vizier:I/345/gaia2') name of the catalog to cross-match against, in a format understood by the CDS cross-matching service (see http://cdsxmatch.u-strasbg.fr/xmatch/doc/available-tables.html; things like 'vizier:Tycho2' or 'vizier:I/345/gaia2')
       maxdist= (2) maximum distance in arcsec
       colRA= ('RA') name of the tag in cat with the right ascension
       colDec= ('DEC') name of the tag in cat with the declination
       selection= ('best') select either all matches or the best match according to CDS (see 'selection' at http://cdsxmatch.u-strasbg.fr/xmatch/doc/API-calls.html)
       epoch= (2000.) epoch of the coordinates in cat
       colpmRA= ('pmra') name of the tag in cat with the proper motion in right ascension in degree in cat (assumed to be ICRS; includes cos(Dec)) [only used when epoch != 2000.]
       colpmDec= ('pmdec') name of the tag in cat with the proper motion in declination in degree in cat (assumed to be ICRS) [only used when epoch != 2000.]
       gaia_all_columns= (False) set to True if you are matching against Gaia DR2 and want *all* columns returned; this runs a query at the Gaia Archive, which may or may not work...
       savefilename= (None) if set, save the output from CDS to this path; can match back using cds_matchback
    OUTPUT:
       (xcat entries for those that match,
       indices into cat of matching sources: index[0] is cat index of xcat[0])
    HISTORY:
       2016-09-12 - Written based on RC catalog code - Bovy (UofT)
       2016-09-21 - Account for Gaia epoch 2015 - Bovy (UofT)
       2018-05-08 - Added gaia_all_columns - Bovy (UofT)
    """
    # Default the epoch from the catalog's 'ref_epoch' field when present.
    if epoch is None:
        if 'ref_epoch' in cat.dtype.fields:
            epoch= cat['ref_epoch']
        else:
            epoch= 2000.
    _check_epoch(cat,epoch)
    depoch= epoch-2000.
    if numpy.any(depoch != 0.):
        # Use proper motion to rewind cat to epoch 2000 before uploading
        # (pm in mas/yr -> degrees over the epoch difference).
        dra=cat[colpmRA]/numpy.cos(cat[colDec]/180.*numpy.pi)\
            /3600000.*depoch
        ddec= cat[colpmDec]/3600000.*depoch
        # Don't shift objects with non-existing proper motion
        dra[numpy.isnan(cat[colpmRA])]= 0.
        ddec[numpy.isnan(cat[colpmDec])]= 0.
    else:
        dra= numpy.zeros(len(cat))
        ddec= numpy.zeros(len(cat))
    # Anything other than 'all' collapses to 'best'; 'all' is not implemented.
    if selection != 'all': selection= 'best'
    if selection == 'all':
        raise NotImplementedError("selection='all' CDS cross-match not currently implemented")
    # Write positions to a temporary CSV for upload.
    # NOTE(review): tempfile.mktemp is race-prone; kept for compatibility
    # because curl needs a named file in the current directory.
    posfilename= tempfile.mktemp('.csv',dir=os.getcwd())
    resultfilename= tempfile.mktemp('.csv',dir=os.getcwd())
    with open(posfilename,'w') as csvfile:
        wr= csv.writer(csvfile,delimiter=',',quoting=csv.QUOTE_MINIMAL)
        wr.writerow(['RA','DEC'])
        for ii in range(len(cat)):
            # BUG FIX: the closing bracket used to sit before '-ddec[ii]'
            # ([RA, DEC]-ddec[ii]), so numpy broadcast the Dec shift over the
            # whole row and corrupted the written RA whenever depoch != 0.
            wr.writerow([(cat[ii][colRA]-dra[ii]+360.) % 360.,
                         cat[ii][colDec]-ddec[ii]])
    _cds_match_batched(resultfilename,posfilename,maxdist,selection,xcat)
    # Directly match on input RA
    ma= cds_load(resultfilename)
    if gaia_all_columns:
        from astroquery.gaia import Gaia
        # Write another temporary file with the XML output of the cross-match
        tab= Table(numpy.array([ma['source_id'],ma['RA'],ma['DEC']]).T,
                   names=('source_id','RA','DEC'),
                   dtype=('int64','float64','float64'))
        xmlfilename= tempfile.mktemp('.xml',dir=os.getcwd())
        tab.write(xmlfilename,format='votable')
        try:
            job= Gaia.launch_job_async(
                """select g.*, m.RA as mRA, m.DEC as mDEC
from gaiadr2.gaia_source as g
inner join tap_upload.my_table as m on m.source_id = g.source_id""",
                upload_resource=xmlfilename,
                upload_table_name="my_table")
            ma= job.get_results()
        except Exception:
            # Narrowed from a bare 'except:' so Ctrl-C/SystemExit still
            # propagate; the fallback result is the plain CDS output.
            print("gaia_tools.xmath.cds failed to retrieve all gaiadr2 columns, returning just the default returned by the CDS xMatch instead...")
        else:
            ma.rename_column('mra','RA')
            ma.rename_column('mdec','DEC')
        finally:
            os.remove(xmlfilename)
    # Remove temporary files
    os.remove(posfilename)
    if savefilename is None:
        os.remove(resultfilename)
    else:
        shutil.move(resultfilename,savefilename)
    # Match back to the original catalog
    mai= cds_matchback(cat,ma,colRA=colRA,colDec=colDec,epoch=epoch,
                       colpmRA=colpmRA,colpmDec=colpmDec)
    return (ma,mai)
def _cds_match_batched(resultfilename,posfilename,maxdist,selection,xcat,
                       nruns_necessary=1):
    """CDS xMatch (sometimes?) fails for large matches, because of a time-out,
    so we recursively split until the batches are small enough to not fail.

    The recursion encodes its position in the split tree in the file-name
    suffix: each level appends '.1' or '.2' to posfilename, and the suffix
    digits (minus one each) read as a binary number give the batch index.
    Returns the number of batches that turned out to be necessary, so sibling
    branches can skip the doomed full-size attempt."""
    # Figure out which of the hierarchy we are running: decode the '.1'/'.2'
    # suffix chain into a binary string ('' at the root -> ValueError path).
    try:
        runs= ''.join([str(int(r)-1)
                       for r in posfilename.split('csv.')[-1].split('.')])
    except ValueError:
        runs= ''
    nruns= 2**len(runs)
    if nruns >= nruns_necessary:
        # Only run this level's match if we don't already know that we should
        # be using smaller batches
        _cds_basic_match(resultfilename,posfilename,maxdist,selection,xcat)
        try:
            ma= cds_load(resultfilename)
        except ValueError: # Assume this is the time-out failure
            pass
        else:
            return nruns
    # xMatch failed because of time-out, split
    posfilename1= posfilename+'.1'
    posfilename2= posfilename+'.2'
    resultfilename1= resultfilename+'.1'
    resultfilename2= resultfilename+'.2'
    # Figure out which of the hierarchy we are running
    runs= ''.join([str(int(r)-1)
                   for r in posfilename1.split('csv.')[-1].split('.')])
    nruns= 2**len(runs)
    thisrun1= 1+int(runs,2)
    thisrun2= 1+int(''.join([str(int(r)-1)
                   for r in posfilename2.split('csv.')[-1].split('.')]),2)
    # Count the number of objects
    with open(posfilename,'r') as posfile:
        num_lines= sum(1 for line in posfile)
    # Write the header line
    with open(posfilename1,'w') as posfile1:
        with open(posfilename,'r') as posfile:
            posfile1.write(posfile.readline())
    with open(posfilename2,'w') as posfile2:
        with open(posfilename,'r') as posfile:
            posfile2.write(posfile.readline())
    # Cut in half: first half of the data lines to file 1, rest to file 2.
    cnt= 0
    with open(posfilename,'r') as posfile:
        with open(posfilename1,'a') as posfile1:
            with open(posfilename2,'a') as posfile2:
                for line in posfile:
                    if cnt == 0:
                        # Skip the header line already copied above.
                        cnt+= 1
                        continue
                    if cnt < num_lines//2:
                        posfile1.write(line)
                        cnt+= 1 # Can stop counting once this if is done
                    else:
                        posfile2.write(line)
    # Run each half, threading nruns_necessary through so later siblings can
    # skip batch sizes already known to time out.
    sys.stdout.write('\r'+"Working on CDS xMatch batch {} / {} ...\r"\
                         .format(thisrun1,nruns))
    sys.stdout.flush()
    nruns_necessary= _cds_match_batched(resultfilename1,posfilename1,
                                        maxdist,selection,xcat,
                                        nruns_necessary=nruns_necessary)
    sys.stdout.write('\r'+"Working on CDS xMatch batch {} / {} ...\r"\
                         .format(thisrun2,nruns))
    sys.stdout.flush()
    nruns_necessary= _cds_match_batched(resultfilename2,posfilename2,
                                        maxdist,selection,xcat,
                                        nruns_necessary=nruns_necessary)
    sys.stdout.write('\r'+_ERASESTR+'\r')
    sys.stdout.flush()
    # Combine results, dropping the second file's header (first char 'a',
    # i.e. the 'angDist,...' header line from the CDS CSV).
    with open(resultfilename,'w') as resultfile:
        with open(resultfilename1,'r') as resultfile1:
            for line in resultfile1:
                resultfile.write(line)
        with open(resultfilename2,'r') as resultfile2:
            for line in resultfile2:
                if line[0] == 'a': continue
                resultfile.write(line)
    # Remove intermediate files
    os.remove(posfilename1)
    os.remove(posfilename2)
    os.remove(resultfilename1)
    os.remove(resultfilename2)
    return nruns_necessary
def _cds_basic_match(resultfilename,posfilename,maxdist,selection,xcat):
    """Submit one synchronous xMatch request to CDS via curl.

    The uploaded catalog is *posfilename* (CSV with RA/DEC columns) and the
    CSV response is streamed into *resultfilename*. On a curl failure both
    temporary files are cleaned up. Always returns None.
    """
    outfile = open(resultfilename,'w')
    # Assemble the multipart form fields for the xMatch sync API.
    cmd = ['curl','-X','POST']
    for form_field in ('request=xmatch',
                       'distMaxArcsec=%i' % maxdist,
                       'selection=%s' % selection,
                       'RESPONSEFORMAT=csv',
                       'cat1=@%s' % os.path.basename(posfilename),
                       'colRA1=RA',
                       'colDec1=DEC',
                       'cat2=%s' % xcat):
        cmd+= ['-F',form_field]
    cmd.append('http://cdsxmatch.u-strasbg.fr/xmatch/api/v1/sync')
    try:
        subprocess.check_call(cmd,stdout=outfile)
    except subprocess.CalledProcessError:
        # curl failed: discard the uploaded positions and any partial result.
        os.remove(posfilename)
        if os.path.exists(resultfilename):
            outfile.close()
            os.remove(resultfilename)
    outfile.close()
    return None
def cds_load(filename):
    """Read a CDS xMatch CSV result file into a numpy record array.

    On Windows (no numpy float128) every column is read as float64 except
    'source_id', which is read as uint64 so the full 64-bit Gaia source id
    keeps its precision; elsewhere everything is read as float128.
    """
    if not WIN32:
        return numpy.genfromtxt(filename, delimiter=',', skip_header=0,
                                filling_values=-9999.99, names=True,
                                dtype='float128')
    # Windows: probe just the first row to discover the column names, then
    # build an explicit dtype with source_id as an unsigned integer.
    probe = numpy.genfromtxt(filename, delimiter=',', skip_header=0,
                             filling_values=-9999.99, names=True, max_rows=1,
                             dtype='float64')
    columns = list(probe.dtype.names)
    dtypes = [(name, numpy.float64) for name in columns]
    dtypes[columns.index('source_id')] = ('source_id', numpy.uint64)
    return numpy.genfromtxt(filename, delimiter=',', skip_header=0,
                            filling_values=-9999.99, names=True,
                            dtype=dtypes)
def cds_matchback(cat,xcat,colRA='RA',colDec='DEC',selection='best',
                  epoch=None,colpmRA='pmra',colpmDec='pmdec',):
    """
    NAME:
       cds_matchback
    PURPOSE:
       Match a matched catalog from xmatch.cds back to the original catalog
    INPUT
       cat - original catalog
       xcat - matched catalog returned by xmatch.cds
       colRA= ('RA') name of the tag in cat with the right ascension
       colDec= ('DEC') name of the tag in cat with the declination
       selection= ('best') select either all matches or the best match according to CDS (see 'selection' at http://cdsxmatch.u-strasbg.fr/xmatch/doc/API-calls.html)
       epoch= (2000.) epoch of the coordinates in cat
       colpmRA= ('pmra') name of the tag in cat with the proper motion in right ascension in degree in cat (assumed to be ICRS; includes cos(Dec)) [only used when epoch != 2000.]
       colpmDec= ('pmdec') name of the tag in cat with the proper motion in declination in degree in cat (assumed to be ICRS) [only used when epoch != 2000.]
    OUTPUT:
       Array indices into cat of xcat entries: index[0] is cat index of xcat[0]
    HISTORY:
       2016-09-12 - Written - Bovy (UofT)
       2018-05-04 - Account for non-zero epoch difference - Bovy (UofT)
    """
    if selection != 'all': selection= 'best'
    if selection == 'all':
        raise NotImplementedError("selection='all' CDS cross-match not currently implemented")
    if epoch is None:
        if 'ref_epoch' in cat.dtype.fields:
            epoch= cat['ref_epoch']
        else:
            epoch= 2000.
    _check_epoch(cat,epoch)
    depoch= epoch-2000.
    # NOTE(review): the shift below must reproduce the one applied in cds()
    # exactly, because the match relies on near-zero separations — keep the
    # two code paths in sync.
    if numpy.any(depoch != 0.):
        # Use proper motion to get both catalogs at the same time
        dra=cat[colpmRA]/numpy.cos(cat[colDec]/180.*numpy.pi)\
            /3600000.*depoch
        ddec= cat[colpmDec]/3600000.*depoch
        # Don't shift objects with non-existing proper motion
        dra[numpy.isnan(cat[colpmRA])]= 0.
        ddec[numpy.isnan(cat[colpmDec])]= 0.
    else:
        dra= numpy.zeros(len(cat))
        ddec= numpy.zeros(len(cat))
    # xmatch to v. small diff., because match is against *original* coords,
    # not matched coords in CDS
    mc1= acoords.SkyCoord(cat[colRA]-dra,cat[colDec]-ddec,
                          unit=(u.degree, u.degree),frame='icrs')
    mc2= acoords.SkyCoord(xcat['RA'],xcat['DEC'],
                          unit=(u.degree, u.degree),frame='icrs')
    idx,d2d,d3d = mc2.match_to_catalog_sky(mc1)
    # Accept only essentially exact positional matches (same source).
    mindx= d2d < 1e-5*u.arcsec
    return idx[mindx]
def _check_epoch(cat,epoch):
warn_about_epoch= False
if 'ref_epoch' in cat.dtype.fields:
if 'designation' not in cat.dtype.fields: # Assume this is DR1
if numpy.any(numpy.fabs(epoch-2015.) > 0.01):
warn_about_epoch= True
elif 'Gaia DR2' in cat['designation'][0].decode('utf-8'):
if numpy.any(numpy.fabs(epoch-2015.5) > 0.01):
warn_about_epoch= True
if warn_about_epoch:
warnings.warn("You appear to be using a Gaia catalog, but are not setting the epoch to 2015. (DR1) or 2015.5 (DR2), which may lead to incorrect matches")
return None
| 46.809859 | 258 | 0.608094 |
acef0d24e410e7be01423e26e56bc63372dc6b28 | 6,096 | py | Python | dynamic_plot.py | traits/dynamic_plot | 666f4dd78de590b6f126429fa217ce401c45f0a3 | [
"MIT"
] | null | null | null | dynamic_plot.py | traits/dynamic_plot | 666f4dd78de590b6f126429fa217ce401c45f0a3 | [
"MIT"
] | null | null | null | dynamic_plot.py | traits/dynamic_plot | 666f4dd78de590b6f126429fa217ce401c45f0a3 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Embedding D3 and three.js for Pelican
=====================================
This plugin allows you to easily embed D3 and three.js JS and CSS in Pelicans
configuration and also the header of individual articles or pages.
It also allows for for embedding article-specific D3.js versions.
"""
import os
import shutil
from pathlib import Path
import json
import logging
# import wingdbstub
from pelican import signals
logger = logging.getLogger(__name__)
DP_DEFAULT = {
"dynplot_modules": True,
"dynplot_d3_url": "https://d3js.org/d3.v5.min.js",
"dynplot_three_url": "https://threejs.org/build/three.module.js",
}
DP_KEY = "DYNAMIC_PLOT_OPTIONS"
DP_MODULES_KEY = "dynplot_modules"
DP_SCRIPTS_KEY = "dynplot_scripts"
DP_STYLES_KEY = "dynplot_styles"
file_mapping = []
def init_default_config(pelican):
    """Merge the plugin's default options into Pelican's settings.

    Defaults come first; any user-supplied DYNAMIC_PLOT_OPTIONS entries
    override them. Applied to both DEFAULT_CONFIG and the live instance.
    """
    from pelican.settings import DEFAULT_CONFIG

    def merged(settings):
        # Start from the plugin defaults, then layer user overrides on top.
        options = DP_DEFAULT.copy()
        if DP_KEY in settings:
            options.update(settings[DP_KEY])
        settings[DP_KEY] = options
        return settings

    merged(DEFAULT_CONFIG)
    if pelican:
        pelican.settings = merged(pelican.settings)
def get_effective_option(metadata, settings, key):
    """
    Return option with highest priority:
    not-defined key < default < pelican config settings < file metadata
    """
    # Per-file metadata wins; otherwise fall back to the merged plugin config.
    fallback = settings[DP_KEY].get(key)
    return metadata.get(key, fallback)
def is_json(fname):
    """Return True when *fname* is a bracketed JSON-list reference, i.e.
    a non-empty string enclosed in '[' and ']'."""
    text = str(fname)
    return bool(text) and text.startswith("[") and text.endswith("]")
def get_json(json_file):
    """Load *json_file* and return its top-level JSON array as a list."""
    with open(json_file) as handle:
        return list(json.load(handle))
def is_relative(fname):
    """Return True for a non-empty path that does NOT start with '/'
    (i.e. relative to the article, not to the site root)."""
    text = str(fname)
    return bool(text) and not text.startswith("/")
def copy_files_to_destination(gen):
    """Copy every (source, destination) pair collected in ``file_mapping``
    into the output tree, creating destination directories as needed.
    Missing sources are logged and skipped; *gen* is the (unused) Pelican
    signal argument."""
    for src, dst in file_mapping:
        if not src.exists():
            logger.warning(f"dynamic_plot: source file not found ({str(src)})")
            continue
        os.makedirs(os.path.dirname(str(dst)), exist_ok=True)
        shutil.copy2(src, dst)
def get_mapping(content, tag):
    """
    Resolve the comma-separated file list stored under *tag* on *content*
    into [source_path, output_path, raw_name] triples.

    Relative names are resolved against the article's directory; names with
    a leading '/' are resolved against the content/output roots. Entries of
    the form '[list.json]' are expanded to the file names that JSON array
    contains.
    """
    files_str = getattr(content, tag, None)
    if not files_str:
        return []

    src_dir = Path(content.relative_dir)
    dst_dir = Path(content.url).parent
    content_root = Path(content.settings.get("PATH"))
    output_root = Path(content.settings.get("OUTPUT_PATH"))

    names = files_str.replace(" ", "").split(",")
    # Plain entries first, then the contents of each '[...]' JSON reference.
    expanded = [n for n in names if not is_json(n)]
    for ref in (n[1:-1] for n in names if is_json(n)):
        if is_relative(ref):
            expanded += get_json(content_root / src_dir / ref)
        else:
            expanded += get_json(content_root / ref[1:])

    triples = []
    for name in expanded:
        if is_relative(name):
            triples.append(
                [content_root / src_dir / name, output_root / dst_dir / name, name])
        else:
            triples.append([content_root / name[1:], output_root / name[1:], name])
    return [[src.resolve(), dst.resolve(), raw] for src, dst, raw in triples]
def get_formatted_resource(content, tag, formatter):
    """Return *formatter*.format(...) applied to each POSIX-normalized file
    name listed (comma-separated) under *tag* on *content*; [] when the tag
    is absent or empty."""
    files_str = getattr(content, tag, None)
    if not files_str:
        return []
    names = files_str.replace(" ", "").split(",")
    return [formatter.format(Path(name).as_posix()) for name in names]
def format_scripts(content, urls):
    """Attach <script> tags for user and master scripts to *content*.

    User scripts come first in *urls*; the configured d3/three master URLs
    are then inserted at the front, so they load before any user script.
    """
    as_module = get_effective_option(
        content.metadata, content.settings, DP_SCRIPTS_KEY
    )

    def tag_for(src):
        kind = ' type="module"' if as_module else ""
        return f'<script{kind} src="{src}"></script>'

    entries = [tag_for(Path(u).as_posix()) for u in urls]
    if not entries:
        return
    # Assign a fresh list; mutating content.metadata["dynplot_scripts"] after
    # content creation would no longer affect the rendered HTML output.
    content.dynplot_scripts = list(entries)
    # Master scripts: each insert(0, ...) pushes earlier entries back, so the
    # three.js URL ends up first, followed by the d3 URL.
    for url_tag in ("dynplot_d3_url", "dynplot_three_url"):
        master = get_effective_option(content.metadata, content.settings, url_tag)
        content.dynplot_scripts.insert(0, tag_for(master))
def format_styles(content, urls):
    """Attach <link rel="stylesheet"> tags for the given style URLs to *content*."""
    links = [
        f'<link rel="stylesheet" href="{Path(u).as_posix()}" type="text/css" />'
        for u in urls
    ]
    if links:
        # Assign a fresh list; mutating content.metadata["dynplot_styles"]
        # after content creation would no longer affect the rendered HTML.
        content.dynplot_styles = list(links)
def add_files(content):
    """Collect the script/style resources declared on *content*.

    Records (source, target) copy pairs in the module-level file_mapping
    list and injects the matching HTML tags into the content object.
    """
    global file_mapping
    for key, inject in (
        (DP_SCRIPTS_KEY, format_scripts),
        (DP_STYLES_KEY, format_styles),
    ):
        mapping = get_mapping(content, key)
        if mapping:
            file_mapping += [[src, dst] for src, dst, _ in mapping]
            inject(content, [raw for _, _, raw in mapping])
def register():
    """Plugin registration: wire the handlers into Pelican's signal pipeline."""
    hooks = (
        (signals.initialized, init_default_config),
        (signals.content_object_init, add_files),
        (signals.finalized, copy_files_to_destination),
    )
    for signal, handler in hooks:
        signal.connect(handler)
| 27.093333 | 85 | 0.639436 |
acef0da4f21380e302c1ef7f0dc328148ab1b9a6 | 800 | py | Python | var/spack/repos/builtin/packages/perl-error/package.py | player1537-forks/spack | 822b7632222ec5a91dc7b7cda5fc0e08715bd47c | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 11 | 2015-10-04T02:17:46.000Z | 2018-02-07T18:23:00.000Z | var/spack/repos/builtin/packages/perl-error/package.py | player1537-forks/spack | 822b7632222ec5a91dc7b7cda5fc0e08715bd47c | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 22 | 2017-08-01T22:45:10.000Z | 2022-03-10T07:46:31.000Z | var/spack/repos/builtin/packages/perl-error/package.py | player1537-forks/spack | 822b7632222ec5a91dc7b7cda5fc0e08715bd47c | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 4 | 2016-06-10T17:57:39.000Z | 2018-09-11T04:59:38.000Z | # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PerlError(PerlPackage):
    """The Error package provides two interfaces. Firstly Error provides a
    procedural interface to exception handling. Secondly Error is a base class
    for errors/exceptions that can either be thrown, for subsequent catch, or
    can simply be recorded."""

    # Upstream project page and canonical CPAN download location.
    homepage = "https://metacpan.org/pod/Error"
    url = "https://cpan.metacpan.org/authors/id/S/SH/SHLOMIF/Error-0.17028.tar.gz"

    # Known release with its sha256 checksum of the CPAN tarball.
    version('0.17028', sha256='3ad85c5e58b31c8903006298424a51bba39f1840e324f5ae612eabc8b935e960')

    # Module::Build is only required while building the distribution.
    depends_on('perl-module-build', type='build')
| 38.095238 | 97 | 0.75 |
acef0e748f2b706d640cba8c2885abee67a16d4f | 294 | py | Python | autharch_sharc/conftest.py | kingsdigitallab/autharch_sharc | 92de5fbec8cc72ce48a9e25eb634d40ac2cc83ca | [
"MIT"
] | null | null | null | autharch_sharc/conftest.py | kingsdigitallab/autharch_sharc | 92de5fbec8cc72ce48a9e25eb634d40ac2cc83ca | [
"MIT"
] | null | null | null | autharch_sharc/conftest.py | kingsdigitallab/autharch_sharc | 92de5fbec8cc72ce48a9e25eb634d40ac2cc83ca | [
"MIT"
] | null | null | null | import pytest
from autharch_sharc.users.models import User
from autharch_sharc.users.tests.factories import UserFactory
@pytest.fixture(autouse=True)
def media_storage(settings, tmpdir):
    """Point Django's MEDIA_ROOT at a per-test temporary directory.

    Applied automatically to every test so file uploads never touch the
    real media folder.
    """
    settings.MEDIA_ROOT = tmpdir.strpath
@pytest.fixture
def user() -> User:
    """Return a freshly generated ``User`` instance built by ``UserFactory``."""
    return UserFactory()
| 19.6 | 60 | 0.785714 |
acef0ec4ddb04d744895166281c856c72f0f87bb | 1,450 | py | Python | src/configuration.py | vladmandic/tf-cnn-classification | af6c9cfe3edc4455d40430ed87a5a77f8ebc3548 | [
"Apache-2.0"
] | null | null | null | src/configuration.py | vladmandic/tf-cnn-classification | af6c9cfe3edc4455d40430ed87a5a77f8ebc3548 | [
"Apache-2.0"
] | null | null | null | src/configuration.py | vladmandic/tf-cnn-classification | af6c9cfe3edc4455d40430ed87a5a77f8ebc3548 | [
"Apache-2.0"
] | null | null | null |
# Training Parameters
DEVICE = "gpu"  # cpu or gpu
EPOCHS = 10  # how many epochs to run while training
BATCH_SIZE = 8  # how many images to train on in a single call
NUM_CLASSES = 4  # number of target categories
IMAGE_HEIGHT = 224  # input resolution fed to the network
IMAGE_WIDTH = 224
CHANNELS = 3  # rgb
TRAIN_SET_RATIO = 0.6  # 60% of dataset for training
TEST_SET_RATIO = 0.2  # 20% of dataset for testing (remainder presumably validation — confirm in dataset split code)
SAVE_N_EPOCH = 5  # save checkpoint every n epochs

# Model Network: index selects one of the architectures listed below.
# 0: MobileNet-v1, 1: MobileNet-v2, 2: MobileNet-v3-Large, 3: MobileNet-v3-Small
# 4: EfficientNet-B0, 5: EfficientNet-B1, 6: EfficientNet-B2, 7: EfficientNet-B3
# 8: EfficientNet-B4, 9: EfficientNet-B5, 10: EfficientNet-B6, 11: EfficientNet-B7
# 12: ResNeXt50, 13: ResNeXt101
# 14: InceptionV4, 15: InceptionResNetV1, 16: InceptionResNetV2
# 17: SE_ResNet_50, 18: SE_ResNet_101, 19: SE_ResNet_152
# 20: SqueezeNet
# 21: DenseNet_121, 22: DenseNet_169, 23: DenseNet_201, 24: DenseNet_269
# 25: ShuffleNetV2-0.5x, 26: ShuffleNetV2-1.0x, 27: ShuffleNetV2-1.5x, 28: ShuffleNetV2-2.0x
# 29: ResNet_18, 30: ResNet_34, 31: ResNet_50, 32: ResNet_101, 33: ResNet_152
# 34: SEResNeXt_50, 35: SEResNeXt_101
# 36: RegNet
MODEL = 3  # per the table above, 3 selects MobileNet-v3-Small

# Locations
save_model_dir = "saved/"  # where trained checkpoints are written
test_image_dir = ""  # NOTE(review): consumer of this path not visible here — confirm usage
dataset_dir = "train/"
train_dir = dataset_dir + "train"
valid_dir = dataset_dir + "valid"
test_dir = dataset_dir + "test"
train_tfrecord = dataset_dir + "train.tfrecord"
valid_tfrecord = dataset_dir + "valid.tfrecord"
test_tfrecord = dataset_dir + "test.tfrecord"
| 37.179487 | 92 | 0.742759 |
acef0fafd7eab17ee74c4befd5c5429f5d8302cc | 158 | py | Python | tests/model_control/detailed/transf_Fisher/model_control_one_enabled_Fisher_PolyTrend_Seasonal_Hour_MLP.py | jmabry/pyaf | afbc15a851a2445a7824bf255af612dc429265af | [
"BSD-3-Clause"
] | null | null | null | tests/model_control/detailed/transf_Fisher/model_control_one_enabled_Fisher_PolyTrend_Seasonal_Hour_MLP.py | jmabry/pyaf | afbc15a851a2445a7824bf255af612dc429265af | [
"BSD-3-Clause"
] | 1 | 2019-11-30T23:39:38.000Z | 2019-12-01T04:34:35.000Z | tests/model_control/detailed/transf_Fisher/model_control_one_enabled_Fisher_PolyTrend_Seasonal_Hour_MLP.py | jmabry/pyaf | afbc15a851a2445a7824bf255af612dc429265af | [
"BSD-3-Clause"
] | null | null | null | import pyaf.tests.model_control.test_ozone_custom_models_enabled as testmod
# Build the ozone test model for the Fisher transform / PolyTrend /
# hourly-seasonality / MLP combination.
testmod.build_model(['Fisher'], ['PolyTrend'], ['Seasonal_Hour'], ['MLP'])
acef10430a2ad06e45087d6278b580058a2c1bea | 1,700 | py | Python | deckbuilder/context.py | meduzik/deckbuilder | 8cab2e258cac0272ac53dd3b64df692281ea46e9 | [
"MIT"
] | 2 | 2020-08-25T12:48:38.000Z | 2021-04-14T16:30:50.000Z | deckbuilder/context.py | meduzik/deckbuilder | 8cab2e258cac0272ac53dd3b64df692281ea46e9 | [
"MIT"
] | 1 | 2021-04-14T16:49:05.000Z | 2021-04-14T20:38:21.000Z | deckbuilder/context.py | meduzik/deckbuilder | 8cab2e258cac0272ac53dd3b64df692281ea46e9 | [
"MIT"
] | null | null | null | import os
from typing import Dict, List, Optional, TYPE_CHECKING
from deckbuilder.utils import encode, ValidateError
if TYPE_CHECKING:
from deckbuilder.core import TextStyle
from deckbuilder.executor import StmtSequence, Stmt
class DeckContext:
    """Top-level build state shared while compiling a deck project."""

    def __init__(self, base_path: str):
        # Directory against which relative resource paths are resolved.
        self.base_path: str = base_path
        self.styles: Dict[str, TextStyle] = {}
        self.decks: List[DeckTemplate] = []
        self.inlines: Dict[str, InlineSymbol] = {}

    def resolve_inline(self, name: str) -> 'InlineSymbol':
        """Return the inline symbol registered under *name* or raise ValidateError."""
        if name not in self.inlines:
            raise ValidateError(f"inline symbol '{encode(name)}' is not defined")
        return self.inlines[name]

    def resolve_style(self, name: str) -> 'TextStyle':
        """Return the text style registered under *name* or raise ValidateError."""
        if name not in self.styles:
            raise ValidateError(f"text style '{encode(name)}' is not defined")
        return self.styles[name]

    def resolve_path(self, path: str) -> str:
        """Join *path* onto the project base directory and return the absolute form."""
        joined = os.path.join(self.base_path, path)
        return os.path.abspath(joined)
class FaceTemplate:
    """Wraps the statement sequence that renders one face of a card."""

    def __init__(self, block: 'StmtSequence'):
        self.block = block  # StmtSequence executed to draw this face
class CardData:
    """Raw data for a single card: its copy count and its field values."""

    def __init__(self):
        self.count = 1  # number of copies of this card in the deck
        self.data = {}  # field name -> field value, both strings
class CardBlock:
    """A group of cards together with the renderer statements applied to them."""

    def __init__(self):
        self.cards = []      # CardData entries belonging to this block
        self.renderers = []  # Stmt objects that render the block's cards
class DeckTemplate:
    """Describes one deck: card dimensions, shared faces and card blocks."""

    def __init__(self, name: str, width: int, height: int):
        self.name = name
        self.width: int = width
        self.height: int = height
        self.scale: float = 1     # output scale factor
        self.face_hidden = None   # optional FaceTemplate for hidden cards
        self.back_default = None  # optional FaceTemplate for card backs
        self.card_blocks = []     # CardBlock entries belonging to the deck
class InlineSymbol:
    """An inline image token: its name, image source and vertical offset."""

    def __init__(self, name: str, src: str, offset_y: float):
        self.name = name
        self.src = src
        self.offset_y = offset_y
acef11810acf7768af666291359d7130a734c294 | 22,236 | py | Python | samples/models/swin_transformer_tensorflow/model.py | volc-mlplatform/ml-platform-sdk-python | 2d85e23c10a1f3c008da0f1a8ea59c277c750233 | [
"MIT"
] | null | null | null | samples/models/swin_transformer_tensorflow/model.py | volc-mlplatform/ml-platform-sdk-python | 2d85e23c10a1f3c008da0f1a8ea59c277c750233 | [
"MIT"
] | null | null | null | samples/models/swin_transformer_tensorflow/model.py | volc-mlplatform/ml-platform-sdk-python | 2d85e23c10a1f3c008da0f1a8ea59c277c750233 | [
"MIT"
] | null | null | null | import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Dropout
from tensorflow.keras.layers import GlobalAveragePooling1D
from tensorflow.keras.layers import LayerNormalization
# Canonical Swin-Transformer configurations, keyed by variant name.  Each
# entry fixes the expected input resolution, the attention window size, the
# patch-embedding width and the per-stage depths / attention-head counts
# used when the model is built.
CFGS = {
    "swin_tiny_224": dict(
        input_size=(224, 224),
        window_size=7,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
    ),
    "swin_small_224": dict(
        input_size=(224, 224),
        window_size=7,
        embed_dim=96,
        depths=[2, 2, 18, 2],
        num_heads=[3, 6, 12, 24],
    ),
    "swin_base_224": dict(
        input_size=(224, 224),
        window_size=7,
        embed_dim=128,
        depths=[2, 2, 18, 2],
        num_heads=[4, 8, 16, 32],
    ),
    "swin_base_384": dict(
        input_size=(384, 384),
        window_size=12,
        embed_dim=128,
        depths=[2, 2, 18, 2],
        num_heads=[4, 8, 16, 32],
    ),
    "swin_large_224": dict(
        input_size=(224, 224),
        window_size=7,
        embed_dim=192,
        depths=[2, 2, 18, 2],
        num_heads=[6, 12, 24, 48],
    ),
    "swin_large_384": dict(
        input_size=(384, 384),
        window_size=12,
        embed_dim=192,
        depths=[2, 2, 18, 2],
        num_heads=[6, 12, 24, 48],
    ),
}
class Mlp(tf.keras.layers.Layer):
    """Two-layer feed-forward block (Dense -> GELU -> Dense) with dropout.

    hidden_features / out_features default to in_features when not given.
    *prefix* is embedded in the Dense layer names so checkpoint weights map
    by name — do not rename the fc1/fc2 attributes or layer names.
    """

    def __init__(
        self,
        in_features,
        hidden_features=None,
        out_features=None,
        drop=0.0,
        prefix="",
    ):
        super().__init__()
        out_features = out_features or in_features
        hidden_features = hidden_features or in_features
        self.fc1 = Dense(hidden_features, name=f"{prefix}/mlp/fc1")
        self.fc2 = Dense(out_features, name=f"{prefix}/mlp/fc2")
        # The same Dropout instance is applied after both projections.
        self.drop = Dropout(drop)

    def call(self, x):
        x = self.fc1(x)
        x = tf.keras.activations.gelu(x)
        x = self.drop(x)
        x = self.fc2(x)
        x = self.drop(x)
        return x
def window_partition(x, window_size):
    """Split a (B, H, W, C) feature map into non-overlapping square windows.

    Returns a tensor reshaped to (num_windows * B, window_size, window_size, C).
    H and W must be divisible by *window_size*.
    """
    B, H, W, C = x.get_shape().as_list()
    rows = H // window_size
    cols = W // window_size
    tiled = tf.reshape(x, shape=[-1, rows, window_size, cols, window_size, C])
    tiled = tf.transpose(tiled, perm=[0, 1, 3, 2, 4, 5])
    return tf.reshape(tiled, shape=[-1, window_size, window_size, C])
def window_reverse(windows, window_size, H, W, C):
    """Inverse of window_partition: stitch windows back into a feature map.

    *windows* has shape (num_windows * B, window_size, window_size, C); the
    result is reshaped to (B, H, W, C).
    """
    rows = H // window_size
    cols = W // window_size
    x = tf.reshape(windows, shape=[-1, rows, cols, window_size, window_size, C])
    x = tf.transpose(x, perm=[0, 1, 3, 2, 4, 5])
    return tf.reshape(x, shape=[-1, H, W, C])
class WindowAttention(tf.keras.layers.Layer):
    """Multi-head self-attention within a window, with relative position bias.

    Operates on flattened windows of shape (num_windows*B, N, C) where
    N == window_size[0] * window_size[1].  Layer/weight names embed *prefix*
    so pretrained checkpoints map by name — keep the attribute names stable.
    """

    def __init__(
        self,
        dim,
        window_size,
        num_heads,
        qkv_bias=True,
        qk_scale=None,
        attn_drop=0.0,
        proj_drop=0.0,
        prefix="",
    ):
        super().__init__()
        self.dim = dim
        self.window_size = window_size
        self.num_heads = num_heads
        head_dim = dim // num_heads
        # Default scaling is 1/sqrt(head_dim) unless overridden by qk_scale.
        self.scale = qk_scale or head_dim**-0.5
        self.prefix = prefix

        # One Dense projects to q, k and v at once (3 * dim outputs).
        self.qkv = Dense(
            dim * 3,
            use_bias=qkv_bias,
            name=f"{self.prefix}/attn/qkv",
        )
        self.attn_drop = Dropout(attn_drop)
        self.proj = Dense(dim, name=f"{self.prefix}/attn/proj")
        self.proj_drop = Dropout(proj_drop)

    def build(self, input_shape):
        # Learnable bias, one row per possible relative (dy, dx) offset
        # within the window, one column per attention head.
        self.relative_position_bias_table = self.add_weight(
            f"{self.prefix}/attn/relative_position_bias_table",
            shape=(
                (2 * self.window_size[0] - 1) * (2 * self.window_size[1] - 1),
                self.num_heads,
            ),
            initializer=tf.initializers.Zeros(),
            trainable=True,
        )

        # Precompute, for every pair of positions in the window, the index
        # into the bias table for their relative offset.
        coords_h = np.arange(self.window_size[0])
        coords_w = np.arange(self.window_size[1])
        coords = np.stack(np.meshgrid(coords_h, coords_w, indexing="ij"))
        coords_flatten = coords.reshape(2, -1)
        relative_coords = (
            coords_flatten[
                :,
                :,
                None,
            ]
            - coords_flatten[:, None, :]
        )
        relative_coords = relative_coords.transpose([1, 2, 0])
        # Shift offsets to be non-negative, then flatten the 2-D offset into
        # a single table index.
        relative_coords[:, :, 0] += self.window_size[0] - 1
        relative_coords[:, :, 1] += self.window_size[1] - 1
        relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
        relative_position_index = relative_coords.sum(-1)
        self.relative_position_index = tf.Variable(
            initial_value=tf.convert_to_tensor(relative_position_index),
            trainable=False,
            name=f"{self.prefix}/attn/relative_position_index",
        )
        self.built = True

    def call(self, x, mask=None):
        B_, N, C = x.get_shape().as_list()
        # (3, batch, heads, N, head_dim) after reshape + transpose.
        qkv = tf.transpose(
            tf.reshape(
                self.qkv(x),
                shape=[-1, N, 3, self.num_heads, C // self.num_heads],
            ),
            perm=[2, 0, 3, 1, 4],
        )
        q, k, v = qkv[0], qkv[1], qkv[2]
        q = q * self.scale
        attn = q @ tf.transpose(k, perm=[0, 1, 3, 2])
        # Add the learned relative position bias to the attention logits.
        relative_position_bias = tf.gather(
            self.relative_position_bias_table,
            tf.reshape(self.relative_position_index, shape=[-1]),
        )
        relative_position_bias = tf.reshape(
            relative_position_bias,
            shape=[
                self.window_size[0] * self.window_size[1],
                self.window_size[0] * self.window_size[1],
                -1,
            ],
        )
        relative_position_bias = tf.transpose(
            relative_position_bias,
            perm=[2, 0, 1],
        )
        attn = attn + tf.expand_dims(relative_position_bias, axis=0)
        if mask is not None:
            # *mask* carries one additive mask per window (nW of them),
            # used by shifted-window attention to block cross-window pairs.
            nW = mask.get_shape()[0]  # tf.shape(mask)[0]
            attn = tf.reshape(attn, shape=[-1, nW, self.num_heads, N, N]) + tf.cast(
                tf.expand_dims(
                    tf.expand_dims(
                        mask,
                        axis=1,
                    ),
                    axis=0,
                ),
                tf.float32,
            )
            attn = tf.reshape(attn, shape=[-1, self.num_heads, N, N])
            attn = tf.nn.softmax(attn, axis=-1)
        else:
            attn = tf.nn.softmax(attn, axis=-1)
        attn = self.attn_drop(attn)
        x = tf.transpose((attn @ v), perm=[0, 2, 1, 3])
        x = tf.reshape(x, shape=[-1, N, C])
        x = self.proj(x)
        x = self.proj_drop(x)
        return x
def drop_path(inputs, drop_prob, is_training):
    """Stochastic depth: randomly zero whole samples and rescale the rest.

    During training each sample in the batch is kept with probability
    1 - drop_prob; kept samples are divided by the keep probability so the
    expected value is unchanged.  Outside training (or with drop_prob 0)
    the input passes through untouched.
    """
    if (not is_training) or (drop_prob == 0.0):
        return inputs

    keep_prob = 1.0 - drop_prob
    # One Bernoulli draw per batch element, broadcast over remaining axes.
    rank = tf.experimental.numpy.ndim(inputs)
    shape = (tf.shape(inputs)[0],) + (1,) * (rank - 1)
    mask = tf.floor(keep_prob + tf.random.uniform(shape, dtype=inputs.dtype))
    return tf.math.divide(inputs, keep_prob) * mask
class DropPath(tf.keras.layers.Layer):
    """Keras layer wrapper around the functional :func:`drop_path`.

    drop_prob is the probability of dropping a sample's residual branch;
    the drop is only applied while training.
    """

    def __init__(self, drop_prob=None):
        super().__init__()
        self.drop_prob = drop_prob

    def call(self, x, training=None):
        # `training` is supplied by Keras and forwarded as the is_training flag.
        return drop_path(x, self.drop_prob, training)
class SwinTransformerBlock(tf.keras.layers.Layer):
    """One Swin block: (shifted-)window attention plus an MLP, both residual.

    With shift_size == 0 this is plain window attention (W-MSA); a positive
    shift cyclically rolls the feature map first and masks cross-window
    pairs (SW-MSA).  Layer names embed *prefix* so pretrained checkpoints
    map by name.
    """

    def __init__(
        self,
        dim,
        input_resolution,
        num_heads,
        window_size=7,
        shift_size=0,
        mlp_ratio=4.0,
        qkv_bias=True,
        qk_scale=None,
        drop=0.0,
        attn_drop=0.0,
        drop_path_prob=0.0,
        norm_layer=LayerNormalization,
        prefix="",
    ):
        super().__init__()
        self.dim = dim
        self.input_resolution = input_resolution
        self.num_heads = num_heads
        self.window_size = window_size
        self.shift_size = shift_size
        self.mlp_ratio = mlp_ratio
        # If the feature map is no larger than one window, shifting is
        # pointless: fall back to a single unshifted window.
        if min(self.input_resolution) <= self.window_size:
            self.shift_size = 0
            self.window_size = min(self.input_resolution)
        assert (
            0 <= self.shift_size and self.shift_size < self.window_size
        ), "shift_size must in 0-window_size"
        self.prefix = prefix

        self.norm1 = norm_layer(epsilon=1e-5, name=f"{self.prefix}/norm1")
        self.attn = WindowAttention(
            dim,
            window_size=(self.window_size, self.window_size),
            num_heads=num_heads,
            qkv_bias=qkv_bias,
            qk_scale=qk_scale,
            attn_drop=attn_drop,
            proj_drop=drop,
            prefix=self.prefix,
        )
        self.drop_path = DropPath(
            drop_path_prob if drop_path_prob > 0.0 else 0.0,
        )
        self.norm2 = norm_layer(epsilon=1e-5, name=f"{self.prefix}/norm2")
        mlp_hidden_dim = int(dim * mlp_ratio)
        self.mlp = Mlp(
            in_features=dim,
            hidden_features=mlp_hidden_dim,
            drop=drop,
            prefix=self.prefix,
        )

    def build(self, input_shape):
        if self.shift_size > 0:
            # Precompute the attention mask for shifted windows: label each
            # spatial region, window-partition the labels, and forbid
            # attention (additive -100.0) between positions whose labels
            # differ, i.e. pairs that were not neighbours before the roll.
            H, W = self.input_resolution
            img_mask = np.zeros([1, H, W, 1])
            h_slices = (
                slice(0, -self.window_size),
                slice(-self.window_size, -self.shift_size),
                slice(-self.shift_size, None),
            )
            w_slices = (
                slice(0, -self.window_size),
                slice(-self.window_size, -self.shift_size),
                slice(-self.shift_size, None),
            )
            cnt = 0
            for h in h_slices:
                for w in w_slices:
                    img_mask[:, h, w, :] = cnt
                    cnt += 1

            img_mask = tf.convert_to_tensor(img_mask)
            mask_windows = window_partition(img_mask, self.window_size)
            mask_windows = tf.reshape(
                mask_windows,
                shape=[-1, self.window_size * self.window_size],
            )
            attn_mask = tf.expand_dims(mask_windows, axis=1) - tf.expand_dims(
                mask_windows,
                axis=2,
            )
            attn_mask = tf.where(attn_mask != 0, -100.0, attn_mask)
            attn_mask = tf.where(attn_mask == 0, 0.0, attn_mask)
            self.attn_mask = tf.Variable(
                initial_value=attn_mask,
                trainable=False,
                name=f"{self.prefix}/attn_mask",
            )
        else:
            self.attn_mask = None

        self.built = True

    def call(self, x):
        H, W = self.input_resolution
        B, L, C = x.get_shape().as_list()
        assert L == H * W, "input feature has wrong size"

        shortcut = x
        x = self.norm1(x)
        x = tf.reshape(x, shape=[-1, H, W, C])

        # cyclic shift
        if self.shift_size > 0:
            shifted_x = tf.roll(
                x,
                shift=[-self.shift_size, -self.shift_size],
                axis=[1, 2],
            )
        else:
            shifted_x = x

        # partition windows
        x_windows = window_partition(shifted_x, self.window_size)
        x_windows = tf.reshape(
            x_windows,
            shape=[-1, self.window_size * self.window_size, C],
        )

        # W-MSA/SW-MSA
        attn_windows = self.attn(x_windows, mask=self.attn_mask)

        # merge windows
        attn_windows = tf.reshape(
            attn_windows,
            shape=[-1, self.window_size, self.window_size, C],
        )
        shifted_x = window_reverse(attn_windows, self.window_size, H, W, C)

        # reverse cyclic shift
        if self.shift_size > 0:
            x = tf.roll(
                shifted_x,
                shift=[
                    self.shift_size,
                    self.shift_size,
                ],
                axis=[1, 2],
            )
        else:
            x = shifted_x
        x = tf.reshape(x, shape=[-1, H * W, C])

        # FFN with residual connections and stochastic depth.
        x = shortcut + self.drop_path(x)
        x = x + self.drop_path(self.mlp(self.norm2(x)))

        return x
class PatchMerging(tf.keras.layers.Layer):
    """Downsampling between Swin stages: 2x2 patch merge plus linear reduction.

    Concatenates each 2x2 neighbourhood (4C channels), normalizes, then
    projects to 2C — halving the spatial resolution while doubling width.
    """

    def __init__(
        self,
        input_resolution,
        dim,
        norm_layer=LayerNormalization,
        prefix="",
    ):
        super().__init__()
        self.input_resolution = input_resolution
        self.dim = dim
        self.reduction = Dense(
            2 * dim,
            use_bias=False,
            name=f"{prefix}/downsample/reduction",
        )
        self.norm = norm_layer(epsilon=1e-5, name=f"{prefix}/downsample/norm")

    def call(self, x):
        H, W = self.input_resolution
        B, L, C = x.get_shape().as_list()
        assert L == H * W, "input feature has wrong size"
        assert H % 2 == 0 and W % 2 == 0, f"x size ({H}*{W}) are not even."

        x = tf.reshape(x, shape=[-1, H, W, C])

        # Pick the four interleaved sub-grids of the 2x2 neighbourhoods.
        x0 = x[:, 0::2, 0::2, :]  # B H/2 W/2 C
        x1 = x[:, 1::2, 0::2, :]  # B H/2 W/2 C
        x2 = x[:, 0::2, 1::2, :]  # B H/2 W/2 C
        x3 = x[:, 1::2, 1::2, :]  # B H/2 W/2 C
        x = tf.concat([x0, x1, x2, x3], axis=-1)
        x = tf.reshape(x, shape=[-1, (H // 2) * (W // 2), 4 * C])

        x = self.norm(x)
        x = self.reduction(x)

        return x
class BasicLayer(tf.keras.layers.Layer):
    """One Swin stage: *depth* transformer blocks plus optional downsampling.

    Blocks alternate between unshifted (even index) and shifted (odd index)
    window attention.  drop_path_prob may be a single float or a per-block
    list of stochastic-depth rates.
    """

    def __init__(
        self,
        dim,
        input_resolution,
        depth,
        num_heads,
        window_size,
        mlp_ratio=4.0,
        qkv_bias=True,
        qk_scale=None,
        drop=0.0,
        attn_drop=0.0,
        drop_path_prob=0.0,
        norm_layer=LayerNormalization,
        downsample=None,
        use_checkpoint=False,
        prefix="",
    ):
        super().__init__()
        self.dim = dim
        self.input_resolution = input_resolution
        self.depth = depth
        # NOTE(review): use_checkpoint is stored but never consulted here.
        self.use_checkpoint = use_checkpoint

        # build blocks
        self.blocks = tf.keras.Sequential(
            [
                SwinTransformerBlock(
                    dim=dim,
                    input_resolution=input_resolution,
                    num_heads=num_heads,
                    window_size=window_size,
                    # Even blocks: plain windows; odd blocks: half-window shift.
                    shift_size=0 if (i % 2 == 0) else window_size // 2,
                    mlp_ratio=mlp_ratio,
                    qkv_bias=qkv_bias,
                    qk_scale=qk_scale,
                    drop=drop,
                    attn_drop=attn_drop,
                    drop_path_prob=drop_path_prob[i]
                    if isinstance(
                        drop_path_prob,
                        list,
                    )
                    else drop_path_prob,
                    norm_layer=norm_layer,
                    prefix=f"{prefix}/blocks{i}",
                )
                for i in range(depth)
            ],
        )
        if downsample is not None:
            self.downsample = downsample(
                input_resolution,
                dim=dim,
                norm_layer=norm_layer,
                prefix=prefix,
            )
        else:
            self.downsample = None

    def call(self, x):
        x = self.blocks(x)
        if self.downsample is not None:
            x = self.downsample(x)
        return x
class PatchEmbed(tf.keras.layers.Layer):
    """Image-to-patch embedding via a strided convolution.

    Splits a (B, H, W, C) image into non-overlapping patch_size patches and
    projects each to embed_dim channels, returning (B, num_patches,
    embed_dim).  Optionally applies a normalization layer afterwards.

    Fix: the flattened patch count previously used ``patch_size[0]`` for
    both axes; the width axis now correctly uses ``patch_size[1]``.  This
    is identical for the square patches used throughout this file, but is
    required for non-square patch sizes.
    """

    def __init__(
        self,
        img_size=(224, 224),
        patch_size=(4, 4),
        in_chans=3,
        embed_dim=96,
        norm_layer=None,
    ):
        super().__init__(name="patch_embed")
        patches_resolution = [
            img_size[0] // patch_size[0],
            img_size[1] // patch_size[1],
        ]
        self.img_size = img_size
        self.patch_size = patch_size
        self.patches_resolution = patches_resolution
        self.num_patches = patches_resolution[0] * patches_resolution[1]

        self.in_chans = in_chans
        self.embed_dim = embed_dim

        # Non-overlapping patches: kernel size equals stride.
        self.proj = Conv2D(
            embed_dim,
            kernel_size=patch_size,
            strides=patch_size,
            name="proj",
        )
        if norm_layer is not None:
            self.norm = norm_layer(epsilon=1e-5, name="norm")
        else:
            self.norm = None

    def call(self, x):
        B, H, W, C = x.get_shape().as_list()
        assert (
            H == self.img_size[0] and W == self.img_size[1]
        ), f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
        x = self.proj(x)
        # Flatten the spatial patch grid into one sequence dimension.
        # Width uses patch_size[1] (was patch_size[0] — wrong for
        # non-square patches).
        x = tf.reshape(
            x,
            shape=[
                -1,
                (H // self.patch_size[0]) * (W // self.patch_size[1]),
                self.embed_dim,
            ],
        )
        if self.norm is not None:
            x = self.norm(x)
        return x
class SwinTransformerModel(tf.keras.Model):
    """Full Swin Transformer: patch embedding, stacked stages, pooled head.

    When include_top is True a Dense classification head over num_classes
    is appended; otherwise call() returns the pooled feature vector.
    NOTE: the list defaults (depths, num_heads) are shared across calls but
    are only read (len/indexing/sum), never mutated, so this is safe.
    """

    def __init__(
        self,
        model_name="swin_tiny_patch4_window7_224",
        include_top=False,
        img_size=(224, 224),
        patch_size=(4, 4),
        in_chans=3,
        num_classes=1000,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        qk_scale=None,
        drop_rate=0.0,
        attn_drop_rate=0.0,
        drop_path_rate=0.1,
        norm_layer=LayerNormalization,
        ape=False,
        patch_norm=True,
        use_checkpoint=False,
        **kwargs,
    ):
        super().__init__(name=model_name)

        self.include_top = include_top

        self.num_classes = num_classes
        self.num_layers = len(depths)
        self.embed_dim = embed_dim
        self.ape = ape
        self.patch_norm = patch_norm
        # Channel width of the final stage (doubles at every downsample).
        self.num_features = int(embed_dim * 2 ** (self.num_layers - 1))
        self.mlp_ratio = mlp_ratio

        # split image into non-overlapping patches
        self.patch_embed = PatchEmbed(
            img_size=img_size,
            patch_size=patch_size,
            in_chans=in_chans,
            embed_dim=embed_dim,
            norm_layer=norm_layer if self.patch_norm else None,
        )
        num_patches = self.patch_embed.num_patches
        patches_resolution = self.patch_embed.patches_resolution
        self.patches_resolution = patches_resolution

        # absolute postion embedding (optional, learned, zero-initialized)
        if self.ape:
            self.absolute_pos_embed = self.add_weight(
                "absolute_pos_embed",
                shape=(1, num_patches, embed_dim),
                initializer=tf.initializers.Zeros(),
            )

        self.pos_drop = Dropout(drop_rate)

        # stochastic depth: per-block drop rates rise linearly to
        # drop_path_rate across all blocks of all stages.
        dpr = [x for x in np.linspace(0.0, drop_path_rate, sum(depths))]

        # build layers: every stage except the last ends in a PatchMerging
        # downsample; resolution halves and width doubles per stage.
        self.basic_layers = tf.keras.Sequential(
            [
                BasicLayer(
                    dim=int(embed_dim * 2**i_layer),
                    input_resolution=(
                        patches_resolution[0] // (2**i_layer),
                        patches_resolution[1] // (2**i_layer),
                    ),
                    depth=depths[i_layer],
                    num_heads=num_heads[i_layer],
                    window_size=window_size,
                    mlp_ratio=self.mlp_ratio,
                    qkv_bias=qkv_bias,
                    qk_scale=qk_scale,
                    drop=drop_rate,
                    attn_drop=attn_drop_rate,
                    drop_path_prob=dpr[
                        sum(
                            depths[:i_layer],
                        ) : sum(depths[: i_layer + 1])
                    ],
                    norm_layer=norm_layer,
                    downsample=PatchMerging
                    if (i_layer < self.num_layers - 1)
                    else None,
                    use_checkpoint=use_checkpoint,
                    prefix=f"layers{i_layer}",
                )
                for i_layer in range(self.num_layers)
            ],
        )
        self.norm = norm_layer(epsilon=1e-5, name="norm")
        self.avgpool = GlobalAveragePooling1D()
        if self.include_top:
            self.head = Dense(num_classes, name="head")
        else:
            self.head = None

    def forward_features(self, x):
        """Run the backbone and return the pooled (B, num_features) vector."""
        x = self.patch_embed(x)
        if self.ape:
            x = x + self.absolute_pos_embed

        x = self.pos_drop(x)
        x = self.basic_layers(x)
        x = self.norm(x)
        x = self.avgpool(x)
        return x

    def call(self, x):
        x = self.forward_features(x)
        if self.include_top:
            x = self.head(x)
        return x
def SwinTransformer(
    model_name="swin_tiny_224",
    cache_dir=None,
    num_classes=1000,
    include_top=True,
    pretrained=True,
    use_tpu=False,
    cfgs=CFGS,
):
    """Build a Swin Transformer variant and optionally load pretrained weights.

    *pretrained* may be True (download the published checkpoint), a path to
    a checkpoint or checkpoint directory, or a falsy value to skip loading.
    Returns the constructed (and possibly weight-initialized) keras model.
    """
    spec = cfgs[model_name]
    height, width = spec["input_size"]
    net = SwinTransformerModel(
        model_name=model_name,
        include_top=include_top,
        num_classes=num_classes,
        img_size=spec["input_size"],
        window_size=spec["window_size"],
        embed_dim=spec["embed_dim"],
        depths=spec["depths"],
        num_heads=spec["num_heads"],
    )
    # One symbolic forward pass so every layer builds its weights.
    net(tf.keras.Input(shape=(height, width, 3)))

    if pretrained is True:
        # Fetch (and cache) the published checkpoint archive for this variant.
        url = f"https://github.com/rishigami/Swin-Transformer-TF/releases/download/v0.1-tf-swin-weights/{model_name}.tgz"
        ckpt = tf.keras.utils.get_file(
            model_name,
            url,
            untar=True,
            cache_dir=cache_dir,
            cache_subdir="",
        )
    else:
        # False/None skips loading; any other value is treated as a path.
        ckpt = pretrained

    if ckpt:
        if tf.io.gfile.isdir(ckpt):
            ckpt = f"{ckpt}/{model_name}.ckpt"
        if use_tpu:
            # On TPU, force checkpoint reads through the local host.
            load_locally = tf.saved_model.LoadOptions(
                experimental_io_device="/job:localhost",
            )
            net.load_weights(ckpt, options=load_locally)
        else:
            net.load_weights(ckpt)
    return net
| 29.84698 | 121 | 0.51655 |
acef121e90cdec4f36cc9835836c080e6b5b9be7 | 3,919 | py | Python | src/bin/shipyard_client/shipyard_client/cli/input_checks.py | openvdro/airship-shipyard | bae15294c534cf321f5c7ca37592dfa74c4ad7c2 | [
"Apache-2.0"
] | 12 | 2018-05-18T18:59:23.000Z | 2019-05-10T12:31:44.000Z | src/bin/shipyard_client/shipyard_client/cli/input_checks.py | openvdro/airship-shipyard | bae15294c534cf321f5c7ca37592dfa74c4ad7c2 | [
"Apache-2.0"
] | 4 | 2021-07-28T14:36:57.000Z | 2022-03-22T16:39:23.000Z | src/bin/shipyard_client/shipyard_client/cli/input_checks.py | openvdro/airship-shipyard | bae15294c534cf321f5c7ca37592dfa74c4ad7c2 | [
"Apache-2.0"
] | 9 | 2018-05-18T16:42:41.000Z | 2019-04-18T20:12:14.000Z | # Copyright 2017 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""CLI value checks invoked from commands"""
import arrow
from arrow.parser import ParserError
def check_action_command(ctx, action_command):
    """Report through ctx.fail unless *action_command* is a known action."""
    allowed = (
        'deploy_site',
        'update_site',
        'update_software',
        'redeploy_server',
        'relabel_nodes',
        'test_site',
    )
    if action_command in allowed:
        return
    ctx.fail('Invalid action command. The action commands available are '
             ' {}'.format(', '.join(allowed)))
def check_control_action(ctx, action):
    """Report through ctx.fail unless *action* is pause, unpause or stop."""
    if action in ('pause', 'unpause', 'stop'):
        return
    ctx.fail('Invalid action. Please enter pause, unpause, or stop.')
def check_id(ctx, ulid_id):
    """Report through ctx.fail unless *ulid_id* looks like a 26-char ULID."""
    checks = (
        (lambda v: v is None,
         'Invalid ID. None is not a valid action ID.'),
        (lambda v: len(v) != 26,
         'Invalid ID. ID can only be 26 characters.'),
        (lambda v: not v.isalnum(),
         'Invalid ID. ID can only contain letters and numbers.'),
    )
    for is_bad, message in checks:
        if is_bad(ulid_id):
            ctx.fail(message)
def check_workflow_id(ctx, workflow_id):
    """Verify that *workflow_id* matches the <name>__<date> format.

    The id must contain a double underscore separating the workflow name
    from its execution date, and the date portion must round-trip exactly
    as YYYY-MM-DDTHH:mm:ss.SSSSSS.  Violations are reported via ctx.fail.
    """
    if workflow_id is None:
        ctx.fail('Invalid ID. None is not a valid workflow ID.')
    if '__' not in workflow_id:
        # Message typo fixed: "cotain" -> "contain".
        ctx.fail('Invalid ID. The ID must contain a double underscore '
                 'separating the workflow name from the execution date')
    input_date_string = workflow_id.split('__')[1]
    date_format_ok = True
    try:
        parsed_dt = arrow.get(input_date_string)
        # Round-trip through formatting to reject lenient parses.
        if input_date_string != parsed_dt.format('YYYY-MM-DDTHH:mm:ss.SSSSSS'):
            date_format_ok = False
    except ParserError:
        date_format_ok = False
    if not date_format_ok:
        ctx.fail('Invalid ID. The date portion of the ID must conform to '
                 'YYYY-MM-DDTHH:mm:ss.SSSSSS')
def check_reformat_parameter(ctx, param):
    """Parse <name>=<value> strings into a dict.

    Each entry is split on the FIRST '=' only, so values may themselves
    contain '=' characters (previously everything after the second '='
    was silently dropped).  Entries with no '=' at all are reported via
    ctx.fail, naming the offending entry, instead of being swallowed by
    a broad ``except Exception``.

    :param ctx: click-style context whose fail() reports usage errors.
    :param param: iterable of "name=value" strings.
    :returns: dict mapping parameter names to their values.
    """
    param_dictionary = {}
    for p in param:
        name, sep, value = p.partition('=')
        if not sep:
            # No '=' present in this entry: reject it explicitly.
            ctx.fail(
                "Invalid parameter or parameter format for " + p +
                ". Please utilize the format: "
                "<parameter name>=<parameter value>")
        param_dictionary[name] = value
    return param_dictionary
def check_reformat_versions(ctx, buffer, committed, last_site_action,
                            successful_site_action):
    """Translate version flags into the pair of versions to compare.

    No flags selects the default ('committed', 'buffer'); exactly two
    flags are returned in their canonical order; any other count is a
    usage error reported via ctx.fail.
    """
    flags = (
        ('buffer', buffer),
        ('committed', committed),
        ('last_site_action', last_site_action),
        ('successful_site_action', successful_site_action),
    )
    versions = [name for name, chosen in flags if chosen]
    if not versions:
        return ['committed', 'buffer']
    if len(versions) == 2:
        return versions
    ctx.fail(
        "Invalid input. User must either\n"
        "1. Pass in 0 versions, in which case --buffer and --committed "
        "versions are assumed\n"
        "2. Pass in 2 valid versions for comparison\n\n"
        "Valid versions are '--buffer', '--committed', "
        "'--last-site-action' and '--successful-site-action'")
| 36.626168 | 79 | 0.651187 |
acef129e8ad0fc66cbbe502b3a4764148ae34df1 | 26,080 | py | Python | google/cloud/gaming_v1/services/game_server_configs_service/client.py | Yingxin-Jiang/python-game-servers | 7b897b44024a8f06467ad8433bfe900feaee4b04 | [
"Apache-2.0"
] | null | null | null | google/cloud/gaming_v1/services/game_server_configs_service/client.py | Yingxin-Jiang/python-game-servers | 7b897b44024a8f06467ad8433bfe900feaee4b04 | [
"Apache-2.0"
] | null | null | null | google/cloud/gaming_v1/services/game_server_configs_service/client.py | Yingxin-Jiang/python-game-servers | 7b897b44024a8f06467ad8433bfe900feaee4b04 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import os
import re
from typing import Callable, Dict, Sequence, Tuple, Type, Union
import pkg_resources
import google.api_core.client_options as ClientOptions # type: ignore
from google.api_core import exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
from google.api_core import operation
from google.api_core import operation_async
from google.cloud.gaming_v1.services.game_server_configs_service import pagers
from google.cloud.gaming_v1.types import common
from google.cloud.gaming_v1.types import game_server_configs
from google.protobuf import empty_pb2 as empty # type: ignore
from google.protobuf import timestamp_pb2 as timestamp # type: ignore
from .transports.base import GameServerConfigsServiceTransport
from .transports.grpc import GameServerConfigsServiceGrpcTransport
from .transports.grpc_asyncio import GameServerConfigsServiceGrpcAsyncIOTransport
class GameServerConfigsServiceClientMeta(type):
    """Metaclass for the GameServerConfigsService client.

    This provides class-level methods for building and retrieving
    support objects (e.g. transport) without polluting the client instance
    objects.
    """

    _transport_registry = (
        OrderedDict()
    )  # type: Dict[str, Type[GameServerConfigsServiceTransport]]
    _transport_registry["grpc"] = GameServerConfigsServiceGrpcTransport
    _transport_registry["grpc_asyncio"] = GameServerConfigsServiceGrpcAsyncIOTransport

    def get_transport_class(
        cls, label: str = None,
    ) -> Type[GameServerConfigsServiceTransport]:
        """Return an appropriate transport class.

        Args:
            label: The name of the desired transport. If none is
                provided, then the first transport in the registry is used.

        Returns:
            The transport class to use.
        """
        registry = cls._transport_registry
        # An explicit label selects that transport; otherwise fall back to
        # the first transport registered above ("grpc").
        if label:
            return registry[label]
        return next(iter(registry.values()))
class GameServerConfigsServiceClient(metaclass=GameServerConfigsServiceClientMeta):
    """The game server config configures the game servers in an
    Agones fleet.
    """
    @staticmethod
    def _get_default_mtls_endpoint(api_endpoint):
        """Convert api endpoint to mTLS endpoint.
        Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
        "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
        Args:
            api_endpoint (Optional[str]): the api endpoint to convert.
        Returns:
            str: converted mTLS api endpoint.
        """
        if not api_endpoint:
            return api_endpoint
        mtls_endpoint_re = re.compile(
            r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
        )
        m = mtls_endpoint_re.match(api_endpoint)
        name, mtls, sandbox, googledomain = m.groups()
        # Already an mTLS endpoint, or not a googleapis.com domain: leave as-is.
        if mtls or not googledomain:
            return api_endpoint
        if sandbox:
            return api_endpoint.replace(
                "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
            )
        return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
    # Default service endpoint; the mTLS variant is derived from it via the
    # helper above.
    DEFAULT_ENDPOINT = "gameservices.googleapis.com"
    DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__(  # type: ignore
        DEFAULT_ENDPOINT
    )
    @classmethod
    def from_service_account_file(cls, filename: str, *args, **kwargs):
        """Creates an instance of this client using the provided credentials
        file.
        Args:
            filename (str): The path to the service account private key json
                file.
            args: Additional arguments to pass to the constructor.
            kwargs: Additional arguments to pass to the constructor.
        Returns:
            GameServerConfigsServiceClient: The constructed client.
        """
        credentials = service_account.Credentials.from_service_account_file(filename)
        kwargs["credentials"] = credentials
        return cls(*args, **kwargs)
    # Alias: a service-account JSON file is handled identically.
    from_service_account_json = from_service_account_file
    @staticmethod
    def game_server_config_path(
        project: str, location: str, deployment: str, config: str,
    ) -> str:
        """Return a fully-qualified game_server_config string."""
        return "projects/{project}/locations/{location}/gameServerDeployments/{deployment}/configs/{config}".format(
            project=project, location=location, deployment=deployment, config=config,
        )
    @staticmethod
    def parse_game_server_config_path(path: str) -> Dict[str, str]:
        """Parse a game_server_config path into its component segments."""
        m = re.match(
            r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/gameServerDeployments/(?P<deployment>.+?)/configs/(?P<config>.+?)$",
            path,
        )
        # Empty dict (not None) when the path does not match the pattern.
        return m.groupdict() if m else {}
    def __init__(
        self,
        *,
        credentials: credentials.Credentials = None,
        transport: Union[str, GameServerConfigsServiceTransport] = None,
        client_options: ClientOptions = None,
    ) -> None:
        """Instantiate the game server configs service client.
        Args:
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            transport (Union[str, ~.GameServerConfigsServiceTransport]): The
                transport to use. If set to None, a transport is chosen
                automatically.
            client_options (ClientOptions): Custom options for the client. It
                won't take effect if a ``transport`` instance is provided.
                (1) The ``api_endpoint`` property can be used to override the
                default endpoint provided by the client. GOOGLE_API_USE_MTLS
                environment variable can also be used to override the endpoint:
                "always" (always use the default mTLS endpoint), "never" (always
                use the default regular endpoint, this is the default value for
                the environment variable) and "auto" (auto switch to the default
                mTLS endpoint if client SSL credentials is present). However,
                the ``api_endpoint`` property takes precedence if provided.
                (2) The ``client_cert_source`` property is used to provide client
                SSL credentials for mutual TLS transport. If not provided, the
                default SSL credentials will be used if present.
        Raises:
            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
                creation failed for any reason.
        """
        if isinstance(client_options, dict):
            client_options = ClientOptions.from_dict(client_options)
        if client_options is None:
            client_options = ClientOptions.ClientOptions()
        # Resolve the endpoint from GOOGLE_API_USE_MTLS ("never" by default,
        # "always", or "auto") unless the caller set api_endpoint explicitly.
        if client_options.api_endpoint is None:
            use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS", "never")
            if use_mtls_env == "never":
                client_options.api_endpoint = self.DEFAULT_ENDPOINT
            elif use_mtls_env == "always":
                client_options.api_endpoint = self.DEFAULT_MTLS_ENDPOINT
            elif use_mtls_env == "auto":
                has_client_cert_source = (
                    client_options.client_cert_source is not None
                    or mtls.has_default_client_cert_source()
                )
                client_options.api_endpoint = (
                    self.DEFAULT_MTLS_ENDPOINT
                    if has_client_cert_source
                    else self.DEFAULT_ENDPOINT
                )
            else:
                raise MutualTLSChannelError(
                    "Unsupported GOOGLE_API_USE_MTLS value. Accepted values: never, auto, always"
                )
        # Save or instantiate the transport.
        # Ordinarily, we provide the transport, but allowing a custom transport
        # instance provides an extensibility point for unusual situations.
        if isinstance(transport, GameServerConfigsServiceTransport):
            # transport is a GameServerConfigsServiceTransport instance.
            if credentials or client_options.credentials_file:
                raise ValueError(
                    "When providing a transport instance, "
                    "provide its credentials directly."
                )
            if client_options.scopes:
                raise ValueError(
                    "When providing a transport instance, "
                    "provide its scopes directly."
                )
            self._transport = transport
        else:
            Transport = type(self).get_transport_class(transport)
            self._transport = Transport(
                credentials=credentials,
                credentials_file=client_options.credentials_file,
                host=client_options.api_endpoint,
                scopes=client_options.scopes,
                api_mtls_endpoint=client_options.api_endpoint,
                client_cert_source=client_options.client_cert_source,
                quota_project_id=client_options.quota_project_id,
            )
    def list_game_server_configs(
        self,
        request: game_server_configs.ListGameServerConfigsRequest = None,
        *,
        parent: str = None,
        retry: retries.Retry = gapic_v1.method.DEFAULT,
        timeout: float = None,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> pagers.ListGameServerConfigsPager:
        r"""Lists game server configs in a given project,
        location, and game server deployment.
        Args:
            request (:class:`~.game_server_configs.ListGameServerConfigsRequest`):
                The request object. Request message for
                GameServerConfigsService.ListGameServerConfigs.
            parent (:class:`str`):
                Required. The parent resource name. Uses the form:
                ``projects/{project}/locations/{location}/gameServerDeployments/{deployment}/configs/*``.
                This corresponds to the ``parent`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        Returns:
            ~.pagers.ListGameServerConfigsPager:
                Response message for
                GameServerConfigsService.ListGameServerConfigs.
                Iterating over this object will yield
                results and resolve additional pages
                automatically.
        """
        # Create or coerce a protobuf request object.
        # Sanity check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([parent])
        if request is not None and has_flattened_params:
            raise ValueError(
                "If the `request` argument is set, then none of "
                "the individual field arguments should be set."
            )
        # Minor optimization to avoid making a copy if the user passes
        # in a game_server_configs.ListGameServerConfigsRequest.
        # There's no risk of modifying the input as we've already verified
        # there are no flattened fields.
        if not isinstance(request, game_server_configs.ListGameServerConfigsRequest):
            request = game_server_configs.ListGameServerConfigsRequest(request)
            # If we have keyword arguments corresponding to fields on the
            # request, apply these.
            if parent is not None:
                request.parent = parent
        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = self._transport._wrapped_methods[self._transport.list_game_server_configs]
        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
        )
        # Send the request.
        response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
        # This method is paged; wrap the response in a pager, which provides
        # an `__iter__` convenience method.
        response = pagers.ListGameServerConfigsPager(
            method=rpc, request=request, response=response, metadata=metadata,
        )
        # Done; return the response.
        return response
    def get_game_server_config(
        self,
        request: game_server_configs.GetGameServerConfigRequest = None,
        *,
        name: str = None,
        retry: retries.Retry = gapic_v1.method.DEFAULT,
        timeout: float = None,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> game_server_configs.GameServerConfig:
        r"""Gets details of a single game server config.
        Args:
            request (:class:`~.game_server_configs.GetGameServerConfigRequest`):
                The request object. Request message for
                GameServerConfigsService.GetGameServerConfig.
            name (:class:`str`):
                Required. The name of the game server config to
                retrieve. Uses the form:
                ``projects/{project}/locations/{location}/gameServerDeployments/{deployment}/configs/{config}``.
                This corresponds to the ``name`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        Returns:
            ~.game_server_configs.GameServerConfig:
                A game server config resource.
        """
        # Create or coerce a protobuf request object.
        # Sanity check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([name])
        if request is not None and has_flattened_params:
            raise ValueError(
                "If the `request` argument is set, then none of "
                "the individual field arguments should be set."
            )
        # Minor optimization to avoid making a copy if the user passes
        # in a game_server_configs.GetGameServerConfigRequest.
        # There's no risk of modifying the input as we've already verified
        # there are no flattened fields.
        if not isinstance(request, game_server_configs.GetGameServerConfigRequest):
            request = game_server_configs.GetGameServerConfigRequest(request)
            # If we have keyword arguments corresponding to fields on the
            # request, apply these.
            if name is not None:
                request.name = name
        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = self._transport._wrapped_methods[self._transport.get_game_server_config]
        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
        )
        # Send the request.
        response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
        # Done; return the response.
        return response
    def create_game_server_config(
        self,
        request: game_server_configs.CreateGameServerConfigRequest = None,
        *,
        parent: str = None,
        game_server_config: game_server_configs.GameServerConfig = None,
        retry: retries.Retry = gapic_v1.method.DEFAULT,
        timeout: float = None,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> operation.Operation:
        r"""Creates a new game server config in a given project,
        location, and game server deployment. Game server
        configs are immutable, and are not applied until
        referenced in the game server deployment rollout
        resource.
        Args:
            request (:class:`~.game_server_configs.CreateGameServerConfigRequest`):
                The request object. Request message for
                GameServerConfigsService.CreateGameServerConfig.
            parent (:class:`str`):
                Required. The parent resource name. Uses the form:
                ``projects/{project}/locations/{location}/gameServerDeployments/{deployment}/``.
                This corresponds to the ``parent`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            game_server_config (:class:`~.game_server_configs.GameServerConfig`):
                Required. The game server config
                resource to be created.
                This corresponds to the ``game_server_config`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        Returns:
            ~.operation.Operation:
                An object representing a long-running operation.
                The result type for the operation will be
                :class:``~.game_server_configs.GameServerConfig``: A
                game server config resource.
        """
        # Create or coerce a protobuf request object.
        # Sanity check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([parent, game_server_config])
        if request is not None and has_flattened_params:
            raise ValueError(
                "If the `request` argument is set, then none of "
                "the individual field arguments should be set."
            )
        # Minor optimization to avoid making a copy if the user passes
        # in a game_server_configs.CreateGameServerConfigRequest.
        # There's no risk of modifying the input as we've already verified
        # there are no flattened fields.
        if not isinstance(request, game_server_configs.CreateGameServerConfigRequest):
            request = game_server_configs.CreateGameServerConfigRequest(request)
            # If we have keyword arguments corresponding to fields on the
            # request, apply these.
            if parent is not None:
                request.parent = parent
            if game_server_config is not None:
                request.game_server_config = game_server_config
        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = self._transport._wrapped_methods[
            self._transport.create_game_server_config
        ]
        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
        )
        # Send the request.
        response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
        # Wrap the response in an operation future.
        response = operation.from_gapic(
            response,
            self._transport.operations_client,
            game_server_configs.GameServerConfig,
            metadata_type=common.OperationMetadata,
        )
        # Done; return the response.
        return response
    def delete_game_server_config(
        self,
        request: game_server_configs.DeleteGameServerConfigRequest = None,
        *,
        name: str = None,
        retry: retries.Retry = gapic_v1.method.DEFAULT,
        timeout: float = None,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> operation.Operation:
        r"""Deletes a single game server config. The deletion
        will fail if the game server config is referenced in a
        game server deployment rollout.
        Args:
            request (:class:`~.game_server_configs.DeleteGameServerConfigRequest`):
                The request object. Request message for
                GameServerConfigsService.DeleteGameServerConfig.
            name (:class:`str`):
                Required. The name of the game server config to delete.
                Uses the form:
                ``projects/{project}/locations/{location}/gameServerDeployments/{deployment}/configs/{config}``.
                This corresponds to the ``name`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        Returns:
            ~.operation.Operation:
                An object representing a long-running operation.
                The result type for the operation will be
                :class:``~.empty.Empty``: A generic empty message that
                you can re-use to avoid defining duplicated empty
                messages in your APIs. A typical example is to use it as
                the request or the response type of an API method. For
                instance:
                ::
                    service Foo {
                        rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);
                    }
                The JSON representation for ``Empty`` is empty JSON
                object ``{}``.
        """
        # Create or coerce a protobuf request object.
        # Sanity check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([name])
        if request is not None and has_flattened_params:
            raise ValueError(
                "If the `request` argument is set, then none of "
                "the individual field arguments should be set."
            )
        # Minor optimization to avoid making a copy if the user passes
        # in a game_server_configs.DeleteGameServerConfigRequest.
        # There's no risk of modifying the input as we've already verified
        # there are no flattened fields.
        if not isinstance(request, game_server_configs.DeleteGameServerConfigRequest):
            request = game_server_configs.DeleteGameServerConfigRequest(request)
            # If we have keyword arguments corresponding to fields on the
            # request, apply these.
            if name is not None:
                request.name = name
        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = self._transport._wrapped_methods[
            self._transport.delete_game_server_config
        ]
        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
        )
        # Send the request.
        response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
        # Wrap the response in an operation future.
        response = operation.from_gapic(
            response,
            self._transport.operations_client,
            empty.Empty,
            metadata_type=common.OperationMetadata,
        )
        # Done; return the response.
        return response
# Record the installed library version in the ClientInfo; fall back to an
# anonymous ClientInfo when the distribution metadata is unavailable (e.g.
# when running from a source checkout rather than an installed package).
try:
    _client_info = gapic_v1.client_info.ClientInfo(
        gapic_version=pkg_resources.get_distribution(
            "google-cloud-game-servers",
        ).version,
    )
except pkg_resources.DistributionNotFound:
    _client_info = gapic_v1.client_info.ClientInfo()
__all__ = ("GameServerConfigsServiceClient",)
| 42.132472 | 137 | 0.635314 |
acef142d33d6838268706586843203fb22dc9de2 | 96 | py | Python | ethcloud/utils/__init__.py | kgritesh/ethcloud | 25bbf074a7ffa4e24520f7291b15d175fdf08ec5 | [
"MIT"
] | 3 | 2017-09-27T10:40:10.000Z | 2018-02-05T13:21:38.000Z | ethcloud/utils/__init__.py | kgritesh/ethcloud | 25bbf074a7ffa4e24520f7291b15d175fdf08ec5 | [
"MIT"
] | null | null | null | ethcloud/utils/__init__.py | kgritesh/ethcloud | 25bbf074a7ffa4e24520f7291b15d175fdf08ec5 | [
"MIT"
] | 1 | 2018-02-24T08:27:50.000Z | 2018-02-24T08:27:50.000Z | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
| 19.2 | 38 | 0.760417 |
acef148dadde7cf42cb6e81c3b2c8182b8cd2444 | 1,829 | py | Python | 05-hydrothermal-venture/05-hydrothermal-venture-01.py | kforiordan/advent-of-code | 5de1ec392959a650ab9ba86873b87a144b24c2ce | [
"Apache-2.0"
] | 1 | 2021-12-07T21:53:08.000Z | 2021-12-07T21:53:08.000Z | 05-hydrothermal-venture/05-hydrothermal-venture-01.py | kforiordan/advent-of-code | 5de1ec392959a650ab9ba86873b87a144b24c2ce | [
"Apache-2.0"
] | null | null | null | 05-hydrothermal-venture/05-hydrothermal-venture-01.py | kforiordan/advent-of-code | 5de1ec392959a650ab9ba86873b87a144b24c2ce | [
"Apache-2.0"
] | null | null | null |
import pprint
import sys
from typing import NamedTuple
class Point(NamedTuple):
    """An integer 2-D grid coordinate."""

    x: int
    y: int

    def __hash__(self):
        # Hash the "x,y" textual form so equal points land in the same bucket.
        return hash("{},{}".format(self.x, self.y))

    def __eq__(self, other):
        # Strict type check: a Point never equals a plain tuple.
        same_type = self.__class__ == other.__class__
        return same_type and self.x == other.x and self.y == other.y
def is_straight_line(p1, p2):
    """Return True when the segment p1-p2 is axis-aligned (horizontal or vertical)."""
    return p1.x == p2.x or p1.y == p2.y
def get_point_pairs(fh):
    """Parse "x1,y1 -> x2,y2" lines from file-like *fh* into (Point, Point) tuples.

    Args:
        fh: an iterable of text lines (e.g. an open file or sys.stdin).

    Returns:
        list of 2-tuples of Point, one per non-blank input line.
    """
    # Fix: removed an unused pprint.PrettyPrinter that was created on every call.
    point_pairs = []
    for line in fh:
        line = line.strip()
        if not line:
            # Robustness: a blank/whitespace-only line (e.g. trailing newline
            # at EOF) used to raise ValueError in the unpacking below.
            continue
        p1, _arrow, p2 = line.split()
        x1, y1 = map(int, p1.split(','))
        x2, y2 = map(int, p2.split(','))
        point_pairs.append((Point(x1, y1), Point(x2, y2)))
    return point_pairs
def points_on_line(p1, p2):
    """Return every integer Point on an axis-aligned segment p1-p2 (inclusive).

    Diagonal segments yield an empty list.
    """
    if p1.x == p2.x:
        lo, hi = sorted((p1.y, p2.y))
        return [Point(p1.x, y) for y in range(lo, hi + 1)]
    if p1.y == p2.y:
        lo, hi = sorted((p1.x, p2.x))
        return [Point(x, p1.y) for x in range(lo, hi + 1)]
    return []
# Count the grid points covered by at least `threshold` horizontal/vertical
# segments read from stdin (one "x1,y1 -> x2,y2" segment per line).
if __name__ == "__main__":
    pp = pprint.PrettyPrinter(compact=False)  # NOTE(review): unused below
    point_pairs = []
    point_pairs = get_point_pairs(sys.stdin)
    straight_lines = []  # NOTE(review): unused below
    # Keep only axis-aligned (horizontal/vertical) segments.
    straight_line_pairs = [(p1,p2) for (p1,p2) in point_pairs if is_straight_line(p1,p2)]
    # Tally how many segments cover each grid point.
    point_counts = {}
    for (p1,p2) in straight_line_pairs:
        for p in points_on_line(p1,p2):
            if p in point_counts:
                point_counts[p] = point_counts[p] + 1
            else:
                point_counts[p] = 1
    threshold = 2
    count = 0
    for p,c in point_counts.items():
        if c >= threshold:
            count += 1
    print(count)
acef14e0312b7173b97bbb580d04c06f8683900a | 3,229 | py | Python | whoosh/graphics/window.py | Normangorman/whoosh | 8ada8f9f628e05d8bfefcc5043588635893749ee | [
"MIT"
] | null | null | null | whoosh/graphics/window.py | Normangorman/whoosh | 8ada8f9f628e05d8bfefcc5043588635893749ee | [
"MIT"
] | 8 | 2017-03-04T20:05:32.000Z | 2017-03-05T11:19:54.000Z | whoosh/graphics/window.py | Normangorman/whoosh | 8ada8f9f628e05d8bfefcc5043588635893749ee | [
"MIT"
] | null | null | null | """
Draw stuff in the game!
"""
import pyglet
import pyglet.window.key
import pyglet.gl as gl
import pymunk.pyglet_util
import whoosh.graphics.camera
from whoosh.engine.core import WhooshCore
from whoosh.components.animatedsprite import AnimatedSpriteComponent
from whoosh.components.physics import PhysicsComponent
from whoosh.components.input import InputComponent
from whoosh.graphics.camera import Camera
class WhooshWindow(pyglet.window.Window):
    """
    Open a magical window into your game world.
    Who knows what you might see?
    """
    def __init__(self, core, fps, **kwargs):
        """Create the window, attach it to *core*, and schedule updates at *fps* Hz.

        Keyword arguments (width, height, ...) are forwarded to
        pyglet.window.Window; width/height are also used to size the Camera.
        """
        pyglet.window.Window.__init__(self, **kwargs)
        self.core = core
        self.camera = Camera(width=kwargs.get('width'), height=kwargs.get('height'))
        self.fps = fps
        # Debug toggles; all off by default.
        self.enable_fps_display = False
        self.enable_physics_debug = False
        self.enable_camera_debug = False
        self.enable_pixelart_mode = False
        self._fps_display = pyglet.clock.ClockDisplay()
        self._batch = pyglet.graphics.Batch() # TODO: this isn't used yet
        self._physics_draw_options = pymunk.pyglet_util.DrawOptions()
        self._physics_draw_options.flags = self._physics_draw_options.DRAW_SHAPES # enable shape drawing by default
        self.core.set_window(self)
        # Drive update() at the requested frame rate.
        pyglet.clock.schedule_interval(self.update, 1/float(fps))
        whoosh.graphics.camera.opengl_init()
    def start(self):
        """
        Hopefully it won't blow up
        """
        # Enters pyglet's main event loop (blocks until the window closes).
        pyglet.app.run()
    def on_show(self):
        """Window-shown event handler: re-apply pixel-art mode and camera state."""
        if self.enable_pixelart_mode:
            self.activate_pixelart_mode()
        self.camera.refresh()
    def update(self, dt):
        """
        Updates the core
        """
        # Update game state first, then render the frame for this tick.
        self.core.update(dt)
        self.render(dt)
        self.core.on_render()
    def get_camera(self):
        """
        Returns the Camera object the window is using.
        """
        return self.camera
    def draw_physics_debug(self):
        """
        Draws every object in the physics world to screen.
        Pretty damn useful.
        """
        self.core.game_world.physics_world.debug_draw(self._physics_draw_options)
    def activate_pixelart_mode(self):
        """
        Does some OpenGL stuff to prevent pixel art being blurred when zooming.
        """
        # GL_NEAREST magnification keeps pixels crisp instead of interpolating.
        gl.glEnable(gl.GL_TEXTURE_2D)
        gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_NEAREST)
        self.enable_pixelart_mode = True
    def render(self, dt):
        """
        Renders everything in the core's game world
        """
        # Order matters: clear, apply camera transform, then draw sprites and
        # any debug overlays on top.
        self.clear()
        self.camera.apply()
        for blob in self.core.game_world.blobs:
            anim = blob.get_component(AnimatedSpriteComponent)
            phys = blob.get_component(PhysicsComponent)
            # Only blobs with both a sprite and a physics body are drawn.
            if anim and phys:
                anim.animated_sprite.update(dt)
                anim.animated_sprite.set_rotation(phys.get_rotation())
                anim.draw(phys.get_position())
        if self.enable_physics_debug:
            self.draw_physics_debug()
        if self.enable_fps_display:
            self._fps_display.draw()
        if self.enable_camera_debug:
            self.camera.debug_draw()
| 31.656863 | 115 | 0.648188 |
acef15abdb17cad1a32bd46cd393d9d0e36d7d8a | 2,662 | py | Python | udb_py/index/udb_hash_multivalued_index.py | akaterra/udb.py | 3c04fa788e4b2fc8356c1210b9d81004aa932c0b | [
"MIT"
] | 2 | 2020-01-04T12:04:58.000Z | 2020-02-15T16:32:12.000Z | udb_py/index/udb_hash_multivalued_index.py | akaterra/udb.py | 3c04fa788e4b2fc8356c1210b9d81004aa932c0b | [
"MIT"
] | null | null | null | udb_py/index/udb_hash_multivalued_index.py | akaterra/udb.py | 3c04fa788e4b2fc8356c1210b9d81004aa932c0b | [
"MIT"
] | null | null | null | from ..common import EMPTY
from ..udb_index import UdbIndex, UdbEmbeddedIndex
class UdbHashMultivaluedIndex(UdbIndex):
    """Hash index where each key maps to a *set* of record uids.

    Buckets are created lazily on first insert and dropped as soon as the
    last uid is removed, so ``len(self)`` counts live keys only.
    """

    is_multivalued = True
    type = 'hash_multivalued'

    def __init__(self, schema, name=None):
        UdbIndex.__init__(self, schema, name)
        self._hash = {}  # key -> set of uids

    def __len__(self):
        # Number of distinct keys currently indexed.
        return len(self._hash)

    def clear(self):
        """Drop every bucket. Returns self for chaining."""
        self._hash.clear()
        return self

    def delete(self, key, uid):
        """Remove *uid* from the bucket for *key*; drop the bucket if it empties."""
        bucket = self._hash.get(key, EMPTY)
        if bucket != EMPTY and uid in bucket:
            if len(bucket) == 1:
                del self._hash[key]
            else:
                bucket.discard(uid)
        return self

    def insert(self, key, uid):
        """Register *uid* under *key*, creating the bucket on first use."""
        self._hash.setdefault(key, set()).add(uid)
        return self

    def search_by_key(self, key):
        """Yield every uid stored under *key* (nothing for an absent key)."""
        bucket = self._hash.get(key, EMPTY)
        if bucket != EMPTY:
            for uid in bucket:
                yield uid

    def search_by_key_in(self, keys):
        """Yield uids stored under any key in *keys* (duplicates possible)."""
        for key in keys:
            bucket = self._hash.get(key, EMPTY)
            if bucket != EMPTY:
                for uid in bucket:
                    yield uid

    def upsert(self, old, new, uid):
        """Move *uid* from the *old* key's bucket into the *new* key's bucket."""
        if old != new:
            # Same remove/add sequence the original inlined here.
            self.delete(old, uid)
            self.insert(new, uid)
        return self
class UdbHashMultivaluedEmbeddedIndex(UdbHashMultivaluedIndex, UdbEmbeddedIndex):
    """Multivalued hash index over embedded (iterable) key values.

    Each record contributes an *iterable* of keys; the record's uid is
    registered under every key in that iterable.
    """

    type = 'hash_multivalued_embedded'

    def delete(self, key, uid=None):
        """Remove *uid* from the bucket of every key in the *key* iterable."""
        for k in key:
            bucket = self._hash.get(k, EMPTY)
            if bucket != EMPTY and uid in bucket:
                if len(bucket) == 1:
                    self._hash.pop(k)
                else:
                    bucket.remove(uid)
        return self

    def insert(self, key, uid):
        """Register *uid* under every key in the *key* iterable."""
        for k in key:
            bucket = self._hash.get(k, EMPTY)
            if bucket == EMPTY:
                self._hash[k] = {uid}
            else:
                # BUG FIX: buckets are sets ({uid} above), but the original
                # called list.append() here, raising AttributeError on the
                # second uid inserted under the same key.
                bucket.add(uid)
        return self

    def upsert(self, old, new, uid):
        """Replace the record's old key set with the new one for *uid*."""
        # BUG FIX: the original called self.delete(old) without uid, so the
        # membership test `uid in bucket` compared against None and the old
        # keys were never actually removed.
        self.delete(old, uid)
        self.insert(new, uid)
        return self
| 24.422018 | 81 | 0.538317 |
acef16be44a31d928af8240181359d4b708cc5e0 | 4,108 | py | Python | ml/fetch_data.py | dpachov/language-detector | 7c4a1c6b2d0c9ddd35fafa7b99cc8142c72e750a | [
"BSD-2-Clause"
] | null | null | null | ml/fetch_data.py | dpachov/language-detector | 7c4a1c6b2d0c9ddd35fafa7b99cc8142c72e750a | [
"BSD-2-Clause"
] | null | null | null | ml/fetch_data.py | dpachov/language-detector | 7c4a1c6b2d0c9ddd35fafa7b99cc8142c72e750a | [
"BSD-2-Clause"
] | null | null | null | # Simple python script to collect text paragraphs from various languages on the
# same topic namely the Wikipedia encyclopedia itself
import os
try:
# Python 2 compat
from urllib2 import Request, build_opener
except ImportError:
# Python 3
from urllib.request import Request, build_opener
import lxml.html
from lxml.etree import ElementTree
import numpy as np
# Map of language code -> Wikipedia article about Wikipedia in that language.
pages = {
    u'ar': u'http://ar.wikipedia.org/wiki/%D9%88%D9%8A%D9%83%D9%8A%D8%A8%D9%8A%D8%AF%D9%8A%D8%A7',
    u'de': u'http://de.wikipedia.org/wiki/Wikipedia',
    u'en': u'http://en.wikipedia.org/wiki/Wikipedia',
    u'es': u'http://es.wikipedia.org/wiki/Wikipedia',
    u'fr': u'http://fr.wikipedia.org/wiki/Wikip%C3%A9dia',
    u'it': u'http://it.wikipedia.org/wiki/Wikipedia',
    u'ja': u'http://ja.wikipedia.org/wiki/Wikipedia',
    u'nl': u'http://nl.wikipedia.org/wiki/Wikipedia',
    u'pl': u'http://pl.wikipedia.org/wiki/Wikipedia',
    u'pt': u'http://pt.wikipedia.org/wiki/Wikip%C3%A9dia',
    u'ru': u'http://ru.wikipedia.org/wiki/%D0%92%D0%B8%D0%BA%D0%B8%D0%BF%D0%B5%D0%B4%D0%B8%D1%8F',
    u'bg': u'http://bg.wikipedia.org/wiki/%D0%9D%D0%B0%D1%87%D0%B0%D0%BB%D0%BD%D0%B0_%D1%81%D1%82%D1%80%D0%B0%D0%BD%D0%B8%D1%86%D0%B0'
    # u'zh': u'http://zh.wikipedia.org/wiki/Wikipedia',
}
html_folder = u'html'
text_folder = u'paragraphs'
#short_text_folder = u'short_paragraphs'
short_text_folder = u'paragraphs'
# "Short paragraphs" are built by regrouping each paragraph into chunks of
# this many whitespace-separated words.
n_words_per_short_text = 5
if not os.path.exists(html_folder):
    os.makedirs(html_folder)
for lang, page in pages.items():
    text_lang_folder = os.path.join(text_folder, lang)
    if not os.path.exists(text_lang_folder):
        os.makedirs(text_lang_folder)
    short_text_lang_folder = os.path.join(short_text_folder, lang)
    if not os.path.exists(short_text_lang_folder):
        os.makedirs(short_text_lang_folder)
    opener = build_opener()
    html_filename = os.path.join(html_folder, lang + '.html')
    if not os.path.exists(html_filename):
        print("Downloading %s" % page)
        request = Request(page)
        # change the User Agent to avoid being blocked by Wikipedia
        # downloading a couple of articles ones should not be abusive
        request.add_header('User-Agent', 'OpenAnything/1.0')
        html_content = opener.open(request).read()
        # Fix: use a context manager instead of leaking the file handle.
        with open(html_filename, 'wb') as f:
            f.write(html_content)
    # Fix: read the raw bytes and decode explicitly as UTF-8; relying on the
    # locale's default text encoding could fail on non-ASCII articles, and
    # the old hasattr(..., 'decode') dance is unnecessary once we read bytes.
    with open(html_filename, 'rb') as f:
        html_content = f.read().decode('utf-8')
    tree = ElementTree(lxml.html.document_fromstring(html_content))
    i = 0
    j = 0
    for p in tree.findall('//p'):
        content = p.text_content()
        if len(content) < 100:
            # skip paragraphs that are too short - probably too noisy and not
            # representative of the actual language
            continue
        # i and j advance over a single shared sequence so that the full
        # paragraphs and the short paragraphs (written to the *same* folder
        # here) never collide on '%s_%04d.txt' filenames.
        i = j
        text_filename = os.path.join(text_lang_folder,
                                     '%s_%04d.txt' % (lang, i))
        print("Writing %s" % text_filename)
        with open(text_filename, 'wb') as f:
            f.write(content.encode('utf-8', 'ignore'))
        i += 1
        j = i
        # BUG FIX: this line was a Python 2 print *statement* — a SyntaxError
        # under Python 3, which the urllib import fallback explicitly targets.
        print("j is ... %d " % j)
        # split the paragraph into fake smaller paragraphs to make the
        # problem harder e.g. more similar to tweets
        if lang in ('zh', 'ja'):
            # FIXME: whitespace tokenizing does not work on chinese and japanese
            continue
        words = content.split()
        # BUG FIX: floor division — under Python 3 true division yields a
        # float, and np.array_split rejects a non-integer section count.
        n_groups = len(words) // n_words_per_short_text
        if n_groups < 1:
            continue
        groups = np.array_split(words, n_groups)
        for group in groups:
            small_content = u" ".join(group)
            short_text_filename = os.path.join(short_text_lang_folder,
                                               '%s_%04d.txt' % (lang, j))
            print("Writing %s" % short_text_filename)
            with open(short_text_filename, 'wb') as f:
                f.write(small_content.encode('utf-8', 'ignore'))
            j += 1
            if j >= 3000:
                break
acef184e868882f6c13c9a9907e5bb5b6457cca0 | 284 | py | Python | Python/learning/testing_code.py | AdnanCodes/Code-Challenges | 000d2ddc493d8ad43ff9c8f6102535a5910979e6 | [
"MIT"
] | null | null | null | Python/learning/testing_code.py | AdnanCodes/Code-Challenges | 000d2ddc493d8ad43ff9c8f6102535a5910979e6 | [
"MIT"
] | null | null | null | Python/learning/testing_code.py | AdnanCodes/Code-Challenges | 000d2ddc493d8ad43ff9c8f6102535a5910979e6 | [
"MIT"
] | null | null | null | class User:
def __init__(self, name, is_admin=False):
self.name = name
self.is_admin = is_admin
class Customer(User):
    """A regular (non-admin) user that can accumulate purchases."""

    def __init__(self, name):
        # Delegate name/is_admin setup to User (is_admin defaults to False).
        super().__init__(name)
        # NOTE(review): singular "purchase" holds a list — presumably a
        # purchase history; confirm intent before renaming.
        self.purchase = []
# Demo: Customer inherits User's default is_admin=False.
c = Customer('jeff')
print(c.is_admin)
print(c.name)
| 16.705882 | 45 | 0.616197 |
acef18d2492c992d9ae5efd7408c2fd51aa9e3c3 | 15,357 | py | Python | lisa/node.py | KsenijaS/lisa | f09291a088c81de40e57bc4e37e9348220a87417 | [
"MIT"
] | 1 | 2021-06-17T13:02:44.000Z | 2021-06-17T13:02:44.000Z | lisa/node.py | KsenijaS/lisa | f09291a088c81de40e57bc4e37e9348220a87417 | [
"MIT"
] | null | null | null | lisa/node.py | KsenijaS/lisa | f09291a088c81de40e57bc4e37e9348220a87417 | [
"MIT"
] | null | null | null | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from __future__ import annotations
from pathlib import Path, PurePath, PurePosixPath, PureWindowsPath
from random import randint
from typing import Any, Iterable, List, Optional, Type, TypeVar, Union, cast
from lisa import schema
from lisa.executable import Tools
from lisa.feature import Features
from lisa.operating_system import OperatingSystem
from lisa.tools import Echo, Reboot
from lisa.util import (
ContextMixin,
InitializableMixin,
LisaException,
constants,
fields_to_dict,
subclasses,
)
from lisa.util.logger import get_logger
from lisa.util.process import ExecutableResult, Process
from lisa.util.shell import ConnectionInfo, LocalShell, Shell, SshShell
T = TypeVar("T")
class Node(subclasses.BaseClassWithRunbookMixin, ContextMixin, InitializableMixin):
    """
    Base class for a LISA test node.

    A node bundles a shell (local or SSH), lazy OS detection (``self.os``
    is set during ``_initialize``), tool access (``self.tools``), and
    per-node log/working paths.  Concrete subclasses (``RemoteNode``,
    ``LocalNode``) are instantiated through the ``create`` factory based
    on the runbook type.
    """
    # Shared subclass factory used by create(); built lazily on first use.
    _factory: Optional[subclasses.Factory[Node]] = None
    def __init__(
        self,
        runbook: schema.Node,
        index: int,
        logger_name: str,
        base_log_path: Optional[Path] = None,
    ) -> None:
        super().__init__(runbook=runbook)
        self.is_default = runbook.is_default
        self.capability = runbook.capability
        self.name = runbook.name
        self.index = index
        self._shell: Optional[Shell] = None
        # will be initialized by platform
        self.features: Features
        self.tools = Tools(self)
        # the path uses remotely
        node_id = str(self.index) if self.index >= 0 else ""
        self.log = get_logger(logger_name, node_id)
        # The working path will be created in remote node, when it's used.
        self._working_path: Optional[PurePath] = None
        self._base_local_log_path = base_log_path
        # Not to set the log path until its first used. Because the path
        # contains node name, which is not set in __init__.
        self._local_log_path: Optional[Path] = None
        self._support_sudo: Optional[bool] = None
        self._connection_info: Optional[ConnectionInfo] = None
    @property
    def shell(self) -> Shell:
        """The shell attached by the subclass; fails fast if used before
        connection details are set."""
        assert self._shell, "Shell is not initialized"
        return self._shell
    @property
    def is_posix(self) -> bool:
        """True when the detected OS is POSIX.  NOTE: triggers
        initialize(), i.e. connects to the node and probes the OS."""
        self.initialize()
        return self.os.is_posix
    @property
    def is_remote(self) -> bool:
        """Abstract: subclasses state whether commands go over a network."""
        raise NotImplementedError()
    @property
    def support_sudo(self) -> bool:
        """Whether ``sudo`` is available; probed once on POSIX nodes and
        cached.  Non-POSIX nodes report True so sudo requests are ignored."""
        self.initialize()
        # check if sudo supported
        if self.is_posix and self._support_sudo is None:
            process = self._execute("command -v sudo", shell=True, no_info_log=True)
            result = process.wait_result(10)
            if result.exit_code == 0:
                self._support_sudo = True
            else:
                self._support_sudo = False
                self.log.debug("node doesn't support sudo, may cause failure later.")
        if self._support_sudo is None:
            # set Windows to true to ignore sudo asks.
            self._support_sudo = True
        return self._support_sudo
    @property
    def is_connected(self) -> bool:
        """True once a shell exists and reports a live connection."""
        return self._shell is not None and self._shell.is_connected
    @property
    def local_log_path(self) -> Path:
        """Per-node directory under the run's log path; created on first
        access (the node name is not known at __init__ time)."""
        if not self._local_log_path:
            base_path = self._base_local_log_path
            if not base_path:
                base_path = constants.RUN_LOCAL_PATH
            path_name = self.name
            if not path_name:
                if self.index:
                    index = self.index
                else:
                    # Unnamed, unindexed node: pick a random suffix.
                    index = randint(0, 10000)
                path_name = f"node-{index}"
            self._local_log_path = base_path / path_name
            if self._local_log_path.exists():
                raise LisaException(
                    "Conflicting node log path detected, "
                    "make sure LISA invocations have individual runtime paths."
                    f"'{self._local_log_path}'"
                )
            self._local_log_path.mkdir(parents=True)
        return self._local_log_path
    @property
    def working_path(self) -> PurePath:
        """
        The working path may be a remote path on remote node. It uses to put executable.
        """
        if not self._working_path:
            self._working_path = self._create_working_path()
            self.shell.mkdir(self._working_path, parents=True, exist_ok=True)
            self.log.debug(f"working path is: '{self._working_path}'")
        return self._working_path
    @classmethod
    def create(
        cls,
        index: int,
        runbook: schema.Node,
        logger_name: str = "node",
        base_log_path: Optional[Path] = None,
    ) -> Node:
        """Factory: instantiate the Node subclass matching the runbook type."""
        if not cls._factory:
            cls._factory = subclasses.Factory[Node](Node)
        node = cls._factory.create_by_runbook(
            index=index,
            runbook=runbook,
            logger_name=logger_name,
            base_log_path=base_log_path,
        )
        node.log.debug(
            f"created, type: '{node.__class__.__name__}', default: {runbook.is_default}"
        )
        return node
    def reboot(self) -> None:
        """Reboot the node via the Reboot tool."""
        self.tools[Reboot].reboot()
    def execute(
        self,
        cmd: str,
        shell: bool = False,
        sudo: bool = False,
        no_error_log: bool = False,
        no_info_log: bool = True,
        cwd: Optional[PurePath] = None,
        timeout: int = 600,
    ) -> ExecutableResult:
        """Run ``cmd`` synchronously and return its result (waits up to
        ``timeout`` seconds)."""
        process = self.execute_async(
            cmd,
            shell=shell,
            sudo=sudo,
            no_error_log=no_error_log,
            no_info_log=no_info_log,
            cwd=cwd,
        )
        return process.wait_result(timeout=timeout)
    def execute_async(
        self,
        cmd: str,
        shell: bool = False,
        sudo: bool = False,
        no_error_log: bool = False,
        no_info_log: bool = True,
        cwd: Optional[PurePath] = None,
    ) -> Process:
        """Start ``cmd`` and return the running Process without waiting.

        Raises LisaException when sudo is requested but unsupported.
        """
        self.initialize()
        if sudo and not self.support_sudo:
            raise LisaException(
                f"node doesn't support [command] or [sudo], cannot execute: {cmd}"
            )
        return self._execute(
            cmd,
            shell=shell,
            sudo=sudo,
            no_error_log=no_error_log,
            no_info_log=no_info_log,
            cwd=cwd,
        )
    def close(self) -> None:
        """Close the underlying shell connection, if any."""
        self.log.debug("closing node connection...")
        if self._shell:
            self._shell.close()
    def _initialize(self, *args: Any, **kwargs: Any) -> None:
        # Connect the shell and detect the operating system; called once
        # through InitializableMixin's initialize().
        self.log.info(f"initializing node '{self.name}' {self}")
        self.shell.initialize()
        self.os: OperatingSystem = OperatingSystem.create(self)
    def _execute(
        self,
        cmd: str,
        shell: bool = False,
        sudo: bool = False,
        no_error_log: bool = False,
        no_info_log: bool = False,
        cwd: Optional[PurePath] = None,
    ) -> Process:
        # Low-level launcher: does not check sudo support or initialize;
        # callers go through execute()/execute_async() for that.
        cmd_id = str(randint(0, 10000))
        process = Process(cmd_id, self.shell, parent_logger=self.log)
        process.start(
            cmd,
            shell=shell,
            sudo=sudo,
            no_error_log=no_error_log,
            no_info_log=no_info_log,
            cwd=cwd,
        )
        return process
    def _create_working_path(self) -> PurePath:
        # Abstract: subclasses decide where executables are staged.
        raise NotImplementedError()
class RemoteNode(Node):
    """Node reached over SSH.  Connection details must be supplied via
    set_connection_info(...) before the node is initialized/used."""
    def __repr__(self) -> str:
        return str(self._connection_info)
    @property
    def is_remote(self) -> bool:
        return True
    @classmethod
    def type_name(cls) -> str:
        # Runbook type key that maps to this subclass in the factory.
        return constants.ENVIRONMENTS_NODES_REMOTE
    @classmethod
    def type_schema(cls) -> Type[schema.TypedSchema]:
        return schema.RemoteNode
    def set_connection_info_by_runbook(
        self,
        default_username: str = "",
        default_password: str = "",
        default_private_key_file: str = "",
    ) -> None:
        """Pull address/port fields from the runbook and fill missing
        credentials from the supplied defaults, then connect."""
        fields = [
            constants.ENVIRONMENTS_NODES_REMOTE_ADDRESS,
            constants.ENVIRONMENTS_NODES_REMOTE_PORT,
            constants.ENVIRONMENTS_NODES_REMOTE_PUBLIC_ADDRESS,
            constants.ENVIRONMENTS_NODES_REMOTE_PUBLIC_PORT,
        ]
        parameters = fields_to_dict(self.runbook, fields)
        # use default credential, if they are not specified
        node_runbook = cast(schema.RemoteNode, self.runbook)
        parameters[constants.ENVIRONMENTS_NODES_REMOTE_USERNAME] = (
            node_runbook.username if node_runbook.username else default_username
        )
        parameters[constants.ENVIRONMENTS_NODES_REMOTE_PASSWORD] = (
            node_runbook.password if node_runbook.password else default_password
        )
        parameters[constants.ENVIRONMENTS_NODES_REMOTE_PRIVATE_KEY_FILE] = (
            node_runbook.private_key_file
            if node_runbook.private_key_file
            else default_private_key_file
        )
        self.set_connection_info(**parameters)
    def set_connection_info(
        self,
        address: str = "",
        port: Optional[int] = 22,
        public_address: str = "",
        public_port: Optional[int] = 22,
        username: str = "root",
        password: str = "",
        private_key_file: str = "",
    ) -> None:
        """Record connection endpoints and attach the SSH shell.

        Internal and public address/port mirror each other when only one
        side is given.  May be called at most once per node.
        """
        if self._connection_info is not None:
            raise LisaException(
                "node is set connection information already, cannot set again"
            )
        if not address and not public_address:
            raise LisaException(
                "at least one of address and public_address need to be set"
            )
        elif not address:
            address = public_address
        elif not public_address:
            public_address = address
        if not port and not public_port:
            raise LisaException("at least one of port and public_port need to be set")
        elif not port:
            port = public_port
        elif not public_port:
            public_port = port
        assert public_port
        assert port
        # The shell always connects to the public endpoint.
        self._connection_info = ConnectionInfo(
            public_address,
            public_port,
            username,
            password,
            private_key_file,
        )
        self._shell = SshShell(self._connection_info)
        self.public_address = public_address
        self.public_port = public_port
        self.internal_address = address
        self.internal_port = port
    def _initialize(self, *args: Any, **kwargs: Any) -> None:
        # Guard against using the node before connection info is set.
        assert self._connection_info, "call setConnectionInfo before use remote node"
        super()._initialize(*args, **kwargs)
    def _create_working_path(self) -> PurePath:
        """Resolve the remote working path by letting the remote shell
        expand $HOME (POSIX) or %TEMP% (Windows) via echo."""
        if self.is_posix:
            remote_root_path = Path("$HOME")
        else:
            remote_root_path = Path("%TEMP%")
        working_path = remote_root_path.joinpath(
            constants.PATH_REMOTE_ROOT, constants.RUN_LOGIC_PATH
        ).as_posix()
        # expand environment variables in path
        echo = self.tools[Echo]
        result = echo.run(working_path, shell=True)
        # PurePath is more reasonable here, but spurplus doesn't support it.
        if self.is_posix:
            result_path: PurePath = PurePosixPath(result.stdout)
        else:
            result_path = PureWindowsPath(result.stdout)
        return result_path
class LocalNode(Node):
    """Node implementation that executes everything on the local machine.

    No connection information is required: a LocalShell is attached
    immediately at construction time.
    """
    def __init__(
        self,
        runbook: schema.Node,
        index: int,
        logger_name: str,
        base_log_path: Optional[Path],
    ) -> None:
        # Delegate the common bootstrap to Node, then wire up the shell.
        super().__init__(
            runbook=runbook,
            index=index,
            logger_name=logger_name,
            base_log_path=base_log_path,
        )
        self._shell = LocalShell()
    @property
    def is_remote(self) -> bool:
        # Local nodes never go over a network.
        return False
    @classmethod
    def type_name(cls) -> str:
        # Runbook type key that selects this subclass in the factory.
        return constants.ENVIRONMENTS_NODES_LOCAL
    @classmethod
    def type_schema(cls) -> Type[schema.TypedSchema]:
        return schema.LocalNode
    def _create_working_path(self) -> PurePath:
        # Local runs stage executables under the global run path.
        return constants.RUN_LOCAL_PATH
    def __repr__(self) -> str:
        return "local"
class Nodes:
    """Ordered collection of the nodes in one environment.

    Supports lookup by position or by node name, exposes a cached
    default node, and creates new nodes from runbook entries or from
    node requirements.
    """
    def __init__(self) -> None:
        super().__init__()
        self._default: Optional[Node] = None
        self._list: List[Node] = []
    def __getitem__(self, key: Union[int, str]) -> Node:
        # int keys are positional; anything else matches node.name.
        found = None
        if not self._list:
            raise LisaException("no node found")
        if isinstance(key, int):
            if key < len(self._list):
                found = self._list[key]
        else:
            for candidate in self._list:
                if candidate.name == key:
                    found = candidate
                    break
        if not found:
            raise KeyError(f"cannot find node {key}")
        return found
    def __setitem__(self, key: Union[int, str], v: Node) -> None:
        # Nodes may only be added through the from_* factory methods.
        raise NotImplementedError("don't set node directly, call from_*")
    def __len__(self) -> int:
        return len(self._list)
    @property
    def default(self) -> Node:
        # Resolve lazily and cache: the first node flagged is_default,
        # otherwise the first node in the list.
        if self._default is None:
            chosen = None
            for candidate in self._list:
                if candidate.is_default:
                    chosen = candidate
                    break
            if chosen is None:
                if not self._list:
                    raise LisaException("No node found in current environment")
                chosen = self._list[0]
            self._default = chosen
        return self._default
    def list(self) -> Iterable[Node]:
        # Generator view over the nodes in insertion order.
        yield from self._list
    def initialize(self) -> None:
        for each in self._list:
            each.initialize()
    def close(self) -> None:
        for each in self._list:
            each.close()
    def from_existing(
        self,
        node_runbook: schema.Node,
        environment_name: str,
        base_log_path: Optional[Path] = None,
    ) -> Node:
        """Wrap an already-provisioned node described in the runbook."""
        new_node = Node.create(
            index=len(self._list),
            runbook=node_runbook,
            logger_name=environment_name,
            base_log_path=base_log_path,
        )
        self._list.append(new_node)
        return new_node
    def from_requirement(
        self,
        node_requirement: schema.NodeSpace,
        environment_name: str,
        base_log_path: Optional[Path] = None,
    ) -> Node:
        """Create a remote-node placeholder from a node requirement.

        The requirement is collapsed to its minimum capability; the
        platform is expected to have expanded node_count already.
        """
        min_requirement = cast(
            schema.Capability,
            node_requirement.generate_min_capability(node_requirement),
        )
        assert isinstance(min_requirement.node_count, int), (
            f"must be int after generate_min_capability, "
            f"actual: {min_requirement.node_count}"
        )
        # node count should be expanded in platform already
        assert min_requirement.node_count == 1, f"actual: {min_requirement.node_count}"
        mock_runbook = schema.RemoteNode(
            type=constants.ENVIRONMENTS_NODES_REMOTE,
            capability=min_requirement,
            is_default=node_requirement.is_default,
        )
        new_node = Node.create(
            index=len(self._list),
            runbook=mock_runbook,
            logger_name=environment_name,
            base_log_path=base_log_path,
        )
        self._list.append(new_node)
        return new_node
| 30.470238 | 88 | 0.59341 |
acef19ced801be5aa34f2a72ab4be327d88cf227 | 144 | py | Python | src/textee/apps.py | pyrexdraum/textee | 5090196643e8558f3ad45496f335bfc1209be119 | [
"MIT"
] | 1 | 2021-12-21T07:00:07.000Z | 2021-12-21T07:00:07.000Z | src/textee/apps.py | pyrexdraum/textee | 5090196643e8558f3ad45496f335bfc1209be119 | [
"MIT"
] | null | null | null | src/textee/apps.py | pyrexdraum/textee | 5090196643e8558f3ad45496f335bfc1209be119 | [
"MIT"
] | null | null | null | from django.apps import AppConfig
class TexteeConfig(AppConfig):
    """Django application configuration for the ``textee`` app."""
    # 64-bit auto-incrementing primary keys for models in this app.
    default_auto_field = 'django.db.models.BigAutoField'
    name = 'textee'
| 20.571429 | 56 | 0.756944 |
acef1b23ef0251471ee5b3d9f3c44c8b265e5aaf | 1,585 | py | Python | parse.py | chris3k/dilbertbrowser | 9b98245a048685ee53fc41453750f186b56335df | [
"MIT"
] | null | null | null | parse.py | chris3k/dilbertbrowser | 9b98245a048685ee53fc41453750f186b56335df | [
"MIT"
] | null | null | null | parse.py | chris3k/dilbertbrowser | 9b98245a048685ee53fc41453750f186b56335df | [
"MIT"
] | null | null | null | import requests
from HTMLParser import HTMLParser
from Strip import Strip
import json
from StringIO import StringIO
class DilbertParser(HTMLParser):
    """
    Python 2 HTML parser that collects OpenGraph ``<meta>`` properties and
    the previous/next navigation anchors from a dilbert.com strip page.

    Feed the page HTML via ``feed()`` first; parsed values accumulate in
    ``self.properties`` and are assembled by the ``strip`` property.
    """
    def __init__(self):
        # Old-style base class (Python 2 HTMLParser), hence no super().
        HTMLParser.__init__(self)
        self.properties = dict()
    @property
    def strip(self):
        """Build a ``Strip`` from the collected properties.

        NOTE(review): this property performs network I/O — it downloads
        the comic image; on any failure ``comic`` is left as ``None``.
        """
        s = Strip()
        s.title = self.properties.get("og:title", "")
        s.url = self.properties.get("og:url", "")
        s.img_url = self.properties.get("og:image", "")
        try:
            r = requests.get(s.img_url)
            s.comic = StringIO(r.content)
        except Exception as e:
            # Best effort: a missing or unreachable image is not fatal.
            s.comic = None
        # s.local_img_path = self.properties.get("", "") "dt170206.gif"
        s.prev_strip = self.properties.get("prev_strip", "")
        s.next_strip = self.properties.get("next_strip", "")
        return s
    def handle_starttag(self, tag, attrs):
        """Record og:* meta properties and the prev/next strip links."""
        if tag == "meta":
            m = dict((k, v) for k, v in attrs)
            k = m.get("property", None)
            if k:
                self.properties[k] = m.get("content", "")
        if tag == "a":
            m = dict((k, v) for k, v in attrs)
            if m.get("title") == "Older Strip":
                self.properties["prev_strip"] = m.get("href")
            if m.get("title") == "Next Strip":
                self.properties["next_strip"] = m.get("href")
if __name__ == "__main__":
    # Manual smoke test: fetch one strip page and dump what was parsed.
    # (Python 2 file: note the print statement below.)
    parser = DilbertParser()
    r = requests.get("http://dilbert.com/strip/2017-02-05")
    parser.feed(r.text)
    print json.dumps(parser.properties, indent=2)
| 32.346939 | 72 | 0.541325 |
acef1d0af301cc88a5f618da9a9f05237f2d8307 | 698 | py | Python | app/models.py | Ephrao1/News-App | 18462d5fda92d1683ad01c44b6aa1464b997f66a | [
"MIT"
] | 2 | 2020-10-19T04:56:17.000Z | 2020-11-11T05:36:17.000Z | app/models.py | Ephrao1/News-App | 18462d5fda92d1683ad01c44b6aa1464b997f66a | [
"MIT"
] | null | null | null | app/models.py | Ephrao1/News-App | 18462d5fda92d1683ad01c44b6aa1464b997f66a | [
"MIT"
class Source:
    '''
    Sources class to define Sources Objects.

    Plain data holder for a news source and its metadata.  ``id``
    deliberately mirrors the API field name even though it shadows the
    builtin.
    '''
    # Fix: extraction metadata was fused onto the class header line,
    # making it a syntax error; restore the clean definition.
    def __init__(self, id, name, description, url, category, country, language):
        self.id = id
        self.name = name
        self.description = description
        self.url = url
        self.category = category
        self.country = country
        self.language = language
class Articles:
    '''
    Articles class to define articles objects.

    Plain data holder for a single news article.
    '''
    # Fix: trailing extraction metadata was fused onto the last line of
    # this class; restore the clean assignment.
    def __init__(self, id, author, title, description, url, image, date):
        self.id = id
        self.author = author
        self.title = title
        self.description = description
        self.url = url
        self.image = image
        self.date = date
acef1db5eb92ddc6f42d5df99ce2eccf1d62341e | 44,102 | py | Python | src/sage/misc/functional.py | bopopescu/sagesmc | e8d1d31f6f598dba2d763baa2d2e804338f9e89e | [
"BSL-1.0"
] | 5 | 2015-01-04T07:15:06.000Z | 2022-03-04T15:15:18.000Z | src/sage/misc/functional.py | bopopescu/sagesmc | e8d1d31f6f598dba2d763baa2d2e804338f9e89e | [
"BSL-1.0"
] | null | null | null | src/sage/misc/functional.py | bopopescu/sagesmc | e8d1d31f6f598dba2d763baa2d2e804338f9e89e | [
"BSL-1.0"
] | 10 | 2016-09-28T13:12:40.000Z | 2022-02-12T09:28:34.000Z | # -*- coding: utf-8 -*-
"""
Functional notation
These are functions so that you can write foo(x) instead of x.foo()
in certain common cases.
AUTHORS:
- William Stein: Initial version
- David Joyner (2005-12-20): More Examples
"""
#*****************************************************************************
# Copyright (C) 2004 William Stein <wstein@gmail.com>
#
# Distributed under the terms of the GNU General Public License (GPL)
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# The full text of the GPL is available at:
#
# http://www.gnu.org/licenses/
#*****************************************************************************
import sage.misc.latex
import sage.interfaces.expect
import sage.interfaces.mathematica
from sage.rings.complex_double import CDF
from sage.rings.real_double import RDF, RealDoubleElement
import sage.rings.real_mpfr
import sage.rings.complex_field
import sage.rings.integer
import __builtin__
LOG_TEN_TWO_PLUS_EPSILON = 3.321928094887363 # a small overestimate of log(10,2)
##############################################################################
# There are many functions on elements of a ring, which mathematicians
# usually write f(x), e.g., it is weird to write x.log() and natural
# to write log(x). The functions below allow for the more familiar syntax.
##############################################################################
def additive_order(x):
    """
    Return the additive order of ``x``, delegating to the element's own
    ``additive_order`` method.

    EXAMPLES::

        sage: additive_order(5)
        +Infinity
        sage: additive_order(Mod(5,11))
        11
    """
    return x.additive_order()
def base_ring(x):
    """
    Return the base ring over which ``x`` is defined.

    EXAMPLES::

        sage: R = PolynomialRing(GF(7), 'x')
        sage: base_ring(R)
        Finite Field of size 7
    """
    return x.base_ring()
def base_field(x):
    """
    Return the base field over which ``x`` is defined.

    If ``x`` has no ``base_field`` method, fall back to its base ring,
    provided that ring is a field; otherwise raise ``AttributeError``.

    EXAMPLES::

        sage: R = PolynomialRing(GF(7), 'x')
        sage: base_field(R)
        Finite Field of size 7
    """
    try:
        return x.base_field()
    except AttributeError:
        y = x.base_ring()
        if is_field(y):
            return y
        raise AttributeError("The base ring of %s is not a field" % x)
def basis(x):
    """
    Return the fixed basis of ``x``.

    EXAMPLES::

        sage: V = VectorSpace(QQ,3)
        sage: S = V.subspace([[1,2,0],[2,2,-1]])
        sage: basis(S)
        [
        (1, 0, -1),
        (0, 1, 1/2)
        ]
    """
    return x.basis()
def category(x):
    """
    Return the category of ``x``.

    Objects that do not know their own category are placed in the
    generic category of all objects.

    EXAMPLES::

        sage: V = VectorSpace(QQ,3)
        sage: category(V)
        Category of vector spaces over Rational Field
    """
    try:
        return x.category()
    except AttributeError:
        import sage.categories.all
        return sage.categories.all.Objects()
def ceil(x):
    """
    Return the ceiling of ``x`` (the least integer >= ``x``).

    Falls back to the generic Sage ``ceil`` for objects without their
    own method.

    EXAMPLES::

        sage: ceil(3.5)
        4
        sage: ceil(-3.5)
        -3
    """
    try:
        return x.ceil()
    except AttributeError:
        return sage.rings.all.ceil(x)
def characteristic_polynomial(x, var='x'):
    """
    Returns the characteristic polynomial of x in the given variable.

    INPUT:

    - ``x`` -- an object with a ``charpoly`` method (matrix, algebraic
      element, ...)
    - ``var`` -- name of the polynomial variable (default: ``'x'``)

    EXAMPLES::

        sage: M = MatrixSpace(QQ,3,3)
        sage: A = M([1,2,3,4,5,6,7,8,9])
        sage: charpoly(A)
        x^3 - 15*x^2 - 18*x
        sage: charpoly(A, 't')
        t^3 - 15*t^2 - 18*t
    """
    try:
        return x.charpoly(var)
    except AttributeError:
        # Fix: use the function-call raise form; the old ``raise E, msg``
        # statement is Python-2-only (a syntax error on Python 3), while
        # this form behaves identically on both.
        raise NotImplementedError("computation of charpoly of x (=%s) not implemented" % x)

charpoly = characteristic_polynomial
def coerce(P, x):
    """
    Attempt to coerce ``x`` into the parent ``P``.

    Prefers the parent's ``_coerce_`` hook; otherwise falls back to
    calling ``P(x)`` directly.

    EXAMPLES::

        sage: type(coerce(QQ,5))
        <type 'sage.rings.rational.Rational'>
    """
    try:
        return P._coerce_(x)
    except AttributeError:
        return P(x)
def acos(x):
    """
    Return the arc cosine of ``x``; plain numbers fall back to ``RDF``.

    EXAMPLES::

        sage: acos(.5)
        1.04719755119660
    """
    try:
        return x.acos()
    except AttributeError:
        return RDF(x).acos()
def asin(x):
    """
    Return the arc sine of ``x``; plain numbers fall back to ``RDF``.

    EXAMPLES::

        sage: asin(.5)
        0.523598775598299
    """
    try:
        return x.asin()
    except AttributeError:
        return RDF(x).asin()
def atan(x):
    """
    Return the arc tangent of ``x``; plain numbers fall back to ``RDF``.

    EXAMPLES::

        sage: z = atan(3);z
        arctan(3)
    """
    try:
        return x.atan()
    except AttributeError:
        return RDF(x).atan()
## def cuspidal_submodule(x):
## return x.cuspidal_submodule()
## def cuspidal_subspace(x):
## return x.cuspidal_subspace()
def cyclotomic_polynomial(n, var='x'):
    """
    Returns the `n^{th}` cyclotomic polynomial.

    EXAMPLES::

        sage: cyclotomic_polynomial(3)
        x^2 + x + 1
        sage: cyclotomic_polynomial(4)
        x^2 + 1
        sage: cyclotomic_polynomial(9)
        x^6 + x^3 + 1
        sage: cyclotomic_polynomial(10)
        x^4 - x^3 + x^2 - x + 1
        sage: cyclotomic_polynomial(11)
        x^10 + x^9 + x^8 + x^7 + x^6 + x^5 + x^4 + x^3 + x^2 + x + 1
    """
    # Delegate to ZZ[var], which owns the cyclotomic implementation.
    return sage.rings.all.ZZ[var].cyclotomic_polynomial(n)
def decomposition(x):
    """
    Return the decomposition of ``x``.

    EXAMPLES::

        sage: M = matrix([[2, 3], [3, 4]])
        sage: M.decomposition()
        [
        (Ambient free module of rank 2 over the principal ideal domain Integer Ring, True)
        ]
    """
    return x.decomposition()
def denominator(x):
    """
    Returns the denominator of x.

    EXAMPLES::

        sage: denominator(17/11111)
        11111
        sage: R.<x> = PolynomialRing(QQ)
        sage: F = FractionField(R)
        sage: r = (x+1)/(x-1)
        sage: denominator(r)
        x - 1
    """
    # Plain Python integers always have denominator 1.  (``long`` is the
    # Python 2 big-int type; this module targets Python 2.)
    if isinstance(x, (int, long)):
        return 1
    return x.denominator()
def det(x):
    """
    Return the determinant of ``x``.

    EXAMPLES::

        sage: M = MatrixSpace(QQ,3,3)
        sage: A = M([1,2,3,4,5,6,7,8,9])
        sage: det(A)
        0
    """
    return x.det()
def dimension(x):
    """
    Return the dimension of ``x``.

    EXAMPLES::

        sage: V = VectorSpace(QQ,3)
        sage: S = V.subspace([[1,2,0],[2,2,-1]])
        sage: dimension(S)
        2
    """
    return x.dimension()

dim = dimension
def discriminant(x):
    """
    Return the discriminant of ``x``.

    EXAMPLES::

        sage: R.<x> = PolynomialRing(QQ)
        sage: S = R.quotient(x^29 - 17*x - 1, 'alpha')
        sage: K = S.number_field()
        sage: discriminant(K)
        -15975100446626038280218213241591829458737190477345113376757479850566957249523
    """
    return x.discriminant()

disc = discriminant
# This is dangerous since it gets the scoping all wrong ??
#import __builtin__
#def eval(x):
# try:
# return x._eval_()
# except AttributeError:
# return __builtin__.eval(x)
def eta(x):
    r"""
    Return the Dedekind `\eta` function at ``x``, which must lie in the
    upper half plane; plain numbers fall back to ``CDF``.

    EXAMPLES::

        sage: eta(1+I)
        0.742048775837 + 0.19883137023*I
    """
    try:
        return x.eta()
    except AttributeError:
        return CDF(x).eta()
def exp(x):
    """
    Return the exponential function evaluated at ``x``; plain numbers
    fall back to ``RDF``.

    EXAMPLES::

        sage: exp(3)
        e^3
        sage: exp(0)
        1
    """
    try:
        return x.exp()
    except AttributeError:
        return RDF(x).exp()
def factor(x, *args, **kwds):
    """
    Return the (prime) factorization of ``x``.

    Extra positional and keyword arguments are forwarded to the
    underlying ``factor`` implementation.

    EXAMPLES::

        sage: factor(factorial(10))
        2^8 * 3^4 * 5^2 * 7
        sage: factor(x^2+2*x+1)
        (x + 1)^2
    """
    try:
        return x.factor(*args, **kwds)
    except AttributeError:
        return sage.rings.all.factor(x, *args, **kwds)

factorization = factor
factorisation = factor
def fcp(x, var='x'):
    """
    Returns the factorization of the characteristic polynomial of x.

    EXAMPLES::

        sage: M = MatrixSpace(QQ,3,3)
        sage: A = M([1,2,3,4,5,6,7,8,9])
        sage: fcp(A, 'x')
        x * (x^2 - 15*x - 18)
    """
    # Prefer the object's own fcp(); otherwise compose the generic
    # charpoly() and factor() helpers defined in this module.
    try: return x.fcp(var)
    except AttributeError: return factor(charpoly(x, var))
## def floor(x):
## try:
## return x.floor()
## except AttributeError:
## return sage.rings.all.floor(x)
def gen(x):
    """
    Return the (first) generator of ``x``.

    EXAMPLES::

        sage: R.<x> = QQ[]; R
        Univariate Polynomial Ring in x over Rational Field
        sage: gen(R)
        x
    """
    return x.gen()
def gens(x):
    """
    Return the generators of ``x``.

    EXAMPLES::

        sage: R.<x,y> = SR[]
        sage: gens(R)
        (x, y)
    """
    return x.gens()
def hecke_operator(x, n):
    """
    Return the n-th Hecke operator ``T_n`` acting on ``x``.

    EXAMPLES::

        sage: M = ModularSymbols(1,12)
        sage: hecke_operator(M,5)
        Hecke operator T_5 on Modular Symbols space of dimension 3 for Gamma_0(1) of weight 12 with sign 0 over Rational Field
    """
    return x.hecke_operator(n)
# Functional alias: ``ideal(...)`` constructs an ideal via the generic
# factory in sage.rings.ideal.
ideal = sage.rings.ideal.Ideal
def image(x):
    """
    Return the image of ``x``.

    EXAMPLES::

        sage: M = MatrixSpace(QQ,3,3)
        sage: A = M([1,2,3,4,5,6,7,8,9])
        sage: image(A)
        Vector space of degree 3 and dimension 2 over Rational Field
        Basis matrix:
        [ 1  0 -1]
        [ 0  1  2]
    """
    return x.image()
def symbolic_sum(expression, *args, **kwds):
    r"""
    Returns the symbolic sum `\sum_{v = a}^b expression` with respect
    to the variable `v` with endpoints `a` and `b`.

    INPUT:

    - ``expression`` - a symbolic expression

    - ``v`` - a variable or variable name

    - ``a`` - lower endpoint of the sum

    - ``b`` - upper endpoint of the sum

    - ``algorithm`` - (default: 'maxima') one of

      - 'maxima' - use Maxima (the default)

      - 'maple' - (optional) use Maple

      - 'mathematica' - (optional) use Mathematica

    EXAMPLES::

        sage: k, n = var('k,n')
        sage: sum(k, k, 1, n).factor()
        1/2*(n + 1)*n

    ::

        sage: sum(1/k^4, k, 1, oo)
        1/90*pi^4

    A well known binomial identity::

        sage: sum(binomial(n,k), k, 0, n)
        2^n

    The binomial theorem::

        sage: x, y = var('x, y')
        sage: sum(binomial(n,k) * x^k * y^(n-k), k, 0, n)
        (x + y)^n

    A geometric sum::

        sage: a, q = var('a, q')
        sage: sum(a*q^k, k, 0, n)
        (a*q^(n + 1) - a)/(q - 1)

    The geometric series::

        sage: assume(abs(q) < 1)
        sage: sum(a*q^k, k, 0, oo)
        -a/(q - 1)

    A divergent geometric series. Don't forget
    to forget your assumptions::

        sage: forget()
        sage: assume(q > 1)
        sage: sum(a*q^k, k, 0, oo)
        Traceback (most recent call last):
        ...
        ValueError: Sum is divergent.

    Python ints should work as limits of summation (trac #9393)::

        sage: sum(x, x, 1r, 5r)
        15

    .. note::

       #. Sage can currently only understand a subset of the output of Maxima, Maple and
          Mathematica, so even if the chosen backend can perform the summation the
          result might not be convertable into a Sage expression.
    """
    # Symbolic expressions carry their own sum() that understands the
    # (v, a, b, algorithm=...) calling convention.
    if hasattr(expression, 'sum'):
        return expression.sum(*args, **kwds)
    elif len(args) <= 1:
        # At most a start value was supplied: treat ``expression`` as an
        # iterable and use the Python builtin sum().
        return sum(expression, *args)
    else:
        # Coerce plain Python inputs into the symbolic ring so the full
        # (expr, v, a, b) form still works.
        from sage.symbolic.ring import SR
        return SR(expression).sum(*args, **kwds)
def integral(x, *args, **kwds):
    """
    Returns an indefinite or definite integral of an object x.

    First call x.integral() and if that fails make an object and
    integrate it using Maxima, maple, etc, as specified by algorithm.

    For symbolic expression calls
    :func:`sage.calculus.calculus.integral` - see this function for
    available options.

    EXAMPLES::

        sage: f = cyclotomic_polynomial(10)
        sage: integral(f)
        1/5*x^5 - 1/4*x^4 + 1/3*x^3 - 1/2*x^2 + x

    ::

        sage: integral(sin(x),x)
        -cos(x)

    ::

        sage: y = var('y')
        sage: integral(sin(x),y)
        y*sin(x)

    ::

        sage: integral(sin(x), x, 0, pi/2)
        1
        sage: sin(x).integral(x, 0,pi/2)
        1
        sage: integral(exp(-x), (x, 1, oo))
        e^(-1)

    Numerical approximation::

        sage: h = integral(tan(x)/x, (x, 1, pi/3)); h
        integrate(tan(x)/x, x, 1, 1/3*pi)
        sage: h.n()
        0.07571599101...

    Specific algorithm can be used for integration::

        sage: integral(sin(x)^2, x, algorithm='maxima')
        1/2*x - 1/4*sin(2*x)
        sage: integral(sin(x)^2, x, algorithm='sympy')
        -1/2*cos(x)*sin(x) + 1/2*x
    """
    # Objects with their own integral() (symbolic expressions,
    # polynomials, ...) take precedence; anything else is coerced into
    # the symbolic ring first.
    if hasattr(x, 'integral'):
        return x.integral(*args, **kwds)
    else:
        from sage.symbolic.ring import SR
        return SR(x).integral(*args, **kwds)

integrate = integral
def integral_closure(x):
    """
    Return the integral closure of ``x``.

    EXAMPLES::

        sage: integral_closure(QQ)
        Rational Field
    """
    return x.integral_closure()
def interval(a, b):
    r"""
    Return the integers between ``a`` and ``b`` *inclusive*.

    EXAMPLES::

        sage: I = interval(1,3)
        sage: 2 in I
        True
        sage: 4 in I
        False
    """
    return range(a, b + 1)
def xinterval(a, b):
    r"""
    Iterator over the integers between a and b, *inclusive*.

    EXAMPLES::

        sage: I = xinterval(2,5); I
        xrange(2, 6)
        sage: 5 in I
        True
        sage: 6 in I
        False
    """
    # xrange is the lazy Python 2 counterpart of interval() above.
    return xrange(a, b+1)
def is_commutative(x):
    """
    Return whether ``x`` is commutative.

    EXAMPLES::

        sage: R = PolynomialRing(QQ, 'x')
        sage: is_commutative(R)
        True
    """
    return x.is_commutative()
def is_even(x):
    """
    Return ``True`` when ``x`` is even, i.e. divisible by 2.

    EXAMPLES::

        sage: is_even(-2)
        True
        sage: is_even(-1)
        False
    """
    try:
        return x.is_even()
    except AttributeError:
        return x % 2 == 0
def is_integrally_closed(x):
    """
    Return whether ``x`` is integrally closed.

    EXAMPLES::

        sage: is_integrally_closed(QQ)
        True
    """
    return x.is_integrally_closed()
def is_field(x):
    """
    Return whether or not ``x`` is a field, by asking ``x`` itself.

    EXAMPLES::

        sage: is_field(FractionField(PolynomialRing(QQ, 'x')))
        True
    """
    return getattr(x, 'is_field')()
def is_noetherian(x):
    """
    Return whether or not ``x`` is a Noetherian object (satisfies the
    ascending chain condition), by asking ``x`` itself.

    EXAMPLES::

        sage: from sage.misc.functional import is_noetherian
        sage: is_noetherian(ZZ)
        True
        sage: is_noetherian(SteenrodAlgebra(3))
        False
    """
    verdict = x.is_noetherian()
    return verdict
def is_odd(x):
    """
    Return whether or not ``x`` is odd.  By definition this is the
    complement of :func:`is_even`.

    EXAMPLES::

        sage: is_odd(-3)
        True
        sage: is_odd(0)
        False
    """
    if is_even(x):
        return False
    return True
## def j_invariant(x):
## """
## Return the j_invariant of x.
## EXAMPLES:
## sage: E = EllipticCurve([0, -1, 1, -10, -20])
## sage: E
## Elliptic Curve defined by y^2 + y = x^3 - x^2 - 10*x - 20 over Rational Field
## sage: j_invariant(E)
## -122023936/161051
## """
## return x.j_invariant()
def kernel(x):
    """
    Return the left kernel of ``x``, by asking ``x`` itself.

    EXAMPLES::

        sage: A = MatrixSpace(QQ,3,2)([1,2,3,4,5,6])
        sage: kernel(A)
        Vector space of degree 3 and dimension 1 over Rational Field
        Basis matrix:
        [ 1 -2  1]
    """
    return getattr(x, 'kernel')()
def krull_dimension(x):
    """
    Return the Krull dimension of ``x``, by asking ``x`` itself.

    EXAMPLES::

        sage: krull_dimension(QQ)
        0
        sage: krull_dimension(ZZ)
        1
    """
    dimension = x.krull_dimension()
    return dimension
def lift(x):
    """
    Lift an object of a quotient ring `R/I` to `R`.

    Dispatches to ``x.lift()``; raises ``ArithmeticError`` when the
    object does not provide a lift.

    EXAMPLES: We lift an integer modulo `3`::

        sage: Mod(2,3).lift()
        2

    We lift an element of a quotient polynomial ring::

        sage: R.<x> = QQ['x']
        sage: S.<xmod> = R.quo(x^2 + 1)
        sage: lift(xmod-7)
        x - 7
    """
    try:
        return x.lift()
    except AttributeError:
        # Python 2 raise syntax -- this module predates Python 3.
        raise ArithmeticError, "no lift defined."
def log(x, b=None):
    r"""
    Return the logarithm of ``x`` to the base ``b``; the default base
    is `e` (natural logarithm).

    INPUT:

    - ``x`` - number
    - ``b`` - base (default: None, meaning natural log)

    OUTPUT: number

    .. note::

       In Magma the base is given first; here the base is an optional
       second argument.

    EXAMPLES::

        sage: log(e^2)
        2
        sage: log(16,2)
        4
    """
    if b is not None:
        if hasattr(x, 'log'):
            return x.log(b)
        return RDF(x).log(b)
    if hasattr(x, 'log'):
        return x.log()
    return RDF(x)._log_base(1)
def minimal_polynomial(x, var='x'):
    """
    Return the minimal polynomial of ``x`` in the variable ``var``.

    Tries ``x.minpoly`` first and falls back to
    ``x.minimal_polynomial`` when the former does not exist.

    EXAMPLES::

        sage: minpoly(matrix(ZZ, 2, [1..4]))
        x^2 - 5*x - 2
        sage: minimal_polynomial(matrix(ZZ, 2, [1..4]), 'theta')
        theta^2 - 5*theta - 2
    """
    try:
        poly = x.minpoly(var=var)
    except AttributeError:
        poly = x.minimal_polynomial(var=var)
    return poly
# Common alias: ``minpoly`` behaves exactly like :func:`minimal_polynomial`.
minpoly = minimal_polynomial
def multiplicative_order(x):
    r"""
    Return the multiplicative order of ``x`` if ``x`` is a unit; the
    underlying method raises ``ArithmeticError`` otherwise.

    EXAMPLES::

        sage: multiplicative_order(mod(5,11))
        5
        sage: multiplicative_order(mod(2,11))
        10
    """
    return getattr(x, 'multiplicative_order')()
## def new_submodule(x):
## return x.new_submodule()
## def new_subspace(x):
## return x.new_subspace()
def ngens(x):
    """
    Return the number of generators of ``x``, by asking ``x`` itself.

    EXAMPLES::

        sage: ngens(AbelianGroup(5, [5,5,7,8,9]))
        5
        sage: ngens(ZZ)
        1
    """
    count = x.ngens()
    return count
def norm(x):
    r"""
    Return the norm of ``x``, by asking ``x`` itself.

    For vectors and matrices this is the L2 (Euclidean) norm

    .. MATH::

        |\textbf{v}| = \sqrt{\sum_{i=1}^n |v_i|^2}.

    For a complex number `c = a + bi` it is the *field* norm

    .. MATH::

        \text{norm}(c) = c \cdot \overline{c} = a^2 + b^2,

    which differs from the absolute value (the absolute value is the
    square root of the norm).

    .. SEEALSO::

        - :meth:`sage.matrix.matrix2.Matrix.norm`
        - :meth:`sage.modules.free_module_element.FreeModuleElement.norm`
        - :meth:`sage.rings.complex_number.ComplexNumber.norm`

    EXAMPLES::

        sage: norm(vector([-1,2,3]))
        sqrt(14)
        sage: norm(2 - 3*I)
        13
    """
    value = x.norm()
    return value
def numerator(x):
    """
    Return the numerator of ``x``.

    Plain Python integers (``int`` and, on Python 2, ``long``) are
    their own numerator; any other object must provide a
    ``numerator`` method.

    EXAMPLES::

        sage: x = QQ['x'].0
        sage: numerator((x+1)/(x-1))
        x + 1
        sage: numerator(17/11111)
        17
    """
    plain_integer_types = (int, long)
    if isinstance(x, plain_integer_types):
        return x
    return x.numerator()
# Following is the top-level numerical_approx function.
# Implement a ._numerical_approx(prec, digits) method for your
# objects to enable the three top-level functions and three methods
def numerical_approx(x, prec=None, digits=None):
    r"""
    Return a numerical approximation of an object ``x`` with at least
    ``prec`` bits (or decimal ``digits``) of precision.

    .. note::

       Both upper case ``N`` and lower case ``n`` are aliases for
       :func:`numerical_approx`, and all three may be used as methods.

    INPUT:

    - ``x`` - an object with a ``_numerical_approx`` method, or one
      coercible into a real or complex field
    - ``prec (optional)`` - an integer (bits of precision)
    - ``digits (optional)`` - an integer (digits of precision)

    If neither ``prec`` nor ``digits`` is specified, the default is
    53 bits of precision.  If both are specified, ``prec`` wins.

    EXAMPLES::

        sage: numerical_approx(pi, 10)
        3.1
        sage: n(pi^2 + e, digits=20)
        12.587886229548403854
    """
    if prec is None:
        if digits is None:
            prec = 53
        else:
            # Over-estimate log(10)/log(2) slightly so the requested
            # number of decimal digits is always representable.
            prec = int((digits+1) * LOG_TEN_TWO_PLUS_EPSILON) + 1
    try:
        return x._numerical_approx(prec)
    except AttributeError:
        from sage.rings.complex_double import is_ComplexDoubleElement
        from sage.rings.complex_number import is_ComplexNumber
        if not (is_ComplexNumber(x) or is_ComplexDoubleElement(x)):
            try:
                return sage.rings.real_mpfr.RealField(prec)(x)
            # Trac 10761: now catches ValueErrors as well as TypeErrors
            except (TypeError, ValueError):
                pass
        return sage.rings.complex_field.ComplexField(prec)(x)
# Widely used abbreviations for :func:`numerical_approx`.
n = numerical_approx
N = numerical_approx
def objgens(x):
    """
    Return the pair ``(x, generators of x)``, by asking ``x`` itself.

    EXAMPLES::

        sage: R, x = objgens(PolynomialRing(QQ,3, 'x'))
        sage: x
        (x0, x1, x2)
    """
    return getattr(x, 'objgens')()
def objgen(x):
    """
    Return the pair ``(x, generator of x)``, by asking ``x`` itself.

    EXAMPLES::

        sage: R, x = objgen(FractionField(QQ['x']))
        sage: x
        x
    """
    pair = x.objgen()
    return pair
def one(R):
    """
    Return the multiplicative identity of the ring ``R``, obtained by
    coercing the integer 1 into ``R``.

    EXAMPLES::

        sage: R.<x> = PolynomialRing(QQ)
        sage: one(R)*x == x
        True
    """
    identity = R(1)
    return identity
def order(x):
    """
    Return the order of ``x``.  For a ring or module element this is
    the additive order of ``x``.  The computation is delegated to
    ``x.order()``.

    EXAMPLES::

        sage: order(CyclicPermutationGroup(10))
        10
        sage: order(GF(7))
        7
    """
    return getattr(x, 'order')()
def rank(x):
    """
    Return the rank of ``x``, by asking ``x`` itself.

    EXAMPLES: the rank of a matrix::

        sage: rank(MatrixSpace(QQ,3,3)([1,2,3,4,5,6,7,8,9]))
        2

    and of an elliptic curve::

        sage: rank(EllipticCurve([0,0,1,-1,0]))
        1
    """
    result = x.rank()
    return result
def regulator(x):
    """
    Return the regulator of ``x``, by asking ``x`` itself.

    EXAMPLES::

        sage: regulator(NumberField(x^2-2, 'a'))
        0.881373587019543
    """
    return getattr(x, 'regulator')()
def round(x, ndigits=0):
    """
    round(number[, ndigits]) - double-precision real number

    Round a number to a given precision in decimal digits (default 0
    digits).  If no precision is specified this just calls the
    element's ``.round()`` method.

    EXAMPLES::

        sage: round(sqrt(2),2)
        1.41
        sage: round(pi)
        3

    IMPLEMENTATION: If ndigits is specified, it calls Python's builtin
    round function, and converts the result to a real double field
    element.  Otherwise, it tries the argument's ``.round()`` method;
    if that fails, it reverts to the builtin round function, converted
    to a real double field element.

    .. note::

       This is currently slower than the builtin round function, since
       it does more work - i.e., allocating an RDF element and
       initializing it.  To access the builtin version do
       ``import __builtin__; __builtin__.round``.
    """
    try:
        if ndigits:
            # Explicit precision: always use the Python builtin and
            # wrap the result in an RDF element.
            return RealDoubleElement(__builtin__.round(x, ndigits))
        else:
            try:
                return x.round()
            except AttributeError:
                return RealDoubleElement(__builtin__.round(x, 0))
    except ArithmeticError:
        # Wrapping in RealDoubleElement failed; retry once on an RDF
        # coercion of x, otherwise re-raise.
        if not isinstance(x, RealDoubleElement):
            return round(RDF(x), ndigits)
        else:
            raise
def quotient(x, y, *args, **kwds):
    """
    Return the quotient object ``x/y``: a quotient of numbers, or a
    quotient of a ring ``x`` by the ideal ``y``, etc.

    Objects providing a ``quotient`` method are asked directly; plain
    division is the fallback.

    EXAMPLES::

        sage: quotient(5,6)
        5/6
        sage: R.<x> = ZZ[]
        sage: quotient(R, Ideal(R, x^2+1))
        Univariate Quotient Polynomial Ring in xbar over Integer Ring with modulus x^2 + 1
    """
    try:
        return x.quotient(y, *args, **kwds)
    except AttributeError:
        result = x / y
        return result
# Common alias: ``quo`` behaves exactly like :func:`quotient`.
quo = quotient
def show(x, *args, **kwds):
    r"""
    Show a graphics object x.

    OPTIONAL INPUT:

    - ``filename`` - (default: None) string

    SOME OF THESE MAY APPLY:

    - ``dpi`` - dots per inch
    - ``figsize``- [width, height] (same for square aspect)
    - ``axes`` - (default: True)
    - ``fontsize`` - positive integer
    - ``frame`` - (default: False) draw a MATLAB-like frame around the
      image

    EXAMPLES::

        sage: show(graphs(3))
        sage: show(list(graphs(3)))
    """
    # Pexpect interface elements get special handling below; for
    # everything else try the object's own ``show`` method first.
    if not isinstance(x, (sage.interfaces.expect.Expect, sage.interfaces.expect.ExpectElement)):
        try:
            return x.show(*args, **kwds)
        except AttributeError:
            pass
    if isinstance(x, sage.interfaces.mathematica.MathematicaElement):
        return x.show(*args, **kwds)
    import types
    if isinstance(x, types.GeneratorType):
        # Materialise generators so a list of graphs can be detected.
        x = list(x)
    if isinstance(x, list):
        if len(x) > 0:
            from sage.graphs.graph import GenericGraph
            if isinstance(x[0], GenericGraph):
                # A non-empty list of graphs gets the dedicated
                # graph-list display.
                import sage.graphs.graph_list as graphs_list
                graphs_list.show_graphs(x)
                return
    _do_show(x)
def _do_show(x):
    # Helper for :func:`show`: typeset ``x`` with LaTeX.  In doctest
    # mode just return the latex string instead of opening a viewer.
    if sage.doctest.DOCTEST_MODE:
        return sage.misc.latex.latex(x)
    from latex import view
    view(x, mode='display')
    #raise AttributeError, "object %s does not support show."%(x, )
def sqrt(x):
    """
    Returns a square root of x.

    This function (``numerical_sqrt``) is deprecated.  Use ``sqrt(x,
    prec=n)`` instead.

    EXAMPLES::

        sage: numerical_sqrt(10.1)
        doctest:1: DeprecationWarning: numerical_sqrt is deprecated, use sqrt(x, prec=n) instead
        See http://trac.sagemath.org/5404 for details.
        3.17804971641414
        sage: numerical_sqrt(9)
        3
    """
    from sage.misc.superseded import deprecation
    deprecation(5404, "numerical_sqrt is deprecated, use sqrt(x, prec=n) instead")
    try: return x.sqrt()
    except (AttributeError, ValueError):
        try:
            return RDF(x).sqrt()
        except TypeError:
            # Not coercible to a real double (e.g. negative under RDF
            # coercion rules): fall back to a complex square root.
            return CDF(x).sqrt()
def isqrt(x):
    """
    Return an integer square root of ``x``, i.e. the floor of its
    square root.

    EXAMPLES::

        sage: isqrt(10)
        3
        sage: isqrt(10r)
        3
    """
    try:
        return x.isqrt()
    except AttributeError:
        from sage.functions.all import floor
        floored = sage.rings.integer.Integer(floor(x))
        return floored.isqrt()
def squarefree_part(x):
    """
    Return the square free part of `x`, i.e. a divisor `z` such that
    `x = z y^2` for a perfect square `y^2`.

    Objects providing a ``squarefree_part`` method are asked directly;
    otherwise the factorization of ``x`` is used, collecting one copy
    of every prime occurring to an odd power.

    EXAMPLES::

        sage: squarefree_part(100)
        1
        sage: squarefree_part(12)
        3
        sage: x = QQ['x'].0
        sage: squarefree_part(-9*x*(x-6)^7*(x-3)^2)
        -9*x^2 + 54*x
    """
    try:
        return x.squarefree_part()
    except AttributeError:
        pass
    factorization = factor(x)
    sqfree = parent(x)(1)
    for prime, exponent in factorization:
        if exponent % 2 != 0:
            sqfree *= prime
    return sqfree * factorization.unit()
## def square_root(x):
## """
## Return a square root of x with the same parent as x, if possible,
## otherwise raise a ValueError.
## EXAMPLES:
## sage: square_root(9)
## 3
## sage: square_root(100)
## 10
## """
## try:
## return x.square_root()
## except AttributeError:
## raise NotImplementedError
def transpose(x):
    """
    Return the transpose of ``x``, by asking ``x`` itself.

    EXAMPLES::

        sage: transpose(MatrixSpace(QQ,3,3)([1,2,3,4,5,6,7,8,9]))
        [1 4 7]
        [2 5 8]
        [3 6 9]
    """
    return getattr(x, 'transpose')()
## def vector(x, R):
## r"""
## Return the \sage vector over $R$ obtained from x, if possible.
## """
## try:
## return x._vector_(R)
## except AttributeError:
## import sage.modules.free_module_element
## return sage.modules.free_module_element.Vector(x, R)
def zero(R):
    """
    Return the additive identity of the ring ``R``, obtained by
    coercing the integer 0 into ``R``.

    EXAMPLES::

        sage: R.<x> = PolynomialRing(QQ)
        sage: zero(R)*x == zero(R)
        True
    """
    additive_identity = R(0)
    return additive_identity
#################################################################
# Generic parent
#################################################################
def parent(x):
    """
    Return ``x.parent()`` if defined, or ``type(x)`` if not.

    EXAMPLE::

        sage: Z = parent(int(5))
        sage: Z
        <type 'int'>
    """
    try:
        return x.parent()
    except AttributeError:
        pass
    return type(x)
| 24.446785 | 133 | 0.548796 |
acef1fbc4bb5faf5b9c94009e6471bb8d278d3de | 252 | py | Python | packs/vadc/actions/vtm_del_persistence.py | userlocalhost2000/st2contrib | 1a5f759e76401743ed9023d298a3d767e3885db1 | [
"Apache-2.0"
] | 164 | 2015-01-17T16:08:33.000Z | 2021-08-03T02:34:07.000Z | packs/vadc/actions/vtm_del_persistence.py | userlocalhost2000/st2contrib | 1a5f759e76401743ed9023d298a3d767e3885db1 | [
"Apache-2.0"
] | 442 | 2015-01-01T11:19:01.000Z | 2017-09-06T23:26:17.000Z | packs/vadc/actions/vtm_del_persistence.py | userlocalhost2000/st2contrib | 1a5f759e76401743ed9023d298a3d767e3885db1 | [
"Apache-2.0"
] | 202 | 2015-01-13T00:37:40.000Z | 2020-11-07T11:30:10.000Z | #! /usr/bin/python
from st2actions.runners.pythonrunner import Action
from lib.vadc import Vtm
class VtmDelPersistence(Action):
    """StackStorm action: delete a session-persistence class on a vTM."""

    def run(self, vtm, name):
        # Build an API client for the targeted traffic manager, then
        # remove the named session-persistence class from it.
        client = Vtm(self.config, self.logger, vtm)
        client.delSessionPersistence(name)
| 19.384615 | 50 | 0.710317 |
acef21cdf7f8e73d915aeb048ed733c68dceae99 | 3,041 | py | Python | signalpy/app.py | Ksengine/SignalPy | bca374def747241263e7cb67abc10f3a42334b63 | [
"MIT"
] | 6 | 2020-07-26T09:18:43.000Z | 2021-12-29T14:54:34.000Z | signalpy/app.py | Ksengine/SignalPy | bca374def747241263e7cb67abc10f3a42334b63 | [
"MIT"
] | 2 | 2020-10-18T03:36:44.000Z | 2020-10-31T15:30:32.000Z | signalpy/app.py | Ksengine/SignalPy | bca374def747241263e7cb67abc10f3a42334b63 | [
"MIT"
] | 1 | 2020-10-16T20:00:44.000Z | 2020-10-16T20:00:44.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
from .data import data
import logging
from sys import version_info
import threading
if version_info.major > 2:
unicode = str
else:
unicode = unicode
logger = logging.getLogger(__name__)
class WSGIApp(object):
def __init__(self):
self.routes = {}
self.data = data
def __call__(self, environ, start_response):
""" Each instance of class is a WSGI application. """
return self.wsgi(environ, start_response)
def wsgi(self, environ, start_response):
""" The WSGI-interface. """
self.environ = environ
for route in self.routes:
if route.endswith('*') and environ.get('PATH_INFO'
).startswith(route[:-1]):
try:
r = self.bytes(self.routes.get(route)(environ,
start_response))
except Exception as e:
return self.bytes(self.ERROR(environ,
start_response, e))
return r
if environ.get('PATH_INFO') == route:
try:
r = self.bytes(self.routes.get(route)(environ,
start_response))
except Exception as e:
return self.bytes(self.ERROR(environ,
start_response, e))
return r
return self.bytes(self.NOT_FOUND(environ, start_response))
def bytes(self, out):
if not out:
return []
# Join lists of byte or unicode strings. Mixed lists are NOT supported
if isinstance(out, (tuple, list)) and isinstance(out[0],
(bytes, unicode)):
out = (out[0])[0:0].join(out) # b'abc'[0:0] -> b''
# Encode unicode strings
if isinstance(out, unicode):
out = out.encode()
# Byte Strings are just returned
if isinstance(out, bytes):
return [out]
def NOT_FOUND(self, environ, start_response):
err = data.NOT_FOUND
headers = [('Content-Type', 'text/html; charset=UTF-8')]
start_response('404 NOT FOUND', headers)
return [err]
def ERROR(
self,
environ,
start_response,
E,
):
logger.warn(' error : ' + str(E))
err = '</br><p>SignalPy Error : ' + str(E) + '</p>'
headers = [('Content-Type', 'text/html; charset=UTF-8')]
try:
start_response('500 INTERNAL SERVER ERROR', headers)
except:
err += \
'<p>cannot send 500 error.because above error happened after sending status</p>'
return [data.ERROR(err)]
def route(self, r):
def decorator(callback):
self.routes[r] = callback
return callback
return decorator
| 30.717172 | 96 | 0.501809 |
acef22117fc1ae105bd9a9d495dcb5ccf2ed4cd2 | 29,553 | py | Python | disco/extensions/upgrade_simulation/upgrades/cost_computation.py | NREL/disco | 19afa1c397c6c24e37222f6cbf027eb88833beda | [
"BSD-3-Clause"
] | 2 | 2022-03-11T20:04:34.000Z | 2022-03-14T22:25:29.000Z | disco/extensions/upgrade_simulation/upgrades/cost_computation.py | NREL/disco | 19afa1c397c6c24e37222f6cbf027eb88833beda | [
"BSD-3-Clause"
] | 4 | 2022-03-11T17:48:50.000Z | 2022-03-17T21:39:47.000Z | disco/extensions/upgrade_simulation/upgrades/cost_computation.py | NREL/disco | 19afa1c397c6c24e37222f6cbf027eb88833beda | [
"BSD-3-Clause"
] | null | null | null | import logging
import pandas as pd
import ast
from jade.utils.timing_utils import track_timing, Timer
from jade.utils.utils import load_data, dump_data
from disco import timer_stats_collector
logger = logging.getLogger(__name__)
# Dictionary used to convert between different length units and meters, which are used for all the calculations.
# OpenDSS can output results in any of these lengths.
LENGTH_CONVERSION_TO_METRE = {
    "mi": 1609.34,   # miles
    "kft": 304.8,    # kilofeet
    "km": 1000,      # kilometres
    "ft": 0.3048,    # feet
    "in": 0.0254,    # inches
    "cm": 0.01,      # centimetres
    "m": 1,          # metres (identity)
}
@track_timing(timer_stats_collector)
def compute_all_costs(
    output_json_xfmr_upgrades_filepath,
    output_json_line_upgrades_filepath,
    output_json_voltage_upgrades_filepath,
    cost_database_filepath,
    thermal_cost_output_filepath,
    voltage_cost_output_filepath,
    total_cost_output_filepath
):
    """Compute thermal (transformer + line) and voltage upgrade costs.

    Reads the upgrade-detail JSON files and the unit-cost database
    workbook, computes per-equipment costs, and writes three JSON
    outputs: thermal costs, voltage costs and the combined summary.
    """
    # upgrades files
    # TODO add except statement for FileNotFoundError
    xfmr_upgrades_df = pd.DataFrame(load_data(output_json_xfmr_upgrades_filepath))
    line_upgrades_df = pd.DataFrame(load_data(output_json_line_upgrades_filepath))
    voltage_upgrades_df = pd.DataFrame(load_data(output_json_voltage_upgrades_filepath))
    # unit cost database files
    xfmr_cost_database = pd.read_excel(cost_database_filepath, "transformers")
    line_cost_database = pd.read_excel(cost_database_filepath, "lines")
    controls_cost_database = pd.read_excel(cost_database_filepath, "control_changes")
    voltage_regulators_cost_database = pd.read_excel(
        cost_database_filepath, "voltage_regulators"
    )
    misc_database = pd.read_excel(cost_database_filepath, "misc")
    output_columns = ["type", "count", "total_cost_usd", "comment", "equipment_parameters"]
    # reformat data
    if not xfmr_upgrades_df.empty:
        xfmr_upgrades_df, xfmr_cost_database = reformat_xfmr_files(
            xfmr_upgrades_df=xfmr_upgrades_df, xfmr_cost_database=xfmr_cost_database
        )
        # compute thermal upgrade costs
        xfmr_cost_df = compute_transformer_costs(
            xfmr_upgrades_df=xfmr_upgrades_df,
            xfmr_cost_database=xfmr_cost_database,
            misc_database=misc_database,
        )
    else:
        # no transformer upgrades: emit an empty frame with the expected schema
        xfmr_cost_df = pd.DataFrame(columns=output_columns).astype({"count": int, "total_cost_usd": float})
    if not line_upgrades_df.empty:
        line_upgrades_df, line_cost_database = reformat_line_files(
            line_upgrades_df=line_upgrades_df, line_cost_database=line_cost_database
        )
        line_cost_df = compute_line_costs(
            line_upgrades_df=line_upgrades_df, line_cost_database=line_cost_database
        )
    else:
        line_cost_df = pd.DataFrame(columns=output_columns).astype({"count": int, "total_cost_usd": float})
    # DataFrame.append was deprecated in pandas 1.4 and removed in 2.0;
    # pd.concat with default arguments is the drop-in replacement.
    thermal_cost_df = pd.concat([xfmr_cost_df, line_cost_df])
    if not voltage_upgrades_df.empty:
        # compute voltage upgrade costs
        cap_cost_df = compute_capcontrol_cost(voltage_upgrades_df=voltage_upgrades_df,
                                              controls_cost_database=controls_cost_database)
        reg_cost_df = compute_voltage_regcontrol_cost(voltage_upgrades_df=voltage_upgrades_df,
                                                      vreg_control_cost_database=controls_cost_database,
                                                      vreg_xfmr_cost_database=voltage_regulators_cost_database,
                                                      xfmr_cost_database=xfmr_cost_database)
        voltage_cost_df = pd.concat([cap_cost_df, reg_cost_df])
    else:
        voltage_cost_df = pd.DataFrame(columns=output_columns).astype({"count": int, "total_cost_usd": float})
    total_cost_df = get_total_costs(thermal_cost_df, voltage_cost_df)
    # save output files
    dump_data(thermal_cost_df.to_dict('records'), thermal_cost_output_filepath, indent=4)
    dump_data(voltage_cost_df.to_dict('records'), voltage_cost_output_filepath, indent=4)
    dump_data(total_cost_df.to_dict('records'), total_cost_output_filepath, indent=4)
def compute_transformer_costs(xfmr_upgrades_df, xfmr_cost_database, **kwargs):
    """This function computes the transformer costs.

    - Unit equipment cost for new (parallel) and "upgrade" transformers are
      the same in the database.  The difference would be the fixed costs
      added (if present in misc_database).
    - For transformers of ActionType "upgrade": the old-rating transformer is
      removed and one of the upgraded rating is added.
    - For transformers of ActionType "new (parallel)": a new transformer is
      placed in parallel with the existing transformer.
    - These properties are considered while choosing unit cost:
      ["rated_kVA", "phases", "primary_kV", "secondary_kV",
      "primary_connection_type", "secondary_connection_type", "num_windings"]
    - If no unit cost matches all of these properties, the cost with the
      closest "rated_kVA" (or whatever ``backup_deciding_property`` is passed
      in kwargs) is chosen.

    Parameters
    ----------
    xfmr_upgrades_df : pandas.DataFrame
        Transformer upgrade records (already reformatted).
    xfmr_cost_database : pandas.DataFrame
        Unit-cost rows for transformers.
    kwargs
        ``backup_deciding_property`` (default "rated_kVA") and optional
        ``misc_database`` with fixed costs.

    Returns
    -------
    pandas.DataFrame
        One row per added transformer with type, count, cost, comment and
        equipment parameters.
    """
    output_cost_field = "total_cost_usd"
    output_count_field = "count"
    deciding_columns = ["rated_kVA", "phases", "primary_kV", "secondary_kV", "primary_connection_type",
                        "secondary_connection_type", "num_windings"]
    output_columns_list = ["type", output_count_field, output_cost_field, "comment", "equipment_parameters"]
    backup_deciding_property = kwargs.get("backup_deciding_property", "rated_kVA")
    misc_database = kwargs.get("misc_database", None)
    # choose which properties are to be saved
    upgrade_type_list = ["upgrade", "new (parallel)"]
    # only the "add" half of each upgrade carries a cost
    added_xfmr_df = xfmr_upgrades_df.loc[(xfmr_upgrades_df["Upgrade_Type"].isin(upgrade_type_list)) & (xfmr_upgrades_df["Action"] == "add")]
    computed_cost = []
    for index, row in added_xfmr_df.iterrows():
        # exact match on all deciding properties
        unit_cost = xfmr_cost_database.loc[(xfmr_cost_database["rated_kVA"] == row["rated_kVA"]) &
                                           (xfmr_cost_database["phases"] == row["phases"]) &
                                           (xfmr_cost_database["primary_kV"] == row["primary_kV"]) &
                                           (xfmr_cost_database["secondary_kV"] == row["secondary_kV"]) &
                                           (xfmr_cost_database["primary_connection_type"] == row["primary_connection_type"]) &
                                           (xfmr_cost_database["secondary_connection_type"] == row["secondary_connection_type"]) &
                                           (xfmr_cost_database["num_windings"] == row["num_windings"])
                                           ]["cost"]
        params_dict = dict(row[['final_equipment_name'] + deciding_columns])
        row["equipment_parameters"] = params_dict
        row["type"] = "Transformer"
        if len(unit_cost) > 0:  # if there are more than one rows, the first one is chosen in random
            unit_cost = unit_cost.values[0]
            row[output_cost_field] = unit_cost
            row["comment"] = ""
            row[output_count_field] = 1
        else:  # if costs are not present for this transformer, then choose closest rated_kVA
            # (or whatever backup deciding property is passed) (ignore other properties)
            closest = xfmr_cost_database.loc[abs(xfmr_cost_database[backup_deciding_property] -
                                                 row[backup_deciding_property]).idxmin()]
            row[output_cost_field] = closest["cost"]
            row[output_count_field] = 1
            comment_string = f"Transformer {row['final_equipment_name']}: Exact cost not available. " \
                             f"Unit cost for transformer with these parameters used " \
                             f"(based on closest {backup_deciding_property}: {dict(closest)}"
            logger.debug(comment_string)
            row["comment"] = comment_string
        # add transformer fixed costs, if given in database. (depending on upgrade type)
        if (misc_database is not None) and (not misc_database.empty):
            misc_xfmr_fields = {"replace": "Replace transformer (fixed cost)",
                                "new": "Add new transformer (fixed cost)"}
            # if equipment is upgraded, and misc database contains fixed cost for replacing xfmr
            if (row["Upgrade_Type"].lower() == "upgrade") and \
                    ((misc_database["Description"] == misc_xfmr_fields["replace"]).any()):
                field_name = misc_xfmr_fields["replace"]
                fixed_cost = misc_database.loc[misc_database["Description"] == field_name]
            # if equipment is new, and misc database contains fixed cost for adding new xfmr
            elif row["Upgrade_Type"].lower() == "new (parallel)" and \
                    ((misc_database["Description"] == misc_xfmr_fields["new"]).any()):
                field_name = misc_xfmr_fields["new"]
                fixed_cost = misc_database.loc[misc_database["Description"] == field_name]
            else:
                fixed_cost = pd.DataFrame()
            if not fixed_cost.empty:
                row[output_cost_field] += misc_database.loc[misc_database["Description"]
                                                            == field_name]["total_cost"].values[0]
        computed_cost.append(row[output_columns_list])
    xfmr_cost_df = pd.DataFrame(computed_cost)
    return xfmr_cost_df
def reformat_xfmr_files(xfmr_upgrades_df, xfmr_cost_database):
    """Normalise the transformer upgrades and cost tables so their columns align.

    Renames ``kVA``/``windings`` in the upgrades table to the cost-database
    names, coerces the shared matching columns to consistent dtypes, and
    splits the per-winding ``kVs``/``conns`` sequences into explicit
    primary/secondary columns (first entry = primary, last entry = secondary).

    Parameters
    ----------
    xfmr_upgrades_df : pandas.DataFrame
        Transformer upgrade records (modified in place and returned).
    xfmr_cost_database : pandas.DataFrame
        Transformer unit-cost records (modified in place and returned).

    Returns
    -------
    tuple of pandas.DataFrame
        The reformatted ``(xfmr_upgrades_df, xfmr_cost_database)``.
    """
    xfmr_upgrades_df.rename(columns={"kVA": "rated_kVA", "windings": "num_windings"}, inplace=True)
    # Cast the shared columns so equality matching against the cost table works.
    for column, dtype in (("rated_kVA", float), ("num_windings", int), ("phases", int)):
        xfmr_upgrades_df[column] = xfmr_upgrades_df[column].astype(dtype)
    # First/last entries of the per-winding sequences give primary/secondary values.
    xfmr_upgrades_df["primary_kV"] = xfmr_upgrades_df["kVs"].str[0].astype(float)
    xfmr_upgrades_df["secondary_kV"] = xfmr_upgrades_df["kVs"].str[-1].astype(float)
    xfmr_upgrades_df["primary_connection_type"] = xfmr_upgrades_df["conns"].str[0]
    xfmr_upgrades_df["secondary_connection_type"] = xfmr_upgrades_df["conns"].str[-1]
    # Apply the same dtype treatment to the cost database columns.
    for column, dtype in (("rated_kVA", float), ("num_windings", int), ("phases", int),
                          ("primary_kV", float), ("secondary_kV", float), ("cost", float)):
        xfmr_cost_database[column] = xfmr_cost_database[column].astype(dtype)
    return xfmr_upgrades_df, xfmr_cost_database
def reformat_xfmr_upgrades_file(xfmr_upgrades_df):
    """Rename and retype transformer upgrade columns in place.

    Same reshaping as the upgrades half of ``reformat_xfmr_files``, except
    that ``conns`` arrives as a string representation of a list here and is
    parsed with :func:`ast.literal_eval` before the primary/secondary
    connection columns are extracted.

    Parameters
    ----------
    xfmr_upgrades_df : pandas.DataFrame
        Transformer upgrade records (modified in place and returned).

    Returns
    -------
    pandas.DataFrame
        The reformatted ``xfmr_upgrades_df``.
    """
    xfmr_upgrades_df.rename(columns={"kVA": "rated_kVA", "windings": "num_windings"}, inplace=True)
    for column, dtype in (("rated_kVA", float), ("num_windings", int), ("phases", int)):
        xfmr_upgrades_df[column] = xfmr_upgrades_df[column].astype(dtype)
    xfmr_upgrades_df["primary_kV"] = xfmr_upgrades_df["kVs"].str[0].astype(float)
    xfmr_upgrades_df["secondary_kV"] = xfmr_upgrades_df["kVs"].str[-1].astype(float)
    # "conns" is serialized as e.g. "['wye', 'delta']"; parse it back to a list.
    xfmr_upgrades_df["conns"] = xfmr_upgrades_df["conns"].apply(ast.literal_eval)
    xfmr_upgrades_df["primary_connection_type"] = xfmr_upgrades_df["conns"].str[0]
    xfmr_upgrades_df["secondary_connection_type"] = xfmr_upgrades_df["conns"].str[-1]
    return xfmr_upgrades_df
def compute_line_costs(line_upgrades_df, line_cost_database, **kwargs):
    """Compute the total cost of line upgrades.

    Pricing rules (from the original author's notes):
    - Unit costs differ between "new" and "reconductored" lines, so
      "upgrade" actions are priced with "reconductored_line" unit costs and
      "new (parallel)" actions with "new_line" unit costs.
    - Upgraded and new lines run along the existing circuit, so both use the
      original line's length.
    - Unit costs are matched on ["phases", "voltage_kV", "ampere_rating",
      "line_placement", "Description"]. If no exact match exists, the cost
      row with the closest ``backup_deciding_property`` (default
      "ampere_rating") is used instead and a comment is recorded.
    - For 3-phase lines, voltage_kV should be the LN voltage
      (TODO from original author: confirm this is correct).

    Parameters
    ----------
    line_upgrades_df : pandas.DataFrame
        Line upgrade actions; only rows with Action == "add" and a supported
        Upgrade_Type are costed.
    line_cost_database : pandas.DataFrame
        Unit costs with a per-metre ``cost_per_m`` column.
    kwargs
        ``backup_deciding_property`` — column used to pick the closest cost
        row when no exact match is found.

    Returns
    -------
    pandas.DataFrame
        One row per costed line with columns
        [type, count, total_cost_usd, comment, equipment_parameters].
    """
    output_cost_field = "total_cost_usd"
    output_count_field = "count"
    # Properties that must match exactly when looking up a unit cost.
    deciding_columns = ["phases", "voltage_kV", "ampere_rating", "line_placement", "Description"]
    output_columns_list = ["type", output_count_field, output_cost_field, "comment", "equipment_parameters"]
    backup_deciding_property = kwargs.get("backup_deciding_property", "ampere_rating")
    # choose which properties are to be saved
    upgrade_type_list = ["upgrade", "new (parallel)"]
    added_line_df = line_upgrades_df.loc[(line_upgrades_df["Upgrade_Type"].isin(upgrade_type_list)) & (line_upgrades_df["Action"] == "add")]
    computed_cost = []
    for index, row in added_line_df.iterrows():
        # Map the upgrade type onto the cost database's "Description" values.
        if row["Upgrade_Type"] == "upgrade":
            description = "reconductored_line"
        elif row["Upgrade_Type"] == "new (parallel)":
            description = "new_line"
        else:
            # if anything else, by default, use new_line prices
            description = "new_line"
        row["Description"] = description
        # Exact match on every deciding property.
        unit_cost = line_cost_database.loc[(line_cost_database["phases"] == row["phases"]) &
                                           (line_cost_database["voltage_kV"] == row["voltage_kV"]) &
                                           (line_cost_database["ampere_rating"] == row["ampere_rating"]) &
                                           (line_cost_database["line_placement"] == row["line_placement"]) &
                                           (line_cost_database["Description"] == description)
                                           ]["cost_per_m"]
        # convert line length to metres (module-level unit-conversion table)
        line_length_m = row["length"] * LENGTH_CONVERSION_TO_METRE[row["units"]]
        params_dict = dict(row[['final_equipment_name'] + deciding_columns])
        row["equipment_parameters"] = params_dict
        row["type"] = "Line"
        if len(unit_cost) > 0:  # exact match found; if several rows match, the first one is used
            unit_cost = unit_cost.values[0]
            row[output_cost_field] = unit_cost * line_length_m
            row[output_count_field] = 1
            row["comment"] = ""
        else:  # if costs are not present for this line, then choose closest ampere_rating
            # (or whatever backup deciding property is passed) (ignore other properties)
            closest = line_cost_database.loc[abs(line_cost_database[backup_deciding_property] -
                                                 row[backup_deciding_property]).idxmin()]
            row[output_cost_field] = closest["cost_per_m"] * line_length_m
            comment_string = f"Line {row['final_equipment_name']}: Exact cost not available. " \
                             f"Unit cost for line with these parameters used " \
                             f"(based on closest {backup_deciding_property}: {dict(closest)}"
            logger.debug(comment_string)
            row["comment"] = comment_string
            row[output_count_field] = 1
        computed_cost.append(row[output_columns_list])
    line_cost_df = pd.DataFrame(computed_cost)
    return line_cost_df
def reformat_line_files(line_upgrades_df, line_cost_database):
    """Normalise line upgrade records and the line cost database.

    Renames ``normamps``/``kV`` in the upgrades table to the cost-database
    names, rounds the shared numeric columns to two decimals on both tables
    so equality matching works, and copies each original line's length onto
    its upgraded/parallel replacements (they run along the same circuit).

    Parameters
    ----------
    line_upgrades_df : pandas.DataFrame
        Line upgrade records (modified in place and returned).
    line_cost_database : pandas.DataFrame
        Line unit-cost records (modified in place and returned).

    Returns
    -------
    tuple of pandas.DataFrame
        The reformatted ``(line_upgrades_df, line_cost_database)``.
    """
    line_upgrades_df.rename(columns={"normamps": "ampere_rating", "kV": "voltage_kV"}, inplace=True)
    # Identical dtype/rounding treatment on both tables.
    for frame in (line_upgrades_df, line_cost_database):
        frame["ampere_rating"] = frame["ampere_rating"].astype(float).round(2)
        frame["phases"] = frame["phases"].astype(int)
        frame["voltage_kV"] = frame["voltage_kV"].astype(float).round(2)
    # New/upgraded equipment inherits the original equipment's length.
    line_upgrades_df["length"] = line_upgrades_df.groupby("original_equipment_name")["length"].transform("first")
    line_cost_database["cost_per_m"] = line_cost_database["cost_per_m"].astype(float)
    return line_upgrades_df, line_cost_database
def compute_capcontrol_cost(voltage_upgrades_df, controls_cost_database, keyword="Capacitor"):
    """Compute capacitor-controller related costs.

    Two cost rows are produced: newly added capacitor controllers and
    capacitor-controller setting changes. New capacitor banks themselves are
    not currently added to integrate PV, so only controller work is costed.
    When no capacitor-related upgrades exist, a zero-cost table (without the
    comment column) is returned.

    Parameters
    ----------
    voltage_upgrades_df : pandas.DataFrame
        Voltage upgrade actions; capacitor rows are selected where the
        ``equipment_type`` column contains ``keyword``.
    controls_cost_database : pandas.DataFrame
        Unit costs keyed by the ``Type`` column.
    keyword : str
        Substring identifying capacitor rows in ``equipment_type``.

    Returns
    -------
    pandas.DataFrame
    """
    row_labels = ["New Capacitor controller", "Capacitor controller setting change"]
    # Table returned whenever there is nothing capacitor-related to cost.
    no_cost_table = pd.DataFrame.from_dict(
        {"type": row_labels, "count": [0] * len(row_labels), "total_cost_usd": [0] * len(row_labels)})
    if voltage_upgrades_df.empty:  # no voltage upgrades at all
        return no_cost_table
    cap_rows = voltage_upgrades_df[voltage_upgrades_df["equipment_type"].str.contains(keyword)]
    if cap_rows.empty:  # no capacitor-control upgrades
        return no_cost_table

    def _unit_cost(description):
        # First matching unit cost from the controls cost database.
        return controls_cost_database.loc[controls_cost_database["Type"] == description]["cost"].values[0]

    new_controller_count = cap_rows["New controller added"].sum()
    new_controller_total = new_controller_count * _unit_cost("Add new capacitor controller")
    setting_change_count = cap_rows["Controller settings modified"].sum()
    setting_change_total = setting_change_count * _unit_cost("Change capacitor controller settings")
    result = pd.DataFrame.from_dict({
        "type": row_labels,
        "count": [new_controller_count, setting_change_count],
        "total_cost_usd": [new_controller_total, setting_change_total],
    })
    result["comment"] = ""
    return result[["type", "count", "total_cost_usd", "comment"]]
def compute_voltage_regcontrol_cost(voltage_upgrades_df, vreg_control_cost_database, vreg_xfmr_cost_database, xfmr_cost_database, keyword="RegControl"):
    """Compute voltage regulator controller related costs.

    Costed items: newly added regulator controllers, controller setting
    changes (split between substation LTC work and in-line regulator work
    via the "At Substation" flag of each record), and any transformers added
    for the regulators.

    Parameters
    ----------
    voltage_upgrades_df : pandas.DataFrame
        Voltage upgrade records; regulator rows are selected where
        ``equipment_type`` contains ``keyword``.
    vreg_control_cost_database : pandas.DataFrame
        Controller unit costs keyed by the ``Type`` column.
    vreg_xfmr_cost_database : pandas.DataFrame
        Unit costs for voltage-regulator transformers.
    xfmr_cost_database : pandas.DataFrame
        General transformer unit costs, pooled with the regulator
        transformer costs when matching added transformers.
    keyword : str
        Substring identifying regulator rows in ``equipment_type``.

    Returns
    -------
    pandas.DataFrame
        Rows of [type, count, total_cost_usd, comment]; an all-zero table if
        there are no regulator upgrades.
    """
    output_cost_field = "total_cost_usd"
    output_count_field = "count"
    output_columns_list = ["type", output_count_field, output_cost_field, "comment"]
    # Column names used in the voltage upgrades file.
    upgrade_fields_dict = {"add_new_reg_control": "New controller added",
                           "change_reg_control": "Controller settings modified",
                           "add_new_transformer": "New transformer added",
                           "at_substation": "At Substation",
                           # "change_ltc_control": "Substation LTC settings modified",
                           }
    # "Type" values used in the controls cost database.
    cost_database_fields_dict = {"add_new_reg_control": "Add new voltage regulator controller",
                                 "change_reg_control": "Change voltage regulator controller settings",
                                 # "replace_reg_control": "Replace voltage regulator controller",  # this is not used currently
                                 "add_substation_ltc": "Add new LTC controller",
                                 "change_ltc_control": "Change LTC settings",
                                 "add_new_transformer": "Add new voltage regulator transformer"}
    # Human-readable labels used in the output table.
    output_fields_dict = {"add_new_reg_control": "New in-line voltage regulator",
                          "change_reg_control": "In-line voltage regulator control setting change",
                          # "replace_reg_control": "Replace in-line voltage regulator controller",  # this is not used currently
                          "add_substation_ltc": "New substation LTC",
                          "change_ltc_control": "Substation LTC setting change",
                          "add_new_vreg_transformer": "Transformer for voltage regulator",
                          "add_new_substation_transformer": "Substation transformer"}
    output_rows = ["New in-line voltage regulator", "In-line voltage regulator control setting change",
                   "Replace in-line voltage regulator controller",
                   "New substation LTC", "Substation LTC setting change",
                   "Substation transformer", "Transformer for voltage regulator"]
    control_computation_fields = ["add_new_reg_control", "change_reg_control"]
    xfmr_fields = ["add_new_transformer"]
    empty_reg_cost_dict = {"type": output_rows, "count": [0] * len(output_rows), "total_cost_usd": [0] * len(output_rows)}
    zero_cost_df = pd.DataFrame.from_dict(empty_reg_cost_dict)
    if voltage_upgrades_df.empty:  # if there are no voltage upgrades
        return zero_cost_df
    reg_upgrades_df = voltage_upgrades_df.loc[voltage_upgrades_df['equipment_type'].str.contains(keyword)]
    if reg_upgrades_df.empty:  # if there are no regulator controller upgrades
        return zero_cost_df
    cost_list = []
    # if there are regulator controller upgrades
    for field in control_computation_fields:
        # Rows flagged "At Substation" are priced as LTC work.
        at_substation_df = reg_upgrades_df.loc[reg_upgrades_df[upgrade_fields_dict["at_substation"]] == True]
        if field == "add_new_reg_control":
            cost_field = "add_substation_ltc"
        elif field == "change_reg_control":
            cost_field = "change_ltc_control"
        else:
            raise Exception(f"Unknown field {field} in regulator cost computation")
        if not at_substation_df.empty:  # there are regcontrols at the substation
            count = at_substation_df[upgrade_fields_dict[field]].sum()
            unit_cost = vreg_control_cost_database.loc[vreg_control_cost_database["Type"] == cost_database_fields_dict[cost_field]]["cost"].values[0]
            total_cost = count * unit_cost
        else:
            count = 0
            total_cost = 0
        cost_list.append({"type": output_fields_dict[cost_field], "count": count, "total_cost_usd": total_cost, "comment": ""})
        # Rows not at the substation are priced as in-line regulator work.
        not_at_substation_df = reg_upgrades_df.loc[reg_upgrades_df[upgrade_fields_dict["at_substation"]] == False]
        cost_field = field
        if not not_at_substation_df.empty:  # there are in-line regcontrols
            count = not_at_substation_df[upgrade_fields_dict[field]].sum()
            unit_cost = vreg_control_cost_database.loc[vreg_control_cost_database["Type"] == cost_database_fields_dict[cost_field]]["cost"].values[0]
            total_cost = count * unit_cost
        else:
            count = 0
            total_cost = 0
        cost_list.append({"type": output_fields_dict[cost_field], "count": count, "total_cost_usd": total_cost, "comment": ""})
    # add costs for added transformers (needed for voltage regulators).
    # DataFrame.append was deprecated in pandas 1.4 and removed in 2.0;
    # pd.concat is the supported replacement with identical semantics here.
    vreg_xfmr_cost_database = pd.concat([vreg_xfmr_cost_database, xfmr_cost_database])
    for field in xfmr_fields:
        cost_field = field
        new_xfmr_added_df = reg_upgrades_df.loc[reg_upgrades_df[upgrade_fields_dict["add_new_transformer"]] == True]
        for index, row in new_xfmr_added_df.iterrows():
            output_row = {}
            added_xfmr_details = row["Final Settings"]
            # Reformat the single added-transformer record like an upgrades file row.
            added_xfmr_details = reformat_xfmr_upgrades_file(pd.DataFrame([added_xfmr_details]))
            deciding_columns = ["rated_kVA", "phases", "primary_kV", "secondary_kV", "primary_connection_type",
                                "secondary_connection_type", "num_windings"]
            params_dict = added_xfmr_details[["name"] + deciding_columns].to_dict(orient="records")[0]
            added_xfmr_details = added_xfmr_details.to_dict(orient="records")[0]
            output_row["equipment_parameters"] = params_dict
            # Exact match on every deciding transformer property.
            unit_cost = vreg_xfmr_cost_database.loc[(vreg_xfmr_cost_database["rated_kVA"] == added_xfmr_details["rated_kVA"]) &
                                                    (vreg_xfmr_cost_database["primary_kV"] == added_xfmr_details["primary_kV"]) &
                                                    (vreg_xfmr_cost_database["secondary_kV"] == added_xfmr_details["secondary_kV"]) &
                                                    (vreg_xfmr_cost_database["phases"] == added_xfmr_details["phases"]) &
                                                    (vreg_xfmr_cost_database["num_windings"] == added_xfmr_details["num_windings"]) &
                                                    (vreg_xfmr_cost_database["primary_connection_type"] == added_xfmr_details["primary_connection_type"]) &
                                                    (vreg_xfmr_cost_database["secondary_connection_type"] == added_xfmr_details["secondary_connection_type"])
                                                    ]["cost"]
            if len(unit_cost) > 0:  # if several rows match, the first one is used
                unit_cost = unit_cost.values[0]
                output_row[output_cost_field] = unit_cost
                output_row["comment"] = ""
                output_row[output_count_field] = 1
            else:  # no exact match: fall back to the closest rated_kVA in the pooled database
                backup_deciding_property = "rated_kVA"
                closest = vreg_xfmr_cost_database.loc[abs(vreg_xfmr_cost_database[backup_deciding_property] - added_xfmr_details[backup_deciding_property]).idxmin()]
                output_row[output_cost_field] = closest["cost"]
                output_row[output_count_field] = 1
                comment_string = f"Transformer {added_xfmr_details['name']}: Exact cost not available. " \
                                 f"Unit cost for transformer with these parameters used " \
                                 f"(based on closest {backup_deciding_property}: {dict(closest)}"
                logger.debug(comment_string)
                output_row["comment"] = comment_string
            if row[upgrade_fields_dict["at_substation"]]:
                output_row["type"] = output_fields_dict["add_new_substation_transformer"]
            else:
                output_row["type"] = output_fields_dict["add_new_vreg_transformer"]
            cost_list.append(output_row)
    reg_cost_df = pd.DataFrame(cost_list)
    reg_cost_df = reg_cost_df[output_columns_list]
    return reg_cost_df
def get_total_costs(thermal_cost_df, voltage_cost_df):
    """Combine thermal and voltage upgrade cost tables into one summary.

    Rows from both tables are concatenated and then aggregated per equipment
    ``type`` (counts and costs are summed).

    Parameters
    ----------
    thermal_cost_df : pandas.DataFrame
        Thermal upgrade costs with at least a ``type`` column.
    voltage_cost_df : pandas.DataFrame
        Voltage upgrade costs with at least a ``type`` column.

    Returns
    -------
    pandas.DataFrame
        One row per ``type`` with the remaining columns summed.
    """
    # DataFrame.append was deprecated in pandas 1.4 and removed in 2.0;
    # pd.concat is the supported replacement with identical semantics here.
    total_cost_df = pd.concat([thermal_cost_df, voltage_cost_df])
    total_cost_df = total_cost_df.groupby('type').sum()
    total_cost_df.reset_index(inplace=True)
    return total_cost_df
# Script entry point: run the full upgrade-cost computation when this module
# is executed directly (no effect when imported).
if __name__ == "__main__":
    compute_all_costs()
| 55.655367 | 166 | 0.657361 |
acef23d4deec41f283120a37d65827bb5265da34 | 14,088 | py | Python | eval.py | ok1zjf/LBAE | a6c29c4542b8eb719ba65cbffd5cff0c8236a7a3 | [
"MIT"
] | 15 | 2020-07-17T09:11:15.000Z | 2021-12-09T06:20:47.000Z | eval.py | ok1zjf/LBAE | a6c29c4542b8eb719ba65cbffd5cff0c8236a7a3 | [
"MIT"
] | 2 | 2021-02-18T15:46:14.000Z | 2021-06-28T01:08:22.000Z | eval.py | ok1zjf/LBAE | a6c29c4542b8eb719ba65cbffd5cff0c8236a7a3 | [
"MIT"
] | 1 | 2021-06-21T06:03:08.000Z | 2021-06-21T06:03:08.000Z | from __future__ import absolute_import, division, print_function
__author__ = 'Jiri Fajtl'
__email__ = 'ok1zjf@gmail.com'
__version__= '1.8'
__status__ = "Research"
__date__ = "2/1/2020"
__license__= "MIT License"
import random
import warnings
warnings.filterwarnings('ignore')
import sys
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # or any {'0', '1', '2'}
# os.environ['CUDA_VISIBLE_DEVICES'] = '1'
import tensorflow as tf
import glob
import numpy as np
import fid
import imageio
import tensorflow as tf
from tqdm import tqdm
from sklearn.metrics.pairwise import polynomial_kernel
from tensorflow.compat.v1 import ConfigProto
from tensorflow.compat.v1 import InteractiveSession
from image_utils import psnr, ssim
# Seed every RNG in play (Python, NumPy, TF graph-level and op-level) so the
# KID subset sampling and TF evaluation are reproducible across runs.
rnd_seed = 12345
random.seed(rnd_seed)
np.random.seed(rnd_seed)
tf.compat.v2.random.set_seed(rnd_seed)
tf.random.set_random_seed(rnd_seed)
# Batch size used when extracting Inception activations for FID/KID.
BATCH_SIZE = 100
def eval_init():
    """Create a global interactive TensorFlow session for evaluation.

    ``allow_growth`` keeps TensorFlow from reserving all GPU memory up
    front; the InteractiveSession installs itself as the default session,
    so the handle itself does not need to be kept.
    """
    session_config = ConfigProto()
    session_config.gpu_options.allow_growth = True
    InteractiveSession(config=session_config)
def polynomial_mmd_averages(codes_g, codes_r, n_subsets=50, subset_size=1000,
                            ret_var=True, output=sys.stdout, **kernel_args):
    """Estimate KID by averaging polynomial MMD^2 over random subsets.

    Draws ``n_subsets`` random subsets of ``subset_size`` codes (without
    replacement) from each code set and computes the polynomial-kernel MMD^2
    for every pair, optionally together with its variance estimate.

    Returns the array of MMD^2 values, or ``(mmds, variances)`` when
    ``ret_var`` is true.
    """
    var_at_m = min(codes_g.shape[0], codes_r.shape[0])
    mmds = np.zeros(n_subsets)
    if ret_var:
        # named "variances" to avoid shadowing the builtin `vars`
        variances = np.zeros(n_subsets)
    with tqdm(range(n_subsets), desc='MMD', file=output) as bar:
        for subset_idx in bar:
            sample_g = codes_g[np.random.choice(len(codes_g), subset_size, replace=False)]
            sample_r = codes_r[np.random.choice(len(codes_r), subset_size, replace=False)]
            result = polynomial_mmd(sample_g, sample_r, **kernel_args,
                                    var_at_m=var_at_m, ret_var=ret_var)
            if ret_var:
                mmds[subset_idx], variances[subset_idx] = result
            else:
                mmds[subset_idx] = result
            # Show the running mean of the MMD^2 estimates in the progress bar.
            bar.set_postfix({'mean': mmds[:subset_idx + 1].mean()})
    return (mmds, variances) if ret_var else mmds
def polynomial_mmd(codes_g, codes_r, degree=3, gamma=None, coef0=1,
                   var_at_m=None, ret_var=True):
    """Compute MMD^2 between two code sets under a polynomial kernel.

    The kernel is k(x, y) = (gamma * <x, y> + coef0) ** degree; sklearn's
    default for ``gamma=None`` is 1 / n_features. The three kernel matrices
    are handed to ``_mmd2_and_variance`` for the actual estimator.
    """
    kernel_params = dict(degree=degree, gamma=gamma, coef0=coef0)
    K_XX = polynomial_kernel(codes_g, **kernel_params)
    K_YY = polynomial_kernel(codes_r, **kernel_params)
    K_XY = polynomial_kernel(codes_g, codes_r, **kernel_params)
    return _mmd2_and_variance(K_XX, K_XY, K_YY,
                              var_at_m=var_at_m, ret_var=ret_var)
def _sqn(arr):
flat = np.ravel(arr)
return flat.dot(flat)
def _mmd2_and_variance(K_XX, K_XY, K_YY, unit_diagonal=False,
                       mmd_est='unbiased', block_size=1024,
                       var_at_m=None, ret_var=True):
    """Estimate MMD^2 (and optionally its variance) from kernel matrices.

    ``K_XX``/``K_YY`` are the within-sample kernel matrices, ``K_XY`` the
    cross-sample one; all must be m x m. ``mmd_est`` selects the biased,
    unbiased, or u-statistic estimator. When ``ret_var`` is true, a variance
    estimate of the MMD^2 statistic (evaluated at sample size ``var_at_m``)
    is returned as well.
    """
    # based on
    # https://github.com/dougalsutherland/opt-mmd/blob/master/two_sample/mmd.py
    # but changed to not compute the full kernel matrix at once
    m = K_XX.shape[0]
    assert K_XX.shape == (m, m)
    assert K_XY.shape == (m, m)
    assert K_YY.shape == (m, m)
    if var_at_m is None:
        var_at_m = m
    # Get the various sums of kernels that we'll use
    # Kts drop the diagonal, but we don't need to compute them explicitly
    if unit_diagonal:
        # k(x, x) == 1 for every point, so the diagonal sums are known.
        diag_X = diag_Y = 1
        sum_diag_X = sum_diag_Y = m
        sum_diag2_X = sum_diag2_Y = m
    else:
        diag_X = np.diagonal(K_XX)
        diag_Y = np.diagonal(K_YY)
        sum_diag_X = diag_X.sum()
        sum_diag_Y = diag_Y.sum()
        sum_diag2_X = _sqn(diag_X)
        sum_diag2_Y = _sqn(diag_Y)
    # Row sums of the kernel matrices with the diagonal removed ("Kt").
    Kt_XX_sums = K_XX.sum(axis=1) - diag_X
    Kt_YY_sums = K_YY.sum(axis=1) - diag_Y
    K_XY_sums_0 = K_XY.sum(axis=0)
    K_XY_sums_1 = K_XY.sum(axis=1)
    Kt_XX_sum = Kt_XX_sums.sum()
    Kt_YY_sum = Kt_YY_sums.sum()
    K_XY_sum = K_XY_sums_0.sum()
    if mmd_est == 'biased':
        # Biased (V-statistic) estimator keeps the diagonal terms.
        mmd2 = ((Kt_XX_sum + sum_diag_X) / (m * m)
                + (Kt_YY_sum + sum_diag_Y) / (m * m)
                - 2 * K_XY_sum / (m * m))
    else:
        assert mmd_est in {'unbiased', 'u-statistic'}
        mmd2 = (Kt_XX_sum + Kt_YY_sum) / (m * (m-1))
        if mmd_est == 'unbiased':
            mmd2 -= 2 * K_XY_sum / (m * m)
        else:
            # Full u-statistic also drops the K_XY diagonal.
            mmd2 -= 2 * (K_XY_sum - np.trace(K_XY)) / (m * (m-1))
    if not ret_var:
        return mmd2
    # Squared-sum terms needed by the variance estimate.
    Kt_XX_2_sum = _sqn(K_XX) - sum_diag2_X
    Kt_YY_2_sum = _sqn(K_YY) - sum_diag2_Y
    K_XY_2_sum = _sqn(K_XY)
    dot_XX_XY = Kt_XX_sums.dot(K_XY_sums_1)
    dot_YY_YX = Kt_YY_sums.dot(K_XY_sums_0)
    m1 = m - 1
    m2 = m - 2
    # zeta1/zeta2 are the two variance components of the MMD^2 u-statistic.
    zeta1_est = (
        1 / (m * m1 * m2) * (
            _sqn(Kt_XX_sums) - Kt_XX_2_sum + _sqn(Kt_YY_sums) - Kt_YY_2_sum)
        - 1 / (m * m1)**2 * (Kt_XX_sum**2 + Kt_YY_sum**2)
        + 1 / (m * m * m1) * (
            _sqn(K_XY_sums_1) + _sqn(K_XY_sums_0) - 2 * K_XY_2_sum)
        - 2 / m**4 * K_XY_sum**2
        - 2 / (m * m * m1) * (dot_XX_XY + dot_YY_YX)
        + 2 / (m**3 * m1) * (Kt_XX_sum + Kt_YY_sum) * K_XY_sum
    )
    zeta2_est = (
        1 / (m * m1) * (Kt_XX_2_sum + Kt_YY_2_sum)
        - 1 / (m * m1)**2 * (Kt_XX_sum**2 + Kt_YY_sum**2)
        + 2 / (m * m) * K_XY_2_sum
        - 2 / m**4 * K_XY_sum**2
        - 4 / (m * m * m1) * (dot_XX_XY + dot_YY_YX)
        + 4 / (m**3 * m1) * (Kt_XX_sum + Kt_YY_sum) * K_XY_sum
    )
    # Combine the components, scaled for the requested sample size var_at_m.
    var_est = (4 * (var_at_m - 2) / (var_at_m * (var_at_m - 1)) * zeta1_est
               + 2 / (var_at_m * (var_at_m - 1)) * zeta2_est)
    return mmd2, var_est
def precalc(data_path, output_path):
    """Precompute Inception statistics (mu, sigma, activations) for a folder of
    ground-truth JPEG images and save them as a compressed .npz at
    ``output_path``. These statistics are later loaded by ``fid_imgs`` as the
    reference for FID/KID.
    """
    print("CALCULATING THE GT STATS....")
    # data_path = 'reconstructed_test/eval' # set path to training set images
    # output_path = data_path+'/fid_stats.npz' # path for where to store the statistics
    # if you have downloaded and extracted
    # http://download.tensorflow.org/models/image/inception-2015-12-05.tgz
    # set this path to the directory where the extracted files are, otherwise
    # just set it to None and the script will later download the files for you
    inception_path = None
    print("check for inception model..", end=" ", flush=True)
    inception_path = fid.check_or_download_inception(inception_path) # download inception if necessary
    print("ok")
    # loads all images into memory (this might require a lot of RAM!)
    print("load images..", end=" " , flush=True)
    image_list = glob.glob(os.path.join(data_path, '*.jpg'))
    if len(image_list) == 0:
        # Nothing to do without ground-truth images; bail out silently.
        print("No images in directory ", data_path)
        return
    images = np.array([imageio.imread(str(fn),as_gray=False, pilmode="RGB").astype(np.float32) for fn in image_list])
    print("%d images found and loaded" % len(images))
    print("create inception graph..", end=" ", flush=True)
    fid.create_inception_graph(inception_path)  # load the graph into the current TF graph
    print("ok")
    print("calculte FID stats..", end=" ", flush=True)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        # mu/sigma are the FID statistics; raw activations are kept for KID.
        mu, sigma, acts = fid.calculate_activation_statistics(images, sess, batch_size=BATCH_SIZE)
        np.savez_compressed(output_path, mu=mu, sigma=sigma, activations=acts)
    print("finished")
def fid_imgs(cfg):
    """Evaluate FID/KID (and optionally PSNR/SSIM) for generated images.

    Loads precomputed ground-truth statistics from ``cfg.stats_filename``,
    then for each comma-separated directory in ``cfg.image_path`` extracts
    Inception activations and computes the metrics enabled by the ``cfg.fid``
    / ``cfg.kid`` / ``cfg.psnr`` flags. Results are appended to
    ``<cfg.exp_path>/results.txt``; the FID of the first directory is
    tracked as the "best FID" across runs and returned.
    """
    print("CALCULATING FID/KID scores")
    # Re-seed so KID subset sampling is identical on every invocation.
    rnd_seed = 12345
    random.seed(rnd_seed)
    np.random.seed(rnd_seed)
    tf.compat.v2.random.set_seed(rnd_seed)
    tf.random.set_random_seed(rnd_seed)
    inception_path = fid.check_or_download_inception(None)  # download inception network
    # load precalculated training set statistics
    print("Loading stats from:", cfg.stats_filename, ' ...', end='')
    f = np.load(cfg.stats_filename)
    mu_real, sigma_real = f['mu'][:], f['sigma'][:]
    activations_ref = None
    if 'activations' in f:
        # Raw reference activations are only present when precalc() stored them.
        activations_ref = f['activations']
        print(" reference activations #:", activations_ref.shape[0])
    f.close()
    print("done")
    # Epoch bookkeeping written by the training process (if available).
    fid_epoch = 0
    epoch_info_file = cfg.exp_path+'/fid-epoch.txt'
    if os.path.isfile(epoch_info_file):
        fid_epoch = open(epoch_info_file, 'rt').read()
    else:
        print("ERROR: couldnot find file:", epoch_info_file)
    # Best FID seen so far across runs, persisted on disk.
    best_fid_file = cfg.exp_path+'/fid-best.txt'
    best_fid = 1e10
    if os.path.isfile(best_fid_file):
        best_fid = float(open(best_fid_file, 'rt').read())
        print("Best FID: "+str(best_fid))
    # Optional precision/recall record appended to the result line.
    pr = None
    pr_file = cfg.exp_path+'/pr.txt'
    if os.path.isfile(pr_file):
        pr = open(pr_file).read()
        print("PR: "+str(pr))
    # `rec` accumulates the space-separated fields of one results.txt line.
    rec = []
    rec.append(fid_epoch)
    rec.append('nref:'+str(activations_ref.shape[0]))
    fid.create_inception_graph(inception_path)  # load the graph into the current TF graph
    dirs = cfg.image_path.split(',')
    first_fid = None
    for dir in dirs:
        print("Working on:",dir)
        test_name = dir.split('/')[-1]
        rec.append(test_name)
        # loads all images into memory (this might require a lot of RAM!)
        image_list = glob.glob(os.path.join(dir, '*.jpg'))
        image_list = image_list + glob.glob(os.path.join(dir, '*.png'))
        image_list.sort()
        print("Loading images:", len(image_list), ' ...', end='')
        images = np.array([imageio.imread(str(fn),as_gray=False, pilmode="RGB").astype(np.float32) for fn in image_list])
        print("done")
        print("Extracting features ", end='')
        os.environ['CUDA_VISIBLE_DEVICES'] = '1'
        with tf.compat.v1.Session() as sess:
            sess.run(tf.compat.v1.global_variables_initializer())
            mu_gen, sigma_gen, activations = fid.calculate_activation_statistics(images, sess, batch_size=BATCH_SIZE)
        print("Extracted activations:", activations.shape[0])
        rec.append('ntest:'+str(activations.shape[0]))
        if cfg.fid:
            # Calculate FID
            print("Calculating FID.....")
            fid_value = fid.calculate_frechet_distance(mu_gen, sigma_gen, mu_real, sigma_real)
            rec.append('fid:'+str(fid_value))
            if first_fid is None:
                first_fid = fid_value
                # If the first directory's FID improves on the stored best,
                # persist it and back up the corresponding model checkpoint.
                if best_fid > first_fid and fid_epoch != 0:
                    epoch = int(fid_epoch.split(' ')[0].split(':')[1])
                    print("Storing best FID model. Epoch: "+str(epoch)+" Current FID: "+str(best_fid)+" new: "+str(first_fid))
                    best_fid = first_fid
                    # Store best fid & weights (note: reuses the name `f`; the npz handle was closed above)
                    with open(best_fid_file, 'wt') as f:
                        f.write(str(first_fid))
                    model_file = cfg.exp_path+'/models/weights-'+str(epoch)+'.cp'
                    backup_model_file = cfg.exp_path+'/models/'+str(epoch)+'.cp'
                    os.system('cp ' + model_file + ' '+ backup_model_file)
        if cfg.kid:
            # Calculate KID
            # Parameters:
            print("Calculating KID...")
            mmd_degree=3
            mmd_gamma=None
            mmd_coef0=1
            mmd_var = False
            mmd_subsets=100
            mmd_subset_size=1000
            ret = polynomial_mmd_averages(
                activations, activations_ref, degree=mmd_degree, gamma=mmd_gamma,
                coef0=mmd_coef0, ret_var=mmd_var,
                n_subsets=mmd_subsets, subset_size=mmd_subset_size)
            if mmd_var:
                mmd2s, vars = ret
            else:
                mmd2s = ret
            # KID is reported as mean/std of the per-subset MMD^2 estimates.
            kid_value = mmd2s.mean()
            kid_value_std = mmd2s.std()
            rec.append('kid_mean:'+str(kid_value))
            rec.append('kid_std:'+str(kid_value_std))
        if cfg.psnr and test_name == 'reco':
            # PSNR/SSIM only make sense for reconstructions ('reco') compared
            # one-to-one against the ground-truth images in cfg.stats_path.
            image_list = glob.glob(os.path.join(cfg.stats_path, '*.jpg'))
            image_list.sort()
            if len(image_list) == 0:
                print("No images in directory ", cfg.stats_path)
                return
            images_gt = np.array([imageio.imread(str(fn),as_gray=False, pilmode="RGB").astype(np.float32) for fn in image_list])
            print("%d images found and loaded" % len(images_gt))
            print("Calculating PSNR...")
            psnr_val = psnr(images_gt, images)
            print("Calculating SSIM...")
            ssim_val = ssim(images_gt, images)
            print('PSNR:', psnr_val, 'SSIM:', ssim_val)
            rec.append('psnr:'+str(psnr_val))
            rec.append('ssim:'+str(ssim_val))
        print(' '.join(rec))
    if pr is not None:
        rec.append(pr)
        print(' '.join(rec))
    # Write out results
    with open(cfg.exp_path+'/results.txt', 'a+') as f:
        f.write(' '.join(rec)+'\n')
    return first_fid
class EvalConfig:
    """Bare attribute bag for evaluation settings.

    Callers construct it empty and attach attributes directly
    (paths, metric flags, filenames) before passing it to ``fid_imgs``.
    """

    def __init__(self):
        # Nothing to initialise; attributes are assigned by the caller.
        pass
#------------------------------------------------------------------
if __name__ == "__main__":
    # Defaults; both can be overridden on the command line.
    image_path = 'generated/samples/' # set path to some generated images
    stats_path = 'reconstructed_test/eval' # set path to training set images
    stats_path = None
    # Crude key=value CLI parsing: "s=<GT image dir>" and "t=<test image dirs>".
    for arg in sys.argv:
        toks = arg.split('=')
        if toks[0] =='s':
            stats_path=toks[1]
        if toks[0] =='t':
            image_path=toks[1]
    if stats_path is None:
        # Print help; the ground-truth stats directory is mandatory.
        print("Help")
        print(sys.argv[0], " s=<path_to_GT_images> t=<path_to_test_images>")
        print("\t File fid_stats.npz with the GT stats will be created in the dir with GT iamges")
        sys.exit(0)
    print('stats_path:', stats_path)
    print('image_path:', image_path)
    stats_filename = stats_path+'/fid_stats.npz'
    # Assemble the evaluation configuration consumed by fid_imgs().
    cfg = EvalConfig()
    cfg.exp_path = '.'
    cfg.image_path = image_path
    cfg.stats_path = stats_path
    cfg.stats_filename = stats_filename
    cfg.kid = True
    cfg.fid = True
    cfg.psnr = False
    eval_init()
    # eval_init()
    # eval_init()
    # Precompute the GT Inception statistics once; reused on later runs.
    if not os.path.isfile(stats_filename):
        precalc(stats_path, stats_filename)
    # sys.exit(0)
    fid_imgs(cfg)
| 34.957816 | 128 | 0.602712 |
acef24261df7fa83f655069ca0ebe2b9ecc1034c | 1,223 | py | Python | app/views.py | austinkaruru/News-highlights | 7da1ab32ec21ce6383c22323aaf74c1c1e0f6a7c | [
"MIT"
] | null | null | null | app/views.py | austinkaruru/News-highlights | 7da1ab32ec21ce6383c22323aaf74c1c1e0f6a7c | [
"MIT"
] | null | null | null | app/views.py | austinkaruru/News-highlights | 7da1ab32ec21ce6383c22323aaf74c1c1e0f6a7c | [
"MIT"
] | null | null | null | # from flask import render_template
# from app import app
# from flask import render_template
# from . requests import get_news, get_news1
# @app.route('/news/<int:news_id>')
# def news(news_id):
# return render_template('news.html', id=news_id)
# @app.route('/')
# def index():
# '''
# View root page function that returns the index page and its data
# '''
# # category = get_news('business')
# # print(category)
# # general = get_news1('sports')
# title = 'Home - Welcome to News highlights 2!!'
# message = 'Ola Amigo'
# return render_template('index.html', message=message, title=title)
# from . requests import get_news, get_news1
# # @app.route('/news/<int:news_id>')
# # def news(news_id):
# # return render_template('news.html', id=news_id)
# # @app.route('/')
# # def index():
# # '''
# # View root page function that returns the index page and its data
# # '''
# # # category = get_news('business')
# # # print(category)
# # general = get_news('general')
# # title = 'Home - Welcome to News highlights 2!!'
# # message = 'Ola Amigo'
# # return render_template('index.html', message=message, title=title, general=general)
| 26.586957 | 91 | 0.627964 |
acef24bc54d356a6277774770d6444c3051c0cbc | 105 | py | Python | 6 kyu/Easy Diagonal.py | mwk0408/codewars_solutions | 9b4f502b5f159e68024d494e19a96a226acad5e5 | [
"MIT"
] | 6 | 2020-09-03T09:32:25.000Z | 2020-12-07T04:10:01.000Z | 6 kyu/Easy Diagonal.py | mwk0408/codewars_solutions | 9b4f502b5f159e68024d494e19a96a226acad5e5 | [
"MIT"
] | 1 | 2021-12-13T15:30:21.000Z | 2021-12-13T15:30:21.000Z | 6 kyu/Easy Diagonal.py | mwk0408/codewars_solutions | 9b4f502b5f159e68024d494e19a96a226acad5e5 | [
"MIT"
] | null | null | null | from math import factorial
def diagonal(n, p):
    """Return the p-th entry of the n-th diagonal, i.e. the binomial
    coefficient C(n+1, p+1) = (n+1)! / ((p+1)! * (n-p)!)."""
    numerator = factorial(n + 1)
    denominator = factorial(p + 1) * factorial(n - p)
    return numerator // denominator
acef24ebfe42ca2312d3b2297fa82633e84c2a70 | 1,874 | py | Python | tests/functional/services/user/user_service/settings.py | arareko/pysoa | a90e428558500cf692f7f6e33fd358dd2779c328 | [
"Apache-2.0"
] | 91 | 2017-05-08T22:41:33.000Z | 2022-02-09T11:37:07.000Z | tests/functional/services/user/user_service/settings.py | arareko/pysoa | a90e428558500cf692f7f6e33fd358dd2779c328 | [
"Apache-2.0"
] | 63 | 2017-06-14T20:08:49.000Z | 2021-06-16T23:08:25.000Z | tests/functional/services/user/user_service/settings.py | arareko/pysoa | a90e428558500cf692f7f6e33fd358dd2779c328 | [
"Apache-2.0"
] | 26 | 2017-10-13T23:23:13.000Z | 2022-01-11T16:58:17.000Z | from pysoa.common.transport.redis_gateway.constants import REDIS_BACKEND_TYPE_SENTINEL
# NOTE(review): hard-coded secret/passwords below look like functional-test
# fixtures (cf. 'functionalTestPassword' and the tests/functional path in the
# repo metadata) — confirm these values never reach a production deployment.
SECRET_KEY = 'aou8a1ud34pa8ofe4c8tce6geo78hu8o89hu8'
# MySQL backing store for the user service (utf8mb4 = full 4-byte UTF-8).
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql',
        'HOST': 'mysql.pysoa',
        'USER': 'root',
        'PASSWORD': 'functionalTestPassword',
        'NAME': 'user_service',
        'OPTIONS': {
            'charset': 'utf8mb4',
        },
        'CONN_MAX_AGE': 2,  # Django persistent-connection max age (seconds)
    },
}
# Three PySOA cache scopes backed by in-memory Django caches.
# NOTE(review): 'persistent' reuses the *process*-scoped backend, same as
# 'process' — confirm that is intentional and not a copy/paste slip.
CACHES = {
    'request': {
        'BACKEND': 'pysoa.server.django.cache.PySOARequestScopedMemoryCache',
        'LOCATION': 'cerberus-request',
    },
    'process': {
        'BACKEND': 'pysoa.server.django.cache.PySOAProcessScopedMemoryCache',
        'LOCATION': 'cerberus-process',
    },
    'persistent': {
        'BACKEND': 'pysoa.server.django.cache.PySOAProcessScopedMemoryCache',
        'LOCATION': 'cerberus-persistent',
    },
}
# Django's default cache alias maps to the request-scoped cache.
CACHES['default'] = CACHES['request']
# PySOA server transport: Redis gateway in Sentinel mode over TLS.
# '{{fid}}' in the heartbeat path is substituted by the server runner.
SOA_SERVER_SETTINGS = {
    'heartbeat_file': '/srv/user_service-{{fid}}.heartbeat',
    'middleware': [], # TODO
    'transport': {
        'path': 'pysoa.common.transport.redis_gateway.server:RedisServerTransport',
        'kwargs': {
            'backend_layer_kwargs': {
                'hosts': [
                    ('sentinel1.redis6.pysoa', 26379),
                    ('sentinel2.redis6.pysoa', 26379),
                    ('sentinel3.redis6.pysoa', 26379),
                ],
                'connection_kwargs': {
                    'username': 'user_service',
                    'password': 'rnE8W86nr8y362pQF2nc7RKS4UkBL8Yt',
                    'ssl_ca_certs': '/srv/run/tls/ca.crt',
                    'ssl_certfile': '/srv/run/tls/redis.crt',
                    'ssl_keyfile': '/srv/run/tls/redis.key',
                },
            },
            'backend_type': REDIS_BACKEND_TYPE_SENTINEL,
        },
    },
}
| 31.762712 | 86 | 0.546425 |
acef27aac9126283fbc595c9874d362dd1671684 | 3,712 | py | Python | code/ngram.py | edrebin/NLP-Course | 004af059e4a48b9086dc122d32c864799f1f16f1 | [
"Apache-2.0"
] | 9 | 2021-08-29T15:23:09.000Z | 2022-01-09T20:13:39.000Z | code/ngram.py | edrebin/NLP-Course | 004af059e4a48b9086dc122d32c864799f1f16f1 | [
"Apache-2.0"
] | 1 | 2022-03-08T11:05:29.000Z | 2022-03-08T13:48:32.000Z | code/ngram.py | edrebin/NLP-Course | 004af059e4a48b9086dc122d32c864799f1f16f1 | [
"Apache-2.0"
] | 2 | 2021-12-12T22:11:44.000Z | 2022-01-26T03:55:56.000Z | # Copyright 2021 Mario Graff Guerrero
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from text_models.dataset import TokenCount
from glob import glob
from collections import Counter
from typing import Iterable
class Read(object):
    """Stream chunks of text from Project Gutenberg files.

    ``read()`` yields training chunks; roughly 10% of the chunks are held
    out into ``self.test_set`` instead of being yielded.  When ``n_gram``
    is greater than 1, each chunk is padded with ``n_gram - 1`` start
    (``#p``) and end (``p#``) markers.
    """

    def __init__(self, fnames: list, n_gram: int=2) -> None:
        # Fixed seed so the train/test split is reproducible across runs.
        np.random.seed(0)
        self._fnames = fnames
        self.test_set = []
        self.n_gram = n_gram

    def read(self) -> Iterable:
        def process(lst: list) -> str:
            # Drain ``lst`` in place (the caller reuses the same list for the
            # next chunk) and join the accumulated lines into one string.
            # NOTE(review): lines are joined with no separator, so words at
            # line breaks are fused ("helloworld") — presumably acceptable for
            # character q-grams; confirm if word tokens are ever used.
            L = [lst.pop() for _ in range(len(lst))]
            L.reverse()
            line = "".join(L)
            if self.n_gram <= 1:
                return line
            frst = " ".join(["#p"] * (self.n_gram - 1))
            scnd = " ".join(["p#"] * (self.n_gram - 1))
            return "%s %s %s" % (frst, line, scnd)

        for fname in self._fnames:
            L = []
            # Bug fix: read via a context manager so the file handle is closed
            # deterministically instead of leaking until GC.
            with open(fname) as handle:
                lines = handle.readlines()
            for line in lines:
                if line.count("*** END OF THE PROJECT GUTENBERG"):
                    # Gutenberg license footer: flush the current chunk and
                    # stop reading this file.
                    if len(L):
                        if np.random.uniform() < 0.10:
                            self.test_set.append(process(L))
                        else:
                            yield process(L)
                    break
                if line == "\n" and len(L):
                    # Paragraph boundary: ~10% of paragraphs go to the
                    # held-out test set, the rest are yielded for training.
                    # Bug fix: ``continue`` must be conditional on the 10%
                    # branch — an unconditional ``continue`` here made the
                    # ``yield`` unreachable and silently dropped ~90% of
                    # paragraphs.
                    if np.random.uniform() < 0.10:
                        self.test_set.append(process(L))
                        continue
                    yield process(L)
                elif len(line):
                    _ = line.strip()
                    if len(_):
                        L.append(_)
            if len(L):
                # End of file: flush whatever is left.
                if np.random.uniform() < 0.10:
                    self.test_set.append(process(L))
                else:
                    yield process(L)
class LM(object):
    """Maximum-likelihood n-gram language model.

    ``data`` maps '~'-joined n-gram strings to counts.  With ``words=True``
    the model is a unigram model (one global denominator); otherwise each
    probability is conditioned on the (n-1)-token prefix of the n-gram.
    """

    def __init__(self, data, words: bool=False) -> None:
        self._words = words
        self._data = data
        if words:
            # Unigram model: a single global count as denominator.
            self.N = sum(data.values())
        else:
            self.__count_prefixes()

    def __count_prefixes(self) -> None:
        # For every (n-1)-gram prefix, accumulate the total count of all
        # n-grams that share it; used as the conditional denominator.
        prefix_totals = Counter()
        for ngram, count in self._data.items():
            prefix = "~".join(ngram.split("~")[:-1])
            prefix_totals[prefix] += count
        self.N = prefix_totals

    def log_prob(self, ngram: str) -> float:
        """Return the log-probability of ``ngram``; raise ValueError if the
        n-gram (or its prefix) was never observed."""
        count = self._data[ngram]
        if self._words:
            total = self.N
        else:
            prefix = "~".join(ngram.split("~")[:-1])
            total = self.N[prefix]
        if count and total:
            return np.log(count) - np.log(total)
        raise ValueError("ngram %s not found" % ngram)

    def prob(self, ngram: str) -> float:
        """Return the probability of ``ngram`` (``exp`` of ``log_prob``)."""
        return np.exp(self.log_prob(ngram))
# Character q-gram perplexity experiment: token_list=[-3] selects character
# 3-grams in the text_models tokenizer.
tm = TokenCount.textModel(token_list=[-3])
token = TokenCount(tokenizer=tm.tokenize)
# Read splits each book into chunks; ~10% are held out into read.test_set.
# n_gram is the positive q-gram size (token_list entries are negative).
read = Read(glob("books/*.txt"),
            n_gram=tm.token_list[0] * -1)
token.process(read.read())
# words=True only for unigrams (q == 1); otherwise a conditional model.
lm = LM(token.counter, words=tm.token_list[0] == -1)
logp = 0
# max_logp tracks the most negative log-probability seen so far; unseen
# n-grams (ValueError) are counted in cnt and penalized with that value
# afterwards — a simple out-of-vocabulary fallback.
max_logp, cnt = 0, 0
N = 0
for txt in read.test_set:
    for ngram in tm.tokenize(txt):
        N += 1
        try:
            _ = lm.log_prob(ngram)
            if _ < max_logp:
                max_logp = _
            logp -= _
        except ValueError:
            # n-gram unseen during training; penalized below.
            cnt += 1
logp -= max_logp * cnt
# Perplexity of the held-out test set: exp of mean negative log-probability.
pp = np.exp(logp / N)
pp | 31.193277 | 74 | 0.517241 |
acef2832a53248c2fadf923ebf724959dc9103e1 | 27,692 | py | Python | bmutest/dcload.py | taiwenko/python | c13c170405f4cac1d1c191413f8b61e21052d8b3 | [
"MIT"
] | null | null | null | bmutest/dcload.py | taiwenko/python | c13c170405f4cac1d1c191413f8b61e21052d8b3 | [
"MIT"
] | null | null | null | bmutest/dcload.py | taiwenko/python | c13c170405f4cac1d1c191413f8b61e21052d8b3 | [
"MIT"
] | 1 | 2019-08-30T04:00:34.000Z | 2019-08-30T04:00:34.000Z | '''
Open Source Initiative OSI - The MIT License:Licensing
Tue, 2006-10-31 04:56 - nelson
The MIT License
Copyright (c) 2009 BK Precision
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
This python module provides a functional interface to a B&K DC load
through the DCLoad object. This object can also be used as a COM
server by running this module as a script to register it. All the
DCLoad object methods return strings. All units into and out of the
DCLoad object's methods are in SI units.
See the documentation file that came with this script.
$RCSfile: dcload.py $
$Revision: 1.0 $
$Date: 2008/05/17 15:57:15 $
$Author: Don Peterson $
'''
from __future__ import division
import sys, time, serial
from string import join
try:
from win32com.server.exception import COMException
except:
pass
# Debugging information is set to stdout by default. You can change
# the out variable to another method to e.g. write to a different
# stream.
out = sys.stdout.write
nl = "\n"
class InstrumentException(Exception): pass
class InstrumentInterface:
    '''Provides the interface to a 26 byte instrument along with utility
    functions.

    Packet layout (26 bytes): 0xAA, address, command byte, payload,
    zero padding, checksum (sum of the first 25 bytes mod 256).
    This module targets Python 2: packets are built as byte strings via
    chr()/ord() and loops use xrange.
    '''
    debug = 0  # Set to 1 to see dumps of commands and responses
    length_packet = 26  # Number of bytes in a packet
    convert_current = 1e4  # Convert current in A to 0.1 mA
    convert_voltage = 1e3  # Convert voltage in V to mV
    convert_power = 1e3  # Convert power in W to mW
    convert_resistance = 1e3  # Convert resistance in ohm to mohm
    to_ms = 1000  # Converts seconds to ms
    # Number of settings storage registers
    lowest_register = 1
    highest_register = 25
    # Values for setting modes of CC, CV, CW, or CR
    modes = {"cc":0, "cv":1, "cw":2, "cr":3}

    def Initialize(self, com_port, baudrate, address=0):
        '''Open the serial port and remember the instrument address.'''
        # timeout=1 keeps SendCommand from blocking forever on a dead device.
        self.sp = serial.Serial(com_port, baudrate, timeout=1)
        # self.sp = serial.Serial(com_port-1, baudrate) # original code edited JPM 2014-11-20
        self.address = address

    def DumpCommand(self, bytes):
        '''Print out the contents of a 26 byte command.  Example:
            aa .. 20 01 .. .. .. .. .. ..
            .. .. .. .. .. .. .. .. .. ..
            .. .. .. .. .. cb
        '''
        assert(len(bytes) == self.length_packet)
        header = " "*3
        out(header)
        for i in xrange(self.length_packet):
            if i % 10 == 0 and i != 0:
                out(nl + header)
            if i % 5 == 0:
                out(" ")
            s = "%02x" % ord(bytes[i])
            if s == "00":
                # Use the decimal point character if you see an
                # unattractive printout on your machine.
                #s = "."*2
                # The following alternate character looks nicer
                # in a console window on Windows.
                s = chr(250)*2
            out(s)
        out(nl)

    def CommandProperlyFormed(self, cmd):
        '''Return 1 if a command is properly formed; otherwise, return 0.'''
        # Every opcode the 85xx protocol understands.
        commands = (
            0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29,
            0x2A, 0x2B, 0x2C, 0x2D, 0x2E, 0x2F, 0x30, 0x31, 0x32, 0x33,
            0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3A, 0x3B, 0x3C, 0x3D,
            0x3E, 0x3F, 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
            0x48, 0x49, 0x4A, 0x4B, 0x4C, 0x4D, 0x4E, 0x4F, 0x50, 0x51,
            0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5A, 0x5B,
            0x5C, 0x5D, 0x5E, 0x5F, 0x60, 0x61, 0x62, 0x63, 0x64, 0x65,
            0x66, 0x67, 0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x12
        )
        # Must be proper length
        if len(cmd) != self.length_packet:
            out("Command length = " + str(len(cmd)) + "-- should be " + \
                str(self.length_packet) + nl)
            return 0
        # First character must be 0xaa
        if ord(cmd[0]) != 0xaa:
            out("First byte should be 0xaa" + nl)
            return 0
        # Second character (address) must not be 0xff
        if ord(cmd[1]) == 0xff:
            out("Second byte cannot be 0xff" + nl)
            return 0
        # Third character must be valid command
        byte3 = "%02X" % ord(cmd[2])
        if ord(cmd[2]) not in commands:
            out("Third byte not a valid command: %s\n" % byte3)
            return 0
        # Calculate checksum and validate it
        checksum = self.CalculateChecksum(cmd)
        if checksum != ord(cmd[-1]):
            out("Incorrect checksum" + nl)
            return 0
        return 1

    def CalculateChecksum(self, cmd):
        '''Return the sum of the first 25 bytes in cmd modulo 256.'''
        assert((len(cmd) == self.length_packet - 1) or (len(cmd) == self.length_packet))
        checksum = 0
        for i in xrange(self.length_packet - 1):
            checksum += ord(cmd[i])
        checksum %= 256
        return checksum

    def StartCommand(self, byte):
        '''Return the 3 byte packet prefix: sync byte, address, opcode.'''
        return chr(0xaa) + chr(self.address) + chr(byte)

    def SendCommand(self, command):
        '''Sends the command to the serial stream and returns the 26 byte
        response.
        '''
        assert(len(command) == self.length_packet)
        self.sp.write(command)
        # NOTE(review): a serial timeout yields a short read and trips this
        # assert; consider raising InstrumentException instead.
        response = self.sp.read(self.length_packet)
        assert(len(response) == self.length_packet)
        return response

    def ResponseStatus(self, response):
        '''Return a message string about what the response meant.  The
        empty string means the response was OK.
        '''
        responses = {
            0x90 : "Wrong checksum",
            0xA0 : "Incorrect parameter value",
            0xB0 : "Command cannot be carried out",
            0xC0 : "Invalid command",
            0x80 : "",
        }
        assert(len(response) == self.length_packet)
        assert(ord(response[2]) == 0x12)  # 0x12 marks a status response
        return responses[ord(response[3])]

    def CodeInteger(self, value, num_bytes=4):
        '''Construct a little endian string for the indicated value.
        1, 2 and 4 byte integers are allowed.
        '''
        assert(num_bytes == 1 or num_bytes == 2 or num_bytes == 4)
        value = int(value)  # Make sure it's an integer
        s = chr(value & 0xff)
        if num_bytes >= 2:
            s += chr((value & (0xff << 8)) >> 8)
        if num_bytes == 4:
            s += chr((value & (0xff << 16)) >> 16)
            s += chr((value & (0xff << 24)) >> 24)
        # Bug fix: was assert(len(s) == 4), which failed for the documented
        # num_bytes values 1 and 2 (SetTransient encodes 2 byte timers).
        assert(len(s) == num_bytes)
        return s

    def DecodeInteger(self, str):
        '''Construct an integer from the little endian string.  1, 2, and 4 byte
        strings are the only ones allowed.
        '''
        assert(len(str) == 1 or len(str) == 2 or len(str) == 4)
        n = ord(str[0])
        if len(str) >= 2:
            n += (ord(str[1]) << 8)
        if len(str) == 4:
            n += (ord(str[2]) << 16)
            n += (ord(str[3]) << 24)
        return n

    def GetReserved(self, num_used):
        '''Construct a string of nul characters of such length to pad a
        command to one less than the packet size (leaves room for the
        checksum byte).  NOTE(review): apparently unused; Reserved() below
        is the variant the rest of the class calls.
        '''
        num = self.length_packet - num_used - 1
        assert(num > 0)
        return chr(0)*num

    def PrintCommandAndResponse(self, cmd, response, cmd_name):
        '''Print the command and its response if debugging is on.'''
        assert(cmd_name)
        if self.debug:
            out(cmd_name + " command:" + nl)
            self.DumpCommand(cmd)
            out(cmd_name + " response:" + nl)
            self.DumpCommand(response)

    def GetCommand(self, command, value, num_bytes=4):
        '''Construct the command with an integer value of 0, 1, 2, or
        4 bytes.
        '''
        cmd = self.StartCommand(command)
        if num_bytes > 0:
            r = num_bytes + 3
            cmd += self.CodeInteger(value)[:num_bytes] + self.Reserved(r)
        else:
            # Bug fix: was self.Reserved(0), which violates Reserved()'s own
            # num_used >= 3 assertion; the 3 byte prefix is already used.
            cmd += self.Reserved(3)
        cmd += chr(self.CalculateChecksum(cmd))
        assert(self.CommandProperlyFormed(cmd))
        return cmd

    def GetData(self, data, num_bytes=4):
        '''Extract the little endian integer from the data and return it.'''
        assert(len(data) == self.length_packet)
        if num_bytes == 1:
            return ord(data[3])
        elif num_bytes == 2:
            return self.DecodeInteger(data[3:5])
        elif num_bytes == 4:
            return self.DecodeInteger(data[3:7])
        else:
            raise Exception("Bad number of bytes: %d" % num_bytes)

    def Reserved(self, num_used):
        '''Return the nul padding that fills the packet after num_used bytes,
        leaving one byte free for the checksum.'''
        assert(num_used >= 3 and num_used < self.length_packet - 1)
        return chr(0)*(self.length_packet - num_used - 1)

    def SendIntegerToLoad(self, byte, value, msg, num_bytes=4):
        '''Send the indicated command along with value encoded as an integer
        of the specified size.  Return the instrument's response status.
        '''
        cmd = self.GetCommand(byte, value, num_bytes)
        response = self.SendCommand(cmd)
        self.PrintCommandAndResponse(cmd, response, msg)
        return self.ResponseStatus(response)

    def GetIntegerFromLoad(self, cmd_byte, msg, num_bytes=4):
        '''Construct a command from the byte in cmd_byte, send it, get
        the response, then decode the response into an integer with the
        number of bytes in num_bytes.  msg is the debugging string for
        the printout.  Return the integer.
        '''
        assert(num_bytes == 1 or num_bytes == 2 or num_bytes == 4)
        cmd = self.StartCommand(cmd_byte)
        cmd += self.Reserved(3)
        cmd += chr(self.CalculateChecksum(cmd))
        assert(self.CommandProperlyFormed(cmd))
        response = self.SendCommand(cmd)
        self.PrintCommandAndResponse(cmd, response, msg)
        return self.DecodeInteger(response[3:3 + num_bytes])
class DCLoad(InstrumentInterface):
    '''COM-visible interface to a B&K 85xx DC load.

    Each public method frames one 26 byte packet (see InstrumentInterface),
    sends it over the serial port, and returns either the decoded value or
    the instrument's response status string (the empty string means OK).
    All quantities use SI units (A, V, W, ohm, seconds).
    '''
    _reg_clsid_ = "{943E2FA3-4ECE-448A-93AF-9ECAEB49CA1B}"
    _reg_desc_ = "B&K DC Load COM Server"
    _reg_progid_ = "BKServers.DCLoad85xx"  # External name
    _public_attrs_ = ["debug"]
    _public_methods_ = [
        "DisableLocalControl",
        "EnableLocalControl",
        "GetBatteryTestVoltage",
        "GetCCCurrent",
        "GetCRResistance",
        "GetCVVoltage",
        "GetCWPower",
        "GetFunction",
        "GetInputValues",
        "GetLoadOnTimer",
        "GetLoadOnTimerState",
        "GetMaxCurrent",
        "GetMaxPower",
        "GetMaxVoltage",
        "GetMode",
        "GetProductInformation",
        "GetRemoteSense",
        "GetTransient",
        "GetTriggerSource",
        "Initialize",
        "RecallSettings",
        "SaveSettings",
        "SetBatteryTestVoltage",
        "SetCCCurrent",
        "SetCRResistance",
        "SetCVVoltage",
        "SetCWPower",
        "SetCommunicationAddress",
        "SetFunction",
        "SetLoadOnTimer",
        "SetLoadOnTimerState",
        "SetLocalControl",
        "SetMaxCurrent",
        "SetMaxPower",
        "SetMaxVoltage",
        "SetMode",
        "SetRemoteControl",
        "SetRemoteSense",
        "SetTransient",
        "SetTriggerSource",
        "TimeNow",
        "TriggerLoad",
        "TurnLoadOff",
        "TurnLoadOn",
    ]

    def Initialize(self, com_port, baudrate, address=0):
        "Initialize the base class"
        InstrumentInterface.Initialize(self, com_port, baudrate, address)

    def TimeNow(self):
        "Returns a string containing the current time"
        return time.asctime()

    def TurnLoadOn(self):
        "Turns the load on"
        msg = "Turn load on"
        on = 1
        return self.SendIntegerToLoad(0x21, on, msg, num_bytes=1)

    def TurnLoadOff(self):
        "Turns the load off"
        msg = "Turn load off"
        off = 0
        return self.SendIntegerToLoad(0x21, off, msg, num_bytes=1)

    def SetRemoteControl(self):
        "Sets the load to remote control"
        msg = "Set remote control"
        remote = 1
        return self.SendIntegerToLoad(0x20, remote, msg, num_bytes=1)

    def SetLocalControl(self):
        "Sets the load to local control"
        msg = "Set local control"
        local = 0
        return self.SendIntegerToLoad(0x20, local, msg, num_bytes=1)

    def SetMaxCurrent(self, current):
        "Sets the maximum current the load will sink"
        msg = "Set max current"
        return self.SendIntegerToLoad(0x24, current*self.convert_current, msg, num_bytes=4)

    def GetMaxCurrent(self):
        "Returns the maximum current the load will sink"
        msg = "Get max current"  # fixed debug label (was "Set max current")
        return self.GetIntegerFromLoad(0x25, msg, num_bytes=4)/self.convert_current

    def SetMaxVoltage(self, voltage):
        "Sets the maximum voltage the load will allow"
        msg = "Set max voltage"
        return self.SendIntegerToLoad(0x22, voltage*self.convert_voltage, msg, num_bytes=4)

    def GetMaxVoltage(self):
        "Gets the maximum voltage the load will allow"
        msg = "Get max voltage"
        return self.GetIntegerFromLoad(0x23, msg, num_bytes=4)/self.convert_voltage

    def SetMaxPower(self, power):
        "Sets the maximum power the load will allow"
        msg = "Set max power"
        return self.SendIntegerToLoad(0x26, power*self.convert_power, msg, num_bytes=4)

    def GetMaxPower(self):
        "Gets the maximum power the load will allow"
        msg = "Get max power"
        return self.GetIntegerFromLoad(0x27, msg, num_bytes=4)/self.convert_power

    def SetMode(self, mode):
        "Sets the mode (constant current, constant voltage, etc.)"
        if mode.lower() not in self.modes:
            raise Exception("Unknown mode")
        msg = "Set mode"
        return self.SendIntegerToLoad(0x28, self.modes[mode.lower()], msg, num_bytes=1)

    def GetMode(self):
        "Gets the mode (constant current, constant voltage, etc.)"
        msg = "Get mode"
        mode = self.GetIntegerFromLoad(0x29, msg, num_bytes=1)
        modes_inv = {0:"cc", 1:"cv", 2:"cw", 3:"cr"}
        return modes_inv[mode]

    def SetCCCurrent(self, current):
        "Sets the constant current mode's current level"
        msg = "Set CC current"
        return self.SendIntegerToLoad(0x2A, current*self.convert_current, msg, num_bytes=4)

    def GetCCCurrent(self):
        "Gets the constant current mode's current level"
        msg = "Get CC current"
        return self.GetIntegerFromLoad(0x2B, msg, num_bytes=4)/self.convert_current

    def SetCVVoltage(self, voltage):
        "Sets the constant voltage mode's voltage level"
        msg = "Set CV voltage"
        return self.SendIntegerToLoad(0x2C, voltage*self.convert_voltage, msg, num_bytes=4)

    def GetCVVoltage(self):
        "Gets the constant voltage mode's voltage level"
        msg = "Get CV voltage"
        return self.GetIntegerFromLoad(0x2D, msg, num_bytes=4)/self.convert_voltage

    def SetCWPower(self, power):
        "Sets the constant power mode's power level"
        msg = "Set CW power"
        return self.SendIntegerToLoad(0x2E, power*self.convert_power, msg, num_bytes=4)

    def GetCWPower(self):
        "Gets the constant power mode's power level"
        msg = "Get CW power"
        return self.GetIntegerFromLoad(0x2F, msg, num_bytes=4)/self.convert_power

    def SetCRResistance(self, resistance):
        "Sets the constant resistance mode's resistance level"
        msg = "Set CR resistance"
        return self.SendIntegerToLoad(0x30, resistance*self.convert_resistance, msg, num_bytes=4)

    def GetCRResistance(self):
        "Gets the constant resistance mode's resistance level"
        msg = "Get CR resistance"
        return self.GetIntegerFromLoad(0x31, msg, num_bytes=4)/self.convert_resistance

    def SetTransient(self, mode, A, A_time_s, B, B_time_s, operation="continuous"):
        '''Sets up the transient operation mode.  mode is one of
        "CC", "CV", "CW", or "CR".
        '''
        if mode.lower() not in self.modes:
            raise Exception("Unknown mode")
        opcodes = {"cc":0x32, "cv":0x34, "cw":0x36, "cr":0x38}
        if mode.lower() == "cc":
            const = self.convert_current
        elif mode.lower() == "cv":
            const = self.convert_voltage
        elif mode.lower() == "cw":
            const = self.convert_power
        else:
            const = self.convert_resistance
        cmd = self.StartCommand(opcodes[mode.lower()])
        cmd += self.CodeInteger(A*const, num_bytes=4)
        cmd += self.CodeInteger(A_time_s*self.to_ms, num_bytes=2)
        cmd += self.CodeInteger(B*const, num_bytes=4)
        cmd += self.CodeInteger(B_time_s*self.to_ms, num_bytes=2)
        transient_operations = {"continuous":0, "pulse":1, "toggled":2}
        cmd += self.CodeInteger(transient_operations[operation], num_bytes=1)
        cmd += self.Reserved(16)
        cmd += chr(self.CalculateChecksum(cmd))
        assert(self.CommandProperlyFormed(cmd))
        response = self.SendCommand(cmd)
        self.PrintCommandAndResponse(cmd, response, "Set %s transient" % mode)
        return self.ResponseStatus(response)

    def GetTransient(self, mode):
        "Gets the transient mode settings"
        if mode.lower() not in self.modes:
            raise Exception("Unknown mode")
        opcodes = {"cc":0x33, "cv":0x35, "cw":0x37, "cr":0x39}
        cmd = self.StartCommand(opcodes[mode.lower()])
        cmd += self.Reserved(3)
        cmd += chr(self.CalculateChecksum(cmd))
        assert(self.CommandProperlyFormed(cmd))
        response = self.SendCommand(cmd)
        self.PrintCommandAndResponse(cmd, response, "Get %s transient" % mode)
        # Payload layout: A (4), A timer ms (2), B (4), B timer ms (2), op (1).
        A = self.DecodeInteger(response[3:7])
        A_timer_ms = self.DecodeInteger(response[7:9])
        B = self.DecodeInteger(response[9:13])
        B_timer_ms = self.DecodeInteger(response[13:15])
        operation = self.DecodeInteger(response[15])
        time_const = 1e3
        transient_operations_inv = {0:"continuous", 1:"pulse", 2:"toggled"}
        if mode.lower() == "cc":
            return str((A/self.convert_current, A_timer_ms/time_const,
                B/self.convert_current, B_timer_ms/time_const,
                transient_operations_inv[operation]))
        elif mode.lower() == "cv":
            return str((A/self.convert_voltage, A_timer_ms/time_const,
                B/self.convert_voltage, B_timer_ms/time_const,
                transient_operations_inv[operation]))
        elif mode.lower() == "cw":
            return str((A/self.convert_power, A_timer_ms/time_const,
                B/self.convert_power, B_timer_ms/time_const,
                transient_operations_inv[operation]))
        else:
            return str((A/self.convert_resistance, A_timer_ms/time_const,
                B/self.convert_resistance, B_timer_ms/time_const,
                transient_operations_inv[operation]))

    def SetBatteryTestVoltage(self, min_voltage):
        "Sets the battery test voltage"
        msg = "Set battery test voltage"
        return self.SendIntegerToLoad(0x4E, min_voltage*self.convert_voltage, msg, num_bytes=4)

    def GetBatteryTestVoltage(self):
        "Gets the battery test voltage"
        msg = "Get battery test voltage"
        return self.GetIntegerFromLoad(0x4F, msg, num_bytes=4)/self.convert_voltage

    def SetLoadOnTimer(self, time_in_s):
        "Sets the time in seconds that the load will be on"
        msg = "Set load on timer"
        return self.SendIntegerToLoad(0x50, time_in_s, msg, num_bytes=2)

    def GetLoadOnTimer(self):
        "Gets the time in seconds that the load will be on"
        msg = "Get load on timer"
        return self.GetIntegerFromLoad(0x51, msg, num_bytes=2)

    def SetLoadOnTimerState(self, enabled=0):
        "Enables or disables the load on timer state"
        msg = "Set load on timer state"
        # Bug fix: was opcode 0x50, which is SetLoadOnTimer's opcode (it would
        # overwrite the timer *value* with 0/1).  The timer-state setter is
        # 0x52; its getter below uses 0x53.
        return self.SendIntegerToLoad(0x52, enabled, msg, num_bytes=1)

    def GetLoadOnTimerState(self):
        "Gets the load on timer state"
        msg = "Get load on timer"
        state = self.GetIntegerFromLoad(0x53, msg, num_bytes=1)
        if state == 0:
            return "disabled"
        else:
            return "enabled"

    def SetCommunicationAddress(self, address=0):
        '''Sets the communication address.  Note: this feature is
        not currently supported.  The communication address should always
        be set to 0.
        '''
        msg = "Set communication address"
        return self.SendIntegerToLoad(0x54, address, msg, num_bytes=1)

    def EnableLocalControl(self):
        "Enable local control (i.e., key presses work) of the load"
        msg = "Enable local control"
        enabled = 1
        return self.SendIntegerToLoad(0x55, enabled, msg, num_bytes=1)

    def DisableLocalControl(self):
        "Disable local control of the load"
        msg = "Disable local control"
        disabled = 0
        return self.SendIntegerToLoad(0x55, disabled, msg, num_bytes=1)

    def SetRemoteSense(self, enabled=0):
        "Enable or disable remote sensing"
        msg = "Set remote sense"
        return self.SendIntegerToLoad(0x56, enabled, msg, num_bytes=1)

    def GetRemoteSense(self):
        "Get the state of remote sensing"
        msg = "Get remote sense"
        return self.GetIntegerFromLoad(0x57, msg, num_bytes=1)

    def SetTriggerSource(self, source="immediate"):
        '''Set how the instrument will be triggered.
        "immediate" means triggered from the front panel.
        "external" means triggered by a TTL signal on the rear panel.
        "bus" means a software trigger (see TriggerLoad()).
        '''
        trigger = {"immediate":0, "external":1, "bus":2}
        if source not in trigger:
            raise Exception("Trigger type %s not recognized" % source)
        msg = "Set trigger type"
        # Bug fix: was opcode 0x54, which is SetCommunicationAddress's opcode.
        # The trigger-source setter is 0x58; its getter below uses 0x59.
        return self.SendIntegerToLoad(0x58, trigger[source], msg, num_bytes=1)

    def GetTriggerSource(self):
        "Get how the instrument will be triggered"
        msg = "Get trigger source"
        t = self.GetIntegerFromLoad(0x59, msg, num_bytes=1)
        trigger_inv = {0:"immediate", 1:"external", 2:"bus"}
        return trigger_inv[t]

    def TriggerLoad(self):
        '''Provide a software trigger.  This is only of use when the trigger
        mode is set to "bus".
        '''
        cmd = self.StartCommand(0x5A)
        cmd += self.Reserved(3)
        cmd += chr(self.CalculateChecksum(cmd))
        assert(self.CommandProperlyFormed(cmd))
        response = self.SendCommand(cmd)
        self.PrintCommandAndResponse(cmd, response, "Trigger load (trigger = bus)")
        return self.ResponseStatus(response)

    def SaveSettings(self, register=0):
        "Save instrument settings to a register"
        assert(self.lowest_register <= register <= self.highest_register)
        msg = "Save to register %d" % register
        return self.SendIntegerToLoad(0x5B, register, msg, num_bytes=1)

    def RecallSettings(self, register=0):
        "Restore instrument settings from a register"
        assert(self.lowest_register <= register <= self.highest_register)
        cmd = self.GetCommand(0x5C, register, num_bytes=1)
        response = self.SendCommand(cmd)
        self.PrintCommandAndResponse(cmd, response, "Recall register %d" % register)
        return self.ResponseStatus(response)

    def SetFunction(self, function="fixed"):
        '''Set the function (type of operation) of the load.
        function is one of "fixed", "short", "transient", or "battery".
        Note "list" is intentionally left out for now.
        '''
        msg = "Set function to %s" % function
        functions = {"fixed":0, "short":1, "transient":2, "battery":4}
        return self.SendIntegerToLoad(0x5D, functions[function], msg, num_bytes=1)

    def GetFunction(self):
        "Get the function (type of operation) of the load"
        msg = "Get function"
        fn = self.GetIntegerFromLoad(0x5E, msg, num_bytes=1)
        functions_inv = {0:"fixed", 1:"short", 2:"transient", 4:"battery"}
        return functions_inv[fn]

    def GetInputValues(self):
        '''Returns voltage in V, current in A, and power in W, op_state byte,
        and demand_state byte (tab-separated string).
        '''
        cmd = self.StartCommand(0x5F)
        cmd += self.Reserved(3)
        cmd += chr(self.CalculateChecksum(cmd))
        assert(self.CommandProperlyFormed(cmd))
        response = self.SendCommand(cmd)
        self.PrintCommandAndResponse(cmd, response, "Get input values")
        voltage = self.DecodeInteger(response[3:7])/self.convert_voltage
        current = self.DecodeInteger(response[7:11])/self.convert_current
        power = self.DecodeInteger(response[11:15])/self.convert_power
        op_state = hex(self.DecodeInteger(response[15]))
        demand_state = hex(self.DecodeInteger(response[16:18]))
        s = [str(voltage) + " V", str(current) + " A", str(power) + " W", str(op_state), str(demand_state)]
        return join(s, "\t")

    # Returns model number, serial number, and firmware version number
    def GetProductInformation(self):
        "Returns model number, serial number, and firmware version"
        cmd = self.StartCommand(0x6A)
        cmd += self.Reserved(3)
        cmd += chr(self.CalculateChecksum(cmd))
        assert(self.CommandProperlyFormed(cmd))
        response = self.SendCommand(cmd)
        self.PrintCommandAndResponse(cmd, response, "Get product info")
        model = response[3:8]
        # Firmware version is BCD-ish: high byte '.' low byte.
        fw = hex(ord(response[9]))[2:] + "."
        fw += hex(ord(response[8]))[2:]
        serial_number = response[10:20]
        return join((str(model), str(serial_number), str(fw)), "\t")
def Register(pyclass=DCLoad):
    '''Register pyclass as a COM server (Windows only; requires pywin32).
    Honors win32com command-line flags such as --unregister.'''
    from win32com.server.register import UseCommandLine
    UseCommandLine(pyclass)
def Unregister(classid=DCLoad._reg_clsid_):
    '''Remove the COM server registration for classid (Windows only).'''
    from win32com.server.register import UnregisterServer
    UnregisterServer(classid)
# Run this script to register the COM server.  Use the command line
# argument --unregister to unregister the server.
# (Registration only works on Windows with the pywin32 package installed.)
if __name__ == '__main__':
    Register()
| 44.3072 | 108 | 0.608479 |
acef2926eba1eff8a4516245a448a1c737e95b61 | 19 | py | Python | testing/app/repositories.py | dry-python/dependencies | 1a8bba41ab42d0b5249b36471f5300d9faba81e7 | [
"BSD-2-Clause"
] | 175 | 2018-07-21T13:04:44.000Z | 2020-05-27T15:31:06.000Z | tests/helpers/app/repositories.py | proofit404/dependencies | 204e0cfadca801d64857f24aa4c74e7939ed9af0 | [
"BSD-2-Clause"
] | 325 | 2016-05-16T11:16:11.000Z | 2022-03-04T00:45:57.000Z | testing/app/repositories.py | dry-python/dependencies | 1a8bba41ab42d0b5249b36471f5300d9faba81e7 | [
"BSD-2-Clause"
] | 18 | 2018-06-17T09:33:16.000Z | 2020-05-20T18:12:30.000Z | create_user = None
| 9.5 | 18 | 0.789474 |
acef295c738022dc70abb9f3d5caf970d74baa10 | 458 | py | Python | report/parsers/regex.py | Kellel/reports | 975e99396301e87176a38dd440a273c9319b3e22 | [
"BSD-3-Clause"
] | null | null | null | report/parsers/regex.py | Kellel/reports | 975e99396301e87176a38dd440a273c9319b3e22 | [
"BSD-3-Clause"
] | null | null | null | report/parsers/regex.py | Kellel/reports | 975e99396301e87176a38dd440a273c9319b3e22 | [
"BSD-3-Clause"
] | null | null | null |
class REGEX:
    """Reusable regular-expression fragments, each wrapped in one capture group.

    Intended to be composed into larger report-parsing patterns via ``join``.
    """
    date = r"(\d{4}-\d{2}-\d{2} [A-Z]{3})"                        # 2020-01-31 UTC
    datetime = r"(\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2} [A-Z]{3})"  # with time + tz
    datetime2 = r"(\d{2}\/\d{2}\/\d{4} \d{1,2}:\d{2})"            # MM/DD/YYYY H:MM
    datetime3 = r"(\d{2}\/\d{2}\/\d{4})"                          # MM/DD/YYYY
    string_with_space = r"([\S ]*)"
    string = r"(\S*)"
    sku = r"([A-Z0-9]*)"
    integer = r"(\d+)"
    currency = r"([A-Z]{3})"   # ISO-4217 style code, e.g. USD
    float = r"(\d+\.\d+)"      # shadows builtin ``float`` in the class namespace only

    @staticmethod
    def join(list, sep=None):
        """Join the pattern fragments in *list* with *sep*.

        Bug fix: the default ``sep=None`` previously crashed with
        ``AttributeError: 'NoneType' object has no attribute 'join'``;
        ``None`` now means "no separator".
        """
        if sep is None:
            sep = ""
        return sep.join(list)
| 26.941176 | 64 | 0.427948 |
acef29ef88364ce97709de8265baa8c1a904a70e | 862 | py | Python | crm_th/crm_th/urls.py | covrom/django_sample | f659588373fe8c95939b3ef14d2733d6b95384ba | [
"MIT"
] | null | null | null | crm_th/crm_th/urls.py | covrom/django_sample | f659588373fe8c95939b3ef14d2733d6b95384ba | [
"MIT"
] | null | null | null | crm_th/crm_th/urls.py | covrom/django_sample | f659588373fe8c95939b3ef14d2733d6b95384ba | [
"MIT"
] | null | null | null | """crm_th URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from material.frontend import urls as frontend_urls
# URL routes for the CRM project: Django admin plus the django-material
# frontend mounted as a catch-all at the site root.
urlpatterns = [
    url(r'^admin/', admin.site.urls),
    # Empty pattern: every non-admin path is handled by the material frontend.
    url(r'', include(frontend_urls)),
]
| 35.916667 | 79 | 0.711137 |
acef2a2519bb619c1caef09812f2a38bc3728690 | 3,955 | py | Python | src/features/knowledge/hierarchy.py | lenaWitterauf/Domain-Guided-Monitoring | b2b96faa66951e8dd6831e90da6f86b27ba2e9c3 | [
"MIT"
] | 1 | 2021-10-20T14:59:41.000Z | 2021-10-20T14:59:41.000Z | src/features/knowledge/hierarchy.py | lenaWitterauf/Domain-Guided-Monitoring | b2b96faa66951e8dd6831e90da6f86b27ba2e9c3 | [
"MIT"
] | null | null | null | src/features/knowledge/hierarchy.py | lenaWitterauf/Domain-Guided-Monitoring | b2b96faa66951e8dd6831e90da6f86b27ba2e9c3 | [
"MIT"
] | null | null | null | import pandas as pd
from typing import Dict, Set
from tqdm import tqdm
import logging
from .node import Node
from .base import BaseKnowledge
from .config import KnowledgeConfig
class HierarchyKnowledge(BaseKnowledge):
    """Knowledge source backed by a parent/child hierarchy loaded from a DataFrame.

    The hierarchy is stored as a graph of ``Node`` objects indexed by integer
    label index. Labels already present in the dataset vocabulary keep their
    indexes; ancestor labels that only appear in the hierarchy get fresh
    indexes appended after the dataset vocabulary (``extra_vocab``).
    """
    def __init__(
        self,
        config: KnowledgeConfig,
        child_id_col="child_id",
        parent_id_col="parent_id",
        child_name_col="child_name",
        parent_name_col="parent_name",
    ):
        # Column names describing one (child, parent) edge per DataFrame row.
        super(HierarchyKnowledge, self).__init__(config=config)
        self.child_id_col = child_id_col
        self.parent_id_col = parent_id_col
        self.child_name_col = child_name_col
        self.parent_name_col = parent_name_col
    def get_connections_for_idx(self, idx: int) -> Set[int]:
        """Return the label indexes connected to ``idx``: all its ancestors plus itself."""
        return set(self.nodes[idx].get_ancestor_label_idxs() + [idx])
    def get_description_vocab(self, ids: Set[int]) -> Dict[int, str]:
        """Map each requested label index to its human-readable label name."""
        return {idx: node.label_name for idx, node in self.nodes.items() if idx in ids}
    def build_hierarchy_from_df(
        self, hierarchy_df: pd.DataFrame, vocab: Dict[str, int]
    ):
        """Build the node graph from ``hierarchy_df`` edges, restricted to ``vocab``.

        Must be called before any of the query methods; it populates
        ``self.vocab``, ``self.extended_vocab`` and ``self.nodes``.
        """
        self.vocab: Dict[str, int] = vocab
        self._build_extended_vocab(hierarchy_df, vocab)
        for _, row in tqdm(hierarchy_df.iterrows(), desc="Building Hierarchy from df"):
            child_id = row[self.child_id_col]
            # Children outside the (extended) vocabulary are not part of the
            # dataset or its ancestor closure, so their edges are irrelevant.
            if child_id not in self.extended_vocab:
                logging.debug("Ignoring node %s as not in dataset", child_id)
                continue
            child_node = self.nodes[self.extended_vocab[child_id]]
            parent_node = self.nodes[self.extended_vocab[row[self.parent_id_col]]]
            # Guard against self-loops (rows where child == parent).
            if child_node is not parent_node:
                child_node.in_nodes.add(parent_node)
                parent_node.out_nodes.add(child_node)
        logging.info("Built hierarchy with %d nodes", len(self.nodes))
    def _build_extended_vocab(self, hierarchy_df: pd.DataFrame, vocab: Dict[str, int]):
        """Collect the ancestor closure of ``vocab`` and create one ``Node`` per label.

        Performs a worklist traversal: starting from the dataset labels, each
        label's parents are queued until the closure is exhausted. Labels not
        in ``vocab`` get new indexes continuing after ``max(vocab.values())``.
        """
        self.extended_vocab: Dict[str, int] = {}
        self.nodes: Dict[int, Node] = {}
        labels_to_handle = list(vocab.keys())
        max_index = max(vocab.values())
        while len(labels_to_handle) > 0:
            label = labels_to_handle.pop()
            # Skip labels already processed (the worklist may contain duplicates).
            if label in self.extended_vocab:
                continue
            if label in vocab:
                self.extended_vocab[label] = vocab[label]
            else:
                self.extended_vocab[label] = max_index + 1
                max_index = max_index + 1
            # A label's display names may come from either edge column.
            label_names = set(
                hierarchy_df[hierarchy_df[self.child_id_col] == label][
                    self.child_name_col
                ]
            )
            label_names.update(
                set(
                    hierarchy_df[hierarchy_df[self.parent_id_col] == label][
                        self.parent_name_col
                    ]
                )
            )
            self.nodes[self.extended_vocab[label]] = Node(
                label_idx=self.extended_vocab[label],
                label_str=label,
                label_names=label_names,
            )
            # Queue all direct parents so the ancestor closure is completed.
            parents_df = hierarchy_df[hierarchy_df[self.child_id_col] == label]
            parents = list(set(parents_df[self.parent_id_col]))
            labels_to_handle = labels_to_handle + parents
        # Labels introduced only through the hierarchy (not in the dataset vocab).
        self.extra_vocab: Dict[str, int] = {
            k: v for k, v in self.extended_vocab.items() if k not in self.vocab
        }
    def __str__(self):
        """Render the hierarchy as an indented tree, one label per line."""
        roots = [node for node in self.nodes.values() if node.is_root()]
        all_strings = []
        for root in roots:
            all_strings = all_strings + self._to_string_recursive(root, "")
        return "\n".join(all_strings)
    def _to_string_recursive(self, current_node, current_prefix):
        # Depth-first rendering; each level adds one '-' to the prefix.
        strings = [current_prefix + current_node.label_str]
        for node in current_node.out_nodes:
            strings = strings + self._to_string_recursive(node, current_prefix + "-")
        return strings
| 36.962617 | 87 | 0.608597 |
acef2a89bf4974cb730a3dcde2b0b88dc114fe16 | 378 | py | Python | mesonbuild/scripts/copy.py | ondergormez/meson | aa495ff7584de5a69077903d59196d526597cea4 | [
"Apache-2.0"
] | 64 | 2015-01-09T13:45:23.000Z | 2015-06-13T20:16:01.000Z | mesonbuild/scripts/copy.py | ondergormez/meson | aa495ff7584de5a69077903d59196d526597cea4 | [
"Apache-2.0"
] | 110 | 2015-01-09T01:35:56.000Z | 2015-06-14T11:26:04.000Z | mesonbuild/scripts/copy.py | ondergormez/meson | aa495ff7584de5a69077903d59196d526597cea4 | [
"Apache-2.0"
] | 13 | 2015-01-05T09:08:37.000Z | 2015-06-04T08:34:45.000Z | # SPDX-License-Identifer: Apache-2.0
# Copyright © 2021 Intel Corporation
"""Helper script to copy files at build time.
This is easier than trying to detect whether to use copy, cp, or something else.
"""
import shutil
import typing as T
def run(args: T.List[str]) -> int:
    """Copy ``args[0]`` to ``args[1]``, preserving metadata.

    Returns 0 on success and 1 on any failure; the broad ``except`` is
    deliberate, since this helper communicates only via its exit code.
    """
    source, destination = args[0], args[1]
    try:
        shutil.copy2(source, destination)
    except Exception:
        return 1
    return 0
| 19.894737 | 80 | 0.674603 |
acef2b465e872ccf9343e892cf451a1f594199c5 | 28 | py | Python | project/settings/__init__.py | mushahid54/feedback_survey | a568008f0717b52649010286e55e242f083734be | [
"MIT"
] | null | null | null | project/settings/__init__.py | mushahid54/feedback_survey | a568008f0717b52649010286e55e242f083734be | [
"MIT"
] | null | null | null | project/settings/__init__.py | mushahid54/feedback_survey | a568008f0717b52649010286e55e242f083734be | [
"MIT"
] | null | null | null | __author__ = 'MushahidKhan'
| 14 | 27 | 0.785714 |
acef2ca8c77de535748159d4b31152429a1c8e44 | 715 | py | Python | lib/galaxy/model/tool_shed_install/mapping.py | beatrizserrano/galaxy | e149d9d32e1bca6c07c38b1a9cdabfee60323610 | [
"CC-BY-3.0"
] | null | null | null | lib/galaxy/model/tool_shed_install/mapping.py | beatrizserrano/galaxy | e149d9d32e1bca6c07c38b1a9cdabfee60323610 | [
"CC-BY-3.0"
] | 6 | 2021-11-11T20:57:49.000Z | 2021-12-10T15:30:33.000Z | lib/galaxy/model/tool_shed_install/mapping.py | beatrizserrano/galaxy | e149d9d32e1bca6c07c38b1a9cdabfee60323610 | [
"CC-BY-3.0"
] | null | null | null | from galaxy.model import tool_shed_install as install_model
from galaxy.model.base import ModelMapping
from galaxy.model.orm.engine_factory import build_engine
from galaxy.model.tool_shed_install import mapper_registry
# Module-level alias for the tool-shed-install mapper registry's SQLAlchemy metadata.
metadata = mapper_registry.metadata
def init(url, engine_options=None, create_tables=False):
    """Build an engine for ``url`` and return the configured model mapping.

    :param url: database connection URL passed to ``build_engine``.
    :param engine_options: optional engine keyword options.
    :param create_tables: when True, create all mapped tables first.
    """
    engine = build_engine(url, engine_options)
    if create_tables:
        create_database_objects(engine)
    return configure_model_mapping(engine)
def create_database_objects(engine):
    """Create all tables registered on the mapper registry against ``engine``."""
    mapper_registry.metadata.create_all(bind=engine)
def configure_model_mapping(engine):
    """Wrap the install model in a ``ModelMapping`` bound to ``engine``."""
    # TODO: do we need to load local galaxy security policy?
    return ModelMapping([install_model], engine=engine)
| 31.086957 | 60 | 0.806993 |
acef2cbd57bcf0f7ec22fef19061ef2a46960a66 | 3,984 | py | Python | polyjuice/generations/create_blanks.py | shwang/polyjuice | 5f9a3a23d95e4a3877cc048cbcef01f071dc6353 | [
"BSD-3-Clause"
] | 38 | 2021-05-25T02:18:40.000Z | 2022-03-25T12:09:58.000Z | polyjuice/generations/create_blanks.py | shwang/polyjuice | 5f9a3a23d95e4a3877cc048cbcef01f071dc6353 | [
"BSD-3-Clause"
] | 7 | 2021-06-03T04:08:55.000Z | 2021-12-06T06:53:05.000Z | polyjuice/generations/create_blanks.py | shwang/polyjuice | 5f9a3a23d95e4a3877cc048cbcef01f071dc6353 | [
"BSD-3-Clause"
] | 5 | 2021-11-12T21:43:59.000Z | 2022-03-22T21:51:08.000Z | import numpy as np
from ..helpers import unify_tags, flatten_fillins
from .special_tokens import BLANK_TOK
def create_blanked_sents(doc, indexes=None):
    """Return the set of blanked variants of ``doc``.

    :param doc: a parsed sentence (spaCy-style Doc of tokens).
    :param indexes: optional span list. A single ``[start, end]`` pair (ints)
        is wrapped into a one-element list; a list of span-lists is used
        as-is. When omitted, up to 3 random span sets are sampled.
    :return: set of strings where each selected span is replaced by BLANK_TOK.
    """
    if indexes:
        if type(indexes[0]) == int:
            # A single [start, end] pair was passed; normalize to a list of span sets.
            indexes = [indexes]
        indexes_list = indexes
    else:
        indexes_list = get_random_idxes(
            doc, is_token_only=False, max_count=3)
    # One blanked string per span set; the set() removes duplicates.
    blanks = set([flatten_fillins(
        doc, indexes, [BLANK_TOK] * len(indexes)) \
        for indexes in indexes_list])
    return blanks
# the function for placing BLANKS.
def get_one_random_idx_set(
    doc, max_blank_block=3, req_dep=None, blank_type_prob=None,
    pre_selected_idxes=None, is_token_only=False):
    """Randomly pick up to ``max_blank_block`` non-overlapping token spans to blank.

    :param doc: parsed sentence (spaCy-style tokens with ``.i``, ``.dep_``,
        ``.is_punct``, ``.left_edge``/``.right_edge``).
    :param max_blank_block: maximum number of spans to select.
    :param req_dep: optional dependency label(s) restricting candidate tokens.
    :param blank_type_prob: optional [token, subtree, insert] probabilities.
    :param pre_selected_idxes: optional whitelist of token indexes.
    :param is_token_only: bias sampling towards single tokens / insertions.
    :return: sorted list of ``[start, end)`` index pairs (``start == end``
        denotes an insertion point).
    """
    if req_dep is not None:
        if isinstance(req_dep, str):
            req_dep = [req_dep]
        idx_range = [i for i, token in enumerate(doc)
                     if token.dep_ in req_dep or unify_tags(token.dep_) in req_dep]
    else:
        idx_range = list(range(len(doc)))
    # Only keep indexes the caller explicitly allowed.
    if pre_selected_idxes is not None:
        idx_range = [i for i in idx_range if i in pre_selected_idxes]
    max_blank_block = min(len(idx_range), max_blank_block)
    selected_indexes = []
    # Bug fix: the retry loop previously had no bound, so a doc whose
    # candidate tokens are all punctuation spun forever (the inner loop
    # exhausted total_run without selecting anything and the outer loop
    # simply restarted). Cap the number of outer attempts.
    attempts = 10
    while max_blank_block > 0 and not selected_indexes and attempts > 0:
        attempts -= 1
        n_perturb = np.random.choice(list(range(1, max_blank_block + 1)))
        total_run = 1000  # bound on the inner rejection-sampling loop
        while total_run > 0 and n_perturb > 0:
            replace_idx = np.random.choice(idx_range)
            token = doc[replace_idx]
            if token.is_punct:
                total_run -= 1
                continue
            if blank_type_prob:
                p = blank_type_prob
            else:
                # Probabilities for blanking a single token / a dependency
                # subtree / an insertion point, depending on the mode.
                if is_token_only:
                    p = [0.7, 0, 0.3]
                elif req_dep is None:
                    p = [0.4, 0.35, 0.25]
                else:
                    p = [0.1, 0.7, 0.2]
            blank_type = np.random.choice(["token", "subtree", "insert"], p=p)
            if blank_type == "subtree":
                start, end = token.left_edge.i, token.right_edge.i + 1
            elif blank_type == "token":
                start, end = token.i, token.i + 1
            else:
                # Insertion point: empty span at the token position.
                start, end = token.i, token.i
            # Keep only spans that do not overlap any already selected span.
            if all(end < sstart or start > send for sstart, send in selected_indexes):
                selected_indexes.append([start, end])
                n_perturb -= 1
            total_run -= 1
    return sorted(selected_indexes, key=lambda idx: (idx[0], idx[1]))
def get_random_idxes(doc,
        pre_selected_idxes=None,
        deps=None, is_token_only=False,
        max_blank_block=3, max_count=None):
    """Sample several distinct span sets for blanking ``doc``.

    Tries each dependency-label group in ``deps`` (defaults below) a few
    times and deduplicates the resulting span sets by their string form.
    Unless ``is_token_only``, the whole-sentence span ``[[0, len(doc)]]``
    is always included as a candidate.

    :param max_count: if given, randomly keep at most this many span sets.
    :return: list of span sets, each a list of ``[start, end)`` pairs.
    """
    # Keyed by str(span_set) so identical samples collapse to one entry.
    unique_blanks = {str([[0, len(doc)]]): [[0, len(doc)]]}
    default_deps = [None, "", ["subj","obj"], ["aux", "ROOT"], ["conj", "modifier", "clause"]]
    if is_token_only:
        unique_blanks = {}
    if deps is None: deps = default_deps
    for dep in deps:
        # For each dependency group, sample one or more span sets.
        # None (unrestricted) gets two rounds; token-only mode gets five.
        rounds = 1 if dep is not None else 2
        if is_token_only:
            rounds = 5
        for _ in range(rounds):
            # The sentinel dep "" contributes no sample (placeholder group).
            curr_idx = get_one_random_idx_set(
                doc, req_dep=dep,
                max_blank_block=max_blank_block,
                pre_selected_idxes=pre_selected_idxes,
                is_token_only=is_token_only) if dep != "" else None
            if curr_idx is not None:
                unique_blanks[str(curr_idx)] = curr_idx
    unique_blanks = list(unique_blanks.values())
    if max_count is not None:
        try:
            # Sample without replacement; dtype="object" keeps the nested
            # lists intact inside the numpy array.
            unique_blanks = list(np.random.choice(
                np.array(unique_blanks, dtype="object"),
                min(len(unique_blanks), max_count),
                replace=False))
        except:
            # Fall back to simple truncation if numpy rejects the input.
            unique_blanks = unique_blanks[:max_count]
    return unique_blanks
| 41.5 | 116 | 0.599398 |
acef2d7e727d464c7a78f7f91111775b2ef87086 | 11,613 | py | Python | lib/i2c/tb/test_i2c_master.py | bmindur/xfcp | dd2eef2cb23a17209180b278b1e2640fd09fda28 | [
"MIT"
] | 251 | 2015-07-29T11:36:59.000Z | 2022-03-30T05:08:22.000Z | lib/i2c/tb/test_i2c_master.py | bmindur/xfcp | dd2eef2cb23a17209180b278b1e2640fd09fda28 | [
"MIT"
] | 4 | 2017-05-31T21:13:34.000Z | 2021-03-21T10:21:06.000Z | lib/i2c/tb/test_i2c_master.py | bmindur/xfcp | dd2eef2cb23a17209180b278b1e2640fd09fda28 | [
"MIT"
] | 107 | 2016-11-12T03:33:39.000Z | 2022-03-30T11:04:56.000Z | #!/usr/bin/env python
"""
Copyright (c) 2015-2017 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from myhdl import *
import os
import axis_ep
import i2c
# Name of the RTL module under test and the derived testbench module name.
module = 'i2c_master'
testbench = 'test_%s' % module
# Verilog sources compiled into the cosimulation.
srcs = []
srcs.append("../rtl/%s.v" % module)
srcs.append("%s.v" % testbench)
src = ' '.join(srcs)
# Icarus Verilog command that builds the simulation binary (<testbench>.vvp).
build_cmd = "iverilog -o %s.vvp %s" % (testbench, src)
def bench():
    """Build the myhdl/Icarus cosimulation bench for the i2c_master DUT.

    Wires an AXI-Stream command source, data source and data sink to the
    DUT, attaches two I2C memory-model slaves (addresses 0x50 and 0x51)
    to a shared wired-AND bus, and drives five stimulus tests in check():
    write, read, write/read to the slow slave, and a missed-ACK case.
    Returns all generator instances for Simulation().
    """
    # Parameters
    # Inputs
    clk = Signal(bool(0))
    rst = Signal(bool(0))
    current_test = Signal(intbv(0)[8:])
    s_axis_cmd_address = Signal(intbv(0)[7:])
    s_axis_cmd_start = Signal(bool(0))
    s_axis_cmd_read = Signal(bool(0))
    s_axis_cmd_write = Signal(bool(0))
    s_axis_cmd_write_multiple = Signal(bool(0))
    s_axis_cmd_stop = Signal(bool(0))
    s_axis_cmd_valid = Signal(bool(0))
    s_axis_data_tdata = Signal(intbv(0)[8:])
    s_axis_data_tvalid = Signal(bool(0))
    s_axis_data_tlast = Signal(bool(0))
    m_axis_data_tready = Signal(bool(0))
    scl_i = Signal(bool(1))
    sda_i = Signal(bool(1))
    prescale = Signal(intbv(0)[16:])
    stop_on_idle = Signal(bool(0))
    s1_scl_i = Signal(bool(1))
    s1_sda_i = Signal(bool(1))
    s2_scl_i = Signal(bool(1))
    s2_sda_i = Signal(bool(1))
    # Outputs
    s_axis_cmd_ready = Signal(bool(0))
    s_axis_data_tready = Signal(bool(0))
    m_axis_data_tdata = Signal(intbv(0)[8:])
    m_axis_data_tvalid = Signal(bool(0))
    m_axis_data_tlast = Signal(bool(0))
    scl_o = Signal(bool(1))
    scl_t = Signal(bool(1))
    sda_o = Signal(bool(1))
    sda_t = Signal(bool(1))
    busy = Signal(bool(0))
    bus_control = Signal(bool(0))
    bus_active = Signal(bool(0))
    missed_ack = Signal(bool(0))
    s1_scl_o = Signal(bool(1))
    s1_scl_t = Signal(bool(1))
    s1_sda_o = Signal(bool(1))
    s1_sda_t = Signal(bool(1))
    s2_scl_o = Signal(bool(1))
    s2_scl_t = Signal(bool(1))
    s2_sda_o = Signal(bool(1))
    s2_sda_t = Signal(bool(1))
    # sources and sinks
    cmd_source_pause = Signal(bool(0))
    data_source_pause = Signal(bool(0))
    data_sink_pause = Signal(bool(0))
    # Command source: drives the DUT's command interface; tdata is the
    # tuple (address, start, read, write, write_multiple, stop).
    cmd_source = axis_ep.AXIStreamSource()
    cmd_source_logic = cmd_source.create_logic(
        clk,
        rst,
        tdata=(s_axis_cmd_address, s_axis_cmd_start, s_axis_cmd_read, s_axis_cmd_write, s_axis_cmd_write_multiple, s_axis_cmd_stop),
        tvalid=s_axis_cmd_valid,
        tready=s_axis_cmd_ready,
        pause=cmd_source_pause,
        name='cmd_source'
    )
    data_source = axis_ep.AXIStreamSource()
    data_source_logic = data_source.create_logic(
        clk,
        rst,
        tdata=s_axis_data_tdata,
        tvalid=s_axis_data_tvalid,
        tready=s_axis_data_tready,
        tlast=s_axis_data_tlast,
        pause=data_source_pause,
        name='data_source'
    )
    data_sink = axis_ep.AXIStreamSink()
    data_sink_logic = data_sink.create_logic(
        clk,
        rst,
        tdata=m_axis_data_tdata,
        tvalid=m_axis_data_tvalid,
        tready=m_axis_data_tready,
        tlast=m_axis_data_tlast,
        pause=data_sink_pause,
        name='data_sink'
    )
    # I2C memory model 1
    i2c_mem_inst1 = i2c.I2CMem(1024)
    i2c_mem_logic1 = i2c_mem_inst1.create_logic(
        scl_i=s1_scl_i,
        scl_o=s1_scl_o,
        scl_t=s1_scl_t,
        sda_i=s1_sda_i,
        sda_o=s1_sda_o,
        sda_t=s1_sda_t,
        abw=2,
        address=0x50,
        latency=0,
        name='slave1'
    )
    # I2C memory model 2 (high latency to exercise clock stretching paths)
    i2c_mem_inst2 = i2c.I2CMem(1024)
    i2c_mem_logic2 = i2c_mem_inst2.create_logic(
        scl_i=s2_scl_i,
        scl_o=s2_scl_o,
        scl_t=s2_scl_t,
        sda_i=s2_sda_i,
        sda_o=s2_sda_o,
        sda_t=s2_sda_t,
        abw=2,
        address=0x51,
        latency=1000,
        name='slave2'
    )
    # DUT: compile the Verilog and attach the cosimulation
    if os.system(build_cmd):
        raise Exception("Error running build command")
    dut = Cosimulation(
        "vvp -m myhdl %s.vvp -lxt2" % testbench,
        clk=clk,
        rst=rst,
        current_test=current_test,
        s_axis_cmd_address=s_axis_cmd_address,
        s_axis_cmd_start=s_axis_cmd_start,
        s_axis_cmd_read=s_axis_cmd_read,
        s_axis_cmd_write=s_axis_cmd_write,
        s_axis_cmd_write_multiple=s_axis_cmd_write_multiple,
        s_axis_cmd_stop=s_axis_cmd_stop,
        s_axis_cmd_valid=s_axis_cmd_valid,
        s_axis_cmd_ready=s_axis_cmd_ready,
        s_axis_data_tdata=s_axis_data_tdata,
        s_axis_data_tvalid=s_axis_data_tvalid,
        s_axis_data_tready=s_axis_data_tready,
        s_axis_data_tlast=s_axis_data_tlast,
        m_axis_data_tdata=m_axis_data_tdata,
        m_axis_data_tvalid=m_axis_data_tvalid,
        m_axis_data_tready=m_axis_data_tready,
        m_axis_data_tlast=m_axis_data_tlast,
        scl_i=scl_i,
        scl_o=scl_o,
        scl_t=scl_t,
        sda_i=sda_i,
        sda_o=sda_o,
        sda_t=sda_t,
        busy=busy,
        bus_control=bus_control,
        bus_active=bus_active,
        missed_ack=missed_ack,
        prescale=prescale,
        stop_on_idle=stop_on_idle
    )
    @always_comb
    def bus():
        # emulate I2C wired AND: any device pulling low drives the line low
        scl_i.next = scl_o & s1_scl_o & s2_scl_o;
        sda_i.next = sda_o & s1_sda_o & s2_sda_o;
        s1_scl_i.next = scl_o & s1_scl_o & s2_scl_o;
        s1_sda_i.next = sda_o & s1_sda_o & s2_sda_o;
        s2_scl_i.next = scl_o & s1_scl_o & s2_scl_o;
        s2_sda_i.next = sda_o & s1_sda_o & s2_sda_o;
    @always(delay(4))
    def clkgen():
        # Free-running clock with a period of 8 time units.
        clk.next = not clk
    @instance
    def check():
        # Reset sequence, then configure the SCL prescaler.
        yield delay(100)
        yield clk.posedge
        rst.next = 1
        yield clk.posedge
        rst.next = 0
        yield clk.posedge
        yield delay(100)
        yield clk.posedge
        prescale.next = 2
        yield clk.posedge
        # testbench stimulus
        yield clk.posedge
        print("test 1: write")
        current_test.next = 1
        cmd_source.send([(
            0x50, # address
            0, # start
            0, # read
            0, # write
            1, # write_multiple
            1  # stop
        )])
        data_source.send((b'\x00\x04'+b'\x11\x22\x33\x44'))
        yield clk.posedge
        yield clk.posedge
        yield clk.posedge
        while busy or bus_active or not cmd_source.empty():
            yield clk.posedge
        yield clk.posedge
        data = i2c_mem_inst1.read_mem(0, 32)
        for i in range(0, len(data), 16):
            print(" ".join(("{:02x}".format(c) for c in bytearray(data[i:i+16]))))
        assert i2c_mem_inst1.read_mem(4,4) == b'\x11\x22\x33\x44'
        yield delay(100)
        yield clk.posedge
        print("test 2: read")
        current_test.next = 2
        # Write the memory address pointer, then issue 4 read commands.
        cmd_source.send([(
            0x50, # address
            0, # start
            0, # read
            0, # write
            1, # write_multiple
            0  # stop
        )])
        data_source.send((b'\x00\x04'))
        for i in range(3):
            cmd_source.send([(
                0x50, # address
                0, # start
                1, # read
                0, # write
                0, # write_multiple
                0  # stop
            )])
        cmd_source.send([(
            0x50, # address
            0, # start
            1, # read
            0, # write
            0, # write_multiple
            1  # stop
        )])
        yield clk.posedge
        yield clk.posedge
        yield clk.posedge
        while busy or bus_active or not cmd_source.empty():
            yield clk.posedge
        yield clk.posedge
        data = data_sink.recv()
        assert data.data == b'\x11\x22\x33\x44'
        yield delay(100)
        yield clk.posedge
        print("test 3: write to slave 2")
        current_test.next = 3
        cmd_source.send([(
            0x51, # address
            0, # start
            0, # read
            0, # write
            1, # write_multiple
            1  # stop
        )])
        data_source.send((b'\x00\x04'+b'\x44\x33\x22\x11'))
        yield clk.posedge
        yield clk.posedge
        yield clk.posedge
        while busy or bus_active or not cmd_source.empty():
            yield clk.posedge
        yield clk.posedge
        data = i2c_mem_inst1.read_mem(0, 32)
        for i in range(0, len(data), 16):
            print(" ".join(("{:02x}".format(c) for c in bytearray(data[i:i+16]))))
        assert i2c_mem_inst2.read_mem(4,4) == b'\x44\x33\x22\x11'
        yield delay(100)
        yield clk.posedge
        print("test 4: read from slave 2")
        current_test.next = 4
        cmd_source.send([(
            0x51, # address
            0, # start
            0, # read
            0, # write
            1, # write_multiple
            0  # stop
        )])
        data_source.send((b'\x00\x04'))
        for i in range(3):
            cmd_source.send([(
                0x51, # address
                0, # start
                1, # read
                0, # write
                0, # write_multiple
                0  # stop
            )])
        cmd_source.send([(
            0x51, # address
            0, # start
            1, # read
            0, # write
            0, # write_multiple
            1  # stop
        )])
        yield clk.posedge
        yield clk.posedge
        yield clk.posedge
        while busy or bus_active or not cmd_source.empty():
            yield clk.posedge
        yield clk.posedge
        data = data_sink.recv()
        assert data.data == b'\x44\x33\x22\x11'
        yield delay(100)
        yield clk.posedge
        print("test 5: write to nonexistent device")
        current_test.next = 5
        # No slave answers at 0x52, so the master must flag a missed ACK.
        cmd_source.send([(
            0x52, # address
            0, # start
            0, # read
            0, # write
            1, # write_multiple
            1  # stop
        )])
        data_source.send((b'\x00\x04'+b'\xde\xad\xbe\xef'))
        got_missed_ack = False
        for k in range(1000):
            got_missed_ack |= missed_ack
            yield clk.posedge
        assert got_missed_ack
        yield clk.posedge
        yield clk.posedge
        yield clk.posedge
        while busy or bus_active or not cmd_source.empty():
            yield clk.posedge
        yield clk.posedge
        yield delay(100)
        raise StopSimulation
    return instances()
def test_bench():
    """Instantiate the cosimulation bench and run it until StopSimulation."""
    simulation = Simulation(bench())
    simulation.run()
# Allow running the testbench directly (outside a pytest runner).
if __name__ == '__main__':
    print("Running test...")
    test_bench()
| 26.155405 | 132 | 0.575217 |
acef2ddf94f2feb2210f5d4474d3f2e76f3606b2 | 906 | py | Python | new_officers.py | compserv/elections | 1ed5d957d637833adab93358551ecef60f8ecea7 | [
"BSD-3-Clause"
] | 2 | 2019-12-16T21:54:16.000Z | 2019-12-28T17:24:48.000Z | new_officers.py | compserv/elections | 1ed5d957d637833adab93358551ecef60f8ecea7 | [
"BSD-3-Clause"
] | 8 | 2019-12-11T20:14:13.000Z | 2021-05-03T05:44:20.000Z | new_officers.py | compserv/elections | 1ed5d957d637833adab93358551ecef60f8ecea7 | [
"BSD-3-Clause"
] | 5 | 2019-04-20T00:42:57.000Z | 2021-12-06T23:18:06.000Z | # @author: Catherine Hu, James Zhu, Carolyn Wang (for add_users)
from __future__ import (division, absolute_import, print_function,
unicode_literals)
from hknlib.election.settings import ELECTION_SPREADSHEET_ID, NEW_OFFICER_RANGE
from hknlib.election.cred import get_credentials
from hknlib.election.sheets import get_sheet_data
from hknlib.election.users import add_users
from hknlib.election.groups import add_all_to_committes
# test_data = [
# ['12/2/2018 22:24:44', 'Test', 'User', 'test_user', 'jameszhu@hkn.eecs.berkeley.edu', 'compserv@'],
# ]
def main():
    """Read the new-officer rows from the election spreadsheet, create their
    accounts, and add everyone to their committee groups."""
    credentials = get_credentials("./secret/hknlib.json")
    election_data = get_sheet_data(credentials, NEW_OFFICER_RANGE, ELECTION_SPREADSHEET_ID)
    add_users(credentials, election_data)
    add_all_to_committes(credentials, election_data)
# Script entry point.
if __name__ == '__main__':
    main()
| 34.846154 | 105 | 0.756071 |
acef2e8ac7dadab6ad8ef5fb80a5a2fbcee2119d | 711 | py | Python | Linear-Programing-Optimizing/Problem-5.py | aminzayer/Amin-University-Data-Science | 8303554a70fbdbc1abe06be03c29c87385bc1137 | [
"MIT"
] | 2 | 2022-02-16T22:24:33.000Z | 2022-02-24T17:00:05.000Z | Linear-Programing-Optimizing/Problem-5.py | aminzayer/Amin-University-Data-Science | 8303554a70fbdbc1abe06be03c29c87385bc1137 | [
"MIT"
] | null | null | null | Linear-Programing-Optimizing/Problem-5.py | aminzayer/Amin-University-Data-Science | 8303554a70fbdbc1abe06be03c29c87385bc1137 | [
"MIT"
] | null | null | null | # Maximize: Z = 3x + 2y
# Subject to the constraints:
# x + 2y <= 4
# 6x + 4y <= 6
# x, y >= 0
# import the library pulp as p
import pulp as p
# Build the LP maximization model described above: max Z = 3x + 2y.
problem = p.LpProblem('Problem', p.LpMaximize)

# Decision variables, both constrained to be non-negative.
x = p.LpVariable("x", lowBound=0)
y = p.LpVariable("y", lowBound=0)

# Objective function.
problem += 3 * x + 2 * y

# Constraints.
problem += x + 2 * y <= 4
problem += 6 * x + 4 * y <= 6

# Show the model, solve it, and report the status and the optimum.
print(problem)
status = problem.solve()
print(p.LpStatus[status])
print(p.value(x), p.value(y), p.value(problem.objective))
| 22.21875 | 61 | 0.652602 |
acef2ecc76f109afae226b3d1d8a1579a8c7e8b3 | 123 | py | Python | examples/tutorial/tutorial/users/views.py | carbonariy/dvhb-hybrid | adbb250767ea255addc607fb6f6755c9add447db | [
"MIT"
] | 27 | 2018-05-08T16:03:24.000Z | 2020-02-20T06:39:19.000Z | examples/tutorial/tutorial/users/views.py | carbonariy/dvhb-hybrid | adbb250767ea255addc607fb6f6755c9add447db | [
"MIT"
] | 7 | 2018-10-20T16:03:36.000Z | 2021-11-03T11:09:22.000Z | examples/tutorial/tutorial/users/views.py | carbonariy/dvhb-hybrid | adbb250767ea255addc607fb6f6755c9add447db | [
"MIT"
] | 16 | 2018-12-11T15:34:22.000Z | 2022-01-25T00:20:55.000Z | async def get_users(request):
users = await request.app.m.user.get_list(fields=['username', 'email'])
return users
| 30.75 | 75 | 0.707317 |
acef2f26b023376924f9ad38b62e0e263ff78a55 | 2,289 | py | Python | superset/db_engine_specs/firebolt.py | mis-esta/superset | bdc6a1d4e37fd47948752836a7f4f400283b436c | [
"Apache-2.0"
] | 3 | 2019-12-12T02:12:50.000Z | 2019-12-17T02:05:17.000Z | superset/db_engine_specs/firebolt.py | mis-esta/superset | bdc6a1d4e37fd47948752836a7f4f400283b436c | [
"Apache-2.0"
] | 87 | 2020-03-14T22:39:04.000Z | 2021-01-04T08:12:02.000Z | superset/db_engine_specs/firebolt.py | mis-esta/superset | bdc6a1d4e37fd47948752836a7f4f400283b436c | [
"Apache-2.0"
] | 3 | 2019-07-23T10:37:16.000Z | 2020-08-02T21:01:13.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from datetime import datetime
from typing import Optional
from superset.db_engine_specs.base import BaseEngineSpec
from superset.utils import core as utils
class FireboltEngineSpec(BaseEngineSpec):
    """Superset engine spec for the Firebolt database."""

    engine = "firebolt"
    engine_name = "Firebolt"
    default_driver = "firebolt"

    # ISO-8601 duration -> Firebolt date_trunc expression templates.
    _time_grain_expressions = {
        None: "{col}",
        "PT1S": "date_trunc('second', CAST({col} AS TIMESTAMP))",
        "PT1M": "date_trunc('minute', CAST({col} AS TIMESTAMP))",
        "PT1H": "date_trunc('hour', CAST({col} AS TIMESTAMP))",
        "P1D": "date_trunc('day', CAST({col} AS TIMESTAMP))",
        "P1W": "date_trunc('week', CAST({col} AS TIMESTAMP))",
        "P1M": "date_trunc('month', CAST({col} AS TIMESTAMP))",
        "P3M": "date_trunc('quarter', CAST({col} AS TIMESTAMP))",
        "P1Y": "date_trunc('year', CAST({col} AS TIMESTAMP))",
    }

    @classmethod
    def convert_dttm(cls, target_type: str, dttm: datetime) -> Optional[str]:
        """Render ``dttm`` as a Firebolt CAST literal for ``target_type``.

        Returns None when the target type is not a supported temporal type.
        """
        type_upper = target_type.upper()
        if type_upper == utils.TemporalType.DATE:
            return f"CAST('{dttm.date().isoformat()}' AS DATE)"
        if type_upper == utils.TemporalType.DATETIME:
            return f"""CAST('{dttm.isoformat(timespec="seconds")}' AS DATETIME)"""
        if type_upper == utils.TemporalType.TIMESTAMP:
            return f"""CAST('{dttm.isoformat(timespec="seconds")}' AS TIMESTAMP)"""
        return None

    @classmethod
    def epoch_to_dttm(cls) -> str:
        """SQL template turning a Unix-epoch column into a timestamp."""
        return "from_unixtime({col})"
| 40.157895 | 83 | 0.67322 |
acef2f7dbb3cb77c342336ee352980faea7ab755 | 3,815 | py | Python | examples/wiki_game.py | redfungus/webtraversallibrary | d5013edc061deba40e859dcfcda314c7055ce82a | [
"Apache-2.0"
] | null | null | null | examples/wiki_game.py | redfungus/webtraversallibrary | d5013edc061deba40e859dcfcda314c7055ce82a | [
"Apache-2.0"
] | null | null | null | examples/wiki_game.py | redfungus/webtraversallibrary | d5013edc061deba40e859dcfcda314c7055ce82a | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Play the "wiki game": start from a random Wikipedia article, click a random
body link, then try to find the linked page again via Wikipedia search.
"""
from random import choice
import webtraversallibrary as wtl
from webtraversallibrary.actions import Abort, Click
from .util import parse_cli_args
@wtl.single_tab_coroutine
def policy():
    """Coroutine policy for the wiki game.

    Each ``yield`` hands one or more actions to the workflow and receives the
    resulting ``(workflow, view)`` pair back. The flow: remember page A,
    click a random body link to reach page B, take B's first paragraph,
    search Wikipedia for that text from page A, and click through the
    results until landing back on page B.
    """
    workflow, view = yield
    # Store Page A's URL
    page_a_url = workflow.current_window.driver.current_url
    # Click a random anchor inside the article body.
    workflow, view = yield Click(
        choice(view.snapshot.elements.by_subtree(wtl.Selector("div[id='bodyContent']")).by_selector(wtl.Selector("a")))
    )
    # Store Page B's URL
    page_b_url = workflow.current_window.driver.current_url
    description = ""
    # Stores first paragraph from page B's body; fall back to the second
    # paragraph when the first is missing or empty.
    try:
        description = view.snapshot.elements.by_selector(wtl.Selector("div p:nth-of-type(1)"))[0].metadata["text"]
        # NOTE(review): `.empty` suggests metadata["text"] is not a plain str
        # here (looks pandas-like) — confirm the metadata value type.
        if description.empty:
            raise IndexError()
    except IndexError:
        description = view.snapshot.elements.by_selector(wtl.Selector("div p:nth-of-type(2)"))[0].metadata["text"]
    # Limit the description to 50 characters to improve search
    description_subset = str(description[0:49])
    # Navigate back to page A
    workflow, view = yield wtl.actions.Navigate(page_a_url)
    link_to_click = view.snapshot.elements.by_selector(wtl.Selector("input[type='submit']"))
    # In the search bar in page A, fill text with description_subset and
    # click search to get search results for the descriptions
    workflow, view = yield [
        wtl.actions.FillText(wtl.Selector("input[type='search']"), str(description_subset)),
        Click(link_to_click[0]),
    ]
    # Store search result's URL
    search_url = workflow.current_window.driver.current_url
    search_results = view.snapshot.elements.by_selector(wtl.Selector("div[class=mw-search-result-heading] a"))
    i = 0
    # Go to first link in the search result
    try:
        workflow, view = yield Click(search_results[i])
    except IndexError:
        print("Empty search results!!")
        yield Abort()
    # Walk the search results in order until the landing URL equals
    # page B's URL or the results run out.
    while True:
        if workflow.current_window.driver.current_url == page_b_url:
            print("Woohoo!!!")
            break
        try:
            workflow, view = yield [wtl.actions.Navigate(search_url), Click(search_results[i + 1])]
            i += 1
        except IndexError:
            print("Search result exhausted!!")
            break
    yield None
if __name__ == "__main__":
cli_args = parse_cli_args()
wf = wtl.Workflow(
config=wtl.Config(cli_args.config),
policy=policy,
url="https://en.wikipedia.org/wiki/Special:Random",
output=cli_args.output,
)
wf.classifiers.add(wtl.ActiveElementFilter(action=Click))
wf.classifiers.add(wtl.ElementClassifier(name="textfield", action=wtl.actions.FillText, highlight=True))
wf.run()
wf.quit()
| 32.606838 | 119 | 0.695675 |
acef2f9d8da8e64a1801ba318109716f5506feb9 | 14,013 | py | Python | python/paddle/fluid/tests/unittests/dygraph_to_static/test_se_resnet.py | suytingwan/Paddle | d4560871878eaec4f1f682af56fb364a5f596b18 | [
"Apache-2.0"
] | 1 | 2020-05-12T04:00:14.000Z | 2020-05-12T04:00:14.000Z | python/paddle/fluid/tests/unittests/dygraph_to_static/test_se_resnet.py | suytingwan/Paddle | d4560871878eaec4f1f682af56fb364a5f596b18 | [
"Apache-2.0"
] | null | null | null | python/paddle/fluid/tests/unittests/dygraph_to_static/test_se_resnet.py | suytingwan/Paddle | d4560871878eaec4f1f682af56fb364a5f596b18 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import math
import time
import unittest
import numpy as np
import paddle
import paddle.fluid as fluid
from paddle.fluid.dygraph.base import to_variable
from paddle.fluid.dygraph.nn import BatchNorm, Conv2D, Linear, Pool2D
from paddle.fluid.dygraph import declarative
from paddle.fluid.dygraph import ProgramTranslator
# Fixed seed so the dygraph and static-graph runs are comparable.
SEED = 2020
np.random.seed(SEED)
# Test-scale hyperparameters (tiny batch/step counts to keep the test fast).
BATCH_SIZE = 8
EPOCH_NUM = 1
PRINT_STEP = 2
STEP_NUM = 10
place = fluid.CPUPlace()
# TODO(liym27): Diff exists between dygraph and static graph on CUDA place.
# place = fluid.CUDAPlace(0) if fluid.is_compiled_with_cuda() else fluid.CPUPlace()
# Training configuration consumed by optimizer_setting(). Note the
# "epochs"/"steps" entries are not used by the cosine-decay schedule below.
train_parameters = {
    "learning_strategy": {
        "name": "cosine_decay",
        "batch_size": BATCH_SIZE,
        "epochs": [40, 80, 100],
        "steps": [0.1, 0.01, 0.001, 0.0001]
    },
    "lr": 0.0125,
    "total_images": 6149,
    "momentum_rate": 0.9,
    "l2_decay": 1.2e-4,
    "num_epochs": 1,
}
def optimizer_setting(params, parameter_list):
    """Build a Momentum optimizer with a cosine-decay learning rate.

    :param params: configuration dict (see ``train_parameters``) providing
        ``lr``, ``num_epochs``, ``momentum_rate``, ``l2_decay``, optionally
        ``total_images``, and ``learning_strategy.batch_size``.
    :param parameter_list: parameters the optimizer will update.
    :return: a ``fluid.optimizer.Momentum`` instance.
    """
    ls = params["learning_strategy"]
    # Default dataset size when not configured.
    total_images = params.get("total_images", 6149)
    batch_size = ls["batch_size"]
    l2_decay = params["l2_decay"]
    momentum_rate = params["momentum_rate"]
    # Optimizer steps per epoch, rounded up to cover a partial last batch.
    step = int(math.ceil(float(total_images) / batch_size))
    # (Removed the unused `bd` boundary list left over from a piecewise-decay
    # schedule; only the cosine decay below is used.)
    lr = params["lr"]
    num_epochs = params["num_epochs"]
    optimizer = fluid.optimizer.Momentum(
        learning_rate=fluid.layers.cosine_decay(
            learning_rate=lr, step_each_epoch=step, epochs=num_epochs),
        momentum=momentum_rate,
        regularization=fluid.regularizer.L2Decay(l2_decay),
        parameter_list=parameter_list)
    return optimizer
class ConvBNLayer(fluid.dygraph.Layer):
    """Conv2D followed by BatchNorm; the activation (if any) is applied
    inside the batch-norm layer."""

    def __init__(self,
                 num_channels,
                 num_filters,
                 filter_size,
                 stride=1,
                 groups=1,
                 act=None):
        super(ConvBNLayer, self).__init__()
        # "Same" padding for odd kernel sizes at stride 1.
        same_padding = (filter_size - 1) // 2
        self._conv = Conv2D(
            num_channels=num_channels,
            num_filters=num_filters,
            filter_size=filter_size,
            stride=stride,
            padding=same_padding,
            groups=groups,
            act=None,  # activation is deferred to the batch-norm layer
            bias_attr=False)
        self._batch_norm = BatchNorm(num_filters, act=act)

    def forward(self, inputs):
        """Return the batch-normalized (and optionally activated) convolution."""
        return self._batch_norm(self._conv(inputs))
class SqueezeExcitation(fluid.dygraph.Layer):
    """Squeeze-and-Excitation block: global average pool -> FC(relu) ->
    FC(sigmoid), then per-channel rescaling of the input."""
    def __init__(self, num_channels, reduction_ratio):
        super(SqueezeExcitation, self).__init__()
        self._num_channels = num_channels
        self._pool = Pool2D(pool_size=0, pool_type='avg', global_pooling=True)
        # Uniform init bounds follow the usual 1/sqrt(fan_in) heuristic.
        stdv = 1.0 / math.sqrt(num_channels * 1.0)
        self._fc = Linear(
            num_channels,
            num_channels // reduction_ratio,
            param_attr=fluid.ParamAttr(
                initializer=fluid.initializer.Uniform(-stdv, stdv)),
            act='relu')
        # NOTE(review): this bound uses a fixed /16.0 rather than
        # reduction_ratio — matches the upstream reference code, but verify.
        stdv = 1.0 / math.sqrt(num_channels / 16.0 * 1.0)
        self._excitation = Linear(
            num_channels // reduction_ratio,
            num_channels,
            param_attr=fluid.ParamAttr(
                initializer=fluid.initializer.Uniform(-stdv, stdv)),
            act='sigmoid')
    def forward(self, input):
        """Return ``input`` scaled channel-wise by the learned excitation."""
        y = self._pool(input)
        y = fluid.layers.reshape(y, shape=[-1, self._num_channels])
        y = self._fc(y)
        y = self._excitation(y)
        # axis=0 broadcasts the per-channel weights over spatial dims.
        y = fluid.layers.elementwise_mul(x=input, y=y, axis=0)
        return y
class BottleneckBlock(fluid.dygraph.Layer):
    """ResNeXt bottleneck (1x1 -> grouped 3x3 -> 1x1) with an SE block and
    a residual connection; ``shortcut=False`` adds a projection branch."""
    def __init__(self,
                 num_channels,
                 num_filters,
                 stride,
                 cardinality,
                 reduction_ratio,
                 shortcut=True):
        super(BottleneckBlock, self).__init__()
        self.conv0 = ConvBNLayer(
            num_channels=num_channels,
            num_filters=num_filters,
            filter_size=1,
            act="relu")
        # Grouped convolution: ``cardinality`` is the ResNeXt group count.
        self.conv1 = ConvBNLayer(
            num_channels=num_filters,
            num_filters=num_filters,
            filter_size=3,
            stride=stride,
            groups=cardinality,
            act="relu")
        self.conv2 = ConvBNLayer(
            num_channels=num_filters,
            num_filters=num_filters * 2,
            filter_size=1,
            act=None)
        self.scale = SqueezeExcitation(
            num_channels=num_filters * 2, reduction_ratio=reduction_ratio)
        # Projection shortcut matches channels/stride when identity won't fit.
        if not shortcut:
            self.short = ConvBNLayer(
                num_channels=num_channels,
                num_filters=num_filters * 2,
                filter_size=1,
                stride=stride)
        self.shortcut = shortcut
        # Exposed so the caller can chain blocks with matching channels.
        self._num_channels_out = num_filters * 2
    def forward(self, inputs):
        """Residual forward pass: relu(short(inputs) + SE(conv path))."""
        y = self.conv0(inputs)
        conv1 = self.conv1(y)
        conv2 = self.conv2(conv1)
        scale = self.scale(conv2)
        if self.shortcut:
            short = inputs
        else:
            short = self.short(inputs)
        y = fluid.layers.elementwise_add(x=short, y=scale, act='relu')
        return y
class SeResNeXt(fluid.dygraph.Layer):
    """SE-ResNeXt classifier (50/101/152 layers) whose ``forward`` also
    computes loss and top-1/top-5 accuracy; decorated with ``@declarative``
    so it can run under the dygraph-to-static ProgramTranslator."""
    def __init__(self, layers=50, class_dim=102):
        super(SeResNeXt, self).__init__()
        self.layers = layers
        supported_layers = [50, 101, 152]
        assert layers in supported_layers, \
            "supported layers are {} but input layer is {}".format(supported_layers, layers)
        # Each depth variant fixes the stem conv(s), group cardinality and
        # per-stage block counts.
        if layers == 50:
            cardinality = 32
            reduction_ratio = 16
            depth = [3, 4, 6, 3]
            num_filters = [128, 256, 512, 1024]
            self.conv0 = ConvBNLayer(
                num_channels=3,
                num_filters=64,
                filter_size=7,
                stride=2,
                act='relu')
            self.pool = Pool2D(
                pool_size=3, pool_stride=2, pool_padding=1, pool_type='max')
        elif layers == 101:
            cardinality = 32
            reduction_ratio = 16
            depth = [3, 4, 23, 3]
            num_filters = [128, 256, 512, 1024]
            self.conv0 = ConvBNLayer(
                num_channels=3,
                num_filters=64,
                filter_size=7,
                stride=2,
                act='relu')
            self.pool = Pool2D(
                pool_size=3, pool_stride=2, pool_padding=1, pool_type='max')
        elif layers == 152:
            cardinality = 64
            reduction_ratio = 16
            depth = [3, 8, 36, 3]
            num_filters = [128, 256, 512, 1024]
            # The 152-layer variant uses a three-conv stem instead of one 7x7.
            self.conv0 = ConvBNLayer(
                num_channels=3,
                num_filters=64,
                filter_size=3,
                stride=2,
                act='relu')
            self.conv1 = ConvBNLayer(
                num_channels=64,
                num_filters=64,
                filter_size=3,
                stride=1,
                act='relu')
            self.conv2 = ConvBNLayer(
                num_channels=64,
                num_filters=128,
                filter_size=3,
                stride=1,
                act='relu')
            self.pool = Pool2D(
                pool_size=3, pool_stride=2, pool_padding=1, pool_type='max')
        self.bottleneck_block_list = []
        num_channels = 64
        if layers == 152:
            num_channels = 128
        # Build the residual stages; the first block of every stage after
        # stage 0 downsamples (stride 2) and uses a projection shortcut.
        for block in range(len(depth)):
            shortcut = False
            for i in range(depth[block]):
                bottleneck_block = self.add_sublayer(
                    'bb_%d_%d' % (block, i),
                    BottleneckBlock(
                        num_channels=num_channels,
                        num_filters=num_filters[block],
                        stride=2 if i == 0 and block != 0 else 1,
                        cardinality=cardinality,
                        reduction_ratio=reduction_ratio,
                        shortcut=shortcut))
                num_channels = bottleneck_block._num_channels_out
                self.bottleneck_block_list.append(bottleneck_block)
                shortcut = True
        self.pool2d_avg = Pool2D(
            pool_size=7, pool_type='avg', global_pooling=True)
        stdv = 1.0 / math.sqrt(2048 * 1.0)
        # Global pooling collapses spatial dims to 1x1, so the FC input is
        # just the channel count of the last stage.
        self.pool2d_avg_output = num_filters[len(num_filters) - 1] * 2 * 1 * 1
        self.out = Linear(
            self.pool2d_avg_output,
            class_dim,
            param_attr=fluid.param_attr.ParamAttr(
                initializer=fluid.initializer.Uniform(-stdv, stdv)))
    @declarative
    def forward(self, inputs, label):
        """Return (logits, mean cross-entropy loss, top-1 acc, top-5 acc).

        The dropout seed is fixed so static and dygraph runs are comparable.
        """
        if self.layers == 50 or self.layers == 101:
            y = self.conv0(inputs)
            y = self.pool(y)
        elif self.layers == 152:
            y = self.conv0(inputs)
            y = self.conv1(y)
            y = self.conv2(y)
            y = self.pool(y)
        for bottleneck_block in self.bottleneck_block_list:
            y = bottleneck_block(y)
        y = self.pool2d_avg(y)
        y = fluid.layers.dropout(y, dropout_prob=0.5, seed=100)
        y = fluid.layers.reshape(y, shape=[-1, self.pool2d_avg_output])
        out = self.out(y)
        softmax_out = fluid.layers.softmax(out, use_cudnn=False)
        loss = fluid.layers.cross_entropy(input=softmax_out, label=label)
        avg_loss = fluid.layers.mean(x=loss)
        acc_top1 = fluid.layers.accuracy(input=softmax_out, label=label, k=1)
        acc_top5 = fluid.layers.accuracy(input=softmax_out, label=label, k=5)
        return out, avg_loss, acc_top1, acc_top5
def train(train_reader, to_static):
    """Train SeResNeXt for EPOCH_NUM epochs (at most STEP_NUM steps each).

    Args:
        train_reader: batched data reader yielding (image, label) pairs.
        to_static (bool): enable/disable the dygraph-to-static translator,
            so the caller can compare both execution modes.

    Returns:
        tuple: numpy values of (pred, avg_loss, acc_top1, acc_top5) from
        the last executed step.
    """
    program_translator = ProgramTranslator()
    program_translator.enable(to_static)
    np.random.seed(SEED)
    with fluid.dygraph.guard(place):
        # Fix all seeds so static and dygraph runs are bit-comparable.
        fluid.default_startup_program().random_seed = SEED
        fluid.default_main_program().random_seed = SEED
        se_resnext = SeResNeXt()
        optimizer = optimizer_setting(train_parameters, se_resnext.parameters())
        for epoch_id in range(EPOCH_NUM):
            total_loss = 0.0
            total_acc1 = 0.0
            total_acc5 = 0.0
            total_sample = 0
            step_idx = 0
            speed_list = []
            for step_id, data in enumerate(train_reader()):
                dy_x_data = np.array([x[0].reshape(3, 224, 224)
                                      for x in data]).astype('float32')
                y_data = np.array([x[1] for x in data]).astype('int64').reshape(
                    BATCH_SIZE, 1)
                img = to_variable(dy_x_data)
                label = to_variable(y_data)
                label.stop_gradient = True
                pred, avg_loss, acc_top1, acc_top5 = se_resnext(img, label)
                dy_out = avg_loss.numpy()
                avg_loss.backward()
                optimizer.minimize(avg_loss)
                se_resnext.clear_gradients()
                lr = optimizer._global_learning_rate().numpy()
                total_loss += dy_out
                total_acc1 += acc_top1.numpy()
                total_acc5 += acc_top5.numpy()
                total_sample += 1
                # Periodic logging; the first report has no timing baseline,
                # so speed is only reported from the second report onwards.
                if step_id % PRINT_STEP == 0:
                    if step_id == 0:
                        logging.info( "epoch %d | step %d, loss %0.3f, acc1 %0.3f, acc5 %0.3f" % \
                            ( epoch_id, step_id, total_loss / total_sample, \
                              total_acc1 / total_sample, total_acc5 / total_sample))
                        avg_batch_time = time.time()
                    else:
                        speed = PRINT_STEP / (time.time() - avg_batch_time)
                        speed_list.append(speed)
                        logging.info( "epoch %d | step %d, loss %0.3f, acc1 %0.3f, acc5 %0.3f, speed %.3f steps/s" % \
                            ( epoch_id, step_id, total_loss / total_sample, \
                              total_acc1 / total_sample, total_acc5 / total_sample, speed))
                        avg_batch_time = time.time()
                step_idx += 1
                if step_idx == STEP_NUM:
                    break
        return pred.numpy(), avg_loss.numpy(), acc_top1.numpy(), acc_top5.numpy(
        )
class TestSeResnet(unittest.TestCase):
    """Check that static-graph and dygraph training produce equal results."""
    def setUp(self):
        # Cycling reader so both train() calls below can draw STEP_NUM batches.
        self.train_reader = paddle.batch(
            paddle.dataset.flowers.train(
                use_xmap=False, cycle=True),
            batch_size=BATCH_SIZE,
            drop_last=True)
    def test_check_result(self):
        """Train twice (dygraph vs. translated static) and compare outputs."""
        pred_1, loss_1, acc1_1, acc5_1 = train(
            self.train_reader, to_static=False)
        pred_2, loss_2, acc1_2, acc5_2 = train(
            self.train_reader, to_static=True)
        self.assertTrue(
            np.allclose(pred_1, pred_2),
            msg="static pred: {} \ndygraph pred: {}".format(pred_1, pred_2))
        self.assertTrue(
            np.allclose(loss_1, loss_2),
            msg="static loss: {} \ndygraph loss: {}".format(loss_1, loss_2))
        self.assertTrue(
            np.allclose(acc1_1, acc1_2),
            msg="static acc1: {} \ndygraph acc1: {}".format(acc1_1, acc1_2))
        self.assertTrue(
            np.allclose(acc5_1, acc5_2),
            msg="static acc5: {} \ndygraph acc5: {}".format(acc5_1, acc5_2))
if __name__ == '__main__':
unittest.main()
| 34.094891 | 118 | 0.558838 |
acef300d26e6d11f8e5f5e8f1e00c6f9d0d06e80 | 3,253 | py | Python | demo_api/rumour_detection_twitter/model_api.py | jonheng/sgnlp | aeee85b78de2e449ca1dc6b18686a060cb938d07 | [
"MIT"
] | null | null | null | demo_api/rumour_detection_twitter/model_api.py | jonheng/sgnlp | aeee85b78de2e449ca1dc6b18686a060cb938d07 | [
"MIT"
] | null | null | null | demo_api/rumour_detection_twitter/model_api.py | jonheng/sgnlp | aeee85b78de2e449ca1dc6b18686a060cb938d07 | [
"MIT"
] | null | null | null | import pathlib
import json
from flask import Flask, jsonify, request
from sgnlp.models.rumour_detection_twitter import (
RumourDetectionTwitterConfig,
RumourDetectionTwitterModel,
RumourDetectionTwitterTokenizer,
download_tokenizer_files_from_azure,
)
import torch
from torch.nn.functional import softmax
from create_inputs import generate_structure
from download_pretrained import download_tokenizer_files_from_azure
app = Flask(__name__)
config = RumourDetectionTwitterConfig.from_pretrained(
"https://sgnlp.blob.core.windows.net/models/rumour_detection_twitter/config.json"
)
model = RumourDetectionTwitterModel.from_pretrained(
"https://sgnlp.blob.core.windows.net/models/rumour_detection_twitter/pytorch_model.bin",
config=config,
)
download_tokenizer_files_from_azure(
"https://sgnlp.blob.core.windows.net/models/rumour_detection_twitter/",
"rumour_tokenizer",
)
tokenizer = RumourDetectionTwitterTokenizer.from_pretrained("rumour_tokenizer")
id_to_string = {
0: "a false rumour",
1: "a true rumour",
2: "an unverified rumour",
3: "a non-rumour",
}
@app.route("/model-card", methods=["GET"])
def get_model_card():
    """Serve the model card (GET).

    Returns:
        json: the model card contents as a JSON response.
    """
    card_path = pathlib.Path(__file__).parent / "model_card/rumour.json"
    card = json.loads(card_path.read_text())
    return jsonify(**card)
@app.route("/predict", methods=["POST"])
def predict():
    """POST method to run inference against the rumour detection model.

    Expects a JSON body with a ``tweets`` list (one thread).

    Returns:
        JSON: ``predicted_y`` (human-readable class string) and
        ``predicted_prob`` (highest softmax probability, rounded to 1 d.p.).
    """
    # Generate the inputs in the correct formats; the thread is wrapped in a
    # list because the model operates on batches.
    tweet_lst = request.get_json()["tweets"]
    thread_len = len(tweet_lst)
    token_ids, token_attention_mask = tokenizer.tokenize_threads(
        [tweet_lst],
        max_length=config.max_length,
        max_posts=config.max_tweets,
        truncation=True,
        padding="max_length",
    )
    # Structural/temporal inputs are synthesised from the thread length only.
    time_delay_ids, structure_ids, post_attention_mask = generate_structure(
        thread_len=thread_len, max_posts=config.max_tweets
    )
    token_ids = torch.LongTensor(token_ids)
    token_attention_mask = torch.Tensor(token_attention_mask)
    time_delay_ids = torch.LongTensor(time_delay_ids)
    post_attention_mask = torch.Tensor(post_attention_mask)
    structure_ids = torch.LongTensor(structure_ids)
    # Get the raw logits of predictions. Note that the model assumes the input exists as a batch. The returned outputs will be for a batch too.
    logits = model(
        token_ids=token_ids,
        time_delay_ids=time_delay_ids,
        structure_ids=structure_ids,
        token_attention_mask=token_attention_mask,
        post_attention_mask=post_attention_mask,
    ).logits
    # Convert the outputs into the format the frontend accepts: map the
    # argmax class id to its label and report the top probability.
    probabilities = softmax(logits, dim=1)
    predicted_y = torch.argmax(logits, dim=1)[0]
    predicted_y = id_to_string[int(predicted_y)]
    predicted_prob = round(float(torch.max(probabilities)), 1)
    return jsonify({"predicted_y": predicted_y, "predicted_prob": predicted_prob})
if __name__ == "__main__":
app.run(host="0.0.0.0")
| 31.892157 | 143 | 0.737166 |
acef3105984f4afac6c533616a3abc0a53af2d6a | 1,046 | py | Python | tests/cell_fabric/test_lef_parser.py | mabrains/ALIGN-public | 9a6c14310de13df369a8340f465911b629f15a3f | [
"BSD-3-Clause"
] | 119 | 2019-05-14T18:44:34.000Z | 2022-03-17T01:01:02.000Z | tests/cell_fabric/test_lef_parser.py | mabrains/ALIGN-public | 9a6c14310de13df369a8340f465911b629f15a3f | [
"BSD-3-Clause"
] | 717 | 2019-04-03T15:36:35.000Z | 2022-03-31T21:56:47.000Z | tests/cell_fabric/test_lef_parser.py | mabrains/ALIGN-public | 9a6c14310de13df369a8340f465911b629f15a3f | [
"BSD-3-Clause"
] | 34 | 2019-04-01T21:21:27.000Z | 2022-03-21T09:46:57.000Z |
from align.cell_fabric import lef_parser
from align.cell_fabric import lef_to_json
import pathlib
cd = pathlib.Path(__file__).resolve().parent
def test_flash_adc_lefparser():
    """Parse the flash-ADC sample LEF and check the macro inventory."""
    text = (cd / "__lef_flash_adc_3bit").read_text()
    parser = lef_parser.LEFParser()
    parser.parse(text)
    macros = {}
    for macro in parser.macros:
        # Macro names must be unique within the file.
        assert macro.macroName not in macros
        macros[macro.macroName] = macro
    for expected in ('Res_4200', 'Res_8000', 'CMC_PMOS_S_n12_X1_Y1'):
        assert expected in macros
    assert len(macros) == 12
def test_capacitor_lefparser():
    """Parse the capacitor sample LEF and check its single macro."""
    text = (cd / "__lef_capacitor").read_text()
    parser = lef_parser.LEFParser()
    parser.parse(text)
    macros = {}
    for macro in parser.macros:
        # Macro names must be unique within the file.
        assert macro.macroName not in macros
        macros[macro.macroName] = macro
    assert 'cap_12f' in macros
    assert len(macros) == 1
def test_capacitor_lef_to_json():
    # Smoke test: the LEF-to-JSON conversion should run without raising.
    lef_to_json.lef_to_json( cd / "__lef_capacitor")
| 24.904762 | 56 | 0.610899 |
acef31267d518830c245bbce43d7392821e75e31 | 3,424 | py | Python | src/annalist_root/annalist/tests/test_render_file_upload.py | gklyne/annalist | 82e7ef2d56a400325e7618fa9e590072ee8a71d3 | [
"MIT"
] | 18 | 2015-02-20T23:09:13.000Z | 2020-11-13T06:06:43.000Z | src/annalist_root/annalist/tests/test_render_file_upload.py | gklyne/annalist | 82e7ef2d56a400325e7618fa9e590072ee8a71d3 | [
"MIT"
] | 30 | 2015-01-03T09:56:28.000Z | 2021-06-10T20:58:55.000Z | src/annalist_root/annalist/tests/test_render_file_upload.py | gklyne/annalist | 82e7ef2d56a400325e7618fa9e590072ee8a71d3 | [
"MIT"
] | 5 | 2015-02-02T09:01:23.000Z | 2018-06-14T20:05:28.000Z | """
Tests for file upload field; view as hyperlink.
"""
from __future__ import unicode_literals
from __future__ import absolute_import, division, print_function
__author__ = "Graham Klyne (GK@ACM.ORG)"
__copyright__ = "Copyright 2015, G. Klyne"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
import logging
log = logging.getLogger(__name__)
import sys
import os
import unittest
import re
from annalist.views.fields.render_file_upload import (
get_file_upload_renderer,
FileUploadValueMapper
)
from .field_rendering_support import FieldRendererTestSupport
class FileUploadRenderingTest(FieldRendererTestSupport):
    """Tests for rendering and decoding of file-upload field values."""
    def setUp(self):
        return
    def tearDown(self):
        return
    # Rendering test
    def test_RenderFileUploadValue(self):
        """Check the HTML emitted for view and edit modes of an upload field."""
        def expect_render(filename, labeltext):
            # Build the expected view/edit HTML for a given file value.
            # render_view = '''<a href="%s" target="_blank">%s</a>'''%(filename, labeltext)
            # render_edit = (
            #     '''<input type="file" name="repeat_prefix_test_field" '''+
            #     '''placeholder="(test placeholder)" '''+
            #     '''value="%s" /> '''
            #     )%filename
            render_view = (
                '''Uploaded file <a href="%s" target="_blank">%s</a>'''
                )%(filename, labeltext)
            render_edit = (
                '''<input type="file" name="repeat_prefix_test_field" '''+
                '''placeholder="(test placeholder)" '''+
                '''value="%s" /> '''+
                '''Previously uploaded: %s '''
                )%(filename, labeltext)
            return {'view': render_view, 'edit': render_edit}
        def upload_file_value(file):
            # Field value structure used by the upload renderer.
            return {'resource_name': file, 'uploaded_file': "uploaded.ext"}
        test_values = (
            [ (upload_file_value("testfile.ext"), "uploaded.ext")
            ])
        test_value_context_renders = (
            [ (self._make_test_context(filename), expect_render(filename['resource_name'], labeltext))
              for filename, labeltext in test_values
            ])
        renderer = get_file_upload_renderer()
        for render_context, expect_render in test_value_context_renders:
            # print repr(render_context['field']['field_value'])
            self._check_value_renderer_results(
                renderer,
                context=render_context,
                expect_rendered_view=expect_render['view'],
                expect_rendered_edit=expect_render['edit'],
                collapse_whitespace=True
                )
        return
    def test_DecodeFileUploadValue(self):
        """Check decoding of submitted form text into stored field values."""
        test_decode_values = (
            { None: ""
            , "testfile.ext": "testfile.ext"
            })
        for valtext, expect_valdata in test_decode_values.items():
            valdata = FileUploadValueMapper.decode(valtext)
            self.assertEqual(
                valdata, expect_valdata,
                "Value decode(%s) = %r, expect %r"%(valtext, valdata, expect_valdata)
                )
        return
# End.
if __name__ == "__main__":
# import django
# django.setup() # Needed for template loader
# Runtests in this module
# runner = unittest.TextTestRunner(verbosity=2)
# tests = unittest.TestSuite()
# tests = getSuite(select=sel)
# if tests: runner.run(tests)
unittest.main()
| 32.923077 | 103 | 0.58236 |
acef31440e1bd9db569f2447d16b3ccca6bc49f8 | 5,114 | py | Python | torch_autoencoder.py | stas00/cs224u | 0db2923dd55905bb9c88cec0a2cad5e0d411c9f1 | [
"Apache-2.0"
] | null | null | null | torch_autoencoder.py | stas00/cs224u | 0db2923dd55905bb9c88cec0a2cad5e0d411c9f1 | [
"Apache-2.0"
] | 1 | 2020-02-23T05:11:20.000Z | 2020-02-23T05:11:20.000Z | torch_autoencoder.py | stas00/cs224u | 0db2923dd55905bb9c88cec0a2cad5e0d411c9f1 | [
"Apache-2.0"
] | 1 | 2021-01-04T19:22:27.000Z | 2021-01-04T19:22:27.000Z | import pandas as pd
import torch
import torch.nn as nn
import torch.utils.data
from torch_model_base import TorchModelBase
from utils import progress_bar
__author__ = "Christopher Potts"
__version__ = "CS224u, Stanford, Spring 2020"
class TorchAutoencoder(TorchModelBase):
    """A simple autoencoder. The graph and parameters are identical
    to those of the `TorchShallowNeuralClassifier`. The changes are that
    the outputs are identical to the inputs, and we use a squared-error
    loss function.
    Parameters
    ----------
    hidden_dim : int
        Dimensionality of the hidden layer.
    hidden_activation : vectorized activation function
        The non-linear activation function used by the network for the
        hidden layer. Default `nn.Tanh()`.
    max_iter : int
        Maximum number of training epochs.
    eta : float
        Learning rate.
    optimizer : PyTorch optimizer
        Default is `torch.optim.Adam`.
    l2_strength : float
        L2 regularization strength. Default 0 is no regularization.
    device : 'cpu' or 'cuda'
        The default is to use 'cuda' iff available
    warm_start : bool
        If True, calling `fit` will resume training with previously
        defined trainable parameters. If False, calling `fit` will
        reinitialize all trainable parameters. Default: False.
    """
    def __init__(self, **kwargs):
        super(TorchAutoencoder, self).__init__(**kwargs)
    def define_graph(self):
        """Return the encoder/decoder network: Linear -> activation -> Linear."""
        return nn.Sequential(
            nn.Linear(self.input_dim_, self.hidden_dim),
            self.hidden_activation,
            nn.Linear(self.hidden_dim, self.output_dim_))
    def fit(self, X):
        """Returns the matrix of hidden representations.
        Parameters
        ----------
        X : np.array or pd.DataFrame
        Returns
        -------
        np.array or pd.DataFrame (depending on the nature of the input)
            This will have shape `(len(X), self.hidden_dim)`.
        """
        # Data prep: an autoencoder's output dimension equals its input's.
        self.input_dim_ = X.shape[1]
        self.output_dim_ = X.shape[1]
        # Dataset: inputs double as targets.
        X_tensor = self.convert_input_to_tensor(X)
        dataset = torch.utils.data.TensorDataset(X_tensor, X_tensor)
        dataloader = torch.utils.data.DataLoader(
            dataset, batch_size=self.batch_size, shuffle=True,
            pin_memory=True)
        # Graph (rebuilt unless warm-starting from an existing model):
        if not self.warm_start or not hasattr(self, "model"):
            self.model = self.define_graph()
        self.model.to(self.device)
        self.model.train()
        # Optimization:
        loss = nn.MSELoss()
        optimizer = self.optimizer(self.model.parameters(), lr=self.eta)
        # Train:
        for iteration in range(1, self.max_iter+1):
            epoch_error = 0.0
            for X_batch, y_batch in dataloader:
                X_batch = X_batch.to(self.device, non_blocking=True)
                y_batch = y_batch.to(self.device, non_blocking=True)
                batch_preds = self.model(X_batch)
                err = loss(batch_preds, y_batch)
                epoch_error += err.item()
                optimizer.zero_grad()
                err.backward()
                optimizer.step()
            self.errors.append(epoch_error)
            # NOTE(review): the progress bar reports `err` (last batch's
            # loss), not `epoch_error` — matches the upstream code, but the
            # two differ whenever there is more than one batch.
            progress_bar(
                "Finished epoch {} of {}; error is {}".format(
                    iteration, self.max_iter, err))
        # Hidden representations, computed on CPU (the model stays on CPU
        # afterwards; `predict` also moves it to CPU).
        with torch.no_grad():
            self.model.to('cpu')
            H = self.model[1](self.model[0](X_tensor))
            return self.convert_output(H, X)
    def predict(self, X):
        """Returns the reconstructed matrix.
        Parameters
        ----------
        X : np.array or pd.DataFrame
        Returns
        -------
        np.array or pd.DataFrame (depending on the nature of the input)
            This will have the same shape as `X`.
        """
        self.model.eval()
        with torch.no_grad():
            X_tensor = self.convert_input_to_tensor(X)
            self.model.to('cpu')
            X_pred = self.model(X_tensor)
            return self.convert_output(X_pred, X)
    def convert_input_to_tensor(self, X):
        """Convert array/DataFrame `X` to a float tensor (index dropped)."""
        if isinstance(X, pd.DataFrame):
            X = X.values
        X = torch.tensor(X, dtype=torch.float)
        return X
    @staticmethod
    def convert_output(X_pred, X):
        """Return `X_pred` as the same container type as `X`, restoring the
        DataFrame index when `X` was a DataFrame."""
        X_pred = X_pred.cpu().numpy()
        if isinstance(X, pd.DataFrame):
            X_pred = pd.DataFrame(X_pred, index=X.index)
        return X_pred
return X_pred
def simple_example():
    """Fit the autoencoder on a random low-rank matrix and report the MSE."""
    import numpy as np
    np.random.seed(seed=42)
    def make_matrix(rows, cols, sigma=0.1, mu=0):
        # Gaussian matrix with mean `mu` and scale `sigma`.
        return sigma * np.random.randn(rows, cols) + mu
    rank, nrow, ncol = 20, 1000, 100
    # Product of two thin Gaussian factors => a rank-`rank` matrix.
    X = make_matrix(nrow, rank).dot(make_matrix(rank, ncol))
    ae = TorchAutoencoder(hidden_dim=rank, max_iter=200)
    H = ae.fit(X)
    X_pred = ae.predict(X)
    mse = (0.5*(X_pred - X)**2).mean()
    print("\nMSE between actual and reconstructed: {0:0.06f}".format(mse))
    print("Hidden representations")
    print(H)
    return mse
if __name__ == '__main__':
simple_example()
| 31.763975 | 74 | 0.604419 |
acef3202d18f92d7af1433ea99148d660d5d083e | 32,554 | py | Python | diofant/tests/polys/test_factortools.py | diofant/diofant | 0677d240eb5de697f851c6c844fefc8039754edc | [
"BSD-3-Clause"
] | 57 | 2016-09-13T23:16:26.000Z | 2022-03-29T06:45:51.000Z | diofant/tests/polys/test_factortools.py | diofant/diofant | 0677d240eb5de697f851c6c844fefc8039754edc | [
"BSD-3-Clause"
] | 402 | 2016-05-11T11:11:47.000Z | 2022-03-31T14:27:02.000Z | diofant/tests/polys/test_factortools.py | diofant/diofant | 0677d240eb5de697f851c6c844fefc8039754edc | [
"BSD-3-Clause"
] | 20 | 2016-05-11T08:17:37.000Z | 2021-09-10T09:15:51.000Z | """Tests for polynomial factorization routines."""
import functools
import operator
import pytest
from diofant import (EX, FF, QQ, RR, ZZ, DomainError, ExtraneousFactors, I,
nextprime, pi, ring, sin, sqrt)
from diofant.config import using
from diofant.polys.specialpolys import f_polys, w_polys
__all__ = ()
f_0, f_1, f_2, f_3, f_4, f_5, f_6 = f_polys()
w_1, w_2 = w_polys()
def test__trial_division():
    # Factor (x + 1)**2 * (x + 2)**3 by trial division, first in ZZ[x],
    # then with the same polynomial embedded in ZZ[x,y].
    for gens in ('x', 'x y'):
        R, x, *_ = ring(gens, ZZ)
        f = x**5 + 8*x**4 + 25*x**3 + 38*x**2 + 28*x + 8
        assert R._trial_division(f, (x + 1, x + 2)) == [(x + 1, 2), (x + 2, 3)]
def test__zz_mignotte_bound():
    # The Mignotte bound of 2*x**2 + 3*x + 4 is 32, both in the univariate
    # and the multivariate implementation.
    for gens in ('x', 'x y'):
        R, x, *_ = ring(gens, ZZ)
        assert R._zz_mignotte_bound(2*x**2 + 3*x + 4) == 32
def test__zz_hensel_step():
    # One Hensel lifting step (mod 5 -> mod 25) for f = x**4 - 1 with the
    # cofactor pair (g, h) and Bezout pair (s, t).
    R, x = ring('x', ZZ)
    f = x**4 - 1
    g, h = x**3 + 2*x**2 - x - 2, x - 2
    s, t = R(-2), 2*x**2 - 2*x - 1
    G, H, S, T = R._zz_hensel_step(5, f, g, h, s, t)
    assert G == x**3 + 7*x**2 - x - 7
    assert H == x - 7
    assert S == 8
    assert T == -8*x**2 - 12*x - 1
def test__zz_hensel_lift():
    # Lift the mod-5 factorization of x**4 - 1 to mod 5**4.
    R, x = ring('x', ZZ)
    factors = [x - 1, x - 2, x + 2, x + 1]
    lifted = R._zz_hensel_lift(ZZ(5), x**4 - 1, factors, 4)
    assert lifted == [x - 1, x - 182, x + 182, x + 1]
def test__cyclotomic_p():
    """Tests for the cyclotomic-polynomial recognition predicate."""
    R, x = ring('x', ZZ)
    # Positive cases over ZZ[x].
    assert (x - 1).is_cyclotomic is True
    assert (x + 1).is_cyclotomic is True
    assert (x**2 + x + 1).is_cyclotomic is True
    f = x**2 + 1
    assert f.is_cyclotomic is True
    assert R._cyclotomic_p(f, irreducible=True) is True
    assert (x**4 + x**3 + x**2 + x + 1).is_cyclotomic is True
    assert (x**2 - x + 1).is_cyclotomic is True
    assert (x**6 + x**5 + x**4 + x**3 + x**2 + x + 1).is_cyclotomic is True
    assert (x**4 + 1).is_cyclotomic is True
    assert (x**6 + x**3 + 1).is_cyclotomic is True
    # Negative cases over ZZ[x].
    assert R(0).is_cyclotomic is False
    assert R(1).is_cyclotomic is False
    assert x.is_cyclotomic is False
    assert (x + 2).is_cyclotomic is False
    assert (3*x + 1).is_cyclotomic is False
    assert (x**2 - 1).is_cyclotomic is False
    # A near-miss pair: only the second is cyclotomic.
    f = x**16 + x**14 - x**10 - x**6 + x**2 + 1
    assert (f + x**8).is_cyclotomic is False
    assert (f - x**8).is_cyclotomic is True
    # Over QQ[x] only integer-coefficient cyclotomics qualify.
    R, x = ring('x', QQ)
    assert (x**2 + x + 1).is_cyclotomic is True
    assert (x**2/2 + x + 1).is_cyclotomic is False
    # Never cyclotomic over a non-numeric ground domain.
    R, x = ring('x', ZZ.inject('y'))
    assert (x**2 + x + 1).is_cyclotomic is False
def test__zz_cyclotomic_poly():
    # The n-th cyclotomic polynomial for n = 1..9, table-driven.
    R, x = ring('x', ZZ)
    expected = {
        1: x - 1,
        2: x + 1,
        3: x**2 + x + 1,
        4: x**2 + 1,
        5: x**4 + x**3 + x**2 + x + 1,
        6: x**2 - x + 1,
        7: x**6 + x**5 + x**4 + x**3 + x**2 + x + 1,
        8: x**4 + 1,
        9: x**6 + x**3 + 1,
    }
    for n, poly in expected.items():
        assert R._zz_cyclotomic_poly(n) == poly
def test__zz_cyclotomic_factor():
    """Tests for the special-case factorizer of x**n +/- 1 polynomials."""
    R, x = ring('x', ZZ)
    # None signals "not of the form c*(x**n +/- 1)"; fall back to the
    # general algorithm in that case.
    assert R._zz_cyclotomic_factor(R(0)) is None
    assert R._zz_cyclotomic_factor(R(1)) is None
    assert R._zz_cyclotomic_factor(2*x**10 - 1) is None
    assert R._zz_cyclotomic_factor(x**10 - 3) is None
    assert R._zz_cyclotomic_factor(x**10 + x**5 - 1) is None
    # Factorizations of x**n +/- 1 into cyclotomic polynomials.
    assert R._zz_cyclotomic_factor(x + 1) == [x + 1]
    assert R._zz_cyclotomic_factor(x - 1) == [x - 1]
    assert R._zz_cyclotomic_factor(x**2 + 1) == [x**2 + 1]
    assert R._zz_cyclotomic_factor(x**2 - 1) == [x - 1, x + 1]
    assert R._zz_cyclotomic_factor(x**27 + 1) == [x + 1, x**2 - x + 1,
                                                  x**6 - x**3 + 1,
                                                  x**18 - x**9 + 1]
    assert R._zz_cyclotomic_factor(x**27 - 1) == [x - 1, x**2 + x + 1,
                                                  x**6 + x**3 + 1,
                                                  x**18 + x**9 + 1]
def test_dup_zz_factor():
    """Factorization of univariate polynomials over ZZ."""
    R, x = ring('x', ZZ)
    # Constants factor trivially.
    assert R(0).factor_list() == (0, [])
    assert R(7).factor_list() == (7, [])
    assert R(-7).factor_list() == (-7, [])
    assert R._zz_factor_sqf(R(0)) == (0, [])
    assert R._zz_factor_sqf(R(7)) == (7, [])
    assert R._zz_factor_sqf(R(-7)) == (-7, [])
    assert (2*x + 4).factor_list() == (2, [(x + 2, 1)])
    assert R._zz_factor_sqf(2*x + 4) == (2, [x + 2])
    # Irreducible polynomial; repeated to exercise randomized code paths.
    f = x**4 + x + 1
    for _ in range(20):
        assert f.factor_list() == (1, [(f, 1)])
    f = x**5 - x**3 - x**2 + 1
    assert f.factor_list() == (1, [(x + 1, 1), (x - 1, 2), (x**2 + x + 1, 1)])
    # Same results whether the irreducibility pre-test is enabled or not.
    for test in (True, False):
        with using(use_irreducible_in_factor=test):
            assert (x**2 + 2*x + 2).factor_list() == (1, [(x**2 + 2*x + 2, 1)])
            assert (18*x**2 + 12*x + 2).factor_list() == (2, [(3*x + 1, 2)])
            f = -9*x**2 + 1
            assert R._zz_factor_sqf(f) == (-1, [3*x - 1, 3*x + 1])
            assert f.factor_list() == (-1, [(3*x - 1, 1), (3*x + 1, 1)])
    assert R._zz_factor_sqf(3*x**4 + 2*x**3 + 6*x**2 +
                            8*x + 10) == (1, [3*x**4 + 2*x**3 +
                                              6*x**2 + 8*x + 10])
    # Same factorization without the cyclotomic shortcut.
    with using(use_cyclotomic_factor=False):
        assert R._zz_factor_sqf(-9*x**2 + 1) == (-1, [3*x - 1, 3*x + 1])
    assert (x**3 - 6*x**2 + 11*x - 6).factor_list() == (1, [(x - 3, 1),
                                                            (x - 2, 1),
                                                            (x - 1, 1)])
    assert R._zz_factor_sqf(x**3 - 6*x**2 + 11*x - 6) == (1, [x - 3, x - 2,
                                                              x - 1])
    assert (3*x**3 + 10*x**2 + 13*x +
            10).factor_list() == (1, [(x + 2, 1), (3*x**2 + 4*x + 5, 1)])
    assert R._zz_factor_sqf(3*x**3 + 10*x**2 +
                            13*x + 10) == (1, [x + 2, 3*x**2 + 4*x + 5])
    assert (-x**6 + x**2).factor_list() == (-1, [(x, 2), (x - 1, 1), (x + 1, 1),
                                                 (x**2 + 1, 1)])
    f = (1080*x**8 + 5184*x**7 + 2099*x**6 + 744*x**5 + 2736*x**4 -
         648*x**3 + 129*x**2 - 324)
    assert f.factor_list() == (1, [(216*x**4 + 31*x**2 - 27, 1),
                                   (5*x**4 + 24*x**3 + 9*x**2 + 12, 1)])
    # Huge-coefficient input exercises the bignum code paths.
    f = (-29802322387695312500000000000000000000*x**25 +
         2980232238769531250000000000000000*x**20 +
         1743435859680175781250000000000*x**15 +
         114142894744873046875000000*x**10 - 210106372833251953125*x**5 +
         + 95367431640625)
    assert (f.factor_list() ==
            (-95367431640625,
             [(5*x - 1, 1), (100*x**2 + 10*x - 1, 2),
              (625*x**4 + 125*x**3 + 25*x**2 + 5*x + 1, 1),
              (10000*x**4 - 3000*x**3 + 400*x**2 - 20*x + 1, 2),
              (10000*x**4 + 2000*x**3 + 400*x**2 + 30*x + 1, 2)]))
    f = x**10 - 1
    # x**10 +/- 1 factor the same with and without the cyclotomic shortcut.
    for test in (True, False):
        with using(use_cyclotomic_factor=test):
            f = x**10 - 1
            assert f.factor_list() == (1, [(x - 1, 1), (x + 1, 1),
                                           (x**4 - x**3 + x**2 - x + 1, 1),
                                           (x**4 + x**3 + x**2 + x + 1, 1)])
            f = x**10 + 1
            assert f.factor_list() == (1, [(x**2 + 1, 1),
                                           (x**8 - x**6 + x**4 - x**2 + 1, 1)])
def test__zz_wang():
    """Step-by-step test of Wang's EEZ multivariate factorization on w_1."""
    R, x, y, z = ring('x y z', ZZ)
    UV, _x = ring('x', ZZ)
    # Working prime must exceed the Mignotte bound of w_1.
    p = ZZ(nextprime(R._zz_mignotte_bound(w_1)))
    assert p == 6291469
    # Candidate divisors t_i with multiplicities k_i and evaluations e_i.
    t_1, k_1, e_1 = y, 1, ZZ(-14)
    t_2, k_2, e_2 = z, 2, ZZ(3)
    t_3, k_3, e_3 = y + z, 2, ZZ(-11)
    t_4, k_4, e_4 = y - z, 1, ZZ(-17)
    T = [t_1, t_2, t_3, t_4]
    K = [k_1, k_2, k_3, k_4]
    E = [e_1, e_2, e_3, e_4]
    T = list(zip([t.drop(x) for t in T], K))
    # Evaluation point (y, z) = (-14, 3) reduces w_1 to a univariate image.
    A = [ZZ(-14), ZZ(3)]
    S = w_1.eval([(y, A[0]), (z, A[1])])
    cs, s = S.primitive()
    assert cs == 1 and s == S == (1036728*_x**6 + 915552*_x**5 + 55748*_x**4 +
                                  105621*_x**3 - 17304*_x**2 - 26841*_x - 644)
    assert R._zz_wang_non_divisors(E, cs, ZZ(4)) == [7, 3, 11, 17]
    assert s.is_squarefree and s.degree() == w_1.degree()
    # Factor the univariate image, distribute leading coefficients and lift.
    _, H = UV._zz_factor_sqf(s)
    h_1 = 187*_x**2 - 23
    h_2 = 44*_x**2 + 42*_x + 1
    h_3 = 126*_x**2 - 9*_x + 28
    LC = [lc.drop(x) for lc in [y**2 - z**2, -4*y - 4*z, -y*z**2]]
    factors = R._zz_wang_hensel_lifting(w_1, H, LC, A, p)
    assert H == [h_1, h_2, h_3]
    assert R._zz_wang_lead_coeffs(w_1, T, cs, E, H, A) == (w_1, H, LC)
    assert functools.reduce(operator.mul, factors) == w_1
    # coverage tests
    f = x**6 + 5*x**4*y - 5*x**2*y**2 - y**3
    assert R._zz_wang(f, mod=4, seed=1) == [x**2 - y, x**4 + 6*x**2*y + y**2]
    # This tests a bug in the Wang algorithm that occurred only with a very
    # specific set of random numbers; issue sympy/sympy#6355.
    random_sequence = [-1, -1, 0, 0, 0, 0, -1, -1, 0, -1, 3, -1, 3, 3, 3,
                       3, -1, 3]
    R, x, y, z = ring('x y z', ZZ)
    f = 2*x**2 + y*z - y - z**2 + z
    assert R._zz_wang(f, seed=random_sequence) == [f]
    # Without restarts the same sequence must raise ExtraneousFactors.
    with using(eez_restart_if_needed=False):
        pytest.raises(ExtraneousFactors,
                      lambda: R._zz_wang(f, seed=random_sequence))
def test__zz_diophantine():
    """Multivariate polynomial diophantine solver used by Hensel lifting."""
    R, x, y = ring('x y', ZZ)
    # Factor lists H_i, targets c_i, evaluation points and p-adic precision.
    H_1 = [44*x**2 + 42*x + 1, 126*x**2 - 9*x + 28, 187*x**2 - 23]
    H_2 = [-4*x**2*y - 12*x**2 - 3*x*y + 1, -9*x**2*y - 9*x - 2*y,
           x**2*y**2 - 9*x**2 + y - 9]
    H_3 = [-4*x**2*y - 12*x**2 - 3*x*y + 1, -9*x**2*y - 9*x - 2*y,
           x**2*y**2 - 9*x**2 + y - 9]
    c_1 = -70686*x**5 - 5863*x**4 - 17826*x**3 + 2009*x**2 + 5031*x + 74
    c_2 = (9*x**5*y**4 + 12*x**5*y**3 - 45*x**5*y**2 - 108*x**5*y -
           324*x**5 + 18*x**4*y**3 - 216*x**4*y**2 - 810*x**4*y +
           2*x**3*y**4 + 9*x**3*y**3 - 252*x**3*y**2 - 288*x**3*y -
           945*x**3 - 30*x**2*y**2 - 414*x**2*y + 2*x*y**3 -
           54*x*y**2 - 3*x*y + 81*x + 12*y)
    c_3 = (-36*x**4*y**2 - 108*x**4*y - 27*x**3*y**2 - 36*x**3*y -
           108*x**3 - 8*x**2*y**2 - 42*x**2*y - 6*x*y**2 + 9*x + 2*y)
    p = 6291469
    assert R._zz_diophantine(H_1, c_1, [ZZ(0)], 5, p) == [-3*x, -2, 1]
    assert R._zz_diophantine(H_2, c_2, [ZZ(-14)], 5, p) == [-x*y, -3*x, -6]
    assert R._zz_diophantine(H_3, c_3, [ZZ(-14)], 5, p) == [0, 0, -1]
    # Trivariate case with a larger prime.
    R, x, y, z = ring('x y z', ZZ)
    F = [47*x*y + 9*z**3 - 9, 45*x**3 - 9*y**3 - y**2 + 3*z**3 - 6*z]
    c = (-270*x**3*z**3 + 270*x**3 + 94*x*y*z + 54*y**3*z**3 - 54*y**3 +
         6*y**2*z**3 - 6*y**2 - 18*z**6 + 54*z**4 + 18*z**3 - 54*z)
    p = 2345258188817
    assert R._zz_diophantine(F, c, [ZZ(-2), ZZ(0)], 6,
                             p) == [-6*z**3 + 6, 2*z]
def test_dmp_zz_factor():
    """Factorization of multivariate polynomials over ZZ."""
    R, x = ring('x', ZZ)
    assert R(0).factor_list() == (0, [])
    assert R(7).factor_list() == (7, [])
    assert R(-7).factor_list() == (-7, [])
    assert (x**2 - 9).factor_list() == (1, [(x - 3, 1), (x + 3, 1)])
    # Bivariate cases.
    R, x, y = ring('x y', ZZ)
    assert R(0).factor_list() == (0, [])
    assert R(7).factor_list() == (7, [])
    assert R(-7).factor_list() == (-7, [])
    assert x.factor_list() == (1, [(x, 1)])
    assert (4*x).factor_list() == (4, [(x, 1)])
    assert (4*x + 2).factor_list() == (2, [(2*x + 1, 1)])
    assert (x*y + 1).factor_list() == (1, [(x*y + 1, 1)])
    assert (y**2 + 1).factor_list() == (1, [(y**2 + 1, 1)])
    assert (y**2 - 1).factor_list() == (1, [(y - 1, 1), (y + 1, 1)])
    assert (x**2*y**2 + 6*x**2*y +
            9*x**2 - 1).factor_list() == (1, [(x*y + 3*x - 1, 1),
                                              (x*y + 3*x + 1, 1)])
    assert (x**2*y**2 - 9).factor_list() == (1, [(x*y - 3, 1), (x*y + 3, 1)])
    f = (-12*x**16*y + 240*x**12*y**3 - 768*x**10*y**4 + 1080*x**8*y**5 -
         768*x**6*y**6 + 240*x**4*y**7 - 12*y**9)
    assert f.factor_list() == (-12, [(y, 1), (x**2 - y, 6),
                                     (x**4 + 6*x**2*y + y**2, 1)])
    # Trivariate cases, including the f_i/w_1 reference polynomials.
    R, x, y, z = ring('x y z', ZZ)
    assert (x**2*y**2*z**2 - 9).factor_list() == (1, [(x*y*z - 3, 1),
                                                      (x*y*z + 3, 1)])
    assert f_1.factor_list() == (1, [(x*y + z + 10, 1), (x + y*z + 20, 1),
                                     (x*z + y + 30, 1)])
    assert f_2.factor_list() == (1, [(x**3*y + x**3*z + z - 11, 1),
                                     (x**2*y**2 + x**2*z**2 + y + 90, 1)])
    assert f_3.factor_list() == (1, [(x**2*y**2 + x*z**4 + x + z, 1),
                                     (x**3 + x*y*z + y**2 + y*z**3, 1)])
    assert f_4.factor_list() == (-1, [(x*y**3 + z**2, 1), (x**3*y**4 + z**2, 1),
                                      (x**3*y - z**2 - 3, 1),
                                      (x**2*z + y**4*z**2 + 5, 1)])
    assert f_5.factor_list() == (-1, [(x + y - z, 3)])
    assert w_1.factor_list() == (1, [(x**2*y*z**2 + 3*x*z + 2*y, 1),
                                     (4*x**2*y + 4*x**2*z + x*y*z - 1, 1),
                                     (x**2*y**2 - x**2*z**2 + y - z**2, 1)])
    # Four variables.
    R, x, y, z, t = ring('x y z t', ZZ)
    assert (x**2*y**2*z**2*t**2 - 9).factor_list() == (1, [(x*y*z*t - 3, 1),
                                                           (x*y*z*t + 3, 1)])
    assert f_6.factor_list() == (1, [(47*x*y + z**3*t**2 - t**2, 1),
                                     (45*x**3 - 9*y**3 - y**2 + 3*z**3 +
                                      2*z*t, 1)])
@pytest.mark.parametrize('method', ('modular', 'trager'))
def test_dmp_ext_factor(method):
with using(aa_factor_method=method):
R, x = ring('x', QQ.algebraic_field(I))
assert R(0).factor_list() == (0, [])
assert (x + 1).factor_list() == (1, [(x + 1, 1)])
assert (2*x + 2).factor_list() == (2, [(x + 1, 1)])
assert (7*x**4 + 1).factor_list() == (7, [(x**4 + QQ(1, 7), 1)])
assert (x**4 + 1).factor_list() == (1, [(x**2 - I, 1), (x**2 + I, 1)])
assert (4*x**2 + 9).factor_list() == (4, [(x - 3*I/2, 1), (x + 3*I/2, 1)])
assert (4*x**4 + 8*x**3 + 77*x**2 + 18*x +
153).factor_list() == (4, [(x - 3*I/2, 1), (x + 1 + 4*I, 1),
(x + 1 - 4*I, 1), (x + 3*I/2, 1)])
assert (x**2 + 1).factor_list() == (1, [(x - I, 1), (x + I, 1)])
R, x = ring('x', QQ.algebraic_field(sqrt(2)))
assert (x**4 + 1).factor_list() == (1, [(x**2 - sqrt(2)*x + 1, 1),
(x**2 + sqrt(2)*x + 1, 1)])
f = x**2 + 2*sqrt(2)*x + 2
assert f.factor_list() == (1, [(x + sqrt(2), 2)])
assert (f**3).factor_list() == (1, [(x + sqrt(2), 6)])
f *= 2
assert f.factor_list() == (2, [(x + sqrt(2), 2)])
assert (f**3).factor_list() == (8, [(x + sqrt(2), 6)])
R, x, y = ring('x y', QQ.algebraic_field(sqrt(2)))
assert R(0).factor_list() == (0, [])
assert (x + 1).factor_list() == (1, [(x + 1, 1)])
assert (2*x + 2).factor_list() == (2, [(x + 1, 1)])
assert (x**2 - 2*y**2).factor_list() == (1, [(x - sqrt(2)*y, 1),
(x + sqrt(2)*y, 1)])
assert (2*x**2 - 4*y**2).factor_list() == (2, [(x - sqrt(2)*y, 1),
(x + sqrt(2)*y, 1)])
def test_sympyissue_5786():
_, x, y, z, t = ring('x y z t', QQ.algebraic_field(I))
f, g = z - I*t, x - I*y
assert (f*g).factor_list() == (1, [(f, 1), (g, 1)])
assert (f**2*g).factor_list() == (1, [(g, 1), (f, 2)])
assert (f*g**3).factor_list() == (1, [(f, 1), (g, 3)])
def test_factor_list():
R, x = ring('x', FF(2))
assert (x**2 + 1).factor_list() == (1, [(x + 1, 2)])
R, x = ring('x', ZZ)
assert R(0).factor_list() == (0, [])
assert R(7).factor_list() == (7, [])
assert (x**2 + 2*x + 1).factor_list() == (1, [(x + 1, 2)])
# issue sympy/sympy#8037
assert (6*x**2 - 5*x - 6).factor_list() == (1, [(2*x - 3, 1), (3*x + 2, 1)])
R, x = ring('x', QQ)
assert R(0).factor_list() == (0, [])
assert R(QQ(1, 7)).factor_list() == (QQ(1, 7), [])
assert (x**2/2 + x + QQ(1, 2)).factor_list() == (QQ(1, 2), [(x + 1, 2)])
R, x = ring('x', QQ.algebraic_field(I))
f = x**4 + 2*x**2
assert f.factor_list() == (1, [(x, 2), (x**2 + 2, 1)])
R, x = ring('x', RR)
assert (1.0*x**2 + 2.0*x + 1.0).factor_list() == (1.0, [(1.0*x + 1.0, 2)])
assert (2.0*x**2 + 4.0*x + 2.0).factor_list() == (2.0, [(1.0*x + 1.0, 2)])
f = 6.7225336055071*x**2 - 10.6463972754741*x - 0.33469524022264
assert f.factor_list() == (1.0, [(f, 1)])
# issue diofant/diofant#238
f = 0.1*x**2 + 1.1*x + 1.0
assert f.factor_list() == (10.0, [(0.1*x + 0.1, 1), (0.1*x + 1.0, 1)])
f = 0.25 + 1.0*x + 1.0*x**2
assert f.factor_list() == (4.0, [(0.25 + 0.5*x, 2)])
Rt, t = ring('t', ZZ)
R, x = ring('x', Rt)
assert R(0).factor_list() == (0, [])
assert R(7).factor_list() == (7, [])
assert (4*t*x**2 + 4*t**2*x).factor_list() == (4*t, [(x, 1), (x + t, 1)])
Rt, t = ring('t', QQ)
R, x = ring('x', Rt)
assert R(0).factor_list() == (0, [])
assert R(QQ(1, 7)).factor_list() == (QQ(1, 7), [])
assert (t*x**2/2 + t**2*x/2).factor_list() == (t/2, [(x, 1), (x + t, 1)])
R, x = ring('x', EX)
pytest.raises(DomainError, R(EX(sin(1))).factor_list)
R, x, y = ring('x y', FF(2))
pytest.raises(NotImplementedError, (x**2 + y**2).factor_list)
R, x, y = ring('x y', ZZ)
assert R(0).factor_list() == (0, [])
assert R(7).factor_list() == (7, [])
assert (x**2 + 2*x + 1).factor_list() == (1, [(x + 1, 2)])
assert (4*x**2*y + 4*x*y**2).factor_list() == (4, [(y, 1), (x, 1),
(x + y, 1)])
R, x, y = ring('x y', QQ)
assert R(0).factor_list() == (0, [])
assert R(QQ(1, 7)).factor_list() == (QQ(1, 7), [])
assert (x**2/2 + x + QQ(1, 2)).factor_list() == (QQ(1, 2), [(x + 1, 2)])
assert (x**2*y/2 + x*y**2/2).factor_list() == (QQ(1, 2), [(y, 1), (x, 1),
(x + y, 1)])
R, x, y = ring('x y', QQ.algebraic_field(I))
f, r = x**2 + y**2, (1, [(x - I*y, 1), (x + I*y, 1)])
for method in ('trager', 'modular'):
with using(aa_factor_method=method):
assert f.factor_list() == r
R, x, y = ring('x y', RR)
f = 2.0*x**2 - 8.0*y**2
assert f.factor_list() == (2.0, [(1.0*x - 2.0*y, 1), (1.0*x + 2.0*y, 1)])
f = 6.7225336055071*x**2*y**2 - 10.6463972754741*x*y - 0.33469524022264
assert f.factor_list() == (1.0, [(f, 1)])
Rt, t = ring('t', ZZ)
R, x, y = ring('x y', Rt)
assert R(0).factor_list() == (0, [])
assert R(7).factor_list() == (7, [])
assert (4*t*x**2 + 4*t**2*x).factor_list() == (4*t, [(x, 1), (x + t, 1)])
Rt, t = ring('t', QQ)
R, x, y = ring('x y', Rt)
assert R(0).factor_list() == (0, [])
assert R(QQ(1, 7)).factor_list() == (QQ(1, 7), [])
assert (t*x**2/2 + t**2*x/2).factor_list() == (t/2, [(x, 1), (x + t, 1)])
R, x, y = ring('x y', EX)
pytest.raises(DomainError, lambda: R(EX(sin(1))).factor_list())
# issue diofant/diofant#238
R, x, y, z = ring('x y z', RR)
f = x*y + x*z + 0.1*y + 0.1*z
assert f.factor_list() == (10.0, [(x + 0.1, 1), (0.1*y + 0.1*z, 1)])
f = 0.25*x**2 + 1.0*x*y*z + 1.0*y**2*z**2
assert f.factor_list() == (4.0, [(0.25*x + 0.5*y*z, 2)])
R, *X = ring('x:200', ZZ)
f, g = X[0]**2 + 2*X[0] + 1, X[0] + 1
assert f.factor_list() == (1, [(g, 2)])
f, g = X[-1]**2 + 2*X[-1] + 1, X[-1] + 1
assert f.factor_list() == (1, [(g, 2)])
def test_gf_factor():
R, x = ring('x', FF(2))
f = x**4 + x
g = (1, [(x, 1),
(x + 1, 1),
(x**2 + x + 1, 1)])
for method in ('berlekamp', 'zassenhaus', 'shoup'):
with using(gf_factor_method=method):
assert f.factor_list() == g
f = x**18 + x**17 + x**16 + x**14 + x**12 + x**11 + x**8 + x**5 + x**3 + 1
g = (1, [(x + 1, 4), (x**4 + x**3 + 1, 1),
(x**10 + x**8 + x**7 + x**5 + 1, 1)])
for method in ('berlekamp', 'zassenhaus', 'shoup'):
with using(gf_factor_method=method):
assert f.factor_list() == g
f = x**63 + 1
g = (1, [(x + 1, 1), (x**2 + x + 1, 1), (x**3 + x + 1, 1),
(x**6 + x + 1, 1), (x**3 + x**2 + 1, 1),
(x**6 + x**3 + 1, 1), (x**6 + x**5 + 1, 1),
(x**6 + x**4 + x**2 + x + 1, 1), (x**6 + x**5 + x**2 + x + 1, 1),
(x**6 + x**4 + x**3 + x + 1, 1), (x**6 + x**5 + x**4 + x + 1, 1),
(x**6 + x**5 + x**3 + x**2 + 1, 1),
(x**6 + x**5 + x**4 + x**2 + 1, 1)])
for method in ('zassenhaus', 'shoup'):
with using(gf_factor_method=method):
assert f.factor_list() == g
f = (x**28 + x**27 + x**26 + x**25 + x**24 + x**20 + x**19 + x**17 +
x**16 + x**15 + x**14 + x**13 + x**12 + x**11 + x**9 + x**8 +
x**5 + x**4 + x**2 + x)
g = (1, [(x, 1), (x + 1, 2), (x**5 + x**4 + x**3 + x + 1, 1),
(x**10 + x**9 + x**8 + x**7 + 1, 1),
(x**10 + x**9 + x**8 + x**5 + x**4 + x**2 + 1, 1)])
for method in ('zassenhaus', 'shoup'):
with using(gf_factor_method=method):
assert f.factor_list() == g
R, x = ring('x', FF(3))
f = x**6 - x**5 + x**4 + x**3 - x
g = (1, [(x, 1), (x + 1, 1), (x**2 + 1, 1), (x**2 + x + 2, 1)])
for method in ('zassenhaus', 'shoup'):
with using(gf_factor_method=method):
assert f.factor_list() == g
f = x**4 + x**3 + x + 2
g = (1, [(x**2 + 1, 1), (x**2 + x + 2, 1)])
for method in ('zassenhaus', 'shoup'):
with using(gf_factor_method=method):
assert f.factor_list() == g
R, x = ring('x', FF(11))
for method in ('berlekamp', 'zassenhaus', 'shoup'):
with using(gf_factor_method=method):
assert R(0).factor_list() == (0, [])
assert R(1).factor_list() == (1, [])
assert x.factor_list() == (1, [(x, 1)])
assert (x + 1).factor_list() == (1, [(x + 1, 1)])
assert (2*x + 3).factor_list() == (2, [(x + 7, 1)])
assert (5*x**3 + 2*x**2 + 7*x +
2).factor_list() == (5, [(x + 2, 1), (x + 8, 2)])
f = x**6 + 8*x**5 + x**4 + 8*x**3 + 10*x**2 + 8*x + 1
g = (1, [(x + 1, 1),
(x**2 + 5*x + 3, 1),
(x**3 + 2*x**2 + 3*x + 4, 1)])
for method in ('berlekamp', 'zassenhaus', 'shoup'):
with using(gf_factor_method=method):
assert f.factor_list() == g
f = x**3 + 5*x**2 + 8*x + 4
g = (1, [(x + 1, 1), (x + 2, 2)])
for method in ('berlekamp', 'zassenhaus', 'shoup'):
with using(gf_factor_method=method):
assert f.factor_list() == g
f = x**9 + x**8 + 10*x**7 + x**6 + 10*x**4 + 10*x**3 + 10*x**2
g = (1, [(x, 2), (x**2 + 9*x + 5, 1),
(x**5 + 3*x**4 + 8*x**2 + 5*x + 2, 1)])
for method in ('berlekamp', 'zassenhaus', 'shoup'):
with using(gf_factor_method=method):
assert f.factor_list() == g
f = x**32 + 1
g = (1, [(x**16 + 3*x**8 + 10, 1),
(x**16 + 8*x**8 + 10, 1)])
for method in ('berlekamp', 'zassenhaus', 'shoup'):
with using(gf_factor_method=method):
assert f.factor_list() == g
f = 8*x**32 + 5
g = (8, [(x + 3, 1), (x + 8, 1), (x**2 + 9, 1), (x**2 + 2*x + 2, 1),
(x**2 + 9*x + 2, 1), (x**8 + x**4 + 6, 1), (x**8 + 10*x**4 + 6, 1),
(x**4 + 5*x**2 + 7, 1), (x**4 + 6*x**2 + 7, 1)])
for method in ('berlekamp', 'zassenhaus', 'shoup'):
with using(gf_factor_method=method):
assert f.factor_list() == g
f = 8*x**63 + 5
g = (8, [(x + 7, 1), (x**6 + 9*x**3 + 4, 1), (x**2 + 4*x + 5, 1),
(x**3 + 6*x**2 + 8*x + 2, 1), (x**3 + 9*x**2 + 9*x + 2, 1),
(x**6 + 2*x**5 + 6*x**4 + 8*x**2 + 4*x + 4, 1),
(x**6 + 2*x**5 + 8*x**3 + 4*x**2 + 6*x + 4, 1),
(x**6 + 5*x**5 + 6*x**4 + 8*x**2 + 6*x + 4, 1),
(x**6 + 2*x**5 + 3*x**4 + 8*x**3 + 6*x + 4, 1),
(x**6 + 10*x**5 + 4*x**4 + 7*x**3 + 10*x**2 + 7*x + 4, 1),
(x**6 + 3*x**5 + 3*x**4 + x**3 + 6*x**2 + 8*x + 4, 1),
(x**6 + 6*x**5 + 2*x**4 + 7*x**3 + 9*x**2 + 8*x + 4, 1),
(x**6 + 10*x**5 + 10*x**4 + x**3 + 4*x**2 + 9*x + 4, 1)])
for method in ('berlekamp', 'zassenhaus', 'shoup'):
with using(gf_factor_method=method):
assert f.factor_list() == g
f = x**15 - 1
g = (1, [(x + 2, 1), (x + 6, 1), (x + 7, 1), (x + 8, 1), (x + 10, 1),
(x**2 + x + 1, 1), (x**2 + 5*x + 3, 1), (x**2 + 9*x + 4, 1),
(x**2 + 4*x + 5, 1), (x**2 + 3*x + 9, 1)])
for method in ('zassenhaus', 'shoup'):
with using(gf_factor_method=method):
assert f.factor_list() == g
with using(gf_factor_method='other'):
pytest.raises(KeyError, (x + 1).factor_list)
R, x = ring('x', FF(13))
f = x**8 + x**6 + 10*x**4 + 10*x**3 + 8*x**2 + 2*x + 8
g = (1, [(x + 3, 1), (x**3 + 8*x**2 + 4*x + 12, 1),
(x**4 + 2*x**3 + 3*x**2 + 4*x + 6, 1)])
with using(gf_factor_method='berlekamp'):
assert f.factor_list() == g
R, x = ring('x', FF(809))
f = (x**10 + 2*x**9 + 5*x**8 + 26*x**7 + 677*x**6 + 436*x**5 +
791*x**4 + 325*x**3 + 456*x**2 + 24*x + 577)
g = (1, [(x + 701, 1), (x**9 + 110*x**8 + 559*x**7 + 532*x**6 +
694*x**5 + 151*x**4 + 110*x**3 + 70*x**2 +
735*x + 122, 1)])
for method in ('zassenhaus', 'shoup'):
with using(gf_factor_method=method):
assert f.factor_list() == g
# Gathen polynomials: x**n + x + 1 (mod p > 2**n * pi)
R, x = ring('x', FF(nextprime(2**15*pi)))
f = x**15 + x + 1
g = (1, [(x**2 + 22730*x + 68144, 1),
(x**4 + 81553*x**3 + 77449*x**2 + 86810*x + 4724, 1),
(x**4 + 86276*x**3 + 56779*x**2 + 14859*x + 31575, 1),
(x**5 + 15347*x**4 + 95022*x**3 + 84569*x**2 + 94508*x + 92335, 1)])
for method in ('zassenhaus', 'shoup'):
with using(gf_factor_method=method):
assert f.factor_list() == g
# Shoup polynomials: f = a_0 x**n + a_1 x**(n-1) + ... + a_n
# (mod p > 2**(n-2) * pi), where a_n = a_{n-1}**2 + 1, a_0 = 1
R, x = ring('x', FF(nextprime(2**4*pi)))
f = x**6 + 2*x**5 + 5*x**4 + 26*x**3 + 41*x**2 + 39*x + 38
g = (1, [(x**2 + 44*x + 26, 1),
(x**4 + 11*x**3 + 25*x**2 + 18*x + 30, 1)])
for method in ('zassenhaus', 'shoup'):
with using(gf_factor_method=method):
assert f.factor_list() == g
F8 = FF(2, [1, 1, 0, 1])
R, x = ring('x', F8)
f = x**10 + x**9 + F8(2)*x**8 + F8(2)*x**7 + F8(5)*x**6 + F8(3)*x**5
g = (F8(1), [(x, 5), (x + F8(3), 1), (x + F8(6), 1),
(x**3 + F8(4)*x**2 + x + F8(3), 1)])
for method in ('berlekamp', 'zassenhaus', 'shoup'):
with using(gf_factor_method=method):
assert f.factor_list() == g
F9 = FF(3, [2, 2, 1])
R, x = ring('x', F9)
f = x**5 + F9(2)*x**4 + F9(6)*x**3 + F9(8)*x**2 + F9(5)*x + F9(4)
g = (1, [(x + F9(8), 1), (x**2 + 2*x + F9(4), 1),
(x**2 + F9(4)*x + F9(4), 1)])
for method in ('berlekamp', 'zassenhaus', 'shoup'):
with using(gf_factor_method=method):
assert f.factor_list() == g
def test_PolyElement_is_irreducible():
R, x = ring('x', FF(5))
f = (x**10 + 4*x**9 + 2*x**8 + 2*x**7 + 3*x**6 +
2*x**5 + 4*x**4 + x**3 + 4*x**2 + 4)
g = 3*x**2 + 2*x + 4
for method in ('ben-or', 'rabin'):
with using(gf_irred_method=method):
assert f.is_irreducible is True
assert g.is_irreducible is False
R, x = ring('x', FF(11))
f = R(7)
g = 7*x + 3
h = 7*x**2 + 3*x + 1
for method in ('ben-or', 'rabin'):
with using(gf_irred_method=method):
assert f.is_irreducible is True
assert g.is_irreducible is True
assert h.is_irreducible is False
with using(gf_irred_method='other'):
pytest.raises(KeyError, lambda: f.is_irreducible)
R, x = ring('x', FF(13))
f = 2*x**4 + 3*x**3 + 4*x**2 + 5*x + 6
g = 2*x**4 + 3*x**3 + 4*x**2 + 5*x + 8
with using(gf_irred_method='ben-or'):
assert f.is_irreducible is False
assert g.is_irreducible is True
R, x = ring('x', FF(17))
f = (x**10 + 9*x**9 + 9*x**8 + 13*x**7 + 16*x**6 + 15*x**5 +
6*x**4 + 7*x**3 + 7*x**2 + 7*x + 10)
g = (x**10 + 7*x**9 + 16*x**8 + 7*x**7 + 15*x**6 + 13*x**5 + 13*x**4 +
11*x**3 + 16*x**2 + 10*x + 9)
h = f*g
for method in ('ben-or', 'rabin'):
with using(gf_irred_method=method):
assert f.is_irreducible is True
assert g.is_irreducible is True
assert h.is_irreducible is False
F9 = FF(3, [2, 2, 1])
R, x = ring('x', F9)
f = x**3 + F9(8)*x**2 + F9(8)*x + F9(4)
for method in ('ben-or', 'rabin'):
with using(gf_irred_method=method):
assert f.is_irreducible is False
F27 = FF(3, [1, 0, 2, 1])
R, x = ring('x', F27)
f = x**3 + F27(8)*x**2 + F27(19)*x + F27(24)
for method in ('ben-or', 'rabin'):
with using(gf_irred_method=method):
assert f.is_irreducible is True
R, x = ring('x', ZZ)
assert x.is_irreducible is True
assert (x**2 + x + 1).is_irreducible is True
assert (x**2 + 2*x + 1).is_irreducible is False
assert (x**2 - 1).is_irreducible is False
f = 3*x**4 + 2*x**3 + 6*x**2 + 8*x
assert (f + 7).is_irreducible is True
assert (f + 4).is_irreducible is True
assert (f + 10).is_irreducible is True
assert (f + 14).is_irreducible is True
R, x, y = ring('x y', ZZ)
assert R(2).is_irreducible is True
assert (x**2 + x + 1).is_irreducible is True
assert (x**2 + 2*x + 1).is_irreducible is False
assert ((x - 2*y)*(x + y)).is_irreducible is False
assert (x**2 + y**2).is_irreducible is True
R, x, y, _ = ring('x y z', QQ)
assert (x**2 + x + 1).is_irreducible
assert (x**2 + 2*x + 1).is_irreducible is False
@pytest.mark.timeout(50)
def test_sympyissue_16620():
_, x = ring('x', FF(2))
f = x**17 + 1
g = (1, [(x + 1, 1),
(x**8 + x**5 + x**4 + x**3 + 1, 1),
(x**8 + x**7 + x**6 + x**4 + x**2 + x + 1, 1)])
for method in ('berlekamp', 'zassenhaus', 'shoup'):
with using(gf_factor_method=method):
assert f.factor_list() == g
f = x**31 + 1
g = (1, [(x + 1, 1), (x**5 + x**2 + 1, 1), (x**5 + x**3 + 1, 1),
(x**5 + x**3 + x**2 + x + 1, 1), (x**5 + x**4 + x**2 + x + 1, 1),
(x**5 + x**4 + x**3 + x + 1, 1), (x**5 + x**4 + x**3 + x**2 + 1, 1)])
for method in ('berlekamp', 'zassenhaus', 'shoup'):
with using(gf_factor_method=method):
assert f.factor_list() == g
def test__gf_trace_map():
R, x = ring('x', FF(5))
a = x + 2
b = 4*x + 4
c = x + 1
f = 3*x**2 + 2*x + 4
assert R._gf_trace_map(a, b, c, 4, f) == (x + 3, x + 3)
R, x = ring('x', FF(11))
f = x**4 + x**3 + 4*x**2 + 9*x + 1
a = x**2 + x + 1
c = x
b = pow(c, 11, f)
assert R._gf_trace_map(a, b, c, 0, f) == (x**2 + x + 1, x**2 + x + 1)
assert R._gf_trace_map(a, b, c, 1, f) == (5*x**3 + 2*x**2 + 10*x + 3,
5*x**3 + 3*x**2 + 4)
assert R._gf_trace_map(a, b, c, 2, f) == (5*x**3 + 9*x**2 + 5*x + 3,
10*x**3 + x**2 + 5*x + 7)
assert R._gf_trace_map(a, b, c, 3, f) == (x**3 + 10*x**2 + 6*x, 7)
assert R._gf_trace_map(a, b, c, 4, f) == (x**2 + x + 1, x**2 + x + 8)
assert R._gf_trace_map(a, b, c, 5, f) == (5*x**3 + 2*x**2 + 10*x + 3,
5*x**3 + 3*x**2)
assert R._gf_trace_map(a, b, c, 11, f) == (x**3 + 10*x**2 + 6*x, 10)
| 33.804777 | 82 | 0.427597 |
acef3468a9ff988a30206b150efd6b4174a57088 | 15,989 | py | Python | tests/test_static.py | lnds/sanic | a57c127738898c47118d965ff4003aa98f17e26f | [
"MIT"
] | null | null | null | tests/test_static.py | lnds/sanic | a57c127738898c47118d965ff4003aa98f17e26f | [
"MIT"
] | null | null | null | tests/test_static.py | lnds/sanic | a57c127738898c47118d965ff4003aa98f17e26f | [
"MIT"
] | 1 | 2021-07-05T09:35:48.000Z | 2021-07-05T09:35:48.000Z | import inspect
import logging
import os
from collections import Counter
from pathlib import Path
from time import gmtime, strftime
import pytest
from sanic import text
from sanic.exceptions import FileNotFound
@pytest.fixture(scope="module")
def static_file_directory():
"""The static directory to serve"""
current_file = inspect.getfile(inspect.currentframe())
current_directory = os.path.dirname(os.path.abspath(current_file))
static_directory = os.path.join(current_directory, "static")
return static_directory
def get_file_path(static_file_directory, file_name):
return os.path.join(static_file_directory, file_name)
def get_file_content(static_file_directory, file_name):
"""The content of the static file to check"""
with open(get_file_path(static_file_directory, file_name), "rb") as file:
return file.read()
@pytest.fixture(scope="module")
def large_file(static_file_directory):
large_file_path = os.path.join(static_file_directory, "large.file")
size = 2 * 1024 * 1024
with open(large_file_path, "w") as f:
f.write("a" * size)
yield large_file_path
os.remove(large_file_path)
@pytest.fixture(autouse=True, scope="module")
def symlink(static_file_directory):
src = os.path.abspath(
os.path.join(os.path.dirname(static_file_directory), "conftest.py")
)
symlink = "symlink"
dist = os.path.join(static_file_directory, symlink)
os.symlink(src, dist)
yield symlink
os.remove(dist)
@pytest.fixture(autouse=True, scope="module")
def hard_link(static_file_directory):
src = os.path.abspath(
os.path.join(os.path.dirname(static_file_directory), "conftest.py")
)
hard_link = "hard_link"
dist = os.path.join(static_file_directory, hard_link)
os.link(src, dist)
yield hard_link
os.remove(dist)
@pytest.mark.parametrize(
"file_name",
["test.file", "decode me.txt", "python.png", "symlink", "hard_link"],
)
def test_static_file(app, static_file_directory, file_name):
app.static(
"/testing.file", get_file_path(static_file_directory, file_name)
)
request, response = app.test_client.get("/testing.file")
assert response.status == 200
assert response.body == get_file_content(static_file_directory, file_name)
@pytest.mark.parametrize(
"file_name",
["test.file", "decode me.txt", "python.png", "symlink", "hard_link"],
)
def test_static_file_pathlib(app, static_file_directory, file_name):
file_path = Path(get_file_path(static_file_directory, file_name))
app.static("/testing.file", file_path)
request, response = app.test_client.get("/testing.file")
assert response.status == 200
assert response.body == get_file_content(static_file_directory, file_name)
@pytest.mark.parametrize(
"file_name",
[b"test.file", b"decode me.txt", b"python.png"],
)
def test_static_file_bytes(app, static_file_directory, file_name):
bsep = os.path.sep.encode("utf-8")
file_path = static_file_directory.encode("utf-8") + bsep + file_name
app.static("/testing.file", file_path)
request, response = app.test_client.get("/testing.file")
assert response.status == 200
@pytest.mark.parametrize(
"file_name",
[{}, [], object()],
)
def test_static_file_invalid_path(app, static_file_directory, file_name):
app.route("/")(lambda x: x)
with pytest.raises(ValueError):
app.static("/testing.file", file_name)
request, response = app.test_client.get("/testing.file")
assert response.status == 404
@pytest.mark.parametrize("file_name", ["test.html"])
def test_static_file_content_type(app, static_file_directory, file_name):
app.static(
"/testing.file",
get_file_path(static_file_directory, file_name),
content_type="text/html; charset=utf-8",
)
request, response = app.test_client.get("/testing.file")
assert response.status == 200
assert response.body == get_file_content(static_file_directory, file_name)
assert response.headers["Content-Type"] == "text/html; charset=utf-8"
@pytest.mark.parametrize(
"file_name,expected",
[
("test.html", "text/html; charset=utf-8"),
("decode me.txt", "text/plain; charset=utf-8"),
("test.file", "application/octet-stream"),
],
)
def test_static_file_content_type_guessed(
app, static_file_directory, file_name, expected
):
app.static(
"/testing.file",
get_file_path(static_file_directory, file_name),
)
request, response = app.test_client.get("/testing.file")
assert response.status == 200
assert response.body == get_file_content(static_file_directory, file_name)
assert response.headers["Content-Type"] == expected
def test_static_file_content_type_with_charset(app, static_file_directory):
app.static(
"/testing.file",
get_file_path(static_file_directory, "decode me.txt"),
content_type="text/plain;charset=ISO-8859-1",
)
request, response = app.test_client.get("/testing.file")
assert response.status == 200
assert response.headers["Content-Type"] == "text/plain;charset=ISO-8859-1"
@pytest.mark.parametrize(
"file_name", ["test.file", "decode me.txt", "symlink", "hard_link"]
)
@pytest.mark.parametrize("base_uri", ["/static", "", "/dir"])
def test_static_directory(app, file_name, base_uri, static_file_directory):
app.static(base_uri, static_file_directory)
request, response = app.test_client.get(uri=f"{base_uri}/{file_name}")
assert response.status == 200
assert response.body == get_file_content(static_file_directory, file_name)
@pytest.mark.parametrize("file_name", ["test.file", "decode me.txt"])
def test_static_head_request(app, file_name, static_file_directory):
app.static(
"/testing.file",
get_file_path(static_file_directory, file_name),
use_content_range=True,
)
request, response = app.test_client.head("/testing.file")
assert response.status == 200
assert "Accept-Ranges" in response.headers
assert "Content-Length" in response.headers
assert int(response.headers["Content-Length"]) == len(
get_file_content(static_file_directory, file_name)
)
@pytest.mark.parametrize("file_name", ["test.file", "decode me.txt"])
def test_static_content_range_correct(app, file_name, static_file_directory):
app.static(
"/testing.file",
get_file_path(static_file_directory, file_name),
use_content_range=True,
)
headers = {"Range": "bytes=12-19"}
request, response = app.test_client.get("/testing.file", headers=headers)
assert response.status == 206
assert "Content-Length" in response.headers
assert "Content-Range" in response.headers
static_content = bytes(get_file_content(static_file_directory, file_name))[
12:20
]
assert int(response.headers["Content-Length"]) == len(static_content)
assert response.body == static_content
@pytest.mark.parametrize("file_name", ["test.file", "decode me.txt"])
def test_static_content_range_front(app, file_name, static_file_directory):
app.static(
"/testing.file",
get_file_path(static_file_directory, file_name),
use_content_range=True,
)
headers = {"Range": "bytes=12-"}
request, response = app.test_client.get("/testing.file", headers=headers)
assert response.status == 206
assert "Content-Length" in response.headers
assert "Content-Range" in response.headers
static_content = bytes(get_file_content(static_file_directory, file_name))[
12:
]
assert int(response.headers["Content-Length"]) == len(static_content)
assert response.body == static_content
@pytest.mark.parametrize("file_name", ["test.file", "decode me.txt"])
def test_static_content_range_back(app, file_name, static_file_directory):
app.static(
"/testing.file",
get_file_path(static_file_directory, file_name),
use_content_range=True,
)
headers = {"Range": "bytes=-12"}
request, response = app.test_client.get("/testing.file", headers=headers)
assert response.status == 206
assert "Content-Length" in response.headers
assert "Content-Range" in response.headers
static_content = bytes(get_file_content(static_file_directory, file_name))[
-12:
]
assert int(response.headers["Content-Length"]) == len(static_content)
assert response.body == static_content
@pytest.mark.parametrize("use_modified_since", [True, False])
@pytest.mark.parametrize("file_name", ["test.file", "decode me.txt"])
def test_static_content_range_empty(
app, file_name, static_file_directory, use_modified_since
):
app.static(
"/testing.file",
get_file_path(static_file_directory, file_name),
use_content_range=True,
use_modified_since=use_modified_since,
)
request, response = app.test_client.get("/testing.file")
assert response.status == 200
assert "Content-Length" in response.headers
assert "Content-Range" not in response.headers
assert int(response.headers["Content-Length"]) == len(
get_file_content(static_file_directory, file_name)
)
assert response.body == bytes(
get_file_content(static_file_directory, file_name)
)
@pytest.mark.parametrize("file_name", ["test.file", "decode me.txt"])
def test_static_content_range_error(app, file_name, static_file_directory):
app.static(
"/testing.file",
get_file_path(static_file_directory, file_name),
use_content_range=True,
)
headers = {"Range": "bytes=1-0"}
request, response = app.test_client.get("/testing.file", headers=headers)
assert response.status == 416
assert "Content-Length" in response.headers
assert "Content-Range" in response.headers
assert response.headers["Content-Range"] == "bytes */%s" % (
len(get_file_content(static_file_directory, file_name)),
)
@pytest.mark.parametrize("file_name", ["test.file", "decode me.txt"])
def test_static_content_range_invalid_unit(
app, file_name, static_file_directory
):
app.static(
"/testing.file",
get_file_path(static_file_directory, file_name),
use_content_range=True,
)
unit = "bit"
headers = {"Range": f"{unit}=1-0"}
request, response = app.test_client.get("/testing.file", headers=headers)
assert response.status == 416
assert f"{unit} is not a valid Range Type" in response.text
@pytest.mark.parametrize("file_name", ["test.file", "decode me.txt"])
def test_static_content_range_invalid_start(
app, file_name, static_file_directory
):
app.static(
"/testing.file",
get_file_path(static_file_directory, file_name),
use_content_range=True,
)
start = "start"
headers = {"Range": f"bytes={start}-0"}
request, response = app.test_client.get("/testing.file", headers=headers)
assert response.status == 416
assert f"'{start}' is invalid for Content Range" in response.text
@pytest.mark.parametrize("file_name", ["test.file", "decode me.txt"])
def test_static_content_range_invalid_end(
app, file_name, static_file_directory
):
app.static(
"/testing.file",
get_file_path(static_file_directory, file_name),
use_content_range=True,
)
end = "end"
headers = {"Range": f"bytes=1-{end}"}
request, response = app.test_client.get("/testing.file", headers=headers)
assert response.status == 416
assert f"'{end}' is invalid for Content Range" in response.text
@pytest.mark.parametrize("file_name", ["test.file", "decode me.txt"])
def test_static_content_range_invalid_parameters(
app, file_name, static_file_directory
):
app.static(
"/testing.file",
get_file_path(static_file_directory, file_name),
use_content_range=True,
)
headers = {"Range": "bytes=-"}
request, response = app.test_client.get("/testing.file", headers=headers)
assert response.status == 416
assert "Invalid for Content Range parameters" in response.text
@pytest.mark.parametrize(
"file_name", ["test.file", "decode me.txt", "python.png"]
)
def test_static_file_specified_host(app, static_file_directory, file_name):
app.static(
"/testing.file",
get_file_path(static_file_directory, file_name),
host="www.example.com",
)
headers = {"Host": "www.example.com"}
request, response = app.test_client.get("/testing.file", headers=headers)
assert response.status == 200
assert response.body == get_file_content(static_file_directory, file_name)
request, response = app.test_client.get("/testing.file")
assert response.status == 404
@pytest.mark.parametrize("use_modified_since", [True, False])
@pytest.mark.parametrize("stream_large_files", [True, 1024])
@pytest.mark.parametrize("file_name", ["test.file", "large.file"])
def test_static_stream_large_file(
app,
static_file_directory,
file_name,
use_modified_since,
stream_large_files,
large_file,
):
app.static(
"/testing.file",
get_file_path(static_file_directory, file_name),
use_modified_since=use_modified_since,
stream_large_files=stream_large_files,
)
request, response = app.test_client.get("/testing.file")
assert response.status == 200
assert response.body == get_file_content(static_file_directory, file_name)
@pytest.mark.parametrize(
"file_name", ["test.file", "decode me.txt", "python.png"]
)
def test_use_modified_since(app, static_file_directory, file_name):
file_stat = os.stat(get_file_path(static_file_directory, file_name))
modified_since = strftime(
"%a, %d %b %Y %H:%M:%S GMT", gmtime(file_stat.st_mtime)
)
app.static(
"/testing.file",
get_file_path(static_file_directory, file_name),
use_modified_since=True,
)
request, response = app.test_client.get(
"/testing.file", headers={"If-Modified-Since": modified_since}
)
assert response.status == 304
def test_file_not_found(app, static_file_directory):
app.static("/static", static_file_directory)
request, response = app.test_client.get("/static/not_found")
assert response.status == 404
assert "File not found" in response.text
@pytest.mark.parametrize("static_name", ["_static_name", "static"])
@pytest.mark.parametrize("file_name", ["test.html"])
def test_static_name(app, static_file_directory, static_name, file_name):
app.static("/static", static_file_directory, name=static_name)
request, response = app.test_client.get(f"/static/{file_name}")
assert response.status == 200
def test_nested_dir(app, static_file_directory):
app.static("/static", static_file_directory)
request, response = app.test_client.get("/static/nested/dir/foo.txt")
assert response.status == 200
assert response.text == "foo\n"
def test_stack_trace_on_not_found(app, static_file_directory, caplog):
app.static("/static", static_file_directory)
with caplog.at_level(logging.INFO):
_, response = app.test_client.get("/static/non_existing_file.file")
counter = Counter([r[1] for r in caplog.record_tuples])
assert response.status == 404
assert counter[logging.INFO] == 5
assert counter[logging.ERROR] == 1
def test_no_stack_trace_on_not_found(app, static_file_directory, caplog):
app.static("/static", static_file_directory)
@app.exception(FileNotFound)
async def file_not_found(request, exception):
return text(f"No file: {request.path}", status=404)
with caplog.at_level(logging.INFO):
_, response = app.test_client.get("/static/non_existing_file.file")
counter = Counter([r[1] for r in caplog.record_tuples])
assert response.status == 404
assert counter[logging.INFO] == 5
assert logging.ERROR not in counter
assert response.text == "No file: /static/non_existing_file.file"
| 32.432049 | 79 | 0.703796 |
acef351af42f36cdc620953d44dd35528c35f0c6 | 4,222 | py | Python | migration/snapshot.py | sagarafr/exodus | 58716a7c6905897861a2e4d52292bd09368bf592 | [
"Apache-2.0"
] | null | null | null | migration/snapshot.py | sagarafr/exodus | 58716a7c6905897861a2e4d52292bd09368bf592 | [
"Apache-2.0"
] | null | null | null | migration/snapshot.py | sagarafr/exodus | 58716a7c6905897861a2e4d52292bd09368bf592 | [
"Apache-2.0"
] | null | null | null | from novaclient import exceptions
from connections.nova_connection import NovaConnection
from connections.cinder_connection import CinderConnection
from connections.glance_connection import GlanceConnection
from utils.get_ids import get_server_id_from_nova
from utils.get_ids import get_snapshot_id_from_glance
from time import sleep
def make_hard_disk_snapshot(cinder_connection: CinderConnection, glance_connection: GlanceConnection,
                            nova_connection: NovaConnection, volume_id: str, snapshot_name: str):
    """
    Make a snapshot named snapshot_name of volume_id in the cinder_connection /
    glance_connection region. If glance returns several matching snapshot ids,
    the first one is used.
    :param cinder_connection: CinderConnection object used to upload the volume as an image
    :param glance_connection: GlanceConnection object used to look up the created image id
    :param nova_connection: NovaConnection object used to poll the image status
    :param volume_id: str id of the volume to snapshot
    :param snapshot_name: str name of the snapshot to create
    :raise: ValueError if the image ends up in an unknown status (from check_availability)
    """
    # Ask cinder to convert the volume into a glance image (force=True, "bare"/"qcow2").
    cinder_connection.connection.volumes.upload_to_image(volume_id, True, snapshot_name, "bare", "qcow2")
    # Poll glance until the image is registered; the indexed lookup raises
    # IndexError while no image with that name exists yet.
    uuid_snap = None
    while uuid_snap is None:
        try:
            uuid_snap = get_snapshot_id_from_glance(glance_connection, snapshot_name)[0]
        except IndexError:
            sleep(2)
    # Poll until the image reaches the "active" state. check_availability raises
    # ValueError on an unexpected status; we deliberately let it propagate
    # (the previous bare `except: raise` wrappers added nothing).
    while not check_availability(nova_connection, uuid_snap):
        sleep(2)
def make_snapshot(nova_connection: NovaConnection, server_name: str, snapshot_name: str):
    """
    Make a snapshot named snapshot_name of server_name in the nova_connection region.
    If several servers carry that name, the first one is used.
    :param nova_connection: NovaConnection object
    :param server_name: str name of the server in the nova_connection region
    :param snapshot_name: str name of the snapshot to create
    :raise: ValueError if server_name is not found
    :raise: novaclient.exceptions.Conflict if a snapshot of the server is already in progress
    """
    try:
        server_uuid = get_server_id_from_nova(nova_connection, server_name)[0]
    except IndexError:
        # Empty result list: no server carries that name in this region.
        raise ValueError(server_name + " server not found")
    # Errors from the snapshot itself (e.g. Conflict) propagate unchanged; the
    # previous bare `try: ... except: raise` wrapper was a no-op and is removed.
    make_snapshot_from_uuid(nova_connection, server_uuid, snapshot_name)
def make_snapshot_from_uuid(nova_connection: NovaConnection, server_uuid: str, snapshot_name: str):
    """
    Make a snapshot named snapshot_name of server_uuid in the nova_connection region.
    :param nova_connection: NovaConnection object
    :param server_uuid: str uuid of the server in the nova_connection region
    :param snapshot_name: str name of the snapshot to create
    :raise: novaclient.exceptions.Conflict if a snapshot of server_uuid is already in progress
    :raise: ValueError if the image reports a status other than queued/saving/active
    """
    # novaclient raises exceptions.Conflict itself when a snapshot of this
    # server is already running; re-raising it by hand (as the old code did)
    # added nothing, so the exception is simply left to propagate.
    snapshot_image_uuid = nova_connection.connection.servers.create_image(server_uuid, snapshot_name)
    # Poll until the snapshot image becomes "active"; check_availability raises
    # ValueError for unknown statuses and that too propagates unchanged.
    while not check_availability(nova_connection, snapshot_image_uuid):
        sleep(2)
def check_availability(nova_connection: NovaConnection, snapshot_image_uuid: str):
    """
    Check whether the image snapshot_image_uuid has become active.
    :param nova_connection: NovaConnection object
    :param snapshot_image_uuid: str uuid of the snapshot image to inspect
    :return: bool True if the image status is "active", False otherwise
    :raise: ValueError if the reported status is not queued/saving/active
    """
    # TODO make an enum of the valid lifecycle states
    known_states = {"queued", "saving", "active"}
    image_info = dict(nova_connection.connection.glance.find_image(snapshot_image_uuid).to_dict())
    if 'status' not in image_info:
        return False
    status = image_info['status']
    if status not in known_states:
        raise ValueError("Unknown status : " + status)
    return status == "active"
| 40.990291 | 108 | 0.72738 |
acef354bf342425cb60bdb73757bec3346012ab3 | 491 | py | Python | Data Structures/Arrays/Merge.py | d3xt3r0/Data-Structures-And-Algorithms | 1d0ff2eec615e29fc5d5d396b584090d7029187c | [
"MIT"
] | null | null | null | Data Structures/Arrays/Merge.py | d3xt3r0/Data-Structures-And-Algorithms | 1d0ff2eec615e29fc5d5d396b584090d7029187c | [
"MIT"
] | null | null | null | Data Structures/Arrays/Merge.py | d3xt3r0/Data-Structures-And-Algorithms | 1d0ff2eec615e29fc5d5d396b584090d7029187c | [
"MIT"
] | null | null | null | # Merge two arrays withouy any extra space i.e. in O(1) space complexity
import math
# naive approach
def merge(arr: list, brr: list) -> list:
    """Naive in-place merge of two individually sorted lists using O(1) extra space.

    After the call, ``arr`` holds the smallest ``len(arr)`` elements in sorted
    order and ``brr`` holds the remaining elements in sorted order.
    Runs in O(len(arr) * len(brr)) time.

    :param arr: first sorted list (modified in place)
    :param brr: second sorted list (modified in place)
    :return: ``arr`` (for convenience; both lists are mutated in place)
    """
    # The original body was a non-functional stub (off-by-one `while i <= len(arr)`
    # with empty branches); this implements the documented naive algorithm.
    for i in range(len(arr)):
        # If arr[i] is bigger than the smallest element of brr, they are on the
        # wrong sides: swap them, then restore brr's sorted order by shifting.
        if brr and arr[i] > brr[0]:
            arr[i], brr[0] = brr[0], arr[i]
            displaced = brr[0]
            k = 1
            while k < len(brr) and brr[k] < displaced:
                brr[k - 1] = brr[k]
                k += 1
            brr[k - 1] = displaced
    return arr
# optimised approach using the GAP Algorithm
def optimisedmerge(arr, arr2):
    """In-place merge of two sorted lists using the GAP (shell-sort style) algorithm.

    Treats ``arr`` followed by ``arr2`` as one virtual array of length
    ``len(arr) + len(arr2)`` and compare-and-swaps elements ``gap`` apart,
    halving the gap (rounding up) until a final gap-1 pass. Runs in
    O((n+m) * log(n+m)) time with O(1) extra space.

    :param arr: first sorted list (modified in place)
    :param arr2: second sorted list (modified in place)
    """
    # The original body only shrank `gap` without ever comparing or swapping;
    # this implements the algorithm the module header describes.
    n, m = len(arr), len(arr2)
    total = n + m

    def _read(idx):
        # Index into the virtual concatenation arr + arr2.
        return arr[idx] if idx < n else arr2[idx - n]

    def _write(idx, value):
        if idx < n:
            arr[idx] = value
        else:
            arr2[idx - n] = value

    gap = (total + 1) // 2  # ceil(total / 2), without needing math.ceil
    while gap > 0:
        for i in range(total - gap):
            left, right = _read(i), _read(i + gap)
            if left > right:
                _write(i, right)
                _write(i + gap, left)
        # Shrink: ceil(gap / 2); stop after the gap == 1 pass.
        gap = 0 if gap == 1 else (gap + 1) // 2
acef3622ea27735cf4aeb2450cf911272b751d5c | 12,534 | py | Python | conans/test/unittests/tools/google/test_bazeldeps.py | Mu-L/conan | 7c24ec4bbd6e8c16cdcd879403aae742689bc36a | [
"MIT"
] | 1 | 2019-11-04T17:23:09.000Z | 2019-11-04T17:23:09.000Z | conans/test/unittests/tools/google/test_bazeldeps.py | Mu-L/conan | 7c24ec4bbd6e8c16cdcd879403aae742689bc36a | [
"MIT"
] | 1 | 2020-11-05T16:16:49.000Z | 2020-11-05T16:16:49.000Z | conans/test/unittests/tools/google/test_bazeldeps.py | Mattlk13/conan | 005fc53485557b0a570bb71670f2ca9c66082165 | [
"MIT"
] | null | null | null | import os
import re
import platform
import mock
from mock import Mock
from conan.tools.google import BazelDeps
from conans import ConanFile
from conans.model.build_info import CppInfo
from conans.model.conanfile_interface import ConanFileInterface
from conans.model.dependencies import Requirement, ConanFileDependencies
from conans.model.ref import ConanFileReference
from conans.test.utils.mocks import MockOptions
from conans.test.utils.test_files import temp_folder
from conans.util.files import save
class MockConanFileDeps(ConanFile):
    """ConanFile test double whose ``dependencies`` property yields a preset value
    instead of being resolved from a dependency graph."""

    def __init__(self, deps, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._deps = deps

    @property
    def dependencies(self):
        # Return exactly what the test injected at construction time.
        return self._deps
def test_bazeldeps_dependency_buildfiles():
    """The BUILD content generated for a direct host requirement declares a
    cc_library carrying the dependency's defines, system libs and a
    ``<lib>_precompiled`` deps entry for each library found on disk."""
    conanfile = ConanFile(Mock(), None)
    cpp_info = CppInfo("mypkg", "dummy_root_folder1")
    cpp_info.defines = ["DUMMY_DEFINE=\"string/value\""]
    cpp_info.system_libs = ["system_lib1"]
    cpp_info.libs = ["lib1"]
    # Fake dependency package: a node ref plus a real package folder containing
    # the library file so the generator can locate it.
    conanfile_dep = ConanFile(Mock(), None)
    conanfile_dep.cpp_info = cpp_info
    conanfile_dep._conan_node = Mock()
    conanfile_dep._conan_node.ref = ConanFileReference.loads("OriginalDepName/1.0")
    package_folder = temp_folder()
    save(os.path.join(package_folder, "lib", "liblib1.a"), "")
    conanfile_dep.folders.set_base_package(package_folder)
    # FIXME: This will run infinite loop if conanfile.dependencies.host.topological_sort.
    # Move to integration test
    with mock.patch('conans.ConanFile.dependencies', new_callable=mock.PropertyMock) as mock_deps:
        req = Requirement(ConanFileReference.loads("OriginalDepName/1.0"))
        mock_deps.return_value = ConanFileDependencies({req: ConanFileInterface(conanfile_dep)})
        bazeldeps = BazelDeps(conanfile)
        for dependency in bazeldeps._conanfile.dependencies.host.values():
            dependency_content = bazeldeps._get_dependency_buildfile_content(dependency)
            assert 'cc_library(\n name = "OriginalDepName",' in dependency_content
            assert """defines = ["DUMMY_DEFINE=\\\\\\"string/value\\\\\\""]""" in dependency_content
            # Linker flag syntax for system libraries differs per platform.
            if platform.system() == "Windows":
                assert 'linkopts = ["/DEFAULTLIB:system_lib1"]' in dependency_content
            else:
                assert 'linkopts = ["-lsystem_lib1"],' in dependency_content
            assert 'deps = [\n \n ":lib1_precompiled",' in dependency_content
def test_bazeldeps_get_lib_file_path_by_basename():
    """A library declared by its full file name ("liblib1.a") is still matched on
    disk and referenced as ``:liblib1.a_precompiled`` in the generated deps."""
    conanfile = ConanFile(Mock(), None)
    cpp_info = CppInfo("mypkg", "dummy_root_folder1")
    cpp_info.defines = ["DUMMY_DEFINE=\"string/value\""]
    cpp_info.system_libs = ["system_lib1"]
    # Note: declared with the literal basename rather than the bare lib name.
    cpp_info.libs = ["liblib1.a"]
    conanfile_dep = ConanFile(Mock(), None)
    conanfile_dep.cpp_info = cpp_info
    conanfile_dep._conan_node = Mock()
    conanfile_dep._conan_node.ref = ConanFileReference.loads("OriginalDepName/1.0")
    package_folder = temp_folder()
    save(os.path.join(package_folder, "lib", "liblib1.a"), "")
    conanfile_dep.folders.set_base_package(package_folder)
    # FIXME: This will run infinite loop if conanfile.dependencies.host.topological_sort.
    # Move to integration test
    with mock.patch('conans.ConanFile.dependencies', new_callable=mock.PropertyMock) as mock_deps:
        req = Requirement(ConanFileReference.loads("OriginalDepName/1.0"))
        mock_deps.return_value = ConanFileDependencies({req: ConanFileInterface(conanfile_dep)})
        bazeldeps = BazelDeps(conanfile)
        for dependency in bazeldeps._conanfile.dependencies.host.values():
            dependency_content = bazeldeps._get_dependency_buildfile_content(dependency)
            assert 'cc_library(\n name = "OriginalDepName",' in dependency_content
            assert """defines = ["DUMMY_DEFINE=\\\\\\"string/value\\\\\\""]""" in dependency_content
            # Linker flag syntax for system libraries differs per platform.
            if platform.system() == "Windows":
                assert 'linkopts = ["/DEFAULTLIB:system_lib1"]' in dependency_content
            else:
                assert 'linkopts = ["-lsystem_lib1"],' in dependency_content
            assert 'deps = [\n \n ":liblib1.a_precompiled",' in dependency_content
def test_bazeldeps_dependency_transitive():
    """A transitive requirement appears in the direct dependency's generated BUILD
    file as an external ``@TransitiveDepName`` entry inside its deps attribute."""
    # Create main ConanFile
    conanfile = ConanFile(Mock(), None)
    cpp_info = CppInfo("mypkg", "dummy_root_folder1")
    cpp_info.defines = ["DUMMY_DEFINE=\"string/value\""]
    cpp_info.system_libs = ["system_lib1"]
    cpp_info.libs = ["lib1"]
    # Create a ConanFile for a direct dependency
    conanfile_dep = ConanFile(Mock(), None)
    conanfile_dep.cpp_info = cpp_info
    conanfile_dep._conan_node = Mock()
    conanfile_dep._conan_node.ref = ConanFileReference.loads("OriginalDepName/1.0")
    package_folder = temp_folder()
    save(os.path.join(package_folder, "lib", "liblib1.a"), "")
    conanfile_dep.folders.set_base_package(package_folder)
    # Add dependency on the direct dependency
    req = Requirement(ConanFileReference.loads("OriginalDepName/1.0"))
    conanfile._conan_dependencies = ConanFileDependencies({req: ConanFileInterface(conanfile_dep)})
    cpp_info_transitive = CppInfo("mypkg_t", "dummy_root_folder1")
    cpp_info_transitive.defines = ["DUMMY_DEFINE=\"string/value\""]
    cpp_info_transitive.system_libs = ["system_lib1"]
    cpp_info_transitive.libs = ["lib_t1"]
    # Create a ConanFile for a transitive dependency
    conanfile_dep_transitive = ConanFile(Mock(), None)
    conanfile_dep_transitive.cpp_info = cpp_info_transitive
    conanfile_dep_transitive._conan_node = Mock()
    conanfile_dep_transitive._conan_node.ref = ConanFileReference.loads("TransitiveDepName/1.0")
    conanfile_dep_transitive.folders.set_base_package("/path/to/folder_dep_t")
    # Add dependency from the direct dependency to the transitive dependency
    req = Requirement(ConanFileReference.loads("TransitiveDepName/1.0"))
    conanfile_dep._conan_dependencies = ConanFileDependencies(
        {req: ConanFileInterface(conanfile_dep_transitive)})
    bazeldeps = BazelDeps(conanfile)
    for dependency in bazeldeps._conanfile.dependencies.host.values():
        dependency_content = bazeldeps._get_dependency_buildfile_content(dependency)
        assert 'cc_library(\n name = "OriginalDepName",' in dependency_content
        assert 'defines = ["DUMMY_DEFINE=\\\\\\"string/value\\\\\\""],' in dependency_content
        # Linker flag syntax for system libraries differs per platform.
        if platform.system() == "Windows":
            assert 'linkopts = ["/DEFAULTLIB:system_lib1"],' in dependency_content
        else:
            assert 'linkopts = ["-lsystem_lib1"],' in dependency_content
        # Ensure that transitive dependency is referenced by the 'deps' attribute of the direct
        # dependency
        assert re.search(r'deps =\s*\[\s*":lib1_precompiled",\s*"@TransitiveDepName"',
                         dependency_content)
def test_bazeldeps_interface_buildfiles():
    """A header-only dependency (no libs declared) generates a cc_library with
    only hdrs/includes/visibility attributes."""
    conanfile = ConanFile(Mock(), None)
    cpp_info = CppInfo("mypkg", "dummy_root_folder2")
    conanfile_dep = ConanFile(Mock(), None)
    conanfile_dep.cpp_info = cpp_info
    conanfile_dep._conan_node = Mock()
    conanfile_dep.folders.set_base_package(temp_folder())
    conanfile_dep._conan_node.ref = ConanFileReference.loads("OriginalDepName/2.0")
    # FIXME: This will run infinite loop if conanfile.dependencies.host.topological_sort.
    # Move to integration test
    with mock.patch('conans.ConanFile.dependencies', new_callable=mock.PropertyMock) as mock_deps:
        req = Requirement(ConanFileReference.loads("OriginalDepName/1.0"))
        mock_deps.return_value = ConanFileDependencies({req: ConanFileInterface(conanfile_dep)})
        bazeldeps = BazelDeps(conanfile)
        dependency = next(iter(bazeldeps._conanfile.dependencies.host.values()))
        # All whitespace is stripped before comparing so the check is layout-independent.
        dependency_content = re.sub(r"\s", "", bazeldeps._get_dependency_buildfile_content(dependency))
        assert(dependency_content == 'load("@rules_cc//cc:defs.bzl","cc_import","cc_library")cc_library(name="OriginalDepName",hdrs=glob(["include/**"]),includes=["include"],visibility=["//visibility:public"],)')
def test_bazeldeps_shared_library_interface_buildfiles():
    """With shared=True, the generated BUILD declares a cc_import pairing the
    import library (.lib) with the shared library (.dll)."""
    cpp_info = CppInfo("mypkg", "dummy_root_folder2")
    cpp_info.libs = ["lib1"]
    options = MockOptions({"shared": True})
    conanfile_dep = MockConanFileDeps(ConanFileDependencies({}), Mock(), None)
    conanfile_dep.options = options
    conanfile_dep.cpp_info = cpp_info
    conanfile_dep._conan_node = Mock()
    conanfile_dep.folders.set_base_package(temp_folder())
    conanfile_dep._conan_node.ref = ConanFileReference.loads("OriginalDepName/1.0")
    # Real package folder holding both halves of the shared library pair.
    package_folder = temp_folder()
    save(os.path.join(package_folder, "lib", "lib1.lib"), "")
    save(os.path.join(package_folder, "bin", "lib1.dll"), "")
    conanfile_dep.folders.set_base_package(package_folder)
    req = Requirement(ConanFileReference.loads("OriginalDepName/1.0"))
    mock_deps = ConanFileDependencies(
        {req: ConanFileInterface(conanfile_dep)})
    conanfile = MockConanFileDeps(mock_deps, Mock(), None)
    bazeldeps = BazelDeps(conanfile)
    dependency = next(iter(bazeldeps._conanfile.dependencies.host.values()))
    # All whitespace is stripped before comparing so the check is layout-independent.
    dependency_content = re.sub(r"\s",
                                "",
                                bazeldeps._get_dependency_buildfile_content(dependency))
    expected_content = """
load("@rules_cc//cc:defs.bzl","cc_import","cc_library")
cc_import(
    name = "lib1_precompiled",
    interface_library = "lib/lib1.lib",
    shared_library = "bin/lib1.dll",
)
cc_library(
    name = "OriginalDepName",
    hdrs=glob(["include/**"]),
    includes=["include"],
    visibility=["//visibility:public"],
    deps = [
        ":lib1_precompiled",
    ],
)
"""
    assert(dependency_content == re.sub(r"\s", "", expected_content))
def test_bazeldeps_main_buildfile():
    """The top-level dependencies file declares one new_local_repository per
    dependency, pointing at its package folder and its generated BUILD file."""
    # Fragments that must all appear somewhere in the generated main buildfile.
    expected_content = [
        'def load_conan_dependencies():',
        'native.new_local_repository(',
        'name="OriginalDepName",',
        'path="/path/to/folder_dep",',
        'build_file="conandeps/OriginalDepName/BUILD",'
    ]
    conanfile = ConanFile(Mock(), None)
    cpp_info = CppInfo("mypkg", "dummy_root_folder1")
    conanfile_dep = ConanFile(Mock(), None)
    conanfile_dep.cpp_info = cpp_info
    conanfile_dep._conan_node = Mock()
    conanfile_dep._conan_node.ref = ConanFileReference.loads("OriginalDepName/1.0")
    conanfile_dep.folders.set_base_package("/path/to/folder_dep")
    # FIXME: This will run infinite loop if conanfile.dependencies.host.topological_sort.
    # Move to integration test
    with mock.patch('conans.ConanFile.dependencies', new_callable=mock.PropertyMock) as mock_deps:
        req = Requirement(ConanFileReference.loads("OriginalDepName/1.0"))
        mock_deps.return_value = ConanFileDependencies({req: ConanFileInterface(conanfile_dep)})
        bazeldeps = BazelDeps(conanfile)
        local_repositories = []
        for dependency in bazeldeps._conanfile.dependencies.host.values():
            content = bazeldeps._create_new_local_repository(dependency,
                                                             "conandeps/OriginalDepName/BUILD")
            local_repositories.append(content)
        content = bazeldeps._get_main_buildfile_content(local_repositories)
        for line in expected_content:
            assert line in content
def test_bazeldeps_build_dependency_buildfiles():
    """A build (tool) requirement is exposed as a filegroup of all its files
    rather than as a cc_library."""
    conanfile = ConanFile(Mock(), None)
    conanfile_dep = ConanFile(Mock(), None)
    conanfile_dep._conan_node = Mock()
    conanfile_dep._conan_node.ref = ConanFileReference.loads("OriginalDepName/1.0")
    conanfile_dep.folders.set_base_package("/path/to/folder_dep")
    # FIXME: This will run infinite loop if conanfile.dependencies.host.topological_sort.
    # Move to integration test
    with mock.patch('conans.ConanFile.dependencies', new_callable=mock.PropertyMock) as mock_deps:
        # build=True marks the requirement as a build/tool dependency.
        req = Requirement(ConanFileReference.loads("OriginalDepName/1.0"), build=True)
        mock_deps.return_value = ConanFileDependencies({req: ConanFileInterface(conanfile_dep)})
        bazeldeps = BazelDeps(conanfile)
        for build_dependency in bazeldeps._conanfile.dependencies.direct_build.values():
            dependency_content = bazeldeps._get_build_dependency_buildfile_content(build_dependency)
            assert 'filegroup(\n name = "OriginalDepName_binaries",' in dependency_content
            assert 'data = glob(["**"]),' in dependency_content
| 43.520833 | 212 | 0.710866 |
acef366e948b2d67cf7174fb950ed4b2c1b34be2 | 2,298 | py | Python | tests/samples_tests/smoke_tests/test_hello_reshape_ssd.py | evgeniya-egupova/openvino | 4b8d6c59e3444ecdc549bfdf357d19d625479b89 | [
"Apache-2.0"
] | null | null | null | tests/samples_tests/smoke_tests/test_hello_reshape_ssd.py | evgeniya-egupova/openvino | 4b8d6c59e3444ecdc549bfdf357d19d625479b89 | [
"Apache-2.0"
] | 44 | 2020-12-09T12:38:22.000Z | 2022-03-28T13:18:29.000Z | tests/samples_tests/smoke_tests/test_hello_reshape_ssd.py | rkazants/openvino | a9a583eb42d43322b39b95b164b5b22c4f341111 | [
"Apache-2.0"
] | 2 | 2021-11-18T06:09:04.000Z | 2021-11-30T07:39:47.000Z | """
Copyright (C) 2018-2021 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import pytest
import logging as log
import sys
from common.samples_common_test_clas import get_tests
from common.samples_common_test_clas import SamplesCommonTestClass
from common.specific_samples_parsers import parse_hello_reshape_ssd
# Route the test logger to stdout so sample output and log lines interleave.
log.basicConfig(format="[ %(levelname)s ] %(message)s", level=log.INFO, stream=sys.stdout)
# Parameter grid for the FP32 hello_reshape_ssd runs: one 500x500 input image,
# the SSD300 FP32 IR model, the CPU device, and batch sizes 1/2/4.
test_data_fp32 = get_tests(cmd_params={'i': [os.path.join('500x500', 'cat.bmp')],
                                       'm': [os.path.join('ssd300',
                                                          'caffe_ssd_300_FP32_v10.xml')],
                                       'd': ['CPU'],
                                       'batch': [1, 2, 4]}, use_device=['d'], use_batch=True
                           )
class TestHelloShape(SamplesCommonTestClass):
    """Smoke tests for the hello_reshape_ssd OpenVINO sample."""
    @classmethod
    def setup_class(cls):
        # Tell the shared base class which sample binary these tests drive.
        cls.sample_name = 'hello_reshape_ssd'
        super().setup_class()
    @pytest.mark.parametrize("param", test_data_fp32)
    def test_hello_reshape_ssd_fp32(self, param):
        """
        Functional check for hello_reshape_ssd.
        This function gets stdout from hello_reshape_ssd (split into lines below).
        The test does not check that the detected object class matches a reference;
        it checks that the demo reported a detected class with its bounding box,
        and so on (see parse_hello_reshape_ssd).
        """
        # Run the _test helper; it returns the sample's stdout, or 0 on a
        # skipped/failed launch, in which case there is nothing to parse.
        stdout = self._test(param, use_preffix=False, get_cmd_func=self.get_hello_shape_cmd_line)
        if not stdout:
            return 0
        stdout = stdout.split('\n')
        is_ok = parse_hello_reshape_ssd(stdout)
        assert is_ok, "[ERROR] Check failed"
        log.info('Functional test passed')
acef372c96a20b6b180c4b111993222b8c554d89 | 57,277 | py | Python | pyqtkeybind/x11/keysymdef.py | Tomas1337/pyqtkeybind | baf06b1d78c2cd376681270f1045ca32fa516b58 | [
"MIT"
] | 37 | 2017-05-08T07:47:48.000Z | 2022-03-25T07:39:47.000Z | pyqtkeybind/x11/keysymdef.py | Tomas1337/pyqtkeybind | baf06b1d78c2cd376681270f1045ca32fa516b58 | [
"MIT"
] | 20 | 2021-05-03T18:02:23.000Z | 2022-03-12T12:01:04.000Z | Lib/site-packages/pyqtkeybind/x11/keysymdef.py | fochoao/cpython | 3dc84b260e5bced65ebc2c45c40c8fa65f9b5aa9 | [
"bzip2-1.0.6",
"0BSD"
] | 13 | 2018-03-28T03:20:13.000Z | 2022-02-08T14:20:31.000Z | """
Taken from https://github.com/BurntSushi/xpybutil/blob/master/xpybutil/keysymdef.py
----
A module that creates a dictionary mapping from an english string of a key,
to a keysym (an X data type) in ``keysyms``.
The module also creates a reverse mapping called ``keysym_strings`` that maps
a keysym to a **list** of english string versions of that keysym. (There are
more string representations than there are keysyms.)
This list is taken from Xlib.
"""
from collections import defaultdict
keysyms = {
'VoidSymbol': 0xffffff,
'BackSpace': 0xff08,
'Tab': 0xff09,
'Linefeed': 0xff0a,
'Clear': 0xff0b,
'Return': 0xff0d,
'Pause': 0xff13,
'Scroll_Lock': 0xff14,
'Sys_Req': 0xff15,
'Escape': 0xff1b,
'Delete': 0xffff,
'Multi_key': 0xff20,
'Codeinput': 0xff37,
'SingleCandidate': 0xff3c,
'MultipleCandidate': 0xff3d,
'PreviousCandidate': 0xff3e,
'Kanji': 0xff21,
'Muhenkan': 0xff22,
'Henkan_Mode': 0xff23,
'Henkan': 0xff23,
'Romaji': 0xff24,
'Hiragana': 0xff25,
'Katakana': 0xff26,
'Hiragana_Katakana': 0xff27,
'Zenkaku': 0xff28,
'Hankaku': 0xff29,
'Zenkaku_Hankaku': 0xff2a,
'Touroku': 0xff2b,
'Massyo': 0xff2c,
'Kana_Lock': 0xff2d,
'Kana_Shift': 0xff2e,
'Eisu_Shift': 0xff2f,
'Eisu_toggle': 0xff30,
'Kanji_Bangou': 0xff37,
'Zen_Koho': 0xff3d,
'Mae_Koho': 0xff3e,
'Home': 0xff50,
'Left': 0xff51,
'Up': 0xff52,
'Right': 0xff53,
'Down': 0xff54,
'Prior': 0xff55,
'Page_Up': 0xff55,
'Next': 0xff56,
'Page_Down': 0xff56,
'End': 0xff57,
'Begin': 0xff58,
'Select': 0xff60,
'Print': 0xff61,
'Execute': 0xff62,
'Insert': 0xff63,
'Undo': 0xff65,
'Redo': 0xff66,
'Menu': 0xff67,
'Find': 0xff68,
'Cancel': 0xff69,
'Help': 0xff6a,
'Break': 0xff6b,
'Mode_switch': 0xff7e,
'script_switch': 0xff7e,
'Num_Lock': 0xff7f,
'KP_Space': 0xff80,
'KP_Tab': 0xff89,
'KP_Enter': 0xff8d,
'KP_F1': 0xff91,
'KP_F2': 0xff92,
'KP_F3': 0xff93,
'KP_F4': 0xff94,
'KP_Home': 0xff95,
'KP_Left': 0xff96,
'KP_Up': 0xff97,
'KP_Right': 0xff98,
'KP_Down': 0xff99,
'KP_Prior': 0xff9a,
'KP_Page_Up': 0xff9a,
'KP_Next': 0xff9b,
'KP_Page_Down': 0xff9b,
'KP_End': 0xff9c,
'KP_Begin': 0xff9d,
'KP_Insert': 0xff9e,
'KP_Delete': 0xff9f,
'KP_Equal': 0xffbd,
'KP_Multiply': 0xffaa,
'KP_Add': 0xffab,
'KP_Separator': 0xffac,
'KP_Subtract': 0xffad,
'KP_Decimal': 0xffae,
'KP_Divide': 0xffaf,
'KP_0': 0xffb0,
'KP_1': 0xffb1,
'KP_2': 0xffb2,
'KP_3': 0xffb3,
'KP_4': 0xffb4,
'KP_5': 0xffb5,
'KP_6': 0xffb6,
'KP_7': 0xffb7,
'KP_8': 0xffb8,
'KP_9': 0xffb9,
'F1': 0xffbe,
'F2': 0xffbf,
'F3': 0xffc0,
'F4': 0xffc1,
'F5': 0xffc2,
'F6': 0xffc3,
'F7': 0xffc4,
'F8': 0xffc5,
'F9': 0xffc6,
'F10': 0xffc7,
'F11': 0xffc8,
'L1': 0xffc8,
'F12': 0xffc9,
'L2': 0xffc9,
'F13': 0xffca,
'L3': 0xffca,
'F14': 0xffcb,
'L4': 0xffcb,
'F15': 0xffcc,
'L5': 0xffcc,
'F16': 0xffcd,
'L6': 0xffcd,
'F17': 0xffce,
'L7': 0xffce,
'F18': 0xffcf,
'L8': 0xffcf,
'F19': 0xffd0,
'L9': 0xffd0,
'F20': 0xffd1,
'L10': 0xffd1,
'F21': 0xffd2,
'R1': 0xffd2,
'F22': 0xffd3,
'R2': 0xffd3,
'F23': 0xffd4,
'R3': 0xffd4,
'F24': 0xffd5,
'R4': 0xffd5,
'F25': 0xffd6,
'R5': 0xffd6,
'F26': 0xffd7,
'R6': 0xffd7,
'F27': 0xffd8,
'R7': 0xffd8,
'F28': 0xffd9,
'R8': 0xffd9,
'F29': 0xffda,
'R9': 0xffda,
'F30': 0xffdb,
'R10': 0xffdb,
'F31': 0xffdc,
'R11': 0xffdc,
'F32': 0xffdd,
'R12': 0xffdd,
'F33': 0xffde,
'R13': 0xffde,
'F34': 0xffdf,
'R14': 0xffdf,
'F35': 0xffe0,
'R15': 0xffe0,
'Shift_L': 0xffe1,
'Shift_R': 0xffe2,
'Control_L': 0xffe3,
'Control_R': 0xffe4,
'Caps_Lock': 0xffe5,
'Shift_Lock': 0xffe6,
'Meta_L': 0xffe7,
'Meta_R': 0xffe8,
'Alt_L': 0xffe9,
'Alt_R': 0xffea,
'Super_L': 0xffeb,
'Super_R': 0xffec,
'Hyper_L': 0xffed,
'Hyper_R': 0xffee,
'ISO_Lock': 0xfe01,
'ISO_Level2_Latch': 0xfe02,
'ISO_Level3_Shift': 0xfe03,
'ISO_Level3_Latch': 0xfe04,
'ISO_Level3_Lock': 0xfe05,
'ISO_Level5_Shift': 0xfe11,
'ISO_Level5_Latch': 0xfe12,
'ISO_Level5_Lock': 0xfe13,
'ISO_Group_Shift': 0xff7e,
'ISO_Group_Latch': 0xfe06,
'ISO_Group_Lock': 0xfe07,
'ISO_Next_Group': 0xfe08,
'ISO_Next_Group_Lock': 0xfe09,
'ISO_Prev_Group': 0xfe0a,
'ISO_Prev_Group_Lock': 0xfe0b,
'ISO_First_Group': 0xfe0c,
'ISO_First_Group_Lock': 0xfe0d,
'ISO_Last_Group': 0xfe0e,
'ISO_Last_Group_Lock': 0xfe0f,
'ISO_Left_Tab': 0xfe20,
'ISO_Move_Line_Up': 0xfe21,
'ISO_Move_Line_Down': 0xfe22,
'ISO_Partial_Line_Up': 0xfe23,
'ISO_Partial_Line_Down': 0xfe24,
'ISO_Partial_Space_Left': 0xfe25,
'ISO_Partial_Space_Right': 0xfe26,
'ISO_Set_Margin_Left': 0xfe27,
'ISO_Set_Margin_Right': 0xfe28,
'ISO_Release_Margin_Left': 0xfe29,
'ISO_Release_Margin_Right': 0xfe2a,
'ISO_Release_Both_Margins': 0xfe2b,
'ISO_Fast_Cursor_Left': 0xfe2c,
'ISO_Fast_Cursor_Right': 0xfe2d,
'ISO_Fast_Cursor_Up': 0xfe2e,
'ISO_Fast_Cursor_Down': 0xfe2f,
'ISO_Continuous_Underline': 0xfe30,
'ISO_Discontinuous_Underline': 0xfe31,
'ISO_Emphasize': 0xfe32,
'ISO_Center_Object': 0xfe33,
'ISO_Enter': 0xfe34,
'dead_grave': 0xfe50,
'dead_acute': 0xfe51,
'dead_circumflex': 0xfe52,
'dead_tilde': 0xfe53,
'dead_perispomeni': 0xfe53,
'dead_macron': 0xfe54,
'dead_breve': 0xfe55,
'dead_abovedot': 0xfe56,
'dead_diaeresis': 0xfe57,
'dead_abovering': 0xfe58,
'dead_doubleacute': 0xfe59,
'dead_caron': 0xfe5a,
'dead_cedilla': 0xfe5b,
'dead_ogonek': 0xfe5c,
'dead_iota': 0xfe5d,
'dead_voiced_sound': 0xfe5e,
'dead_semivoiced_sound': 0xfe5f,
'dead_belowdot': 0xfe60,
'dead_hook': 0xfe61,
'dead_horn': 0xfe62,
'dead_stroke': 0xfe63,
'dead_abovecomma': 0xfe64,
'dead_psili': 0xfe64,
'dead_abovereversedcomma': 0xfe65,
'dead_dasia': 0xfe65,
'dead_doublegrave': 0xfe66,
'dead_belowring': 0xfe67,
'dead_belowmacron': 0xfe68,
'dead_belowcircumflex': 0xfe69,
'dead_belowtilde': 0xfe6a,
'dead_belowbreve': 0xfe6b,
'dead_belowdiaeresis': 0xfe6c,
'dead_invertedbreve': 0xfe6d,
'dead_belowcomma': 0xfe6e,
'dead_currency': 0xfe6f,
'dead_a': 0xfe80,
'dead_A': 0xfe81,
'dead_e': 0xfe82,
'dead_E': 0xfe83,
'dead_i': 0xfe84,
'dead_I': 0xfe85,
'dead_o': 0xfe86,
'dead_O': 0xfe87,
'dead_u': 0xfe88,
'dead_U': 0xfe89,
'dead_small_schwa': 0xfe8a,
'dead_capital_schwa': 0xfe8b,
'First_Virtual_Screen': 0xfed0,
'Prev_Virtual_Screen': 0xfed1,
'Next_Virtual_Screen': 0xfed2,
'Last_Virtual_Screen': 0xfed4,
'Terminate_Server': 0xfed5,
'AccessX_Enable': 0xfe70,
'AccessX_Feedback_Enable': 0xfe71,
'RepeatKeys_Enable': 0xfe72,
'SlowKeys_Enable': 0xfe73,
'BounceKeys_Enable': 0xfe74,
'StickyKeys_Enable': 0xfe75,
'MouseKeys_Enable': 0xfe76,
'MouseKeys_Accel_Enable': 0xfe77,
'Overlay1_Enable': 0xfe78,
'Overlay2_Enable': 0xfe79,
'AudibleBell_Enable': 0xfe7a,
'Pointer_Left': 0xfee0,
'Pointer_Right': 0xfee1,
'Pointer_Up': 0xfee2,
'Pointer_Down': 0xfee3,
'Pointer_UpLeft': 0xfee4,
'Pointer_UpRight': 0xfee5,
'Pointer_DownLeft': 0xfee6,
'Pointer_DownRight': 0xfee7,
'Pointer_Button_Dflt': 0xfee8,
'Pointer_Button1': 0xfee9,
'Pointer_Button2': 0xfeea,
'Pointer_Button3': 0xfeeb,
'Pointer_Button4': 0xfeec,
'Pointer_Button5': 0xfeed,
'Pointer_DblClick_Dflt': 0xfeee,
'Pointer_DblClick1': 0xfeef,
'Pointer_DblClick2': 0xfef0,
'Pointer_DblClick3': 0xfef1,
'Pointer_DblClick4': 0xfef2,
'Pointer_DblClick5': 0xfef3,
'Pointer_Drag_Dflt': 0xfef4,
'Pointer_Drag1': 0xfef5,
'Pointer_Drag2': 0xfef6,
'Pointer_Drag3': 0xfef7,
'Pointer_Drag4': 0xfef8,
'Pointer_Drag5': 0xfefd,
'Pointer_EnableKeys': 0xfef9,
'Pointer_Accelerate': 0xfefa,
'Pointer_DfltBtnNext': 0xfefb,
'Pointer_DfltBtnPrev': 0xfefc,
'3270_Duplicate': 0xfd01,
'3270_FieldMark': 0xfd02,
'3270_Right2': 0xfd03,
'3270_Left2': 0xfd04,
'3270_BackTab': 0xfd05,
'3270_EraseEOF': 0xfd06,
'3270_EraseInput': 0xfd07,
'3270_Reset': 0xfd08,
'3270_Quit': 0xfd09,
'3270_PA1': 0xfd0a,
'3270_PA2': 0xfd0b,
'3270_PA3': 0xfd0c,
'3270_Test': 0xfd0d,
'3270_Attn': 0xfd0e,
'3270_CursorBlink': 0xfd0f,
'3270_AltCursor': 0xfd10,
'3270_KeyClick': 0xfd11,
'3270_Jump': 0xfd12,
'3270_Ident': 0xfd13,
'3270_Rule': 0xfd14,
'3270_Copy': 0xfd15,
'3270_Play': 0xfd16,
'3270_Setup': 0xfd17,
'3270_Record': 0xfd18,
'3270_ChangeScreen': 0xfd19,
'3270_DeleteWord': 0xfd1a,
'3270_ExSelect': 0xfd1b,
'3270_CursorSelect': 0xfd1c,
'3270_PrintScreen': 0xfd1d,
'3270_Enter': 0xfd1e,
'space': 0x0020,
'exclam': 0x0021,
'quotedbl': 0x0022,
'numbersign': 0x0023,
'dollar': 0x0024,
'percent': 0x0025,
'ampersand': 0x0026,
'apostrophe': 0x0027,
'quoteright': 0x0027,
'parenleft': 0x0028,
'parenright': 0x0029,
'asterisk': 0x002a,
'plus': 0x002b,
'comma': 0x002c,
'minus': 0x002d,
'period': 0x002e,
'slash': 0x002f,
'0': 0x0030,
'1': 0x0031,
'2': 0x0032,
'3': 0x0033,
'4': 0x0034,
'5': 0x0035,
'6': 0x0036,
'7': 0x0037,
'8': 0x0038,
'9': 0x0039,
'colon': 0x003a,
'semicolon': 0x003b,
'less': 0x003c,
'equal': 0x003d,
'greater': 0x003e,
'question': 0x003f,
'at': 0x0040,
'A': 0x0041,
'B': 0x0042,
'C': 0x0043,
'D': 0x0044,
'E': 0x0045,
'F': 0x0046,
'G': 0x0047,
'H': 0x0048,
'I': 0x0049,
'J': 0x004a,
'K': 0x004b,
'L': 0x004c,
'M': 0x004d,
'N': 0x004e,
'O': 0x004f,
'P': 0x0050,
'Q': 0x0051,
'R': 0x0052,
'S': 0x0053,
'T': 0x0054,
'U': 0x0055,
'V': 0x0056,
'W': 0x0057,
'X': 0x0058,
'Y': 0x0059,
'Z': 0x005a,
'bracketleft': 0x005b,
'backslash': 0x005c,
'bracketright': 0x005d,
'asciicircum': 0x005e,
'underscore': 0x005f,
'grave': 0x0060,
'quoteleft': 0x0060,
'a': 0x0061,
'b': 0x0062,
'c': 0x0063,
'd': 0x0064,
'e': 0x0065,
'f': 0x0066,
'g': 0x0067,
'h': 0x0068,
'i': 0x0069,
'j': 0x006a,
'k': 0x006b,
'l': 0x006c,
'm': 0x006d,
'n': 0x006e,
'o': 0x006f,
'p': 0x0070,
'q': 0x0071,
'r': 0x0072,
's': 0x0073,
't': 0x0074,
'u': 0x0075,
'v': 0x0076,
'w': 0x0077,
'x': 0x0078,
'y': 0x0079,
'z': 0x007a,
'braceleft': 0x007b,
'bar': 0x007c,
'braceright': 0x007d,
'asciitilde': 0x007e,
'nobreakspace': 0x00a0,
'exclamdown': 0x00a1,
'cent': 0x00a2,
'sterling': 0x00a3,
'currency': 0x00a4,
'yen': 0x00a5,
'brokenbar': 0x00a6,
'section': 0x00a7,
'diaeresis': 0x00a8,
'copyright': 0x00a9,
'ordfeminine': 0x00aa,
'guillemotleft': 0x00ab,
'notsign': 0x00ac,
'hyphen': 0x00ad,
'registered': 0x00ae,
'macron': 0x00af,
'degree': 0x00b0,
'plusminus': 0x00b1,
'twosuperior': 0x00b2,
'threesuperior': 0x00b3,
'acute': 0x00b4,
'mu': 0x00b5,
'paragraph': 0x00b6,
'periodcentered': 0x00b7,
'cedilla': 0x00b8,
'onesuperior': 0x00b9,
'masculine': 0x00ba,
'guillemotright': 0x00bb,
'onequarter': 0x00bc,
'onehalf': 0x00bd,
'threequarters': 0x00be,
'questiondown': 0x00bf,
'Agrave': 0x00c0,
'Aacute': 0x00c1,
'Acircumflex': 0x00c2,
'Atilde': 0x00c3,
'Adiaeresis': 0x00c4,
'Aring': 0x00c5,
'AE': 0x00c6,
'Ccedilla': 0x00c7,
'Egrave': 0x00c8,
'Eacute': 0x00c9,
'Ecircumflex': 0x00ca,
'Ediaeresis': 0x00cb,
'Igrave': 0x00cc,
'Iacute': 0x00cd,
'Icircumflex': 0x00ce,
'Idiaeresis': 0x00cf,
'ETH': 0x00d0,
'Eth': 0x00d0,
'Ntilde': 0x00d1,
'Ograve': 0x00d2,
'Oacute': 0x00d3,
'Ocircumflex': 0x00d4,
'Otilde': 0x00d5,
'Odiaeresis': 0x00d6,
'multiply': 0x00d7,
'Oslash': 0x00d8,
'Ooblique': 0x00d8,
'Ugrave': 0x00d9,
'Uacute': 0x00da,
'Ucircumflex': 0x00db,
'Udiaeresis': 0x00dc,
'Yacute': 0x00dd,
'THORN': 0x00de,
'Thorn': 0x00de,
'ssharp': 0x00df,
'agrave': 0x00e0,
'aacute': 0x00e1,
'acircumflex': 0x00e2,
'atilde': 0x00e3,
'adiaeresis': 0x00e4,
'aring': 0x00e5,
'ae': 0x00e6,
'ccedilla': 0x00e7,
'egrave': 0x00e8,
'eacute': 0x00e9,
'ecircumflex': 0x00ea,
'ediaeresis': 0x00eb,
'igrave': 0x00ec,
'iacute': 0x00ed,
'icircumflex': 0x00ee,
'idiaeresis': 0x00ef,
'eth': 0x00f0,
'ntilde': 0x00f1,
'ograve': 0x00f2,
'oacute': 0x00f3,
'ocircumflex': 0x00f4,
'otilde': 0x00f5,
'odiaeresis': 0x00f6,
'division': 0x00f7,
'oslash': 0x00f8,
'ooblique': 0x00f8,
'ugrave': 0x00f9,
'uacute': 0x00fa,
'ucircumflex': 0x00fb,
'udiaeresis': 0x00fc,
'yacute': 0x00fd,
'thorn': 0x00fe,
'ydiaeresis': 0x00ff,
'Aogonek': 0x01a1,
'breve': 0x01a2,
'Lstroke': 0x01a3,
'Lcaron': 0x01a5,
'Sacute': 0x01a6,
'Scaron': 0x01a9,
'Scedilla': 0x01aa,
'Tcaron': 0x01ab,
'Zacute': 0x01ac,
'Zcaron': 0x01ae,
'Zabovedot': 0x01af,
'aogonek': 0x01b1,
'ogonek': 0x01b2,
'lstroke': 0x01b3,
'lcaron': 0x01b5,
'sacute': 0x01b6,
'caron': 0x01b7,
'scaron': 0x01b9,
'scedilla': 0x01ba,
'tcaron': 0x01bb,
'zacute': 0x01bc,
'doubleacute': 0x01bd,
'zcaron': 0x01be,
'zabovedot': 0x01bf,
'Racute': 0x01c0,
'Abreve': 0x01c3,
'Lacute': 0x01c5,
'Cacute': 0x01c6,
'Ccaron': 0x01c8,
'Eogonek': 0x01ca,
'Ecaron': 0x01cc,
'Dcaron': 0x01cf,
'Dstroke': 0x01d0,
'Nacute': 0x01d1,
'Ncaron': 0x01d2,
'Odoubleacute': 0x01d5,
'Rcaron': 0x01d8,
'Uring': 0x01d9,
'Udoubleacute': 0x01db,
'Tcedilla': 0x01de,
'racute': 0x01e0,
'abreve': 0x01e3,
'lacute': 0x01e5,
'cacute': 0x01e6,
'ccaron': 0x01e8,
'eogonek': 0x01ea,
'ecaron': 0x01ec,
'dcaron': 0x01ef,
'dstroke': 0x01f0,
'nacute': 0x01f1,
'ncaron': 0x01f2,
'odoubleacute': 0x01f5,
'udoubleacute': 0x01fb,
'rcaron': 0x01f8,
'uring': 0x01f9,
'tcedilla': 0x01fe,
'abovedot': 0x01ff,
'Hstroke': 0x02a1,
'Hcircumflex': 0x02a6,
'Iabovedot': 0x02a9,
'Gbreve': 0x02ab,
'Jcircumflex': 0x02ac,
'hstroke': 0x02b1,
'hcircumflex': 0x02b6,
'idotless': 0x02b9,
'gbreve': 0x02bb,
'jcircumflex': 0x02bc,
'Cabovedot': 0x02c5,
'Ccircumflex': 0x02c6,
'Gabovedot': 0x02d5,
'Gcircumflex': 0x02d8,
'Ubreve': 0x02dd,
'Scircumflex': 0x02de,
'cabovedot': 0x02e5,
'ccircumflex': 0x02e6,
'gabovedot': 0x02f5,
'gcircumflex': 0x02f8,
'ubreve': 0x02fd,
'scircumflex': 0x02fe,
'kra': 0x03a2,
'kappa': 0x03a2,
'Rcedilla': 0x03a3,
'Itilde': 0x03a5,
'Lcedilla': 0x03a6,
'Emacron': 0x03aa,
'Gcedilla': 0x03ab,
'Tslash': 0x03ac,
'rcedilla': 0x03b3,
'itilde': 0x03b5,
'lcedilla': 0x03b6,
'emacron': 0x03ba,
'gcedilla': 0x03bb,
'tslash': 0x03bc,
'ENG': 0x03bd,
'eng': 0x03bf,
'Amacron': 0x03c0,
'Iogonek': 0x03c7,
'Eabovedot': 0x03cc,
'Imacron': 0x03cf,
'Ncedilla': 0x03d1,
'Omacron': 0x03d2,
'Kcedilla': 0x03d3,
'Uogonek': 0x03d9,
'Utilde': 0x03dd,
'Umacron': 0x03de,
'amacron': 0x03e0,
'iogonek': 0x03e7,
'eabovedot': 0x03ec,
'imacron': 0x03ef,
'ncedilla': 0x03f1,
'omacron': 0x03f2,
'kcedilla': 0x03f3,
'uogonek': 0x03f9,
'utilde': 0x03fd,
'umacron': 0x03fe,
'Babovedot': 0x1001e02,
'babovedot': 0x1001e03,
'Dabovedot': 0x1001e0a,
'Wgrave': 0x1001e80,
'Wacute': 0x1001e82,
'dabovedot': 0x1001e0b,
'Ygrave': 0x1001ef2,
'Fabovedot': 0x1001e1e,
'fabovedot': 0x1001e1f,
'Mabovedot': 0x1001e40,
'mabovedot': 0x1001e41,
'Pabovedot': 0x1001e56,
'wgrave': 0x1001e81,
'pabovedot': 0x1001e57,
'wacute': 0x1001e83,
'Sabovedot': 0x1001e60,
'ygrave': 0x1001ef3,
'Wdiaeresis': 0x1001e84,
'wdiaeresis': 0x1001e85,
'sabovedot': 0x1001e61,
'Wcircumflex': 0x1000174,
'Tabovedot': 0x1001e6a,
'Ycircumflex': 0x1000176,
'wcircumflex': 0x1000175,
'tabovedot': 0x1001e6b,
'ycircumflex': 0x1000177,
'OE': 0x13bc,
'oe': 0x13bd,
'Ydiaeresis': 0x13be,
'overline': 0x047e,
'kana_fullstop': 0x04a1,
'kana_openingbracket': 0x04a2,
'kana_closingbracket': 0x04a3,
'kana_comma': 0x04a4,
'kana_conjunctive': 0x04a5,
'kana_middledot': 0x04a5,
'kana_WO': 0x04a6,
'kana_a': 0x04a7,
'kana_i': 0x04a8,
'kana_u': 0x04a9,
'kana_e': 0x04aa,
'kana_o': 0x04ab,
'kana_ya': 0x04ac,
'kana_yu': 0x04ad,
'kana_yo': 0x04ae,
'kana_tsu': 0x04af,
'kana_tu': 0x04af,
'prolongedsound': 0x04b0,
'kana_A': 0x04b1,
'kana_I': 0x04b2,
'kana_U': 0x04b3,
'kana_E': 0x04b4,
'kana_O': 0x04b5,
'kana_KA': 0x04b6,
'kana_KI': 0x04b7,
'kana_KU': 0x04b8,
'kana_KE': 0x04b9,
'kana_KO': 0x04ba,
'kana_SA': 0x04bb,
'kana_SHI': 0x04bc,
'kana_SU': 0x04bd,
'kana_SE': 0x04be,
'kana_SO': 0x04bf,
'kana_TA': 0x04c0,
'kana_CHI': 0x04c1,
'kana_TI': 0x04c1,
'kana_TSU': 0x04c2,
'kana_TU': 0x04c2,
'kana_TE': 0x04c3,
'kana_TO': 0x04c4,
'kana_NA': 0x04c5,
'kana_NI': 0x04c6,
'kana_NU': 0x04c7,
'kana_NE': 0x04c8,
'kana_NO': 0x04c9,
'kana_HA': 0x04ca,
'kana_HI': 0x04cb,
'kana_FU': 0x04cc,
'kana_HU': 0x04cc,
'kana_HE': 0x04cd,
'kana_HO': 0x04ce,
'kana_MA': 0x04cf,
'kana_MI': 0x04d0,
'kana_MU': 0x04d1,
'kana_ME': 0x04d2,
'kana_MO': 0x04d3,
'kana_YA': 0x04d4,
'kana_YU': 0x04d5,
'kana_YO': 0x04d6,
'kana_RA': 0x04d7,
'kana_RI': 0x04d8,
'kana_RU': 0x04d9,
'kana_RE': 0x04da,
'kana_RO': 0x04db,
'kana_WA': 0x04dc,
'kana_N': 0x04dd,
'voicedsound': 0x04de,
'semivoicedsound': 0x04df,
'kana_switch': 0xff7e,
'Farsi_0': 0x10006f0,
'Farsi_1': 0x10006f1,
'Farsi_2': 0x10006f2,
'Farsi_3': 0x10006f3,
'Farsi_4': 0x10006f4,
'Farsi_5': 0x10006f5,
'Farsi_6': 0x10006f6,
'Farsi_7': 0x10006f7,
'Farsi_8': 0x10006f8,
'Farsi_9': 0x10006f9,
'Arabic_percent': 0x100066a,
'Arabic_superscript_alef': 0x1000670,
'Arabic_tteh': 0x1000679,
'Arabic_peh': 0x100067e,
'Arabic_tcheh': 0x1000686,
'Arabic_ddal': 0x1000688,
'Arabic_rreh': 0x1000691,
'Arabic_comma': 0x05ac,
'Arabic_fullstop': 0x10006d4,
'Arabic_0': 0x1000660,
'Arabic_1': 0x1000661,
'Arabic_2': 0x1000662,
'Arabic_3': 0x1000663,
'Arabic_4': 0x1000664,
'Arabic_5': 0x1000665,
'Arabic_6': 0x1000666,
'Arabic_7': 0x1000667,
'Arabic_8': 0x1000668,
'Arabic_9': 0x1000669,
'Arabic_semicolon': 0x05bb,
'Arabic_question_mark': 0x05bf,
'Arabic_hamza': 0x05c1,
'Arabic_maddaonalef': 0x05c2,
'Arabic_hamzaonalef': 0x05c3,
'Arabic_hamzaonwaw': 0x05c4,
'Arabic_hamzaunderalef': 0x05c5,
'Arabic_hamzaonyeh': 0x05c6,
'Arabic_alef': 0x05c7,
'Arabic_beh': 0x05c8,
'Arabic_tehmarbuta': 0x05c9,
'Arabic_teh': 0x05ca,
'Arabic_theh': 0x05cb,
'Arabic_jeem': 0x05cc,
'Arabic_hah': 0x05cd,
'Arabic_khah': 0x05ce,
'Arabic_dal': 0x05cf,
'Arabic_thal': 0x05d0,
'Arabic_ra': 0x05d1,
'Arabic_zain': 0x05d2,
'Arabic_seen': 0x05d3,
'Arabic_sheen': 0x05d4,
'Arabic_sad': 0x05d5,
'Arabic_dad': 0x05d6,
'Arabic_tah': 0x05d7,
'Arabic_zah': 0x05d8,
'Arabic_ain': 0x05d9,
'Arabic_ghain': 0x05da,
'Arabic_tatweel': 0x05e0,
'Arabic_feh': 0x05e1,
'Arabic_qaf': 0x05e2,
'Arabic_kaf': 0x05e3,
'Arabic_lam': 0x05e4,
'Arabic_meem': 0x05e5,
'Arabic_noon': 0x05e6,
'Arabic_ha': 0x05e7,
'Arabic_heh': 0x05e7,
'Arabic_waw': 0x05e8,
'Arabic_alefmaksura': 0x05e9,
'Arabic_yeh': 0x05ea,
'Arabic_fathatan': 0x05eb,
'Arabic_dammatan': 0x05ec,
'Arabic_kasratan': 0x05ed,
'Arabic_fatha': 0x05ee,
'Arabic_damma': 0x05ef,
'Arabic_kasra': 0x05f0,
'Arabic_shadda': 0x05f1,
'Arabic_sukun': 0x05f2,
'Arabic_madda_above': 0x1000653,
'Arabic_hamza_above': 0x1000654,
'Arabic_hamza_below': 0x1000655,
'Arabic_jeh': 0x1000698,
'Arabic_veh': 0x10006a4,
'Arabic_keheh': 0x10006a9,
'Arabic_gaf': 0x10006af,
'Arabic_noon_ghunna': 0x10006ba,
'Arabic_heh_doachashmee': 0x10006be,
'Farsi_yeh': 0x10006cc,
'Arabic_farsi_yeh': 0x10006cc,
'Arabic_yeh_baree': 0x10006d2,
'Arabic_heh_goal': 0x10006c1,
'Arabic_switch': 0xff7e,
'Cyrillic_GHE_bar': 0x1000492,
'Cyrillic_ghe_bar': 0x1000493,
'Cyrillic_ZHE_descender': 0x1000496,
'Cyrillic_zhe_descender': 0x1000497,
'Cyrillic_KA_descender': 0x100049a,
'Cyrillic_ka_descender': 0x100049b,
'Cyrillic_KA_vertstroke': 0x100049c,
'Cyrillic_ka_vertstroke': 0x100049d,
'Cyrillic_EN_descender': 0x10004a2,
'Cyrillic_en_descender': 0x10004a3,
'Cyrillic_U_straight': 0x10004ae,
'Cyrillic_u_straight': 0x10004af,
'Cyrillic_U_straight_bar': 0x10004b0,
'Cyrillic_u_straight_bar': 0x10004b1,
'Cyrillic_HA_descender': 0x10004b2,
'Cyrillic_ha_descender': 0x10004b3,
'Cyrillic_CHE_descender': 0x10004b6,
'Cyrillic_che_descender': 0x10004b7,
'Cyrillic_CHE_vertstroke': 0x10004b8,
'Cyrillic_che_vertstroke': 0x10004b9,
'Cyrillic_SHHA': 0x10004ba,
'Cyrillic_shha': 0x10004bb,
'Cyrillic_SCHWA': 0x10004d8,
'Cyrillic_schwa': 0x10004d9,
'Cyrillic_I_macron': 0x10004e2,
'Cyrillic_i_macron': 0x10004e3,
'Cyrillic_O_bar': 0x10004e8,
'Cyrillic_o_bar': 0x10004e9,
'Cyrillic_U_macron': 0x10004ee,
'Cyrillic_u_macron': 0x10004ef,
'Serbian_dje': 0x06a1,
'Macedonia_gje': 0x06a2,
'Cyrillic_io': 0x06a3,
'Ukrainian_ie': 0x06a4,
'Ukranian_je': 0x06a4,
'Macedonia_dse': 0x06a5,
'Ukrainian_i': 0x06a6,
'Ukranian_i': 0x06a6,
'Ukrainian_yi': 0x06a7,
'Ukranian_yi': 0x06a7,
'Cyrillic_je': 0x06a8,
'Serbian_je': 0x06a8,
'Cyrillic_lje': 0x06a9,
'Serbian_lje': 0x06a9,
'Cyrillic_nje': 0x06aa,
'Serbian_nje': 0x06aa,
'Serbian_tshe': 0x06ab,
'Macedonia_kje': 0x06ac,
'Ukrainian_ghe_with_upturn': 0x06ad,
'Byelorussian_shortu': 0x06ae,
'Cyrillic_dzhe': 0x06af,
'Serbian_dze': 0x06af,
'numerosign': 0x06b0,
'Serbian_DJE': 0x06b1,
'Macedonia_GJE': 0x06b2,
'Cyrillic_IO': 0x06b3,
'Ukrainian_IE': 0x06b4,
'Ukranian_JE': 0x06b4,
'Macedonia_DSE': 0x06b5,
'Ukrainian_I': 0x06b6,
'Ukranian_I': 0x06b6,
'Ukrainian_YI': 0x06b7,
'Ukranian_YI': 0x06b7,
'Cyrillic_JE': 0x06b8,
'Serbian_JE': 0x06b8,
'Cyrillic_LJE': 0x06b9,
'Serbian_LJE': 0x06b9,
'Cyrillic_NJE': 0x06ba,
'Serbian_NJE': 0x06ba,
'Serbian_TSHE': 0x06bb,
'Macedonia_KJE': 0x06bc,
'Ukrainian_GHE_WITH_UPTURN': 0x06bd,
'Byelorussian_SHORTU': 0x06be,
'Cyrillic_DZHE': 0x06bf,
'Serbian_DZE': 0x06bf,
'Cyrillic_yu': 0x06c0,
'Cyrillic_a': 0x06c1,
'Cyrillic_be': 0x06c2,
'Cyrillic_tse': 0x06c3,
'Cyrillic_de': 0x06c4,
'Cyrillic_ie': 0x06c5,
'Cyrillic_ef': 0x06c6,
'Cyrillic_ghe': 0x06c7,
'Cyrillic_ha': 0x06c8,
'Cyrillic_i': 0x06c9,
'Cyrillic_shorti': 0x06ca,
'Cyrillic_ka': 0x06cb,
'Cyrillic_el': 0x06cc,
'Cyrillic_em': 0x06cd,
'Cyrillic_en': 0x06ce,
'Cyrillic_o': 0x06cf,
'Cyrillic_pe': 0x06d0,
'Cyrillic_ya': 0x06d1,
'Cyrillic_er': 0x06d2,
'Cyrillic_es': 0x06d3,
'Cyrillic_te': 0x06d4,
'Cyrillic_u': 0x06d5,
'Cyrillic_zhe': 0x06d6,
'Cyrillic_ve': 0x06d7,
'Cyrillic_softsign': 0x06d8,
'Cyrillic_yeru': 0x06d9,
'Cyrillic_ze': 0x06da,
'Cyrillic_sha': 0x06db,
'Cyrillic_e': 0x06dc,
'Cyrillic_shcha': 0x06dd,
'Cyrillic_che': 0x06de,
'Cyrillic_hardsign': 0x06df,
'Cyrillic_YU': 0x06e0,
'Cyrillic_A': 0x06e1,
'Cyrillic_BE': 0x06e2,
'Cyrillic_TSE': 0x06e3,
'Cyrillic_DE': 0x06e4,
'Cyrillic_IE': 0x06e5,
'Cyrillic_EF': 0x06e6,
'Cyrillic_GHE': 0x06e7,
'Cyrillic_HA': 0x06e8,
'Cyrillic_I': 0x06e9,
'Cyrillic_SHORTI': 0x06ea,
'Cyrillic_KA': 0x06eb,
'Cyrillic_EL': 0x06ec,
'Cyrillic_EM': 0x06ed,
'Cyrillic_EN': 0x06ee,
'Cyrillic_O': 0x06ef,
'Cyrillic_PE': 0x06f0,
'Cyrillic_YA': 0x06f1,
'Cyrillic_ER': 0x06f2,
'Cyrillic_ES': 0x06f3,
'Cyrillic_TE': 0x06f4,
'Cyrillic_U': 0x06f5,
'Cyrillic_ZHE': 0x06f6,
'Cyrillic_VE': 0x06f7,
'Cyrillic_SOFTSIGN': 0x06f8,
'Cyrillic_YERU': 0x06f9,
'Cyrillic_ZE': 0x06fa,
'Cyrillic_SHA': 0x06fb,
'Cyrillic_E': 0x06fc,
'Cyrillic_SHCHA': 0x06fd,
'Cyrillic_CHE': 0x06fe,
'Cyrillic_HARDSIGN': 0x06ff,
'Greek_ALPHAaccent': 0x07a1,
'Greek_EPSILONaccent': 0x07a2,
'Greek_ETAaccent': 0x07a3,
'Greek_IOTAaccent': 0x07a4,
'Greek_IOTAdieresis': 0x07a5,
'Greek_IOTAdiaeresis': 0x07a5,
'Greek_OMICRONaccent': 0x07a7,
'Greek_UPSILONaccent': 0x07a8,
'Greek_UPSILONdieresis': 0x07a9,
'Greek_OMEGAaccent': 0x07ab,
'Greek_accentdieresis': 0x07ae,
'Greek_horizbar': 0x07af,
'Greek_alphaaccent': 0x07b1,
'Greek_epsilonaccent': 0x07b2,
'Greek_etaaccent': 0x07b3,
'Greek_iotaaccent': 0x07b4,
'Greek_iotadieresis': 0x07b5,
'Greek_iotaaccentdieresis': 0x07b6,
'Greek_omicronaccent': 0x07b7,
'Greek_upsilonaccent': 0x07b8,
'Greek_upsilondieresis': 0x07b9,
'Greek_upsilonaccentdieresis': 0x07ba,
'Greek_omegaaccent': 0x07bb,
'Greek_ALPHA': 0x07c1,
'Greek_BETA': 0x07c2,
'Greek_GAMMA': 0x07c3,
'Greek_DELTA': 0x07c4,
'Greek_EPSILON': 0x07c5,
'Greek_ZETA': 0x07c6,
'Greek_ETA': 0x07c7,
'Greek_THETA': 0x07c8,
'Greek_IOTA': 0x07c9,
'Greek_KAPPA': 0x07ca,
'Greek_LAMDA': 0x07cb,
'Greek_LAMBDA': 0x07cb,
'Greek_MU': 0x07cc,
'Greek_NU': 0x07cd,
'Greek_XI': 0x07ce,
'Greek_OMICRON': 0x07cf,
'Greek_PI': 0x07d0,
'Greek_RHO': 0x07d1,
'Greek_SIGMA': 0x07d2,
'Greek_TAU': 0x07d4,
'Greek_UPSILON': 0x07d5,
'Greek_PHI': 0x07d6,
'Greek_CHI': 0x07d7,
'Greek_PSI': 0x07d8,
'Greek_OMEGA': 0x07d9,
'Greek_alpha': 0x07e1,
'Greek_beta': 0x07e2,
'Greek_gamma': 0x07e3,
'Greek_delta': 0x07e4,
'Greek_epsilon': 0x07e5,
'Greek_zeta': 0x07e6,
'Greek_eta': 0x07e7,
'Greek_theta': 0x07e8,
'Greek_iota': 0x07e9,
'Greek_kappa': 0x07ea,
'Greek_lamda': 0x07eb,
'Greek_lambda': 0x07eb,
'Greek_mu': 0x07ec,
'Greek_nu': 0x07ed,
'Greek_xi': 0x07ee,
'Greek_omicron': 0x07ef,
'Greek_pi': 0x07f0,
'Greek_rho': 0x07f1,
'Greek_sigma': 0x07f2,
'Greek_finalsmallsigma': 0x07f3,
'Greek_tau': 0x07f4,
'Greek_upsilon': 0x07f5,
'Greek_phi': 0x07f6,
'Greek_chi': 0x07f7,
'Greek_psi': 0x07f8,
'Greek_omega': 0x07f9,
'Greek_switch': 0xff7e,
'leftradical': 0x08a1,
'topleftradical': 0x08a2,
'horizconnector': 0x08a3,
'topintegral': 0x08a4,
'botintegral': 0x08a5,
'vertconnector': 0x08a6,
'topleftsqbracket': 0x08a7,
'botleftsqbracket': 0x08a8,
'toprightsqbracket': 0x08a9,
'botrightsqbracket': 0x08aa,
'topleftparens': 0x08ab,
'botleftparens': 0x08ac,
'toprightparens': 0x08ad,
'botrightparens': 0x08ae,
'leftmiddlecurlybrace': 0x08af,
'rightmiddlecurlybrace': 0x08b0,
'topleftsummation': 0x08b1,
'botleftsummation': 0x08b2,
'topvertsummationconnector': 0x08b3,
'botvertsummationconnector': 0x08b4,
'toprightsummation': 0x08b5,
'botrightsummation': 0x08b6,
'rightmiddlesummation': 0x08b7,
'lessthanequal': 0x08bc,
'notequal': 0x08bd,
'greaterthanequal': 0x08be,
'integral': 0x08bf,
'therefore': 0x08c0,
'variation': 0x08c1,
'infinity': 0x08c2,
'nabla': 0x08c5,
'approximate': 0x08c8,
'similarequal': 0x08c9,
'ifonlyif': 0x08cd,
'implies': 0x08ce,
'identical': 0x08cf,
'radical': 0x08d6,
'includedin': 0x08da,
'includes': 0x08db,
'intersection': 0x08dc,
'union': 0x08dd,
'logicaland': 0x08de,
'logicalor': 0x08df,
'partialderivative': 0x08ef,
'function': 0x08f6,
'leftarrow': 0x08fb,
'uparrow': 0x08fc,
'rightarrow': 0x08fd,
'downarrow': 0x08fe,
'blank': 0x09df,
'soliddiamond': 0x09e0,
'checkerboard': 0x09e1,
'ht': 0x09e2,
'ff': 0x09e3,
'cr': 0x09e4,
'lf': 0x09e5,
'nl': 0x09e8,
'vt': 0x09e9,
'lowrightcorner': 0x09ea,
'uprightcorner': 0x09eb,
'upleftcorner': 0x09ec,
'lowleftcorner': 0x09ed,
'crossinglines': 0x09ee,
'horizlinescan1': 0x09ef,
'horizlinescan3': 0x09f0,
'horizlinescan5': 0x09f1,
'horizlinescan7': 0x09f2,
'horizlinescan9': 0x09f3,
'leftt': 0x09f4,
'rightt': 0x09f5,
'bott': 0x09f6,
'topt': 0x09f7,
'vertbar': 0x09f8,
'emspace': 0x0aa1,
'enspace': 0x0aa2,
'em3space': 0x0aa3,
'em4space': 0x0aa4,
'digitspace': 0x0aa5,
'punctspace': 0x0aa6,
'thinspace': 0x0aa7,
'hairspace': 0x0aa8,
'emdash': 0x0aa9,
'endash': 0x0aaa,
'signifblank': 0x0aac,
'ellipsis': 0x0aae,
'doubbaselinedot': 0x0aaf,
'onethird': 0x0ab0,
'twothirds': 0x0ab1,
'onefifth': 0x0ab2,
'twofifths': 0x0ab3,
'threefifths': 0x0ab4,
'fourfifths': 0x0ab5,
'onesixth': 0x0ab6,
'fivesixths': 0x0ab7,
'careof': 0x0ab8,
'figdash': 0x0abb,
'leftanglebracket': 0x0abc,
'decimalpoint': 0x0abd,
'rightanglebracket': 0x0abe,
'marker': 0x0abf,
'oneeighth': 0x0ac3,
'threeeighths': 0x0ac4,
'fiveeighths': 0x0ac5,
'seveneighths': 0x0ac6,
'trademark': 0x0ac9,
'signaturemark': 0x0aca,
'trademarkincircle': 0x0acb,
'leftopentriangle': 0x0acc,
'rightopentriangle': 0x0acd,
'emopencircle': 0x0ace,
'emopenrectangle': 0x0acf,
'leftsinglequotemark': 0x0ad0,
'rightsinglequotemark': 0x0ad1,
'leftdoublequotemark': 0x0ad2,
'rightdoublequotemark': 0x0ad3,
'prescription': 0x0ad4,
'minutes': 0x0ad6,
'seconds': 0x0ad7,
'latincross': 0x0ad9,
'hexagram': 0x0ada,
'filledrectbullet': 0x0adb,
'filledlefttribullet': 0x0adc,
'filledrighttribullet': 0x0add,
'emfilledcircle': 0x0ade,
'emfilledrect': 0x0adf,
'enopencircbullet': 0x0ae0,
'enopensquarebullet': 0x0ae1,
'openrectbullet': 0x0ae2,
'opentribulletup': 0x0ae3,
'opentribulletdown': 0x0ae4,
'openstar': 0x0ae5,
'enfilledcircbullet': 0x0ae6,
'enfilledsqbullet': 0x0ae7,
'filledtribulletup': 0x0ae8,
'filledtribulletdown': 0x0ae9,
'leftpointer': 0x0aea,
'rightpointer': 0x0aeb,
'club': 0x0aec,
'diamond': 0x0aed,
'heart': 0x0aee,
'maltesecross': 0x0af0,
'dagger': 0x0af1,
'doubledagger': 0x0af2,
'checkmark': 0x0af3,
'ballotcross': 0x0af4,
'musicalsharp': 0x0af5,
'musicalflat': 0x0af6,
'malesymbol': 0x0af7,
'femalesymbol': 0x0af8,
'telephone': 0x0af9,
'telephonerecorder': 0x0afa,
'phonographcopyright': 0x0afb,
'caret': 0x0afc,
'singlelowquotemark': 0x0afd,
'doublelowquotemark': 0x0afe,
'cursor': 0x0aff,
'leftcaret': 0x0ba3,
'rightcaret': 0x0ba6,
'downcaret': 0x0ba8,
'upcaret': 0x0ba9,
'overbar': 0x0bc0,
'downtack': 0x0bc2,
'upshoe': 0x0bc3,
'downstile': 0x0bc4,
'underbar': 0x0bc6,
'jot': 0x0bca,
'quad': 0x0bcc,
'uptack': 0x0bce,
'circle': 0x0bcf,
'upstile': 0x0bd3,
'downshoe': 0x0bd6,
'rightshoe': 0x0bd8,
'leftshoe': 0x0bda,
'lefttack': 0x0bdc,
'righttack': 0x0bfc,
'hebrew_doublelowline': 0x0cdf,
'hebrew_aleph': 0x0ce0,
'hebrew_bet': 0x0ce1,
'hebrew_beth': 0x0ce1,
'hebrew_gimel': 0x0ce2,
'hebrew_gimmel': 0x0ce2,
'hebrew_dalet': 0x0ce3,
'hebrew_daleth': 0x0ce3,
'hebrew_he': 0x0ce4,
'hebrew_waw': 0x0ce5,
'hebrew_zain': 0x0ce6,
'hebrew_zayin': 0x0ce6,
'hebrew_chet': 0x0ce7,
'hebrew_het': 0x0ce7,
'hebrew_tet': 0x0ce8,
'hebrew_teth': 0x0ce8,
'hebrew_yod': 0x0ce9,
'hebrew_finalkaph': 0x0cea,
'hebrew_kaph': 0x0ceb,
'hebrew_lamed': 0x0cec,
'hebrew_finalmem': 0x0ced,
'hebrew_mem': 0x0cee,
'hebrew_finalnun': 0x0cef,
'hebrew_nun': 0x0cf0,
'hebrew_samech': 0x0cf1,
'hebrew_samekh': 0x0cf1,
'hebrew_ayin': 0x0cf2,
'hebrew_finalpe': 0x0cf3,
'hebrew_pe': 0x0cf4,
'hebrew_finalzade': 0x0cf5,
'hebrew_finalzadi': 0x0cf5,
'hebrew_zade': 0x0cf6,
'hebrew_zadi': 0x0cf6,
'hebrew_qoph': 0x0cf7,
'hebrew_kuf': 0x0cf7,
'hebrew_resh': 0x0cf8,
'hebrew_shin': 0x0cf9,
'hebrew_taw': 0x0cfa,
'hebrew_taf': 0x0cfa,
'Hebrew_switch': 0xff7e,
'Thai_kokai': 0x0da1,
'Thai_khokhai': 0x0da2,
'Thai_khokhuat': 0x0da3,
'Thai_khokhwai': 0x0da4,
'Thai_khokhon': 0x0da5,
'Thai_khorakhang': 0x0da6,
'Thai_ngongu': 0x0da7,
'Thai_chochan': 0x0da8,
'Thai_choching': 0x0da9,
'Thai_chochang': 0x0daa,
'Thai_soso': 0x0dab,
'Thai_chochoe': 0x0dac,
'Thai_yoying': 0x0dad,
'Thai_dochada': 0x0dae,
'Thai_topatak': 0x0daf,
'Thai_thothan': 0x0db0,
'Thai_thonangmontho': 0x0db1,
'Thai_thophuthao': 0x0db2,
'Thai_nonen': 0x0db3,
'Thai_dodek': 0x0db4,
'Thai_totao': 0x0db5,
'Thai_thothung': 0x0db6,
'Thai_thothahan': 0x0db7,
'Thai_thothong': 0x0db8,
'Thai_nonu': 0x0db9,
'Thai_bobaimai': 0x0dba,
'Thai_popla': 0x0dbb,
'Thai_phophung': 0x0dbc,
'Thai_fofa': 0x0dbd,
'Thai_phophan': 0x0dbe,
'Thai_fofan': 0x0dbf,
'Thai_phosamphao': 0x0dc0,
'Thai_moma': 0x0dc1,
'Thai_yoyak': 0x0dc2,
'Thai_rorua': 0x0dc3,
'Thai_ru': 0x0dc4,
'Thai_loling': 0x0dc5,
'Thai_lu': 0x0dc6,
'Thai_wowaen': 0x0dc7,
'Thai_sosala': 0x0dc8,
'Thai_sorusi': 0x0dc9,
'Thai_sosua': 0x0dca,
'Thai_hohip': 0x0dcb,
'Thai_lochula': 0x0dcc,
'Thai_oang': 0x0dcd,
'Thai_honokhuk': 0x0dce,
'Thai_paiyannoi': 0x0dcf,
'Thai_saraa': 0x0dd0,
'Thai_maihanakat': 0x0dd1,
'Thai_saraaa': 0x0dd2,
'Thai_saraam': 0x0dd3,
'Thai_sarai': 0x0dd4,
'Thai_saraii': 0x0dd5,
'Thai_saraue': 0x0dd6,
'Thai_sarauee': 0x0dd7,
'Thai_sarau': 0x0dd8,
'Thai_sarauu': 0x0dd9,
'Thai_phinthu': 0x0dda,
'Thai_maihanakat_maitho': 0x0dde,
'Thai_baht': 0x0ddf,
'Thai_sarae': 0x0de0,
'Thai_saraae': 0x0de1,
'Thai_sarao': 0x0de2,
'Thai_saraaimaimuan': 0x0de3,
'Thai_saraaimaimalai': 0x0de4,
'Thai_lakkhangyao': 0x0de5,
'Thai_maiyamok': 0x0de6,
'Thai_maitaikhu': 0x0de7,
'Thai_maiek': 0x0de8,
'Thai_maitho': 0x0de9,
'Thai_maitri': 0x0dea,
'Thai_maichattawa': 0x0deb,
'Thai_thanthakhat': 0x0dec,
'Thai_nikhahit': 0x0ded,
'Thai_leksun': 0x0df0,
'Thai_leknung': 0x0df1,
'Thai_leksong': 0x0df2,
'Thai_leksam': 0x0df3,
'Thai_leksi': 0x0df4,
'Thai_lekha': 0x0df5,
'Thai_lekhok': 0x0df6,
'Thai_lekchet': 0x0df7,
'Thai_lekpaet': 0x0df8,
'Thai_lekkao': 0x0df9,
'Hangul': 0xff31,
'Hangul_Start': 0xff32,
'Hangul_End': 0xff33,
'Hangul_Hanja': 0xff34,
'Hangul_Jamo': 0xff35,
'Hangul_Romaja': 0xff36,
'Hangul_Codeinput': 0xff37,
'Hangul_Jeonja': 0xff38,
'Hangul_Banja': 0xff39,
'Hangul_PreHanja': 0xff3a,
'Hangul_PostHanja': 0xff3b,
'Hangul_SingleCandidate': 0xff3c,
'Hangul_MultipleCandidate': 0xff3d,
'Hangul_PreviousCandidate': 0xff3e,
'Hangul_Special': 0xff3f,
'Hangul_switch': 0xff7e,
'Hangul_Kiyeog': 0x0ea1,
'Hangul_SsangKiyeog': 0x0ea2,
'Hangul_KiyeogSios': 0x0ea3,
'Hangul_Nieun': 0x0ea4,
'Hangul_NieunJieuj': 0x0ea5,
'Hangul_NieunHieuh': 0x0ea6,
'Hangul_Dikeud': 0x0ea7,
'Hangul_SsangDikeud': 0x0ea8,
'Hangul_Rieul': 0x0ea9,
'Hangul_RieulKiyeog': 0x0eaa,
'Hangul_RieulMieum': 0x0eab,
'Hangul_RieulPieub': 0x0eac,
'Hangul_RieulSios': 0x0ead,
'Hangul_RieulTieut': 0x0eae,
'Hangul_RieulPhieuf': 0x0eaf,
'Hangul_RieulHieuh': 0x0eb0,
'Hangul_Mieum': 0x0eb1,
'Hangul_Pieub': 0x0eb2,
'Hangul_SsangPieub': 0x0eb3,
'Hangul_PieubSios': 0x0eb4,
'Hangul_Sios': 0x0eb5,
'Hangul_SsangSios': 0x0eb6,
'Hangul_Ieung': 0x0eb7,
'Hangul_Jieuj': 0x0eb8,
'Hangul_SsangJieuj': 0x0eb9,
'Hangul_Cieuc': 0x0eba,
'Hangul_Khieuq': 0x0ebb,
'Hangul_Tieut': 0x0ebc,
'Hangul_Phieuf': 0x0ebd,
'Hangul_Hieuh': 0x0ebe,
'Hangul_A': 0x0ebf,
'Hangul_AE': 0x0ec0,
'Hangul_YA': 0x0ec1,
'Hangul_YAE': 0x0ec2,
'Hangul_EO': 0x0ec3,
'Hangul_E': 0x0ec4,
'Hangul_YEO': 0x0ec5,
'Hangul_YE': 0x0ec6,
'Hangul_O': 0x0ec7,
'Hangul_WA': 0x0ec8,
'Hangul_WAE': 0x0ec9,
'Hangul_OE': 0x0eca,
'Hangul_YO': 0x0ecb,
'Hangul_U': 0x0ecc,
'Hangul_WEO': 0x0ecd,
'Hangul_WE': 0x0ece,
'Hangul_WI': 0x0ecf,
'Hangul_YU': 0x0ed0,
'Hangul_EU': 0x0ed1,
'Hangul_YI': 0x0ed2,
'Hangul_I': 0x0ed3,
'Hangul_J_Kiyeog': 0x0ed4,
'Hangul_J_SsangKiyeog': 0x0ed5,
'Hangul_J_KiyeogSios': 0x0ed6,
'Hangul_J_Nieun': 0x0ed7,
'Hangul_J_NieunJieuj': 0x0ed8,
'Hangul_J_NieunHieuh': 0x0ed9,
'Hangul_J_Dikeud': 0x0eda,
'Hangul_J_Rieul': 0x0edb,
'Hangul_J_RieulKiyeog': 0x0edc,
'Hangul_J_RieulMieum': 0x0edd,
'Hangul_J_RieulPieub': 0x0ede,
'Hangul_J_RieulSios': 0x0edf,
'Hangul_J_RieulTieut': 0x0ee0,
'Hangul_J_RieulPhieuf': 0x0ee1,
'Hangul_J_RieulHieuh': 0x0ee2,
'Hangul_J_Mieum': 0x0ee3,
'Hangul_J_Pieub': 0x0ee4,
'Hangul_J_PieubSios': 0x0ee5,
'Hangul_J_Sios': 0x0ee6,
'Hangul_J_SsangSios': 0x0ee7,
'Hangul_J_Ieung': 0x0ee8,
'Hangul_J_Jieuj': 0x0ee9,
'Hangul_J_Cieuc': 0x0eea,
'Hangul_J_Khieuq': 0x0eeb,
'Hangul_J_Tieut': 0x0eec,
'Hangul_J_Phieuf': 0x0eed,
'Hangul_J_Hieuh': 0x0eee,
'Hangul_RieulYeorinHieuh': 0x0eef,
'Hangul_SunkyeongeumMieum': 0x0ef0,
'Hangul_SunkyeongeumPieub': 0x0ef1,
'Hangul_PanSios': 0x0ef2,
'Hangul_KkogjiDalrinIeung': 0x0ef3,
'Hangul_SunkyeongeumPhieuf': 0x0ef4,
'Hangul_YeorinHieuh': 0x0ef5,
'Hangul_AraeA': 0x0ef6,
'Hangul_AraeAE': 0x0ef7,
'Hangul_J_PanSios': 0x0ef8,
'Hangul_J_KkogjiDalrinIeung': 0x0ef9,
'Hangul_J_YeorinHieuh': 0x0efa,
'Korean_Won': 0x0eff,
'Armenian_ligature_ew': 0x1000587,
'Armenian_full_stop': 0x1000589,
'Armenian_verjaket': 0x1000589,
'Armenian_separation_mark': 0x100055d,
'Armenian_but': 0x100055d,
'Armenian_hyphen': 0x100058a,
'Armenian_yentamna': 0x100058a,
'Armenian_exclam': 0x100055c,
'Armenian_amanak': 0x100055c,
'Armenian_accent': 0x100055b,
'Armenian_shesht': 0x100055b,
'Armenian_question': 0x100055e,
'Armenian_paruyk': 0x100055e,
'Armenian_AYB': 0x1000531,
'Armenian_ayb': 0x1000561,
'Armenian_BEN': 0x1000532,
'Armenian_ben': 0x1000562,
'Armenian_GIM': 0x1000533,
'Armenian_gim': 0x1000563,
'Armenian_DA': 0x1000534,
'Armenian_da': 0x1000564,
'Armenian_YECH': 0x1000535,
'Armenian_yech': 0x1000565,
'Armenian_ZA': 0x1000536,
'Armenian_za': 0x1000566,
'Armenian_E': 0x1000537,
'Armenian_e': 0x1000567,
'Armenian_AT': 0x1000538,
'Armenian_at': 0x1000568,
'Armenian_TO': 0x1000539,
'Armenian_to': 0x1000569,
'Armenian_ZHE': 0x100053a,
'Armenian_zhe': 0x100056a,
'Armenian_INI': 0x100053b,
'Armenian_ini': 0x100056b,
'Armenian_LYUN': 0x100053c,
'Armenian_lyun': 0x100056c,
'Armenian_KHE': 0x100053d,
'Armenian_khe': 0x100056d,
'Armenian_TSA': 0x100053e,
'Armenian_tsa': 0x100056e,
'Armenian_KEN': 0x100053f,
'Armenian_ken': 0x100056f,
'Armenian_HO': 0x1000540,
'Armenian_ho': 0x1000570,
'Armenian_DZA': 0x1000541,
'Armenian_dza': 0x1000571,
'Armenian_GHAT': 0x1000542,
'Armenian_ghat': 0x1000572,
'Armenian_TCHE': 0x1000543,
'Armenian_tche': 0x1000573,
'Armenian_MEN': 0x1000544,
'Armenian_men': 0x1000574,
'Armenian_HI': 0x1000545,
'Armenian_hi': 0x1000575,
'Armenian_NU': 0x1000546,
'Armenian_nu': 0x1000576,
'Armenian_SHA': 0x1000547,
'Armenian_sha': 0x1000577,
'Armenian_VO': 0x1000548,
'Armenian_vo': 0x1000578,
'Armenian_CHA': 0x1000549,
'Armenian_cha': 0x1000579,
'Armenian_PE': 0x100054a,
'Armenian_pe': 0x100057a,
'Armenian_JE': 0x100054b,
'Armenian_je': 0x100057b,
'Armenian_RA': 0x100054c,
'Armenian_ra': 0x100057c,
'Armenian_SE': 0x100054d,
'Armenian_se': 0x100057d,
'Armenian_VEV': 0x100054e,
'Armenian_vev': 0x100057e,
'Armenian_TYUN': 0x100054f,
'Armenian_tyun': 0x100057f,
'Armenian_RE': 0x1000550,
'Armenian_re': 0x1000580,
'Armenian_TSO': 0x1000551,
'Armenian_tso': 0x1000581,
'Armenian_VYUN': 0x1000552,
'Armenian_vyun': 0x1000582,
'Armenian_PYUR': 0x1000553,
'Armenian_pyur': 0x1000583,
'Armenian_KE': 0x1000554,
'Armenian_ke': 0x1000584,
'Armenian_O': 0x1000555,
'Armenian_o': 0x1000585,
'Armenian_FE': 0x1000556,
'Armenian_fe': 0x1000586,
'Armenian_apostrophe': 0x100055a,
'Georgian_an': 0x10010d0,
'Georgian_ban': 0x10010d1,
'Georgian_gan': 0x10010d2,
'Georgian_don': 0x10010d3,
'Georgian_en': 0x10010d4,
'Georgian_vin': 0x10010d5,
'Georgian_zen': 0x10010d6,
'Georgian_tan': 0x10010d7,
'Georgian_in': 0x10010d8,
'Georgian_kan': 0x10010d9,
'Georgian_las': 0x10010da,
'Georgian_man': 0x10010db,
'Georgian_nar': 0x10010dc,
'Georgian_on': 0x10010dd,
'Georgian_par': 0x10010de,
'Georgian_zhar': 0x10010df,
'Georgian_rae': 0x10010e0,
'Georgian_san': 0x10010e1,
'Georgian_tar': 0x10010e2,
'Georgian_un': 0x10010e3,
'Georgian_phar': 0x10010e4,
'Georgian_khar': 0x10010e5,
'Georgian_ghan': 0x10010e6,
'Georgian_qar': 0x10010e7,
'Georgian_shin': 0x10010e8,
'Georgian_chin': 0x10010e9,
'Georgian_can': 0x10010ea,
'Georgian_jil': 0x10010eb,
'Georgian_cil': 0x10010ec,
'Georgian_char': 0x10010ed,
'Georgian_xan': 0x10010ee,
'Georgian_jhan': 0x10010ef,
'Georgian_hae': 0x10010f0,
'Georgian_he': 0x10010f1,
'Georgian_hie': 0x10010f2,
'Georgian_we': 0x10010f3,
'Georgian_har': 0x10010f4,
'Georgian_hoe': 0x10010f5,
'Georgian_fi': 0x10010f6,
'Xabovedot': 0x1001e8a,
'Ibreve': 0x100012c,
'Zstroke': 0x10001b5,
'Gcaron': 0x10001e6,
'Ocaron': 0x10001d1,
'Obarred': 0x100019f,
'xabovedot': 0x1001e8b,
'ibreve': 0x100012d,
'zstroke': 0x10001b6,
'gcaron': 0x10001e7,
'ocaron': 0x10001d2,
'obarred': 0x1000275,
'SCHWA': 0x100018f,
'schwa': 0x1000259,
'Lbelowdot': 0x1001e36,
'lbelowdot': 0x1001e37,
'Abelowdot': 0x1001ea0,
'abelowdot': 0x1001ea1,
'Ahook': 0x1001ea2,
'ahook': 0x1001ea3,
'Acircumflexacute': 0x1001ea4,
'acircumflexacute': 0x1001ea5,
'Acircumflexgrave': 0x1001ea6,
'acircumflexgrave': 0x1001ea7,
'Acircumflexhook': 0x1001ea8,
'acircumflexhook': 0x1001ea9,
'Acircumflextilde': 0x1001eaa,
'acircumflextilde': 0x1001eab,
'Acircumflexbelowdot': 0x1001eac,
'acircumflexbelowdot': 0x1001ead,
'Abreveacute': 0x1001eae,
'abreveacute': 0x1001eaf,
'Abrevegrave': 0x1001eb0,
'abrevegrave': 0x1001eb1,
'Abrevehook': 0x1001eb2,
'abrevehook': 0x1001eb3,
'Abrevetilde': 0x1001eb4,
'abrevetilde': 0x1001eb5,
'Abrevebelowdot': 0x1001eb6,
'abrevebelowdot': 0x1001eb7,
'Ebelowdot': 0x1001eb8,
'ebelowdot': 0x1001eb9,
'Ehook': 0x1001eba,
'ehook': 0x1001ebb,
'Etilde': 0x1001ebc,
'etilde': 0x1001ebd,
'Ecircumflexacute': 0x1001ebe,
'ecircumflexacute': 0x1001ebf,
'Ecircumflexgrave': 0x1001ec0,
'ecircumflexgrave': 0x1001ec1,
'Ecircumflexhook': 0x1001ec2,
'ecircumflexhook': 0x1001ec3,
'Ecircumflextilde': 0x1001ec4,
'ecircumflextilde': 0x1001ec5,
'Ecircumflexbelowdot': 0x1001ec6,
'ecircumflexbelowdot': 0x1001ec7,
'Ihook': 0x1001ec8,
'ihook': 0x1001ec9,
'Ibelowdot': 0x1001eca,
'ibelowdot': 0x1001ecb,
'Obelowdot': 0x1001ecc,
'obelowdot': 0x1001ecd,
'Ohook': 0x1001ece,
'ohook': 0x1001ecf,
'Ocircumflexacute': 0x1001ed0,
'ocircumflexacute': 0x1001ed1,
'Ocircumflexgrave': 0x1001ed2,
'ocircumflexgrave': 0x1001ed3,
'Ocircumflexhook': 0x1001ed4,
'ocircumflexhook': 0x1001ed5,
'Ocircumflextilde': 0x1001ed6,
'ocircumflextilde': 0x1001ed7,
'Ocircumflexbelowdot': 0x1001ed8,
'ocircumflexbelowdot': 0x1001ed9,
'Ohornacute': 0x1001eda,
'ohornacute': 0x1001edb,
'Ohorngrave': 0x1001edc,
'ohorngrave': 0x1001edd,
'Ohornhook': 0x1001ede,
'ohornhook': 0x1001edf,
'Ohorntilde': 0x1001ee0,
'ohorntilde': 0x1001ee1,
'Ohornbelowdot': 0x1001ee2,
'ohornbelowdot': 0x1001ee3,
'Ubelowdot': 0x1001ee4,
'ubelowdot': 0x1001ee5,
'Uhook': 0x1001ee6,
'uhook': 0x1001ee7,
'Uhornacute': 0x1001ee8,
'uhornacute': 0x1001ee9,
'Uhorngrave': 0x1001eea,
'uhorngrave': 0x1001eeb,
'Uhornhook': 0x1001eec,
'uhornhook': 0x1001eed,
'Uhorntilde': 0x1001eee,
'uhorntilde': 0x1001eef,
'Uhornbelowdot': 0x1001ef0,
'uhornbelowdot': 0x1001ef1,
'Ybelowdot': 0x1001ef4,
'ybelowdot': 0x1001ef5,
'Yhook': 0x1001ef6,
'yhook': 0x1001ef7,
'Ytilde': 0x1001ef8,
'ytilde': 0x1001ef9,
'Ohorn': 0x10001a0,
'ohorn': 0x10001a1,
'Uhorn': 0x10001af,
'uhorn': 0x10001b0,
'EcuSign': 0x10020a0,
'ColonSign': 0x10020a1,
'CruzeiroSign': 0x10020a2,
'FFrancSign': 0x10020a3,
'LiraSign': 0x10020a4,
'MillSign': 0x10020a5,
'NairaSign': 0x10020a6,
'PesetaSign': 0x10020a7,
'RupeeSign': 0x10020a8,
'WonSign': 0x10020a9,
'NewSheqelSign': 0x10020aa,
'DongSign': 0x10020ab,
'EuroSign': 0x20ac,
'zerosuperior': 0x1002070,
'foursuperior': 0x1002074,
'fivesuperior': 0x1002075,
'sixsuperior': 0x1002076,
'sevensuperior': 0x1002077,
'eightsuperior': 0x1002078,
'ninesuperior': 0x1002079,
'zerosubscript': 0x1002080,
'onesubscript': 0x1002081,
'twosubscript': 0x1002082,
'threesubscript': 0x1002083,
'foursubscript': 0x1002084,
'fivesubscript': 0x1002085,
'sixsubscript': 0x1002086,
'sevensubscript': 0x1002087,
'eightsubscript': 0x1002088,
'ninesubscript': 0x1002089,
'partdifferential': 0x1002202,
'emptyset': 0x1002205,
'elementof': 0x1002208,
'notelementof': 0x1002209,
'containsas': 0x100220B,
'squareroot': 0x100221A,
'cuberoot': 0x100221B,
'fourthroot': 0x100221C,
'dintegral': 0x100222C,
'tintegral': 0x100222D,
'because': 0x1002235,
'approxeq': 0x1002248,
'notapproxeq': 0x1002247,
'notidentical': 0x1002262,
'stricteq': 0x1002263,
'braille_dot_1': 0xfff1,
'braille_dot_2': 0xfff2,
'braille_dot_3': 0xfff3,
'braille_dot_4': 0xfff4,
'braille_dot_5': 0xfff5,
'braille_dot_6': 0xfff6,
'braille_dot_7': 0xfff7,
'braille_dot_8': 0xfff8,
'braille_dot_9': 0xfff9,
'braille_dot_10': 0xfffa,
'braille_blank': 0x1002800,
'braille_dots_1': 0x1002801,
'braille_dots_2': 0x1002802,
'braille_dots_12': 0x1002803,
'braille_dots_3': 0x1002804,
'braille_dots_13': 0x1002805,
'braille_dots_23': 0x1002806,
'braille_dots_123': 0x1002807,
'braille_dots_4': 0x1002808,
'braille_dots_14': 0x1002809,
'braille_dots_24': 0x100280a,
'braille_dots_124': 0x100280b,
'braille_dots_34': 0x100280c,
'braille_dots_134': 0x100280d,
'braille_dots_234': 0x100280e,
'braille_dots_1234': 0x100280f,
'braille_dots_5': 0x1002810,
'braille_dots_15': 0x1002811,
'braille_dots_25': 0x1002812,
'braille_dots_125': 0x1002813,
'braille_dots_35': 0x1002814,
'braille_dots_135': 0x1002815,
'braille_dots_235': 0x1002816,
'braille_dots_1235': 0x1002817,
'braille_dots_45': 0x1002818,
'braille_dots_145': 0x1002819,
'braille_dots_245': 0x100281a,
'braille_dots_1245': 0x100281b,
'braille_dots_345': 0x100281c,
'braille_dots_1345': 0x100281d,
'braille_dots_2345': 0x100281e,
'braille_dots_12345': 0x100281f,
'braille_dots_6': 0x1002820,
'braille_dots_16': 0x1002821,
'braille_dots_26': 0x1002822,
'braille_dots_126': 0x1002823,
'braille_dots_36': 0x1002824,
'braille_dots_136': 0x1002825,
'braille_dots_236': 0x1002826,
'braille_dots_1236': 0x1002827,
'braille_dots_46': 0x1002828,
'braille_dots_146': 0x1002829,
'braille_dots_246': 0x100282a,
'braille_dots_1246': 0x100282b,
'braille_dots_346': 0x100282c,
'braille_dots_1346': 0x100282d,
'braille_dots_2346': 0x100282e,
'braille_dots_12346': 0x100282f,
'braille_dots_56': 0x1002830,
'braille_dots_156': 0x1002831,
'braille_dots_256': 0x1002832,
'braille_dots_1256': 0x1002833,
'braille_dots_356': 0x1002834,
'braille_dots_1356': 0x1002835,
'braille_dots_2356': 0x1002836,
'braille_dots_12356': 0x1002837,
'braille_dots_456': 0x1002838,
'braille_dots_1456': 0x1002839,
'braille_dots_2456': 0x100283a,
'braille_dots_12456': 0x100283b,
'braille_dots_3456': 0x100283c,
'braille_dots_13456': 0x100283d,
'braille_dots_23456': 0x100283e,
'braille_dots_123456': 0x100283f,
'braille_dots_7': 0x1002840,
'braille_dots_17': 0x1002841,
'braille_dots_27': 0x1002842,
'braille_dots_127': 0x1002843,
'braille_dots_37': 0x1002844,
'braille_dots_137': 0x1002845,
'braille_dots_237': 0x1002846,
'braille_dots_1237': 0x1002847,
'braille_dots_47': 0x1002848,
'braille_dots_147': 0x1002849,
'braille_dots_247': 0x100284a,
'braille_dots_1247': 0x100284b,
'braille_dots_347': 0x100284c,
'braille_dots_1347': 0x100284d,
'braille_dots_2347': 0x100284e,
'braille_dots_12347': 0x100284f,
'braille_dots_57': 0x1002850,
'braille_dots_157': 0x1002851,
'braille_dots_257': 0x1002852,
'braille_dots_1257': 0x1002853,
'braille_dots_357': 0x1002854,
'braille_dots_1357': 0x1002855,
'braille_dots_2357': 0x1002856,
'braille_dots_12357': 0x1002857,
'braille_dots_457': 0x1002858,
'braille_dots_1457': 0x1002859,
'braille_dots_2457': 0x100285a,
'braille_dots_12457': 0x100285b,
'braille_dots_3457': 0x100285c,
'braille_dots_13457': 0x100285d,
'braille_dots_23457': 0x100285e,
'braille_dots_123457': 0x100285f,
'braille_dots_67': 0x1002860,
'braille_dots_167': 0x1002861,
'braille_dots_267': 0x1002862,
'braille_dots_1267': 0x1002863,
'braille_dots_367': 0x1002864,
'braille_dots_1367': 0x1002865,
'braille_dots_2367': 0x1002866,
'braille_dots_12367': 0x1002867,
'braille_dots_467': 0x1002868,
'braille_dots_1467': 0x1002869,
'braille_dots_2467': 0x100286a,
'braille_dots_12467': 0x100286b,
'braille_dots_3467': 0x100286c,
'braille_dots_13467': 0x100286d,
'braille_dots_23467': 0x100286e,
'braille_dots_123467': 0x100286f,
'braille_dots_567': 0x1002870,
'braille_dots_1567': 0x1002871,
'braille_dots_2567': 0x1002872,
'braille_dots_12567': 0x1002873,
'braille_dots_3567': 0x1002874,
'braille_dots_13567': 0x1002875,
'braille_dots_23567': 0x1002876,
'braille_dots_123567': 0x1002877,
'braille_dots_4567': 0x1002878,
'braille_dots_14567': 0x1002879,
'braille_dots_24567': 0x100287a,
'braille_dots_124567': 0x100287b,
'braille_dots_34567': 0x100287c,
'braille_dots_134567': 0x100287d,
'braille_dots_234567': 0x100287e,
'braille_dots_1234567': 0x100287f,
'braille_dots_8': 0x1002880,
'braille_dots_18': 0x1002881,
'braille_dots_28': 0x1002882,
'braille_dots_128': 0x1002883,
'braille_dots_38': 0x1002884,
'braille_dots_138': 0x1002885,
'braille_dots_238': 0x1002886,
'braille_dots_1238': 0x1002887,
'braille_dots_48': 0x1002888,
'braille_dots_148': 0x1002889,
'braille_dots_248': 0x100288a,
'braille_dots_1248': 0x100288b,
'braille_dots_348': 0x100288c,
'braille_dots_1348': 0x100288d,
'braille_dots_2348': 0x100288e,
'braille_dots_12348': 0x100288f,
'braille_dots_58': 0x1002890,
'braille_dots_158': 0x1002891,
'braille_dots_258': 0x1002892,
'braille_dots_1258': 0x1002893,
'braille_dots_358': 0x1002894,
'braille_dots_1358': 0x1002895,
'braille_dots_2358': 0x1002896,
'braille_dots_12358': 0x1002897,
'braille_dots_458': 0x1002898,
'braille_dots_1458': 0x1002899,
'braille_dots_2458': 0x100289a,
'braille_dots_12458': 0x100289b,
'braille_dots_3458': 0x100289c,
'braille_dots_13458': 0x100289d,
'braille_dots_23458': 0x100289e,
'braille_dots_123458': 0x100289f,
'braille_dots_68': 0x10028a0,
'braille_dots_168': 0x10028a1,
'braille_dots_268': 0x10028a2,
'braille_dots_1268': 0x10028a3,
'braille_dots_368': 0x10028a4,
'braille_dots_1368': 0x10028a5,
'braille_dots_2368': 0x10028a6,
'braille_dots_12368': 0x10028a7,
'braille_dots_468': 0x10028a8,
'braille_dots_1468': 0x10028a9,
'braille_dots_2468': 0x10028aa,
'braille_dots_12468': 0x10028ab,
'braille_dots_3468': 0x10028ac,
'braille_dots_13468': 0x10028ad,
'braille_dots_23468': 0x10028ae,
'braille_dots_123468': 0x10028af,
'braille_dots_568': 0x10028b0,
'braille_dots_1568': 0x10028b1,
'braille_dots_2568': 0x10028b2,
'braille_dots_12568': 0x10028b3,
'braille_dots_3568': 0x10028b4,
'braille_dots_13568': 0x10028b5,
'braille_dots_23568': 0x10028b6,
'braille_dots_123568': 0x10028b7,
'braille_dots_4568': 0x10028b8,
'braille_dots_14568': 0x10028b9,
'braille_dots_24568': 0x10028ba,
'braille_dots_124568': 0x10028bb,
'braille_dots_34568': 0x10028bc,
'braille_dots_134568': 0x10028bd,
'braille_dots_234568': 0x10028be,
'braille_dots_1234568': 0x10028bf,
'braille_dots_78': 0x10028c0,
'braille_dots_178': 0x10028c1,
'braille_dots_278': 0x10028c2,
'braille_dots_1278': 0x10028c3,
'braille_dots_378': 0x10028c4,
'braille_dots_1378': 0x10028c5,
'braille_dots_2378': 0x10028c6,
'braille_dots_12378': 0x10028c7,
'braille_dots_478': 0x10028c8,
'braille_dots_1478': 0x10028c9,
'braille_dots_2478': 0x10028ca,
'braille_dots_12478': 0x10028cb,
'braille_dots_3478': 0x10028cc,
'braille_dots_13478': 0x10028cd,
'braille_dots_23478': 0x10028ce,
'braille_dots_123478': 0x10028cf,
'braille_dots_578': 0x10028d0,
'braille_dots_1578': 0x10028d1,
'braille_dots_2578': 0x10028d2,
'braille_dots_12578': 0x10028d3,
'braille_dots_3578': 0x10028d4,
'braille_dots_13578': 0x10028d5,
'braille_dots_23578': 0x10028d6,
'braille_dots_123578': 0x10028d7,
'braille_dots_4578': 0x10028d8,
'braille_dots_14578': 0x10028d9,
'braille_dots_24578': 0x10028da,
'braille_dots_124578': 0x10028db,
'braille_dots_34578': 0x10028dc,
'braille_dots_134578': 0x10028dd,
'braille_dots_234578': 0x10028de,
'braille_dots_1234578': 0x10028df,
'braille_dots_678': 0x10028e0,
'braille_dots_1678': 0x10028e1,
'braille_dots_2678': 0x10028e2,
'braille_dots_12678': 0x10028e3,
'braille_dots_3678': 0x10028e4,
'braille_dots_13678': 0x10028e5,
'braille_dots_23678': 0x10028e6,
'braille_dots_123678': 0x10028e7,
'braille_dots_4678': 0x10028e8,
'braille_dots_14678': 0x10028e9,
'braille_dots_24678': 0x10028ea,
'braille_dots_124678': 0x10028eb,
'braille_dots_34678': 0x10028ec,
'braille_dots_134678': 0x10028ed,
'braille_dots_234678': 0x10028ee,
'braille_dots_1234678': 0x10028ef,
'braille_dots_5678': 0x10028f0,
'braille_dots_15678': 0x10028f1,
'braille_dots_25678': 0x10028f2,
'braille_dots_125678': 0x10028f3,
'braille_dots_35678': 0x10028f4,
'braille_dots_135678': 0x10028f5,
'braille_dots_235678': 0x10028f6,
'braille_dots_1235678': 0x10028f7,
'braille_dots_45678': 0x10028f8,
'braille_dots_145678': 0x10028f9,
'braille_dots_245678': 0x10028fa,
'braille_dots_1245678': 0x10028fb,
'braille_dots_345678': 0x10028fc,
'braille_dots_1345678': 0x10028fd,
'braille_dots_2345678': 0x10028fe,
'braille_dots_12345678': 0x10028ff,
}
# Invert the keysyms table: map each keysym code back to the list of
# string names that refer to it (several names may share one code).
keysym_strings = defaultdict(list)
for keysym_string, keysym in keysyms.items():
    keysym_strings[keysym].append(keysym_string)
| 28.243097 | 83 | 0.647276 |
acef374a8e781e8285e190c50acfbe75b68ecf1f | 21,761 | py | Python | lib/protorpc-1.0/protorpc/descriptor.py | MiCHiLU/google_appengine_sdk | 3da9f20d7e65e26c4938d2c4054bc4f39cbc5522 | [
"Apache-2.0"
] | 790 | 2015-01-03T02:13:39.000Z | 2020-05-10T19:53:57.000Z | lib/protorpc-1.0/protorpc/descriptor.py | MiCHiLU/google_appengine_sdk | 3da9f20d7e65e26c4938d2c4054bc4f39cbc5522 | [
"Apache-2.0"
] | 1,361 | 2015-01-08T23:09:40.000Z | 2020-04-14T00:03:04.000Z | lib/protorpc-1.0/protorpc/descriptor.py | MiCHiLU/google_appengine_sdk | 3da9f20d7e65e26c4938d2c4054bc4f39cbc5522 | [
"Apache-2.0"
] | 155 | 2015-01-08T22:59:31.000Z | 2020-04-08T08:01:53.000Z | #!/usr/bin/env python
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Services descriptor definitions.
Contains message definitions and functions for converting
service classes into transmittable message format.
Describing an Enum instance, Enum class, Field class or Message class will
generate an appropriate descriptor object that describes that class.
This message can itself be used to transmit information to clients wishing
to know the description of an enum value, enum, field or message without
needing to download the source code. This format is also compatible with
other, non-Python languages.
The descriptors are modeled to be binary compatible with:
http://code.google.com/p/protobuf/source/browse/trunk/src/google/protobuf/descriptor.proto
NOTE: The names of types and fields are not always the same between these
descriptors and the ones defined in descriptor.proto. This was done in order
to make source code files that use these descriptors easier to read. For
example, it is not necessary to prefix TYPE to all the values in
FieldDescriptor.Variant as is done in descriptor.proto FieldDescriptorProto.Type.
Example:
class Pixel(messages.Message):
x = messages.IntegerField(1, required=True)
y = messages.IntegerField(2, required=True)
color = messages.BytesField(3)
# Describe Pixel class using message descriptor.
fields = []
field = FieldDescriptor()
field.name = 'x'
field.number = 1
field.label = FieldDescriptor.Label.REQUIRED
field.variant = FieldDescriptor.Variant.INT64
fields.append(field)
field = FieldDescriptor()
field.name = 'y'
field.number = 2
field.label = FieldDescriptor.Label.REQUIRED
field.variant = FieldDescriptor.Variant.INT64
fields.append(field)
field = FieldDescriptor()
field.name = 'color'
field.number = 3
field.label = FieldDescriptor.Label.OPTIONAL
field.variant = FieldDescriptor.Variant.BYTES
fields.append(field)
message = MessageDescriptor()
message.name = 'Pixel'
message.fields = fields
# Describing is the equivalent of building the above message.
message == describe_message(Pixel)
Public Classes:
EnumValueDescriptor: Describes Enum values.
EnumDescriptor: Describes Enum classes.
FieldDescriptor: Describes field instances.
FileDescriptor: Describes a single 'file' unit.
FileSet: Describes a collection of file descriptors.
MessageDescriptor: Describes Message classes.
MethodDescriptor: Describes a method of a service.
ServiceDescriptor: Describes a services.
Public Functions:
describe_enum_value: Describe an individual enum-value.
describe_enum: Describe an Enum class.
describe_field: Describe a Field definition.
describe_file: Describe a 'file' unit from a Python module or object.
describe_file_set: Describe a file set from a list of modules or objects.
describe_message: Describe a Message definition.
describe_method: Describe a Method definition.
describe_service: Describe a Service definition.
"""
__author__ = 'rafek@google.com (Rafe Kaplan)'
import codecs
import types
from . import messages
from . import util
# Explicit public API of this module.
__all__ = ['EnumDescriptor',
           'EnumValueDescriptor',
           'FieldDescriptor',
           'MessageDescriptor',
           'MethodDescriptor',
           'FileDescriptor',
           'FileSet',
           'ServiceDescriptor',
           'DescriptorLibrary',
           'describe_enum',
           'describe_enum_value',
           'describe_field',
           'describe_message',
           'describe_method',
           'describe_file',
           'describe_file_set',
           'describe_service',
           'describe',
           'import_descriptor_loader',
           ]
# NOTE: MessageField is missing because message fields cannot have
# a default value at this time.
# TODO(rafek): Support default message values.
#
# Map to functions that convert default values of fields of a given type
# to a string.  The function must return a value that is compatible with
# FieldDescriptor.default_value and therefore a unicode string.
_DEFAULT_TO_STRING_MAP = {
    messages.IntegerField: unicode,
    messages.FloatField: unicode,
    messages.BooleanField: lambda value: value and u'true' or u'false',
    messages.BytesField: lambda value: codecs.escape_encode(value)[0],
    messages.StringField: lambda value: value,
    messages.EnumField: lambda value: unicode(value.number),
}
# Inverse of _DEFAULT_TO_STRING_MAP: parse a serialized default value
# string back into the field's native Python value.
_DEFAULT_FROM_STRING_MAP = {
    messages.IntegerField: int,
    messages.FloatField: float,
    messages.BooleanField: lambda value: value == u'true',
    messages.BytesField: lambda value: codecs.escape_decode(value)[0],
    messages.StringField: lambda value: value,
    messages.EnumField: int,
}
class EnumValueDescriptor(messages.Message):
  """Enum value descriptor.
  Fields:
    name: Name of enumeration value.
    number: Number of enumeration value.
  """
  # TODO(rafek): Why are these listed as optional in descriptor.proto.
  # Harmonize?
  # Per the module docstring, field numbers are chosen to stay binary
  # compatible with google.protobuf's descriptor.proto.
  name = messages.StringField(1, required=True)
  number = messages.IntegerField(2,
                                 required=True,
                                 variant=messages.Variant.INT32)
class EnumDescriptor(messages.Message):
  """Enum class descriptor.
  Fields:
    name: Name of Enum without any qualification.
    values: Values defined by Enum class.
  """
  name = messages.StringField(1)
  # One EnumValueDescriptor per declared value on the Enum class.
  values = messages.MessageField(EnumValueDescriptor, 2, repeated=True)
class FieldDescriptor(messages.Message):
  """Field definition descriptor.
  Enums:
    Variant: Wire format hint sub-types for field.
    Label: Values for optional, required and repeated fields.
  Fields:
    name: Name of field.
    number: Number of field.
    variant: Variant of field.
    type_name: Type name for message and enum fields.
    default_value: String representation of default value.
  """
  Variant = messages.Variant
  class Label(messages.Enum):
    """Field label: cardinality/requirement of the described field."""
    OPTIONAL = 1
    REQUIRED = 2
    REPEATED = 3
  # NOTE: field number 2 is intentionally skipped (name=1, number=3),
  # mirroring the numbering of descriptor.proto's FieldDescriptorProto.
  name = messages.StringField(1, required=True)
  number = messages.IntegerField(3,
                                 required=True,
                                 variant=messages.Variant.INT32)
  label = messages.EnumField(Label, 4, default=Label.OPTIONAL)
  variant = messages.EnumField(Variant, 5)
  type_name = messages.StringField(6)
  # For numeric types, contains the original text representation of the value.
  # For booleans, "true" or "false".
  # For strings, contains the default text contents (not escaped in any way).
  # For bytes, contains the C escaped value.  All bytes < 128 that are
  # traditionally considered unprintable are also escaped.
  default_value = messages.StringField(7)
class MessageDescriptor(messages.Message):
  """Message definition descriptor.
  Fields:
    name: Name of Message without any qualification.
    fields: Fields defined for message.
    message_types: Nested Message classes defined on message.
    enum_types: Nested Enum classes defined on message.
  """
  name = messages.StringField(1)
  fields = messages.MessageField(FieldDescriptor, 2, repeated=True)
  # Self-reference by fully qualified string name because the class is
  # still being defined at this point.
  message_types = messages.MessageField(
      'protorpc.descriptor.MessageDescriptor', 3, repeated=True)
  enum_types = messages.MessageField(EnumDescriptor, 4, repeated=True)
class MethodDescriptor(messages.Message):
  """Service method definition descriptor.
  Fields:
    name: Name of service method.
    request_type: Fully qualified or relative name of request message type.
    response_type: Fully qualified or relative name of response message type.
  """
  name = messages.StringField(1)
  # Request/response types are stored by definition name, not by reference.
  request_type = messages.StringField(2)
  response_type = messages.StringField(3)
class ServiceDescriptor(messages.Message):
  """Service definition descriptor.
  Fields:
    name: Name of Service without any qualification.
    methods: Remote methods of Service.
  """
  name = messages.StringField(1)
  # One MethodDescriptor per remote method on the service.
  methods = messages.MessageField(MethodDescriptor, 2, repeated=True)
class FileDescriptor(messages.Message):
  """Description of file containing protobuf definitions.
  Fields:
    package: Fully qualified name of package that definitions belong to.
    message_types: Message definitions contained in file.
    enum_types: Enum definitions contained in file.
    service_types: Service definitions contained in file.
  """
  # NOTE: field number 1 is unused here; numbering follows descriptor.proto.
  package = messages.StringField(2)
  # TODO(rafek): Add dependency field
  message_types = messages.MessageField(MessageDescriptor, 4, repeated=True)
  enum_types = messages.MessageField(EnumDescriptor, 5, repeated=True)
  service_types = messages.MessageField(ServiceDescriptor, 6, repeated=True)
class FileSet(messages.Message):
  """A collection of FileDescriptors.
  Fields:
    files: Files in file-set.
  """
  files = messages.MessageField(FileDescriptor, 1, repeated=True)
def describe_enum_value(enum_value):
  """Create an EnumValueDescriptor for a single Enum instance.

  Args:
    enum_value: Enum value that the descriptor should describe.

  Returns:
    EnumValueDescriptor populated with the value's name and number.
  """
  descriptor = EnumValueDescriptor()
  descriptor.name = unicode(enum_value.name)
  descriptor.number = enum_value.number
  return descriptor
def describe_enum(enum_definition):
  """Create an EnumDescriptor for an Enum class.

  Args:
    enum_definition: Enum class that the descriptor should describe.

  Returns:
    EnumDescriptor holding the class's unqualified name and one
    EnumValueDescriptor per declared value.
  """
  descriptor = EnumDescriptor()
  # definition_name() is fully qualified; keep only the leaf name.
  descriptor.name = enum_definition.definition_name().split('.')[-1]
  value_descriptors = [
      describe_enum_value(enum_definition.lookup_by_number(number))
      for number in enum_definition.numbers()]
  if value_descriptors:
    descriptor.values = value_descriptors
  return descriptor
def describe_field(field_definition):
  """Create a FieldDescriptor for a Field instance.

  Args:
    field_definition: Field instance that the descriptor should describe.

  Returns:
    FieldDescriptor capturing the field's name, number, variant, label,
    type name (for enum/message fields) and serialized default (if any).
  """
  descriptor = FieldDescriptor()
  descriptor.name = field_definition.name
  descriptor.number = field_definition.number
  descriptor.variant = field_definition.variant

  # Enum and message fields also record the definition name of their type.
  if isinstance(field_definition, messages.EnumField):
    descriptor.type_name = field_definition.type.definition_name()
  if isinstance(field_definition, messages.MessageField):
    descriptor.type_name = field_definition.message_type.definition_name()

  # Defaults are serialized to text with the per-field-type converter table.
  if field_definition.default is not None:
    to_string = _DEFAULT_TO_STRING_MAP[type(field_definition)]
    descriptor.default_value = to_string(field_definition.default)

  # Cardinality: repeated wins over required, otherwise optional.
  if field_definition.repeated:
    descriptor.label = FieldDescriptor.Label.REPEATED
  elif field_definition.required:
    descriptor.label = FieldDescriptor.Label.REQUIRED
  else:
    descriptor.label = FieldDescriptor.Label.OPTIONAL
  return descriptor
def describe_message(message_definition):
  """Create a MessageDescriptor for a Message class.

  Args:
    message_definition: Message class that the descriptor should describe.

  Returns:
    MessageDescriptor covering the class's fields (ordered by field number)
    plus any nested Message and Enum definitions.
  """
  descriptor = MessageDescriptor()
  descriptor.name = message_definition.definition_name().split('.')[-1]

  ordered_fields = sorted(message_definition.all_fields(),
                          key=lambda field: field.number)
  if ordered_fields:
    descriptor.fields = [describe_field(field) for field in ordered_fields]

  # Nested definitions are only listed when the class declares the
  # corresponding attribute; a missing attribute means "no nested types".
  try:
    nested_messages = message_definition.__messages__
  except AttributeError:
    pass
  else:
    descriptor.message_types = [
        describe_message(getattr(message_definition, name))
        for name in nested_messages]

  try:
    nested_enums = message_definition.__enums__
  except AttributeError:
    pass
  else:
    descriptor.enum_types = [
        describe_enum(getattr(message_definition, name))
        for name in nested_enums]

  return descriptor
def describe_method(method):
  """Create a MethodDescriptor for a remote service method.

  Args:
    method: Remote service method to describe.

  Returns:
    MethodDescriptor holding the method's name and the definition names
    of its request and response message types.
  """
  info = method.remote
  descriptor = MethodDescriptor()
  descriptor.name = info.method.func_name
  descriptor.request_type = info.request_type.definition_name()
  descriptor.response_type = info.response_type.definition_name()
  return descriptor
def describe_service(service_class):
  """Create a ServiceDescriptor for a Service subclass.

  Args:
    service_class: Service class to describe.

  Returns:
    ServiceDescriptor naming the class and describing its remote methods
    in alphabetical order.  The built-in 'get_descriptor' method is skipped.
  """
  descriptor = ServiceDescriptor()
  descriptor.name = service_class.__name__
  remote_methods = service_class.all_remote_methods()
  method_descriptors = [
      describe_method(remote_methods[name])
      for name in sorted(remote_methods.iterkeys())
      if name != 'get_descriptor']
  if method_descriptors:
    descriptor.methods = method_descriptors
  return descriptor
def describe_file(module):
  """Build a file from a specified Python module.
  Args:
    module: Python module to describe.
  Returns:
    Initialized FileDescriptor instance describing the module.
  """
  # May not import remote at top of file because remote depends on this
  # file
  # TODO(rafek): Straighten out this dependency. Possibly move these functions
  # from descriptor to their own module.
  from . import remote
  descriptor = FileDescriptor()
  descriptor.package = util.get_package_for_module(module)
  # Normalize an empty package string to None so the field is left unset.
  if not descriptor.package:
    descriptor.package = None
  message_descriptors = []
  enum_descriptors = []
  service_descriptors = []
  # Need to iterate over all top level attributes of the module looking for
  # message, enum and service definitions. Each definition must be itself
  # described.
  for name in sorted(dir(module)):
    value = getattr(module, name)
    if isinstance(value, type):
      if issubclass(value, messages.Message):
        message_descriptors.append(describe_message(value))
      elif issubclass(value, messages.Enum):
        enum_descriptors.append(describe_enum(value))
      elif issubclass(value, remote.Service):
        service_descriptors.append(describe_service(value))
  # Only assign repeated fields when non-empty so empty lists stay unset.
  if message_descriptors:
    descriptor.message_types = message_descriptors
  if enum_descriptors:
    descriptor.enum_types = enum_descriptors
  if service_descriptors:
    descriptor.service_types = service_descriptors
  return descriptor
def describe_file_set(modules):
  """Create a FileSet describing a collection of Python modules.

  Args:
    modules: Iterable of Python modules to describe.

  Returns:
    FileSet containing one FileDescriptor per module (field left unset
    when the iterable is empty).
  """
  descriptor = FileSet()
  file_descriptors = [describe_file(module) for module in modules]
  if file_descriptors:
    descriptor.files = file_descriptors
  return descriptor
def describe(value):
  """Describe any supported value with the appropriate descriptor.

  Dispatches on the kind of *value*: module, remote method, field
  instance, enum value, or Message/Enum/Service class.

  Args:
    value: Value to describe as a descriptor.

  Returns:
    The matching descriptor message, or None when the value is not
    describable.
  """
  from . import remote
  # Order matters: e.g. an Enum *value* must be checked before classes.
  if isinstance(value, types.ModuleType):
    return describe_file(value)
  if callable(value) and hasattr(value, 'remote'):
    return describe_method(value)
  if isinstance(value, messages.Field):
    return describe_field(value)
  if isinstance(value, messages.Enum):
    return describe_enum_value(value)
  if isinstance(value, type):
    if issubclass(value, messages.Message):
      return describe_message(value)
    if issubclass(value, messages.Enum):
      return describe_enum(value)
    if issubclass(value, remote.Service):
      return describe_service(value)
  return None
@util.positional(1)
def import_descriptor_loader(definition_name, importer=__import__):
  """Find objects by importing modules as needed.
  A definition loader is a function that resolves a definition name to a
  descriptor.
  The import finder resolves definitions to their names by importing modules
  when necessary.
  Args:
    definition_name: Name of definition to find.
    importer: Import function used for importing new modules.
  Returns:
    Appropriate descriptor for any describable type located by name.
  Raises:
    DefinitionNotFoundError when a name does not refer to either a definition
    or a module.
  """
  # Attempt to import descriptor as a module.
  # A single leading '.' is stripped; names that remain relative (i.e.
  # started with '..') skip the import attempt below.
  if definition_name.startswith('.'):
    definition_name = definition_name[1:]
  if not definition_name.startswith('.'):
    leaf = definition_name.split('.')[-1]
    if definition_name:
      try:
        module = importer(definition_name, '', '', [leaf])
      except ImportError:
        pass
      else:
        return describe(module)
  try:
    # Attempt to use messages.find_definition to find item.
    return describe(messages.find_definition(definition_name,
                                             importer=__import__))
  except messages.DefinitionNotFoundError, err:
    # There are things that find_definition will not find, but if the parent
    # is loaded, its children can be searched for a match.
    split_name = definition_name.rsplit('.', 1)
    if len(split_name) > 1:
      parent, child = split_name
      try:
        # Recursively resolve the parent, then look for the child inside it.
        parent_definition = import_descriptor_loader(parent, importer=importer)
      except messages.DefinitionNotFoundError:
        # Fall through to original error.
        pass
      else:
        # Check the parent definition for a matching descriptor.
        if isinstance(parent_definition, FileDescriptor):
          search_list = parent_definition.service_types or []
        elif isinstance(parent_definition, ServiceDescriptor):
          search_list = parent_definition.methods or []
        elif isinstance(parent_definition, EnumDescriptor):
          search_list = parent_definition.values or []
        elif isinstance(parent_definition, MessageDescriptor):
          search_list = parent_definition.fields or []
        else:
          search_list = []
        for definition in search_list:
          if definition.name == child:
            return definition
    # Still didn't find. Reraise original exception.
    raise err
class DescriptorLibrary(object):
  """A descriptor library is an object that contains known definitions.
  A descriptor library contains a cache of descriptor objects mapped by
  definition name.  It contains all types of descriptors except for
  file sets.
  When a definition name is requested that the library does not know about
  it can be provided with a descriptor loader which attempts to resolve the
  missing descriptor.
  """
  @util.positional(1)
  def __init__(self,
               descriptors=None,
               descriptor_loader=import_descriptor_loader):
    """Constructor.
    Args:
      descriptors: A dictionary or dictionary-like object that can be used
        to store and cache descriptors by definition name.
      descriptor_loader: A function used for resolving missing descriptors.
        The function takes a definition name as its parameter and returns
        an appropriate descriptor.  It may raise DefinitionNotFoundError.
    """
    self.__descriptor_loader = descriptor_loader
    self.__descriptors = descriptors or {}
  def lookup_descriptor(self, definition_name):
    """Lookup descriptor by name.
    Get descriptor from library by name.  If descriptor is not found will
    attempt to find via descriptor loader if provided.
    Args:
      definition_name: Definition name to find.
    Returns:
      Descriptor that describes definition name.
    Raises:
      DefinitionNotFoundError if no descriptor exists for definition name.
    """
    # Cache hit takes priority; loader results are memoized below.
    try:
      return self.__descriptors[definition_name]
    except KeyError:
      pass
    if self.__descriptor_loader:
      definition = self.__descriptor_loader(definition_name)
      self.__descriptors[definition_name] = definition
      return definition
    else:
      raise messages.DefinitionNotFoundError(
          'Could not find definition for %s' % definition_name)
  def lookup_package(self, definition_name):
    """Determines the package name for any definition.
    Determine the package that any definition name belongs to.  May check
    parent for package name and will resolve missing descriptors if provided
    descriptor loader.
    Args:
      definition_name: Definition name to find package for.
    """
    # Walk up the dotted name until a FileDescriptor is found; its package
    # is the answer.  No dots left means no package could be determined.
    while True:
      descriptor = self.lookup_descriptor(definition_name)
      if isinstance(descriptor, FileDescriptor):
        return descriptor.package
      else:
        index = definition_name.rfind('.')
        if index < 0:
          return None
        definition_name = definition_name[:index]
| 30.563202 | 92 | 0.731722 |
acef37ef1af8e35646482577b5c138aa54fbd011 | 1,845 | py | Python | __init__.py | Airyzz/io_model_semodel | 7452c86928826ffdef7ef65b766cd89d8741531d | [
"MIT"
] | 30 | 2019-04-04T17:46:55.000Z | 2022-03-19T03:21:39.000Z | __init__.py | Airyzz/io_model_semodel | 7452c86928826ffdef7ef65b766cd89d8741531d | [
"MIT"
] | 18 | 2019-03-24T19:47:42.000Z | 2021-11-29T21:10:14.000Z | __init__.py | Airyzz/io_model_semodel | 7452c86928826ffdef7ef65b766cd89d8741531d | [
"MIT"
] | 28 | 2019-04-04T17:48:47.000Z | 2022-03-27T08:55:32.000Z | import bpy
import bpy_extras.io_utils
from bpy.types import Operator, AddonPreferences
from bpy.props import *
from bpy_extras.io_utils import ExportHelper, ImportHelper
# Add-on metadata consumed by Blender's add-on manager.
bl_info = {
    "name": "SEModel Support",
    "author": "DTZxPorter",
    "version": (0, 0, 3),
    "blender": (2, 78, 0),
    "location": "File > Import",
    "description": "Import SEModel",
    "wiki_url": "https://github.com/dtzxporter/io_model_semodel",
    "tracker_url": "https://github.com/dtzxporter/io_model_semodel/issues",
    "support": "COMMUNITY",
    "category": "Import-Export"
}
class ImportSEModel(bpy.types.Operator, ImportHelper):
    """Blender operator that imports one or more SEModel (.semodel) files."""
    bl_idname = "import_scene.semodel"
    bl_label = "Import SEModel"
    bl_description = "Import one or more SEModel files"
    bl_options = {'PRESET'}
    filename_ext = ".semodel"
    # Restrict the file browser to .semodel files.
    filter_glob = StringProperty(default="*.semodel", options={'HIDDEN'})
    # Populated by the file browser when multiple files are selected.
    files = CollectionProperty(type=bpy.types.PropertyGroup)
    def execute(self, context):
        # Delegate the actual loading to import_semodel; operator keyword
        # args are forwarded minus the browser-only properties.
        from . import import_semodel
        result = import_semodel.load(
            self, context, **self.as_keywords(ignore=("filter_glob", "files")))
        if result:
            self.report({'INFO'}, 'SEModel has been loaded')
            return {'FINISHED'}
        else:
            self.report({'ERROR'}, 'Failed to load SEModel')
            return {'CANCELLED'}
    @classmethod
    def poll(self, context):
        # Operator is always available.
        return True
def menu_func_semodel_import(self, context):
    """Append the SEModel import entry to Blender's File > Import menu."""
    layout = self.layout
    layout.operator(ImportSEModel.bl_idname, text="SEModel (.semodel)")
def register():
    # Register every class in this module and hook the import menu entry.
    bpy.utils.register_module(__name__)
    bpy.types.INFO_MT_file_import.append(menu_func_semodel_import)
def unregister():
    # Mirror of register(): drop the menu hook and unregister classes.
    bpy.utils.unregister_module(__name__)
    bpy.types.INFO_MT_file_import.remove(menu_func_semodel_import)
if __name__ == "__main__":
    register()
| 28.828125 | 79 | 0.677507 |
acef38bea75145566697213eaa008b663aa62f2f | 11,010 | py | Python | src/preprocessing.py | utahnlp/lapa-mrp | eff21f6022a18432dd5d582637e37423bdfcd93f | [
"Apache-2.0"
] | 4 | 2020-03-27T06:27:18.000Z | 2021-04-14T04:23:51.000Z | src/preprocessing.py | utahnlp/lapa-mrp | eff21f6022a18432dd5d582637e37423bdfcd93f | [
"Apache-2.0"
] | 6 | 2020-03-27T22:21:03.000Z | 2021-09-16T19:18:39.000Z | src/preprocessing.py | utahnlp/lapa-mrp | eff21f6022a18432dd5d582637e37423bdfcd93f | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3.6
# coding=utf-8
'''
Combine multiple mrp data files in the same directory into a single one
Need to specify folder containing all subfolders of training, dev and test
Then extract features for futher process based on stanford core nlp tools
@author: Chunchuan Lyu (chunchuan.lv@gmail.com)
@since: 2018-06-01
# directly read from udpipe mrp files, and then adding extra annotations, such as ner
@author: Jie Cao(jiessie.cao@gmail.com)
@since: 2019-06-01
'''
import argparse
from pycorenlp import StanfordCoreNLP
from utility.constants import *
from utility.data_helper import *
from utility.mtool.codec.mrp import read as mrp_read
from utility.mtool.codec.amr import convert_amr_id
from parser.AMRProcessors import *
from parser.DMProcessors import *
from parser.PSDProcessors import *
from parser.EDSProcessors import *
from parser.UCCAProcessors import *
import logging
# Module-level logger: INFO-level messages go to the console with a
# timestamped "name - level - message" format.
logger = logging.getLogger("mrp.preprocessing")
logger.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.INFO)
console_handler.setFormatter(formatter)
logger.addHandler(console_handler)
def featureExtract(src_text,whiteSpace=False):
    """Extract token-level features from text via the Stanford CoreNLP server.

    Runs tokenize/ssplit/ner annotators over ``src_text`` (treated as a
    single sentence) and collects per-token features.

    Args:
        src_text: Raw sentence text to annotate.
        whiteSpace: If True, only split tokens on whitespace; this can be
            used to keep an existing tokenization intact.

    Returns:
        Dict with parallel per-token lists under keys 'tok', 'lem', 'pos',
        'ner', 'anchors' (character offsets), 'mwe' (all 'O'), plus the
        original text under 'input_snt'.
    """
    data = {}
    # `nlp` is a module-level StanfordCoreNLP client — requires a running
    # CoreNLP server.
    output = nlp.annotate(src_text.strip(), properties={
        'annotators': "tokenize,ssplit,ner",
        #"tokenize.options":"splitHyphenated=false,normalizeParentheses=false,untokenizable='allKeep'",
        #"tokenize.whitespace": whiteSpace,
        "tokenize.language": "Whitespace" if whiteSpace else "English",
        'ssplit.isOneSentence': True,
        'outputFormat': 'json'
    })
    snt = output['sentences'][0]["tokens"]
    data["input_snt"] = src_text
    data["ner"] = []
    data["tok"] = []
    data['pos'] = []
    data['lem'] = []
    data['anchors'] = []
    for snt_tok in snt:
        data["ner"].append(snt_tok['ner'])
        data["tok"].append(snt_tok['word'])
        # first add anchors as a dictionary here.
        data["anchors"].append([{'from': snt_tok['characterOffsetBegin'], 'to': snt_tok['characterOffsetEnd']}])
        data["pos"].append(snt_tok['pos'])
        data["lem"].append(snt_tok['lemma'])
    # No multi-word-expression detection here; every token is tagged 'O'.
    data['mwe'] = ['O'] * len(data["tok"])
    assert len(data["ner"]) ==len(data["tok"]) , ( len(data["tok"]) ,len(data["ner"]),"\n",list(zip(data["tok"],data["ner"])) ,data["tok"],data["ner"])
    # if whiteSpace is False:
    #     return self.featureExtract(" ".join(data["tok"]),True)
    return data
def write_features(filepath):
    """Dispatch feature extraction based on the configured companion suffix.

    Files whose configured suffix contains ".mrp" are handled by the mrp
    reader, ".amr" by the amr reader; any other suffix is unsupported.
    """
    suffix = opt.companion_suffix
    if ".mrp" in suffix:
        write_features_mrp(filepath)
        return
    if ".amr" in suffix:
        write_features_amr(filepath)
        return
    raise NotImplementedError("Not support for reading {}".format(suffix))
def write_features_amr(filepath):
    """Read an AMR-format file and write a .mrp_conllu_pre_processed file.

    Scans the file line by line for "# ::id" (remembered as the current
    example id) and "# ::snt"/"# ::tok" (the sentence text), extracts
    token features for each sentence and emits one JSON-encoded Graph
    per line to the output file.
    """
    out = filepath.split(opt.companion_suffix)[0] + ".mrp_conllu_pre_processed"
    logger.info("processing "+filepath)
    with open(out,'w') as out_f:
        with open(filepath,'r') as f:
            n = 0
            line = f.readline()
            example_id = ""
            while line != '' :
                if line.startswith("# ::id"):
                    # e.g. "# ::id bc.cctv_0000.1 ::date ..." — keep only the id token.
                    example_id = line[7:].split(' ')[0]
                    try:
                        example_id = convert_amr_id(example_id)
                    except:
                        pass
                elif line.startswith("# ::snt") or line.startswith("# ::tok"):
                    n = n+1
                    if n % 500 ==0:
                        logger.info(n)
                    text = line[7:].rstrip("\n")
                    if input_preprocessor:
                        # when using combining, only adding combined nodes, without using udpipe and edges.
                        if opt.token_combine:
                            data = input_preprocessor.preprocess(text, whiteSpace=False) #phrase from fixed joints.txt file
                        else:
                            data = input_preprocessor.featureExtract(text, whiteSpace=False) #phrase from fixed joints.txt file
                    else:
                        data = featureExtract(text, whiteSpace=True)
                    # constructing a new graph: one node per token with
                    # lemma/pos/ner (and mwe when available) properties.
                    assert example_id != "", "empty example_id in line={}".format(line)
                    new_graph = Graph(example_id, 2 , "amr")
                    for i in range(len(data['tok'])):
                        if "mwe" in data:
                            new_graph.add_node(i, label=data['tok'][i], properties=["lemma","pos","ner","mwe"], values=[data['lem'][i], data['pos'][i], data['ner'][i],data['mwe'][i]])
                        else:
                            new_graph.add_node(i, label=data['tok'][i], properties=["lemma","pos","ner"], values=[data['lem'][i], data['pos'][i], data['ner'][i]])
                    new_graph.add_input(text)
                    out_f.write(json.dumps(new_graph.encode(), indent=None, ensure_ascii = False))
                    out_f.write("\n")
                    # Reset so a missing "# ::id" for the next sentence is caught.
                    example_id = ""
                elif not line.startswith("# AMR release; "):
                    pass
                line = f.readline()
    logger.info("done processing "+filepath)
    logger.info(out +" is generated")
def write_features_mrp(filepath):
    """
    Read companion graphs in mrp format and write pre-processed features
    (tok, lem, pos, ner, mwe) to
    ``<filepath minus suffix>.mrp_conllu_pre_processed``, one JSON graph
    per output line.
    """
    out = filepath.split(opt.companion_suffix)[0] + ".mrp_conllu_pre_processed"
    logger.info("processing "+filepath)
    with open(out,'w') as out_f:
        with open(filepath, 'r') as in_file:
            n = 0
            for graph,_ in mrp_read(in_file):
                n = n + 1
                if n % 500 == 0:
                    logger.info(n)  # progress heartbeat every 500 graphs
                tokenized_text = ' '.join([node.label for node in graph.nodes])
                text = graph.input
                # NOTE(review): "\00A0" is a NUL byte followed by "A0", not
                # U+00A0 (no-break space) -- it looks like a typo for
                # u"\u00A0", but it is kept as-is because downstream data was
                # produced with this exact replacement; confirm before changing.
                text = text.replace(u"\u0085",u"\00A0").replace("%20",u"\00A0")
                tokenized_text = tokenized_text.replace(u"\u0085",u"\00A0").replace("%20",u"\00A0")
                if opt.frame == 'amr' or opt.frame == 'ucca':
                    # re-tokenize from the raw input string; anchors come from
                    # the preprocessor
                    data = input_preprocessor.preprocess(text, whiteSpace=False, token_combine = opt.token_combine) #phrase from fixed joints.txt file
                    # construct a new graph with one node per token
                    new_graph = Graph(graph.id, graph.flavor,graph.framework)
                    new_graph.add_input(text)
                    for i in range(len(data['tok'])):
                        if "mwe" in data:
                            new_graph.add_node(i, label=data['tok'][i], properties=["lemma","pos","ner","mwe"], values=[data['lem'][i], data['pos'][i], data['ner'][i],data['mwe'][i]], anchors=data['anchors'][i])
                        else:
                            new_graph.add_node(i, label=data['tok'][i], properties=["lemma","pos","ner"], values=[data['lem'][i], data['pos'][i], data['ner'][i]], anchors=data['anchors'][i])
                    out_f.write(json.dumps(new_graph.encode(), indent=None, ensure_ascii = False))
                    out_f.write("\n")
                else:
                    # keep the existing whitespace tokenization and only enrich
                    # the given nodes with pos/ner/mwe properties
                    data = input_preprocessor.preprocess(tokenized_text, whiteSpace=True, token_combine = opt.token_combine)
                    assert len(data['ner']) == len(graph.nodes), "preprocess data length is not equal to the input in {}, {}".format(graph.encode(), data)
                    assert len(data['mwe']) == len(graph.nodes), "preprocess data length is not equal to the input in {}, {}".format(graph.encode(), data)
                    for node in graph.nodes:
                        i = node.properties.index('xpos');
                        node.set_property('pos', node.values[i])
                        node.set_property('ner', data['ner'][node.id])
                        node.set_property('mwe', data['mwe'][node.id])
                    # write back the enriched graph
                    out_f.write(json.dumps(graph.encode(), indent=None, ensure_ascii = False))
                    out_f.write("\n")
    logger.info("done processing "+filepath)
    logger.info(out +" is generated")
def combine_arg():
    """Build the argument parser for this preprocessing step.

    The input is in mrp format (read from conllu) and is written back
    out as mrp.

    Returns
    -------
    argparse.ArgumentParser
        Parser exposing --suffix, --companion_suffix, --folder,
        --build_folder, --frame and --token_combine.
    """
    def _str2bool(value):
        # BUG FIX: the original used `type=bool`, but bool("False") is True,
        # so any non-empty value (including "False"/"0") parsed as True.
        # Parse the usual textual spellings explicitly instead.
        return value.strip().lower() in ("true", "1", "yes", "y", "t")

    parser = argparse.ArgumentParser(description='preprocessing.py, input is mrp format, which is read from conllu format, and written into mrp format.')
    ## Data options
    parser.add_argument('--suffix', default=".mrp_conllu", type=str,
                        help="""suffix of files to combine""")
    parser.add_argument('--companion_suffix', default=".mrp_conllu", type=str,
                        help="""suffix of the companion files to read""")
    parser.add_argument('--folder', default="", type=str,
                        help="""the input folder""")
    parser.add_argument('--build_folder', default="", type=str,
                        help="""the folder for build preprocessed data""")
    parser.add_argument('--frame', default="", type=str,
                        help="""which framework (amr/dm/psd/eds/ucca) to preprocess for""")
    parser.add_argument('--token_combine', default=False, type=_str2bool,
                        help="""whether to combine tokens, now it is mainly amr""")
    return parser
parser = combine_arg()
opt = parser.parse_args()
nlp = StanfordCoreNLP(core_nlp_url)
# One input preprocessor per framework; unknown (or empty) frames fall back
# to the AMR preprocessor, exactly as the original if/elif chain did.
_PREPROCESSOR_BY_FRAME = {
    "amr": AMRInputPreprocessor,
    "dm": DMInputPreprocessor,
    "psd": PSDInputPreprocessor,
    "eds": EDSInputPreprocessor,
    "ucca": UCCAInputPreprocessor,
}
input_preprocessor = _PREPROCESSOR_BY_FRAME.get(opt.frame, AMRInputPreprocessor)(opt, core_nlp_url)
# Collect the companion files for each dataset split under the build folder.
trainFolderPath = opt.build_folder + "/training/"
trainingCompanionFilesPath = folder_to_files_path(trainFolderPath, opt.companion_suffix)
devFolderPath = opt.build_folder + "/dev/"
devCompanionFilesPath = folder_to_files_path(devFolderPath, opt.companion_suffix)
testFolderPath = opt.build_folder + "/test/"
testCompanionFilesPath = folder_to_files_path(testFolderPath, opt.companion_suffix)
# Extract features split by split, in training -> dev -> test order.
for _split_files in (trainingCompanionFilesPath, devCompanionFilesPath, testCompanionFilesPath):
    for f in _split_files:
        write_features(f)
| 46.851064 | 211 | 0.599364 |
acef38e6e5813c2ae36889ac04cb492bc4f2225f | 4,420 | py | Python | simscale_sdk/models/one_of_divergence_schemes_div_phi_ekp.py | slainesimscale/simscale-python-sdk | db483eeabe558e55d020f5f829a3bf13c9c287a7 | [
"MIT"
] | 8 | 2021-01-22T13:41:03.000Z | 2022-01-03T09:00:10.000Z | simscale_sdk/models/one_of_divergence_schemes_div_phi_ekp.py | slainesimscale/simscale-python-sdk | db483eeabe558e55d020f5f829a3bf13c9c287a7 | [
"MIT"
] | null | null | null | simscale_sdk/models/one_of_divergence_schemes_div_phi_ekp.py | slainesimscale/simscale-python-sdk | db483eeabe558e55d020f5f829a3bf13c9c287a7 | [
"MIT"
] | 3 | 2021-03-18T15:52:52.000Z | 2022-01-03T08:59:30.000Z | # coding: utf-8
"""
SimScale API
The version of the OpenAPI document: 0.0.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from simscale_sdk.configuration import Configuration
class OneOfDivergenceSchemesDivPhiEkp(object):
    """Polymorphic wrapper for the ``div(phi,ekp)`` divergence-scheme models.

    NOTE: originally produced by OpenAPI Generator
    (https://openapi-generator.tech); the external interface is kept
    identical to the generated code.
    """
    # attribute name -> declared OpenAPI type
    openapi_types = {
        'type': 'str'
    }
    # attribute name -> JSON key in the API definition
    attribute_map = {
        'type': 'type'
    }
    # discriminator value -> name of the concrete model class
    discriminator_value_class_map = {
        'GAUSS_LINEAR': 'GaussLinearDivergenceScheme',
        'GAUSS_LINEARUPWIND_UNLIMITED': 'GaussLinearUpwindUnlimitedDivergenceScheme',
        'GAUSS_LINEARUPWIND_LIMITEDGRAD': 'GaussLinearUpwindLimitedGradDivergenceScheme',
        'GAUSS_LIMITEDLINEAR_1': 'GaussLimitedLinear1DivergenceScheme',
        'BOUNDED_GAUSS_UPWIND': 'BoundedGaussUpwindDivergenceScheme',
        'GAUSS_UPWIND': 'GaussUpwindDivergenceScheme',
        'GAUSS_VANLEER': 'GaussVanleerDivergenceScheme'
    }
    def __init__(self, type='GAUSS_VANLEER', local_vars_configuration=None):  # noqa: E501
        """Build the model; ``type`` selects the concrete child schema."""
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration
        self._type = None
        self.discriminator = 'type'
        # Route through the property setter so client-side validation runs.
        self.type = type
    @property
    def type(self):
        """The discriminator value; schema name GaussVanleerDivergenceScheme.

        :rtype: str
        """
        return self._type
    @type.setter
    def type(self, type):
        """Validate and store the discriminator value."""
        if self.local_vars_configuration.client_side_validation and type is None:  # noqa: E501
            raise ValueError("Invalid value for `type`, must not be `None`")  # noqa: E501
        self._type = type
    def get_real_child_model(self, data):
        """Return the concrete model class name selected by *data*'s discriminator."""
        json_key = self.attribute_map[self.discriminator]
        return self.discriminator_value_class_map.get(data[json_key])
    def to_dict(self):
        """Return the model's properties as a plain dict."""
        def _convert(value):
            if isinstance(value, list):
                return [item.to_dict() if hasattr(item, "to_dict") else item
                        for item in value]
            if hasattr(value, "to_dict"):
                return value.to_dict()
            if isinstance(value, dict):
                return {key: (val.to_dict() if hasattr(val, "to_dict") else val)
                        for key, val in value.items()}
            return value
        return {attr: _convert(getattr(self, attr)) for attr in self.openapi_types}
    def to_str(self):
        """Return the string representation of the model."""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()
    def __eq__(self, other):
        """Two models are equal when their dict representations match."""
        if not isinstance(other, OneOfDivergenceSchemesDivPhiEkp):
            return False
        return self.to_dict() == other.to_dict()
    def __ne__(self, other):
        """Inverse of :meth:`__eq__`, with the same type guard."""
        if not isinstance(other, OneOfDivergenceSchemesDivPhiEkp):
            return True
        return self.to_dict() != other.to_dict()
| 32.028986 | 95 | 0.622172 |
acef3904a11078cbdb4bae65393aa1c4a9e3239c | 12,690 | py | Python | training_api/gluon_files/packages/gluoncv/model_zoo/model_zoo.py | michaelnguyen11/BMW-Classification-Training-GUI | 315e6f4a4c3cd7136ef965ab089abda051110a27 | [
"Apache-2.0"
] | 69 | 2021-01-22T18:09:15.000Z | 2022-03-29T09:38:03.000Z | gluoncv/model_zoo/model_zoo.py | Z-Z-J/gluon-cv | a78965fe0f4d6003a1bbd7a9553f6d71205daa5c | [
"Apache-2.0"
] | null | null | null | gluoncv/model_zoo/model_zoo.py | Z-Z-J/gluon-cv | a78965fe0f4d6003a1bbd7a9553f6d71205daa5c | [
"Apache-2.0"
] | 3 | 2021-06-29T13:29:25.000Z | 2022-01-27T13:42:35.000Z | # pylint: disable=wildcard-import, unused-wildcard-import
"""Model store which handles pretrained models from both
mxnet.gluon.model_zoo.vision and gluoncv.models
"""
from .alexnet import *
from .cifarresnet import *
from .cifarresnext import *
from .cifarwideresnet import *
from .deeplabv3 import *
from .deeplabv3b_plus import *
from .densenet import *
from .faster_rcnn import *
from .fcn import *
from .googlenet import *
from .inception import *
from .mask_rcnn import *
from .mobilenet import *
from .mobilenetv3 import *
from .nasnet import *
from .pruned_resnet.resnetv1b_pruned import *
from .pspnet import *
from .quantized import *
from .residual_attentionnet import *
from .resnet import *
from .resnetv1b import *
from .resnext import *
from .senet import *
from .simple_pose.simple_pose_resnet import *
from .simple_pose.mobile_pose import *
from .squeezenet import *
from .ssd import *
from .vgg import *
from .xception import *
from .yolo import *
from .alpha_pose import *
from .action_recognition import *
__all__ = ['get_model', 'get_model_list']
_models = {
'resnet18_v1': resnet18_v1,
'resnet34_v1': resnet34_v1,
'resnet50_v1': resnet50_v1,
'resnet101_v1': resnet101_v1,
'resnet152_v1': resnet152_v1,
'resnet18_v2': resnet18_v2,
'resnet34_v2': resnet34_v2,
'resnet50_v2': resnet50_v2,
'resnet101_v2': resnet101_v2,
'resnet152_v2': resnet152_v2,
'se_resnet18_v1': se_resnet18_v1,
'se_resnet34_v1': se_resnet34_v1,
'se_resnet50_v1': se_resnet50_v1,
'se_resnet101_v1': se_resnet101_v1,
'se_resnet152_v1': se_resnet152_v1,
'se_resnet18_v2': se_resnet18_v2,
'se_resnet34_v2': se_resnet34_v2,
'se_resnet50_v2': se_resnet50_v2,
'se_resnet101_v2': se_resnet101_v2,
'se_resnet152_v2': se_resnet152_v2,
'vgg11': vgg11,
'vgg13': vgg13,
'vgg16': vgg16,
'vgg19': vgg19,
'vgg11_bn': vgg11_bn,
'vgg13_bn': vgg13_bn,
'vgg16_bn': vgg16_bn,
'vgg19_bn': vgg19_bn,
'alexnet': alexnet,
'densenet121': densenet121,
'densenet161': densenet161,
'densenet169': densenet169,
'densenet201': densenet201,
'squeezenet1.0': squeezenet1_0,
'squeezenet1.1': squeezenet1_1,
'googlenet': googlenet,
'inceptionv3': inception_v3,
'xception': get_xcetption,
'xception71': get_xcetption_71,
'mobilenet1.0': mobilenet1_0,
'mobilenet0.75': mobilenet0_75,
'mobilenet0.5': mobilenet0_5,
'mobilenet0.25': mobilenet0_25,
'mobilenetv2_1.0': mobilenet_v2_1_0,
'mobilenetv2_0.75': mobilenet_v2_0_75,
'mobilenetv2_0.5': mobilenet_v2_0_5,
'mobilenetv2_0.25': mobilenet_v2_0_25,
'mobilenetv3_large': mobilenet_v3_large,
'mobilenetv3_small': mobilenet_v3_small,
'mobile_pose_resnet18_v1b': mobile_pose_resnet18_v1b,
'mobile_pose_resnet50_v1b': mobile_pose_resnet50_v1b,
'mobile_pose_mobilenet1.0': mobile_pose_mobilenet1_0,
'mobile_pose_mobilenetv2_1.0': mobile_pose_mobilenetv2_1_0,
'mobile_pose_mobilenetv3_large': mobile_pose_mobilenetv3_large,
'mobile_pose_mobilenetv3_small': mobile_pose_mobilenetv3_small,
'ssd_300_vgg16_atrous_voc': ssd_300_vgg16_atrous_voc,
'ssd_300_vgg16_atrous_coco': ssd_300_vgg16_atrous_coco,
'ssd_300_vgg16_atrous_custom': ssd_300_vgg16_atrous_custom,
'ssd_512_vgg16_atrous_voc': ssd_512_vgg16_atrous_voc,
'ssd_512_vgg16_atrous_coco': ssd_512_vgg16_atrous_coco,
'ssd_512_vgg16_atrous_custom': ssd_512_vgg16_atrous_custom,
'ssd_512_resnet18_v1_voc': ssd_512_resnet18_v1_voc,
'ssd_512_resnet18_v1_coco': ssd_512_resnet18_v1_coco,
'ssd_512_resnet50_v1_voc': ssd_512_resnet50_v1_voc,
'ssd_512_resnet50_v1_coco': ssd_512_resnet50_v1_coco,
'ssd_512_resnet50_v1_custom': ssd_512_resnet50_v1_custom,
'ssd_512_resnet101_v2_voc': ssd_512_resnet101_v2_voc,
'ssd_512_resnet152_v2_voc': ssd_512_resnet152_v2_voc,
'ssd_512_mobilenet1.0_voc': ssd_512_mobilenet1_0_voc,
'ssd_512_mobilenet1.0_coco': ssd_512_mobilenet1_0_coco,
'ssd_512_mobilenet1.0_custom': ssd_512_mobilenet1_0_custom,
'ssd_300_mobilenet0.25_voc': ssd_300_mobilenet0_25_voc,
'ssd_300_mobilenet0.25_coco': ssd_300_mobilenet0_25_coco,
'ssd_300_mobilenet0.25_custom': ssd_300_mobilenet0_25_custom,
'faster_rcnn_resnet50_v1b_voc': faster_rcnn_resnet50_v1b_voc,
'mask_rcnn_resnet18_v1b_coco': mask_rcnn_resnet18_v1b_coco,
'faster_rcnn_resnet50_v1b_coco': faster_rcnn_resnet50_v1b_coco,
'faster_rcnn_fpn_resnet50_v1b_coco': faster_rcnn_fpn_resnet50_v1b_coco,
'faster_rcnn_fpn_bn_resnet50_v1b_coco': faster_rcnn_fpn_bn_resnet50_v1b_coco,
'faster_rcnn_resnet50_v1b_custom': faster_rcnn_resnet50_v1b_custom,
'faster_rcnn_resnet101_v1d_voc': faster_rcnn_resnet101_v1d_voc,
'faster_rcnn_resnet101_v1d_coco': faster_rcnn_resnet101_v1d_coco,
'faster_rcnn_fpn_resnet101_v1d_coco': faster_rcnn_fpn_resnet101_v1d_coco,
'faster_rcnn_resnet101_v1d_custom': faster_rcnn_resnet101_v1d_custom,
'mask_rcnn_resnet50_v1b_coco': mask_rcnn_resnet50_v1b_coco,
'mask_rcnn_fpn_resnet50_v1b_coco': mask_rcnn_fpn_resnet50_v1b_coco,
'mask_rcnn_resnet101_v1d_coco': mask_rcnn_resnet101_v1d_coco,
'mask_rcnn_fpn_resnet101_v1d_coco': mask_rcnn_fpn_resnet101_v1d_coco,
'mask_rcnn_fpn_resnet18_v1b_coco': mask_rcnn_fpn_resnet18_v1b_coco,
'mask_rcnn_fpn_bn_resnet18_v1b_coco': mask_rcnn_fpn_bn_resnet18_v1b_coco,
'mask_rcnn_fpn_bn_mobilenet1_0_coco': mask_rcnn_fpn_bn_mobilenet1_0_coco,
'cifar_resnet20_v1': cifar_resnet20_v1,
'cifar_resnet56_v1': cifar_resnet56_v1,
'cifar_resnet110_v1': cifar_resnet110_v1,
'cifar_resnet20_v2': cifar_resnet20_v2,
'cifar_resnet56_v2': cifar_resnet56_v2,
'cifar_resnet110_v2': cifar_resnet110_v2,
'cifar_wideresnet16_10': cifar_wideresnet16_10,
'cifar_wideresnet28_10': cifar_wideresnet28_10,
'cifar_wideresnet40_8': cifar_wideresnet40_8,
'cifar_resnext29_32x4d': cifar_resnext29_32x4d,
'cifar_resnext29_16x64d': cifar_resnext29_16x64d,
'fcn_resnet50_voc': get_fcn_resnet50_voc,
'fcn_resnet101_coco': get_fcn_resnet101_coco,
'fcn_resnet101_voc': get_fcn_resnet101_voc,
'fcn_resnet50_ade': get_fcn_resnet50_ade,
'fcn_resnet101_ade': get_fcn_resnet101_ade,
'psp_resnet101_coco': get_psp_resnet101_coco,
'psp_resnet101_voc': get_psp_resnet101_voc,
'psp_resnet50_ade': get_psp_resnet50_ade,
'psp_resnet101_ade': get_psp_resnet101_ade,
'psp_resnet101_citys': get_psp_resnet101_citys,
'deeplab_resnet101_coco': get_deeplab_resnet101_coco,
'deeplab_resnet101_voc': get_deeplab_resnet101_voc,
'deeplab_resnet152_coco': get_deeplab_resnet152_coco,
'deeplab_resnet152_voc': get_deeplab_resnet152_voc,
'deeplab_resnet50_ade': get_deeplab_resnet50_ade,
'deeplab_resnet101_ade': get_deeplab_resnet101_ade,
'deeplab_v3b_plus_wideresnet_citys': get_deeplab_v3b_plus_wideresnet_citys,
'resnet18_v1b': resnet18_v1b,
'resnet34_v1b': resnet34_v1b,
'resnet50_v1b': resnet50_v1b,
'resnet50_v1b_gn': resnet50_v1b_gn,
'resnet101_v1b_gn': resnet101_v1b_gn,
'resnet101_v1b': resnet101_v1b,
'resnet152_v1b': resnet152_v1b,
'resnet50_v1c': resnet50_v1c,
'resnet101_v1c': resnet101_v1c,
'resnet152_v1c': resnet152_v1c,
'resnet50_v1d': resnet50_v1d,
'resnet101_v1d': resnet101_v1d,
'resnet152_v1d': resnet152_v1d,
'resnet50_v1e': resnet50_v1e,
'resnet101_v1e': resnet101_v1e,
'resnet152_v1e': resnet152_v1e,
'resnet50_v1s': resnet50_v1s,
'resnet101_v1s': resnet101_v1s,
'resnet152_v1s': resnet152_v1s,
'resnext50_32x4d': resnext50_32x4d,
'resnext101_32x4d': resnext101_32x4d,
'resnext101_64x4d': resnext101_64x4d,
'resnext101b_64x4d': resnext101e_64x4d,
'se_resnext50_32x4d': se_resnext50_32x4d,
'se_resnext101_32x4d': se_resnext101_32x4d,
'se_resnext101_64x4d': se_resnext101_64x4d,
'se_resnext101e_64x4d': se_resnext101e_64x4d,
'senet_154': senet_154,
'senet_154e': senet_154e,
'darknet53': darknet53,
'yolo3_darknet53_coco': yolo3_darknet53_coco,
'yolo3_darknet53_voc': yolo3_darknet53_voc,
'yolo3_darknet53_custom': yolo3_darknet53_custom,
'yolo3_mobilenet1.0_coco': yolo3_mobilenet1_0_coco,
'yolo3_mobilenet1.0_voc': yolo3_mobilenet1_0_voc,
'yolo3_mobilenet1.0_custom': yolo3_mobilenet1_0_custom,
'yolo3_mobilenet0.25_coco': yolo3_mobilenet0_25_coco,
'yolo3_mobilenet0.25_voc': yolo3_mobilenet0_25_voc,
'yolo3_mobilenet0.25_custom': yolo3_mobilenet0_25_custom,
'nasnet_4_1056': nasnet_4_1056,
'nasnet_5_1538': nasnet_5_1538,
'nasnet_7_1920': nasnet_7_1920,
'nasnet_6_4032': nasnet_6_4032,
'simple_pose_resnet18_v1b': simple_pose_resnet18_v1b,
'simple_pose_resnet50_v1b': simple_pose_resnet50_v1b,
'simple_pose_resnet101_v1b': simple_pose_resnet101_v1b,
'simple_pose_resnet152_v1b': simple_pose_resnet152_v1b,
'simple_pose_resnet50_v1d': simple_pose_resnet50_v1d,
'simple_pose_resnet101_v1d': simple_pose_resnet101_v1d,
'simple_pose_resnet152_v1d': simple_pose_resnet152_v1d,
'residualattentionnet56': residualattentionnet56,
'residualattentionnet92': residualattentionnet92,
'residualattentionnet128': residualattentionnet128,
'residualattentionnet164': residualattentionnet164,
'residualattentionnet200': residualattentionnet200,
'residualattentionnet236': residualattentionnet236,
'residualattentionnet452': residualattentionnet452,
'cifar_residualattentionnet56': cifar_residualattentionnet56,
'cifar_residualattentionnet92': cifar_residualattentionnet92,
'cifar_residualattentionnet452': cifar_residualattentionnet452,
'resnet18_v1b_0.89': resnet18_v1b_89,
'resnet50_v1d_0.86': resnet50_v1d_86,
'resnet50_v1d_0.48': resnet50_v1d_48,
'resnet50_v1d_0.37': resnet50_v1d_37,
'resnet50_v1d_0.11': resnet50_v1d_11,
'resnet101_v1d_0.76': resnet101_v1d_76,
'resnet101_v1d_0.73': resnet101_v1d_73,
'mobilenet1.0_int8': mobilenet1_0_int8,
'resnet50_v1_int8': resnet50_v1_int8,
'ssd_300_vgg16_atrous_voc_int8': ssd_300_vgg16_atrous_voc_int8,
'ssd_512_mobilenet1.0_voc_int8': ssd_512_mobilenet1_0_voc_int8,
'ssd_512_resnet50_v1_voc_int8': ssd_512_resnet50_v1_voc_int8,
'ssd_512_vgg16_atrous_voc_int8': ssd_512_vgg16_atrous_voc_int8,
'alpha_pose_resnet101_v1b_coco': alpha_pose_resnet101_v1b_coco,
'vgg16_ucf101': vgg16_ucf101,
'inceptionv3_ucf101': inceptionv3_ucf101,
'inceptionv3_kinetics400': inceptionv3_kinetics400,
'i3d_resnet50_v1_kinetics400': i3d_resnet50_v1_kinetics400,
'i3d_resnet101_v1_kinetics400': i3d_resnet101_v1_kinetics400,
'i3d_inceptionv1_kinetics400': i3d_inceptionv1_kinetics400,
'i3d_inceptionv3_kinetics400': i3d_inceptionv3_kinetics400,
'i3d_nl5_resnet50_v1_kinetics400': i3d_nl5_resnet50_v1_kinetics400,
'i3d_nl10_resnet50_v1_kinetics400': i3d_nl10_resnet50_v1_kinetics400,
'i3d_nl5_resnet101_v1_kinetics400': i3d_nl5_resnet101_v1_kinetics400,
'i3d_nl10_resnet101_v1_kinetics400': i3d_nl10_resnet101_v1_kinetics400,
'i3d_resnet50_v1_sthsthv2': i3d_resnet50_v1_sthsthv2,
'resnet50_v1b_sthsthv2': resnet50_v1b_sthsthv2,
'fcn_resnet101_voc_int8': fcn_resnet101_voc_int8,
'fcn_resnet101_coco_int8': fcn_resnet101_coco_int8,
'psp_resnet101_voc_int8': psp_resnet101_voc_int8,
'psp_resnet101_coco_int8': psp_resnet101_coco_int8,
'deeplab_resnet101_voc_int8': deeplab_resnet101_voc_int8,
'deeplab_resnet101_coco_int8': deeplab_resnet101_coco_int8
}
def get_model(name, **kwargs):
    """Returns a pre-defined model by name

    Parameters
    ----------
    name : str
        Name of the model.
    pretrained : bool or str
        Boolean value controls whether to load the default pretrained weights for model.
        String value represents the hashtag for a certain version of pretrained weights.
    classes : int
        Number of classes for the output layer.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.

    Returns
    -------
    HybridBlock
        The model.
    """
    key = name.lower()
    if key not in _models:
        # List every known model name so the caller can spot typos quickly.
        raise ValueError('"%s" is not among the following model list:\n\t%s'
                         % (key, '\n\t'.join(sorted(_models.keys()))))
    return _models[key](**kwargs)
def get_model_list():
    """Get the entire list of model names in model_zoo.

    Returns
    -------
    list of str
        Entire list of model names in model_zoo.
    """
    # dict.keys() returns a view object in Python 3; materialize it so the
    # documented "list of str" contract actually holds and the result can
    # be indexed, sliced and serialized. Iteration and membership tests by
    # existing callers behave the same.
    return list(_models.keys())
| 42.727273 | 88 | 0.775571 |
acef39e9cf3bbc11e08125314c586a13921eaa68 | 1,709 | py | Python | airflow/contrib/operators/vertica_to_mysql.py | emilioego/airflow | 3457c7847cd24413ff5b622e65c27d8370f94502 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 79 | 2021-10-15T07:32:27.000Z | 2022-03-28T04:10:19.000Z | airflow/contrib/operators/vertica_to_mysql.py | emilioego/airflow | 3457c7847cd24413ff5b622e65c27d8370f94502 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 153 | 2021-10-15T05:23:46.000Z | 2022-02-23T06:07:10.000Z | airflow/contrib/operators/vertica_to_mysql.py | emilioego/airflow | 3457c7847cd24413ff5b622e65c27d8370f94502 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 23 | 2021-10-15T02:36:37.000Z | 2022-03-17T02:59:27.000Z | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module is deprecated.
Please use `airflow.providers.mysql.transfers.vertica_to_mysql`.
"""
import warnings
# pylint: disable=unused-import
from airflow.providers.mysql.transfers.vertica_to_mysql import VerticaToMySqlOperator
warnings.warn(
"This module is deprecated. Please use `airflow.providers.mysql.transfers.vertica_to_mysql`.",
DeprecationWarning,
stacklevel=2,
)
class VerticaToMySqlTransfer(VerticaToMySqlOperator):
    """Deprecated compatibility shim.

    This class is deprecated.
    Please use:
    `airflow.providers.mysql.transfers.vertica_to_mysql.VerticaToMySqlOperator`.
    """
    def __init__(self, *args, **kwargs):
        # stacklevel=3 attributes the warning to the code that instantiated
        # this shim rather than to this wrapper itself.
        warnings.warn(
            """This class is deprecated.
            Please use
            `airflow.providers.mysql.transfers.vertica_to_mysql.VerticaToMySqlOperator`.""",
            DeprecationWarning,
            stacklevel=3,
        )
        super().__init__(*args, **kwargs)
| 33.509804 | 98 | 0.733177 |
acef3a8e80bfd02fd9228a5aa9f101ca041abef0 | 928 | py | Python | isi_sdk_8_0/test/test_job_policies_extended.py | mohitjain97/isilon_sdk_python | a371f438f542568edb8cda35e929e6b300b1177c | [
"Unlicense"
] | 24 | 2018-06-22T14:13:23.000Z | 2022-03-23T01:21:26.000Z | isi_sdk_8_0/test/test_job_policies_extended.py | mohitjain97/isilon_sdk_python | a371f438f542568edb8cda35e929e6b300b1177c | [
"Unlicense"
] | 46 | 2018-04-30T13:28:22.000Z | 2022-03-21T21:11:07.000Z | isi_sdk_8_0/test/test_job_policies_extended.py | mohitjain97/isilon_sdk_python | a371f438f542568edb8cda35e929e6b300b1177c | [
"Unlicense"
] | 29 | 2018-06-19T00:14:04.000Z | 2022-02-08T17:51:19.000Z | # coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 3
Contact: sdk@isilon.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import isi_sdk_8_0
from isi_sdk_8_0.models.job_policies_extended import JobPoliciesExtended # noqa: E501
from isi_sdk_8_0.rest import ApiException
class TestJobPoliciesExtended(unittest.TestCase):
    """Unit test stubs for the auto-generated JobPoliciesExtended model."""
    def setUp(self):
        # No fixtures needed yet; required by the generated test template.
        pass
    def tearDown(self):
        # Nothing to clean up.
        pass
    def testJobPoliciesExtended(self):
        """Smoke-test construction of JobPoliciesExtended."""
        # FIXME: construct object with mandatory attributes with example values
        # model = isi_sdk_8_0.models.job_policies_extended.JobPoliciesExtended()  # noqa: E501
        pass
if __name__ == '__main__':
unittest.main()
| 22.634146 | 94 | 0.713362 |
acef3acb4ba7ef1cdea48b96cfc8cc9eae846019 | 1,957 | py | Python | examples/progressbar/setup.py | czoop/toga | 2b490e1b723d1fc1af451b8348d87c9c64c92678 | [
"BSD-3-Clause"
] | null | null | null | examples/progressbar/setup.py | czoop/toga | 2b490e1b723d1fc1af451b8348d87c9c64c92678 | [
"BSD-3-Clause"
] | null | null | null | examples/progressbar/setup.py | czoop/toga | 2b490e1b723d1fc1af451b8348d87c9c64c92678 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
import io
import re
from setuptools import setup, find_packages
# Extract the package version from progressbar/__init__.py by regex, without
# importing the package (a single-source version for setup()).
with io.open('./progressbar/__init__.py', encoding='utf8') as version_file:
    version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", version_file.read(), re.M)
    if version_match:
        version = version_match.group(1)
    else:
        # Fail the build loudly rather than publishing with a bogus version.
        raise RuntimeError("Unable to find version string.")
with io.open('README.rst', encoding='utf8') as readme:
long_description = readme.read()
setup(
name='progressbar',
version=version,
description='Test app for the ProgressBar widget.',
long_description=long_description,
author='BeeWare Project',
author_email='contact@beeware.org',
license='BSD license',
packages=find_packages(
exclude=[
'docs', 'tests',
'windows', 'macOS', 'linux',
'iOS', 'android',
'django'
]
),
python_requires='>=3.5',
classifiers=[
'Development Status :: 1 - Planning',
'License :: OSI Approved :: BSD license',
],
install_requires=[
],
options={
'app': {
'formal_name': 'ProgressBar',
'bundle': 'org.beeware.widgets'
},
# Desktop/laptop deployments
'macos': {
'app_requires': [
'toga-cocoa',
]
},
'linux': {
'app_requires': [
'toga-gtk',
]
},
'windows': {
'app_requires': [
'toga-winforms',
]
},
# Mobile deployments
'ios': {
'app_requires': [
'toga-ios',
]
},
'android': {
'app_requires': [
'toga-android',
]
},
# Web deployments
'django': {
'app_requires': [
'toga-django',
]
},
}
)
| 23.297619 | 95 | 0.481349 |
acef3e71bb00b01262ae15a62100202460a38bf9 | 1,140 | py | Python | multi_thread.py | dickrsunny/common-tools | cdc7d645360efe51ada497624b2f33e15efafe34 | [
"MIT"
] | 1 | 2018-01-07T03:35:19.000Z | 2018-01-07T03:35:19.000Z | multi_thread.py | dickrsunny/common-tools | cdc7d645360efe51ada497624b2f33e15efafe34 | [
"MIT"
] | null | null | null | multi_thread.py | dickrsunny/common-tools | cdc7d645360efe51ada497624b2f33e15efafe34 | [
"MIT"
] | null | null | null | import threading
import time
class Seeker(threading.Thread):
    """Hide-and-seek "seeker" thread.

    Alternates with the Hider thread over a shared ``threading.Condition``:
    each side prints a message, ``notify()``-ies the peer, then ``wait()``-s
    for its turn.
    """
    def __init__(self, cond, name):
        super(Seeker, self).__init__()
        self.cond = cond   # condition variable shared with the Hider thread
        self.name = name   # printed as a prefix on every message
    def run(self):
        # Sleep so the Hider can acquire the condition and wait() first.
        time.sleep(1)
        # `with cond:` acquires on entry and always releases on exit, fixing
        # the manual acquire()/release() pair that would leak the lock if a
        # print/notify raised in between.
        with self.cond:
            print(self.name + ': 我已经把眼睛蒙上了')
            self.cond.notify()
            self.cond.wait()
            print(self.name + ': 我找到你了 ~_~')
            self.cond.notify()
            self.cond.wait()
            print(self.name + ': 我赢了')
class Hider(threading.Thread):
    """Hide-and-seek "hider" thread; counterpart of the Seeker thread."""
    def __init__(self, cond, name):
        super(Hider, self).__init__()
        self.cond = cond   # condition variable shared with the Seeker thread
        self.name = name   # printed as a prefix on every message
    def run(self):
        # `with cond:` acquires on entry and always releases on exit, fixing
        # the manual acquire()/release() pair that would leak the lock if a
        # print/notify raised in between.
        with self.cond:
            # wait() releases the lock and suspends here until the Seeker
            # notify()-ies us and the lock is re-acquired.
            self.cond.wait()
            print(self.name + ': 我已经藏好了,你快来找我吧')
            self.cond.notify()
            self.cond.wait()
            print(self.name + ': 被你找到了,哎~~~')
            self.cond.notify()
cond = threading.Condition()
seeker = Seeker(cond, 'seeker')
hider = Hider(cond, 'hider')
seeker.start()
hider.start()
seeker.join()
hider.join()
| 23.265306 | 64 | 0.57807 |
acef3e88ff68e346cfcb2926610419ba95ee7dbb | 7,915 | py | Python | rmgpy/cantherm/geometryTest.py | vrlambert/RMG-Py | 0937b2e0a955dcf21b79674a4e89f43941c0dd85 | [
"MIT"
] | 1 | 2021-11-15T10:30:48.000Z | 2021-11-15T10:30:48.000Z | rmgpy/cantherm/geometryTest.py | vrlambert/RMG-Py | 0937b2e0a955dcf21b79674a4e89f43941c0dd85 | [
"MIT"
] | null | null | null | rmgpy/cantherm/geometryTest.py | vrlambert/RMG-Py | 0937b2e0a955dcf21b79674a4e89f43941c0dd85 | [
"MIT"
] | 1 | 2019-02-22T01:16:13.000Z | 2019-02-22T01:16:13.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy
import unittest
from rmgpy.cantherm.geometry import Geometry
import rmgpy.constants as constants
################################################################################
class GeometryTest(unittest.TestCase):
def testEthaneInternalReducedMomentOfInertia(self):
"""
Uses an optimum geometry for ethane (CC) to test that the
proper moments of inertia for its internal hindered rotor is
calculated.
"""
# Masses should be in kg/mol
mass = numpy.array([12.0, 1.0, 1.0, 1.0, 12.0, 1.0, 1.0, 1.0], numpy.float64) * 0.001
# Atomic numbers
number = numpy.array([6, 1, 1, 1, 6, 1, 1, 1], numpy.int)
# Coordinates should be in m
position = numpy.zeros((8,3), numpy.float64)
position[0,:] = numpy.array([ 0.001294, 0.002015, 0.000152]) * 1e-10
position[1,:] = numpy.array([ 0.397758, 0.629904, -0.805418]) * 1e-10
position[2,:] = numpy.array([-0.646436, 0.631287, 0.620549]) * 1e-10
position[3,:] = numpy.array([ 0.847832, -0.312615, 0.620435]) * 1e-10
position[4,:] = numpy.array([-0.760734, -1.204707, -0.557036]) * 1e-10
position[5,:] = numpy.array([-1.15728 , -1.832718, 0.248402]) * 1e-10
position[6,:] = numpy.array([-1.607276, -0.890277, -1.177452]) * 1e-10
position[7,:] = numpy.array([-0.11271 , -1.833701, -1.177357]) * 1e-10
geometry = Geometry(position, number, mass)
pivots = [0, 4]
top = [0, 1, 2, 3]
# Returned moment of inertia is in kg*m^2; convert to amu*A^2
I = geometry.getInternalReducedMomentOfInertia(pivots, top) * 1e23 * constants.Na
self.assertAlmostEqual(I / 1.5595197928, 1.0, 2)
def testButanolInternalReducedMomentOfInertia(self):
"""
Uses an optimum geometry for s-butanol (CCC(O)C) to test that the
proper moments of inertia for its internal hindered rotors are
calculated.
"""
# Masses should be in kg/mol
mass = numpy.array([12.0107, 1.00794, 1.00794, 1.00794, 12.0107, 1.00794, 1.00794, 12.0107, 1.00794, 12.0107, 1.00794, 1.00794, 1.00794, 15.9994, 1.00794], numpy.float64) * 0.001
# Atomic numbers
number = numpy.array([6, 1, 1, 1, 6, 1, 1, 6, 1, 6, 1, 1, 1, 8, 1], numpy.int)
# Coordinates should be in m
position = numpy.zeros((15,3), numpy.float64)
position[0,:] = numpy.array([-2.066968, -0.048470, -0.104326]) * 1e-10
position[1,:] = numpy.array([-2.078133, 1.009166, 0.165745]) * 1e-10
position[2,:] = numpy.array([-2.241129, -0.116565, -1.182661]) * 1e-10
position[3,:] = numpy.array([-2.901122, -0.543098, 0.400010]) * 1e-10
position[4,:] = numpy.array([-0.729030, -0.686020, 0.276105]) * 1e-10
position[5,:] = numpy.array([-0.614195, -0.690327, 1.369198]) * 1e-10
position[6,:] = numpy.array([-0.710268, -1.736876, -0.035668]) * 1e-10
position[7,:] = numpy.array([ 0.482521, 0.031583, -0.332519]) * 1e-10
position[8,:] = numpy.array([ 0.358535, 0.069368, -1.420087]) * 1e-10
position[9,:] = numpy.array([ 1.803404, -0.663583, -0.006474]) * 1e-10
position[10,:] = numpy.array([ 1.825001, -1.684006, -0.400007]) * 1e-10
position[11,:] = numpy.array([ 2.638619, -0.106886, -0.436450]) * 1e-10
position[12,:] = numpy.array([ 1.953652, -0.720890, 1.077945]) * 1e-10
position[13,:] = numpy.array([ 0.521504, 1.410171, 0.056819]) * 1e-10
position[14,:] = numpy.array([ 0.657443, 1.437685, 1.010704]) * 1e-10
geometry = Geometry(position, number, mass)
pivots = [0, 4]
top = [0, 1, 2, 3]
I = geometry.getInternalReducedMomentOfInertia(pivots, top) * 1e23 * constants.Na
self.assertAlmostEqual(I / 2.73090431938, 1.0, 3)
pivots = [4, 7]
top = [4, 5, 6, 0, 1, 2, 3]
I = geometry.getInternalReducedMomentOfInertia(pivots, top) * 1e23 * constants.Na
self.assertAlmostEqual(I / 12.1318136515, 1.0, 3)
pivots = [13, 7]
top = [13, 14]
I = geometry.getInternalReducedMomentOfInertia(pivots, top) * 1e23 * constants.Na
self.assertAlmostEqual(I / 0.853678578741, 1.0, 3)
pivots = [9, 7]
top = [9, 10, 11, 12]
I = geometry.getInternalReducedMomentOfInertia(pivots, top) * 1e23 * constants.Na
self.assertAlmostEqual(I / 2.97944840397, 1.0, 3)
def testPickle(self):
"""
Test that a Geometry object can be successfully pickled and unpickled
with no loss of information.
"""
# Masses should be in kg/mol
mass = numpy.array([12.0, 1.0, 1.0, 1.0, 12.0, 1.0, 1.0, 1.0], numpy.float64) * 0.001
# Atomic numbers
number = numpy.array([6, 1, 1, 1, 6, 1, 1, 1], numpy.int)
# Coordinates should be in m
position = numpy.zeros((8,3), numpy.float64)
position[0,:] = numpy.array([ 0.001294, 0.002015, 0.000152]) * 1e-10
position[1,:] = numpy.array([ 0.397758, 0.629904, -0.805418]) * 1e-10
position[2,:] = numpy.array([-0.646436, 0.631287, 0.620549]) * 1e-10
position[3,:] = numpy.array([ 0.847832, -0.312615, 0.620435]) * 1e-10
position[4,:] = numpy.array([-0.760734, -1.204707, -0.557036]) * 1e-10
position[5,:] = numpy.array([-1.15728 , -1.832718, 0.248402]) * 1e-10
position[6,:] = numpy.array([-1.607276, -0.890277, -1.177452]) * 1e-10
position[7,:] = numpy.array([-0.11271 , -1.833701, -1.177357]) * 1e-10
g0 = Geometry(position, number, mass)
import cPickle
g = cPickle.loads(cPickle.dumps(g0))
Natoms = len(g.number)
self.assertEqual(len(g0.number), len(g.number))
for i in range(Natoms):
for j in range(3):
self.assertEqual(g0.coordinates[i,j], g.coordinates[i,j])
self.assertEqual(g0.number[i], g.number[i])
self.assertEqual(g0.mass[i], g.mass[i])
def testOutput(self):
"""
Test that a Geometry object can be successfully reconstructed
from its repr() output with no loss of information.
"""
# Masses should be in kg/mol
mass = numpy.array([12.0, 1.0, 1.0, 1.0, 12.0, 1.0, 1.0, 1.0], numpy.float64) * 0.001
# Atomic numbers
number = numpy.array([6, 1, 1, 1, 6, 1, 1, 1], numpy.int)
# Coordinates should be in m
position = numpy.zeros((8,3), numpy.float64)
position[0,:] = numpy.array([ 0.001294, 0.002015, 0.000152]) * 1e-10
position[1,:] = numpy.array([ 0.397758, 0.629904, -0.805418]) * 1e-10
position[2,:] = numpy.array([-0.646436, 0.631287, 0.620549]) * 1e-10
position[3,:] = numpy.array([ 0.847832, -0.312615, 0.620435]) * 1e-10
position[4,:] = numpy.array([-0.760734, -1.204707, -0.557036]) * 1e-10
position[5,:] = numpy.array([-1.15728 , -1.832718, 0.248402]) * 1e-10
position[6,:] = numpy.array([-1.607276, -0.890277, -1.177452]) * 1e-10
position[7,:] = numpy.array([-0.11271 , -1.833701, -1.177357]) * 1e-10
g0 = Geometry(position, number, mass)
exec('g = %r' % g0)
Natoms = len(g.number)
self.assertEqual(len(g0.number), len(g.number))
for i in range(Natoms):
for j in range(3):
self.assertAlmostEqual(g0.coordinates[i,j], g.coordinates[i,j], 6)
self.assertEqual(g0.number[i], g.number[i])
self.assertAlmostEqual(g0.mass[i], g.mass[i], 6)
################################################################################
# Allow running this test module directly, with verbose per-test output.
if __name__ == '__main__':
    unittest.main( testRunner = unittest.TextTestRunner(verbosity=2) )
| 46.558824 | 186 | 0.557549 |
acef3fd779a0f3d1a10ba75fe2f336ddf4bfdc63 | 1,448 | py | Python | setup.py | rukku/rio-cogeo | be6761cef4cd048a99452579de3400dcb5db01b8 | [
"BSD-3-Clause"
] | null | null | null | setup.py | rukku/rio-cogeo | be6761cef4cd048a99452579de3400dcb5db01b8 | [
"BSD-3-Clause"
] | null | null | null | setup.py | rukku/rio-cogeo | be6761cef4cd048a99452579de3400dcb5db01b8 | [
"BSD-3-Clause"
] | null | null | null | """Setup."""
from setuptools import setup, find_packages
# Pull the version string out of the package source without importing it
# (the package's runtime dependencies may be absent at build time).
with open("rio_cogeo/__init__.py") as src:
    for line in src:
        if line.find("__version__") >= 0:
            version = line.split("=")[1].strip().strip('"').strip("'")

with open("README.rst") as readme_file:
    readme = readme_file.read()

# Runtime requirements.
inst_reqs = ["rasterio[s3]~=1.0"]

extra_reqs = {
    "test": ["pytest", "pytest-cov"],
    "dev": ["pytest", "pytest-cov", "pre-commit"],
}

setup(
    name="rio-cogeo",
    version=version,
    description=u"CloudOptimized GeoTIFF (COGEO) creation plugin for rasterio",
    long_description=readme,
    classifiers=[
        "Intended Audience :: Information Technology",
        "Intended Audience :: Science/Research",
        "License :: OSI Approved :: BSD License",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 2.7",
        "Topic :: Scientific/Engineering :: GIS",
    ],
    keywords="COGEO CloudOptimized Geotiff rasterio",
    author=u"Vincent Sarago",
    author_email="vincent@mapbox.com",
    url="https://github.com/mapbox/rio-cogeo",
    license="BSD-3",
    packages=find_packages(exclude=["ez_setup", "examples", "tests"]),
    install_requires=inst_reqs,
    extras_require=extra_reqs,
    entry_points="""
      [rasterio.rio_plugins]
      cogeo=rio_cogeo.scripts.cli:cogeo
      """,
)
| 28.392157 | 79 | 0.618785 |
acef40aaa6a668df2df08a133b27a4f4b7807516 | 5,148 | py | Python | src/eduid_action/mfa/idp.py | SUNET/eduid-action | 16c7c737de604ae7376d0b44ef4ff0f274810139 | [
"BSD-3-Clause"
] | null | null | null | src/eduid_action/mfa/idp.py | SUNET/eduid-action | 16c7c737de604ae7376d0b44ef4ff0f274810139 | [
"BSD-3-Clause"
] | 2 | 2018-09-11T06:06:30.000Z | 2018-12-20T15:02:52.000Z | src/eduid_action/mfa/idp.py | SUNET/eduid-action | 16c7c737de604ae7376d0b44ef4ff0f274810139 | [
"BSD-3-Clause"
] | 1 | 2018-09-11T10:11:55.000Z | 2018-09-11T10:11:55.000Z | #
# Copyright (c) 2017 NORDUnet A/S
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# 3. Neither the name of the NORDUnet nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
__author__ = 'ft'
import datetime
from eduid_userdb.credentials import U2F, Webauthn
from . import RESULT_CREDENTIAL_KEY_NAME
def add_actions(idp_app, user, ticket):
    """
    Add an action requiring the user to login using one or more additional
    authentication factors.

    This function is called by the IdP when it iterates over all the registered
    action plugins entry points.

    :param idp_app: IdP application instance
    :param user: the authenticating user
    :param ticket: the SSO login data

    :type idp_app: eduid_idp.idp.IdPApplication
    :type user: eduid_idp.idp_user.IdPUser
    :type ticket: eduid_idp.loginstate.SSOLoginData

    :return: None
    """
    # Collect every second-factor token (U2F and Webauthn) the user has.
    tokens = user.credentials.filter(U2F).to_list() + user.credentials.filter(Webauthn).to_list()
    if not tokens:
        idp_app.logger.debug('User does not have any U2F or Webauthn tokens registered')
        return None
    if not idp_app.actions_db:
        idp_app.logger.warning('No actions_db - aborting MFA action')
        return None
    pending = idp_app.actions_db.get_actions(user.eppn, ticket.key,
                                             action_type = 'mfa',
                                             )
    if pending and len(pending) > 0:
        # The user may be returning from the actions app; verify the outcome.
        idp_app.logger.debug('User has existing MFA actions - checking them')
        if check_authn_result(idp_app, user, ticket, pending):
            for credential in ticket.mfa_action_creds:
                idp_app.authn.log_authn(user, success=[credential.key], failure=[])
            return
        idp_app.logger.error('User returned without MFA credentials')
    idp_app.logger.debug('User must authenticate with a token (has {} token(s))'.format(len(tokens)))
    idp_app.actions_db.add_action(
        user.eppn,
        action_type = 'mfa',
        preference = 1,
        session = ticket.key,  # XXX double-check that ticket.key is not sensitive to disclose to the user
        params = {})
def check_authn_result(idp_app, user, ticket, actions):
    """
    The user returned to the IdP after being sent to actions. Check if actions has
    added the results of authentication to the action in the database.

    :param idp_app: IdP application instance
    :param user: the authenticating user
    :param ticket: the SSO login data
    :param actions: Actions in the ActionDB matching this user and session

    :type idp_app: eduid_idp.idp.IdPApplication
    :type user: eduid_idp.idp_user.IdPUser
    :type ticket: eduid_idp.loginstate.SSOLoginData
    :type actions: list of eduid_userdb.actions.Action

    :return: MFA action with proof of completion found
    :rtype: bool
    """
    for action in actions:
        idp_app.logger.debug('Action {} authn result: {}'.format(action, action.result))
        if action.result.get('success') is not True:
            continue
        key = action.result.get(RESULT_CREDENTIAL_KEY_NAME)
        cred = user.credentials.find(key)
        if not cred:
            idp_app.logger.error('MFA action completed with unknown key {}'.format(key))
            continue
        # Record when this credential was used, then consume the action.
        utc_now = datetime.datetime.utcnow().replace(tzinfo = None)  # thanks for not having timezone.utc, Python2
        ticket.mfa_action_creds[cred] = utc_now
        idp_app.logger.debug('Removing MFA action completed with {}'.format(cred))
        idp_app.actions_db.remove_action_by_id(action.action_id)
        return True
    return False
| 42.196721 | 122 | 0.693473 |
acef41c30e2dc37b2c8028a313f474edbb4022fc | 343 | py | Python | Dataset/Leetcode/train/83/116.py | kkcookies99/UAST | fff81885aa07901786141a71e5600a08d7cb4868 | [
"MIT"
] | null | null | null | Dataset/Leetcode/train/83/116.py | kkcookies99/UAST | fff81885aa07901786141a71e5600a08d7cb4868 | [
"MIT"
] | null | null | null | Dataset/Leetcode/train/83/116.py | kkcookies99/UAST | fff81885aa07901786141a71e5600a08d7cb4868 | [
"MIT"
] | null | null | null | class Solution(object):
def XXX(self, head):
"""
:type head: ListNode
:rtype: ListNode
"""
if not head or not head.next:
return head
p = head
while p and p.val == head.val:
p = p.next
head.next = self.XXX(p)
return head
| 20.176471 | 38 | 0.440233 |
acef424b2f3318485b7c8371d627ffb7126bf54b | 2,309 | py | Python | prettyqt/gui/painterpath.py | phil65/PrettyQt | 26327670c46caa039c9bd15cb17a35ef5ad72e6c | [
"MIT"
] | 7 | 2019-05-01T01:34:36.000Z | 2022-03-08T02:24:14.000Z | prettyqt/gui/painterpath.py | phil65/PrettyQt | 26327670c46caa039c9bd15cb17a35ef5ad72e6c | [
"MIT"
] | 141 | 2019-04-16T11:22:01.000Z | 2021-04-14T15:12:36.000Z | prettyqt/gui/painterpath.py | phil65/PrettyQt | 26327670c46caa039c9bd15cb17a35ef5ad72e6c | [
"MIT"
] | 5 | 2019-04-17T11:48:19.000Z | 2021-11-21T10:30:19.000Z | from __future__ import annotations
from typing import Iterator, Literal
from prettyqt import constants, core
from prettyqt.qt import QtCore, QtGui
from prettyqt.utils import InvalidParamError, bidict
# Bidirectional mapping between the snake_case element-type names used in
# this API and the corresponding QPainterPath.ElementType enum members.
ELEMENT_TYPES = bidict(
    move_to_element=QtGui.QPainterPath.ElementType.MoveToElement,
    line_to_element=QtGui.QPainterPath.ElementType.LineToElement,
    curve_to_element=QtGui.QPainterPath.ElementType.CurveToElement,
    curve_to_data_element=QtGui.QPainterPath.ElementType.CurveToDataElement,
)
# Accepted string spellings for an element type (the keys of ELEMENT_TYPES).
ElementTypeStr = Literal[
    "move_to_element", "line_to_element", "curve_to_element", "curve_to_data_element"
]
class PainterPath(QtGui.QPainterPath):
    """QPainterPath subclass adding pythonic container and serialization helpers."""

    def serialize_fields(self):
        """Return the serializable state: fill rule name plus all elements."""
        return dict(fill_rule=self.get_fill_rule(), elements=list(self))

    def __len__(self):
        return self.elementCount()

    def __getitem__(self, index: int) -> QtGui.QPainterPath.Element:
        return self.elementAt(index)

    def __iter__(self) -> Iterator[QtGui.QPainterPath.Element]:
        # map() yields each element lazily, exactly like the original genexp.
        return map(self.elementAt, range(self.elementCount()))

    def __setitem__(self, index: int, value: tuple[int, int]):
        self.setElementPositionAt(index, *value)

    def __bool__(self):
        return not self.isEmpty()

    def __contains__(self, item: QtCore.QPointF | QtCore.QRectF | QtGui.QPainterPath):
        return self.contains(item)

    def add_rect(self, rect: QtCore.QRectF | QtCore.QRect):
        # Qt wants a float rect here, so promote integer rects first.
        if isinstance(rect, QtCore.QRect):
            rect = QtCore.QRectF(rect)
        self.addRect(rect)

    def set_fill_rule(self, rule: constants.FillRuleStr):
        """Set fill rule.

        Args:
            rule: fill rule to use

        Raises:
            InvalidParamError: fill rule does not exist
        """
        if rule in constants.FILL_RULE:
            self.setFillRule(constants.FILL_RULE[rule])
        else:
            raise InvalidParamError(rule, constants.FILL_RULE)

    def get_fill_rule(self) -> constants.FillRuleStr:
        """Return current fill rule.

        Returns:
            fill rule
        """
        current = self.fillRule()
        return constants.FILL_RULE.inverse[current]

    def get_bounding_rect(self) -> core.RectF:
        rect = self.boundingRect()
        return core.RectF(rect)
if __name__ == "__main__":
    # Ad-hoc smoke test: build a path and inspect the element wrapper type.
    # NOTE(review): QPainterPath's constructor documents a QPointF start
    # point -- confirm passing a QPoint here is accepted by the binding.
    p = PainterPath(QtCore.QPoint(1, 1))
    print(type(p[0]))
| 29.987013 | 86 | 0.692508 |
acef429da7d326b4b3fe892dcae2825d64371e46 | 389 | py | Python | wepost/wsgi.py | Hannidiot/wepost | 98c8a30812bef1a7682b3641048f7c3aa65ba8f9 | [
"MIT"
] | null | null | null | wepost/wsgi.py | Hannidiot/wepost | 98c8a30812bef1a7682b3641048f7c3aa65ba8f9 | [
"MIT"
] | 1 | 2022-03-02T18:16:12.000Z | 2022-03-02T23:39:14.000Z | wepost/wsgi.py | Hannidiot/wepost | 98c8a30812bef1a7682b3641048f7c3aa65ba8f9 | [
"MIT"
] | null | null | null | """
WSGI config for wepost project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'wepost.settings')
application = get_wsgi_application()
| 22.882353 | 78 | 0.784062 |
acef4370399b6cd8ec60b5fddc360c62dc1daa51 | 32,908 | py | Python | cvttools/posix/7z2john.py | kedpter/hash-extr | c68f7844ea84a47979741bffdcf6c7f306fe4f76 | [
"MIT"
] | 5 | 2019-07-08T06:48:35.000Z | 2020-08-27T12:31:55.000Z | cvttools/posix/7z2john.py | kedpter/hash-extr | c68f7844ea84a47979741bffdcf6c7f306fe4f76 | [
"MIT"
] | null | null | null | cvttools/posix/7z2john.py | kedpter/hash-extr | c68f7844ea84a47979741bffdcf6c7f306fe4f76 | [
"MIT"
] | 3 | 2019-08-13T08:15:30.000Z | 2020-01-07T03:29:53.000Z | #!/usr/bin/python -u
#
# Copyright (c) 2013 by Dhiru Kholia, <dhiru (at) openwall.com>
#
# Python Bindings for LZMA
#
# Copyright (c) 2004-2010 by Joachim Bauch, mail@joachim-bauch.de
# 7-Zip Copyright (C) 1999-2010 Igor Pavlov
# LZMA SDK Copyright (C) 1999-2010 Igor Pavlov
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# $Id$
#
"""Read from and write to 7zip format archives.
"""
from binascii import unhexlify
from datetime import datetime
try:
import pylzma
# To install pylzma on Ubuntu:
# apt-get install python-pip python-dev
# pip install pylzma # may do as non-root user in group staff
except ImportError:
pass
from struct import pack, unpack
from zlib import crc32
import zlib
import bz2
import binascii
import StringIO
import sys
import os
try:
from io import BytesIO
except ImportError:
from cStringIO import StringIO as BytesIO
try:
from functools import reduce
except ImportError:
# reduce is available in functools starting with Python 2.6
pass
try:
    from pytz import UTC
except ImportError:
    # pytz is optional, define own "UTC" timestamp
    # reference implementation from Python documentation
    from datetime import timedelta, tzinfo
    ZERO = timedelta(0)
    class UTC(tzinfo):
        """UTC"""
        def utcoffset(self, dt):
            return ZERO
        def tzname(self, dt):
            return "UTC"
        def dst(self, dt):
            return ZERO
# Python 2/3 text-type shims: on Python 3 ``unicode`` does not exist, so
# define it as a pass-through; on Python 2 redefine ``bytes`` the same way,
# so the rest of the module can call unicode(s, enc) / bytes(s, enc) freely.
try:
    unicode
except NameError:
    # Python 3.x
    def unicode(s, encoding):
        return s
else:
    def bytes(s, encoding):
        return s
# Chunk size used when streaming packed data from the archive file.
READ_BLOCKSIZE = 16384

# Signature bytes at the start of every 7z archive.
MAGIC_7Z = unhexlify('377abcaf271c') # '7z\xbc\xaf\x27\x1c'

# Property IDs used inside the 7z header structure (one byte each);
# see the 7z format documentation (7zFormat.txt) for their meaning.
PROPERTY_END = unhexlify('00') # '\x00'
PROPERTY_HEADER = unhexlify('01') # '\x01'
PROPERTY_ARCHIVE_PROPERTIES = unhexlify('02') # '\x02'
PROPERTY_ADDITIONAL_STREAMS_INFO = unhexlify('03') # '\x03'
PROPERTY_MAIN_STREAMS_INFO = unhexlify('04') # '\x04'
PROPERTY_FILES_INFO = unhexlify('05') # '\x05'
PROPERTY_PACK_INFO = unhexlify('06') # '\x06'
PROPERTY_UNPACK_INFO = unhexlify('07') # '\x07'
PROPERTY_SUBSTREAMS_INFO = unhexlify('08') # '\x08'
PROPERTY_SIZE = unhexlify('09') # '\x09'
PROPERTY_CRC = unhexlify('0a') # '\x0a'
PROPERTY_FOLDER = unhexlify('0b') # '\x0b'
PROPERTY_CODERS_UNPACK_SIZE = unhexlify('0c') # '\x0c'
PROPERTY_NUM_UNPACK_STREAM = unhexlify('0d') # '\x0d'
PROPERTY_EMPTY_STREAM = unhexlify('0e') # '\x0e'
PROPERTY_EMPTY_FILE = unhexlify('0f') # '\x0f'
PROPERTY_ANTI = unhexlify('10') # '\x10'
PROPERTY_NAME = unhexlify('11') # '\x11'
PROPERTY_CREATION_TIME = unhexlify('12') # '\x12'
PROPERTY_LAST_ACCESS_TIME = unhexlify('13') # '\x13'
PROPERTY_LAST_WRITE_TIME = unhexlify('14') # '\x14'
PROPERTY_ATTRIBUTES = unhexlify('15') # '\x15'
PROPERTY_COMMENT = unhexlify('16') # '\x16'
PROPERTY_ENCODED_HEADER = unhexlify('17') # '\x17'

# Leading bytes (prefixes) of coder method IDs; matched by longest prefix.
COMPRESSION_METHOD_COPY = unhexlify('00') # '\x00'
COMPRESSION_METHOD_LZMA = unhexlify('03') # '\x03'
COMPRESSION_METHOD_CRYPTO = unhexlify('06') # '\x06'
COMPRESSION_METHOD_MISC = unhexlify('04') # '\x04'
COMPRESSION_METHOD_MISC_ZIP = unhexlify('0401') # '\x04\x01'
COMPRESSION_METHOD_MISC_BZIP = unhexlify('0402') # '\x04\x02'
COMPRESSION_METHOD_7Z_AES256_SHA256 = unhexlify('06f10701') # '\x06\xf1\x07\x01'
# Seconds between the Windows FILETIME epoch (1601-01-01 UTC) and the Unix
# epoch (1970-01-01 UTC); added below to shift FILETIME onto Unix time.
TIMESTAMP_ADJUST = -11644473600

def toTimestamp(filetime):
    """Convert 7z FILETIME to Python timestamp."""
    # FILETIME counts 100-nanosecond intervals since 1601-01-01 (UTC):
    # scale to (float) seconds, then shift onto the Unix epoch.
    seconds_since_1601 = filetime / 10000000.0
    return seconds_since_1601 + TIMESTAMP_ADJUST
class ArchiveError(Exception):
    """Base class for every error raised by this 7z reader."""
    pass
class FormatError(ArchiveError):
    """The archive header is malformed or contains unexpected bytes."""
    pass
class EncryptedArchiveError(ArchiveError):
    """The archive (or its header) is encrypted."""
    pass
class UnsupportedCompressionMethodError(ArchiveError):
    """A folder uses a coder method this reader cannot decode."""
    pass
class DecryptionError(ArchiveError):
    """Base class for decryption failures."""
    pass
class NoPasswordGivenError(DecryptionError):
    """Decryption was required but no password was supplied."""
    pass
class WrongPasswordError(DecryptionError):
    """The supplied password failed to decrypt the data."""
    pass
# NOTE(review): subclasses Python 2's ``long`` -- this module targets
# Python 2 (it also imports StringIO/cStringIO above).  Running it on
# Python 3 would require a ``long = int`` shim.
class ArchiveTimestamp(long):
    """Windows FILETIME timestamp."""
    def __repr__(self):
        # e.g. "ArchiveTimestamp(129487...)"; type(self) keeps subclasses honest.
        return '%s(%d)' % (type(self).__name__, self)
    def as_datetime(self):
        """Convert FILETIME to Python datetime object."""
        return datetime.fromtimestamp(toTimestamp(self), UTC)
class Base(object):
    """Mixin providing the primitive readers shared by all 7z header parsers."""

    def _readReal64Bit(self, file):
        # Plain little-endian 64-bit integer; also return the raw bytes so
        # callers can fold them into a running CRC.
        raw = file.read(8)
        return unpack('<Q', raw)[0], raw

    def _read64Bit(self, file):
        # 7z variable-length number: high bits of the first byte say how many
        # extra little-endian bytes follow; its remaining low bits hold the
        # most significant part of the value.
        first = ord(file.read(1))
        mask = 0x80
        for extra in range(8):
            if not (first & mask):
                tail = list(unpack('%dB' % extra, file.read(extra)))
                tail.reverse()
                low = 0
                for byte in tail:
                    low = low << 8 | byte
                return low + ((first & (mask - 1)) << (extra * 8))
            mask >>= 1

    def _readBoolean(self, file, count, checkall=0):
        # Bit vector packed MSB-first; with checkall, a leading non-zero byte
        # means "all defined" and no vector follows.
        if checkall:
            if file.read(1) != unhexlify('00'):
                return [True] * count
        flags = []
        current = 0
        mask = 0
        for _ in range(count):
            if mask == 0:
                current = ord(file.read(1))
                mask = 0x80
            flags.append(bool(current & mask))
            mask >>= 1
        return flags

    def checkcrc(self, crc, data):
        """Return True when ``data`` matches the stored CRC32 value."""
        return crc == (crc32(data) & 0xffffffff)
class PackInfo(Base):
    """Start offset, stream count, sizes and optional CRCs of the packed streams."""

    def __init__(self, file):
        self.packpos = self._read64Bit(file)
        self.numstreams = self._read64Bit(file)
        prop = file.read(1)
        if prop == PROPERTY_SIZE:
            self.packsizes = [self._read64Bit(file) for _ in range(self.numstreams)]
            prop = file.read(1)
            # CRCs only ever appear after the sizes block.
            if prop == PROPERTY_CRC:
                self.crcs = [self._read64Bit(file) for _ in range(self.numstreams)]
                prop = file.read(1)
        if prop != PROPERTY_END:
            raise FormatError('end id expected but %s found' % repr(prop))
class Folder(Base):
    """ a "Folder" represents a stream of compressed data """
    def __init__(self, file):
        """Parse one folder record: coder chain, bind pairs and packed-stream indexes."""
        numcoders = self._read64Bit(file)
        self.numcoders = numcoders
        self.coders = []
        self.digestdefined = False
        totalin = 0
        self.totalout = 0
        for i in range(numcoders):
            # A coder may list alternative methods; bit 0x80 of the leading
            # byte signals that another alternative follows.
            while True:
                b = ord(file.read(1))
                methodsize = b & 0xf
                issimple = b & 0x10 == 0
                noattributes = b & 0x20 == 0
                last_alternative = b & 0x80 == 0
                c = {}
                c['method'] = file.read(methodsize)
                if not issimple:
                    # complex coder: explicit input/output stream counts
                    c['numinstreams'] = self._read64Bit(file)
                    c['numoutstreams'] = self._read64Bit(file)
                else:
                    c['numinstreams'] = 1
                    c['numoutstreams'] = 1
                totalin += c['numinstreams']
                self.totalout += c['numoutstreams']
                if not noattributes:
                    c['properties'] = file.read(self._read64Bit(file))
                self.coders.append(c)
                if last_alternative:
                    break
        # Bind pairs connect one coder's output stream to another's input.
        numbindpairs = self.totalout - 1
        self.bindpairs = []
        for i in range(numbindpairs):
            self.bindpairs.append((self._read64Bit(file), self._read64Bit(file), ))
        numpackedstreams = totalin - numbindpairs
        self.numpackedstreams = numpackedstreams
        self.packed_indexes = []
        if numpackedstreams == 1:
            # The single packed stream is the one input not fed by a bind pair.
            for i in range(totalin):
                if self.findInBindPair(i) < 0:
                    self.packed_indexes.append(i)
        elif numpackedstreams > 1:
            for i in range(numpackedstreams):
                self.packed_indexes.append(self._read64Bit(file))
    def getUnpackSize(self):
        """Return the unpacked size of the folder's final output stream."""
        if not self.unpacksizes:
            return 0
        r = list(range(len(self.unpacksizes)))
        r.reverse()
        for i in r:
            # The folder's overall output is the stream whose output is NOT
            # consumed by another coder, i.e. has no out-bind-pair.
            # BUGFIX: was ``if self.findOutBindPair(i):`` -- a bare truthiness
            # test wrongly accepted streams bound at pair index > 0 (and
            # rejected pair index 0); -1 is the only "unbound" return value.
            if self.findOutBindPair(i) < 0:
                return self.unpacksizes[i]
        raise TypeError('not found')
    def findInBindPair(self, index):
        """Index of the bind pair whose *input* stream is ``index``, or -1."""
        for idx in range(len(self.bindpairs)):
            a, b = self.bindpairs[idx]
            if a == index:
                return idx
        return -1
    def findOutBindPair(self, index):
        """Index of the bind pair whose *output* stream is ``index``, or -1."""
        for idx in range(len(self.bindpairs)):
            a, b = self.bindpairs[idx]
            if b == index:
                return idx
        return -1
class Digests(Base):
    """ holds a list of checksums """
    def __init__(self, file, count):
        # Bit vector telling which of the ``count`` CRCs are actually defined.
        self.defined = self._readBoolean(file, count, checkall=1)
        # CRC32 values are stored as little-endian 32-bit integers.
        self.crcs = [unpack('<L', file.read(4))[0] for x in range(count)]
# Folder digests use the exact same wire format, hence the alias.
UnpackDigests = Digests
class UnpackInfo(Base):
    """ combines multiple folders """
    def __init__(self, file):
        id = file.read(1)
        if id != PROPERTY_FOLDER:
            raise FormatError('folder id expected but %s found' % repr(id))
        self.numfolders = self._read64Bit(file)
        self.folders = []
        # The "external" byte selects inline folder records (0x00) or a
        # reference to a separate data stream (0x01).
        external = file.read(1)
        if external == unhexlify('00'):
            self.folders = [Folder(file) for x in range(self.numfolders)]
        elif external == unhexlify('01'):
            self.datastreamidx = self._read64Bit(file)
        else:
            raise FormatError('0x00 or 0x01 expected but %s found' % repr(external))
        id = file.read(1)
        if id != PROPERTY_CODERS_UNPACK_SIZE:
            raise FormatError('coders unpack size id expected but %s found' % repr(id))
        # One unpacked size per coder output stream, in folder order.
        for folder in self.folders:
            folder.unpacksizes = [self._read64Bit(file) for x in range(folder.totalout)]
        id = file.read(1)
        if id == PROPERTY_CRC:
            # Optional per-folder CRCs; copy them onto the folder objects.
            digests = UnpackDigests(file, self.numfolders)
            for idx in range(self.numfolders):
                folder = self.folders[idx]
                folder.digestdefined = digests.defined[idx]
                folder.crc = digests.crcs[idx]
            id = file.read(1)
        if id != PROPERTY_END:
            raise FormatError('end id expected but %s found' % repr(id))
class SubstreamsInfo(Base):
    """ defines the substreams of a folder """
    def __init__(self, file, numfolders, folders):
        self.digests = []
        self.digestsdefined = []
        id = file.read(1)
        if id == PROPERTY_NUM_UNPACK_STREAM:
            # Explicit per-folder substream counts.
            self.numunpackstreams = [self._read64Bit(file) for x in range(numfolders)]
            id = file.read(1)
        else:
            # Default: exactly one substream per folder.
            self.numunpackstreams = []
            for idx in range(numfolders):
                self.numunpackstreams.append(1)
        if id == PROPERTY_SIZE:
            # Sizes are stored for all but the last substream of each folder;
            # the last one is derived from the folder's total unpack size.
            # NOTE(review): ``sum`` (which also shadows the builtin) is not
            # reset per folder, so folders after the first get a last-substream
            # size reduced by earlier folders' sizes -- verify against the 7z
            # format spec / upstream py7zlib before relying on multi-folder
            # archives here.
            sum = 0
            self.unpacksizes = []
            for i in range(len(self.numunpackstreams)):
                for j in range(1, self.numunpackstreams[i]):
                    size = self._read64Bit(file)
                    self.unpacksizes.append(size)
                    sum += size
                self.unpacksizes.append(folders[i].getUnpackSize() - sum)
            id = file.read(1)
        # Count how many digests are stored explicitly: folders with a single
        # substream and a folder-level CRC reuse that CRC instead.
        numdigests = 0
        numdigeststotal = 0
        for i in range(numfolders):
            numsubstreams = self.numunpackstreams[i]
            if numsubstreams != 1 or not folders[i].digestdefined:
                numdigests += numsubstreams
            numdigeststotal += numsubstreams
        if id == PROPERTY_CRC:
            digests = Digests(file, numdigests)
            didx = 0
            for i in range(numfolders):
                folder = folders[i]
                numsubstreams = self.numunpackstreams[i]
                if numsubstreams == 1 and folder.digestdefined:
                    # Single substream: inherit the folder-level CRC.
                    self.digestsdefined.append(True)
                    self.digests.append(folder.crc)
                else:
                    for j in range(numsubstreams):
                        self.digestsdefined.append(digests.defined[didx])
                        self.digests.append(digests.crcs[didx])
                        didx += 1
            id = file.read(1)
        if id != PROPERTY_END:
            raise FormatError('end id expected but %r found' % id)
        if not self.digestsdefined:
            # No CRC block at all: mark every substream as undefined.
            self.digestsdefined = [False] * numdigeststotal
            self.digests = [0] * numdigeststotal
class StreamsInfo(Base):
    """Container tying pack, unpack and substream information together."""

    def __init__(self, file):
        # Each section is optional but must appear in this fixed order.
        prop = file.read(1)
        if prop == PROPERTY_PACK_INFO:
            self.packinfo = PackInfo(file)
            prop = file.read(1)
        if prop == PROPERTY_UNPACK_INFO:
            self.unpackinfo = UnpackInfo(file)
            prop = file.read(1)
        if prop == PROPERTY_SUBSTREAMS_INFO:
            self.substreamsinfo = SubstreamsInfo(file, self.unpackinfo.numfolders, self.unpackinfo.folders)
            prop = file.read(1)
        if prop != PROPERTY_END:
            raise FormatError('end id expected but %s found' % repr(prop))
class FilesInfo(Base):
    """ holds file properties """
    def _readTimes(self, file, files, name):
        # Read one optional FILETIME per file into files[i][name].
        defined = self._readBoolean(file, len(files), checkall=1)
        # NOTE: the "external" flag is currently ignored, should be 0x00
        external = file.read(1)
        for i in range(len(files)):
            if defined[i]:
                files[i][name] = ArchiveTimestamp(self._readReal64Bit(file)[0])
            else:
                files[i][name] = None
    def __init__(self, file):
        self.numfiles = self._read64Bit(file)
        self.files = [{'emptystream': False} for x in range(self.numfiles)]
        numemptystreams = 0
        # Properties are stored as (type, size, payload) records until
        # PROPERTY_END; each payload is parsed from its own buffer.
        while True:
            typ = self._read64Bit(file)
            if typ > 255:
                raise FormatError('invalid type, must be below 256, is %d' % typ)
            typ = pack('B', typ)
            if typ == PROPERTY_END:
                break
            size = self._read64Bit(file)
            buffer = BytesIO(file.read(size))
            if typ == PROPERTY_EMPTY_STREAM:
                isempty = self._readBoolean(buffer, self.numfiles)
                # Mark each file's emptystream flag in place.
                list(map(lambda x, y: x.update({'emptystream': y}), self.files, isempty))
                for x in isempty:
                    if x: numemptystreams += 1
                emptyfiles = [False] * numemptystreams
                antifiles = [False] * numemptystreams
            elif typ == PROPERTY_EMPTY_FILE:
                emptyfiles = self._readBoolean(buffer, numemptystreams)
            elif typ == PROPERTY_ANTI:
                antifiles = self._readBoolean(buffer, numemptystreams)
            elif typ == PROPERTY_NAME:
                external = buffer.read(1)
                if external != unhexlify('00'):
                    self.dataindex = self._read64Bit(buffer)
                    # XXX: evaluate external
                    raise NotImplementedError
                # Names are NUL-terminated UTF-16 strings, one per file.
                for f in self.files:
                    name = ''
                    while True:
                        ch = buffer.read(2)
                        if ch == unhexlify('0000'):
                            f['filename'] = name
                            break
                        name += ch.decode('utf-16')
            elif typ == PROPERTY_CREATION_TIME:
                self._readTimes(buffer, self.files, 'creationtime')
            elif typ == PROPERTY_LAST_ACCESS_TIME:
                self._readTimes(buffer, self.files, 'lastaccesstime')
            elif typ == PROPERTY_LAST_WRITE_TIME:
                self._readTimes(buffer, self.files, 'lastwritetime')
            elif typ == PROPERTY_ATTRIBUTES:
                defined = self._readBoolean(buffer, self.numfiles, checkall=1)
                for i in range(self.numfiles):
                    f = self.files[i]
                    if defined[i]:
                        f['attributes'] = unpack('<L', buffer.read(4))[0]
                    else:
                        f['attributes'] = None
            else:
                raise FormatError('invalid type %r' % (typ))
class Header(Base):
    """Parsed representation of the top-level 7z archive header."""

    def __init__(self, file):
        # Optional sections appear in this fixed order, each introduced by
        # its property id byte.
        prop = file.read(1)
        if prop == PROPERTY_ARCHIVE_PROPERTIES:
            # NOTE(review): ArchiveProperties is not defined in this module;
            # this branch would raise NameError if such a header were seen.
            self.properties = ArchiveProperties(file)
            prop = file.read(1)
        if prop == PROPERTY_ADDITIONAL_STREAMS_INFO:
            self.additional_streams = StreamsInfo(file)
            prop = file.read(1)
        if prop == PROPERTY_MAIN_STREAMS_INFO:
            self.main_streams = StreamsInfo(file)
            prop = file.read(1)
        if prop == PROPERTY_FILES_INFO:
            self.files = FilesInfo(file)
            prop = file.read(1)
        if prop != PROPERTY_END:
            raise FormatError('end id expected but %s found' % (repr(prop)))
class ArchiveFile(Base):
""" wrapper around a file in the archive """
def __init__(self, info, start, src_start, size, folder, archive, maxsize=None):
self.digest = None
self._archive = archive
self._file = archive._file
self._start = start
self._src_start = src_start
self._folder = folder
self.size = size
# maxsize is only valid for solid archives
self._maxsize = maxsize
for k, v in info.items():
setattr(self, k, v)
self.reset()
self._decoders = {
COMPRESSION_METHOD_COPY: '_read_copy',
COMPRESSION_METHOD_LZMA: '_read_lzma',
COMPRESSION_METHOD_MISC_ZIP: '_read_zip',
COMPRESSION_METHOD_MISC_BZIP: '_read_bzip',
COMPRESSION_METHOD_7Z_AES256_SHA256: '_read_7z_aes256_sha256',
}
def _is_encrypted(self):
return COMPRESSION_METHOD_7Z_AES256_SHA256 in [x['method'] for x in self._folder.coders]
def reset(self):
self.pos = 0
def read(self):
if not self._folder.coders:
raise TypeError("file has no coder informations")
data = None
for coder in self._folder.coders:
method = coder['method']
decoder = None
while method and decoder is None:
decoder = self._decoders.get(method, None)
method = method[:-1]
if decoder is None:
raise UnsupportedCompressionMethodError(repr(coder['method']))
data = getattr(self, decoder)(coder, data)
return data
def _read_copy(self, coder, input):
if not input:
self._file.seek(self._src_start)
input = self._file.read(self.uncompressed)
return input[self._start:self._start+self.size]
def _read_from_decompressor(self, coder, decompressor, input, checkremaining=False, with_cache=False):
data = ''
idx = 0
cnt = 0
properties = coder.get('properties', None)
if properties:
decompressor.decompress(properties)
total = self.compressed
if not input and total is None:
remaining = self._start+self.size
out = BytesIO()
cache = getattr(self._folder, '_decompress_cache', None)
if cache is not None:
data, pos, decompressor = cache
out.write(data)
remaining -= len(data)
self._file.seek(pos)
else:
self._file.seek(self._src_start)
checkremaining = checkremaining and not self._folder.solid
while remaining > 0:
data = self._file.read(READ_BLOCKSIZE)
if checkremaining or (with_cache and len(data) < READ_BLOCKSIZE):
tmp = decompressor.decompress(data, remaining)
else:
tmp = decompressor.decompress(data)
assert len(tmp) > 0
out.write(tmp)
remaining -= len(tmp)
data = out.getvalue()
if with_cache and self._folder.solid:
# don't decompress start of solid archive for next file
# TODO: limit size of cached data
self._folder._decompress_cache = (data, self._file.tell(), decompressor)
else:
if not input:
self._file.seek(self._src_start)
input = self._file.read(total)
if checkremaining:
data = decompressor.decompress(input, self._start+self.size)
else:
data = decompressor.decompress(input)
return data[self._start:self._start+self.size]
def _read_lzma(self, coder, input):
dec = pylzma.decompressobj(maxlength=self._start+self.size)
try:
return self._read_from_decompressor(coder, dec, input, checkremaining=True, with_cache=True)
except ValueError:
if self._is_encrypted():
raise WrongPasswordError('invalid password')
raise
def _read_zip(self, coder, input):
dec = zlib.decompressobj(-15)
return self._read_from_decompressor(coder, dec, input, checkremaining=True)
def _read_bzip(self, coder, input):
dec = bz2.BZ2Decompressor()
return self._read_from_decompressor(coder, dec, input)
def read_7z_aes256_sha256(self, coder, input):
if not self._archive.password:
raise NoPasswordGivenError()
# TODO: this needs some sanity checks
firstbyte = ord(coder['properties'][0])
numcyclespower = firstbyte & 0x3f
if firstbyte & 0xc0 != 0:
saltsize = (firstbyte >> 7) & 1
ivsize = (firstbyte >> 6) & 1
secondbyte = ord(coder['properties'][1])
saltsize += (secondbyte >> 4)
ivsize += (secondbyte & 0x0f)
assert len(coder['properties']) == 2+saltsize+ivsize
salt = coder['properties'][2:2+saltsize]
iv = coder['properties'][2+saltsize:2+saltsize+ivsize]
assert len(salt) == saltsize
assert len(iv) == ivsize
assert numcyclespower <= 24
if ivsize < 16:
iv += '\x00'*(16-ivsize)
else:
salt = iv = ''
password = self._archive.password.encode('utf-16-le')
key = pylzma.calculate_key(password, numcyclespower, salt=salt)
cipher = pylzma.AESDecrypt(key, iv=iv)
if not input:
self._file.seek(self._src_start)
uncompressed_size = self.uncompressed
if uncompressed_size & 0x0f:
# we need a multiple of 16 bytes
uncompressed_size += 16 - (uncompressed_size & 0x0f)
input = self._file.read(uncompressed_size)
result = cipher.decrypt(input)
return result
    def checkcrc(self):
        """Re-read the whole member and verify it against its stored digest.

        Members without a digest are considered valid.
        """
        if self.digest is None:
            return True
        self.reset()
        data = self.read()
        return super(ArchiveFile, self).checkcrc(self.digest, data)
# XXX global state
# AES-256 key-derivation parameters shared between SetDecoderProperties2()
# (which fills them in from a coder's 'properties' blob) and
# Archive7z.__init__ (which emits them on the "$7z$" hash line).
iv = None              # initialisation vector, NUL-padded to 16 bytes
ivSize = None          # IV length as encoded in the properties blob
Salt = None            # KDF salt bytes (may be replaced by a fake salt later)
NumCyclesPower = None  # iteration exponent for the key-derivation function
SaltSize = None        # salt length as encoded in the properties blob
def SetDecoderProperties2(data):
    """Parse a 7z AES coder 'properties' blob into the module globals.

    Port of the C++ 7-Zip decoder setup (hence the name and the stray
    semicolons).  As a side effect fills NumCyclesPower, SaltSize/Salt and
    ivSize/iv.  Returns a truthy string on success, None when the encoded
    NumCyclesPower is out of range.
    """
    global iv, ivSize, Salt, NumCyclesPower, SaltSize
    pos = 0
    data = bytearray(data)
    firstByte = data[pos]
    pos = pos + 1
    # low 6 bits: KDF iteration exponent
    NumCyclesPower = firstByte & 0x3F;
    if NumCyclesPower > 24:
        # print "Bad NumCyclesPower value"
        return None
    if ((firstByte & 0xC0) == 0):
        # XXX
        # NOTE(review): this early return leaves Salt/iv/SaltSize/ivSize
        # untouched (possibly stale from a previous call) — confirm callers
        # never rely on them after hitting this path.
        return "S_OK"
    # top two bits flag salt / IV presence; sizes continue in the 2nd byte
    SaltSize = (firstByte >> 7) & 1;
    ivSize = (firstByte >> 6) & 1;
    secondByte = data[pos]
    pos = pos + 1
    SaltSize += (secondByte >> 4);
    ivSize += (secondByte & 0x0F);
    # get salt
    Salt = data[pos:pos+SaltSize]
    Salt = str(Salt)  # Python 2 semantics: str(bytearray) yields raw bytes
    pos = pos + SaltSize
    # get iv
    iv = data[pos:pos+ivSize]
    iv = str(iv)
    # pad the IV with NULs to the AES block size
    if len(iv) < 16:
        iv = iv + "\x00" * (16 - len(iv))
    return "OK"
class Archive7z(Base):
    """ the archive itself """

    def __init__(self, file, password=None):
        """Parse the 7z start header and, for archives with an encrypted
        (encoded) header, print a crackable "$7z$" hash line per packed
        stream and return.

        NOTE: `file`, `buffer` and `id` shadow builtins throughout; kept
        as-is from the original (Python 2) code.
        """
        self._file = file
        self.password = password

        # --- magic + start header (CRC-protected) ---------------------
        self.header = file.read(len(MAGIC_7Z))
        if self.header != MAGIC_7Z:
            raise FormatError('not a 7z file')
        self.version = unpack('BB', file.read(2))
        self.startheadercrc = unpack('<L', file.read(4))[0]
        # offset/size of the "next header"; the bytes read are folded into
        # a running CRC that must match startheadercrc
        self.nextheaderofs, data = self._readReal64Bit(file)
        crc = crc32(data)
        self.nextheadersize, data = self._readReal64Bit(file)
        crc = crc32(data, crc)
        data = file.read(4)
        self.nextheadercrc = unpack('<L', data)[0]
        crc = crc32(data, crc) & 0xffffffff
        if crc != self.startheadercrc:
            raise FormatError('invalid header data')
        self.afterheader = file.tell()

        # --- next header ----------------------------------------------
        file.seek(self.nextheaderofs, 1)
        buffer = BytesIO(file.read(self.nextheadersize))
        if not self.checkcrc(self.nextheadercrc, buffer.getvalue()):
            raise FormatError('invalid header data')

        while True:
            id = buffer.read(1)
            if not id or id == PROPERTY_HEADER:
                break
            if id != PROPERTY_ENCODED_HEADER:
                raise TypeError('Unknown field: %r' % (id))
            # ReadAndDecodePackedStreams (7zIn.cpp)
            streams = StreamsInfo(buffer)
            file.seek(self.afterheader + 0)
            data = bytes('', 'ascii')
            for folder in streams.unpackinfo.folders:
                file.seek(streams.packinfo.packpos, 1)
                props = folder.coders[0]['properties']
                # decode properties
                if SetDecoderProperties2(props):
                    # derive keys
                    # password = "password".encode('utf-16-le')
                    # print NumCyclesPower, Salt, password
                    # key = pylzma.calculate_key(password, NumCyclesPower, salt=Salt)
                    # cipher = pylzma.AESDecrypt(key, iv=str(iv))
                    global Salt
                    if len(Salt) == 0:
                        Salt = "\x11\x22" # fake salt
                    for idx in range(len(streams.packinfo.packsizes)):
                        tmp = file.read(streams.packinfo.packsizes[idx])
                        fname = os.path.basename(self._file.name)
                        # one "$7z$" hash line per packed stream, built from
                        # the KDF globals filled by SetDecoderProperties2
                        print "%s:$7z$0$%s$%s$%s$%s$%s$%s$%s$%s$%s" % (fname,
                            NumCyclesPower, SaltSize, binascii.hexlify(Salt),
                            ivSize, binascii.hexlify(iv), folder.crc, len(tmp),
                            folder.unpacksizes[idx], binascii.hexlify(tmp))
                        # print binascii.hexlify(tmp)
                        # result = cipher.decrypt(tmp)
                        # print folder.unpacksizes
                        # print folder.coders
                        # XXX we don't now how to handle unpacksizes of size > 1
                        # XXX we need to locate correct data and pass it to correct decompressor
                        # XXX correct decompressor can be located from folder.coders
                        # data = result # for checksum check
                        size = folder.unpacksizes[idx] # for checksum check
                        if len(folder.unpacksizes) > 1:
                            sys.stderr.write("%s : multiple unpacksizes found, not supported fully yet!\n" % fname)
                    # print binascii.hexlify(result)
                    # flds = Folder(BytesIO(result))
                    # print flds.coders
                    # print flds.packed_indexes, flds.totalout
                    # XXX return can't be right
                    return
                # else:
                #     for idx in range(len(streams.packinfo.packsizes)):
                #         tmp = file.read(streams.packinfo.packsizes[idx])
                #         data += pylzma.decompress(props+tmp, maxlength=folder.unpacksizes[idx])
                #
                #     if folder.digestdefined:
                #         if not self.checkcrc(folder.crc, data[0:size]):
                #             raise FormatError('invalid block data')
                #     # XXX return can't be right
                #     return

        # XXX this part is not done yet
        sys.stderr.write("%s : 7-Zip files without header encryption are *not* supported yet!\n" % (file.name))
        return

        # NOTE(review): everything below the unconditional return above is
        # unreachable — dead code retained from the py7zlib implementation
        # this file appears to derive from.
        buffer = BytesIO(file.read())
        id = buffer.read(1)
        self.files = []
        if not id:
            # empty archive
            self.solid = False
            self.numfiles = 0
            self.filenames = []
            return
        xx = FilesInfo(buffer)
        self.header = Header(buffer)
        files = self.header.files
        folders = self.header.main_streams.unpackinfo.folders
        packinfo = self.header.main_streams.packinfo
        subinfo = self.header.main_streams.substreamsinfo
        packsizes = packinfo.packsizes
        self.solid = packinfo.numstreams == 1
        if hasattr(subinfo, 'unpacksizes'):
            unpacksizes = subinfo.unpacksizes
        else:
            unpacksizes = [x.unpacksizes[0] for x in folders]

        # map each file entry onto its folder / packed stream
        fidx = 0
        obidx = 0
        src_pos = self.afterheader
        pos = 0
        folder_start = 0
        folder_pos = src_pos
        maxsize = (self.solid and packinfo.packsizes[0]) or None
        for idx in range(files.numfiles):
            info = files.files[idx]
            if info['emptystream']:
                continue
            folder = folders[fidx]
            folder.solid = subinfo.numunpackstreams[fidx] > 1
            maxsize = (folder.solid and packinfo.packsizes[fidx]) or None
            if folder.solid:
                # file is part of solid archive
                info['compressed'] = None
            elif obidx < len(packsizes):
                # file is compressed
                info['compressed'] = packsizes[obidx]
            else:
                # file is not compressed
                info['compressed'] = unpacksizes[obidx]
            info['uncompressed'] = unpacksizes[obidx]
            file = ArchiveFile(info, pos, src_pos, unpacksizes[obidx], folder, self, maxsize=maxsize)
            if subinfo.digestsdefined[obidx]:
                file.digest = subinfo.digests[obidx]
            self.files.append(file)
            if folder.solid:
                pos += unpacksizes[obidx]
            else:
                src_pos += info['compressed']
            obidx += 1
            if idx >= subinfo.numunpackstreams[fidx]+folder_start:
                folder_pos += packinfo.packsizes[fidx]
                src_pos = folder_pos
                folder_start = idx
                fidx += 1

        self.numfiles = len(self.files)
        self.filenames = map(lambda x: x.filename, self.files)

    # interface like TarFile
    def getmember(self, name):
        """Return the ArchiveFile with the given filename, or None."""
        # XXX: store files in dictionary
        for f in self.files:
            if f.filename == name:
                return f
        return None

    def getmembers(self):
        """Return all ArchiveFile entries."""
        return self.files

    def getnames(self):
        """Return the list of member filenames."""
        return self.filenames

    def list(self, verbose=True):
        """Print a listing of the archive's members to stdout."""
        print ('total %d files in %sarchive' % (self.numfiles, (self.solid and 'solid ') or ''))
        if not verbose:
            print ('\n'.join(self.filenames))
            return
        for f in self.files:
            extra = (f.compressed and '%10d ' % (f.compressed)) or ' '
            print ('%10d%s%.8x %s' % (f.size, extra, f.digest, f.filename))
if __name__ == '__main__':
    # Usage: prog <signature> <file> [<file> ...]
    if len(sys.argv) < 2:
        sys.stdout.write("Usage: %s < encrypted 7-Zip files >\n" %
                         sys.argv[0])
        # Bug fix: the original fell through to sys.argv[1] below and
        # crashed with IndexError when no arguments were given.
        sys.exit(1)
    # Undocumented guard: the first argument must be this magic token,
    # otherwise the script silently exits.
    signature = 'skdlxnoe2390es98d9jlsa0932jkndlod'
    if (sys.argv[1] != signature):
        sys.exit()
    for filename in sys.argv[2:]:
        f = Archive7z(open(filename, 'rb'))
| 35.461207 | 115 | 0.560502 |
acef444b6c478b85169b6585a6990aecb539b80f | 1,688 | py | Python | _tests/test_blockchain.py | YannickSF/blob | 66e9de68984b66165701481fe71dc332b2b4e017 | [
"MIT"
] | 1 | 2022-01-06T17:27:38.000Z | 2022-01-06T17:27:38.000Z | _tests/test_blockchain.py | YannickSF/Blobchain | 66e9de68984b66165701481fe71dc332b2b4e017 | [
"MIT"
] | null | null | null | _tests/test_blockchain.py | YannickSF/Blobchain | 66e9de68984b66165701481fe71dc332b2b4e017 | [
"MIT"
] | null | null | null |
import unittest
from core.blockchain import Blobchain, Block, Txion
class BlockchainTest(unittest.TestCase):
    """Unit tests for the Blobchain blockchain core."""

    def test_circulated_coins(self):
        """A fresh chain starts with a non-zero circulating supply."""
        chain = Blobchain()
        self.assertNotEqual(0, chain.current_circulated_coins)

    def test_create_block(self):
        """create_block() returns a hashed block carrying the given proof."""
        chain = Blobchain()
        proof, miner = 23, 'tester'
        block = chain.create_block(proof, miner)
        self.assertIsNotNone(block)
        self.assertEqual(proof, block.proof)
        self.assertIsNotNone(block.hash)

    def test_last_block(self):
        """last_block() yields a block even on a fresh chain."""
        chain = Blobchain()
        self.assertIsNotNone(chain.last_block())

    def test_get_block(self):
        """block(hash) looks a block up by its hash."""
        chain = Blobchain()
        known_hash = '1eb1c48d7d6fdd9c530c6e9ca3a556cfdbe96101a26554a785ac097f686460ab'
        self.assertIsNotNone(chain.block(known_hash))

    def test_forge(self):
        """forge() yields a Block, a Txion and an int, in that order."""
        chain = Blobchain()
        produced = [item for item in chain.forge('tester_address')]
        self.assertEqual(type(produced[0]), Block)
        self.assertEqual(type(produced[1]), Txion)
        self.assertEqual(type(produced[2]), int)

    def test_exchanges(self):
        """exchanges() builds a transaction echoing its arguments."""
        chain = Blobchain()
        sender, receiver, value = 'exp1', 'destinator1', 100
        txion = chain.exchanges(sender, receiver, value)
        self.assertIsNotNone(txion)
        self.assertEqual(sender, txion.expeditor)
        self.assertEqual(receiver, txion.destinator)
        self.assertEqual(value, txion.amount)
| 27.225806 | 83 | 0.675948 |
acef4568cdee32801f2badecbb88f2a4bd640086 | 2,414 | py | Python | english/urls.py | xgxofdream/Notebook_by_Tag | 226b304a37564ad68092864266f1c769dcba7473 | [
"MIT"
] | null | null | null | english/urls.py | xgxofdream/Notebook_by_Tag | 226b304a37564ad68092864266f1c769dcba7473 | [
"MIT"
] | null | null | null | english/urls.py | xgxofdream/Notebook_by_Tag | 226b304a37564ad68092864266f1c769dcba7473 | [
"MIT"
] | null | null | null | """jays URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.urls import path
from . import views
# URL namespace used as the reverse() prefix, e.g. 'english:index'.
app_name = 'english'
urlpatterns = [
    # Home page
    path('', views.index, name='index'),
    # Source list
    # Reference
    path('reference_list/<int:source_id>/', views.reference_list, name='reference_list'),
    # Reference input
    # NOTE(review): reuses the name 'reference_list' — reverse() will resolve
    # duplicated names to the last matching pattern; confirm this is intended.
    path('submit_reference/', views.submit_reference, name='reference_list'),
    # Tag list
    path('tag_list/<int:id>/', views.tag_list, name='tag_list'),
    # Source list
    path('source_list/<str:source_type>/', views.source_list, name='source_list'),
    # English-note list, by source
    path('list_by_source/<int:id>/', views.list_by_source, name='english_list'),
    # English-note list, by tag
    # GET variant
    path('list_by_tag_get/<int:id>/', views.list_by_tag_get, name='list_by_tag'),
    # POST variant (same name as the GET variant, see note above pattern style)
    path('list_by_tag_post/', views.list_by_tag_post, name='list_by_tag'),
    # English-note detail
    path('detail/<int:id>/', views.english_detail, name='english_detail'),
    # English-note input page
    path('input/<int:id>/', views.input, name='input'),
    # Submit an English note
    path('submit/', views.submit, name='submit'),
    # Submit to the Word Bench
    # NOTE(review): the next three routes all reuse the name 'submit'.
    path('word_bench/<str:method>/<str:source_type>/', views.word_bench, name='submit'),
    # Submit to the Word Bench
    path('list_by_word/<str:id>/', views.list_by_word, name='submit'),
    # My summary
    path('summary/', views.summary, name='submit'),
    # Note review: review keywords and key expressions one by one, match
    # them to tags, in preparation for the summary.
    path('element_review/<int:id>/', views.element_review, name='element_review'),
    # List for element
    path('list_for_element/', views.list_for_element, name='list_for_element'),
    # Update
    # NOTE(review): reuses the name 'element_review'.
    path('update/<int:english_id>/', views.update, name='element_review'),
    # Submit update
    path('submit_update/<int:english_id>/', views.submit_update, name='submit_update'),
]
| 27.747126 | 89 | 0.676056 |
acef45dbf3978dc47eac406d68923e0eb85a287b | 4,518 | py | Python | experiments/texshare/main.py | enjalot/adventures_in_opencl | c222d15c076ee3f5f81b529eb47e87c8d8057096 | [
"MIT"
] | 152 | 2015-01-04T00:58:08.000Z | 2022-02-02T00:11:58.000Z | experiments/texshare/main.py | ahmadm-atallah/adventures_in_opencl | c222d15c076ee3f5f81b529eb47e87c8d8057096 | [
"MIT"
] | 1 | 2017-09-21T13:36:15.000Z | 2017-09-21T13:36:15.000Z | experiments/texshare/main.py | ahmadm-atallah/adventures_in_opencl | c222d15c076ee3f5f81b529eb47e87c8d8057096 | [
"MIT"
] | 71 | 2015-02-11T17:12:09.000Z | 2021-12-06T14:05:28.000Z | #Port from Adventures in OpenCL Part2 to PyOpenCL
# http://enja.org/2010/08/27/adventures-in-opencl-part-2-particles-with-opengl/
#Author: Ian Johnson
#referenced:
# http://documen.tician.de/pyopencl/
# http://www.geometrian.com/Tutorials.php
from OpenGL.GL import *
from OpenGL.GLU import *
import pygame
from pygame.locals import *
#utility functions for drawing OpenGL stuff
import glutil as gl
#wrapper for numpy array that gives us float4 like behavior
from vector import Vec
import os, sys
from math import sqrt, sin, cos
# --- window / GL setup ---------------------------------------------------
pygame.init()
pygame.display.set_caption("PyOpenCL with PyOpenGL interop")
screen = (800, 600)
surface = pygame.display.set_mode(screen, OPENGL|DOUBLEBUF)

#should just have an interaction class for controlling the window
#global mouse_old, rotate, translate, mouse_down
# mouse / camera interaction state (mutated by get_input below)
mouse_down = False
mouse_old = Vec([0.,0.])
rotate = Vec([0., 0., 0.])
translate = Vec([0., 0., 0.])
initrans = Vec([0, 0, -2])  # initial camera translation (pull back on z)

gl.init(screen)

num = 20000  # particle count
#setup initial values of arrays
import numpy
pos = numpy.ndarray((num, 4), dtype=numpy.float32)  # positions (xyzw)
col = numpy.ndarray((num, 4), dtype=numpy.float32)  # colors (rgba)
vel = numpy.ndarray((num, 4), dtype=numpy.float32)  # velocities; w holds "life"

import random
random.seed()
# Seed the particles on a ring of random radius in the xy plane.
# (Python 2 code: xrange, and below, print statements.)
for i in xrange(0, num):
    rad = random.uniform(.2, .5);
    x = rad*sin(2*3.14 * i/num)
    z = 0.
    y = rad*cos(2*3.14 * i/num)
    pos[i,0] = x
    pos[i,1] = y
    pos[i,2] = z
    pos[i,3] = 1.
    col[i,0] = 0.
    col[i,1] = 0.
    col[i,2] = 1.
    col[i,3] = 1.
    life = random.random()
    vel[i,0] = x*2.
    vel[i,1] = y*2.
    vel[i,2] = 3.
    vel[i,3] = life

#print pos
#print col
#print vel

#for some reason trying to do this inside CL.loadData gives me errors on mac
from OpenGL.arrays import vbo
pos_vbo = vbo.VBO(data=pos, usage=GL_DYNAMIC_DRAW, target=GL_ARRAY_BUFFER)
pos_vbo.bind()
col_vbo = vbo.VBO(data=col, usage=GL_DYNAMIC_DRAW, target=GL_ARRAY_BUFFER)
col_vbo.bind()

import part2
example = part2.part2CL("part2.cl")  # OpenCL kernel wrapper (project module)
example.loadData(pos_vbo, col_vbo, vel)
#print example.pos_vbo.data
def get_input():
    """Process pygame events (quit, mouse drag, WASD) and rebuild the
    modelview matrix for this frame from the accumulated rotation and
    translation.
    """
    global mouse_down, mouse_old, translate, rotate
    key = pygame.key.get_pressed()
    #print key
    trans = 2.0
    for event in pygame.event.get():
        if event.type == QUIT or key[K_ESCAPE] or key[K_q]:
            print "quit!"
            pygame.quit(); sys.exit()
        elif event.type == MOUSEBUTTONDOWN:
            mouse_down = True
            mouse_old = Vec([event.pos[0]*1., event.pos[1]*1.])
        elif event.type == MOUSEMOTION:
            if(mouse_down):
                m = Vec([event.pos[0]*1., event.pos[1]*1.])
                dx = m.x - mouse_old.x
                dy = m.y - mouse_old.y
                button1, button2, button3 = pygame.mouse.get_pressed()
                if button1:
                    # left drag: rotate the view
                    rotate.x += dy * .2
                    rotate.y += dx * .2
                elif button3:
                    # right drag: move the camera along z
                    translate .z -= dy * .01
                mouse_old = m
            #print "rotate", rotate, "translate", translate
        elif event.type == MOUSEBUTTONUP:
            mouse_down = False
        # NOTE(review): WASD is only examined inside this event elif-chain,
        # i.e. only when some other event arrived in the same frame.
        elif key[K_w]:
            translate.z += .1*trans #y is z and z is y
        elif key[K_s]:
            translate.z -= .1*trans
        elif key[K_a]:
            translate.x += .1*trans
        elif key[K_d]:
            translate.x -= .1*trans

    # rebuild the modelview matrix from scratch each frame
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
    glMatrixMode(GL_MODELVIEW)
    glLoadIdentity()
    glTranslatef(initrans.x, initrans.y, initrans.z)
    glRotatef(rotate.x, 1, 0, 0)
    glRotatef(rotate.y, 0, 1, 0) #we switched around the axis so make this rotate_z
    glTranslatef(translate.x, translate.y, translate.z)
def draw():
    """Run the OpenCL kernel, then render the particle position/color VBOs
    as blended point sprites and flip the display buffer.
    """
    glClear(GL_COLOR_BUFFER_BIT|GL_DEPTH_BUFFER_BIT)
    example.execute()  # advance the particle system on the GPU
    #glColor3f(1,0,0)

    glEnable(GL_POINT_SMOOTH)
    glPointSize(2)
    glEnable(GL_BLEND)
    glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)

    # bind the CL-updated VBOs as the GL color / vertex sources
    example.col_vbo.bind()
    glColorPointer(4, GL_FLOAT, 0, example.col_vbo)
    example.pos_vbo.bind()
    glVertexPointer(4, GL_FLOAT, 0, example.pos_vbo)

    glEnableClientState(GL_VERTEX_ARRAY)
    glEnableClientState(GL_COLOR_ARRAY)
    glDrawArrays(GL_POINTS, 0, num)
    glDisableClientState(GL_COLOR_ARRAY)
    glDisableClientState(GL_VERTEX_ARRAY)
    glDisable(GL_BLEND)

    gl.draw_axes()
    pygame.display.flip()
def main():
    """Main loop: cap at 60 FPS, handle input, then render each frame."""
    clock = pygame.time.Clock()
    while True:
        # tick(60) caps the framerate; its return value (ms since the last
        # frame) is printed every frame (Python 2 print statement).
        print clock.tick(60)
        get_input()
        draw()

if __name__ == '__main__': main()
| 25.240223 | 83 | 0.631253 |
acef465d94f7bd3492fdfade8796f2ab78fc1710 | 3,305 | py | Python | project/lit_image_classifier.py | grok-phantom/deep-learning-project-template | 52e6b9082fee94be62976973578a6482b9696a41 | [
"Apache-2.0"
] | null | null | null | project/lit_image_classifier.py | grok-phantom/deep-learning-project-template | 52e6b9082fee94be62976973578a6482b9696a41 | [
"Apache-2.0"
] | null | null | null | project/lit_image_classifier.py | grok-phantom/deep-learning-project-template | 52e6b9082fee94be62976973578a6482b9696a41 | [
"Apache-2.0"
] | null | null | null | from argparse import ArgumentParser
import pytorch_lightning as pl
import torch
from torch.nn import functional as F
from torch.utils.data import DataLoader, random_split
from torchvision import transforms
from torchvision.datasets.mnist import MNIST
class Backbone(torch.nn.Module):
def __init__(self, hidden_dim=128):
super().__init__()
self.l1 = torch.nn.Linear(28 * 28, hidden_dim)
self.l2 = torch.nn.Linear(hidden_dim, 10)
def forward(self, x):
x = x.view(x.size(0), -1)
x = torch.relu(self.l1(x))
x = torch.relu(self.l2(x))
return x
class LitClassifier(pl.LightningModule):
    """LightningModule wrapping an arbitrary backbone for classification.

    Training, validation and test all compute a cross-entropy loss on
    (input, target) batches; the optimizer is Adam with a configurable
    learning rate.
    """

    def __init__(self, backbone, learning_rate=1e-3):
        super().__init__()
        # Records backbone and learning_rate into self.hparams.
        self.save_hyperparameters()
        self.backbone = backbone

    def forward(self, x):
        # use forward for inference/predictions
        return self.backbone(x)

    def training_step(self, batch, batch_idx):
        inputs, targets = batch
        loss = F.cross_entropy(self.backbone(inputs), targets)
        self.log('train_loss', loss, on_epoch=True)
        return loss

    def validation_step(self, batch, batch_idx):
        inputs, targets = batch
        loss = F.cross_entropy(self.backbone(inputs), targets)
        self.log('valid_loss', loss, on_step=True)

    def test_step(self, batch, batch_idx):
        inputs, targets = batch
        loss = F.cross_entropy(self.backbone(inputs), targets)
        self.log('test_loss', loss)

    def configure_optimizers(self):
        # self.hparams available because we called self.save_hyperparameters()
        return torch.optim.Adam(self.parameters(), lr=self.hparams.learning_rate)

    @staticmethod
    def add_model_specific_args(parent_parser):
        """Attach this model's CLI options to an existing parser."""
        parser = ArgumentParser(parents=[parent_parser], add_help=False)
        parser.add_argument('--learning_rate', type=float, default=0.0001)
        return parser
def cli_main():
    """Command-line entry point: train and test LitClassifier on MNIST."""
    pl.seed_everything(1234)

    # ------------
    # args
    # ------------
    parser = ArgumentParser()
    parser.add_argument('--batch_size', default=32, type=int)
    parser.add_argument('--hidden_dim', type=int, default=128)
    parser = pl.Trainer.add_argparse_args(parser)
    parser = LitClassifier.add_model_specific_args(parser)
    args = parser.parse_args()

    # ------------
    # data
    # ------------
    # NOTE: downloads MNIST into the current working directory ('').
    dataset = MNIST('', train=True, download=True, transform=transforms.ToTensor())
    mnist_test = MNIST('', train=False, download=True, transform=transforms.ToTensor())
    # 55000/5000 train/validation split of the 60k training images.
    mnist_train, mnist_val = random_split(dataset, [55000, 5000])

    train_loader = DataLoader(mnist_train, batch_size=args.batch_size)
    val_loader = DataLoader(mnist_val, batch_size=args.batch_size)
    test_loader = DataLoader(mnist_test, batch_size=args.batch_size)

    # ------------
    # model
    # ------------
    model = LitClassifier(Backbone(hidden_dim=args.hidden_dim), args.learning_rate)

    # ------------
    # training
    # ------------
    trainer = pl.Trainer.from_argparse_args(args)
    trainer.fit(model, train_loader, val_loader)

    # ------------
    # testing
    # ------------
    result = trainer.test(test_dataloaders=test_loader)
    print(result)


if __name__ == '__main__':
    cli_main()
| 30.321101 | 87 | 0.64236 |
acef469228472889e1b50abe70074c467bfe52e9 | 1,408 | py | Python | airflow/migrations/versions/1507a7289a2f_create_is_encrypted.py | rahul342/airflow | 2107dc97ca0b17131ad5cbda6c91301acf5a6079 | [
"Apache-2.0"
] | 1 | 2021-07-07T17:15:16.000Z | 2021-07-07T17:15:16.000Z | airflow/migrations/versions/1507a7289a2f_create_is_encrypted.py | rahul342/airflow | 2107dc97ca0b17131ad5cbda6c91301acf5a6079 | [
"Apache-2.0"
] | 1 | 2020-11-17T18:49:50.000Z | 2020-11-17T18:49:50.000Z | airflow/migrations/versions/1507a7289a2f_create_is_encrypted.py | rahul342/airflow | 2107dc97ca0b17131ad5cbda6c91301acf5a6079 | [
"Apache-2.0"
] | 2 | 2015-12-22T15:48:28.000Z | 2016-01-20T22:11:39.000Z | """create is_encrypted
Revision ID: 1507a7289a2f
Revises: e3a246e0dc1
Create Date: 2015-08-18 18:57:51.927315
"""
# revision identifiers, used by Alembic.
revision = '1507a7289a2f'
down_revision = 'e3a246e0dc1'
branch_labels = None
depends_on = None

from alembic import op
import sqlalchemy as sa
from sqlalchemy.engine.reflection import Inspector

from airflow import settings

# Lightweight stand-in for the 'connection' table, declaring only the
# columns this migration touches (used for the backfill UPDATE below).
connectionhelper = sa.Table(
    'connection',
    sa.MetaData(),
    sa.Column('id', sa.Integer, primary_key=True),
    sa.Column('is_encrypted')
)
def upgrade():
    """Add the is_encrypted column to 'connection', skipping if present."""
    # first check if the user already has this done. This should only be
    # true for users who are upgrading from a previous version of Airflow
    # that predates Alembic integration
    inspector = Inspector.from_engine(settings.engine)

    # this will only be true if 'connection' already exists in the db,
    # but not if alembic created it in a previous migration
    if 'connection' in inspector.get_table_names():
        col_names = [c['name'] for c in inspector.get_columns('connection')]
        if 'is_encrypted' in col_names:
            return

    op.add_column(
        'connection',
        sa.Column('is_encrypted', sa.Boolean, unique=False, default=False))

    # `default=` above is a client-side default only, so backfill the
    # existing rows explicitly.
    conn = op.get_bind()
    conn.execute(
        connectionhelper.update().values(is_encrypted=False)
    )
def downgrade():
    """Drop the is_encrypted column added by upgrade()."""
    op.drop_column('connection', 'is_encrypted')
| 26.074074 | 76 | 0.708097 |
acef47036d00cad96bf1bf491eb6765c9c354de5 | 321 | py | Python | tests/extmod/uctypes_native_float.py | learnforpractice/micropython-cpp | 004bc8382f74899e7b876cc29bfa6a9cc976ba10 | [
"MIT"
] | 692 | 2016-12-19T23:25:35.000Z | 2022-03-31T14:20:48.000Z | tests/extmod/uctypes_native_float.py | learnforpractice/micropython-cpp | 004bc8382f74899e7b876cc29bfa6a9cc976ba10 | [
"MIT"
] | 509 | 2017-03-28T19:37:18.000Z | 2022-03-31T20:31:43.000Z | tests/extmod/uctypes_native_float.py | learnforpractice/micropython-cpp | 004bc8382f74899e7b876cc29bfa6a9cc976ba10 | [
"MIT"
] | 228 | 2016-12-19T05:03:30.000Z | 2022-03-22T18:13:00.000Z | try:
import uctypes
except ImportError:
print("SKIP")
raise SystemExit
# Struct layout: a 32-bit and a 64-bit float field, both at byte offset 0
# (they overlay the same backing bytes, union-style).
desc = {
    "f32": uctypes.FLOAT32 | 0,
    "f64": uctypes.FLOAT64 | 0,
}
# 8-byte backing buffer — large enough for the widest (64-bit) field.
data = bytearray(8)
# Map the descriptor onto the buffer using native (machine) representation.
S = uctypes.struct(uctypes.addressof(data), desc, uctypes.NATIVE)
# Round-trip the same value through each float width and print it to
# 4 decimal places for comparison against the expected output.
S.f32 = 12.34
print('%.4f' % S.f32)
S.f64 = 12.34
print('%.4f' % S.f64)
acef4789bb462d600d398a08a4daaccdd01759a3 | 79,365 | py | Python | icon.py | ygwoods/pythontree | df3f0b28be6b90f4bbc94d3d69437dfa6a01b1cc | [
"MIT"
] | null | null | null | icon.py | ygwoods/pythontree | df3f0b28be6b90f4bbc94d3d69437dfa6a01b1cc | [
"MIT"
] | null | null | null | icon.py | ygwoods/pythontree | df3f0b28be6b90f4bbc94d3d69437dfa6a01b1cc | [
"MIT"
] | null | null | null | import base64
icon = b'AAABAAEAcIAAAAEAIAAo6AAAFgAAACgAAABwAAAAAAEAAAEAIAAAAAAAAOAAABAnAAAQJwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAKq1/wCqvf8Aqrz/AKy//wCjt/4Ak6r7AJCn+gCPpvoAj6b6AI+m+gCPpvoAj6b6AI+m+gCPpvoAj6b6AI+m+gCPpvoAj6b6AI+m+gCPpvoAj6b6AI+m+gCPpvoAj6b6AI+m+gCPpvoAj6b6AI+m+gCPpvoAj6b6AI+m+gCPpvoAj6b6AI+m+gCPpvoAj6b6AI+m+gCPpvoAj6b6AI+m+gCPpvoAj6b6AI+m+gCPpvoAj6b6AI+m+gCPpvoAj6b6AI+m+gCPpvoAj6b6AJCn+gCSqfsAlqz7AJWr+wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAqr//AKiq/wCqvv8Aqr7/AKq+/wCsv/8Go7f+F5Oq+yqQp/owj6b6MI+m+jCPpvowj6b6MI+m+jCPpvowj6b6MI+m+jCPpvowj6b6MI+m+jCPpvowj6b6MI+m+jCPpvowj6b6MI+m+jCPpvowj6b6MI+m+jCPpvowj6b6MI+m+jCPpvowj6b6MI+m+jCPpvowj6b6MI+m+jCPpvowj6b6MI+m+jCPpvowj6b6MI+m+jCPpvowj6b6MI+m+jCPpvowj6b6MI+m+jCPpvowj6b6MI+m+jCQp/ovkqn7GIae+QCTqvsAk6r7AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACqv/8Aqb3/AKq+/wCqvv8Aqr7/D6q+/0Kqvv+Bqr7/taS5/taRqPrpj6b67o+m+u6Ppvr
uj6b67o+m+u6Ppvruj6b67o+m+u6Ppvruj6b67o+m+u6Ppvruj6b67o+m+u6Ppvruj6b67o+m+u6Ppvruj6b67o+m+u6Ppvruj6b67o+m+u6Ppvruj6b67o+m+u6Ppvruj6b67o+m+u6Ppvruj6b67o+m+u6Ppvruj6b67o+m+u6Ppvruj6b67o+m+u6Ppvruj6b67o+m+u6Ppvruj6b67o+m+u6Ppvruj6b67pCn+tSRp/pml6z7BJSq+wCar/wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAKq+/wCqvv8Aqr7/AKq+/wCqvv8Vqr7/Z6q+/8Kqvv/0qr7//6q+//+kuf7/kaf6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b69JGo+lOPpvoAuMf/AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAqb//AKq+/wCqvv8Aqr7/AKq+/wuqvv9Yqr7/xqq+//2qvv//qr7//6q+//+qvv//pLn+/5Gn+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Qp/qql678A5Sr+wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAqr7/AKq+/wCqvv8Aqr7/A6q+/0Gqvv+xqr7/+Kq+//+qvv//qr7//6q+//+qvv//qr7//6S5/v+Rp/r/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr
/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/kKf6wJSr+wqUq/sAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACqvv8Aqr//AKq+/wCrv/8Aqr7/Lqq+/5mqvv/vqr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+kuf7/kaf6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/5Cn+sGVrPsLlaz7AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAKm//wCrvv8Aqr7/AKq+/wCqvv8dqr7/gKq+/+Kqvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//pLn+/5Gn+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Qp/rBlaz7C5Ws+wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAqbz/AKq+/wCqvv8Aqr7/AKq+/xGqvv9nqr7/0qq+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6S5/v+Rp/r/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/kKf6wZWs+wuVrPsAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACmtv8Aqr7/AKq+/wCqvv8Aqr7/CKq+/0+qvv+/qr7//Kq+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+kuf7/kaf6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/5Cn+sGVrPsLlaz7AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACqvv8Aq77/AKq+/wCqvv8Cqr7/Oqq+/6iqvv/1qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//pLn+/5Gn+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Qp/rBlaz7C5Ws+wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAKq+/wCswP8Aqr7/AKq+/wCqvv8nqr7/kKq+/+uqvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6S5/v+Rp/r/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/kKf6wZWs+wuVrPsAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAqb//AKq+/wCqvv8Aqr7/AKq+/xiqvv93qr7/3aq+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+kuf7/kaf6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr
/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/5Cn+sGVrPsLlaz7AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACrvv8Aqr7/AKq+/wCqvv8Aqr7/Daq+/16qvv/Lqr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//pLn+/5Gn+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Qp/rBlaz7C5Ws+wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACqvv8Aqr7/AKq+/wCqvv8Fqr7/SKq+/7eqvv/6qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6e8//+juP7/n7T9/5yx/P+asPz/mq/8/5qv/P+ar/z/mq/8/5qv/P+ar/z/mq/8/5qv/P+ar/z/mrD8/5Wr+P+HnOv/hprq/4aa6v+Gmur/hprq/4aa6v+Gmur/hprq/4aa6v+Gmur/h5zs/4yi9P+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/kKf6wZWs+wuVrPsAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAV1dXAKq+/wCpv/8Aqr7/AKG9/wCqvv8zqr7/oKq+//Kqvv//qr7//6q+//+qvv//qr7//6q+//+pvf//o7f+/5qw/P+Tqvv/kKf6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+LovT/gZTh/4CT3/+Ak+D/gJPg/4CT4P+Ak+D/gJPg/4CT4P+Ak+D/gJPg/4CT4P+BleL/iqDx/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr
/j6b6/5Cn+sGVrPsLlaz7AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAqb//AKu9/wCqvv8Aqr7/AKq+/yKqvv+Hqr7/5qq+//+qvv//qr7//6q+//+qvv//qr7//6q+//+juP7/l637/5Cn+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/jKL1/4GU4v+Ak+D/gJPg/4CT4P+Ak+D/gJPg/4CT4P+Ak+D/gJPg/4CT4P+Ak+D/gJPg/4OX5f+OpPj/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Qp/rBlaz7C5Ws+wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACpvP8Aqr7/AKq+/wCqvv8Aqr7/FKq+/26qvv/Xqr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6i8//+bsfz/kaf6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4yi9f+BlOL/gJPg/4CT4P+Ak+D/gJPg/4CT4P+Ak+D/gJPg/4CT4P+Ak+D/gJPg/4CT4P+BlOH/jKL0/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/kKf6wZWs+wuVrPsAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAKi5/wCqvv8Aqr7/AKq+/wCqvf8Kqr7/Vqq+/8Sqvv/9qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6a6/v+Wrfv/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+MovX/gZTi/4CT4P+Ak+D/gJPg/4CT4P+Ak+D/gJPg/4CT4P+Ak+D/gJPg/4CT4P+Ak+D/gZXi/4yj9v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/5Cn+sGVrPsLlaz7AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAKq+/wCqvv8Aqr7/AKq+/wOqvv9Aqr7/r6q+//eqvv//qr7//6q+//+qvv//qr7//6q+//+qvv/
/qr7//6W6/v+Vq/v/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/jKL1/4GU4f+Ak+D/gJPg/4CT4P+Ak+D/gJPg/4CT4P+Ak+D/gJPg/4CT4P+Ak+D/gJPg/4Wa6f+Ppfn/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Qp/rBlaz7C5Ws+wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAqr//AKjB/wCqvv8Aqb//AKq+/yyqvv+Xqr7/7qq+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6e7/v+Vq/v/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Qp/r/kKf6/5Cn+v+Qp/r/kKf6/5Cn+v+Qp/r/kKf6/5Cn+v+Qp/r/kKf7/42j9f+BleP/gZTh/4GU4f+BlOH/gZTh/4GU4f+BlOH/gZTh/4GU4f+BlOH/gZXi/4Wa6f+NpPf/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/kKf6wZWs+wuVrPsAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACpv/8Aqr7/AKq+/wCqvv8Aqr7/HKq+/36qvv/hqr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6m9//+Yrvz/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Qpvr/lKv7/5ux/P+gtf3/o7j+/6S4/v+kuP7/pLj+/6S4/v+kuP7/pLj+/6S4/v+kuP7/pLj+/6S5/v+ftPz/jaP0/4uh9P+LovT/i6L0/4ui9P+LovT/i6L0/4ui9P+LovT/i6L0/4yj9v+Ppfn/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/5Cn+sGVrPsLlaz7AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAKq9/wCqvv8Aqr7/AKq+/wCqvv8Qqr7/Zaq+/9Gqvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+ftP3/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Uq/v/oLX9/6i8//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+rv///pbn+/5Go+/+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr
/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Qp/rBlaz7C5Ws+wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAKq+/wCqvv8Aqr7/AKq+/weqvv9Oqr7/vaq+//uqvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+nu/7/k6r7/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Zr/z/p7v+/6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6S5/v+Rp/r/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/kKf6wZWs+wuVrPsAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAqr7/AKq+/wCqvv8Aqb7/Aaq+/ziqvv+nqr7/9Kq+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//nbP9/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+asPz/qb3//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+kuf7/kaf6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/5Cn+sGVrPsLlaz7AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAqr7/AKq+/wCqvv8Aqr7/H6q+/42qvv/qqr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qLz//5Sr+/+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Xrfv/qLz//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//pLn+/5Gn+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Qp/rBlaz7C5Ws+wAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAqr7/AKq+/wCqvv8Aqr7/QKq+/82qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6O3/v+Qp/r/j6b6/4+m+v+Ppvr/j6b6/4+m+v+RqPr/pLn+/6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6S5/v+Rp/r/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/kKf6wZWs+wuVrPsAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAArLv/AKy1/wCqvv8Aqr7/Q6q+/9+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+csvz/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/ma/8/6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+kuf7/kaf6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/5Cn+sGVrPsLlaz7AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAKq+/wCqvv8Aqr7/Jqq+/9Kqvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//mK78/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/kKf6/6K2/f+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//pLn+/5Gn+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Qp/rBlaz7C5Ws+wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAKe6/wCqvv8Aqr7/BKq+/5iqvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qL3//5Wr+/+Ppvr/j6b6/4+m+v+Ppvr/j6b6/5Kp+/+mu/7
/qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6S5/v+Rp/r/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/kKf6wZWs+wuVrPsAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACrvv8Aqr7/AKq+/zaqvv/sqr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6e8//+Tqvv/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Uqvv/qLz//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+kuf7/kaf6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/5Cn+sGVrPsLlaz7AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAq77/AKq+/wCqvv+Dqr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+nu/7/k6n7/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/lav7/6m9//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//pLn+/5Gn+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Qp/rBlaz7C5Ws+wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAKq+/wCqvv8Kqr7/vqq+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qLz//5Oq+/+Ppvr/j6b6/4+m+v+Ppvr/j6b6/5Sq+/+ovP//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6S5/v+Rp/r/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr
/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/kKf6wZWs+wuVrPsAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACqvv8Aqr7/HKq+/9yqvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6m9//+Vq/v/j6b6/4+m+v+Ppvr/j6b6/4+m+v+RqPr/pbr+/6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+kuf7/kaf6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/5Cn+sGVrPsLlaz7AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAqr7/AKq+/yiqvv/nqr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//mK78/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/6C1/f+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//pLn+/5Gn+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Qp/rBlaz7C5Ws+wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAKq+/wCqvv8mqr7/5qq+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//52z/f+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Yrvz/qb3//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6S5/v+Rp/r/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/kKf6wZWs+wuVrPsAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAACqvv8Aqr7/GKq+/9eqvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+juP7/kKf6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr+kaj6xqe7/s2qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+kuf7/kaf6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/5Cn+sGVrPsLlaz7AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAqr7/AKq+/waqvv+yqr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qL3//5Ws+/+Ppvr/j6b6/4+m+v+QpvrZkKf6cY+m+hOqvv84qr7/4qq+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//pLn+/5Gn+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Qp/rBlKv7C5Wr+wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAKe+/wCqvv8Aqr7/cKq+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+ftP3/j6b6/4+m+ueQp/qJkqn7I4uj+QCUqvsAqr//AKq+/1Sqvv/oqr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6S5/v+Rp/r/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/kKb6upOq+weTqvsAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACpvv8Aqr7/AKq+/yWqvv/eqr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//p7v+8JWr+6CQp/o00uP/AJGo+gCbsfwAorb9AKq+/wCrvv8Aqr7/Saq+/9Cqvv/
/qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+kuf7/kaf6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/5Cn+oeAmPYAlq38AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAKq+/wCqvf8Aqr7/eqq+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv/4qr7/saq+/0Kyxf8ElKr7AJas+wCUq/sAAAAAAAAAAACqvv8Aqr7/AKq+/wCqvv8fqr7/g6W7/N6mvP3/rL///63A//+twP//rcD//63A//+rv///qr7//6q+//+qvv//qr7//6q+//+qvv//pLn+/5Gn+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/5Cn+saSqPohkaj6AJat/AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACqvv8Aqr7/AKq+/xOqvv+1qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//6qvv/Gqr7/WKq+/wuqvv8Aprr+AKq+/wCZsP0AAAAAAAAAAAAAAAAAAAAAAKq//wCou/8Aqr7/AC1urQA5drVmP3q5/lqLyv9ok9P/a5DM/2qPyv9qj8r/gaDezau//5eqvv+Yqr7/mKq+/5iqvv+Yqr7/mKS5/piRp/qYj6b6nI+m+t2Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/5Cn+suRqPstj6b6AIuj+QChsf4AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAqqr/AKq+/wCqvv8Aqr7/Jqq+/8Kqvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+/9iqvv9wqr7/Faq+/wCqvv8Aqr7/AKq9/wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAn7//AKq+/wATXZsADFmXWg1Zl/0NWpj/DlmW/wxMgf8LSXz/C0l8/wpKfYM8a6EAqb7+AKq+/wCqvv8Aqr7/AKq+/wCkuf4Akaj6AIqg+QCRqPs1kKf6wo+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr
/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/5Cn+sOSqPsskKf6AP///wCXrfsAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACsvv8Aqr7/AKq+/wCqvv8hqr7/pqq+//mqvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7//6q+//+qvv//qr7/5qq+/4iqvv8jqr7/AKq+/wCqvv8Aqr7/AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAD1uZAA9bmVsPW5n9D1uZ/w9Zl/8MTYL/DEp9/wxKff8MS36DDUd4AHGX1QCqvv8Aqr7/AKq+/wCqvv8ApLn+AIyj+QCZsPwAkKf6AJKp+xyQp/qgj6b6+4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/pCn+qqSqfsgkKf6AEdg5gCVrfwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAKq+/wCqvv8Aqr7/AKq+/wuqvv9dqr7/wKq+//Oqvv//qr7//6q+//+qvv//qr7//6q+//6qvv/kqr7/mqq+/zOsvP8Aqr7/AKu+/wCqvv8AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA9bmQAPW5lbD1uZ/Q9bmf8PWZf/DE2C/wxKff8MSn3/DEt+gwxHeAAMUYkAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAmaz8AI2k+gCRqPsAlav7CpGn+nSPpvrpj6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b67pCn+oCTqvsPkaj6AIui+QCYrfwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAqr//AKq+/wCqvv8Aqr7/AKq+/w6qvv9Aqr7/d6q+/5yqvv+tqr7/qaq+/5Cqvv9iqr7/Kaq9/wOqvv8Aqr7/AKq+/wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAPW5kAD1uZWw9bmf0PW5n/D1mX/wxNgv8MSn3/DEp9/wxLfoMMR3gADFGJAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACZsPwAkqj7AJOp+wBiefEAkaj6PpCn+rqPpvr9j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/kKf6w5Go+kiitv8Bkqn7AJGn+gCZr/wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAqr7/AKnA/wCqvv8Aqr7/AKq+/wCwwv8Aqr7/A6q+/wKqvv8Aqr7/AKq+/wCqvv8Aqr7/AKO5/wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAD1uZAA9bmVsPW5n9D1uZ/w9Zl/8MTYL/DEp9/wxKff8MS36DDEd4AAxRiQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACUq/sAlq37AJCn+gCTqvsQkaf6a5Cn+tSPpvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Qp/rbkKf6dpKp+xWPpvoAlav7AJOq+wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAKm+/wCpvf8Aqr7/AKq+/wCqvv8Aqr7/AKq+/wCqvv8AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA9bmQAPW5lbD1uZ/Q9bmf8PWZf/DE2C/wxKff8MSn3/DEt+gwxHeAAMUYkAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAJeu/ACPpvoAk6n7AIyj+gCSqfsekKf6dpCn+tGPpvr8j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/ZCn+taQp/p/kqn6JIig+QCSqfsAjaT5AJet/AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAPW5kAD1uZWw9bmf0PW5n/D1mX/wxNgv8MSn3/DEp9/wxLfoMMR3gADFGJAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAJWs+wCetvwAkqn7AIqi+QCTqfsYkaf6XZCn+q+Ppvrmj6b6/o+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+PpvrpkKf6s5Gn+mSSqfsdh574AJKo+gCZr/wAlaz7AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADe2gAA39YAAN/XAADf1wAAc5ZQAAhXnlgPW5n9D1uZ/w9Zl/8MTYL
/DEp9/wxKff8JSYCBfpIyAM/IAADRygAAxb8AANTNAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAJas+wCcsfwAkqn7AI+m+gCWrPsGkqn7KpGn+mSQp/qhkKf6z4+m+uyPpvr7j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+v+Ppvr/j6b6/4+m+vqPpvrskKf60JCn+qORp/pokaj7LZWs+wiPpfoAkqn7AJiv/ACVrPsAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAN/XAADg2wAA39cAAN/XAADf2AAE39cAH+zeAEdokFefEFyY/w9bmf8PWZf/DE2C/wxKff8LSX7/RnBZu9bNAFTQyQAp0ssACM3GAADQyAAA08wAANLLAADPzwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAJiu/ACPp/oAlKv7AJGo+gCPpvoAorb+ApOq+xORqPsvkaj6UJCn+m+Qp/qKkKf6m5Cn+qqQp/qxkKf6sZCn+qmQp/qZkKf6iZCn+m6RqPpPkaj6L5Oq+xSgtP0Cj6b6AJGo+gCUqvsAjaT5AJau/AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA4NgAAN/XAADf1wAA39cAAN/XABXf1wBX39cApt/XAN3j2QD4las2/xBcmP8PW5n/D1mX/wxNgv8MSn3/CUh//2uHQf/QyQD8zscA58/IALbPyABq0MkAIMe/AADQyQAA//8AANLLAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACYrvwAk6j6AJas+wCTqvsAkaj6AJCn+gCOpfoAiqH5AGeE8wCgtP0DmrH8BJqx/ASgtf0DeZP2AIqh+QCOpvoAkKf6AJGo+gCTqvsAlq37AJKp+gCZrfwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADe1gAA3L8AAN/XAADf1wAA39cAAN/XAADf1wAA39cAAN/XAADf1wAA39cAAN/XAADf1wAA39cAAN/XAADf1wAA39cAEd/XAGjf1wDK39cA+t/XAP/f1wD/4tkA/5WrN/8QXJj/D1uZ/w9Zl/8MTYL/DEp9/wlIf/9qh0H/0MgA/87HAP/OxwD/zscA/87HANrPyAB/0MkAHs3GAADRygAA2tUAANHKAADQyQAAz8gAAM/IAADOxwAAzscAAM7HAADPyAAAz8gAANDJAADRygAA1s4AANPMAAAAAAAAAAAAAAAAAACasfwAmrL8AI+n+gCbsPwAmq/8AJeu+wCXrvsAma/8AJuw/ACRpvs
AnLH8AJuw/AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA39gAAN/XAADf1wAA39cAAN/XAADf1wAE39cAF9/XAC/f1wBG39cAVt/XAF7f1wBc39cAUN/XADzf1wAj39cADN/XAADd1QAA39cAQt/XAL3f1wD839cA/9/XAP/f1wD/39cA/+LZAP+Vqzf/EFyY/w9bmf8PWZf/DE2C/wxKff8JSH//aodB/9DIAP/OxwD/zscA/87HAP/OxwD/zscA/87HANLPyABa08wABcnCAADRygAM0MkAI8/IAD3PyABRz8gAYM/IAGbPyABdz8gATs/IADfQyQAe0soACMzFAADPyAAA0ckAALy4AADUzAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADe1gAA4NcAAN/XAADf1wAA39cABt/XADDf1wBw39cArN/XANXf1wDs39cA+N/XAP3f1wD+39cA/t/XAPvf1wDz39cA49/XAMPf1wCN39cAiN/XAOrf1wD/39cA/9/XAP/f1wD/39cA/9/XAP/i2QD/las3/xBcmP8PW5n/D1mX/wxNgv8MSn3/CUh//2qHQf/QyAD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA9c/IAJzPyACLzscAws7HAOPOxwD0zscA/M7HAP/OxwD/zscA/s7HAPvOxwDxzscA3s7HALnPyACAz8gAPtHKAA3NxgAA0MkAAMG/AADTywAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAN7TAADf1wAA39cAAN/XAADf1wAL39cAT9/XAKvf1wDq39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/4tkA/5WrN/8QXJj/D1uZ/w9Zl/8MTYL/DEp9/wlIf/9qh0H/0MgA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAPLPxwC/z8gAZdDJABXNxwAA0coAANDJAADZyQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAANzXAADf1wA
A39cAAN/XAALf1wBC39cAtN/XAPff1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/+LZAP+Vqzf/EFyY/w9bmf8PWZf/DE2C/wxKff8JSH//aodB/9DIAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP3OxwDIz8gAWdLLAAjQyAAAwL0AANPLAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAN7XAADm1gAA39cAAN/XABTf1wCG39cA7t/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/i2QD/las3/xBcmP8PW5n/D1mX/wxNgv8MSn3/CUh//2qHQf/QyAD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAPfPyACi0MkAJM7HAADTzAAA0csAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAN/XAADf1wAA39cAAN/XACnf1wC239cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/4tkA/5WrN/8QXJj/D1uZ/w9Zl/8MTYL/DEp9/wlIf/9qh0H/0MgA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAM/QyABBy8QAANLKAADRygAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAN/XAADg1wAA39cAAN/XADXf1wDO39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/+LZAP+Vqzf/EFyY/w9bmf8PWZf/DE2C/wxKff8JSH//aodB/9DIAP/OxwD
/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA48/IAFHJwgAA0ssAANLKAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAN7VAADf1wAA39cAAN/XADHf1wDS39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/i2QD/las3/xBcmP8PW5n/D1mX/wxNgv8MSn3/CUh//2qHQf/QyAD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwDnz8gATczGAADUzQAA084AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADf1wAA39cAAN/XACDf1wDF39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/4tkA/5WrN/8QXJj/D1uZ/w9Zl/8MTYL/DEp9/wlIf/9qh0H/0MgA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAN7PyAA4zscAAMzFAADbyAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADf1wAA39cAAN/XAArf1wCi39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/+PZAP+WrDX/EFyY/w9bmf8PWZf/DE2C/wxKff8JSH//a4hA/9HJAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscAw9DJABnQyQAA0coAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADf1QAA4NcAAN/XAADf1wBm39cA+t/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/+DYAP/c1QL/cpZQ/w5bmf8PW5n/D1mX/wxNgv8MSn3/Ckl+/0x0Vf/DwAf/0MgA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/PyACL2M8AAtLKAADVzgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA39cAAN/XAADf1wAj39cA2N/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/+HYAP/X0gb/dZhO/xdgk/8OW5n/D1uZ/w9Zl/8MTYL/DEp9/wxKff8OS3z/U3hQ/7y8DP/QyAD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA7c/IAD/PyAAAz8gAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAN/XAADf1gAA39cAhd/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/+HYAP/Qzgv/ZI1b/xJdl/8OWpr/D1uZ/w9bmf8PWZf/DE2C/wxKff8MSn3/DEp9/wtJfv9Eblr/srUS/9DJAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/PyACr0ssAB9HKAADY0wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAN/XAADf1wAA39cAI9/XAN3f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD
/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/+HYAP/HyBL/U4Nn/w9bmf8OW5n/D1uZ/w9bmf8PW5n/D1mX/wxNgv8MSn3/DEp9/wxKff8MSn3/Ckl+/zZlY/+nrhr/0MgA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA8c/IAEDPyAAA0coAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADV0gAA39cAAN/XAG7f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/+LZAP+7whr/Q3pz/w1amv8PW5n/D1uZ/w9bmf8PW5n/D1uZ/w9Zl/8MTYL/DEp9/wxKff8MSn3/DEp9/wxKff8KSH//Kl5q/5mlI//PyAD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/PyACV8OwAANPMAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA39cAAN/XAAnf1wC539cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/+HYAP+uuiT/NXJ9/w1am/8PW5n/D1uZ/w9bmf8PW5n/D1uZ/w9bmf8PWZf/DE2C/wxKff8MSn3/DEp9/wxKff8MSn3/DEp9/wpJfv8gV3H/ipws/87HAP/PxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA2NDJABvQyQAA080AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA4NoAAN/XAADf1wAs39cA6N/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/4NcA/+DXAP+fsS//KWuG/wxam/8PW5n/D1uZ/w9bmf8PW5n/D1uZ/w9bmf8PW5n/D1mX/wxNgv8MSn3/DEp9/wxKff8MSn3/DEp9/wxKff8MSn3/Ckl+/xhSdv96kjf
/y8UC/8/IAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAPjPyABMz8gAANLMAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAANzXAADf1wAA39cAWt/XAP3f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/4NgA/93WAf+Opzz/IGWN/w1am/8PW5n/D1uZ/w9bmf8PW5n/D1uZ/w9bmf8PW5n/D1uZ/w9Zl/8MTYL/DEp9/wxKff8MSn3/DEp9/wxKff8MSn3/DEp9/wxKff8LSX7/Ek55/2qHQf/Gwgb/0MgA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/z8gAgczFAADX0AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADc1wAA39cAAN/XAIXf1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/4NgA/9nTBf98nEn/GWGS/w1amv8PW5n/D1uZ/w9bmf8PW5n/D1uZ/w9bmf8OWpr/D1uZ/w9bmf8PWZf/DE2C/wxKff8MSn3/C0l+/wxKff8MSn3/DEp9/wxKff8MSn3/DEp9/wtJfv8OS3z/WXxM/7++Cv/QyAD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/8/IAKzUzQAE0ssAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA39cAAN7YAALf1wCm39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/4dgA/9PQCf9qkVb/E12W/w5amv8PW5n/D1uZ/w9bmf8PW5n/D1uZ/w5bmv8TXZb/K2yF/xFcmP8PW5n/D1mX/wxNgv8MSn3/DEp9/yBXcf8STnn/C0l+/wxKff8MSn3/DEp9/wxKff8MSn3/C0p9/wxKff9Kclb/t7gP/9DIAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwDJ0coAD9HKAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAN/XAADf1wAI39cAu9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/4dgA/8vLD/9Zh2L/EFuY/w5bmv8PW5n/D1uZ/w9bmf8PW5n/D1uZ/w5amv8UXpb/cJVS/4ijQP8RXJj/D1uZ/w9Zl/8MTYL/DEp9/wpJfv9jg0b/dI47/xVQeP8KSX7/DEp9/wxKff8MSn3/DEp9/wxKff8MSn3/Ckl+/ztpX/+ssRf/0MkA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA2dDJABnQyQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADf1wAA39cADd/XAMXf1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/4tkA/8DFF/9Jfm7/Dlqa/w9bmf8PW5n/D1uZ/w9bmf8PW5n/D1uZ/w1amv8ZYZH/fp1H/93WAv+WrDb/EFyY/w9bmf8PWZf/DE2C/wxKff8JSH//a4hB/8/HAP+ClzH/HFRz/wpJfv8MSn3/DEp9/wxKff8MSn3/DEp9/wxKff8KSH//LmBn/5+pH//QyAD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAOLQyQAh0MkAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA39cAAN/XAA7f1wDH39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/4dgA/7S9IP86dXn/DVqb/w9bmf8PW5n/D1uZ/w9bmf8PW5n/D1uZ/w1am/8hZoz/kKg6/93WAf/j2gD/las3/xBcmP8PW5n/D1mX/wxNgv8MSn3/CUh//2qHQf/RyQD/z8cA/5KhJ/8lWm7/Ckh//wxKff8MSn3/DEp9/wxKff8MSn3/DEp9/wpIfv8kWW7/kaAo/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwDk0MkAJNDJAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAN/XAADf1wAL39cAwd/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD
/39cA/9/XAP/f1wD/4dgA/7rBG/8ycID/DFmb/w9bmf8PW5n/D1uZ/w9bmf8PW5n/D1uZ/wxZm/8rbIX/obIu/+DYAP/f1wD/4tkA/5WrN/8QXJj/D1uZ/w9Zl/8MTYL/DEp9/wlIf/9qh0H/0MgA/87HAP/QyAD/oKoe/zBhZ/8KSH//DEp9/wxKff8MSn3/DEp9/wxKff8MSn3/Ckl+/xxVc/+WoyX/0MgA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA3dDIAB3PyAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADf1wAA39cABd/XALHf1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP9okFf/C1mc/w9bmf8PW5n/D1uZ/w9bmf8PW5n/D1uZ/w1am/83c3z/sLsj/+HYAP/f1wD/39cA/+LZAP+Vqzf/EFyY/w9bmf8PWZf/DE2C/wxKff8JSH//aodB/9DIAP/OxwD/zscA/9DJAP+tshb/PGlf/wpJfv8MSn3/DEp9/wxKff8MSn3/DEp9/wxKff8ISH//Q25a/8fDBf/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HANLQyQAU0ckAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA39cAAN7XAADf1wCW39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/Z0wX/R3xw/wxZm/8PW5n/D1uZ/w9bmf8PW5n/D1uZ/w1amv9Fe3H/vcMZ/+LZAP/f1wD/39cA/9/XAP/i2QD/las3/xBcmP8PW5n/D1mX/wxNgv8MSn3/CUh//2qHQf/QyAD/zscA/87HAP/OxwD/0MgA/7e5D/9Lc1X/DEp9/wtKff8MSn3/DEp9/wxKff8MSn3/Ckl+/ydcbP+6ug3/z8gA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwC70coACNHKAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAN/XAADf1wAA39cAbt/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/3dYB/1uIYf8LWZz/D1uZ/w9bmf8PW5n/DluZ/w9bmf9VhGb/yMkR/+HYAP/f1wD/39cA/9/XAP/f1wD/4tkA/5WrN/8QXJj/D1uZ/w9Zl/8MTYL/DEp9/wlIf/9qh0H/0MgA/87HAP/OxwD/zscA/87HAP/QyAD/wL4J/1t9S/8OTHz/C0l+/wxKff8MSn3
/DEp9/wlIf/84ZmH/w8AH/8/HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/z8gAlrm2AADUywAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADf1wAA39cAAN/XAEDf1wD039cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/+HYAP+ntin/H2WN/wtZnP8NWpr/DFmb/xJdl/9mj1n/0c4K/+HYAP/f1wD/39cA/9/XAP/f1wD/39cA/+LZAP+Vqzf/EFyY/w9bmf8PWZf/DE2C/wxKff8JSH//aodB/9DIAP/OxwD/zscA/87HAP/OxwD/zscA/8/IAP/GwgX/a4hA/xJOef8JSH//C0l+/wlIf/8RTXr/gJUz/8/IAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/8/IAGTOxwAA1MsAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAN/XAADf1wAX39cA09/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/3tYB/52wMP9DenP/KGqH/zhze/+EoUP/19IG/+HYAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/i2QD/las3/xBcmP8PW5n/D1mX/wxNgv8MSn3/CUh//2qHQf/QyAD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/z8gA/8rFAv+ElzH/NWVj/yFYcP8yYmX/fJM1/8nEBP/PxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAOvQyQAw0MgAANXNAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADf1wAAxKMAAN/XAJPf1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/h2AD/1dEI/8bIEv/Qzgv/4NgA/+DXAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/4tkA/5WrN/8QXJj/D1uZ/w9Zl/8MTYL/DEp9/wlIf/9qh0H/0MgA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/z8gA/8G/Cf+0txH/wL4J/8/IAP/PxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/PyAC40ssACdHKAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA39cAAN/XAADf1wBD39cA89/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/+DXAP/h2AD/4NgA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/+LZAP+Vqzf/EFyY/w9bmf8PWZf/DE2C/wxKff8JSH//aodB/9DIAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/PyAD/0MgA/8/IAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD+z8gAZ83GAADHvwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAN7ZAADf1wAA39cACt/XALTf1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/i2QD/las3/xBcmP8PW5n/D1mX/wxNgv8MSn3/CUh//2qHQf/QyAD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA1NDJABvQyQAA1M0AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA39cAAN/XAADf1wBN39cA9N/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/49kA/5esNf8RXJj/D1uZ/w9Zl/8MTYL/DEp9/wlIf/9tiT//0ckA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/s/IAHLLxAAA1s8AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAN/XAADf1wAA39cAB9/XAKLf1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD
/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/+LZAP+zvSD/HWOP/w5amv8PWpf/DE2C/wxKff8OS3z/i5ws/9HJAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAMTQyQAV0MkAANLLAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA39cAAN/XAADf1wAq39cA2d/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/2tQD/2qRVv8QXJn/C1eZ/wpLhP8JSH7/R3FY/8G+Cf/PyAD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAO7PyABHzscAAMzFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAN7ZAADf1wAA39cAAN/XAFbf1wDx39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/+DYAP/U0Aj/hqJC/0h8bP88a2P/aodB/7q6Dv/PyAD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAPzPyAB529cAAdHKAADUzAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA39cAAN/XAADf1wAC39cAdt/XAPnf1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/4NgA/+HYAP/Y0gT/ycUF/8/HAP/PyAD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD
/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/PyACY0coACtDJAADQygAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADf1wAA39cAAN/XAAbf1wCA39cA+N/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/3tYA/9HKAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/PyACg0MkAEdDJAADQyAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAN/XAADf1wAA39cABt/XAHPf1wDv39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/97WAP/RygD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAPrPyACS0MkAEM/IAADPyAAA28wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA39cAAN/XAADf1gAB39cAUd/XANXf1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/e1gD/0coA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAObPyABu0ssACNDJAADPyAAA1c4AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADf1wAA39cAAN/XAADf1wAl39cAmd/XAPDf1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/3tYA/9HKAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA+c/IALLQyAA5wr0AANHJAADQyQAAz88AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAN/YAADf2wAA39cAAN/XAATf1wBD39cAqt/XAPbf1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/97WAP/RygD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD8zscAwM/IAFjRygAMz8gAANPLAADSygAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA3tcAAN/XAADf1wAA39cAAN/XAAzf1wC639cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/e1gD/0coA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA2NDJAB/OxwAA0MkAAM/IAADUzQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA3tYAAODYAADf1wAA39cAht/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD
/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/3tYA/9HKAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/8/IAK3TzAAE0ssAANPMAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADf2AAA39cAAN/XAE7f1wD539cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/97WAP/RygD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/PyAB0zcYAAObcAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA5swAAN/XAADf1wAc39cA2d/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/e1gD/0coA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwDvz8gAN8/IAADUzAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADf1wAA39gAAd/XAJnf1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/3tYA/9HKAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD
/zscAvtHKAAzRygAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA39cAAN/XAADf1wBI39cA9d/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/97WAP/RygD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/8/IAG3NxgAA//cAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAODZAADf1wAA39cADd/XAL3f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/e1gD/0coA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HANrQyQAg0MkAANPLAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA39cAAN/XAADf1wBb39cA+t/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/3tYA/9HKAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/PyACAxcAAANTMAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAN/XAADf1wAA39cADt/XALrf1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/97WAP/RygD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwDX0MkAItDIAADSywAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA39cAAN/XAADf1wBH39cA8N/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/e1gD/0coA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD8z8gAasvFAADZ0AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAN7WAADf1wAA39YAA9/XAI7f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/3tYA/9HKAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/z8gAsdHKAA7RyQAA08sAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA39cAAN/XAADf1wAa39cAxN/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD
/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/97WAP/RygD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA3dDIADHPyAAAz8gAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADf1wAA39cAAN/XADrf1wDh39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/e1gD/0coA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA8s/IAFnMxQAA1s4AANXNAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA4NcAAN/XAADf1wAA39cAVd/XAO3f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/3tYA/9HKAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA+c/IAHfV0AAC0coAANLLAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADf1wAA39cAAM7oAADf1wBj39cA8N/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/97WAP/RygD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA+s/IAITSygAG0MkAANDJAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAN/XAADf1wAA4NkAAd/XAGHf1wDr39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/e1gD/0coA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA98/IAIHSywAI0MkAANDJAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA39cAAN/XAADh2gAA39cAT9/XANvf1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/3tYA/9HKAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA7M/IAGzSywAF0MkAANDJAADbzAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADf1wAA39cAAN/XAADf1wAx39cAuN/XAP7f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/97WAP/RygD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscAz8/IAEmAhQAA0MkAANDJAADV1QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAN/XAADf1wAA39cAAN/XABLf1wB839cA59/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/e1gD/0coA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwDyz8gAl9DJACHOxwAA08sAANLKAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA3tcAAN/XAADf1wAA6tUAAN/XADXf1wCk39cA8d/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/3tYA/9HKAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAPnPyAC6z8gAS9LLAAXPyAAAwLkAANPMAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA39cAAN7XAADf1wAA39cABt/XAELf1wCh39cA6N/XAP/f1wD/39cA/9/XAP/f1wD/39cA/9/XAP/f1wD/39cA/97WAP/RygD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAP/OxwD/zscA8c/IALbPyABW0coADs7HAADRygAA0MkAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADc1gAA39cAAN/XAADf1wAA39cABN/XAC7f1wB339cAvd/XAOrf1wD939cA/9/XAP/f1wD
/39cA/9/XAP/e1gD/0coA/87HAP/OxwD/zscA/87HAP/OxwD/zscA/87HAPHOxwDMz8gAis/IAD3RygAKzscAANDJAADOxwAA1MwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADe2QAA39cAAN/XAADf1wAA39cAAN/XAAvf1wAu39cAXt/XAI3f1wC039cAz9/XAN/f1wDo3tYA7dHKAO7OxwDtzscA5c7HANbOxwC+z8gAm8/IAGzPyAA60MkAEsbAAADPyAAA0coAAM7HAADUzQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA3tcAANvYAADf1wAA39cAAN/XAADf1wAA3tcABt/XABLf1wAe39cAKd7WAC7RygAwz8gALtDJACXQyQAW0coACv//AADOxwAAz8gAANDJAADUzAAA0soAANfXAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAN3ZAADf1gAA3tcAAN7XAADf1wAA39cAAN/XAADe1gAA0coAAM/IAADQyQAA0MkAANHKAADUywAA0skAANXNAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAP//////////////////AAD//////////////////wAA//////////////////8AAP//////////////////AAD////////wAAAAAAAAHwAA////////wAAAAAAAAA8AAP///////wAAAAAAAAAHAAD///////wAAAAAAAAABwAA///////wAAAAAAAAAAcAAP//////4AAAAAAAAAAHAAD//////4AAAAAAAAAABwAA//////4AAAAAAAAAAAcAAP/////4AAAAAAAAAAAHAAD/////4AAAAAAAAAAABwAA/////8AAAAAAAAAAAAcAAP////8AAAAAAAAAAAAHAAD////8AAAAAAAAAAAABwAA////8AAAAAAAAAAAAAcAAP///+AAAAAAAAAAAAAHAAD///8AAAAAAAAAAAAABwAA///+AAAAAAAAAAAAAAcAAP//+AAAAAAAAAAAAAAHAAD//+AAAAAAAAAAAAAABwAA///AAAAAAAAAAAAAAAcAAP//AAAAAAAAAAAAAAAHAAD//AAAAAAAAAAAAAAABwAA//AAAAAAAAAAAAAAAAcAAP/gAAAAAAAAAAAAAAAHAAD/gAAAAAAAAAAAAAAABwAA/wAAAAAAAAAAAAAAAAcAAP4AAAAAAAAAAAAAAAAHAAD8AAAAAAAAAAAAAAAABwAA/AAAAAAAAAAAAAAAAAcAAPgAAAAAAAAAAAAAAAAHAAD4AAAAAAAAAAAAAAAABwAA+AAAAAAAAAAAAAAAAAcAAPgAAAAAAAAAAAAAAAAHAAD4AAAAAAAAAAAAAAAABwAA+AAAAAAAAAAAAAAAAAcAAPgAAAAAAAAAAAAAAAAHAAD4AAAAAAAAAAAAAAAABwAA+AAAAAAAAAAAAAAAAAcAAPgAAAAAAAAAAAAAAAAHAAD4AAAAAAAAAAAAAAAABwAA/AAAABgAAAAAAAAAAAcAAPwAAAA8AAAAAAAAAAAHAAD8AAAA/gAAAAAAAAAADwAA/gAAA/+AAAAAAAAAAB8AAP8AAA//gA/AAAAAAAA/AAD/gAA//4AP4AAAAAAAfwAA/+AAf/+AD/gAAAAAAf8AAP/8A///gA/8AAAAAAP/AAD//////4AP/wAAAAAP/wAA//////gAAf/AAAAAP/8AAP/////gAAA/8AAAAP//AAD/////gAAAH/4AAAf//wAA///gAAAAAAAAcAD///8AAP//AAAAAAAAAA//////AAD//AAAAAAAAAAD/////wAA//AAAAAAAAAAAP////8AAP/gAAAAAAAAAAB/////AAD/wAAAAAAAAAAAP////wAA/4AAAAAAAAAAAB////8AAP8AAAAAAAAAAAAP////AAD+AAAAAAAAAAAAB////wAA/gAAAAAAAAAAAAP///8AAPwAAAAAAAAAAAAD////AAD4AAAAAAAAAAAAAf///wA
A+AAAAAAAAAAAAAH///8AAPgAAAAAAAAAAAAA////AADwAAAAAAAAAAAAAP///wAA8AAAAAAAAAAAAAD///8AAPAAAAAAAAAAAAAAf///AADgAAAAAAAAAAAAAH///wAA4AAAAAAAAAAAAAB///8AAOAAAAAAAAAAAAAAf///AADgAAAAAAAAAAAAAH///wAA4AAAAAAAAAAAAAB///8AAOAAAAAAAAAAAAAAf///AADgAAAAAAAAAAAAAH///wAA4AAAAAAAAAAAAAB///8AAOAAAAAAAAAAAAAAf///AADgAAAAAAAAAAAAAH///wAA4AAAAAAAAAAAAAB///8AAOAAAAAAAAAAAAAAf///AADwAAAAAAAAAAAAAH///wAA8AAAAAAAAAAAAAD///8AAPAAAAAAAAAAAAAA////AADwAAAAAAAAAAAAAP///wAA+AAAAAAAAAAAAAH///8AAPgAAAAAAAAAAAAB////AAD8AAAAAAAAAAAAA////wAA/AAAAAAAAAAAAAP///8AAP4AAAAAAAAAAAAH////AAD/AAAAAAAAAAAAD////wAA/4AAAAAAAAAAAA////8AAP/AAAAAAAAAAAAf////AAD/4AAAAAAAAAAAP////wAA//AAAAAAAAAAAP////8AAP/4AAAAAAAAAAH/////AAD//gAAAAAAAAAH/////wAA//8AAAAAAAAAD/////8AAP//AAAAAAAAAA//////AAD//4AAAAAAAAAf/////wAA//+AAAAAAAAAH/////8AAP//gAAAAAAAAB//////AAD//8AAAAAAAAA//////wAA///AAAAAAAAAP/////8AAP//4AAAAAAAAH//////AAD//+AAAAAAAAB//////wAA///wAAAAAAAA//////8AAP//+AAAAAAAAP//////AAD///gAAAAAAAH//////wAA///8AAAAAAAD//////8AAP///gAAAAAAB///////AAD///8AAAAAAAf//////wAA////gAAAAAAP//////8AAP///8AAAAAAP///////AAD////gAAAAAH///////wAA////+AAAAAH///////8AAP////wAAAAD////////AAD/////AAAAD////////wAA/////+AAAD////////8AAP/////8AAP/////////AAD//////////////////wAA//////////////////8AAP//////////////////AAD//////////////////wAA'
# Decode the base64 payload held in `icon` (defined above) and write the raw
# bytes out as tmp.ico in the current working directory.
# NOTE(review): mode "wb+" opens for read+write but nothing reads the handle
# back — plain "wb" would suffice; confirm before changing.
with open("tmp.ico","wb+") as f:
    f.write(base64.b64decode(icon)) | 19,841.25 | 79,282 | 0.842525 |
acef48d96bad880a04c6796df44ef4d64f4ccb5d | 26,064 | py | Python | electrumsv/tests/test_transaction.py | kevinejohn/electrumsv | 29f978a36101f72220be7c95c9e249bbde0e3faf | [
"MIT"
] | null | null | null | electrumsv/tests/test_transaction.py | kevinejohn/electrumsv | 29f978a36101f72220be7c95c9e249bbde0e3faf | [
"MIT"
] | null | null | null | electrumsv/tests/test_transaction.py | kevinejohn/electrumsv | 29f978a36101f72220be7c95c9e249bbde0e3faf | [
"MIT"
] | null | null | null | import pytest
from bitcoinx import (
Address, PrivateKey, PublicKey, Tx, Script, TxOutput, bip32_key_from_string, hash160, Bitcoin
)
from electrumsv.bitcoin import address_from_string
from electrumsv.keystore import Old_KeyStore, BIP32_KeyStore
from electrumsv.transaction import XPublicKey, Transaction, NO_SIGNATURE
from electrumsv.util import bh2u
unsigned_blob = '010000000149f35e43fefd22d8bb9e4b3ff294c6286154c25712baf6ab77b646e5074d6aed010000005701ff4c53ff0488b21e0000000000000000004f130d773e678a58366711837ec2e33ea601858262f8eaef246a7ebd19909c9a03c3b30e38ca7d797fee1223df1c9827b2a9f3379768f520910260220e0560014600002300feffffffd8e43201000000000118e43201000000001976a914e158fb15c888037fdc40fb9133b4c1c3c688706488ac5fbd0700'
signed_blob = '010000000149f35e43fefd22d8bb9e4b3ff294c6286154c25712baf6ab77b646e5074d6aed010000006a473044022025bdc804c6fe30966f6822dc25086bc6bb0366016e68e880cf6efd2468921f3202200e665db0404f6d6d9f86f73838306ac55bb0d0f6040ac6047d4e820f24f46885412103b5bbebceeb33c1b61f649596b9c3611c6b2853a1f6b48bce05dd54f667fa2166feffffff0118e43201000000001976a914e158fb15c888037fdc40fb9133b4c1c3c688706488ac5fbd0700'
v2_blob = "0200000001191601a44a81e061502b7bfbc6eaa1cef6d1e6af5308ef96c9342f71dbf4b9b5000000006b483045022100a6d44d0a651790a477e75334adfb8aae94d6612d01187b2c02526e340a7fd6c8022028bdf7a64a54906b13b145cd5dab21a26bd4b85d6044e9b97bceab5be44c2a9201210253e8e0254b0c95776786e40984c1aa32a7d03efa6bdacdea5f421b774917d346feffffff026b20fa04000000001976a914024db2e87dd7cfd0e5f266c5f212e21a31d805a588aca0860100000000001976a91421919b94ae5cefcdf0271191459157cdb41c4cbf88aca6240700"
class TestTransaction:
    """Field-level parsing, serialization and txid tests against fixed hex blobs.

    The txid tests each deserialize a captured mainnet-style transaction of a
    given script-type combination and assert the known txid, exercising the
    full serialize/double-SHA path for that shape of input and output script.
    """

    def test_tx_unsigned(self):
        # An incomplete input exposes its x_pubkey / NO_SIGNATURE placeholders
        # and reports complete=False from as_dict().
        tx = Transaction.from_hex(unsigned_blob)
        assert tx.version == 1
        assert len(tx.inputs) == 1
        txin = tx.inputs[0]
        assert txin.prev_hash.hex() == '49f35e43fefd22d8bb9e4b3ff294c6286154c25712baf6ab77b646e5074d6aed'
        assert txin.prev_idx == 1
        assert txin.script_sig.to_hex() == '01ff4c53ff0488b21e0000000000000000004f130d773e678a58366711837ec2e33ea601858262f8eaef246a7ebd19909c9a03c3b30e38ca7d797fee1223df1c9827b2a9f3379768f520910260220e0560014600002300'
        assert txin.sequence == 4294967294
        assert txin.value == 20112600
        assert txin.signatures == [NO_SIGNATURE]
        assert txin.x_pubkeys == [XPublicKey('ff0488b21e0000000000000000004f130d773e678a58366711837ec2e33ea601858262f8eaef246a7ebd19909c9a03c3b30e38ca7d797fee1223df1c9827b2a9f3379768f520910260220e0560014600002300')]
        assert txin.address == address_from_string('13Vp8Y3hD5Cb6sERfpxePz5vGJizXbWciN')
        assert txin.threshold == 1
        assert tx.outputs == [TxOutput(20112408, address_from_string(
            '1MYXdf4moacvaEKZ57ozerpJ3t9xSeN6LK').to_script())]
        assert tx.locktime == 507231
        assert tx.as_dict() == {'hex': unsigned_blob, 'complete': False}

    def test_tx_signed(self):
        # The fully signed form parses the real <sig> <pubkey> scriptSig,
        # reports complete=True, and re-serializes byte-identically.
        tx = Transaction.from_hex(signed_blob)
        assert tx.version == 1
        assert len(tx.inputs) == 1
        txin = tx.inputs[0]
        assert txin.prev_hash.hex() == '49f35e43fefd22d8bb9e4b3ff294c6286154c25712baf6ab77b646e5074d6aed'
        assert txin.prev_idx == 1
        assert txin.script_sig.to_hex() == '473044022025bdc804c6fe30966f6822dc25086bc6bb0366016e68e880cf6efd2468921f3202200e665db0404f6d6d9f86f73838306ac55bb0d0f6040ac6047d4e820f24f46885412103b5bbebceeb33c1b61f649596b9c3611c6b2853a1f6b48bce05dd54f667fa2166'
        assert txin.sequence == 4294967294
        assert txin.signatures == [bytes.fromhex('3044022025bdc804c6fe30966f6822dc25086bc6bb0366016e68e880cf6efd2468921f3202200e665db0404f6d6d9f86f73838306ac55bb0d0f6040ac6047d4e820f24f4688541')]
        assert txin.x_pubkeys == [XPublicKey('03b5bbebceeb33c1b61f649596b9c3611c6b2853a1f6b48bce05dd54f667fa2166')]
        assert txin.address == address_from_string('13Vp8Y3hD5Cb6sERfpxePz5vGJizXbWciN')
        assert txin.threshold == 1
        assert tx.outputs == [TxOutput(20112408, address_from_string(
            '1MYXdf4moacvaEKZ57ozerpJ3t9xSeN6LK').to_script())]
        assert tx.locktime == 507231
        assert tx.as_dict() == {'hex': signed_blob, 'complete': True}
        assert tx.serialize() == signed_blob
        # NOTE(review): update_signatures() elsewhere takes a list of signature
        # bytes (see TestTransaction2.test_update_signatures); here the full
        # signed tx hex is passed — confirm this overload is intended.
        tx.update_signatures(signed_blob)
        assert tx.estimated_size() == 192

    def test_parse_xpub(self):
        # An 'fe...' (old-Electrum style) x_pubkey resolves to its P2PKH address.
        res = XPublicKey('fe4e13b0f311a55b8a5db9a32e959da9f011b131019d4cebe6141b9e2c93edcbfc0954c358b062a9f94111548e50bde5847a3096b8b7872dcffadb0e9579b9017b01000200').to_address()
        assert res == address_from_string('19h943e4diLc68GXW7G75QNe2KWuMu7BaJ')

    def test_version_field(self):
        # The version-2 fixture must hash to its known txid.
        tx = Transaction.from_hex(v2_blob)
        assert tx.txid() == "b97f9180173ab141b61b9f944d841e60feec691d6daab4d4d932b24dd36606fe"

    def test_txid_coinbase_to_p2pk(self):
        tx = Transaction.from_hex('01000000010000000000000000000000000000000000000000000000000000000000000000ffffffff4103400d0302ef02062f503253482f522cfabe6d6dd90d39663d10f8fd25ec88338295d4c6ce1c90d4aeb368d8bdbadcc1da3b635801000000000000000474073e03ffffffff013c25cf2d01000000434104b0bd634234abbb1ba1e986e884185c61cf43e001f9137f23c2c409273eb16e6537a576782eba668a7ef8bd3b3cfb1edb7117ab65129b8a2e681f3c1e0908ef7bac00000000')
        assert 'dbaf14e1c476e76ea05a8b71921a46d6b06f0a950f17c5f9f1a03b8fae467f10' == tx.txid()

    def test_txid_coinbase_to_p2pkh(self):
        tx = Transaction.from_hex('01000000010000000000000000000000000000000000000000000000000000000000000000ffffffff25033ca0030400001256124d696e656420627920425443204775696c640800000d41000007daffffffff01c00d1298000000001976a91427a1f12771de5cc3b73941664b2537c15316be4388ac00000000')
        assert '4328f9311c6defd9ae1bd7f4516b62acf64b361eb39dfcf09d9925c5fd5c61e8' == tx.txid()

    def test_txid_p2pk_to_p2pkh(self):
        tx = Transaction.from_hex('010000000118231a31d2df84f884ced6af11dc24306319577d4d7c340124a7e2dd9c314077000000004847304402200b6c45891aed48937241907bc3e3868ee4c792819821fcde33311e5a3da4789a02205021b59692b652a01f5f009bd481acac2f647a7d9c076d71d85869763337882e01fdffffff016c95052a010000001976a9149c4891e7791da9e622532c97f43863768264faaf88ac00000000')
        assert '90ba90a5b115106d26663fce6c6215b8699c5d4b2672dd30756115f3337dddf9' == tx.txid()

    def test_txid_p2pk_to_p2sh(self):
        tx = Transaction.from_hex('0100000001e4643183d6497823576d17ac2439fb97eba24be8137f312e10fcc16483bb2d070000000048473044022032bbf0394dfe3b004075e3cbb3ea7071b9184547e27f8f73f967c4b3f6a21fa4022073edd5ae8b7b638f25872a7a308bb53a848baa9b9cc70af45fcf3c683d36a55301fdffffff011821814a0000000017a9143c640bc28a346749c09615b50211cb051faff00f8700000000')
        assert '172bdf5a690b874385b98d7ab6f6af807356f03a26033c6a65ab79b4ac2085b5' == tx.txid()

    def test_txid_p2pkh_to_p2pkh(self):
        tx = Transaction.from_hex('0100000001f9dd7d33f315617530dd72264b5d9c69b815626cce3f66266d1015b1a590ba90000000006a4730440220699bfee3d280a499daf4af5593e8750b54fef0557f3c9f717bfa909493a84f60022057718eec7985b7796bb8630bf6ea2e9bf2892ac21bd6ab8f741a008537139ffe012103b4289890b40590447b57f773b5843bf0400e9cead08be225fac587b3c2a8e973fdffffff01ec24052a010000001976a914ce9ff3d15ed5f3a3d94b583b12796d063879b11588ac00000000')
        assert '24737c68f53d4b519939119ed83b2a8d44d716d7f3ca98bcecc0fbb92c2085ce' == tx.txid()

    def test_txid_p2pkh_to_p2sh(self):
        tx = Transaction.from_hex('010000000195232c30f6611b9f2f82ec63f5b443b132219c425e1824584411f3d16a7a54bc000000006b4830450221009f39ac457dc8ff316e5cc03161c9eff6212d8694ccb88d801dbb32e85d8ed100022074230bb05e99b85a6a50d2b71e7bf04d80be3f1d014ea038f93943abd79421d101210317be0f7e5478e087453b9b5111bdad586038720f16ac9658fd16217ffd7e5785fdffffff0200e40b540200000017a914d81df3751b9e7dca920678cc19cac8d7ec9010b08718dfd63c2c0000001976a914303c42b63569ff5b390a2016ff44651cd84c7c8988acc7010000')
        assert '155e4740fa59f374abb4e133b87247dccc3afc233cb97c2bf2b46bba3094aedc' == tx.txid()

    def test_txid_p2sh_to_p2pkh(self):
        tx = Transaction.from_hex('0100000001b98d550fa331da21038952d6931ffd3607c440ab2985b75477181b577de118b10b000000fdfd0000483045022100a26ea637a6d39aa27ea7a0065e9691d477e23ad5970b5937a9b06754140cf27102201b00ed050b5c468ee66f9ef1ff41dfb3bd64451469efaab1d4b56fbf92f9df48014730440220080421482a37cc9a98a8dc3bf9d6b828092ad1a1357e3be34d9c5bbdca59bb5f02206fa88a389c4bf31fa062977606801f3ea87e86636da2625776c8c228bcd59f8a014c69522102420e820f71d17989ed73c0ff2ec1c1926cf989ad6909610614ee90cf7db3ef8721036eae8acbae031fdcaf74a824f3894bf54881b42911bd3ad056ea59a33ffb3d312103752669b75eb4dc0cca209af77a59d2c761cbb47acc4cf4b316ded35080d92e8253aeffffffff0101ac3a00000000001976a914a6b6bcc85975bf6a01a0eabb2ac97d5a418223ad88ac00000000')
        assert '0ea982e8e601863e604ef6d9acf9317ae59d3eac9cafee6dd946abadafd35af8' == tx.txid()

    def test_txid_p2sh_to_p2sh(self):
        # Note the public keys in this transaction are not sorted. This also tests we do
        # not sort them.
        tx = Transaction.from_hex('01000000018695eef2250b3a3b6ef45fe065e601610e69dd7a56de742092d40e6276e6c9ec00000000fdfd000047304402203199bf8e49f7203e8bcbfd754aa356c6ba61643a3490f8aef3888e0aaa7c048c02201e7180bfd670f4404e513359b4020fbc85d6625e3e265e0c357e8611f11b83e401483045022100e60f897db114679f9a310a032a22e9a7c2b8080affe2036c480ff87bf6f45ada02202dbd27af38dd97d418e24d89c3bb7a97e359dd927c1094d8c9e5cac57df704fb014c69522103adc563b9f5e506f485978f4e913c10da208eac6d96d49df4beae469e81a4dd982102c52bc9643a021464a31a3bfa99cfa46afaa4b3acda31e025da204b4ee44cc07a2103a1c8edcc3310b3d7937e9e4179e7bd9cdf31c276f985f4eb356f21b874225eb153aeffffffff02b8ce05000000000017a9145c9c158430b7b79c3ad7ef9bdf981601eda2412d87b82400000000000017a9146bf3ff89019ecc5971a39cdd4f1cabd3b647ad5d8700000000')
        assert '2caab5a11fa1ec0f5bb014b8858d00fecf2c001e15d22ad04379ad7b36fef305' == tx.txid()
# Fixtures for TestTransaction2: a 2-input transaction, one input controlled by
# each of the priv_keys below, plus its expected partially/fully signed forms.
unsigned_tx = "0100000002f25568d10d46181bc65b01b735f8cccdb91e4e7d172c5efb984b839d1c912084000000002401ff2102faf7f10ccad1bc40e697e6b90b1d7c9daf92fdf47a4cf726f1c0422e4730fe85fefffffff146000000000000f25568d10d46181bc65b01b735f8cccdb91e4e7d172c5efb984b839d1c912084010000002401ff21030c4ee92cd3c174e9aabcdec56ddc6b6d09a7767b563055a10e5406ec48f477eafeffffff415901000000000001de9e0100000000001976a914428f0dbcc74fc3a999bbaf8bf4600531e155e66b88ac75c50800"

# The two private keys (WIF form) that sign the two inputs of unsigned_tx.
priv_keys = [PrivateKey.from_WIF(WIF) for WIF in (
    "KzjWgFAozj8EfMFpeCBshWA69QXG7Kj7nMYHjSkkcTM8DM8GF1Hd",
    "KyY5VaoqPwjSgGpKHT3JJKDcxXMeqYo6umK7u1h3iBt9n9aihiPs",
)]

# unsigned_tx with only the first input signed (first priv key).
signed_tx_1 = "0100000002f25568d10d46181bc65b01b735f8cccdb91e4e7d172c5efb984b839d1c912084000000002401ff2102faf7f10ccad1bc40e697e6b90b1d7c9daf92fdf47a4cf726f1c0422e4730fe85fefffffff146000000000000f25568d10d46181bc65b01b735f8cccdb91e4e7d172c5efb984b839d1c912084010000006b483045022100fa8ebdc7cefc407fd1b560fb2e2e5e96e900e94634d96df4fd284126048746a2022028d91ca132a1a386a67df69a2c5ba216218870c256c163d729f1575f7a8824f54121030c4ee92cd3c174e9aabcdec56ddc6b6d09a7767b563055a10e5406ec48f477eafeffffff01de9e0100000000001976a914428f0dbcc74fc3a999bbaf8bf4600531e155e66b88ac75c50800"

# unsigned_tx with only the second input signed (second priv key).
signed_tx_2 = "0100000002f25568d10d46181bc65b01b735f8cccdb91e4e7d172c5efb984b839d1c912084000000006b4830450221008dc02fa531a9a704f5c01abdeb58930514651565b42abf94f6ad1565d0ad6785022027b1396f772c696629a4a09b01aed2416861aeaee05d0ff4a2e6fdfde73ec84d412102faf7f10ccad1bc40e697e6b90b1d7c9daf92fdf47a4cf726f1c0422e4730fe85fefffffff25568d10d46181bc65b01b735f8cccdb91e4e7d172c5efb984b839d1c912084010000002401ff21030c4ee92cd3c174e9aabcdec56ddc6b6d09a7767b563055a10e5406ec48f477eafeffffff415901000000000001de9e0100000000001976a914428f0dbcc74fc3a999bbaf8bf4600531e155e66b88ac75c50800"

# unsigned_tx fully signed (both priv keys).
signed_tx_3 = "0100000002f25568d10d46181bc65b01b735f8cccdb91e4e7d172c5efb984b839d1c912084000000006b4830450221008dc02fa531a9a704f5c01abdeb58930514651565b42abf94f6ad1565d0ad6785022027b1396f772c696629a4a09b01aed2416861aeaee05d0ff4a2e6fdfde73ec84d412102faf7f10ccad1bc40e697e6b90b1d7c9daf92fdf47a4cf726f1c0422e4730fe85fefffffff25568d10d46181bc65b01b735f8cccdb91e4e7d172c5efb984b839d1c912084010000006b483045022100fa8ebdc7cefc407fd1b560fb2e2e5e96e900e94634d96df4fd284126048746a2022028d91ca132a1a386a67df69a2c5ba216218870c256c163d729f1575f7a8824f54121030c4ee92cd3c174e9aabcdec56ddc6b6d09a7767b563055a10e5406ec48f477eafeffffff01de9e0100000000001976a914428f0dbcc74fc3a999bbaf8bf4600531e155e66b88ac75c50800"
class TestTransaction2:
def sign_tx(self, unsigned_tx_hex, priv_keys):
keypairs = {XPublicKey(priv_key.public_key.to_hex()):
(priv_key.to_bytes(), priv_key.is_compressed())
for priv_key in priv_keys}
tx = Transaction.from_hex(unsigned_tx_hex)
tx.sign(keypairs)
return tx
def test_sign_tx_1(self):
# Test signing the first input only
tx = self.sign_tx(unsigned_tx, [priv_keys[0]])
assert tx.to_hex() == signed_tx_1
assert not tx.is_complete()
def test_sign_tx_2(self):
# Test signing the second input only
tx = self.sign_tx(unsigned_tx, [priv_keys[1]])
assert tx.to_hex() == signed_tx_2
assert not tx.is_complete()
def test_sign_tx_3(self):
# Test signing both
tx = self.sign_tx(unsigned_tx, priv_keys)
assert tx.to_hex() == signed_tx_3
assert tx.is_complete()
assert tx.txid() == "b83acf939a92c420d0cb8d45d5d4dfad4e90369ebce0f49a45808dc1b41259b0"
def test_update_signatures(self):
signed_tx = Tx.from_hex(signed_tx_3)
sigs = [next(input.script_sig.ops())[:-1] for input in signed_tx.inputs]
tx = Transaction.from_hex(unsigned_tx)
tx.update_signatures(sigs)
assert tx.is_complete()
assert tx.txid() == "b83acf939a92c420d0cb8d45d5d4dfad4e90369ebce0f49a45808dc1b41259b0"
def multisig_keystores(self):
seed = 'ee6ea9eceaf649640051a4c305ac5c59'
keystore1 = Old_KeyStore.from_seed(seed)
xprv = ('xprv9s21ZrQH143K4XLpSd2berkCzJTXDv68rusDQFiQGSqa1ZmVXnYzYpTQ9'
'qYiSB7mHvg6kEsrd2ZtnHRJ61sZhSN4jZ2T8wxA4T75BE4QQZ1')
xpub = ('xpub661MyMwAqRbcH1RHYeZc1zgwYLJ1dNozE8npCe81pnNYtN6e5KsF6cmt17Fv8w'
'GvJrRiv6Kewm8ggBG6N3XajhoioH3stUmLRi53tk46CiA')
keystore2 = BIP32_KeyStore({'xprv': xprv, 'xpub': xpub})
return [keystore1, keystore2]
@pytest.mark.parametrize("unsigned_hex, signed1_hex, fully_signed_hex, signed2_hex", (
(
# Here the x_pubkeys are naturally sorted
'010000000111111111111111111111111111111111111111111111111111111111111111111b000000a50001ff01ff4c9e524c53ff0488b21e000000000000000000f79d7a4d3ea07099f09fbf35c3103908cbb4b1f30e8602a06ffbdbb213d0025602e9aa22cc7106abab85e4c41f18f030c370213769c18d6754f3d0584e69a7fa120000000045fe84717a26df3332b129e59faaab25c11752277bc55c07d8724e1660e63b862d00b41d3db01e29ed54ca83300eb73d82b5381536298f40fdad8c1e307b66cf39a90000000052aeffffffffb4140000000000000188130000000000001976a914000000000000000000000000000000000000000088ac00000000',
'010000000111111111111111111111111111111111111111111111111111111111111111111b000000eb0001ff473044022100a9b906ec7fd40b8063326675d5f229d36227241dc84f262b203b3eaadfd91789021f267473437145d77c69273ffef2426055c6c89457832c3d38fcb3c07eb8c391414c9e524c53ff0488b21e000000000000000000f79d7a4d3ea07099f09fbf35c3103908cbb4b1f30e8602a06ffbdbb213d0025602e9aa22cc7106abab85e4c41f18f030c370213769c18d6754f3d0584e69a7fa120000000045fe84717a26df3332b129e59faaab25c11752277bc55c07d8724e1660e63b862d00b41d3db01e29ed54ca83300eb73d82b5381536298f40fdad8c1e307b66cf39a90000000052aeffffffffb4140000000000000188130000000000001976a914000000000000000000000000000000000000000088ac00000000',
'010000000111111111111111111111111111111111111111111111111111111111111111111b000000fb00483045022100bc32a5f10b755dcd8dc9a498d76286f059993d1d72fbc5340d0da9dc99dcad0a022064e37760d9ad3e3b9f0b48263becca8dee5aac43bdcecfdfdc63553057083a8c41473044022100a9b906ec7fd40b8063326675d5f229d36227241dc84f262b203b3eaadfd91789021f267473437145d77c69273ffef2426055c6c89457832c3d38fcb3c07eb8c391414c675221020c8bd7a0cfa64714b8f01316cd46197b902565f2c812ed0d450fcd1425edc9e8410472cd64a288e4a518059b388a9164522e05c3f3aef3f6791f31074af734510054bfde0bee54dbefa0eebe71a53d18298c628842b1865e2e0bc053bb4197af726e52aeffffffff0188130000000000001976a914000000000000000000000000000000000000000088ac00000000',
'010000000111111111111111111111111111111111111111111111111111111111111111111b000000ec00483045022100bc32a5f10b755dcd8dc9a498d76286f059993d1d72fbc5340d0da9dc99dcad0a022064e37760d9ad3e3b9f0b48263becca8dee5aac43bdcecfdfdc63553057083a8c4101ff4c9e524c53ff0488b21e000000000000000000f79d7a4d3ea07099f09fbf35c3103908cbb4b1f30e8602a06ffbdbb213d0025602e9aa22cc7106abab85e4c41f18f030c370213769c18d6754f3d0584e69a7fa120000000045fe84717a26df3332b129e59faaab25c11752277bc55c07d8724e1660e63b862d00b41d3db01e29ed54ca83300eb73d82b5381536298f40fdad8c1e307b66cf39a90000000052aeffffffffb4140000000000000188130000000000001976a914000000000000000000000000000000000000000088ac00000000',
),
(
# Here the x_pubkeys are reverse-sorted. They should not be switched when signing.
'010000000111111111111111111111111111111111111111111111111111111111111111111b000000a50001ff01ff4c9e5245fe84717a26df3332b129e59faaab25c11752277bc55c07d8724e1660e63b862d00b41d3db01e29ed54ca83300eb73d82b5381536298f40fdad8c1e307b66cf39a9000000004c53ff0488b21e000000000000000000f79d7a4d3ea07099f09fbf35c3103908cbb4b1f30e8602a06ffbdbb213d0025602e9aa22cc7106abab85e4c41f18f030c370213769c18d6754f3d0584e69a7fa120000000052aeffffffffb4140000000000000188130000000000001976a914000000000000000000000000000000000000000088ac00000000',
'010000000111111111111111111111111111111111111111111111111111111111111111111b000000eb0047304402207a923d1b0ca9930cfb2162f1e85dc5feb6e9322efcceeaba7a91ad37f72b815702207ed90ebab7c8bbf728d29c2d93931bb44ff1a7147b37982c1d27c822c139079e4101ff4c9e5245fe84717a26df3332b129e59faaab25c11752277bc55c07d8724e1660e63b862d00b41d3db01e29ed54ca83300eb73d82b5381536298f40fdad8c1e307b66cf39a9000000004c53ff0488b21e000000000000000000f79d7a4d3ea07099f09fbf35c3103908cbb4b1f30e8602a06ffbdbb213d0025602e9aa22cc7106abab85e4c41f18f030c370213769c18d6754f3d0584e69a7fa120000000052aeffffffffb4140000000000000188130000000000001976a914000000000000000000000000000000000000000088ac00000000',
'010000000111111111111111111111111111111111111111111111111111111111111111111b000000fb0047304402207a923d1b0ca9930cfb2162f1e85dc5feb6e9322efcceeaba7a91ad37f72b815702207ed90ebab7c8bbf728d29c2d93931bb44ff1a7147b37982c1d27c822c139079e41483045022100ae42f172f722ac2392ef3e5958d78bbca1ebedbce47eff27ba66345be781c46f02207c9ab6ff496791bf2e56300ff4621beaec6ccdd3639e460612569c6e0407e09a414c6752410472cd64a288e4a518059b388a9164522e05c3f3aef3f6791f31074af734510054bfde0bee54dbefa0eebe71a53d18298c628842b1865e2e0bc053bb4197af726e21020c8bd7a0cfa64714b8f01316cd46197b902565f2c812ed0d450fcd1425edc9e852aeffffffff0188130000000000001976a914000000000000000000000000000000000000000088ac00000000',
'010000000111111111111111111111111111111111111111111111111111111111111111111b000000ec0001ff483045022100ae42f172f722ac2392ef3e5958d78bbca1ebedbce47eff27ba66345be781c46f02207c9ab6ff496791bf2e56300ff4621beaec6ccdd3639e460612569c6e0407e09a414c9e5245fe84717a26df3332b129e59faaab25c11752277bc55c07d8724e1660e63b862d00b41d3db01e29ed54ca83300eb73d82b5381536298f40fdad8c1e307b66cf39a9000000004c53ff0488b21e000000000000000000f79d7a4d3ea07099f09fbf35c3103908cbb4b1f30e8602a06ffbdbb213d0025602e9aa22cc7106abab85e4c41f18f030c370213769c18d6754f3d0584e69a7fa120000000052aeffffffffb4140000000000000188130000000000001976a914000000000000000000000000000000000000000088ac00000000',
)
))
def test_multisig(self, unsigned_hex, signed1_hex, fully_signed_hex, signed2_hex):
    """Check multisig signing round-trips.

    The unsigned transaction must re-serialize to its original hex, and
    signing with the two keystores in either order must converge on the
    same fully-signed serialization (with the expected intermediate
    partially-signed forms along the way).
    """
    tx = Transaction.from_hex(unsigned_hex)
    keystore1, keystore2 = self.multisig_keystores()
    # Deserializing and re-serializing must be lossless.
    assert tx.serialize() == unsigned_hex
    # Sign with keystore 1, then keystore 2 (password argument is None —
    # presumably the keystores are unencrypted here; confirm against the
    # fixture if that changes).
    keystore1.sign_transaction(tx, None)
    assert tx.serialize() == signed1_hex
    keystore2.sign_transaction(tx, None)
    assert tx.serialize() == fully_signed_hex
    # Now sign in the opposite order; the intermediate form differs but the
    # fully-signed result must be identical.
    tx = Transaction.from_hex(unsigned_hex)
    keystore2.sign_transaction(tx, None)
    assert tx.serialize() == signed2_hex
    keystore1.sign_transaction(tx, None)
    assert tx.serialize() == fully_signed_hex
class TestXPublicKey:
    """Tests for XPublicKey's handling of its serialization forms.

    Covers raw public keys (02/03/04 prefixes), BIP32 extended keys with a
    derivation path ('ff' prefix), old-style Electrum-seed keys ('fe'
    prefix) and script/address forms ('fd' prefix), plus rejection of
    malformed input.
    """

    def test_bad_type(self):
        """A PublicKey object is not an accepted constructor argument."""
        public_key = PublicKey.from_hex(
            '034339a901d8526c4d733c8ea7c861f1a6324f37f6b86f838725820e0c5fc19570')
        with pytest.raises(TypeError):
            XPublicKey(public_key)

    def test_bad_key(self):
        """Malformed serializations raise ValueError."""
        # A compressed key with a trailing junk byte.
        with pytest.raises(ValueError):
            XPublicKey('034339a901d8526c4d733c8ea7c861f1a6324f37f6b86f838725820e0c5fc1957000')
        # An 'ff' BIP32 payload one byte short of a valid derivation path
        # (compare the valid form used in test_bip32_extended_keys).
        with pytest.raises(ValueError):
            XPublicKey(
                'ff0488b21e000000000000000000f79d7a4d3ea07099f09fbf35c3103908cbb4b1f30e8602a06ffbdb'
                'b213d0025602e9aa22cc7106abab85e4c41f18f030c370213769c18d6754f3d0584e69a7fa1201000a'
            )

    @pytest.mark.parametrize("raw_hex", (
        # An uncompressed 04 key
        '046d6caac248af96f6afa7f904f550253a0f3ef3f5aa2fe6838a95b216691468e'
        '2487e6222a6664e079c8edf7518defd562dbeda1e7593dfd7f0be285880a24dab',
        # A compressed 03 key
        '034339a901d8526c4d733c8ea7c861f1a6324f37f6b86f838725820e0c5fc19570',
        # A compressed 02 key
        '026370246118a7c218fd557496ebb2b0862d59c6486e88f83e07fd12ce8a88fb00',
    ))
    def test_raw_public_keys(self, raw_hex, coin):
        """A raw key round-trips, is not BIP32, and resolves to itself."""
        public_key = PublicKey.from_hex(raw_hex)
        x_pubkey = XPublicKey(raw_hex)
        assert x_pubkey.to_bytes() == bytes.fromhex(raw_hex)
        assert x_pubkey.to_hex() == raw_hex
        assert not x_pubkey.is_bip32_key()
        assert x_pubkey.to_public_key() == public_key
        assert x_pubkey.to_address() == public_key.to_address(coin=coin)
        assert x_pubkey.to_address().coin() is coin

    @pytest.mark.parametrize("raw_hex, path", (
        (
            'ff0488b21e000000000000000000f79d7a4d3ea07099f09fbf35c3103908cbb4b1f30e8602a06ffbdb'
            'b213d0025602e9aa22cc7106abab85e4c41f18f030c370213769c18d6754f3d0584e69a7fa1201000a00',
            [1, 10],
        ),
        (
            'ff0488b21e000000000000000000f79d7a4d3ea07099f09fbf35c3103908cbb4b1f30e8602a06ffbdbb2'
            '13d0025602e9aa22cc7106abab85e4c41f18f030c370213769c18d6754f3d0584e69a7fa1200001900',
            [0, 25],
        ),
    ))
    def test_bip32_extended_keys(self, raw_hex, path, coin):
        """An 'ff' form embeds an xpub plus a two-element derivation path.

        Resolving it must match deriving the same child key directly from
        the xpub.  Note this form is constructed from bytes, exercising the
        bytes constructor path alongside the hex-string one.
        """
        # The same master public key as used in test_keystore.py.
        xpub = ('xpub661MyMwAqRbcH1RHYeZc1zgwYLJ1dNozE8npCe81pnNYtN6e5KsF6cmt17Fv8w'
                'GvJrRiv6Kewm8ggBG6N3XajhoioH3stUmLRi53tk46CiA')
        root_key = bip32_key_from_string(xpub)
        # Renamed from 'True_10_public_key' (PEP 8: locals are snake_case).
        expected_public_key = root_key.child(path[0]).child(path[1])
        x_pubkey = XPublicKey(bytes.fromhex(raw_hex))
        assert x_pubkey.to_bytes() == bytes.fromhex(raw_hex)
        assert x_pubkey.to_hex() == raw_hex
        assert x_pubkey.is_bip32_key()
        assert x_pubkey.bip32_extended_key_and_path() == (xpub, path)
        assert x_pubkey.to_public_key() == expected_public_key
        assert x_pubkey.to_address() == expected_public_key.to_address(coin=coin)
        assert x_pubkey.to_address().coin() is coin

    @pytest.mark.parametrize("raw_hex, public_key_hex", (
        ('fee9d4b7866dd1e91c862aebf62a49548c7dbf7bcc6e4b7b8c9da820c7737968df9c09d'
         '5a3e271dc814a29981f81b3faaf2737b551ef5dcc6189cf0f8252c442b301000a00',
         '044794e135aa6d397222b4395091e53557f0e1ab9ffc0358303de6b9800642a9f544c3'
         'f8d2ece93e25864f19f44279661c16aaa8e85eea9ea1c8c1fcf1c61fcae0'
        ),
        ('fee9d4b7866dd1e91c862aebf62a49548c7dbf7bcc6e4b7b8c9da820c7737968df9c09'
         'd5a3e271dc814a29981f81b3faaf2737b551ef5dcc6189cf0f8252c442b300000500',
         '04935970bd7c9e51bfe8e1135bb89a8ce09f8876d60d81ba4432f5e6fa394e6d09c9b'
         'a78f8d87aa7c519892a6adb5e7b39702379411dd7ba49f324f8c7e4e51f17'
        ),
    ))
    def test_old_keystore(self, raw_hex, public_key_hex, coin):
        """An 'fe' (old Electrum-seed) form resolves to an uncompressed key."""
        public_key = PublicKey.from_hex(public_key_hex)
        assert public_key.is_compressed() is False
        x_pubkey = XPublicKey(raw_hex)
        assert x_pubkey.to_bytes() == bytes.fromhex(raw_hex)
        assert x_pubkey.to_hex() == raw_hex
        assert not x_pubkey.is_bip32_key()
        assert x_pubkey.to_public_key() == public_key
        # Old-style keys must stay uncompressed after resolution.
        assert x_pubkey.to_public_key().is_compressed() is False
        assert x_pubkey.to_address() == public_key.to_address(coin=coin)
        assert x_pubkey.to_address().coin() is coin

    @pytest.mark.parametrize("raw_hex, address", (
        # P2PKH script form.
        ('fd76a9140d58656ec279ed001c58c7aabc64193f07414ff388ac',
         '12DZgNWoB5c2PW74y7vQm9SACjxyoKCjtQ'
        ),
        # P2SH script form.
        ('fda91468916676bce64b1e02c3bffa3c75ca0449c8355e87',
         '3BDvP5dDhJHpdZs393GUUnRiNwCj3C3GF4'
        ),
    ))
    def test_addresses(self, raw_hex, address):
        """An 'fd' (script) form resolves to an Address.

        For these forms to_public_key() yields the Address object itself,
        as the assertions below show.
        """
        coin = Bitcoin
        address = Address.from_string(address, coin)
        # Force the coin onto the Address; presumably from_string does not
        # pin it — verify against the Address implementation.
        address._coin = coin
        x_pubkey = XPublicKey(raw_hex)
        assert x_pubkey.to_bytes() == bytes.fromhex(raw_hex)
        assert x_pubkey.to_hex() == raw_hex
        assert not x_pubkey.is_bip32_key()
        assert x_pubkey.to_public_key() == address
        assert x_pubkey.to_address() == address
        assert x_pubkey.to_address().coin() is coin

    def test_fd_read_write(self):
        """A transaction whose input embeds an 'fd' x_pubkey round-trips."""
        tx_hex = (
            '0100000001de8ead15a3044065ed8274b79af5fe7f860f5a026c241e9dd93dd3ce26208aeb010000001'
            'd01ff1afd76a9148c16fd67cdf85cdd2b7686081152424159c3eb3388acfeffffffb06eb70000000000'
            '01f06db700000000001976a9148c16fd67cdf85cdd2b7686081152424159c3eb3388ac7ce40800'
        )
        tx = Transaction.from_hex(tx_hex)
        assert tx.serialize() == tx_hex
| 77.802985 | 777 | 0.843846 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.