hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 417k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 1
class | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f730e75319233d0ac8ad0b52753232168804b1d8 | 13,463 | py | Python | pyinduct/parabolic/general.py | pyinduct/pyinduct | f743ecfcee3b940505ec95b48fa07bb1648cfcc1 | [
"BSD-3-Clause"
] | 1 | 2017-01-26T14:26:55.000Z | 2017-01-26T14:26:55.000Z | pyinduct/parabolic/general.py | pyinduct/pyinduct | f743ecfcee3b940505ec95b48fa07bb1648cfcc1 | [
"BSD-3-Clause"
] | 89 | 2017-01-31T12:52:18.000Z | 2020-12-14T23:31:53.000Z | pyinduct/parabolic/general.py | pyinduct/pyinduct | f743ecfcee3b940505ec95b48fa07bb1648cfcc1 | [
"BSD-3-Clause"
] | 5 | 2017-01-26T14:35:41.000Z | 2018-10-23T06:52:32.000Z | from collections.abc import Callable
import warnings
import numpy as np
from scipy.optimize import fsolve
from ..core import Function, find_roots, ConstantFunction
from ..eigenfunctions import SecondOrderOperator
from ..placeholder import (ScalarFunction, TestFunction, FieldVariable, ScalarTerm,
IntegralTerm, Input, Product)
from ..simulation import WeakFormulation
__all__ = ["compute_rad_robin_eigenfrequencies", "eliminate_advection_term", "get_parabolic_dirichlet_weak_form",
"get_parabolic_robin_weak_form", "get_in_domain_transformation_matrix"]
def compute_rad_robin_eigenfrequencies(param, l, n_roots=10, show_plot=False):
    r"""
    Compute the first :code:`n_roots` eigenfrequencies :math:`\omega` and the
    matching eigenvalues :math:`\lambda`

    .. math:: \omega = \sqrt{-\frac{a_1^2}{4a_2^2}+\frac{a_0-\lambda}{a_2}}

    of the eigenvalue problem

    .. math::
        a_2\varphi''(z) + a_1\varphi'(z) + a_0\varphi(z) &= \lambda\varphi(z) \\
        \varphi'(0) &= \alpha\varphi(0) \\
        \varphi'(l) &= -\beta\varphi(l).

    Args:
        param (array_like): :math:`\big(a_2, a_1, a_0, \alpha, \beta\big)^T`.
        l (numbers.Number): Right end of the spatial domain :math:`[0, l]`.
        n_roots (int): Number of eigenfrequencies to compute.
        show_plot (bool): Accepted for API compatibility; unused in this
            implementation.

    Return:
        tuple: Two numpy arrays of length :code:`n_roots` holding the
        eigenfrequencies and the corresponding eigenvalues.
    """
    a2, a1, a0, alpha, beta = param
    eta = -a1 / 2. / a2

    def char_eq(om):
        # characteristic equation for real eigenfrequencies, with a
        # separate limit expression near om = 0
        if np.round(om, 200) != 0.:
            return ((alpha + beta) * np.cos(om * l)
                    + ((eta + beta) * (alpha - eta) / om - om) * np.sin(om * l))
        return ((alpha + beta) * np.cos(om * l)
                + (eta + beta) * (alpha - eta) * l - om * np.sin(om * l))

    def char_eq_hyperbolic(om):
        # hyperbolic counterpart, used to hunt for a complex root
        if np.round(om, 200) != 0.:
            return ((alpha + beta) * np.cosh(om * l)
                    + ((eta + beta) * (alpha - eta) / om + om) * np.sinh(om * l))
        return ((alpha + beta) * np.cosh(om * l)
                + (eta + beta) * (alpha - eta) * l + om * np.sinh(om * l))

    # assume 1 root per pi/l (safety factor = 3)
    om_end = 3 * n_roots * np.pi / l
    om = find_roots(char_eq,
                    np.arange(0, om_end, .1),
                    2 * n_roots,
                    rtol=l * 1e-6).tolist()

    # drop everything clustered around om = 0
    n_small = np.sum(np.array(om) < np.pi / l / 2e1)
    om = om[n_small:]

    # if om = 0 actually is a root, put it back at the front
    zero_limit = alpha + beta + (eta + beta) * (alpha - eta) * l
    if np.round(zero_limit, 6 + int(np.log10(l))) == 0.:
        om.insert(0, 0.)

    # regard complex roots (negative squared eigenfrequencies)
    om_squared = np.power(om, 2).tolist()
    complex_root = fsolve(char_eq_hyperbolic, om_end)
    if np.round(complex_root, 6 + int(np.log10(l))) != 0.:
        om_squared.insert(0, -complex_root[0] ** 2)

    # basically complex eigenfrequencies
    om = np.sqrt(np.array(om_squared).astype(complex))
    if len(om) < n_roots:
        raise ValueError("RadRobinEigenvalues.compute_eigen_frequencies()"
                         "can not find enough roots")

    eig_frequencies = om[:n_roots]
    eig_values = a0 - a2 * eig_frequencies ** 2 - a1 ** 2 / 4. / a2
    return eig_frequencies, eig_values
def eliminate_advection_term(param, domain_end):
    r"""
    Perform the transformation

    .. math:: \tilde x(z,t)=x(z,t)
        e^{\int_0^z \frac{a_1(\bar z)}{2 a_2}\,d\bar z} ,

    which eliminates the advection term :math:`a_1 x'(z,t)` from a
    reaction-advection-diffusion equation of the type

    .. math:: \dot x(z,t) = a_2 x''(z,t) + a_1(z) x'(z,t) + a_0(z) x(z,t) .

    The boundary conditions may be robin

    .. math:: x'(0,t) = \alpha x(0,t), \quad x'(l,t) = -\beta x(l,t) ,

    dirichlet

    .. math:: x(0,t) = 0, \quad x(l,t) = 0

    or mixed.

    Args:
        param (array_like or SecondOrderOperator):
            :math:`\big(a_2, a_1, a_0, \alpha, \beta\big)^T` or an
            equivalent :py:class:`.SecondOrderOperator`.
        domain_end (float): Upper bound of the spatial domain.

    Raises:
        TypeError: If ``param`` is neither a 5 element tuple/list nor a
            :py:class:`.SecondOrderOperator`, or if :math:`a_1(z)` is
            callable but no derivative handle is defined for it.

    Return:
        SecondOrderOperator or tuple: Parameters
        :math:`\big(a_2, \tilde a_1=0, \tilde a_0(z), \tilde\alpha,
        \tilde\beta\big)` of the transformed system

        .. math:: \dot{\tilde{x}}(z,t) = a_2 \tilde x''(z,t) +
            \tilde a_0(z) \tilde x(z,t)

        with the corresponding boundary conditions (:math:`\alpha` and/or
        :math:`\beta` stay ``None`` for dirichlet boundary conditions).
    """
    # TODO remove this compatibility wrapper and promote use of new Operator
    # class over the entire toolbox.
    if isinstance(param, SecondOrderOperator):
        a2 = param.a2
        a1 = param.a1
        a0 = param.a0
        alpha = -param.alpha0
        beta = param.beta0
    else:
        if not isinstance(param, (tuple, list)) or not len(param) == 5:
            # fixed: message previously referenced the obsolete name
            # "pyinduct.utils.transform_2_intermediate()"
            raise TypeError("eliminate_advection_term(): "
                            "argument param must be of type tuple or list")
        a2, a1, a0, alpha, beta = param

    # transformed reaction coefficient: a0 - a1^2/(4 a2) - a1'/2
    if isinstance(a1, Function):
        # spatially varying a1 requires a derivative handle (a1.derive)
        if not isinstance(a0, Callable):
            a0_z = ConstantFunction(a0)
        else:
            a0_z = a0

        def a0_n(z):
            return a0_z(z) - a1(z) ** 2 / 4 / a2 - a1.derive(1)(z) / 2
    else:
        a0_n = a0 - a1 ** 2 / 4 / a2

    # transformed left boundary parameter (None kept for dirichlet b.c.)
    if alpha is None:
        alpha_n = None
    elif isinstance(a1, Callable):
        # NOTE(review): this branch tests Callable while the a0_n and beta_n
        # dispatch tests Function -- presumably they should agree; verify.
        alpha_n = a1(0) / 2. / a2 + alpha
    else:
        alpha_n = a1 / 2. / a2 + alpha

    # transformed right boundary parameter (None kept for dirichlet b.c.)
    if beta is None:
        beta_n = None
    elif isinstance(a1, Function):
        beta_n = -a1(domain_end) / 2. / a2 + beta
    else:
        beta_n = -a1 / 2. / a2 + beta

    a2_n = a2
    a1_n = 0

    # TODO see above.
    if isinstance(param, SecondOrderOperator):
        # NOTE(review): alpha1 is populated from param.beta1 here -- this
        # looks like a copy/paste slip (param.alpha1 expected); confirm
        # against SecondOrderOperator's semantics before changing.
        return SecondOrderOperator(a2=a2_n, a1=0, a0=a0_n,
                                   alpha1=param.beta1, alpha0=-alpha_n,
                                   beta1=param.beta1, beta0=beta_n)
    else:
        return a2_n, a1_n, a0_n, alpha_n, beta_n
def get_parabolic_dirichlet_weak_form(init_func_label,
                                      test_func_label,
                                      input_handle,
                                      param,
                                      spatial_domain):
    """
    Assemble the weak formulation of a parabolic 2nd order system with an
    inhomogeneous dirichlet boundary at both sides.

    Args:
        init_func_label(str): Label of shape base to use.
        test_func_label(str): Label of test base to use.
        input_handle(:py:class:`.SimulationInput`): Input, or ``None`` for
            the homogeneous case.
        param(tuple): Parameters ``(a2, a1, a0, alpha, beta)`` of the
            spatial operator.
        spatial_domain(tuple): Spatial domain of the problem.

    Returns:
        :py:class:`.WeakFormulation`: Weak form of the system.
    """
    a2, a1, a0, _, _ = param
    z_end = spatial_domain[1]

    field = FieldVariable(init_func_label)
    field_dt = field.derive(temp_order=1)
    field_dz = field.derive(spat_order=1)
    field_ddz = field.derive(spat_order=2)

    test = TestFunction(test_func_label)
    test_dz = test.derive(1)
    test_ddz = test.derive(2)

    # volume (integral) contributions
    integrals = [
        IntegralTerm(Product(field_dt, test), spatial_domain),
        IntegralTerm(Product(field, test_dz), spatial_domain, a1),
        IntegralTerm(Product(field, test), spatial_domain, -a0),
    ]

    if input_handle is None:
        # homogeneous case: keep the diffusion term in its strong form
        integrals.insert(1, IntegralTerm(Product(field_ddz, test),
                                         spatial_domain, -a2))
        return WeakFormulation(integrals, name="parabolic_dirichlet_hom")

    # actuated case: diffusion term shifted onto the test function plus the
    # resulting boundary (scalar) contributions
    integrals.insert(1, IntegralTerm(Product(field, test_ddz),
                                     spatial_domain, -a2))
    boundary = [
        ScalarTerm(Product(Input(input_handle), test_dz(z_end)), a2),
        ScalarTerm(Product(Input(input_handle), test(z_end)), -a1),
        ScalarTerm(Product(field_dz(z_end), test(z_end)), -a2),
        ScalarTerm(Product(field_dz(0), test(0)), a2),
    ]
    return WeakFormulation(integrals + boundary, name="parabolic_dirichlet")
def get_parabolic_robin_weak_form(shape_base_label, test_base_label,
                                  input_handle, param, spatial_domain,
                                  actuation_type_point=None):
    r"""
    Provide the weak formulation for the diffusion system with advection
    term, reaction term, robin boundary condition and robin actuation:

    .. math::
        :nowrap:

        \begin{align*}
            \dot x(z,t) &= a_2 x''(z,t) + a_1(z) x'(z,t) + a_0(z) x(z,t),
            && z\in (0, l) \\
            x'(0,t) &= \alpha x(0,t) \\
            x'(l,t) &= -\beta x(l,t) + u(t)
        \end{align*}

    Args:
        shape_base_label (str): State space base label.
        test_base_label (str): Test base label.
        input_handle (:py:class:`.SimulationInput`): System input, may be
            ``None`` for the autonomous system.
        param (array-like): Parameters ``(a2, a1, a0, alpha, beta)`` where
            :math:`a_1(z)` and :math:`a_0(z)` may be callables of ``z``.
        spatial_domain (tuple): Limits of the spatial domain
            :math:`(0,l) \ni z`.
        actuation_type_point (numbers.Number): Point of actuation; defaults
            to the right boundary :math:`z = l`.

    Returns:
        tuple:
            - :py:class:`.WeakFormulation`
            - labels of the bases created for the advection and reaction
              coefficients
    """
    if actuation_type_point is None:
        actuation_type_point = spatial_domain[1]

    a2, a1, a0, alpha, beta = param
    z_end = spatial_domain[1]

    # wrap a0 and a1 as ScalarFunctions so spatially varying coefficients
    # are handled transparently
    created_base_labels = (shape_base_label + "a0_z",
                           shape_base_label + "a1_z")
    a0_z = ScalarFunction.from_scalar(a0, created_base_labels[0])
    a1_z = ScalarFunction.from_scalar(a1, created_base_labels[1])

    field = FieldVariable(shape_base_label)
    field_dt = field.derive(temp_order=1)
    field_dz = field.derive(spat_order=1)

    test = TestFunction(test_base_label, order=0)
    test_dz = test.derive(1)

    # integral terms followed by the boundary (scalar) terms
    terms = [
        IntegralTerm(Product(field_dt, test), spatial_domain),
        IntegralTerm(Product(field_dz, test_dz), spatial_domain, a2),
        IntegralTerm(Product(Product(field_dz, a1_z), test),
                     spatial_domain, -1),
        IntegralTerm(Product(Product(field, a0_z), test),
                     spatial_domain, -1),
        ScalarTerm(Product(field(0), test(0)), a2 * alpha),
        ScalarTerm(Product(field(z_end), test(z_end)), a2 * beta),
    ]

    # consider input if given
    if input_handle is not None:
        terms.append(ScalarTerm(
            Product(Input(input_handle), test(actuation_type_point)), -a2))

    # derive state-space system
    weak_form = WeakFormulation(
        terms, name="parabolic_robin_{}_{}".format(param, shape_base_label))
    return weak_form, created_base_labels
def get_in_domain_transformation_matrix(k1, k2, mode='n_plus_1'):
    r"""
    Return the transformation matrix M.

    M is one part of a transformation

    .. math::
        x = My + Ty

    where x is the field variable of an interior point controlled parabolic
    system and y is the field variable of a boundary controlled parabolic
    system. T is a (Fredholm-) integral transformation (which can be
    approximated with M).

    Args:
        k1: First summand of :math:`n = k_1 + k_2`; a non-negative integer
            (integral floats such as ``1.0`` are accepted).
        k2: Second summand; a non-negative integer.
        mode: Available modes

            - `n_plus_1`:
              M.shape = :math:`(n+1,n+1), w = (w(0),...,w(n))^T, w \in {x,y}`
            - `2n`:
              M.shape = (2n,2n), :math:`w = (w(0),...,w(n),...,w(1))^T, w \in {x,y}`

    Raises:
        TypeError: If ``k1`` or ``k2`` is not an integral number.
        ValueError: If :math:`n = k_1 + k_2 < 2`, a summand is negative or
            ``mode`` is unknown.

    Return:
        numpy.array: Transformation matrix M.
    """
    if not all(isinstance(i, (int, float)) for i in [k1, k2]):
        # fixed: both messages used to be the placeholder "TypeErrorMessage"
        raise TypeError("k1 and k2 must be numbers")
    if not all(i % 1 == 0 for i in [k1, k2]):
        raise TypeError("k1 and k2 must be integral numbers")
    # normalize integral floats so they are valid numpy diagonal offsets
    k1, k2 = int(k1), int(k2)
    n = k1 + k2
    # (the former `k1 + k2 != n` check was a tautology and has been dropped)
    if n < 2 or k1 < 0 or k2 < 0:
        raise ValueError("The sum of two positive integers k1 and k2 must be n.")
    if (k1 != 0 and k2 != 0) and n % 2 == 0:
        warnings.warn("Transformation matrix M is not invertible.")

    def mod_diag(size, offset):
        # square matrix of the given size with ones on the given
        # off-diagonal
        return np.diag(np.ones(size - abs(offset)), offset)

    if mode == 'n_plus_1':
        M = np.zeros((n + 1, n + 1))
        if k2 < n:
            M += mod_diag(n + 1, k2) + mod_diag(n + 1, -k2)
        if k2 != 0:
            M += np.fliplr(mod_diag(n + 1, n - k2) + mod_diag(n + 1, -n + k2))
    elif mode == '2n':
        M = (mod_diag(2 * n, k2) + mod_diag(2 * n, -k2)
             + mod_diag(2 * n, n + k1) + mod_diag(2 * n, -n - k1))
    else:
        raise ValueError("String in variable 'mode' not understood.")

    # the assembled matrix is scaled by one half (e.g. for k2 = 0 the two
    # mirrored diagonals coincide and would otherwise be counted twice)
    return M * 0.5
| 34.609254 | 113 | 0.587016 | from collections.abc import Callable
import warnings
import numpy as np
from scipy.optimize import fsolve
from ..core import Function, find_roots, ConstantFunction
from ..eigenfunctions import SecondOrderOperator
from ..placeholder import (ScalarFunction, TestFunction, FieldVariable, ScalarTerm,
IntegralTerm, Input, Product)
from ..simulation import WeakFormulation
__all__ = ["compute_rad_robin_eigenfrequencies", "eliminate_advection_term", "get_parabolic_dirichlet_weak_form",
"get_parabolic_robin_weak_form", "get_in_domain_transformation_matrix"]
def compute_rad_robin_eigenfrequencies(param, l, n_roots=10, show_plot=False):
a2, a1, a0, alpha, beta = param
eta = -a1 / 2. / a2
def characteristic_equation(om):
if np.round(om, 200) != 0.:
zero = (alpha + beta) * np.cos(om * l) + ((eta + beta) * (alpha - eta) / om - om) * np.sin(om * l)
else:
zero = (alpha + beta) * np.cos(om * l) + (eta + beta) * (alpha - eta) * l - om * np.sin(om * l)
return zero
def complex_characteristic_equation(om):
if np.round(om, 200) != 0.:
zero = (alpha + beta) * np.cosh(om * l) + ((eta + beta) * (alpha - eta) / om + om) * np.sinh(om * l)
else:
zero = (alpha + beta) * np.cosh(om * l) + (eta + beta) * (alpha - eta) * l + om * np.sinh(om * l)
return zero
om_end = 3 * n_roots * np.pi / l
start_values = np.arange(0, om_end, .1)
om = find_roots(characteristic_equation,
start_values,
2 * n_roots,
rtol=l*1e-6).tolist()
om.reverse()
for i in range(np.sum(np.array(om) < np.pi / l / 2e1)):
om.pop()
om.reverse()
zero_limit = alpha + beta + (eta + beta) * (alpha - eta) * l
if np.round(zero_limit, 6 + int(np.log10(l))) == 0.:
om.insert(0, 0.)
om_squared = np.power(om, 2).tolist()
complex_root = fsolve(complex_characteristic_equation, om_end)
if np.round(complex_root, 6 + int(np.log10(l))) != 0.:
om_squared.insert(0, -complex_root[0] ** 2)
om = np.sqrt(np.array(om_squared).astype(complex))
if len(om) < n_roots:
raise ValueError("RadRobinEigenvalues.compute_eigen_frequencies()"
"can not find enough roots")
eig_frequencies = om[:n_roots]
eig_values = a0 - a2 * eig_frequencies ** 2 - a1 ** 2 / 4. / a2
return eig_frequencies, eig_values
def eliminate_advection_term(param, domain_end):
if isinstance(param, SecondOrderOperator):
a2 = param.a2
a1 = param.a1
a0 = param.a0
alpha = -param.alpha0
beta = param.beta0
else:
if not isinstance(param, (tuple, list)) or not len(param) == 5:
raise TypeError("pyinduct.utils.transform_2_intermediate(): "
"argument param must from type tuple or list")
a2, a1, a0, alpha, beta = param
if isinstance(a1, Function):
if not isinstance(a0, Callable):
a0_z = ConstantFunction(a0)
else:
a0_z = a0
def a0_n(z):
return a0_z(z) - a1(z) ** 2 / 4 / a2 - a1.derive(1)(z) / 2
else:
a0_n = a0 - a1 ** 2 / 4 / a2
if alpha is None:
alpha_n = None
elif isinstance(a1, Callable):
alpha_n = a1(0) / 2. / a2 + alpha
else:
alpha_n = a1 / 2. / a2 + alpha
if beta is None:
beta_n = None
elif isinstance(a1, Function):
beta_n = -a1(domain_end) / 2. / a2 + beta
else:
beta_n = -a1 / 2. / a2 + beta
a2_n = a2
a1_n = 0
if isinstance(param, SecondOrderOperator):
return SecondOrderOperator(a2=a2_n, a1=0, a0=a0_n,
alpha1=param.beta1, alpha0=-alpha_n,
beta1=param.beta1, beta0=beta_n)
else:
return a2_n, a1_n, a0_n, alpha_n, beta_n
def get_parabolic_dirichlet_weak_form(init_func_label,
test_func_label,
input_handle,
param,
spatial_domain):
a2, a1, a0, alpha, beta = param
l = spatial_domain[1]
x = FieldVariable(init_func_label)
x_dt = x.derive(temp_order=1)
x_dz = x.derive(spat_order=1)
x_ddz = x.derive(spat_order=2)
psi = TestFunction(test_func_label)
psi_dz = psi.derive(1)
psi_ddz = psi.derive(2)
int1 = IntegralTerm(Product(x_dt, psi), spatial_domain)
int2 = IntegralTerm(Product(x, psi_ddz), spatial_domain, -a2)
int2h = IntegralTerm(Product(x_ddz, psi), spatial_domain, -a2)
int3 = IntegralTerm(Product(x, psi_dz), spatial_domain, a1)
int4 = IntegralTerm(Product(x, psi), spatial_domain, -a0)
if input_handle is None:
return WeakFormulation([int1, int2h, int3, int4],
name="parabolic_dirichlet_hom")
s1 = ScalarTerm(Product(Input(input_handle), psi_dz(l)), a2)
s2 = ScalarTerm(Product(Input(input_handle), psi(l)), -a1)
s3 = ScalarTerm(Product(x_dz(l), psi(l)), -a2)
s4 = ScalarTerm(Product(x_dz(0), psi(0)), a2)
return WeakFormulation([int1, int2, int3, int4, s1, s2, s3, s4],
name="parabolic_dirichlet")
def get_parabolic_robin_weak_form(shape_base_label, test_base_label,
input_handle, param, spatial_domain,
actuation_type_point=None):
if actuation_type_point is None:
actuation_type_point = spatial_domain[1]
a2, a1, a0, alpha, beta = param
l = spatial_domain[1]
created_base_labels = (shape_base_label + "a0_z", shape_base_label + "a1_z")
a0_z = ScalarFunction.from_scalar(a0, created_base_labels[0])
a1_z = ScalarFunction.from_scalar(a1, created_base_labels[1])
x = FieldVariable(shape_base_label)
x_dt = x.derive(temp_order=1)
x_dz = x.derive(spat_order=1)
psi = TestFunction(test_base_label, order=0)
psi_dz = psi.derive(1)
int1 = IntegralTerm(Product(x_dt, psi), spatial_domain)
int2 = IntegralTerm(Product(x_dz, psi_dz), spatial_domain, a2)
int3 = IntegralTerm(Product(Product(x_dz, a1_z), psi), spatial_domain, -1)
int4 = IntegralTerm(Product(Product(x, a0_z), psi), spatial_domain, -1)
s1 = ScalarTerm(Product(x(0), psi(0)), a2 * alpha)
s2 = ScalarTerm(Product(x(l), psi(l)), a2 * beta)
terms = [int1, int2, int3, int4, s1, s2]
if input_handle is not None:
terms.append(ScalarTerm(
Product(Input(input_handle), psi(actuation_type_point)), -a2))
weak_form = WeakFormulation(
terms, name="parabolic_robin_{}_{}".format(param, shape_base_label))
return weak_form, created_base_labels
def get_in_domain_transformation_matrix(k1, k2, mode='n_plus_1'):
if not all(isinstance(i, (int, float)) for i in [k1, k2]):
raise TypeError("TypeErrorMessage")
if not all(i % 1 == 0 for i in [k1, k2]):
raise TypeError("TypeErrorMessage")
n = k1 + k2
if k1 + k2 != n or n < 2 or k1 < 0 or k2 < 0:
raise ValueError("The sum of two positive integers k1 and k2 must be n.")
if (k1 != 0 and k2 != 0) and n % 2 == 0:
warnings.warn("Transformation matrix M is not invertible.")
mod_diag = lambda n, k: np.diag(np.ones(n - np.abs(k)), k)
if mode == 'n_plus_1':
M = np.zeros((n + 1, n + 1))
if k2 < n:
M += mod_diag(n + 1, k2) + mod_diag(n + 1, -k2)
if k2 != 0:
M += np.fliplr(mod_diag(n + 1, n - k2) + mod_diag(n + 1, -n + k2))
elif mode == '2n':
M = mod_diag(2 * n, k2) + mod_diag(2 * n, -k2) + mod_diag(2 * n, n + k1) + mod_diag(2 * n, -n - k1)
else:
raise ValueError("String in variable 'mode' not understood.")
return M * 0.5
| true | true |
f730e7ff83b04baa1e0997a21573175be97d9f24 | 13,185 | py | Python | account/migrations/0011_initial.py | Amechi101/indieapp | 606c1346f65c343eb2cc8f7fba9d555b8c30a7fa | [
"MIT"
] | null | null | null | account/migrations/0011_initial.py | Amechi101/indieapp | 606c1346f65c343eb2cc8f7fba9d555b8c30a7fa | [
"MIT"
] | null | null | null | account/migrations/0011_initial.py | Amechi101/indieapp | 606c1346f65c343eb2cc8f7fba9d555b8c30a7fa | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: create the initial tables of the account app.

    Creates the ``Account``, ``SignupCode``, ``SignupCodeResult``,
    ``EmailAddress``, ``EmailConfirmation`` and ``AccountDeletion`` tables
    and declares the frozen ORM state South needs to run them.
    """

    def forwards(self, orm):
        """Apply the migration: create all account tables."""
        # Adding model 'Account'
        db.create_table(u'account_account', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('user', self.gf('django.db.models.fields.related.OneToOneField')(related_name=u'account', unique=True, to=orm['auth.User'])),
            ('full_name', self.gf('django.db.models.fields.CharField')(max_length=50, blank=True)),
            ('birthday', self.gf('django.db.models.fields.DateField')(null=True, blank=True)),
            ('city', self.gf('django.db.models.fields.CharField')(max_length=50, blank=True)),
            ('state', self.gf('django.db.models.fields.CharField')(max_length=2, blank=True)),
            ('timezone', self.gf('account.fields.TimeZoneField')(default=u'', max_length=100, blank=True)),
            ('language', self.gf('django.db.models.fields.CharField')(default='en-us', max_length=10)),
        ))
        db.send_create_signal(u'account', ['Account'])

        # Adding model 'SignupCode'
        db.create_table(u'account_signupcode', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('code', self.gf('django.db.models.fields.CharField')(unique=True, max_length=64)),
            ('max_uses', self.gf('django.db.models.fields.PositiveIntegerField')(default=0)),
            ('expiry', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
            ('inviter', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], null=True, blank=True)),
            ('email', self.gf('django.db.models.fields.EmailField')(max_length=75, blank=True)),
            ('notes', self.gf('django.db.models.fields.TextField')(blank=True)),
            ('sent', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
            ('created', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
            ('use_count', self.gf('django.db.models.fields.PositiveIntegerField')(default=0)),
        ))
        db.send_create_signal(u'account', ['SignupCode'])

        # Adding model 'SignupCodeResult'
        db.create_table(u'account_signupcoderesult', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('signup_code', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['account.SignupCode'])),
            ('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
            ('timestamp', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
        ))
        db.send_create_signal(u'account', ['SignupCodeResult'])

        # Adding model 'EmailAddress'
        db.create_table(u'account_emailaddress', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
            ('email', self.gf('django.db.models.fields.EmailField')(unique=True, max_length=75)),
            ('verified', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('primary', self.gf('django.db.models.fields.BooleanField')(default=False)),
        ))
        db.send_create_signal(u'account', ['EmailAddress'])

        # Adding model 'EmailConfirmation'
        db.create_table(u'account_emailconfirmation', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('email_address', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['account.EmailAddress'])),
            # NOTE(review): default frozen to a literal datetime, presumably
            # the moment this migration was generated -- verify it was not
            # meant to be datetime.datetime.now like the other tables.
            ('created', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime(2015, 1, 26, 0, 0))),
            ('sent', self.gf('django.db.models.fields.DateTimeField')(null=True)),
            ('key', self.gf('django.db.models.fields.CharField')(unique=True, max_length=64)),
        ))
        db.send_create_signal(u'account', ['EmailConfirmation'])

        # Adding model 'AccountDeletion'
        db.create_table(u'account_accountdeletion', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], null=True, on_delete=models.SET_NULL, blank=True)),
            ('email', self.gf('django.db.models.fields.EmailField')(max_length=75)),
            ('date_requested', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
            ('date_expunged', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
        ))
        db.send_create_signal(u'account', ['AccountDeletion'])

    def backwards(self, orm):
        """Reverse the migration: drop all tables created in forwards()."""
        # Deleting model 'Account'
        db.delete_table(u'account_account')

        # Deleting model 'SignupCode'
        db.delete_table(u'account_signupcode')

        # Deleting model 'SignupCodeResult'
        db.delete_table(u'account_signupcoderesult')

        # Deleting model 'EmailAddress'
        db.delete_table(u'account_emailaddress')

        # Deleting model 'EmailConfirmation'
        db.delete_table(u'account_emailconfirmation')

        # Deleting model 'AccountDeletion'
        db.delete_table(u'account_accountdeletion')

    # Frozen ORM model definitions (auto-generated snapshot used by South to
    # build the `orm` object passed to forwards()/backwards()).
    models = {
        u'account.account': {
            'Meta': {'object_name': 'Account'},
            'birthday': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'city': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
            'full_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'language': ('django.db.models.fields.CharField', [], {'default': "'en-us'", 'max_length': '10'}),
            'state': ('django.db.models.fields.CharField', [], {'max_length': '2', 'blank': 'True'}),
            'timezone': ('account.fields.TimeZoneField', [], {'default': "u''", 'max_length': '100', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "u'account'", 'unique': 'True', 'to': u"orm['auth.User']"})
        },
        u'account.accountdeletion': {
            'Meta': {'object_name': 'AccountDeletion'},
            'date_expunged': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'date_requested': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'})
        },
        u'account.emailaddress': {
            'Meta': {'object_name': 'EmailAddress'},
            'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '75'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'primary': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
            'verified': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
        },
        u'account.emailconfirmation': {
            'Meta': {'object_name': 'EmailConfirmation'},
            'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2015, 1, 26, 0, 0)'}),
            'email_address': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['account.EmailAddress']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
            'sent': ('django.db.models.fields.DateTimeField', [], {'null': 'True'})
        },
        u'account.signupcode': {
            'Meta': {'object_name': 'SignupCode'},
            'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'expiry': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'inviter': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
            'max_uses': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'sent': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'use_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
        },
        u'account.signupcoderesult': {
            'Meta': {'object_name': 'SignupCodeResult'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'signup_code': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['account.SignupCode']"}),
            'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
        },
        u'auth.group': {
            'Meta': {'object_name': 'Group'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        u'auth.permission': {
            'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        u'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        }
    }

    # Apps whose frozen state above is complete for this migration.
    complete_apps = ['account']
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
db.create_table(u'account_account', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('user', self.gf('django.db.models.fields.related.OneToOneField')(related_name=u'account', unique=True, to=orm['auth.User'])),
('full_name', self.gf('django.db.models.fields.CharField')(max_length=50, blank=True)),
('birthday', self.gf('django.db.models.fields.DateField')(null=True, blank=True)),
('city', self.gf('django.db.models.fields.CharField')(max_length=50, blank=True)),
('state', self.gf('django.db.models.fields.CharField')(max_length=2, blank=True)),
('timezone', self.gf('account.fields.TimeZoneField')(default=u'', max_length=100, blank=True)),
('language', self.gf('django.db.models.fields.CharField')(default='en-us', max_length=10)),
))
db.send_create_signal(u'account', ['Account'])
db.create_table(u'account_signupcode', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('code', self.gf('django.db.models.fields.CharField')(unique=True, max_length=64)),
('max_uses', self.gf('django.db.models.fields.PositiveIntegerField')(default=0)),
('expiry', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('inviter', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], null=True, blank=True)),
('email', self.gf('django.db.models.fields.EmailField')(max_length=75, blank=True)),
('notes', self.gf('django.db.models.fields.TextField')(blank=True)),
('sent', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('created', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
('use_count', self.gf('django.db.models.fields.PositiveIntegerField')(default=0)),
))
db.send_create_signal(u'account', ['SignupCode'])
db.create_table(u'account_signupcoderesult', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('signup_code', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['account.SignupCode'])),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('timestamp', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
))
db.send_create_signal(u'account', ['SignupCodeResult'])
db.create_table(u'account_emailaddress', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('email', self.gf('django.db.models.fields.EmailField')(unique=True, max_length=75)),
('verified', self.gf('django.db.models.fields.BooleanField')(default=False)),
('primary', self.gf('django.db.models.fields.BooleanField')(default=False)),
))
db.send_create_signal(u'account', ['EmailAddress'])
db.create_table(u'account_emailconfirmation', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('email_address', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['account.EmailAddress'])),
('created', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime(2015, 1, 26, 0, 0))),
('sent', self.gf('django.db.models.fields.DateTimeField')(null=True)),
('key', self.gf('django.db.models.fields.CharField')(unique=True, max_length=64)),
))
db.send_create_signal(u'account', ['EmailConfirmation'])
db.create_table(u'account_accountdeletion', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], null=True, on_delete=models.SET_NULL, blank=True)),
('email', self.gf('django.db.models.fields.EmailField')(max_length=75)),
('date_requested', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
('date_expunged', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
))
db.send_create_signal(u'account', ['AccountDeletion'])
def backwards(self, orm):
db.delete_table(u'account_account')
db.delete_table(u'account_signupcode')
db.delete_table(u'account_signupcoderesult')
db.delete_table(u'account_emailaddress')
db.delete_table(u'account_emailconfirmation')
db.delete_table(u'account_accountdeletion')
models = {
u'account.account': {
'Meta': {'object_name': 'Account'},
'birthday': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'full_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'default': "'en-us'", 'max_length': '10'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2', 'blank': 'True'}),
'timezone': ('account.fields.TimeZoneField', [], {'default': "u''", 'max_length': '100', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "u'account'", 'unique': 'True', 'to': u"orm['auth.User']"})
},
u'account.accountdeletion': {
'Meta': {'object_name': 'AccountDeletion'},
'date_expunged': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_requested': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'})
},
u'account.emailaddress': {
'Meta': {'object_name': 'EmailAddress'},
'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '75'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'primary': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'verified': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'account.emailconfirmation': {
'Meta': {'object_name': 'EmailConfirmation'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2015, 1, 26, 0, 0)'}),
'email_address': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['account.EmailAddress']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'sent': ('django.db.models.fields.DateTimeField', [], {'null': 'True'})
},
u'account.signupcode': {
'Meta': {'object_name': 'SignupCode'},
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'expiry': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'inviter': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'max_uses': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'sent': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'use_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
u'account.signupcoderesult': {
'Meta': {'object_name': 'SignupCodeResult'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'signup_code': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['account.SignupCode']"}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['account'] | true | true |
f730e8f89e65c95d296e2cde955fa32203c26f03 | 11,449 | py | Python | litex/build/efinix/ifacewriter.py | andykitchen/litex | 85d6cb4b8df98730bd83b412dff6108df188bbb4 | [
"ADSL"
] | null | null | null | litex/build/efinix/ifacewriter.py | andykitchen/litex | 85d6cb4b8df98730bd83b412dff6108df188bbb4 | [
"ADSL"
] | null | null | null | litex/build/efinix/ifacewriter.py | andykitchen/litex | 85d6cb4b8df98730bd83b412dff6108df188bbb4 | [
"ADSL"
] | 1 | 2022-02-11T01:07:07.000Z | 2022-02-11T01:07:07.000Z | #
# This file is part of LiteX.
#
# Copyright (c) 2021 Franck Jullien <franck.jullien@collshade.fr>
# SPDX-License-Identifier: BSD-2-Clause
import os
import csv
import re
import datetime
from xml.dom import expatbuilder
import xml.etree.ElementTree as et
from litex.build import tools
# XML namespace prefixes used when querying and patching the Efinity
# ".peri.xml" project file with xml.etree.ElementTree.
namespaces = {
    "efxpt" : "http://www.efinixinc.com/peri_design_db",
    "xi"    : "http://www.w3.org/2001/XInclude"
}
# Interface Writer Block ---------------------------------------------------------------------------
class InterfaceWriterBlock(dict):
    """Abstract dict-backed block rendered as Python-API script text.

    Concrete blocks override :meth:`generate` and return the script snippet
    they contribute to the generated Efinity design script.
    """
    def generate(self):
        """Render this block; subclasses must override."""
        raise NotImplementedError
class InterfaceWriterXMLBlock(dict):
    """Abstract dict-backed block patched into the ".peri.xml" file.

    Concrete blocks override :meth:`generate`; callers pass the XML root and
    the namespace map (see ``generate_xml_blocks``).
    """
    def generate(self):
        """Patch this block into the XML tree; subclasses must override."""
        raise NotImplementedError
# Interface Writer --------------------------------------------------------------------------------
class InterfaceWriter:
def __init__(self, efinity_path):
self.efinity_path = efinity_path
self.blocks = []
self.xml_blocks = []
self.filename = ""
self.platform = None
def set_build_params(self, platform, build_name):
self.filename = build_name
self.platform = platform
    def generate_xml_blocks(self):
        """Patch all collected XML blocks into "<filename>.peri.xml" in place.

        Custom InterfaceWriterXMLBlock instances render themselves; plain
        dict blocks are dispatched on their "type" key (LVDS / DRAM).
        """
        et.register_namespace("efxpt", "http://www.efinixinc.com/peri_design_db")
        tree = et.parse(self.filename + ".peri.xml")
        root = tree.getroot()
        for block in self.xml_blocks:
            if isinstance(block, InterfaceWriterXMLBlock):
                block.generate(root, namespaces)
            else:
                if block["type"] == "LVDS":
                    self.add_lvds_xml(root, block)
                if block["type"] == "DRAM":
                    self.add_dram_xml(root, block)
        # Re-serialize and pretty-print before writing the file back out.
        xml_string = et.tostring(root, "utf-8")
        reparsed = expatbuilder.parseString(xml_string, False)
        print_string = reparsed.toprettyxml(indent="    ")
        # Remove lines with only whitespaces. Not sure why they are here
        print_string = os.linesep.join([s for s in print_string.splitlines() if s.strip()])
        tools.write_to_file("{}.peri.xml".format(self.filename), print_string)
    def header(self, build_name, partnumber):
        """Return the boilerplate prologue of the generated Efinity script.

        The template sets up the Efinity environment variables and opens a
        new design via the DesignAPI.  Slots: {0}=efinity_path,
        {1}=verbosity (always rendered as "True" here), {2}=build_name,
        {3}=partnumber.
        """
        header = "# Autogenerated by LiteX / git: " + tools.get_litex_git_revision()
        header += """
import os
import sys
import pprint
home = "{0}"
os.environ["EFXPT_HOME"]  = home + "/pt"
os.environ["EFXPGM_HOME"] = home + "/pgm"
os.environ["EFXDBG_HOME"] = home + "/debugger"
os.environ["EFXIPM_HOME"] = home + "/ipm"
sys.path.append(home + "/pt/bin")
sys.path.append(home + "/lib/python3.8/site-packages")
from api_service.design import DesignAPI
from api_service.device import DeviceAPI
is_verbose = {1}
design = DesignAPI(is_verbose)
device = DeviceAPI(is_verbose)
design.create("{2}", "{3}", "./../gateware", overwrite=True)
"""
        return header.format(self.efinity_path, "True", build_name, partnumber)
def get_block(self, name):
for b in self.blocks:
if b["name"] == name:
return b
return None
def generate_gpio(self, block, verbose=True):
name = block["name"]
mode = block["mode"]
cmd = ""
if mode == "INOUT":
if len(block["location"]) == 1:
cmd += f'design.create_inout_gpio("{name}")\n'
cmd += f'design.assign_pkg_pin("{name}","{block["location"][0]}")\n'
else:
cmd += f'design.create_inout_gpio("{name}",{block["size"]-1},0)\n'
for i, pad in enumerate(block["location"]):
cmd += f'design.assign_pkg_pin("{name}[{i}]","{pad}")\n'
cmd += "\n"
return cmd
if mode == "INPUT":
if len(block["location"]) == 1:
cmd += f'design.create_input_gpio("{name}")\n'
cmd += f'design.assign_pkg_pin("{name}","{block["location"][0]}")\n'
else:
cmd += f'design.create_input_gpio("{name}",{block["size"]-1},0)\n'
for i, pad in enumerate(block["location"]):
cmd += f'design.assign_pkg_pin("{name}[{i}]","{pad}")\n'
if "in_reg" in block:
cmd += f'design.set_property("{name}","IN_REG","{block["in_reg"]}")\n'
cmd += f'design.set_property("{name}","IN_CLK_PIN","{block["in_clk_pin"]}")\n'
return cmd
if mode == "OUTPUT":
if len(block["location"]) == 1:
cmd += 'design.create_output_gpio("{}")\n'.format(name)
cmd += 'design.assign_pkg_pin("{}","{}")\n'.format(name, block["location"][0])
else:
cmd += 'design.create_input_gpio("{}",{},0)\n'.format(name, block["size"]-1)
for i, pad in enumerate(block["location"]):
cmd += 'design.assign_pkg_pin("{}[{}]","{}")\n'.format(name, i, pad)
if "out_reg" in block:
cmd += 'design.set_property("{}","OUT_REG","{}")\n'.format(name, block["out_reg"])
cmd += 'design.set_property("{}","OUT_CLK_PIN","{}")\n'.format(name, block["out_clk_pin"])
if "drive_strength" in block:
cmd += 'design.set_property("{}","DRIVE_STRENGTH","4")\n'.format(name, block["drive_strength"])
cmd += "\n"
return cmd
if mode == "INPUT_CLK":
cmd += 'design.create_input_clock_gpio("{}")\n'.format(name)
cmd += 'design.set_property("{}","IN_PIN","{}")\n'.format(name, name)
cmd += 'design.assign_pkg_pin("{}","{}")\n\n'.format(name, block["location"])
return cmd
if mode == "OUTPUT_CLK":
cmd += 'design.create_clockout_gpio("{}")\n'.format(name)
cmd += 'design.set_property("{}","OUT_CLK_PIN","{}")\n'.format(name, name)
cmd += 'design.assign_pkg_pin("{}","{}")\n\n'.format(name, block["location"])
return cmd
cmd = "# TODO: " + str(block) +"\n"
return cmd
    def generate_pll(self, block, partnumber, verbose=True):
        """Return Efinity-API Python code configuring the PLL described by *block*.

        ``partnumber`` selects the API variant (T4/T8 parts use the older
        PLL calling convention).  With ``verbose`` the emitted script also
        prints the resolved clock properties when executed.
        """
        name = block["name"]
        cmd = "# ---------- PLL {} ---------\n".format(name)
        cmd += 'design.create_block("{}", block_type="PLL")\n'.format(name)
        cmd += 'pll_config = {{ "REFCLK_FREQ":"{}" }}\n'.format(block["input_freq"] / 1e6)
        cmd += 'design.set_property("{}", pll_config, block_type="PLL")\n\n'.format(name)
        # Route the reference clock: from an external pad or from a core signal.
        if block["input_clock"] == "EXTERNAL":
            # PLL V1 has a different configuration
            if partnumber[0:2] in ["T4", "T8"]:
                cmd += 'design.gen_pll_ref_clock("{}", pll_res="{}", refclk_res="{}", refclk_name="{}", ext_refclk_no="{}")\n\n' \
                    .format(name, block["resource"], block["input_clock_pad"], block["input_clock_name"], block["clock_no"])
            else:
                cmd += 'design.gen_pll_ref_clock("{}", pll_res="{}", refclk_src="{}", refclk_name="{}", ext_refclk_no="{}")\n\n' \
                    .format(name, block["resource"], block["input_clock"], block["input_clock_name"], block["clock_no"])
        else:
            cmd += 'design.gen_pll_ref_clock("{}", pll_res="{}", refclk_name="{}", refclk_src="CORE")\n'.format(name, block["resource"], block["input_signal"])
            cmd += 'design.set_property("{}", "CORE_CLK_PIN", "{}", block_type="PLL")\n\n'.format(name, block["input_signal"])
        cmd += 'design.set_property("{}","LOCKED_PIN","{}", block_type="PLL")\n'.format(name, block["locked"])
        if block["rstn"] != "":
            cmd += 'design.set_property("{}","RSTN_PIN","{}", block_type="PLL")\n\n'.format(name, block["rstn"])
        # Output clock 0 is enabled by default
        for i, clock in enumerate(block["clk_out"]):
            if i > 0:
                cmd += 'pll_config = {{ "CLKOUT{}_EN":"1", "CLKOUT{}_PIN":"{}" }}\n'.format(i, i, clock[0])
            else:
                cmd += 'pll_config = {{ "CLKOUT{}_PIN":"{}" }}\n'.format(i, clock[0])
            cmd += 'design.set_property("{}", pll_config, block_type="PLL")\n\n'.format(name)
        # clock entries are (pin, freq_hz, phase) tuples.
        for i, clock in enumerate(block["clk_out"]):
            cmd += 'design.set_property("{}","CLKOUT{}_PHASE","{}","PLL")\n'.format(name, i, clock[2])
        cmd += "target_freq = {\n"
        for i, clock in enumerate(block["clk_out"]):
            cmd += '    "CLKOUT{}_FREQ": "{}",\n'.format(i, clock[1] / 1e6)
        cmd += "}\n"
        cmd += 'calc_result = design.auto_calc_pll_clock("{}", target_freq)\n'.format(name)
        if "extra" in block:
            cmd += block["extra"]
            cmd += "\n"
        if verbose:
            cmd += 'print("#### {} ####")\n'.format(name)
            cmd += 'clksrc_info = design.trace_ref_clock("{}", block_type="PLL")\n'.format(name)
            cmd += 'pprint.pprint(clksrc_info)\n'
            cmd += 'clock_source_prop = ["REFCLK_SOURCE", "CORE_CLK_PIN", "EXT_CLK", "CLKOUT1_EN", "CLKOUT2_EN","REFCLK_FREQ", "RESOURCE"]\n'
            cmd += 'clock_source_prop += ["CLKOUT0_FREQ", "CLKOUT1_FREQ", "CLKOUT2_FREQ"]\n'
            cmd += 'clock_source_prop += ["CLKOUT0_PHASE", "CLKOUT1_PHASE", "CLKOUT2_PHASE"]\n'
            cmd += 'prop_map = design.get_property("{}", clock_source_prop, block_type="PLL")\n'.format(name)
            cmd += 'pprint.pprint(prop_map)\n'
        cmd += "# ---------- END PLL {} ---------\n\n".format(name)
        return cmd
def generate(self, partnumber):
output = ""
for block in self.blocks:
if isinstance(block, InterfaceWriterBlock):
output += block.generate()
else:
if block["type"] == "PLL":
output += self.generate_pll(block, partnumber)
if block["type"] == "GPIO":
output += self.generate_gpio(block)
return output
    def footer(self):
        """Return the epilogue appended to the generated Efinity script.

        The returned text asks the Efinity API to check the design, emit the
        bitstream/constraints, and save the periphery configuration.
        """
        return """
# Check design, generate constraints and reports
design.generate(enable_bitstream=True)
# Save the configured periphery design
design.save()"""
    def add_lvds_xml(self, root, params):
        """Append an LVDS lane description to the ".peri.xml" tree *root*.

        ``params`` carries "name", "mode" (OUTPUT -> tx, else rx),
        "location", "fast_clk"/"slow_clk" and "serialisation".
        """
        lvds_info = root.find("efxpt:lvds_info", namespaces)
        if params["mode"] == "OUTPUT":
            dir = "tx"
            mode = "out"
        else:
            dir = "rx"
            mode = "in"
        # Derive the LVDS pad name from the first pin, folding P/N pairs
        # into the shared lane name.
        pad = self.platform.parser.get_gpio_instance_from_pin(params["location"][0])
        pad = pad.replace("TXP", "TX")
        pad = pad.replace("TXN", "TX")
        pad = pad.replace("RXP", "RX")
        pad = pad.replace("RXN", "RX")
        # Sometimes there is an extra identifier at the end
        # TODO: do a better parser
        if pad.count("_") == 2:
            pad = pad.rsplit("_", 1)[0]
        lvds = et.SubElement(lvds_info, "efxpt:lvds",
            name     = params["name"],
            lvds_def = pad,
            ops_type = dir
        )
        et.SubElement(lvds, "efxpt:ltx_info",
            pll_instance    = "",
            fast_clock_name = "{}".format(params["fast_clk"]),
            slow_clock_name = "{}".format(params["slow_clk"]),
            reset_name      = "",
            out_bname       = "{}".format(params["name"]),
            oe_name         = "",
            clock_div       = "1",
            mode            = "{}".format(mode),
            serialization   = "{}".format(params["serialisation"]),
            reduced_swing   = "false",
            load            = "3"
        )
| 40.17193 | 159 | 0.537427 |
import os
import csv
import re
import datetime
from xml.dom import expatbuilder
import xml.etree.ElementTree as et
from litex.build import tools
namespaces = {
"efxpt" : "http://www.efinixinc.com/peri_design_db",
"xi" : "http://www.w3.org/2001/XInclude"
}
class InterfaceWriterBlock(dict):
def generate(self):
raise NotImplementedError
class InterfaceWriterXMLBlock(dict):
def generate(self):
raise NotImplementedError
class InterfaceWriter:
def __init__(self, efinity_path):
self.efinity_path = efinity_path
self.blocks = []
self.xml_blocks = []
self.filename = ""
self.platform = None
def set_build_params(self, platform, build_name):
self.filename = build_name
self.platform = platform
def generate_xml_blocks(self):
et.register_namespace("efxpt", "http://www.efinixinc.com/peri_design_db")
tree = et.parse(self.filename + ".peri.xml")
root = tree.getroot()
for block in self.xml_blocks:
if isinstance(block, InterfaceWriterXMLBlock):
block.generate(root, namespaces)
else:
if block["type"] == "LVDS":
self.add_lvds_xml(root, block)
if block["type"] == "DRAM":
self.add_dram_xml(root, block)
xml_string = et.tostring(root, "utf-8")
reparsed = expatbuilder.parseString(xml_string, False)
print_string = reparsed.toprettyxml(indent=" ")
print_string = os.linesep.join([s for s in print_string.splitlines() if s.strip()])
tools.write_to_file("{}.peri.xml".format(self.filename), print_string)
def header(self, build_name, partnumber):
header = "# Autogenerated by LiteX / git: " + tools.get_litex_git_revision()
header += """
import os
import sys
import pprint
home = "{0}"
os.environ["EFXPT_HOME"] = home + "/pt"
os.environ["EFXPGM_HOME"] = home + "/pgm"
os.environ["EFXDBG_HOME"] = home + "/debugger"
os.environ["EFXIPM_HOME"] = home + "/ipm"
sys.path.append(home + "/pt/bin")
sys.path.append(home + "/lib/python3.8/site-packages")
from api_service.design import DesignAPI
from api_service.device import DeviceAPI
is_verbose = {1}
design = DesignAPI(is_verbose)
device = DeviceAPI(is_verbose)
design.create("{2}", "{3}", "./../gateware", overwrite=True)
"""
return header.format(self.efinity_path, "True", build_name, partnumber)
def get_block(self, name):
for b in self.blocks:
if b["name"] == name:
return b
return None
def generate_gpio(self, block, verbose=True):
name = block["name"]
mode = block["mode"]
cmd = ""
if mode == "INOUT":
if len(block["location"]) == 1:
cmd += f'design.create_inout_gpio("{name}")\n'
cmd += f'design.assign_pkg_pin("{name}","{block["location"][0]}")\n'
else:
cmd += f'design.create_inout_gpio("{name}",{block["size"]-1},0)\n'
for i, pad in enumerate(block["location"]):
cmd += f'design.assign_pkg_pin("{name}[{i}]","{pad}")\n'
cmd += "\n"
return cmd
if mode == "INPUT":
if len(block["location"]) == 1:
cmd += f'design.create_input_gpio("{name}")\n'
cmd += f'design.assign_pkg_pin("{name}","{block["location"][0]}")\n'
else:
cmd += f'design.create_input_gpio("{name}",{block["size"]-1},0)\n'
for i, pad in enumerate(block["location"]):
cmd += f'design.assign_pkg_pin("{name}[{i}]","{pad}")\n'
if "in_reg" in block:
cmd += f'design.set_property("{name}","IN_REG","{block["in_reg"]}")\n'
cmd += f'design.set_property("{name}","IN_CLK_PIN","{block["in_clk_pin"]}")\n'
return cmd
if mode == "OUTPUT":
if len(block["location"]) == 1:
cmd += 'design.create_output_gpio("{}")\n'.format(name)
cmd += 'design.assign_pkg_pin("{}","{}")\n'.format(name, block["location"][0])
else:
cmd += 'design.create_input_gpio("{}",{},0)\n'.format(name, block["size"]-1)
for i, pad in enumerate(block["location"]):
cmd += 'design.assign_pkg_pin("{}[{}]","{}")\n'.format(name, i, pad)
if "out_reg" in block:
cmd += 'design.set_property("{}","OUT_REG","{}")\n'.format(name, block["out_reg"])
cmd += 'design.set_property("{}","OUT_CLK_PIN","{}")\n'.format(name, block["out_clk_pin"])
if "drive_strength" in block:
cmd += 'design.set_property("{}","DRIVE_STRENGTH","4")\n'.format(name, block["drive_strength"])
cmd += "\n"
return cmd
if mode == "INPUT_CLK":
cmd += 'design.create_input_clock_gpio("{}")\n'.format(name)
cmd += 'design.set_property("{}","IN_PIN","{}")\n'.format(name, name)
cmd += 'design.assign_pkg_pin("{}","{}")\n\n'.format(name, block["location"])
return cmd
if mode == "OUTPUT_CLK":
cmd += 'design.create_clockout_gpio("{}")\n'.format(name)
cmd += 'design.set_property("{}","OUT_CLK_PIN","{}")\n'.format(name, name)
cmd += 'design.assign_pkg_pin("{}","{}")\n\n'.format(name, block["location"])
return cmd
cmd = "# TODO: " + str(block) +"\n"
return cmd
def generate_pll(self, block, partnumber, verbose=True):
name = block["name"]
cmd = "# ---------- PLL {} ---------\n".format(name)
cmd += 'design.create_block("{}", block_type="PLL")\n'.format(name)
cmd += 'pll_config = {{ "REFCLK_FREQ":"{}" }}\n'.format(block["input_freq"] / 1e6)
cmd += 'design.set_property("{}", pll_config, block_type="PLL")\n\n'.format(name)
if block["input_clock"] == "EXTERNAL":
if partnumber[0:2] in ["T4", "T8"]:
cmd += 'design.gen_pll_ref_clock("{}", pll_res="{}", refclk_res="{}", refclk_name="{}", ext_refclk_no="{}")\n\n' \
.format(name, block["resource"], block["input_clock_pad"], block["input_clock_name"], block["clock_no"])
else:
cmd += 'design.gen_pll_ref_clock("{}", pll_res="{}", refclk_src="{}", refclk_name="{}", ext_refclk_no="{}")\n\n' \
.format(name, block["resource"], block["input_clock"], block["input_clock_name"], block["clock_no"])
else:
cmd += 'design.gen_pll_ref_clock("{}", pll_res="{}", refclk_name="{}", refclk_src="CORE")\n'.format(name, block["resource"], block["input_signal"])
cmd += 'design.set_property("{}", "CORE_CLK_PIN", "{}", block_type="PLL")\n\n'.format(name, block["input_signal"])
cmd += 'design.set_property("{}","LOCKED_PIN","{}", block_type="PLL")\n'.format(name, block["locked"])
if block["rstn"] != "":
cmd += 'design.set_property("{}","RSTN_PIN","{}", block_type="PLL")\n\n'.format(name, block["rstn"])
for i, clock in enumerate(block["clk_out"]):
if i > 0:
cmd += 'pll_config = {{ "CLKOUT{}_EN":"1", "CLKOUT{}_PIN":"{}" }}\n'.format(i, i, clock[0])
else:
cmd += 'pll_config = {{ "CLKOUT{}_PIN":"{}" }}\n'.format(i, clock[0])
cmd += 'design.set_property("{}", pll_config, block_type="PLL")\n\n'.format(name)
for i, clock in enumerate(block["clk_out"]):
cmd += 'design.set_property("{}","CLKOUT{}_PHASE","{}","PLL")\n'.format(name, i, clock[2])
cmd += "target_freq = {\n"
for i, clock in enumerate(block["clk_out"]):
cmd += ' "CLKOUT{}_FREQ": "{}",\n'.format(i, clock[1] / 1e6)
cmd += "}\n"
cmd += 'calc_result = design.auto_calc_pll_clock("{}", target_freq)\n'.format(name)
if "extra" in block:
cmd += block["extra"]
cmd += "\n"
if verbose:
cmd += 'print("#### {} ####")\n'.format(name)
cmd += 'clksrc_info = design.trace_ref_clock("{}", block_type="PLL")\n'.format(name)
cmd += 'pprint.pprint(clksrc_info)\n'
cmd += 'clock_source_prop = ["REFCLK_SOURCE", "CORE_CLK_PIN", "EXT_CLK", "CLKOUT1_EN", "CLKOUT2_EN","REFCLK_FREQ", "RESOURCE"]\n'
cmd += 'clock_source_prop += ["CLKOUT0_FREQ", "CLKOUT1_FREQ", "CLKOUT2_FREQ"]\n'
cmd += 'clock_source_prop += ["CLKOUT0_PHASE", "CLKOUT1_PHASE", "CLKOUT2_PHASE"]\n'
cmd += 'prop_map = design.get_property("{}", clock_source_prop, block_type="PLL")\n'.format(name)
cmd += 'pprint.pprint(prop_map)\n'
cmd += "# ---------- END PLL {} ---------\n\n".format(name)
return cmd
def generate(self, partnumber):
output = ""
for block in self.blocks:
if isinstance(block, InterfaceWriterBlock):
output += block.generate()
else:
if block["type"] == "PLL":
output += self.generate_pll(block, partnumber)
if block["type"] == "GPIO":
output += self.generate_gpio(block)
return output
def footer(self):
return """
# Check design, generate constraints and reports
design.generate(enable_bitstream=True)
# Save the configured periphery design
design.save()"""
def add_lvds_xml(self, root, params):
lvds_info = root.find("efxpt:lvds_info", namespaces)
if params["mode"] == "OUTPUT":
dir = "tx"
mode = "out"
else:
dir = "rx"
mode = "in"
pad = self.platform.parser.get_gpio_instance_from_pin(params["location"][0])
pad = pad.replace("TXP", "TX")
pad = pad.replace("TXN", "TX")
pad = pad.replace("RXP", "RX")
pad = pad.replace("RXN", "RX")
if pad.count("_") == 2:
pad = pad.rsplit("_", 1)[0]
lvds = et.SubElement(lvds_info, "efxpt:lvds",
name = params["name"],
lvds_def = pad,
ops_type = dir
)
et.SubElement(lvds, "efxpt:ltx_info",
pll_instance = "",
fast_clock_name = "{}".format(params["fast_clk"]),
slow_clock_name = "{}".format(params["slow_clk"]),
reset_name = "",
out_bname = "{}".format(params["name"]),
oe_name = "",
clock_div = "1",
mode = "{}".format(mode),
serialization = "{}".format(params["serialisation"]),
reduced_swing = "false",
load = "3"
)
| true | true |
f730ec2bfb3b16a954fec00652bacff3f553b828 | 4,007 | py | Python | totem/TotemModel.py | amonmillner/playfulportfolio | d848d9c87d28fd9930c50e44cd7c5f4cdd2db93d | [
"MIT"
] | 1 | 2019-04-22T18:50:52.000Z | 2019-04-22T18:50:52.000Z | totem/TotemModel.py | amonmillner/playfulportfolio | d848d9c87d28fd9930c50e44cd7c5f4cdd2db93d | [
"MIT"
] | null | null | null | totem/TotemModel.py | amonmillner/playfulportfolio | d848d9c87d28fd9930c50e44cd7c5f4cdd2db93d | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created April 2019
@author: Amon Millner
This is a module that contains a class that serves as a model
for the totem game built, which is an example of the
Model-View-Controller (MVC) framework.
"""
import pygame, copy
from pygame.locals import *
import random
class TotemModel(object):
    """Encodes the state of the totem-stacking game (the M in MVC).

    Attributes:
        level: 1-based row the moving face currently travels on.
        foundation: dict mapping level number -> the Face placed there.
        direction: 'left' or 'right'; travel direction of the moving face.
        reset_game / won_game / new_game: flags polled by the controller.
    """

    def __init__(self, size=(640, 480), number_of_faces=0):
        """Set up a fresh game for a window of *size* pixels.

        number_of_faces is used as the upper bound of random.randint, which
        is inclusive on both ends -- presumably the largest valid face-image
        index (TODO confirm against the controller).
        """
        self.width, self.height = size
        self.level = 1
        self.foundation = {}          # faces already stacked, keyed by level
        self.direction = 'left'
        self.reset_game = 0
        self.won_game = 0
        self.new_game = 0
        self.number_of_faces = number_of_faces
        self.face_index = random.randint(0, number_of_faces)
        # BUG FIX: the face index was previously passed positionally into
        # Face's *velocity* parameter (third positional arg); pass it by
        # keyword so the face keeps its default velocity.
        self.face = Face(self.width, self.height, face_index=self.face_index)

    def addFaceToFoundation(self):
        """Puts a face in the game area on the current level where
        the user pressed the space key. Future rows will check the location
        of faces in the foundation to test whether a head can stack on top.
        """
        if self.level > 1:  # only check stacking when faces are below
            below = self.foundation[self.level - 1]
            # Out of bounds when less than half of the moving face overlaps
            # the face below it.
            if (self.face.x > (below.x + (self.face.width // 2))) \
                    or ((self.face.x + (self.face.width // 2)) < below.x):
                self.reset_game = 1  # sets the reset flag if out of bounds
                return
        self.oldface = copy.deepcopy(self.face)  # snapshot the moving face
        self.foundation[self.level] = self.oldface
        self.level += 1
        # picks a new face from the array of possible images
        self.face_index = random.randint(0, self.number_of_faces)

    def update(self):
        """Advance the game state by one tick: bounce at the walls and
        detect a win once the stack reaches the top of the window."""
        if self.face.x > (self.width - self.face.width):
            self.direction = 'left'
        elif self.face.x < 1:  # checks the left wall, changes direction
            self.direction = 'right'
        # checks to see whether the stack is high enough to win the game
        if (self.height - (self.face.height * self.level)) < self.face.height:
            self.won_game = 1
        else:
            # calls the face's update function, to help facilitate its drawing
            self.face.update(self.height - (self.face.height * self.level),
                             self.direction, self.level, self.face_index)

    def __str__(self):
        # BUG FIX: iterating a dict yields keys only, so the original
        # "for key, value in self.foundation" raised TypeError; iterate
        # the stored faces directly, one per line.
        return "\n".join(str(face) for face in self.foundation.values())
class Face(object):
    """ Encodes the state of a face in the game """

    def __init__(self, starting_x=0, starting_y=0, velocity=6, height=80,
                 width=80, face_index=0):
        """Create one face block.

        :param starting_x: initial left edge, in pixels
        :param starting_y: baseline; the face's top is placed one face-height
            above it so its bottom edge sits at starting_y
        :param velocity: base horizontal speed in pixels per update
        :param face_index: index of the image used to draw this face
        """
        self.height = height
        self.width = width
        self.x = starting_x
        self.y = starting_y - self.height
        self.velocity = velocity
        self.face_index = face_index

    def update(self, vertLocation, direction, level, new_face_index):
        """ update the state of the faces """
        if direction == 'right':
            self.x += (self.velocity + (level))  # adds speed as level increases
        else:
            self.x -= (self.velocity + (level))
        self.y = vertLocation
        if self.face_index != new_face_index:  # sets a new face upon level ups
            self.face_index = new_face_index

    def __str__(self):
        # Bug fix: the original format string had five %f specifiers for six
        # arguments, which raises "TypeError: not all arguments converted";
        # face_index now has its own field.
        return ("Face height=%f, width=%f, x=%f, y=%f, velocity=%f, "
                "face_index=%d" % (self.height, self.width, self.x, self.y,
                                   self.velocity, self.face_index))
| 38.902913 | 88 | 0.572498 |
import pygame, copy
from pygame.locals import *
import random
class TotemModel(object):
def __init__(self, size=(640,480),number_of_faces=0):
self.width, self.height = size
self.level = 1
self.foundation = {}
self.direction = 'left'
self.reset_game = 0
self.won_game = 0
self.new_game = 0
self.number_of_faces = number_of_faces
self.face_index = random.randint(0, number_of_faces)
self.face = Face(self.width, self.height, self.face_index)
def addFaceToFoundation(self):
if self.level > 1:
if (self.face.x > (self.foundation[self.level-1].x + (self.face.width//2)))\
or ((self.face.x + (self.face.width//2)) < self.foundation[self.level-1].x):
self.reset_game = 1
return
self.oldface = copy.deepcopy(self.face)
self.foundation[self.level] = self.oldface
self.level += 1
self.face_index = random.randint(0, self.number_of_faces)
def update(self):
if self.face.x > (self.width - self.face.width):
self.direction = 'left'
elif self.face.x < 1:
self.direction = 'right'
if (self.height - (self.face.height * self.level)) < self.face.height:
self.won_game = 1
else:
self.face.update(self.height - (self.face.height * self.level),
self.direction, self.level, self.face_index)
def __str__(self):
output_lines = []
# will detail each face as a string
for key, value in self.foundation:
output_lines.append(str(value))
# print one item per line
return "\n".join(output_lines)
class Face(object):
def __init__(self,starting_x=0,starting_y=0,velocity=6,height=80,width=80,
face_index=0):
self.height = height
self.width = width
self.x = starting_x
self.y = starting_y - self.height
self.velocity = velocity
self.face_index = face_index
def update(self, vertLocation, direction, level, new_face_index):
if direction == 'right':
self.x += (self.velocity + (level)) # adds speed as level increases
else:
self.x -= (self.velocity + (level))
self.y = vertLocation
if self.face_index != new_face_index: #sets a new face upon level ups
self.face_index = new_face_index
def __str__(self):
return "Face height=%f, width=%f, x=%f, y=%f, velocity=%f" % (self.height,
self.width,
self.x,
self.y,
self.velocity,
self.face_index)
| true | true |
f730ed6d33436dced0bf6a00b2f80e83c54ec3ea | 18,160 | py | Python | tests/utils/test_imageutil.py | shokakucarrier/atomic-reactor | 3ad81a5532cde317d0fddd4578f27438e88ee637 | [
"BSD-3-Clause"
] | null | null | null | tests/utils/test_imageutil.py | shokakucarrier/atomic-reactor | 3ad81a5532cde317d0fddd4578f27438e88ee637 | [
"BSD-3-Clause"
] | null | null | null | tests/utils/test_imageutil.py | shokakucarrier/atomic-reactor | 3ad81a5532cde317d0fddd4578f27438e88ee637 | [
"BSD-3-Clause"
] | null | null | null | """
Copyright (c) 2021 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
"""
import pytest
import tarfile
import io
import os
from flexmock import flexmock
from pathlib import Path
from osbs.utils import ImageName
from atomic_reactor import config
from atomic_reactor import util
from atomic_reactor.utils import imageutil, retries
@pytest.fixture
def df_images():
    """Provide a DockerfileImages instance seeded with a single base image."""
    base_images = ["registry.com/fedora:35"]
    return util.DockerfileImages(base_images)
@pytest.mark.parametrize(
    "image, is_inspectable",
    [
        ("scratch", False),
        ("koji/image-build", False),
        ("registry.com/foo/bar", True),
        # not covered: nobody should ever try to use scratch as an ImageName
        # (ImageName.parse("scratch"), False),
        (ImageName.parse("koji/image-build"), False),
        (ImageName.parse("registry.com/foo/bar"), True),
    ],
)
def test_inspectable(image, is_inspectable):
    """Inspectability is reported correctly for both str and ImageName inputs."""
    actual = imageutil.image_is_inspectable(image)
    assert actual == is_inspectable
def mock_tarball(tarball_path, files):
    """Write a gzip-compressed tarball containing the given members.

    ``files`` maps member name -> {'content': bytes or None, 'size': int};
    a falsy content is replaced by random bytes of the requested size.
    """
    with tarfile.open(tarball_path, 'w:gz') as archive:
        for name, spec in files.items():
            member = tarfile.TarInfo(name)
            member.size = spec['size']
            if spec['content']:
                payload = spec['content']
            else:
                payload = os.urandom(member.size)
            archive.addfile(member, io.BytesIO(payload))
class TestImageUtil:
    """Tests for the ImageUtil class."""

    # Minimal reactor configuration: only the pieces ImageUtil reads.
    config = config.Configuration(
        raw_config={
            "version": 1,
            # "registries": [], # relevant to RegistrySession, not directly relevant to ImageUtil
            "platform_descriptors": [{"platform": "x86_64", "architecture": "amd64"}],
        },
    )

    # Canned payload the fake registry client hands back.
    inspect_data = {"some": "inspect data as returned by RegistryClient.get_inspect_for_image"}

    def mock_get_registry_client(self, expect_image, expect_arch):
        """Make the _get_registry_client method return a fake RegistryClient."""
        registry_client = flexmock()
        (
            registry_client
            .should_receive("get_inspect_for_image")
            .with_args(expect_image, expect_arch)
            .once()
            .and_return(self.inspect_data)
        )
        (
            flexmock(imageutil.ImageUtil)
            .should_receive("_get_registry_client")
            .with_args(expect_image.registry)
            .once()
            .and_return(registry_client)
        )
        return registry_client

    def test_get_inspect_for_image(self, df_images):
        """Test get_inspect_for_image and its caching behavior."""
        image_util = imageutil.ImageUtil(df_images, self.config)
        image = ImageName.parse("registry.com/some-image:1")
        self.mock_get_registry_client(image, expect_arch=None)
        assert image_util.get_inspect_for_image(image) == self.inspect_data
        # check caching (the registry client mock expects its method to be called exactly once,
        # if imageutil didn't cache the result, it would get called twice)
        assert image_util.get_inspect_for_image(image) == self.inspect_data
        image_as_str = image.to_str()
        # should hit cache regardless of whether you pass a string or an ImageName
        assert image_util.get_inspect_for_image(image_as_str) == self.inspect_data

    @pytest.mark.parametrize(
        "platform, expect_goarch",
        [
            ("x86_64", "amd64"),  # platform is mapped to goarch
            ("s390x", "s390x"),  # platform is not mapped (goarch name is the same)
            ("amd64", "amd64"),  # pass goarch directly
        ],
    )
    def test_get_inspect_for_image_specific_platform(self, platform, expect_goarch, df_images):
        """Test that get_inspect_for_image handles the platform to goarch mapping properly."""
        image_util = imageutil.ImageUtil(df_images, self.config)
        image = ImageName.parse("registry.com/some-image:1")
        # main check: expect_arch
        self.mock_get_registry_client(image, expect_arch=expect_goarch)
        assert image_util.get_inspect_for_image(image, platform) == self.inspect_data
        # should hit cache regardless of whether you pass a platform or a goarch
        assert image_util.get_inspect_for_image(image, expect_goarch) == self.inspect_data

    def test_get_inspect_for_image_not_inspectable(self, df_images):
        """Test that passing a non-inspectable image raises an error."""
        image_util = imageutil.ImageUtil(df_images, self.config)
        custom_image = ImageName.parse("koji/image-build")
        with pytest.raises(ValueError, match=r"ImageName\(.*\) is not inspectable"):
            image_util.get_inspect_for_image(custom_image)

    @pytest.mark.parametrize("platform", [None, "x86_64"])
    def test_base_image_inspect(self, platform, df_images):
        """Test that base_image_inspect just calls get_inspect_for_image with the right args."""
        image_util = imageutil.ImageUtil(df_images, self.config)
        (
            flexmock(image_util)
            .should_receive("get_inspect_for_image")
            # base image in df_images
            .with_args(ImageName.parse("registry.com/fedora:35"), platform)
            .once()
            .and_return(self.inspect_data)
        )
        assert image_util.base_image_inspect(platform) == self.inspect_data

    @pytest.mark.parametrize("base_image", ["scratch", "koji/image-build"])
    def test_base_image_inspect_not_inspectable(self, base_image):
        """Test that inspecting a non-inspectable base image returns an empty dict."""
        image_util = imageutil.ImageUtil(util.DockerfileImages([base_image]), self.config)
        assert image_util.base_image_inspect() == {}

    def test_get_registry_client(self):
        """Test the method that makes a RegistryClient (other tests mock this method)."""
        image_util = imageutil.ImageUtil(util.DockerfileImages([]), self.config)
        registry_session = flexmock()
        (
            flexmock(util.RegistrySession)
            .should_receive("create_from_config")
            .with_args(self.config, "registry.com")
            .once()
            .and_return(registry_session)
        )
        flexmock(util.RegistryClient).should_receive("__init__").with_args(registry_session).once()
        image_util._get_registry_client("registry.com")
        # test caching (i.e. test that the create_from_config method is called only once)
        image_util._get_registry_client("registry.com")

    def test_extract_file_from_image_non_empty_dst_dir(self, tmpdir):
        """extract_file_from_image must refuse a non-empty destination directory."""
        image_util = imageutil.ImageUtil(util.DockerfileImages([]), self.config)
        image = 'registry.com/fedora:35'
        src_path = '/path/to/file'
        dst_path = Path(tmpdir) / 'dst_dir'
        dst_path.mkdir()
        file = dst_path / 'somefile.txt'
        file.touch()
        with pytest.raises(ValueError, match=f'the destination directory {dst_path} must be empty'):
            image_util.extract_file_from_image(image=image, src_path=src_path, dst_path=dst_path)

    def test_extract_file_from_image_no_file_extracted(self, tmpdir):
        """extract_file_from_image raises when 'oc image extract' produced nothing."""
        image_util = imageutil.ImageUtil(util.DockerfileImages([]), self.config)
        image = 'registry.com/fedora:35'
        src_path = '/path/to/file'
        dst_path = Path(tmpdir) / 'dst_dir'
        dst_path.mkdir()
        (
            flexmock(retries)
            .should_receive("run_cmd")
            .with_args(['oc', 'image', 'extract', image, '--path', f'{src_path}:{dst_path}'])
            .once()
        )
        with pytest.raises(
                ValueError,
                match=f"Extraction failed, files at path {src_path} not found in the image",
        ):
            image_util.extract_file_from_image(
                image=image, src_path=src_path, dst_path=dst_path
            )

    def test_extract_file_from_image(self, tmpdir):
        """Happy path: extract_file_from_image runs oc and finds the extracted file."""
        image_util = imageutil.ImageUtil(util.DockerfileImages([]), self.config)
        image = 'registry.com/fedora:35'
        src_path = '/path/to/file'
        dst_path = Path(tmpdir) / 'dst_dir'
        dst_path.mkdir()

        # mock the functionality of oc image extract
        # just creates a file in dst_path
        def mock_extract_file(cmd):
            file = dst_path / 'somefile.txt'
            file.touch()
        (
            flexmock(retries)
            .should_receive("run_cmd")
            .with_args(['oc', 'image', 'extract', image, '--path', f'{src_path}:{dst_path}'])
            .replace_with(mock_extract_file).once()
        )
        image_util.extract_file_from_image(image=image, src_path=src_path, dst_path=dst_path)

    def test_download_image_archive_tarball(self):
        """download_image_archive_tarball shells out to skopeo with the right args."""
        image_util = imageutil.ImageUtil(util.DockerfileImages([]), self.config)
        image = 'registry.com/fedora:35'
        path = '/tmp/path'
        (
            flexmock(retries)
            .should_receive("run_cmd")
            .with_args(['skopeo', 'copy', f'docker://{image}', f'docker-archive:{path}'])
            .once()
        )
        image_util.download_image_archive_tarball(image=image, path=path)

    def test_get_uncompressed_image_layer_sizes(self, tmpdir):
        """Layer sizes are paired with diff_ids from the image config."""
        image_util = imageutil.ImageUtil(util.DockerfileImages([]), self.config)
        path = Path(tmpdir) / 'tarball.tar'
        manifest_file_content = (
            '[{"Config":"62700350851fb36b2e770ba33639e9d111616d39fc63da8845a5e53e9ad013de.json",'
            '"RepoTags":[],'
            '"Layers":["92538e92de2938d7c4e279f871107b835bf0c8cc76a5a1655d66855706da18b0.tar"'
            ',"eb7bf34352ca9ba2fb0218870ac3c47b76d0b1fb7d50543d3ecfa497eca242b0.tar",'
            '"6da3b8e0475dcc80515944d0cc3f699429248df6b040f8dd7711e681387185e8.tar",'
            '"07adb74645fe71dec6917e5caca489018edf7ed94f29ac74398eca89c1b9458b.tar"]}]'
        ).encode('utf-8')
        config_file_content = (
            '{"rootfs": {"type": "layers", "diff_ids": '
            '["sha256:92538e92de2938d7c4e279f871107b835bf0c8cc76a5a1655d66855706da18b0", '
            '"sha256:eb7bf34352ca9ba2fb0218870ac3c47b76d0b1fb7d50543d3ecfa497eca242b0", '
            '"sha256:6da3b8e0475dcc80515944d0cc3f699429248df6b040f8dd7711e681387185e8", '
            '"sha256:07adb74645fe71dec6917e5caca489018edf7ed94f29ac74398eca89c1b9458b"]}}'
        ).encode("utf-8")
        mock_files = {
            "92538e92de2938d7c4e279f871107b835bf0c8cc76a5a1655d66855706da18b0.tar": {
                "content": None,
                "size": 1,
            },
            "eb7bf34352ca9ba2fb0218870ac3c47b76d0b1fb7d50543d3ecfa497eca242b0.tar": {
                "content": None,
                "size": 2,
            },
            "6da3b8e0475dcc80515944d0cc3f699429248df6b040f8dd7711e681387185e8.tar": {
                "content": None,
                "size": 3,
            },
            "07adb74645fe71dec6917e5caca489018edf7ed94f29ac74398eca89c1b9458b.tar": {
                "content": None,
                "size": 4,
            },
            "manifest.json": {
                "content": manifest_file_content,
                "size": len(manifest_file_content),
            },
            "62700350851fb36b2e770ba33639e9d111616d39fc63da8845a5e53e9ad013de.json": {
                "content": config_file_content,
                "size": len(config_file_content),
            },
        }
        mock_tarball(tarball_path=path, files=mock_files)
        actual_data = image_util.get_uncompressed_image_layer_sizes(path=path)
        expected_data = [
            {
                "diff_id": "sha256:92538e92de2938d7c4e279f871107b835bf0c8cc76a5a1655d66855706da18b0",  # noqa
                "size": 1,
            },
            {
                "diff_id": "sha256:eb7bf34352ca9ba2fb0218870ac3c47b76d0b1fb7d50543d3ecfa497eca242b0",  # noqa
                "size": 2,
            },
            {
                "diff_id": "sha256:6da3b8e0475dcc80515944d0cc3f699429248df6b040f8dd7711e681387185e8",  # noqa
                "size": 3,
            },
            {
                "diff_id": "sha256:07adb74645fe71dec6917e5caca489018edf7ed94f29ac74398eca89c1b9458b",  # noqa
                "size": 4,
            },
        ]
        assert actual_data == expected_data

    def test_get_uncompressed_image_layer_sizes_multiple_entries_in_manifest_json(self, tmpdir):
        """A manifest.json with more than one entry is rejected."""
        image_util = imageutil.ImageUtil(util.DockerfileImages([]), self.config)
        path = Path(tmpdir) / 'tarball.tar'
        manifest_file_content = (
            '[{"Config":"62700350851fb36b2e770ba33639e9d111616d39fc63da8845a5e53e9ad013de.json",'
            '"RepoTags":[],'
            '"Layers":["92538e92de2938d7c4e279f871107b835bf0c8cc76a5a1655d66855706da18b0.tar"'
            ',"eb7bf34352ca9ba2fb0218870ac3c47b76d0b1fb7d50543d3ecfa497eca242b0.tar",'
            '"6da3b8e0475dcc80515944d0cc3f699429248df6b040f8dd7711e681387185e8.tar",'
            '"07adb74645fe71dec6917e5caca489018edf7ed94f29ac74398eca89c1b9458b.tar"]}, '
            '{"Config": "ec3f0931a6e6b6855d76b2d7b0be30e81860baccd891b2e243280bf1cd8ad711.json"'
            ', "RepoTags": [], '
            '"Layers": ["d31505fd5050f6b96ca3268d1db58fc91ae561ddf14eaabc41d63ea2ef8c1c6e.tar"]}]'
        ).encode('utf-8')
        mock_files = {
            "manifest.json": {
                "content": manifest_file_content,
                "size": len(manifest_file_content),
            },
        }
        mock_tarball(tarball_path=path, files=mock_files)
        with pytest.raises(
                ValueError, match="manifest.json file has multiple entries, expected only one"
        ):
            image_util.get_uncompressed_image_layer_sizes(path=path)

    def test_extract_filesystem_layer(self, tmpdir):
        """Happy path: the single layer is extracted and its filename returned."""
        image_util = imageutil.ImageUtil(util.DockerfileImages([]), self.config)
        src_path = Path(tmpdir) / 'tarball.tar'
        dst_path = Path(tmpdir) / 'dst'
        expected_layer_filename = 'd31505fd5050f6b96ca3268d1db58fc91ae561ddf14eaabc41d63ea2ef8c1c6d.tar'  # noqa
        manifest_file_content = (
            '[{"Config": "ec3f0931a6e6b6855d76b2d7b0be30e81860baccd891b2e243280bf1cd8ad710.json"'
            ', "RepoTags": [], '
            '"Layers": ["d31505fd5050f6b96ca3268d1db58fc91ae561ddf14eaabc41d63ea2ef8c1c6d.tar"]}]'
        ).encode('utf-8')
        mocked_files = {
            'manifest.json': {'content': manifest_file_content, 'size': len(manifest_file_content)},
            expected_layer_filename: {'content': None, 'size': 1}
        }
        mock_tarball(tarball_path=src_path, files=mocked_files)
        actual_layer_filename = image_util.extract_filesystem_layer(src_path, dst_path)
        assert actual_layer_filename == expected_layer_filename
        assert (dst_path / expected_layer_filename).exists()

    def test_extract_filesystem_layer_more_than_one_layer_fail(self, tmpdir):
        """extract_filesystem_layer refuses a tarball with more than one layer."""
        image_util = imageutil.ImageUtil(util.DockerfileImages([]), self.config)
        src_path = Path(tmpdir) / 'tarball.tar'
        dst_path = Path(tmpdir) / 'dst'
        manifest_file_content = (
            '[{"Config":"62700350851fb36b2e770ba33639e9d111616d39fc63da8845a5e53e9ad013de.json",'
            '"RepoTags":[],'
            '"Layers":["92538e92de2938d7c4e279f871107b835bf0c8cc76a5a1655d66855706da18b0.tar"'
            ',"eb7bf34352ca9ba2fb0218870ac3c47b76d0b1fb7d50543d3ecfa497eca242b0.tar",'
            '"6da3b8e0475dcc80515944d0cc3f699429248df6b040f8dd7711e681387185e8.tar",'
            '"07adb74645fe71dec6917e5caca489018edf7ed94f29ac74398eca89c1b9458b.tar"]}]'
        ).encode('utf-8')
        mocked_files = {
            "92538e92de2938d7c4e279f871107b835bf0c8cc76a5a1655d66855706da18b0.tar": {
                "content": None,
                "size": 1,
            },
            "eb7bf34352ca9ba2fb0218870ac3c47b76d0b1fb7d50543d3ecfa497eca242b0.tar": {
                "content": None,
                "size": 2,
            },
            "6da3b8e0475dcc80515944d0cc3f699429248df6b040f8dd7711e681387185e8.tar": {
                "content": None,
                "size": 3,
            },
            "07adb74645fe71dec6917e5caca489018edf7ed94f29ac74398eca89c1b9458b.tar": {
                "content": None,
                "size": 4,
            },
            "manifest.json": {
                "content": manifest_file_content,
                "size": len(manifest_file_content),
            },
        }
        mock_tarball(tarball_path=src_path, files=mocked_files)
        with pytest.raises(ValueError, match=f'Tarball at {src_path} has more than 1 layer'):
            image_util.extract_filesystem_layer(src_path, dst_path)

    def test_extract_filesystem_layer_multiple_entries_in_manifest_json(self, tmpdir):
        """extract_filesystem_layer rejects a manifest.json with multiple entries."""
        image_util = imageutil.ImageUtil(util.DockerfileImages([]), self.config)
        src_path = Path(tmpdir) / 'tarball.tar'
        dst_path = Path(tmpdir) / 'dst'
        expected_layer_filename = 'd31505fd5050f6b96ca3268d1db58fc91ae561ddf14eaabc41d63ea2ef8c1c6d.tar'  # noqa
        manifest_file_content = (
            '[{"Config": "ec3f0931a6e6b6855d76b2d7b0be30e81860baccd891b2e243280bf1cd8ad710.json"'
            ', "RepoTags": [], '
            '"Layers": ["d31505fd5050f6b96ca3268d1db58fc91ae561ddf14eaabc41d63ea2ef8c1c6d.tar"]},'
            '{"Config": "ec3f0931a6e6b6855d76b2d7b0be30e81860baccd891b2e243280bf1cd8ad711.json"'
            ', "RepoTags": [], '
            '"Layers": ["d31505fd5050f6b96ca3268d1db58fc91ae561ddf14eaabc41d63ea2ef8c1c6e.tar"]}]'
        ).encode("utf-8")
        mocked_files = {
            'manifest.json': {'content': manifest_file_content, 'size': len(manifest_file_content)},
            expected_layer_filename: {'content': None, 'size': 1}
        }
        mock_tarball(tarball_path=src_path, files=mocked_files)
        with pytest.raises(
                ValueError, match="manifest.json file has multiple entries, expected only one"
        ):
            image_util.extract_filesystem_layer(src_path, dst_path)
| 42.931442 | 111 | 0.644934 |
import pytest
import tarfile
import io
import os
from flexmock import flexmock
from pathlib import Path
from osbs.utils import ImageName
from atomic_reactor import config
from atomic_reactor import util
from atomic_reactor.utils import imageutil, retries
@pytest.fixture
def df_images():
return util.DockerfileImages(["registry.com/fedora:35"])
@pytest.mark.parametrize(
"image, is_inspectable",
[
("scratch", False),
("koji/image-build", False),
("registry.com/foo/bar", True),
(ImageName.parse("koji/image-build"), False),
(ImageName.parse("registry.com/foo/bar"), True),
],
)
def test_inspectable(image, is_inspectable):
assert imageutil.image_is_inspectable(image) == is_inspectable
def mock_tarball(tarball_path, files):
with tarfile.open(tarball_path, 'w:gz') as tf:
for filename, file_data in files.items():
file = tarfile.TarInfo(filename)
file.size = file_data['size']
if file_data['content']:
tf.addfile(file, io.BytesIO(file_data['content']))
else:
tf.addfile(file, io.BytesIO(os.urandom(file.size)))
class TestImageUtil:
config = config.Configuration(
raw_config={
"version": 1,
hitecture": "amd64"}],
},
)
inspect_data = {"some": "inspect data as returned by RegistryClient.get_inspect_for_image"}
def mock_get_registry_client(self, expect_image, expect_arch):
registry_client = flexmock()
(
registry_client
.should_receive("get_inspect_for_image")
.with_args(expect_image, expect_arch)
.once()
.and_return(self.inspect_data)
)
(
flexmock(imageutil.ImageUtil)
.should_receive("_get_registry_client")
.with_args(expect_image.registry)
.once()
.and_return(registry_client)
)
return registry_client
def test_get_inspect_for_image(self, df_images):
image_util = imageutil.ImageUtil(df_images, self.config)
image = ImageName.parse("registry.com/some-image:1")
self.mock_get_registry_client(image, expect_arch=None)
assert image_util.get_inspect_for_image(image) == self.inspect_data
assert image_util.get_inspect_for_image(image) == self.inspect_data
image_as_str = image.to_str()
# should hit cache regardless of whether you pass a string or an ImageName
assert image_util.get_inspect_for_image(image_as_str) == self.inspect_data
@pytest.mark.parametrize(
"platform, expect_goarch",
[
("x86_64", "amd64"), # platform is mapped to goarch
("s390x", "s390x"), # platform is not mapped (goarch name is the same)
("amd64", "amd64"), # pass goarch directly
],
)
def test_get_inspect_for_image_specific_platform(self, platform, expect_goarch, df_images):
image_util = imageutil.ImageUtil(df_images, self.config)
image = ImageName.parse("registry.com/some-image:1")
# main check: expect_arch
self.mock_get_registry_client(image, expect_arch=expect_goarch)
assert image_util.get_inspect_for_image(image, platform) == self.inspect_data
# should hit cache regardless of whether you pass a platform or a goarch
assert image_util.get_inspect_for_image(image, expect_goarch) == self.inspect_data
def test_get_inspect_for_image_not_inspectable(self, df_images):
image_util = imageutil.ImageUtil(df_images, self.config)
custom_image = ImageName.parse("koji/image-build")
with pytest.raises(ValueError, match=r"ImageName\(.*\) is not inspectable"):
image_util.get_inspect_for_image(custom_image)
@pytest.mark.parametrize("platform", [None, "x86_64"])
def test_base_image_inspect(self, platform, df_images):
image_util = imageutil.ImageUtil(df_images, self.config)
(
flexmock(image_util)
.should_receive("get_inspect_for_image")
# base image in df_images
.with_args(ImageName.parse("registry.com/fedora:35"), platform)
.once()
.and_return(self.inspect_data)
)
assert image_util.base_image_inspect(platform) == self.inspect_data
@pytest.mark.parametrize("base_image", ["scratch", "koji/image-build"])
def test_base_image_inspect_not_inspectable(self, base_image):
image_util = imageutil.ImageUtil(util.DockerfileImages([base_image]), self.config)
assert image_util.base_image_inspect() == {}
def test_get_registry_client(self):
image_util = imageutil.ImageUtil(util.DockerfileImages([]), self.config)
registry_session = flexmock()
(
flexmock(util.RegistrySession)
.should_receive("create_from_config")
.with_args(self.config, "registry.com")
.once()
.and_return(registry_session)
)
flexmock(util.RegistryClient).should_receive("__init__").with_args(registry_session).once()
image_util._get_registry_client("registry.com")
# test caching (i.e. test that the create_from_config method is called only once)
image_util._get_registry_client("registry.com")
def test_extract_file_from_image_non_empty_dst_dir(self, tmpdir):
image_util = imageutil.ImageUtil(util.DockerfileImages([]), self.config)
image = 'registry.com/fedora:35'
src_path = '/path/to/file'
dst_path = Path(tmpdir) / 'dst_dir'
dst_path.mkdir()
file = dst_path / 'somefile.txt'
file.touch()
with pytest.raises(ValueError, match=f'the destination directory {dst_path} must be empty'):
image_util.extract_file_from_image(image=image, src_path=src_path, dst_path=dst_path)
def test_extract_file_from_image_no_file_extracted(self, tmpdir):
image_util = imageutil.ImageUtil(util.DockerfileImages([]), self.config)
image = 'registry.com/fedora:35'
src_path = '/path/to/file'
dst_path = Path(tmpdir) / 'dst_dir'
dst_path.mkdir()
(
flexmock(retries)
.should_receive("run_cmd")
.with_args(['oc', 'image', 'extract', image, '--path', f'{src_path}:{dst_path}'])
.once()
)
with pytest.raises(
ValueError,
match=f"Extraction failed, files at path {src_path} not found in the image",
):
image_util.extract_file_from_image(
image=image, src_path=src_path, dst_path=dst_path
)
def test_extract_file_from_image(self, tmpdir):
image_util = imageutil.ImageUtil(util.DockerfileImages([]), self.config)
image = 'registry.com/fedora:35'
src_path = '/path/to/file'
dst_path = Path(tmpdir) / 'dst_dir'
dst_path.mkdir()
# mock the functionality of oc image extract
# just creates a file in dst_path
def mock_extract_file(cmd):
file = dst_path / 'somefile.txt'
file.touch()
(
flexmock(retries)
.should_receive("run_cmd")
.with_args(['oc', 'image', 'extract', image, '--path', f'{src_path}:{dst_path}'])
.replace_with(mock_extract_file).once()
)
image_util.extract_file_from_image(image=image, src_path=src_path, dst_path=dst_path)
def test_download_image_archive_tarball(self):
image_util = imageutil.ImageUtil(util.DockerfileImages([]), self.config)
image = 'registry.com/fedora:35'
path = '/tmp/path'
(
flexmock(retries)
.should_receive("run_cmd")
.with_args(['skopeo', 'copy', f'docker://{image}', f'docker-archive:{path}'])
.once()
)
image_util.download_image_archive_tarball(image=image, path=path)
def test_get_uncompressed_image_layer_sizes(self, tmpdir):
image_util = imageutil.ImageUtil(util.DockerfileImages([]), self.config)
path = Path(tmpdir) / 'tarball.tar'
manifest_file_content = (
'[{"Config":"62700350851fb36b2e770ba33639e9d111616d39fc63da8845a5e53e9ad013de.json",'
'"RepoTags":[],'
'"Layers":["92538e92de2938d7c4e279f871107b835bf0c8cc76a5a1655d66855706da18b0.tar"'
',"eb7bf34352ca9ba2fb0218870ac3c47b76d0b1fb7d50543d3ecfa497eca242b0.tar",'
'"6da3b8e0475dcc80515944d0cc3f699429248df6b040f8dd7711e681387185e8.tar",'
'"07adb74645fe71dec6917e5caca489018edf7ed94f29ac74398eca89c1b9458b.tar"]}]'
).encode('utf-8')
config_file_content = (
'{"rootfs": {"type": "layers", "diff_ids": '
'["sha256:92538e92de2938d7c4e279f871107b835bf0c8cc76a5a1655d66855706da18b0", '
'"sha256:eb7bf34352ca9ba2fb0218870ac3c47b76d0b1fb7d50543d3ecfa497eca242b0", '
'"sha256:6da3b8e0475dcc80515944d0cc3f699429248df6b040f8dd7711e681387185e8", '
'"sha256:07adb74645fe71dec6917e5caca489018edf7ed94f29ac74398eca89c1b9458b"]}}'
).encode("utf-8")
mock_files = {
"92538e92de2938d7c4e279f871107b835bf0c8cc76a5a1655d66855706da18b0.tar": {
"content": None,
"size": 1,
},
"eb7bf34352ca9ba2fb0218870ac3c47b76d0b1fb7d50543d3ecfa497eca242b0.tar": {
"content": None,
"size": 2,
},
"6da3b8e0475dcc80515944d0cc3f699429248df6b040f8dd7711e681387185e8.tar": {
"content": None,
"size": 3,
},
"07adb74645fe71dec6917e5caca489018edf7ed94f29ac74398eca89c1b9458b.tar": {
"content": None,
"size": 4,
},
"manifest.json": {
"content": manifest_file_content,
"size": len(manifest_file_content),
},
"62700350851fb36b2e770ba33639e9d111616d39fc63da8845a5e53e9ad013de.json": {
"content": config_file_content,
"size": len(config_file_content),
},
}
mock_tarball(tarball_path=path, files=mock_files)
actual_data = image_util.get_uncompressed_image_layer_sizes(path=path)
expected_data = [
{
"diff_id": "sha256:92538e92de2938d7c4e279f871107b835bf0c8cc76a5a1655d66855706da18b0", # noqa
"size": 1,
},
{
"diff_id": "sha256:eb7bf34352ca9ba2fb0218870ac3c47b76d0b1fb7d50543d3ecfa497eca242b0", # noqa
"size": 2,
},
{
"diff_id": "sha256:6da3b8e0475dcc80515944d0cc3f699429248df6b040f8dd7711e681387185e8", # noqa
"size": 3,
},
{
"diff_id": "sha256:07adb74645fe71dec6917e5caca489018edf7ed94f29ac74398eca89c1b9458b", # noqa
"size": 4,
},
]
assert actual_data == expected_data
def test_get_uncompressed_image_layer_sizes_multiple_entries_in_manifest_json(self, tmpdir):
image_util = imageutil.ImageUtil(util.DockerfileImages([]), self.config)
path = Path(tmpdir) / 'tarball.tar'
manifest_file_content = (
'[{"Config":"62700350851fb36b2e770ba33639e9d111616d39fc63da8845a5e53e9ad013de.json",'
'"RepoTags":[],'
'"Layers":["92538e92de2938d7c4e279f871107b835bf0c8cc76a5a1655d66855706da18b0.tar"'
',"eb7bf34352ca9ba2fb0218870ac3c47b76d0b1fb7d50543d3ecfa497eca242b0.tar",'
'"6da3b8e0475dcc80515944d0cc3f699429248df6b040f8dd7711e681387185e8.tar",'
'"07adb74645fe71dec6917e5caca489018edf7ed94f29ac74398eca89c1b9458b.tar"]}, '
'{"Config": "ec3f0931a6e6b6855d76b2d7b0be30e81860baccd891b2e243280bf1cd8ad711.json"'
', "RepoTags": [], '
'"Layers": ["d31505fd5050f6b96ca3268d1db58fc91ae561ddf14eaabc41d63ea2ef8c1c6e.tar"]}]'
).encode('utf-8')
mock_files = {
"manifest.json": {
"content": manifest_file_content,
"size": len(manifest_file_content),
},
}
mock_tarball(tarball_path=path, files=mock_files)
with pytest.raises(
ValueError, match="manifest.json file has multiple entries, expected only one"
):
image_util.get_uncompressed_image_layer_sizes(path=path)
def test_extract_filesystem_layer(self, tmpdir):
image_util = imageutil.ImageUtil(util.DockerfileImages([]), self.config)
src_path = Path(tmpdir) / 'tarball.tar'
dst_path = Path(tmpdir) / 'dst'
expected_layer_filename = 'd31505fd5050f6b96ca3268d1db58fc91ae561ddf14eaabc41d63ea2ef8c1c6d.tar' # noqa
manifest_file_content = (
'[{"Config": "ec3f0931a6e6b6855d76b2d7b0be30e81860baccd891b2e243280bf1cd8ad710.json"'
', "RepoTags": [], '
'"Layers": ["d31505fd5050f6b96ca3268d1db58fc91ae561ddf14eaabc41d63ea2ef8c1c6d.tar"]}]'
).encode('utf-8')
mocked_files = {
'manifest.json': {'content': manifest_file_content, 'size': len(manifest_file_content)},
expected_layer_filename: {'content': None, 'size': 1}
}
mock_tarball(tarball_path=src_path, files=mocked_files)
actual_layer_filename = image_util.extract_filesystem_layer(src_path, dst_path)
assert actual_layer_filename == expected_layer_filename
assert (dst_path / expected_layer_filename).exists()
def test_extract_filesystem_layer_more_than_one_layer_fail(self, tmpdir):
image_util = imageutil.ImageUtil(util.DockerfileImages([]), self.config)
src_path = Path(tmpdir) / 'tarball.tar'
dst_path = Path(tmpdir) / 'dst'
manifest_file_content = (
'[{"Config":"62700350851fb36b2e770ba33639e9d111616d39fc63da8845a5e53e9ad013de.json",'
'"RepoTags":[],'
'"Layers":["92538e92de2938d7c4e279f871107b835bf0c8cc76a5a1655d66855706da18b0.tar"'
',"eb7bf34352ca9ba2fb0218870ac3c47b76d0b1fb7d50543d3ecfa497eca242b0.tar",'
'"6da3b8e0475dcc80515944d0cc3f699429248df6b040f8dd7711e681387185e8.tar",'
'"07adb74645fe71dec6917e5caca489018edf7ed94f29ac74398eca89c1b9458b.tar"]}]'
).encode('utf-8')
mocked_files = {
"92538e92de2938d7c4e279f871107b835bf0c8cc76a5a1655d66855706da18b0.tar": {
"content": None,
"size": 1,
},
"eb7bf34352ca9ba2fb0218870ac3c47b76d0b1fb7d50543d3ecfa497eca242b0.tar": {
"content": None,
"size": 2,
},
"6da3b8e0475dcc80515944d0cc3f699429248df6b040f8dd7711e681387185e8.tar": {
"content": None,
"size": 3,
},
"07adb74645fe71dec6917e5caca489018edf7ed94f29ac74398eca89c1b9458b.tar": {
"content": None,
"size": 4,
},
"manifest.json": {
"content": manifest_file_content,
"size": len(manifest_file_content),
},
}
mock_tarball(tarball_path=src_path, files=mocked_files)
with pytest.raises(ValueError, match=f'Tarball at {src_path} has more than 1 layer'):
image_util.extract_filesystem_layer(src_path, dst_path)
def test_extract_filesystem_layer_multiple_entries_in_manifest_json(self, tmpdir):
image_util = imageutil.ImageUtil(util.DockerfileImages([]), self.config)
src_path = Path(tmpdir) / 'tarball.tar'
dst_path = Path(tmpdir) / 'dst'
expected_layer_filename = 'd31505fd5050f6b96ca3268d1db58fc91ae561ddf14eaabc41d63ea2ef8c1c6d.tar' # noqa
manifest_file_content = (
'[{"Config": "ec3f0931a6e6b6855d76b2d7b0be30e81860baccd891b2e243280bf1cd8ad710.json"'
', "RepoTags": [], '
'"Layers": ["d31505fd5050f6b96ca3268d1db58fc91ae561ddf14eaabc41d63ea2ef8c1c6d.tar"]},'
'{"Config": "ec3f0931a6e6b6855d76b2d7b0be30e81860baccd891b2e243280bf1cd8ad711.json"'
', "RepoTags": [], '
'"Layers": ["d31505fd5050f6b96ca3268d1db58fc91ae561ddf14eaabc41d63ea2ef8c1c6e.tar"]}]'
).encode("utf-8")
mocked_files = {
'manifest.json': {'content': manifest_file_content, 'size': len(manifest_file_content)},
expected_layer_filename: {'content': None, 'size': 1}
}
mock_tarball(tarball_path=src_path, files=mocked_files)
with pytest.raises(
ValueError, match="manifest.json file has multiple entries, expected only one"
):
image_util.extract_filesystem_layer(src_path, dst_path)
| true | true |
f730ed7aa9f9349f15c6cbfc5165fbc8a60781f3 | 1,755 | py | Python | MGSIM/Commands/Genome_download.py | nick-youngblut/MGSIM | 9edae3c170cf5100b3408a853a87e1205e70dd1b | [
"MIT"
] | 3 | 2019-09-02T11:03:40.000Z | 2021-12-13T15:59:06.000Z | MGSIM/Commands/Genome_download.py | nick-youngblut/MGSIM | 9edae3c170cf5100b3408a853a87e1205e70dd1b | [
"MIT"
] | 2 | 2020-11-13T13:04:47.000Z | 2022-02-03T14:58:13.000Z | MGSIM/Commands/Genome_download.py | nick-youngblut/MGSIM | 9edae3c170cf5100b3408a853a87e1205e70dd1b | [
"MIT"
] | 1 | 2020-08-13T12:40:39.000Z | 2020-08-13T12:40:39.000Z | #!/usr/bin/env python
"""
genome_download: downloading genomes
Usage:
genome_download [options] <accession_table>
genome_download -h | --help
genome_download --version
Options:
<accessin_table> Taxon-accession table (see Description).
Use '-' if from STDIN.
-d=<d> Output directory. [Default: .]
-e=<e> Email to use for NCBI queries. [Default: blank@gmail.com]
-a=<a> Number of ambiguous nucleotides allowed in a genome. [Default: 0]
-n=<n> Number of cpus. [Default: 1]
-t=<t> Number of tries to download genomes. [Default: 10]
-r Rename genome sequences based on taxon name?
--debug Debug mode (no multiprocessing).
-h --help Show this screen.
--version Show version.
Description:
Taxon-accession table
---------------------
* tab-delimited
* must contain 2 columns
* "Taxon" = taxon name
* "Accession" = NCBI accession used for downloading
* Possible accessions:
* ncbi nucleotide db
* ncbi assembly db
* ftp url to genome (direct download)
* other columns are allowed
Output
------
* Genome fasta files written to the specified output directory
* A table mapping taxa to the download genome fasta file is written to STDOUT
"""
# import
import sys,os
import logging
## batteries
from docopt import docopt
from MGSIM import Genome_Download
## logging
logging.basicConfig(format='%(asctime)s - %(message)s', level=logging.DEBUG)
# opt parse
def opt_parse(args=None):
    """Parse command-line options and run the genome download.

    Parameters
    ----------
    args : list of str, optional
        Argument vector to parse instead of the real command line
        (useful for testing). ``None`` means use ``sys.argv[1:]``.
    """
    # docopt's argv parameter defaults to None == sys.argv[1:], so a single
    # call covers both the "real CLI" and the "explicit argv" cases.
    parsed = docopt(__doc__, version='0.1', argv=args)
    Genome_Download.main(parsed)
| 28.770492 | 85 | 0.631339 |
import sys,os
import logging
t import docopt
from MGSIM import Genome_Download
basicConfig(format='%(asctime)s - %(message)s', level=logging.DEBUG)
def opt_parse(args=None):
if args is None:
args = docopt(__doc__, version='0.1')
else:
args = docopt(__doc__, version='0.1', argv=args)
Genome_Download.main(args)
| true | true |
f730edaf07a7da17bb74420c371eafd4f32d6873 | 7,584 | py | Python | fixtura/contact.py | KateRuss/python_training | 38ea3f710fd9ba3b555277aac84a1ccda752b056 | [
"Apache-2.0"
] | null | null | null | fixtura/contact.py | KateRuss/python_training | 38ea3f710fd9ba3b555277aac84a1ccda752b056 | [
"Apache-2.0"
] | null | null | null | fixtura/contact.py | KateRuss/python_training | 38ea3f710fd9ba3b555277aac84a1ccda752b056 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from model.contact import Contact
from selenium.webdriver.support.ui import Select
import re
class ContactHelper:
    """Page-object helper wrapping every contact-related action of the
    address-book web application: creating, editing and deleting contacts,
    managing group membership, and reading contact data back for checks."""

    def __init__(self, app):
        # app: application fixture that owns the shared WebDriver (app.wd)
        self.app = app

    def fill_contact_form(self, contact):
        """Fill the contact form from a Contact model object.

        Fields whose value is None are skipped (see change_contact_field),
        so a partially populated Contact can be used for edits without
        wiping other fields.
        """
        wd = self.app.wd
        # fill contact fields (displayed on home page)
        self.change_contact_field("firstname", contact.first_name)
        self.change_contact_field("lastname", contact.last_name)
        self.change_contact_field("address", contact.address)
        # bug fix: "home" is the home phone field; it was previously filled
        # with contact.address (copy/paste from the line above)
        self.change_contact_field("home", contact.home_phone_number)
        self.change_contact_field("mobile", contact.mobile_phone_number)
        self.change_contact_field("work", contact.work_phone_number)
        self.change_contact_field("fax", contact.fax_number)
        self.change_contact_field("email", contact.email_1)
        self.change_contact_field("email2", contact.email_2)
        self.change_contact_field("email3", contact.email_3)
        self.change_contact_field("address2", contact.address2)
        self.change_contact_field("notes", contact.notes)

    def change_contact_field(self, fild_name, value):
        """Replace the text of form field `fild_name` with `value`.

        Does nothing when value is None, preserving the field's current
        content.
        """
        if value is not None:
            wd = self.app.wd
            wd.find_element_by_name(fild_name).click()
            wd.find_element_by_name(fild_name).clear()
            wd.find_element_by_name(fild_name).send_keys(value)

    def create_new(self, contact):
        """Create a new contact from `contact` and return to the home page."""
        wd = self.app.wd
        wd.find_element_by_link_text("add new").click()
        self.fill_contact_form(contact)
        # submit contact creation
        wd.find_element_by_xpath("(//input[@name='submit'])[2]").click()
        self.app.return_to_home_page()
        self.contact_cache = None

    def delete_first_contact(self):
        """Delete the first contact in the list."""
        self.delete_contact_by_index(0)

    def delete_contact_by_index(self, index):
        """Delete the contact at the given list position (0-based)."""
        wd = self.app.wd
        self.select_contact_for_del_by_index(index)
        wd.find_element_by_xpath("//input[@value='Delete']").click()
        # confirm the javascript alert
        wd.switch_to.alert.accept()
        wd.implicitly_wait(3)
        self.contact_cache = None

    def delete_contact_by_id(self, id):
        """Delete the contact with the given database id."""
        wd = self.app.wd
        self.app.return_to_home_page()
        self.select_contact_by_id(id)
        wd.find_element_by_xpath("//input[@value='Delete']").click()
        # confirm the javascript alert
        wd.switch_to.alert.accept()
        wd.implicitly_wait(3)
        self.contact_cache = None

    def select_contact_by_id(self, id):
        """Tick the checkbox of the contact with the given id."""
        wd = self.app.wd
        wd.find_element_by_css_selector("input[value='%s']" % id).click()

    def select_contact_for_del_by_index(self, index):
        """Tick the checkbox of the contact at the given list position."""
        wd = self.app.wd
        wd.find_elements_by_name("selected[]")[index].click()

    def delete_all_contact(self):
        """Select every contact via the mass checkbox and delete them all."""
        wd = self.app.wd
        wd.find_element_by_id("MassCB").click()
        wd.find_element_by_xpath("//input[@value='Delete']").click()
        # confirm the javascript alert
        wd.switch_to.alert.accept()
        self.contact_cache = None

    def edit_first_contact(self, contact):
        """Edit the first contact in the list using `contact`'s fields."""
        wd = self.app.wd
        self.edit_contact_by_index(0, contact)

    def edit_contact_by_index(self, index, contact):
        """Open the contact at `index` for editing, apply `contact`'s
        non-None fields and save."""
        wd = self.app.wd
        self.open_contact_to_edit_by_index(index)
        self.fill_contact_form(contact)
        wd.find_element_by_xpath("(//input[@name='update'])[2]").click()
        self.app.return_to_home_page()
        self.contact_cache = None

    def open_contact_to_edit_by_index(self, index):
        """Open the edit page of the contact at the given list position."""
        wd = self.app.wd
        self.app.return_to_home_page()
        wd.find_elements_by_xpath("//img[@alt='Edit']")[index].click()

    def open_contact_view_by_index(self, index):
        """Open the read-only details page of the contact at `index`."""
        wd = self.app.wd
        self.app.return_to_home_page()
        wd.find_elements_by_xpath("//img[@alt='Details']")[index].click()

    def contact_count(self):
        """Return the number of contacts reported by the page counter."""
        wd = self.app.wd
        self.app.return_to_home_page()
        return int(wd.find_element_by_xpath("//span[@id='search_count']").text)

    # cached result of get_contact_list(); reset to None by every mutating
    # method so the next read refreshes from the page
    contact_cache = None

    def get_contact_list(self):
        """Return all contacts shown on the home page as Contact objects.

        The result is cached until the next create/edit/delete; a copy of
        the cache is returned so callers cannot mutate it.
        """
        if self.contact_cache is None:
            wd = self.app.wd
            self.app.return_to_home_page()
            self.contact_cache = []
            for row in wd.find_elements_by_name("entry"):
                cells = row.find_elements_by_tag_name("td")
                last_name = cells[1].text
                name = cells[2].text
                id = cells[0].find_element_by_tag_name("input").get_attribute("value")
                address = cells[3].text
                all_emails = cells[4].text
                all_phones = cells[5].text
                self.contact_cache.append(Contact(last_name=last_name, first_name=name, address = address, id=id,
                                                  all_emails_from_homepage=all_emails,
                                                  all_phones_from_home_page=all_phones))
        return list(self.contact_cache)

    def get_contact_info_from_edit_page(self, index):
        """Read the full contact record from the edit form at `index`."""
        wd = self.app.wd
        self.open_contact_to_edit_by_index(index)
        firstname = wd.find_element_by_name("firstname").get_attribute("value")
        lastname = wd.find_element_by_name("lastname").get_attribute("value")
        id = wd.find_element_by_name("id").get_attribute("value")
        homephone = wd.find_element_by_name("home").get_attribute("value")
        mobilephone = wd.find_element_by_name("mobile").get_attribute("value")
        workphone = wd.find_element_by_name("work").get_attribute("value")
        address = wd.find_element_by_name("address").get_attribute("value")
        email1 = wd.find_element_by_name("email").get_attribute("value")
        email2 = wd.find_element_by_name("email2").get_attribute("value")
        email3 = wd.find_element_by_name("email3").get_attribute("value")
        return Contact(first_name=firstname, last_name=lastname, id=id, home_phone_number=homephone,
                       mobile_phone_number=mobilephone,work_phone_number=workphone, address=address, email_1=email1,
                       email_2=email2, email_3=email3)

    def get_contact_from_view_page(self, index):
        """Read the phone numbers from the details (view) page at `index`.

        The view page renders phones as "H: ...", "M: ...", "W: ..." lines.
        """
        wd = self.app.wd
        self.open_contact_view_by_index(index)
        text = wd.find_element_by_id("content").text
        homephone = re.search("H: (.*)", text).group(1)
        mobilephone = re.search("M: (.*)", text).group(1)
        workphone = re.search("W: (.*)", text).group(1)
        return Contact(home_phone_number=homephone, mobile_phone_number=mobilephone, work_phone_number=workphone)

    def select_group_by_id(self, group_list_name, group_id):
        """Pick option `group_id` in the <select> named `group_list_name`."""
        wd = self.app.wd
        wd.find_element_by_name(group_list_name).click()
        Select(wd.find_element_by_name(group_list_name)).select_by_value(group_id)

    def add_contact_in_group(self, group_id, contact_id):
        """Add contact `contact_id` to group `group_id`."""
        wd = self.app.wd
        self.app.return_to_home_page()
        self.select_contact_by_id(contact_id)
        self.select_group_by_id("to_group", group_id)
        wd.find_element_by_name("add").click()
        self.app.return_to_home_page()

    def del_contact_from_group(self, group_id, contact_id):
        """Remove contact `contact_id` from group `group_id`."""
        wd = self.app.wd
        self.app.return_to_home_page()
        self.select_group_by_id("group", group_id)
        self.select_contact_by_id(contact_id)
        wd.find_element_by_name("remove").click()
        self.app.return_to_home_page()
| 41.900552 | 116 | 0.654272 |
from model.contact import Contact
from selenium.webdriver.support.ui import Select
import re
class ContactHelper:
def __init__(self, app):
self.app = app
def fill_contact_form(self, contact):
wd = self.app.wd
self.change_contact_field("firstname", contact.first_name)
self.change_contact_field("lastname", contact.last_name)
self.change_contact_field("address", contact.address)
self.change_contact_field("home", contact.address)
self.change_contact_field("mobile", contact.mobile_phone_number)
self.change_contact_field("work", contact.work_phone_number)
self.change_contact_field("fax", contact.fax_number)
self.change_contact_field("email", contact.email_1)
self.change_contact_field("email2", contact.email_2)
self.change_contact_field("email3", contact.email_3)
self.change_contact_field("address2", contact.address2)
self.change_contact_field("notes", contact.notes)
def change_contact_field(self, fild_name, value):
if value is not None:
wd = self.app.wd
wd.find_element_by_name(fild_name).click()
wd.find_element_by_name(fild_name).clear()
wd.find_element_by_name(fild_name).send_keys(value)
def create_new(self, contact):
wd = self.app.wd
wd.find_element_by_link_text("add new").click()
self.fill_contact_form(contact)
wd.find_element_by_xpath("(//input[@name='submit'])[2]").click()
self.app.return_to_home_page()
self.contact_cache = None
def delete_first_contact(self):
self.delete_contact_by_index(0)
def delete_contact_by_index(self, index):
wd = self.app.wd
self.select_contact_for_del_by_index(index)
wd.find_element_by_xpath("//input[@value='Delete']").click()
wd.switch_to.alert.accept()
wd.implicitly_wait(3)
self.contact_cache = None
def delete_contact_by_id(self, id):
wd = self.app.wd
self.app.return_to_home_page()
self.select_contact_by_id(id)
wd.find_element_by_xpath("//input[@value='Delete']").click()
wd.switch_to.alert.accept()
wd.implicitly_wait(3)
self.contact_cache = None
def select_contact_by_id(self, id):
wd = self.app.wd
wd.find_element_by_css_selector("input[value='%s']" % id).click()
def select_contact_for_del_by_index(self, index):
wd = self.app.wd
wd.find_elements_by_name("selected[]")[index].click()
def delete_all_contact(self):
wd = self.app.wd
wd.find_element_by_id("MassCB").click()
wd.find_element_by_xpath("//input[@value='Delete']").click()
wd.switch_to.alert.accept()
self.contact_cache = None
def edit_first_contact(self, contact):
wd = self.app.wd
self.edit_contact_by_index(0, contact)
def edit_contact_by_index(self, index, contact):
wd = self.app.wd
self.open_contact_to_edit_by_index(index)
self.fill_contact_form(contact)
wd.find_element_by_xpath("(//input[@name='update'])[2]").click()
self.app.return_to_home_page()
self.contact_cache = None
def open_contact_to_edit_by_index(self, index):
wd = self.app.wd
self.app.return_to_home_page()
wd.find_elements_by_xpath("//img[@alt='Edit']")[index].click()
def open_contact_view_by_index(self, index):
wd = self.app.wd
self.app.return_to_home_page()
wd.find_elements_by_xpath("//img[@alt='Details']")[index].click()
def contact_count(self):
wd = self.app.wd
self.app.return_to_home_page()
return int(wd.find_element_by_xpath("//span[@id='search_count']").text)
contact_cache = None
def get_contact_list(self):
if self.contact_cache is None:
wd = self.app.wd
self.app.return_to_home_page()
self.contact_cache = []
for row in wd.find_elements_by_name("entry"):
cells = row.find_elements_by_tag_name("td")
last_name = cells[1].text
name = cells[2].text
id = cells[0].find_element_by_tag_name("input").get_attribute("value")
address = cells[3].text
all_emails = cells[4].text
all_phones = cells[5].text
self.contact_cache.append(Contact(last_name=last_name, first_name=name, address = address, id=id,
all_emails_from_homepage=all_emails,
all_phones_from_home_page=all_phones))
return list(self.contact_cache)
def get_contact_info_from_edit_page(self, index):
wd = self.app.wd
self.open_contact_to_edit_by_index(index)
firstname = wd.find_element_by_name("firstname").get_attribute("value")
lastname = wd.find_element_by_name("lastname").get_attribute("value")
id = wd.find_element_by_name("id").get_attribute("value")
homephone = wd.find_element_by_name("home").get_attribute("value")
mobilephone = wd.find_element_by_name("mobile").get_attribute("value")
workphone = wd.find_element_by_name("work").get_attribute("value")
address = wd.find_element_by_name("address").get_attribute("value")
email1 = wd.find_element_by_name("email").get_attribute("value")
email2 = wd.find_element_by_name("email2").get_attribute("value")
email3 = wd.find_element_by_name("email3").get_attribute("value")
return Contact(first_name=firstname, last_name=lastname, id=id, home_phone_number=homephone,
mobile_phone_number=mobilephone,work_phone_number=workphone, address=address, email_1=email1,
email_2=email2, email_3=email3)
def get_contact_from_view_page(self, index):
wd = self.app.wd
self.open_contact_view_by_index(index)
text = wd.find_element_by_id("content").text
homephone = re.search("H: (.*)", text).group(1)
mobilephone = re.search("M: (.*)", text).group(1)
workphone = re.search("W: (.*)", text).group(1)
return Contact(home_phone_number=homephone, mobile_phone_number=mobilephone, work_phone_number=workphone)
def select_group_by_id(self, group_list_name, group_id):
wd = self.app.wd
wd.find_element_by_name(group_list_name).click()
Select(wd.find_element_by_name(group_list_name)).select_by_value(group_id)
def add_contact_in_group(self, group_id, contact_id):
wd = self.app.wd
self.app.return_to_home_page()
self.select_contact_by_id(contact_id)
self.select_group_by_id("to_group", group_id)
wd.find_element_by_name("add").click()
self.app.return_to_home_page()
def del_contact_from_group(self, group_id, contact_id):
wd = self.app.wd
self.app.return_to_home_page()
self.select_group_by_id("group", group_id)
self.select_contact_by_id(contact_id)
wd.find_element_by_name("remove").click()
self.app.return_to_home_page()
| true | true |
f730edc45a14dd6184a9dea61869338a9c700c7e | 545 | py | Python | dashboard/models.py | Lukmanhakim112/ppdb | c6179478b4c1f0b6cec77a8a059a6e418f6263f1 | [
"MIT"
] | null | null | null | dashboard/models.py | Lukmanhakim112/ppdb | c6179478b4c1f0b6cec77a8a059a6e418f6263f1 | [
"MIT"
] | null | null | null | dashboard/models.py | Lukmanhakim112/ppdb | c6179478b4c1f0b6cec77a8a059a6e418f6263f1 | [
"MIT"
] | null | null | null | from django.db import models
from django.utils import timezone
from users.models import CustomUser
from primaseru import choices
class StudentStatus(models.Model):
    """Admission status of one applicant, linked one-to-one to the account."""
    # Owning user account; deleting the user cascades to this row.
    student = models.OneToOneField(CustomUser, on_delete=models.CASCADE)
    # Indexed decision flag; null=True means the decision is still pending.
    accepted = models.BooleanField('Diterima', db_index=True, null=True)
    # Major the student was accepted into, restricted to choices.MAJOR;
    # optional until a decision is made.
    major = models.CharField('Diterima dijurusan', choices=choices.MAJOR, max_length=4, null=True, blank=True)
    # Set automatically when the row is first created.
    created_at = models.DateTimeField(auto_now_add=True)
    def __str__(self):
        return f'{self.student} Status'
| 34.0625 | 110 | 0.766972 | from django.db import models
from django.utils import timezone
from users.models import CustomUser
from primaseru import choices
class StudentStatus(models.Model):
student = models.OneToOneField(CustomUser, on_delete=models.CASCADE)
accepted = models.BooleanField('Diterima', db_index=True, null=True)
major = models.CharField('Diterima dijurusan', choices=choices.MAJOR, max_length=4, null=True, blank=True)
created_at = models.DateTimeField(auto_now_add=True)
def __str__(self):
return f'{self.student} Status'
| true | true |
f730edff89f56bcceeeca88db6221bf6caeacef5 | 27,762 | py | Python | tacotron2/model.py | HudsonHuang/tacotron2 | fa55a0b633abe358e1258e1dc3b40d85e17b3450 | [
"BSD-3-Clause"
] | null | null | null | tacotron2/model.py | HudsonHuang/tacotron2 | fa55a0b633abe358e1258e1dc3b40d85e17b3450 | [
"BSD-3-Clause"
] | null | null | null | tacotron2/model.py | HudsonHuang/tacotron2 | fa55a0b633abe358e1258e1dc3b40d85e17b3450 | [
"BSD-3-Clause"
] | null | null | null | # *****************************************************************************
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
from math import sqrt
import torch
from torch.autograd import Variable
from torch import nn
from torch.nn import functional as F
import sys
from os.path import abspath, dirname
# enabling modules discovery from global entrypoint
sys.path.append(abspath(dirname(__file__)+'/../'))
from common.layers import ConvNorm, LinearNorm
from common.utils import to_gpu, get_mask_from_lengths
class LocationLayer(nn.Module):
    """Projects stacked previous/cumulative attention weights (B, 2, T)
    into location features of size ``attention_dim`` for the attention
    score computation."""

    def __init__(self, attention_n_filters, attention_kernel_size,
                 attention_dim):
        super(LocationLayer, self).__init__()
        pad = int((attention_kernel_size - 1) / 2)
        self.location_conv = ConvNorm(2, attention_n_filters,
                                      kernel_size=attention_kernel_size,
                                      padding=pad, bias=False, stride=1,
                                      dilation=1)
        self.location_dense = LinearNorm(attention_n_filters, attention_dim,
                                         bias=False, w_init_gain='tanh')

    def forward(self, attention_weights_cat):
        # (B, 2, T) -> (B, n_filters, T) -> (B, T, n_filters) -> (B, T, attention_dim)
        features = self.location_conv(attention_weights_cat)
        return self.location_dense(features.transpose(1, 2))
class Attention(nn.Module):
    """Additive attention that combines content-based scores with location
    features computed from previous attention weights (via LocationLayer)."""
    def __init__(self, attention_rnn_dim, embedding_dim,
                 attention_dim, attention_location_n_filters,
                 attention_location_kernel_size):
        super(Attention, self).__init__()
        self.query_layer = LinearNorm(attention_rnn_dim, attention_dim,
                                      bias=False, w_init_gain='tanh')
        self.memory_layer = LinearNorm(embedding_dim, attention_dim, bias=False,
                                       w_init_gain='tanh')
        # projects the combined tanh features to one scalar energy per step
        self.v = LinearNorm(attention_dim, 1, bias=False)
        self.location_layer = LocationLayer(attention_location_n_filters,
                                            attention_location_kernel_size,
                                            attention_dim)
        # energy assigned to masked (padded) positions before the softmax
        self.score_mask_value = -float("inf")
    def get_alignment_energies(self, query, processed_memory,
                               attention_weights_cat):
        """
        PARAMS
        ------
        query: decoder output (batch, n_mel_channels * n_frames_per_step)
        processed_memory: processed encoder outputs (B, T_in, attention_dim)
        attention_weights_cat: cumulative and prev. att weights (B, 2, max_time)
        RETURNS
        -------
        alignment (batch, max_time)
        """
        processed_query = self.query_layer(query.unsqueeze(1))
        processed_attention_weights = self.location_layer(attention_weights_cat)
        # additive scoring: the query broadcasts over the memory time axis
        energies = self.v(torch.tanh(
            processed_query + processed_attention_weights + processed_memory))
        energies = energies.squeeze(-1)
        return energies
    def forward(self, attention_hidden_state, memory, processed_memory,
                attention_weights_cat, mask):
        """
        PARAMS
        ------
        attention_hidden_state: attention rnn last output
        memory: encoder outputs
        processed_memory: processed encoder outputs
        attention_weights_cat: previous and cummulative attention weights
        mask: binary mask for padded data (True at positions to mask out)
        RETURNS
        -------
        attention_context: weighted sum of memory, (B, embedding_dim)
        attention_weights: softmax alignment, (B, max_time)
        """
        alignment = self.get_alignment_energies(
            attention_hidden_state, processed_memory, attention_weights_cat)
        if mask is not None:
            # masked positions get -inf so softmax gives them zero weight
            alignment.data.masked_fill_(mask, self.score_mask_value)
        attention_weights = F.softmax(alignment, dim=1)
        # (B, 1, T) x (B, T, embedding_dim) -> (B, 1, embedding_dim)
        attention_context = torch.bmm(attention_weights.unsqueeze(1), memory)
        attention_context = attention_context.squeeze(1)
        return attention_context, attention_weights
class Prenet(nn.Module):
    """Stack of Linear+ReLU layers with p=0.5 dropout that is deliberately
    kept active even at evaluation time (``training=True`` below)."""
    def __init__(self, in_dim, sizes):
        super(Prenet, self).__init__()
        # chain layer sizes: in_dim -> sizes[0] -> sizes[1] -> ...
        in_sizes = [in_dim] + sizes[:-1]
        self.layers = nn.ModuleList(
            [LinearNorm(in_size, out_size, bias=False)
             for (in_size, out_size) in zip(in_sizes, sizes)])
    def forward(self, x, inference=False):
        if inference:
            for linear in self.layers:
                x = F.relu(linear(x))
            x0 = x[0].unsqueeze(0)
            # One Bernoulli(0.5) mask sampled from the first element's shape
            # and shared across dim 0; *2 is the inverted-dropout rescale.
            # NOTE(review): unlike the training branch, the mask is applied
            # once after the last layer, not per layer -- confirm intended.
            mask = Variable(torch.bernoulli(x0.data.new(x0.data.size()).fill_(0.5)))
            mask = mask.expand(x.size(0), x.size(1))
            x = x*mask*2
        else:
            for linear in self.layers:
                # training=True keeps dropout on regardless of module mode
                x = F.dropout(F.relu(linear(x)), p=0.5, training=True)
        return x
class Postnet(nn.Module):
    """Postnet
    - Five 1-d convolution with 512 channels and kernel size 5
    Refines the decoder's mel output: every conv is followed by batch norm,
    tanh on all but the last conv, dropout(0.5) throughout.
    """

    def __init__(self, n_mel_channels, postnet_embedding_dim,
                 postnet_kernel_size, postnet_n_convolutions):
        super(Postnet, self).__init__()
        pad = int((postnet_kernel_size - 1) / 2)

        def _conv_block(in_ch, out_ch, gain):
            # one ConvNorm + BatchNorm1d unit of the stack
            return nn.Sequential(
                ConvNorm(in_ch, out_ch,
                         kernel_size=postnet_kernel_size, stride=1,
                         padding=pad, dilation=1, w_init_gain=gain),
                nn.BatchNorm1d(out_ch))

        # first block maps mel -> embedding, middle blocks keep the width,
        # last block maps back to mel channels with a linear init gain
        blocks = [_conv_block(n_mel_channels, postnet_embedding_dim, 'tanh')]
        blocks += [_conv_block(postnet_embedding_dim, postnet_embedding_dim, 'tanh')
                   for _ in range(postnet_n_convolutions - 2)]
        blocks.append(_conv_block(postnet_embedding_dim, n_mel_channels, 'linear'))
        self.convolutions = nn.ModuleList(blocks)

    def forward(self, x):
        last = len(self.convolutions) - 1
        for idx, conv in enumerate(self.convolutions):
            y = conv(x)
            if idx < last:
                y = torch.tanh(y)
            x = F.dropout(y, 0.5, self.training)
        return x
class Encoder(nn.Module):
    """Encoder module:
        - Three 1-d convolution banks
        - Bidirectional LSTM
    Maps input features (B, encoder_embedding_dim, T_in) to hidden
    representations (B, T_in, encoder_embedding_dim).
    """
    def __init__(self, encoder_n_convolutions,
                 encoder_embedding_dim, encoder_kernel_size):
        super(Encoder, self).__init__()
        convolutions = []
        for _ in range(encoder_n_convolutions):
            conv_layer = nn.Sequential(
                ConvNorm(encoder_embedding_dim,
                         encoder_embedding_dim,
                         kernel_size=encoder_kernel_size, stride=1,
                         padding=int((encoder_kernel_size - 1) / 2),
                         dilation=1, w_init_gain='relu'),
                nn.BatchNorm1d(encoder_embedding_dim))
            convolutions.append(conv_layer)
        self.convolutions = nn.ModuleList(convolutions)
        # bidirectional, so each direction gets half the embedding dim and
        # the concatenated output is encoder_embedding_dim wide again
        self.lstm = nn.LSTM(encoder_embedding_dim,
                            int(encoder_embedding_dim / 2), 1,
                            batch_first=True, bidirectional=True)
    def forward(self, x, input_lengths):
        """Encode a padded batch; ``input_lengths`` lets the LSTM skip
        padding via pack_padded_sequence."""
        for conv in self.convolutions:
            x = F.dropout(F.relu(conv(x)), 0.5, self.training)
        x = x.transpose(1, 2)
        # pytorch tensor are not reversible, hence the conversion
        input_lengths = input_lengths.cpu().numpy()
        x = nn.utils.rnn.pack_padded_sequence(
            x, input_lengths, batch_first=True)
        self.lstm.flatten_parameters()
        outputs, _ = self.lstm(x)
        outputs, _ = nn.utils.rnn.pad_packed_sequence(
            outputs, batch_first=True)
        return outputs
    def infer(self, x):
        """Inference path: same as forward but without length-based packing
        (assumes the input has no padding)."""
        for conv in self.convolutions:
            x = F.dropout(F.relu(conv(x)), 0.5, self.training)
        x = x.transpose(1, 2)
        self.lstm.flatten_parameters()
        outputs, _ = self.lstm(x)
        return outputs
class Decoder(nn.Module):
    def __init__(self, n_mel_channels, n_frames_per_step,
                 encoder_embedding_dim, attention_dim,
                 attention_location_n_filters,
                 attention_location_kernel_size,
                 attention_rnn_dim, decoder_rnn_dim,
                 prenet_dim, max_decoder_steps, gate_threshold,
                 p_attention_dropout, p_decoder_dropout,
                 early_stopping):
        """Autoregressive mel-spectrogram decoder with attention.

        PARAMS
        ------
        n_mel_channels: mel bins per frame
        n_frames_per_step: mel frames emitted per decoder step
        encoder_embedding_dim: size of encoder output vectors
        attention_dim: hidden size of the attention energy computation
        attention_location_n_filters: conv filters for location features
        attention_location_kernel_size: conv kernel for location features
        attention_rnn_dim: hidden size of the attention LSTM cell
        decoder_rnn_dim: hidden size of the decoder LSTM cell
        prenet_dim: hidden size of the two prenet layers
        max_decoder_steps: hard cap on inference steps
        gate_threshold: gate value that stops inference
        p_attention_dropout: dropout on the attention LSTM output
        p_decoder_dropout: dropout on the decoder LSTM output
        early_stopping: stop inference once all gates fire
        """
        super(Decoder, self).__init__()
        self.n_mel_channels = n_mel_channels
        self.n_frames_per_step = n_frames_per_step
        self.encoder_embedding_dim = encoder_embedding_dim
        self.attention_rnn_dim = attention_rnn_dim
        self.decoder_rnn_dim = decoder_rnn_dim
        self.prenet_dim = prenet_dim
        self.max_decoder_steps = max_decoder_steps
        self.gate_threshold = gate_threshold
        self.p_attention_dropout = p_attention_dropout
        self.p_decoder_dropout = p_decoder_dropout
        self.early_stopping = early_stopping
        # prenet compresses one mel chunk before it enters the attention RNN
        self.prenet = Prenet(
            n_mel_channels,
            [prenet_dim, prenet_dim])
        self.attention_rnn = nn.LSTMCell(
            prenet_dim + encoder_embedding_dim,
            attention_rnn_dim)
        self.attention_layer = Attention(
            attention_rnn_dim, encoder_embedding_dim,
            attention_dim, attention_location_n_filters,
            attention_location_kernel_size)
        self.decoder_rnn = nn.LSTMCell(
            attention_rnn_dim + encoder_embedding_dim,
            decoder_rnn_dim, 1)  # third positional arg is `bias` (1 -> True)
        # projects [decoder hidden; attention context] to one step of mels
        self.linear_projection = LinearNorm(
            decoder_rnn_dim + encoder_embedding_dim,
            n_mel_channels * n_frames_per_step)
        # stop-token predictor (sigmoid applied by the caller/loss)
        self.gate_layer = LinearNorm(
            decoder_rnn_dim + encoder_embedding_dim, 1,
            bias=True, w_init_gain='sigmoid')
def get_go_frame(self, memory):
""" Gets all zeros frames to use as first decoder input
PARAMS
------
memory: decoder outputs
RETURNS
-------
decoder_input: all zeros frames
"""
B = memory.size(0)
decoder_input = Variable(memory.data.new(
B, self.n_mel_channels * self.n_frames_per_step).zero_())
return decoder_input
    def initialize_decoder_states(self, memory, mask):
        """ Initializes attention rnn states, decoder rnn states, attention
        weights, attention cumulative weights, attention context, stores memory
        and stores processed memory
        PARAMS
        ------
        memory: Encoder outputs
        mask: Mask for padded data if training, expects None for inference
        """
        B = memory.size(0)
        MAX_TIME = memory.size(1)
        # attention LSTM cell state
        self.attention_hidden = Variable(memory.data.new(
            B, self.attention_rnn_dim).zero_())
        self.attention_cell = Variable(memory.data.new(
            B, self.attention_rnn_dim).zero_())
        # decoder LSTM cell state
        self.decoder_hidden = Variable(memory.data.new(
            B, self.decoder_rnn_dim).zero_())
        self.decoder_cell = Variable(memory.data.new(
            B, self.decoder_rnn_dim).zero_())
        # per-step and cumulative alignments over the encoder time axis
        self.attention_weights = Variable(memory.data.new(
            B, MAX_TIME).zero_())
        self.attention_weights_cum = Variable(memory.data.new(
            B, MAX_TIME).zero_())
        self.attention_context = Variable(memory.data.new(
            B, self.encoder_embedding_dim).zero_())
        self.memory = memory
        # project the memory once up front; reused at every decode() step
        self.processed_memory = self.attention_layer.memory_layer(memory)
        self.mask = mask
def parse_decoder_inputs(self, decoder_inputs):
""" Prepares decoder inputs, i.e. mel outputs
PARAMS
------
decoder_inputs: inputs used for teacher-forced training, i.e. mel-specs
RETURNS
-------
inputs: processed decoder inputs
"""
# (B, n_mel_channels, T_out) -> (B, T_out, n_mel_channels)
decoder_inputs = decoder_inputs.transpose(1, 2).contiguous()
decoder_inputs = decoder_inputs.view(
decoder_inputs.size(0),
int(decoder_inputs.size(1)/self.n_frames_per_step), -1)
# (B, T_out, n_mel_channels) -> (T_out, B, n_mel_channels)
decoder_inputs = decoder_inputs.transpose(0, 1)
return decoder_inputs
    def parse_decoder_outputs(self, mel_outputs, gate_outputs, alignments):
        """ Prepares decoder outputs for output
        PARAMS
        ------
        mel_outputs: list of per-step mel frames, each (B, n_mel * steps)
        gate_outputs: list of per-step gate output energies
        alignments: list of per-step attention weights, each (B, T_in)
        RETURNS
        -------
        mel_outputs: (B, n_mel_channels, T_out) tensor
        gate_outputs: (B, n_steps) gate output energies
        alignments: (B, n_steps, T_in) attention weights
        """
        # (T_out, B) -> (B, T_out)
        alignments = torch.stack(alignments).transpose(0, 1)
        # (T_out, B) -> (B, T_out)
        gate_outputs = torch.stack(gate_outputs).transpose(0, 1)
        gate_outputs = gate_outputs.contiguous()
        # (T_out, B, n_mel_channels) -> (B, T_out, n_mel_channels)
        mel_outputs = torch.stack(mel_outputs).transpose(0, 1).contiguous()
        # decouple frames per step: split each grouped step back into frames
        mel_outputs = mel_outputs.view(
            mel_outputs.size(0), -1, self.n_mel_channels)
        # (B, T_out, n_mel_channels) -> (B, n_mel_channels, T_out)
        mel_outputs = mel_outputs.transpose(1, 2)
        return mel_outputs, gate_outputs, alignments
    def decode(self, decoder_input):
        """ Decoder step using stored states, attention and memory
        PARAMS
        ------
        decoder_input: previous mel output (already passed through the prenet)
        RETURNS
        -------
        mel_output: one step of mels, (B, n_mel_channels * n_frames_per_step)
        gate_output: gate output energies
        attention_weights: alignment for this step, (B, T_in)
        """
        cell_input = torch.cat((decoder_input, self.attention_context), -1)
        self.attention_hidden, self.attention_cell = self.attention_rnn(
            cell_input, (self.attention_hidden, self.attention_cell))
        self.attention_hidden = F.dropout(
            self.attention_hidden, self.p_attention_dropout, self.training)
        # stack previous and cumulative attention weights as the two input
        # channels expected by the location layer
        attention_weights_cat = torch.cat(
            (self.attention_weights.unsqueeze(1),
             self.attention_weights_cum.unsqueeze(1)), dim=1)
        self.attention_context, self.attention_weights = self.attention_layer(
            self.attention_hidden, self.memory, self.processed_memory,
            attention_weights_cat, self.mask)
        # accumulate alignments so the location features see full history
        self.attention_weights_cum += self.attention_weights
        decoder_input = torch.cat(
            (self.attention_hidden, self.attention_context), -1)
        self.decoder_hidden, self.decoder_cell = self.decoder_rnn(
            decoder_input, (self.decoder_hidden, self.decoder_cell))
        self.decoder_hidden = F.dropout(
            self.decoder_hidden, self.p_decoder_dropout, self.training)
        decoder_hidden_attention_context = torch.cat(
            (self.decoder_hidden, self.attention_context), dim=1)
        decoder_output = self.linear_projection(
            decoder_hidden_attention_context)
        gate_prediction = self.gate_layer(decoder_hidden_attention_context)
        return decoder_output, gate_prediction, self.attention_weights
    def forward(self, memory, decoder_inputs, memory_lengths):
        """ Decoder forward pass for training (teacher forcing)
        PARAMS
        ------
        memory: Encoder outputs
        decoder_inputs: Decoder inputs for teacher forcing. i.e. mel-specs
        memory_lengths: Encoder output lengths for attention masking.
        RETURNS
        -------
        mel_outputs: mel outputs from the decoder
        gate_outputs: gate outputs from the decoder
        alignments: sequence of attention weights from the decoder
        """
        # Prepend an all-zero <GO> frame to the (step-grouped) teacher inputs.
        decoder_input = self.get_go_frame(memory).unsqueeze(0)
        decoder_inputs = self.parse_decoder_inputs(decoder_inputs)
        decoder_inputs = torch.cat((decoder_input, decoder_inputs), dim=0)
        # Split each grouped step into its n_frames_per_step sub-frames of
        # width z and run the prenet on every slice up front.
        decoder_input_frames = []
        z = int(decoder_inputs.size(2) / self.n_frames_per_step)
        for i in range(self.n_frames_per_step):
            decoder_input_frames.append(self.prenet(decoder_inputs[:, :, i*z:(i+1)*z]))
        self.initialize_decoder_states(
            memory, mask=~get_mask_from_lengths(memory_lengths))
        mel_outputs, gate_outputs, alignments = [], [], []
        while len(mel_outputs) < decoder_input_frames[0].size(0) - 1:
            for input_frame in decoder_input_frames:
                decoder_input = input_frame[len(mel_outputs)]
                mel_output, gate_output, attention_weights = self.decode(
                    decoder_input)
                # squeeze() would drop the batch dim when B == 1, so keep it.
                gate_outputs += [gate_output.squeeze() if memory.shape[0] > 1 else gate_output]
                alignments += [attention_weights]
                mel_outputs += [mel_output.squeeze(1)]
        mel_outputs, gate_outputs, alignments = self.parse_decoder_outputs(
            mel_outputs, gate_outputs, alignments)
        return mel_outputs, gate_outputs, alignments
def infer(self, memory):
""" Decoder inference
PARAMS
------
memory: Encoder outputs
RETURNS
-------
mel_outputs: mel outputs from the decoder
gate_outputs: gate outputs from the decoder
alignments: sequence of attention weights from the decoder
"""
decoder_input = self.get_go_frame(memory)
self.initialize_decoder_states(memory, mask=None)
mel_lengths = torch.zeros([memory.size(0)], dtype=torch.int32).cuda()
not_finished = torch.ones([memory.size(0)], dtype=torch.int32).cuda()
mel_outputs, gate_outputs, alignments = [], [], []
z = int(decoder_input.size(1) / self.n_frames_per_step)
while True:
decoder_input_frames = []
for i in range(self.n_frames_per_step):
decoder_input_frames.append(decoder_input[:, i * z:(i + 1) * z])
for input_frame in decoder_input_frames:
mel_output, gate_output, alignment = self.decode(self.prenet(input_frame))
gate_outputs += [gate_output]
alignments += [alignment]
mel_outputs += [mel_output.squeeze(1)]
dec = torch.le(torch.sigmoid(gate_output.data),
self.gate_threshold).to(torch.int32).squeeze(1)
not_finished = not_finished*dec
mel_lengths += not_finished
if self.early_stopping and torch.sum(not_finished) == 0:
break
if len(mel_outputs) == self.max_decoder_steps:
print("Warning! Reached max decoder steps")
break
decoder_input = mel_output
mel_outputs, gate_outputs, alignments = self.parse_decoder_outputs(
mel_outputs, gate_outputs, alignments)
return mel_outputs, gate_outputs, alignments
class Tacotron2(nn.Module):
    """Multi-speaker (optionally multi-emotion) Tacotron 2.

    Text is embedded and encoded; speaker (and optional emotion) embeddings
    are broadcast along the encoder time axis and concatenated onto the
    encoder output, which conditions the attention decoder and postnet.
    """

    def __init__(self, mask_padding, n_mel_channels,
                 n_symbols, symbols_embedding_dim, n_speakers, speakers_embedding_dim,
                 use_emotions, n_emotions, emotions_embedding_dim,
                 encoder_kernel_size, encoder_n_convolutions, encoder_embedding_dim,
                 attention_rnn_dim, attention_dim, attention_location_n_filters,
                 attention_location_kernel_size, n_frames_per_step,
                 decoder_rnn_dim, prenet_dim, max_decoder_steps, gate_threshold,
                 p_attention_dropout, p_decoder_dropout,
                 postnet_embedding_dim, postnet_kernel_size,
                 postnet_n_convolutions, decoder_no_early_stopping, **kwargs):
        super(Tacotron2, self).__init__()
        self.mask_padding = mask_padding
        self.n_mel_channels = n_mel_channels
        self.n_frames_per_step = n_frames_per_step
        self.symbols_embedding = nn.Embedding(
            n_symbols, symbols_embedding_dim)
        std = sqrt(2.0 / (n_symbols + symbols_embedding_dim))
        val = sqrt(3.0) * std  # uniform bounds for std
        self.symbols_embedding.weight.data.uniform_(-val, val)
        self.speakers_embedding = nn.Embedding(n_speakers, speakers_embedding_dim)
        torch.nn.init.xavier_uniform_(self.speakers_embedding.weight)
        self.encoder = Encoder(encoder_n_convolutions,
                               encoder_embedding_dim,
                               encoder_kernel_size)
        # The decoder memory width is encoder output + conditioning vectors.
        encoder_out_embedding_dim = encoder_embedding_dim + speakers_embedding_dim
        self.use_emotions = use_emotions
        if self.use_emotions:
            self.emotions_embedding = nn.Embedding(n_emotions, emotions_embedding_dim)
            torch.nn.init.xavier_uniform_(self.emotions_embedding.weight)
            encoder_out_embedding_dim += emotions_embedding_dim
        self.decoder = Decoder(n_mel_channels, n_frames_per_step,
                               encoder_out_embedding_dim, attention_dim,
                               attention_location_n_filters,
                               attention_location_kernel_size,
                               attention_rnn_dim, decoder_rnn_dim,
                               prenet_dim, max_decoder_steps,
                               gate_threshold, p_attention_dropout,
                               p_decoder_dropout,
                               not decoder_no_early_stopping)
        self.postnet = Postnet(n_mel_channels, postnet_embedding_dim,
                               postnet_kernel_size,
                               postnet_n_convolutions)

    def parse_batch(self, batch):
        """Move a data-loader batch to the GPU and split it into
        (model inputs, training targets)."""
        text_padded, input_lengths, mel_padded, gate_padded, \
            output_lengths, speaker_ids, emotion_ids = batch
        text_padded = to_gpu(text_padded).long()
        input_lengths = to_gpu(input_lengths).long()
        max_len = torch.max(input_lengths.data).item()
        mel_padded = to_gpu(mel_padded).float()
        gate_padded = to_gpu(gate_padded).float()
        output_lengths = to_gpu(output_lengths).long()
        speaker_ids = to_gpu(speaker_ids).long()
        emotion_ids = to_gpu(emotion_ids).long()
        return ((text_padded, input_lengths, mel_padded, max_len, output_lengths, speaker_ids, emotion_ids),
                (mel_padded, gate_padded))

    def parse_output(self, outputs, output_lengths=None):
        """Zero out outputs past each sample's true length and push the padded
        gate energies high (1e3) so the stop token reads as 'finished'."""
        if self.mask_padding and output_lengths is not None:
            mask = ~get_mask_from_lengths(output_lengths)
            mask = mask.expand(self.n_mel_channels, mask.size(0), mask.size(1))
            mask = mask.permute(1, 0, 2)
            outputs[0].data.masked_fill_(mask, 0.0)
            outputs[1].data.masked_fill_(mask, 0.0)
            outputs[2].data.masked_fill_(mask[:, 0, :], 1e3)  # gate energies
        return outputs

    def forward(self, inputs):
        """Training pass; returns [mels, postnet mels, gates, alignments]."""
        # Parse inputs
        inputs, input_lengths, targets, max_len, output_lengths, speaker_ids, emotion_ids = inputs
        input_lengths, output_lengths = input_lengths.data, output_lengths.data
        # Conditioning streams concatenated along the feature axis
        outputs = []
        # Get symbols encoder outputs
        embedded_inputs = self.symbols_embedding(inputs).transpose(1, 2)
        encoder_outputs = self.encoder(embedded_inputs, input_lengths)
        outputs.append(encoder_outputs)
        # Extract speaker embeddings, broadcast over encoder time steps
        speaker_ids = speaker_ids.unsqueeze(1)
        embedded_speakers = self.speakers_embedding(speaker_ids)
        embedded_speakers = embedded_speakers.expand(-1, max_len, -1)
        outputs.append(embedded_speakers)
        # Extract emotion embeddings
        if self.use_emotions:
            emotion_ids = emotion_ids.unsqueeze(1)
            embedded_emotions = self.emotions_embedding(emotion_ids)
            embedded_emotions = embedded_emotions.expand(-1, max_len, -1)
            outputs.append(embedded_emotions)
        # Combine all embeddings
        merged_outputs = torch.cat(outputs, -1)
        mel_outputs, gate_outputs, alignments = self.decoder(
            merged_outputs, targets, memory_lengths=input_lengths)
        # Postnet predicts a residual on top of the decoder mels
        mel_outputs_postnet = self.postnet(mel_outputs)
        mel_outputs_postnet = mel_outputs + mel_outputs_postnet
        return self.parse_output(
            [mel_outputs, mel_outputs_postnet, gate_outputs, alignments],
            output_lengths)

    def infer(self, input, speaker_id, emotion_id=None):
        """Inference pass. emotion_id must be given (non-None) when the model
        was built with use_emotions=True."""
        # Outputs
        outputs = []
        # Get symbols encoder output
        embedded_input = self.symbols_embedding(input).transpose(1, 2)
        encoder_output = self.encoder.infer(embedded_input)
        outputs.append(encoder_output)
        # Get speaker embedding, broadcast over encoder time steps
        speaker_id = speaker_id.unsqueeze(1)
        embedded_speaker = self.speakers_embedding(speaker_id)
        embedded_speaker = embedded_speaker.expand(-1, encoder_output.shape[1], -1)
        outputs.append(embedded_speaker)
        # Extract emotion embeddings
        if self.use_emotions:
            emotion_id = emotion_id.unsqueeze(1)
            embedded_emotion = self.emotions_embedding(emotion_id)
            embedded_emotion = embedded_emotion.expand(-1, encoder_output.shape[1], -1)
            outputs.append(embedded_emotion)
        # Merge embeddings
        merged_outputs = torch.cat(outputs, -1)
        # Decode
        mel_outputs, gate_outputs, alignments = self.decoder.infer(
            merged_outputs)
        # Post
        mel_outputs_postnet = self.postnet(mel_outputs)
        mel_outputs_postnet = mel_outputs + mel_outputs_postnet
        # Parse
        outputs = self.parse_output(
            [mel_outputs, mel_outputs_postnet, gate_outputs, alignments])
        return outputs
| 40.002882 | 108 | 0.630394 |
from math import sqrt
import torch
from torch.autograd import Variable
from torch import nn
from torch.nn import functional as F
import sys
from os.path import abspath, dirname
sys.path.append(abspath(dirname(__file__)+'/../'))
from common.layers import ConvNorm, LinearNorm
from common.utils import to_gpu, get_mask_from_lengths
class LocationLayer(nn.Module):
    """Location-feature extractor for location-sensitive attention.

    Convolves the stacked (current, cumulative) attention weights and
    projects the result into the attention feature space.
    """

    def __init__(self, attention_n_filters, attention_kernel_size,
                 attention_dim):
        super(LocationLayer, self).__init__()
        pad = int((attention_kernel_size - 1) / 2)
        self.location_conv = ConvNorm(2, attention_n_filters,
                                      kernel_size=attention_kernel_size,
                                      padding=pad, bias=False, stride=1,
                                      dilation=1)
        self.location_dense = LinearNorm(attention_n_filters, attention_dim,
                                         bias=False, w_init_gain='tanh')

    def forward(self, attention_weights_cat):
        """Map (B, 2, T_in) stacked weights to (B, T_in, attention_dim)."""
        features = self.location_conv(attention_weights_cat)
        return self.location_dense(features.transpose(1, 2))
class Attention(nn.Module):
    """Additive (Bahdanau-style) attention with location features."""

    def __init__(self, attention_rnn_dim, embedding_dim,
                 attention_dim, attention_location_n_filters,
                 attention_location_kernel_size):
        super(Attention, self).__init__()
        self.query_layer = LinearNorm(attention_rnn_dim, attention_dim,
                                      bias=False, w_init_gain='tanh')
        self.memory_layer = LinearNorm(embedding_dim, attention_dim, bias=False,
                                       w_init_gain='tanh')
        self.v = LinearNorm(attention_dim, 1, bias=False)
        self.location_layer = LocationLayer(attention_location_n_filters,
                                            attention_location_kernel_size,
                                            attention_dim)
        # Masked positions get -inf so softmax assigns them zero weight.
        self.score_mask_value = -float("inf")

    def get_alignment_energies(self, query, processed_memory,
                               attention_weights_cat):
        """Return unnormalized attention energies, shape (B, T_in)."""
        projected_query = self.query_layer(query.unsqueeze(1))
        location_features = self.location_layer(attention_weights_cat)
        energies = self.v(torch.tanh(
            projected_query + location_features + processed_memory))
        return energies.squeeze(-1)

    def forward(self, attention_hidden_state, memory, processed_memory,
                attention_weights_cat, mask):
        """Compute one attention step; returns (context, weights)."""
        energies = self.get_alignment_energies(
            attention_hidden_state, processed_memory, attention_weights_cat)
        if mask is not None:
            energies.data.masked_fill_(mask, self.score_mask_value)
        weights = F.softmax(energies, dim=1)
        context = torch.bmm(weights.unsqueeze(1), memory).squeeze(1)
        return context, weights
class Prenet(nn.Module):
    """Stack of bias-free linear layers with relu + always-on dropout.

    Dropout stays active even at inference: the inference branch samples one
    Bernoulli(0.5) mask from the first batch element and shares it across
    the batch, scaling by 2 to preserve the expected activation.
    """

    def __init__(self, in_dim, sizes):
        super(Prenet, self).__init__()
        dims = zip([in_dim] + sizes[:-1], sizes)
        self.layers = nn.ModuleList(
            [LinearNorm(d_in, d_out, bias=False) for d_in, d_out in dims])

    def forward(self, x, inference=False):
        if inference:
            for linear in self.layers:
                x = F.relu(linear(x))
                first = x[0].unsqueeze(0)
                keep = Variable(torch.bernoulli(first.data.new(first.data.size()).fill_(0.5)))
                keep = keep.expand(x.size(0), x.size(1))
                x = x * keep * 2
        else:
            # training=True keeps dropout on regardless of module mode
            for linear in self.layers:
                x = F.dropout(F.relu(linear(x)), p=0.5, training=True)
        return x
class Postnet(nn.Module):
    """Stack of 1-d convolutions that predicts a residual to be added to the
    decoder's mel output.

    All layers but the last use tanh activations in postnet_embedding_dim
    channels; the final layer projects back to n_mel_channels with linear gain.
    """

    def __init__(self, n_mel_channels, postnet_embedding_dim,
                 postnet_kernel_size, postnet_n_convolutions):
        super(Postnet, self).__init__()
        self.convolutions = nn.ModuleList()
        # First layer: mel channels -> embedding channels
        self.convolutions.append(
            nn.Sequential(
                ConvNorm(n_mel_channels, postnet_embedding_dim,
                         kernel_size=postnet_kernel_size, stride=1,
                         padding=int((postnet_kernel_size - 1) / 2),
                         dilation=1, w_init_gain='tanh'),
                nn.BatchNorm1d(postnet_embedding_dim))
        )
        # Middle layers keep the embedding width
        for i in range(1, postnet_n_convolutions - 1):
            self.convolutions.append(
                nn.Sequential(
                    ConvNorm(postnet_embedding_dim,
                             postnet_embedding_dim,
                             kernel_size=postnet_kernel_size, stride=1,
                             padding=int((postnet_kernel_size - 1) / 2),
                             dilation=1, w_init_gain='tanh'),
                    nn.BatchNorm1d(postnet_embedding_dim))
            )
        # Last layer projects back to mel channels with linear gain
        self.convolutions.append(
            nn.Sequential(
                ConvNorm(postnet_embedding_dim, n_mel_channels,
                         kernel_size=postnet_kernel_size, stride=1,
                         padding=int((postnet_kernel_size - 1) / 2),
                         dilation=1, w_init_gain='linear'),
                nn.BatchNorm1d(n_mel_channels))
        )

    def forward(self, x):
        """Return the postnet residual for mel input x, (B, n_mel, T)."""
        # tanh on all but the final (linear) convolution
        for i in range(len(self.convolutions) - 1):
            x = F.dropout(torch.tanh(self.convolutions[i](x)), 0.5, self.training)
        x = F.dropout(self.convolutions[-1](x), 0.5, self.training)
        return x
class Encoder(nn.Module):
    """Tacotron 2 encoder: a stack of 1-d convolutions followed by a single
    bidirectional LSTM over the character embeddings."""

    def __init__(self, encoder_n_convolutions,
                 encoder_embedding_dim, encoder_kernel_size):
        super(Encoder, self).__init__()
        convolutions = []
        for _ in range(encoder_n_convolutions):
            conv_layer = nn.Sequential(
                ConvNorm(encoder_embedding_dim,
                         encoder_embedding_dim,
                         kernel_size=encoder_kernel_size, stride=1,
                         padding=int((encoder_kernel_size - 1) / 2),
                         dilation=1, w_init_gain='relu'),
                nn.BatchNorm1d(encoder_embedding_dim))
            convolutions.append(conv_layer)
        self.convolutions = nn.ModuleList(convolutions)
        # Each LSTM direction gets half the width so the concatenated output
        # keeps encoder_embedding_dim channels.
        self.lstm = nn.LSTM(encoder_embedding_dim,
                            int(encoder_embedding_dim / 2), 1,
                            batch_first=True, bidirectional=True)

    def forward(self, x, input_lengths):
        """Encode (B, embedding_dim, T) input; returns (B, T, embedding_dim).

        Lengths are used to pack the padded sequence so the LSTM skips padding.
        NOTE(review): enforce_sorted is left at its default, so input_lengths
        presumably arrive sorted descending — confirm against the data loader.
        """
        for conv in self.convolutions:
            x = F.dropout(F.relu(conv(x)), 0.5, self.training)
        x = x.transpose(1, 2)
        input_lengths = input_lengths.cpu().numpy()
        x = nn.utils.rnn.pack_padded_sequence(
            x, input_lengths, batch_first=True)
        self.lstm.flatten_parameters()
        outputs, _ = self.lstm(x)
        outputs, _ = nn.utils.rnn.pad_packed_sequence(
            outputs, batch_first=True)
        return outputs

    def infer(self, x):
        """Inference path: same as forward but without packing (no padding)."""
        for conv in self.convolutions:
            x = F.dropout(F.relu(conv(x)), 0.5, self.training)
        x = x.transpose(1, 2)
        self.lstm.flatten_parameters()
        outputs, _ = self.lstm(x)
        return outputs
class Decoder(nn.Module):
    """Autoregressive Tacotron 2 decoder with location-sensitive attention.

    Consumes the conditioning-augmented encoder memory and predicts mel
    frames step by step, n_frames_per_step sub-frames per decoder step.
    """

    def __init__(self, n_mel_channels, n_frames_per_step,
                 encoder_embedding_dim, attention_dim,
                 attention_location_n_filters,
                 attention_location_kernel_size,
                 attention_rnn_dim, decoder_rnn_dim,
                 prenet_dim, max_decoder_steps, gate_threshold,
                 p_attention_dropout, p_decoder_dropout,
                 early_stopping):
        super(Decoder, self).__init__()
        self.n_mel_channels = n_mel_channels
        self.n_frames_per_step = n_frames_per_step
        self.encoder_embedding_dim = encoder_embedding_dim
        self.attention_rnn_dim = attention_rnn_dim
        self.decoder_rnn_dim = decoder_rnn_dim
        self.prenet_dim = prenet_dim
        self.max_decoder_steps = max_decoder_steps
        self.gate_threshold = gate_threshold
        self.p_attention_dropout = p_attention_dropout
        self.p_decoder_dropout = p_decoder_dropout
        self.early_stopping = early_stopping
        self.prenet = Prenet(
            n_mel_channels,
            [prenet_dim, prenet_dim])
        self.attention_rnn = nn.LSTMCell(
            prenet_dim + encoder_embedding_dim,
            attention_rnn_dim)
        self.attention_layer = Attention(
            attention_rnn_dim, encoder_embedding_dim,
            attention_dim, attention_location_n_filters,
            attention_location_kernel_size)
        # third positional arg is LSTMCell's `bias` (truthy 1) — kept as-is
        self.decoder_rnn = nn.LSTMCell(
            attention_rnn_dim + encoder_embedding_dim,
            decoder_rnn_dim, 1)
        self.linear_projection = LinearNorm(
            decoder_rnn_dim + encoder_embedding_dim,
            n_mel_channels * n_frames_per_step)
        self.gate_layer = LinearNorm(
            decoder_rnn_dim + encoder_embedding_dim, 1,
            bias=True, w_init_gain='sigmoid')

    def get_go_frame(self, memory):
        """Return the all-zero <GO> frame, (B, n_mel * n_frames_per_step)."""
        B = memory.size(0)
        decoder_input = Variable(memory.data.new(
            B, self.n_mel_channels * self.n_frames_per_step).zero_())
        return decoder_input

    def initialize_decoder_states(self, memory, mask):
        """Reset all recurrent and attention state before decoding a batch."""
        B = memory.size(0)
        MAX_TIME = memory.size(1)
        self.attention_hidden = Variable(memory.data.new(
            B, self.attention_rnn_dim).zero_())
        self.attention_cell = Variable(memory.data.new(
            B, self.attention_rnn_dim).zero_())
        self.decoder_hidden = Variable(memory.data.new(
            B, self.decoder_rnn_dim).zero_())
        self.decoder_cell = Variable(memory.data.new(
            B, self.decoder_rnn_dim).zero_())
        self.attention_weights = Variable(memory.data.new(
            B, MAX_TIME).zero_())
        self.attention_weights_cum = Variable(memory.data.new(
            B, MAX_TIME).zero_())
        self.attention_context = Variable(memory.data.new(
            B, self.encoder_embedding_dim).zero_())
        self.memory = memory
        # Project the memory once; reused at every decoder step.
        self.processed_memory = self.attention_layer.memory_layer(memory)
        self.mask = mask

    def parse_decoder_inputs(self, decoder_inputs):
        """(B, n_mel, T_out) -> (T_out/r, B, n_mel * r) for teacher forcing."""
        decoder_inputs = decoder_inputs.transpose(1, 2).contiguous()
        decoder_inputs = decoder_inputs.view(
            decoder_inputs.size(0),
            int(decoder_inputs.size(1)/self.n_frames_per_step), -1)
        decoder_inputs = decoder_inputs.transpose(0, 1)
        return decoder_inputs

    def parse_decoder_outputs(self, mel_outputs, gate_outputs, alignments):
        """Stack per-step lists into batch-first tensors; mels -> (B, n_mel, T)."""
        alignments = torch.stack(alignments).transpose(0, 1)
        gate_outputs = torch.stack(gate_outputs).transpose(0, 1)
        gate_outputs = gate_outputs.contiguous()
        mel_outputs = torch.stack(mel_outputs).transpose(0, 1).contiguous()
        # decouple frames per step
        mel_outputs = mel_outputs.view(
            mel_outputs.size(0), -1, self.n_mel_channels)
        mel_outputs = mel_outputs.transpose(1, 2)
        return mel_outputs, gate_outputs, alignments

    def decode(self, decoder_input):
        """One decoder step using the stored states, attention and memory.

        Returns (mel_output, gate_output, attention_weights).
        """
        cell_input = torch.cat((decoder_input, self.attention_context), -1)
        self.attention_hidden, self.attention_cell = self.attention_rnn(
            cell_input, (self.attention_hidden, self.attention_cell))
        self.attention_hidden = F.dropout(
            self.attention_hidden, self.p_attention_dropout, self.training)
        # Location features: current and cumulative weights stacked.
        attention_weights_cat = torch.cat(
            (self.attention_weights.unsqueeze(1),
             self.attention_weights_cum.unsqueeze(1)), dim=1)
        self.attention_context, self.attention_weights = self.attention_layer(
            self.attention_hidden, self.memory, self.processed_memory,
            attention_weights_cat, self.mask)
        self.attention_weights_cum += self.attention_weights
        decoder_input = torch.cat(
            (self.attention_hidden, self.attention_context), -1)
        self.decoder_hidden, self.decoder_cell = self.decoder_rnn(
            decoder_input, (self.decoder_hidden, self.decoder_cell))
        self.decoder_hidden = F.dropout(
            self.decoder_hidden, self.p_decoder_dropout, self.training)
        decoder_hidden_attention_context = torch.cat(
            (self.decoder_hidden, self.attention_context), dim=1)
        decoder_output = self.linear_projection(
            decoder_hidden_attention_context)
        gate_prediction = self.gate_layer(decoder_hidden_attention_context)
        return decoder_output, gate_prediction, self.attention_weights

    def forward(self, memory, decoder_inputs, memory_lengths):
        """Teacher-forced training pass; returns (mels, gates, alignments)."""
        decoder_input = self.get_go_frame(memory).unsqueeze(0)
        decoder_inputs = self.parse_decoder_inputs(decoder_inputs)
        decoder_inputs = torch.cat((decoder_input, decoder_inputs), dim=0)
        # Prenet every sub-frame slice (width z) of each grouped step.
        decoder_input_frames = []
        z = int(decoder_inputs.size(2) / self.n_frames_per_step)
        for i in range(self.n_frames_per_step):
            decoder_input_frames.append(self.prenet(decoder_inputs[:, :, i*z:(i+1)*z]))
        self.initialize_decoder_states(
            memory, mask=~get_mask_from_lengths(memory_lengths))
        mel_outputs, gate_outputs, alignments = [], [], []
        while len(mel_outputs) < decoder_input_frames[0].size(0) - 1:
            for input_frame in decoder_input_frames:
                decoder_input = input_frame[len(mel_outputs)]
                mel_output, gate_output, attention_weights = self.decode(
                    decoder_input)
                # squeeze() would drop the batch dim when B == 1, so keep it.
                gate_outputs += [gate_output.squeeze() if memory.shape[0] > 1 else gate_output]
                alignments += [attention_weights]
                mel_outputs += [mel_output.squeeze(1)]
        mel_outputs, gate_outputs, alignments = self.parse_decoder_outputs(
            mel_outputs, gate_outputs, alignments)
        return mel_outputs, gate_outputs, alignments

    def infer(self, memory):
        """Autoregressive inference; runs until every sample's gate fires or
        max_decoder_steps is reached. Returns (mels, gates, alignments)."""
        decoder_input = self.get_go_frame(memory)
        self.initialize_decoder_states(memory, mask=None)
        # Allocate bookkeeping tensors on the same device as the encoder
        # output instead of hard-coding .cuda(), so CPU inference also works.
        mel_lengths = torch.zeros([memory.size(0)], dtype=torch.int32,
                                  device=memory.device)
        not_finished = torch.ones([memory.size(0)], dtype=torch.int32,
                                  device=memory.device)
        mel_outputs, gate_outputs, alignments = [], [], []
        z = int(decoder_input.size(1) / self.n_frames_per_step)
        while True:
            decoder_input_frames = []
            for i in range(self.n_frames_per_step):
                decoder_input_frames.append(decoder_input[:, i * z:(i + 1) * z])
            for input_frame in decoder_input_frames:
                mel_output, gate_output, alignment = self.decode(self.prenet(input_frame))
                gate_outputs += [gate_output]
                alignments += [alignment]
                mel_outputs += [mel_output.squeeze(1)]
            # Samples stop counting toward mel_lengths once their gate fires.
            dec = torch.le(torch.sigmoid(gate_output.data),
                           self.gate_threshold).to(torch.int32).squeeze(1)
            not_finished = not_finished*dec
            mel_lengths += not_finished
            if self.early_stopping and torch.sum(not_finished) == 0:
                break
            if len(mel_outputs) == self.max_decoder_steps:
                print("Warning! Reached max decoder steps")
                break
            decoder_input = mel_output
        mel_outputs, gate_outputs, alignments = self.parse_decoder_outputs(
            mel_outputs, gate_outputs, alignments)
        return mel_outputs, gate_outputs, alignments
class Tacotron2(nn.Module):
    """Multi-speaker (optionally multi-emotion) Tacotron 2.

    Text is embedded and encoded; speaker (and optional emotion) embeddings
    are broadcast along the encoder time axis and concatenated onto the
    encoder output, which conditions the attention decoder and postnet.
    """

    def __init__(self, mask_padding, n_mel_channels,
                 n_symbols, symbols_embedding_dim, n_speakers, speakers_embedding_dim,
                 use_emotions, n_emotions, emotions_embedding_dim,
                 encoder_kernel_size, encoder_n_convolutions, encoder_embedding_dim,
                 attention_rnn_dim, attention_dim, attention_location_n_filters,
                 attention_location_kernel_size, n_frames_per_step,
                 decoder_rnn_dim, prenet_dim, max_decoder_steps, gate_threshold,
                 p_attention_dropout, p_decoder_dropout,
                 postnet_embedding_dim, postnet_kernel_size,
                 postnet_n_convolutions, decoder_no_early_stopping, **kwargs):
        super(Tacotron2, self).__init__()
        self.mask_padding = mask_padding
        self.n_mel_channels = n_mel_channels
        self.n_frames_per_step = n_frames_per_step
        self.symbols_embedding = nn.Embedding(
            n_symbols, symbols_embedding_dim)
        std = sqrt(2.0 / (n_symbols + symbols_embedding_dim))
        val = sqrt(3.0) * std  # uniform bounds matching that std
        self.symbols_embedding.weight.data.uniform_(-val, val)
        self.speakers_embedding = nn.Embedding(n_speakers, speakers_embedding_dim)
        torch.nn.init.xavier_uniform_(self.speakers_embedding.weight)
        self.encoder = Encoder(encoder_n_convolutions,
                               encoder_embedding_dim,
                               encoder_kernel_size)
        # The decoder memory width is encoder output + conditioning vectors.
        encoder_out_embedding_dim = encoder_embedding_dim + speakers_embedding_dim
        self.use_emotions = use_emotions
        if self.use_emotions:
            self.emotions_embedding = nn.Embedding(n_emotions, emotions_embedding_dim)
            torch.nn.init.xavier_uniform_(self.emotions_embedding.weight)
            encoder_out_embedding_dim += emotions_embedding_dim
        self.decoder = Decoder(n_mel_channels, n_frames_per_step,
                               encoder_out_embedding_dim, attention_dim,
                               attention_location_n_filters,
                               attention_location_kernel_size,
                               attention_rnn_dim, decoder_rnn_dim,
                               prenet_dim, max_decoder_steps,
                               gate_threshold, p_attention_dropout,
                               p_decoder_dropout,
                               not decoder_no_early_stopping)
        self.postnet = Postnet(n_mel_channels, postnet_embedding_dim,
                               postnet_kernel_size,
                               postnet_n_convolutions)

    def parse_batch(self, batch):
        """Move a data-loader batch to the GPU and split it into
        (model inputs, training targets)."""
        text_padded, input_lengths, mel_padded, gate_padded, \
            output_lengths, speaker_ids, emotion_ids = batch
        text_padded = to_gpu(text_padded).long()
        input_lengths = to_gpu(input_lengths).long()
        max_len = torch.max(input_lengths.data).item()
        mel_padded = to_gpu(mel_padded).float()
        gate_padded = to_gpu(gate_padded).float()
        output_lengths = to_gpu(output_lengths).long()
        speaker_ids = to_gpu(speaker_ids).long()
        emotion_ids = to_gpu(emotion_ids).long()
        return ((text_padded, input_lengths, mel_padded, max_len, output_lengths, speaker_ids, emotion_ids),
                (mel_padded, gate_padded))

    def parse_output(self, outputs, output_lengths=None):
        """Zero out outputs past each sample's true length and push the padded
        gate energies high (1e3) so the stop token reads as 'finished'."""
        if self.mask_padding and output_lengths is not None:
            mask = ~get_mask_from_lengths(output_lengths)
            mask = mask.expand(self.n_mel_channels, mask.size(0), mask.size(1))
            mask = mask.permute(1, 0, 2)
            outputs[0].data.masked_fill_(mask, 0.0)
            outputs[1].data.masked_fill_(mask, 0.0)
            outputs[2].data.masked_fill_(mask[:, 0, :], 1e3)  # gate energies
        return outputs

    def forward(self, inputs):
        """Training pass; returns [mels, postnet mels, gates, alignments]."""
        inputs, input_lengths, targets, max_len, output_lengths, speaker_ids, emotion_ids = inputs
        input_lengths, output_lengths = input_lengths.data, output_lengths.data
        # Conditioning streams concatenated along the feature axis
        outputs = []
        embedded_inputs = self.symbols_embedding(inputs).transpose(1, 2)
        encoder_outputs = self.encoder(embedded_inputs, input_lengths)
        outputs.append(encoder_outputs)
        # Speaker embedding, broadcast over encoder time steps
        speaker_ids = speaker_ids.unsqueeze(1)
        embedded_speakers = self.speakers_embedding(speaker_ids)
        embedded_speakers = embedded_speakers.expand(-1, max_len, -1)
        outputs.append(embedded_speakers)
        # Optional emotion embedding, broadcast the same way
        if self.use_emotions:
            emotion_ids = emotion_ids.unsqueeze(1)
            embedded_emotions = self.emotions_embedding(emotion_ids)
            embedded_emotions = embedded_emotions.expand(-1, max_len, -1)
            outputs.append(embedded_emotions)
        merged_outputs = torch.cat(outputs, -1)
        mel_outputs, gate_outputs, alignments = self.decoder(
            merged_outputs, targets, memory_lengths=input_lengths)
        # Postnet predicts a residual on top of the decoder mels
        mel_outputs_postnet = self.postnet(mel_outputs)
        mel_outputs_postnet = mel_outputs + mel_outputs_postnet
        return self.parse_output(
            [mel_outputs, mel_outputs_postnet, gate_outputs, alignments],
            output_lengths)

    def infer(self, input, speaker_id, emotion_id=None):
        """Inference pass. emotion_id must be given (non-None) when the model
        was built with use_emotions=True."""
        outputs = []
        embedded_input = self.symbols_embedding(input).transpose(1, 2)
        encoder_output = self.encoder.infer(embedded_input)
        outputs.append(encoder_output)
        # Speaker embedding, broadcast over encoder time steps
        speaker_id = speaker_id.unsqueeze(1)
        embedded_speaker = self.speakers_embedding(speaker_id)
        embedded_speaker = embedded_speaker.expand(-1, encoder_output.shape[1], -1)
        outputs.append(embedded_speaker)
        # Optional emotion embedding
        if self.use_emotions:
            emotion_id = emotion_id.unsqueeze(1)
            embedded_emotion = self.emotions_embedding(emotion_id)
            embedded_emotion = embedded_emotion.expand(-1, encoder_output.shape[1], -1)
            outputs.append(embedded_emotion)
        merged_outputs = torch.cat(outputs, -1)
        mel_outputs, gate_outputs, alignments = self.decoder.infer(
            merged_outputs)
        mel_outputs_postnet = self.postnet(mel_outputs)
        mel_outputs_postnet = mel_outputs + mel_outputs_postnet
        outputs = self.parse_output(
            [mel_outputs, mel_outputs_postnet, gate_outputs, alignments])
        return outputs
| true | true |
f730efb7bb6d6467c2a0411647710d26fbaf595b | 17,309 | py | Python | models/tacotron.py | peter3125/WaveRNN | ef34b9d91dfbff3197c8cc20d3ed272b222a5ec2 | [
"MIT"
] | null | null | null | models/tacotron.py | peter3125/WaveRNN | ef34b9d91dfbff3197c8cc20d3ed272b222a5ec2 | [
"MIT"
] | null | null | null | models/tacotron.py | peter3125/WaveRNN | ef34b9d91dfbff3197c8cc20d3ed272b222a5ec2 | [
"MIT"
] | null | null | null | import os
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
class HighwayNetwork(nn.Module):
    """Highway layer: y = g * relu(W1 x) + (1 - g) * x with g = sigmoid(W2 x)."""

    def __init__(self, size):
        super().__init__()
        self.W1 = nn.Linear(size, size)
        self.W2 = nn.Linear(size, size)
        # Zero the transform-path bias so the layer starts close to identity.
        self.W1.bias.data.fill_(0.)

    def forward(self, x):
        transform = F.relu(self.W1(x))
        gate = torch.sigmoid(self.W2(x))
        return gate * transform + (1. - gate) * x
class Encoder(nn.Module):
    """Tacotron text encoder: embedding -> PreNet -> CBHG."""

    def __init__(self, embed_dims, num_chars, cbhg_channels, K, num_highways, dropout):
        super().__init__()
        self.embedding = nn.Embedding(num_chars, embed_dims)
        self.pre_net = PreNet(embed_dims)
        self.cbhg = CBHG(K=K, in_channels=cbhg_channels, channels=cbhg_channels,
                         proj_channels=[cbhg_channels, cbhg_channels],
                         num_highways=num_highways)

    def forward(self, x):
        embedded = self.embedding(x)
        features = self.pre_net(embedded)
        # CBHG expects channels-first input
        features.transpose_(1, 2)
        return self.cbhg(features)
class BatchNormConv(nn.Module):
    """Bias-free 1-d convolution followed by batch norm, with optional relu."""

    def __init__(self, in_channels, out_channels, kernel, relu=True):
        super().__init__()
        self.conv = nn.Conv1d(in_channels, out_channels, kernel, stride=1, padding=kernel // 2, bias=False)
        self.bnorm = nn.BatchNorm1d(out_channels)
        self.relu = relu

    def forward(self, x):
        y = self.conv(x)
        # only the literal True enables the relu, matching the flag's contract
        if self.relu is True:
            y = F.relu(y)
        return self.bnorm(y)
class CBHG(nn.Module):
    """Conv Bank + Highway network + bidirectional GRU (Tacotron CBHG).

    A bank of K conv1d filters (kernel sizes 1..K) over the same input is
    max-pooled, projected back down, added as a residual, passed through
    highway layers and finally a bidirectional GRU.
    """

    def __init__(self, K, in_channels, channels, proj_channels, num_highways):
        super().__init__()
        self.bank_kernels = [i for i in range(1, K + 1)]
        self.conv1d_bank = nn.ModuleList()
        for k in self.bank_kernels:
            conv = BatchNormConv(in_channels, channels, k)
            self.conv1d_bank.append(conv)
        self.maxpool = nn.MaxPool1d(kernel_size=2, stride=1, padding=1)
        self.conv_project1 = BatchNormConv(len(self.bank_kernels) * channels, proj_channels[0], 3)
        self.conv_project2 = BatchNormConv(proj_channels[0], proj_channels[1], 3, relu=False)
        # Fix the highway input if the projection width differs from the
        # highway width
        if proj_channels[-1] != channels:
            self.highway_mismatch = True
            self.pre_highway = nn.Linear(proj_channels[-1], channels, bias=False)
        else:
            self.highway_mismatch = False
        self.highways = nn.ModuleList()
        for i in range(num_highways):
            hn = HighwayNetwork(channels)
            self.highways.append(hn)
        self.rnn = nn.GRU(channels, channels, batch_first=True, bidirectional=True)

    def forward(self, x):
        """Map (B, in_channels, T) to (B, T, 2 * channels)."""
        # Save these for the residual connection / length trimming
        residual = x
        seq_len = x.size(-1)
        conv_bank = []
        # Convolution bank: every kernel size sees the same input; trim the
        # even-kernel padding overhang back to seq_len
        for conv in self.conv1d_bank:
            c = conv(x)  # Convolution
            conv_bank.append(c[:, :, :seq_len])
        # Stack along the channel axis
        conv_bank = torch.cat(conv_bank, dim=1)
        # dump the last padding column so the shape fits the residual
        x = self.maxpool(conv_bank)[:, :, :seq_len]
        # Conv1d projections
        x = self.conv_project1(x)
        x = self.conv_project2(x)
        # Residual Connect
        x = x + residual
        # Through the highways (time-major feature layout)
        x = x.transpose(1, 2)
        if self.highway_mismatch is True:
            x = self.pre_highway(x)
        for h in self.highways: x = h(x)
        # And then the RNN
        x, _ = self.rnn(x)
        return x
class PreNet(nn.Module):
    """Two fully connected layers with relu and dropout (active in training)."""

    def __init__(self, in_dims, fc1_dims=256, fc2_dims=128, dropout=0.5):
        super().__init__()
        self.fc1 = nn.Linear(in_dims, fc1_dims)
        self.fc2 = nn.Linear(fc1_dims, fc2_dims)
        self.p = dropout

    def forward(self, x):
        hidden = F.dropout(F.relu(self.fc1(x)), self.p, training=self.training)
        return F.dropout(F.relu(self.fc2(hidden)), self.p, training=self.training)
class Attention(nn.Module):
    """Plain additive attention; `t` is accepted for interface parity with LSA."""

    def __init__(self, attn_dims):
        super().__init__()
        self.W = nn.Linear(attn_dims, attn_dims, bias=False)
        self.v = nn.Linear(attn_dims, 1, bias=False)

    def forward(self, encoder_seq_proj, query, t):
        # Project the query and broadcast it over encoder time steps
        projected_query = self.W(query).unsqueeze(1)
        energies = self.v(torch.tanh(encoder_seq_proj + projected_query))
        # Normalize over time; return scores as (B, 1, T)
        return F.softmax(energies, dim=1).transpose(1, 2)
class LSA(nn.Module):
    """Location-sensitive attention with sigmoid-smoothed normalization.

    Keeps the current and cumulative attention weights as state; t == 0
    resets that state for a new sequence.
    """

    def __init__(self, attn_dim, kernel_size=31, filters=32):
        super().__init__()
        self.conv = nn.Conv1d(2, filters, padding=(kernel_size - 1) // 2, kernel_size=kernel_size, bias=False)
        self.L = nn.Linear(filters, attn_dim, bias=True)
        self.W = nn.Linear(attn_dim, attn_dim, bias=True)
        self.v = nn.Linear(attn_dim, 1, bias=False)
        self.cumulative = None
        self.attention = None

    def init_attention(self, encoder_seq_proj):
        # Zero state on the same device as the module parameters
        device = next(self.parameters()).device
        b, t, c = encoder_seq_proj.size()
        self.cumulative = torch.zeros(b, t, device=device)
        self.attention = torch.zeros(b, t, device=device)

    def forward(self, encoder_seq_proj, query, t):
        if t == 0:
            self.init_attention(encoder_seq_proj)
        projected_query = self.W(query).unsqueeze(1)
        # Location features from stacked (cumulative, current) weights
        location = torch.cat([self.cumulative.unsqueeze(1), self.attention.unsqueeze(1)], dim=1)
        location_features = self.L(self.conv(location).transpose(1, 2))
        energies = self.v(torch.tanh(projected_query + encoder_seq_proj + location_features))
        energies = energies.squeeze(-1)
        # Smoothed normalization: sigmoid instead of softmax, renormalized
        sig = torch.sigmoid(energies)
        scores = sig / sig.sum(dim=1, keepdim=True)
        self.attention = scores
        self.cumulative += self.attention
        return scores.unsqueeze(-1).transpose(1, 2)
class Decoder(nn.Module):
    """Tacotron attention decoder with zoneout-regularized residual LSTMs.

    mel_proj always produces max_r frames; self.r (assigned externally)
    selects how many of them are actually emitted per step.
    """

    def __init__(self, n_mels, decoder_dims, lstm_dims):
        super().__init__()
        self.max_r = 20
        # r = frames generated per decoder step; set by the owning model
        self.r = None
        # generating=True disables zoneout (inference mode)
        self.generating = False
        self.n_mels = n_mels
        self.prenet = PreNet(n_mels)
        self.attn_net = LSA(decoder_dims)
        self.attn_rnn = nn.GRUCell(decoder_dims + decoder_dims // 2, decoder_dims)
        self.rnn_input = nn.Linear(2 * decoder_dims, lstm_dims)
        self.res_rnn1 = nn.LSTMCell(lstm_dims, lstm_dims)
        self.res_rnn2 = nn.LSTMCell(lstm_dims, lstm_dims)
        self.mel_proj = nn.Linear(lstm_dims, n_mels * self.max_r, bias=False)

    def zoneout(self, prev, current, p=0.1):
        """Randomly keep each unit of the previous state with probability p."""
        device = prev.device
        assert prev.device == current.device
        mask = torch.zeros(prev.size(), device=device).bernoulli_(p)
        return prev * mask + current * (1 - mask)

    def forward(self, encoder_seq, encoder_seq_proj, prenet_in,
                hidden_states, cell_states, context_vec, t):
        """Run decoder step t.

        Returns (mels, scores, hidden_states, cell_states, context_vec) with
        the updated recurrent state threaded back to the caller.
        """
        # Need this for reshaping mels
        batch_size = encoder_seq.size(0)
        # Unpack the hidden and cell states
        attn_hidden, rnn1_hidden, rnn2_hidden = hidden_states
        rnn1_cell, rnn2_cell = cell_states
        # PreNet for the Attention RNN
        prenet_out = self.prenet(prenet_in)
        # Compute the Attention RNN hidden state
        attn_rnn_in = torch.cat([context_vec, prenet_out], dim=-1)
        attn_hidden = self.attn_rnn(attn_rnn_in.squeeze(1), attn_hidden)
        # Compute the attention scores
        scores = self.attn_net(encoder_seq_proj, attn_hidden, t)
        # Dot product to create the context vector
        context_vec = scores @ encoder_seq
        context_vec = context_vec.squeeze(1)
        # Concat Attention RNN output w. Context Vector & project
        x = torch.cat([context_vec, attn_hidden], dim=1)
        x = self.rnn_input(x)
        # Compute first Residual RNN (zoneout only while training)
        rnn1_hidden_next, rnn1_cell = self.res_rnn1(x, (rnn1_hidden, rnn1_cell))
        if not self.generating:
            rnn1_hidden = self.zoneout(rnn1_hidden, rnn1_hidden_next)
        else:
            rnn1_hidden = rnn1_hidden_next
        x = x + rnn1_hidden
        # Compute second Residual RNN
        rnn2_hidden_next, rnn2_cell = self.res_rnn2(x, (rnn2_hidden, rnn2_cell))
        if not self.generating:
            rnn2_hidden = self.zoneout(rnn2_hidden, rnn2_hidden_next)
        else:
            rnn2_hidden = rnn2_hidden_next
        x = x + rnn2_hidden
        # Project Mels and keep only the first r of the max_r frames
        mels = self.mel_proj(x)
        mels = mels.view(batch_size, self.n_mels, self.max_r)[:, :, :self.r]
        hidden_states = (attn_hidden, rnn1_hidden, rnn2_hidden)
        cell_states = (rnn1_cell, rnn2_cell)
        return mels, scores, hidden_states, cell_states, context_vec
class Tacotron(nn.Module):
    """End-to-end Tacotron TTS model.

    Pipeline: character embedding -> CBHG encoder -> location-sensitive
    attention decoder (r mel frames per step) -> CBHG postnet projecting
    mels to linear spectrogram bins.

    ``forward`` runs teacher-forced training (or GTA feature generation),
    ``generate`` runs free-running inference. The training step counter and
    the reduction factor ``r`` are stored as non-trainable parameters so
    they persist in checkpoints.
    """

    def __init__(self, embed_dims, num_chars, encoder_dims, decoder_dims, n_mels, fft_bins, postnet_dims,
                 encoder_K, lstm_dims, postnet_K, num_highways, dropout):
        super().__init__()
        self.n_mels = n_mels
        self.lstm_dims = lstm_dims
        self.decoder_dims = decoder_dims
        self.encoder = Encoder(embed_dims, num_chars, encoder_dims,
                               encoder_K, num_highways, dropout)
        self.encoder_proj = nn.Linear(decoder_dims, decoder_dims, bias=False)
        self.decoder = Decoder(n_mels, decoder_dims, lstm_dims)
        self.postnet = CBHG(postnet_K, n_mels, postnet_dims, [256, 80], num_highways)
        self.post_proj = nn.Linear(postnet_dims * 2, fft_bins, bias=False)

        self.init_model()
        self.num_params()

        # Unfortunately these settings have to live in the parameters in order
        # to be saved with the state dict; if anyone knows a better way of
        # doing this please open an issue in the repo.
        self.step = nn.Parameter(torch.zeros(1).long(), requires_grad=False)
        self.r = nn.Parameter(torch.tensor(0).long(), requires_grad=False)

    def set_r(self, r):
        """Set the reduction factor (mel frames decoded per step)."""
        self.r.data = torch.tensor(r)
        self.decoder.r = r

    def get_r(self):
        """Return the current reduction factor as a Python int."""
        return self.r.item()

    def forward(self, x, m, generate_gta=False):
        """Teacher-forced pass.

        :param x: (batch, chars) long tensor of character ids
        :param m: (batch, n_mels, steps) ground-truth mel spectrogram
        :param generate_gta: eval-mode pass for ground-truth-aligned features
        :return: (mel_outputs, linear, attn_scores-as-numpy)
        """
        device = next(self.parameters()).device  # use same device as parameters

        self.step += 1

        if generate_gta:
            self.encoder.eval()
            self.postnet.eval()
            self.decoder.generating = True
        else:
            self.encoder.train()
            self.postnet.train()
            self.decoder.generating = False

        batch_size, _, steps = m.size()

        # Initialise all hidden states and pack into tuple
        attn_hidden = torch.zeros(batch_size, self.decoder_dims, device=device)
        rnn1_hidden = torch.zeros(batch_size, self.lstm_dims, device=device)
        rnn2_hidden = torch.zeros(batch_size, self.lstm_dims, device=device)
        hidden_states = (attn_hidden, rnn1_hidden, rnn2_hidden)

        # Initialise all lstm cell states and pack into tuple
        rnn1_cell = torch.zeros(batch_size, self.lstm_dims, device=device)
        rnn2_cell = torch.zeros(batch_size, self.lstm_dims, device=device)
        cell_states = (rnn1_cell, rnn2_cell)

        # <GO> Frame for start of decoder loop
        go_frame = torch.zeros(batch_size, self.n_mels, device=device)

        # Need an initial context vector
        context_vec = torch.zeros(batch_size, self.decoder_dims, device=device)

        # Project the encoder outputs once to avoid
        # unnecessary matmuls in the decoder loop
        encoder_seq = self.encoder(x)
        encoder_seq_proj = self.encoder_proj(encoder_seq)

        # Need a couple of lists for outputs
        mel_outputs, attn_scores = [], []

        # Run the decoder loop, teacher-forcing the previous ground-truth frame
        for t in range(0, steps, self.r):
            prenet_in = m[:, :, t - 1] if t > 0 else go_frame
            mel_frames, scores, hidden_states, cell_states, context_vec = \
                self.decoder(encoder_seq, encoder_seq_proj, prenet_in,
                             hidden_states, cell_states, context_vec, t)
            mel_outputs.append(mel_frames)
            attn_scores.append(scores)

        # Concat the mel outputs into sequence
        mel_outputs = torch.cat(mel_outputs, dim=2)

        # Post-Process for Linear Spectrograms
        postnet_out = self.postnet(mel_outputs)
        linear = self.post_proj(postnet_out)
        linear = linear.transpose(1, 2)

        # For easy visualisation
        attn_scores = torch.cat(attn_scores, 1)
        attn_scores = attn_scores.cpu().data.numpy()

        return mel_outputs, linear, attn_scores

    def generate(self, x, steps=2000):
        """Free-running inference for a single utterance.

        :param x: sequence of character ids (any array-like)
        :param steps: maximum number of mel frames to decode
        :return: (mel_outputs, linear, attn_scores) as numpy arrays
        """
        device = next(self.parameters()).device  # use same device as parameters

        self.encoder.eval()
        self.postnet.eval()
        self.decoder.generating = True

        batch_size = 1
        x = torch.as_tensor(x, dtype=torch.long, device=device).unsqueeze(0)

        # Need to initialise all hidden states and pack into tuple for tidyness
        attn_hidden = torch.zeros(batch_size, self.decoder_dims, device=device)
        rnn1_hidden = torch.zeros(batch_size, self.lstm_dims, device=device)
        rnn2_hidden = torch.zeros(batch_size, self.lstm_dims, device=device)
        hidden_states = (attn_hidden, rnn1_hidden, rnn2_hidden)

        # Need to initialise all lstm cell states and pack into tuple for tidyness
        rnn1_cell = torch.zeros(batch_size, self.lstm_dims, device=device)
        rnn2_cell = torch.zeros(batch_size, self.lstm_dims, device=device)
        cell_states = (rnn1_cell, rnn2_cell)

        # Need a <GO> Frame for start of decoder loop
        go_frame = torch.zeros(batch_size, self.n_mels, device=device)

        # Need an initial context vector
        context_vec = torch.zeros(batch_size, self.decoder_dims, device=device)

        # Project the encoder outputs once to avoid
        # unnecessary matmuls in the decoder loop
        encoder_seq = self.encoder(x)
        encoder_seq_proj = self.encoder_proj(encoder_seq)

        # Need a couple of lists for outputs
        mel_outputs, attn_scores = [], []

        # Run the decoder loop, feeding back the last predicted frame
        for t in range(0, steps, self.r):
            prenet_in = mel_outputs[-1][:, :, -1] if t > 0 else go_frame
            mel_frames, scores, hidden_states, cell_states, context_vec = \
                self.decoder(encoder_seq, encoder_seq_proj, prenet_in,
                             hidden_states, cell_states, context_vec, t)
            mel_outputs.append(mel_frames)
            attn_scores.append(scores)
            # Stop the loop if silent frames present
            if (mel_frames < -3.8).all() and t > 10: break

        # Concat the mel outputs into sequence
        mel_outputs = torch.cat(mel_outputs, dim=2)

        # Post-Process for Linear Spectrograms
        postnet_out = self.postnet(mel_outputs)
        linear = self.post_proj(postnet_out)

        linear = linear.transpose(1, 2)[0].cpu().data.numpy()
        mel_outputs = mel_outputs[0].cpu().data.numpy()

        # For easy visualisation
        attn_scores = torch.cat(attn_scores, 1)
        attn_scores = attn_scores.cpu().data.numpy()[0]

        # Restore training-mode flags changed above
        self.encoder.train()
        self.postnet.train()
        self.decoder.generating = False

        return mel_outputs, linear, attn_scores

    def init_model(self):
        """Xavier-initialise all weight matrices (biases left at default)."""
        for p in self.parameters():
            if p.dim() > 1: nn.init.xavier_uniform_(p)

    def get_step(self):
        """Return the training step counter as a Python int."""
        return self.step.data.item()

    def reset_step(self):
        # Recreate the counter on the same device as the existing one so a
        # model already moved to GPU does not end up with a CPU parameter
        # (the original always allocated on the default device).
        self.step = nn.Parameter(torch.zeros(1, device=self.step.device).long(),
                                 requires_grad=False)

    def checkpoint(self, path):
        """Save a checkpoint named after the current step count (in k-steps)."""
        k_steps = self.get_step() // 1000
        self.save(f'{path}/checkpoint_{k_steps}k_steps.pyt')

    def log(self, path, msg):
        """Append ``msg`` as one line to the log file at ``path``."""
        with open(path, 'a') as f:
            print(msg, file=f)

    def restore(self, path):
        """Load weights from ``path``, creating an initial save if absent."""
        if not os.path.exists(path):
            print('\nNew Tacotron Training Session...\n')
            self.save(path)
        else:
            print(f'\nLoading Weights: "{path}"\n')
            self.load(path)
            # Keep the decoder's plain-int copy of r in sync with the loaded one
            self.decoder.r = self.r.item()

    def load(self, path, device='cpu'):
        # because PyTorch places on CPU by default, we follow those semantics by using CPU as default.
        print("models/tacotron.py loading {}".format(path))
        self.load_state_dict(torch.load(path, map_location=device), strict=False)

    def save(self, path):
        """Serialise the state dict to ``path``."""
        torch.save(self.state_dict(), path)

    def num_params(self, print_out=True):
        """Return (and optionally print) the trainable parameter count in millions."""
        parameters = filter(lambda p: p.requires_grad, self.parameters())
        # numel() is the idiomatic per-tensor element count (same value as np.prod(p.size()))
        parameters = sum(p.numel() for p in parameters) / 1_000_000
        if print_out:
            print('Trainable Parameters: %.3fM' % parameters)
| 37.223656 | 110 | 0.60847 | import os
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
class HighwayNetwork(nn.Module):
def __init__(self, size):
super().__init__()
self.W1 = nn.Linear(size, size)
self.W2 = nn.Linear(size, size)
self.W1.bias.data.fill_(0.)
def forward(self, x):
x1 = self.W1(x)
x2 = self.W2(x)
g = torch.sigmoid(x2)
y = g * F.relu(x1) + (1. - g) * x
return y
class Encoder(nn.Module):
def __init__(self, embed_dims, num_chars, cbhg_channels, K, num_highways, dropout):
super().__init__()
self.embedding = nn.Embedding(num_chars, embed_dims)
self.pre_net = PreNet(embed_dims)
self.cbhg = CBHG(K=K, in_channels=cbhg_channels, channels=cbhg_channels,
proj_channels=[cbhg_channels, cbhg_channels],
num_highways=num_highways)
def forward(self, x):
x = self.embedding(x)
x = self.pre_net(x)
x.transpose_(1, 2)
x = self.cbhg(x)
return x
class BatchNormConv(nn.Module):
def __init__(self, in_channels, out_channels, kernel, relu=True):
super().__init__()
self.conv = nn.Conv1d(in_channels, out_channels, kernel, stride=1, padding=kernel // 2, bias=False)
self.bnorm = nn.BatchNorm1d(out_channels)
self.relu = relu
def forward(self, x):
x = self.conv(x)
x = F.relu(x) if self.relu is True else x
return self.bnorm(x)
class CBHG(nn.Module):
def __init__(self, K, in_channels, channels, proj_channels, num_highways):
super().__init__()
self.bank_kernels = [i for i in range(1, K + 1)]
self.conv1d_bank = nn.ModuleList()
for k in self.bank_kernels:
conv = BatchNormConv(in_channels, channels, k)
self.conv1d_bank.append(conv)
self.maxpool = nn.MaxPool1d(kernel_size=2, stride=1, padding=1)
self.conv_project1 = BatchNormConv(len(self.bank_kernels) * channels, proj_channels[0], 3)
self.conv_project2 = BatchNormConv(proj_channels[0], proj_channels[1], 3, relu=False)
if proj_channels[-1] != channels:
self.highway_mismatch = True
self.pre_highway = nn.Linear(proj_channels[-1], channels, bias=False)
else:
self.highway_mismatch = False
self.highways = nn.ModuleList()
for i in range(num_highways):
hn = HighwayNetwork(channels)
self.highways.append(hn)
self.rnn = nn.GRU(channels, channels, batch_first=True, bidirectional=True)
def forward(self, x):
residual = x
seq_len = x.size(-1)
conv_bank = []
for conv in self.conv1d_bank:
c = conv(x)
conv_bank.append(c[:, :, :seq_len])
conv_bank = torch.cat(conv_bank, dim=1)
x = self.maxpool(conv_bank)[:, :, :seq_len]
x = self.conv_project1(x)
x = self.conv_project2(x)
x = x + residual
x = x.transpose(1, 2)
if self.highway_mismatch is True:
x = self.pre_highway(x)
for h in self.highways: x = h(x)
x, _ = self.rnn(x)
return x
class PreNet(nn.Module):
def __init__(self, in_dims, fc1_dims=256, fc2_dims=128, dropout=0.5):
super().__init__()
self.fc1 = nn.Linear(in_dims, fc1_dims)
self.fc2 = nn.Linear(fc1_dims, fc2_dims)
self.p = dropout
def forward(self, x):
x = self.fc1(x)
x = F.relu(x)
x = F.dropout(x, self.p, training=self.training)
x = self.fc2(x)
x = F.relu(x)
x = F.dropout(x, self.p, training=self.training)
return x
class Attention(nn.Module):
def __init__(self, attn_dims):
super().__init__()
self.W = nn.Linear(attn_dims, attn_dims, bias=False)
self.v = nn.Linear(attn_dims, 1, bias=False)
def forward(self, encoder_seq_proj, query, t):
query_proj = self.W(query).unsqueeze(1)
u = self.v(torch.tanh(encoder_seq_proj + query_proj))
scores = F.softmax(u, dim=1)
return scores.transpose(1, 2)
class LSA(nn.Module):
def __init__(self, attn_dim, kernel_size=31, filters=32):
super().__init__()
self.conv = nn.Conv1d(2, filters, padding=(kernel_size - 1) // 2, kernel_size=kernel_size, bias=False)
self.L = nn.Linear(filters, attn_dim, bias=True)
self.W = nn.Linear(attn_dim, attn_dim, bias=True)
self.v = nn.Linear(attn_dim, 1, bias=False)
self.cumulative = None
self.attention = None
def init_attention(self, encoder_seq_proj):
device = next(self.parameters()).device
b, t, c = encoder_seq_proj.size()
self.cumulative = torch.zeros(b, t, device=device)
self.attention = torch.zeros(b, t, device=device)
def forward(self, encoder_seq_proj, query, t):
if t == 0: self.init_attention(encoder_seq_proj)
processed_query = self.W(query).unsqueeze(1)
location = torch.cat([self.cumulative.unsqueeze(1), self.attention.unsqueeze(1)], dim=1)
processed_loc = self.L(self.conv(location).transpose(1, 2))
u = self.v(torch.tanh(processed_query + encoder_seq_proj + processed_loc))
u = u.squeeze(-1)
scores = torch.sigmoid(u) / torch.sigmoid(u).sum(dim=1, keepdim=True)
self.attention = scores
self.cumulative += self.attention
return scores.unsqueeze(-1).transpose(1, 2)
class Decoder(nn.Module):
def __init__(self, n_mels, decoder_dims, lstm_dims):
super().__init__()
self.max_r = 20
self.r = None
self.generating = False
self.n_mels = n_mels
self.prenet = PreNet(n_mels)
self.attn_net = LSA(decoder_dims)
self.attn_rnn = nn.GRUCell(decoder_dims + decoder_dims // 2, decoder_dims)
self.rnn_input = nn.Linear(2 * decoder_dims, lstm_dims)
self.res_rnn1 = nn.LSTMCell(lstm_dims, lstm_dims)
self.res_rnn2 = nn.LSTMCell(lstm_dims, lstm_dims)
self.mel_proj = nn.Linear(lstm_dims, n_mels * self.max_r, bias=False)
def zoneout(self, prev, current, p=0.1):
device = prev.device
assert prev.device == current.device
mask = torch.zeros(prev.size(), device=device).bernoulli_(p)
return prev * mask + current * (1 - mask)
def forward(self, encoder_seq, encoder_seq_proj, prenet_in,
hidden_states, cell_states, context_vec, t):
batch_size = encoder_seq.size(0)
attn_hidden, rnn1_hidden, rnn2_hidden = hidden_states
rnn1_cell, rnn2_cell = cell_states
prenet_out = self.prenet(prenet_in)
attn_rnn_in = torch.cat([context_vec, prenet_out], dim=-1)
attn_hidden = self.attn_rnn(attn_rnn_in.squeeze(1), attn_hidden)
scores = self.attn_net(encoder_seq_proj, attn_hidden, t)
context_vec = scores @ encoder_seq
context_vec = context_vec.squeeze(1)
x = torch.cat([context_vec, attn_hidden], dim=1)
x = self.rnn_input(x)
rnn1_hidden_next, rnn1_cell = self.res_rnn1(x, (rnn1_hidden, rnn1_cell))
if not self.generating:
rnn1_hidden = self.zoneout(rnn1_hidden, rnn1_hidden_next)
else:
rnn1_hidden = rnn1_hidden_next
x = x + rnn1_hidden
rnn2_hidden_next, rnn2_cell = self.res_rnn2(x, (rnn2_hidden, rnn2_cell))
if not self.generating:
rnn2_hidden = self.zoneout(rnn2_hidden, rnn2_hidden_next)
else:
rnn2_hidden = rnn2_hidden_next
x = x + rnn2_hidden
mels = self.mel_proj(x)
mels = mels.view(batch_size, self.n_mels, self.max_r)[:, :, :self.r]
hidden_states = (attn_hidden, rnn1_hidden, rnn2_hidden)
cell_states = (rnn1_cell, rnn2_cell)
return mels, scores, hidden_states, cell_states, context_vec
class Tacotron(nn.Module):
def __init__(self, embed_dims, num_chars, encoder_dims, decoder_dims, n_mels, fft_bins, postnet_dims,
encoder_K, lstm_dims, postnet_K, num_highways, dropout):
super().__init__()
self.n_mels = n_mels
self.lstm_dims = lstm_dims
self.decoder_dims = decoder_dims
self.encoder = Encoder(embed_dims, num_chars, encoder_dims,
encoder_K, num_highways, dropout)
self.encoder_proj = nn.Linear(decoder_dims, decoder_dims, bias=False)
self.decoder = Decoder(n_mels, decoder_dims, lstm_dims)
self.postnet = CBHG(postnet_K, n_mels, postnet_dims, [256, 80], num_highways)
self.post_proj = nn.Linear(postnet_dims * 2, fft_bins, bias=False)
self.init_model()
self.num_params()
self.step = nn.Parameter(torch.zeros(1).long(), requires_grad=False)
self.r = nn.Parameter(torch.tensor(0).long(), requires_grad=False)
def set_r(self, r):
self.r.data = torch.tensor(r)
self.decoder.r = r
def get_r(self):
return self.r.item()
def forward(self, x, m, generate_gta=False):
device = next(self.parameters()).device
self.step += 1
if generate_gta:
self.encoder.eval()
self.postnet.eval()
self.decoder.generating = True
else:
self.encoder.train()
self.postnet.train()
self.decoder.generating = False
batch_size, _, steps = m.size()
attn_hidden = torch.zeros(batch_size, self.decoder_dims, device=device)
rnn1_hidden = torch.zeros(batch_size, self.lstm_dims, device=device)
rnn2_hidden = torch.zeros(batch_size, self.lstm_dims, device=device)
hidden_states = (attn_hidden, rnn1_hidden, rnn2_hidden)
rnn1_cell = torch.zeros(batch_size, self.lstm_dims, device=device)
rnn2_cell = torch.zeros(batch_size, self.lstm_dims, device=device)
cell_states = (rnn1_cell, rnn2_cell)
go_frame = torch.zeros(batch_size, self.n_mels, device=device)
context_vec = torch.zeros(batch_size, self.decoder_dims, device=device)
encoder_seq = self.encoder(x)
encoder_seq_proj = self.encoder_proj(encoder_seq)
mel_outputs, attn_scores = [], []
for t in range(0, steps, self.r):
prenet_in = m[:, :, t - 1] if t > 0 else go_frame
mel_frames, scores, hidden_states, cell_states, context_vec = \
self.decoder(encoder_seq, encoder_seq_proj, prenet_in,
hidden_states, cell_states, context_vec, t)
mel_outputs.append(mel_frames)
attn_scores.append(scores)
mel_outputs = torch.cat(mel_outputs, dim=2)
postnet_out = self.postnet(mel_outputs)
linear = self.post_proj(postnet_out)
linear = linear.transpose(1, 2)
attn_scores = torch.cat(attn_scores, 1)
attn_scores = attn_scores.cpu().data.numpy()
return mel_outputs, linear, attn_scores
def generate(self, x, steps=2000):
device = next(self.parameters()).device
self.encoder.eval()
self.postnet.eval()
self.decoder.generating = True
batch_size = 1
x = torch.as_tensor(x, dtype=torch.long, device=device).unsqueeze(0)
attn_hidden = torch.zeros(batch_size, self.decoder_dims, device=device)
rnn1_hidden = torch.zeros(batch_size, self.lstm_dims, device=device)
rnn2_hidden = torch.zeros(batch_size, self.lstm_dims, device=device)
hidden_states = (attn_hidden, rnn1_hidden, rnn2_hidden)
rnn1_cell = torch.zeros(batch_size, self.lstm_dims, device=device)
rnn2_cell = torch.zeros(batch_size, self.lstm_dims, device=device)
cell_states = (rnn1_cell, rnn2_cell)
go_frame = torch.zeros(batch_size, self.n_mels, device=device)
context_vec = torch.zeros(batch_size, self.decoder_dims, device=device)
encoder_seq = self.encoder(x)
encoder_seq_proj = self.encoder_proj(encoder_seq)
mel_outputs, attn_scores = [], []
for t in range(0, steps, self.r):
prenet_in = mel_outputs[-1][:, :, -1] if t > 0 else go_frame
mel_frames, scores, hidden_states, cell_states, context_vec = \
self.decoder(encoder_seq, encoder_seq_proj, prenet_in,
hidden_states, cell_states, context_vec, t)
mel_outputs.append(mel_frames)
attn_scores.append(scores)
if (mel_frames < -3.8).all() and t > 10: break
mel_outputs = torch.cat(mel_outputs, dim=2)
postnet_out = self.postnet(mel_outputs)
linear = self.post_proj(postnet_out)
linear = linear.transpose(1, 2)[0].cpu().data.numpy()
mel_outputs = mel_outputs[0].cpu().data.numpy()
attn_scores = torch.cat(attn_scores, 1)
attn_scores = attn_scores.cpu().data.numpy()[0]
self.encoder.train()
self.postnet.train()
self.decoder.generating = False
return mel_outputs, linear, attn_scores
def init_model(self):
for p in self.parameters():
if p.dim() > 1: nn.init.xavier_uniform_(p)
def get_step(self):
return self.step.data.item()
def reset_step(self):
self.step = nn.Parameter(torch.zeros(1).long(), requires_grad=False)
def checkpoint(self, path):
k_steps = self.get_step() // 1000
self.save(f'{path}/checkpoint_{k_steps}k_steps.pyt')
def log(self, path, msg):
with open(path, 'a') as f:
print(msg, file=f)
def restore(self, path):
if not os.path.exists(path):
print('\nNew Tacotron Training Session...\n')
self.save(path)
else:
print(f'\nLoading Weights: "{path}"\n')
self.load(path)
self.decoder.r = self.r.item()
def load(self, path, device='cpu'):
print("models/tacotron.py loading {}".format(path))
self.load_state_dict(torch.load(path, map_location=device), strict=False)
def save(self, path):
torch.save(self.state_dict(), path)
def num_params(self, print_out=True):
parameters = filter(lambda p: p.requires_grad, self.parameters())
parameters = sum([np.prod(p.size()) for p in parameters]) / 1_000_000
if print_out:
print('Trainable Parameters: %.3fM' % parameters)
| true | true |
f730efec7b67472ded36ecfb1b4ff7670ef81c41 | 849 | py | Python | slixmpp/xmlstream/handler/xmlwaiter.py | anirudhrata/slixmpp | 1fcee0e80a212eeb274d2f560e69099d8a61bf7f | [
"BSD-3-Clause"
] | 86 | 2016-07-04T13:26:02.000Z | 2022-02-19T10:26:21.000Z | slixmpp/xmlstream/handler/xmlwaiter.py | anirudhrata/slixmpp | 1fcee0e80a212eeb274d2f560e69099d8a61bf7f | [
"BSD-3-Clause"
] | 10 | 2016-09-30T18:55:41.000Z | 2020-05-01T14:22:47.000Z | slixmpp/xmlstream/handler/xmlwaiter.py | anirudhrata/slixmpp | 1fcee0e80a212eeb274d2f560e69099d8a61bf7f | [
"BSD-3-Clause"
] | 45 | 2016-09-30T18:48:41.000Z | 2022-03-18T21:39:33.000Z |
# Slixmpp: The Slick XMPP Library
# Copyright (C) 2010 Nathanael C. Fritz
# This file is part of Slixmpp.
# See the file LICENSE for copying permission.
from slixmpp.xmlstream.stanzabase import StanzaBase
from slixmpp.xmlstream.handler import Waiter
class XMLWaiter(Waiter):
"""
The XMLWaiter class is identical to the normal Waiter class
except that it returns the XML contents of the stanza instead
of the full stanza object itself.
Methods:
prerun -- Overrides Waiter.prerun
"""
def prerun(self, payload: StanzaBase) -> None:
"""
Store the XML contents of the stanza to return to the
waiting event handler.
Overrides Waiter.prerun
Arguments:
payload -- The matched stanza object.
"""
Waiter.prerun(self, payload.xml) # type: ignore
| 26.53125 | 65 | 0.674912 |
from slixmpp.xmlstream.stanzabase import StanzaBase
from slixmpp.xmlstream.handler import Waiter
class XMLWaiter(Waiter):
def prerun(self, payload: StanzaBase) -> None:
Waiter.prerun(self, payload.xml)
| true | true |
f730f06f9acee549a188aa709f31f37e19a2c108 | 8,731 | py | Python | tensorpack/RL/expreplay.py | skoppula/ternarynet | 1a67251f7f5a1cdf854f87f90f841655c7c9f11c | [
"Apache-2.0"
] | 109 | 2017-01-16T23:55:36.000Z | 2021-08-31T17:48:08.000Z | f1/F1_track1/tensorpack/RL/expreplay.py | mihahauke/vizdoom_cig2017 | 42baffa7c6ee43db618605838ea6f9e0547001d1 | [
"MIT"
] | 19 | 2018-01-28T23:05:33.000Z | 2022-03-11T23:14:57.000Z | tensorpack/RL/expreplay.py | czhu95/ternarynet | 1a67251f7f5a1cdf854f87f90f841655c7c9f11c | [
"Apache-2.0"
] | 46 | 2017-01-23T07:35:10.000Z | 2021-12-26T13:52:19.000Z | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
# File: expreplay.py
# Author: Yuxin Wu <ppwwyyxxc@gmail.com>
import numpy as np
from collections import deque, namedtuple
import threading
from tqdm import tqdm
import six
from six.moves import queue
from ..dataflow import DataFlow
from ..utils import *
from ..utils.concurrency import LoopThread
from ..callbacks.base import Callback
__all__ = ['ExpReplay']
Experience = namedtuple('Experience',
['state', 'action', 'reward', 'isOver'])
class ExpReplay(DataFlow, Callback):
"""
Implement experience replay in the paper
`Human-level control through deep reinforcement learning`.
This implementation provides the interface as an DataFlow.
This DataFlow is not fork-safe (doesn't support multiprocess prefetching)
"""
def __init__(self,
predictor_io_names,
player,
batch_size=32,
memory_size=1e6,
populate_size=None, # deprecated
init_memory_size=50000,
exploration=1,
end_exploration=0.1,
exploration_epoch_anneal=0.002,
reward_clip=None,
update_frequency=1,
history_len=1
):
"""
:param predictor: a callabale running the up-to-date network.
called with a state, return a distribution.
:param player: an `RLEnvironment`
:param history_len: length of history frames to concat. zero-filled initial frames
:param update_frequency: number of new transitions to add to memory
after sampling a batch of transitions for training
"""
# XXX back-compat
if populate_size is not None:
logger.warn("populate_size in ExpReplay is deprecated in favor of init_memory_size")
init_memory_size = populate_size
init_memory_size = int(init_memory_size)
for k, v in locals().items():
if k != 'self':
setattr(self, k, v)
self.num_actions = player.get_action_space().num_actions()
logger.info("Number of Legal actions: {}".format(self.num_actions))
self.mem = deque(maxlen=memory_size)
self.rng = get_rng(self)
self._init_memory_flag = threading.Event() # tell if memory has been initialized
self._predictor_io_names = predictor_io_names
def _init_memory(self):
logger.info("Populating replay memory...")
# fill some for the history
old_exploration = self.exploration
self.exploration = 1
for k in range(self.history_len):
self._populate_exp()
self.exploration = old_exploration
with tqdm(total=self.init_memory_size) as pbar:
while len(self.mem) < self.init_memory_size:
#from copy import deepcopy # quickly fill the memory for debug
#self.mem.append(deepcopy(self.mem[0]))
self._populate_exp()
pbar.update()
self._init_memory_flag.set()
def _populate_exp(self):
""" populate a transition by epsilon-greedy"""
old_s = self.player.current_state()
if self.rng.rand() <= self.exploration:
act = self.rng.choice(range(self.num_actions))
else:
# build a history state
# XXX assume a state can be representated by one tensor
ss = [old_s]
isOver = False
for k in range(1, self.history_len):
hist_exp = self.mem[-k]
if hist_exp.isOver:
isOver = True
if isOver:
ss.append(np.zeros_like(ss[0]))
else:
ss.append(hist_exp.state)
ss.reverse()
ss = np.concatenate(ss, axis=2)
# XXX assume batched network
q_values = self.predictor([[ss]])[0][0]
act = np.argmax(q_values)
reward, isOver = self.player.action(act)
if self.reward_clip:
reward = np.clip(reward, self.reward_clip[0], self.reward_clip[1])
self.mem.append(Experience(old_s, act, reward, isOver))
def get_data(self):
self._init_memory_flag.wait()
# new s is considered useless if isOver==True
while True:
batch_exp = [self._sample_one() for _ in range(self.batch_size)]
#import cv2 # for debug
#def view_state(state, next_state):
#""" for debugging state representation"""
#r = np.concatenate([state[:,:,k] for k in range(self.history_len)], axis=1)
#r2 = np.concatenate([next_state[:,:,k] for k in range(self.history_len)], axis=1)
#r = np.concatenate([r, r2], axis=0)
#print r.shape
#cv2.imshow("state", r)
#cv2.waitKey()
#exp = batch_exp[0]
#print("Act: ", exp[3], " reward:", exp[2], " isOver: ", exp[4])
#if exp[2] or exp[4]:
#view_state(exp[0], exp[1])
yield self._process_batch(batch_exp)
self._populate_job_queue.put(1)
def _sample_one(self):
""" return the transition tuple for
[idx, idx+history_len) -> [idx+1, idx+1+history_len)
it's the transition from state idx+history_len-1 to state idx+history_len
"""
# look for a state to start with
# when x.isOver==True, (x+1).state is of a different episode
idx = self.rng.randint(len(self.mem) - self.history_len - 1)
samples = [self.mem[k] for k in range(idx, idx+self.history_len+1)]
def concat(idx):
v = [x.state for x in samples[idx:idx+self.history_len]]
return np.concatenate(v, axis=2)
state = concat(0)
next_state = concat(1)
start_mem = samples[-2]
reward, action, isOver = start_mem.reward, start_mem.action, start_mem.isOver
start_idx = self.history_len - 1
# zero-fill state before starting
zero_fill = False
for k in range(1, self.history_len):
if samples[start_idx-k].isOver:
zero_fill = True
if zero_fill:
state[:,:,-k-1] = 0
if k + 2 <= self.history_len:
next_state[:,:,-k-2] = 0
return (state, next_state, reward, action, isOver)
def _process_batch(self, batch_exp):
state = np.array([e[0] for e in batch_exp])
next_state = np.array([e[1] for e in batch_exp])
reward = np.array([e[2] for e in batch_exp])
action = np.array([e[3] for e in batch_exp], dtype='int8')
isOver = np.array([e[4] for e in batch_exp], dtype='bool')
return [state, action, reward, next_state, isOver]
def _setup_graph(self):
self.predictor = self.trainer.get_predict_func(*self._predictor_io_names)
# Callback-related:
def _before_train(self):
# spawn a separate thread to run policy, can speed up 1.3x
self._populate_job_queue = queue.Queue(maxsize=1)
def populate_job_func():
self._populate_job_queue.get()
with self.trainer.sess.as_default():
for _ in range(self.update_frequency):
self._populate_exp()
self._populate_job_th = LoopThread(populate_job_func, False)
self._populate_job_th.start()
self._init_memory()
def _trigger_epoch(self):
if self.exploration > self.end_exploration:
self.exploration -= self.exploration_epoch_anneal
logger.info("Exploration changed to {}".format(self.exploration))
# log player statistics
stats = self.player.stats
for k, v in six.iteritems(stats):
try:
mean, max = np.mean(v), np.max(v)
self.trainer.write_scalar_summary('expreplay/mean_' + k, mean)
self.trainer.write_scalar_summary('expreplay/max_' + k, max)
except:
pass
self.player.reset_stat()
if __name__ == '__main__':
from .atari import AtariPlayer
import sys
predictor = lambda x: np.array([1,1,1,1])
player = AtariPlayer(sys.argv[1], viz=0, frame_skip=10, height_range=(36, 204))
E = ExpReplay(predictor,
player=player,
num_actions=player.get_action_space().num_actions(),
populate_size=1001,
history_len=4)
E._init_memory()
for k in E.get_data():
import IPython as IP;
IP.embed(config=IP.terminal.ipapp.load_default_config())
pass
#import IPython;
#IPython.embed(config=IPython.terminal.ipapp.load_default_config())
#break
| 38.126638 | 98 | 0.593746 |
import numpy as np
from collections import deque, namedtuple
import threading
from tqdm import tqdm
import six
from six.moves import queue
from ..dataflow import DataFlow
from ..utils import *
from ..utils.concurrency import LoopThread
from ..callbacks.base import Callback
__all__ = ['ExpReplay']
Experience = namedtuple('Experience',
['state', 'action', 'reward', 'isOver'])
class ExpReplay(DataFlow, Callback):
def __init__(self,
predictor_io_names,
player,
batch_size=32,
memory_size=1e6,
populate_size=None,
init_memory_size=50000,
exploration=1,
end_exploration=0.1,
exploration_epoch_anneal=0.002,
reward_clip=None,
update_frequency=1,
history_len=1
):
if populate_size is not None:
logger.warn("populate_size in ExpReplay is deprecated in favor of init_memory_size")
init_memory_size = populate_size
init_memory_size = int(init_memory_size)
for k, v in locals().items():
if k != 'self':
setattr(self, k, v)
self.num_actions = player.get_action_space().num_actions()
logger.info("Number of Legal actions: {}".format(self.num_actions))
self.mem = deque(maxlen=memory_size)
self.rng = get_rng(self)
self._init_memory_flag = threading.Event()
self._predictor_io_names = predictor_io_names
def _init_memory(self):
logger.info("Populating replay memory...")
old_exploration = self.exploration
self.exploration = 1
for k in range(self.history_len):
self._populate_exp()
self.exploration = old_exploration
with tqdm(total=self.init_memory_size) as pbar:
while len(self.mem) < self.init_memory_size:
elf._populate_exp()
pbar.update()
self._init_memory_flag.set()
def _populate_exp(self):
old_s = self.player.current_state()
if self.rng.rand() <= self.exploration:
act = self.rng.choice(range(self.num_actions))
else:
ss = [old_s]
isOver = False
for k in range(1, self.history_len):
hist_exp = self.mem[-k]
if hist_exp.isOver:
isOver = True
if isOver:
ss.append(np.zeros_like(ss[0]))
else:
ss.append(hist_exp.state)
ss.reverse()
ss = np.concatenate(ss, axis=2)
q_values = self.predictor([[ss]])[0][0]
act = np.argmax(q_values)
reward, isOver = self.player.action(act)
if self.reward_clip:
reward = np.clip(reward, self.reward_clip[0], self.reward_clip[1])
self.mem.append(Experience(old_s, act, reward, isOver))
def get_data(self):
self._init_memory_flag.wait()
while True:
batch_exp = [self._sample_one() for _ in range(self.batch_size)]
yield self._process_batch(batch_exp)
self._populate_job_queue.put(1)
def _sample_one(self):
idx = self.rng.randint(len(self.mem) - self.history_len - 1)
samples = [self.mem[k] for k in range(idx, idx+self.history_len+1)]
def concat(idx):
v = [x.state for x in samples[idx:idx+self.history_len]]
return np.concatenate(v, axis=2)
state = concat(0)
next_state = concat(1)
start_mem = samples[-2]
reward, action, isOver = start_mem.reward, start_mem.action, start_mem.isOver
start_idx = self.history_len - 1
zero_fill = False
for k in range(1, self.history_len):
if samples[start_idx-k].isOver:
zero_fill = True
if zero_fill:
state[:,:,-k-1] = 0
if k + 2 <= self.history_len:
next_state[:,:,-k-2] = 0
return (state, next_state, reward, action, isOver)
def _process_batch(self, batch_exp):
state = np.array([e[0] for e in batch_exp])
next_state = np.array([e[1] for e in batch_exp])
reward = np.array([e[2] for e in batch_exp])
action = np.array([e[3] for e in batch_exp], dtype='int8')
isOver = np.array([e[4] for e in batch_exp], dtype='bool')
return [state, action, reward, next_state, isOver]
    def _setup_graph(self):
        # Build the Q-value prediction function once the trainer's graph
        # exists; the input/output tensor names were supplied at construction.
        self.predictor = self.trainer.get_predict_func(*self._predictor_io_names)
    def _before_train(self):
        """Start the background simulation thread and fill initial memory.

        The size-1 queue throttles the thread: each item put by ``get_data``
        triggers ``update_frequency`` environment steps.
        """
        self._populate_job_queue = queue.Queue(maxsize=1)

        def populate_job_func():
            # Block until the consumer requests more experience.
            self._populate_job_queue.get()
            with self.trainer.sess.as_default():
                for _ in range(self.update_frequency):
                    self._populate_exp()
        self._populate_job_th = LoopThread(populate_job_func, False)
        self._populate_job_th.start()

        self._init_memory()
def _trigger_epoch(self):
if self.exploration > self.end_exploration:
self.exploration -= self.exploration_epoch_anneal
logger.info("Exploration changed to {}".format(self.exploration))
stats = self.player.stats
for k, v in six.iteritems(stats):
try:
mean, max = np.mean(v), np.max(v)
self.trainer.write_scalar_summary('expreplay/mean_' + k, mean)
self.trainer.write_scalar_summary('expreplay/max_' + k, max)
except:
pass
self.player.reset_stat()
if __name__ == '__main__':
    # Manual smoke test: exercise the replay buffer against a real AtariPlayer
    # (ROM path given as argv[1]) using a dummy constant-Q predictor.
    from .atari import AtariPlayer
    import sys
    # Fake predictor: constant Q-values for 4 actions.
    predictor = lambda x: np.array([1,1,1,1])
    player = AtariPlayer(sys.argv[1], viz=0, frame_skip=10, height_range=(36, 204))
    E = ExpReplay(predictor,
                  player=player,
                  num_actions=player.get_action_space().num_actions(),
                  populate_size=1001,
                  history_len=4)
    E._init_memory()

    for k in E.get_data():
        # Drop into an interactive shell to inspect the first batch by hand.
        import IPython as IP;
        IP.embed(config=IP.terminal.ipapp.load_default_config())
        pass
| true | true |
f730f172a583af4fe7d0cf23626ad4bacc89fb28 | 12,835 | py | Python | tests/plugins/test_plugins_manager.py | raspberryfield/airflow | 3eb67af7a3fab61b7f691da66129c228af27d95e | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 3 | 2021-07-30T16:46:46.000Z | 2021-10-19T07:18:47.000Z | tests/plugins/test_plugins_manager.py | raspberryfield/airflow | 3eb67af7a3fab61b7f691da66129c228af27d95e | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 6 | 2019-03-16T02:09:30.000Z | 2019-06-27T03:27:34.000Z | tests/plugins/test_plugins_manager.py | raspberryfield/airflow | 3eb67af7a3fab61b7f691da66129c228af27d95e | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 1 | 2021-05-12T13:17:37.000Z | 2021-05-12T13:17:37.000Z | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import importlib
import logging
import sys
import unittest
from unittest import mock
from airflow.hooks.base import BaseHook
from airflow.plugins_manager import AirflowPlugin
from airflow.www import app as application
from tests.test_utils.mock_plugins import mock_plugin_manager
# On Python 3.9+ the stdlib importlib.metadata module is patched; on older
# interpreters the importlib_metadata backport is used instead. Only the
# dotted module path is stored, for building mock.patch target strings.
py39 = sys.version_info >= (3, 9)
importlib_metadata = 'importlib.metadata' if py39 else 'importlib_metadata'
class TestPluginsRBAC(unittest.TestCase):
    """Integration of plugin-provided FAB views, menu links and blueprints
    into the Airflow webserver app."""

    def setUp(self):
        # Fresh app per test; plugins from tests.plugins.test_plugin load here.
        self.app = application.create_app(testing=True)
        self.appbuilder = self.app.appbuilder  # pylint: disable=no-member

    def test_flaskappbuilder_views(self):
        """A plugin appbuilder view is registered with a matching menu item."""
        from tests.plugins.test_plugin import v_appbuilder_package

        appbuilder_class_name = str(v_appbuilder_package['view'].__class__.__name__)
        plugin_views = [
            view for view in self.appbuilder.baseviews if view.blueprint.name == appbuilder_class_name
        ]

        assert len(plugin_views) == 1

        # view should have a menu item matching category of v_appbuilder_package
        links = [
            menu_item
            for menu_item in self.appbuilder.menu.menu
            if menu_item.name == v_appbuilder_package['category']
        ]

        assert len(links) == 1

        # menu link should also have a link matching the name of the package.
        link = links[0]
        assert link.name == v_appbuilder_package['category']
        assert link.childs[0].name == v_appbuilder_package['name']

    def test_flaskappbuilder_nomenu_views(self):
        """A plugin view without a menu entry is still registered."""
        from tests.plugins.test_plugin import v_nomenu_appbuilder_package

        class AirflowNoMenuViewsPlugin(AirflowPlugin):
            appbuilder_views = [v_nomenu_appbuilder_package]

        appbuilder_class_name = str(v_nomenu_appbuilder_package['view'].__class__.__name__)

        with mock_plugin_manager(plugins=[AirflowNoMenuViewsPlugin()]):
            appbuilder = application.create_app(testing=True).appbuilder  # pylint: disable=no-member

            plugin_views = [
                view for view in appbuilder.baseviews if view.blueprint.name == appbuilder_class_name
            ]

            assert len(plugin_views) == 1

    def test_flaskappbuilder_menu_links(self):
        """Plugin menu links appear nested in their category or at top level."""
        from tests.plugins.test_plugin import appbuilder_mitem, appbuilder_mitem_toplevel

        # menu item (category) should exist matching appbuilder_mitem.category
        categories = [
            menu_item
            for menu_item in self.appbuilder.menu.menu
            if menu_item.name == appbuilder_mitem['category']
        ]
        assert len(categories) == 1

        # menu link should be a child in the category
        category = categories[0]
        assert category.name == appbuilder_mitem['category']
        assert category.childs[0].name == appbuilder_mitem['name']
        assert category.childs[0].href == appbuilder_mitem['href']

        # a top level link isn't nested in a category
        top_levels = [
            menu_item
            for menu_item in self.appbuilder.menu.menu
            if menu_item.name == appbuilder_mitem_toplevel['name']
        ]
        assert len(top_levels) == 1
        link = top_levels[0]
        assert link.href == appbuilder_mitem_toplevel['href']
        assert link.label == appbuilder_mitem_toplevel['label']

    def test_app_blueprints(self):
        """Plugin-provided Flask blueprints are attached to the app."""
        from tests.plugins.test_plugin import bp

        # Blueprint should be present in the app
        assert 'test_plugin' in self.app.blueprints
        assert self.app.blueprints['test_plugin'].name == bp.name
class TestPluginsManager:
    """Behavior of plugin discovery/loading: logging, compatibility warnings,
    entrypoint error handling and macro registration."""

    def test_no_log_when_no_plugins(self, caplog):
        """Loading with an empty plugin set emits no log records."""
        with mock_plugin_manager(plugins=[]):
            from airflow import plugins_manager

            plugins_manager.ensure_plugins_loaded()

        assert caplog.record_tuples == []

    def test_should_load_plugins_from_property(self, caplog):
        """Plugin attributes defined as properties (not class attrs) load too."""

        class AirflowTestPropertyPlugin(AirflowPlugin):
            name = "test_property_plugin"

            @property
            def hooks(self):
                class TestPropertyHook(BaseHook):
                    pass

                return [TestPropertyHook]

        with mock_plugin_manager(plugins=[AirflowTestPropertyPlugin()]):
            from airflow import plugins_manager

            caplog.set_level(logging.DEBUG, "airflow.plugins_manager")
            plugins_manager.ensure_plugins_loaded()

            assert 'AirflowTestPropertyPlugin' in str(plugins_manager.plugins)
            assert 'TestPropertyHook' in str(plugins_manager.registered_hooks)

        # The last emitted record is the DEBUG timing summary.
        assert caplog.records[-1].levelname == 'DEBUG'
        assert caplog.records[-1].msg == 'Loading %d plugin(s) took %.2f seconds'

    def test_should_warning_about_incompatible_plugins(self, caplog):
        """Legacy Flask-Admin attributes (admin_views/menu_links) warn."""

        class AirflowAdminViewsPlugin(AirflowPlugin):
            name = "test_admin_views_plugin"
            admin_views = [mock.MagicMock()]

        class AirflowAdminMenuLinksPlugin(AirflowPlugin):
            name = "test_menu_links_plugin"
            menu_links = [mock.MagicMock()]

        with mock_plugin_manager(
            plugins=[AirflowAdminViewsPlugin(), AirflowAdminMenuLinksPlugin()]
        ), caplog.at_level(logging.WARNING, logger='airflow.plugins_manager'):
            from airflow import plugins_manager

            plugins_manager.initialize_web_ui_plugins()

        assert caplog.record_tuples == [
            (
                "airflow.plugins_manager",
                logging.WARNING,
                "Plugin 'test_admin_views_plugin' may not be compatible with the current Airflow version. "
                "Please contact the author of the plugin.",
            ),
            (
                "airflow.plugins_manager",
                logging.WARNING,
                "Plugin 'test_menu_links_plugin' may not be compatible with the current Airflow version. "
                "Please contact the author of the plugin.",
            ),
        ]

    def test_should_not_warning_about_fab_plugins(self, caplog):
        """FAB attributes (appbuilder_views/appbuilder_menu_items) don't warn."""

        class AirflowAdminViewsPlugin(AirflowPlugin):
            name = "test_admin_views_plugin"
            appbuilder_views = [mock.MagicMock()]

        class AirflowAdminMenuLinksPlugin(AirflowPlugin):
            name = "test_menu_links_plugin"
            appbuilder_menu_items = [mock.MagicMock()]

        with mock_plugin_manager(
            plugins=[AirflowAdminViewsPlugin(), AirflowAdminMenuLinksPlugin()]
        ), caplog.at_level(logging.WARNING, logger='airflow.plugins_manager'):
            from airflow import plugins_manager

            plugins_manager.initialize_web_ui_plugins()

        assert caplog.record_tuples == []

    def test_should_not_warning_about_fab_and_flask_admin_plugins(self, caplog):
        """Plugins providing both legacy and FAB attributes don't warn."""

        class AirflowAdminViewsPlugin(AirflowPlugin):
            name = "test_admin_views_plugin"
            admin_views = [mock.MagicMock()]
            appbuilder_views = [mock.MagicMock()]

        class AirflowAdminMenuLinksPlugin(AirflowPlugin):
            name = "test_menu_links_plugin"
            menu_links = [mock.MagicMock()]
            appbuilder_menu_items = [mock.MagicMock()]

        with mock_plugin_manager(
            plugins=[AirflowAdminViewsPlugin(), AirflowAdminMenuLinksPlugin()]
        ), caplog.at_level(logging.WARNING, logger='airflow.plugins_manager'):
            from airflow import plugins_manager

            plugins_manager.initialize_web_ui_plugins()

        assert caplog.record_tuples == []

    def test_entrypoint_plugin_errors_dont_raise_exceptions(self, caplog):
        """
        Test that Airflow does not raise an error if there is any Exception because of a plugin.
        """
        from airflow.plugins_manager import import_errors, load_entrypoint_plugins

        mock_dist = mock.Mock()

        mock_entrypoint = mock.Mock()
        mock_entrypoint.name = 'test-entrypoint'
        mock_entrypoint.group = 'airflow.plugins'
        mock_entrypoint.module = 'test.plugins.test_plugins_manager'
        mock_entrypoint.load.side_effect = ImportError('my_fake_module not found')
        mock_dist.entry_points = [mock_entrypoint]

        with mock.patch(f'{importlib_metadata}.distributions', return_value=[mock_dist]), caplog.at_level(
            logging.ERROR, logger='airflow.plugins_manager'
        ):
            load_entrypoint_plugins()

            received_logs = caplog.text
            # Assert Traceback is shown too
            assert "Traceback (most recent call last):" in received_logs
            assert "my_fake_module not found" in received_logs
            assert "Failed to import plugin test-entrypoint" in received_logs
            assert ("test.plugins.test_plugins_manager", "my_fake_module not found") in import_errors.items()

    def test_registering_plugin_macros(self, request):
        """
        Tests whether macros that originate from plugins are being registered correctly.
        """
        from airflow import macros
        from airflow.plugins_manager import integrate_macros_plugins

        def cleanup_macros():
            """Reloads the airflow.macros module such that the symbol table is reset after the test."""
            # We're explicitly deleting the module from sys.modules and importing it again
            # using import_module() as opposed to using importlib.reload() because the latter
            # does not undo the changes to the airflow.macros module that are being caused by
            # invoking integrate_macros_plugins()
            del sys.modules['airflow.macros']
            importlib.import_module('airflow.macros')

        request.addfinalizer(cleanup_macros)

        def custom_macro():
            return 'foo'

        class MacroPlugin(AirflowPlugin):
            name = 'macro_plugin'
            macros = [custom_macro]

        with mock_plugin_manager(plugins=[MacroPlugin()]):
            # Ensure the macros for the plugin have been integrated.
            integrate_macros_plugins()
            # Test whether the modules have been created as expected.
            plugin_macros = importlib.import_module(f"airflow.macros.{MacroPlugin.name}")
            for macro in MacroPlugin.macros:
                # Verify that the macros added by the plugin are being set correctly
                # on the plugin's macro module.
                assert hasattr(plugin_macros, macro.__name__)
            # Verify that the symbol table in airflow.macros has been updated with an entry for
            # this plugin, this is necessary in order to allow the plugin's macros to be used when
            # rendering templates.
            assert hasattr(macros, MacroPlugin.name)
class TestPluginsDirectorySource(unittest.TestCase):
    """String rendering of a plugin loaded from the plugins directory."""

    def test_should_return_correct_path_name(self):
        """The source reports its path relative to $PLUGINS_FOLDER, in plain,
        str() and HTML forms."""
        from airflow import plugins_manager

        source = plugins_manager.PluginsDirectorySource(__file__)
        assert source.path == "test_plugins_manager.py"
        assert str(source) == "$PLUGINS_FOLDER/test_plugins_manager.py"
        assert source.__html__() == "<em>$PLUGINS_FOLDER/</em>test_plugins_manager.py"
class TestEntryPointSource:
    """String rendering of a plugin loaded from a setuptools entrypoint."""

    def test_should_return_correct_source_details(self):
        """EntryPointSource renders `dist==version: entrypoint` in str/HTML."""
        from airflow import plugins_manager

        mock_entrypoint = mock.Mock()
        mock_entrypoint.name = 'test-entrypoint-plugin'
        mock_entrypoint.module = 'module_name_plugin'

        mock_dist = mock.Mock()
        mock_dist.metadata = {'name': 'test-entrypoint-plugin'}
        mock_dist.version = '1.0.0'
        mock_dist.entry_points = [mock_entrypoint]

        with mock.patch(f'{importlib_metadata}.distributions', return_value=[mock_dist]):
            plugins_manager.load_entrypoint_plugins()

        source = plugins_manager.EntryPointSource(mock_entrypoint, mock_dist)
        assert str(mock_entrypoint) == source.entrypoint
        assert "test-entrypoint-plugin==1.0.0: " + str(mock_entrypoint) == str(source)
        assert "<em>test-entrypoint-plugin==1.0.0:</em> " + str(mock_entrypoint) == source.__html__()
| 40.109375 | 109 | 0.67651 |
import importlib
import logging
import sys
import unittest
from unittest import mock
from airflow.hooks.base import BaseHook
from airflow.plugins_manager import AirflowPlugin
from airflow.www import app as application
from tests.test_utils.mock_plugins import mock_plugin_manager
# On Python 3.9+ the stdlib importlib.metadata module is patched; on older
# interpreters the importlib_metadata backport is used instead. Only the
# dotted module path is stored, for building mock.patch target strings.
py39 = sys.version_info >= (3, 9)
importlib_metadata = 'importlib.metadata' if py39 else 'importlib_metadata'
class TestPluginsRBAC(unittest.TestCase):
    """Integration of plugin-provided FAB views, menu links and blueprints
    into the Airflow webserver app."""

    def setUp(self):
        # Fresh app per test; plugins from tests.plugins.test_plugin load here.
        self.app = application.create_app(testing=True)
        self.appbuilder = self.app.appbuilder

    def test_flaskappbuilder_views(self):
        """A plugin appbuilder view is registered with a matching menu item."""
        from tests.plugins.test_plugin import v_appbuilder_package

        appbuilder_class_name = str(v_appbuilder_package['view'].__class__.__name__)
        plugin_views = [
            view for view in self.appbuilder.baseviews if view.blueprint.name == appbuilder_class_name
        ]

        assert len(plugin_views) == 1

        # A menu category matching the package's category should exist.
        links = [
            menu_item
            for menu_item in self.appbuilder.menu.menu
            if menu_item.name == v_appbuilder_package['category']
        ]

        assert len(links) == 1

        # The category's child link matches the package name.
        link = links[0]
        assert link.name == v_appbuilder_package['category']
        assert link.childs[0].name == v_appbuilder_package['name']

    def test_flaskappbuilder_nomenu_views(self):
        """A plugin view without a menu entry is still registered."""
        from tests.plugins.test_plugin import v_nomenu_appbuilder_package

        class AirflowNoMenuViewsPlugin(AirflowPlugin):
            appbuilder_views = [v_nomenu_appbuilder_package]

        appbuilder_class_name = str(v_nomenu_appbuilder_package['view'].__class__.__name__)

        with mock_plugin_manager(plugins=[AirflowNoMenuViewsPlugin()]):
            appbuilder = application.create_app(testing=True).appbuilder

            plugin_views = [
                view for view in appbuilder.baseviews if view.blueprint.name == appbuilder_class_name
            ]

            assert len(plugin_views) == 1

    def test_flaskappbuilder_menu_links(self):
        """Plugin menu links appear nested in their category or at top level."""
        from tests.plugins.test_plugin import appbuilder_mitem, appbuilder_mitem_toplevel

        # A category matching appbuilder_mitem['category'] should exist.
        categories = [
            menu_item
            for menu_item in self.appbuilder.menu.menu
            if menu_item.name == appbuilder_mitem['category']
        ]
        assert len(categories) == 1

        # The menu link is a child of that category.
        category = categories[0]
        assert category.name == appbuilder_mitem['category']
        assert category.childs[0].name == appbuilder_mitem['name']
        assert category.childs[0].href == appbuilder_mitem['href']

        # A top-level link is not nested in any category.
        top_levels = [
            menu_item
            for menu_item in self.appbuilder.menu.menu
            if menu_item.name == appbuilder_mitem_toplevel['name']
        ]
        assert len(top_levels) == 1
        link = top_levels[0]
        assert link.href == appbuilder_mitem_toplevel['href']
        assert link.label == appbuilder_mitem_toplevel['label']

    def test_app_blueprints(self):
        """Plugin-provided Flask blueprints are attached to the app."""
        from tests.plugins.test_plugin import bp

        # Blueprint should be present in the app
        assert 'test_plugin' in self.app.blueprints
        assert self.app.blueprints['test_plugin'].name == bp.name
class TestPluginsManager:
    """Behavior of plugin discovery/loading: logging, compatibility warnings,
    entrypoint error handling and macro registration."""

    def test_no_log_when_no_plugins(self, caplog):
        """Loading with an empty plugin set emits no log records."""
        with mock_plugin_manager(plugins=[]):
            from airflow import plugins_manager

            plugins_manager.ensure_plugins_loaded()

        assert caplog.record_tuples == []

    def test_should_load_plugins_from_property(self, caplog):
        """Plugin attributes defined as properties (not class attrs) load too."""

        class AirflowTestPropertyPlugin(AirflowPlugin):
            name = "test_property_plugin"

            @property
            def hooks(self):
                class TestPropertyHook(BaseHook):
                    pass

                return [TestPropertyHook]

        with mock_plugin_manager(plugins=[AirflowTestPropertyPlugin()]):
            from airflow import plugins_manager

            caplog.set_level(logging.DEBUG, "airflow.plugins_manager")
            plugins_manager.ensure_plugins_loaded()

            assert 'AirflowTestPropertyPlugin' in str(plugins_manager.plugins)
            assert 'TestPropertyHook' in str(plugins_manager.registered_hooks)

        # The last emitted record is the DEBUG timing summary.
        assert caplog.records[-1].levelname == 'DEBUG'
        assert caplog.records[-1].msg == 'Loading %d plugin(s) took %.2f seconds'

    def test_should_warning_about_incompatible_plugins(self, caplog):
        """Legacy Flask-Admin attributes (admin_views/menu_links) warn."""

        class AirflowAdminViewsPlugin(AirflowPlugin):
            name = "test_admin_views_plugin"
            admin_views = [mock.MagicMock()]

        class AirflowAdminMenuLinksPlugin(AirflowPlugin):
            name = "test_menu_links_plugin"
            menu_links = [mock.MagicMock()]

        with mock_plugin_manager(
            plugins=[AirflowAdminViewsPlugin(), AirflowAdminMenuLinksPlugin()]
        ), caplog.at_level(logging.WARNING, logger='airflow.plugins_manager'):
            from airflow import plugins_manager

            plugins_manager.initialize_web_ui_plugins()

        assert caplog.record_tuples == [
            (
                "airflow.plugins_manager",
                logging.WARNING,
                "Plugin 'test_admin_views_plugin' may not be compatible with the current Airflow version. "
                "Please contact the author of the plugin.",
            ),
            (
                "airflow.plugins_manager",
                logging.WARNING,
                "Plugin 'test_menu_links_plugin' may not be compatible with the current Airflow version. "
                "Please contact the author of the plugin.",
            ),
        ]

    def test_should_not_warning_about_fab_plugins(self, caplog):
        """FAB attributes (appbuilder_views/appbuilder_menu_items) don't warn."""

        class AirflowAdminViewsPlugin(AirflowPlugin):
            name = "test_admin_views_plugin"
            appbuilder_views = [mock.MagicMock()]

        class AirflowAdminMenuLinksPlugin(AirflowPlugin):
            name = "test_menu_links_plugin"
            appbuilder_menu_items = [mock.MagicMock()]

        with mock_plugin_manager(
            plugins=[AirflowAdminViewsPlugin(), AirflowAdminMenuLinksPlugin()]
        ), caplog.at_level(logging.WARNING, logger='airflow.plugins_manager'):
            from airflow import plugins_manager

            plugins_manager.initialize_web_ui_plugins()

        assert caplog.record_tuples == []

    def test_should_not_warning_about_fab_and_flask_admin_plugins(self, caplog):
        """Plugins providing both legacy and FAB attributes don't warn."""

        class AirflowAdminViewsPlugin(AirflowPlugin):
            name = "test_admin_views_plugin"
            admin_views = [mock.MagicMock()]
            appbuilder_views = [mock.MagicMock()]

        class AirflowAdminMenuLinksPlugin(AirflowPlugin):
            name = "test_menu_links_plugin"
            menu_links = [mock.MagicMock()]
            appbuilder_menu_items = [mock.MagicMock()]

        with mock_plugin_manager(
            plugins=[AirflowAdminViewsPlugin(), AirflowAdminMenuLinksPlugin()]
        ), caplog.at_level(logging.WARNING, logger='airflow.plugins_manager'):
            from airflow import plugins_manager

            plugins_manager.initialize_web_ui_plugins()

        assert caplog.record_tuples == []

    def test_entrypoint_plugin_errors_dont_raise_exceptions(self, caplog):
        """Airflow must not raise when an entrypoint plugin fails to import."""
        from airflow.plugins_manager import import_errors, load_entrypoint_plugins

        mock_dist = mock.Mock()

        mock_entrypoint = mock.Mock()
        mock_entrypoint.name = 'test-entrypoint'
        mock_entrypoint.group = 'airflow.plugins'
        mock_entrypoint.module = 'test.plugins.test_plugins_manager'
        mock_entrypoint.load.side_effect = ImportError('my_fake_module not found')
        mock_dist.entry_points = [mock_entrypoint]

        with mock.patch(f'{importlib_metadata}.distributions', return_value=[mock_dist]), caplog.at_level(
            logging.ERROR, logger='airflow.plugins_manager'
        ):
            load_entrypoint_plugins()

            received_logs = caplog.text
            # Assert Traceback is shown too
            assert "Traceback (most recent call last):" in received_logs
            assert "my_fake_module not found" in received_logs
            assert "Failed to import plugin test-entrypoint" in received_logs
            assert ("test.plugins.test_plugins_manager", "my_fake_module not found") in import_errors.items()

    def test_registering_plugin_macros(self, request):
        """Macros originating from plugins are registered under airflow.macros."""
        from airflow import macros
        from airflow.plugins_manager import integrate_macros_plugins

        def cleanup_macros():
            """Reload airflow.macros so the symbol table is reset after the test."""
            # We're explicitly deleting the module from sys.modules and importing it again
            # because importlib.reload() would not undo integrate_macros_plugins().
            del sys.modules['airflow.macros']
            importlib.import_module('airflow.macros')

        request.addfinalizer(cleanup_macros)

        def custom_macro():
            return 'foo'

        class MacroPlugin(AirflowPlugin):
            name = 'macro_plugin'
            macros = [custom_macro]

        with mock_plugin_manager(plugins=[MacroPlugin()]):
            integrate_macros_plugins()

            # The plugin's macros must exist on its generated macro module.
            plugin_macros = importlib.import_module(f"airflow.macros.{MacroPlugin.name}")
            for macro in MacroPlugin.macros:
                assert hasattr(plugin_macros, macro.__name__)
            # Verify that the symbol table in airflow.macros has been updated with an entry for
            # this plugin, this is necessary in order to allow the plugin's macros to be used when
            # rendering templates.
            assert hasattr(macros, MacroPlugin.name)
class TestPluginsDirectorySource(unittest.TestCase):
    """String rendering of a plugin loaded from the plugins directory."""

    def test_should_return_correct_path_name(self):
        """The source reports its $PLUGINS_FOLDER-relative path in plain,
        str() and HTML forms."""
        from airflow import plugins_manager

        source = plugins_manager.PluginsDirectorySource(__file__)
        assert "test_plugins_manager.py" == source.path
        assert "$PLUGINS_FOLDER/test_plugins_manager.py" == str(source)
        assert "<em>$PLUGINS_FOLDER/</em>test_plugins_manager.py" == source.__html__()
class TestEntryPointSource:
    """String rendering of a plugin loaded from a setuptools entrypoint."""

    def test_should_return_correct_source_details(self):
        """EntryPointSource renders `dist==version: entrypoint` in str/HTML."""
        from airflow import plugins_manager

        mock_entrypoint = mock.Mock()
        mock_entrypoint.name = 'test-entrypoint-plugin'
        mock_entrypoint.module = 'module_name_plugin'

        mock_dist = mock.Mock()
        mock_dist.metadata = {'name': 'test-entrypoint-plugin'}
        mock_dist.version = '1.0.0'
        mock_dist.entry_points = [mock_entrypoint]

        with mock.patch(f'{importlib_metadata}.distributions', return_value=[mock_dist]):
            plugins_manager.load_entrypoint_plugins()

        source = plugins_manager.EntryPointSource(mock_entrypoint, mock_dist)
        assert str(mock_entrypoint) == source.entrypoint
        assert "test-entrypoint-plugin==1.0.0: " + str(mock_entrypoint) == str(source)
        assert "<em>test-entrypoint-plugin==1.0.0:</em> " + str(mock_entrypoint) == source.__html__()
| true | true |
f730f299d4453e3f89f5e91f3e64c62d116cabde | 1,003 | py | Python | person/management/commands/run_person_connector.py | AlenaYanish/Data_converter | dbeeb5c135de7f2d74dff9260a4080e7f07727f6 | [
"MIT"
] | null | null | null | person/management/commands/run_person_connector.py | AlenaYanish/Data_converter | dbeeb5c135de7f2d74dff9260a4080e7f07727f6 | [
"MIT"
] | null | null | null | person/management/commands/run_person_connector.py | AlenaYanish/Data_converter | dbeeb5c135de7f2d74dff9260a4080e7f07727f6 | [
"MIT"
] | null | null | null | from django.core.management.base import BaseCommand, CommandError
from data_ocean.command_progress import CommandProgress
from data_ocean.savepoint import Savepoint
from person.controllers import ConnectorsController, SOURCES
class Command(BaseCommand):
    """Management command that migrates records from one person-data source
    connector, with savepoint-based resumability."""

    # Was the placeholder '---'; shown to users in `manage.py help`.
    help = 'Migrate person records from the selected source connector.'

    def add_arguments(self, parser):
        # Which connector to run; must be one of the declared SOURCES.
        parser.add_argument('source', type=str, nargs=1, choices=[s.name for s in SOURCES])

    def handle(self, *args, **options):
        source = options['source'][0]
        controller = ConnectorsController(source)
        # Savepoint lets an interrupted run be resumed without re-migrating.
        savepoint = Savepoint(f'run_person_connector-{source}')
        progress = CommandProgress(self, controller.get_count())
        for obj in controller.iter_objects():
            if savepoint.has(obj.pk):
                # Already migrated in a previous run; count it silently.
                progress.next(silent=True)
                continue
            progress.next()
            controller.migrate_object(obj)
            savepoint.add(obj.pk)
        progress.end()
        savepoint.close()
        self.stdout.write('Done.')
| 34.586207 | 91 | 0.663011 | from django.core.management.base import BaseCommand, CommandError
from data_ocean.command_progress import CommandProgress
from data_ocean.savepoint import Savepoint
from person.controllers import ConnectorsController, SOURCES
class Command(BaseCommand):
    """Management command that migrates records from one person-data source
    connector, with savepoint-based resumability."""

    # Was the placeholder '---'; shown to users in `manage.py help`.
    help = 'Migrate person records from the selected source connector.'

    def add_arguments(self, parser):
        # Which connector to run; must be one of the declared SOURCES.
        parser.add_argument('source', type=str, nargs=1, choices=[s.name for s in SOURCES])

    def handle(self, *args, **options):
        source = options['source'][0]
        controller = ConnectorsController(source)
        # Savepoint lets an interrupted run be resumed without re-migrating.
        savepoint = Savepoint(f'run_person_connector-{source}')
        progress = CommandProgress(self, controller.get_count())
        for obj in controller.iter_objects():
            if savepoint.has(obj.pk):
                # Already migrated in a previous run; count it silently.
                progress.next(silent=True)
                continue
            progress.next()
            controller.migrate_object(obj)
            savepoint.add(obj.pk)
        progress.end()
        savepoint.close()
        self.stdout.write('Done.')
| true | true |
f730f358f211cb3b98b0df17cabeb3129381fe6a | 4,840 | py | Python | nikola/plugins/task_render_posts.py | kotnik/nikola | d08ed98b1b9bf7cbdabf06a18b51a3acdb745625 | [
"MIT"
] | null | null | null | nikola/plugins/task_render_posts.py | kotnik/nikola | d08ed98b1b9bf7cbdabf06a18b51a3acdb745625 | [
"MIT"
] | null | null | null | nikola/plugins/task_render_posts.py | kotnik/nikola | d08ed98b1b9bf7cbdabf06a18b51a3acdb745625 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright © 2012-2013 Roberto Alsina and others.
# Permission is hereby granted, free of charge, to any
# person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the
# Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice
# shall be included in all copies or substantial portions of
# the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from copy import copy
import codecs
import string
from nikola.plugin_categories import Task
from nikola import utils, rc4
def wrap_encrypt(path, password):
    """Wrap a post with encryption.

    Reads the rendered fragment at ``path``, appends the ``<!--tail-->``
    sentinel (checked client-side to validate the password), RC4-encrypts
    it and rewrites the file as the CRYPT HTML/JS snippet.
    """
    with codecs.open(path, 'rb+', 'utf8') as inf:
        data = inf.read() + "<!--tail-->"
    # NOTE(review): the JS side calls window.atob() on the payload, so
    # rc4.rc4 presumably returns base64 text — confirm against nikola.rc4.
    data = CRYPT.substitute(data=rc4.rc4(password, data))
    with codecs.open(path, 'wb+', 'utf8') as outf:
        outf.write(data)
class RenderPosts(Task):
    """Build HTML fragments from metadata and text."""

    name = "render_posts"

    def gen_tasks(self):
        """Yield one doit task per (post, language) compiling source to HTML."""
        self.site.scan_posts()
        kw = {
            "translations": self.site.config["TRANSLATIONS"],
            "timeline": self.site.timeline,
            "default_lang": self.site.config["DEFAULT_LANG"],
            "hide_untranslated_posts": self.site.config['HIDE_UNTRANSLATED_POSTS'],
        }
        # flag tracks whether any real task was yielded.
        flag = False
        for lang in kw["translations"]:
            # deps_dict is kw without the (unhashable/huge) timeline, used
            # only for uptodate change detection.
            deps_dict = copy(kw)
            deps_dict.pop('timeline')
            for post in kw['timeline']:
                source = post.source_path
                dest = post.base_path
                if not post.is_translation_available(lang) and kw["hide_untranslated_posts"]:
                    continue
                else:
                    source = post.translated_source_path(lang)
                    if lang != post.default_lang:
                        # Non-default languages get a language-suffixed output.
                        dest = dest + '.' + lang
                flag = True
                task = {
                    'basename': self.name,
                    'name': dest,
                    'file_dep': post.fragment_deps(lang),
                    'targets': [dest],
                    'actions': [(self.site.get_compiler(post.source_path).compile_html,
                                 [source, dest, post.is_two_file])],
                    'clean': True,
                    'uptodate': [utils.config_changed(deps_dict)],
                }
                if post.meta('password'):
                    # Post-process the compiled fragment with RC4 wrapping.
                    task['actions'].append((wrap_encrypt, (dest, post.meta('password'))))
                yield task
        if flag is False:  # Return a dummy task
            yield {
                'basename': self.name,
                'name': 'None',
                'uptodate': [True],
                'actions': [],
            }
CRYPT = string.Template("""\
<script>
function rc4(key, str) {
var s = [], j = 0, x, res = '';
for (var i = 0; i < 256; i++) {
s[i] = i;
}
for (i = 0; i < 256; i++) {
j = (j + s[i] + key.charCodeAt(i % key.length)) % 256;
x = s[i];
s[i] = s[j];
s[j] = x;
}
i = 0;
j = 0;
for (var y = 0; y < str.length; y++) {
i = (i + 1) % 256;
j = (j + s[i]) % 256;
x = s[i];
s[i] = s[j];
s[j] = x;
res += String.fromCharCode(str.charCodeAt(y) ^ s[(s[i] + s[j]) % 256]);
}
return res;
}
function decrypt() {
key = $$("#key").val();
crypt_div = $$("#encr")
crypted = crypt_div.html();
decrypted = rc4(key, window.atob(crypted));
if (decrypted.substr(decrypted.length - 11) == "<!--tail-->"){
crypt_div.html(decrypted);
$$("#pwform").hide();
crypt_div.show();
} else { alert("Wrong password"); };
}
</script>
<div id="encr" style="display: none;">${data}</div>
<div id="pwform">
<form onsubmit="javascript:decrypt(); return false;" class="form-inline">
<fieldset>
<legend>This post is password-protected.</legend>
<input type="password" id="key" placeholder="Type password here">
<button type="submit" class="btn">Show Content</button>
</fieldset>
</form>
</div>""")
| 33.846154 | 93 | 0.568595 |
from copy import copy
import codecs
import string
from nikola.plugin_categories import Task
from nikola import utils, rc4
def wrap_encrypt(path, password):
    """Wrap a post with encryption: append the ``<!--tail-->`` sentinel,
    RC4-encrypt the fragment and rewrite it as the CRYPT HTML/JS snippet."""
    with codecs.open(path, 'rb+', 'utf8') as inf:
        data = inf.read() + "<!--tail-->"
    # NOTE(review): the JS side calls window.atob() on the payload, so
    # rc4.rc4 presumably returns base64 text — confirm against nikola.rc4.
    data = CRYPT.substitute(data=rc4.rc4(password, data))
    with codecs.open(path, 'wb+', 'utf8') as outf:
        outf.write(data)
class RenderPosts(Task):
    """Build HTML fragments from metadata and text."""

    name = "render_posts"

    def gen_tasks(self):
        """Yield one doit task per (post, language) compiling source to HTML."""
        self.site.scan_posts()
        kw = {
            "translations": self.site.config["TRANSLATIONS"],
            "timeline": self.site.timeline,
            "default_lang": self.site.config["DEFAULT_LANG"],
            "hide_untranslated_posts": self.site.config['HIDE_UNTRANSLATED_POSTS'],
        }
        # flag tracks whether any real task was yielded.
        flag = False
        for lang in kw["translations"]:
            # kw minus the timeline, used only for uptodate change detection.
            deps_dict = copy(kw)
            deps_dict.pop('timeline')
            for post in kw['timeline']:
                source = post.source_path
                dest = post.base_path
                if not post.is_translation_available(lang) and kw["hide_untranslated_posts"]:
                    continue
                else:
                    source = post.translated_source_path(lang)
                    if lang != post.default_lang:
                        # Non-default languages get a language-suffixed output.
                        dest = dest + '.' + lang
                flag = True
                task = {
                    'basename': self.name,
                    'name': dest,
                    'file_dep': post.fragment_deps(lang),
                    'targets': [dest],
                    'actions': [(self.site.get_compiler(post.source_path).compile_html,
                                 [source, dest, post.is_two_file])],
                    'clean': True,
                    'uptodate': [utils.config_changed(deps_dict)],
                }
                if post.meta('password'):
                    # Post-process the compiled fragment with RC4 wrapping.
                    task['actions'].append((wrap_encrypt, (dest, post.meta('password'))))
                yield task
        if flag is False:
            # No post produced a task; yield a dummy so doit sees the basename.
            yield {
                'basename': self.name,
                'name': 'None',
                'uptodate': [True],
                'actions': [],
            }
CRYPT = string.Template("""\
<script>
function rc4(key, str) {
var s = [], j = 0, x, res = '';
for (var i = 0; i < 256; i++) {
s[i] = i;
}
for (i = 0; i < 256; i++) {
j = (j + s[i] + key.charCodeAt(i % key.length)) % 256;
x = s[i];
s[i] = s[j];
s[j] = x;
}
i = 0;
j = 0;
for (var y = 0; y < str.length; y++) {
i = (i + 1) % 256;
j = (j + s[i]) % 256;
x = s[i];
s[i] = s[j];
s[j] = x;
res += String.fromCharCode(str.charCodeAt(y) ^ s[(s[i] + s[j]) % 256]);
}
return res;
}
function decrypt() {
key = $$("#key").val();
crypt_div = $$("#encr")
crypted = crypt_div.html();
decrypted = rc4(key, window.atob(crypted));
if (decrypted.substr(decrypted.length - 11) == "<!--tail-->"){
crypt_div.html(decrypted);
$$("#pwform").hide();
crypt_div.show();
} else { alert("Wrong password"); };
}
</script>
<div id="encr" style="display: none;">${data}</div>
<div id="pwform">
<form onsubmit="javascript:decrypt(); return false;" class="form-inline">
<fieldset>
<legend>This post is password-protected.</legend>
<input type="password" id="key" placeholder="Type password here">
<button type="submit" class="btn">Show Content</button>
</fieldset>
</form>
</div>""")
| true | true |
f730f40810a9ab1ca74ee1b70e362abf40ace529 | 3,700 | py | Python | Visualize.py | BlindBMan/Facial-Recognition | 1f0c65174de2223a81797fee7722227a712a37bc | [
"MIT"
] | null | null | null | Visualize.py | BlindBMan/Facial-Recognition | 1f0c65174de2223a81797fee7722227a712a37bc | [
"MIT"
] | null | null | null | Visualize.py | BlindBMan/Facial-Recognition | 1f0c65174de2223a81797fee7722227a712a37bc | [
"MIT"
] | null | null | null | import cv2 as cv
import os
import numpy as np
import pdb
import ntpath
import glob
from Parameters import *
def show_detections_without_ground_truth(detections, scores, file_names, params: Parameters):
"""
Afiseaza si salveaza imaginile adnotate.
detections: numpy array de dimensiune NX4, unde N este numarul de detectii pentru toate imaginile.
detections[i, :] = [x_min, y_min, x_max, y_max]
scores: numpy array de dimensiune N, scorurile pentru toate detectiile pentru toate imaginile.
file_names: numpy array de dimensiune N, pentru fiecare detectie trebuie sa salvam numele imaginii.
(doar numele, nu toata calea).
"""
test_images_path = os.path.join(params.dir_test_examples, '*.jpg')
test_files = glob.glob(test_images_path)
for test_file in test_files:
image = cv.imread(test_file)
short_file_name = ntpath.basename(test_file)
indices_detections_current_image = np.where(file_names == short_file_name)
current_detections = detections[indices_detections_current_image]
current_scores = scores[indices_detections_current_image]
for idx, detection in enumerate(current_detections):
cv.rectangle(image, (detection[0], detection[1]), (detection[2], detection[3]), (0, 0, 255), thickness=1)
cv.putText(image, 'score:' + str(current_scores[idx])[:4], (detection[0], detection[1]),
cv.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0), 1)
cv.imwrite(os.path.join(params.dir_save_files, "detections_" + short_file_name), image)
print('Apasa orice tasta pentru a continua...')
cv.imshow('image', np.uint8(image))
cv.waitKey(0)
def show_detections_with_ground_truth(detections, scores, file_names, params: Parameters):
"""
Afiseaza si salveaza imaginile adnotate. Deseneaza bounding box-urile prezice si cele corecte.
detections: numpy array de dimensiune NX4, unde N este numarul de detectii pentru toate imaginile.
detections[i, :] = [x_min, y_min, x_max, y_max]
scores: numpy array de dimensiune N, scorurile pentru toate detectiile pentru toate imaginile.
file_names: numpy array de dimensiune N, pentru fiecare detectie trebuie sa salvam numele imaginii.
(doar numele, nu toata calea).
"""
ground_truth_bboxes = np.loadtxt(params.path_annotations, dtype='str')
test_images_path = os.path.join(params.dir_test_examples, '*.jpg')
test_files = glob.glob(test_images_path)
for test_file in test_files:
image = cv.imread(test_file)
short_file_name = ntpath.basename(test_file)
indices_detections_current_image = np.where(file_names == short_file_name)
current_detections = detections[indices_detections_current_image]
current_scores = scores[indices_detections_current_image]
for idx, detection in enumerate(current_detections):
cv.rectangle(image, (detection[0], detection[1]), (detection[2], detection[3]), (0, 0, 255), thickness=1)
cv.putText(image, 'score:' + str(current_scores[idx])[:4], (detection[0], detection[1]),
cv.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0), 1)
annotations = ground_truth_bboxes[ground_truth_bboxes[:, 0] == short_file_name]
# show ground truth bboxes
for detection in annotations:
cv.rectangle(image, (int(detection[1]), int(detection[2])), (int(detection[3]), int(detection[4])), (0, 255, 0), thickness=1)
cv.imwrite(os.path.join(params.dir_save_files, "detections_" + short_file_name), image)
print('Apasa orice tasta pentru a continua...')
cv.imshow('image', np.uint8(image))
cv.waitKey(0)
| 48.684211 | 137 | 0.699189 | import cv2 as cv
import os
import numpy as np
import pdb
import ntpath
import glob
from Parameters import *
def show_detections_without_ground_truth(detections, scores, file_names, params: Parameters):
test_images_path = os.path.join(params.dir_test_examples, '*.jpg')
test_files = glob.glob(test_images_path)
for test_file in test_files:
image = cv.imread(test_file)
short_file_name = ntpath.basename(test_file)
indices_detections_current_image = np.where(file_names == short_file_name)
current_detections = detections[indices_detections_current_image]
current_scores = scores[indices_detections_current_image]
for idx, detection in enumerate(current_detections):
cv.rectangle(image, (detection[0], detection[1]), (detection[2], detection[3]), (0, 0, 255), thickness=1)
cv.putText(image, 'score:' + str(current_scores[idx])[:4], (detection[0], detection[1]),
cv.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0), 1)
cv.imwrite(os.path.join(params.dir_save_files, "detections_" + short_file_name), image)
print('Apasa orice tasta pentru a continua...')
cv.imshow('image', np.uint8(image))
cv.waitKey(0)
def show_detections_with_ground_truth(detections, scores, file_names, params: Parameters):
ground_truth_bboxes = np.loadtxt(params.path_annotations, dtype='str')
test_images_path = os.path.join(params.dir_test_examples, '*.jpg')
test_files = glob.glob(test_images_path)
for test_file in test_files:
image = cv.imread(test_file)
short_file_name = ntpath.basename(test_file)
indices_detections_current_image = np.where(file_names == short_file_name)
current_detections = detections[indices_detections_current_image]
current_scores = scores[indices_detections_current_image]
for idx, detection in enumerate(current_detections):
cv.rectangle(image, (detection[0], detection[1]), (detection[2], detection[3]), (0, 0, 255), thickness=1)
cv.putText(image, 'score:' + str(current_scores[idx])[:4], (detection[0], detection[1]),
cv.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0), 1)
annotations = ground_truth_bboxes[ground_truth_bboxes[:, 0] == short_file_name]
for detection in annotations:
cv.rectangle(image, (int(detection[1]), int(detection[2])), (int(detection[3]), int(detection[4])), (0, 255, 0), thickness=1)
cv.imwrite(os.path.join(params.dir_save_files, "detections_" + short_file_name), image)
print('Apasa orice tasta pentru a continua...')
cv.imshow('image', np.uint8(image))
cv.waitKey(0)
| true | true |
f730f477cc08bbd4b7ffa1a11537af10fd058b92 | 1,852 | py | Python | couch/setup.py | brentm5/integrations-core | 5cac8788c95d8820435ef9c5d32d6a5463cf491d | [
"BSD-3-Clause"
] | 4 | 2021-06-21T19:21:49.000Z | 2021-06-23T21:21:55.000Z | couch/setup.py | brentm5/integrations-core | 5cac8788c95d8820435ef9c5d32d6a5463cf491d | [
"BSD-3-Clause"
] | null | null | null | couch/setup.py | brentm5/integrations-core | 5cac8788c95d8820435ef9c5d32d6a5463cf491d | [
"BSD-3-Clause"
] | 1 | 2021-06-21T19:21:51.000Z | 2021-06-21T19:21:51.000Z | # (C) Datadog, Inc. 2018
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
# Always prefer setuptools over distutils
from setuptools import setup
# To use a consistent encoding
from codecs import open
from os import path
HERE = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(HERE, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
# Get version info
ABOUT = {}
with open(path.join(HERE, "datadog_checks", "couch", "__about__.py")) as f:
exec(f.read(), ABOUT)
def get_requirements(fpath):
with open(path.join(HERE, fpath), encoding='utf-8') as f:
return f.readlines()
CHECKS_BASE_REQ = 'datadog_checks_base'
setup(
name='datadog-couch',
version=ABOUT["__version__"],
description='The CouchDB check',
long_description=long_description,
long_description_content_type='text/markdown',
keywords='datadog agent couch check',
# The project's main homepage.
url='https://github.com/DataDog/integrations-core',
# Author details
author='Datadog',
author_email='packages@datadoghq.com',
# License
license='BSD',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Topic :: System :: Monitoring',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
],
# The package we're going to ship
packages=['datadog_checks.couch'],
# Run-time dependencies
install_requires=[CHECKS_BASE_REQ],
# Extra files to ship with the wheel package
include_package_data=True,
)
| 27.235294 | 75 | 0.682505 |
from setuptools import setup
from codecs import open
from os import path
HERE = path.abspath(path.dirname(__file__))
with open(path.join(HERE, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
ABOUT = {}
with open(path.join(HERE, "datadog_checks", "couch", "__about__.py")) as f:
exec(f.read(), ABOUT)
def get_requirements(fpath):
with open(path.join(HERE, fpath), encoding='utf-8') as f:
return f.readlines()
CHECKS_BASE_REQ = 'datadog_checks_base'
setup(
name='datadog-couch',
version=ABOUT["__version__"],
description='The CouchDB check',
long_description=long_description,
long_description_content_type='text/markdown',
keywords='datadog agent couch check',
url='https://github.com/DataDog/integrations-core',
# Author details
author='Datadog',
author_email='packages@datadoghq.com',
# License
license='BSD',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Topic :: System :: Monitoring',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
],
# The package we're going to ship
packages=['datadog_checks.couch'],
install_requires=[CHECKS_BASE_REQ],
include_package_data=True,
)
| true | true |
f730f49df8a3829de88cd50a3529de49a0e5a694 | 691 | py | Python | FirstStepsInPython/Basics/Exams/exam_22_may_2021/04. Computer Firm.py | Pittor052/SoftUni-Studies | 1ee6341082f6ccfa45b3e82824c37722bcf2fb31 | [
"MIT"
] | null | null | null | FirstStepsInPython/Basics/Exams/exam_22_may_2021/04. Computer Firm.py | Pittor052/SoftUni-Studies | 1ee6341082f6ccfa45b3e82824c37722bcf2fb31 | [
"MIT"
] | null | null | null | FirstStepsInPython/Basics/Exams/exam_22_may_2021/04. Computer Firm.py | Pittor052/SoftUni-Studies | 1ee6341082f6ccfa45b3e82824c37722bcf2fb31 | [
"MIT"
] | 1 | 2021-10-07T18:30:42.000Z | 2021-10-07T18:30:42.000Z | computers = int ( input () )
total_computers = computers
total_rating = 0
total_sales = 0
while not computers == 0:
command = int ( input () )
possible_sales = 0
last_digit = command % 10
first_two_digits = command // 10
rating = last_digit
if rating == 3:
possible_sales = first_two_digits * 0.5
elif rating == 4:
possible_sales = first_two_digits * 0.70
elif rating == 5:
possible_sales = first_two_digits * 0.85
elif rating == 6:
possible_sales += first_two_digits
total_rating += rating
total_sales += possible_sales
computers -= 1
print(f"{total_sales:.2f}")
print(f"{total_rating / total_computers:.2f}")
| 28.791667 | 48 | 0.652677 | computers = int ( input () )
total_computers = computers
total_rating = 0
total_sales = 0
while not computers == 0:
command = int ( input () )
possible_sales = 0
last_digit = command % 10
first_two_digits = command // 10
rating = last_digit
if rating == 3:
possible_sales = first_two_digits * 0.5
elif rating == 4:
possible_sales = first_two_digits * 0.70
elif rating == 5:
possible_sales = first_two_digits * 0.85
elif rating == 6:
possible_sales += first_two_digits
total_rating += rating
total_sales += possible_sales
computers -= 1
print(f"{total_sales:.2f}")
print(f"{total_rating / total_computers:.2f}")
| true | true |
f730f61c0558a354a0cd7c399108c3d7b08479b3 | 4,692 | py | Python | Tools/SeeDot/seedot/predictor.py | krantikiran/EdgeML | e5c7bd7c56884ca61f6d54cedb0074553cfdc896 | [
"MIT"
] | 1 | 2020-03-26T17:19:54.000Z | 2020-03-26T17:19:54.000Z | Tools/SeeDot/seedot/predictor.py | krantikiran/EdgeML | e5c7bd7c56884ca61f6d54cedb0074553cfdc896 | [
"MIT"
] | 2 | 2020-03-26T02:59:12.000Z | 2020-04-23T19:09:00.000Z | Tools/SeeDot/seedot/predictor.py | krantikiran/EdgeML | e5c7bd7c56884ca61f6d54cedb0074553cfdc896 | [
"MIT"
] | 3 | 2020-03-25T18:45:39.000Z | 2020-12-17T19:09:54.000Z | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT license.
import os
import subprocess
import seedot.config as config
import seedot.util as Util
# Program to build and run the predictor project using msbuild
# The accuracy and other statistics are written to the output file specified
class Predictor:
def __init__(self, algo, version, datasetType, outputDir, scaleForX):
self.algo, self.version, self.datasetType = algo, version, datasetType
self.outputDir = outputDir
os.makedirs(self.outputDir, exist_ok=True)
self.scaleForX = scaleForX
self.genHeaderFile()
def genHeaderFile(self):
with open("datatypes.h", 'w') as file:
file.write("#pragma once\n\n")
if config.wordLength == 8:
file.write("#define INT8\n")
file.write("typedef int8_t MYINT;\n\n")
elif config.wordLength == 16:
file.write("#define INT16\n")
file.write("typedef int16_t MYINT;\n\n")
elif config.wordLength == 32:
file.write("#define INT32\n")
file.write("typedef int32_t MYINT;\n\n")
file.write("typedef int16_t MYITE;\n")
file.write("typedef uint16_t MYUINT;\n\n")
file.write("const int scaleForX = %d;\n\n" % (self.scaleForX))
if Util.debugMode():
file.write("const bool debugMode = true;\n")
else:
file.write("const bool debugMode = false;\n")
def buildForWindows(self):
'''
Builds using the Predictor.vcxproj project file and creates the executable
The target platform is currently set to x64
'''
print("Build...", end='')
projFile = "Predictor.vcxproj"
args = [config.msbuildPath, projFile, r"/t:Build",
r"/p:Configuration=Release", r"/p:Platform=x64"]
logFile = os.path.join(self.outputDir, "msbuild.txt")
with open(logFile, 'w') as file:
process = subprocess.call(args, stdout=file, stderr=subprocess.STDOUT)
if process == 1:
print("FAILED!!\n")
return False
else:
print("success")
return True
def buildForLinux(self):
print("Build...", end='')
args = ["make"]
logFile = os.path.join(self.outputDir, "build.txt")
with open(logFile, 'w') as file:
process = subprocess.call(args, stdout=file, stderr=subprocess.STDOUT)
if process == 1:
print("FAILED!!\n")
return False
else:
print("success")
return True
def build(self):
if Util.windows():
return self.buildForWindows()
else:
return self.buildForLinux()
def executeForWindows(self):
'''
Invokes the executable with arguments
'''
print("Execution...", end='')
exeFile = os.path.join("x64", "Release", "Predictor.exe")
args = [exeFile, self.version, self.datasetType]
logFile = os.path.join(self.outputDir, "exec.txt")
with open(logFile, 'w') as file:
process = subprocess.call(args, stdout=file, stderr=subprocess.STDOUT)
if process == 1:
print("FAILED!!\n")
return None
else:
print("success")
acc = self.readStatsFile()
return acc
def executeForLinux(self):
print("Execution...", end='')
exeFile = os.path.join("./Predictor")
args = [exeFile, self.version, self.datasetType]
logFile = os.path.join(self.outputDir, "exec.txt")
with open(logFile, 'w') as file:
process = subprocess.call(args, stdout=file, stderr=subprocess.STDOUT)
if process == 1:
print("FAILED!!\n")
return None
else:
print("success")
acc = self.readStatsFile()
return acc
def execute(self):
if Util.windows():
return self.executeForWindows()
else:
return self.executeForLinux()
# Read statistics of execution (currently only accuracy)
def readStatsFile(self):
statsFile = os.path.join(
"output", self.version, "stats-" + self.datasetType + ".txt")
with open(statsFile, 'r') as file:
content = file.readlines()
stats = [x.strip() for x in content]
return float(stats[0])
def run(self):
res = self.build()
if res == False:
return None
acc = self.execute()
return acc
| 29.509434 | 82 | 0.563725 |
import os
import subprocess
import seedot.config as config
import seedot.util as Util
class Predictor:
def __init__(self, algo, version, datasetType, outputDir, scaleForX):
self.algo, self.version, self.datasetType = algo, version, datasetType
self.outputDir = outputDir
os.makedirs(self.outputDir, exist_ok=True)
self.scaleForX = scaleForX
self.genHeaderFile()
def genHeaderFile(self):
with open("datatypes.h", 'w') as file:
file.write("#pragma once\n\n")
if config.wordLength == 8:
file.write("#define INT8\n")
file.write("typedef int8_t MYINT;\n\n")
elif config.wordLength == 16:
file.write("#define INT16\n")
file.write("typedef int16_t MYINT;\n\n")
elif config.wordLength == 32:
file.write("#define INT32\n")
file.write("typedef int32_t MYINT;\n\n")
file.write("typedef int16_t MYITE;\n")
file.write("typedef uint16_t MYUINT;\n\n")
file.write("const int scaleForX = %d;\n\n" % (self.scaleForX))
if Util.debugMode():
file.write("const bool debugMode = true;\n")
else:
file.write("const bool debugMode = false;\n")
def buildForWindows(self):
print("Build...", end='')
projFile = "Predictor.vcxproj"
args = [config.msbuildPath, projFile, r"/t:Build",
r"/p:Configuration=Release", r"/p:Platform=x64"]
logFile = os.path.join(self.outputDir, "msbuild.txt")
with open(logFile, 'w') as file:
process = subprocess.call(args, stdout=file, stderr=subprocess.STDOUT)
if process == 1:
print("FAILED!!\n")
return False
else:
print("success")
return True
def buildForLinux(self):
print("Build...", end='')
args = ["make"]
logFile = os.path.join(self.outputDir, "build.txt")
with open(logFile, 'w') as file:
process = subprocess.call(args, stdout=file, stderr=subprocess.STDOUT)
if process == 1:
print("FAILED!!\n")
return False
else:
print("success")
return True
def build(self):
if Util.windows():
return self.buildForWindows()
else:
return self.buildForLinux()
def executeForWindows(self):
print("Execution...", end='')
exeFile = os.path.join("x64", "Release", "Predictor.exe")
args = [exeFile, self.version, self.datasetType]
logFile = os.path.join(self.outputDir, "exec.txt")
with open(logFile, 'w') as file:
process = subprocess.call(args, stdout=file, stderr=subprocess.STDOUT)
if process == 1:
print("FAILED!!\n")
return None
else:
print("success")
acc = self.readStatsFile()
return acc
def executeForLinux(self):
print("Execution...", end='')
exeFile = os.path.join("./Predictor")
args = [exeFile, self.version, self.datasetType]
logFile = os.path.join(self.outputDir, "exec.txt")
with open(logFile, 'w') as file:
process = subprocess.call(args, stdout=file, stderr=subprocess.STDOUT)
if process == 1:
print("FAILED!!\n")
return None
else:
print("success")
acc = self.readStatsFile()
return acc
def execute(self):
if Util.windows():
return self.executeForWindows()
else:
return self.executeForLinux()
def readStatsFile(self):
statsFile = os.path.join(
"output", self.version, "stats-" + self.datasetType + ".txt")
with open(statsFile, 'r') as file:
content = file.readlines()
stats = [x.strip() for x in content]
return float(stats[0])
def run(self):
res = self.build()
if res == False:
return None
acc = self.execute()
return acc
| true | true |
f730f62a1e645556614119a1ae7ca46137ef0109 | 16,444 | py | Python | lib/python3.8/site-packages/ansible_collections/cisco/iosxr/plugins/modules/iosxr_lldp_interfaces.py | cjsteel/python3-venv-ansible-2.10.5 | c95395c4cae844dc66fddde9b4343966f4b2ecd5 | [
"Apache-1.1"
] | null | null | null | lib/python3.8/site-packages/ansible_collections/cisco/iosxr/plugins/modules/iosxr_lldp_interfaces.py | cjsteel/python3-venv-ansible-2.10.5 | c95395c4cae844dc66fddde9b4343966f4b2ecd5 | [
"Apache-1.1"
] | null | null | null | lib/python3.8/site-packages/ansible_collections/cisco/iosxr/plugins/modules/iosxr_lldp_interfaces.py | cjsteel/python3-venv-ansible-2.10.5 | c95395c4cae844dc66fddde9b4343966f4b2ecd5 | [
"Apache-1.1"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2019 Red Hat
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#############################################
# WARNING #
#############################################
#
# This file is auto generated by the resource
# module builder playbook.
#
# Do not edit this file manually.
#
# Changes to this file will be over written
# by the resource module builder.
#
# Changes should be made in the model used to
# generate this file or in the resource module
# builder template.
#
#############################################
"""
The module file for iosxr_lldp_interfaces
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = """
module: iosxr_lldp_interfaces
short_description: LLDP interfaces resource module
description:
- This module manages Link Layer Discovery Protocol (LLDP) attributes of interfaces
on IOS-XR devices.
version_added: 1.0.0
notes:
- Tested against IOS-XR 6.1.3.
- This module works with connection C(network_cli). See L(the IOS-XR Platform Options,../network/user_guide/platform_iosxr.html).
author: Nilashish Chakraborty (@nilashishc)
options:
config:
description: A dictionary of LLDP interfaces options.
type: list
elements: dict
suboptions:
name:
description:
- Name/Identifier of the interface or Ether-Bundle.
type: str
destination:
description:
- Specifies LLDP destination configuration on the interface.
suboptions:
mac_address:
description:
- Specifies the LLDP destination mac address on the interface.
type: str
choices:
- ieee-nearest-bridge
- ieee-nearest-non-tmpr-bridge
type: dict
receive:
description:
- Enable/disable LLDP RX on an interface.
type: bool
transmit:
description:
- Enable/disable LLDP TX on an interface.
type: bool
running_config:
description:
- This option is used only with state I(parsed).
- The value of this option should be the output received from the IOS-XR device
by executing the command B(show running-config int).
- The state I(parsed) reads the configuration from C(running_config) option and
transforms it into Ansible structured data as per the resource module's argspec
and the value is then returned in the I(parsed) key within the result.
type: str
state:
description:
- The state of the configuration after module completion.
type: str
choices:
- merged
- replaced
- overridden
- deleted
- parsed
- rendered
- gathered
default: merged
"""
EXAMPLES = """
# Using merged
#
#
# ------------
# Before state
# ------------
#
#
# RP/0/RP0/CPU0:ios#sh run int
# Mon Aug 12 12:40:23.104 UTC
# interface TenGigE0/0/0/0
# ipv4 address 192.0.2.11 255.255.255.192
# !
# interface preconfigure GigabitEthernet0/0/0/1
# !
# interface preconfigure GigabitEthernet0/0/0/2
# !
#
#
- name: Merge provided configuration with running configuration
cisco.iosxr.iosxr_lldp_interfaces:
config:
- name: GigabitEthernet0/0/0/1
destination:
mac_address: ieee-nearest-non-tmpr-bridge
transmit: false
- name: GigabitEthernet0/0/0/2
destination:
mac_address: ieee-nearest-bridge
receive: false
state: merged
#
#
# ------------------------
# Module Execution Result
# ------------------------
#
#
# "before": [
# {
# "name": "TenGigE0/0/0/0"
# },
# {
# "name": "GigabitEthernet0/0/0/1"
# },
# {
# "name": "GigabitEthernet0/0/0/2"
# }
# ]
#
# "commands": [
# "interface GigabitEthernet0/0/0/2",
# "lldp destination mac-address ieee-nearest-non-tmpr-bridge",
# "lldp transmit disable",
# "interface GigabitEthernet0/0/0/1",
# "lldp receive disable",
# "lldp destination mac-address ieee-nearest-bridge"
# ]
#
# "after": [
# {
# "name": "TenGigE0/0/0/0"
# },
# {
# "destination": {
# "mac_address": "ieee-nearest-bridge"
# },
# "name": "GigabitEthernet0/0/0/1",
# "receive": false
# },
# {
# "destination": {
# "mac_address": "ieee-nearest-non-tmpr-bridge"
# },
# "name": "GigabitEthernet0/0/0/2",
# "transmit": false
# }
# ]
#
#
# ------------
# After state
# ------------
#
#
# RP/0/RP0/CPU0:ios#sh run int
# Mon Aug 12 12:49:51.517 UTC
# interface TenGigE0/0/0/0
# ipv4 address 192.0.2.11 255.255.255.192
# !
# interface preconfigure GigabitEthernet0/0/0/1
# lldp
# receive disable
# destination mac-address
# ieee-nearest-bridge
# !
# !
# !
# interface preconfigure GigabitEthernet0/0/0/2
# lldp
# transmit disable
# destination mac-address
# ieee-nearest-non-tmpr-bridge
# !
# !
# !
#
#
# Using replaced
#
#
# -------------
# Before state
# -------------
#
#
# RP/0/RP0/CPU0:ios#sh run int
# Mon Aug 12 12:49:51.517 UTC
# interface TenGigE0/0/0/0
# ipv4 address 192.0.2.11 255.255.255.192
# !
# interface preconfigure GigabitEthernet0/0/0/1
# lldp
# receive disable
# destination mac-address
# ieee-nearest-bridge
# !
# !
# !
# interface preconfigure GigabitEthernet0/0/0/2
# lldp
# transmit disable
# destination mac-address
# ieee-nearest-non-tmpr-bridge
# !
# !
# !
#
#
- name: Replace existing LLDP configurations of specified interfaces with provided
configuration
cisco.iosxr.iosxr_lldp_interfaces:
config:
- name: GigabitEthernet0/0/0/1
destination:
mac_address: ieee-nearest-non-tmpr-bridge
state: replaced
#
#
# ------------------------
# Module Execution Result
# ------------------------
#
# "before": [
# {
# "name": "TenGigE0/0/0/0"
# },
# {
# "destination": {
# "mac_address": "ieee-nearest-bridge"
# },
# "name": "GigabitEthernet0/0/0/1",
# "receive": false
# },
# {
# "destination": {
# "mac_address": "ieee-nearest-non-tmpr-bridge"
# },
# "name": "GigabitEthernet0/0/0/2",
# "transmit": false
# }
# ]
#
#
# "commands": [
# "interface GigabitEthernet0/0/0/1",
# "no lldp receive disable",
# "lldp destination mac-address ieee-nearest-non-tmpr-bridge"
# ]
#
#
# "after": [
# {
# "name": "TenGigE0/0/0/0"
# },
# {
# "destination": {
# "mac_address": "ieee-nearest-non-tmpr-bridge"
# },
# "name": "GigabitEthernet0/0/0/1"
# },
# {
# "destination": {
# "mac_address": "ieee-nearest-non-tmpr-bridge"
# },
# "name": "GigabitEthernet0/0/0/2",
# "transmit": false
# }
# ]
#
#
# ------------
# After state
# ------------
#
#
# RP/0/RP0/CPU0:ios#sh run int
# Mon Aug 12 13:02:57.062 UTC
# interface TenGigE0/0/0/0
# ipv4 address 192.0.2.11 255.255.255.192
# !
# interface preconfigure GigabitEthernet0/0/0/1
# lldp
# destination mac-address
# ieee-nearest-non-tmpr-bridge
# !
# !
# !
# interface preconfigure GigabitEthernet0/0/0/2
# lldp
# transmit disable
# destination mac-address
# ieee-nearest-non-tmpr-bridge
# !
# !
# !
#
#
# Using overridden
#
#
# -------------
# Before state
# -------------
#
#
# RP/0/RP0/CPU0:ios#sh run int
# Mon Aug 12 13:15:40.465 UTC
# interface TenGigE0/0/0/0
# ipv4 address 192.0.2.11 255.255.255.192
# !
# interface preconfigure GigabitEthernet0/0/0/1
# lldp
# receive disable
# destination mac-address
# ieee-nearest-bridge
# !
# !
# !
# interface preconfigure GigabitEthernet0/0/0/2
# lldp
# transmit disable
# destination mac-address
# ieee-nearest-non-tmpr-bridge
# !
# !
# !
#
#
- name: Override the LLDP configurations of all the interfaces with provided configurations
cisco.iosxr.iosxr_lldp_interfaces:
config:
- name: GigabitEthernet0/0/0/1
transmit: false
state: overridden
#
#
# ------------------------
# Module Execution Result
# ------------------------
#
#
# "before": [
# {
# "name": "TenGigE0/0/0/0"
# },
# {
# "destination": {
# "mac_address": "ieee-nearest-bridge"
# },
# "name": "GigabitEthernet0/0/0/1",
# "receive": false
# },
# {
# "destination": {
# "mac_address": "ieee-nearest-non-tmpr-bridge"
# },
# "name": "GigabitEthernet0/0/0/2",
# "transmit": false
# }
# ]
#
# "commands": [
# "interface GigabitEthernet0/0/0/2",
# "no lldp destination mac-address ieee-nearest-non-tmpr-bridge",
# "no lldp transmit disable",
# "interface GigabitEthernet0/0/0/1",
# "no lldp destination mac-address ieee-nearest-bridge",
# "no lldp receive disable",
# "lldp transmit disable"
# ]
#
#
# "after": [
# {
# "name": "TenGigE0/0/0/0"
# },
# {
# "name": "GigabitEthernet0/0/0/1",
# "transmit": false
# },
# {
# "name": "GigabitEthernet0/0/0/2"
# }
# ]
#
#
# ------------
# After state
# ------------
#
#
# RP/0/RP0/CPU0:ios#sh run int
# Mon Aug 12 13:22:25.604 UTC
# interface TenGigE0/0/0/0
# ipv4 address 192.0.2.11 255.255.255.192
# !
# interface preconfigure GigabitEthernet0/0/0/1
# lldp
# transmit disable
# !
# !
# interface preconfigure GigabitEthernet0/0/0/2
# !
#
#
# Using deleted
#
#
# -------------
# Before state
# -------------
#
#
# RP/0/RP0/CPU0:ios#sh run int
# Mon Aug 12 13:26:21.498 UTC
# interface TenGigE0/0/0/0
# ipv4 address 192.0.2.11 255.255.255.192
# !
# interface preconfigure GigabitEthernet0/0/0/1
# lldp
# receive disable
# destination mac-address
# ieee-nearest-bridge
# !
# !
# !
# interface preconfigure GigabitEthernet0/0/0/2
# lldp
# transmit disable
# destination mac-address
# ieee-nearest-non-tmpr-bridge
# !
# !
# !
#
#
- name: Delete LLDP configurations of all interfaces (Note - This won't delete the
interfaces themselves)
cisco.iosxr.iosxr_lldp_interfaces:
state: deleted
#
#
#
# ------------------------
# Module Execution Result
# ------------------------
#
#
# "before": [
# {
# "name": "TenGigE0/0/0/0"
# },
# {
# "destination": {
# "mac_address": "ieee-nearest-bridge"
# },
# "name": "GigabitEthernet0/0/0/1",
# "receive": false
# },
# {
# "destination": {
# "mac_address": "ieee-nearest-non-tmpr-bridge"
# },
# "name": "GigabitEthernet0/0/0/2",
# "transmit": false
# }
# ]
#
#
# "commands": [
# "interface GigabitEthernet0/0/0/1",
# "no lldp destination mac-address ieee-nearest-bridge",
# "no lldp receive disable",
# "interface GigabitEthernet0/0/0/2",
# "no lldp destination mac-address ieee-nearest-non-tmpr-bridge",
# "no lldp transmit disable"
# ]
#
#
# "after": [
# {
# "name": "TenGigE0/0/0/0"
# },
# {
# "name": "GigabitEthernet0/0/0/1"
# },
# {
# "name": "GigabitEthernet0/0/0/2"
# }
# ]
#
#
# ------------
# After state
# ------------
#
#
# RP/0/RP0/CPU0:ios#sh run int
# Mon Aug 12 13:30:14.618 UTC
# interface TenGigE0/0/0/0
# ipv4 address 192.0.2.11 255.255.255.192
# !
# interface preconfigure GigabitEthernet0/0/0/1
# !
# interface preconfigure GigabitEthernet0/0/0/2
# !
#
#
# Using parsed:
# parsed.cfg
# interface TenGigE0/0/0/0
# ipv4 address 192.0.2.11 255.255.255.192
# !
# interface preconfigure GigabitEthernet0/0/0/1
# lldp
# receive disable
# destination mac-address
# ieee-nearest-bridge
# !
# !
# !
# interface preconfigure GigabitEthernet0/0/0/2
# lldp
# transmit disable
# destination mac-address
# ieee-nearest-non-tmpr-bridge
- name: Convert lacp interfaces config to argspec without connecting to the appliance
cisco.iosxr.iosxr_lldp_interfaces:
running_config: "{{ lookup('file', './parsed.cfg') }}"
state: parsed
# ------------------------
# Module Execution Result
# ------------------------
# parsed: [
# - name: GigabitEthernet0/0/0/1
# destination:
# mac_address: ieee-nearest-non-tmpr-bridge
# transmit: False
# - name: GigabitEthernet0/0/0/2
# destination:
# mac_address: ieee-nearest-bridge
# receive: False
# ]
# Using gathered:
# Device config:
# RP/0/RP0/CPU0:ios#sh run int
# Mon Aug 12 12:49:51.517 UTC
# interface TenGigE0/0/0/0
# ipv4 address 192.0.2.11 255.255.255.192
# !
# interface preconfigure GigabitEthernet0/0/0/1
# lldp
# receive disable
# destination mac-address
# ieee-nearest-bridge
# !
# !
# !
# interface preconfigure GigabitEthernet0/0/0/2
# lldp
# transmit disable
# destination mac-address
# ieee-nearest-non-tmpr-bridge
- name: Gather IOSXR lldp interfaces configuration
cisco.iosxr.iosxr_lldp_interfaces:
config:
state: gathered
# ------------------------
# Module Execution Result
# ------------------------
# gathered:
# - name: GigabitEthernet0/0/0/1
# destination:
# mac_address: ieee-nearest-non-tmpr-bridge
# transmit: False
# - name: GigabitEthernet0/0/0/2
# destination:
# mac_address: ieee-nearest-bridge
# receive: False
# Using rendred:
- name: Render platform specific commands from task input using rendered state
cisco.iosxr.iosxr_lldp_interfaces:
config:
- name: GigabitEthernet0/0/0/1
destination:
mac_address: ieee-nearest-non-tmpr-bridge
transmit: false
- name: GigabitEthernet0/0/0/2
destination:
mac_address: ieee-nearest-bridge
receive: false
state: rendered
# ------------------------
# Module Execution Result
# ------------------------
# "rendered": [
# "interface GigabitEthernet0/0/0/2",
# "lldp destination mac-address ieee-nearest-non-tmpr-bridge",
# "lldp transmit disable",
# "interface GigabitEthernet0/0/0/1",
# "lldp receive disable",
# "lldp destination mac-address ieee-nearest-bridge"
# ]
"""
RETURN = """
before:
description: The configuration as structured data prior to module invocation.
returned: always
type: list
sample: >
The configuration returned will always be in the same format
of the parameters above.
after:
description: The configuration as structured data after module completion.
returned: when changed
type: list
sample: >
The configuration returned will always be in the same format
of the parameters above.
commands:
description: The set of commands pushed to the remote device.
returned: always
type: list
sample: ['interface GigabitEthernet0/0/0/1', 'lldp destination mac-address ieee-nearest-non-tmpr-bridge', 'no lldp transmit disable']
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.cisco.iosxr.plugins.module_utils.network.iosxr.argspec.lldp_interfaces.lldp_interfaces import (
Lldp_interfacesArgs,
)
from ansible_collections.cisco.iosxr.plugins.module_utils.network.iosxr.config.lldp_interfaces.lldp_interfaces import (
Lldp_interfaces,
)
def main():
    """Entry point for module execution.

    Builds the ``AnsibleModule`` wrapper around the lldp_interfaces
    argument spec, runs the resource-module engine and exits with its
    result.

    :returns: the result from module invocation
    """
    # Every state except ``deleted``/``gathered`` needs some input:
    # the declarative ``config`` for the first four, raw device output
    # (``running_config``) for ``parsed``.
    config_states = ("merged", "replaced", "overridden", "rendered")
    requirements = [("state", state, ("config",)) for state in config_states]
    requirements.append(("state", "parsed", ("running_config",)))

    module = AnsibleModule(
        argument_spec=Lldp_interfacesArgs.argument_spec,
        required_if=requirements,
        supports_check_mode=True,
        # Only one source of truth may be supplied per invocation.
        mutually_exclusive=[("config", "running_config")],
    )
    module.exit_json(**Lldp_interfaces(module).execute_module())


if __name__ == "__main__":
    main()
| 22.681379 | 135 | 0.580151 |
TenGigE0/0/0/0"
# },
# {
# "name": "GigabitEthernet0/0/0/1"
# },
# {
# "name": "GigabitEthernet0/0/0/2"
# }
# ]
#
# "commands": [
# "interface GigabitEthernet0/0/0/2",
# "lldp destination mac-address ieee-nearest-non-tmpr-bridge",
# "lldp transmit disable",
# "interface GigabitEthernet0/0/0/1",
# "lldp receive disable",
# "lldp destination mac-address ieee-nearest-bridge"
# ]
#
# "after": [
# {
# "name": "TenGigE0/0/0/0"
# },
# {
# "destination": {
# "mac_address": "ieee-nearest-bridge"
# },
# "name": "GigabitEthernet0/0/0/1",
# "receive": false
# },
# {
# "destination": {
# "mac_address": "ieee-nearest-non-tmpr-bridge"
# },
# "name": "GigabitEthernet0/0/0/2",
# "transmit": false
# }
# ]
#
#
# ------------
# After state
# ------------
#
#
# RP/0/RP0/CPU0:ios#sh run int
# Mon Aug 12 12:49:51.517 UTC
# interface TenGigE0/0/0/0
# ipv4 address 192.0.2.11 255.255.255.192
# !
# interface preconfigure GigabitEthernet0/0/0/1
# lldp
# receive disable
# destination mac-address
# ieee-nearest-bridge
# !
# !
# !
# interface preconfigure GigabitEthernet0/0/0/2
# lldp
# transmit disable
# destination mac-address
# ieee-nearest-non-tmpr-bridge
# !
# !
# !
#
#
# Using replaced
#
#
# -------------
# Before state
# -------------
#
#
# RP/0/RP0/CPU0:ios#sh run int
# Mon Aug 12 12:49:51.517 UTC
# interface TenGigE0/0/0/0
# ipv4 address 192.0.2.11 255.255.255.192
# !
# interface preconfigure GigabitEthernet0/0/0/1
# lldp
# receive disable
# destination mac-address
# ieee-nearest-bridge
# !
# !
# !
# interface preconfigure GigabitEthernet0/0/0/2
# lldp
# transmit disable
# destination mac-address
# ieee-nearest-non-tmpr-bridge
# !
# !
# !
#
#
- name: Replace existing LLDP configurations of specified interfaces with provided
configuration
cisco.iosxr.iosxr_lldp_interfaces:
config:
- name: GigabitEthernet0/0/0/1
destination:
mac_address: ieee-nearest-non-tmpr-bridge
state: replaced
#
#
# ------------------------
# Module Execution Result
# ------------------------
#
# "before": [
# {
# "name": "TenGigE0/0/0/0"
# },
# {
# "destination": {
# "mac_address": "ieee-nearest-bridge"
# },
# "name": "GigabitEthernet0/0/0/1",
# "receive": false
# },
# {
# "destination": {
# "mac_address": "ieee-nearest-non-tmpr-bridge"
# },
# "name": "GigabitEthernet0/0/0/2",
# "transmit": false
# }
# ]
#
#
# "commands": [
# "interface GigabitEthernet0/0/0/1",
# "no lldp receive disable",
# "lldp destination mac-address ieee-nearest-non-tmpr-bridge"
# ]
#
#
# "after": [
# {
# "name": "TenGigE0/0/0/0"
# },
# {
# "destination": {
# "mac_address": "ieee-nearest-non-tmpr-bridge"
# },
# "name": "GigabitEthernet0/0/0/1"
# },
# {
# "destination": {
# "mac_address": "ieee-nearest-non-tmpr-bridge"
# },
# "name": "GigabitEthernet0/0/0/2",
# "transmit": false
# }
# ]
#
#
# ------------
# After state
# ------------
#
#
# RP/0/RP0/CPU0:ios#sh run int
# Mon Aug 12 13:02:57.062 UTC
# interface TenGigE0/0/0/0
# ipv4 address 192.0.2.11 255.255.255.192
# !
# interface preconfigure GigabitEthernet0/0/0/1
# lldp
# destination mac-address
# ieee-nearest-non-tmpr-bridge
# !
# !
# !
# interface preconfigure GigabitEthernet0/0/0/2
# lldp
# transmit disable
# destination mac-address
# ieee-nearest-non-tmpr-bridge
# !
# !
# !
#
#
# Using overridden
#
#
# -------------
# Before state
# -------------
#
#
# RP/0/RP0/CPU0:ios#sh run int
# Mon Aug 12 13:15:40.465 UTC
# interface TenGigE0/0/0/0
# ipv4 address 192.0.2.11 255.255.255.192
# !
# interface preconfigure GigabitEthernet0/0/0/1
# lldp
# receive disable
# destination mac-address
# ieee-nearest-bridge
# !
# !
# !
# interface preconfigure GigabitEthernet0/0/0/2
# lldp
# transmit disable
# destination mac-address
# ieee-nearest-non-tmpr-bridge
# !
# !
# !
#
#
- name: Override the LLDP configurations of all the interfaces with provided configurations
cisco.iosxr.iosxr_lldp_interfaces:
config:
- name: GigabitEthernet0/0/0/1
transmit: false
state: overridden
#
#
# ------------------------
# Module Execution Result
# ------------------------
#
#
# "before": [
# {
# "name": "TenGigE0/0/0/0"
# },
# {
# "destination": {
# "mac_address": "ieee-nearest-bridge"
# },
# "name": "GigabitEthernet0/0/0/1",
# "receive": false
# },
# {
# "destination": {
# "mac_address": "ieee-nearest-non-tmpr-bridge"
# },
# "name": "GigabitEthernet0/0/0/2",
# "transmit": false
# }
# ]
#
# "commands": [
# "interface GigabitEthernet0/0/0/2",
# "no lldp destination mac-address ieee-nearest-non-tmpr-bridge",
# "no lldp transmit disable",
# "interface GigabitEthernet0/0/0/1",
# "no lldp destination mac-address ieee-nearest-bridge",
# "no lldp receive disable",
# "lldp transmit disable"
# ]
#
#
# "after": [
# {
# "name": "TenGigE0/0/0/0"
# },
# {
# "name": "GigabitEthernet0/0/0/1",
# "transmit": false
# },
# {
# "name": "GigabitEthernet0/0/0/2"
# }
# ]
#
#
# ------------
# After state
# ------------
#
#
# RP/0/RP0/CPU0:ios#sh run int
# Mon Aug 12 13:22:25.604 UTC
# interface TenGigE0/0/0/0
# ipv4 address 192.0.2.11 255.255.255.192
# !
# interface preconfigure GigabitEthernet0/0/0/1
# lldp
# transmit disable
# !
# !
# interface preconfigure GigabitEthernet0/0/0/2
# !
#
#
# Using deleted
#
#
# -------------
# Before state
# -------------
#
#
# RP/0/RP0/CPU0:ios#sh run int
# Mon Aug 12 13:26:21.498 UTC
# interface TenGigE0/0/0/0
# ipv4 address 192.0.2.11 255.255.255.192
# !
# interface preconfigure GigabitEthernet0/0/0/1
# lldp
# receive disable
# destination mac-address
# ieee-nearest-bridge
# !
# !
# !
# interface preconfigure GigabitEthernet0/0/0/2
# lldp
# transmit disable
# destination mac-address
# ieee-nearest-non-tmpr-bridge
# !
# !
# !
#
#
- name: Delete LLDP configurations of all interfaces (Note - This won't delete the
interfaces themselves)
cisco.iosxr.iosxr_lldp_interfaces:
state: deleted
#
#
#
# ------------------------
# Module Execution Result
# ------------------------
#
#
# "before": [
# {
# "name": "TenGigE0/0/0/0"
# },
# {
# "destination": {
# "mac_address": "ieee-nearest-bridge"
# },
# "name": "GigabitEthernet0/0/0/1",
# "receive": false
# },
# {
# "destination": {
# "mac_address": "ieee-nearest-non-tmpr-bridge"
# },
# "name": "GigabitEthernet0/0/0/2",
# "transmit": false
# }
# ]
#
#
# "commands": [
# "interface GigabitEthernet0/0/0/1",
# "no lldp destination mac-address ieee-nearest-bridge",
# "no lldp receive disable",
# "interface GigabitEthernet0/0/0/2",
# "no lldp destination mac-address ieee-nearest-non-tmpr-bridge",
# "no lldp transmit disable"
# ]
#
#
# "after": [
# {
# "name": "TenGigE0/0/0/0"
# },
# {
# "name": "GigabitEthernet0/0/0/1"
# },
# {
# "name": "GigabitEthernet0/0/0/2"
# }
# ]
#
#
# ------------
# After state
# ------------
#
#
# RP/0/RP0/CPU0:ios#sh run int
# Mon Aug 12 13:30:14.618 UTC
# interface TenGigE0/0/0/0
# ipv4 address 192.0.2.11 255.255.255.192
# !
# interface preconfigure GigabitEthernet0/0/0/1
# !
# interface preconfigure GigabitEthernet0/0/0/2
# !
#
#
# Using parsed:
# parsed.cfg
# interface TenGigE0/0/0/0
# ipv4 address 192.0.2.11 255.255.255.192
# !
# interface preconfigure GigabitEthernet0/0/0/1
# lldp
# receive disable
# destination mac-address
# ieee-nearest-bridge
# !
# !
# !
# interface preconfigure GigabitEthernet0/0/0/2
# lldp
# transmit disable
# destination mac-address
# ieee-nearest-non-tmpr-bridge
- name: Convert lldp interfaces config to argspec without connecting to the appliance
cisco.iosxr.iosxr_lldp_interfaces:
running_config: "{{ lookup('file', './parsed.cfg') }}"
state: parsed
# ------------------------
# Module Execution Result
# ------------------------
# parsed: [
# - name: GigabitEthernet0/0/0/1
# destination:
# mac_address: ieee-nearest-non-tmpr-bridge
# transmit: False
# - name: GigabitEthernet0/0/0/2
# destination:
# mac_address: ieee-nearest-bridge
# receive: False
# ]
# Using gathered:
# Device config:
# RP/0/RP0/CPU0:ios#sh run int
# Mon Aug 12 12:49:51.517 UTC
# interface TenGigE0/0/0/0
# ipv4 address 192.0.2.11 255.255.255.192
# !
# interface preconfigure GigabitEthernet0/0/0/1
# lldp
# receive disable
# destination mac-address
# ieee-nearest-bridge
# !
# !
# !
# interface preconfigure GigabitEthernet0/0/0/2
# lldp
# transmit disable
# destination mac-address
# ieee-nearest-non-tmpr-bridge
- name: Gather IOSXR lldp interfaces configuration
cisco.iosxr.iosxr_lldp_interfaces:
config:
state: gathered
# ------------------------
# Module Execution Result
# ------------------------
# gathered:
# - name: GigabitEthernet0/0/0/1
# destination:
# mac_address: ieee-nearest-non-tmpr-bridge
# transmit: False
# - name: GigabitEthernet0/0/0/2
# destination:
# mac_address: ieee-nearest-bridge
# receive: False
# Using rendered:
- name: Render platform specific commands from task input using rendered state
cisco.iosxr.iosxr_lldp_interfaces:
config:
- name: GigabitEthernet0/0/0/1
destination:
mac_address: ieee-nearest-non-tmpr-bridge
transmit: false
- name: GigabitEthernet0/0/0/2
destination:
mac_address: ieee-nearest-bridge
receive: false
state: rendered
# ------------------------
# Module Execution Result
# ------------------------
# "rendered": [
# "interface GigabitEthernet0/0/0/2",
# "lldp destination mac-address ieee-nearest-non-tmpr-bridge",
# "lldp transmit disable",
# "interface GigabitEthernet0/0/0/1",
# "lldp receive disable",
# "lldp destination mac-address ieee-nearest-bridge"
# ]
"""
RETURN = """
before:
description: The configuration as structured data prior to module invocation.
returned: always
type: list
sample: >
The configuration returned will always be in the same format
of the parameters above.
after:
description: The configuration as structured data after module completion.
returned: when changed
type: list
sample: >
The configuration returned will always be in the same format
of the parameters above.
commands:
description: The set of commands pushed to the remote device.
returned: always
type: list
sample: ['interface GigabitEthernet0/0/0/1', 'lldp destination mac-address ieee-nearest-non-tmpr-bridge', 'no lldp transmit disable']
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.cisco.iosxr.plugins.module_utils.network.iosxr.argspec.lldp_interfaces.lldp_interfaces import (
Lldp_interfacesArgs,
)
from ansible_collections.cisco.iosxr.plugins.module_utils.network.iosxr.config.lldp_interfaces.lldp_interfaces import (
Lldp_interfaces,
)
def main():
    """Entry point for module execution.

    :returns: the result from module invocation
    """
    # Every state except ``deleted``/``gathered`` requires input data:
    # ``config`` for declarative states, ``running_config`` for parsing.
    required_if = [
        ("state", "merged", ("config",)),
        ("state", "replaced", ("config",)),
        ("state", "overridden", ("config",)),
        ("state", "rendered", ("config",)),
        ("state", "parsed", ("running_config",)),
    ]
    # Only one source of truth may be supplied per invocation.
    mutually_exclusive = [("config", "running_config")]
    module = AnsibleModule(
        argument_spec=Lldp_interfacesArgs.argument_spec,
        required_if=required_if,
        supports_check_mode=True,
        mutually_exclusive=mutually_exclusive,
    )
    result = Lldp_interfaces(module).execute_module()
    module.exit_json(**result)


if __name__ == "__main__":
    main()
| true | true |
f730f63d759dc5797a07be61282a64af7a309b97 | 1,495 | py | Python | pool/models/move_to_origin.py | VisualMine/PlaneNet | a4327f0233dd3a5c4de375c2857402d28b308230 | [
"MIT"
] | 372 | 2018-03-27T23:26:38.000Z | 2022-03-30T02:58:51.000Z | pool/models/move_to_origin.py | Piratelhx/PlaneNet | 88eb76d88bb678bd26b7f101f0bb273e4897e92d | [
"MIT"
] | 38 | 2018-04-17T02:03:23.000Z | 2022-02-17T07:44:28.000Z | pool/models/move_to_origin.py | Piratelhx/PlaneNet | 88eb76d88bb678bd26b7f101f0bb273e4897e92d | [
"MIT"
] | 90 | 2018-03-27T13:47:17.000Z | 2022-03-16T03:22:27.000Z | import csv
import sys
import numpy as np
# Normalize a mesh file with OBJ-style 'v x y z' vertex records so that its
# bounding box is centered at the origin and its longest side spans 2 units
# (i.e. the model fits inside [-1, 1]^3).
# Usage: python move_to_origin.py <input> <output>

# Pass 1: collect all vertex coordinates to compute the bounding box.
# (The redundant explicit close() calls of the original were dropped: the
# with-statement already closes the files.)
with open(sys.argv[1]) as model_file:
    xs = []
    ys = []
    zs = []
    for line in csv.reader(model_file, delimiter=' '):
        if line and line[0] == 'v':
            xs.append(float(line[1]))
            ys.append(float(line[2]))
            zs.append(float(line[3]))

xs = np.array(xs)
ys = np.array(ys)
zs = np.array(zs)
print(xs.shape)

# Bounding-box extremes per axis.
minX = xs.min()
maxX = xs.max()
minY = ys.min()
maxY = ys.max()
minZ = zs.min()
maxZ = zs.max()
# Box center: subtracting it translates the model to the origin.
centerX = (minX + maxX) / 2
centerY = (minY + maxY) / 2
centerZ = (minZ + maxZ) / 2
sizeX = maxX - minX
sizeY = maxY - minY
sizeZ = maxZ - minZ
# Uniform scale so the largest extent becomes exactly 2.
scale = 2 / max(sizeX, sizeY, sizeZ)

# Pass 2: rewrite the file, transforming only vertex lines and copying every
# other record (faces, normals, comments, blank lines) through unchanged.
with open(sys.argv[1]) as model_file, open(sys.argv[2], 'w') as output_file:
    for line in csv.reader(model_file, delimiter=' '):
        if len(line) == 0:
            output_file.write('\n')
            continue
        if line[0] == 'v':
            line[1] = str((float(line[1]) - centerX) * scale)
            line[2] = str((float(line[2]) - centerY) * scale)
            line[3] = str((float(line[3]) - centerZ) * scale)
        output_file.write(' '.join(line) + '\n')
| 25.338983 | 74 | 0.53311 | import csv
import sys
import numpy as np
# Pass 1: scan the input file (OBJ-style 'v x y z' vertex records) and
# collect every vertex coordinate so the bounding box can be computed.
with open(sys.argv[1]) as modelFile:
    modelLoader = csv.reader(modelFile, delimiter=' ')
    xs = []
    ys = []
    zs = []
    for lineIndex, line in enumerate(modelLoader):
        if len(line) == 0:
            continue
        if line[0] == 'v':
            # Vertex record: remember each coordinate.
            xs.append(float(line[1]))
            ys.append(float(line[2]))
            zs.append(float(line[3]))
            pass
        continue
    modelFile.close()  # NOTE(review): redundant; the with-block closes the file
    pass
# Bounding-box extremes and center per axis.
xs = np.array(xs)
ys = np.array(ys)
zs = np.array(zs)
print(xs.shape)
minX = xs.min()
maxX = xs.max()
minY = ys.min()
maxY = ys.max()
minZ = zs.min()
maxZ = zs.max()
centerX = (minX + maxX) / 2
centerY = (minY + maxY) / 2
centerZ = (minZ + maxZ) / 2
sizeX = (maxX - minX)
sizeY = (maxY - minY)
sizeZ = (maxZ - minZ)
# Uniform scale so the longest box side becomes 2 (model fits [-1, 1]^3).
scale = 2 / max(sizeX, sizeY, sizeZ)
# Pass 2: rewrite the file, translating/scaling vertex lines and copying
# all other records through unchanged.
with open(sys.argv[1]) as modelFile, open(sys.argv[2], 'w') as outputFile:
    modelLoader = csv.reader(modelFile, delimiter=' ')
    xs = []  # NOTE(review): these three lists are unused in this pass
    ys = []
    zs = []
    for lineIndex, line in enumerate(modelLoader):
        if len(line) == 0:
            outputFile.write('\n')
            continue
        if line[0] == 'v':
            line[1] = str((float(line[1]) - centerX) * scale)
            line[2] = str((float(line[2]) - centerY) * scale)
            line[3] = str((float(line[3]) - centerZ) * scale)
            pass
        outputFile.write(' '.join(line) + '\n')
        continue
    modelFile.close()  # NOTE(review): redundant; closed again by the with-block
    outputFile.close()
    pass
| true | true |
f730f66e6200f2f186c044ebd1d89110eb96e4d6 | 1,179 | py | Python | FlaskRESTFULAPITest_JE/venv/Lib/site-packages/itsdangerous/_compat.py | JE-Chen/je_old_repo | a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5 | [
"MIT"
] | 2 | 2022-01-06T11:52:57.000Z | 2022-01-09T01:53:56.000Z | FlaskRESTFULAPITest_JE/venv/Lib/site-packages/itsdangerous/_compat.py | JE-Chen/je_old_repo | a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5 | [
"MIT"
] | null | null | null | FlaskRESTFULAPITest_JE/venv/Lib/site-packages/itsdangerous/_compat.py | JE-Chen/je_old_repo | a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5 | [
"MIT"
] | null | null | null | import decimal
import hmac
import numbers
import sys
PY2 = sys.version_info[0] == 2
if PY2:
from itertools import izip
text_type = unicode # noqa: 821
else:
izip = zip
text_type = str
number_types = (numbers.Real, decimal.Decimal)
def _constant_time_compare(val1, val2):
    """Return ``True`` if the two strings are equal, ``False``
    otherwise.

    The time taken is independent of the number of characters that
    match. Do not use this function for anything else than comparison
    with known length targets.

    This should be implemented in C in order to get it completely
    right.

    This is an alias of :func:`hmac.compare_digest` on Python>=2.7,3.3.
    """
    # On a length mismatch we still iterate over a full input, seeded with
    # result = 1 so the outcome is False, keeping the timing independent of
    # where (or whether) a mismatch occurs.
    len_eq = len(val1) == len(val2)
    if len_eq:
        result = 0
        left = val1
    else:
        result = 1
        left = val2
    # OR-accumulate the XOR of every byte pair: stays 0 only on full match.
    for x, y in izip(bytearray(left), bytearray(val2)):
        result |= x ^ y
    return result == 0
# Starting with 2.7/3.3 the standard library has a c-implementation for
# constant time string compares.
constant_time_compare = getattr(hmac, "compare_digest", _constant_time_compare)
| 25.085106 | 80 | 0.645462 | import decimal
import hmac
import numbers
import sys
PY2 = sys.version_info[0] == 2
if PY2:
from itertools import izip
text_type = unicode
else:
izip = zip
text_type = str
number_types = (numbers.Real, decimal.Decimal)
def _constant_time_compare(val1, val2):
    """Return ``True`` if the two strings are equal, ``False`` otherwise.

    The time taken is independent of the number of characters that match,
    so timing does not leak where a mismatch occurs. Fallback used when
    :func:`hmac.compare_digest` is unavailable.
    """
    # On a length mismatch we still iterate over a full input, seeded with
    # result = 1 so the outcome is False, keeping timing constant.
    len_eq = len(val1) == len(val2)
    if len_eq:
        result = 0
        left = val1
    else:
        result = 1
        left = val2
    # OR-accumulate the XOR of every byte pair: stays 0 only on full match.
    for x, y in izip(bytearray(left), bytearray(val2)):
        result |= x ^ y
    return result == 0
constant_time_compare = getattr(hmac, "compare_digest", _constant_time_compare)
| true | true |
f730f6f1c65f95e6079be44f28f2c5b909f64b71 | 1,999 | py | Python | flagging_site/data/_store/refresh.py | danivalades/flagging | b1a097594e9b7d9e101edef9bb6abf98a333383c | [
"MIT"
] | null | null | null | flagging_site/data/_store/refresh.py | danivalades/flagging | b1a097594e9b7d9e101edef9bb6abf98a333383c | [
"MIT"
] | null | null | null | flagging_site/data/_store/refresh.py | danivalades/flagging | b1a097594e9b7d9e101edef9bb6abf98a333383c | [
"MIT"
] | null | null | null | """The data store contains offline versions of the data so that you can run a
demo version of the website without the vault keys, or simply develop parts of
the website that don't require actively updated data without having to worry.
This data is used for the actual website when the `USE_MOCK_DATA` config
variable is True. It is useful for dev, but it should never be used in
production.
This file is a CLI to refresh the data store. You can run it with:
`python flagging_site/data/_store/refresh.py`
"""
import os
import sys
from typing import Optional
import click
DATA_STORE_PATH = os.path.dirname(__file__)
@click.command()
@click.option('--vault_password',
              prompt=True,
              # Fall back to the environment so the command can run
              # non-interactively (e.g. in CI).
              default=lambda: os.environ.get('VAULT_PASSWORD', None))
def refresh_data_store(vault_password: Optional[str] = None) -> None:
    """When run, this function runs all the functions that compose the data
    store. The app itself should not be running this function; in fact, this
    function will raise an error if the app is turned on. This should only be
    run from the command line or a Python console.
    """
    # Order matters: these env vars must be set *before* the flagging_site
    # imports below — presumably those modules read them on import/use
    # (TODO confirm against flagging_site.data config handling).
    os.environ['USE_MOCK_DATA'] = 'false'
    if vault_password:
        os.environ['VAULT_PASSWORD'] = vault_password

    # Guard: refuse to refresh while a Flask application context is active.
    from flask import current_app
    if current_app:
        raise Exception('The app should not be running when the data store is '
                        'being refreshed.')

    # Snapshot the live HOBOlink data into the offline store.
    from flagging_site.data.hobolink import get_live_hobolink_data
    from flagging_site.data.hobolink import HOBOLINK_STATIC_FILE_NAME
    get_live_hobolink_data('code_for_boston_export_21d')\
        .to_pickle(os.path.join(DATA_STORE_PATH, HOBOLINK_STATIC_FILE_NAME))

    # Snapshot the live USGS data into the offline store.
    from flagging_site.data.usgs import get_live_usgs_data
    from flagging_site.data.usgs import USGS_STATIC_FILE_NAME
    get_live_usgs_data()\
        .to_pickle(os.path.join(DATA_STORE_PATH, USGS_STATIC_FILE_NAME))
if __name__ == '__main__':
sys.path.append('.')
refresh_data_store()
| 37.018519 | 79 | 0.737869 | import os
import sys
from typing import Optional
import click
DATA_STORE_PATH = os.path.dirname(__file__)
@click.command()
@click.option('--vault_password',
              prompt=True,
              # Fall back to the environment so the command can run
              # non-interactively (e.g. in CI).
              default=lambda: os.environ.get('VAULT_PASSWORD', None))
def refresh_data_store(vault_password: Optional[str] = None) -> None:
    """Refresh the offline data store from the live data sources.

    Must not be invoked while the app is running; raises if a Flask
    application context is active. Intended for command-line use only.
    """
    # Order matters: these env vars must be set *before* the flagging_site
    # imports below — presumably those modules read them on import/use
    # (TODO confirm against flagging_site.data config handling).
    os.environ['USE_MOCK_DATA'] = 'false'
    if vault_password:
        os.environ['VAULT_PASSWORD'] = vault_password
    # Guard: refuse to refresh while a Flask application context is active.
    from flask import current_app
    if current_app:
        raise Exception('The app should not be running when the data store is '
                        'being refreshed.')
    # Snapshot the live HOBOlink data into the offline store.
    from flagging_site.data.hobolink import get_live_hobolink_data
    from flagging_site.data.hobolink import HOBOLINK_STATIC_FILE_NAME
    get_live_hobolink_data('code_for_boston_export_21d')\
        .to_pickle(os.path.join(DATA_STORE_PATH, HOBOLINK_STATIC_FILE_NAME))
    # Snapshot the live USGS data into the offline store.
    from flagging_site.data.usgs import get_live_usgs_data
    from flagging_site.data.usgs import USGS_STATIC_FILE_NAME
    get_live_usgs_data()\
        .to_pickle(os.path.join(DATA_STORE_PATH, USGS_STATIC_FILE_NAME))
if __name__ == '__main__':
sys.path.append('.')
refresh_data_store()
| true | true |
f730f74e730a3d369e068ece021021184632f611 | 5,546 | py | Python | src/pyscaffold/exceptions.py | CarliJoy/pyscaffold | 6e6f8779c4a20fbab81b72c6a12716c877d8c062 | [
"MIT"
] | null | null | null | src/pyscaffold/exceptions.py | CarliJoy/pyscaffold | 6e6f8779c4a20fbab81b72c6a12716c877d8c062 | [
"MIT"
] | null | null | null | src/pyscaffold/exceptions.py | CarliJoy/pyscaffold | 6e6f8779c4a20fbab81b72c6a12716c877d8c062 | [
"MIT"
] | null | null | null | """
Functions for exception manipulation + custom exceptions used by PyScaffold to identify
common deviations from the expected behavior.
"""
import functools
import logging
import sys
import traceback
from typing import Optional, cast
if sys.version_info[:2] >= (3, 8):
# TODO: Import directly (no need for conditional) when `python_requires = >= 3.8`
from importlib.metadata import EntryPoint # pragma: no cover
else:
from importlib_metadata import EntryPoint # pragma: no cover
from . import __version__ as pyscaffold_version
def exceptions2exit(exception_list):
    """Decorator factory converting the given exceptions to exit messages.

    Instead of showing end-users a nasty stack trace, the wrapped function
    prints a short ``ERROR: ...`` line and exits with status 1. The full
    traceback is still shown when the logger runs at debug level.

    Args:
        exception_list [Exception]: list of exception types to convert
    """

    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            try:
                func(*args, **kwargs)
            except tuple(exception_list) as ex:
                # Imported lazily — presumably to avoid import cycles at
                # module load time.
                from pyscaffold.log import logger

                if logger.level <= logging.DEBUG:
                    # At debug verbosity the user surely wants the trace.
                    traceback.print_exc()
                print(f"ERROR: {ex}")
                sys.exit(1)

        return wrapper

    return decorator
class ActionNotFound(KeyError):
    """Impossible to find the required action."""

    def __init__(self, name, *args, **kwargs):
        # Reuse the class docstring (without its trailing period) as the
        # human-readable prefix of the error message.
        summary = ActionNotFound.__doc__[:-1]
        super().__init__(f"{summary}: `{name}`", *args, **kwargs)
class DirectoryAlreadyExists(RuntimeError):
    """The project directory already exists.

    Raised when neither the ``update`` nor the ``force`` option was used.
    """
class DirectoryDoesNotExist(RuntimeError):
    """No directory was found to be updated.

    Raised when an update is requested for a path that does not exist.
    """
class GitNotInstalled(RuntimeError):
    """PyScaffold requires git to run."""

    # Default error text used when no explicit message is passed.
    DEFAULT_MESSAGE = "Make sure git is installed and working."

    def __init__(self, message=DEFAULT_MESSAGE, *args, **kwargs):
        super().__init__(message, *args, **kwargs)
class GitNotConfigured(RuntimeError):
    """PyScaffold tries to read user.name and user.email from git config."""

    # Default error text (adjacent string literals are concatenated) used
    # when no explicit message is passed.
    DEFAULT_MESSAGE = (
        "Make sure git is configured. Run:\n"
        '  git config --global user.email "you@example.com"\n'
        '  git config --global user.name "Your Name"\n'
        "to set your account's default identity."
    )

    def __init__(self, message=DEFAULT_MESSAGE, *args, **kwargs):
        super().__init__(message, *args, **kwargs)
class GitDirtyWorkspace(RuntimeError):
    """Workspace of git is dirty (has uncommitted changes)."""

    # Default error text; the two adjacent literals are concatenated.
    DEFAULT_MESSAGE = (
        "Your working tree is dirty. Commit your changes first" " or use '--force'."
    )

    def __init__(self, message=DEFAULT_MESSAGE, *args, **kwargs):
        super().__init__(message, *args, **kwargs)
class InvalidIdentifier(RuntimeError):
    """Python requires a specific format for its identifiers.

    https://docs.python.org/3.6/reference/lexical_analysis.html#identifiers
    """
class OldSetuptools(RuntimeError):
    """PyScaffold requires a recent version of setuptools."""

    # Default error text used when no explicit message is passed.
    DEFAULT_MESSAGE = (
        "Your setuptools version is too old (<38.3). "
        "Use `pip install -U setuptools` to upgrade.\n"
        "If you have the deprecated `distribute` package installed "
        "remove it or update to version 0.7.3."
    )

    def __init__(self, message=DEFAULT_MESSAGE, *args, **kwargs):
        super().__init__(message, *args, **kwargs)
class PyScaffoldTooOld(RuntimeError):
    """PyScaffold cannot update a pre 3.0 version."""

    # Default error text used when no explicit message is passed.
    DEFAULT_MESSAGE = (
        "setup.cfg has no section [pyscaffold]! "
        "Are you trying to update a pre 3.0 version?"
    )

    def __init__(self, message=DEFAULT_MESSAGE, *args, **kwargs):
        super().__init__(message, *args, **kwargs)
class NoPyScaffoldProject(RuntimeError):
    """PyScaffold cannot update a project that it hasn't generated."""

    # Default error text used when no explicit message is passed.
    DEFAULT_MESSAGE = "Could not update project. Was it generated with PyScaffold?"

    def __init__(self, message=DEFAULT_MESSAGE, *args, **kwargs):
        super().__init__(message, *args, **kwargs)
class ShellCommandException(RuntimeError):
    """Outputs proper logging when a ShellCommand fails."""
class ImpossibleToFindConfigDir(RuntimeError):
    """An expected error occurred when trying to find the config dir.

    This might be related to not being able to read the $HOME env var in Unix
    systems, or %USERPROFILE% in Windows, or even the username.
    """

    def __init__(self, message=None, *args, **kwargs):
        # The class docstring doubles as the fallback message when no
        # explicit (truthy) message is supplied.
        if not message:
            message = self.__class__.__doc__
        super().__init__(message, *args, **kwargs)
class ErrorLoadingExtension(RuntimeError):
    """There was an error loading '{extension}'.

    Please make sure you have installed a version of the extension that is compatible
    with PyScaffold {version}. You can also try uninstalling it.
    """
    # NOTE: the docstring doubles as the runtime error-message template,
    # filled in via ``str.format`` below ("unininstalling" typo fixed).

    def __init__(self, extension: str = "", entry_point: Optional[EntryPoint] = None):
        # Derive a printable extension name from the entry point when no
        # explicit name was provided.
        if entry_point and not extension:
            extension = getattr(entry_point, "module", entry_point.name)

        # Normalize the internal module path to the public package name,
        # e.g. ``pyscaffoldext.foo.extension`` -> ``pyscaffoldext-foo``.
        if extension.endswith(".extension"):
            extension = extension[: -len(".extension")]
        extension = extension.replace("pyscaffoldext.", "pyscaffoldext-")

        message = cast(str, self.__doc__)
        message = message.format(extension=extension, version=pyscaffold_version)
        super().__init__(message)
| 31.691429 | 87 | 0.671114 | import functools
import logging
import sys
import traceback
from typing import Optional, cast
if sys.version_info[:2] >= (3, 8):
from importlib.metadata import EntryPoint
else:
from importlib_metadata import EntryPoint
from . import __version__ as pyscaffold_version
def exceptions2exit(exception_list):
    """Decorator to convert given exceptions to exit messages.

    This avoids displaying nasty stack traces to end-users.

    Args:
        exception_list [Exception]: list of exception types to convert
    """
    def exceptions2exit_decorator(func):
        @functools.wraps(func)
        def func_wrapper(*args, **kwargs):
            try:
                func(*args, **kwargs)
            except tuple(exception_list) as ex:
                # Imported lazily — presumably to avoid import cycles at
                # module load time.
                from pyscaffold.log import logger
                if logger.level <= logging.DEBUG:
                    # At debug verbosity the user surely wants the trace.
                    traceback.print_exc()
                print(f"ERROR: {ex}")
                sys.exit(1)
        return func_wrapper
    return exceptions2exit_decorator
class ActionNotFound(KeyError):
    """Impossible to find the required action."""
    # The docstring above is reused (minus its trailing period) to build
    # the error message, so ``__doc__`` must never be empty here.

    def __init__(self, name, *args, **kwargs):
        message = ActionNotFound.__doc__[:-1] + f": `{name}`"
        super().__init__(message, *args, **kwargs)
class DirectoryAlreadyExists(RuntimeError):
    """The project directory already exists, but no ``update`` or ``force``
    option was used.
    """
class DirectoryDoesNotExist(RuntimeError):
    """No directory was found to be updated."""
class GitNotInstalled(RuntimeError):
    """PyScaffold requires git to run."""

    # Default error text used when no explicit message is passed.
    DEFAULT_MESSAGE = "Make sure git is installed and working."

    def __init__(self, message=DEFAULT_MESSAGE, *args, **kwargs):
        super().__init__(message, *args, **kwargs)
class GitNotConfigured(RuntimeError):
    """PyScaffold tries to read user.name and user.email from git config."""

    # Default error text (adjacent string literals are concatenated) used
    # when no explicit message is passed.
    DEFAULT_MESSAGE = (
        "Make sure git is configured. Run:\n"
        '  git config --global user.email "you@example.com"\n'
        '  git config --global user.name "Your Name"\n'
        "to set your account's default identity."
    )

    def __init__(self, message=DEFAULT_MESSAGE, *args, **kwargs):
        super().__init__(message, *args, **kwargs)
class GitDirtyWorkspace(RuntimeError):
    """Workspace of git is dirty (has uncommitted changes)."""

    # Default error text; the two adjacent literals are concatenated.
    DEFAULT_MESSAGE = (
        "Your working tree is dirty. Commit your changes first" " or use '--force'."
    )

    def __init__(self, message=DEFAULT_MESSAGE, *args, **kwargs):
        super().__init__(message, *args, **kwargs)
class InvalidIdentifier(RuntimeError):
    """Python requires a specific format for its identifiers.

    https://docs.python.org/3.6/reference/lexical_analysis.html#identifiers
    """
class OldSetuptools(RuntimeError):
    """PyScaffold requires a recent version of setuptools."""

    # Default error text used when no explicit message is passed.
    DEFAULT_MESSAGE = (
        "Your setuptools version is too old (<38.3). "
        "Use `pip install -U setuptools` to upgrade.\n"
        "If you have the deprecated `distribute` package installed "
        "remove it or update to version 0.7.3."
    )

    def __init__(self, message=DEFAULT_MESSAGE, *args, **kwargs):
        super().__init__(message, *args, **kwargs)
class PyScaffoldTooOld(RuntimeError):
    """PyScaffold cannot update a pre 3.0 version."""

    # Default error text used when no explicit message is passed.
    DEFAULT_MESSAGE = (
        "setup.cfg has no section [pyscaffold]! "
        "Are you trying to update a pre 3.0 version?"
    )

    def __init__(self, message=DEFAULT_MESSAGE, *args, **kwargs):
        super().__init__(message, *args, **kwargs)
class NoPyScaffoldProject(RuntimeError):
    """PyScaffold cannot update a project that it hasn't generated."""

    # Default error text used when no explicit message is passed.
    DEFAULT_MESSAGE = "Could not update project. Was it generated with PyScaffold?"

    def __init__(self, message=DEFAULT_MESSAGE, *args, **kwargs):
        super().__init__(message, *args, **kwargs)
class ShellCommandException(RuntimeError):
    """Outputs proper logging when a ShellCommand fails."""
class ImpossibleToFindConfigDir(RuntimeError):
    """An expected error occurred when trying to find the config dir.

    This might be related to not being able to read the $HOME env var in Unix
    systems, or %USERPROFILE% in Windows, or even the username.
    """

    def __init__(self, message=None, *args, **kwargs):
        # Falls back to the class docstring when no message is given, so
        # the docstring above must be kept.
        message = message or self.__class__.__doc__
        super().__init__(message, *args, **kwargs)
class ErrorLoadingExtension(RuntimeError):
    """There was an error loading '{extension}'.

    Please make sure you have installed a version of the extension that is compatible
    with PyScaffold {version}. You can also try uninstalling it.
    """
    # The docstring doubles as the runtime message template formatted in
    # ``__init__``; without it ``cast(str, self.__doc__)`` yields None and
    # ``.format`` crashes.

    def __init__(self, extension: str = "", entry_point: Optional[EntryPoint] = None):
        # Derive a printable extension name from the entry point when no
        # explicit name was provided.
        if entry_point and not extension:
            extension = getattr(entry_point, "module", entry_point.name)
        # Normalize the internal module path to the public package name,
        # e.g. ``pyscaffoldext.foo.extension`` -> ``pyscaffoldext-foo``.
        if extension.endswith(".extension"):
            extension = extension[: -len(".extension")]
        extension = extension.replace("pyscaffoldext.", "pyscaffoldext-")
        message = cast(str, self.__doc__)
        message = message.format(extension=extension, version=pyscaffold_version)
        super().__init__(message)
| true | true |
f730f8239795450f1ada30cddacbcdbea7bc8310 | 4,465 | py | Python | test/functional/wallet_keypool_topup.py | t-bast/bitcoin | 42fedb4acd3cfa813059fcc3f96b2a41f78d9074 | [
"MIT"
] | 213 | 2015-01-25T19:45:22.000Z | 2022-02-24T22:48:03.000Z | test/functional/wallet_keypool_topup.py | t-bast/bitcoin | 42fedb4acd3cfa813059fcc3f96b2a41f78d9074 | [
"MIT"
] | 51 | 2019-11-03T04:00:14.000Z | 2022-03-30T07:17:34.000Z | test/functional/wallet_keypool_topup.py | t-bast/bitcoin | 42fedb4acd3cfa813059fcc3f96b2a41f78d9074 | [
"MIT"
] | 47 | 2018-08-03T04:55:45.000Z | 2022-03-10T07:50:31.000Z | #!/usr/bin/env python3
# Copyright (c) 2017-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test HD Wallet keypool restore function.
Two nodes. Node1 is under test. Node0 is providing transactions and generating blocks.
- Start node1, shutdown and backup wallet.
- Generate 110 keys (enough to drain the keypool). Store key 90 (in the initial keypool) and key 110 (beyond the initial keypool). Send funds to key 90 and key 110.
- Stop node1, clear the datadir, move wallet file back into the datadir and restart node1.
- connect node1 to node0. Verify that they sync and node1 receives its funds."""
import os
import shutil
from test_framework.blocktools import COINBASE_MATURITY
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
)
class KeypoolRestoreTest(BitcoinTestFramework):
    def set_test_params(self):
        # Node 0 provides funds and blocks; nodes 1-3 are the restore
        # targets — presumably one per address type exercised in run_test
        # (legacy, p2sh-segwit, bech32).
        self.setup_clean_chain = True
        self.num_nodes = 4
        # Keypool of 100 on the restore nodes, so generating 110 keys
        # drains the initial pool and forces a top-up.
        self.extra_args = [[], ['-keypool=100'], ['-keypool=100'], ['-keypool=100']]
    def skip_test_if_missing_module(self):
        # Wallet support is optional at build time; skip the test when absent.
        self.skip_if_no_wallet()
def run_test(self):
wallet_path = os.path.join(self.nodes[1].datadir, self.chain, "wallets", self.default_wallet_name, self.wallet_data_filename)
wallet_backup_path = os.path.join(self.nodes[1].datadir, "wallet.bak")
self.generate(self.nodes[0], COINBASE_MATURITY + 1)
self.log.info("Make backup of wallet")
self.stop_node(1)
shutil.copyfile(wallet_path, wallet_backup_path)
self.start_node(1, self.extra_args[1])
self.connect_nodes(0, 1)
self.connect_nodes(0, 2)
self.connect_nodes(0, 3)
for i, output_type in enumerate(["legacy", "p2sh-segwit", "bech32"]):
self.log.info("Generate keys for wallet with address type: {}".format(output_type))
idx = i+1
for _ in range(90):
addr_oldpool = self.nodes[idx].getnewaddress(address_type=output_type)
for _ in range(20):
addr_extpool = self.nodes[idx].getnewaddress(address_type=output_type)
# Make sure we're creating the outputs we expect
address_details = self.nodes[idx].validateaddress(addr_extpool)
if i == 0:
assert not address_details["isscript"] and not address_details["iswitness"]
elif i == 1:
assert address_details["isscript"] and not address_details["iswitness"]
else:
assert not address_details["isscript"] and address_details["iswitness"]
self.log.info("Send funds to wallet")
self.nodes[0].sendtoaddress(addr_oldpool, 10)
self.generate(self.nodes[0], 1)
self.nodes[0].sendtoaddress(addr_extpool, 5)
self.generate(self.nodes[0], 1)
self.sync_blocks()
self.log.info("Restart node with wallet backup")
self.stop_node(idx)
shutil.copyfile(wallet_backup_path, wallet_path)
self.start_node(idx, self.extra_args[idx])
self.connect_nodes(0, idx)
self.sync_all()
self.log.info("Verify keypool is restored and balance is correct")
assert_equal(self.nodes[idx].getbalance(), 15)
assert_equal(self.nodes[idx].listtransactions()[0]['category'], "receive")
# Check that we have marked all keys up to the used keypool key as used
if self.options.descriptors:
if output_type == 'legacy':
assert_equal(self.nodes[idx].getaddressinfo(self.nodes[idx].getnewaddress(address_type=output_type))['hdkeypath'], "m/44'/1'/0'/0/110")
elif output_type == 'p2sh-segwit':
assert_equal(self.nodes[idx].getaddressinfo(self.nodes[idx].getnewaddress(address_type=output_type))['hdkeypath'], "m/49'/1'/0'/0/110")
elif output_type == 'bech32':
assert_equal(self.nodes[idx].getaddressinfo(self.nodes[idx].getnewaddress(address_type=output_type))['hdkeypath'], "m/84'/1'/0'/0/110")
else:
assert_equal(self.nodes[idx].getaddressinfo(self.nodes[idx].getnewaddress(address_type=output_type))['hdkeypath'], "m/0'/0'/110'")
if __name__ == '__main__':
KeypoolRestoreTest().main()
| 47 | 164 | 0.657783 |
import os
import shutil
from test_framework.blocktools import COINBASE_MATURITY
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
)
class KeypoolRestoreTest(BitcoinTestFramework):
    """Keypool-restore test (comment-stripped duplicate column of the copy above).

    Backs up a fresh wallet, drains 110 keys, funds keys 90 and 110,
    restores the stale backup and checks that both payments and the
    topped-up keypool are recovered.
    """
    def set_test_params(self):
        self.setup_clean_chain = True
        self.num_nodes = 4
        self.extra_args = [[], ['-keypool=100'], ['-keypool=100'], ['-keypool=100']]
    def skip_test_if_missing_module(self):
        self.skip_if_no_wallet()
    def run_test(self):
        # NOTE(review): both paths reference nodes[1] even though nodes 2 and 3
        # are also "restored" in the loop below — confirm the restore targets
        # the node under test.
        wallet_path = os.path.join(self.nodes[1].datadir, self.chain, "wallets", self.default_wallet_name, self.wallet_data_filename)
        wallet_backup_path = os.path.join(self.nodes[1].datadir, "wallet.bak")
        self.generate(self.nodes[0], COINBASE_MATURITY + 1)
        self.log.info("Make backup of wallet")
        self.stop_node(1)
        shutil.copyfile(wallet_path, wallet_backup_path)
        self.start_node(1, self.extra_args[1])
        self.connect_nodes(0, 1)
        self.connect_nodes(0, 2)
        self.connect_nodes(0, 3)
        for i, output_type in enumerate(["legacy", "p2sh-segwit", "bech32"]):
            self.log.info("Generate keys for wallet with address type: {}".format(output_type))
            idx = i+1
            # 90 keys inside the initial keypool, then 20 beyond it.
            for _ in range(90):
                addr_oldpool = self.nodes[idx].getnewaddress(address_type=output_type)
            for _ in range(20):
                addr_extpool = self.nodes[idx].getnewaddress(address_type=output_type)
            address_details = self.nodes[idx].validateaddress(addr_extpool)
            if i == 0:
                assert not address_details["isscript"] and not address_details["iswitness"]
            elif i == 1:
                assert address_details["isscript"] and not address_details["iswitness"]
            else:
                assert not address_details["isscript"] and address_details["iswitness"]
            self.log.info("Send funds to wallet")
            self.nodes[0].sendtoaddress(addr_oldpool, 10)
            self.generate(self.nodes[0], 1)
            self.nodes[0].sendtoaddress(addr_extpool, 5)
            self.generate(self.nodes[0], 1)
            self.sync_blocks()
            self.log.info("Restart node with wallet backup")
            self.stop_node(idx)
            shutil.copyfile(wallet_backup_path, wallet_path)
            self.start_node(idx, self.extra_args[idx])
            self.connect_nodes(0, idx)
            self.sync_all()
            self.log.info("Verify keypool is restored and balance is correct")
            assert_equal(self.nodes[idx].getbalance(), 15)
            assert_equal(self.nodes[idx].listtransactions()[0]['category'], "receive")
            # Check that we have marked all keys up to the used keypool key as used
            if self.options.descriptors:
                if output_type == 'legacy':
                    assert_equal(self.nodes[idx].getaddressinfo(self.nodes[idx].getnewaddress(address_type=output_type))['hdkeypath'], "m/44'/1'/0'/0/110")
                elif output_type == 'p2sh-segwit':
                    assert_equal(self.nodes[idx].getaddressinfo(self.nodes[idx].getnewaddress(address_type=output_type))['hdkeypath'], "m/49'/1'/0'/0/110")
                elif output_type == 'bech32':
                    assert_equal(self.nodes[idx].getaddressinfo(self.nodes[idx].getnewaddress(address_type=output_type))['hdkeypath'], "m/84'/1'/0'/0/110")
            else:
                assert_equal(self.nodes[idx].getaddressinfo(self.nodes[idx].getnewaddress(address_type=output_type))['hdkeypath'], "m/0'/0'/110'")
if __name__ == '__main__':
    KeypoolRestoreTest().main()
| true | true |
f730f8777658d70018bebdae1f628b30914e95b6 | 14,193 | py | Python | tests/workflows/test_imaging_component_workflows.py | mfarrera/algorithm-reference-library | 7331812aa7cc3501a15d3392cecf6ea65b43f91e | [
"Apache-2.0"
] | null | null | null | tests/workflows/test_imaging_component_workflows.py | mfarrera/algorithm-reference-library | 7331812aa7cc3501a15d3392cecf6ea65b43f91e | [
"Apache-2.0"
] | null | null | null | tests/workflows/test_imaging_component_workflows.py | mfarrera/algorithm-reference-library | 7331812aa7cc3501a15d3392cecf6ea65b43f91e | [
"Apache-2.0"
] | null | null | null | """ Unit tests for pipelines expressed via dask.delayed
"""
import logging
import sys
import unittest
import numpy
from astropy import units as u
from astropy.coordinates import SkyCoord
from data_models.polarisation import PolarisationFrame
from processing_components.image.operations import export_image_to_fits, smooth_image
from processing_components.imaging.base import predict_skycomponent_visibility
from processing_components.skycomponent.operations import find_skycomponents, find_nearest_skycomponent, insert_skycomponent
from processing_components.simulation.testing_support import create_named_configuration, ingest_unittest_visibility, create_unittest_model, \
insert_unittest_errors, create_unittest_components
from workflows.arlexecute.execution_support.arlexecute import arlexecute
from workflows.arlexecute.imaging.imaging_workflows import zero_vislist_workflow, predict_workflow, \
invert_workflow, subtract_vislist_workflow
log = logging.getLogger(__name__)
log.setLevel(logging.DEBUG)
log.addHandler(logging.StreamHandler(sys.stdout))
log.addHandler(logging.StreamHandler(sys.stderr))
class TestImaging(unittest.TestCase):
    """Tests for the arlexecute predict/invert imaging workflows.

    Each test simulates a small LOW observation containing known
    skycomponents, runs a predict or invert workflow in a particular
    imaging context ('2d', 'facets', 'timeslice', 'wstack', or a
    combination), and checks that the components are recovered at the
    expected flux and position.

    Fix: the assertion message in ``_checkcomponents`` previously read
    ``"... %.3f pixels" % separation / cellsize`` — ``%`` and ``/`` have
    equal precedence and bind left-to-right, so on assertion failure the
    formatted *string* was divided by a float, raising TypeError instead
    of showing the message.  The division is now parenthesised.
    """
    def setUp(self):
        # Resolve the directory used for exported FITS files.
        from data_models.parameters import arl_path
        self.dir = arl_path('test_results')
    def tearDown(self):
        # Shut down the arlexecute client after each test.
        arlexecute.close()
    def actualSetUp(self, add_errors=False, freqwin=1, block=False, dospectral=True, dopol=False, zerow=False):
        """Build the simulated visibilities, model images and components.

        :param add_errors: insert simulated errors (only effective with block=True)
        :param freqwin: number of frequency windows
        :param block: use blocked visibilities
        :param dospectral: give the component flux a spectral index
        :param dopol: simulate full linear polarisation
        :param zerow: force the w coordinate to zero
        """
        arlexecute.set_client(use_dask=False)
        self.npixel = 256
        self.low = create_named_configuration('LOWBD2', rmax=750.0)
        self.freqwin = freqwin
        self.vis_list = list()
        self.ntimes = 5
        self.times = numpy.linspace(-3.0, +3.0, self.ntimes) * numpy.pi / 12.0
        if freqwin > 1:
            self.frequency = numpy.linspace(0.8e8, 1.2e8, self.freqwin)
            self.channelwidth = numpy.array(freqwin * [self.frequency[1] - self.frequency[0]])
        else:
            self.frequency = numpy.array([0.8e8])
            self.channelwidth = numpy.array([1e6])
        if dopol:
            self.vis_pol = PolarisationFrame('linear')
            self.image_pol = PolarisationFrame('stokesIQUV')
            f = numpy.array([100.0, 20.0, -10.0, 1.0])
        else:
            self.vis_pol = PolarisationFrame('stokesI')
            self.image_pol = PolarisationFrame('stokesI')
            f = numpy.array([100.0])
        if dospectral:
            flux = numpy.array([f * numpy.power(freq / 1e8, -0.7) for freq in self.frequency])
        else:
            flux = numpy.array([f])
        self.phasecentre = SkyCoord(ra=+180.0 * u.deg, dec=-60.0 * u.deg, frame='icrs', equinox='J2000')
        # NOTE: in the comprehensions below the loop variable ``freqwin``
        # shadows the ``freqwin`` parameter — inside them it is the
        # per-channel index, not the channel count.
        self.vis_list = [arlexecute.execute(ingest_unittest_visibility)(self.low,
                                                                       [self.frequency[freqwin]],
                                                                       [self.channelwidth[freqwin]],
                                                                       self.times,
                                                                       self.vis_pol,
                                                                       self.phasecentre, block=block,
                                                                       zerow=zerow)
                         for freqwin, _ in enumerate(self.frequency)]
        self.model_graph = [arlexecute.execute(create_unittest_model, nout=freqwin)(self.vis_list[freqwin],
                                                                                    self.image_pol,
                                                                                    npixel=self.npixel)
                            for freqwin, _ in enumerate(self.frequency)]
        self.components_graph = [arlexecute.execute(create_unittest_components)(self.model_graph[freqwin],
                                                                                flux[freqwin, :][numpy.newaxis, :])
                                 for freqwin, _ in enumerate(self.frequency)]
        self.model_graph = [arlexecute.execute(insert_skycomponent, nout=1)(self.model_graph[freqwin],
                                                                            self.components_graph[freqwin])
                            for freqwin, _ in enumerate(self.frequency)]
        self.vis_list = [arlexecute.execute(predict_skycomponent_visibility)(self.vis_list[freqwin],
                                                                             self.components_graph[freqwin])
                         for freqwin, _ in enumerate(self.frequency)]
        # Calculate the model convolved with a Gaussian.
        self.model = arlexecute.compute(self.model_graph[0], sync=True)
        self.cmodel = smooth_image(self.model)
        export_image_to_fits(self.model, '%s/test_imaging_model.fits' % self.dir)
        export_image_to_fits(self.cmodel, '%s/test_imaging_cmodel.fits' % self.dir)
        if add_errors and block:
            self.vis_list = [arlexecute.execute(insert_unittest_errors)(self.vis_list[i])
                             for i, _ in enumerate(self.frequency)]
        self.vis = arlexecute.compute(self.vis_list[0], sync=True)
        self.components = arlexecute.compute(self.components_graph[0], sync=True)
    def test_time_setup(self):
        # Exercises actualSetUp on its own (useful for timing the fixture).
        self.actualSetUp()
    def _checkcomponents(self, dirty, fluxthreshold=0.6, positionthreshold=1.0):
        """Check components recovered from ``dirty`` against the inputs."""
        comps = find_skycomponents(dirty, fwhm=1.0, threshold=10 * fluxthreshold, npixels=5)
        assert len(comps) == len(self.components), "Different number of components found: original %d, recovered %d" % \
                                                   (len(self.components), len(comps))
        cellsize = abs(dirty.wcs.wcs.cdelt[0])
        for comp in comps:
            # Check for agreement in direction
            ocomp, separation = find_nearest_skycomponent(comp.direction, self.components)
            # The division must be parenthesised: without the parentheses the
            # message string was formatted first and then divided by cellsize,
            # raising TypeError when the assertion failed.
            assert separation / cellsize < positionthreshold, \
                "Component differs in position %.3f pixels" % (separation / cellsize)
    def _predict_base(self, context='2d', extra='', fluxthreshold=1.0, facets=1, vis_slices=1, **kwargs):
        """Predict visibilities from the model, subtract the input
        visibilities, image the residual and check it is below threshold."""
        vis_list = zero_vislist_workflow(self.vis_list)
        vis_list = predict_workflow(vis_list, self.model_graph, context=context,
                                    vis_slices=vis_slices, facets=facets, **kwargs)
        vis_list = subtract_vislist_workflow(self.vis_list, vis_list)[0]
        vis_list = arlexecute.compute(vis_list, sync=True)
        dirty = invert_workflow([vis_list], [self.model_graph[0]], context='2d', dopsf=False,
                                normalize=True)[0]
        dirty = arlexecute.compute(dirty, sync=True)
        assert numpy.max(numpy.abs(dirty[0].data)), "Residual image is empty"
        export_image_to_fits(dirty[0], '%s/test_imaging_predict_%s%s_%s_dirty.fits' %
                             (self.dir, context, extra, arlexecute.type()))
        maxabs = numpy.max(numpy.abs(dirty[0].data))
        assert maxabs < fluxthreshold, "Error %.3f greater than fluxthreshold %.3f " % (maxabs, fluxthreshold)
    def _invert_base(self, context, extra='', fluxthreshold=1.0, positionthreshold=1.0, check_components=True,
                     facets=1, vis_slices=1, **kwargs):
        """Make a dirty image in the given context and optionally verify
        the recovered components."""
        dirty = invert_workflow(self.vis_list, self.model_graph, context=context,
                                dopsf=False, normalize=True, facets=facets, vis_slices=vis_slices,
                                **kwargs)[0]
        dirty = arlexecute.compute(dirty, sync=True)
        export_image_to_fits(dirty[0], '%s/test_imaging_invert_%s%s_%s_dirty.fits' %
                             (self.dir, context, extra, arlexecute.type()))
        assert numpy.max(numpy.abs(dirty[0].data)), "Image is empty"
        if check_components:
            self._checkcomponents(dirty[0], fluxthreshold, positionthreshold)
    # ----- predict tests, one per imaging context -----
    def test_predict_2d(self):
        self.actualSetUp(zerow=True)
        self._predict_base(context='2d')
    @unittest.skip("Facets requires overlap")
    def test_predict_facets(self):
        self.actualSetUp()
        self._predict_base(context='facets', fluxthreshold=15.0, facets=4)
    @unittest.skip("Timeslice predict needs better interpolation")
    def test_predict_facets_timeslice(self):
        self.actualSetUp()
        self._predict_base(context='facets_timeslice', fluxthreshold=19.0, facets=8, vis_slices=self.ntimes)
    @unittest.skip("Facets requires overlap")
    def test_predict_facets_wprojection(self):
        self.actualSetUp()
        self._predict_base(context='facets', extra='_wprojection', facets=8, wstep=8.0, fluxthreshold=15.0,
                           oversampling=2)
    @unittest.skip("Correcting twice?")
    def test_predict_facets_wstack(self):
        self.actualSetUp()
        self._predict_base(context='facets_wstack', fluxthreshold=15.0, facets=8, vis_slices=41)
    @unittest.skip("Timeslice predict needs better interpolation")
    def test_predict_timeslice(self):
        self.actualSetUp()
        self._predict_base(context='timeslice', fluxthreshold=19.0, vis_slices=self.ntimes)
    @unittest.skip("Timeslice predict needs better interpolation")
    def test_predict_timeslice_wprojection(self):
        self.actualSetUp()
        self._predict_base(context='timeslice', extra='_wprojection', fluxthreshold=3.0, wstep=10.0,
                           vis_slices=self.ntimes, oversampling=2)
    def test_predict_wprojection(self):
        self.actualSetUp()
        self._predict_base(context='2d', extra='_wprojection', wstep=10.0, fluxthreshold=2.0, oversampling=2)
    def test_predict_wstack(self):
        self.actualSetUp()
        self._predict_base(context='wstack', fluxthreshold=2.0, vis_slices=41)
    def test_predict_wstack_wprojection(self):
        self.actualSetUp()
        self._predict_base(context='wstack', extra='_wprojection', fluxthreshold=3.0, wstep=2.5, vis_slices=11,
                           oversampling=2)
    def test_predict_wstack_spectral(self):
        self.actualSetUp(dospectral=True)
        self._predict_base(context='wstack', extra='_spectral', fluxthreshold=4.0, vis_slices=41)
    def test_predict_wstack_spectral_pol(self):
        self.actualSetUp(dospectral=True, dopol=True)
        self._predict_base(context='wstack', extra='_spectral', fluxthreshold=4.0, vis_slices=41)
    # ----- invert tests, one per imaging context -----
    def test_invert_2d(self):
        self.actualSetUp(zerow=True)
        self._invert_base(context='2d', positionthreshold=2.0, check_components=False)
    def test_invert_facets(self):
        self.actualSetUp()
        self._invert_base(context='facets', positionthreshold=2.0, check_components=True, facets=8)
    @unittest.skip("Correcting twice?")
    def test_invert_facets_timeslice(self):
        self.actualSetUp()
        # NOTE(review): ``flux_threshold`` (with underscore) does not match the
        # ``fluxthreshold`` parameter of _invert_base and is forwarded via
        # **kwargs — confirm this is intended (test is currently skipped).
        self._invert_base(context='facets_timeslice', check_components=True, vis_slices=self.ntimes,
                          positionthreshold=5.0, flux_threshold=1.0, facets=8)
    def test_invert_facets_wprojection(self):
        self.actualSetUp()
        self._invert_base(context='facets', extra='_wprojection', check_components=True,
                          positionthreshold=2.0, wstep=10.0, oversampling=2, facets=4)
    @unittest.skip("Correcting twice?")
    def test_invert_facets_wstack(self):
        self.actualSetUp()
        self._invert_base(context='facets_wstack', positionthreshold=1.0, check_components=False, facets=4,
                          vis_slices=11)
    def test_invert_timeslice(self):
        self.actualSetUp()
        self._invert_base(context='timeslice', positionthreshold=1.0, check_components=True,
                          vis_slices=self.ntimes)
    def test_invert_timeslice_wprojection(self):
        self.actualSetUp()
        self._invert_base(context='timeslice', extra='_wprojection', positionthreshold=1.0,
                          check_components=True, wstep=20.0, vis_slices=self.ntimes, oversampling=2)
    def test_invert_wprojection(self):
        self.actualSetUp()
        self._invert_base(context='2d', extra='_wprojection', positionthreshold=2.0, wstep=10.0, oversampling=2)
    def test_invert_wprojection_wstack(self):
        self.actualSetUp()
        self._invert_base(context='wstack', extra='_wprojection', positionthreshold=1.0, wstep=2.5, vis_slices=11,
                          oversampling=2)
    def test_invert_wstack(self):
        self.actualSetUp()
        self._invert_base(context='wstack', positionthreshold=1.0, vis_slices=41)
    def test_invert_wstack_spectral(self):
        self.actualSetUp(dospectral=True)
        self._invert_base(context='wstack', extra='_spectral', positionthreshold=2.0,
                          vis_slices=41)
    def test_invert_wstack_spectral_pol(self):
        self.actualSetUp(dospectral=True, dopol=True)
        self._invert_base(context='wstack', extra='_spectral_pol', positionthreshold=2.0,
                          vis_slices=41)
    def test_weighting(self):
        # Image without weighting applied; output inspected via FITS export.
        self.actualSetUp()
        context = 'wstack'
        vis_slices = 41
        facets = 1
        dirty_graph = invert_workflow(self.vis_list, self.model_graph, context=context,
                                      dopsf=False, normalize=True, facets=facets, vis_slices=vis_slices)
        dirty = arlexecute.compute(dirty_graph[0], sync=True)
        export_image_to_fits(dirty[0], '%s/test_imaging_noweighting_%s_dirty.fits' % (self.dir,
                                                                                      arlexecute.type()))
if __name__ == '__main__':
    unittest.main()
| 47.949324 | 141 | 0.606989 |
import logging
import sys
import unittest
import numpy
from astropy import units as u
from astropy.coordinates import SkyCoord
from data_models.polarisation import PolarisationFrame
from processing_components.image.operations import export_image_to_fits, smooth_image
from processing_components.imaging.base import predict_skycomponent_visibility
from processing_components.skycomponent.operations import find_skycomponents, find_nearest_skycomponent, insert_skycomponent
from processing_components.simulation.testing_support import create_named_configuration, ingest_unittest_visibility, create_unittest_model, \
insert_unittest_errors, create_unittest_components
from workflows.arlexecute.execution_support.arlexecute import arlexecute
from workflows.arlexecute.imaging.imaging_workflows import zero_vislist_workflow, predict_workflow, \
invert_workflow, subtract_vislist_workflow
log = logging.getLogger(__name__)
log.setLevel(logging.DEBUG)
log.addHandler(logging.StreamHandler(sys.stdout))
log.addHandler(logging.StreamHandler(sys.stderr))
class TestImaging(unittest.TestCase):
    """Imaging-workflow tests (comment-stripped duplicate column of the
    annotated copy above): predict/invert workflows in various contexts
    over a simulated LOW observation with known skycomponents."""
    def setUp(self):
        from data_models.parameters import arl_path
        self.dir = arl_path('test_results')
    def tearDown(self):
        arlexecute.close()
    def actualSetUp(self, add_errors=False, freqwin=1, block=False, dospectral=True, dopol=False, zerow=False):
        arlexecute.set_client(use_dask=False)
        self.npixel = 256
        self.low = create_named_configuration('LOWBD2', rmax=750.0)
        self.freqwin = freqwin
        self.vis_list = list()
        self.ntimes = 5
        self.times = numpy.linspace(-3.0, +3.0, self.ntimes) * numpy.pi / 12.0
        if freqwin > 1:
            self.frequency = numpy.linspace(0.8e8, 1.2e8, self.freqwin)
            self.channelwidth = numpy.array(freqwin * [self.frequency[1] - self.frequency[0]])
        else:
            self.frequency = numpy.array([0.8e8])
            self.channelwidth = numpy.array([1e6])
        if dopol:
            self.vis_pol = PolarisationFrame('linear')
            self.image_pol = PolarisationFrame('stokesIQUV')
            f = numpy.array([100.0, 20.0, -10.0, 1.0])
        else:
            self.vis_pol = PolarisationFrame('stokesI')
            self.image_pol = PolarisationFrame('stokesI')
            f = numpy.array([100.0])
        if dospectral:
            flux = numpy.array([f * numpy.power(freq / 1e8, -0.7) for freq in self.frequency])
        else:
            flux = numpy.array([f])
        self.phasecentre = SkyCoord(ra=+180.0 * u.deg, dec=-60.0 * u.deg, frame='icrs', equinox='J2000')
        # NOTE: the comprehension variable ``freqwin`` below shadows the
        # ``freqwin`` parameter; inside the comprehensions it is the
        # per-channel index.
        self.vis_list = [arlexecute.execute(ingest_unittest_visibility)(self.low,
                                                                       [self.frequency[freqwin]],
                                                                       [self.channelwidth[freqwin]],
                                                                       self.times,
                                                                       self.vis_pol,
                                                                       self.phasecentre, block=block,
                                                                       zerow=zerow)
                         for freqwin, _ in enumerate(self.frequency)]
        self.model_graph = [arlexecute.execute(create_unittest_model, nout=freqwin)(self.vis_list[freqwin],
                                                                                    self.image_pol,
                                                                                    npixel=self.npixel)
                            for freqwin, _ in enumerate(self.frequency)]
        self.components_graph = [arlexecute.execute(create_unittest_components)(self.model_graph[freqwin],
                                                                                flux[freqwin, :][numpy.newaxis, :])
                                 for freqwin, _ in enumerate(self.frequency)]
        self.model_graph = [arlexecute.execute(insert_skycomponent, nout=1)(self.model_graph[freqwin],
                                                                            self.components_graph[freqwin])
                            for freqwin, _ in enumerate(self.frequency)]
        self.vis_list = [arlexecute.execute(predict_skycomponent_visibility)(self.vis_list[freqwin],
                                                                             self.components_graph[freqwin])
                         for freqwin, _ in enumerate(self.frequency)]
        self.model = arlexecute.compute(self.model_graph[0], sync=True)
        self.cmodel = smooth_image(self.model)
        export_image_to_fits(self.model, '%s/test_imaging_model.fits' % self.dir)
        export_image_to_fits(self.cmodel, '%s/test_imaging_cmodel.fits' % self.dir)
        if add_errors and block:
            self.vis_list = [arlexecute.execute(insert_unittest_errors)(self.vis_list[i])
                             for i, _ in enumerate(self.frequency)]
        self.vis = arlexecute.compute(self.vis_list[0], sync=True)
        self.components = arlexecute.compute(self.components_graph[0], sync=True)
    def test_time_setup(self):
        self.actualSetUp()
    def _checkcomponents(self, dirty, fluxthreshold=0.6, positionthreshold=1.0):
        comps = find_skycomponents(dirty, fwhm=1.0, threshold=10 * fluxthreshold, npixels=5)
        assert len(comps) == len(self.components), "Different number of components found: original %d, recovered %d" % \
                                                   (len(self.components), len(comps))
        cellsize = abs(dirty.wcs.wcs.cdelt[0])
        for comp in comps:
            ocomp, separation = find_nearest_skycomponent(comp.direction, self.components)
            # NOTE(review): the message expression below evaluates as
            # ("..." % separation) / cellsize, i.e. str / float — it raises
            # TypeError instead of printing the message when the assertion
            # fails.  The division should be parenthesised.
            assert separation / cellsize < positionthreshold, "Component differs in position %.3f pixels" % \
                                                              separation / cellsize
    def _predict_base(self, context='2d', extra='', fluxthreshold=1.0, facets=1, vis_slices=1, **kwargs):
        vis_list = zero_vislist_workflow(self.vis_list)
        vis_list = predict_workflow(vis_list, self.model_graph, context=context,
                                    vis_slices=vis_slices, facets=facets, **kwargs)
        vis_list = subtract_vislist_workflow(self.vis_list, vis_list)[0]
        vis_list = arlexecute.compute(vis_list, sync=True)
        dirty = invert_workflow([vis_list], [self.model_graph[0]], context='2d', dopsf=False,
                                normalize=True)[0]
        dirty = arlexecute.compute(dirty, sync=True)
        assert numpy.max(numpy.abs(dirty[0].data)), "Residual image is empty"
        export_image_to_fits(dirty[0], '%s/test_imaging_predict_%s%s_%s_dirty.fits' %
                             (self.dir, context, extra, arlexecute.type()))
        maxabs = numpy.max(numpy.abs(dirty[0].data))
        assert maxabs < fluxthreshold, "Error %.3f greater than fluxthreshold %.3f " % (maxabs, fluxthreshold)
    def _invert_base(self, context, extra='', fluxthreshold=1.0, positionthreshold=1.0, check_components=True,
                     facets=1, vis_slices=1, **kwargs):
        dirty = invert_workflow(self.vis_list, self.model_graph, context=context,
                                dopsf=False, normalize=True, facets=facets, vis_slices=vis_slices,
                                **kwargs)[0]
        dirty = arlexecute.compute(dirty, sync=True)
        export_image_to_fits(dirty[0], '%s/test_imaging_invert_%s%s_%s_dirty.fits' %
                             (self.dir, context, extra, arlexecute.type()))
        assert numpy.max(numpy.abs(dirty[0].data)), "Image is empty"
        if check_components:
            self._checkcomponents(dirty[0], fluxthreshold, positionthreshold)
    def test_predict_2d(self):
        self.actualSetUp(zerow=True)
        self._predict_base(context='2d')
    @unittest.skip("Facets requires overlap")
    def test_predict_facets(self):
        self.actualSetUp()
        self._predict_base(context='facets', fluxthreshold=15.0, facets=4)
    @unittest.skip("Timeslice predict needs better interpolation")
    def test_predict_facets_timeslice(self):
        self.actualSetUp()
        self._predict_base(context='facets_timeslice', fluxthreshold=19.0, facets=8, vis_slices=self.ntimes)
    @unittest.skip("Facets requires overlap")
    def test_predict_facets_wprojection(self):
        self.actualSetUp()
        self._predict_base(context='facets', extra='_wprojection', facets=8, wstep=8.0, fluxthreshold=15.0,
                           oversampling=2)
    @unittest.skip("Correcting twice?")
    def test_predict_facets_wstack(self):
        self.actualSetUp()
        self._predict_base(context='facets_wstack', fluxthreshold=15.0, facets=8, vis_slices=41)
    @unittest.skip("Timeslice predict needs better interpolation")
    def test_predict_timeslice(self):
        self.actualSetUp()
        self._predict_base(context='timeslice', fluxthreshold=19.0, vis_slices=self.ntimes)
    @unittest.skip("Timeslice predict needs better interpolation")
    def test_predict_timeslice_wprojection(self):
        self.actualSetUp()
        self._predict_base(context='timeslice', extra='_wprojection', fluxthreshold=3.0, wstep=10.0,
                           vis_slices=self.ntimes, oversampling=2)
    def test_predict_wprojection(self):
        self.actualSetUp()
        self._predict_base(context='2d', extra='_wprojection', wstep=10.0, fluxthreshold=2.0, oversampling=2)
    def test_predict_wstack(self):
        self.actualSetUp()
        self._predict_base(context='wstack', fluxthreshold=2.0, vis_slices=41)
    def test_predict_wstack_wprojection(self):
        self.actualSetUp()
        self._predict_base(context='wstack', extra='_wprojection', fluxthreshold=3.0, wstep=2.5, vis_slices=11,
                           oversampling=2)
    def test_predict_wstack_spectral(self):
        self.actualSetUp(dospectral=True)
        self._predict_base(context='wstack', extra='_spectral', fluxthreshold=4.0, vis_slices=41)
    def test_predict_wstack_spectral_pol(self):
        self.actualSetUp(dospectral=True, dopol=True)
        self._predict_base(context='wstack', extra='_spectral', fluxthreshold=4.0, vis_slices=41)
    def test_invert_2d(self):
        self.actualSetUp(zerow=True)
        self._invert_base(context='2d', positionthreshold=2.0, check_components=False)
    def test_invert_facets(self):
        self.actualSetUp()
        self._invert_base(context='facets', positionthreshold=2.0, check_components=True, facets=8)
    @unittest.skip("Correcting twice?")
    def test_invert_facets_timeslice(self):
        self.actualSetUp()
        self._invert_base(context='facets_timeslice', check_components=True, vis_slices=self.ntimes,
                          positionthreshold=5.0, flux_threshold=1.0, facets=8)
    def test_invert_facets_wprojection(self):
        self.actualSetUp()
        self._invert_base(context='facets', extra='_wprojection', check_components=True,
                          positionthreshold=2.0, wstep=10.0, oversampling=2, facets=4)
    @unittest.skip("Correcting twice?")
    def test_invert_facets_wstack(self):
        self.actualSetUp()
        self._invert_base(context='facets_wstack', positionthreshold=1.0, check_components=False, facets=4,
                          vis_slices=11)
    def test_invert_timeslice(self):
        self.actualSetUp()
        self._invert_base(context='timeslice', positionthreshold=1.0, check_components=True,
                          vis_slices=self.ntimes)
    def test_invert_timeslice_wprojection(self):
        self.actualSetUp()
        self._invert_base(context='timeslice', extra='_wprojection', positionthreshold=1.0,
                          check_components=True, wstep=20.0, vis_slices=self.ntimes, oversampling=2)
    def test_invert_wprojection(self):
        self.actualSetUp()
        self._invert_base(context='2d', extra='_wprojection', positionthreshold=2.0, wstep=10.0, oversampling=2)
    def test_invert_wprojection_wstack(self):
        self.actualSetUp()
        self._invert_base(context='wstack', extra='_wprojection', positionthreshold=1.0, wstep=2.5, vis_slices=11,
                          oversampling=2)
    def test_invert_wstack(self):
        self.actualSetUp()
        self._invert_base(context='wstack', positionthreshold=1.0, vis_slices=41)
    def test_invert_wstack_spectral(self):
        self.actualSetUp(dospectral=True)
        self._invert_base(context='wstack', extra='_spectral', positionthreshold=2.0,
                          vis_slices=41)
    def test_invert_wstack_spectral_pol(self):
        self.actualSetUp(dospectral=True, dopol=True)
        self._invert_base(context='wstack', extra='_spectral_pol', positionthreshold=2.0,
                          vis_slices=41)
    def test_weighting(self):
        self.actualSetUp()
        context = 'wstack'
        vis_slices = 41
        facets = 1
        dirty_graph = invert_workflow(self.vis_list, self.model_graph, context=context,
                                      dopsf=False, normalize=True, facets=facets, vis_slices=vis_slices)
        dirty = arlexecute.compute(dirty_graph[0], sync=True)
        export_image_to_fits(dirty[0], '%s/test_imaging_noweighting_%s_dirty.fits' % (self.dir,
                                                                                      arlexecute.type()))
if __name__ == '__main__':
    unittest.main()
| true | true |
f730f92c5485f29626ae1c7b418475779522b722 | 1,420 | py | Python | channel.py | michael-riess/language-proctor-bot | 86eb22e298a86ecd79e484c7a3ece2797fe5f027 | [
"MIT"
] | null | null | null | channel.py | michael-riess/language-proctor-bot | 86eb22e298a86ecd79e484c7a3ece2797fe5f027 | [
"MIT"
] | 1 | 2021-03-03T12:37:20.000Z | 2021-03-03T15:37:28.000Z | channel.py | michael-riess/language-proctor-bot | 86eb22e298a86ecd79e484c7a3ece2797fe5f027 | [
"MIT"
] | null | null | null | from replit import db
from locales import locales
# ========================================================
# Posts message to discord channel (translated according to channel)
# ========================================================
async def say(channel, message, arguments=None):
    """Send *message* to *channel*, translated to the channel's language.

    ``message`` is a locale key; ``arguments``, when given, are applied
    with ``str.format`` to the localized template before sending.
    """
    localized = locales.get(get_channel_lang(channel.id), message)
    if arguments is None:
        await channel.send(localized)
    else:
        await channel.send(localized.format(*arguments))
# ========================================================
# Sets the language to montitor for the given channel
# ========================================================
def set_channel_lang(channel, code):
    """Persist *code* as the language to monitor for *channel*."""
    db[channel] = code
# ========================================================
# Gets the language to montitor for the given channel
# ========================================================
def get_channel_lang(channel):
    """Return the stored language code for *channel*, or ``'en'`` if unset.

    Falls back to English so callers always receive a usable locale key.
    """
    try:
        return db[channel]
    except KeyError:
        # Previously a bare ``except:`` which also swallowed
        # KeyboardInterrupt/SystemExit; only a missing key should
        # trigger the English fallback.
        return 'en'
# ========================================================
# Removes channel lanaguage association
# ========================================================
def remove_channel_lang(channel):
    """Drop the stored language association for *channel*, if any."""
    # Guard inlined from channel_has_lang(): only delete when present.
    if channel in db.keys():
        del db[channel]
# ========================================================
# Determine if channel has language
# ========================================================
def channel_has_lang(channel):
    """Return True if a language code is stored for *channel*."""
    # Membership test on the mapping directly; the ``.keys()`` view was
    # redundant.  (Dataset-metadata residue fused onto this line removed.)
    return channel in db
from locales import locales
# --- Comment-stripped duplicate column (dataset residue) of the channel
# helpers defined above. ---
async def say(channel, message, arguments=None):
    """Send *message* to *channel*, localized to the channel's language."""
    message = locales.get(get_channel_lang(channel.id), message)
    if arguments is not None:
        await channel.send(message.format(*arguments))
    else:
        await channel.send(message)
def set_channel_lang(channel, code):
    """Persist *code* as the monitored language for *channel*."""
    db[channel] = code
def get_channel_lang(channel):
    """Return the stored language for *channel*, defaulting to 'en'."""
    try:
        return db[channel]
    # NOTE(review): bare ``except`` also swallows KeyboardInterrupt and
    # SystemExit; ``except KeyError`` would be safer.
    except:
        return 'en'
def remove_channel_lang(channel):
    """Delete *channel*'s language association, if one exists."""
    if channel_has_lang(channel):
        del db[channel]
def channel_has_lang(channel):
    """True when *channel* has a stored language (trailing tokens are
    fused dataset-metadata residue, not code)."""
    return channel in db.keys() | true | true
f730fa6f7583c67acbf528ee804850ee2c0663f1 | 1,663 | py | Python | modules/demo/ipc/graph/fa2.py | mzegar/node-rapids | 5b2c3dafbec5f17dcedf8147a8f668a986622cc4 | [
"Apache-2.0"
] | 39 | 2021-08-04T17:30:03.000Z | 2022-03-27T18:20:05.000Z | modules/demo/ipc/graph/fa2.py | love-lena/node-rapids | 27c9e2468372df4fae3779d859089b54c8d32c4f | [
"Apache-2.0"
] | 77 | 2021-03-12T18:04:01.000Z | 2021-08-02T20:07:24.000Z | modules/demo/ipc/graph/fa2.py | love-lena/node-rapids | 27c9e2468372df4fae3779d859089b54c8d32c4f | [
"Apache-2.0"
] | 7 | 2021-03-12T19:42:40.000Z | 2021-07-27T21:57:26.000Z | # Copyright (c) 2020-2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
warnings.filterwarnings("ignore", category=UserWarning)
warnings.filterwarnings("ignore", category=DeprecationWarning)
import python.test_data as datasets
from python.callback import GraphZmqCallback
import zmq
import cudf
import cugraph
import asyncio
import zmq.asyncio
# Build the synthetic demo graph once at import time.  ``nodes`` and
# ``edges`` are dataframes produced by the test-data helper — presumably
# cudf frames given the column indexing below; confirm against
# make_synthetic_dataset().
graph, nodes, edges = datasets.make_synthetic_dataset()
print("num_nodes:", graph.number_of_nodes())
print("num_edges:", graph.number_of_edges())
async def main(zmq_ctx):
    """Run ForceAtlas2 on the demo graph, streaming layouts over ZMQ."""
    def map_positions(pos):
        # Wrap the raw positions in a two-column float32 dataframe.
        return cudf.DataFrame(pos, columns=["x", "y"]).astype("float32")
    callback = GraphZmqCallback(
        zmq_ctx=zmq_ctx,
        map_positions=map_positions,
        nodes=nodes[["id", "color", "size"]],
        edges=edges[["edge", "bundle", "color"]],
        edge_col_names=["edge", "color", "bundle"],
        node_col_names=["id", "color", "size", "x", "y"],
    )
    # The callback is invoked by cugraph with intermediate positions.
    cugraph.force_atlas2(
        graph,
        max_iter=500,
        callback=callback,
    )
    # Signal end-of-stream to subscribers, then release ZMQ resources.
    callback.update(msg=b"close")
    callback.close()
asyncio.run(main(zmq.Context.instance()))
| 29.696429 | 74 | 0.708358 |
import warnings
warnings.filterwarnings("ignore", category=UserWarning)
warnings.filterwarnings("ignore", category=DeprecationWarning)
import python.test_data as datasets
from python.callback import GraphZmqCallback
import zmq
import cudf
import cugraph
import asyncio
import zmq.asyncio
graph, nodes, edges = datasets.make_synthetic_dataset()
print("num_nodes:", graph.number_of_nodes())
print("num_edges:", graph.number_of_edges())
async def main(zmq_ctx):
def map_positions(pos):
return cudf.DataFrame(pos, columns=["x", "y"]).astype("float32")
callback = GraphZmqCallback(
zmq_ctx=zmq_ctx,
map_positions=map_positions,
nodes=nodes[["id", "color", "size"]],
edges=edges[["edge", "bundle", "color"]],
edge_col_names=["edge", "color", "bundle"],
node_col_names=["id", "color", "size", "x", "y"],
)
cugraph.force_atlas2(
graph,
max_iter=500,
callback=callback,
)
callback.update(msg=b"close")
callback.close()
asyncio.run(main(zmq.Context.instance()))
| true | true |
f730fa84e14eb5fbcbc033f780025ae0e6b55f8c | 2,664 | py | Python | facebook_business/adobjects/lookalikespec.py | MyrikLD/facebook-python-business-sdk | a53c8ba0e8f7d0b41b385c60089f6ba00fa5c814 | [
"CNRI-Python"
] | 576 | 2018-05-01T19:09:32.000Z | 2022-03-31T11:45:11.000Z | facebook_business/adobjects/lookalikespec.py | MyrikLD/facebook-python-business-sdk | a53c8ba0e8f7d0b41b385c60089f6ba00fa5c814 | [
"CNRI-Python"
] | 217 | 2018-05-03T07:31:59.000Z | 2022-03-29T14:19:52.000Z | facebook_business/adobjects/lookalikespec.py | MyrikLD/facebook-python-business-sdk | a53c8ba0e8f7d0b41b385c60089f6ba00fa5c814 | [
"CNRI-Python"
] | 323 | 2018-05-01T20:32:26.000Z | 2022-03-29T07:05:12.000Z | # Copyright 2014 Facebook, Inc.
# You are hereby granted a non-exclusive, worldwide, royalty-free license to
# use, copy, modify, and distribute this software in source code or binary
# form for use in connection with the web services and APIs provided by
# Facebook.
# As with any software that integrates with the Facebook platform, your use
# of this software is subject to the Facebook Developer Principles and
# Policies [http://developers.facebook.com/policy/]. This copyright notice
# shall be included in all copies or substantial portions of the software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from facebook_business.adobjects.abstractobject import AbstractObject
"""
This class is auto-generated.
For any issues or feature requests related to this class, please let us know on
github and we'll fix in our codegen framework. We'll not be able to accept
pull request for this class.
"""
class LookalikeSpec(
    AbstractObject,
):
    """Specification of a lookalike audience (auto-generated API object)."""

    def __init__(self, api=None):
        super(LookalikeSpec, self).__init__()
        self._isLookalikeSpec = True  # presumably a marker the SDK uses to recognise this type -- confirm
        self._api = api

    class Field(AbstractObject.Field):
        # Graph API field names for a lookalike audience specification.
        country = 'country'
        is_financial_service = 'is_financial_service'
        origin = 'origin'
        origin_event_name = 'origin_event_name'
        origin_event_source_name = 'origin_event_source_name'
        origin_event_source_type = 'origin_event_source_type'
        product_set_name = 'product_set_name'
        ratio = 'ratio'
        starting_ratio = 'starting_ratio'
        target_countries = 'target_countries'
        target_country_names = 'target_country_names'
        type = 'type'

    # Maps each field name to the type string used for API (de)serialization.
    _field_types = {
        'country': 'string',
        'is_financial_service': 'bool',
        'origin': 'list<Object>',
        'origin_event_name': 'string',
        'origin_event_source_name': 'string',
        'origin_event_source_type': 'string',
        'product_set_name': 'string',
        'ratio': 'float',
        'starting_ratio': 'float',
        'target_countries': 'list<string>',
        'target_country_names': 'list',
        'type': 'string',
    }
    @classmethod
    def _get_field_enum_info(cls):
        # No enum-typed fields on this object, hence an empty mapping.
        field_enum_info = {}
        return field_enum_info
| 36 | 79 | 0.704204 |
from facebook_business.adobjects.abstractobject import AbstractObject
class LookalikeSpec(
AbstractObject,
):
def __init__(self, api=None):
super(LookalikeSpec, self).__init__()
self._isLookalikeSpec = True
self._api = api
class Field(AbstractObject.Field):
country = 'country'
is_financial_service = 'is_financial_service'
origin = 'origin'
origin_event_name = 'origin_event_name'
origin_event_source_name = 'origin_event_source_name'
origin_event_source_type = 'origin_event_source_type'
product_set_name = 'product_set_name'
ratio = 'ratio'
starting_ratio = 'starting_ratio'
target_countries = 'target_countries'
target_country_names = 'target_country_names'
type = 'type'
_field_types = {
'country': 'string',
'is_financial_service': 'bool',
'origin': 'list<Object>',
'origin_event_name': 'string',
'origin_event_source_name': 'string',
'origin_event_source_type': 'string',
'product_set_name': 'string',
'ratio': 'float',
'starting_ratio': 'float',
'target_countries': 'list<string>',
'target_country_names': 'list',
'type': 'string',
}
@classmethod
def _get_field_enum_info(cls):
field_enum_info = {}
return field_enum_info
| true | true |
f730fbd03f80351c68af026f82203af4a85f5514 | 421 | py | Python | sphinxnotes/snippet/builder.py | sphinx-notes/khufu | 4225086f0e5d7d8262598d755a5f19e8ad4ebbba | [
"BSD-3-Clause"
] | 1 | 2021-02-28T12:18:23.000Z | 2021-02-28T12:18:23.000Z | sphinxnotes/snippet/builder.py | sphinx-notes/khufu | 4225086f0e5d7d8262598d755a5f19e8ad4ebbba | [
"BSD-3-Clause"
] | 10 | 2021-02-28T08:54:00.000Z | 2022-01-16T05:17:03.000Z | sphinxnotes/snippet/builder.py | sphinx-notes/khufu | 4225086f0e5d7d8262598d755a5f19e8ad4ebbba | [
"BSD-3-Clause"
] | 1 | 2021-12-24T11:34:15.000Z | 2021-12-24T11:34:15.000Z | """
sphinxnotes.snippet.builder
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Dummy builder for triggering extension.
:copyright: Copyright 2021 Shengyu Zhang.
:license: BSD, see LICENSE for details.
"""
from sphinx.builders.dummy import DummyBuilder
from sphinx.locale import __
class Builder(DummyBuilder):
    """Dummy Sphinx builder whose only purpose is to trigger the extension."""
    name = 'snippet'  # selected via ``sphinx-build -b snippet``
    epilog = __('The snippet builder is a dummy builder for triggering extension.')
| 23.388889 | 83 | 0.67696 |
from sphinx.builders.dummy import DummyBuilder
from sphinx.locale import __
class Builder(DummyBuilder):
name = 'snippet'
epilog = __('The snippet builder is a dummy builder for triggering extension.')
| true | true |
f730fd394f2382010c04a8a3319d21739e722bf6 | 252 | py | Python | linear-search-ordered.py | vaibhavmule/PythonExercise | 5b32d8db5290db5ced2c8730191fd73936d4c3d4 | [
"MIT"
] | null | null | null | linear-search-ordered.py | vaibhavmule/PythonExercise | 5b32d8db5290db5ced2c8730191fd73936d4c3d4 | [
"MIT"
] | null | null | null | linear-search-ordered.py | vaibhavmule/PythonExercise | 5b32d8db5290db5ced2c8730191fd73936d4c3d4 | [
"MIT"
] | null | null | null | foo = [1, 2, 3, 4, 6, 9, 10]
def search(arr, item):
for i in arr:
if i == item:
return True
elif i > item:
return False
return False
print(search(foo, 5))
print(search(foo, 9))
print(search(foo, 20))
| 15.75 | 28 | 0.515873 | foo = [1, 2, 3, 4, 6, 9, 10]
def search(arr, item):
for i in arr:
if i == item:
return True
elif i > item:
return False
return False
print(search(foo, 5))
print(search(foo, 9))
print(search(foo, 20))
| true | true |
f730fe3ff4f0f24911ddbed8aa3feac509e8a169 | 3,707 | py | Python | lib/rucio/tests/mock/fts3.py | brianv0/rucio | 127a36fd53e5b4d9eb14ab02fe6c36443d78bfd0 | [
"Apache-2.0"
] | null | null | null | lib/rucio/tests/mock/fts3.py | brianv0/rucio | 127a36fd53e5b4d9eb14ab02fe6c36443d78bfd0 | [
"Apache-2.0"
] | null | null | null | lib/rucio/tests/mock/fts3.py | brianv0/rucio | 127a36fd53e5b4d9eb14ab02fe6c36443d78bfd0 | [
"Apache-2.0"
] | null | null | null | # Copyright European Organization for Nuclear Research (CERN)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Authors:
# - Mario Lassnig, <mario.lassnig@cern.ch>, 2013
import datetime
import random
import time
from sqlalchemy import and_, or_
from rucio.common.utils import generate_uuid
from rucio.core.monitor import record_counter, record_timer
from rucio.db.sqla import test_models
from rucio.db.sqla.constants import FTSState
from rucio.db.sqla.session import read_session, transactional_session
"""
This mock FTS3 server provides basic job control, with a random job progression model.
"""
@read_session
def list_all(session):
    """
    List all transfer jobs, most recently modified first.

    :param session: database session injected by the ``@read_session`` decorator.
    :returns: Generator yielding MockFTSTransfer rows.
    """
    record_counter('daemons.mock.fts3.list_all')
    query = session.query(test_models.MockFTSTransfer).order_by(test_models.MockFTSTransfer.lastmodified.desc())
    for row in query.yield_per(5):  # stream rows in small batches to bound memory
        yield row
@transactional_session
def submit(tinfo, session):
    """
    Create a new transfer job in state QUEUED.

    :param tinfo: The transfer job information as a string.
    :param session: database session injected by ``@transactional_session``.
    :returns: Dict carrying the new transfer job id under ``'job_id'``.
    """
    record_counter('daemons.mock.fts3.submit')

    start = time.time()
    tid = generate_uuid()
    record_timer('daemons.mock.fts3.submit.000-generate_uuid', (time.time() - start) * 1000)

    start = time.time()
    transfer = test_models.MockFTSTransfer(transfer_id=tid, transfer_metadata=str(tinfo))
    transfer.save(session=session)
    record_timer('daemons.mock.fts3.submit.001-new_transfer', (time.time() - start) * 1000)

    return {'job_id': tid}
@transactional_session
def query(tid, session):
    """
    Query the transfer job information of a single job. Has a chance to
    progress a SUBMITTED/ACTIVE job to FINISHED, FAILED or FINISHEDDIRTY.

    :param tid: The transfer job id.
    :param session: database session injected by ``@transactional_session``.
    :returns: The transfer job information, or None when the job is not in
              a progressable state.
    """
    record_counter('daemons.mock.fts3.query')

    ts = time.time()
    # Weighted draw replaces the old random.sample over a 100-element list:
    # 15% FINISHED, 3% FAILED, 2% FINISHEDDIRTY, 80% stay ACTIVE.
    new_state = random.choices([FTSState.FINISHED, FTSState.FAILED, FTSState.FINISHEDDIRTY, FTSState.ACTIVE],
                               weights=[15, 3, 2, 80])[0]
    record_timer('daemons.mock.fts3.query.000-random_sample', (time.time() - ts) * 1000)

    ts = time.time()
    query = session.query(test_models.MockFTSTransfer).filter(and_(test_models.MockFTSTransfer.transfer_id == tid,
                                                                   or_(test_models.MockFTSTransfer.state == FTSState.SUBMITTED,
                                                                       test_models.MockFTSTransfer.state == FTSState.ACTIVE)))
    # update() returns the number of matched rows; zero means the job is
    # missing or already in a terminal state.
    if query.update({'state': new_state,
                     'last_modified': datetime.datetime.utcnow()}) == 0:
        return None

    r = {'job_state': str(new_state)}
    if new_state == FTSState.FAILED or new_state == FTSState.FINISHEDDIRTY:
        r['reason'] = 'Mock FTS decided to kill your transfer.'
        r['files'] = [{'source_surl': 'mock_src', 'dest_surl': 'mock_dest', 'reason': 'mock failure'}]
    return r
@transactional_session
def cancel(tid, session):
    """
    Kills a transfer by setting its state to CANCELED.

    :param tid: The transfer job id.
    :param session: database session injected by ``@transactional_session``.
    """
    record_counter('daemons.mock.fts3.cancel')

    ts = time.time()
    # Bug fix: Query.filter() takes criterion expressions, not keyword
    # arguments -- the previous .filter(tid=tid) raised a TypeError.
    query = session.query(test_models.MockFTSTransfer).filter(test_models.MockFTSTransfer.transfer_id == tid)
    query.update({'state': FTSState.CANCELED,
                  'last_modified': datetime.datetime.utcnow()})
    record_timer('daemons.mock.fts3.cancel.update_state', (time.time() - ts) * 1000)
| 32.80531 | 149 | 0.677367 |
import datetime
import random
import time
from sqlalchemy import and_, or_
from rucio.common.utils import generate_uuid
from rucio.core.monitor import record_counter, record_timer
from rucio.db.sqla import test_models
from rucio.db.sqla.constants import FTSState
from rucio.db.sqla.session import read_session, transactional_session
@read_session
def list_all(session):
record_counter('daemons.mock.fts3.list_all')
query = session.query(test_models.MockFTSTransfer).order_by(test_models.MockFTSTransfer.lastmodified.desc())
for row in query.yield_per(5):
yield row
@transactional_session
def submit(tinfo, session):
record_counter('daemons.mock.fts3.submit')
ts = time.time()
tid = generate_uuid()
record_timer('daemons.mock.fts3.submit.000-generate_uuid', (time.time() - ts) * 1000)
ts = time.time()
new_transfer = test_models.MockFTSTransfer(transfer_id=tid, transfer_metadata=str(tinfo))
new_transfer.save(session=session)
record_timer('daemons.mock.fts3.submit.001-new_transfer', (time.time() - ts) * 1000)
return {'job_id': tid}
@transactional_session
def query(tid, session):
record_counter('daemons.mock.fts3.query')
ts = time.time()
new_state = random.sample(sum([[FTSState.FINISHED] * 15, [FTSState.FAILED] * 3, [FTSState.FINISHEDDIRTY] * 2, [FTSState.ACTIVE] * 80], []), 1)[0]
record_timer('daemons.mock.fts3.query.000-random_sample', (time.time() - ts) * 1000)
ts = time.time()
query = session.query(test_models.MockFTSTransfer).filter(and_(test_models.MockFTSTransfer.transfer_id == tid,
or_(test_models.MockFTSTransfer.state == FTSState.SUBMITTED,
test_models.MockFTSTransfer.state == FTSState.ACTIVE)))
if query.update({'state': new_state,
'last_modified': datetime.datetime.utcnow()}) == 0:
return None
r = {'job_state': str(new_state)}
if new_state == FTSState.FAILED or new_state == FTSState.FINISHEDDIRTY:
r['reason'] = 'Mock FTS decided to kill your transfer.'
r['files'] = [{'source_surl': 'mock_src', 'dest_surl': 'mock_dest', 'reason': 'mock failure'}]
return r
@transactional_session
def cancel(tid, session):
record_counter('daemons.mock.fts3.cancel')
ts = time.time()
query = session.query(test_models.MockFTSTransfer).filter(tid=tid)
query.update({'state': FTSState.CANCELED,
'last_modified': datetime.datetime.utcnow()})
record_timer('daemons.mock.fts3.cancel.update_state', (time.time() - ts) * 1000)
| true | true |
f730ff473e3acc560575f38f17d8a90a26b95e0f | 1,191 | py | Python | mainsite/models/link_note.py | xjlin0/attendee | 6f0f3bfd64b2c6410af18823cd423d71a2ca59c7 | [
"MIT"
] | 2 | 2019-08-26T09:16:09.000Z | 2019-11-04T15:07:06.000Z | mainsite/models/link_note.py | xjlin0/attendee | 6f0f3bfd64b2c6410af18823cd423d71a2ca59c7 | [
"MIT"
] | null | null | null | mainsite/models/link_note.py | xjlin0/attendee | 6f0f3bfd64b2c6410af18823cd423d71a2ca59c7 | [
"MIT"
] | null | null | null | from django.db import models
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from . import RecordStatusEnum
class LinkNote(models.Model):
    """A free-form note that can attach to any model row via a generic FK."""
    id = models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')
    # Generic relation: (content_type, object_id) together identify the target row.
    # NOTE(review): on_delete=models.SET(0) leaves a dangling type id 0 -- confirm intent.
    content_type = models.ForeignKey(ContentType, on_delete=models.SET(0))
    object_id = models.BigIntegerField()
    content_object = GenericForeignKey('content_type', 'object_id')
    note_type = models.CharField(max_length=20, blank=True, null=True)  # optional category label
    note_text = models.CharField(max_length=2000)
    created_at = models.DateTimeField(auto_now_add=True, blank=False)  # set once on insert
    updated_at = models.DateTimeField(auto_now=True, blank=False)  # refreshed on every save
    # Record lifecycle flag; new notes default to ACTIVE.
    status = models.CharField(max_length=10, db_index=True, default=RecordStatusEnum.ACTIVE, null=False, choices=RecordStatusEnum.choices())

    def __str__(self):
        return '%s %s %s' % (self.content_type, self.content_object, self.note_text)

    class Meta:
        db_table = 'mainsite_link_notes'
        ordering = ('-updated_at',)  # newest edits first by default

    @property
    def iso_updated_at(self):
        """Last-modified timestamp in ISO-8601 form (handy for JSON APIs)."""
        return self.updated_at.isoformat()
| 42.535714 | 140 | 0.751469 | from django.db import models
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from . import RecordStatusEnum
class LinkNote(models.Model):
id = models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')
content_type = models.ForeignKey(ContentType, on_delete=models.SET(0))
object_id = models.BigIntegerField()
content_object = GenericForeignKey('content_type', 'object_id')
note_type = models.CharField(max_length=20, blank=True, null=True)
note_text = models.CharField(max_length=2000)
created_at = models.DateTimeField(auto_now_add=True, blank=False)
updated_at = models.DateTimeField(auto_now=True, blank=False)
status = models.CharField(max_length=10, db_index=True, default=RecordStatusEnum.ACTIVE, null=False, choices=RecordStatusEnum.choices())
def __str__(self):
return '%s %s %s' % (self.content_type, self.content_object, self.note_text)
class Meta:
db_table = 'mainsite_link_notes'
ordering = ('-updated_at',)
@property
def iso_updated_at(self):
return self.updated_at.isoformat()
| true | true |
f73102329030fad1a68ba5bb5eb01f8d626f5c90 | 2,780 | py | Python | data/cirq_new/cirq_program/startCirq_pragma440.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | data/cirq_new/cirq_program/startCirq_pragma440.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | data/cirq_new/cirq_program/startCirq_pragma440.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=4
# total number=20
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
class Opty(cirq.PointOptimizer):
    """Point optimizer that rewrites each CZPowGate into an explicit CZ
    followed by two X layers on the same qubits."""
    def optimization_at(
            self,
            circuit: 'cirq.Circuit',
            index: int,
            op: 'cirq.Operation'
    ) -> Optional[cirq.PointOptimizationSummary]:
        # Only CZPowGate operations are rewritten; everything else is left alone
        # (returning None tells cirq to skip this point).
        if (isinstance(op, cirq.ops.GateOperation) and isinstance(op.gate, cirq.CZPowGate)):
            return cirq.PointOptimizationSummary(
                clear_span=1,  # replace just this single operation
                clear_qubits=op.qubits,
                new_operations=[
                    cirq.CZ(*op.qubits),
                    # NOTE(review): the two X layers below are mutual inverses
                    # (net identity); presumably kept to pad gate count -- confirm.
                    cirq.X.on_each(*op.qubits),
                    cirq.X.on_each(*op.qubits),
                ]
            )
#thatsNoCode
def make_circuit(n: int, input_qubit):
    """Assemble the fixed 4-qubit demo circuit and measure every qubit.

    The gate sequence is preserved exactly from the generated program;
    a final measurement on all qubits is appended under key 'result'.
    """
    circuit = cirq.Circuit()

    # Put the first four qubits into superposition.
    for q in input_qubit[:4]:
        circuit.append(cirq.H.on(q))

    # Two back-to-back SWAPs (net identity, kept for gate-count parity).
    circuit.append(cirq.SWAP.on(input_qubit[1], input_qubit[0]))
    circuit.append(cirq.SWAP.on(input_qubit[1], input_qubit[0]))

    # Two H-CZ-H sandwiches between qubit 3 and qubit 0.
    circuit.append(cirq.H.on(input_qubit[0]))
    circuit.append(cirq.CZ.on(input_qubit[3], input_qubit[0]))
    circuit.append(cirq.H.on(input_qubit[0]))
    circuit.append(cirq.H.on(input_qubit[0]))
    circuit.append(cirq.CZ.on(input_qubit[3], input_qubit[0]))
    circuit.append(cirq.H.on(input_qubit[0]))

    # CNOT/X pairs on qubit 0 controlled by qubit 1.
    circuit.append(cirq.CNOT.on(input_qubit[1], input_qubit[0]))
    circuit.append(cirq.X.on(input_qubit[0]))
    circuit.append(cirq.CNOT.on(input_qubit[1], input_qubit[0]))
    circuit.append(cirq.X.on(input_qubit[0]))

    circuit.append(cirq.measure(*input_qubit, key='result'))
    return circuit
def bitstring(bits):
    """Render an iterable of measurement bits as a compact digit string."""
    return ''.join(map(str, map(int, bits)))
if __name__ == '__main__':
    qubit_count = 4

    # Lay the qubits out on a single grid column.
    input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
    circuit = make_circuit(qubit_count,input_qubits)
    # Retarget the circuit onto Sycamore-native gates before simulating.
    circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')

    circuit_sample_count =2000
    simulator = cirq.Simulator()
    result = simulator.run(circuit, repetitions=circuit_sample_count)

    # Histogram of measured bitstrings, then dump everything to CSV.
    frequencies = result.histogram(key='result', fold_func=bitstring)
    writefile = open("../data/startCirq_pragma440.csv","w+")

    print(format(frequencies),file=writefile)
    print("results end", file=writefile)

    print(circuit.__len__(), file=writefile)
    print(circuit,file=writefile)
    writefile.close()
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
class Opty(cirq.PointOptimizer):
def optimization_at(
self,
circuit: 'cirq.Circuit',
index: int,
op: 'cirq.Operation'
) -> Optional[cirq.PointOptimizationSummary]:
if (isinstance(op, cirq.ops.GateOperation) and isinstance(op.gate, cirq.CZPowGate)):
return cirq.PointOptimizationSummary(
clear_span=1,
clear_qubits=op.qubits,
new_operations=[
cirq.CZ(*op.qubits),
cirq.X.on_each(*op.qubits),
cirq.X.on_each(*op.qubits),
]
)
def make_circuit(n: int, input_qubit):
c = cirq.Circuit()
c.append(cirq.H.on(input_qubit[0]))
c.append(cirq.H.on(input_qubit[1]))
c.append(cirq.H.on(input_qubit[2]))
c.append(cirq.H.on(input_qubit[3]))
c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0]))
c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0]))
c.append(cirq.H.on(input_qubit[0]))
c.append(cirq.CZ.on(input_qubit[3],input_qubit[0]))
c.append(cirq.H.on(input_qubit[0]))
c.append(cirq.H.on(input_qubit[0]))
c.append(cirq.CZ.on(input_qubit[3],input_qubit[0]))
c.append(cirq.H.on(input_qubit[0]))
c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0]))
c.append(cirq.X.on(input_qubit[0]))
c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0]))
c.append(cirq.X.on(input_qubit[0]))
c.append(cirq.measure(*input_qubit, key='result'))
return c
def bitstring(bits):
return ''.join(str(int(b)) for b in bits)
if __name__ == '__main__':
qubit_count = 4
input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
circuit = make_circuit(qubit_count,input_qubits)
circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')
circuit_sample_count =2000
simulator = cirq.Simulator()
result = simulator.run(circuit, repetitions=circuit_sample_count)
frequencies = result.histogram(key='result', fold_func=bitstring)
writefile = open("../data/startCirq_pragma440.csv","w+")
print(format(frequencies),file=writefile)
print("results end", file=writefile)
print(circuit.__len__(), file=writefile)
print(circuit,file=writefile)
writefile.close() | true | true |
f731026b4115633e600f543fa4ca1b692d3b160d | 8,578 | py | Python | mjrl/algos/model_accel/sampling.py | YujieLu10/tslam | 1341dbecdf02ee6b1b6cdd1a538272fffdea6ffd | [
"Apache-2.0"
] | null | null | null | mjrl/algos/model_accel/sampling.py | YujieLu10/tslam | 1341dbecdf02ee6b1b6cdd1a538272fffdea6ffd | [
"Apache-2.0"
] | null | null | null | mjrl/algos/model_accel/sampling.py | YujieLu10/tslam | 1341dbecdf02ee6b1b6cdd1a538272fffdea6ffd | [
"Apache-2.0"
] | null | null | null | import logging
import numpy as np
from mjrl.utils.gym_env import GymEnv
from mjrl.utils import tensor_utils
logging.disable(logging.CRITICAL)
import multiprocessing as mp
from multiprocessing import set_start_method
try:
set_start_method('spawn')
except RuntimeError:
pass
import time as timer
import torch
logging.disable(logging.CRITICAL)
# ===========================================================
# Rollout parameteric policy on fitted env to collect data
# ===========================================================
def policy_rollout(
        num_traj,
        env,
        policy,
        fitted_model,
        init_state=None,
        eval_mode=False,
        horizon=1e6,
        env_kwargs=None,
        seed=None,
        ):
    """
    Roll out a parametric policy on a fitted dynamics model (no real env steps).

    :param num_traj: number of trajectories to roll out as one batch
    :param env: env id (str), a GymEnv instance, or a callable returning an env
    :param policy: policy exposing .model.forward(s) and .log_std (torch)
    :param fitted_model: learned model with forward(s, a) -> s_next (torch)
    :param init_state: optional initial states (ndarray or list); sampled via
        env.reset() per trajectory when None
    :param eval_mode: if True use mean actions, else add Gaussian exploration noise
    :param horizon: rollout length cap (clipped to env.horizon)
    :param env_kwargs: kwargs for the env constructor when *env* is callable
    :param seed: optional seed applied to both env and torch
    :return: dict with 'observations' (num_traj, horizon, state_dim) and
        'actions' (num_traj, horizon, action_dim) numpy arrays
    """
    # get the correct env behavior
    if type(env) == str:
        env = GymEnv(env)
    elif isinstance(env, GymEnv):
        env = env
    elif callable(env):
        env = env(**env_kwargs)
    else:
        print("Unsupported environment format")
        raise AttributeError

    if seed is not None:
        env.set_seed(seed)
        torch.manual_seed(seed)

    # get initial states: always a (num_traj, state_dim) float tensor
    if init_state is None:
        st = np.array([env.reset() for _ in range(num_traj)])
        st = torch.from_numpy(st).float()
    elif type(init_state) == np.ndarray:
        st = torch.from_numpy(init_state).float()
    elif type(init_state) == list:
        st = torch.from_numpy(np.array(init_state)).float()
    else:
        print("Unsupported format for init state")
        # NOTE(review): quit() kills the whole process from library code;
        # raising an exception would be kinder to callers.
        quit()

    # perform batched rollouts entirely inside the fitted model
    horizon = min(horizon, env.horizon)
    obs = []
    act = []

    for t in range(horizon):
        at = policy.model.forward(st)
        if eval_mode is not True:
            # Exploration: Gaussian noise scaled by the policy's learned std.
            at = at + torch.randn(at.shape) * torch.exp(policy.log_std)
        stp1 = fitted_model.forward(st, at)
        # Record the pre-transition state/action, then advance.
        obs.append(st.to('cpu').data.numpy())
        act.append(at.to('cpu').data.numpy())
        st = stp1

    obs = np.array(obs)
    obs = np.swapaxes(obs, 0, 1)  # (num_traj, horizon, state_dim)
    act = np.array(act)
    act = np.swapaxes(act, 0, 1)  # (num_traj, horizon, action_dim)
    paths = dict(observations = obs,
                 actions = act)

    return paths
# ===========================================================
# Rollout action sequences on the fitted model
# ===========================================================
def trajectory_rollout(actions, fitted_model, init_states):
    """
    Unroll open-loop action sequences through a fitted dynamics model.

    :param actions: (num_traj, horizon, action_dim) numpy array (or nested list)
    :param fitted_model: model exposing forward(s, a) -> s_next on torch tensors
    :param init_states: (num_traj, state_dim) numpy array, or a single
        (state_dim,) state that is broadcast to every trajectory
    :return: dict with 'observations' of shape (num_traj, horizon, state_dim)
        and the 'actions' that were rolled out
    """
    if isinstance(actions, list):
        actions = np.array(actions)
    num_traj, horizon = actions.shape[0], actions.shape[1]
    if len(init_states.shape) == 1:
        # One start state given: replicate it across all trajectories.
        init_states = np.tile(init_states, (num_traj, 1))

    states = torch.from_numpy(init_states).float()
    observations = []
    for step in range(horizon):
        step_actions = torch.from_numpy(actions[:, step, :]).float()
        # Record the pre-transition state, then advance the model.
        observations.append(states.data.numpy().copy())
        states = fitted_model.forward(states, step_actions)

    observations = np.swapaxes(np.array(observations), 0, 1)
    return dict(observations=observations, actions=actions)
# ===========================================================
# Rollout policy (parametric or implicit MPC) on real env
# ===========================================================
# TODO(Aravind): Remove redundancy. This can be coupled with the standard sample_paths in MJRL utils
def sample_paths(num_traj,
                 env,
                 policy,        # mpc policy on fitted model
                 horizon=1e6,
                 eval_mode=True,
                 base_seed=None,
                 noise_level=0.1,
                 ):
    """
    Roll out *policy* on the real environment and record full trajectories.

    :param num_traj: number of episodes to collect
    :param env: env id (str), a GymEnv instance, or a callable returning an env
    :param policy: policy with get_action(obs); MPC-style policies return a list
    :param horizon: episode length cap (clipped to env.horizon)
    :param eval_mode: if True use evaluation actions without exploration noise
    :param base_seed: optional env seed
    :param noise_level: half-width of uniform exploration noise (non-eval mode)
    :return: list of path dicts (observations/actions/rewards/terminated/env_infos)
    """
    # get the correct env behavior
    if type(env) == str:
        env = GymEnv(env)
    elif isinstance(env, GymEnv):
        env = env
    elif callable(env):
        env = env()
    else:
        print("Unsupported environment format")
        raise AttributeError

    if base_seed is not None:
        env.set_seed(base_seed)

    horizon = min(horizon, env.horizon)
    paths = []

    for ep in range(num_traj):
        env.reset()
        observations=[]
        actions=[]
        rewards=[]
        env_infos=[]
        t = 0
        done = False
        while t < horizon and done is False:
            obs = env.get_obs()
            ifo = env.get_env_infos()
            act = policy.get_action(obs)
            # Parametric policies return an array: add uniform exploration noise.
            if eval_mode is False and type(act) != list:
                act = act + np.random.uniform(low=-noise_level, high=noise_level, size=act.shape[0])
            # MPC-style policies return [sampled_action, {'evaluation': mean_action}].
            if type(act) == list:
                act = act[0] if eval_mode is False else act[1]['evaluation']
            next_obs, reward, done, _ = env.step(act)
            t = t + 1
            observations.append(obs)
            actions.append(act)
            rewards.append(reward)
            env_infos.append(ifo)
        path = dict(
            observations=np.array(observations),
            actions=np.array(actions),
            rewards=np.array(rewards),
            terminated=done,
            env_infos=tensor_utils.stack_tensor_dict_list(env_infos)
        )
        paths.append(path)
    return paths
# ===========================================================
# Utility functions
# ===========================================================
def discount_sum(x, gamma, discounted_terminal=0.0):
    """
    Discounted suffix sums of sequence *x*, seeded with an optional terminal
    bootstrap value: y[t] = x[t] + gamma * y[t+1].
    """
    sums = []
    running = discounted_terminal
    for value in reversed(x):
        running = value + gamma * running
        sums.append(running)
    sums.reverse()
    return np.array(sums)
def generate_perturbed_actions(base_act, filter_coefs):
    """
    Generate perturbed actions around a base action sequence.

    :param base_act: (horizon, action_dim) base action sequence
    :param filter_coefs: (sigma, beta_0, beta_1, beta_2) -- Gaussian noise
        scale plus second-order smoothing-filter coefficients
    :return: perturbed action sequence of the same shape as base_act
    """
    sigma, beta_0, beta_1, beta_2 = filter_coefs
    eps = np.random.normal(loc=0, scale=1.0, size=base_act.shape) * sigma
    eps = base_act + eps
    # AR(2)-style smoothing along the time axis so consecutive perturbed
    # actions are correlated; the first two steps are special-cased.
    # NOTE(review): indexing eps[1] assumes horizon >= 2 -- confirm callers.
    eps[0] = eps[0] * (beta_0 + beta_1 + beta_2)
    eps[1] = beta_0 * eps[1] + (beta_1 + beta_2) * eps[0]
    for i in range(2, eps.shape[0]):
        eps[i] = beta_0*eps[i] + beta_1*eps[i-1] + beta_2*eps[i-2]
    return eps
def generate_paths(num_traj, fitted_model, start_state, base_act, filter_coefs, base_seed=None):
    """
    Draw *num_traj* smoothed perturbations of *base_act* and roll them out
    through *fitted_model* starting from *start_state*.

    The seed is set inside this function so that multiprocessing workers can
    each be handed an independent random stream.
    """
    if base_seed is not None:
        np.random.seed(base_seed)
    perturbed = np.array([generate_perturbed_actions(base_act, filter_coefs)
                          for _ in range(num_traj)])
    return trajectory_rollout(perturbed, fitted_model, start_state)
def evaluate_policy(e, policy, fitted_model, noise_level=0.0,
                    real_step=False, num_episodes=10, visualize=False):
    """
    Roll out *policy* for *num_episodes* episodes and return the paths.

    :param e: environment (GymEnv-like)
    :param policy: policy with get_action(obs); MPC policies return a list
    :param fitted_model: learned dynamics model used when real_step is False
    :param noise_level: half-width of uniform action noise (0 disables it)
    :param real_step: if True step the real env; otherwise step the fitted
        model and force the sim into the predicted state
    :param num_episodes: number of rollouts to collect
    :param visualize: render each step and print episode scores
    :return: list of path dictionaries
    """
    paths = []
    for ep in range(num_episodes):
        e.reset()
        observations = []
        actions = []
        rewards = []
        env_infos = []
        t = 0
        done = False
        while t < e.horizon and done is False:
            o = e.get_obs()
            ifo = e.get_env_infos()
            a = policy.get_action(o)
            if type(a) == list:
                # MPC-style policy: use the evaluation (mean) action.
                a = a[1]['evaluation']
            if noise_level > 0.0:
                a = a + e.env.env.np_random.uniform(low=-noise_level, high=noise_level, size=a.shape[0])
            if real_step is False:
                # Step the fitted model and push the sim into the predicted state;
                # rewards are recomputed from the full path afterwards.
                next_s = fitted_model.predict(o, a)
                r = 0.0  # temporarily
                e.env.env.set_fitted_state(next_s)
            else:
                next_o, r, done, ifo2 = e.step(a)
                ifo = ifo2 if ifo == {} else ifo
            if visualize:
                e.render()
            t = t + 1
            observations.append(o)
            actions.append(a)
            rewards.append(r)
            env_infos.append(ifo)
        path = dict(observations=np.array(observations), actions=np.array(actions),
                    rewards=np.array(rewards),
                    env_infos=tensor_utils.stack_tensor_dict_list(env_infos))
        if real_step is False:
            e.env.env.compute_path_rewards(path)
            # Best-effort truncation: not every env implements truncate_paths.
            # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
            # are no longer swallowed.
            try:
                path = e.env.env.truncate_paths([path])[0]
            except Exception:
                pass
        paths.append(path)
        if visualize:
            print("episode score = %f " % np.sum(path['rewards']))
    return paths
import numpy as np
from mjrl.utils.gym_env import GymEnv
from mjrl.utils import tensor_utils
logging.disable(logging.CRITICAL)
import multiprocessing as mp
from multiprocessing import set_start_method
try:
set_start_method('spawn')
except RuntimeError:
pass
import time as timer
import torch
logging.disable(logging.CRITICAL)
def policy_rollout(
num_traj,
env,
policy,
fitted_model,
init_state=None,
eval_mode=False,
horizon=1e6,
env_kwargs=None,
seed=None,
):
if type(env) == str:
env = GymEnv(env)
elif isinstance(env, GymEnv):
env = env
elif callable(env):
env = env(**env_kwargs)
else:
print("Unsupported environment format")
raise AttributeError
if seed is not None:
env.set_seed(seed)
torch.manual_seed(seed)
if init_state is None:
st = np.array([env.reset() for _ in range(num_traj)])
st = torch.from_numpy(st).float()
elif type(init_state) == np.ndarray:
st = torch.from_numpy(init_state).float()
elif type(init_state) == list:
st = torch.from_numpy(np.array(init_state)).float()
else:
print("Unsupported format for init state")
quit()
horizon = min(horizon, env.horizon)
obs = []
act = []
for t in range(horizon):
at = policy.model.forward(st)
if eval_mode is not True:
at = at + torch.randn(at.shape) * torch.exp(policy.log_std)
stp1 = fitted_model.forward(st, at)
obs.append(st.to('cpu').data.numpy())
act.append(at.to('cpu').data.numpy())
st = stp1
obs = np.array(obs)
obs = np.swapaxes(obs, 0, 1)
act = np.array(act)
act = np.swapaxes(act, 0, 1)
paths = dict(observations = obs,
actions = act)
return paths
def trajectory_rollout(actions, fitted_model, init_states):
actions = np.array(actions) if type(actions) == list else actions
num_traj = actions.shape[0]
horizon = actions.shape[1]
if len(init_states.shape) == 1:
init_states = np.tile(init_states, (num_traj, 1))
obs = []
st = torch.from_numpy(init_states).float()
for t in range(horizon):
at = actions[:, t, :]
at = torch.from_numpy(at).float()
stp1 = fitted_model.forward(st, at)
obs.append(st.data.numpy().copy())
st = stp1
obs = np.array(obs)
obs = np.swapaxes(obs, 0, 1)
paths = dict(observations=obs, actions=actions)
return paths
def sample_paths(num_traj,
env,
policy,
horizon=1e6,
eval_mode=True,
base_seed=None,
noise_level=0.1,
):
if type(env) == str:
env = GymEnv(env)
elif isinstance(env, GymEnv):
env = env
elif callable(env):
env = env()
else:
print("Unsupported environment format")
raise AttributeError
if base_seed is not None:
env.set_seed(base_seed)
horizon = min(horizon, env.horizon)
paths = []
for ep in range(num_traj):
env.reset()
observations=[]
actions=[]
rewards=[]
env_infos=[]
t = 0
done = False
while t < horizon and done is False:
obs = env.get_obs()
ifo = env.get_env_infos()
act = policy.get_action(obs)
if eval_mode is False and type(act) != list:
act = act + np.random.uniform(low=-noise_level, high=noise_level, size=act.shape[0])
if type(act) == list:
act = act[0] if eval_mode is False else act[1]['evaluation']
next_obs, reward, done, _ = env.step(act)
t = t + 1
observations.append(obs)
actions.append(act)
rewards.append(reward)
env_infos.append(ifo)
path = dict(
observations=np.array(observations),
actions=np.array(actions),
rewards=np.array(rewards),
terminated=done,
env_infos=tensor_utils.stack_tensor_dict_list(env_infos)
)
paths.append(path)
return paths
def discount_sum(x, gamma, discounted_terminal=0.0):
y = []
run_sum = discounted_terminal
for t in range( len(x)-1, -1, -1):
run_sum = x[t] + gamma*run_sum
y.append(run_sum)
return np.array(y[::-1])
def generate_perturbed_actions(base_act, filter_coefs):
sigma, beta_0, beta_1, beta_2 = filter_coefs
eps = np.random.normal(loc=0, scale=1.0, size=base_act.shape) * sigma
eps = base_act + eps
eps[0] = eps[0] * (beta_0 + beta_1 + beta_2)
eps[1] = beta_0 * eps[1] + (beta_1 + beta_2) * eps[0]
for i in range(2, eps.shape[0]):
eps[i] = beta_0*eps[i] + beta_1*eps[i-1] + beta_2*eps[i-2]
return eps
def generate_paths(num_traj, fitted_model, start_state, base_act, filter_coefs, base_seed=None):
if base_seed is not None:
np.random.seed(base_seed)
act_list = []
for i in range(num_traj):
act = generate_perturbed_actions(base_act, filter_coefs)
act_list.append(act)
act = np.array(act_list)
paths = trajectory_rollout(act, fitted_model, start_state)
return paths
def evaluate_policy(e, policy, fitted_model, noise_level=0.0,
real_step=False, num_episodes=10, visualize=False):
paths = []
for ep in range(num_episodes):
e.reset()
observations = []
actions = []
rewards = []
env_infos = []
t = 0
done = False
while t < e.horizon and done is False:
o = e.get_obs()
ifo = e.get_env_infos()
a = policy.get_action(o)
if type(a) == list:
a = a[1]['evaluation']
if noise_level > 0.0:
a = a + e.env.env.np_random.uniform(low=-noise_level, high=noise_level, size=a.shape[0])
if real_step is False:
next_s = fitted_model.predict(o, a)
r = 0.0
e.env.env.set_fitted_state(next_s)
else:
next_o, r, done, ifo2 = e.step(a)
ifo = ifo2 if ifo == {} else ifo
if visualize:
e.render()
t = t + 1
observations.append(o)
actions.append(a)
rewards.append(r)
env_infos.append(ifo)
path = dict(observations=np.array(observations), actions=np.array(actions),
rewards=np.array(rewards),
env_infos=tensor_utils.stack_tensor_dict_list(env_infos))
if real_step is False:
e.env.env.compute_path_rewards(path)
try:
path = e.env.env.truncate_paths([path])[0]
except:
pass
paths.append(path)
if visualize:
print("episode score = %f " % np.sum(path['rewards']))
return paths | true | true |
f7310364126b99c5b9600e09110e9f438c281858 | 3,594 | py | Python | tests/test_fast/test_timeout.py | ponty/EasyProcess | 4b3f5ab487ec46133e361958d6061262bfad91c3 | [
"BSD-2-Clause"
] | 86 | 2015-02-17T11:41:18.000Z | 2022-03-05T08:05:29.000Z | tests/test_fast/test_timeout.py | ponty/EasyProcess | 4b3f5ab487ec46133e361958d6061262bfad91c3 | [
"BSD-2-Clause"
] | 18 | 2015-02-11T21:03:13.000Z | 2022-03-20T14:32:51.000Z | tests/test_fast/test_timeout.py | ponty/EasyProcess | 4b3f5ab487ec46133e361958d6061262bfad91c3 | [
"BSD-2-Clause"
] | 22 | 2015-02-11T20:47:00.000Z | 2021-11-01T15:26:23.000Z | import sys
import time
import pytest
from easyprocess import EasyProcess
python = sys.executable
def test_timeout():
p = EasyProcess("sleep 1").start()
p.wait(0.2)
assert p.is_alive()
p.wait(0.2)
assert p.is_alive()
p.wait(2)
assert not p.is_alive()
assert EasyProcess("sleep 0.3").call().return_code == 0
assert EasyProcess("sleep 0.3").call(timeout=0.1).return_code != 0
assert EasyProcess("sleep 0.3").call(timeout=1).return_code == 0
assert EasyProcess("sleep 0.3").call().timeout_happened is False
assert EasyProcess("sleep 0.3").call(timeout=0.1).timeout_happened
assert EasyProcess("sleep 0.3").call(timeout=1).timeout_happened is False
@pytest.mark.timeout(10)
def test_time_cli1():
p = EasyProcess(
[
python,
"-c",
"import logging;logging.basicConfig(level=logging.DEBUG);from easyprocess import EasyProcess;EasyProcess('sleep 15').start()",
]
)
p.call()
assert p.return_code == 0
@pytest.mark.timeout(10)
def test_time_cli2():
p = EasyProcess(
[
python,
"-c",
"import logging;logging.basicConfig(level=logging.DEBUG);from easyprocess import EasyProcess;EasyProcess('sleep 15').call(timeout=0.5)",
]
)
p.call()
assert p.return_code == 0
@pytest.mark.timeout(10)
def test_time2():
p = EasyProcess("sleep 15").call(timeout=1)
assert p.is_alive() is False
assert p.timeout_happened
assert p.return_code != 0
assert p.stdout == ""
@pytest.mark.timeout(10)
def test_timeout_out():
p = EasyProcess(
[python, "-c", "import time;print( 'start');time.sleep(15);print( 'end')"]
).call(timeout=1)
assert p.is_alive() is False
assert p.timeout_happened
assert p.return_code != 0
assert p.stdout == ""
@pytest.mark.timeout(3)
def test_time3():
EasyProcess("sleep 15").start()
ignore_term = """
import signal;
import time;
signal.signal(signal.SIGTERM, lambda *args: None);
while True:
time.sleep(0.5);
"""
@pytest.mark.timeout(10)
def test_force_timeout():
proc = EasyProcess([python, "-c", ignore_term]).start()
# Calling stop() right away actually stops python before it
# has a change to actually compile and run the input code,
# meaning the signal handlers aren't registered yet. Give it
# a moment to setup
time.sleep(1)
proc.stop(kill_after=1)
assert proc.is_alive() is False
assert proc.return_code != 0
@pytest.mark.timeout(10)
def test_force_0_timeout():
proc = EasyProcess([python, "-c", ignore_term]).start()
time.sleep(1)
proc.stop(kill_after=0)
assert proc.is_alive() is False
assert proc.return_code != 0
@pytest.mark.timeout(10)
def test_force_timeout2():
proc = EasyProcess([python, "-c", ignore_term]).call(timeout=1, kill_after=1)
assert proc.is_alive() is False
assert proc.return_code != 0
@pytest.mark.timeout(10)
def test_stop_wait():
proc = EasyProcess([python, "-c", ignore_term]).start()
time.sleep(1)
proc.sendstop().wait(timeout=1)
# On windows, Popen.terminate actually behaves like kill,
# so don't check that our hanging process code is actually hanging.
# The end result is still what we want. On other platforms, leave
# this assertion to make sure we are correctly testing the ability
# to stop a hung process
if not sys.platform.startswith("win"):
assert proc.is_alive() is True
proc.stop(kill_after=1)
assert proc.is_alive() is False
assert proc.return_code != 0
| 27.022556 | 148 | 0.664162 | import sys
import time
import pytest
from easyprocess import EasyProcess
python = sys.executable
def test_timeout():
p = EasyProcess("sleep 1").start()
p.wait(0.2)
assert p.is_alive()
p.wait(0.2)
assert p.is_alive()
p.wait(2)
assert not p.is_alive()
assert EasyProcess("sleep 0.3").call().return_code == 0
assert EasyProcess("sleep 0.3").call(timeout=0.1).return_code != 0
assert EasyProcess("sleep 0.3").call(timeout=1).return_code == 0
assert EasyProcess("sleep 0.3").call().timeout_happened is False
assert EasyProcess("sleep 0.3").call(timeout=0.1).timeout_happened
assert EasyProcess("sleep 0.3").call(timeout=1).timeout_happened is False
@pytest.mark.timeout(10)
def test_time_cli1():
p = EasyProcess(
[
python,
"-c",
"import logging;logging.basicConfig(level=logging.DEBUG);from easyprocess import EasyProcess;EasyProcess('sleep 15').start()",
]
)
p.call()
assert p.return_code == 0
@pytest.mark.timeout(10)
def test_time_cli2():
p = EasyProcess(
[
python,
"-c",
"import logging;logging.basicConfig(level=logging.DEBUG);from easyprocess import EasyProcess;EasyProcess('sleep 15').call(timeout=0.5)",
]
)
p.call()
assert p.return_code == 0
@pytest.mark.timeout(10)
def test_time2():
p = EasyProcess("sleep 15").call(timeout=1)
assert p.is_alive() is False
assert p.timeout_happened
assert p.return_code != 0
assert p.stdout == ""
@pytest.mark.timeout(10)
def test_timeout_out():
p = EasyProcess(
[python, "-c", "import time;print( 'start');time.sleep(15);print( 'end')"]
).call(timeout=1)
assert p.is_alive() is False
assert p.timeout_happened
assert p.return_code != 0
assert p.stdout == ""
@pytest.mark.timeout(3)
def test_time3():
EasyProcess("sleep 15").start()
ignore_term = """
import signal;
import time;
signal.signal(signal.SIGTERM, lambda *args: None);
while True:
time.sleep(0.5);
"""
@pytest.mark.timeout(10)
def test_force_timeout():
proc = EasyProcess([python, "-c", ignore_term]).start()
# a moment to setup
time.sleep(1)
proc.stop(kill_after=1)
assert proc.is_alive() is False
assert proc.return_code != 0
@pytest.mark.timeout(10)
def test_force_0_timeout():
proc = EasyProcess([python, "-c", ignore_term]).start()
time.sleep(1)
proc.stop(kill_after=0)
assert proc.is_alive() is False
assert proc.return_code != 0
@pytest.mark.timeout(10)
def test_force_timeout2():
proc = EasyProcess([python, "-c", ignore_term]).call(timeout=1, kill_after=1)
assert proc.is_alive() is False
assert proc.return_code != 0
@pytest.mark.timeout(10)
def test_stop_wait():
proc = EasyProcess([python, "-c", ignore_term]).start()
time.sleep(1)
proc.sendstop().wait(timeout=1)
# On windows, Popen.terminate actually behaves like kill,
# so don't check that our hanging process code is actually hanging.
if not sys.platform.startswith("win"):
assert proc.is_alive() is True
proc.stop(kill_after=1)
assert proc.is_alive() is False
assert proc.return_code != 0
| true | true |
f73103cefcd8b17fc9dcdef65d4a5a5b4f9acc4f | 695 | py | Python | src/api/tests/test_profile_views.py | ThaDeveloper/grind | fa90b65d12e6d9b3d658b132874801ecda08c57f | [
"MIT"
] | 1 | 2019-11-06T22:26:26.000Z | 2019-11-06T22:26:26.000Z | src/api/tests/test_profile_views.py | ThaDeveloper/grind | fa90b65d12e6d9b3d658b132874801ecda08c57f | [
"MIT"
] | 5 | 2021-03-19T02:49:44.000Z | 2021-06-10T19:13:00.000Z | src/api/tests/test_profile_views.py | ThaDeveloper/grind | fa90b65d12e6d9b3d658b132874801ecda08c57f | [
"MIT"
] | null | null | null | from api.tests.base import BaseTestCase
class TestUserViews(BaseTestCase):
""" Test Profile views """
def test_get_profile(self):
""" Test can get single user profile """
self.create_user(self.new_user)
response = self.test_client().get('/api/v1/accounts/baduism/profile/')
self.assertEqual(200, response.status_code)
self.assertIn("baduism", str(response.data))
def test_get_users(self):
""" Test can get all users """
self.create_user(self.new_user)
response = self.test_client().get('/api/v1/accounts/')
self.assertEqual(200, response.status_code)
self.assertIn("baduism", str(response.data))
| 36.578947 | 78 | 0.657554 | from api.tests.base import BaseTestCase
class TestUserViews(BaseTestCase):
def test_get_profile(self):
self.create_user(self.new_user)
response = self.test_client().get('/api/v1/accounts/baduism/profile/')
self.assertEqual(200, response.status_code)
self.assertIn("baduism", str(response.data))
def test_get_users(self):
self.create_user(self.new_user)
response = self.test_client().get('/api/v1/accounts/')
self.assertEqual(200, response.status_code)
self.assertIn("baduism", str(response.data))
| true | true |
f73103d5420139f7c03250e39dd0213ce905d5f1 | 80 | py | Python | Python/01. Introduction/002. Python If-Else.py | stonehengee/HackerrankPractice | ec052e7447391e40d1919cf0b641ff5023da3da3 | [
"MIT"
] | null | null | null | Python/01. Introduction/002. Python If-Else.py | stonehengee/HackerrankPractice | ec052e7447391e40d1919cf0b641ff5023da3da3 | [
"MIT"
] | null | null | null | Python/01. Introduction/002. Python If-Else.py | stonehengee/HackerrankPractice | ec052e7447391e40d1919cf0b641ff5023da3da3 | [
"MIT"
] | null | null | null | # Problem: https://www.hackerrank.com/challenges/py-if-else/problem
# Score: 10
| 26.666667 | 67 | 0.75 | true | true | |
f731070f8dc603977540d4cf5cd5175788bed0e5 | 2,389 | py | Python | src/data_layers/ffeedbackcm100k_layer.py | emiliocuestaf/CycloRec | cf0bf39ff185f0b1ef8d1c6fe1d424a23c7716f7 | [
"MIT"
] | 3 | 2021-01-26T06:05:17.000Z | 2021-02-11T21:47:09.000Z | src/data_layers/ffeedbackcm100k_layer.py | emiliocuestaf/CycloRec | cf0bf39ff185f0b1ef8d1c6fe1d424a23c7716f7 | [
"MIT"
] | null | null | null | src/data_layers/ffeedbackcm100k_layer.py | emiliocuestaf/CycloRec | cf0bf39ff185f0b1ef8d1c6fe1d424a23c7716f7 | [
"MIT"
] | 1 | 2021-01-26T06:05:18.000Z | 2021-01-26T06:05:18.000Z | import pandas as pd
import numpy as np
#import random as rd
#from sklearn.neighbors import NearestNeighbors
#from scipy.sparse import csr_matrix
# OWN
from cyclorec.data_layers.data_layer import DataLayer
class FFeedbackCm100kLayer(DataLayer):
""" The only difference between this layer and Cm100kLayer is the feedback the algorihtms are receiving.
In the typical version, if an algorithm succeded in a recommendation it would be updated with a reward.
However, in FFeedback the algorithm will only be rewarded if the recommended item was known by the user.
"""
def __init__(self, name, test_proportion):
""" Constructor
Args:
name (str): DataLayer name
test_proportion (float): proportion of the whole set to be assigned to the test set
"""
# CUSTOMIZABLE RATING VARIABLES
rating_normalization = 'bin'
if rating_normalization == 'bin':
rating_conversor = dict({1: 0, 2: 0, 3: 1, 4: 1})
relevanceThreshold = 0.5
antiRelevanceThreshold = 0.5
else:
rating_conversor = dict({1: -1, 2: 0, 3: 1, 4: 1})
relevanceThreshold = 0.5
antiRelevanceThreshold = -0.5
# ITEMS
# index = id || item_id | url | title | artist
items = pd.read_csv("./data/cm100k/items.txt", sep="\t")
items.columns = items.columns.str.lower()
items = items.rename(columns={"item": "item_id"})
items.index = items.item_id
# ALL RATINGS
### Dataframe with ratings
# index || user | item | rating | known
ratings = pd.read_csv("./data/cm100k/ratings.txt", sep="\t", names=['user_id', 'item_id', 'rating', 'known'], header=None)
ratings.columns = ratings.columns.str.lower()
ratings['rating'] = ratings['rating'].replace(rating_conversor)
super().__init__(name, items=items, splitted=False, whole_set=ratings, test_proportion=test_proportion, \
relevance_threshold=relevanceThreshold, antiRelevance_threshold=antiRelevanceThreshold )
#override
def get_bandit_reward(self, user_id, item_id, rating):
if np.isnan(rating['rating']):
return np.nan
elif rating['known'] == 1:
return rating['rating']
else:
return np.nan
| 38.532258 | 130 | 0.626622 | import pandas as pd
import numpy as np
from cyclorec.data_layers.data_layer import DataLayer
class FFeedbackCm100kLayer(DataLayer):
def __init__(self, name, test_proportion):
rating_normalization = 'bin'
if rating_normalization == 'bin':
rating_conversor = dict({1: 0, 2: 0, 3: 1, 4: 1})
relevanceThreshold = 0.5
antiRelevanceThreshold = 0.5
else:
rating_conversor = dict({1: -1, 2: 0, 3: 1, 4: 1})
relevanceThreshold = 0.5
antiRelevanceThreshold = -0.5
items = pd.read_csv("./data/cm100k/items.txt", sep="\t")
items.columns = items.columns.str.lower()
items = items.rename(columns={"item": "item_id"})
items.index = items.item_id
m100k/ratings.txt", sep="\t", names=['user_id', 'item_id', 'rating', 'known'], header=None)
ratings.columns = ratings.columns.str.lower()
ratings['rating'] = ratings['rating'].replace(rating_conversor)
super().__init__(name, items=items, splitted=False, whole_set=ratings, test_proportion=test_proportion, \
relevance_threshold=relevanceThreshold, antiRelevance_threshold=antiRelevanceThreshold )
def get_bandit_reward(self, user_id, item_id, rating):
if np.isnan(rating['rating']):
return np.nan
elif rating['known'] == 1:
return rating['rating']
else:
return np.nan
| true | true |
f7310722bb4f9358a5e09b900096f1971cd0392f | 1,999 | py | Python | dask/cache.py | dgerlanc/dask | 627b2bc7bca9e86d938767056bc001f819824335 | [
"BSD-3-Clause"
] | 2 | 2020-11-30T14:04:19.000Z | 2021-11-08T11:29:07.000Z | dask/cache.py | dgerlanc/dask | 627b2bc7bca9e86d938767056bc001f819824335 | [
"BSD-3-Clause"
] | 37 | 2020-10-20T08:30:53.000Z | 2020-12-22T13:15:45.000Z | dask/cache.py | dgerlanc/dask | 627b2bc7bca9e86d938767056bc001f819824335 | [
"BSD-3-Clause"
] | 1 | 2019-01-31T02:44:12.000Z | 2019-01-31T02:44:12.000Z | from .callbacks import Callback
from timeit import default_timer
from numbers import Number
import sys
overhead = sys.getsizeof(1.23) * 4 + sys.getsizeof(()) * 4
class Cache(Callback):
"""Use cache for computation
Examples
--------
>>> cache = Cache(1e9) # doctest: +SKIP
The cache can be used locally as a context manager around ``compute`` or
``get`` calls:
>>> with cache: # doctest: +SKIP
... result = x.compute()
You can also register a cache globally, so that it works for all
computations:
>>> cache.register() # doctest: +SKIP
>>> cache.unregister() # doctest: +SKIP
"""
def __init__(self, cache, *args, **kwargs):
try:
import cachey
except ImportError as ex:
raise ImportError(
'Cache requires cachey, "{ex}" problem ' "importing".format(ex=str(ex))
) from ex
self._nbytes = cachey.nbytes
if isinstance(cache, Number):
cache = cachey.Cache(cache, *args, **kwargs)
else:
assert not args and not kwargs
self.cache = cache
self.starttimes = dict()
def _start(self, dsk):
self.durations = dict()
overlap = set(dsk) & set(self.cache.data)
for key in overlap:
dsk[key] = self.cache.data[key]
def _pretask(self, key, dsk, state):
self.starttimes[key] = default_timer()
def _posttask(self, key, value, dsk, state, id):
duration = default_timer() - self.starttimes[key]
deps = state["dependencies"][key]
if deps:
duration += max(self.durations.get(k, 0) for k in deps)
self.durations[key] = duration
nb = self._nbytes(value) + overhead + sys.getsizeof(key) * 4
self.cache.put(key, value, cost=duration / nb / 1e9, nbytes=nb)
def _finish(self, dsk, state, errored):
self.starttimes.clear()
self.durations.clear()
| 30.287879 | 87 | 0.578789 | from .callbacks import Callback
from timeit import default_timer
from numbers import Number
import sys
overhead = sys.getsizeof(1.23) * 4 + sys.getsizeof(()) * 4
class Cache(Callback):
def __init__(self, cache, *args, **kwargs):
try:
import cachey
except ImportError as ex:
raise ImportError(
'Cache requires cachey, "{ex}" problem ' "importing".format(ex=str(ex))
) from ex
self._nbytes = cachey.nbytes
if isinstance(cache, Number):
cache = cachey.Cache(cache, *args, **kwargs)
else:
assert not args and not kwargs
self.cache = cache
self.starttimes = dict()
def _start(self, dsk):
self.durations = dict()
overlap = set(dsk) & set(self.cache.data)
for key in overlap:
dsk[key] = self.cache.data[key]
def _pretask(self, key, dsk, state):
self.starttimes[key] = default_timer()
def _posttask(self, key, value, dsk, state, id):
duration = default_timer() - self.starttimes[key]
deps = state["dependencies"][key]
if deps:
duration += max(self.durations.get(k, 0) for k in deps)
self.durations[key] = duration
nb = self._nbytes(value) + overhead + sys.getsizeof(key) * 4
self.cache.put(key, value, cost=duration / nb / 1e9, nbytes=nb)
def _finish(self, dsk, state, errored):
self.starttimes.clear()
self.durations.clear()
| true | true |
f73108a0572bbd0f825ddaada87e96d987ccfd6b | 8,856 | py | Python | auxiliary/model.py | hellolele/PoseFromShape | b7cd6fc7eab5be7710e34557504c192d36c35000 | [
"MIT"
] | 2 | 2019-07-12T14:10:37.000Z | 2019-07-12T14:10:39.000Z | auxiliary/model.py | tim885/PoseFromShape | 7daf9e4889af065861d2719cd2bca2de8a45d185 | [
"MIT"
] | null | null | null | auxiliary/model.py | tim885/PoseFromShape | 7daf9e4889af065861d2719cd2bca2de8a45d185 | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
import torch.nn.parallel
import torch.utils.data
from torch.autograd import Variable
import resnet
# ============================================================================ #
# Baseline network #
# ============================================================================ #
class BaselineEstimator(nn.Module):
"""Pose estimator using image feature with shape feature
Arguments:
img_feature_dim: output feature dimension for image
pretrained_resnet: use the ResNet pretrained on ImageNet if True
Return:
Three angle bin classification probability with a delta value regression for each bin
"""
def __init__(self, img_feature_dim=1024, separate_branch=False,
azi_classes=24, ele_classes=12, inp_classes=24, pretrained_resnet=False):
super(BaselineEstimator, self).__init__()
# RGB image encoder
self.img_encoder = resnet.resnet18(pretrained=pretrained_resnet, num_classes=img_feature_dim)
self.compress = nn.Sequential(nn.Linear(img_feature_dim, 800), nn.BatchNorm1d(800), nn.ReLU(inplace=True),
nn.Linear(800, 400), nn.BatchNorm1d(400), nn.ReLU(inplace=True),
nn.Linear(400, 200), nn.BatchNorm1d(200), nn.ReLU(inplace=True))
self.separate_branch = separate_branch
# separate branch for classification and regression
if separate_branch:
self.compress_delta = nn.Sequential(nn.Linear(img_feature_dim, 800), nn.BatchNorm1d(800), nn.ReLU(inplace=True),
nn.Linear(800, 400), nn.BatchNorm1d(400), nn.ReLU(inplace=True),
nn.Linear(400, 200), nn.BatchNorm1d(200), nn.ReLU(inplace=True))
self.fc_cls_azi = nn.Linear(200, azi_classes)
self.fc_cls_ele = nn.Linear(200, ele_classes)
self.fc_cls_inp = nn.Linear(200, inp_classes)
self.fc_reg_azi = nn.Linear(200, azi_classes)
self.fc_reg_ele = nn.Linear(200, ele_classes)
self.fc_reg_inp = nn.Linear(200, inp_classes)
def forward(self, im):
# pass the image through image encoder
img_feature = self.img_encoder(im)
# concatenate the features obtained from two encoders into one feature
x = self.compress(img_feature)
cls_azi = self.fc_cls_azi(x)
cls_ele = self.fc_cls_ele(x)
cls_inp = self.fc_cls_inp(x)
# use the shared features if share branch
x_delta = self.compress_delta(img_feature) if self.separate_branch else x
reg_azi = self.fc_reg_azi(x_delta)
reg_ele = self.fc_reg_ele(x_delta)
reg_inp = self.fc_reg_inp(x_delta)
return [cls_azi, cls_ele, cls_inp, reg_azi, reg_ele, reg_inp]
# ============================================================================ #
# Proposed network #
# ============================================================================ #
class ShapeEncoderMV(nn.Module):
"""Shape Encoder using rendering images under multiple views
Arguments:
feature_dim: output feature dimension for each rendering image
channels: 3 for normal rendering image, 4 for normal map with depth map, and 3*12 channels for concatenating
pretrained_resnet: use the ResNet pretrained on ImageNet if True
Return:
A tensor of size NxC, where N is the batch size and C is the feature_dim
"""
def __init__(self, feature_dim=256, channels=3, pretrained_resnet=False):
super(ShapeEncoderMV, self).__init__()
self.render_encoder = resnet.resnet18(input_channel=channels, num_classes=feature_dim, pretrained=pretrained_resnet)
def forward(self, renders):
# reshape render images from dimension N*K*C*H*W to (N*K)*C*H*W
N, K, C, H, W = renders.size()
renders = renders.view(N*K, C, H, W)
# pass the encoder and reshape render features from dimension (N*K)*D1 to N*(K*D1)
render_feature = self.render_encoder(renders)
render_feature = render_feature.view(N, -1)
return render_feature
class ShapeEncoderPC(nn.Module):
"""Shape Encoder using point cloud TO BE MODIFIED
"""
def __init__(self, feature_dim=256, channels=3, pretrained_resnet=False):
super(ShapeEncoderPC, self).__init__()
self.pc_encoder = resnet.resnet18(input_channel=channels, num_classes=feature_dim, pretrained=pretrained_resnet)
def forward(self, shapes):
shape_feature = self.pc_encoder(shapes)
return shape_feature
class PoseEstimator(nn.Module):
"""Pose estimator using image feature with shape feature
Arguments:
img_feature_dim: output feature dimension for image
shape_feature_dim: output feature dimension for shape
shape: shape representation in PointCloud or MultiView
channels: channel number for multi-view encoder
pretrained_resnet: use the ResNet pretrained on ImageNet if True
Return:
Three angle bin classification probability with a delta value regression for each bin
"""
def __init__(self, render_number=12, img_feature_dim=1024, shape_feature_dim=256, channels=3, separate_branch=False,
azi_classes=24, ele_classes=12, inp_classes=24, pretrained_resnet=False, shape='PointCloud'):
super(PoseEstimator, self).__init__()
# 3D shape encoder
if shape == 'PointCloud':
self.shape_encoder = ShapeEncoderPC()
else:
self.shape_encoder = ShapeEncoderMV(feature_dim=shape_feature_dim, channels=channels, pretrained_resnet=pretrained_resnet)
shape_feature_dim = shape_feature_dim * render_number if shape != 'PointCloud' else shape_feature_dim
# RGB image encoder
self.img_encoder = resnet.resnet18(pretrained=pretrained_resnet, num_classes=img_feature_dim)
self.compress = nn.Sequential(nn.Linear(shape_feature_dim + img_feature_dim, 800),
nn.BatchNorm1d(800), nn.ReLU(inplace=True),
nn.Linear(800, 400), nn.BatchNorm1d(400), nn.ReLU(inplace=True),
nn.Linear(400, 200), nn.BatchNorm1d(200), nn.ReLU(inplace=True))
self.separate_branch = separate_branch
# separate branch for classification and regression
if separate_branch:
self.compress_delta = nn.Sequential(nn.Linear(shape_feature_dim + img_feature_dim, 800),
nn.BatchNorm1d(800), nn.ReLU(inplace=True),
nn.Linear(800, 400), nn.BatchNorm1d(400), nn.ReLU(inplace=True),
nn.Linear(400, 200), nn.BatchNorm1d(200), nn.ReLU(inplace=True))
self.fc_cls_azi = nn.Linear(200, azi_classes)
self.fc_cls_ele = nn.Linear(200, ele_classes)
self.fc_cls_inp = nn.Linear(200, inp_classes)
self.fc_reg_azi = nn.Linear(200, azi_classes)
self.fc_reg_ele = nn.Linear(200, ele_classes)
self.fc_reg_inp = nn.Linear(200, inp_classes)
def forward(self, im, shape):
# pass the image through image encoder
img_feature = self.img_encoder(im)
# pass the shape through shape encoder
shape_feature = self.shape_encoder(shape)
# concatenate the features obtained from two encoders into one feature
global_feature = torch.cat((shape_feature, img_feature), 1)
x = self.compress(global_feature)
cls_azi = self.fc_cls_azi(x)
cls_ele = self.fc_cls_ele(x)
cls_inp = self.fc_cls_inp(x)
# use the shared features if share branch
x_delta = self.compress_delta(global_feature) if self.separate_branch else x
reg_azi = self.fc_reg_azi(x_delta)
reg_ele = self.fc_reg_ele(x_delta)
reg_inp = self.fc_reg_inp(x_delta)
return [cls_azi, cls_ele, cls_inp, reg_azi, reg_ele, reg_inp]
if __name__ == '__main__':
print('test model')
sim_im = Variable(torch.rand(4, 3, 224, 224))
sim_renders = Variable(torch.rand(4, 12, 3, 224, 224))
sim_im = sim_im.cuda()
sim_renders = sim_renders.cuda()
#model = PoseEstimator(shape='MultiView', separate_branch=False)
model = BaselineEstimator(separate_branch=False, pretrained_resnet=False)
model.cuda()
#cls_azi, cls_ele, cls_inp, reg_azi, reg_ele, reg_inp = model(sim_im, sim_renders)
cls_azi, cls_ele, cls_inp, reg_azi, reg_ele, reg_inp = model(sim_im)
print(cls_azi.size(), cls_ele.size(), cls_inp.size(), reg_azi.size(), reg_ele.size(), reg_inp.size())
| 47.106383 | 134 | 0.630872 | import torch
import torch.nn as nn
import torch.nn.parallel
import torch.utils.data
from torch.autograd import Variable
import resnet
class BaselineEstimator(nn.Module):
def __init__(self, img_feature_dim=1024, separate_branch=False,
azi_classes=24, ele_classes=12, inp_classes=24, pretrained_resnet=False):
super(BaselineEstimator, self).__init__()
self.img_encoder = resnet.resnet18(pretrained=pretrained_resnet, num_classes=img_feature_dim)
self.compress = nn.Sequential(nn.Linear(img_feature_dim, 800), nn.BatchNorm1d(800), nn.ReLU(inplace=True),
nn.Linear(800, 400), nn.BatchNorm1d(400), nn.ReLU(inplace=True),
nn.Linear(400, 200), nn.BatchNorm1d(200), nn.ReLU(inplace=True))
self.separate_branch = separate_branch
if separate_branch:
self.compress_delta = nn.Sequential(nn.Linear(img_feature_dim, 800), nn.BatchNorm1d(800), nn.ReLU(inplace=True),
nn.Linear(800, 400), nn.BatchNorm1d(400), nn.ReLU(inplace=True),
nn.Linear(400, 200), nn.BatchNorm1d(200), nn.ReLU(inplace=True))
self.fc_cls_azi = nn.Linear(200, azi_classes)
self.fc_cls_ele = nn.Linear(200, ele_classes)
self.fc_cls_inp = nn.Linear(200, inp_classes)
self.fc_reg_azi = nn.Linear(200, azi_classes)
self.fc_reg_ele = nn.Linear(200, ele_classes)
self.fc_reg_inp = nn.Linear(200, inp_classes)
def forward(self, im):
img_feature = self.img_encoder(im)
x = self.compress(img_feature)
cls_azi = self.fc_cls_azi(x)
cls_ele = self.fc_cls_ele(x)
cls_inp = self.fc_cls_inp(x)
x_delta = self.compress_delta(img_feature) if self.separate_branch else x
reg_azi = self.fc_reg_azi(x_delta)
reg_ele = self.fc_reg_ele(x_delta)
reg_inp = self.fc_reg_inp(x_delta)
return [cls_azi, cls_ele, cls_inp, reg_azi, reg_ele, reg_inp]
class ShapeEncoderMV(nn.Module):
def __init__(self, feature_dim=256, channels=3, pretrained_resnet=False):
super(ShapeEncoderMV, self).__init__()
self.render_encoder = resnet.resnet18(input_channel=channels, num_classes=feature_dim, pretrained=pretrained_resnet)
def forward(self, renders):
N, K, C, H, W = renders.size()
renders = renders.view(N*K, C, H, W)
render_feature = self.render_encoder(renders)
render_feature = render_feature.view(N, -1)
return render_feature
class ShapeEncoderPC(nn.Module):
def __init__(self, feature_dim=256, channels=3, pretrained_resnet=False):
super(ShapeEncoderPC, self).__init__()
self.pc_encoder = resnet.resnet18(input_channel=channels, num_classes=feature_dim, pretrained=pretrained_resnet)
def forward(self, shapes):
shape_feature = self.pc_encoder(shapes)
return shape_feature
class PoseEstimator(nn.Module):
def __init__(self, render_number=12, img_feature_dim=1024, shape_feature_dim=256, channels=3, separate_branch=False,
azi_classes=24, ele_classes=12, inp_classes=24, pretrained_resnet=False, shape='PointCloud'):
super(PoseEstimator, self).__init__()
if shape == 'PointCloud':
self.shape_encoder = ShapeEncoderPC()
else:
self.shape_encoder = ShapeEncoderMV(feature_dim=shape_feature_dim, channels=channels, pretrained_resnet=pretrained_resnet)
shape_feature_dim = shape_feature_dim * render_number if shape != 'PointCloud' else shape_feature_dim
self.img_encoder = resnet.resnet18(pretrained=pretrained_resnet, num_classes=img_feature_dim)
self.compress = nn.Sequential(nn.Linear(shape_feature_dim + img_feature_dim, 800),
nn.BatchNorm1d(800), nn.ReLU(inplace=True),
nn.Linear(800, 400), nn.BatchNorm1d(400), nn.ReLU(inplace=True),
nn.Linear(400, 200), nn.BatchNorm1d(200), nn.ReLU(inplace=True))
self.separate_branch = separate_branch
if separate_branch:
self.compress_delta = nn.Sequential(nn.Linear(shape_feature_dim + img_feature_dim, 800),
nn.BatchNorm1d(800), nn.ReLU(inplace=True),
nn.Linear(800, 400), nn.BatchNorm1d(400), nn.ReLU(inplace=True),
nn.Linear(400, 200), nn.BatchNorm1d(200), nn.ReLU(inplace=True))
self.fc_cls_azi = nn.Linear(200, azi_classes)
self.fc_cls_ele = nn.Linear(200, ele_classes)
self.fc_cls_inp = nn.Linear(200, inp_classes)
self.fc_reg_azi = nn.Linear(200, azi_classes)
self.fc_reg_ele = nn.Linear(200, ele_classes)
self.fc_reg_inp = nn.Linear(200, inp_classes)
def forward(self, im, shape):
img_feature = self.img_encoder(im)
shape_feature = self.shape_encoder(shape)
global_feature = torch.cat((shape_feature, img_feature), 1)
x = self.compress(global_feature)
cls_azi = self.fc_cls_azi(x)
cls_ele = self.fc_cls_ele(x)
cls_inp = self.fc_cls_inp(x)
x_delta = self.compress_delta(global_feature) if self.separate_branch else x
reg_azi = self.fc_reg_azi(x_delta)
reg_ele = self.fc_reg_ele(x_delta)
reg_inp = self.fc_reg_inp(x_delta)
return [cls_azi, cls_ele, cls_inp, reg_azi, reg_ele, reg_inp]
if __name__ == '__main__':
print('test model')
sim_im = Variable(torch.rand(4, 3, 224, 224))
sim_renders = Variable(torch.rand(4, 12, 3, 224, 224))
sim_im = sim_im.cuda()
sim_renders = sim_renders.cuda()
model = BaselineEstimator(separate_branch=False, pretrained_resnet=False)
model.cuda()
cls_azi, cls_ele, cls_inp, reg_azi, reg_ele, reg_inp = model(sim_im)
print(cls_azi.size(), cls_ele.size(), cls_inp.size(), reg_azi.size(), reg_ele.size(), reg_inp.size())
| true | true |
f73109ae32ef645c13d5883015d019e998ed1a8c | 1,758 | py | Python | assignment1/learners/ANN.py | prestononeal/CS-7641-assignments | c3a6815ba1be837084c60c3dd0dc8e8e702aa9b7 | [
"MIT"
] | 148 | 2018-12-18T21:14:04.000Z | 2022-03-04T09:13:21.000Z | assignment1/learners/ANN.py | prestononeal/CS-7641-assignments | c3a6815ba1be837084c60c3dd0dc8e8e702aa9b7 | [
"MIT"
] | 22 | 2019-01-20T00:11:06.000Z | 2021-05-01T17:21:58.000Z | assignment1/learners/ANN.py | prestononeal/CS-7641-assignments | c3a6815ba1be837084c60c3dd0dc8e8e702aa9b7 | [
"MIT"
] | 172 | 2019-01-09T06:01:54.000Z | 2022-03-25T22:53:19.000Z | from sklearn import neural_network
import learners
class ANNLearner(learners.BaseLearner):
    """Artificial neural network learner.

    Thin wrapper that exposes scikit-learn's ``MLPClassifier`` through the
    project's ``BaseLearner`` interface; all constructor arguments are
    forwarded unchanged to the underlying classifier.
    """

    def __init__(self,
                 hidden_layer_sizes=(100,),
                 activation="relu",
                 solver='adam',
                 alpha=0.0001,
                 batch_size='auto',
                 learning_rate="constant",
                 learning_rate_init=0.001,
                 power_t=0.5,
                 max_iter=200,
                 shuffle=True,
                 random_state=None,
                 tol=1e-4,
                 verbose=False,
                 warm_start=False,
                 momentum=0.9,
                 nesterovs_momentum=True,
                 early_stopping=False,
                 validation_fraction=0.1,
                 beta_1=0.9,
                 beta_2=0.999,
                 epsilon=1e-8,
                 ):
        super().__init__(verbose)
        # Collect the MLP hyper-parameters once, then hand them to sklearn
        # in a single splat -- keeps the mapping between our signature and
        # sklearn's obvious.
        mlp_params = dict(
            hidden_layer_sizes=hidden_layer_sizes,
            activation=activation,
            solver=solver,
            alpha=alpha,
            batch_size=batch_size,
            learning_rate=learning_rate,
            learning_rate_init=learning_rate_init,
            power_t=power_t,
            max_iter=max_iter,
            shuffle=shuffle,
            random_state=random_state,
            tol=tol,
            verbose=verbose,
            warm_start=warm_start,
            momentum=momentum,
            nesterovs_momentum=nesterovs_momentum,
            early_stopping=early_stopping,
            validation_fraction=validation_fraction,
            beta_1=beta_1,
            beta_2=beta_2,
            epsilon=epsilon,
        )
        self._learner = neural_network.MLPClassifier(**mlp_params)

    def learner(self):
        """Return the wrapped scikit-learn estimator."""
        return self._learner
| 30.842105 | 53 | 0.512514 | from sklearn import neural_network
import learners
class ANNLearner(learners.BaseLearner):
    """Artificial neural network learner wrapping sklearn's ``MLPClassifier``.

    Every constructor argument is forwarded unchanged to the underlying
    scikit-learn classifier; defaults mirror sklearn's own defaults.
    """

    def __init__(self,
                 hidden_layer_sizes=(100,),
                 activation="relu",
                 solver='adam',
                 alpha=0.0001,
                 batch_size='auto',
                 learning_rate="constant",
                 learning_rate_init=0.001,
                 power_t=0.5,
                 max_iter=200,
                 shuffle=True,
                 random_state=None,
                 tol=1e-4,
                 verbose=False,
                 warm_start=False,
                 momentum=0.9,
                 nesterovs_momentum=True,
                 early_stopping=False,
                 validation_fraction=0.1,
                 beta_1=0.9,
                 beta_2=0.999,
                 epsilon=1e-8,
                 ):
        super().__init__(verbose)
        # Pass-through construction: no hyper-parameter is altered here.
        self._learner = neural_network.MLPClassifier(
            hidden_layer_sizes=hidden_layer_sizes,
            activation=activation,
            solver=solver,
            alpha=alpha,
            batch_size=batch_size,
            learning_rate=learning_rate,
            learning_rate_init=learning_rate_init,
            power_t=power_t,
            max_iter=max_iter,
            shuffle=shuffle,
            random_state=random_state,
            tol=tol,
            verbose=verbose,
            warm_start=warm_start,
            momentum=momentum,
            nesterovs_momentum=nesterovs_momentum,
            early_stopping=early_stopping,
            validation_fraction=validation_fraction,
            beta_1=beta_1,
            beta_2=beta_2,
            epsilon=epsilon
        )

    def learner(self):
        """Return the wrapped scikit-learn estimator."""
        return self._learner
| true | true |
f73109e94e405161ed8aebf68ae23f9e76eb1142 | 26,888 | py | Python | lib/pybtex-0.19/pybtex/database/__init__.py | cabeen/bibfmt | a2607506f15249f8e0ee900db103d57afec7dec8 | [
"MIT"
] | 1 | 2021-02-20T19:53:48.000Z | 2021-02-20T19:53:48.000Z | lib/pybtex-0.19/pybtex/database/__init__.py | cabeen/bibfmt | a2607506f15249f8e0ee900db103d57afec7dec8 | [
"MIT"
] | null | null | null | lib/pybtex-0.19/pybtex/database/__init__.py | cabeen/bibfmt | a2607506f15249f8e0ee900db103d57afec7dec8 | [
"MIT"
] | null | null | null | # vim: fileencoding=utf-8
# Copyright (c) 2006, 2007, 2008, 2009, 2010, 2011, 2012 Andrey Golovizin
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import re
from collections import Mapping
from pybtex.plugin import find_plugin
from pybtex.exceptions import PybtexError
from pybtex.utils import (
deprecated,
OrderedCaseInsensitiveDict, CaseInsensitiveDefaultDict, CaseInsensitiveSet
)
from pybtex.bibtex.utils import split_tex_string, scan_bibtex_string
from pybtex.errors import report_error
class BibliographyDataError(PybtexError):
    """Raised for inconsistent bibliography data (duplicate keys, bad crossrefs)."""
    pass
class InvalidNameString(PybtexError):
    """Raised when a person name string has more comma-separated parts than BibTeX allows."""

    def __init__(self, name_string):
        # A BibTeX name has at most three comma-separated parts
        # ("von Last, Jr, First"), so more than two commas is an error.
        message = 'Too many commas in {}'.format(repr(name_string))
        super(InvalidNameString, self).__init__(message)
class BibliographyData(object):
    """A collection of bibliography entries plus an optional LaTeX preamble."""

    def __init__(self, entries=None, preamble=None, wanted_entries=None, min_crossrefs=2):
        """
        A :py:class:`.BibliographyData` object contains a dictionary of bibliography
        entries referenced by their keys.
        Each entry represented by an :py:class:`.Entry` object.

        Additionally, :py:class:`.BibliographyData` may contain a LaTeX
        preamble defined by ``@PREAMBLE`` commands in the BibTeX file.

        :param entries: A mapping or an iterable of ``(key, entry)`` pairs.
        :param preamble: An iterable of LaTeX preamble chunks.
        :param wanted_entries: When given, only these keys (or everything,
            if ``'*'`` is present) are accepted by :py:meth:`add_entry`.
        :param min_crossrefs: Threshold used when expanding cross-references.
        """
        self.entries = OrderedCaseInsensitiveDict()
        '''A dictionary of bibliography entries referenced by their keys.
        The dictionary is case insensitive:

        >>> bib_data = parse_string("""
        ...     @ARTICLE{gnats,
        ...         author = {L[eslie] A. Aamport},
        ...         title = {The Gnats and Gnus Document Preparation System},
        ...     }
        ... """, 'bibtex')
        >>> bib_data.entries['gnats'] == bib_data.entries['GNATS']
        True
        '''

        self.crossref_count = CaseInsensitiveDefaultDict(int)
        self.min_crossrefs = min_crossrefs
        self._preamble = []
        if wanted_entries is not None:
            self.wanted_entries = CaseInsensitiveSet(wanted_entries)
            self.citations = CaseInsensitiveSet(wanted_entries)
        else:
            # wanted_entries is None means "accept every entry".
            self.wanted_entries = None
            self.citations = CaseInsensitiveSet()
        if entries:
            if isinstance(entries, Mapping):
                entries = entries.iteritems()
            for (key, entry) in entries:
                self.add_entry(key, entry)
        if preamble:
            self._preamble.extend(preamble)

    def __eq__(self, other):
        if not isinstance(other, BibliographyData):
            return super(BibliographyData, self) == other
        return (
            self.entries == other.entries
            and self._preamble == other._preamble
        )

    def __repr__(self):
        return 'BibliographyData(entries={entries}, preamble={preamble})'.format(
            entries=repr(self.entries),
            preamble=repr(self._preamble),
        )

    def add_to_preamble(self, *values):
        """Append one or more chunks to the LaTeX preamble."""
        self._preamble.extend(values)

    @property
    def preamble(self):
        r'''
        LaTeX preamble.

        >>> bib_data = parse_string(r"""
        ...     @PREAMBLE{"\newcommand{\noopsort}[1]{}"}
        ... """, 'bibtex')
        >>> print bib_data.preamble
        \newcommand{\noopsort}[1]{}

        .. versionadded:: 0.19
            Earlier versions used :py:meth:`.get_preamble()`, which is now deprecated.
        '''
        return ''.join(self._preamble)

    @deprecated('0.19', 'use BibliographyData.preamble instead')
    def get_preamble(self):
        """
        .. deprecated:: 0.19
            Use :py:attr:`.preamble` instead.
        """
        return self.preamble

    def want_entry(self, key):
        """Return True if an entry with this key should be kept."""
        return (
            self.wanted_entries is None
            or key in self.wanted_entries
            or '*' in self.wanted_entries
        )

    def get_canonical_key(self, key):
        # Prefer the spelling used in the citation list, so case differences
        # between \cite commands and the database collapse to one key.
        if key in self.citations:
            return self.citations.get_canonical_key(key)
        else:
            return key

    def add_entry(self, key, entry):
        """Add an entry under ``key``, skipping unwanted or duplicate keys."""
        if not self.want_entry(key):
            return
        if key in self.entries:
            # NOTE(review): "bibliograhpy" typo preserved from the original
            # runtime message text.
            report_error(BibliographyDataError('repeated bibliograhpy entry: %s' % key))
            return
        entry.collection = self
        entry.key = self.get_canonical_key(key)
        self.entries[entry.key] = entry
        try:
            crossref = entry.fields['crossref']
        except KeyError:
            pass
        else:
            # A wanted entry makes its crossref target wanted as well.
            if self.wanted_entries is not None:
                self.wanted_entries.add(crossref)

    def add_entries(self, entries):
        """Add ``(key, entry)`` pairs one by one via :py:meth:`add_entry`."""
        for key, entry in entries:
            self.add_entry(key, entry)

    def _get_crossreferenced_citations(self, citations, min_crossrefs):
        """
        Get cititations not cited explicitly but referenced by other citations.

        >>> from pybtex.database import Entry
        >>> data = BibliographyData({
        ...     'main_article': Entry('article', {'crossref': 'xrefd_arcicle'}),
        ...     'xrefd_arcicle': Entry('article'),
        ... })
        >>> list(data._get_crossreferenced_citations([], min_crossrefs=1))
        []
        >>> list(data._get_crossreferenced_citations(['main_article'], min_crossrefs=1))
        ['xrefd_arcicle']
        >>> list(data._get_crossreferenced_citations(['Main_article'], min_crossrefs=1))
        ['xrefd_arcicle']
        >>> list(data._get_crossreferenced_citations(['main_article'], min_crossrefs=2))
        []
        >>> list(data._get_crossreferenced_citations(['xrefd_arcicle'], min_crossrefs=1))
        []
        >>> data2 = BibliographyData(data.entries, wanted_entries=data.entries.keys())
        >>> list(data2._get_crossreferenced_citations([], min_crossrefs=1))
        []
        >>> list(data2._get_crossreferenced_citations(['main_article'], min_crossrefs=1))
        ['xrefd_arcicle']
        >>> list(data2._get_crossreferenced_citations(['Main_article'], min_crossrefs=1))
        ['xrefd_arcicle']
        >>> list(data2._get_crossreferenced_citations(['main_article'], min_crossrefs=2))
        []
        >>> list(data2._get_crossreferenced_citations(['xrefd_arcicle'], min_crossrefs=1))
        []
        >>> list(data2._get_crossreferenced_citations(['xrefd_arcicle'], min_crossrefs=1))
        []
        """
        crossref_count = CaseInsensitiveDefaultDict(int)
        citation_set = CaseInsensitiveSet(citations)
        for citation in citations:
            try:
                entry = self.entries[citation]
                crossref = entry.fields['crossref']
            except KeyError:
                # Entry missing or has no crossref field: nothing to expand.
                continue
            try:
                crossref_entry = self.entries[crossref]
            except KeyError:
                report_error(BibliographyDataError(
                    'bad cross-reference: entry "{key}" refers to '
                    'entry "{crossref}" which does not exist.'.format(
                        key=citation, crossref=crossref,
                    )
                ))
                continue

            canonical_crossref = crossref_entry.key
            crossref_count[canonical_crossref] += 1
            # Only yield a target once it has been referenced often enough
            # and is not already cited explicitly (BibTeX min_crossrefs rule).
            if crossref_count[canonical_crossref] >= min_crossrefs and canonical_crossref not in citation_set:
                citation_set.add(canonical_crossref)
                yield canonical_crossref

    def _expand_wildcard_citations(self, citations):
        """
        Expand wildcard citations (\citation{*} in .aux file).

        >>> from pybtex.database import Entry
        >>> data = BibliographyData((
        ...     ('uno', Entry('article')),
        ...     ('dos', Entry('article')),
        ...     ('tres', Entry('article')),
        ...     ('cuatro', Entry('article')),
        ... ))
        >>> list(data._expand_wildcard_citations([]))
        []
        >>> list(data._expand_wildcard_citations(['*']))
        ['uno', 'dos', 'tres', 'cuatro']
        >>> list(data._expand_wildcard_citations(['uno', '*']))
        ['uno', 'dos', 'tres', 'cuatro']
        >>> list(data._expand_wildcard_citations(['dos', '*']))
        ['dos', 'uno', 'tres', 'cuatro']
        >>> list(data._expand_wildcard_citations(['*', 'uno']))
        ['uno', 'dos', 'tres', 'cuatro']
        >>> list(data._expand_wildcard_citations(['*', 'DOS']))
        ['uno', 'dos', 'tres', 'cuatro']
        """
        # citation_set deduplicates case-insensitively while preserving the
        # first-seen order of the yielded keys.
        citation_set = CaseInsensitiveSet()
        for citation in citations:
            if citation == '*':
                for key in self.entries:
                    if key not in citation_set:
                        citation_set.add(key)
                        yield key
            else:
                if citation not in citation_set:
                    citation_set.add(citation)
                    yield citation

    def add_extra_citations(self, citations, min_crossrefs):
        """Return citations with wildcards expanded and crossrefs appended."""
        expanded_citations = list(self._expand_wildcard_citations(citations))
        crossrefs = list(self._get_crossreferenced_citations(expanded_citations, min_crossrefs))
        return expanded_citations + crossrefs

    def to_string(self, bib_format, **kwargs):
        """
        Return the data as a unicode string in the given format.

        :param bib_format: Data format ("bibtex", "yaml", etc.).

        .. versionadded:: 0.19
        """
        writer = find_plugin('pybtex.database.output', bib_format)(**kwargs)
        return writer.to_string(self)

    def to_bytes(self, bib_format, **kwargs):
        """
        Return the data as a byte string in the given format.

        :param bib_format: Data format ("bibtex", "yaml", etc.).

        .. versionadded:: 0.19
        """
        writer = find_plugin('pybtex.database.output', bib_format)(**kwargs)
        return writer.to_bytes(self)

    def to_file(self, file, bib_format=None, **kwargs):
        """
        Save the data to a file.

        :param file: A file name or a file-like object.
        :param bib_format: Data format ("bibtex", "yaml", etc.).
            If not specified, Pybtex will try to guess by the file name.

        .. versionadded:: 0.19
        """
        # The filename (if any) lets find_plugin guess the format by suffix.
        if isinstance(file, basestring):
            filename = file
        else:
            filename = getattr(file, 'name', None)
        writer = find_plugin('pybtex.database.output', bib_format, filename=filename)(**kwargs)
        return writer.write_file(self, file)

    def lower(self):
        u'''
        Return another :py:class:`.BibliographyData` with all identifiers converted to lowercase.

        >>> data = parse_string("""
        ...     @BOOK{Obrazy,
        ...         title = "Obrazy z Rus",
        ...         author = "Karel Havlíček Borovský",
        ...     }
        ...     @BOOK{Elegie,
        ...         title = "Tirolské elegie",
        ...         author = "Karel Havlíček Borovský",
        ...     }
        ... """, 'bibtex')
        >>> data_lower = data.lower()
        >>> data_lower.entries.keys()
        ['obrazy', 'elegie']
        >>> for entry in data_lower.entries.values():
        ...     entry.key
        ...     entry.persons.keys()
        ...     entry.fields.keys()
        'obrazy'
        ['author']
        ['title']
        'elegie'
        ['author']
        ['title']
        '''
        entries_lower = ((key.lower(), entry.lower()) for key, entry in self.entries.iteritems())
        return type(self)(
            entries=entries_lower,
            preamble=self._preamble,
            wanted_entries=self.wanted_entries,
            min_crossrefs=self.min_crossrefs,
        )
class FieldDict(OrderedCaseInsensitiveDict):
    """Entry field dictionary with BibTeX-style fallbacks.

    A missing key is resolved from the parent entry's person lists
    (joined with ``' and '``) or, failing that, from the cross-referenced
    entry's fields. Raises ``KeyError`` when neither fallback applies.
    """

    def __init__(self, parent, *args, **kwargs):
        self.parent = parent
        super(FieldDict, self).__init__(*args, **kwargs)

    def __getitem__(self, key):
        try:
            return super(FieldDict, self).__getitem__(key)
        except KeyError:
            if key in self.parent.persons:
                # Render a person list the way BibTeX expects: "A and B".
                persons = self.parent.persons[key]
                return ' and '.join(unicode(person) for person in persons)
            elif 'crossref' in self:
                # Fall back to the cross-referenced entry's field.
                return self.parent.get_crossref().fields[key]
            else:
                raise KeyError(key)

    def lower(self):
        """Return a copy of this dict with lowercased keys."""
        # BUG FIX: removed the dead ``lower_dict = super(...).lower()`` call;
        # its result was discarded, so it only did useless work. Building the
        # copy from iteritems_lower() is what actually lowercases the keys.
        return type(self)(self.parent, self.iteritems_lower())
class Entry(object):
    """A bibliography entry."""

    key = None
    """Entry key (for example, ``'fukushima1980neocognitron'``)."""

    def __init__(self, type_, fields=None, persons=None, collection=None):
        """
        :param type_: Entry type; stored lowercased in :py:attr:`type`.
        :param fields: Optional mapping of field names to values.
        :param persons: Optional mapping of roles to person lists.
        :param collection: The owning :py:class:`.BibliographyData`, if any.
        """
        if fields is None:
            fields = {}
        if persons is None:
            persons = {}
        self.type = type_.lower()
        """Entry type (``'book'``, ``'article'``, etc.)."""

        self.original_type = type_
        self.fields = FieldDict(self, fields)
        """A dictionary of entry fields.
        The dictionary is ordered and case-insensitive."""

        self.persons = OrderedCaseInsensitiveDict(persons)
        """A dictionary of entry persons, by their roles.
        The most often used roles are ``'author'`` and ``'editor'``.
        """

        self.collection = collection

        # for BibTeX interpreter
        self.vars = {}

    def __eq__(self, other):
        if not isinstance(other, Entry):
            return super(Entry, self) == other
        return (
            self.type == other.type
            and self.fields == other.fields
            and self.persons == other.persons
        )

    def __repr__(self):
        # representing fields as FieldDict causes problems with representing
        # fields.parent, so represent it as a list of tuples
        repr_fields = repr(self.fields.items())
        return 'Entry({type_}, fields={fields}, persons={persons})'.format(
            type_=repr(self.type),
            fields=repr_fields,
            persons=repr(self.persons),
        )

    def get_crossref(self):
        """Return the entry referenced by this entry's ``crossref`` field."""
        return self.collection.entries[self.fields['crossref']]

    def add_person(self, person, role):
        """Append ``person`` to the list of persons with the given role."""
        self.persons.setdefault(role, []).append(person)

    def lower(self):
        """Return a copy of this entry with lowercased field/person keys."""
        return type(self)(
            self.type,
            fields=self.fields.lower(),
            persons=self.persons.lower(),
            collection=self.collection,
        )
class Person(object):
    """A person or some other person-like entity.

    >>> knuth = Person('Donald E. Knuth')
    >>> knuth.first_names
    ['Donald']
    >>> knuth.middle_names
    ['E.']
    >>> knuth.last_names
    ['Knuth']
    """
    valid_roles = ['author', 'editor']
    # NOTE(review): these two patterns appear unused within this class;
    # kept for backward compatibility with external users.
    style1_re = re.compile('^(.+),\s*(.+)$')
    style2_re = re.compile('^(.+),\s*(.+),\s*(.+)$')

    def __init__(self, string="", first="", middle="", prelast="", last="", lineage=""):
        """
        :param string: The full name string.
            It will be parsed and split into separate first, last, middle,
            pre-last and lineage name parst.

            Supported name formats are:

            - von Last, First
            - von Last, Jr, First
            - First von Last

            (see BibTeX manual for explanation)
        """
        self.first_names = []
        """
        A list of first names.

        .. versionadded:: 0.19
            Earlier versions used :py:meth:`.first`, which is now deprecated.
        """

        self.middle_names = []
        """
        A list of middle names.

        .. versionadded:: 0.19
            Earlier versions used :py:meth:`.middle`, which is now deprecated.
        """

        self.prelast_names = []
        """
        A list of pre-last (aka von) name parts.

        .. versionadded:: 0.19
            Earlier versions used :py:meth:`.middle`, which is now deprecated.
        """

        self.last_names = []
        """
        A list of last names.

        .. versionadded:: 0.19
            Earlier versions used :py:meth:`.last`, which is now deprecated.
        """

        self.lineage_names = []
        """
        A list of linage (aka Jr) name parts.

        .. versionadded:: 0.19
            Earlier versions used :py:meth:`.lineage`, which is now deprecated.
        """

        string = string.strip()
        if string:
            self._parse_string(string)
        # Explicit keyword parts are appended after whatever was parsed
        # from the full name string.
        self.first_names.extend(split_tex_string(first))
        self.middle_names.extend(split_tex_string(middle))
        self.prelast_names.extend(split_tex_string(prelast))
        self.last_names.extend(split_tex_string(last))
        self.lineage_names.extend(split_tex_string(lineage))

    @property
    def bibtex_first_names(self):
        """A list of first and middle names together.
        (BibTeX treats all middle names as first.)

        .. versionadded:: 0.19
            Earlier versions used :py:meth:`Person.bibtex_first`, which is now deprecated.

        >>> knuth = Person('Donald E. Knuth')
        >>> knuth.bibtex_first_names
        ['Donald', 'E.']
        """
        return self.first_names + self.middle_names

    def _parse_string(self, name):
        """Extract various parts of the name from a string.

        >>> p = Person('Avinash K. Dixit')
        >>> print p.first_names
        ['Avinash']
        >>> print p.middle_names
        ['K.']
        >>> print p.prelast_names
        []
        >>> print p.last_names
        ['Dixit']
        >>> print p.lineage_names
        []
        >>> print unicode(p)
        Dixit, Avinash K.
        >>> p == Person(unicode(p))
        True
        >>> p = Person('Dixit, Jr, Avinash K. ')
        >>> print p.first_names
        ['Avinash']
        >>> print p.middle_names
        ['K.']
        >>> print p.prelast_names
        []
        >>> print p.last_names
        ['Dixit']
        >>> print p.lineage_names
        ['Jr']
        >>> print unicode(p)
        Dixit, Jr, Avinash K.
        >>> p == Person(unicode(p))
        True

        >>> p = Person('abc')
        >>> print p.first_names, p.middle_names, p.prelast_names, p.last_names, p.lineage_names
        [] [] [] ['abc'] []
        >>> p = Person('Viktorov, Michail~Markovitch')
        >>> print p.first_names, p.middle_names, p.prelast_names, p.last_names, p.lineage_names
        ['Michail'] ['Markovitch'] [] ['Viktorov'] []
        """
        def process_first_middle(parts):
            # First token is the first name; everything after is middle.
            try:
                self.first_names.append(parts[0])
                self.middle_names.extend(parts[1:])
            except IndexError:
                pass

        def process_von_last(parts):
            # von cannot be the last name in the list
            von_last = parts[:-1]
            definitely_not_von = parts[-1:]
            if von_last:
                von, last = rsplit_at(von_last, is_von_name)
                self.prelast_names.extend(von)
                self.last_names.extend(last)
            self.last_names.extend(definitely_not_von)

        def find_pos(lst, pred):
            for i, item in enumerate(lst):
                if pred(item):
                    return i
            return i + 1

        def split_at(lst, pred):
            """Split the given list into two parts.

            The second part starts with the first item for which the given
            predicate is True.
            """
            pos = find_pos(lst, pred)
            return lst[:pos], lst[pos:]

        def rsplit_at(lst, pred):
            # Like split_at, but searches from the right-hand end.
            rpos = find_pos(reversed(lst), pred)
            pos = len(lst) - rpos
            return lst[:pos], lst[pos:]

        def is_von_name(string):
            # A "von" part starts with a lowercase letter; braces and TeX
            # special characters require scanning for the first real letter.
            if string[0].isupper():
                return False
            if string[0].islower():
                return True
            else:
                for char, brace_level in scan_bibtex_string(string):
                    if brace_level == 0 and char.isalpha():
                        return char.islower()
                    elif brace_level == 1 and char.startswith('\\'):
                        return special_char_islower(char)
            return False

        def special_char_islower(special_char):
            control_sequence = True
            for char in special_char[1:]:  # skip the backslash
                if control_sequence:
                    if not char.isalpha():
                        control_sequence = False
                else:
                    if char.isalpha():
                        return char.islower()
            return False

        parts = split_tex_string(name, ',')
        if len(parts) > 3:
            report_error(InvalidNameString(name))
            # Best effort: fold everything after the second comma into one part.
            last_parts = parts[2:]
            parts = parts[:2] + [' '.join(last_parts)]

        if len(parts) == 3:  # von Last, Jr, First
            process_von_last(split_tex_string(parts[0]))
            self.lineage_names.extend(split_tex_string(parts[1]))
            process_first_middle(split_tex_string(parts[2]))
        elif len(parts) == 2:  # von Last, First
            process_von_last(split_tex_string(parts[0]))
            process_first_middle(split_tex_string(parts[1]))
        elif len(parts) == 1:  # First von Last
            parts = split_tex_string(name)
            first_middle, von_last = split_at(parts, is_von_name)
            if not von_last and first_middle:
                last = first_middle.pop()
                von_last.append(last)
            process_first_middle(first_middle)
            process_von_last(von_last)
        else:
            # should not really happen
            raise ValueError(name)

    def __eq__(self, other):
        if not isinstance(other, Person):
            return super(Person, self) == other
        return (
            self.first_names == other.first_names
            and self.middle_names == other.middle_names
            and self.prelast_names == other.prelast_names
            and self.last_names == other.last_names
            and self.lineage_names == other.lineage_names
        )

    def __unicode__(self):
        # von Last, Jr, First
        von_last = ' '.join(self.prelast_names + self.last_names)
        jr = ' '.join(self.lineage_names)
        first = ' '.join(self.first_names + self.middle_names)
        return ', '.join(part for part in (von_last, jr, first) if part)

    def __repr__(self):
        return 'Person({0})'.format(repr(unicode(self)))

    def get_part_as_text(self, type):
        """Return the name parts of the given type joined with spaces."""
        names = getattr(self, type + '_names')
        return ' '.join(names)

    def get_part(self, type, abbr=False):
        """Get a list of name parts by `type`.

        >>> knuth = Person('Donald E. Knuth')
        >>> knuth.get_part('first')
        ['Donald']
        >>> knuth.get_part('last')
        ['Knuth']
        """
        names = getattr(self, type + '_names')
        if abbr:
            import warnings
            warnings.warn('Person.get_part(abbr=True) is deprecated since 0.19: use pybtex.textutils.abbreviate()', stacklevel=2)
            from pybtex.textutils import abbreviate
            names = [abbreviate(name) for name in names]
        return names

    @deprecated('0.19', 'use Person.first_names instead')
    def first(self, abbr=False):
        """
        .. deprecated:: 0.19
            Use :py:attr:`.first_names` instead.
        """
        return self.get_part('first', abbr)

    @deprecated('0.19', 'use Person.middle_names instead')
    def middle(self, abbr=False):
        """
        .. deprecated:: 0.19
            Use :py:attr:`.middle_names` instead.
        """
        return self.get_part('middle', abbr)

    @deprecated('0.19', 'use Person.prelast_names instead')
    def prelast(self, abbr=False):
        """
        .. deprecated:: 0.19
            Use :py:attr:`.prelast_names` instead.
        """
        return self.get_part('prelast', abbr)

    @deprecated('0.19', 'use Person.last_names instead')
    def last(self, abbr=False):
        """
        .. deprecated:: 0.19
            Use :py:attr:`.last_names` instead.
        """
        return self.get_part('last', abbr)

    @deprecated('0.19', 'use Person.lineage_names instead')
    def lineage(self, abbr=False):
        """
        .. deprecated:: 0.19
            Use :py:attr:`.lineage_names` instead.
        """
        return self.get_part('lineage', abbr)

    @deprecated('0.19', 'use Person.bibtex_first_names instead')
    def bibtex_first(self):
        """
        .. deprecated:: 0.19
            Use :py:attr:`.bibtex_first_names` instead.
        """
        return self.bibtex_first_names
def parse_file(file, bib_format=None, **kwargs):
    """
    Read bibliography data from file and return a :py:class:`.BibliographyData` object.

    :param file: A file name or a file-like object.
    :param bib_format: Data format ("bibtex", "yaml", etc.).
        If not specified, Pybtex will try to guess by the file name.

    .. versionadded:: 0.19
    """
    if isinstance(file, basestring):
        filename = file
    else:
        # BUG FIX: was ``geattr`` -- a NameError whenever a file-like object
        # (rather than a file name) was passed in.
        filename = getattr(file, 'name', None)
    parser = find_plugin('pybtex.database.input', bib_format, filename=filename)(**kwargs)
    return parser.parse_file(file)
def parse_string(value, bib_format, **kwargs):
    """
    Parse a Unicode string containing bibliography data and return a :py:class:`.BibliographyData` object.

    :param value: Unicode string.
    :param bib_format: Data format ("bibtex", "yaml", etc.).

    .. versionadded:: 0.19
    """
    parser_class = find_plugin('pybtex.database.input', bib_format)
    parser = parser_class(**kwargs)
    return parser.parse_string(value)
def parse_bytes(value, bib_format, **kwargs):
    """
    Parse a byte string containing bibliography data and return a :py:class:`.BibliographyData` object.

    :param value: Byte string.
    :param bib_format: Data format (for example, "bibtexml").

    .. versionadded:: 0.19
    """
    parser_class = find_plugin('pybtex.database.input', bib_format)
    parser = parser_class(**kwargs)
    return parser.parse_bytes(value)
| 33.652065 | 129 | 0.576056 |
import re
from collections import Mapping
from pybtex.plugin import find_plugin
from pybtex.exceptions import PybtexError
from pybtex.utils import (
deprecated,
OrderedCaseInsensitiveDict, CaseInsensitiveDefaultDict, CaseInsensitiveSet
)
from pybtex.bibtex.utils import split_tex_string, scan_bibtex_string
from pybtex.errors import report_error
class BibliographyDataError(PybtexError):
    """Raised for inconsistent bibliography data (duplicate keys, bad crossrefs)."""
    pass
class InvalidNameString(PybtexError):
    """Raised when a person name string has more comma-separated parts than BibTeX allows."""

    def __init__(self, name_string):
        # A BibTeX name has at most three comma-separated parts.
        message = 'Too many commas in {}'.format(repr(name_string))
        super(InvalidNameString, self).__init__(message)
class BibliographyData(object):
    """A collection of bibliography entries plus an optional LaTeX preamble.

    Entries are kept in a case-insensitive ordered dict keyed by entry key.
    """

    def __init__(self, entries=None, preamble=None, wanted_entries=None, min_crossrefs=2):
        """
        :param entries: A mapping or an iterable of ``(key, entry)`` pairs.
        :param preamble: An iterable of LaTeX preamble chunks.
        :param wanted_entries: When given, only these keys (or everything,
            if ``'*'`` is present) are accepted by :py:meth:`add_entry`.
        :param min_crossrefs: Threshold used when expanding cross-references.
        """
        self.entries = OrderedCaseInsensitiveDict()
        self.crossref_count = CaseInsensitiveDefaultDict(int)
        self.min_crossrefs = min_crossrefs
        self._preamble = []
        if wanted_entries is not None:
            self.wanted_entries = CaseInsensitiveSet(wanted_entries)
            self.citations = CaseInsensitiveSet(wanted_entries)
        else:
            # wanted_entries is None means "accept every entry".
            self.wanted_entries = None
            self.citations = CaseInsensitiveSet()
        if entries:
            if isinstance(entries, Mapping):
                entries = entries.iteritems()
            for (key, entry) in entries:
                self.add_entry(key, entry)
        if preamble:
            self._preamble.extend(preamble)

    def __eq__(self, other):
        if not isinstance(other, BibliographyData):
            return super(BibliographyData, self) == other
        return (
            self.entries == other.entries
            and self._preamble == other._preamble
        )

    def __repr__(self):
        return 'BibliographyData(entries={entries}, preamble={preamble})'.format(
            entries=repr(self.entries),
            preamble=repr(self._preamble),
        )

    def add_to_preamble(self, *values):
        """Append one or more chunks to the LaTeX preamble."""
        self._preamble.extend(values)

    @property
    def preamble(self):
        """LaTeX preamble, joined into a single string."""
        return ''.join(self._preamble)

    @deprecated('0.19', 'use BibliographyData.preamble instead')
    def get_preamble(self):
        """Deprecated alias for :py:attr:`.preamble`."""
        return self.preamble

    def want_entry(self, key):
        """Return True if an entry with this key should be kept."""
        return (
            self.wanted_entries is None
            or key in self.wanted_entries
            or '*' in self.wanted_entries
        )

    def get_canonical_key(self, key):
        # Prefer the spelling used in the citation list so case differences
        # between citations and the database collapse to one key.
        if key in self.citations:
            return self.citations.get_canonical_key(key)
        else:
            return key

    def add_entry(self, key, entry):
        """Add an entry under ``key``, skipping unwanted or duplicate keys."""
        if not self.want_entry(key):
            return
        if key in self.entries:
            report_error(BibliographyDataError('repeated bibliograhpy entry: %s' % key))
            return
        entry.collection = self
        entry.key = self.get_canonical_key(key)
        self.entries[entry.key] = entry
        try:
            crossref = entry.fields['crossref']
        except KeyError:
            pass
        else:
            # A wanted entry makes its crossref target wanted as well.
            if self.wanted_entries is not None:
                self.wanted_entries.add(crossref)

    def add_entries(self, entries):
        """Add ``(key, entry)`` pairs one by one via :py:meth:`add_entry`."""
        for key, entry in entries:
            self.add_entry(key, entry)

    def _get_crossreferenced_citations(self, citations, min_crossrefs):
        """Yield keys referenced via ``crossref`` at least ``min_crossrefs``
        times that are not cited explicitly."""
        crossref_count = CaseInsensitiveDefaultDict(int)
        citation_set = CaseInsensitiveSet(citations)
        for citation in citations:
            try:
                entry = self.entries[citation]
                crossref = entry.fields['crossref']
            except KeyError:
                # Entry missing or has no crossref field: nothing to expand.
                continue
            try:
                crossref_entry = self.entries[crossref]
            except KeyError:
                report_error(BibliographyDataError(
                    'bad cross-reference: entry "{key}" refers to '
                    'entry "{crossref}" which does not exist.'.format(
                        key=citation, crossref=crossref,
                    )
                ))
                continue

            canonical_crossref = crossref_entry.key
            crossref_count[canonical_crossref] += 1
            # Only yield a target once it is referenced often enough and is
            # not already cited explicitly (BibTeX min_crossrefs rule).
            if crossref_count[canonical_crossref] >= min_crossrefs and canonical_crossref not in citation_set:
                citation_set.add(canonical_crossref)
                yield canonical_crossref

    def _expand_wildcard_citations(self, citations):
        """Expand ``'*'`` citations into all entry keys, deduplicating
        case-insensitively while preserving first-seen order."""
        citation_set = CaseInsensitiveSet()
        for citation in citations:
            if citation == '*':
                for key in self.entries:
                    if key not in citation_set:
                        citation_set.add(key)
                        yield key
            else:
                if citation not in citation_set:
                    citation_set.add(citation)
                    yield citation

    def add_extra_citations(self, citations, min_crossrefs):
        """Return citations with wildcards expanded and crossrefs appended."""
        expanded_citations = list(self._expand_wildcard_citations(citations))
        crossrefs = list(self._get_crossreferenced_citations(expanded_citations, min_crossrefs))
        return expanded_citations + crossrefs

    def to_string(self, bib_format, **kwargs):
        """Return the data as a unicode string in the given format."""
        writer = find_plugin('pybtex.database.output', bib_format)(**kwargs)
        return writer.to_string(self)

    def to_bytes(self, bib_format, **kwargs):
        """Return the data as a byte string in the given format."""
        writer = find_plugin('pybtex.database.output', bib_format)(**kwargs)
        return writer.to_bytes(self)

    def to_file(self, file, bib_format=None, **kwargs):
        """Save the data to a file name or file-like object; the format is
        guessed from the file name when not given."""
        if isinstance(file, basestring):
            filename = file
        else:
            filename = getattr(file, 'name', None)
        writer = find_plugin('pybtex.database.output', bib_format, filename=filename)(**kwargs)
        return writer.write_file(self, file)

    def lower(self):
        """Return a copy with all entry/field/person keys lowercased."""
        entries_lower = ((key.lower(), entry.lower()) for key, entry in self.entries.iteritems())
        return type(self)(
            entries=entries_lower,
            preamble=self._preamble,
            wanted_entries=self.wanted_entries,
            min_crossrefs=self.min_crossrefs,
        )
class FieldDict(OrderedCaseInsensitiveDict):
    """Entry field dictionary with BibTeX-style fallbacks.

    A missing key is resolved from the parent entry's person lists
    (joined with ``' and '``) or, failing that, from the cross-referenced
    entry's fields. Raises ``KeyError`` when neither fallback applies.
    """

    def __init__(self, parent, *args, **kwargs):
        self.parent = parent
        super(FieldDict, self).__init__(*args, **kwargs)

    def __getitem__(self, key):
        try:
            return super(FieldDict, self).__getitem__(key)
        except KeyError:
            if key in self.parent.persons:
                # Render a person list the way BibTeX expects: "A and B".
                persons = self.parent.persons[key]
                return ' and '.join(unicode(person) for person in persons)
            elif 'crossref' in self:
                # Fall back to the cross-referenced entry's field.
                return self.parent.get_crossref().fields[key]
            else:
                raise KeyError(key)

    def lower(self):
        """Return a copy of this dict with lowercased keys."""
        # BUG FIX: removed the dead ``lower_dict = super(...).lower()`` call;
        # its result was discarded, so it only did useless work.
        return type(self)(self.parent, self.iteritems_lower())
class Entry(object):
    """A bibliography entry."""

    # Entry key; assigned by the owning BibliographyData on add_entry().
    key = None

    def __init__(self, type_, fields=None, persons=None, collection=None):
        """
        :param type_: Entry type; stored lowercased in ``self.type``.
        :param fields: Optional mapping of field names to values.
        :param persons: Optional mapping of roles to person lists.
        :param collection: The owning :py:class:`.BibliographyData`, if any.
        """
        if fields is None:
            fields = {}
        if persons is None:
            persons = {}
        self.type = type_.lower()
        self.original_type = type_
        # Ordered, case-insensitive field dict with person/crossref fallbacks.
        self.fields = FieldDict(self, fields)
        # Persons keyed by role ('author', 'editor', ...).
        self.persons = OrderedCaseInsensitiveDict(persons)
        self.collection = collection
        # Scratch space for the BibTeX interpreter.
        self.vars = {}

    def __eq__(self, other):
        if not isinstance(other, Entry):
            return super(Entry, self) == other
        return (
            self.type == other.type
            and self.fields == other.fields
            and self.persons == other.persons
        )

    def __repr__(self):
        # Represent fields as a plain list of pairs to avoid recursing into
        # FieldDict.parent (which points back at this entry).
        repr_fields = repr(self.fields.items())
        return 'Entry({type_}, fields={fields}, persons={persons})'.format(
            type_=repr(self.type),
            fields=repr_fields,
            persons=repr(self.persons),
        )

    def get_crossref(self):
        """Return the entry referenced by this entry's ``crossref`` field."""
        return self.collection.entries[self.fields['crossref']]

    def add_person(self, person, role):
        """Append ``person`` to the list of persons with the given role."""
        self.persons.setdefault(role, []).append(person)

    def lower(self):
        """Return a copy of this entry with lowercased field/person keys."""
        return type(self)(
            self.type,
            fields=self.fields.lower(),
            persons=self.persons.lower(),
            collection=self.collection,
        )
class Person(object):
    """A person name decomposed into BibTeX name parts.

    Name parts are stored as lists of tokens: first, middle, prelast
    (the "von" part), last, and lineage (a "Jr."-style suffix).  A
    person can be built either from a full BibTeX-style name string or
    from explicitly given parts (the two may be combined).
    """
    valid_roles = ['author', 'editor']
    # NOTE(review): these two patterns are not referenced anywhere in
    # this class; presumably kept for backward compatibility -- confirm
    # before removing.  The '\s' escapes rely on non-raw-string regex
    # literals.
    style1_re = re.compile('^(.+),\s*(.+)$')
    style2_re = re.compile('^(.+),\s*(.+),\s*(.+)$')
    def __init__(self, string="", first="", middle="", prelast="", last="", lineage=""):
        """Create a person from a name *string* and/or explicit parts."""
        # Each *_names attribute holds individual name tokens.
        self.first_names = []
        self.middle_names = []
        self.prelast_names = []
        self.last_names = []
        self.lineage_names = []
        string = string.strip()
        if string:
            # Parse the combined BibTeX name string first; explicitly
            # passed parts are appended afterwards.
            self._parse_string(string)
        self.first_names.extend(split_tex_string(first))
        self.middle_names.extend(split_tex_string(middle))
        self.prelast_names.extend(split_tex_string(prelast))
        self.last_names.extend(split_tex_string(last))
        self.lineage_names.extend(split_tex_string(lineage))
    @property
    def bibtex_first_names(self):
        """All given names (first + middle), as BibTeX groups them."""
        return self.first_names + self.middle_names
    def _parse_string(self, name):
        """Parse a BibTeX name string into name parts.

        Handles the three BibTeX name forms:

        - ``First von Last``
        - ``von Last, First``
        - ``von Last, Jr, First``
        """
        def process_first_middle(parts):
            # First token is the first name, remaining tokens are
            # middle names; an empty list leaves both untouched.
            try:
                self.first_names.append(parts[0])
                self.middle_names.extend(parts[1:])
            except IndexError:
                pass
        def process_von_last(parts):
            # The final token is always (part of) the last name; only
            # the tokens before it may belong to the "von" part.
            von_last = parts[:-1]
            definitely_not_von = parts[-1:]
            if von_last:
                von, last = rsplit_at(von_last, is_von_name)
                self.prelast_names.extend(von)
                self.last_names.extend(last)
            self.last_names.extend(definitely_not_von)
        def find_pos(lst, pred):
            # Index of the first item satisfying pred, or len(lst).
            # NOTE(review): assumes lst is non-empty -- an empty list
            # would leave ``i`` unbound and raise NameError.
            for i, item in enumerate(lst):
                if pred(item):
                    return i
            return i + 1
        def split_at(lst, pred):
            # Split before the first item satisfying pred.
            pos = find_pos(lst, pred)
            return lst[:pos], lst[pos:]
        def rsplit_at(lst, pred):
            # Split after the last item satisfying pred.
            rpos = find_pos(reversed(lst), pred)
            pos = len(lst) - rpos
            return lst[:pos], lst[pos:]
        def is_von_name(string):
            # A token belongs to the "von" part when its first letter
            # at brace level 0 is lowercase; TeX special characters
            # (e.g. ``\'e`` inside braces) are inspected separately.
            if string[0].isupper():
                return False
            if string[0].islower():
                return True
            else:
                for char, brace_level in scan_bibtex_string(string):
                    if brace_level == 0 and char.isalpha():
                        return char.islower()
                    elif brace_level == 1 and char.startswith('\\'):
                        return special_char_islower(char)
            return False
        def special_char_islower(special_char):
            # Find the first alphabetic character after the TeX control
            # sequence (e.g. the "e" in "\'e") and test its case.
            control_sequence = True
            for char in special_char[1:]:  # skip the leading backslash
                if control_sequence:
                    if not char.isalpha():
                        control_sequence = False
                else:
                    if char.isalpha():
                        return char.islower()
            return False
        parts = split_tex_string(name, ',')
        if len(parts) > 3:
            # Too many commas: report the malformed name, then fold the
            # surplus into the third part and parse as a 3-part name.
            report_error(InvalidNameString(name))
            last_parts = parts[2:]
            parts = parts[:2] + [' '.join(last_parts)]
        if len(parts) == 3:  # "von Last, Jr, First"
            process_von_last(split_tex_string(parts[0]))
            self.lineage_names.extend(split_tex_string(parts[1]))
            process_first_middle(split_tex_string(parts[2]))
        elif len(parts) == 2:  # "von Last, First"
            process_von_last(split_tex_string(parts[0]))
            process_first_middle(split_tex_string(parts[1]))
        elif len(parts) == 1:  # "First von Last"
            parts = split_tex_string(name)
            first_middle, von_last = split_at(parts, is_von_name)
            if not von_last and first_middle:
                # No "von" token found: the last token is the last name.
                last = first_middle.pop()
                von_last.append(last)
            process_first_middle(first_middle)
            process_von_last(von_last)
        else:
            raise ValueError(name)
    def __eq__(self, other):
        # Compare part-by-part; non-Person objects fall back to the
        # default comparison.
        if not isinstance(other, Person):
            return super(Person, self) == other
        return (
            self.first_names == other.first_names
            and self.middle_names == other.middle_names
            and self.prelast_names == other.prelast_names
            and self.last_names == other.last_names
            and self.lineage_names == other.lineage_names
        )
    def __unicode__(self):
        # Render in the canonical BibTeX form "von Last, Jr, First",
        # omitting empty groups.
        von_last = ' '.join(self.prelast_names + self.last_names)
        jr = ' '.join(self.lineage_names)
        first = ' '.join(self.first_names + self.middle_names)
        return ', '.join(part for part in (von_last, jr, first) if part)
    def __repr__(self):
        return 'Person({0})'.format(repr(unicode(self)))
    def get_part_as_text(self, type):
        """Return the tokens of the given name part joined by spaces."""
        names = getattr(self, type + '_names')
        return ' '.join(names)
    def get_part(self, type, abbr=False):
        """Return the token list for the given part (``abbr`` is deprecated)."""
        names = getattr(self, type + '_names')
        if abbr:
            import warnings
            warnings.warn('Person.get_part(abbr=True) is deprecated since 0.19: use pybtex.textutils.abbreviate()', stacklevel=2)
            from pybtex.textutils import abbreviate
            names = [abbreviate(name) for name in names]
        return names
    @deprecated('0.19', 'use Person.first_names instead')
    def first(self, abbr=False):
        return self.get_part('first', abbr)
    @deprecated('0.19', 'use Person.middle_names instead')
    def middle(self, abbr=False):
        return self.get_part('middle', abbr)
    @deprecated('0.19', 'use Person.prelast_names instead')
    def prelast(self, abbr=False):
        return self.get_part('prelast', abbr)
    @deprecated('0.19', 'use Person.last_names instead')
    def last(self, abbr=False):
        return self.get_part('last', abbr)
    @deprecated('0.19', 'use Person.lineage_names instead')
    def lineage(self, abbr=False):
        return self.get_part('lineage', abbr)
    @deprecated('0.19', 'use Person.bibtex_first_names instead')
    def bibtex_first(self):
        return self.bibtex_first_names
def parse_file(file, bib_format=None, **kwargs):
    """Parse a file or file-like object and return the bibliography data.

    :param file: a file name or a file-like object.
    :param bib_format: an input format plugin name; if ``None``, the
        format is guessed from the file name.
    :param kwargs: extra options forwarded to the parser plugin.
    """
    if isinstance(file, basestring):
        filename = file
    else:
        # Bug fix: this was ``geattr`` (a typo), which raised NameError
        # for every file-like argument instead of reading its name.
        filename = getattr(file, 'name', None)
    parser = find_plugin('pybtex.database.input', bib_format, filename=filename)(**kwargs)
    return parser.parse_file(file)
def parse_string(value, bib_format, **kwargs):
    """Parse a string in the given format and return the bibliography data."""
    parser_class = find_plugin('pybtex.database.input', bib_format)
    return parser_class(**kwargs).parse_string(value)
def parse_bytes(value, bib_format, **kwargs):
    """Parse a byte string in the given format and return the bibliography data."""
    parser_factory = find_plugin('pybtex.database.input', bib_format)
    parser = parser_factory(**kwargs)
    return parser.parse_bytes(value)
| true | true |
f7310a03ff8c470b1a97fb65773a13fadaa84311 | 17,145 | py | Python | cinder/tests/unit/attachments/test_attachments_api.py | helenwalsh/cinder | 307fccea4cc9c6496334b0fe137206cb48499bd5 | [
"Apache-2.0"
] | 1 | 2019-02-17T17:49:41.000Z | 2019-02-17T17:49:41.000Z | cinder/tests/unit/attachments/test_attachments_api.py | BelieveInFuture/cinder | fff95fa6a68a054488ee087b6e31f4f5e28209dc | [
"Apache-2.0"
] | 1 | 2020-12-22T20:40:20.000Z | 2020-12-23T18:34:42.000Z | cinder/tests/unit/attachments/test_attachments_api.py | BelieveInFuture/cinder | fff95fa6a68a054488ee087b6e31f4f5e28209dc | [
"Apache-2.0"
] | 3 | 2020-06-16T07:29:48.000Z | 2020-06-21T10:22:57.000Z | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from oslo_config import cfg
from oslo_policy import policy as oslo_policy
from cinder import context
from cinder import db
from cinder import exception
from cinder import objects
from cinder.policies import attachments as attachment_policy
from cinder.policies import base as base_policy
from cinder import policy
from cinder.tests.unit.api.v2 import fakes as v2_fakes
from cinder.tests.unit import fake_constants as fake
from cinder.tests.unit import test
from cinder.tests.unit import utils as tests_utils
from cinder.volume import api as volume_api
from cinder.volume import configuration as conf
CONF = cfg.CONF
class AttachmentManagerTestCase(test.TestCase):
    """Attachment related tests for volume/api.py."""
    def setUp(self):
        """Set up the API object plus admin and user request contexts."""
        super(AttachmentManagerTestCase, self).setUp()
        self.configuration = mock.Mock(conf.Configuration)
        self.context = context.get_admin_context()
        self.context.user_id = fake.USER_ID
        self.project_id = fake.PROJECT3_ID
        self.context.project_id = self.project_id
        self.volume_api = volume_api.API()
        self.user_context = context.RequestContext(
            user_id=fake.USER_ID,
            project_id=fake.PROJECT3_ID)
    def test_attachment_create_no_connector(self):
        """Test attachment_create no connector."""
        volume_params = {'status': 'available'}
        vref = tests_utils.create_volume(self.context, **volume_params)
        aref = self.volume_api.attachment_create(self.context,
                                                 vref,
                                                 fake.UUID2)
        # Without a connector the attachment stays in the 'reserved'
        # state with an empty connection_info.
        self.assertEqual(fake.UUID2, aref.instance_uuid)
        self.assertIsNone(aref.attach_time)
        self.assertEqual('reserved', aref.attach_status)
        self.assertEqual('null', aref.attach_mode)
        self.assertEqual(vref.id, aref.volume_id)
        self.assertEqual({}, aref.connection_info)
    @mock.patch('cinder.volume.rpcapi.VolumeAPI.attachment_update')
    def test_attachment_create_with_connector(self,
                                              mock_rpc_attachment_update):
        """Test attachment_create with connector."""
        volume_params = {'status': 'available'}
        connection_info = {'fake_key': 'fake_value',
                           'fake_key2': ['fake_value1', 'fake_value2']}
        mock_rpc_attachment_update.return_value = connection_info
        vref = tests_utils.create_volume(self.context, **volume_params)
        connector = {'fake': 'connector'}
        attachment = self.volume_api.attachment_create(self.context,
                                                       vref,
                                                       fake.UUID2,
                                                       connector)
        # Supplying a connector triggers an immediate attachment_update
        # RPC, whose result is persisted as the connection_info.
        mock_rpc_attachment_update.assert_called_once_with(self.context,
                                                           mock.ANY,
                                                           connector,
                                                           mock.ANY)
        new_attachment = objects.VolumeAttachment.get_by_id(self.context,
                                                            attachment.id)
        self.assertEqual(connection_info, new_attachment.connection_info)
    @mock.patch('cinder.volume.rpcapi.VolumeAPI.attachment_delete')
    def test_attachment_delete_reserved(self,
                                        mock_rpc_attachment_delete):
        """Test attachment_delete with reserved."""
        volume_params = {'status': 'available'}
        vref = tests_utils.create_volume(self.context, **volume_params)
        aref = self.volume_api.attachment_create(self.context,
                                                 vref,
                                                 fake.UUID2)
        aobj = objects.VolumeAttachment.get_by_id(self.context,
                                                  aref.id)
        self.assertEqual('reserved', aref.attach_status)
        self.assertEqual(vref.id, aref.volume_id)
        self.volume_api.attachment_delete(self.context,
                                          aobj)
        # Since it's just reserved and never finalized, we should never make an
        # rpc call
        mock_rpc_attachment_delete.assert_not_called()
    @mock.patch('cinder.volume.rpcapi.VolumeAPI.attachment_delete')
    @mock.patch('cinder.volume.rpcapi.VolumeAPI.attachment_update')
    def test_attachment_create_update_and_delete(
            self,
            mock_rpc_attachment_update,
            mock_rpc_attachment_delete):
        """Test the full attachment lifecycle: create, update, delete."""
        volume_params = {'status': 'available'}
        connection_info = {'fake_key': 'fake_value',
                           'fake_key2': ['fake_value1', 'fake_value2']}
        mock_rpc_attachment_update.return_value = connection_info
        vref = tests_utils.create_volume(self.context, **volume_params)
        aref = self.volume_api.attachment_create(self.context,
                                                 vref,
                                                 fake.UUID2)
        aref = objects.VolumeAttachment.get_by_id(self.context,
                                                  aref.id)
        vref = objects.Volume.get_by_id(self.context,
                                        vref.id)
        connector = {'fake': 'connector',
                     'host': 'somehost'}
        self.volume_api.attachment_update(self.context,
                                          aref,
                                          connector)
        aref = objects.VolumeAttachment.get_by_id(self.context,
                                                  aref.id)
        self.assertEqual(connection_info, aref.connection_info)
        # We mock the actual call that updates the status
        # so force it here
        values = {'volume_id': vref.id,
                  'volume_host': vref.host,
                  'attach_status': 'attached',
                  'instance_uuid': fake.UUID2}
        aref = db.volume_attach(self.context, values)
        aref = objects.VolumeAttachment.get_by_id(self.context,
                                                  aref.id)
        self.assertEqual(vref.id, aref.volume_id)
        self.volume_api.attachment_delete(self.context,
                                          aref)
        # An attached attachment must be torn down via RPC.
        mock_rpc_attachment_delete.assert_called_once_with(self.context,
                                                           aref.id,
                                                           mock.ANY)
    def test_additional_attachment_create_no_connector(self):
        """Test a second attachment_create (no connector) on the same volume."""
        volume_params = {'status': 'available'}
        vref = tests_utils.create_volume(self.context, **volume_params)
        aref = self.volume_api.attachment_create(self.context,
                                                 vref,
                                                 fake.UUID2)
        self.assertEqual(fake.UUID2, aref.instance_uuid)
        self.assertIsNone(aref.attach_time)
        self.assertEqual('reserved', aref.attach_status)
        self.assertEqual('null', aref.attach_mode)
        self.assertEqual(vref.id, aref.volume_id)
        self.assertEqual({}, aref.connection_info)
        # A different instance may not reserve a non-multiattach volume...
        self.assertRaises(exception.InvalidVolume,
                          self.volume_api.attachment_create,
                          self.context,
                          vref,
                          fake.UUID1)
        # ...but the same instance may reserve it again.
        self.volume_api.attachment_create(self.context,
                                          vref,
                                          fake.UUID2)
        vref = objects.Volume.get_by_id(self.context,
                                        vref.id)
        self.assertEqual(2, len(vref.volume_attachment))
    @mock.patch('cinder.volume.rpcapi.VolumeAPI.attachment_update')
    def test_attachment_create_reserve_delete(
            self,
            mock_rpc_attachment_update):
        """Test that deleting an active attachment reverts to 'reserved'."""
        volume_params = {'status': 'available'}
        connector = {
            "initiator": "iqn.1993-08.org.debian:01:cad181614cec",
            "ip": "192.168.1.20",
            "platform": "x86_64",
            "host": "tempest-1",
            "os_type": "linux2",
            "multipath": False}
        connection_info = {'fake_key': 'fake_value',
                           'fake_key2': ['fake_value1', 'fake_value2']}
        mock_rpc_attachment_update.return_value = connection_info
        vref = tests_utils.create_volume(self.context, **volume_params)
        aref = self.volume_api.attachment_create(self.context,
                                                 vref,
                                                 fake.UUID2,
                                                 connector=connector)
        vref = objects.Volume.get_by_id(self.context,
                                        vref.id)
        # Need to set the status here because our mock isn't doing it for us
        vref.status = 'in-use'
        vref.save()
        # Now a second attachment acting as a reserve
        self.volume_api.attachment_create(self.context,
                                          vref,
                                          fake.UUID2)
        # We should now be able to delete the original attachment that gave us
        # 'in-use' status, and in turn we should revert to the outstanding
        # attachments reserve
        self.volume_api.attachment_delete(self.context,
                                          aref)
        vref = objects.Volume.get_by_id(self.context,
                                        vref.id)
        self.assertEqual('reserved', vref.status)
    def test_reserve_reserve_delete(self):
        """Test that we keep reserved status across multiple reserves."""
        volume_params = {'status': 'available'}
        vref = tests_utils.create_volume(self.context, **volume_params)
        aref = self.volume_api.attachment_create(self.context,
                                                 vref,
                                                 fake.UUID2)
        vref = objects.Volume.get_by_id(self.context,
                                        vref.id)
        self.assertEqual('reserved', vref.status)
        self.volume_api.attachment_create(self.context,
                                          vref,
                                          fake.UUID2)
        vref = objects.Volume.get_by_id(self.context,
                                        vref.id)
        self.assertEqual('reserved', vref.status)
        self.volume_api.attachment_delete(self.context,
                                          aref)
        # One reservation remains, so the volume stays 'reserved'.
        vref = objects.Volume.get_by_id(self.context,
                                        vref.id)
        self.assertEqual('reserved', vref.status)
        self.assertEqual(1, len(vref.volume_attachment))
    def test_attachment_create_bootable_multiattach_policy(self):
        """Test attachment_create enforcing the bootable-multiattach policy."""
        volume_params = {'status': 'available'}
        vref = tests_utils.create_volume(self.context, **volume_params)
        vref.multiattach = True
        vref.bootable = True
        vref.status = 'in-use'
        # Restrict the policy to admin-only so the plain user is denied.
        rules = {
            attachment_policy.MULTIATTACH_BOOTABLE_VOLUME_POLICY: base_policy.RULE_ADMIN_API # noqa
        }
        policy.set_rules(oslo_policy.Rules.from_dict(rules))
        self.addCleanup(policy.reset)
        self.assertRaises(exception.PolicyNotAuthorized,
                          self.volume_api.attachment_create,
                          self.user_context,
                          vref,
                          fake.UUID2)
    def test_attachment_create_readonly_volume(self):
        """Test attachment_create on a readonly volume."""
        volume_params = {'status': 'available'}
        vref = tests_utils.create_volume(self.context, **volume_params)
        self.volume_api.update_readonly_flag(self.context, vref, True)
        aref = self.volume_api.attachment_create(self.context,
                                                 vref,
                                                 fake.UUID2)
        self.assertEqual(fake.UUID2, aref.instance_uuid)
        self.assertIsNone(aref.attach_time)
        self.assertEqual('reserved', aref.attach_status)
        # Readonly volumes must come back with the 'ro' attach mode.
        self.assertEqual('ro', aref.attach_mode)
        self.assertEqual(vref.id, aref.volume_id)
        self.assertEqual({}, aref.connection_info)
    def test_attachment_create_volume_in_error_state(self):
        """Test attachment_create volume in error state."""
        volume_params = {'status': 'available'}
        vref = tests_utils.create_volume(self.context, **volume_params)
        vref.status = "error"
        self.assertRaises(exception.InvalidVolume,
                          self.volume_api.attachment_create,
                          self.context,
                          vref,
                          fake.UUID2)
    def test_attachment_update_volume_in_error_state(self):
        """Test attachment_update volume in error state."""
        volume_params = {'status': 'available'}
        vref = tests_utils.create_volume(self.context, **volume_params)
        aref = self.volume_api.attachment_create(self.context,
                                                 vref,
                                                 fake.UUID2)
        self.assertEqual(fake.UUID2, aref.instance_uuid)
        self.assertIsNone(aref.attach_time)
        self.assertEqual('reserved', aref.attach_status)
        self.assertEqual(vref.id, aref.volume_id)
        self.assertEqual({}, aref.connection_info)
        # Push the volume into 'error' after the reserve succeeded.
        vref.status = 'error'
        vref.save()
        connector = {'fake': 'connector',
                     'host': 'somehost'}
        self.assertRaises(exception.InvalidVolume,
                          self.volume_api.attachment_update,
                          self.context,
                          aref,
                          connector)
    @mock.patch('cinder.db.sqlalchemy.api.volume_attachment_update',
                return_value={})
    @mock.patch('cinder.volume.rpcapi.VolumeAPI.attachment_update',
                return_value={})
    @mock.patch.object(db.sqlalchemy.api, '_volume_type_get',
                       v2_fakes.fake_volume_type_get)
    def test_attachment_update_duplicate(self, mock_va_update, mock_db_upd):
        """Test that a conflicting connector host rejects the update."""
        volume_params = {'status': 'available'}
        vref = tests_utils.create_volume(self.context,
                                         deleted=0,
                                         **volume_params)
        tests_utils.attach_volume(self.context,
                                  vref.id,
                                  fake.UUID1,
                                  'somehost',
                                  'somemountpoint')
        # Update volume with another attachment
        tests_utils.attach_volume(self.context,
                                  vref.id,
                                  fake.UUID2,
                                  'somehost2',
                                  'somemountpoint2')
        vref.refresh()
        # This attachment will collide with the first
        connector = {'host': 'somehost'}
        vref.volume_attachment[0]['connector'] = {'host': 'somehost'}
        vref.volume_attachment[0]['connection_info'] = {'c': 'd'}
        with mock.patch('cinder.objects.Volume.get_by_id', return_value=vref):
            with mock.patch.object(self.volume_api.volume_rpcapi,
                                   'attachment_update') as m_au:
                self.assertRaises(exception.InvalidVolume,
                                  self.volume_api.attachment_update,
                                  self.context,
                                  vref.volume_attachment[1],
                                  connector)
                m_au.assert_not_called()
        # Neither the RPC nor the DB layer may be touched on conflict.
        mock_va_update.assert_not_called()
        mock_db_upd.assert_not_called()
    def test_attachment_create_creating_volume(self):
        """Test attachment_create on a creating volume."""
        volume_params = {'status': 'creating'}
        vref = tests_utils.create_volume(self.context, **volume_params)
        self.assertRaises(exception.InvalidVolume,
                          self.volume_api.attachment_create,
                          self.context,
                          vref,
                          fake.UUID1)
| 45.72 | 100 | 0.552289 |
from unittest import mock
from oslo_config import cfg
from oslo_policy import policy as oslo_policy
from cinder import context
from cinder import db
from cinder import exception
from cinder import objects
from cinder.policies import attachments as attachment_policy
from cinder.policies import base as base_policy
from cinder import policy
from cinder.tests.unit.api.v2 import fakes as v2_fakes
from cinder.tests.unit import fake_constants as fake
from cinder.tests.unit import test
from cinder.tests.unit import utils as tests_utils
from cinder.volume import api as volume_api
from cinder.volume import configuration as conf
CONF = cfg.CONF
class AttachmentManagerTestCase(test.TestCase):
def setUp(self):
super(AttachmentManagerTestCase, self).setUp()
self.configuration = mock.Mock(conf.Configuration)
self.context = context.get_admin_context()
self.context.user_id = fake.USER_ID
self.project_id = fake.PROJECT3_ID
self.context.project_id = self.project_id
self.volume_api = volume_api.API()
self.user_context = context.RequestContext(
user_id=fake.USER_ID,
project_id=fake.PROJECT3_ID)
def test_attachment_create_no_connector(self):
volume_params = {'status': 'available'}
vref = tests_utils.create_volume(self.context, **volume_params)
aref = self.volume_api.attachment_create(self.context,
vref,
fake.UUID2)
self.assertEqual(fake.UUID2, aref.instance_uuid)
self.assertIsNone(aref.attach_time)
self.assertEqual('reserved', aref.attach_status)
self.assertEqual('null', aref.attach_mode)
self.assertEqual(vref.id, aref.volume_id)
self.assertEqual({}, aref.connection_info)
@mock.patch('cinder.volume.rpcapi.VolumeAPI.attachment_update')
def test_attachment_create_with_connector(self,
mock_rpc_attachment_update):
volume_params = {'status': 'available'}
connection_info = {'fake_key': 'fake_value',
'fake_key2': ['fake_value1', 'fake_value2']}
mock_rpc_attachment_update.return_value = connection_info
vref = tests_utils.create_volume(self.context, **volume_params)
connector = {'fake': 'connector'}
attachment = self.volume_api.attachment_create(self.context,
vref,
fake.UUID2,
connector)
mock_rpc_attachment_update.assert_called_once_with(self.context,
mock.ANY,
connector,
mock.ANY)
new_attachment = objects.VolumeAttachment.get_by_id(self.context,
attachment.id)
self.assertEqual(connection_info, new_attachment.connection_info)
@mock.patch('cinder.volume.rpcapi.VolumeAPI.attachment_delete')
def test_attachment_delete_reserved(self,
mock_rpc_attachment_delete):
volume_params = {'status': 'available'}
vref = tests_utils.create_volume(self.context, **volume_params)
aref = self.volume_api.attachment_create(self.context,
vref,
fake.UUID2)
aobj = objects.VolumeAttachment.get_by_id(self.context,
aref.id)
self.assertEqual('reserved', aref.attach_status)
self.assertEqual(vref.id, aref.volume_id)
self.volume_api.attachment_delete(self.context,
aobj)
# rpc call
mock_rpc_attachment_delete.assert_not_called()
@mock.patch('cinder.volume.rpcapi.VolumeAPI.attachment_delete')
@mock.patch('cinder.volume.rpcapi.VolumeAPI.attachment_update')
def test_attachment_create_update_and_delete(
self,
mock_rpc_attachment_update,
mock_rpc_attachment_delete):
volume_params = {'status': 'available'}
connection_info = {'fake_key': 'fake_value',
'fake_key2': ['fake_value1', 'fake_value2']}
mock_rpc_attachment_update.return_value = connection_info
vref = tests_utils.create_volume(self.context, **volume_params)
aref = self.volume_api.attachment_create(self.context,
vref,
fake.UUID2)
aref = objects.VolumeAttachment.get_by_id(self.context,
aref.id)
vref = objects.Volume.get_by_id(self.context,
vref.id)
connector = {'fake': 'connector',
'host': 'somehost'}
self.volume_api.attachment_update(self.context,
aref,
connector)
aref = objects.VolumeAttachment.get_by_id(self.context,
aref.id)
self.assertEqual(connection_info, aref.connection_info)
# We mock the actual call that updates the status
# so force it here
values = {'volume_id': vref.id,
'volume_host': vref.host,
'attach_status': 'attached',
'instance_uuid': fake.UUID2}
aref = db.volume_attach(self.context, values)
aref = objects.VolumeAttachment.get_by_id(self.context,
aref.id)
self.assertEqual(vref.id, aref.volume_id)
self.volume_api.attachment_delete(self.context,
aref)
mock_rpc_attachment_delete.assert_called_once_with(self.context,
aref.id,
mock.ANY)
def test_additional_attachment_create_no_connector(self):
volume_params = {'status': 'available'}
vref = tests_utils.create_volume(self.context, **volume_params)
aref = self.volume_api.attachment_create(self.context,
vref,
fake.UUID2)
self.assertEqual(fake.UUID2, aref.instance_uuid)
self.assertIsNone(aref.attach_time)
self.assertEqual('reserved', aref.attach_status)
self.assertEqual('null', aref.attach_mode)
self.assertEqual(vref.id, aref.volume_id)
self.assertEqual({}, aref.connection_info)
self.assertRaises(exception.InvalidVolume,
self.volume_api.attachment_create,
self.context,
vref,
fake.UUID1)
self.volume_api.attachment_create(self.context,
vref,
fake.UUID2)
vref = objects.Volume.get_by_id(self.context,
vref.id)
self.assertEqual(2, len(vref.volume_attachment))
@mock.patch('cinder.volume.rpcapi.VolumeAPI.attachment_update')
def test_attachment_create_reserve_delete(
self,
mock_rpc_attachment_update):
volume_params = {'status': 'available'}
connector = {
"initiator": "iqn.1993-08.org.debian:01:cad181614cec",
"ip": "192.168.1.20",
"platform": "x86_64",
"host": "tempest-1",
"os_type": "linux2",
"multipath": False}
connection_info = {'fake_key': 'fake_value',
'fake_key2': ['fake_value1', 'fake_value2']}
mock_rpc_attachment_update.return_value = connection_info
vref = tests_utils.create_volume(self.context, **volume_params)
aref = self.volume_api.attachment_create(self.context,
vref,
fake.UUID2,
connector=connector)
vref = objects.Volume.get_by_id(self.context,
vref.id)
# Need to set the status here because our mock isn't doing it for us
vref.status = 'in-use'
vref.save()
self.volume_api.attachment_create(self.context,
vref,
fake.UUID2)
self.volume_api.attachment_delete(self.context,
aref)
vref = objects.Volume.get_by_id(self.context,
vref.id)
self.assertEqual('reserved', vref.status)
def test_reserve_reserve_delete(self):
volume_params = {'status': 'available'}
vref = tests_utils.create_volume(self.context, **volume_params)
aref = self.volume_api.attachment_create(self.context,
vref,
fake.UUID2)
vref = objects.Volume.get_by_id(self.context,
vref.id)
self.assertEqual('reserved', vref.status)
self.volume_api.attachment_create(self.context,
vref,
fake.UUID2)
vref = objects.Volume.get_by_id(self.context,
vref.id)
self.assertEqual('reserved', vref.status)
self.volume_api.attachment_delete(self.context,
aref)
vref = objects.Volume.get_by_id(self.context,
vref.id)
self.assertEqual('reserved', vref.status)
self.assertEqual(1, len(vref.volume_attachment))
def test_attachment_create_bootable_multiattach_policy(self):
volume_params = {'status': 'available'}
vref = tests_utils.create_volume(self.context, **volume_params)
vref.multiattach = True
vref.bootable = True
vref.status = 'in-use'
rules = {
attachment_policy.MULTIATTACH_BOOTABLE_VOLUME_POLICY: base_policy.RULE_ADMIN_API
}
policy.set_rules(oslo_policy.Rules.from_dict(rules))
self.addCleanup(policy.reset)
self.assertRaises(exception.PolicyNotAuthorized,
self.volume_api.attachment_create,
self.user_context,
vref,
fake.UUID2)
def test_attachment_create_readonly_volume(self):
volume_params = {'status': 'available'}
vref = tests_utils.create_volume(self.context, **volume_params)
self.volume_api.update_readonly_flag(self.context, vref, True)
aref = self.volume_api.attachment_create(self.context,
vref,
fake.UUID2)
self.assertEqual(fake.UUID2, aref.instance_uuid)
self.assertIsNone(aref.attach_time)
self.assertEqual('reserved', aref.attach_status)
self.assertEqual('ro', aref.attach_mode)
self.assertEqual(vref.id, aref.volume_id)
self.assertEqual({}, aref.connection_info)
def test_attachment_create_volume_in_error_state(self):
volume_params = {'status': 'available'}
vref = tests_utils.create_volume(self.context, **volume_params)
vref.status = "error"
self.assertRaises(exception.InvalidVolume,
self.volume_api.attachment_create,
self.context,
vref,
fake.UUID2)
def test_attachment_update_volume_in_error_state(self):
volume_params = {'status': 'available'}
vref = tests_utils.create_volume(self.context, **volume_params)
aref = self.volume_api.attachment_create(self.context,
vref,
fake.UUID2)
self.assertEqual(fake.UUID2, aref.instance_uuid)
self.assertIsNone(aref.attach_time)
self.assertEqual('reserved', aref.attach_status)
self.assertEqual(vref.id, aref.volume_id)
self.assertEqual({}, aref.connection_info)
vref.status = 'error'
vref.save()
connector = {'fake': 'connector',
'host': 'somehost'}
self.assertRaises(exception.InvalidVolume,
self.volume_api.attachment_update,
self.context,
aref,
connector)
@mock.patch('cinder.db.sqlalchemy.api.volume_attachment_update',
return_value={})
@mock.patch('cinder.volume.rpcapi.VolumeAPI.attachment_update',
return_value={})
@mock.patch.object(db.sqlalchemy.api, '_volume_type_get',
v2_fakes.fake_volume_type_get)
def test_attachment_update_duplicate(self, mock_va_update, mock_db_upd):
volume_params = {'status': 'available'}
vref = tests_utils.create_volume(self.context,
deleted=0,
**volume_params)
tests_utils.attach_volume(self.context,
vref.id,
fake.UUID1,
'somehost',
'somemountpoint')
tests_utils.attach_volume(self.context,
vref.id,
fake.UUID2,
'somehost2',
'somemountpoint2')
vref.refresh()
connector = {'host': 'somehost'}
vref.volume_attachment[0]['connector'] = {'host': 'somehost'}
vref.volume_attachment[0]['connection_info'] = {'c': 'd'}
with mock.patch('cinder.objects.Volume.get_by_id', return_value=vref):
with mock.patch.object(self.volume_api.volume_rpcapi,
'attachment_update') as m_au:
self.assertRaises(exception.InvalidVolume,
self.volume_api.attachment_update,
self.context,
vref.volume_attachment[1],
connector)
m_au.assert_not_called()
mock_va_update.assert_not_called()
mock_db_upd.assert_not_called()
def test_attachment_create_creating_volume(self):
volume_params = {'status': 'creating'}
vref = tests_utils.create_volume(self.context, **volume_params)
self.assertRaises(exception.InvalidVolume,
self.volume_api.attachment_create,
self.context,
vref,
fake.UUID1)
| true | true |
f7310a6506b1291656e1d48a8acd6e1397f886e7 | 1,296 | py | Python | docs/scripts/build-go.py | xUndero/noc | 9fb34627721149fcf7064860bd63887e38849131 | [
"BSD-3-Clause"
] | 1 | 2019-09-20T09:36:48.000Z | 2019-09-20T09:36:48.000Z | docs/scripts/build-go.py | ewwwcha/noc | aba08dc328296bb0e8e181c2ac9a766e1ec2a0bb | [
"BSD-3-Clause"
] | null | null | null | docs/scripts/build-go.py | ewwwcha/noc | aba08dc328296bb0e8e181c2ac9a766e1ec2a0bb | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# ----------------------------------------------------------------------
# Build go.html
# ----------------------------------------------------------------------
# Copyright (C) 2007-2018 The NOC Project
# See LICENSE for details
# ----------------------------------------------------------------------
# Python modules
import sys
import os
# Third-party modules
from sphinx.util.inventory import InventoryFile
JS = """
function redirect(rmap) {
var href = window.location.href;
var label = href.split('#')[1];
var base = href.substr(0, href.indexOf("go.html"))
window.location = base + rmap[label];
}
"""
def process(path):
    """Build go.html next to the Sphinx inventory located at *path*.

    Reads the ``std:label`` entries of the inventory and writes a page
    whose embedded JavaScript redirects ``go.html#<label>`` to the
    documentation URI recorded for that label.
    """
    with open(path) as inv:
        data = InventoryFile.load(inv, "", os.path.join) or {}
    # Build the JS object literal body: 'label': 'uri' pairs.
    mapping = ",".join(
        "'%s': '%s'" % (entry, einfo[2])
        for entry, einfo in sorted(data["std:label"].items())
    )
    parts = [
        "<html>",
        "<head>",
        "<title>NOC go</title>",
        "</head>",
        "<body>",
        "<script>",
        JS,
        "redirect({",
        mapping,
        "});",
        "</script>",
        "</body>",
        "</html>",
    ]
    go_path = os.path.join(os.path.dirname(path), "go.html")
    with open(go_path, "w") as out:
        out.write("".join(parts))
if __name__ == "__main__":
    # CLI entry point: argv[1] is the path to a Sphinx objects.inv file.
    process(sys.argv[1])
| 25.411765 | 72 | 0.45216 |
import sys
import os
from sphinx.util.inventory import InventoryFile
JS = """
function redirect(rmap) {
var href = window.location.href;
var label = href.split('#')[1];
var base = href.substr(0, href.indexOf("go.html"))
window.location = base + rmap[label];
}
"""
def process(path):
r = [
"<html>",
"<head>",
"<title>NOC go</title>",
"</head>",
"<body>",
"<script>",
JS,
"redirect({",
]
with open(path) as f:
data = InventoryFile.load(f, "", os.path.join) or {}
rr = []
for entry, einfo in sorted(data["std:label"].items()):
rr += ["'%s': '%s'" % (entry, einfo[2])]
r += [",".join(rr), "});", "</script>", "</body>", "</html>"]
base = os.path.dirname(path)
go_path = os.path.join(base, "go.html")
with open(go_path, "w") as f:
f.write("".join(r))
if __name__ == "__main__":
process(sys.argv[1])
| true | true |
f7310adaaf9f436b99dc44a44f9873149854a8c7 | 312 | py | Python | trabajos/ejercicio8.py | marilynmamani/marilyn-M.Q | 54090978b1f6e2f12b79b5dd39c59e9594226414 | [
"Apache-2.0"
] | null | null | null | trabajos/ejercicio8.py | marilynmamani/marilyn-M.Q | 54090978b1f6e2f12b79b5dd39c59e9594226414 | [
"Apache-2.0"
] | null | null | null | trabajos/ejercicio8.py | marilynmamani/marilyn-M.Q | 54090978b1f6e2f12b79b5dd39c59e9594226414 | [
"Apache-2.0"
] | null | null | null | def votoElecciones():
print("Como saber si puedes votar por tu edad")
mensaje =""
edadP=int(input("ingrese la edad que tiene:"))
if edadP>=18:
mensaje ="Usted esta apto para votar"
else:
mensaje ="Usted no cumple con la edadad minima y no esta apto para votar"
print(mensaje)
votoElecciones() | 31.2 | 77 | 0.701923 | def votoElecciones():
print("Como saber si puedes votar por tu edad")
mensaje =""
edadP=int(input("ingrese la edad que tiene:"))
if edadP>=18:
mensaje ="Usted esta apto para votar"
else:
mensaje ="Usted no cumple con la edadad minima y no esta apto para votar"
print(mensaje)
votoElecciones() | true | true |
f7310ade1292bec44ede20c2ba3965b6ae5472ce | 26,481 | py | Python | openstack_dashboard/test/unit/api/test_nova.py | NunoEdgarGFlowHub/horizon | 73a0bbd43ea78ac5337f7d00977ec5f32452067e | [
"Apache-2.0"
] | null | null | null | openstack_dashboard/test/unit/api/test_nova.py | NunoEdgarGFlowHub/horizon | 73a0bbd43ea78ac5337f7d00977ec5f32452067e | [
"Apache-2.0"
] | null | null | null | openstack_dashboard/test/unit/api/test_nova.py | NunoEdgarGFlowHub/horizon | 73a0bbd43ea78ac5337f7d00977ec5f32452067e | [
"Apache-2.0"
] | null | null | null | # Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
# Copyright (c) 2012 X.commerce, a business unit of eBay Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from django.conf import settings
from django.test.utils import override_settings
import mock
from novaclient import api_versions
from novaclient import exceptions as nova_exceptions
from novaclient.v2 import flavor_access as nova_flavor_access
from novaclient.v2 import servers
from horizon import exceptions as horizon_exceptions
from openstack_dashboard import api
from openstack_dashboard.test import helpers as test
class ServerWrapperTests(test.TestCase):
    """Tests for the api.nova.Server wrapper around novaclient servers."""
    # Use mock instead of the legacy mox framework in this test class.
    use_mox = False
    def test_get_base_attribute(self):
        # Attributes of the wrapped novaclient object pass through.
        server = api.nova.Server(self.servers.first(), self.request)
        self.assertEqual(self.servers.first().id, server.id)
    @mock.patch.object(api.glance, 'image_get')
    def test_image_name(self, mock_image_get):
        """image_name resolves the server's image id through glance."""
        image = self.images.first()
        mock_image_get.return_value = image
        server = api.nova.Server(self.servers.first(), self.request)
        self.assertEqual(image.name, server.image_name)
        mock_image_get.assert_called_once_with(test.IsHttpRequest(), image.id)
    @mock.patch.object(api.glance, 'image_get')
    def test_image_name_no_glance_service(self, mock_image_get):
        """image_name is None when the glance service is unavailable."""
        server = self.servers.first()
        exc_catalog = horizon_exceptions.ServiceCatalogException('image')
        mock_image_get.side_effect = exc_catalog
        server = api.nova.Server(server, self.request)
        self.assertIsNone(server.image_name)
        mock_image_get.assert_called_once_with(test.IsHttpRequest(),
                                               server.image['id'])
class ComputeApiTests(test.APIMockTestCase):
    """Tests for the nova (compute) wrappers in ``openstack_dashboard.api``.

    Each test stubs novaclient via ``self.stub_novaclient()``, invokes the
    corresponding ``api.nova`` helper, and verifies both the wrapped return
    value and the exact novaclient call that was issued.
    """

    def _mock_current_version(self, mock_novaclient, version,
                              min_version=None):
        """Make the stubbed client report *version* as its current
        microversion; *min_version* defaults to '2.1'."""
        ver = mock.Mock()
        ver.min_version = min_version or '2.1'
        ver.version = version
        mock_novaclient.versions.get_current.return_value = ver
        # To handle upgrade_api
        self.novaclient.api_version = api_versions.APIVersion(version)

    def test_server_reboot(self):
        server = self.servers.first()
        HARDNESS = servers.REBOOT_HARD
        novaclient = self.stub_novaclient()
        novaclient.servers.reboot.return_value = None
        ret_val = api.nova.server_reboot(self.request, server.id)
        self.assertIsNone(ret_val)
        # A hard reboot is the default when no softness is requested.
        novaclient.servers.reboot.assert_called_once_with(
            server.id, HARDNESS)

    def test_server_soft_reboot(self):
        server = self.servers.first()
        HARDNESS = servers.REBOOT_SOFT
        novaclient = self.stub_novaclient()
        novaclient.servers.reboot.return_value = None
        ret_val = api.nova.server_reboot(self.request, server.id, HARDNESS)
        self.assertIsNone(ret_val)
        novaclient.servers.reboot.assert_called_once_with(
            server.id, HARDNESS)

    def test_server_vnc_console(self):
        server = self.servers.first()
        console = self.servers.vnc_console_data
        console_type = console["console"]["type"]
        novaclient = self.stub_novaclient()
        novaclient.servers.get_vnc_console.return_value = console
        ret_val = api.nova.server_vnc_console(self.request,
                                              server.id,
                                              console_type)
        self.assertIsInstance(ret_val, api.nova.VNCConsole)
        novaclient.servers.get_vnc_console.assert_called_once_with(
            server.id, console_type)

    def test_server_spice_console(self):
        server = self.servers.first()
        console = self.servers.spice_console_data
        console_type = console["console"]["type"]
        novaclient = self.stub_novaclient()
        novaclient.servers.get_spice_console.return_value = console
        ret_val = api.nova.server_spice_console(self.request,
                                                server.id,
                                                console_type)
        self.assertIsInstance(ret_val, api.nova.SPICEConsole)
        novaclient.servers.get_spice_console.assert_called_once_with(
            server.id, console_type)

    def test_server_rdp_console(self):
        server = self.servers.first()
        console = self.servers.rdp_console_data
        console_type = console["console"]["type"]
        novaclient = self.stub_novaclient()
        novaclient.servers.get_rdp_console.return_value = console
        ret_val = api.nova.server_rdp_console(self.request,
                                              server.id,
                                              console_type)
        self.assertIsInstance(ret_val, api.nova.RDPConsole)
        novaclient.servers.get_rdp_console.assert_called_once_with(
            server.id, console_type)

    def test_server_mks_console(self):
        server = self.servers.first()
        console = self.servers.mks_console_data
        # MKS consoles use the newer "remote_console" response shape and
        # require microversion 2.53.
        console_type = console["remote_console"]["type"]
        novaclient = self.stub_novaclient()
        self._mock_current_version(novaclient, '2.53')
        novaclient.servers.get_mks_console.return_value = console
        ret_val = api.nova.server_mks_console(self.request,
                                              server.id,
                                              console_type)
        self.assertIsInstance(ret_val, api.nova.MKSConsole)
        novaclient.versions.get_current.assert_called_once_with()
        novaclient.servers.get_mks_console.assert_called_once_with(
            server.id, console_type)

    def test_server_list(self):
        servers = self.servers.list()
        novaclient = self.stub_novaclient()
        self._mock_current_version(novaclient, '2.40')
        novaclient.servers.list.return_value = servers
        ret_val, has_more = api.nova.server_list(
            self.request,
            search_opts={'all_tenants': True})
        for server in ret_val:
            self.assertIsInstance(server, api.nova.Server)
        novaclient.versions.get_current.assert_called_once_with()
        novaclient.servers.list.assert_called_once_with(
            True, {'all_tenants': True})

    def test_server_list_pagination(self):
        page_size = getattr(settings, 'API_RESULT_PAGE_SIZE', 20)
        servers = self.servers.list()
        novaclient = self.stub_novaclient()
        self._mock_current_version(novaclient, '2.45')
        novaclient.servers.list.return_value = servers
        ret_val, has_more = api.nova.server_list(self.request,
                                                 {'marker': None,
                                                  'paginate': True,
                                                  'all_tenants': True})
        for server in ret_val:
            self.assertIsInstance(server, api.nova.Server)
        self.assertFalse(has_more)
        novaclient.versions.get_current.assert_called_once_with()
        # One extra item is requested to detect whether more pages exist.
        novaclient.servers.list.assert_called_once_with(
            True,
            {'all_tenants': True,
             'marker': None,
             'limit': page_size + 1})

    @override_settings(API_RESULT_PAGE_SIZE=1)
    def test_server_list_pagination_more(self):
        page_size = getattr(settings, 'API_RESULT_PAGE_SIZE', 1)
        servers = self.servers.list()
        novaclient = self.stub_novaclient()
        self._mock_current_version(novaclient, '2.45')
        novaclient.servers.list.return_value = servers[:page_size + 1]
        ret_val, has_more = api.nova.server_list(self.request,
                                                 {'marker': None,
                                                  'paginate': True,
                                                  'all_tenants': True})
        for server in ret_val:
            self.assertIsInstance(server, api.nova.Server)
        self.assertEqual(page_size, len(ret_val))
        self.assertTrue(has_more)
        novaclient.versions.get_current.assert_called_once_with()
        novaclient.servers.list.assert_called_once_with(
            True,
            {'all_tenants': True,
             'marker': None,
             'limit': page_size + 1})

    def test_usage_get(self):
        novaclient = self.stub_novaclient()
        self._mock_current_version(novaclient, '2.1')
        # Fix: the manager attribute is ``usage`` (singular) — see the
        # assertion below and the other usage tests in this class.
        # Stubbing ``usages.get`` only configured an unrelated
        # auto-created mock, so the fixture value was never returned.
        novaclient.usage.get.return_value = self.usages.first()
        ret_val = api.nova.usage_get(self.request, self.tenant.id,
                                     'start', 'end')
        self.assertIsInstance(ret_val, api.nova.NovaUsage)
        novaclient.versions.get_current.assert_called_once_with()
        novaclient.usage.get.assert_called_once_with(
            self.tenant.id, 'start', 'end')

    def test_usage_get_paginated(self):
        novaclient = self.stub_novaclient()
        self._mock_current_version(novaclient, '2.40')
        # First call returns a full page, second an empty one to stop
        # the pagination loop.
        novaclient.usage.get.side_effect = [
            self.usages.first(),
            {},
        ]
        ret_val = api.nova.usage_get(self.request, self.tenant.id,
                                     'start', 'end')
        self.assertIsInstance(ret_val, api.nova.NovaUsage)
        novaclient.versions.get_current.assert_called_once_with()
        novaclient.usage.get.assert_has_calls([
            mock.call(self.tenant.id, 'start', 'end'),
            mock.call(self.tenant.id, 'start', 'end',
                      marker=u'063cf7f3-ded1-4297-bc4c-31eae876cc93'),
        ])

    def test_usage_list(self):
        usages = self.usages.list()
        novaclient = self.stub_novaclient()
        self._mock_current_version(novaclient, '2.1')
        novaclient.usage.list.return_value = usages
        ret_val = api.nova.usage_list(self.request, 'start', 'end')
        for usage in ret_val:
            self.assertIsInstance(usage, api.nova.NovaUsage)
        novaclient.versions.get_current.assert_called_once_with()
        novaclient.usage.list.assert_called_once_with('start', 'end', True)

    def test_usage_list_paginated(self):
        usages = self.usages.list()
        novaclient = self.stub_novaclient()
        self._mock_current_version(novaclient, '2.40')
        novaclient.usage.list.side_effect = [
            usages,
            {},
        ]
        ret_val = api.nova.usage_list(self.request, 'start', 'end')
        for usage in ret_val:
            self.assertIsInstance(usage, api.nova.NovaUsage)
        novaclient.versions.get_current.assert_called_once_with()
        novaclient.usage.list.assert_has_calls([
            mock.call('start', 'end', True),
            mock.call('start', 'end', True,
                      marker=u'063cf7f3-ded1-4297-bc4c-31eae876cc93'),
        ])

    def test_server_get(self):
        server = self.servers.first()
        novaclient = self.stub_novaclient()
        self._mock_current_version(novaclient, '2.45')
        novaclient.servers.get.return_value = server
        ret_val = api.nova.server_get(self.request, server.id)
        self.assertIsInstance(ret_val, api.nova.Server)
        novaclient.versions.get_current.assert_called_once_with()
        novaclient.servers.get.assert_called_once_with(server.id)

    def test_server_metadata_update(self):
        server = self.servers.first()
        metadata = {'foo': 'bar'}
        novaclient = self.stub_novaclient()
        novaclient.servers.set_meta.return_value = None
        ret_val = api.nova.server_metadata_update(self.request,
                                                  server.id,
                                                  metadata)
        self.assertIsNone(ret_val)
        novaclient.servers.set_meta.assert_called_once_with(server.id,
                                                            metadata)

    def test_server_metadata_delete(self):
        server = self.servers.first()
        keys = ['a', 'b']
        novaclient = self.stub_novaclient()
        novaclient.servers.delete_meta.return_value = None
        ret_val = api.nova.server_metadata_delete(self.request,
                                                  server.id,
                                                  keys)
        self.assertIsNone(ret_val)
        novaclient.servers.delete_meta.assert_called_once_with(server.id, keys)

    def _test_absolute_limits(self, values, expected_results):
        """Stub the limits API with *values* and assert that
        ``tenant_absolute_limits`` reports *expected_results*."""
        limits = mock.Mock()
        limits.absolute = []
        for key, val in values.items():
            limit = mock.Mock()
            limit.name = key
            limit.value = val
            limits.absolute.append(limit)
        novaclient = self.stub_novaclient()
        novaclient.limits.get.return_value = limits
        ret_val = api.nova.tenant_absolute_limits(self.request, reserved=True)
        for key in expected_results.keys():
            self.assertEqual(expected_results[key], ret_val[key])
        novaclient.limits.get.assert_called_once_with(reserved=True,
                                                      tenant_id=None)

    def test_absolute_limits_handle_unlimited(self):
        # -1 means "unlimited" and must be mapped to infinity.
        values = {"maxTotalCores": -1, "maxTotalInstances": 10}
        expected_results = {"maxTotalCores": float("inf"),
                            "maxTotalInstances": 10}
        self._test_absolute_limits(values, expected_results)

    def test_absolute_limits_negative_used_workaround(self):
        # Negative "used" figures reported by nova are clamped to zero.
        values = {"maxTotalCores": -1,
                  "maxTotalInstances": 10,
                  "totalInstancesUsed": -1,
                  "totalCoresUsed": -1,
                  "totalRAMUsed": -2048,
                  "totalSecurityGroupsUsed": 1,
                  "totalFloatingIpsUsed": 0,
                  }
        expected_results = {"maxTotalCores": float("inf"),
                            "maxTotalInstances": 10,
                            "totalInstancesUsed": 0,
                            "totalCoresUsed": 0,
                            "totalRAMUsed": 0,
                            "totalSecurityGroupsUsed": 1,
                            "totalFloatingIpsUsed": 0,
                            }
        self._test_absolute_limits(values, expected_results)

    def test_cold_migrate_host_succeed(self):
        hypervisor = self.hypervisors.first()
        novaclient = self.stub_novaclient()
        novaclient.hypervisors.search.return_value = [hypervisor]
        novaclient.servers.migrate.return_value = None
        ret_val = api.nova.migrate_host(self.request, "host", False, True,
                                        True)
        self.assertTrue(ret_val)
        novaclient.hypervisors.search.assert_called_once_with('host', True)
        novaclient.servers.migrate.assert_called_once_with('test_uuid')

    def test_cold_migrate_host_fails(self):
        hypervisor = self.hypervisors.first()
        novaclient = self.stub_novaclient()
        novaclient.hypervisors.search.return_value = [hypervisor]
        novaclient.servers.migrate.side_effect = \
            nova_exceptions.ClientException(404)
        self.assertRaises(nova_exceptions.ClientException,
                          api.nova.migrate_host,
                          self.request, "host", False, True, True)
        novaclient.hypervisors.search.assert_called_once_with('host', True)
        novaclient.servers.migrate.assert_called_once_with('test_uuid')

    def test_live_migrate_host_with_active_vm(self):
        hypervisor = self.hypervisors.first()
        server = self.servers.first()
        novaclient = self.stub_novaclient()
        server_uuid = hypervisor.servers[0]["uuid"]
        self._mock_current_version(novaclient, '2.45')
        novaclient.hypervisors.search.return_value = [hypervisor]
        novaclient.servers.get.return_value = server
        novaclient.servers.live_migrate.return_value = None
        ret_val = api.nova.migrate_host(self.request, "host", True, True,
                                        True)
        self.assertTrue(ret_val)
        novaclient.versions.get_current.assert_called_once_with()
        novaclient.hypervisors.search.assert_called_once_with('host', True)
        novaclient.servers.get.assert_called_once_with(server_uuid)
        novaclient.servers.live_migrate.assert_called_once_with(
            server_uuid, None, True, True)

    def test_live_migrate_host_with_paused_vm(self):
        hypervisor = self.hypervisors.first()
        server = self.servers.list()[3]
        novaclient = self.stub_novaclient()
        server_uuid = hypervisor.servers[0]["uuid"]
        self._mock_current_version(novaclient, '2.45')
        novaclient.hypervisors.search.return_value = [hypervisor]
        novaclient.servers.get.return_value = server
        novaclient.servers.live_migrate.return_value = None
        ret_val = api.nova.migrate_host(self.request, "host", True, True, True)
        self.assertTrue(ret_val)
        novaclient.versions.get_current.assert_called_once_with()
        novaclient.hypervisors.search.assert_called_once_with('host', True)
        novaclient.servers.get.assert_called_once_with(server_uuid)
        novaclient.servers.live_migrate.assert_called_once_with(
            server_uuid, None, True, True)

    def test_live_migrate_host_without_running_vm(self):
        # A VM that is neither active nor paused falls back to cold migrate.
        hypervisor = self.hypervisors.first()
        server = self.servers.list()[1]
        novaclient = self.stub_novaclient()
        server_uuid = hypervisor.servers[0]["uuid"]
        self._mock_current_version(novaclient, '2.45')
        novaclient.hypervisors.search.return_value = [hypervisor]
        novaclient.servers.get.return_value = server
        novaclient.servers.migrate.return_value = None
        ret_val = api.nova.migrate_host(self.request, "host", True, True, True)
        self.assertTrue(ret_val)
        novaclient.versions.get_current.assert_called_once_with()
        novaclient.hypervisors.search.assert_called_once_with('host', True)
        novaclient.servers.get.assert_called_once_with(server_uuid)
        novaclient.servers.migrate.assert_called_once_with(server_uuid)

    # ------------------------------------------------------------------
    # Flavor tests
    # ------------------------------------------------------------------

    def test_flavor_list_no_extras(self):
        flavors = self.flavors.list()
        novaclient = self.stub_novaclient()
        novaclient.flavors.list.return_value = flavors
        api_flavors = api.nova.flavor_list(self.request)
        self.assertEqual(len(flavors), len(api_flavors))
        novaclient.flavors.list.assert_called_once_with(is_public=True)

    def test_flavor_get_no_extras(self):
        flavor = self.flavors.list()[1]
        novaclient = self.stub_novaclient()
        novaclient.flavors.get.return_value = flavor
        api_flavor = api.nova.flavor_get(self.request, flavor.id)
        self.assertEqual(api_flavor.id, flavor.id)
        novaclient.flavors.get.assert_called_once_with(flavor.id)

    def _test_flavor_list_paged(self, reversed_order=False, paginate=True):
        """Exercise ``flavor_list_paged`` and verify the pagination and
        sort arguments actually passed through to novaclient."""
        page_size = getattr(settings, 'API_RESULT_PAGE_SIZE', 20)
        flavors = self.flavors.list()
        order = 'asc' if reversed_order else 'desc'
        novaclient = self.stub_novaclient()
        novaclient.flavors.list.return_value = flavors
        api_flavors, has_more, has_prev = api.nova.flavor_list_paged(
            self.request, True, False, None, paginate=paginate,
            reversed_order=reversed_order)
        for flavor in api_flavors:
            self.assertIsInstance(flavor, type(flavors[0]))
        self.assertFalse(has_more)
        self.assertFalse(has_prev)
        if paginate:
            novaclient.flavors.list.assert_called_once_with(
                is_public=True, marker=None, limit=page_size + 1,
                sort_key='name', sort_dir=order)
        else:
            novaclient.flavors.list.assert_called_once_with(
                is_public=True)

    @override_settings(API_RESULT_PAGE_SIZE=1)
    def test_flavor_list_pagination_more_and_prev(self):
        page_size = getattr(settings, 'API_RESULT_PAGE_SIZE', 1)
        flavors = self.flavors.list()
        marker = flavors[0].id
        novaclient = self.stub_novaclient()
        novaclient.flavors.list.return_value = flavors[1:page_size + 2]
        api_flavors, has_more, has_prev = api.nova\
            .flavor_list_paged(
                self.request,
                True,
                False,
                marker,
                paginate=True)
        for flavor in api_flavors:
            self.assertIsInstance(flavor, type(flavors[0]))
        self.assertEqual(page_size, len(api_flavors))
        self.assertTrue(has_more)
        self.assertTrue(has_prev)
        novaclient.flavors.list.assert_called_once_with(
            is_public=True, marker=marker, limit=page_size + 1,
            sort_key='name', sort_dir='desc')

    def test_flavor_list_paged_default_order(self):
        self._test_flavor_list_paged()

    def test_flavor_list_paged_reversed_order(self):
        self._test_flavor_list_paged(reversed_order=True)

    def test_flavor_list_paged_paginate_false(self):
        self._test_flavor_list_paged(paginate=False)

    def test_flavor_create(self):
        flavor = self.flavors.first()
        novaclient = self.stub_novaclient()
        novaclient.flavors.create.return_value = flavor
        api_flavor = api.nova.flavor_create(self.request,
                                            flavor.name,
                                            flavor.ram,
                                            flavor.vcpus,
                                            flavor.disk)
        self.assertIsInstance(api_flavor, type(flavor))
        self.assertEqual(flavor.name, api_flavor.name)
        self.assertEqual(flavor.ram, api_flavor.ram)
        self.assertEqual(flavor.vcpus, api_flavor.vcpus)
        self.assertEqual(flavor.disk, api_flavor.disk)
        # Defaults applied by the wrapper when not specified by the caller.
        self.assertEqual(0, api_flavor.ephemeral)
        self.assertEqual(0, api_flavor.swap)
        self.assertTrue(api_flavor.is_public)
        self.assertEqual(1, api_flavor.rxtx_factor)
        novaclient.flavors.create.assert_called_once_with(
            flavor.name, flavor.ram, flavor.vcpus, flavor.disk,
            flavorid='auto', ephemeral=0, swap=0, is_public=True,
            rxtx_factor=1)

    def test_flavor_delete(self):
        flavor = self.flavors.first()
        novaclient = self.stub_novaclient()
        novaclient.flavors.delete.return_value = None
        api_val = api.nova.flavor_delete(self.request, flavor.id)
        self.assertIsNone(api_val)
        novaclient.flavors.delete.assert_called_once_with(flavor.id)

    def test_flavor_access_list(self):
        flavor_access = self.flavor_access.list()
        flavor = [f for f in self.flavors.list() if f.id ==
                  flavor_access[0].flavor_id][0]
        novaclient = self.stub_novaclient()
        novaclient.flavor_access.list.return_value = flavor_access
        api_flavor_access = api.nova.flavor_access_list(self.request, flavor)
        self.assertEqual(len(flavor_access), len(api_flavor_access))
        for access in api_flavor_access:
            self.assertIsInstance(access, nova_flavor_access.FlavorAccess)
            self.assertEqual(access.flavor_id, flavor.id)
        novaclient.flavor_access.list.assert_called_once_with(flavor=flavor)

    def test_add_tenant_to_flavor(self):
        flavor_access = [self.flavor_access.first()]
        flavor = [f for f in self.flavors.list() if f.id ==
                  flavor_access[0].flavor_id][0]
        tenant = [t for t in self.tenants.list() if t.id ==
                  flavor_access[0].tenant_id][0]
        novaclient = self.stub_novaclient()
        novaclient.flavor_access.add_tenant_access.return_value = flavor_access
        api_flavor_access = api.nova.add_tenant_to_flavor(self.request,
                                                          flavor,
                                                          tenant)
        self.assertIsInstance(api_flavor_access, list)
        self.assertEqual(len(flavor_access), len(api_flavor_access))
        for access in api_flavor_access:
            self.assertEqual(access.flavor_id, flavor.id)
            self.assertEqual(access.tenant_id, tenant.id)
        novaclient.flavor_access.add_tenant_access.assert_called_once_with(
            flavor=flavor, tenant=tenant)

    def test_remove_tenant_from_flavor(self):
        flavor_access = [self.flavor_access.first()]
        flavor = [f for f in self.flavors.list() if f.id ==
                  flavor_access[0].flavor_id][0]
        tenant = [t for t in self.tenants.list() if t.id ==
                  flavor_access[0].tenant_id][0]
        novaclient = self.stub_novaclient()
        novaclient.flavor_access.remove_tenant_access.return_value = []
        api_val = api.nova.remove_tenant_from_flavor(self.request,
                                                     flavor,
                                                     tenant)
        self.assertEqual(len(api_val), len([]))
        self.assertIsInstance(api_val, list)
        novaclient.flavor_access.remove_tenant_access.assert_called_once_with(
            flavor=flavor, tenant=tenant)

    def test_server_group_list(self):
        server_groups = self.server_groups.list()
        novaclient = self.stub_novaclient()
        novaclient.server_groups.list.return_value = server_groups
        ret_val = api.nova.server_group_list(self.request)
        self.assertIsInstance(ret_val, list)
        self.assertEqual(len(ret_val), len(server_groups))
        novaclient.server_groups.list.assert_called_once_with()
| 40.99226 | 79 | 0.629848 |
from __future__ import absolute_import
from django.conf import settings
from django.test.utils import override_settings
import mock
from novaclient import api_versions
from novaclient import exceptions as nova_exceptions
from novaclient.v2 import flavor_access as nova_flavor_access
from novaclient.v2 import servers
from horizon import exceptions as horizon_exceptions
from openstack_dashboard import api
from openstack_dashboard.test import helpers as test
class ServerWrapperTests(test.TestCase):
use_mox = False
def test_get_base_attribute(self):
server = api.nova.Server(self.servers.first(), self.request)
self.assertEqual(self.servers.first().id, server.id)
@mock.patch.object(api.glance, 'image_get')
def test_image_name(self, mock_image_get):
image = self.images.first()
mock_image_get.return_value = image
server = api.nova.Server(self.servers.first(), self.request)
self.assertEqual(image.name, server.image_name)
mock_image_get.assert_called_once_with(test.IsHttpRequest(), image.id)
@mock.patch.object(api.glance, 'image_get')
def test_image_name_no_glance_service(self, mock_image_get):
server = self.servers.first()
exc_catalog = horizon_exceptions.ServiceCatalogException('image')
mock_image_get.side_effect = exc_catalog
server = api.nova.Server(server, self.request)
self.assertIsNone(server.image_name)
mock_image_get.assert_called_once_with(test.IsHttpRequest(),
server.image['id'])
class ComputeApiTests(test.APIMockTestCase):
def _mock_current_version(self, mock_novaclient, version,
min_version=None):
ver = mock.Mock()
ver.min_version = min_version or '2.1'
ver.version = version
mock_novaclient.versions.get_current.return_value = ver
self.novaclient.api_version = api_versions.APIVersion(version)
def test_server_reboot(self):
server = self.servers.first()
HARDNESS = servers.REBOOT_HARD
novaclient = self.stub_novaclient()
novaclient.servers.reboot.return_value = None
ret_val = api.nova.server_reboot(self.request, server.id)
self.assertIsNone(ret_val)
novaclient.servers.reboot.assert_called_once_with(
server.id, HARDNESS)
def test_server_soft_reboot(self):
server = self.servers.first()
HARDNESS = servers.REBOOT_SOFT
novaclient = self.stub_novaclient()
novaclient.servers.reboot.return_value = None
ret_val = api.nova.server_reboot(self.request, server.id, HARDNESS)
self.assertIsNone(ret_val)
novaclient.servers.reboot.assert_called_once_with(
server.id, HARDNESS)
def test_server_vnc_console(self):
server = self.servers.first()
console = self.servers.vnc_console_data
console_type = console["console"]["type"]
novaclient = self.stub_novaclient()
novaclient.servers.get_vnc_console.return_value = console
ret_val = api.nova.server_vnc_console(self.request,
server.id,
console_type)
self.assertIsInstance(ret_val, api.nova.VNCConsole)
novaclient.servers.get_vnc_console.assert_called_once_with(
server.id, console_type)
def test_server_spice_console(self):
server = self.servers.first()
console = self.servers.spice_console_data
console_type = console["console"]["type"]
novaclient = self.stub_novaclient()
novaclient.servers.get_spice_console.return_value = console
ret_val = api.nova.server_spice_console(self.request,
server.id,
console_type)
self.assertIsInstance(ret_val, api.nova.SPICEConsole)
novaclient.servers.get_spice_console.assert_called_once_with(
server.id, console_type)
def test_server_rdp_console(self):
server = self.servers.first()
console = self.servers.rdp_console_data
console_type = console["console"]["type"]
novaclient = self.stub_novaclient()
novaclient.servers.get_rdp_console.return_value = console
ret_val = api.nova.server_rdp_console(self.request,
server.id,
console_type)
self.assertIsInstance(ret_val, api.nova.RDPConsole)
novaclient.servers.get_rdp_console.assert_called_once_with(
server.id, console_type)
def test_server_mks_console(self):
server = self.servers.first()
console = self.servers.mks_console_data
console_type = console["remote_console"]["type"]
novaclient = self.stub_novaclient()
self._mock_current_version(novaclient, '2.53')
novaclient.servers.get_mks_console.return_value = console
ret_val = api.nova.server_mks_console(self.request,
server.id,
console_type)
self.assertIsInstance(ret_val, api.nova.MKSConsole)
novaclient.versions.get_current.assert_called_once_with()
novaclient.servers.get_mks_console.assert_called_once_with(
server.id, console_type)
def test_server_list(self):
servers = self.servers.list()
novaclient = self.stub_novaclient()
self._mock_current_version(novaclient, '2.40')
novaclient.servers.list.return_value = servers
ret_val, has_more = api.nova.server_list(
self.request,
search_opts={'all_tenants': True})
for server in ret_val:
self.assertIsInstance(server, api.nova.Server)
novaclient.versions.get_current.assert_called_once_with()
novaclient.servers.list.assert_called_once_with(
True, {'all_tenants': True})
def test_server_list_pagination(self):
page_size = getattr(settings, 'API_RESULT_PAGE_SIZE', 20)
servers = self.servers.list()
novaclient = self.stub_novaclient()
self._mock_current_version(novaclient, '2.45')
novaclient.servers.list.return_value = servers
ret_val, has_more = api.nova.server_list(self.request,
{'marker': None,
'paginate': True,
'all_tenants': True})
for server in ret_val:
self.assertIsInstance(server, api.nova.Server)
self.assertFalse(has_more)
novaclient.versions.get_current.assert_called_once_with()
novaclient.servers.list.assert_called_once_with(
True,
{'all_tenants': True,
'marker': None,
'limit': page_size + 1})
@override_settings(API_RESULT_PAGE_SIZE=1)
def test_server_list_pagination_more(self):
page_size = getattr(settings, 'API_RESULT_PAGE_SIZE', 1)
servers = self.servers.list()
novaclient = self.stub_novaclient()
self._mock_current_version(novaclient, '2.45')
novaclient.servers.list.return_value = servers[:page_size + 1]
ret_val, has_more = api.nova.server_list(self.request,
{'marker': None,
'paginate': True,
'all_tenants': True})
for server in ret_val:
self.assertIsInstance(server, api.nova.Server)
self.assertEqual(page_size, len(ret_val))
self.assertTrue(has_more)
novaclient.versions.get_current.assert_called_once_with()
novaclient.servers.list.assert_called_once_with(
True,
{'all_tenants': True,
'marker': None,
'limit': page_size + 1})
def test_usage_get(self):
novaclient = self.stub_novaclient()
self._mock_current_version(novaclient, '2.1')
novaclient.usages.get.return_value = self.usages.first()
ret_val = api.nova.usage_get(self.request, self.tenant.id,
'start', 'end')
self.assertIsInstance(ret_val, api.nova.NovaUsage)
novaclient.versions.get_current.assert_called_once_with()
novaclient.usage.get.assert_called_once_with(
self.tenant.id, 'start', 'end')
def test_usage_get_paginated(self):
novaclient = self.stub_novaclient()
self._mock_current_version(novaclient, '2.40')
novaclient.usage.get.side_effect = [
self.usages.first(),
{},
]
ret_val = api.nova.usage_get(self.request, self.tenant.id,
'start', 'end')
self.assertIsInstance(ret_val, api.nova.NovaUsage)
novaclient.versions.get_current.assert_called_once_with()
novaclient.usage.get.assert_has_calls([
mock.call(self.tenant.id, 'start', 'end'),
mock.call(self.tenant.id, 'start', 'end',
marker=u'063cf7f3-ded1-4297-bc4c-31eae876cc93'),
])
def test_usage_list(self):
usages = self.usages.list()
novaclient = self.stub_novaclient()
self._mock_current_version(novaclient, '2.1')
novaclient.usage.list.return_value = usages
ret_val = api.nova.usage_list(self.request, 'start', 'end')
for usage in ret_val:
self.assertIsInstance(usage, api.nova.NovaUsage)
novaclient.versions.get_current.assert_called_once_with()
novaclient.usage.list.assert_called_once_with('start', 'end', True)
def test_usage_list_paginated(self):
usages = self.usages.list()
novaclient = self.stub_novaclient()
self._mock_current_version(novaclient, '2.40')
novaclient.usage.list.side_effect = [
usages,
{},
]
ret_val = api.nova.usage_list(self.request, 'start', 'end')
for usage in ret_val:
self.assertIsInstance(usage, api.nova.NovaUsage)
novaclient.versions.get_current.assert_called_once_with()
novaclient.usage.list.assert_has_calls([
mock.call('start', 'end', True),
mock.call('start', 'end', True,
marker=u'063cf7f3-ded1-4297-bc4c-31eae876cc93'),
])
def test_server_get(self):
server = self.servers.first()
novaclient = self.stub_novaclient()
self._mock_current_version(novaclient, '2.45')
novaclient.servers.get.return_value = server
ret_val = api.nova.server_get(self.request, server.id)
self.assertIsInstance(ret_val, api.nova.Server)
novaclient.versions.get_current.assert_called_once_with()
novaclient.servers.get.assert_called_once_with(server.id)
def test_server_metadata_update(self):
server = self.servers.first()
metadata = {'foo': 'bar'}
novaclient = self.stub_novaclient()
novaclient.servers.set_meta.return_value = None
ret_val = api.nova.server_metadata_update(self.request,
server.id,
metadata)
self.assertIsNone(ret_val)
novaclient.servers.set_meta.assert_called_once_with(server.id,
metadata)
def test_server_metadata_delete(self):
server = self.servers.first()
keys = ['a', 'b']
novaclient = self.stub_novaclient()
novaclient.servers.delete_meta.return_value = None
ret_val = api.nova.server_metadata_delete(self.request,
server.id,
keys)
self.assertIsNone(ret_val)
novaclient.servers.delete_meta.assert_called_once_with(server.id, keys)
def _test_absolute_limits(self, values, expected_results):
limits = mock.Mock()
limits.absolute = []
for key, val in values.items():
limit = mock.Mock()
limit.name = key
limit.value = val
limits.absolute.append(limit)
novaclient = self.stub_novaclient()
novaclient.limits.get.return_value = limits
ret_val = api.nova.tenant_absolute_limits(self.request, reserved=True)
for key in expected_results.keys():
self.assertEqual(expected_results[key], ret_val[key])
novaclient.limits.get.assert_called_once_with(reserved=True,
tenant_id=None)
def test_absolute_limits_handle_unlimited(self):
values = {"maxTotalCores": -1, "maxTotalInstances": 10}
expected_results = {"maxTotalCores": float("inf"),
"maxTotalInstances": 10}
self._test_absolute_limits(values, expected_results)
def test_absolute_limits_negative_used_workaround(self):
values = {"maxTotalCores": -1,
"maxTotalInstances": 10,
"totalInstancesUsed": -1,
"totalCoresUsed": -1,
"totalRAMUsed": -2048,
"totalSecurityGroupsUsed": 1,
"totalFloatingIpsUsed": 0,
}
expected_results = {"maxTotalCores": float("inf"),
"maxTotalInstances": 10,
"totalInstancesUsed": 0,
"totalCoresUsed": 0,
"totalRAMUsed": 0,
"totalSecurityGroupsUsed": 1,
"totalFloatingIpsUsed": 0,
}
self._test_absolute_limits(values, expected_results)
def test_cold_migrate_host_succeed(self):
hypervisor = self.hypervisors.first()
novaclient = self.stub_novaclient()
novaclient.hypervisors.search.return_value = [hypervisor]
novaclient.servers.migrate.return_value = None
ret_val = api.nova.migrate_host(self.request, "host", False, True,
True)
self.assertTrue(ret_val)
novaclient.hypervisors.search.assert_called_once_with('host', True)
novaclient.servers.migrate.assert_called_once_with('test_uuid')
def test_cold_migrate_host_fails(self):
hypervisor = self.hypervisors.first()
novaclient = self.stub_novaclient()
novaclient.hypervisors.search.return_value = [hypervisor]
novaclient.servers.migrate.side_effect = \
nova_exceptions.ClientException(404)
self.assertRaises(nova_exceptions.ClientException,
api.nova.migrate_host,
self.request, "host", False, True, True)
novaclient.hypervisors.search.assert_called_once_with('host', True)
novaclient.servers.migrate.assert_called_once_with('test_uuid')
def test_live_migrate_host_with_active_vm(self):
hypervisor = self.hypervisors.first()
server = self.servers.first()
novaclient = self.stub_novaclient()
server_uuid = hypervisor.servers[0]["uuid"]
self._mock_current_version(novaclient, '2.45')
novaclient.hypervisors.search.return_value = [hypervisor]
novaclient.servers.get.return_value = server
novaclient.servers.live_migrate.return_value = None
ret_val = api.nova.migrate_host(self.request, "host", True, True,
True)
self.assertTrue(ret_val)
novaclient.versions.get_current.assert_called_once_with()
novaclient.hypervisors.search.assert_called_once_with('host', True)
novaclient.servers.get.assert_called_once_with(server_uuid)
novaclient.servers.live_migrate.assert_called_once_with(
server_uuid, None, True, True)
def test_live_migrate_host_with_paused_vm(self):
hypervisor = self.hypervisors.first()
server = self.servers.list()[3]
novaclient = self.stub_novaclient()
server_uuid = hypervisor.servers[0]["uuid"]
self._mock_current_version(novaclient, '2.45')
novaclient.hypervisors.search.return_value = [hypervisor]
novaclient.servers.get.return_value = server
novaclient.servers.live_migrate.return_value = None
ret_val = api.nova.migrate_host(self.request, "host", True, True, True)
self.assertTrue(ret_val)
novaclient.versions.get_current.assert_called_once_with()
novaclient.hypervisors.search.assert_called_once_with('host', True)
novaclient.servers.get.assert_called_once_with(server_uuid)
novaclient.servers.live_migrate.assert_called_once_with(
server_uuid, None, True, True)
def test_live_migrate_host_without_running_vm(self):
hypervisor = self.hypervisors.first()
server = self.servers.list()[1]
novaclient = self.stub_novaclient()
server_uuid = hypervisor.servers[0]["uuid"]
self._mock_current_version(novaclient, '2.45')
novaclient.hypervisors.search.return_value = [hypervisor]
novaclient.servers.get.return_value = server
novaclient.servers.migrate.return_value = None
ret_val = api.nova.migrate_host(self.request, "host", True, True, True)
self.assertTrue(ret_val)
novaclient.versions.get_current.assert_called_once_with()
novaclient.hypervisors.search.assert_called_once_with('host', True)
novaclient.servers.get.assert_called_once_with(server_uuid)
novaclient.servers.migrate.assert_called_once_with(server_uuid)
def test_flavor_list_no_extras(self):
flavors = self.flavors.list()
novaclient = self.stub_novaclient()
novaclient.flavors.list.return_value = flavors
api_flavors = api.nova.flavor_list(self.request)
self.assertEqual(len(flavors), len(api_flavors))
novaclient.flavors.list.assert_called_once_with(is_public=True)
def test_flavor_get_no_extras(self):
flavor = self.flavors.list()[1]
novaclient = self.stub_novaclient()
novaclient.flavors.get.return_value = flavor
api_flavor = api.nova.flavor_get(self.request, flavor.id)
self.assertEqual(api_flavor.id, flavor.id)
novaclient.flavors.get.assert_called_once_with(flavor.id)
def _test_flavor_list_paged(self, reversed_order=False, paginate=True):
page_size = getattr(settings, 'API_RESULT_PAGE_SIZE', 20)
flavors = self.flavors.list()
order = 'asc' if reversed_order else 'desc'
novaclient = self.stub_novaclient()
novaclient.flavors.list.return_value = flavors
api_flavors, has_more, has_prev = api.nova.flavor_list_paged(
self.request, True, False, None, paginate=paginate,
reversed_order=reversed_order)
for flavor in api_flavors:
self.assertIsInstance(flavor, type(flavors[0]))
self.assertFalse(has_more)
self.assertFalse(has_prev)
if paginate:
novaclient.flavors.list.assert_called_once_with(
is_public=True, marker=None, limit=page_size + 1,
sort_key='name', sort_dir=order)
else:
novaclient.flavors.list.assert_called_once_with(
is_public=True)
@override_settings(API_RESULT_PAGE_SIZE=1)
def test_flavor_list_pagination_more_and_prev(self):
page_size = getattr(settings, 'API_RESULT_PAGE_SIZE', 1)
flavors = self.flavors.list()
marker = flavors[0].id
novaclient = self.stub_novaclient()
novaclient.flavors.list.return_value = flavors[1:page_size + 2]
api_flavors, has_more, has_prev = api.nova\
.flavor_list_paged(
self.request,
True,
False,
marker,
paginate=True)
for flavor in api_flavors:
self.assertIsInstance(flavor, type(flavors[0]))
self.assertEqual(page_size, len(api_flavors))
self.assertTrue(has_more)
self.assertTrue(has_prev)
novaclient.flavors.list.assert_called_once_with(
is_public=True, marker=marker, limit=page_size + 1,
sort_key='name', sort_dir='desc')
    def test_flavor_list_paged_default_order(self):
        # Default call: pagination enabled, descending name order.
        self._test_flavor_list_paged()
    def test_flavor_list_paged_reversed_order(self):
        # reversed_order=True must translate to sort_dir='asc'.
        self._test_flavor_list_paged(reversed_order=True)
    def test_flavor_list_paged_paginate_false(self):
        # With paginate=False no pagination kwargs may reach novaclient.
        self._test_flavor_list_paged(paginate=False)
def test_flavor_create(self):
flavor = self.flavors.first()
novaclient = self.stub_novaclient()
novaclient.flavors.create.return_value = flavor
api_flavor = api.nova.flavor_create(self.request,
flavor.name,
flavor.ram,
flavor.vcpus,
flavor.disk)
self.assertIsInstance(api_flavor, type(flavor))
self.assertEqual(flavor.name, api_flavor.name)
self.assertEqual(flavor.ram, api_flavor.ram)
self.assertEqual(flavor.vcpus, api_flavor.vcpus)
self.assertEqual(flavor.disk, api_flavor.disk)
self.assertEqual(0, api_flavor.ephemeral)
self.assertEqual(0, api_flavor.swap)
self.assertTrue(api_flavor.is_public)
self.assertEqual(1, api_flavor.rxtx_factor)
novaclient.flavors.create.assert_called_once_with(
flavor.name, flavor.ram, flavor.vcpus, flavor.disk,
flavorid='auto', ephemeral=0, swap=0, is_public=True,
rxtx_factor=1)
def test_flavor_delete(self):
flavor = self.flavors.first()
novaclient = self.stub_novaclient()
novaclient.flavors.delete.return_value = None
api_val = api.nova.flavor_delete(self.request, flavor.id)
self.assertIsNone(api_val)
novaclient.flavors.delete.assert_called_once_with(flavor.id)
def test_flavor_access_list(self):
flavor_access = self.flavor_access.list()
flavor = [f for f in self.flavors.list() if f.id ==
flavor_access[0].flavor_id][0]
novaclient = self.stub_novaclient()
novaclient.flavor_access.list.return_value = flavor_access
api_flavor_access = api.nova.flavor_access_list(self.request, flavor)
self.assertEqual(len(flavor_access), len(api_flavor_access))
for access in api_flavor_access:
self.assertIsInstance(access, nova_flavor_access.FlavorAccess)
self.assertEqual(access.flavor_id, flavor.id)
novaclient.flavor_access.list.assert_called_once_with(flavor=flavor)
def test_add_tenant_to_flavor(self):
flavor_access = [self.flavor_access.first()]
flavor = [f for f in self.flavors.list() if f.id ==
flavor_access[0].flavor_id][0]
tenant = [t for t in self.tenants.list() if t.id ==
flavor_access[0].tenant_id][0]
novaclient = self.stub_novaclient()
novaclient.flavor_access.add_tenant_access.return_value = flavor_access
api_flavor_access = api.nova.add_tenant_to_flavor(self.request,
flavor,
tenant)
self.assertIsInstance(api_flavor_access, list)
self.assertEqual(len(flavor_access), len(api_flavor_access))
for access in api_flavor_access:
self.assertEqual(access.flavor_id, flavor.id)
self.assertEqual(access.tenant_id, tenant.id)
novaclient.flavor_access.add_tenant_access.assert_called_once_with(
flavor=flavor, tenant=tenant)
def test_remove_tenant_from_flavor(self):
flavor_access = [self.flavor_access.first()]
flavor = [f for f in self.flavors.list() if f.id ==
flavor_access[0].flavor_id][0]
tenant = [t for t in self.tenants.list() if t.id ==
flavor_access[0].tenant_id][0]
novaclient = self.stub_novaclient()
novaclient.flavor_access.remove_tenant_access.return_value = []
api_val = api.nova.remove_tenant_from_flavor(self.request,
flavor,
tenant)
self.assertEqual(len(api_val), len([]))
self.assertIsInstance(api_val, list)
novaclient.flavor_access.remove_tenant_access.assert_called_once_with(
flavor=flavor, tenant=tenant)
def test_server_group_list(self):
server_groups = self.server_groups.list()
novaclient = self.stub_novaclient()
novaclient.server_groups.list.return_value = server_groups
ret_val = api.nova.server_group_list(self.request)
self.assertIsInstance(ret_val, list)
self.assertEqual(len(ret_val), len(server_groups))
novaclient.server_groups.list.assert_called_once_with()
| true | true |
f7310b7ead9624ea504e64c406cc2aff15d3c26e | 475 | py | Python | File3.py | PRASAD-DANGARE/PYTHON | 36214f7dc3762d327e5a29e40752edeb098249c8 | [
"MIT"
] | 1 | 2021-06-07T07:55:28.000Z | 2021-06-07T07:55:28.000Z | File3.py | PRASAD-DANGARE/PYTHON | 36214f7dc3762d327e5a29e40752edeb098249c8 | [
"MIT"
] | null | null | null | File3.py | PRASAD-DANGARE/PYTHON | 36214f7dc3762d327e5a29e40752edeb098249c8 | [
"MIT"
] | null | null | null | '''
Function Name : main()
Description : How To Open File & Read The Data Using Open, Read
Function Date : 15 Mar 2021
Function Author : Prasad Dangare
Input : Int
Output : Int
'''
def main():
    """Prompt for a file name and print that file's contents.

    Fix: the original called open() without ever closing the handle; a
    ``with`` block now guarantees the file is closed even if read() raises.
    """
    name = input("Enter the file name that you want to Read : ")
    with open(name, "r") as fobj:  # context manager closes the file
        print("Data from file is ")
        print(fobj.read())


if __name__ == "__main__":
    main()
| 21.590909 | 70 | 0.543158 |
def main():
name = input("Enter the file name that you want to Read : ")
fobj = open(name,"r")
print("Data from file is ")
print(fobj.read())
if __name__ == "__main__":
main()
| true | true |
f7310b85b49a3410f4d4de81bf967f2752594b51 | 2,260 | py | Python | src/recording_script_generator/app/sentence_splitting.py | stefantaubert/recording-script-generator | 01cdcd4b85ed7f245f4bb8535d870c04472746c9 | [
"MIT"
] | null | null | null | src/recording_script_generator/app/sentence_splitting.py | stefantaubert/recording-script-generator | 01cdcd4b85ed7f245f4bb8535d870c04472746c9 | [
"MIT"
] | null | null | null | src/recording_script_generator/app/sentence_splitting.py | stefantaubert/recording-script-generator | 01cdcd4b85ed7f245f4bb8535d870c04472746c9 | [
"MIT"
] | null | null | null | from pathlib import Path
from typing import Optional
from recording_script_generator.app.helper import (
raise_error_if_directory_exists_and_not_overwrite,
raise_error_if_directory_not_exists)
from recording_script_generator.app.io import (load_reading_passages,
load_reading_passages_paths,
load_selection,
save_reading_passages,
save_reading_passages_paths,
save_representations,
save_selection)
from recording_script_generator.core.sentence_splitting import main_inplace
from recording_script_generator.globals import (DEFAULT_CHUNKSIZE_FILES,
DEFAULT_MAXTASKSPERCHILD,
DEFAULT_N_JOBS,
DEFAULT_OVERWRITE)
def app_split_sentences(working_directory: Path, custom_output_directory: Optional[Path] = None, n_jobs: int = DEFAULT_N_JOBS, maxtasksperchild: Optional[int] = DEFAULT_MAXTASKSPERCHILD, chunksize: Optional[int] = DEFAULT_CHUNKSIZE_FILES, overwrite: bool = DEFAULT_OVERWRITE):
  """Split the project's reading passages into sentences and persist them.

  Results are written to *custom_output_directory* when given (guarded by
  *overwrite*), otherwise back into *working_directory*.
  """
  if raise_error_if_directory_not_exists(working_directory):
    return

  if custom_output_directory is None:
    output_directory = working_directory
  else:
    if raise_error_if_directory_exists_and_not_overwrite(custom_output_directory, overwrite):
      return
    output_directory = custom_output_directory

  selection = load_selection(working_directory)
  reading_passages = load_reading_passages(working_directory)
  passage_paths = load_reading_passages_paths(working_directory)

  representations = main_inplace(
    selection, reading_passages, passage_paths, n_jobs, maxtasksperchild,
    chunksize)

  # Persist every artefact that main_inplace may have mutated in place.
  save_reading_passages(output_directory, reading_passages)
  save_selection(output_directory, selection)
  save_reading_passages_paths(output_directory, passage_paths)
  save_representations(output_directory, representations)
  # TODO maybe also remove unused paths from paths
| 52.55814 | 276 | 0.684071 | from pathlib import Path
from typing import Optional
from recording_script_generator.app.helper import (
raise_error_if_directory_exists_and_not_overwrite,
raise_error_if_directory_not_exists)
from recording_script_generator.app.io import (load_reading_passages,
load_reading_passages_paths,
load_selection,
save_reading_passages,
save_reading_passages_paths,
save_representations,
save_selection)
from recording_script_generator.core.sentence_splitting import main_inplace
from recording_script_generator.globals import (DEFAULT_CHUNKSIZE_FILES,
DEFAULT_MAXTASKSPERCHILD,
DEFAULT_N_JOBS,
DEFAULT_OVERWRITE)
def app_split_sentences(working_directory: Path, custom_output_directory: Optional[Path] = None, n_jobs: int = DEFAULT_N_JOBS, maxtasksperchild: Optional[int] = DEFAULT_MAXTASKSPERCHILD, chunksize: Optional[int] = DEFAULT_CHUNKSIZE_FILES, overwrite: bool = DEFAULT_OVERWRITE):
if raise_error_if_directory_not_exists(working_directory):
return
output_directory = working_directory
if custom_output_directory is not None:
if raise_error_if_directory_exists_and_not_overwrite(custom_output_directory, overwrite):
return
output_directory = custom_output_directory
selection = load_selection(working_directory)
reading_passages = load_reading_passages(working_directory)
reading_passages_paths = load_reading_passages_paths(working_directory)
representations = main_inplace(selection, reading_passages, reading_passages_paths,
n_jobs, maxtasksperchild, chunksize)
save_reading_passages(output_directory, reading_passages)
save_selection(output_directory, selection)
save_reading_passages_paths(output_directory, reading_passages_paths)
save_representations(output_directory, representations)
| true | true |
f7310ca10c88fc71c44d09d18e1abc98736f1dc9 | 10,685 | py | Python | pokemon_data.py | aroxby/pk-stadium-decoder | 71f23bcc7035fcd763e69372387becc1b4744fd0 | [
"Unlicense"
] | null | null | null | pokemon_data.py | aroxby/pk-stadium-decoder | 71f23bcc7035fcd763e69372387becc1b4744fd0 | [
"Unlicense"
] | null | null | null | pokemon_data.py | aroxby/pk-stadium-decoder | 71f23bcc7035fcd763e69372387becc1b4744fd0 | [
"Unlicense"
] | null | null | null | #!/usr/bin/env python3
from enum import Enum
class Type(Enum):
    """Pokemon elemental types, keyed by their in-game type IDs."""

    NORMAL = 0
    FIGHTING = 1
    FLYING = 2
    POISON = 3
    GROUND = 4
    ROCK = 5
    BIRD = 6
    BUG = 7
    GHOST = 8
    FIRE = 20
    WATER = 21
    GRASS = 22
    ELECTRIC = 23
    PSYCHIC = 24
    ICE = 25
    DRAGON = 26

    def __str__(self):
        """Return the display name, e.g. ``Type.FIRE`` -> ``'Fire'``.

        Every type's display name is exactly its member name capitalized,
        so the former 16-entry lookup table (rebuilt on every call) and its
        unreachable '<Type n>' fallback are unnecessary.
        """
        return self.name.capitalize()
class Move(Enum):
    """Gen-1 Pokemon moves, keyed by their in-game move IDs (0 = no move)."""
    NONE = 0
    POUND = 1
    KARATECHOP = 2
    DOUBLESLAP = 3
    COMETPUNCH = 4
    MEGAPUNCH = 5
    PAYDAY = 6
    FIREPUNCH = 7
    ICEPUNCH = 8
    THUNDERPUNCH = 9
    SCRATCH = 10
    VICEGRIP = 11
    GUILLOTINE = 12
    RAZORWIND = 13
    SWORDSDANCE = 14
    CUT = 15
    GUST = 16
    WINGATTACK = 17
    WHIRLWIND = 18
    FLY = 19
    BIND = 20
    SLAM = 21
    VINEWHIP = 22
    STOMP = 23
    DOUBLEKICK = 24
    MEGAKICK = 25
    JUMPKICK = 26
    ROLLINGKICK = 27
    SANDATTACK = 28
    HEADBUTT = 29
    HORNATTACK = 30
    FURYATTACK = 31
    HORNDRILL = 32
    TACKLE = 33
    BODYSLAM = 34
    WRAP = 35
    TAKEDOWN = 36
    THRASH = 37
    DOUBLEEDGE = 38
    TAILWHIP = 39
    POISONSTING = 40
    TWINEEDLE = 41
    PINMISSILE = 42
    LEER = 43
    BITE = 44
    GROWL = 45
    ROAR = 46
    SING = 47
    SUPERSONIC = 48
    SONICBOOM = 49
    DISABLE = 50
    ACID = 51
    EMBER = 52
    FLAMETHROWER = 53
    MIST = 54
    WATERGUN = 55
    HYDROPUMP = 56
    SURF = 57
    ICEBEAM = 58
    BLIZZARD = 59
    PSYBEAM = 60
    BUBBLEBEAM = 61
    AURORABEAM = 62
    HYPERBEAM = 63
    PECK = 64
    DRILLPECK = 65
    SUBMISSION = 66
    LOWKICK = 67
    COUNTER = 68
    SEISMICTOSS = 69
    STRENGTH = 70
    ABSORB = 71
    MEGADRAIN = 72
    LEECHSEED = 73
    GROWTH = 74
    RAZORLEAF = 75
    SOLARBEAM = 76
    POISONPOWDER = 77
    STUNSPORE = 78
    SLEEPPOWDER = 79
    PETALDANCE = 80
    STRINGSHOT = 81
    DRAGONRAGE = 82
    FIRESPIN = 83
    THUNDERSHOCK = 84
    THUNDERBOLT = 85
    THUNDERWAVE = 86
    THUNDER = 87
    ROCKTHROW = 88
    EARTHQUAKE = 89
    FISSURE = 90
    DIG = 91
    TOXIC = 92
    CONFUSION = 93
    PSYCHIC = 94
    HYPNOSIS = 95
    MEDITATE = 96
    AGILITY = 97
    QUICKATTACK = 98
    RAGE = 99
    TELEPORT = 100
    NIGHTSHADE = 101
    MIMIC = 102
    SCREECH = 103
    DOUBLETEAM = 104
    RECOVER = 105
    HARDEN = 106
    MINIMIZE = 107
    SMOKESCREEN = 108
    CONFUSERAY = 109
    WITHDRAW = 110
    DEFENSECURL = 111
    BARRIER = 112
    LIGHTSCREEN = 113
    HAZE = 114
    REFLECT = 115
    FOCUSENERGY = 116
    BIDE = 117
    METRONOME = 118
    MIRRORMOVE = 119
    SELFDESTRUCT = 120
    EGGBOMB = 121
    LICK = 122
    SMOG = 123
    SLUDGE = 124
    BONECLUB = 125
    FIREBLAST = 126
    WATERFALL = 127
    CLAMP = 128
    SWIFT = 129
    SKULLBASH = 130
    SPIKECANNON = 131
    CONSTRICT = 132
    AMNESIA = 133
    KINESIS = 134
    SOFTBOILED = 135
    HIJUMPKICK = 136
    GLARE = 137
    DREAMEATER = 138
    POISONGAS = 139
    BARRAGE = 140
    LEECHLIFE = 141
    LOVELYKISS = 142
    SKYATTACK = 143
    TRANSFORM = 144
    BUBBLE = 145
    DIZZYPUNCH = 146
    SPORE = 147
    FLASH = 148
    PSYWAVE = 149
    SPLASH = 150
    ACIDARMOR = 151
    CRABHAMMER = 152
    EXPLOSION = 153
    FURYSWIPES = 154
    BONEMERANG = 155
    REST = 156
    ROCKSLIDE = 157
    HYPERFANG = 158
    SHARPEN = 159
    CONVERSION = 160
    TRIATTACK = 161
    SUPERFANG = 162
    SLASH = 163
    SUBSTITUTE = 164
    STRUGGLE = 165

    def __str__(self):
        """Return the move's human-readable display name (keys are ordered
        alphabetically by display name, not by move ID).

        NOTE(review): the '<Move n>' fallback looks unreachable -- MAPPING
        appears to hold one entry per member -- confirm before relying on it.
        """
        MAPPING = {
            self.NONE: '-',
            self.ABSORB: 'Absorb',
            self.ACIDARMOR: 'Acid Armor',
            self.ACID: 'Acid',
            self.AGILITY: 'Agility',
            self.AMNESIA: 'Amnesia',
            self.AURORABEAM: 'Aurora Beam',
            self.BARRAGE: 'Barrage',
            self.BARRIER: 'Barrier',
            self.BIDE: 'Bide',
            self.BIND: 'Bind',
            self.BITE: 'Bite',
            self.BLIZZARD: 'Blizzard',
            self.BODYSLAM: 'Body Slam',
            self.BONECLUB: 'Bone Club',
            self.BONEMERANG: 'Bonemerang',
            self.BUBBLE: 'Bubble',
            self.BUBBLEBEAM: 'Bubblebeam',
            self.CLAMP: 'Clamp',
            self.COMETPUNCH: 'Comet Punch',
            self.CONFUSERAY: 'Confuse Ray',
            self.CONFUSION: 'Confusion',
            self.CONSTRICT: 'Constrict',
            self.CONVERSION: 'Conversion',
            self.COUNTER: 'Counter',
            self.CRABHAMMER: 'Crabhammer',
            self.CUT: 'Cut',
            self.DEFENSECURL: 'Defense Curl',
            self.DIG: 'Dig',
            self.DISABLE: 'Disable',
            self.DIZZYPUNCH: 'Dizzy Punch',
            self.DOUBLEKICK: 'Double Kick',
            self.DOUBLETEAM: 'Double Team',
            self.DOUBLEEDGE: 'Double-Edge',
            self.DOUBLESLAP: 'Doubleslap',
            self.DRAGONRAGE: 'Dragon Rage',
            self.DREAMEATER: 'Dream Eater',
            self.DRILLPECK: 'Drill Peck',
            self.EARTHQUAKE: 'Earthquake',
            self.EGGBOMB: 'Egg Bomb',
            self.EMBER: 'Ember',
            self.EXPLOSION: 'Explosion',
            self.FIREBLAST: 'Fire Blast',
            self.FIREPUNCH: 'Fire Punch',
            self.FIRESPIN: 'Fire Spin',
            self.FISSURE: 'Fissure',
            self.FLAMETHROWER: 'Flamethrower',
            self.FLASH: 'Flash',
            self.FLY: 'Fly',
            self.FOCUSENERGY: 'Focus Energy',
            self.FURYATTACK: 'Fury Attack',
            self.FURYSWIPES: 'Fury Swipes',
            self.GLARE: 'Glare',
            self.GROWL: 'Growl',
            self.GROWTH: 'Growth',
            self.GUILLOTINE: 'Guillotine',
            self.GUST: 'Gust',
            self.HARDEN: 'Harden',
            self.HAZE: 'Haze',
            self.HEADBUTT: 'Headbutt',
            self.HIJUMPKICK: 'Hi Jump Kick',
            self.HORNATTACK: 'Horn Attack',
            self.HORNDRILL: 'Horn Drill',
            self.HYDROPUMP: 'Hydro Pump',
            self.HYPERBEAM: 'Hyper Beam',
            self.HYPERFANG: 'Hyper Fang',
            self.HYPNOSIS: 'Hypnosis',
            self.ICEBEAM: 'Ice Beam',
            self.ICEPUNCH: 'Ice Punch',
            self.JUMPKICK: 'Jump Kick',
            self.KARATECHOP: 'Karate Chop',
            self.KINESIS: 'Kinesis',
            self.LEECHLIFE: 'Leech Life',
            self.LEECHSEED: 'Leech Seed',
            self.LEER: 'Leer',
            self.LICK: 'Lick',
            self.LIGHTSCREEN: 'Light Screen',
            self.LOVELYKISS: 'Lovely Kiss',
            self.LOWKICK: 'Low Kick',
            self.MEDITATE: 'Meditate',
            self.MEGADRAIN: 'Mega Drain',
            self.MEGAKICK: 'Mega Kick',
            self.MEGAPUNCH: 'Mega Punch',
            self.METRONOME: 'Metronome',
            self.MIMIC: 'Mimic',
            self.MINIMIZE: 'Minimize',
            self.MIRRORMOVE: 'Mirror Move',
            self.MIST: 'Mist',
            self.NIGHTSHADE: 'Night Shade',
            self.PAYDAY: 'Pay Day',
            self.PECK: 'Peck',
            self.PETALDANCE: 'Petal Dance',
            self.PINMISSILE: 'Pin Missile',
            self.POISONGAS: 'Poison Gas',
            self.POISONSTING: 'Poison Sting',
            self.POISONPOWDER: 'Poisonpowder',
            self.POUND: 'Pound',
            self.PSYBEAM: 'Psybeam',
            self.PSYCHIC: 'Psychic',
            self.PSYWAVE: 'Psywave',
            self.QUICKATTACK: 'Quick Attack',
            self.RAGE: 'Rage',
            self.RAZORLEAF: 'Razor Leaf',
            self.RAZORWIND: 'Razor Wind',
            self.RECOVER: 'Recover',
            self.REFLECT: 'Reflect',
            self.REST: 'Rest',
            self.ROAR: 'Roar',
            self.ROCKSLIDE: 'Rock Slide',
            self.ROCKTHROW: 'Rock Throw',
            self.ROLLINGKICK: 'Rolling Kick',
            self.SANDATTACK: 'Sand-Attack',
            self.SCRATCH: 'Scratch',
            self.SCREECH: 'Screech',
            self.SEISMICTOSS: 'Seismic Toss',
            self.SELFDESTRUCT: 'Selfdestruct',
            self.SHARPEN: 'Sharpen',
            self.SING: 'Sing',
            self.SKULLBASH: 'Skull Bash',
            self.SKYATTACK: 'Sky Attack',
            self.SLAM: 'Slam',
            self.SLASH: 'Slash',
            self.SLEEPPOWDER: 'Sleep Powder',
            self.SLUDGE: 'Sludge',
            self.SMOG: 'Smog',
            self.SMOKESCREEN: 'Smokescreen',
            self.SOFTBOILED: 'Softboiled',
            self.SOLARBEAM: 'Solarbeam',
            self.SONICBOOM: 'Sonicboom',
            self.SPIKECANNON: 'Spike Cannon',
            self.SPLASH: 'Splash',
            self.SPORE: 'Spore',
            self.STOMP: 'Stomp',
            self.STRENGTH: 'Strength',
            self.STRINGSHOT: 'String Shot',
            self.STRUGGLE: 'Struggle',
            self.STUNSPORE: 'Stun Spore',
            self.SUBMISSION: 'Submission',
            self.SUBSTITUTE: 'Substitute',
            self.SUPERFANG: 'Super Fang',
            self.SUPERSONIC: 'Supersonic',
            self.SURF: 'Surf',
            self.SWIFT: 'Swift',
            self.SWORDSDANCE: 'Swords Dance',
            self.TACKLE: 'Tackle',
            self.TAILWHIP: 'Tail Whip',
            self.TAKEDOWN: 'Take Down',
            self.TELEPORT: 'Teleport',
            self.THRASH: 'Thrash',
            self.THUNDERWAVE: 'Thunder Wave',
            self.THUNDER: 'Thunder',
            self.THUNDERBOLT: 'Thunderbolt',
            self.THUNDERPUNCH: 'Thunderpunch',
            self.THUNDERSHOCK: 'Thundershock',
            self.TOXIC: 'Toxic',
            self.TRANSFORM: 'Transform',
            self.TRIATTACK: 'Tri Attack',
            self.TWINEEDLE: 'Twineedle',
            self.VICEGRIP: 'Vicegrip',
            self.VINEWHIP: 'Vine Whip',
            self.WATERGUN: 'Water Gun',
            self.WATERFALL: 'Waterfall',
            self.WHIRLWIND: 'Whirlwind',
            self.WINGATTACK: 'Wing Attack',
            self.WITHDRAW: 'Withdraw',
            self.WRAP: 'Wrap',
        }
        name = MAPPING.get(self, f'<Move {self.value}>')
        return name
| 27.753247 | 56 | 0.51839 |
from enum import Enum
class Type(Enum):
NORMAL = 0
FIGHTING = 1
FLYING = 2
POISON = 3
GROUND = 4
ROCK = 5
BIRD = 6
BUG = 7
GHOST = 8
FIRE = 20
WATER = 21
GRASS = 22
ELECTRIC = 23
PSYCHIC = 24
ICE = 25
DRAGON = 26
def __str__(self):
MAPPING = {
self.BIRD: 'Bird',
self.BUG: 'Bug',
self.DRAGON: 'Dragon',
self.ELECTRIC: 'Electric',
self.FIGHTING: 'Fighting',
self.FIRE: 'Fire',
self.FLYING: 'Flying',
self.GHOST: 'Ghost',
self.GRASS: 'Grass',
self.GROUND: 'Ground',
self.ICE: 'Ice',
self.NORMAL: 'Normal',
self.POISON: 'Poison',
self.PSYCHIC: 'Psychic',
self.ROCK: 'Rock',
self.WATER: 'Water',
}
name = MAPPING.get(self, f'<Type {self.value}>')
return name
class Move(Enum):
NONE = 0
POUND = 1
KARATECHOP = 2
DOUBLESLAP = 3
COMETPUNCH = 4
MEGAPUNCH = 5
PAYDAY = 6
FIREPUNCH = 7
ICEPUNCH = 8
THUNDERPUNCH = 9
SCRATCH = 10
VICEGRIP = 11
GUILLOTINE = 12
RAZORWIND = 13
SWORDSDANCE = 14
CUT = 15
GUST = 16
WINGATTACK = 17
WHIRLWIND = 18
FLY = 19
BIND = 20
SLAM = 21
VINEWHIP = 22
STOMP = 23
DOUBLEKICK = 24
MEGAKICK = 25
JUMPKICK = 26
ROLLINGKICK = 27
SANDATTACK = 28
HEADBUTT = 29
HORNATTACK = 30
FURYATTACK = 31
HORNDRILL = 32
TACKLE = 33
BODYSLAM = 34
WRAP = 35
TAKEDOWN = 36
THRASH = 37
DOUBLEEDGE = 38
TAILWHIP = 39
POISONSTING = 40
TWINEEDLE = 41
PINMISSILE = 42
LEER = 43
BITE = 44
GROWL = 45
ROAR = 46
SING = 47
SUPERSONIC = 48
SONICBOOM = 49
DISABLE = 50
ACID = 51
EMBER = 52
FLAMETHROWER = 53
MIST = 54
WATERGUN = 55
HYDROPUMP = 56
SURF = 57
ICEBEAM = 58
BLIZZARD = 59
PSYBEAM = 60
BUBBLEBEAM = 61
AURORABEAM = 62
HYPERBEAM = 63
PECK = 64
DRILLPECK = 65
SUBMISSION = 66
LOWKICK = 67
COUNTER = 68
SEISMICTOSS = 69
STRENGTH = 70
ABSORB = 71
MEGADRAIN = 72
LEECHSEED = 73
GROWTH = 74
RAZORLEAF = 75
SOLARBEAM = 76
POISONPOWDER = 77
STUNSPORE = 78
SLEEPPOWDER = 79
PETALDANCE = 80
STRINGSHOT = 81
DRAGONRAGE = 82
FIRESPIN = 83
THUNDERSHOCK = 84
THUNDERBOLT = 85
THUNDERWAVE = 86
THUNDER = 87
ROCKTHROW = 88
EARTHQUAKE = 89
FISSURE = 90
DIG = 91
TOXIC = 92
CONFUSION = 93
PSYCHIC = 94
HYPNOSIS = 95
MEDITATE = 96
AGILITY = 97
QUICKATTACK = 98
RAGE = 99
TELEPORT = 100
NIGHTSHADE = 101
MIMIC = 102
SCREECH = 103
DOUBLETEAM = 104
RECOVER = 105
HARDEN = 106
MINIMIZE = 107
SMOKESCREEN = 108
CONFUSERAY = 109
WITHDRAW = 110
DEFENSECURL = 111
BARRIER = 112
LIGHTSCREEN = 113
HAZE = 114
REFLECT = 115
FOCUSENERGY = 116
BIDE = 117
METRONOME = 118
MIRRORMOVE = 119
SELFDESTRUCT = 120
EGGBOMB = 121
LICK = 122
SMOG = 123
SLUDGE = 124
BONECLUB = 125
FIREBLAST = 126
WATERFALL = 127
CLAMP = 128
SWIFT = 129
SKULLBASH = 130
SPIKECANNON = 131
CONSTRICT = 132
AMNESIA = 133
KINESIS = 134
SOFTBOILED = 135
HIJUMPKICK = 136
GLARE = 137
DREAMEATER = 138
POISONGAS = 139
BARRAGE = 140
LEECHLIFE = 141
LOVELYKISS = 142
SKYATTACK = 143
TRANSFORM = 144
BUBBLE = 145
DIZZYPUNCH = 146
SPORE = 147
FLASH = 148
PSYWAVE = 149
SPLASH = 150
ACIDARMOR = 151
CRABHAMMER = 152
EXPLOSION = 153
FURYSWIPES = 154
BONEMERANG = 155
REST = 156
ROCKSLIDE = 157
HYPERFANG = 158
SHARPEN = 159
CONVERSION = 160
TRIATTACK = 161
SUPERFANG = 162
SLASH = 163
SUBSTITUTE = 164
STRUGGLE = 165
def __str__(self):
MAPPING = {
self.NONE: '-',
self.ABSORB: 'Absorb',
self.ACIDARMOR: 'Acid Armor',
self.ACID: 'Acid',
self.AGILITY: 'Agility',
self.AMNESIA: 'Amnesia',
self.AURORABEAM: 'Aurora Beam',
self.BARRAGE: 'Barrage',
self.BARRIER: 'Barrier',
self.BIDE: 'Bide',
self.BIND: 'Bind',
self.BITE: 'Bite',
self.BLIZZARD: 'Blizzard',
self.BODYSLAM: 'Body Slam',
self.BONECLUB: 'Bone Club',
self.BONEMERANG: 'Bonemerang',
self.BUBBLE: 'Bubble',
self.BUBBLEBEAM: 'Bubblebeam',
self.CLAMP: 'Clamp',
self.COMETPUNCH: 'Comet Punch',
self.CONFUSERAY: 'Confuse Ray',
self.CONFUSION: 'Confusion',
self.CONSTRICT: 'Constrict',
self.CONVERSION: 'Conversion',
self.COUNTER: 'Counter',
self.CRABHAMMER: 'Crabhammer',
self.CUT: 'Cut',
self.DEFENSECURL: 'Defense Curl',
self.DIG: 'Dig',
self.DISABLE: 'Disable',
self.DIZZYPUNCH: 'Dizzy Punch',
self.DOUBLEKICK: 'Double Kick',
self.DOUBLETEAM: 'Double Team',
self.DOUBLEEDGE: 'Double-Edge',
self.DOUBLESLAP: 'Doubleslap',
self.DRAGONRAGE: 'Dragon Rage',
self.DREAMEATER: 'Dream Eater',
self.DRILLPECK: 'Drill Peck',
self.EARTHQUAKE: 'Earthquake',
self.EGGBOMB: 'Egg Bomb',
self.EMBER: 'Ember',
self.EXPLOSION: 'Explosion',
self.FIREBLAST: 'Fire Blast',
self.FIREPUNCH: 'Fire Punch',
self.FIRESPIN: 'Fire Spin',
self.FISSURE: 'Fissure',
self.FLAMETHROWER: 'Flamethrower',
self.FLASH: 'Flash',
self.FLY: 'Fly',
self.FOCUSENERGY: 'Focus Energy',
self.FURYATTACK: 'Fury Attack',
self.FURYSWIPES: 'Fury Swipes',
self.GLARE: 'Glare',
self.GROWL: 'Growl',
self.GROWTH: 'Growth',
self.GUILLOTINE: 'Guillotine',
self.GUST: 'Gust',
self.HARDEN: 'Harden',
self.HAZE: 'Haze',
self.HEADBUTT: 'Headbutt',
self.HIJUMPKICK: 'Hi Jump Kick',
self.HORNATTACK: 'Horn Attack',
self.HORNDRILL: 'Horn Drill',
self.HYDROPUMP: 'Hydro Pump',
self.HYPERBEAM: 'Hyper Beam',
self.HYPERFANG: 'Hyper Fang',
self.HYPNOSIS: 'Hypnosis',
self.ICEBEAM: 'Ice Beam',
self.ICEPUNCH: 'Ice Punch',
self.JUMPKICK: 'Jump Kick',
self.KARATECHOP: 'Karate Chop',
self.KINESIS: 'Kinesis',
self.LEECHLIFE: 'Leech Life',
self.LEECHSEED: 'Leech Seed',
self.LEER: 'Leer',
self.LICK: 'Lick',
self.LIGHTSCREEN: 'Light Screen',
self.LOVELYKISS: 'Lovely Kiss',
self.LOWKICK: 'Low Kick',
self.MEDITATE: 'Meditate',
self.MEGADRAIN: 'Mega Drain',
self.MEGAKICK: 'Mega Kick',
self.MEGAPUNCH: 'Mega Punch',
self.METRONOME: 'Metronome',
self.MIMIC: 'Mimic',
self.MINIMIZE: 'Minimize',
self.MIRRORMOVE: 'Mirror Move',
self.MIST: 'Mist',
self.NIGHTSHADE: 'Night Shade',
self.PAYDAY: 'Pay Day',
self.PECK: 'Peck',
self.PETALDANCE: 'Petal Dance',
self.PINMISSILE: 'Pin Missile',
self.POISONGAS: 'Poison Gas',
self.POISONSTING: 'Poison Sting',
self.POISONPOWDER: 'Poisonpowder',
self.POUND: 'Pound',
self.PSYBEAM: 'Psybeam',
self.PSYCHIC: 'Psychic',
self.PSYWAVE: 'Psywave',
self.QUICKATTACK: 'Quick Attack',
self.RAGE: 'Rage',
self.RAZORLEAF: 'Razor Leaf',
self.RAZORWIND: 'Razor Wind',
self.RECOVER: 'Recover',
self.REFLECT: 'Reflect',
self.REST: 'Rest',
self.ROAR: 'Roar',
self.ROCKSLIDE: 'Rock Slide',
self.ROCKTHROW: 'Rock Throw',
self.ROLLINGKICK: 'Rolling Kick',
self.SANDATTACK: 'Sand-Attack',
self.SCRATCH: 'Scratch',
self.SCREECH: 'Screech',
self.SEISMICTOSS: 'Seismic Toss',
self.SELFDESTRUCT: 'Selfdestruct',
self.SHARPEN: 'Sharpen',
self.SING: 'Sing',
self.SKULLBASH: 'Skull Bash',
self.SKYATTACK: 'Sky Attack',
self.SLAM: 'Slam',
self.SLASH: 'Slash',
self.SLEEPPOWDER: 'Sleep Powder',
self.SLUDGE: 'Sludge',
self.SMOG: 'Smog',
self.SMOKESCREEN: 'Smokescreen',
self.SOFTBOILED: 'Softboiled',
self.SOLARBEAM: 'Solarbeam',
self.SONICBOOM: 'Sonicboom',
self.SPIKECANNON: 'Spike Cannon',
self.SPLASH: 'Splash',
self.SPORE: 'Spore',
self.STOMP: 'Stomp',
self.STRENGTH: 'Strength',
self.STRINGSHOT: 'String Shot',
self.STRUGGLE: 'Struggle',
self.STUNSPORE: 'Stun Spore',
self.SUBMISSION: 'Submission',
self.SUBSTITUTE: 'Substitute',
self.SUPERFANG: 'Super Fang',
self.SUPERSONIC: 'Supersonic',
self.SURF: 'Surf',
self.SWIFT: 'Swift',
self.SWORDSDANCE: 'Swords Dance',
self.TACKLE: 'Tackle',
self.TAILWHIP: 'Tail Whip',
self.TAKEDOWN: 'Take Down',
self.TELEPORT: 'Teleport',
self.THRASH: 'Thrash',
self.THUNDERWAVE: 'Thunder Wave',
self.THUNDER: 'Thunder',
self.THUNDERBOLT: 'Thunderbolt',
self.THUNDERPUNCH: 'Thunderpunch',
self.THUNDERSHOCK: 'Thundershock',
self.TOXIC: 'Toxic',
self.TRANSFORM: 'Transform',
self.TRIATTACK: 'Tri Attack',
self.TWINEEDLE: 'Twineedle',
self.VICEGRIP: 'Vicegrip',
self.VINEWHIP: 'Vine Whip',
self.WATERGUN: 'Water Gun',
self.WATERFALL: 'Waterfall',
self.WHIRLWIND: 'Whirlwind',
self.WINGATTACK: 'Wing Attack',
self.WITHDRAW: 'Withdraw',
self.WRAP: 'Wrap',
}
name = MAPPING.get(self, f'<Move {self.value}>')
return name
| true | true |
f7310cbb1ac1ab0317dcf30418fa49e6095d48e7 | 2,760 | py | Python | maps/ors.py | sackh/maps-cli | 64cc1877518c88bc9b885ebc22580b595bee6fcc | [
"MIT"
] | 5 | 2021-01-21T08:19:43.000Z | 2021-12-12T06:20:53.000Z | maps/ors.py | sackh/maps-cli | 64cc1877518c88bc9b885ebc22580b595bee6fcc | [
"MIT"
] | null | null | null | maps/ors.py | sackh/maps-cli | 64cc1877518c88bc9b885ebc22580b595bee6fcc | [
"MIT"
] | null | null | null | """This module defines all the ORS(https://openrouteservice.org/services/) commands."""
import os
import click
import openrouteservice as opnrs
import simplejson as json
from geojsonio import display as geo_display
from maps.exceptions import ApiKeyNotFoundError
from maps.utils import yield_subcommands
@click.group()
@click.pass_context
def ors(ctx):
    """ORS (https://openrouteservice.org/) provider."""
    # Fresh shared-state dict; subcommands (e.g. geocoding) store the
    # resolved API key here via click's context object.
    ctx.obj = {}
@ors.command()
def show():
    """show list of all sub commands."""
    # Print one line per subcommand registered on the `ors` group.
    for sub in yield_subcommands(ors):
        click.secho(sub, fg="green")
@ors.command(short_help="forward or reverse geocode for an address or coordinates.")
@click.argument("query", required=True)
@click.option("--apikey", help="Your ORS API key", type=str)
@click.option(
    "--forward/--reverse",
    default=True,
    show_default=True,
    help="Perform a forward or reverse geocode",
)
@click.option("--raw", is_flag=True)
@click.option("--display", help="Display result in browser", is_flag=True)
@click.pass_context
def geocoding(ctx, query, apikey, forward, raw, display):
    """
    Open Route Service geocoding service.
    \f
    :param ctx: A context dictionary.
    :param query: A string to represent address query for geocoding.
    :param apikey: An API key for authentication.
    :param forward: A boolean flag for forward/reverse geocoding.
    :param raw: A boolean flag to show api response as it is.
    :param display: A boolean flag to show result in web browser.
    :return: None.
    """
    # CLI flag wins; otherwise fall back to the ORS_APIKEY env variable.
    apikey = apikey or os.environ.get("ORS_APIKEY")
    if apikey is None:
        raise ApiKeyNotFoundError(
            "Please pass Open Route Service API KEY as --apikey or set it as environment "
            "variable in ORS_APIKEY "
        )
    ctx.obj["apikey"] = apikey
    geolocator = opnrs.Client(key=ctx.obj["apikey"])
    if forward:
        geocode = geolocator.pelias_search(text=query)
        if raw:
            click.secho(json.dumps(geocode, indent=2), fg="green")
        elif display:
            # Drop the non-GeoJSON metadata block before rendering in browser.
            geocode.pop("geocoding")
            geo_display(json.dumps(geocode))
        else:
            for feature in geocode["features"]:
                # GeoJSON positions are [lon, lat]; re-key for readability.
                coords = feature["geometry"]["coordinates"]
                result = {"lat": coords[1], "lon": coords[0]}
                click.secho(json.dumps(result, indent=2), fg="green")
    else:
        # NOTE(review): assumes query is a comma-separated coordinate pair in
        # the order pelias_reverse expects (lon,lat) -- confirm with callers.
        coordinate = query.split(",")
        reverse = geolocator.pelias_reverse(point=coordinate, validate=False)
        if raw:
            for result in reverse["features"]:
                click.secho(json.dumps(result, indent=2), fg="green")
        else:
            for result in reverse["features"]:
                click.secho(result["properties"]["label"], fg="green")
| 34.074074 | 90 | 0.648913 | import os
import click
import openrouteservice as opnrs
import simplejson as json
from geojsonio import display as geo_display
from maps.exceptions import ApiKeyNotFoundError
from maps.utils import yield_subcommands
@click.group()
@click.pass_context
def ors(ctx):
ctx.obj = {}
@ors.command()
def show():
for sub in yield_subcommands(ors):
click.secho(sub, fg="green")
@ors.command(short_help="forward or reverse geocode for an address or coordinates.")
@click.argument("query", required=True)
@click.option("--apikey", help="Your ORS API key", type=str)
@click.option(
"--forward/--reverse",
default=True,
show_default=True,
help="Perform a forward or reverse geocode",
)
@click.option("--raw", is_flag=True)
@click.option("--display", help="Display result in browser", is_flag=True)
@click.pass_context
def geocoding(ctx, query, apikey, forward, raw, display):
apikey = apikey or os.environ.get("ORS_APIKEY")
if apikey is None:
raise ApiKeyNotFoundError(
"Please pass Open Route Service API KEY as --apikey or set it as environment "
"variable in ORS_APIKEY "
)
ctx.obj["apikey"] = apikey
geolocator = opnrs.Client(key=ctx.obj["apikey"])
if forward:
geocode = geolocator.pelias_search(text=query)
if raw:
click.secho(json.dumps(geocode, indent=2), fg="green")
elif display:
geocode.pop("geocoding")
geo_display(json.dumps(geocode))
else:
for feature in geocode["features"]:
coords = feature["geometry"]["coordinates"]
result = {"lat": coords[1], "lon": coords[0]}
click.secho(json.dumps(result, indent=2), fg="green")
else:
coordinate = query.split(",")
reverse = geolocator.pelias_reverse(point=coordinate, validate=False)
if raw:
for result in reverse["features"]:
click.secho(json.dumps(result, indent=2), fg="green")
else:
for result in reverse["features"]:
click.secho(result["properties"]["label"], fg="green")
| true | true |
f7310d1859fb85e6d93daf01566a93a660d350f0 | 56 | py | Python | virtual/lib/python3.6/site-packages/imagekit/forms/__init__.py | kenmutuma001/galleria | 1bbb9fbd3ca8bf7a030dbcbcbd1674d392055d72 | [
"Unlicense"
] | 2 | 2019-04-15T10:28:42.000Z | 2019-04-26T21:48:17.000Z | virtual/lib/python3.6/site-packages/imagekit/forms/__init__.py | kenmutuma001/galleria | 1bbb9fbd3ca8bf7a030dbcbcbd1674d392055d72 | [
"Unlicense"
] | 12 | 2020-02-12T00:25:14.000Z | 2022-03-11T23:48:53.000Z | virtual/lib/python3.6/site-packages/imagekit/forms/__init__.py | kenmutuma001/galleria | 1bbb9fbd3ca8bf7a030dbcbcbd1674d392055d72 | [
"Unlicense"
] | 1 | 2021-05-24T10:19:13.000Z | 2021-05-24T10:19:13.000Z | # flake8: noqa
from .fields import ProcessedImageField
| 14 | 39 | 0.803571 |
from .fields import ProcessedImageField
| true | true |
f7310fabca6b6ef898997efb6b048ead96681b15 | 4,598 | py | Python | plastering/evaluator.py | MingzheWu418/plastering | 322531e934c3acf2ecc8f520b37a6d255b9959c2 | [
"MIT"
] | 29 | 2018-09-19T01:16:27.000Z | 2022-03-29T14:35:36.000Z | plastering/evaluator.py | MingzheWu418/plastering | 322531e934c3acf2ecc8f520b37a6d255b9959c2 | [
"MIT"
] | 14 | 2019-04-12T18:37:36.000Z | 2022-02-10T00:27:55.000Z | plastering/evaluator.py | MingzheWu418/plastering | 322531e934c3acf2ecc8f520b37a6d255b9959c2 | [
"MIT"
] | 14 | 2019-03-05T23:44:11.000Z | 2022-03-18T07:29:31.000Z | from copy import deepcopy
from sklearn.metrics import f1_score
from sklearn.preprocessing import LabelBinarizer, MultiLabelBinarizer
from sklearn.preprocessing import LabelEncoder
import numpy as np
import pdb
def binarize_labels(true_labels, pred_labels):
    """Binarize two srcid->labels dicts over a shared label vocabulary.

    A MultiLabelBinarizer is fitted on the union of all predicted and
    true label sets, then applied to each dict's values.  Returns
    ``(true_mat, pred_mat)`` indicator matrices.

    NOTE(review): row order follows each dict's own iteration order --
    assumes both dicts hold the same srcids in the same order; confirm.
    Fix: removed the unused ``srcids`` local.
    """
    tot_labels = [list(labels) for labels in
                  list(pred_labels.values()) + list(true_labels.values())]
    mlb = MultiLabelBinarizer().fit(tot_labels)
    pred_mat = mlb.transform(pred_labels.values())
    true_mat = mlb.transform(true_labels.values())
    return true_mat, pred_mat
def get_micro_f1(true_labels, pred_labels):
    """Micro-averaged F1 between two srcid->labels multi-label dicts."""
    mats = binarize_labels(true_labels, pred_labels)
    return get_micro_f1_mat(*mats)
def get_macro_f1(true_labels, pred_labels):
    """Macro-averaged F1 between two srcid->labels multi-label dicts."""
    mats = binarize_labels(true_labels, pred_labels)
    return get_macro_f1_mat(*mats)
def get_macro_f1_mat(true_mat, pred_mat):
    """Macro-averaged F1 over indicator matrices.

    Label columns that never occur in the ground truth are skipped so
    they do not drag the average down.
    """
    assert true_mat.shape == pred_mat.shape
    per_label = [
        f1_score(true_mat[:, col], pred_mat[:, col])
        for col in range(true_mat.shape[1])
        if 1 in true_mat[:, col]
    ]
    return np.mean(per_label)
def get_multiclass_micro_f1(true_labels, pred_labels):
    """Micro-averaged F1 for single-label (multiclass) predictions.

    Both arguments map srcid -> label; only srcids present in
    ``true_labels`` are scored.  Labels are jointly integer-encoded so
    predicted labels unseen in the truth cannot crash the transform.

    Fixes: removed a dead ``LabelEncoder()`` instantiation (immediately
    overwritten) and commented-out code.
    """
    srcids = list(true_labels.keys())
    true_label_list = [true_labels[srcid] for srcid in srcids]
    pred_label_list = [pred_labels[srcid] for srcid in srcids]
    le = LabelEncoder()
    le.fit(true_label_list + pred_label_list)
    true_encoded = le.transform(true_label_list)
    pred_encoded = le.transform(pred_label_list)
    return f1_score(true_encoded, pred_encoded, average='micro')
def get_multiclass_macro_f1(true_labels, pred_labels):
    """Macro-averaged F1 for single-label (multiclass) predictions.

    Both arguments map srcid -> label; only srcids present in
    ``true_labels`` are scored.  Labels are jointly integer-encoded so
    predicted labels unseen in the truth cannot crash the transform.

    Fixes: removed a dead ``LabelEncoder()`` instantiation,
    commented-out code, and a misleading ``f1_micro`` local name in a
    macro-averaging function.
    """
    srcids = list(true_labels.keys())
    true_label_list = [true_labels[srcid] for srcid in srcids]
    pred_label_list = [pred_labels[srcid] for srcid in srcids]
    le = LabelEncoder()
    le.fit(true_label_list + pred_label_list)
    true_encoded = le.transform(true_label_list)
    pred_encoded = le.transform(pred_label_list)
    return f1_score(true_encoded, pred_encoded, average='macro')
def get_micro_f1_mat(true_mat, pred_mat):
    """Micro-averaged F1 from two binary indicator matrices.

    Pools true/false positives and false negatives over all cells, then
    returns the harmonic mean of micro precision and recall.

    Fixes: dropped the true-negative count (unused by micro-F1) and
    replaced ``np.bitwise_and`` on comparison results with the
    idiomatic ``&`` operator.  Note: still divides by zero (NaN/warning)
    when there are no positives at all, as the original did.
    """
    true_pos = np.sum((true_mat == 1) & (pred_mat == 1))
    false_neg = np.sum((true_mat == 1) & (pred_mat == 0))
    false_pos = np.sum((true_mat == 0) & (pred_mat == 1))
    micro_prec = true_pos / (true_pos + false_pos)
    micro_rec = true_pos / (true_pos + false_neg)
    return 2 * micro_prec * micro_rec / (micro_prec + micro_rec)
def get_point_accuracy(true_tagsets, pred_tagsets):
    """Fraction of predicted srcids whose label matches case-insensitively."""
    srcids = pred_tagsets.keys()
    matches = sum(
        1 for srcid in srcids
        if true_tagsets[srcid].lower() == pred_tagsets[srcid].lower()
    )
    return matches / len(srcids)
def get_accuracy(true_tagsets_sets, pred_tagsets_sets):
    """Mean per-srcid Jaccard similarity between predicted and true tag sets."""
    def _jaccard(srcid, predicted):
        pred, true = set(predicted), set(true_tagsets_sets[srcid])
        return len(pred & true) / len(pred | true)

    total = sum(_jaccard(srcid, tags)
                for srcid, tags in pred_tagsets_sets.items())
    return total / len(pred_tagsets_sets)
def exclude_common_tagsets(tagsets):
    """Drop ubiquitous tagsets (networkadapter-*, building-*) before scoring."""
    kept = []
    for tagset in tagsets:
        prefix = tagset.split('-')[0]
        if prefix not in ('networkadapter', 'building'):
            kept.append(tagset)
    return kept
def get_accuracy_conservative(true_tagsets_sets, pred_tagsets_sets):
    """Mean Jaccard accuracy ignoring ubiquitous tagsets.

    Tagsets prefixed ``networkadapter`` or ``building`` are filtered out
    of both sides (the same filter as exclude_common_tagsets, inlined
    here).  A srcid whose filtered ground truth is empty counts as fully
    correct, regardless of what was predicted.
    """
    def _filtered(tagsets):
        return {t for t in tagsets
                if t.split('-')[0] not in ('networkadapter', 'building')}

    acc = 0
    for srcid, pred_tagsets in pred_tagsets_sets.items():
        pred = _filtered(pred_tagsets)
        true = _filtered(true_tagsets_sets[srcid])
        if not true:
            acc += 1
        else:
            acc += len(pred & true) / len(pred | true)
    return acc / len(pred_tagsets_sets)
def get_set_accuracy(true_label_sets, pred_tagset_sets):
    # Accuracy per sample = #intersection / #union
    # Accuracy over set = average of the accuracy per sample
    # Input params dictionary based on the srcids
    # NOTE(review): unimplemented stub -- the loop body does nothing and
    # the function implicitly returns None.
    for srcid, pred_tagset_set in pred_tagset_sets.items():
        pass #TODO
| 38.966102 | 75 | 0.707916 | from copy import deepcopy
from sklearn.metrics import f1_score
from sklearn.preprocessing import LabelBinarizer, MultiLabelBinarizer
from sklearn.preprocessing import LabelEncoder
import numpy as np
import pdb
def binarize_labels(true_labels, pred_labels):
    """Binarize two srcid->labels dicts over a shared label vocabulary.

    A MultiLabelBinarizer is fitted on the union of all predicted and
    true label sets, then applied to each dict's values.  Returns
    ``(true_mat, pred_mat)`` indicator matrices.

    NOTE(review): row order follows each dict's own iteration order --
    assumes both dicts hold the same srcids in the same order; confirm.
    Fix: removed the unused ``srcids`` local.
    """
    tot_labels = [list(labels) for labels in
                  list(pred_labels.values()) + list(true_labels.values())]
    mlb = MultiLabelBinarizer().fit(tot_labels)
    pred_mat = mlb.transform(pred_labels.values())
    true_mat = mlb.transform(true_labels.values())
    return true_mat, pred_mat
def get_micro_f1(true_labels, pred_labels):
    """Micro-averaged F1 between two srcid->labels multi-label dicts."""
    mats = binarize_labels(true_labels, pred_labels)
    return get_micro_f1_mat(*mats)
def get_macro_f1(true_labels, pred_labels):
    """Macro-averaged F1 between two srcid->labels multi-label dicts."""
    mats = binarize_labels(true_labels, pred_labels)
    return get_macro_f1_mat(*mats)
def get_macro_f1_mat(true_mat, pred_mat):
    """Macro-averaged F1 over indicator matrices.

    Label columns that never occur in the ground truth are skipped so
    they do not drag the average down.
    """
    assert true_mat.shape == pred_mat.shape
    per_label = [
        f1_score(true_mat[:, col], pred_mat[:, col])
        for col in range(true_mat.shape[1])
        if 1 in true_mat[:, col]
    ]
    return np.mean(per_label)
def get_multiclass_micro_f1(true_labels, pred_labels):
    """Micro-averaged F1 for single-label (multiclass) predictions.

    Both arguments map srcid -> label; only srcids present in
    ``true_labels`` are scored.  Labels are jointly integer-encoded so
    predicted labels unseen in the truth cannot crash the transform.

    Fix: removed a dead ``LabelEncoder()`` instantiation that was
    immediately overwritten.
    """
    srcids = list(true_labels.keys())
    true_label_list = [true_labels[srcid] for srcid in srcids]
    pred_label_list = [pred_labels[srcid] for srcid in srcids]
    le = LabelEncoder()
    le.fit(true_label_list + pred_label_list)
    true_encoded = le.transform(true_label_list)
    pred_encoded = le.transform(pred_label_list)
    return f1_score(true_encoded, pred_encoded, average='micro')
def get_multiclass_macro_f1(true_labels, pred_labels):
    """Macro-averaged F1 for single-label (multiclass) predictions.

    Both arguments map srcid -> label; only srcids present in
    ``true_labels`` are scored.  Labels are jointly integer-encoded so
    predicted labels unseen in the truth cannot crash the transform.

    Fixes: removed a dead ``LabelEncoder()`` instantiation and a
    misleading ``f1_micro`` local name in a macro-averaging function.
    """
    srcids = list(true_labels.keys())
    true_label_list = [true_labels[srcid] for srcid in srcids]
    pred_label_list = [pred_labels[srcid] for srcid in srcids]
    le = LabelEncoder()
    le.fit(true_label_list + pred_label_list)
    true_encoded = le.transform(true_label_list)
    pred_encoded = le.transform(pred_label_list)
    return f1_score(true_encoded, pred_encoded, average='macro')
def get_micro_f1_mat(true_mat, pred_mat):
    """Micro-averaged F1 from two binary indicator matrices.

    Pools true/false positives and false negatives over all cells, then
    returns the harmonic mean of micro precision and recall.

    Fixes: dropped the true-negative count (unused by micro-F1) and
    replaced ``np.bitwise_and`` on comparison results with the
    idiomatic ``&`` operator.  Note: still divides by zero (NaN/warning)
    when there are no positives at all, as the original did.
    """
    true_pos = np.sum((true_mat == 1) & (pred_mat == 1))
    false_neg = np.sum((true_mat == 1) & (pred_mat == 0))
    false_pos = np.sum((true_mat == 0) & (pred_mat == 1))
    micro_prec = true_pos / (true_pos + false_pos)
    micro_rec = true_pos / (true_pos + false_neg)
    return 2 * micro_prec * micro_rec / (micro_prec + micro_rec)
def get_point_accuracy(true_tagsets, pred_tagsets):
    """Fraction of predicted srcids whose label matches case-insensitively."""
    srcids = pred_tagsets.keys()
    matches = sum(
        1 for srcid in srcids
        if true_tagsets[srcid].lower() == pred_tagsets[srcid].lower()
    )
    return matches / len(srcids)
def get_accuracy(true_tagsets_sets, pred_tagsets_sets):
    """Mean per-srcid Jaccard similarity between predicted and true tag sets."""
    def _jaccard(srcid, predicted):
        pred, true = set(predicted), set(true_tagsets_sets[srcid])
        return len(pred & true) / len(pred | true)

    total = sum(_jaccard(srcid, tags)
                for srcid, tags in pred_tagsets_sets.items())
    return total / len(pred_tagsets_sets)
def exclude_common_tagsets(tagsets):
    """Drop ubiquitous tagsets (networkadapter-*, building-*) before scoring."""
    kept = []
    for tagset in tagsets:
        prefix = tagset.split('-')[0]
        if prefix not in ('networkadapter', 'building'):
            kept.append(tagset)
    return kept
def get_accuracy_conservative(true_tagsets_sets, pred_tagsets_sets):
    """Mean Jaccard accuracy ignoring ubiquitous tagsets.

    Tagsets prefixed ``networkadapter`` or ``building`` are filtered out
    of both sides (the same filter as exclude_common_tagsets, inlined
    here).  A srcid whose filtered ground truth is empty counts as fully
    correct, regardless of what was predicted.
    """
    def _filtered(tagsets):
        return {t for t in tagsets
                if t.split('-')[0] not in ('networkadapter', 'building')}

    acc = 0
    for srcid, pred_tagsets in pred_tagsets_sets.items():
        pred = _filtered(pred_tagsets)
        true = _filtered(true_tagsets_sets[srcid])
        if not true:
            acc += 1
        else:
            acc += len(pred & true) / len(pred | true)
    return acc / len(pred_tagsets_sets)
def get_set_accuracy(true_label_sets, pred_tagset_sets):
    """Per-sample set accuracy (unimplemented stub; returns None).

    Intended: accuracy per sample = #intersection / #union, averaged
    over all srcids.

    Fix: the loop header had been truncated to invalid syntax
    (``ed_tagset_set in ...``); restored the ``for srcid, ...`` form.
    """
    for srcid, pred_tagset_set in pred_tagset_sets.items():
        pass  # TODO: implement intersection/union accuracy
| true | true |
f7310fbb1474ae83999ab53c3f7d66fcd8c2abb3 | 136,240 | py | Python | Lib/test/test_codecs.py | nkhandare/python31all- | c6c792f2db5938def0261378acb5cf1de440ff43 | [
"bzip2-1.0.6"
] | 33 | 2021-07-25T14:23:35.000Z | 2022-03-31T00:17:30.000Z | Lib/test/test_codecs.py | nkhandare/python31all- | c6c792f2db5938def0261378acb5cf1de440ff43 | [
"bzip2-1.0.6"
] | 32 | 2019-04-26T12:29:36.000Z | 2022-03-08T14:24:30.000Z | Lib/test/test_codecs.py | val-verde/cpython | 17aa701d799d5e071d83205d877f722f1498a09f | [
"0BSD"
] | 3 | 2019-11-12T15:21:58.000Z | 2020-09-04T14:27:55.000Z | import codecs
import contextlib
import io
import locale
import sys
import unittest
import encodings
from unittest import mock
from test import support
from test.support import os_helper
from test.support import warnings_helper
try:
import _testcapi
except ImportError:
_testcapi = None
try:
import ctypes
except ImportError:
ctypes = None
SIZEOF_WCHAR_T = -1
else:
SIZEOF_WCHAR_T = ctypes.sizeof(ctypes.c_wchar)
def coding_checker(self, coder):
def check(input, expect):
self.assertEqual(coder(input), (expect, len(input)))
return check
# On small versions of Windows like Windows IoT or Windows Nano Server not all codepages are present
def is_code_page_present(cp):
from ctypes import POINTER, WINFUNCTYPE, WinDLL
from ctypes.wintypes import BOOL, UINT, BYTE, WCHAR, UINT, DWORD
MAX_LEADBYTES = 12 # 5 ranges, 2 bytes ea., 0 term.
MAX_DEFAULTCHAR = 2 # single or double byte
MAX_PATH = 260
class CPINFOEXW(ctypes.Structure):
_fields_ = [("MaxCharSize", UINT),
("DefaultChar", BYTE*MAX_DEFAULTCHAR),
("LeadByte", BYTE*MAX_LEADBYTES),
("UnicodeDefaultChar", WCHAR),
("CodePage", UINT),
("CodePageName", WCHAR*MAX_PATH)]
prototype = WINFUNCTYPE(BOOL, UINT, DWORD, POINTER(CPINFOEXW))
GetCPInfoEx = prototype(("GetCPInfoExW", WinDLL("kernel32")))
info = CPINFOEXW()
return GetCPInfoEx(cp, 0, info)
class Queue(object):
"""
queue: write bytes at one end, read bytes from the other end
"""
def __init__(self, buffer):
self._buffer = buffer
def write(self, chars):
self._buffer += chars
def read(self, size=-1):
if size<0:
s = self._buffer
self._buffer = self._buffer[:0] # make empty
return s
else:
s = self._buffer[:size]
self._buffer = self._buffer[size:]
return s
class MixInCheckStateHandling:
def check_state_handling_decode(self, encoding, u, s):
for i in range(len(s)+1):
d = codecs.getincrementaldecoder(encoding)()
part1 = d.decode(s[:i])
state = d.getstate()
self.assertIsInstance(state[1], int)
# Check that the condition stated in the documentation for
# IncrementalDecoder.getstate() holds
if not state[1]:
# reset decoder to the default state without anything buffered
d.setstate((state[0][:0], 0))
# Feeding the previous input may not produce any output
self.assertTrue(not d.decode(state[0]))
# The decoder must return to the same state
self.assertEqual(state, d.getstate())
# Create a new decoder and set it to the state
# we extracted from the old one
d = codecs.getincrementaldecoder(encoding)()
d.setstate(state)
part2 = d.decode(s[i:], True)
self.assertEqual(u, part1+part2)
def check_state_handling_encode(self, encoding, u, s):
for i in range(len(u)+1):
d = codecs.getincrementalencoder(encoding)()
part1 = d.encode(u[:i])
state = d.getstate()
d = codecs.getincrementalencoder(encoding)()
d.setstate(state)
part2 = d.encode(u[i:], True)
self.assertEqual(s, part1+part2)
class ReadTest(MixInCheckStateHandling):
def check_partial(self, input, partialresults):
# get a StreamReader for the encoding and feed the bytestring version
# of input to the reader byte by byte. Read everything available from
# the StreamReader and check that the results equal the appropriate
# entries from partialresults.
q = Queue(b"")
r = codecs.getreader(self.encoding)(q)
result = ""
for (c, partialresult) in zip(input.encode(self.encoding), partialresults, strict=True):
q.write(bytes([c]))
result += r.read()
self.assertEqual(result, partialresult)
# check that there's nothing left in the buffers
self.assertEqual(r.read(), "")
self.assertEqual(r.bytebuffer, b"")
# do the check again, this time using an incremental decoder
d = codecs.getincrementaldecoder(self.encoding)()
result = ""
for (c, partialresult) in zip(input.encode(self.encoding), partialresults, strict=True):
result += d.decode(bytes([c]))
self.assertEqual(result, partialresult)
# check that there's nothing left in the buffers
self.assertEqual(d.decode(b"", True), "")
self.assertEqual(d.buffer, b"")
# Check whether the reset method works properly
d.reset()
result = ""
for (c, partialresult) in zip(input.encode(self.encoding), partialresults, strict=True):
result += d.decode(bytes([c]))
self.assertEqual(result, partialresult)
# check that there's nothing left in the buffers
self.assertEqual(d.decode(b"", True), "")
self.assertEqual(d.buffer, b"")
# check iterdecode()
encoded = input.encode(self.encoding)
self.assertEqual(
input,
"".join(codecs.iterdecode([bytes([c]) for c in encoded], self.encoding))
)
def test_readline(self):
def getreader(input):
stream = io.BytesIO(input.encode(self.encoding))
return codecs.getreader(self.encoding)(stream)
def readalllines(input, keepends=True, size=None):
reader = getreader(input)
lines = []
while True:
line = reader.readline(size=size, keepends=keepends)
if not line:
break
lines.append(line)
return "|".join(lines)
s = "foo\nbar\r\nbaz\rspam\u2028eggs"
sexpected = "foo\n|bar\r\n|baz\r|spam\u2028|eggs"
sexpectednoends = "foo|bar|baz|spam|eggs"
self.assertEqual(readalllines(s, True), sexpected)
self.assertEqual(readalllines(s, False), sexpectednoends)
self.assertEqual(readalllines(s, True, 10), sexpected)
self.assertEqual(readalllines(s, False, 10), sexpectednoends)
lineends = ("\n", "\r\n", "\r", "\u2028")
# Test long lines (multiple calls to read() in readline())
vw = []
vwo = []
for (i, lineend) in enumerate(lineends):
vw.append((i*200+200)*"\u3042" + lineend)
vwo.append((i*200+200)*"\u3042")
self.assertEqual(readalllines("".join(vw), True), "|".join(vw))
self.assertEqual(readalllines("".join(vw), False), "|".join(vwo))
# Test lines where the first read might end with \r, so the
# reader has to look ahead whether this is a lone \r or a \r\n
for size in range(80):
for lineend in lineends:
s = 10*(size*"a" + lineend + "xxx\n")
reader = getreader(s)
for i in range(10):
self.assertEqual(
reader.readline(keepends=True),
size*"a" + lineend,
)
self.assertEqual(
reader.readline(keepends=True),
"xxx\n",
)
reader = getreader(s)
for i in range(10):
self.assertEqual(
reader.readline(keepends=False),
size*"a",
)
self.assertEqual(
reader.readline(keepends=False),
"xxx",
)
def test_mixed_readline_and_read(self):
lines = ["Humpty Dumpty sat on a wall,\n",
"Humpty Dumpty had a great fall.\r\n",
"All the king's horses and all the king's men\r",
"Couldn't put Humpty together again."]
data = ''.join(lines)
def getreader():
stream = io.BytesIO(data.encode(self.encoding))
return codecs.getreader(self.encoding)(stream)
# Issue #8260: Test readline() followed by read()
f = getreader()
self.assertEqual(f.readline(), lines[0])
self.assertEqual(f.read(), ''.join(lines[1:]))
self.assertEqual(f.read(), '')
# Issue #32110: Test readline() followed by read(n)
f = getreader()
self.assertEqual(f.readline(), lines[0])
self.assertEqual(f.read(1), lines[1][0])
self.assertEqual(f.read(0), '')
self.assertEqual(f.read(100), data[len(lines[0]) + 1:][:100])
# Issue #16636: Test readline() followed by readlines()
f = getreader()
self.assertEqual(f.readline(), lines[0])
self.assertEqual(f.readlines(), lines[1:])
self.assertEqual(f.read(), '')
# Test read(n) followed by read()
f = getreader()
self.assertEqual(f.read(size=40, chars=5), data[:5])
self.assertEqual(f.read(), data[5:])
self.assertEqual(f.read(), '')
# Issue #32110: Test read(n) followed by read(n)
f = getreader()
self.assertEqual(f.read(size=40, chars=5), data[:5])
self.assertEqual(f.read(1), data[5])
self.assertEqual(f.read(0), '')
self.assertEqual(f.read(100), data[6:106])
# Issue #12446: Test read(n) followed by readlines()
f = getreader()
self.assertEqual(f.read(size=40, chars=5), data[:5])
self.assertEqual(f.readlines(), [lines[0][5:]] + lines[1:])
self.assertEqual(f.read(), '')
def test_bug1175396(self):
s = [
'<%!--===================================================\r\n',
' BLOG index page: show recent articles,\r\n',
' today\'s articles, or articles of a specific date.\r\n',
'========================================================--%>\r\n',
'<%@inputencoding="ISO-8859-1"%>\r\n',
'<%@pagetemplate=TEMPLATE.y%>\r\n',
'<%@import=import frog.util, frog%>\r\n',
'<%@import=import frog.objects%>\r\n',
'<%@import=from frog.storageerrors import StorageError%>\r\n',
'<%\r\n',
'\r\n',
'import logging\r\n',
'log=logging.getLogger("Snakelets.logger")\r\n',
'\r\n',
'\r\n',
'user=self.SessionCtx.user\r\n',
'storageEngine=self.SessionCtx.storageEngine\r\n',
'\r\n',
'\r\n',
'def readArticlesFromDate(date, count=None):\r\n',
' entryids=storageEngine.listBlogEntries(date)\r\n',
' entryids.reverse() # descending\r\n',
' if count:\r\n',
' entryids=entryids[:count]\r\n',
' try:\r\n',
' return [ frog.objects.BlogEntry.load(storageEngine, date, Id) for Id in entryids ]\r\n',
' except StorageError,x:\r\n',
' log.error("Error loading articles: "+str(x))\r\n',
' self.abort("cannot load articles")\r\n',
'\r\n',
'showdate=None\r\n',
'\r\n',
'arg=self.Request.getArg()\r\n',
'if arg=="today":\r\n',
' #-------------------- TODAY\'S ARTICLES\r\n',
' self.write("<h2>Today\'s articles</h2>")\r\n',
' showdate = frog.util.isodatestr() \r\n',
' entries = readArticlesFromDate(showdate)\r\n',
'elif arg=="active":\r\n',
' #-------------------- ACTIVE ARTICLES redirect\r\n',
' self.Yredirect("active.y")\r\n',
'elif arg=="login":\r\n',
' #-------------------- LOGIN PAGE redirect\r\n',
' self.Yredirect("login.y")\r\n',
'elif arg=="date":\r\n',
' #-------------------- ARTICLES OF A SPECIFIC DATE\r\n',
' showdate = self.Request.getParameter("date")\r\n',
' self.write("<h2>Articles written on %s</h2>"% frog.util.mediumdatestr(showdate))\r\n',
' entries = readArticlesFromDate(showdate)\r\n',
'else:\r\n',
' #-------------------- RECENT ARTICLES\r\n',
' self.write("<h2>Recent articles</h2>")\r\n',
' dates=storageEngine.listBlogEntryDates()\r\n',
' if dates:\r\n',
' entries=[]\r\n',
' SHOWAMOUNT=10\r\n',
' for showdate in dates:\r\n',
' entries.extend( readArticlesFromDate(showdate, SHOWAMOUNT-len(entries)) )\r\n',
' if len(entries)>=SHOWAMOUNT:\r\n',
' break\r\n',
' \r\n',
]
stream = io.BytesIO("".join(s).encode(self.encoding))
reader = codecs.getreader(self.encoding)(stream)
for (i, line) in enumerate(reader):
self.assertEqual(line, s[i])
def test_readlinequeue(self):
q = Queue(b"")
writer = codecs.getwriter(self.encoding)(q)
reader = codecs.getreader(self.encoding)(q)
# No lineends
writer.write("foo\r")
self.assertEqual(reader.readline(keepends=False), "foo")
writer.write("\nbar\r")
self.assertEqual(reader.readline(keepends=False), "")
self.assertEqual(reader.readline(keepends=False), "bar")
writer.write("baz")
self.assertEqual(reader.readline(keepends=False), "baz")
self.assertEqual(reader.readline(keepends=False), "")
# Lineends
writer.write("foo\r")
self.assertEqual(reader.readline(keepends=True), "foo\r")
writer.write("\nbar\r")
self.assertEqual(reader.readline(keepends=True), "\n")
self.assertEqual(reader.readline(keepends=True), "bar\r")
writer.write("baz")
self.assertEqual(reader.readline(keepends=True), "baz")
self.assertEqual(reader.readline(keepends=True), "")
writer.write("foo\r\n")
self.assertEqual(reader.readline(keepends=True), "foo\r\n")
def test_bug1098990_a(self):
s1 = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy\r\n"
s2 = "offending line: ladfj askldfj klasdj fskla dfzaskdj fasklfj laskd fjasklfzzzzaa%whereisthis!!!\r\n"
s3 = "next line.\r\n"
s = (s1+s2+s3).encode(self.encoding)
stream = io.BytesIO(s)
reader = codecs.getreader(self.encoding)(stream)
self.assertEqual(reader.readline(), s1)
self.assertEqual(reader.readline(), s2)
self.assertEqual(reader.readline(), s3)
self.assertEqual(reader.readline(), "")
def test_bug1098990_b(self):
s1 = "aaaaaaaaaaaaaaaaaaaaaaaa\r\n"
s2 = "bbbbbbbbbbbbbbbbbbbbbbbb\r\n"
s3 = "stillokay:bbbbxx\r\n"
s4 = "broken!!!!badbad\r\n"
s5 = "againokay.\r\n"
s = (s1+s2+s3+s4+s5).encode(self.encoding)
stream = io.BytesIO(s)
reader = codecs.getreader(self.encoding)(stream)
self.assertEqual(reader.readline(), s1)
self.assertEqual(reader.readline(), s2)
self.assertEqual(reader.readline(), s3)
self.assertEqual(reader.readline(), s4)
self.assertEqual(reader.readline(), s5)
self.assertEqual(reader.readline(), "")
ill_formed_sequence_replace = "\ufffd"
def test_lone_surrogates(self):
self.assertRaises(UnicodeEncodeError, "\ud800".encode, self.encoding)
self.assertEqual("[\uDC80]".encode(self.encoding, "backslashreplace"),
"[\\udc80]".encode(self.encoding))
self.assertEqual("[\uDC80]".encode(self.encoding, "namereplace"),
"[\\udc80]".encode(self.encoding))
self.assertEqual("[\uDC80]".encode(self.encoding, "xmlcharrefreplace"),
"[�]".encode(self.encoding))
self.assertEqual("[\uDC80]".encode(self.encoding, "ignore"),
"[]".encode(self.encoding))
self.assertEqual("[\uDC80]".encode(self.encoding, "replace"),
"[?]".encode(self.encoding))
# sequential surrogate characters
self.assertEqual("[\uD800\uDC80]".encode(self.encoding, "ignore"),
"[]".encode(self.encoding))
self.assertEqual("[\uD800\uDC80]".encode(self.encoding, "replace"),
"[??]".encode(self.encoding))
bom = "".encode(self.encoding)
for before, after in [("\U00010fff", "A"), ("[", "]"),
("A", "\U00010fff")]:
before_sequence = before.encode(self.encoding)[len(bom):]
after_sequence = after.encode(self.encoding)[len(bom):]
test_string = before + "\uDC80" + after
test_sequence = (bom + before_sequence +
self.ill_formed_sequence + after_sequence)
self.assertRaises(UnicodeDecodeError, test_sequence.decode,
self.encoding)
self.assertEqual(test_string.encode(self.encoding,
"surrogatepass"),
test_sequence)
self.assertEqual(test_sequence.decode(self.encoding,
"surrogatepass"),
test_string)
self.assertEqual(test_sequence.decode(self.encoding, "ignore"),
before + after)
self.assertEqual(test_sequence.decode(self.encoding, "replace"),
before + self.ill_formed_sequence_replace + after)
backslashreplace = ''.join('\\x%02x' % b
for b in self.ill_formed_sequence)
self.assertEqual(test_sequence.decode(self.encoding, "backslashreplace"),
before + backslashreplace + after)
def test_incremental_surrogatepass(self):
# Test incremental decoder for surrogatepass handler:
# see issue #24214
# High surrogate
data = '\uD901'.encode(self.encoding, 'surrogatepass')
for i in range(1, len(data)):
dec = codecs.getincrementaldecoder(self.encoding)('surrogatepass')
self.assertEqual(dec.decode(data[:i]), '')
self.assertEqual(dec.decode(data[i:], True), '\uD901')
# Low surrogate
data = '\uDC02'.encode(self.encoding, 'surrogatepass')
for i in range(1, len(data)):
dec = codecs.getincrementaldecoder(self.encoding)('surrogatepass')
self.assertEqual(dec.decode(data[:i]), '')
self.assertEqual(dec.decode(data[i:]), '\uDC02')
class UTF32Test(ReadTest, unittest.TestCase):
    """Tests for the BOM-aware "utf-32" codec (native byte order)."""
    encoding = "utf-32"
    if sys.byteorder == 'little':
        ill_formed_sequence = b"\x80\xdc\x00\x00"
    else:
        ill_formed_sequence = b"\x00\x00\xdc\x80"
    # "spamspam" encoded with a single leading BOM, little/big endian.
    spamle = (b'\xff\xfe\x00\x00'
              b's\x00\x00\x00p\x00\x00\x00a\x00\x00\x00m\x00\x00\x00'
              b's\x00\x00\x00p\x00\x00\x00a\x00\x00\x00m\x00\x00\x00')
    spambe = (b'\x00\x00\xfe\xff'
              b'\x00\x00\x00s\x00\x00\x00p\x00\x00\x00a\x00\x00\x00m'
              b'\x00\x00\x00s\x00\x00\x00p\x00\x00\x00a\x00\x00\x00m')
    def test_only_one_bom(self):
        _,_,reader,writer = codecs.lookup(self.encoding)
        # encode some stream
        s = io.BytesIO()
        f = writer(s)
        f.write("spam")
        f.write("spam")
        d = s.getvalue()
        # check whether there is exactly one BOM in it
        self.assertTrue(d == self.spamle or d == self.spambe)
        # try to read it back
        s = io.BytesIO(d)
        f = reader(s)
        self.assertEqual(f.read(), "spamspam")
    def test_badbom(self):
        s = io.BytesIO(4*b"\xff")
        f = codecs.getreader(self.encoding)(s)
        self.assertRaises(UnicodeError, f.read)
        s = io.BytesIO(8*b"\xff")
        f = codecs.getreader(self.encoding)(s)
        self.assertRaises(UnicodeError, f.read)
    def test_partial(self):
        self.check_partial(
            "\x00\xff\u0100\uffff\U00010000",
            [
                "", # first byte of BOM read
                "", # second byte of BOM read
                "", # third byte of BOM read
                "", # fourth byte of BOM read => byteorder known
                "",
                "",
                "",
                "\x00",
                "\x00",
                "\x00",
                "\x00",
                "\x00\xff",
                "\x00\xff",
                "\x00\xff",
                "\x00\xff",
                "\x00\xff\u0100",
                "\x00\xff\u0100",
                "\x00\xff\u0100",
                "\x00\xff\u0100",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff\U00010000",
            ]
        )
    def test_handlers(self):
        self.assertEqual(('\ufffd', 1),
                         codecs.utf_32_decode(b'\x01', 'replace', True))
        self.assertEqual(('', 1),
                         codecs.utf_32_decode(b'\x01', 'ignore', True))
    def test_errors(self):
        self.assertRaises(UnicodeDecodeError, codecs.utf_32_decode,
                          b"\xff", "strict", True)
    def test_decoder_state(self):
        self.check_state_handling_decode(self.encoding,
                                         "spamspam", self.spamle)
        self.check_state_handling_decode(self.encoding,
                                         "spamspam", self.spambe)
    def test_issue8941(self):
        # Issue #8941: insufficient result allocation when decoding into
        # surrogate pairs on UCS-2 builds.
        encoded_le = b'\xff\xfe\x00\x00' + b'\x00\x00\x01\x00' * 1024
        self.assertEqual('\U00010000' * 1024,
                         codecs.utf_32_decode(encoded_le)[0])
        encoded_be = b'\x00\x00\xfe\xff' + b'\x00\x01\x00\x00' * 1024
        self.assertEqual('\U00010000' * 1024,
                         codecs.utf_32_decode(encoded_be)[0])
class UTF32LETest(ReadTest, unittest.TestCase):
    """Tests for the BOM-less little-endian "utf-32-le" codec."""
    encoding = "utf-32-le"
    ill_formed_sequence = b"\x80\xdc\x00\x00"
    def test_partial(self):
        self.check_partial(
            "\x00\xff\u0100\uffff\U00010000",
            [
                "",
                "",
                "",
                "\x00",
                "\x00",
                "\x00",
                "\x00",
                "\x00\xff",
                "\x00\xff",
                "\x00\xff",
                "\x00\xff",
                "\x00\xff\u0100",
                "\x00\xff\u0100",
                "\x00\xff\u0100",
                "\x00\xff\u0100",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff\U00010000",
            ]
        )
    def test_simple(self):
        self.assertEqual("\U00010203".encode(self.encoding), b"\x03\x02\x01\x00")
    def test_errors(self):
        self.assertRaises(UnicodeDecodeError, codecs.utf_32_le_decode,
                          b"\xff", "strict", True)
    def test_issue8941(self):
        # Issue #8941: insufficient result allocation when decoding into
        # surrogate pairs on UCS-2 builds.
        encoded = b'\x00\x00\x01\x00' * 1024
        self.assertEqual('\U00010000' * 1024,
                         codecs.utf_32_le_decode(encoded)[0])
class UTF32BETest(ReadTest, unittest.TestCase):
    """Tests for the BOM-less big-endian "utf-32-be" codec."""
    encoding = "utf-32-be"
    ill_formed_sequence = b"\x00\x00\xdc\x80"
    def test_partial(self):
        self.check_partial(
            "\x00\xff\u0100\uffff\U00010000",
            [
                "",
                "",
                "",
                "\x00",
                "\x00",
                "\x00",
                "\x00",
                "\x00\xff",
                "\x00\xff",
                "\x00\xff",
                "\x00\xff",
                "\x00\xff\u0100",
                "\x00\xff\u0100",
                "\x00\xff\u0100",
                "\x00\xff\u0100",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff\U00010000",
            ]
        )
    def test_simple(self):
        self.assertEqual("\U00010203".encode(self.encoding), b"\x00\x01\x02\x03")
    def test_errors(self):
        self.assertRaises(UnicodeDecodeError, codecs.utf_32_be_decode,
                          b"\xff", "strict", True)
    def test_issue8941(self):
        # Issue #8941: insufficient result allocation when decoding into
        # surrogate pairs on UCS-2 builds.
        encoded = b'\x00\x01\x00\x00' * 1024
        self.assertEqual('\U00010000' * 1024,
                         codecs.utf_32_be_decode(encoded)[0])
class UTF16Test(ReadTest, unittest.TestCase):
    """Tests for the BOM-aware "utf-16" codec (native byte order)."""
    encoding = "utf-16"
    if sys.byteorder == 'little':
        ill_formed_sequence = b"\x80\xdc"
    else:
        ill_formed_sequence = b"\xdc\x80"
    # "spamspam" encoded with a single leading BOM, little/big endian.
    spamle = b'\xff\xfes\x00p\x00a\x00m\x00s\x00p\x00a\x00m\x00'
    spambe = b'\xfe\xff\x00s\x00p\x00a\x00m\x00s\x00p\x00a\x00m'
    def test_only_one_bom(self):
        _,_,reader,writer = codecs.lookup(self.encoding)
        # encode some stream
        s = io.BytesIO()
        f = writer(s)
        f.write("spam")
        f.write("spam")
        d = s.getvalue()
        # check whether there is exactly one BOM in it
        self.assertTrue(d == self.spamle or d == self.spambe)
        # try to read it back
        s = io.BytesIO(d)
        f = reader(s)
        self.assertEqual(f.read(), "spamspam")
    def test_badbom(self):
        s = io.BytesIO(b"\xff\xff")
        f = codecs.getreader(self.encoding)(s)
        self.assertRaises(UnicodeError, f.read)
        s = io.BytesIO(b"\xff\xff\xff\xff")
        f = codecs.getreader(self.encoding)(s)
        self.assertRaises(UnicodeError, f.read)
    def test_partial(self):
        self.check_partial(
            "\x00\xff\u0100\uffff\U00010000",
            [
                "", # first byte of BOM read
                "", # second byte of BOM read => byteorder known
                "",
                "\x00",
                "\x00",
                "\x00\xff",
                "\x00\xff",
                "\x00\xff\u0100",
                "\x00\xff\u0100",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff\U00010000",
            ]
        )
    def test_handlers(self):
        self.assertEqual(('\ufffd', 1),
                         codecs.utf_16_decode(b'\x01', 'replace', True))
        self.assertEqual(('', 1),
                         codecs.utf_16_decode(b'\x01', 'ignore', True))
    def test_errors(self):
        self.assertRaises(UnicodeDecodeError, codecs.utf_16_decode,
                          b"\xff", "strict", True)
    def test_decoder_state(self):
        self.check_state_handling_decode(self.encoding,
                                         "spamspam", self.spamle)
        self.check_state_handling_decode(self.encoding,
                                         "spamspam", self.spambe)
    def test_bug691291(self):
        # Files are always opened in binary mode, even if no binary mode was
        # specified. This means that no automatic conversion of '\n' is done
        # on reading and writing.
        s1 = 'Hello\r\nworld\r\n'
        s = s1.encode(self.encoding)
        self.addCleanup(os_helper.unlink, os_helper.TESTFN)
        with open(os_helper.TESTFN, 'wb') as fp:
            fp.write(s)
        with warnings_helper.check_warnings(('', DeprecationWarning)):
            reader = codecs.open(os_helper.TESTFN, 'U', encoding=self.encoding)
        with reader:
            self.assertEqual(reader.read(), s1)
class UTF16LETest(ReadTest, unittest.TestCase):
    """Tests for the BOM-less little-endian "utf-16-le" codec."""
    encoding = "utf-16-le"
    ill_formed_sequence = b"\x80\xdc"
    def test_partial(self):
        self.check_partial(
            "\x00\xff\u0100\uffff\U00010000",
            [
                "",
                "\x00",
                "\x00",
                "\x00\xff",
                "\x00\xff",
                "\x00\xff\u0100",
                "\x00\xff\u0100",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff\U00010000",
            ]
        )
    def test_errors(self):
        # Each raw input must raise under "strict" and decode to the
        # expected replacement text under "replace".
        tests = [
            (b'\xff', '\ufffd'),
            (b'A\x00Z', 'A\ufffd'),
            (b'A\x00B\x00C\x00D\x00Z', 'ABCD\ufffd'),
            (b'\x00\xd8', '\ufffd'),
            (b'\x00\xd8A', '\ufffd'),
            (b'\x00\xd8A\x00', '\ufffdA'),
            (b'\x00\xdcA\x00', '\ufffdA'),
        ]
        for raw, expected in tests:
            self.assertRaises(UnicodeDecodeError, codecs.utf_16_le_decode,
                              raw, 'strict', True)
            self.assertEqual(raw.decode('utf-16le', 'replace'), expected)
    def test_nonbmp(self):
        self.assertEqual("\U00010203".encode(self.encoding),
                         b'\x00\xd8\x03\xde')
        self.assertEqual(b'\x00\xd8\x03\xde'.decode(self.encoding),
                         "\U00010203")
class UTF16BETest(ReadTest, unittest.TestCase):
    """Tests for the BOM-less big-endian "utf-16-be" codec."""
    encoding = "utf-16-be"
    ill_formed_sequence = b"\xdc\x80"
    def test_partial(self):
        self.check_partial(
            "\x00\xff\u0100\uffff\U00010000",
            [
                "",
                "\x00",
                "\x00",
                "\x00\xff",
                "\x00\xff",
                "\x00\xff\u0100",
                "\x00\xff\u0100",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff\U00010000",
            ]
        )
    def test_errors(self):
        # Each raw input must raise under "strict" and decode to the
        # expected replacement text under "replace".
        tests = [
            (b'\xff', '\ufffd'),
            (b'\x00A\xff', 'A\ufffd'),
            (b'\x00A\x00B\x00C\x00DZ', 'ABCD\ufffd'),
            (b'\xd8\x00', '\ufffd'),
            (b'\xd8\x00\xdc', '\ufffd'),
            (b'\xd8\x00\x00A', '\ufffdA'),
            (b'\xdc\x00\x00A', '\ufffdA'),
        ]
        for raw, expected in tests:
            self.assertRaises(UnicodeDecodeError, codecs.utf_16_be_decode,
                              raw, 'strict', True)
            self.assertEqual(raw.decode('utf-16be', 'replace'), expected)
    def test_nonbmp(self):
        self.assertEqual("\U00010203".encode(self.encoding),
                         b'\xd8\x00\xde\x03')
        self.assertEqual(b'\xd8\x00\xde\x03'.decode(self.encoding),
                         "\U00010203")
class UTF8Test(ReadTest, unittest.TestCase):
    """Tests for the UTF-8 codec."""
    encoding = "utf-8"
    # A surrogate code point (U+DC80) encoded directly; ill-formed UTF-8.
    ill_formed_sequence = b"\xed\xb2\x80"
    ill_formed_sequence_replace = "\ufffd" * 3
    # Plain UTF-8 writes no BOM; the UTF8SigTest subclass overrides this so
    # the expected-encoding assertions below can be shared.
    BOM = b''
    def test_partial(self):
        """Feeding the encoded text one byte at a time must yield exactly
        these successive prefixes (1-4 bytes per character)."""
        self.check_partial(
            "\x00\xff\u07ff\u0800\uffff\U00010000",
            [
                "\x00",
                "\x00",
                "\x00\xff",
                "\x00\xff",
                "\x00\xff\u07ff",
                "\x00\xff\u07ff",
                "\x00\xff\u07ff",
                "\x00\xff\u07ff\u0800",
                "\x00\xff\u07ff\u0800",
                "\x00\xff\u07ff\u0800",
                "\x00\xff\u07ff\u0800\uffff",
                "\x00\xff\u07ff\u0800\uffff",
                "\x00\xff\u07ff\u0800\uffff",
                "\x00\xff\u07ff\u0800\uffff",
                "\x00\xff\u07ff\u0800\uffff\U00010000",
            ]
        )
    def test_decoder_state(self):
        """Incremental decoder state must survive getstate()/setstate()."""
        u = "\x00\x7f\x80\xff\u0100\u07ff\u0800\uffff\U0010ffff"
        self.check_state_handling_decode(self.encoding,
                                         u, u.encode(self.encoding))
    def test_decode_error(self):
        """Invalid bytes honour the ignore/replace/surrogateescape/
        backslashreplace error handlers."""
        for data, error_handler, expected in (
            (b'[\x80\xff]', 'ignore', '[]'),
            (b'[\x80\xff]', 'replace', '[\ufffd\ufffd]'),
            (b'[\x80\xff]', 'surrogateescape', '[\udc80\udcff]'),
            (b'[\x80\xff]', 'backslashreplace', '[\\x80\\xff]'),
        ):
            with self.subTest(data=data, error_handler=error_handler,
                              expected=expected):
                self.assertEqual(data.decode(self.encoding, error_handler),
                                 expected)
    def test_lone_surrogates(self):
        """surrogateescape round-trips lone surrogates; non-escape
        surrogates still fail and report the exact offending slice."""
        super().test_lone_surrogates()
        # not sure if this is making sense for
        # UTF-16 and UTF-32
        self.assertEqual("[\uDC80]".encode(self.encoding, "surrogateescape"),
                         self.BOM + b'[\x80]')
        with self.assertRaises(UnicodeEncodeError) as cm:
            "[\uDC80\uD800\uDFFF]".encode(self.encoding, "surrogateescape")
        exc = cm.exception
        self.assertEqual(exc.object[exc.start:exc.end], '\uD800\uDFFF')
    def test_surrogatepass_handler(self):
        """'surrogatepass' encodes/decodes surrogates as CESU-8-style
        three-byte sequences, but truncated sequences still raise."""
        self.assertEqual("abc\ud800def".encode(self.encoding, "surrogatepass"),
                         self.BOM + b"abc\xed\xa0\x80def")
        self.assertEqual("\U00010fff\uD800".encode(self.encoding, "surrogatepass"),
                         self.BOM + b"\xf0\x90\xbf\xbf\xed\xa0\x80")
        self.assertEqual("[\uD800\uDC80]".encode(self.encoding, "surrogatepass"),
                         self.BOM + b'[\xed\xa0\x80\xed\xb2\x80]')
        self.assertEqual(b"abc\xed\xa0\x80def".decode(self.encoding, "surrogatepass"),
                         "abc\ud800def")
        self.assertEqual(b"\xf0\x90\xbf\xbf\xed\xa0\x80".decode(self.encoding, "surrogatepass"),
                         "\U00010fff\uD800")
        self.assertTrue(codecs.lookup_error("surrogatepass"))
        with self.assertRaises(UnicodeDecodeError):
            b"abc\xed\xa0".decode(self.encoding, "surrogatepass")
        with self.assertRaises(UnicodeDecodeError):
            b"abc\xed\xa0z".decode(self.encoding, "surrogatepass")
    def test_incremental_errors(self):
        # Test that the incremental decoder can fail with final=False.
        # See issue #24214
        cases = [b'\x80', b'\xBF', b'\xC0', b'\xC1', b'\xF5', b'\xF6', b'\xFF']
        for prefix in (b'\xC2', b'\xDF', b'\xE0', b'\xE0\xA0', b'\xEF',
                       b'\xEF\xBF', b'\xF0', b'\xF0\x90', b'\xF0\x90\x80',
                       b'\xF4', b'\xF4\x8F', b'\xF4\x8F\xBF'):
            for suffix in b'\x7F', b'\xC0':
                cases.append(prefix + suffix)
        cases.extend((b'\xE0\x80', b'\xE0\x9F', b'\xED\xA0\x80',
                      b'\xED\xBF\xBF', b'\xF0\x80', b'\xF0\x8F', b'\xF4\x90'))
        for data in cases:
            with self.subTest(data=data):
                dec = codecs.getincrementaldecoder(self.encoding)()
                self.assertRaises(UnicodeDecodeError, dec.decode, data)
class UTF7Test(ReadTest, unittest.TestCase):
    """Tests for the UTF-7 codec (RFC 2152)."""
    encoding = "utf-7"
    def test_ascii(self):
        """Characters in RFC 2152 sets D and O, '+', and whitespace pass
        through directly; all other ASCII is modified-base64 shifted."""
        # Set D (directly encoded characters)
        set_d = ('ABCDEFGHIJKLMNOPQRSTUVWXYZ'
                 'abcdefghijklmnopqrstuvwxyz'
                 '0123456789'
                 '\'(),-./:?')
        self.assertEqual(set_d.encode(self.encoding), set_d.encode('ascii'))
        self.assertEqual(set_d.encode('ascii').decode(self.encoding), set_d)
        # Set O (optional direct characters)
        set_o = ' !"#$%&*;<=>@[]^_`{|}'
        self.assertEqual(set_o.encode(self.encoding), set_o.encode('ascii'))
        self.assertEqual(set_o.encode('ascii').decode(self.encoding), set_o)
        # '+' itself is escaped as '+-'
        self.assertEqual('a+b'.encode(self.encoding), b'a+-b')
        self.assertEqual(b'a+-b'.decode(self.encoding), 'a+b')
        # White spaces
        ws = ' \t\n\r'
        self.assertEqual(ws.encode(self.encoding), ws.encode('ascii'))
        self.assertEqual(ws.encode('ascii').decode(self.encoding), ws)
        # Other ASCII characters
        other_ascii = ''.join(sorted(set(bytes(range(0x80)).decode()) -
                                     set(set_d + set_o + '+' + ws)))
        self.assertEqual(other_ascii.encode(self.encoding),
                         b'+AAAAAQACAAMABAAFAAYABwAIAAsADAAOAA8AEAARABIAEwAU'
                         b'ABUAFgAXABgAGQAaABsAHAAdAB4AHwBcAH4Afw-')
    def test_partial(self):
        """Feeding the encoded text byte-by-byte must yield exactly these
        successive prefixes (base64 runs decode only at group boundaries)."""
        self.check_partial(
            'a+-b\x00c\x80d\u0100e\U00010000f',
            [
                'a',
                'a',
                'a+',
                'a+-',
                'a+-b',
                'a+-b',
                'a+-b',
                'a+-b',
                'a+-b',
                'a+-b\x00',
                'a+-b\x00c',
                'a+-b\x00c',
                'a+-b\x00c',
                'a+-b\x00c',
                'a+-b\x00c',
                'a+-b\x00c\x80',
                'a+-b\x00c\x80d',
                'a+-b\x00c\x80d',
                'a+-b\x00c\x80d',
                'a+-b\x00c\x80d',
                'a+-b\x00c\x80d',
                'a+-b\x00c\x80d\u0100',
                'a+-b\x00c\x80d\u0100e',
                'a+-b\x00c\x80d\u0100e',
                'a+-b\x00c\x80d\u0100e',
                'a+-b\x00c\x80d\u0100e',
                'a+-b\x00c\x80d\u0100e',
                'a+-b\x00c\x80d\u0100e',
                'a+-b\x00c\x80d\u0100e',
                'a+-b\x00c\x80d\u0100e',
                'a+-b\x00c\x80d\u0100e\U00010000',
                'a+-b\x00c\x80d\u0100e\U00010000f',
            ]
        )
    def test_errors(self):
        """Malformed base64 runs and stray high bytes raise in strict mode
        and map to U+FFFD under 'replace'."""
        tests = [
            (b'\xffb', '\ufffdb'),
            (b'a\xffb', 'a\ufffdb'),
            (b'a\xff\xffb', 'a\ufffd\ufffdb'),
            (b'a+IK', 'a\ufffd'),
            (b'a+IK-b', 'a\ufffdb'),
            (b'a+IK,b', 'a\ufffdb'),
            (b'a+IKx', 'a\u20ac\ufffd'),
            (b'a+IKx-b', 'a\u20ac\ufffdb'),
            (b'a+IKwgr', 'a\u20ac\ufffd'),
            (b'a+IKwgr-b', 'a\u20ac\ufffdb'),
            (b'a+IKwgr,', 'a\u20ac\ufffd'),
            (b'a+IKwgr,-b', 'a\u20ac\ufffd-b'),
            (b'a+IKwgrB', 'a\u20ac\u20ac\ufffd'),
            (b'a+IKwgrB-b', 'a\u20ac\u20ac\ufffdb'),
            (b'a+/,+IKw-b', 'a\ufffd\u20acb'),
            (b'a+//,+IKw-b', 'a\ufffd\u20acb'),
            (b'a+///,+IKw-b', 'a\uffff\ufffd\u20acb'),
            (b'a+////,+IKw-b', 'a\uffff\ufffd\u20acb'),
            (b'a+IKw-b\xff', 'a\u20acb\ufffd'),
            (b'a+IKw\xffb', 'a\u20ac\ufffdb'),
            (b'a+@b', 'a\ufffdb'),
        ]
        for raw, expected in tests:
            with self.subTest(raw=raw):
                self.assertRaises(UnicodeDecodeError, codecs.utf_7_decode,
                                  raw, 'strict', True)
                self.assertEqual(raw.decode('utf-7', 'replace'), expected)
    def test_nonbmp(self):
        """Astral characters are encoded as a surrogate pair inside one
        base64 run; the trailing '-' terminator is optional on decode."""
        self.assertEqual('\U000104A0'.encode(self.encoding), b'+2AHcoA-')
        self.assertEqual('\ud801\udca0'.encode(self.encoding), b'+2AHcoA-')
        self.assertEqual(b'+2AHcoA-'.decode(self.encoding), '\U000104A0')
        self.assertEqual(b'+2AHcoA'.decode(self.encoding), '\U000104A0')
        self.assertEqual('\u20ac\U000104A0'.encode(self.encoding), b'+IKzYAdyg-')
        self.assertEqual(b'+IKzYAdyg-'.decode(self.encoding), '\u20ac\U000104A0')
        self.assertEqual(b'+IKzYAdyg'.decode(self.encoding), '\u20ac\U000104A0')
        self.assertEqual('\u20ac\u20ac\U000104A0'.encode(self.encoding),
                         b'+IKwgrNgB3KA-')
        self.assertEqual(b'+IKwgrNgB3KA-'.decode(self.encoding),
                         '\u20ac\u20ac\U000104A0')
        self.assertEqual(b'+IKwgrNgB3KA'.decode(self.encoding),
                         '\u20ac\u20ac\U000104A0')
    def test_lone_surrogates(self):
        """Lone surrogates decode as-is when complete; truncated or
        malformed runs around them are replaced with U+FFFD."""
        tests = [
            (b'a+2AE-b', 'a\ud801b'),
            (b'a+2AE\xffb', 'a\ufffdb'),
            (b'a+2AE', 'a\ufffd'),
            (b'a+2AEA-b', 'a\ufffdb'),
            (b'a+2AH-b', 'a\ufffdb'),
            (b'a+IKzYAQ-b', 'a\u20ac\ud801b'),
            (b'a+IKzYAQ\xffb', 'a\u20ac\ufffdb'),
            (b'a+IKzYAQA-b', 'a\u20ac\ufffdb'),
            (b'a+IKzYAd-b', 'a\u20ac\ufffdb'),
            (b'a+IKwgrNgB-b', 'a\u20ac\u20ac\ud801b'),
            (b'a+IKwgrNgB\xffb', 'a\u20ac\u20ac\ufffdb'),
            (b'a+IKwgrNgB', 'a\u20ac\u20ac\ufffd'),
            (b'a+IKwgrNgBA-b', 'a\u20ac\u20ac\ufffdb'),
        ]
        for raw, expected in tests:
            with self.subTest(raw=raw):
                self.assertEqual(raw.decode('utf-7', 'replace'), expected)
class UTF16ExTest(unittest.TestCase):
    """Tests for the low-level codecs.utf_16_ex_decode() helper."""

    def test_errors(self):
        """A truncated (odd-length) input must fail in strict mode."""
        with self.assertRaises(UnicodeDecodeError):
            codecs.utf_16_ex_decode(b"\xff", "strict", 0, True)

    def test_bad_args(self):
        """Calling the decoder with no arguments is a TypeError."""
        with self.assertRaises(TypeError):
            codecs.utf_16_ex_decode()
class ReadBufferTest(unittest.TestCase):
    """Tests for codecs.readbuffer_encode(), which copies any object
    supporting the buffer protocol into a bytes object."""

    def test_array(self):
        """array.array instances expose the buffer protocol and are accepted."""
        import array
        buf = array.array("b", b"spam")
        self.assertEqual(codecs.readbuffer_encode(buf), (b"spam", 4))

    def test_empty(self):
        """An empty string yields empty bytes and a consumed length of 0."""
        self.assertEqual(codecs.readbuffer_encode(""), (b"", 0))

    def test_bad_args(self):
        """Missing or non-buffer arguments raise TypeError."""
        with self.assertRaises(TypeError):
            codecs.readbuffer_encode()
        with self.assertRaises(TypeError):
            codecs.readbuffer_encode(42)
class UTF8SigTest(UTF8Test, unittest.TestCase):
    """Tests for the utf-8-sig codec: UTF-8 that writes a BOM on encode and
    skips a single leading BOM on decode (inherits the UTF8Test cases)."""
    encoding = "utf-8-sig"
    # The inherited UTF8Test assertions prepend this to expected encodings.
    BOM = codecs.BOM_UTF8
    def test_partial(self):
        """Byte-by-byte decoding: the first BOM is consumed silently, a
        second one is emitted as U+FEFF."""
        self.check_partial(
            "\ufeff\x00\xff\u07ff\u0800\uffff\U00010000",
            [
                "",
                "",
                "", # First BOM has been read and skipped
                "",
                "",
                "\ufeff", # Second BOM has been read and emitted
                "\ufeff\x00", # "\x00" read and emitted
                "\ufeff\x00", # First byte of encoded "\xff" read
                "\ufeff\x00\xff", # Second byte of encoded "\xff" read
                "\ufeff\x00\xff", # First byte of encoded "\u07ff" read
                "\ufeff\x00\xff\u07ff", # Second byte of encoded "\u07ff" read
                "\ufeff\x00\xff\u07ff",
                "\ufeff\x00\xff\u07ff",
                "\ufeff\x00\xff\u07ff\u0800",
                "\ufeff\x00\xff\u07ff\u0800",
                "\ufeff\x00\xff\u07ff\u0800",
                "\ufeff\x00\xff\u07ff\u0800\uffff",
                "\ufeff\x00\xff\u07ff\u0800\uffff",
                "\ufeff\x00\xff\u07ff\u0800\uffff",
                "\ufeff\x00\xff\u07ff\u0800\uffff",
                "\ufeff\x00\xff\u07ff\u0800\uffff\U00010000",
            ]
        )
    def test_bug1601501(self):
        # SF bug #1601501: check that the codec works with a buffer
        self.assertEqual(str(b"\xef\xbb\xbf", "utf-8-sig"), "")
    def test_bom(self):
        """The incremental decoder also strips the leading BOM."""
        d = codecs.getincrementaldecoder("utf-8-sig")()
        s = "spam"
        self.assertEqual(d.decode(s.encode("utf-8-sig")), s)
    def test_stream_bom(self):
        """A StreamReader strips the BOM regardless of read chunk size."""
        unistring = "ABC\u00A1\u2200XYZ"
        bytestring = codecs.BOM_UTF8 + b"ABC\xC2\xA1\xE2\x88\x80XYZ"
        reader = codecs.getreader("utf-8-sig")
        for sizehint in [None] + list(range(1, 11)) + \
                        [64, 128, 256, 512, 1024]:
            istream = reader(io.BytesIO(bytestring))
            ostream = io.StringIO()
            while 1:
                if sizehint is not None:
                    data = istream.read(sizehint)
                else:
                    data = istream.read()
                if not data:
                    break
                ostream.write(data)
            got = ostream.getvalue()
            self.assertEqual(got, unistring)
    def test_stream_bare(self):
        """BOM-less input decodes identically for every read chunk size."""
        unistring = "ABC\u00A1\u2200XYZ"
        bytestring = b"ABC\xC2\xA1\xE2\x88\x80XYZ"
        reader = codecs.getreader("utf-8-sig")
        for sizehint in [None] + list(range(1, 11)) + \
                        [64, 128, 256, 512, 1024]:
            istream = reader(io.BytesIO(bytestring))
            ostream = io.StringIO()
            while 1:
                if sizehint is not None:
                    data = istream.read(sizehint)
                else:
                    data = istream.read()
                if not data:
                    break
                ostream.write(data)
            got = ostream.getvalue()
            self.assertEqual(got, unistring)
class EscapeDecodeTest(unittest.TestCase):
    """Tests for codecs.escape_decode(), the bytes string-escape decoder."""
    def test_empty(self):
        self.assertEqual(codecs.escape_decode(b""), (b"", 0))
        self.assertEqual(codecs.escape_decode(bytearray()), (b"", 0))
    def test_raw(self):
        """Every byte other than backslash passes through unchanged."""
        decode = codecs.escape_decode
        for b in range(256):
            b = bytes([b])
            if b != b'\\':
                self.assertEqual(decode(b + b'0'), (b + b'0', 2))
    def test_escape(self):
        """Recognized escapes are expanded; unknown escapes pass through
        verbatim but emit a DeprecationWarning."""
        decode = codecs.escape_decode
        check = coding_checker(self, decode)
        check(b"[\\\n]", b"[]")
        check(br'[\"]', b'["]')
        check(br"[\']", b"[']")
        check(br"[\\]", b"[\\]")
        check(br"[\a]", b"[\x07]")
        check(br"[\b]", b"[\x08]")
        check(br"[\t]", b"[\x09]")
        check(br"[\n]", b"[\x0a]")
        check(br"[\v]", b"[\x0b]")
        check(br"[\f]", b"[\x0c]")
        check(br"[\r]", b"[\x0d]")
        check(br"[\7]", b"[\x07]")
        check(br"[\78]", b"[\x078]")
        check(br"[\41]", b"[!]")
        check(br"[\418]", b"[!8]")
        check(br"[\101]", b"[A]")
        check(br"[\1010]", b"[A0]")
        check(br"[\501]", b"[A]")
        check(br"[\x41]", b"[A]")
        check(br"[\x410]", b"[A0]")
        for i in range(97, 123):
            b = bytes([i])
            if b not in b'abfnrtvx':
                with self.assertWarns(DeprecationWarning):
                    check(b"\\" + b, b"\\" + b)
            with self.assertWarns(DeprecationWarning):
                check(b"\\" + b.upper(), b"\\" + b.upper())
        with self.assertWarns(DeprecationWarning):
            check(br"\8", b"\\8")
        with self.assertWarns(DeprecationWarning):
            check(br"\9", b"\\9")
        with self.assertWarns(DeprecationWarning):
            check(b"\\\xfa", b"\\\xfa")
    def test_errors(self):
        """Truncated \\x escapes raise ValueError in strict mode and honour
        the ignore/replace error handlers."""
        decode = codecs.escape_decode
        self.assertRaises(ValueError, decode, br"\x")
        self.assertRaises(ValueError, decode, br"[\x]")
        self.assertEqual(decode(br"[\x]\x", "ignore"), (b"[]", 6))
        self.assertEqual(decode(br"[\x]\x", "replace"), (b"[?]?", 6))
        self.assertRaises(ValueError, decode, br"\x0")
        self.assertRaises(ValueError, decode, br"[\x0]")
        self.assertEqual(decode(br"[\x0]\x0", "ignore"), (b"[]", 8))
        self.assertEqual(decode(br"[\x0]\x0", "replace"), (b"[?]?", 8))
# From RFC 3492
# Each entry is a (unicode_text, punycode_bytes) pair taken from the RFC's
# sample strings (appendix 7.1).
punycode_testcases = [
    # A Arabic (Egyptian):
    ("\u0644\u064A\u0647\u0645\u0627\u0628\u062A\u0643\u0644"
     "\u0645\u0648\u0634\u0639\u0631\u0628\u064A\u061F",
     b"egbpdaj6bu4bxfgehfvwxn"),
    # B Chinese (simplified):
    ("\u4ED6\u4EEC\u4E3A\u4EC0\u4E48\u4E0D\u8BF4\u4E2D\u6587",
     b"ihqwcrb4cv8a8dqg056pqjye"),
    # C Chinese (traditional):
    ("\u4ED6\u5011\u7232\u4EC0\u9EBD\u4E0D\u8AAA\u4E2D\u6587",
     b"ihqwctvzc91f659drss3x8bo0yb"),
    # D Czech: Pro<ccaron>prost<ecaron>nemluv<iacute><ccaron>esky
    ("\u0050\u0072\u006F\u010D\u0070\u0072\u006F\u0073\u0074"
     "\u011B\u006E\u0065\u006D\u006C\u0075\u0076\u00ED\u010D"
     "\u0065\u0073\u006B\u0079",
     b"Proprostnemluvesky-uyb24dma41a"),
    # E Hebrew:
    ("\u05DC\u05DE\u05D4\u05D4\u05DD\u05E4\u05E9\u05D5\u05D8"
     "\u05DC\u05D0\u05DE\u05D3\u05D1\u05E8\u05D9\u05DD\u05E2"
     "\u05D1\u05E8\u05D9\u05EA",
     b"4dbcagdahymbxekheh6e0a7fei0b"),
    # F Hindi (Devanagari):
    ("\u092F\u0939\u0932\u094B\u0917\u0939\u093F\u0928\u094D"
     "\u0926\u0940\u0915\u094D\u092F\u094B\u0902\u0928\u0939"
     "\u0940\u0902\u092C\u094B\u0932\u0938\u0915\u0924\u0947"
     "\u0939\u0948\u0902",
     b"i1baa7eci9glrd9b2ae1bj0hfcgg6iyaf8o0a1dig0cd"),
    #(G) Japanese (kanji and hiragana):
    ("\u306A\u305C\u307F\u3093\u306A\u65E5\u672C\u8A9E\u3092"
     "\u8A71\u3057\u3066\u304F\u308C\u306A\u3044\u306E\u304B",
     b"n8jok5ay5dzabd5bym9f0cm5685rrjetr6pdxa"),
    # (H) Korean (Hangul syllables):
    ("\uC138\uACC4\uC758\uBAA8\uB4E0\uC0AC\uB78C\uB4E4\uC774"
     "\uD55C\uAD6D\uC5B4\uB97C\uC774\uD574\uD55C\uB2E4\uBA74"
     "\uC5BC\uB9C8\uB098\uC88B\uC744\uAE4C",
     b"989aomsvi5e83db1d2a355cv1e0vak1dwrv93d5xbh15a0dt30a5j"
     b"psd879ccm6fea98c"),
    # (I) Russian (Cyrillic):
    ("\u043F\u043E\u0447\u0435\u043C\u0443\u0436\u0435\u043E"
     "\u043D\u0438\u043D\u0435\u0433\u043E\u0432\u043E\u0440"
     "\u044F\u0442\u043F\u043E\u0440\u0443\u0441\u0441\u043A"
     "\u0438",
     b"b1abfaaepdrnnbgefbaDotcwatmq2g4l"),
    # (J) Spanish: Porqu<eacute>nopuedensimplementehablarenEspa<ntilde>ol
    ("\u0050\u006F\u0072\u0071\u0075\u00E9\u006E\u006F\u0070"
     "\u0075\u0065\u0064\u0065\u006E\u0073\u0069\u006D\u0070"
     "\u006C\u0065\u006D\u0065\u006E\u0074\u0065\u0068\u0061"
     "\u0062\u006C\u0061\u0072\u0065\u006E\u0045\u0073\u0070"
     "\u0061\u00F1\u006F\u006C",
     b"PorqunopuedensimplementehablarenEspaol-fmd56a"),
    # (K) Vietnamese:
    # T<adotbelow>isaoh<odotbelow>kh<ocirc>ngth<ecirchookabove>ch\
    # <ihookabove>n<oacute>iti<ecircacute>ngVi<ecircdotbelow>t
    ("\u0054\u1EA1\u0069\u0073\u0061\u006F\u0068\u1ECD\u006B"
     "\u0068\u00F4\u006E\u0067\u0074\u0068\u1EC3\u0063\u0068"
     "\u1EC9\u006E\u00F3\u0069\u0074\u0069\u1EBF\u006E\u0067"
     "\u0056\u0069\u1EC7\u0074",
     b"TisaohkhngthchnitingVit-kjcr8268qyxafd2f1b9g"),
    #(L) 3<nen>B<gumi><kinpachi><sensei>
    ("\u0033\u5E74\u0042\u7D44\u91D1\u516B\u5148\u751F",
     b"3B-ww4c5e180e575a65lsy2b"),
    # (M) <amuro><namie>-with-SUPER-MONKEYS
    ("\u5B89\u5BA4\u5948\u7F8E\u6075\u002D\u0077\u0069\u0074"
     "\u0068\u002D\u0053\u0055\u0050\u0045\u0052\u002D\u004D"
     "\u004F\u004E\u004B\u0045\u0059\u0053",
     b"-with-SUPER-MONKEYS-pc58ag80a8qai00g7n9n"),
    # (N) Hello-Another-Way-<sorezore><no><basho>
    ("\u0048\u0065\u006C\u006C\u006F\u002D\u0041\u006E\u006F"
     "\u0074\u0068\u0065\u0072\u002D\u0057\u0061\u0079\u002D"
     "\u305D\u308C\u305E\u308C\u306E\u5834\u6240",
     b"Hello-Another-Way--fc4qua05auwb3674vfr0b"),
    # (O) <hitotsu><yane><no><shita>2
    ("\u3072\u3068\u3064\u5C4B\u6839\u306E\u4E0B\u0032",
     b"2-u9tlzr9756bt3uc0v"),
    # (P) Maji<de>Koi<suru>5<byou><mae>
    ("\u004D\u0061\u006A\u0069\u3067\u004B\u006F\u0069\u3059"
     "\u308B\u0035\u79D2\u524D",
     b"MajiKoi5-783gue6qz075azm5e"),
    # (Q) <pafii>de<runba>
    ("\u30D1\u30D5\u30A3\u30FC\u0064\u0065\u30EB\u30F3\u30D0",
     b"de-jg4avhby1noc0d"),
    # (R) <sono><supiido><de>
    ("\u305D\u306E\u30B9\u30D4\u30FC\u30C9\u3067",
     b"d9juau41awczczp"),
    # (S) -> $1.00 <-
    ("\u002D\u003E\u0020\u0024\u0031\u002E\u0030\u0030\u0020"
     "\u003C\u002D",
     b"-> $1.00 <--")
    ]
# Import-time sanity check: every entry must be a 2-tuple; print any
# malformed row so a bad edit to the table is noticed immediately.
for i in punycode_testcases:
    if len(i)!=2:
        print(repr(i))
class PunycodeTest(unittest.TestCase):
def test_encode(self):
for uni, puny in punycode_testcases:
# Need to convert both strings to lower case, since
# some of the extended encodings use upper case, but our
# code produces only lower case. Converting just puny to
# lower is also insufficient, since some of the input characters
# are upper case.
self.assertEqual(
str(uni.encode("punycode"), "ascii").lower(),
str(puny, "ascii").lower()
)
def test_decode(self):
for uni, puny in punycode_testcases:
self.assertEqual(uni, puny.decode("punycode"))
puny = puny.decode("ascii").encode("ascii")
self.assertEqual(uni, puny.decode("punycode"))
def test_decode_invalid(self):
testcases = [
(b"xn--w&", "strict", UnicodeError()),
(b"xn--w&", "ignore", "xn-"),
]
for puny, errors, expected in testcases:
with self.subTest(puny=puny, errors=errors):
if isinstance(expected, Exception):
self.assertRaises(UnicodeError, puny.decode, "punycode", errors)
else:
self.assertEqual(puny.decode("punycode", errors), expected)
# From http://www.gnu.org/software/libidn/draft-josefsson-idn-test-vectors.html
# Each entry is (input_utf8_bytes, expected_utf8_bytes). An expected value of
# None means nameprep must reject the input; a (None, None) row marks a
# vector that is skipped entirely (see the inline comments).
nameprep_tests = [
    # 3.1 Map to nothing.
    (b'foo\xc2\xad\xcd\x8f\xe1\xa0\x86\xe1\xa0\x8bbar'
     b'\xe2\x80\x8b\xe2\x81\xa0baz\xef\xb8\x80\xef\xb8\x88\xef'
     b'\xb8\x8f\xef\xbb\xbf',
     b'foobarbaz'),
    # 3.2 Case folding ASCII U+0043 U+0041 U+0046 U+0045.
    (b'CAFE',
     b'cafe'),
    # 3.3 Case folding 8bit U+00DF (german sharp s).
    # The original test case is bogus; it says \xc3\xdf
    (b'\xc3\x9f',
     b'ss'),
    # 3.4 Case folding U+0130 (turkish capital I with dot).
    (b'\xc4\xb0',
     b'i\xcc\x87'),
    # 3.5 Case folding multibyte U+0143 U+037A.
    (b'\xc5\x83\xcd\xba',
     b'\xc5\x84 \xce\xb9'),
    # 3.6 Case folding U+2121 U+33C6 U+1D7BB.
    # XXX: skip this as it fails in UCS-2 mode
    #('\xe2\x84\xa1\xe3\x8f\x86\xf0\x9d\x9e\xbb',
    # 'telc\xe2\x88\x95kg\xcf\x83'),
    (None, None),
    # 3.7 Normalization of U+006a U+030c U+00A0 U+00AA.
    (b'j\xcc\x8c\xc2\xa0\xc2\xaa',
     b'\xc7\xb0 a'),
    # 3.8 Case folding U+1FB7 and normalization.
    (b'\xe1\xbe\xb7',
     b'\xe1\xbe\xb6\xce\xb9'),
    # 3.9 Self-reverting case folding U+01F0 and normalization.
    # The original test case is bogus, it says `\xc7\xf0'
    (b'\xc7\xb0',
     b'\xc7\xb0'),
    # 3.10 Self-reverting case folding U+0390 and normalization.
    (b'\xce\x90',
     b'\xce\x90'),
    # 3.11 Self-reverting case folding U+03B0 and normalization.
    (b'\xce\xb0',
     b'\xce\xb0'),
    # 3.12 Self-reverting case folding U+1E96 and normalization.
    (b'\xe1\xba\x96',
     b'\xe1\xba\x96'),
    # 3.13 Self-reverting case folding U+1F56 and normalization.
    (b'\xe1\xbd\x96',
     b'\xe1\xbd\x96'),
    # 3.14 ASCII space character U+0020.
    (b' ',
     b' '),
    # 3.15 Non-ASCII 8bit space character U+00A0.
    (b'\xc2\xa0',
     b' '),
    # 3.16 Non-ASCII multibyte space character U+1680.
    (b'\xe1\x9a\x80',
     None),
    # 3.17 Non-ASCII multibyte space character U+2000.
    (b'\xe2\x80\x80',
     b' '),
    # 3.18 Zero Width Space U+200b.
    (b'\xe2\x80\x8b',
     b''),
    # 3.19 Non-ASCII multibyte space character U+3000.
    (b'\xe3\x80\x80',
     b' '),
    # 3.20 ASCII control characters U+0010 U+007F.
    (b'\x10\x7f',
     b'\x10\x7f'),
    # 3.21 Non-ASCII 8bit control character U+0085.
    (b'\xc2\x85',
     None),
    # 3.22 Non-ASCII multibyte control character U+180E.
    (b'\xe1\xa0\x8e',
     None),
    # 3.23 Zero Width No-Break Space U+FEFF.
    (b'\xef\xbb\xbf',
     b''),
    # 3.24 Non-ASCII control character U+1D175.
    (b'\xf0\x9d\x85\xb5',
     None),
    # 3.25 Plane 0 private use character U+F123.
    (b'\xef\x84\xa3',
     None),
    # 3.26 Plane 15 private use character U+F1234.
    (b'\xf3\xb1\x88\xb4',
     None),
    # 3.27 Plane 16 private use character U+10F234.
    (b'\xf4\x8f\x88\xb4',
     None),
    # 3.28 Non-character code point U+8FFFE.
    (b'\xf2\x8f\xbf\xbe',
     None),
    # 3.29 Non-character code point U+10FFFF.
    (b'\xf4\x8f\xbf\xbf',
     None),
    # 3.30 Surrogate code U+DF42.
    (b'\xed\xbd\x82',
     None),
    # 3.31 Non-plain text character U+FFFD.
    (b'\xef\xbf\xbd',
     None),
    # 3.32 Ideographic description character U+2FF5.
    (b'\xe2\xbf\xb5',
     None),
    # 3.33 Display property character U+0341.
    (b'\xcd\x81',
     b'\xcc\x81'),
    # 3.34 Left-to-right mark U+200E.
    (b'\xe2\x80\x8e',
     None),
    # 3.35 Deprecated U+202A.
    (b'\xe2\x80\xaa',
     None),
    # 3.36 Language tagging character U+E0001.
    (b'\xf3\xa0\x80\x81',
     None),
    # 3.37 Language tagging character U+E0042.
    (b'\xf3\xa0\x81\x82',
     None),
    # 3.38 Bidi: RandALCat character U+05BE and LCat characters.
    (b'foo\xd6\xbebar',
     None),
    # 3.39 Bidi: RandALCat character U+FD50 and LCat characters.
    (b'foo\xef\xb5\x90bar',
     None),
    # 3.40 Bidi: RandALCat character U+FB38 and LCat characters.
    (b'foo\xef\xb9\xb6bar',
     b'foo \xd9\x8ebar'),
    # 3.41 Bidi: RandALCat without trailing RandALCat U+0627 U+0031.
    (b'\xd8\xa71',
     None),
    # 3.42 Bidi: RandALCat character U+0627 U+0031 U+0628.
    (b'\xd8\xa71\xd8\xa8',
     b'\xd8\xa71\xd8\xa8'),
    # 3.43 Unassigned code point U+E0002.
    # Skip this test as we allow unassigned
    #(b'\xf3\xa0\x80\x82',
    # None),
    (None, None),
    # 3.44 Larger test (shrinking).
    # Original test case reads \xc3\xdf
    (b'X\xc2\xad\xc3\x9f\xc4\xb0\xe2\x84\xa1j\xcc\x8c\xc2\xa0\xc2'
     b'\xaa\xce\xb0\xe2\x80\x80',
     b'xssi\xcc\x87tel\xc7\xb0 a\xce\xb0 '),
    # 3.45 Larger test (expanding).
    # Original test case reads \xc3\x9f
    (b'X\xc3\x9f\xe3\x8c\x96\xc4\xb0\xe2\x84\xa1\xe2\x92\x9f\xe3\x8c'
     b'\x80',
     b'xss\xe3\x82\xad\xe3\x83\xad\xe3\x83\xa1\xe3\x83\xbc\xe3'
     b'\x83\x88\xe3\x83\xabi\xcc\x87tel\x28d\x29\xe3\x82'
     b'\xa2\xe3\x83\x91\xe3\x83\xbc\xe3\x83\x88')
    ]
class NameprepTest(unittest.TestCase):
    """Run the RFC 3454 stringprep vectors in ``nameprep_tests`` against
    encodings.idna.nameprep()."""

    def test_nameprep(self):
        from encodings.idna import nameprep
        for pos, (orig, prepped) in enumerate(nameprep_tests):
            if orig is None:
                # Vector skipped above (UCS-2 limitation or unassigned
                # code point that we deliberately allow).
                continue
            # The table stores the Unicode strings as UTF-8; 'surrogatepass'
            # is needed because test 3.30 contains a lone surrogate.
            orig = str(orig, "utf-8", "surrogatepass")
            if prepped is None:
                # Input contains prohibited characters and must be rejected.
                self.assertRaises(UnicodeError, nameprep, orig)
            else:
                prepped = str(prepped, "utf-8", "surrogatepass")
                try:
                    self.assertEqual(nameprep(orig), prepped)
                except Exception as e:
                    # Bug fix: support.TestFailed was deprecated in 3.9 and
                    # removed from test.support in 3.11, so raising it here
                    # would itself fail with AttributeError.  Report the
                    # failing vector number through self.fail() instead.
                    self.fail("Test 3.%d: %s" % (pos + 1, e))
class IDNACodecTest(unittest.TestCase):
    """Tests for the 'idna' codec (RFC 3490 internationalized domain names),
    including its incremental encoder/decoder which buffer partial labels
    until a dot (or final=True) is seen."""
    def test_builtin_decode(self):
        self.assertEqual(str(b"python.org", "idna"), "python.org")
        self.assertEqual(str(b"python.org.", "idna"), "python.org.")
        self.assertEqual(str(b"xn--pythn-mua.org", "idna"), "pyth\xf6n.org")
        self.assertEqual(str(b"xn--pythn-mua.org.", "idna"), "pyth\xf6n.org.")
    def test_builtin_encode(self):
        self.assertEqual("python.org".encode("idna"), b"python.org")
        self.assertEqual("python.org.".encode("idna"), b"python.org.")
        self.assertEqual("pyth\xf6n.org".encode("idna"), b"xn--pythn-mua.org")
        self.assertEqual("pyth\xf6n.org.".encode("idna"), b"xn--pythn-mua.org.")
    def test_stream(self):
        """A StreamReader must report EOF as an empty string."""
        r = codecs.getreader("idna")(io.BytesIO(b"abc"))
        r.read(3)
        self.assertEqual(r.read(), "")
    def test_incremental_decode(self):
        """Byte-at-a-time decoding buffers each label until its dot."""
        self.assertEqual(
            "".join(codecs.iterdecode((bytes([c]) for c in b"python.org"), "idna")),
            "python.org"
        )
        self.assertEqual(
            "".join(codecs.iterdecode((bytes([c]) for c in b"python.org."), "idna")),
            "python.org."
        )
        self.assertEqual(
            "".join(codecs.iterdecode((bytes([c]) for c in b"xn--pythn-mua.org."), "idna")),
            "pyth\xf6n.org."
        )
        self.assertEqual(
            "".join(codecs.iterdecode((bytes([c]) for c in b"xn--pythn-mua.org."), "idna")),
            "pyth\xf6n.org."
        )
        decoder = codecs.getincrementaldecoder("idna")()
        self.assertEqual(decoder.decode(b"xn--xam", ), "")
        self.assertEqual(decoder.decode(b"ple-9ta.o", ), "\xe4xample.")
        self.assertEqual(decoder.decode(b"rg"), "")
        self.assertEqual(decoder.decode(b"", True), "org")
        decoder.reset()
        self.assertEqual(decoder.decode(b"xn--xam", ), "")
        self.assertEqual(decoder.decode(b"ple-9ta.o", ), "\xe4xample.")
        self.assertEqual(decoder.decode(b"rg."), "org.")
        self.assertEqual(decoder.decode(b"", True), "")
    def test_incremental_encode(self):
        """Character-at-a-time encoding buffers each label until its dot."""
        self.assertEqual(
            b"".join(codecs.iterencode("python.org", "idna")),
            b"python.org"
        )
        self.assertEqual(
            b"".join(codecs.iterencode("python.org.", "idna")),
            b"python.org."
        )
        self.assertEqual(
            b"".join(codecs.iterencode("pyth\xf6n.org.", "idna")),
            b"xn--pythn-mua.org."
        )
        self.assertEqual(
            b"".join(codecs.iterencode("pyth\xf6n.org.", "idna")),
            b"xn--pythn-mua.org."
        )
        encoder = codecs.getincrementalencoder("idna")()
        self.assertEqual(encoder.encode("\xe4x"), b"")
        self.assertEqual(encoder.encode("ample.org"), b"xn--xample-9ta.")
        self.assertEqual(encoder.encode("", True), b"org")
        encoder.reset()
        self.assertEqual(encoder.encode("\xe4x"), b"")
        self.assertEqual(encoder.encode("ample.org."), b"xn--xample-9ta.org.")
        self.assertEqual(encoder.encode("", True), b"")
    def test_errors(self):
        """Only supports "strict" error handler"""
        "python.org".encode("idna", "strict")
        b"python.org".decode("idna", "strict")
        for errors in ("ignore", "replace", "backslashreplace",
                "surrogateescape"):
            self.assertRaises(Exception, "python.org".encode, "idna", errors)
            self.assertRaises(Exception,
                              b"python.org".decode, "idna", errors)
class CodecsModuleTest(unittest.TestCase):
    """Tests for the top-level functions of the codecs module itself."""
    def test_decode(self):
        self.assertEqual(codecs.decode(b'\xe4\xf6\xfc', 'latin-1'),
                         '\xe4\xf6\xfc')
        self.assertRaises(TypeError, codecs.decode)
        self.assertEqual(codecs.decode(b'abc'), 'abc')
        self.assertRaises(UnicodeDecodeError, codecs.decode, b'\xff', 'ascii')
        # test keywords
        self.assertEqual(codecs.decode(obj=b'\xe4\xf6\xfc', encoding='latin-1'),
                         '\xe4\xf6\xfc')
        self.assertEqual(codecs.decode(b'[\xff]', 'ascii', errors='ignore'),
                         '[]')
    def test_encode(self):
        self.assertEqual(codecs.encode('\xe4\xf6\xfc', 'latin-1'),
                         b'\xe4\xf6\xfc')
        self.assertRaises(TypeError, codecs.encode)
        self.assertRaises(LookupError, codecs.encode, "foo", "__spam__")
        self.assertEqual(codecs.encode('abc'), b'abc')
        self.assertRaises(UnicodeEncodeError, codecs.encode, '\xffff', 'ascii')
        # test keywords
        self.assertEqual(codecs.encode(obj='\xe4\xf6\xfc', encoding='latin-1'),
                         b'\xe4\xf6\xfc')
        self.assertEqual(codecs.encode('[\xff]', 'ascii', errors='ignore'),
                         b'[]')
    def test_register(self):
        self.assertRaises(TypeError, codecs.register)
        self.assertRaises(TypeError, codecs.register, 42)
    def test_unregister(self):
        """After unregister(), the search function is no longer consulted."""
        name = "nonexistent_codec_name"
        search_function = mock.Mock()
        codecs.register(search_function)
        self.assertRaises(TypeError, codecs.lookup, name)
        search_function.assert_called_with(name)
        search_function.reset_mock()
        codecs.unregister(search_function)
        self.assertRaises(LookupError, codecs.lookup, name)
        search_function.assert_not_called()
    def test_lookup(self):
        self.assertRaises(TypeError, codecs.lookup)
        self.assertRaises(LookupError, codecs.lookup, "__spam__")
        self.assertRaises(LookupError, codecs.lookup, " ")
    def test_getencoder(self):
        self.assertRaises(TypeError, codecs.getencoder)
        self.assertRaises(LookupError, codecs.getencoder, "__spam__")
    def test_getdecoder(self):
        self.assertRaises(TypeError, codecs.getdecoder)
        self.assertRaises(LookupError, codecs.getdecoder, "__spam__")
    def test_getreader(self):
        self.assertRaises(TypeError, codecs.getreader)
        self.assertRaises(LookupError, codecs.getreader, "__spam__")
    def test_getwriter(self):
        self.assertRaises(TypeError, codecs.getwriter)
        self.assertRaises(LookupError, codecs.getwriter, "__spam__")
    def test_lookup_issue1813(self):
        # Issue #1813: under Turkish locales, lookup of some codecs failed
        # because 'I' is lowercased as "ı" (dotless i)
        oldlocale = locale.setlocale(locale.LC_CTYPE)
        self.addCleanup(locale.setlocale, locale.LC_CTYPE, oldlocale)
        try:
            locale.setlocale(locale.LC_CTYPE, 'tr_TR')
        except locale.Error:
            # Unsupported locale on this system
            self.skipTest('test needs Turkish locale')
        c = codecs.lookup('ASCII')
        self.assertEqual(c.name, 'ascii')
    def test_all(self):
        """codecs.__all__ must list exactly the documented public API."""
        api = (
            "encode", "decode",
            "register", "CodecInfo", "Codec", "IncrementalEncoder",
            "IncrementalDecoder", "StreamReader", "StreamWriter", "lookup",
            "getencoder", "getdecoder", "getincrementalencoder",
            "getincrementaldecoder", "getreader", "getwriter",
            "register_error", "lookup_error",
            "strict_errors", "replace_errors", "ignore_errors",
            "xmlcharrefreplace_errors", "backslashreplace_errors",
            "namereplace_errors",
            "open", "EncodedFile",
            "iterencode", "iterdecode",
            "BOM", "BOM_BE", "BOM_LE",
            "BOM_UTF8", "BOM_UTF16", "BOM_UTF16_BE", "BOM_UTF16_LE",
            "BOM_UTF32", "BOM_UTF32_BE", "BOM_UTF32_LE",
            "BOM32_BE", "BOM32_LE", "BOM64_BE", "BOM64_LE", # Undocumented
            "StreamReaderWriter", "StreamRecoder",
        )
        self.assertCountEqual(api, codecs.__all__)
        for api in codecs.__all__:
            getattr(codecs, api)
    def test_open(self):
        """codecs.open() returns a StreamReaderWriter for every file mode."""
        self.addCleanup(os_helper.unlink, os_helper.TESTFN)
        for mode in ('w', 'r', 'r+', 'w+', 'a', 'a+'):
            with self.subTest(mode), \
                    codecs.open(os_helper.TESTFN, mode, 'ascii') as file:
                self.assertIsInstance(file, codecs.StreamReaderWriter)
    def test_undefined(self):
        """The 'undefined' codec must fail for every input and handler."""
        self.assertRaises(UnicodeError, codecs.encode, 'abc', 'undefined')
        self.assertRaises(UnicodeError, codecs.decode, b'abc', 'undefined')
        self.assertRaises(UnicodeError, codecs.encode, '', 'undefined')
        self.assertRaises(UnicodeError, codecs.decode, b'', 'undefined')
        for errors in ('strict', 'ignore', 'replace', 'backslashreplace'):
            self.assertRaises(UnicodeError,
                codecs.encode, 'abc', 'undefined', errors)
            self.assertRaises(UnicodeError,
                codecs.decode, b'abc', 'undefined', errors)
    def test_file_closes_if_lookup_error_raised(self):
        """The underlying file must be closed when the codec lookup fails."""
        mock_open = mock.mock_open()
        with mock.patch('builtins.open', mock_open) as file:
            with self.assertRaises(LookupError):
                codecs.open(os_helper.TESTFN, 'wt', 'invalid-encoding')
            file().close.assert_called()
class StreamReaderTest(unittest.TestCase):
    """Tests for the StreamReader class returned by codecs.getreader()."""

    def setUp(self):
        # UTF-8 bytes decoding to the two-line text '\ud55c\n\uae00'.
        self.reader = codecs.getreader('utf-8')
        self.stream = io.BytesIO(b'\xed\x95\x9c\n\xea\xb8\x80')

    def test_readlines(self):
        """readlines() returns decoded lines with their newlines kept."""
        decoded_lines = self.reader(self.stream).readlines()
        self.assertEqual(decoded_lines, ['\ud55c\n', '\uae00'])
class EncodedFileTest(unittest.TestCase):
    """Tests for codecs.EncodedFile, which transcodes between the file's
    encoding and the caller's data encoding on the fly."""

    def test_basic(self):
        """Reads transcode file->data encoding; writes go data->file."""
        # Reading: UTF-8 bytes in the file come back re-encoded as UTF-16-LE.
        source = io.BytesIO(b'\xed\x95\x9c\n\xea\xb8\x80')
        wrapped = codecs.EncodedFile(source, 'utf-16-le', 'utf-8')
        self.assertEqual(wrapped.read(), b'\\\xd5\n\x00\x00\xae')
        # Writing: UTF-8 input data is stored in the file as Latin-1.
        sink = io.BytesIO()
        wrapped = codecs.EncodedFile(sink, 'utf-8', 'latin-1')
        wrapped.write(b'\xc3\xbc')
        self.assertEqual(sink.getvalue(), b'\xfc')
# Encodings exercised by the exhaustive round-trip checks in
# BasicUnicodeTest below.  Keep the list sorted; platform-specific codecs
# (mbcs/oem) are appended conditionally after the literal.
all_unicode_encodings = [
    "ascii",
    "big5",
    "big5hkscs",
    "charmap",
    "cp037",
    "cp1006",
    "cp1026",
    "cp1125",
    "cp1140",
    "cp1250",
    "cp1251",
    "cp1252",
    "cp1253",
    "cp1254",
    "cp1255",
    "cp1256",
    "cp1257",
    "cp1258",
    "cp424",
    "cp437",
    "cp500",
    "cp720",
    "cp737",
    "cp775",
    "cp850",
    "cp852",
    "cp855",
    "cp856",
    "cp857",
    "cp858",
    "cp860",
    "cp861",
    "cp862",
    "cp863",
    "cp864",
    "cp865",
    "cp866",
    "cp869",
    "cp874",
    "cp875",
    "cp932",
    "cp949",
    "cp950",
    "euc_jis_2004",
    "euc_jisx0213",
    "euc_jp",
    "euc_kr",
    "gb18030",
    "gb2312",
    "gbk",
    "hp_roman8",
    "hz",
    "idna",
    "iso2022_jp",
    "iso2022_jp_1",
    "iso2022_jp_2",
    "iso2022_jp_2004",
    "iso2022_jp_3",
    "iso2022_jp_ext",
    "iso2022_kr",
    "iso8859_1",
    "iso8859_10",
    "iso8859_11",
    "iso8859_13",
    "iso8859_14",
    "iso8859_15",
    "iso8859_16",
    "iso8859_2",
    "iso8859_3",
    "iso8859_4",
    "iso8859_5",
    "iso8859_6",
    "iso8859_7",
    "iso8859_8",
    "iso8859_9",
    "johab",
    "koi8_r",
    "koi8_t",
    "koi8_u",
    "kz1048",
    "latin_1",
    "mac_cyrillic",
    "mac_greek",
    "mac_iceland",
    "mac_latin2",
    "mac_roman",
    "mac_turkish",
    "palmos",
    "ptcp154",
    "punycode",
    "raw_unicode_escape",
    "shift_jis",
    "shift_jis_2004",
    "shift_jisx0213",
    "tis_620",
    "unicode_escape",
    "utf_16",
    "utf_16_be",
    "utf_16_le",
    "utf_7",
    "utf_8",
]
# Windows-only codecs, present only when the interpreter provides them.
if hasattr(codecs, "mbcs_encode"):
    all_unicode_encodings.append("mbcs")
if hasattr(codecs, "oem_encode"):
    all_unicode_encodings.append("oem")
# The following encoding is not tested, because it's not supposed
# to work:
#    "undefined"
# The following encodings don't work in stateful mode
broken_unicode_with_stateful = [
    "punycode",
]
class BasicUnicodeTest(unittest.TestCase, MixInCheckStateHandling):
    """Round-trip every stdlib text encoding through the stateless,
    stream, incremental and iterator codec interfaces, plus the C-API
    incremental objects and the reader seek/state machinery.
    """
    def test_basics(self):
        s = "abc123" # all codecs should be able to encode these
        for encoding in all_unicode_encodings:
            # The registered name should match the requested name up to
            # the "-"/"_" spelling difference.
            name = codecs.lookup(encoding).name
            if encoding.endswith("_codec"):
                name += "_codec"
            elif encoding == "latin_1":
                name = "latin_1"
            self.assertEqual(encoding.replace("_", "-"), name.replace("_", "-"))
            (b, size) = codecs.getencoder(encoding)(s)
            self.assertEqual(size, len(s), "encoding=%r" % encoding)
            (chars, size) = codecs.getdecoder(encoding)(b)
            self.assertEqual(chars, s, "encoding=%r" % encoding)
            if encoding not in broken_unicode_with_stateful:
                # check stream reader/writer
                q = Queue(b"")
                writer = codecs.getwriter(encoding)(q)
                encodedresult = b""
                for c in s:
                    writer.write(c)
                    chunk = q.read()
                    self.assertTrue(type(chunk) is bytes, type(chunk))
                    encodedresult += chunk
                q = Queue(b"")
                reader = codecs.getreader(encoding)(q)
                decodedresult = ""
                for c in encodedresult:
                    # Feed one byte at a time to exercise partial input.
                    q.write(bytes([c]))
                    decodedresult += reader.read()
                self.assertEqual(decodedresult, s, "encoding=%r" % encoding)
            if encoding not in broken_unicode_with_stateful:
                # check incremental decoder/encoder and iterencode()/iterdecode()
                try:
                    encoder = codecs.getincrementalencoder(encoding)()
                except LookupError: # no IncrementalEncoder
                    pass
                else:
                    # check incremental decoder/encoder
                    encodedresult = b""
                    for c in s:
                        encodedresult += encoder.encode(c)
                    encodedresult += encoder.encode("", True)
                    decoder = codecs.getincrementaldecoder(encoding)()
                    decodedresult = ""
                    for c in encodedresult:
                        decodedresult += decoder.decode(bytes([c]))
                    decodedresult += decoder.decode(b"", True)
                    self.assertEqual(decodedresult, s,
                                     "encoding=%r" % encoding)
                    # check iterencode()/iterdecode()
                    result = "".join(codecs.iterdecode(
                            codecs.iterencode(s, encoding), encoding))
                    self.assertEqual(result, s, "encoding=%r" % encoding)
                    # check iterencode()/iterdecode() with empty string
                    result = "".join(codecs.iterdecode(
                            codecs.iterencode("", encoding), encoding))
                    self.assertEqual(result, "")
                if encoding not in ("idna", "mbcs"):
                    # check incremental decoder/encoder with errors argument
                    try:
                        encoder = codecs.getincrementalencoder(encoding)("ignore")
                    except LookupError: # no IncrementalEncoder
                        pass
                    else:
                        encodedresult = b"".join(encoder.encode(c) for c in s)
                        decoder = codecs.getincrementaldecoder(encoding)("ignore")
                        decodedresult = "".join(decoder.decode(bytes([c]))
                                                for c in encodedresult)
                        self.assertEqual(decodedresult, s,
                                         "encoding=%r" % encoding)
    @support.cpython_only
    def test_basics_capi(self):
        # Same incremental round-trip as test_basics, but with codec
        # objects obtained through the C API via _testcapi.
        s = "abc123" # all codecs should be able to encode these
        for encoding in all_unicode_encodings:
            if encoding not in broken_unicode_with_stateful:
                # check incremental decoder/encoder (fetched via the C API)
                try:
                    cencoder = _testcapi.codec_incrementalencoder(encoding)
                except LookupError: # no IncrementalEncoder
                    pass
                else:
                    # check C API
                    encodedresult = b""
                    for c in s:
                        encodedresult += cencoder.encode(c)
                    encodedresult += cencoder.encode("", True)
                    cdecoder = _testcapi.codec_incrementaldecoder(encoding)
                    decodedresult = ""
                    for c in encodedresult:
                        decodedresult += cdecoder.decode(bytes([c]))
                    decodedresult += cdecoder.decode(b"", True)
                    self.assertEqual(decodedresult, s,
                                     "encoding=%r" % encoding)
                if encoding not in ("idna", "mbcs"):
                    # check incremental decoder/encoder with errors argument
                    try:
                        cencoder = _testcapi.codec_incrementalencoder(encoding, "ignore")
                    except LookupError: # no IncrementalEncoder
                        pass
                    else:
                        encodedresult = b"".join(cencoder.encode(c) for c in s)
                        cdecoder = _testcapi.codec_incrementaldecoder(encoding, "ignore")
                        decodedresult = "".join(cdecoder.decode(bytes([c]))
                                                for c in encodedresult)
                        self.assertEqual(decodedresult, s,
                                         "encoding=%r" % encoding)
    def test_seek(self):
        # all codecs should be able to encode these
        s = "%s\n%s\n" % (100*"abc123", 100*"def456")
        for encoding in all_unicode_encodings:
            if encoding == "idna": # FIXME: See SF bug #1163178
                continue
            if encoding in broken_unicode_with_stateful:
                continue
            reader = codecs.getreader(encoding)(io.BytesIO(s.encode(encoding)))
            for t in range(5):
                # Test that calling seek resets the internal codec state and buffers
                reader.seek(0, 0)
                data = reader.read()
                self.assertEqual(s, data)
    def test_bad_decode_args(self):
        # Calling a decoder with no argument, or with an int, must raise
        # TypeError (idna/punycode accept more input types than most).
        for encoding in all_unicode_encodings:
            decoder = codecs.getdecoder(encoding)
            self.assertRaises(TypeError, decoder)
            if encoding not in ("idna", "punycode"):
                self.assertRaises(TypeError, decoder, 42)
    def test_bad_encode_args(self):
        # Calling an encoder with no argument must raise TypeError.
        for encoding in all_unicode_encodings:
            encoder = codecs.getencoder(encoding)
            self.assertRaises(TypeError, encoder)
    def test_encoding_map_type_initialized(self):
        from encodings import cp1140
        # This used to crash, we are only verifying there's no crash.
        table_type = type(cp1140.encoding_table)
        self.assertEqual(table_type, table_type)
    def test_decoder_state(self):
        # Check that getstate() and setstate() handle the state properly
        u = "abc123"
        for encoding in all_unicode_encodings:
            if encoding not in broken_unicode_with_stateful:
                self.check_state_handling_decode(encoding, u, u.encode(encoding))
                self.check_state_handling_encode(encoding, u, u.encode(encoding))
class CharmapTest(unittest.TestCase):
    """Exercise codecs.charmap_decode with the three supported mapping
    kinds — a string map, an int->str dict and an int->int dict — under
    the strict/replace/backslashreplace/ignore error handlers."""

    def test_decode_with_string_map(self):
        decode = codecs.charmap_decode
        raw = b"\x00\x01\x02"
        # Successful decodes: (errors, map, expected text).
        for errors, mapping, expected in [
            ("strict", "abc", "abc"),
            ("strict", "\U0010FFFFbc", "\U0010FFFFbc"),
            ("replace", "ab", "ab\ufffd"),
            ("replace", "ab\ufffe", "ab\ufffd"),
            ("backslashreplace", "ab", "ab\\x02"),
            ("backslashreplace", "ab\ufffe", "ab\\x02"),
            ("ignore", "ab", "ab"),
            ("ignore", "ab\ufffe", "ab"),
        ]:
            self.assertEqual(decode(raw, errors, mapping), (expected, 3))
        # A too-short map, or one mapping a byte to U+FFFE, must fail
        # under "strict".
        self.assertRaises(UnicodeDecodeError, decode, raw, "strict", "ab")
        self.assertRaises(UnicodeDecodeError, decode, raw, "strict", "ab\ufffe")
        # Every byte undefined + "ignore" yields an empty result but
        # still consumes the whole input.
        allbytes = bytes(range(256))
        self.assertEqual(decode(allbytes, "ignore", ""), ("", len(allbytes)))

    def test_decode_with_int2str_map(self):
        decode = codecs.charmap_decode
        raw = b"\x00\x01\x02"
        self.assertEqual(decode(raw, "strict", {0: 'a', 1: 'b', 2: 'c'}),
                         ("abc", 3))
        # Multi-character replacements are allowed...
        self.assertEqual(decode(raw, "strict", {0: 'Aa', 1: 'Bb', 2: 'Cc'}),
                         ("AaBbCc", 3))
        self.assertEqual(decode(raw, "strict", {0: '\U0010FFFF', 1: 'b', 2: 'c'}),
                         ("\U0010FFFFbc", 3))
        # ...and mapping a byte to the empty string simply drops it.
        self.assertEqual(decode(raw, "strict", {0: 'a', 1: 'b', 2: ''}),
                         ("ab", 3))
        # A missing entry, None, and U+FFFE (issue #14850) all behave as
        # "undefined" and obey the error handler.
        undefined_maps = ({0: 'a', 1: 'b'},
                          {0: 'a', 1: 'b', 2: None},
                          {0: 'a', 1: 'b', 2: '\ufffe'})
        for mapping in undefined_maps:
            self.assertRaises(UnicodeDecodeError, decode, raw, "strict", mapping)
        for errors, expected in [("replace", "ab\ufffd"),
                                 ("backslashreplace", "ab\\x02"),
                                 ("ignore", "ab")]:
            for mapping in undefined_maps:
                self.assertEqual(decode(raw, errors, mapping), (expected, 3))
        allbytes = bytes(range(256))
        self.assertEqual(decode(allbytes, "ignore", {}), ("", len(allbytes)))
        # Out-of-range targets in the map are a TypeError, not a decode error.
        for bad in (-2, 999999999):
            self.assertRaisesRegex(TypeError,
                "character mapping must be in range\\(0x110000\\)",
                decode, raw, "strict", {0: "A", 1: 'Bb', 2: bad})

    def test_decode_with_int2int_map(self):
        decode = codecs.charmap_decode
        raw = b"\x00\x01\x02"
        a, b, c = ord('a'), ord('b'), ord('c')
        self.assertEqual(decode(raw, "strict", {0: a, 1: b, 2: c}),
                         ("abc", 3))
        # Issue #15379: the full code-point range is accepted.
        self.assertEqual(decode(raw, "strict", {0: 0x10FFFF, 1: b, 2: c}),
                         ("\U0010FFFFbc", 3))
        self.assertEqual(decode(raw, "strict", {0: sys.maxunicode, 1: b, 2: c}),
                         (chr(sys.maxunicode) + "bc", 3))
        self.assertRaises(TypeError, decode, raw, "strict",
                          {0: sys.maxunicode + 1, 1: b, 2: c})
        # A missing entry and an explicit 0xFFFE both count as undefined.
        for mapping in ({0: a, 1: b}, {0: a, 1: b, 2: 0xFFFE}):
            self.assertRaises(UnicodeDecodeError, decode, raw, "strict", mapping)
            self.assertEqual(decode(raw, "replace", mapping), ("ab\ufffd", 3))
            self.assertEqual(decode(raw, "backslashreplace", mapping),
                             ("ab\\x02", 3))
            self.assertEqual(decode(raw, "ignore", mapping), ("ab", 3))
class WithStmtTest(unittest.TestCase):
    """The codec stream wrappers must work as context managers and close
    the underlying stream on exit."""

    def test_encodedfile(self):
        raw = io.BytesIO(b"\xc3\xbc")
        with codecs.EncodedFile(raw, "latin-1", "utf-8") as wrapped:
            self.assertEqual(wrapped.read(), b"\xfc")
        # Leaving the with-block must close the wrapped stream too.
        self.assertTrue(raw.closed)

    def test_streamreaderwriter(self):
        raw = io.BytesIO(b"\xc3\xbc")
        utf8 = codecs.lookup("utf-8")
        rw = codecs.StreamReaderWriter(raw, utf8.streamreader,
                                       utf8.streamwriter, 'strict')
        with rw as stream:
            self.assertEqual(stream.read(), "\xfc")
class TypesTest(unittest.TestCase):
    """Input-type handling of the low-level codec functions."""

    def test_decode_unicode(self):
        # The binary decoders reject str input outright.
        decoder_names = [
            "utf_7_decode",
            "utf_8_decode",
            "utf_16_le_decode",
            "utf_16_be_decode",
            "utf_16_ex_decode",
            "utf_32_decode",
            "utf_32_le_decode",
            "utf_32_be_decode",
            "utf_32_ex_decode",
            "latin_1_decode",
            "ascii_decode",
            "charmap_decode",
        ]
        # mbcs exists only on Windows builds.
        if hasattr(codecs, "mbcs_decode"):
            decoder_names.append("mbcs_decode")
        for name in decoder_names:
            self.assertRaises(TypeError, getattr(codecs, name), "xxx")

    def test_unicode_escape(self):
        # Escape-decoding a unicode string is supported and gives the
        # same result as decoding the equivalent ASCII bytes string.
        for decode in (codecs.unicode_escape_decode,
                       codecs.raw_unicode_escape_decode):
            self.assertEqual(decode(r"\u1234"), ("\u1234", 6))
            self.assertEqual(decode(br"\u1234"), ("\u1234", 6))
            self.assertRaises(UnicodeDecodeError, decode, br"\U00110000")
            self.assertEqual(decode(r"\U00110000", "replace"), ("\ufffd", 10))
            self.assertEqual(decode(r"\U00110000", "backslashreplace"),
                             (r"\x5c\x55\x30\x30\x31\x31\x30\x30\x30\x30", 10))
class UnicodeEscapeTest(ReadTest, unittest.TestCase):
    """Tests for the "unicode-escape" codec, which round-trips text
    through Python string-literal style backslash escapes.  Reuses the
    generic stream-reading checks from ReadTest.
    """
    encoding = "unicode-escape"  # consumed by the inherited ReadTest helpers
    test_lone_surrogates = None  # disable inherited test: not applicable here
    def test_empty(self):
        self.assertEqual(codecs.unicode_escape_encode(""), (b"", 0))
        self.assertEqual(codecs.unicode_escape_decode(b""), ("", 0))
    def test_raw_encode(self):
        # Printable ASCII (except the backslash) passes through unescaped.
        encode = codecs.unicode_escape_encode
        for b in range(32, 127):
            if b != b'\\'[0]:
                self.assertEqual(encode(chr(b)), (bytes([b]), 1))
    def test_raw_decode(self):
        # Any byte other than the backslash decodes to itself.
        decode = codecs.unicode_escape_decode
        for b in range(256):
            if b != b'\\'[0]:
                self.assertEqual(decode(bytes([b]) + b'0'), (chr(b) + '0', 2))
    def test_escape_encode(self):
        # Control characters, non-ASCII and non-BMP code points are
        # escaped; named escapes are preferred for tab/newline/CR.
        encode = codecs.unicode_escape_encode
        check = coding_checker(self, encode)
        check('\t', br'\t')
        check('\n', br'\n')
        check('\r', br'\r')
        check('\\', br'\\')
        for b in range(32):
            if chr(b) not in '\t\n\r':
                check(chr(b), ('\\x%02x' % b).encode())
        for b in range(127, 256):
            check(chr(b), ('\\x%02x' % b).encode())
        check('\u20ac', br'\u20ac')
        check('\U0001d120', br'\U0001d120')
    def test_escape_decode(self):
        # All recognized escape forms, including octal and hex with
        # trailing digits that are not part of the escape.
        decode = codecs.unicode_escape_decode
        check = coding_checker(self, decode)
        check(b"[\\\n]", "[]")
        check(br'[\"]', '["]')
        check(br"[\']", "[']")
        check(br"[\\]", r"[\]")
        check(br"[\a]", "[\x07]")
        check(br"[\b]", "[\x08]")
        check(br"[\t]", "[\x09]")
        check(br"[\n]", "[\x0a]")
        check(br"[\v]", "[\x0b]")
        check(br"[\f]", "[\x0c]")
        check(br"[\r]", "[\x0d]")
        check(br"[\7]", "[\x07]")
        check(br"[\78]", "[\x078]")
        check(br"[\41]", "[!]")
        check(br"[\418]", "[!8]")
        check(br"[\101]", "[A]")
        check(br"[\1010]", "[A0]")
        check(br"[\x41]", "[A]")
        check(br"[\x410]", "[A0]")
        check(br"\u20ac", "\u20ac")
        check(br"\U0001d120", "\U0001d120")
        # Unrecognized escapes are passed through but emit a
        # DeprecationWarning.
        for i in range(97, 123):
            b = bytes([i])
            if b not in b'abfnrtuvx':
                with self.assertWarns(DeprecationWarning):
                    check(b"\\" + b, "\\" + chr(i))
            if b.upper() not in b'UN':
                with self.assertWarns(DeprecationWarning):
                    check(b"\\" + b.upper(), "\\" + chr(i-32))
        with self.assertWarns(DeprecationWarning):
            check(br"\8", "\\8")
        with self.assertWarns(DeprecationWarning):
            check(br"\9", "\\9")
        with self.assertWarns(DeprecationWarning):
            check(b"\\\xfa", "\\\xfa")
    def test_decode_errors(self):
        # Truncated \x, \u and \U escapes must raise under "strict" and
        # obey the "ignore"/"replace" handlers otherwise.
        decode = codecs.unicode_escape_decode
        for c, d in (b'x', 2), (b'u', 4), (b'U', 4):
            for i in range(d):
                self.assertRaises(UnicodeDecodeError, decode,
                                  b"\\" + c + b"0"*i)
                self.assertRaises(UnicodeDecodeError, decode,
                                  b"[\\" + c + b"0"*i + b"]")
                data = b"[\\" + c + b"0"*i + b"]\\" + c + b"0"*i
                self.assertEqual(decode(data, "ignore"), ("[]", len(data)))
                self.assertEqual(decode(data, "replace"),
                                 ("[\ufffd]\ufffd", len(data)))
        self.assertRaises(UnicodeDecodeError, decode, br"\U00110000")
        self.assertEqual(decode(br"\U00110000", "ignore"), ("", 10))
        self.assertEqual(decode(br"\U00110000", "replace"), ("\ufffd", 10))
    def test_partial(self):
        # Feed the encoded form one byte at a time; each list entry is
        # the text decodable after that many bytes (escapes stay pending
        # until complete).
        self.check_partial(
            "\x00\t\n\r\\\xff\uffff\U00010000",
            [
                '',
                '',
                '',
                '\x00',
                '\x00',
                '\x00\t',
                '\x00\t',
                '\x00\t\n',
                '\x00\t\n',
                '\x00\t\n\r',
                '\x00\t\n\r',
                '\x00\t\n\r\\',
                '\x00\t\n\r\\',
                '\x00\t\n\r\\',
                '\x00\t\n\r\\',
                '\x00\t\n\r\\\xff',
                '\x00\t\n\r\\\xff',
                '\x00\t\n\r\\\xff',
                '\x00\t\n\r\\\xff',
                '\x00\t\n\r\\\xff',
                '\x00\t\n\r\\\xff',
                '\x00\t\n\r\\\xff\uffff',
                '\x00\t\n\r\\\xff\uffff',
                '\x00\t\n\r\\\xff\uffff',
                '\x00\t\n\r\\\xff\uffff',
                '\x00\t\n\r\\\xff\uffff',
                '\x00\t\n\r\\\xff\uffff',
                '\x00\t\n\r\\\xff\uffff',
                '\x00\t\n\r\\\xff\uffff',
                '\x00\t\n\r\\\xff\uffff',
                '\x00\t\n\r\\\xff\uffff',
                '\x00\t\n\r\\\xff\uffff\U00010000',
            ]
        )
class RawUnicodeEscapeTest(ReadTest, unittest.TestCase):
    """Tests for the "raw-unicode-escape" codec, which only recognizes
    the "u" and "U" escape sequences and passes every other byte or
    character through unchanged.
    """
    encoding = "raw-unicode-escape"  # consumed by the inherited ReadTest helpers
    test_lone_surrogates = None  # disable inherited test: not applicable here
    def test_empty(self):
        self.assertEqual(codecs.raw_unicode_escape_encode(""), (b"", 0))
        self.assertEqual(codecs.raw_unicode_escape_decode(b""), ("", 0))
    def test_raw_encode(self):
        # Every latin-1 character encodes to its own byte, unescaped.
        encode = codecs.raw_unicode_escape_encode
        for b in range(256):
            self.assertEqual(encode(chr(b)), (bytes([b]), 1))
    def test_raw_decode(self):
        # Every byte decodes to itself.
        decode = codecs.raw_unicode_escape_decode
        for b in range(256):
            self.assertEqual(decode(bytes([b]) + b'0'), (chr(b) + '0', 2))
    def test_escape_encode(self):
        # Backslash followed by anything but u/U stays literal; non
        # latin-1 code points are escaped.
        encode = codecs.raw_unicode_escape_encode
        check = coding_checker(self, encode)
        for b in range(256):
            if b not in b'uU':
                check('\\' + chr(b), b'\\' + bytes([b]))
        check('\u20ac', br'\u20ac')
        check('\U0001d120', br'\U0001d120')
    def test_escape_decode(self):
        # Mirror of test_escape_encode for the decoding direction.
        decode = codecs.raw_unicode_escape_decode
        check = coding_checker(self, decode)
        for b in range(256):
            if b not in b'uU':
                check(b'\\' + bytes([b]), '\\' + chr(b))
        check(br"\u20ac", "\u20ac")
        check(br"\U0001d120", "\U0001d120")
    def test_decode_errors(self):
        # Truncated \u and \U escapes and out-of-range code points.
        decode = codecs.raw_unicode_escape_decode
        for c, d in (b'u', 4), (b'U', 4):
            for i in range(d):
                self.assertRaises(UnicodeDecodeError, decode,
                                  b"\\" + c + b"0"*i)
                self.assertRaises(UnicodeDecodeError, decode,
                                  b"[\\" + c + b"0"*i + b"]")
                data = b"[\\" + c + b"0"*i + b"]\\" + c + b"0"*i
                self.assertEqual(decode(data, "ignore"), ("[]", len(data)))
                self.assertEqual(decode(data, "replace"),
                                 ("[\ufffd]\ufffd", len(data)))
        self.assertRaises(UnicodeDecodeError, decode, br"\U00110000")
        self.assertEqual(decode(br"\U00110000", "ignore"), ("", 10))
        self.assertEqual(decode(br"\U00110000", "replace"), ("\ufffd", 10))
    def test_partial(self):
        # Feed the encoded form one byte at a time; escapes stay pending
        # until complete.
        self.check_partial(
            "\x00\t\n\r\\\xff\uffff\U00010000",
            [
                '\x00',
                '\x00\t',
                '\x00\t\n',
                '\x00\t\n\r',
                '\x00\t\n\r',
                '\x00\t\n\r\\\xff',
                '\x00\t\n\r\\\xff',
                '\x00\t\n\r\\\xff',
                '\x00\t\n\r\\\xff',
                '\x00\t\n\r\\\xff',
                '\x00\t\n\r\\\xff',
                '\x00\t\n\r\\\xff\uffff',
                '\x00\t\n\r\\\xff\uffff',
                '\x00\t\n\r\\\xff\uffff',
                '\x00\t\n\r\\\xff\uffff',
                '\x00\t\n\r\\\xff\uffff',
                '\x00\t\n\r\\\xff\uffff',
                '\x00\t\n\r\\\xff\uffff',
                '\x00\t\n\r\\\xff\uffff',
                '\x00\t\n\r\\\xff\uffff',
                '\x00\t\n\r\\\xff\uffff',
                '\x00\t\n\r\\\xff\uffff\U00010000',
            ]
        )
class EscapeEncodeTest(unittest.TestCase):
    """codecs.escape_encode: bytes in, backslash-escaped bytes out."""

    def test_escape_encode(self):
        cases = [
            (b'', (b'', 0)),
            (b'foobar', (b'foobar', 6)),
            (b'spam\0eggs', (b'spam\\x00eggs', 9)),
            (b'a\'b', (b"a\\'b", 3)),
            (b'b\\c', (b'b\\\\c', 3)),
            (b'c\nd', (b'c\\nd', 3)),
            (b'd\re', (b'd\\re', 3)),
            (b'f\x7fg', (b'f\\x7fg', 3)),
        ]
        for data, expected in cases:
            with self.subTest(data=data):
                self.assertEqual(codecs.escape_encode(data), expected)
        # str and bytearray input are rejected outright.
        for bad_input in ('spam', bytearray(b'spam')):
            self.assertRaises(TypeError, codecs.escape_encode, bad_input)
class SurrogateEscapeTest(unittest.TestCase):
    """The "surrogateescape" error handler round-trips undecodable bytes
    as the lone surrogates U+DC80..U+DCFF."""

    def check_roundtrip(self, encoding, raw, text):
        # raw must decode to text, and text must encode back to raw.
        self.assertEqual(raw.decode(encoding, "surrogateescape"), text)
        self.assertEqual(text.encode(encoding, "surrogateescape"), raw)

    def test_utf8(self):
        # A stray byte escapes to a surrogate...
        self.check_roundtrip("utf-8", b"foo\x80bar", "foo\udc80bar")
        # ...as does an ill-formed UTF-8-encoded surrogate.
        self.check_roundtrip("utf-8", b"\xed\xb0\x80", "\udced\udcb0\udc80")

    def test_ascii(self):
        self.check_roundtrip("ascii", b"foo\x80bar", "foo\udc80bar")

    def test_charmap(self):
        # \xa5 has no mapping in iso-8859-3.
        self.check_roundtrip("iso-8859-3", b"foo\xa5bar", "foo\udca5bar")

    def test_latin1(self):
        # Issue6373: encoding lone surrogates back through latin-1.
        self.assertEqual("\udce4\udceb\udcef\udcf6\udcfc".encode("latin-1",
                                                                 "surrogateescape"),
                         b"\xe4\xeb\xef\xf6\xfc")
class BomTest(unittest.TestCase):
    """The BOM-writing encodings must emit the BOM exactly once: right
    after opening or after an explicit seek(0), and never mid-stream.
    """
    def test_seek0(self):
        data = "1234567890"
        tests = ("utf-16",
                 "utf-16-le",
                 "utf-16-be",
                 "utf-32",
                 "utf-32-le",
                 "utf-32-be")
        self.addCleanup(os_helper.unlink, os_helper.TESTFN)
        for encoding in tests:
            # Check if the BOM is written only once
            with codecs.open(os_helper.TESTFN, 'w+', encoding=encoding) as f:
                f.write(data)
                f.write(data)
                f.seek(0)
                self.assertEqual(f.read(), data * 2)
                f.seek(0)
                self.assertEqual(f.read(), data * 2)
            # Check that the BOM is written after a seek(0)
            with codecs.open(os_helper.TESTFN, 'w+', encoding=encoding) as f:
                f.write(data[0])
                self.assertNotEqual(f.tell(), 0)
                f.seek(0)
                f.write(data)
                f.seek(0)
                self.assertEqual(f.read(), data)
            # (StreamWriter) Check that the BOM is written after a seek(0)
            with codecs.open(os_helper.TESTFN, 'w+', encoding=encoding) as f:
                f.writer.write(data[0])
                self.assertNotEqual(f.writer.tell(), 0)
                f.writer.seek(0)
                f.writer.write(data)
                f.seek(0)
                self.assertEqual(f.read(), data)
            # Check that the BOM is not written after a seek() at a position
            # different than the start
            with codecs.open(os_helper.TESTFN, 'w+', encoding=encoding) as f:
                f.write(data)
                f.seek(f.tell())
                f.write(data)
                f.seek(0)
                self.assertEqual(f.read(), data * 2)
            # (StreamWriter) Check that the BOM is not written after a seek()
            # at a position different than the start
            with codecs.open(os_helper.TESTFN, 'w+', encoding=encoding) as f:
                f.writer.write(data)
                f.writer.seek(f.writer.tell())
                f.writer.write(data)
                f.seek(0)
                self.assertEqual(f.read(), data * 2)
# binary->binary transform codecs exercised by TransformCodecTest below.
bytes_transform_encodings = [
    "base64_codec",
    "uu_codec",
    "quopri_codec",
    "hex_codec",
]
# Known aliases for each transform codec (see issue #7475).
transform_aliases = {
    "base64_codec": ["base64", "base_64"],
    "uu_codec": ["uu"],
    "quopri_codec": ["quopri", "quoted_printable", "quotedprintable"],
    "hex_codec": ["hex"],
    "rot_13": ["rot13"],
}
# zlib and bz2 support is optional; only register their codecs for
# testing when the modules are importable.
try:
    import zlib
except ImportError:
    zlib = None
else:
    bytes_transform_encodings.append("zlib_codec")
    transform_aliases["zlib_codec"] = ["zip", "zlib"]
try:
    import bz2
except ImportError:
    pass
else:
    bytes_transform_encodings.append("bz2_codec")
    transform_aliases["bz2_codec"] = ["bz2"]
class TransformCodecTest(unittest.TestCase):
    """Bytes-to-bytes transform codecs (base64, uu, quopri, hex and
    optionally zlib/bz2): round-tripping, stream reading, buffer-API
    input, aliases, and the denylisting of str<->bytes misuse.
    """
    def test_basics(self):
        binput = bytes(range(256))
        for encoding in bytes_transform_encodings:
            with self.subTest(encoding=encoding):
                # generic codecs interface
                (o, size) = codecs.getencoder(encoding)(binput)
                self.assertEqual(size, len(binput))
                (i, size) = codecs.getdecoder(encoding)(o)
                self.assertEqual(size, len(o))
                self.assertEqual(i, binput)
    def test_read(self):
        for encoding in bytes_transform_encodings:
            with self.subTest(encoding=encoding):
                sin = codecs.encode(b"\x80", encoding)
                reader = codecs.getreader(encoding)(io.BytesIO(sin))
                sout = reader.read()
                self.assertEqual(sout, b"\x80")
    def test_readline(self):
        for encoding in bytes_transform_encodings:
            with self.subTest(encoding=encoding):
                sin = codecs.encode(b"\x80", encoding)
                reader = codecs.getreader(encoding)(io.BytesIO(sin))
                sout = reader.readline()
                self.assertEqual(sout, b"\x80")
    def test_buffer_api_usage(self):
        # We check all the transform codecs accept memoryview input
        # for encoding and decoding
        # and also that they roundtrip correctly
        original = b"12345\x80"
        for encoding in bytes_transform_encodings:
            with self.subTest(encoding=encoding):
                data = original
                view = memoryview(data)
                data = codecs.encode(data, encoding)
                view_encoded = codecs.encode(view, encoding)
                self.assertEqual(view_encoded, data)
                view = memoryview(data)
                data = codecs.decode(data, encoding)
                self.assertEqual(data, original)
                view_decoded = codecs.decode(view, encoding)
                self.assertEqual(view_decoded, data)
    def test_text_to_binary_denylists_binary_transforms(self):
        # Check binary -> binary codecs give a good error for str input
        bad_input = "bad input type"
        for encoding in bytes_transform_encodings:
            with self.subTest(encoding=encoding):
                fmt = (r"{!r} is not a text encoding; "
                       r"use codecs.encode\(\) to handle arbitrary codecs")
                msg = fmt.format(encoding)
                with self.assertRaisesRegex(LookupError, msg) as failure:
                    bad_input.encode(encoding)
                self.assertIsNone(failure.exception.__cause__)
    def test_text_to_binary_denylists_text_transforms(self):
        # Check str.encode gives a good error message for str -> str codecs
        msg = (r"^'rot_13' is not a text encoding; "
               r"use codecs.encode\(\) to handle arbitrary codecs")
        with self.assertRaisesRegex(LookupError, msg):
            "just an example message".encode("rot_13")
    def test_binary_to_text_denylists_binary_transforms(self):
        # Check bytes.decode and bytearray.decode give a good error
        # message for binary -> binary codecs
        data = b"encode first to ensure we meet any format restrictions"
        for encoding in bytes_transform_encodings:
            with self.subTest(encoding=encoding):
                encoded_data = codecs.encode(data, encoding)
                fmt = (r"{!r} is not a text encoding; "
                       r"use codecs.decode\(\) to handle arbitrary codecs")
                msg = fmt.format(encoding)
                with self.assertRaisesRegex(LookupError, msg):
                    encoded_data.decode(encoding)
                with self.assertRaisesRegex(LookupError, msg):
                    bytearray(encoded_data).decode(encoding)
    def test_binary_to_text_denylists_text_transforms(self):
        # Check str -> str codec gives a good error for binary input
        for bad_input in (b"immutable", bytearray(b"mutable")):
            with self.subTest(bad_input=bad_input):
                msg = (r"^'rot_13' is not a text encoding; "
                       r"use codecs.decode\(\) to handle arbitrary codecs")
                with self.assertRaisesRegex(LookupError, msg) as failure:
                    bad_input.decode("rot_13")
                self.assertIsNone(failure.exception.__cause__)
    @unittest.skipUnless(zlib, "Requires zlib support")
    def test_custom_zlib_error_is_wrapped(self):
        # Check zlib codec gives a good error for malformed input
        msg = "^decoding with 'zlib_codec' codec failed"
        with self.assertRaisesRegex(Exception, msg) as failure:
            codecs.decode(b"hello", "zlib_codec")
        self.assertIsInstance(failure.exception.__cause__,
                              type(failure.exception))
    def test_custom_hex_error_is_wrapped(self):
        # Check hex codec gives a good error for malformed input
        msg = "^decoding with 'hex_codec' codec failed"
        with self.assertRaisesRegex(Exception, msg) as failure:
            codecs.decode(b"hello", "hex_codec")
        self.assertIsInstance(failure.exception.__cause__,
                              type(failure.exception))
    # Unfortunately, the bz2 module throws OSError, which the codec
    # machinery currently can't wrap :(
    # Ensure codec aliases from http://bugs.python.org/issue7475 work
    def test_aliases(self):
        for codec_name, aliases in transform_aliases.items():
            expected_name = codecs.lookup(codec_name).name
            for alias in aliases:
                with self.subTest(alias=alias):
                    info = codecs.lookup(alias)
                    self.assertEqual(info.name, expected_name)
    def test_quopri_stateless(self):
        # Should encode with quotetabs=True
        encoded = codecs.encode(b"space tab\teol \n", "quopri-codec")
        self.assertEqual(encoded, b"space=20tab=09eol=20\n")
        # But should still support unescaped tabs and spaces
        unescaped = b"space tab eol\n"
        self.assertEqual(codecs.decode(unescaped, "quopri-codec"), unescaped)
    def test_uu_invalid(self):
        # Missing "begin" line
        self.assertRaises(ValueError, codecs.decode, b"", "uu-codec")
# The codec system tries to wrap exceptions in order to ensure the error
# mentions the operation being performed and the codec involved. We
# currently *only* want this to happen for relatively stateless
# exceptions, where the only significant information they contain is their
# type and a single str argument.
# Use a local codec registry to avoid appearing to leak objects when
# registering multiple search functions
# Maps codec name -> CodecInfo; populated by ExceptionChainingTest.set_codec().
_TEST_CODECS = {}
def _get_test_codec(codec_name):
    """Codec search function serving entries from the local _TEST_CODECS
    registry; returns None for unknown names so the codec lookup
    machinery falls through to the other registered search functions."""
    return _TEST_CODECS.get(codec_name)
class ExceptionChainingTest(unittest.TestCase):
    """Verify how the codec machinery wraps exceptions raised inside a
    codec (so the error names the operation and codec involved), and
    the cases where wrapping is deliberately skipped.
    """
    def setUp(self):
        self.codec_name = 'exception_chaining_test'
        codecs.register(_get_test_codec)
        self.addCleanup(codecs.unregister, _get_test_codec)
        # We store the object to raise on the instance because of a bad
        # interaction between the codec caching (which means we can't
        # recreate the codec entry) and regrtest refleak hunting (which
        # runs the same test instance multiple times). This means we
        # need to ensure the codecs call back in to the instance to find
        # out which exception to raise rather than binding them in a
        # closure to an object that may change on the next run
        self.obj_to_raise = RuntimeError
    def tearDown(self):
        _TEST_CODECS.pop(self.codec_name, None)
        # Issue #22166: Also pop from caches to avoid appearance of ref leaks
        encodings._cache.pop(self.codec_name, None)
    def set_codec(self, encode, decode):
        # Install the given encode/decode callables under self.codec_name.
        codec_info = codecs.CodecInfo(encode, decode,
                                      name=self.codec_name)
        _TEST_CODECS[self.codec_name] = codec_info
    @contextlib.contextmanager
    def assertWrapped(self, operation, exc_type, msg):
        # Assert the body raises exc_type wrapped with the standard
        # "<operation> with <codec> codec failed (...)" message and a
        # properly chained __cause__.
        full_msg = r"{} with {!r} codec failed \({}: {}\)".format(
                  operation, self.codec_name, exc_type.__name__, msg)
        with self.assertRaisesRegex(exc_type, full_msg) as caught:
            yield caught
        self.assertIsInstance(caught.exception.__cause__, exc_type)
        self.assertIsNotNone(caught.exception.__cause__.__traceback__)
    def raise_obj(self, *args, **kwds):
        # Helper to dynamically change the object raised by a test codec
        raise self.obj_to_raise
    def check_wrapped(self, obj_to_raise, msg, exc_type=RuntimeError):
        # The wrapped form must appear for all four encode/decode entry
        # points.
        self.obj_to_raise = obj_to_raise
        self.set_codec(self.raise_obj, self.raise_obj)
        with self.assertWrapped("encoding", exc_type, msg):
            "str_input".encode(self.codec_name)
        with self.assertWrapped("encoding", exc_type, msg):
            codecs.encode("str_input", self.codec_name)
        with self.assertWrapped("decoding", exc_type, msg):
            b"bytes input".decode(self.codec_name)
        with self.assertWrapped("decoding", exc_type, msg):
            codecs.decode(b"bytes input", self.codec_name)
    def test_raise_by_type(self):
        self.check_wrapped(RuntimeError, "")
    def test_raise_by_value(self):
        msg = "This should be wrapped"
        self.check_wrapped(RuntimeError(msg), msg)
    def test_raise_grandchild_subclass_exact_size(self):
        msg = "This should be wrapped"
        class MyRuntimeError(RuntimeError):
            __slots__ = ()
        self.check_wrapped(MyRuntimeError(msg), msg, MyRuntimeError)
    def test_raise_subclass_with_weakref_support(self):
        msg = "This should be wrapped"
        class MyRuntimeError(RuntimeError):
            pass
        self.check_wrapped(MyRuntimeError(msg), msg, MyRuntimeError)
    def check_not_wrapped(self, obj_to_raise, msg):
        # The original exception must propagate unmodified from all four
        # encode/decode entry points.
        def raise_obj(*args, **kwds):
            raise obj_to_raise
        self.set_codec(raise_obj, raise_obj)
        with self.assertRaisesRegex(RuntimeError, msg):
            "str input".encode(self.codec_name)
        with self.assertRaisesRegex(RuntimeError, msg):
            codecs.encode("str input", self.codec_name)
        with self.assertRaisesRegex(RuntimeError, msg):
            b"bytes input".decode(self.codec_name)
        with self.assertRaisesRegex(RuntimeError, msg):
            codecs.decode(b"bytes input", self.codec_name)
    def test_init_override_is_not_wrapped(self):
        class CustomInit(RuntimeError):
            def __init__(self):
                pass
        self.check_not_wrapped(CustomInit, "")
    def test_new_override_is_not_wrapped(self):
        class CustomNew(RuntimeError):
            def __new__(cls):
                return super().__new__(cls)
        self.check_not_wrapped(CustomNew, "")
    def test_instance_attribute_is_not_wrapped(self):
        msg = "This should NOT be wrapped"
        exc = RuntimeError(msg)
        exc.attr = 1
        self.check_not_wrapped(exc, "^{}$".format(msg))
    def test_non_str_arg_is_not_wrapped(self):
        self.check_not_wrapped(RuntimeError(1), "1")
    def test_multiple_args_is_not_wrapped(self):
        msg_re = r"^\('a', 'b', 'c'\)$"
        self.check_not_wrapped(RuntimeError('a', 'b', 'c'), msg_re)
    # http://bugs.python.org/issue19609
    def test_codec_lookup_failure_not_wrapped(self):
        msg = "^unknown encoding: {}$".format(self.codec_name)
        # The initial codec lookup should not be wrapped
        with self.assertRaisesRegex(LookupError, msg):
            "str input".encode(self.codec_name)
        with self.assertRaisesRegex(LookupError, msg):
            codecs.encode("str input", self.codec_name)
        with self.assertRaisesRegex(LookupError, msg):
            b"bytes input".decode(self.codec_name)
        with self.assertRaisesRegex(LookupError, msg):
            codecs.decode(b"bytes input", self.codec_name)
    def test_unflagged_non_text_codec_handling(self):
        # The stdlib non-text codecs are now marked so they're
        # pre-emptively skipped by the text model related methods
        # However, third party codecs won't be flagged, so we still make
        # sure the case where an inappropriate output type is produced is
        # handled appropriately
        def encode_to_str(*args, **kwds):
            return "not bytes!", 0
        def decode_to_bytes(*args, **kwds):
            return b"not str!", 0
        self.set_codec(encode_to_str, decode_to_bytes)
        # No input or output type checks on the codecs module functions
        encoded = codecs.encode(None, self.codec_name)
        self.assertEqual(encoded, "not bytes!")
        decoded = codecs.decode(None, self.codec_name)
        self.assertEqual(decoded, b"not str!")
        # Text model methods should complain
        fmt = (r"^{!r} encoder returned 'str' instead of 'bytes'; "
               r"use codecs.encode\(\) to encode to arbitrary types$")
        msg = fmt.format(self.codec_name)
        with self.assertRaisesRegex(TypeError, msg):
            "str_input".encode(self.codec_name)
        fmt = (r"^{!r} decoder returned 'bytes' instead of 'str'; "
               r"use codecs.decode\(\) to decode to arbitrary types$")
        msg = fmt.format(self.codec_name)
        with self.assertRaisesRegex(TypeError, msg):
            b"bytes input".decode(self.codec_name)
@unittest.skipUnless(sys.platform == 'win32',
                     'code pages are specific to Windows')
class CodePageTest(unittest.TestCase):
    """Tests for the Windows code page codecs
    (codecs.code_page_encode / codecs.code_page_decode)."""

    # Windows code page number for UTF-8.
    CP_UTF8 = 65001

    def test_invalid_code_page(self):
        # Negative numbers are rejected outright; unassigned (but
        # syntactically valid) numbers fail at the OS level.
        self.assertRaises(ValueError, codecs.code_page_encode, -1, 'a')
        self.assertRaises(ValueError, codecs.code_page_decode, -1, b'a')
        self.assertRaises(OSError, codecs.code_page_encode, 123, 'a')
        self.assertRaises(OSError, codecs.code_page_decode, 123, b'a')

    def test_code_page_name(self):
        # The code page name must appear in the error message.
        self.assertRaisesRegex(UnicodeEncodeError, 'cp932',
                               codecs.code_page_encode, 932, '\xff')
        self.assertRaisesRegex(UnicodeDecodeError, 'cp932',
                               codecs.code_page_decode, 932, b'\x81\x00', 'strict', True)
        self.assertRaisesRegex(UnicodeDecodeError, 'CP_UTF8',
                               codecs.code_page_decode, self.CP_UTF8, b'\xff', 'strict', True)

    def check_decode(self, cp, tests):
        # Decode each sample with the given error handler; expected=None
        # means a UnicodeDecodeError is required.
        for raw, errors, expected in tests:
            if expected is not None:
                try:
                    decoded = codecs.code_page_decode(cp, raw, errors, True)
                except UnicodeDecodeError as err:
                    self.fail('Unable to decode %a from "cp%s" with '
                              'errors=%r: %s' % (raw, cp, errors, err))
                self.assertEqual(decoded[0], expected,
                                 '%a.decode("cp%s", %r)=%a != %a'
                                 % (raw, cp, errors, decoded[0], expected))
                # assert 0 <= decoded[1] <= len(raw)
                self.assertGreaterEqual(decoded[1], 0)
                self.assertLessEqual(decoded[1], len(raw))
            else:
                self.assertRaises(UnicodeDecodeError,
                                  codecs.code_page_decode, cp, raw, errors, True)

    def check_encode(self, cp, tests):
        # Encode each sample with the given error handler; expected=None
        # means a UnicodeEncodeError is required.
        for text, errors, expected in tests:
            if expected is not None:
                try:
                    encoded = codecs.code_page_encode(cp, text, errors)
                except UnicodeEncodeError as err:
                    self.fail('Unable to encode %a to "cp%s" with '
                              'errors=%r: %s' % (text, cp, errors, err))
                self.assertEqual(encoded[0], expected,
                                 '%a.encode("cp%s", %r)=%a != %a'
                                 % (text, cp, errors, encoded[0], expected))
                self.assertEqual(encoded[1], len(text))
            else:
                self.assertRaises(UnicodeEncodeError,
                                  codecs.code_page_encode, cp, text, errors)

    def test_cp932(self):
        self.check_encode(932, (
            ('abc', 'strict', b'abc'),
            ('\uff44\u9a3e', 'strict', b'\x82\x84\xe9\x80'),
            # test error handlers
            ('\xff', 'strict', None),
            ('[\xff]', 'ignore', b'[]'),
            ('[\xff]', 'replace', b'[y]'),
            ('[\u20ac]', 'replace', b'[?]'),
            ('[\xff]', 'backslashreplace', b'[\\xff]'),
            ('[\xff]', 'namereplace',
             b'[\\N{LATIN SMALL LETTER Y WITH DIAERESIS}]'),
            # BUGFIX: the expected output is the XML character reference
            # "&#255;"; it had been rendered as the referenced character,
            # which is not even legal inside a bytes literal.
            ('[\xff]', 'xmlcharrefreplace', b'[&#255;]'),
            ('\udcff', 'strict', None),
            ('[\udcff]', 'surrogateescape', b'[\xff]'),
            ('[\udcff]', 'surrogatepass', None),
        ))
        self.check_decode(932, (
            (b'abc', 'strict', 'abc'),
            (b'\x82\x84\xe9\x80', 'strict', '\uff44\u9a3e'),
            # invalid bytes
            (b'[\xff]', 'strict', None),
            (b'[\xff]', 'ignore', '[]'),
            (b'[\xff]', 'replace', '[\ufffd]'),
            (b'[\xff]', 'backslashreplace', '[\\xff]'),
            (b'[\xff]', 'surrogateescape', '[\udcff]'),
            (b'[\xff]', 'surrogatepass', None),
            (b'\x81\x00abc', 'strict', None),
            (b'\x81\x00abc', 'ignore', '\x00abc'),
            (b'\x81\x00abc', 'replace', '\ufffd\x00abc'),
            (b'\x81\x00abc', 'backslashreplace', '\\x81\x00abc'),
        ))

    def test_cp1252(self):
        self.check_encode(1252, (
            ('abc', 'strict', b'abc'),
            ('\xe9\u20ac', 'strict', b'\xe9\x80'),
            ('\xff', 'strict', b'\xff'),
            # test error handlers
            ('\u0141', 'strict', None),
            ('\u0141', 'ignore', b''),
            ('\u0141', 'replace', b'L'),
            ('\udc98', 'surrogateescape', b'\x98'),
            ('\udc98', 'surrogatepass', None),
        ))
        self.check_decode(1252, (
            (b'abc', 'strict', 'abc'),
            (b'\xe9\x80', 'strict', '\xe9\u20ac'),
            (b'\xff', 'strict', '\xff'),
        ))

    def test_cp_utf7(self):
        cp = 65000
        self.check_encode(cp, (
            ('abc', 'strict', b'abc'),
            ('\xe9\u20ac', 'strict', b'+AOkgrA-'),
            ('\U0010ffff', 'strict', b'+2//f/w-'),
            ('\udc80', 'strict', b'+3IA-'),
            ('\ufffd', 'strict', b'+//0-'),
        ))
        self.check_decode(cp, (
            (b'abc', 'strict', 'abc'),
            (b'+AOkgrA-', 'strict', '\xe9\u20ac'),
            (b'+2//f/w-', 'strict', '\U0010ffff'),
            (b'+3IA-', 'strict', '\udc80'),
            (b'+//0-', 'strict', '\ufffd'),
            # invalid bytes
            (b'[+/]', 'strict', '[]'),
            (b'[\xff]', 'strict', '[\xff]'),
        ))

    def test_multibyte_encoding(self):
        # Error handlers applied to a truncated multi-byte sequence.
        self.check_decode(932, (
            (b'\x84\xe9\x80', 'ignore', '\u9a3e'),
            (b'\x84\xe9\x80', 'replace', '\ufffd\u9a3e'),
        ))
        self.check_decode(self.CP_UTF8, (
            (b'\xff\xf4\x8f\xbf\xbf', 'ignore', '\U0010ffff'),
            (b'\xff\xf4\x8f\xbf\xbf', 'replace', '\ufffd\U0010ffff'),
        ))
        self.check_encode(self.CP_UTF8, (
            ('[\U0010ffff\uDC80]', 'ignore', b'[\xf4\x8f\xbf\xbf]'),
            ('[\U0010ffff\uDC80]', 'replace', b'[\xf4\x8f\xbf\xbf?]'),
        ))

    def test_code_page_decode_flags(self):
        # Issue #36312: For some code pages (e.g. UTF-7) flags for
        # MultiByteToWideChar() must be set to 0.
        if support.verbose:
            sys.stdout.write('\n')
        for cp in (50220, 50221, 50222, 50225, 50227, 50229,
                   *range(57002, 57011+1), 65000):
            # On small versions of Windows like Windows IoT
            # not all codepages are present.
            # A missing codepage causes an OSError exception
            # so check for the codepage before decoding
            if is_code_page_present(cp):
                self.assertEqual(codecs.code_page_decode(cp, b'abc'), ('abc', 3), f'cp{cp}')
            else:
                if support.verbose:
                    print(f"  skipping cp={cp}")
        self.assertEqual(codecs.code_page_decode(42, b'abc'),
                         ('\uf061\uf062\uf063', 3))

    def test_incremental(self):
        # With final=False, incomplete trailing sequences stay pending.
        decoded = codecs.code_page_decode(932, b'\x82', 'strict', False)
        self.assertEqual(decoded, ('', 0))

        decoded = codecs.code_page_decode(932,
                                          b'\xe9\x80\xe9', 'strict',
                                          False)
        self.assertEqual(decoded, ('\u9a3e', 2))

        decoded = codecs.code_page_decode(932,
                                          b'\xe9\x80\xe9\x80', 'strict',
                                          False)
        self.assertEqual(decoded, ('\u9a3e\u9a3e', 4))

        decoded = codecs.code_page_decode(932,
                                          b'abc', 'strict',
                                          False)
        self.assertEqual(decoded, ('abc', 3))

    def test_mbcs_alias(self):
        # Check that looking up our 'default' codepage will return
        # mbcs when we don't have a more specific one available
        with mock.patch('_winapi.GetACP', return_value=123):
            codec = codecs.lookup('cp123')
            self.assertEqual(codec.name, 'mbcs')

    @support.bigmemtest(size=2**31, memuse=7, dry_run=False)
    def test_large_input(self, size):
        # Test input longer than INT_MAX.
        # Input should contain undecodable bytes before and after
        # the INT_MAX limit.
        encoded = (b'01234567' * ((size//8)-1) +
                   b'\x85\x86\xea\xeb\xec\xef\xfc\xfd\xfe\xff')
        self.assertEqual(len(encoded), size+2)
        decoded = codecs.code_page_decode(932, encoded, 'surrogateescape', True)
        self.assertEqual(decoded[1], len(encoded))
        del encoded
        self.assertEqual(len(decoded[0]), decoded[1])
        self.assertEqual(decoded[0][:10], '0123456701')
        self.assertEqual(decoded[0][-20:],
                         '6701234567'
                         '\udc85\udc86\udcea\udceb\udcec'
                         '\udcef\udcfc\udcfd\udcfe\udcff')

    @support.bigmemtest(size=2**31, memuse=6, dry_run=False)
    def test_large_utf8_input(self, size):
        # Test input longer than INT_MAX.
        # Input should contain a decodable multi-byte character
        # surrounding INT_MAX
        encoded = (b'0123456\xed\x84\x80' * (size//8))
        self.assertEqual(len(encoded), size // 8 * 10)
        decoded = codecs.code_page_decode(65001, encoded, 'ignore', True)
        self.assertEqual(decoded[1], len(encoded))
        del encoded
        self.assertEqual(len(decoded[0]), size)
        self.assertEqual(decoded[0][:10], '0123456\ud10001')
        self.assertEqual(decoded[0][-11:], '56\ud1000123456\ud100')
class ASCIITest(unittest.TestCase):
    """Tests for the built-in 'ascii' codec and its error handlers."""

    def test_encode(self):
        # Plain ASCII round-trips unchanged.
        self.assertEqual('abc123'.encode('ascii'), b'abc123')

    def test_encode_error(self):
        # BUGFIX: the xmlcharrefreplace expectation must be the XML
        # character references "&#128;&#255;&#8364;"; they had been
        # rendered as the referenced characters, which is a SyntaxError
        # inside a bytes literal.
        for data, error_handler, expected in (
            ('[\x80\xff\u20ac]', 'ignore', b'[]'),
            ('[\x80\xff\u20ac]', 'replace', b'[???]'),
            ('[\x80\xff\u20ac]', 'xmlcharrefreplace',
             b'[&#128;&#255;&#8364;]'),
            ('[\x80\xff\u20ac\U000abcde]', 'backslashreplace',
             b'[\\x80\\xff\\u20ac\\U000abcde]'),
            ('[\udc80\udcff]', 'surrogateescape', b'[\x80\xff]'),
        ):
            with self.subTest(data=data, error_handler=error_handler,
                              expected=expected):
                self.assertEqual(data.encode('ascii', error_handler),
                                 expected)

    def test_encode_surrogateescape_error(self):
        with self.assertRaises(UnicodeEncodeError):
            # the first character can be decoded, but not the second
            '\udc80\xff'.encode('ascii', 'surrogateescape')

    def test_decode(self):
        self.assertEqual(b'abc'.decode('ascii'), 'abc')

    def test_decode_error(self):
        for data, error_handler, expected in (
            (b'[\x80\xff]', 'ignore', '[]'),
            (b'[\x80\xff]', 'replace', '[\ufffd\ufffd]'),
            (b'[\x80\xff]', 'surrogateescape', '[\udc80\udcff]'),
            (b'[\x80\xff]', 'backslashreplace', '[\\x80\\xff]'),
        ):
            with self.subTest(data=data, error_handler=error_handler,
                              expected=expected):
                self.assertEqual(data.decode('ascii', error_handler),
                                 expected)
class Latin1Test(unittest.TestCase):
    """Tests for the built-in 'latin-1' codec and its error handlers."""

    def test_encode(self):
        # Latin-1 maps code points 0-255 directly to bytes.
        for data, expected in (
            ('abc', b'abc'),
            ('\x80\xe9\xff', b'\x80\xe9\xff'),
        ):
            with self.subTest(data=data, expected=expected):
                self.assertEqual(data.encode('latin1'), expected)

    def test_encode_errors(self):
        # BUGFIX: the xmlcharrefreplace expectation must be the XML
        # character references "&#8364;&#56448;"; they had been rendered
        # as the referenced characters, which is a SyntaxError inside a
        # bytes literal.
        for data, error_handler, expected in (
            ('[\u20ac\udc80]', 'ignore', b'[]'),
            ('[\u20ac\udc80]', 'replace', b'[??]'),
            ('[\u20ac\U000abcde]', 'backslashreplace',
             b'[\\u20ac\\U000abcde]'),
            ('[\u20ac\udc80]', 'xmlcharrefreplace', b'[&#8364;&#56448;]'),
            ('[\udc80\udcff]', 'surrogateescape', b'[\x80\xff]'),
        ):
            with self.subTest(data=data, error_handler=error_handler,
                              expected=expected):
                self.assertEqual(data.encode('latin1', error_handler),
                                 expected)

    def test_encode_surrogateescape_error(self):
        with self.assertRaises(UnicodeEncodeError):
            # the first character can be decoded, but not the second
            '\udc80\u20ac'.encode('latin1', 'surrogateescape')

    def test_decode(self):
        for data, expected in (
            (b'abc', 'abc'),
            (b'[\x80\xff]', '[\x80\xff]'),
        ):
            with self.subTest(data=data, expected=expected):
                self.assertEqual(data.decode('latin1'), expected)
class StreamRecoderTest(unittest.TestCase):
    """Behavioral tests for codecs.StreamRecoder and codecs.EncodedFile."""

    def test_writelines(self):
        buffer = io.BytesIO()
        ascii_codec = codecs.lookup('ascii')
        recoder = codecs.StreamRecoder(
            buffer, ascii_codec.encode, ascii_codec.decode,
            encodings.ascii.StreamReader, encodings.ascii.StreamWriter)
        recoder.writelines([b'a', b'b'])
        self.assertEqual(buffer.getvalue(), b'ab')

    def test_write(self):
        buffer = io.BytesIO()
        latin1 = codecs.lookup('latin1')
        # Recode from Latin-1 to utf-8.
        recoder = codecs.StreamRecoder(
            buffer, latin1.encode, latin1.decode,
            encodings.utf_8.StreamReader, encodings.utf_8.StreamWriter)
        text = 'àñé'
        recoder.write(text.encode('latin1'))
        self.assertEqual(buffer.getvalue(), text.encode('utf-8'))

    def test_seeking_read(self):
        raw = io.BytesIO('line1\nline2\nline3\n'.encode('utf-16-le'))
        recoded = codecs.EncodedFile(raw, 'utf-8', 'utf-16-le')

        self.assertEqual(recoded.readline(), b'line1\n')
        recoded.seek(0)
        # After rewinding, every line must come back in order.
        for expected in (b'line1\n', b'line2\n', b'line3\n', b''):
            self.assertEqual(recoded.readline(), expected)

    def test_seeking_write(self):
        raw = io.BytesIO('123456789\n'.encode('utf-16-le'))
        recoded = codecs.EncodedFile(raw, 'utf-8', 'utf-16-le')

        # Test that seek() only resets its internal buffer when offset
        # and whence are zero.
        recoded.seek(2)
        recoded.write(b'\nabc\n')
        self.assertEqual(recoded.readline(), b'789\n')
        recoded.seek(0)
        for expected in (b'1\n', b'abc\n', b'789\n'):
            self.assertEqual(recoded.readline(), expected)
@unittest.skipIf(_testcapi is None, 'need _testcapi module')
class LocaleCodecTest(unittest.TestCase):
    """
    Test indirectly _Py_DecodeUTF8Ex() and _Py_EncodeUTF8Ex().
    """
    # The encoding under test is the filesystem encoding of this run.
    ENCODING = sys.getfilesystemencoding()
    # Sample texts: pure ASCII, Latin-1 range, full UCS range and
    # lone surrogates (only representable via surrogate* handlers).
    STRINGS = ("ascii", "ulatin1:\xa7\xe9",
               "u255:\xff",
               "UCS:\xe9\u20ac\U0010ffff",
               "surrogates:\uDC80\uDCFF")
    BYTES_STRINGS = (b"blatin1:\xa7\xe9", b"b255:\xff")
    SURROGATES = "\uDC80\uDCFF"

    def encode(self, text, errors="strict"):
        """Encode *text* through the C-level _Py_EncodeLocaleEx() helper."""
        return _testcapi.EncodeLocaleEx(text, 0, errors)

    def check_encode_strings(self, errors):
        # The C helper must agree with str.encode(): same results on
        # success, and a RuntimeError (with position info) where
        # str.encode() raises UnicodeEncodeError.
        for text in self.STRINGS:
            with self.subTest(text=text):
                try:
                    expected = text.encode(self.ENCODING, errors)
                except UnicodeEncodeError:
                    with self.assertRaises(RuntimeError) as cm:
                        self.encode(text, errors)
                    errmsg = str(cm.exception)
                    self.assertRegex(errmsg, r"encode error: pos=[0-9]+, reason=")
                else:
                    encoded = self.encode(text, errors)
                    self.assertEqual(encoded, expected)

    def test_encode_strict(self):
        self.check_encode_strings("strict")

    def test_encode_surrogateescape(self):
        self.check_encode_strings("surrogateescape")

    def test_encode_surrogatepass(self):
        # surrogatepass is not supported by every locale encoder; probe
        # first and skip rather than fail when it is unavailable.
        try:
            self.encode('', 'surrogatepass')
        except ValueError as exc:
            if str(exc) == 'unsupported error handler':
                self.skipTest(f"{self.ENCODING!r} encoder doesn't support "
                              f"surrogatepass error handler")
            else:
                raise

        self.check_encode_strings("surrogatepass")

    def test_encode_unsupported_error_handler(self):
        with self.assertRaises(ValueError) as cm:
            self.encode('', 'backslashreplace')
        self.assertEqual(str(cm.exception), 'unsupported error handler')

    def decode(self, encoded, errors="strict"):
        """Decode *encoded* through the C-level _Py_DecodeLocaleEx() helper."""
        return _testcapi.DecodeLocaleEx(encoded, 0, errors)

    def check_decode_strings(self, errors):
        # Build a byte-string corpus from BYTES_STRINGS plus the
        # encodable subset of STRINGS, then compare the C helper against
        # bytes.decode() on every sample.
        is_utf8 = (self.ENCODING == "utf-8")
        if is_utf8:
            encode_errors = 'surrogateescape'
        else:
            encode_errors = 'strict'

        strings = list(self.BYTES_STRINGS)
        for text in self.STRINGS:
            try:
                encoded = text.encode(self.ENCODING, encode_errors)
                if encoded not in strings:
                    strings.append(encoded)
            except UnicodeEncodeError:
                encoded = None

            if is_utf8:
                encoded2 = text.encode(self.ENCODING, 'surrogatepass')
                if encoded2 != encoded:
                    strings.append(encoded2)

        for encoded in strings:
            with self.subTest(encoded=encoded):
                try:
                    expected = encoded.decode(self.ENCODING, errors)
                except UnicodeDecodeError:
                    with self.assertRaises(RuntimeError) as cm:
                        self.decode(encoded, errors)
                    errmsg = str(cm.exception)
                    self.assertTrue(errmsg.startswith("decode error: "), errmsg)
                else:
                    decoded = self.decode(encoded, errors)
                    self.assertEqual(decoded, expected)

    def test_decode_strict(self):
        self.check_decode_strings("strict")

    def test_decode_surrogateescape(self):
        self.check_decode_strings("surrogateescape")

    def test_decode_surrogatepass(self):
        # Same probe-and-skip dance as on the encode side.
        try:
            self.decode(b'', 'surrogatepass')
        except ValueError as exc:
            if str(exc) == 'unsupported error handler':
                self.skipTest(f"{self.ENCODING!r} decoder doesn't support "
                              f"surrogatepass error handler")
            else:
                raise

        self.check_decode_strings("surrogatepass")

    def test_decode_unsupported_error_handler(self):
        with self.assertRaises(ValueError) as cm:
            self.decode(b'', 'backslashreplace')
        self.assertEqual(str(cm.exception), 'unsupported error handler')
class Rot13Test(unittest.TestCase):
    """Test the educational ROT-13 codec."""

    def test_encode(self):
        self.assertEqual(codecs.encode("Caesar liked ciphers", 'rot-13'),
                         'Pnrfne yvxrq pvcuref')

    def test_decode(self):
        self.assertEqual(codecs.decode('Rg gh, Oehgr?', 'rot-13'),
                         'Et tu, Brute?')

    def test_incremental_encode(self):
        incremental_encoder = codecs.getincrementalencoder('rot-13')()
        self.assertEqual(incremental_encoder.encode('ABBA nag Cheryl Baker'),
                         'NOON ant Purely Onxre')

    def test_incremental_decode(self):
        incremental_decoder = codecs.getincrementaldecoder('rot-13')()
        self.assertEqual(incremental_decoder.decode('terra Ares envy tha'),
                         'green Nerf rail gun')
class Rot13UtilTest(unittest.TestCase):
    """Test the ROT-13 codec via rot13 function,
    i.e. the user has done something like:
    $ echo "Hello World" | python -m encodings.rot_13
    """
    def test_rot13_func(self):
        # rot13() streams from one file-like object to another.
        source = io.StringIO('Gb or, be abg gb or, gung vf gur dhrfgvba')
        sink = io.StringIO()
        encodings.rot_13.rot13(source, sink)
        sink.seek(0)
        self.assertEqual(sink.read(),
                         'To be, or not to be, that is the question')
class CodecNameNormalizationTest(unittest.TestCase):
"""Test codec name normalization"""
def test_codecs_lookup(self):
FOUND = (1, 2, 3, 4)
NOT_FOUND = (None, None, None, None)
def search_function(encoding):
if encoding == "aaa_8":
return FOUND
else:
return NOT_FOUND
codecs.register(search_function)
self.addCleanup(codecs.unregister, search_function)
self.assertEqual(FOUND, codecs.lookup('aaa_8'))
self.assertEqual(FOUND, codecs.lookup('AAA-8'))
self.assertEqual(FOUND, codecs.lookup('AAA---8'))
self.assertEqual(FOUND, codecs.lookup('AAA 8'))
self.assertEqual(FOUND, codecs.lookup('aaa\xe9\u20ac-8'))
self.assertEqual(NOT_FOUND, codecs.lookup('AAA.8'))
self.assertEqual(NOT_FOUND, codecs.lookup('AAA...8'))
self.assertEqual(NOT_FOUND, codecs.lookup('BBB-8'))
self.assertEqual(NOT_FOUND, codecs.lookup('BBB.8'))
self.assertEqual(NOT_FOUND, codecs.lookup('a\xe9\u20ac-8'))
def test_encodings_normalize_encoding(self):
# encodings.normalize_encoding() ignores non-ASCII characters.
normalize = encodings.normalize_encoding
self.assertEqual(normalize('utf_8'), 'utf_8')
self.assertEqual(normalize('utf\xE9\u20AC\U0010ffff-8'), 'utf_8')
self.assertEqual(normalize('utf 8'), 'utf_8')
# encodings.normalize_encoding() doesn't convert
# characters to lower case.
self.assertEqual(normalize('UTF 8'), 'UTF_8')
self.assertEqual(normalize('utf.8'), 'utf.8')
self.assertEqual(normalize('utf...8'), 'utf...8')
# Allow running this test module directly: python test_codecs.py
if __name__ == "__main__":
    unittest.main()
import codecs
import contextlib
import io
import locale
import sys
import unittest
import encodings
from unittest import mock
from test import support
from test.support import os_helper
from test.support import warnings_helper
try:
import _testcapi
except ImportError:
_testcapi = None
try:
import ctypes
except ImportError:
ctypes = None
SIZEOF_WCHAR_T = -1
else:
SIZEOF_WCHAR_T = ctypes.sizeof(ctypes.c_wchar)
def coding_checker(self, coder):
def check(input, expect):
self.assertEqual(coder(input), (expect, len(input)))
return check
def is_code_page_present(cp):
    """Return whether Windows code page *cp* is installed.

    Wraps the Win32 GetCPInfoExW() call via ctypes; the BOOL result is
    nonzero when the code page is present (Windows-only helper).
    """
    from ctypes import POINTER, WINFUNCTYPE, WinDLL
    from ctypes.wintypes import BOOL, UINT, BYTE, WCHAR, UINT, DWORD

    # Buffer sizes taken from the Win32 CPINFOEXW documentation.
    MAX_LEADBYTES = 12
    MAX_DEFAULTCHAR = 2
    MAX_PATH = 260
    class CPINFOEXW(ctypes.Structure):
        # Field layout must match the Win32 CPINFOEXW struct exactly.
        _fields_ = [("MaxCharSize", UINT),
                    ("DefaultChar", BYTE*MAX_DEFAULTCHAR),
                    ("LeadByte", BYTE*MAX_LEADBYTES),
                    ("UnicodeDefaultChar", WCHAR),
                    ("CodePage", UINT),
                    ("CodePageName", WCHAR*MAX_PATH)]

    prototype = WINFUNCTYPE(BOOL, UINT, DWORD, POINTER(CPINFOEXW))
    GetCPInfoEx = prototype(("GetCPInfoExW", WinDLL("kernel32")))
    info = CPINFOEXW()
    return GetCPInfoEx(cp, 0, info)
class Queue(object):
    """A minimal FIFO queue of bytes/characters backed by one buffer.

    write() appends at the end; read() consumes from the front. The
    buffer type (bytes or str) is fixed by the value passed to __init__.
    """
    def __init__(self, buffer):
        self._buffer = buffer

    def write(self, chars):
        self._buffer += chars

    def read(self, size=-1):
        if size < 0:
            # Drain everything, leaving an empty buffer of the same type.
            drained, self._buffer = self._buffer, self._buffer[:0]
        else:
            drained, self._buffer = self._buffer[:size], self._buffer[size:]
        return drained
class MixInCheckStateHandling:
    """Mixin: verify incremental codecs survive getstate()/setstate()
    round-trips at every possible split point of the data."""

    def check_state_handling_decode(self, encoding, u, s):
        # Split the encoded bytes s at every position i; the decoder's
        # state captured at the split must let a fresh decoder finish
        # the job and reproduce u exactly.
        for i in range(len(s)+1):
            d = codecs.getincrementaldecoder(encoding)()
            part1 = d.decode(s[:i])
            state = d.getstate()
            self.assertIsInstance(state[1], int)
            # Check that the condition stated in the documentation for
            # IncrementalDecoder.getstate() holds: if the integer part
            # of the state is 0, the buffered bytes decode to nothing.
            if not state[1]:
                # reset decoder to the default state without anything buffered
                d.setstate((state[0][:0], 0))
                # Feeding the previous input may not produce any output
                self.assertTrue(not d.decode(state[0]))
                # The decoder must return to the same state
                self.assertEqual(state, d.getstate())
            # Create a new decoder and set it to the state
            # we extracted from the old one
            d = codecs.getincrementaldecoder(encoding)()
            d.setstate(state)
            part2 = d.decode(s[i:], True)
            self.assertEqual(u, part1+part2)

    def check_state_handling_encode(self, encoding, u, s):
        # Same idea on the encode side: split u at every position and
        # resume encoding from the captured state.
        for i in range(len(u)+1):
            d = codecs.getincrementalencoder(encoding)()
            part1 = d.encode(u[:i])
            state = d.getstate()
            d = codecs.getincrementalencoder(encoding)()
            d.setstate(state)
            part2 = d.encode(u[i:], True)
            self.assertEqual(s, part1+part2)
class ReadTest(MixInCheckStateHandling):
    def check_partial(self, input, partialresults):
        """Feed the encoded form of *input* one byte at a time and check
        that after each byte the decoded prefix equals the matching entry
        of *partialresults*; verified with a stream reader, an
        incremental decoder (before and after reset()), and iterdecode().
        """
        q = Queue(b"")
        r = codecs.getreader(self.encoding)(q)
        result = ""
        for (c, partialresult) in zip(input.encode(self.encoding), partialresults, strict=True):
            q.write(bytes([c]))
            result += r.read()
            self.assertEqual(result, partialresult)
        # check that there's nothing left in the buffers
        self.assertEqual(r.read(), "")
        self.assertEqual(r.bytebuffer, b"")

        # do the check again, this time using an incremental decoder
        d = codecs.getincrementaldecoder(self.encoding)()
        result = ""
        for (c, partialresult) in zip(input.encode(self.encoding), partialresults, strict=True):
            result += d.decode(bytes([c]))
            self.assertEqual(result, partialresult)
        # check that there's nothing left in the buffers
        self.assertEqual(d.decode(b"", True), "")
        self.assertEqual(d.buffer, b"")

        # reset() must restore the pristine state so the same feed works again
        d.reset()
        result = ""
        for (c, partialresult) in zip(input.encode(self.encoding), partialresults, strict=True):
            result += d.decode(bytes([c]))
            self.assertEqual(result, partialresult)
        self.assertEqual(d.decode(b"", True), "")
        self.assertEqual(d.buffer, b"")

        # check iterdecode()
        encoded = input.encode(self.encoding)
        self.assertEqual(
            input,
            "".join(codecs.iterdecode([bytes([c]) for c in encoded], self.encoding))
        )
    def test_readline(self):
        """readline() must honor size/keepends across all line-end styles
        ("\\n", "\\r\\n", "\\r", U+2028), including lines longer than one
        read chunk and chunks that end on a lone "\\r"."""
        def getreader(input):
            stream = io.BytesIO(input.encode(self.encoding))
            return codecs.getreader(self.encoding)(stream)

        def readalllines(input, keepends=True, size=None):
            reader = getreader(input)
            lines = []
            while True:
                line = reader.readline(size=size, keepends=keepends)
                if not line:
                    break
                lines.append(line)
            return "|".join(lines)

        s = "foo\nbar\r\nbaz\rspam\u2028eggs"
        sexpected = "foo\n|bar\r\n|baz\r|spam\u2028|eggs"
        sexpectednoends = "foo|bar|baz|spam|eggs"
        self.assertEqual(readalllines(s, True), sexpected)
        self.assertEqual(readalllines(s, False), sexpectednoends)
        self.assertEqual(readalllines(s, True, 10), sexpected)
        self.assertEqual(readalllines(s, False, 10), sexpectednoends)

        lineends = ("\n", "\r\n", "\r", "\u2028")
        # Test long lines (multiple calls to read() in readline())
        vw = []
        vwo = []
        for (i, lineend) in enumerate(lineends):
            vw.append((i*200+200)*"\u3042" + lineend)
            vwo.append((i*200+200)*"\u3042")
        self.assertEqual(readalllines("".join(vw), True), "|".join(vw))
        self.assertEqual(readalllines("".join(vw), False), "|".join(vwo))

        # Test lines where the first read might end with \r, so the
        # reader has to look ahead whether this is a lone \r or a \r\n
        for size in range(80):
            for lineend in lineends:
                s = 10*(size*"a" + lineend + "xxx\n")
                reader = getreader(s)
                for i in range(10):
                    self.assertEqual(
                        reader.readline(keepends=True),
                        size*"a" + lineend,
                    )
                    self.assertEqual(
                        reader.readline(keepends=True),
                        "xxx\n",
                    )
                reader = getreader(s)
                for i in range(10):
                    self.assertEqual(
                        reader.readline(keepends=False),
                        size*"a",
                    )
                    self.assertEqual(
                        reader.readline(keepends=False),
                        "xxx",
                    )
def test_mixed_readline_and_read(self):
lines = ["Humpty Dumpty sat on a wall,\n",
"Humpty Dumpty had a great fall.\r\n",
"All the king's horses and all the king's men\r",
"Couldn't put Humpty together again."]
data = ''.join(lines)
def getreader():
stream = io.BytesIO(data.encode(self.encoding))
return codecs.getreader(self.encoding)(stream)
ertEqual(f.readline(), lines[0])
self.assertEqual(f.read(), ''.join(lines[1:]))
self.assertEqual(f.read(), '')
tEqual(f.readline(), lines[0])
self.assertEqual(f.read(1), lines[1][0])
self.assertEqual(f.read(0), '')
self.assertEqual(f.read(100), data[len(lines[0]) + 1:][:100])
al(f.readline(), lines[0])
self.assertEqual(f.readlines(), lines[1:])
self.assertEqual(f.read(), '')
f = getreader()
self.assertEqual(f.read(size=40, chars=5), data[:5])
self.assertEqual(f.read(), data[5:])
self.assertEqual(f.read(), '')
sertEqual(f.read(size=40, chars=5), data[:5])
self.assertEqual(f.read(1), data[5])
self.assertEqual(f.read(0), '')
self.assertEqual(f.read(100), data[6:106])
Equal(f.read(size=40, chars=5), data[:5])
self.assertEqual(f.readlines(), [lines[0][5:]] + lines[1:])
self.assertEqual(f.read(), '')
def test_bug1175396(self):
s = [
'<%!--===================================================\r\n',
' BLOG index page: show recent articles,\r\n',
' today\'s articles, or articles of a specific date.\r\n',
'========================================================--%>\r\n',
'<%@inputencoding="ISO-8859-1"%>\r\n',
'<%@pagetemplate=TEMPLATE.y%>\r\n',
'<%@import=import frog.util, frog%>\r\n',
'<%@import=import frog.objects%>\r\n',
'<%@import=from frog.storageerrors import StorageError%>\r\n',
'<%\r\n',
'\r\n',
'import logging\r\n',
'log=logging.getLogger("Snakelets.logger")\r\n',
'\r\n',
'\r\n',
'user=self.SessionCtx.user\r\n',
'storageEngine=self.SessionCtx.storageEngine\r\n',
'\r\n',
'\r\n',
'def readArticlesFromDate(date, count=None):\r\n',
' entryids=storageEngine.listBlogEntries(date)\r\n',
' entryids.reverse()
' if count:\r\n',
' entryids=entryids[:count]\r\n',
' try:\r\n',
' return [ frog.objects.BlogEntry.load(storageEngine, date, Id) for Id in entryids ]\r\n',
' except StorageError,x:\r\n',
' log.error("Error loading articles: "+str(x))\r\n',
' self.abort("cannot load articles")\r\n',
'\r\n',
'showdate=None\r\n',
'\r\n',
'arg=self.Request.getArg()\r\n',
'if arg=="today":\r\n',
'
' self.write("<h2>Today\'s articles</h2>")\r\n',
' showdate = frog.util.isodatestr() \r\n',
' entries = readArticlesFromDate(showdate)\r\n',
'elif arg=="active":\r\n',
'
' self.Yredirect("active.y")\r\n',
'elif arg=="login":\r\n',
'
' self.Yredirect("login.y")\r\n',
'elif arg=="date":\r\n',
'
' showdate = self.Request.getParameter("date")\r\n',
' self.write("<h2>Articles written on %s</h2>"% frog.util.mediumdatestr(showdate))\r\n',
' entries = readArticlesFromDate(showdate)\r\n',
'else:\r\n',
'
' self.write("<h2>Recent articles</h2>")\r\n',
' dates=storageEngine.listBlogEntryDates()\r\n',
' if dates:\r\n',
' entries=[]\r\n',
' SHOWAMOUNT=10\r\n',
' for showdate in dates:\r\n',
' entries.extend( readArticlesFromDate(showdate, SHOWAMOUNT-len(entries)) )\r\n',
' if len(entries)>=SHOWAMOUNT:\r\n',
' break\r\n',
' \r\n',
]
stream = io.BytesIO("".join(s).encode(self.encoding))
reader = codecs.getreader(self.encoding)(stream)
for (i, line) in enumerate(reader):
self.assertEqual(line, s[i])
def test_readlinequeue(self):
q = Queue(b"")
writer = codecs.getwriter(self.encoding)(q)
reader = codecs.getreader(self.encoding)(q)
# No lineends
writer.write("foo\r")
self.assertEqual(reader.readline(keepends=False), "foo")
writer.write("\nbar\r")
self.assertEqual(reader.readline(keepends=False), "")
self.assertEqual(reader.readline(keepends=False), "bar")
writer.write("baz")
self.assertEqual(reader.readline(keepends=False), "baz")
self.assertEqual(reader.readline(keepends=False), "")
# Lineends
writer.write("foo\r")
self.assertEqual(reader.readline(keepends=True), "foo\r")
writer.write("\nbar\r")
self.assertEqual(reader.readline(keepends=True), "\n")
self.assertEqual(reader.readline(keepends=True), "bar\r")
writer.write("baz")
self.assertEqual(reader.readline(keepends=True), "baz")
self.assertEqual(reader.readline(keepends=True), "")
writer.write("foo\r\n")
self.assertEqual(reader.readline(keepends=True), "foo\r\n")
def test_bug1098990_a(self):
s1 = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy\r\n"
s2 = "offending line: ladfj askldfj klasdj fskla dfzaskdj fasklfj laskd fjasklfzzzzaa%whereisthis!!!\r\n"
s3 = "next line.\r\n"
s = (s1+s2+s3).encode(self.encoding)
stream = io.BytesIO(s)
reader = codecs.getreader(self.encoding)(stream)
self.assertEqual(reader.readline(), s1)
self.assertEqual(reader.readline(), s2)
self.assertEqual(reader.readline(), s3)
self.assertEqual(reader.readline(), "")
def test_bug1098990_b(self):
s1 = "aaaaaaaaaaaaaaaaaaaaaaaa\r\n"
s2 = "bbbbbbbbbbbbbbbbbbbbbbbb\r\n"
s3 = "stillokay:bbbbxx\r\n"
s4 = "broken!!!!badbad\r\n"
s5 = "againokay.\r\n"
s = (s1+s2+s3+s4+s5).encode(self.encoding)
stream = io.BytesIO(s)
reader = codecs.getreader(self.encoding)(stream)
self.assertEqual(reader.readline(), s1)
self.assertEqual(reader.readline(), s2)
self.assertEqual(reader.readline(), s3)
self.assertEqual(reader.readline(), s4)
self.assertEqual(reader.readline(), s5)
self.assertEqual(reader.readline(), "")
ill_formed_sequence_replace = "\ufffd"
def test_lone_surrogates(self):
self.assertRaises(UnicodeEncodeError, "\ud800".encode, self.encoding)
self.assertEqual("[\uDC80]".encode(self.encoding, "backslashreplace"),
"[\\udc80]".encode(self.encoding))
self.assertEqual("[\uDC80]".encode(self.encoding, "namereplace"),
"[\\udc80]".encode(self.encoding))
self.assertEqual("[\uDC80]".encode(self.encoding, "xmlcharrefreplace"),
"[�]".encode(self.encoding))
self.assertEqual("[\uDC80]".encode(self.encoding, "ignore"),
"[]".encode(self.encoding))
self.assertEqual("[\uDC80]".encode(self.encoding, "replace"),
"[?]".encode(self.encoding))
# sequential surrogate characters
self.assertEqual("[\uD800\uDC80]".encode(self.encoding, "ignore"),
"[]".encode(self.encoding))
self.assertEqual("[\uD800\uDC80]".encode(self.encoding, "replace"),
"[??]".encode(self.encoding))
bom = "".encode(self.encoding)
for before, after in [("\U00010fff", "A"), ("[", "]"),
("A", "\U00010fff")]:
before_sequence = before.encode(self.encoding)[len(bom):]
after_sequence = after.encode(self.encoding)[len(bom):]
test_string = before + "\uDC80" + after
test_sequence = (bom + before_sequence +
self.ill_formed_sequence + after_sequence)
self.assertRaises(UnicodeDecodeError, test_sequence.decode,
self.encoding)
self.assertEqual(test_string.encode(self.encoding,
"surrogatepass"),
test_sequence)
self.assertEqual(test_sequence.decode(self.encoding,
"surrogatepass"),
test_string)
self.assertEqual(test_sequence.decode(self.encoding, "ignore"),
before + after)
self.assertEqual(test_sequence.decode(self.encoding, "replace"),
before + self.ill_formed_sequence_replace + after)
backslashreplace = ''.join('\\x%02x' % b
for b in self.ill_formed_sequence)
self.assertEqual(test_sequence.decode(self.encoding, "backslashreplace"),
before + backslashreplace + after)
def test_incremental_surrogatepass(self):
# Test incremental decoder for surrogatepass handler:
# see issue #24214
# High surrogate
data = '\uD901'.encode(self.encoding, 'surrogatepass')
for i in range(1, len(data)):
dec = codecs.getincrementaldecoder(self.encoding)('surrogatepass')
self.assertEqual(dec.decode(data[:i]), '')
self.assertEqual(dec.decode(data[i:], True), '\uD901')
# Low surrogate
data = '\uDC02'.encode(self.encoding, 'surrogatepass')
for i in range(1, len(data)):
dec = codecs.getincrementaldecoder(self.encoding)('surrogatepass')
self.assertEqual(dec.decode(data[:i]), '')
self.assertEqual(dec.decode(data[i:]), '\uDC02')
class UTF32Test(ReadTest, unittest.TestCase):
    """Tests for the BOM-aware "utf-32" codec."""
    encoding = "utf-32"
    # One ill-formed 4-byte unit (a lone low surrogate) in this
    # platform's native byte order.
    if sys.byteorder == 'little':
        ill_formed_sequence = b"\x80\xdc\x00\x00"
    else:
        ill_formed_sequence = b"\x00\x00\xdc\x80"

    # "spamspam" encoded with a little-endian / big-endian BOM.
    spamle = (b'\xff\xfe\x00\x00'
              b's\x00\x00\x00p\x00\x00\x00a\x00\x00\x00m\x00\x00\x00'
              b's\x00\x00\x00p\x00\x00\x00a\x00\x00\x00m\x00\x00\x00')
    spambe = (b'\x00\x00\xfe\xff'
              b'\x00\x00\x00s\x00\x00\x00p\x00\x00\x00a\x00\x00\x00m'
              b'\x00\x00\x00s\x00\x00\x00p\x00\x00\x00a\x00\x00\x00m')

    def test_only_one_bom(self):
        # Two consecutive write() calls must emit exactly one BOM.
        _,_,reader,writer = codecs.lookup(self.encoding)
        # encode some stream
        s = io.BytesIO()
        f = writer(s)
        f.write("spam")
        f.write("spam")
        d = s.getvalue()
        # check whether there is exactly one BOM in it
        self.assertTrue(d == self.spamle or d == self.spambe)
        # try to read it back
        s = io.BytesIO(d)
        f = reader(s)
        self.assertEqual(f.read(), "spamspam")

    def test_badbom(self):
        # A corrupt BOM must raise, for 4 as well as 8 bytes of garbage.
        s = io.BytesIO(4*b"\xff")
        f = codecs.getreader(self.encoding)(s)
        self.assertRaises(UnicodeError, f.read)

        s = io.BytesIO(8*b"\xff")
        f = codecs.getreader(self.encoding)(s)
        self.assertRaises(UnicodeError, f.read)

    def test_partial(self):
        # Expected decoded prefix after each byte: 4 BOM bytes first,
        # then one character per 4 bytes.
        self.check_partial(
            "\x00\xff\u0100\uffff\U00010000",
            [
                "", # first byte of BOM read
                "", # second byte of BOM read
                "", # third byte of BOM read
                "", # fourth byte of BOM read => byteorder known
                "",
                "",
                "",
                "\x00",
                "\x00",
                "\x00",
                "\x00",
                "\x00\xff",
                "\x00\xff",
                "\x00\xff",
                "\x00\xff",
                "\x00\xff\u0100",
                "\x00\xff\u0100",
                "\x00\xff\u0100",
                "\x00\xff\u0100",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff\U00010000",
            ]
        )

    def test_handlers(self):
        self.assertEqual(('\ufffd', 1),
                         codecs.utf_32_decode(b'\x01', 'replace', True))
        self.assertEqual(('', 1),
                         codecs.utf_32_decode(b'\x01', 'ignore', True))

    def test_errors(self):
        self.assertRaises(UnicodeDecodeError, codecs.utf_32_decode,
                          b"\xff", "strict", True)

    def test_decoder_state(self):
        self.check_state_handling_decode(self.encoding,
                                         "spamspam", self.spamle)
        self.check_state_handling_decode(self.encoding,
                                         "spamspam", self.spambe)

    def test_issue8941(self):
        # Issue #8941: insufficient result allocation when decoding into
        # surrogate pairs on UCS-2 builds.
        encoded_le = b'\xff\xfe\x00\x00' + b'\x00\x00\x01\x00' * 1024
        self.assertEqual('\U00010000' * 1024,
                         codecs.utf_32_decode(encoded_le)[0])
        encoded_be = b'\x00\x00\xfe\xff' + b'\x00\x01\x00\x00' * 1024
        self.assertEqual('\U00010000' * 1024,
                         codecs.utf_32_decode(encoded_be)[0])
class UTF32LETest(ReadTest, unittest.TestCase):
    """Tests for the fixed-endian "utf-32-le" codec (no BOM handling)."""
    encoding = "utf-32-le"
    # Lone low surrogate, little-endian.
    ill_formed_sequence = b"\x80\xdc\x00\x00"

    def test_partial(self):
        # No BOM, so output grows by one character every 4 bytes.
        self.check_partial(
            "\x00\xff\u0100\uffff\U00010000",
            [
                "",
                "",
                "",
                "\x00",
                "\x00",
                "\x00",
                "\x00",
                "\x00\xff",
                "\x00\xff",
                "\x00\xff",
                "\x00\xff",
                "\x00\xff\u0100",
                "\x00\xff\u0100",
                "\x00\xff\u0100",
                "\x00\xff\u0100",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff\U00010000",
            ]
        )

    def test_simple(self):
        self.assertEqual("\U00010203".encode(self.encoding), b"\x03\x02\x01\x00")

    def test_errors(self):
        self.assertRaises(UnicodeDecodeError, codecs.utf_32_le_decode,
                          b"\xff", "strict", True)

    def test_issue8941(self):
        # Issue #8941: insufficient result allocation when decoding into
        # surrogate pairs on UCS-2 builds.
        encoded = b'\x00\x00\x01\x00' * 1024
        self.assertEqual('\U00010000' * 1024,
                         codecs.utf_32_le_decode(encoded)[0])
class UTF32BETest(ReadTest, unittest.TestCase):
    """Tests for the explicitly big-endian UTF-32 codec (no BOM)."""
    encoding = "utf-32-be"
    # Lone low surrogate U+DC80 in BE byte order -- ill-formed input.
    ill_formed_sequence = b"\x00\x00\xdc\x80"
    def test_partial(self):
        # Entry k is the expected incremental-decoder output after k+1
        # bytes; each code point needs a full four-byte unit.
        self.check_partial(
            "\x00\xff\u0100\uffff\U00010000",
            [
                "",
                "",
                "",
                "\x00",
                "\x00",
                "\x00",
                "\x00",
                "\x00\xff",
                "\x00\xff",
                "\x00\xff",
                "\x00\xff",
                "\x00\xff\u0100",
                "\x00\xff\u0100",
                "\x00\xff\u0100",
                "\x00\xff\u0100",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff\U00010000",
            ]
        )
    def test_simple(self):
        # U+10203 -> bytes 0x00 0x01 0x02 0x03 in big-endian order.
        self.assertEqual("\U00010203".encode(self.encoding), b"\x00\x01\x02\x03")
    def test_errors(self):
        # A truncated (one-byte) sequence must fail under "strict".
        self.assertRaises(UnicodeDecodeError, codecs.utf_32_be_decode,
                          b"\xff", "strict", True)
    def test_issue8941(self):
        # Issue #8941: insufficient result allocation when decoding into
        # surrogate pairs on UCS-2 builds.
        encoded = b'\x00\x01\x00\x00' * 1024
        self.assertEqual('\U00010000' * 1024,
                         codecs.utf_32_be_decode(encoded)[0])
class UTF16Test(ReadTest, unittest.TestCase):
    """Tests for the BOM-aware UTF-16 codec."""
    encoding = "utf-16"
    # A lone low surrogate in the platform's native byte order.
    if sys.byteorder == 'little':
        ill_formed_sequence = b"\x80\xdc"
    else:
        ill_formed_sequence = b"\xdc\x80"
    # "spamspam" encoded with a little-endian / big-endian BOM prefix.
    spamle = b'\xff\xfes\x00p\x00a\x00m\x00s\x00p\x00a\x00m\x00'
    spambe = b'\xfe\xff\x00s\x00p\x00a\x00m\x00s\x00p\x00a\x00m'
    def test_only_one_bom(self):
        """The stream writer must emit a single BOM across multiple writes."""
        _,_,reader,writer = codecs.lookup(self.encoding)
        # encode some stream
        s = io.BytesIO()
        f = writer(s)
        f.write("spam")
        f.write("spam")
        d = s.getvalue()
        # check whether there is exactly one BOM in it
        self.assertTrue(d == self.spamle or d == self.spambe)
        # try to read it back
        s = io.BytesIO(d)
        f = reader(s)
        self.assertEqual(f.read(), "spamspam")
    def test_badbom(self):
        # 0xFF 0xFF is not a BOM; decoding it must fail.
        s = io.BytesIO(b"\xff\xff")
        f = codecs.getreader(self.encoding)(s)
        self.assertRaises(UnicodeError, f.read)
        s = io.BytesIO(b"\xff\xff\xff\xff")
        f = codecs.getreader(self.encoding)(s)
        self.assertRaises(UnicodeError, f.read)
    def test_partial(self):
        # Entry k is the expected incremental-decoder output after feeding
        # k+1 bytes; the first two bytes are consumed as the BOM.
        self.check_partial(
            "\x00\xff\u0100\uffff\U00010000",
            [
                "", # first byte of BOM read
                "", # second byte of BOM read => byteorder known
                "",
                "\x00",
                "\x00",
                "\x00\xff",
                "\x00\xff",
                "\x00\xff\u0100",
                "\x00\xff\u0100",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff\U00010000",
            ]
        )
    def test_handlers(self):
        # A truncated unit is replaced or dropped per the error handler.
        self.assertEqual(('\ufffd', 1),
                         codecs.utf_16_decode(b'\x01', 'replace', True))
        self.assertEqual(('', 1),
                         codecs.utf_16_decode(b'\x01', 'ignore', True))
    def test_errors(self):
        # A truncated (one-byte) sequence must fail under "strict".
        self.assertRaises(UnicodeDecodeError, codecs.utf_16_decode,
                          b"\xff", "strict", True)
    def test_decoder_state(self):
        # Decoder state round-trips for both byte orders.
        self.check_state_handling_decode(self.encoding,
                                         "spamspam", self.spamle)
        self.check_state_handling_decode(self.encoding,
                                         "spamspam", self.spambe)
    def test_bug691291(self):
        # Files are always opened in binary mode, even if no binary mode was
        # specified. This means that no automatic conversion of '\n' is done
        # on reading and writing.
        s1 = 'Hello\r\nworld\r\n'
        s = s1.encode(self.encoding)
        self.addCleanup(os_helper.unlink, os_helper.TESTFN)
        with open(os_helper.TESTFN, 'wb') as fp:
            fp.write(s)
        with warnings_helper.check_warnings(('', DeprecationWarning)):
            reader = codecs.open(os_helper.TESTFN, 'U', encoding=self.encoding)
        with reader:
            self.assertEqual(reader.read(), s1)
class UTF16LETest(ReadTest, unittest.TestCase):
    """Tests for the explicitly little-endian UTF-16 codec (no BOM)."""
    encoding = "utf-16-le"
    # Lone low surrogate U+DC80 in LE byte order -- ill-formed input.
    ill_formed_sequence = b"\x80\xdc"
    def test_partial(self):
        # Entry k is the expected incremental-decoder output after k+1 bytes.
        self.check_partial(
            "\x00\xff\u0100\uffff\U00010000",
            [
                "",
                "\x00",
                "\x00",
                "\x00\xff",
                "\x00\xff",
                "\x00\xff\u0100",
                "\x00\xff\u0100",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff\U00010000",
            ]
        )
    def test_errors(self):
        # (raw bytes, expected "replace"-handler output); "strict" must raise.
        tests = [
            (b'\xff', '\ufffd'),
            (b'A\x00Z', 'A\ufffd'),
            (b'A\x00B\x00C\x00D\x00Z', 'ABCD\ufffd'),
            (b'\x00\xd8', '\ufffd'),
            (b'\x00\xd8A', '\ufffd'),
            (b'\x00\xd8A\x00', '\ufffdA'),
            (b'\x00\xdcA\x00', '\ufffdA'),
        ]
        for raw, expected in tests:
            self.assertRaises(UnicodeDecodeError, codecs.utf_16_le_decode,
                              raw, 'strict', True)
            self.assertEqual(raw.decode('utf-16le', 'replace'), expected)
    def test_nonbmp(self):
        # U+10203 encodes as a surrogate pair and must round-trip.
        self.assertEqual("\U00010203".encode(self.encoding),
                         b'\x00\xd8\x03\xde')
        self.assertEqual(b'\x00\xd8\x03\xde'.decode(self.encoding),
                         "\U00010203")
class UTF16BETest(ReadTest, unittest.TestCase):
    """Tests for the explicitly big-endian UTF-16 codec (no BOM)."""
    encoding = "utf-16-be"
    # Lone low surrogate U+DC80 in BE byte order -- ill-formed input.
    ill_formed_sequence = b"\xdc\x80"
    def test_partial(self):
        # Entry k is the expected incremental-decoder output after k+1 bytes.
        self.check_partial(
            "\x00\xff\u0100\uffff\U00010000",
            [
                "",
                "\x00",
                "\x00",
                "\x00\xff",
                "\x00\xff",
                "\x00\xff\u0100",
                "\x00\xff\u0100",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff\U00010000",
            ]
        )
    def test_errors(self):
        # (raw bytes, expected "replace"-handler output); "strict" must raise.
        tests = [
            (b'\xff', '\ufffd'),
            (b'\x00A\xff', 'A\ufffd'),
            (b'\x00A\x00B\x00C\x00DZ', 'ABCD\ufffd'),
            (b'\xd8\x00', '\ufffd'),
            (b'\xd8\x00\xdc', '\ufffd'),
            (b'\xd8\x00\x00A', '\ufffdA'),
            (b'\xdc\x00\x00A', '\ufffdA'),
        ]
        for raw, expected in tests:
            self.assertRaises(UnicodeDecodeError, codecs.utf_16_be_decode,
                              raw, 'strict', True)
            self.assertEqual(raw.decode('utf-16be', 'replace'), expected)
    def test_nonbmp(self):
        # U+10203 encodes as a surrogate pair and must round-trip.
        self.assertEqual("\U00010203".encode(self.encoding),
                         b'\xd8\x00\xde\x03')
        self.assertEqual(b'\xd8\x00\xde\x03'.decode(self.encoding),
                         "\U00010203")
class UTF8Test(ReadTest, unittest.TestCase):
    """Tests for the plain UTF-8 codec."""
    encoding = "utf-8"
    # UTF-8 encoding of a lone low surrogate U+DC80 -- ill-formed input.
    ill_formed_sequence = b"\xed\xb2\x80"
    ill_formed_sequence_replace = "\ufffd" * 3
    # Plain UTF-8 writes no byte-order mark (UTF8SigTest overrides this).
    BOM = b''
    def test_partial(self):
        # Entry k is the expected incremental-decoder output after k+1 bytes.
        self.check_partial(
            "\x00\xff\u07ff\u0800\uffff\U00010000",
            [
                "\x00",
                "\x00",
                "\x00\xff",
                "\x00\xff",
                "\x00\xff\u07ff",
                "\x00\xff\u07ff",
                "\x00\xff\u07ff",
                "\x00\xff\u07ff\u0800",
                "\x00\xff\u07ff\u0800",
                "\x00\xff\u07ff\u0800",
                "\x00\xff\u07ff\u0800\uffff",
                "\x00\xff\u07ff\u0800\uffff",
                "\x00\xff\u07ff\u0800\uffff",
                "\x00\xff\u07ff\u0800\uffff",
                "\x00\xff\u07ff\u0800\uffff\U00010000",
            ]
        )
    def test_decoder_state(self):
        # State round-trips across sequence lengths from 1 to 4 bytes.
        u = "\x00\x7f\x80\xff\u0100\u07ff\u0800\uffff\U0010ffff"
        self.check_state_handling_decode(self.encoding,
                                         u, u.encode(self.encoding))
    def test_decode_error(self):
        # Each error handler's treatment of stray continuation bytes.
        for data, error_handler, expected in (
            (b'[\x80\xff]', 'ignore', '[]'),
            (b'[\x80\xff]', 'replace', '[\ufffd\ufffd]'),
            (b'[\x80\xff]', 'surrogateescape', '[\udc80\udcff]'),
            (b'[\x80\xff]', 'backslashreplace', '[\\x80\\xff]'),
        ):
            with self.subTest(data=data, error_handler=error_handler,
                              expected=expected):
                self.assertEqual(data.decode(self.encoding, error_handler),
                                 expected)
    def test_lone_surrogates(self):
        super().test_lone_surrogates()
        # not sure if this is making sense for
        # UTF-16 and UTF-32
        self.assertEqual("[\uDC80]".encode(self.encoding, "surrogateescape"),
                         self.BOM + b'[\x80]')
        with self.assertRaises(UnicodeEncodeError) as cm:
            "[\uDC80\uD800\uDFFF]".encode(self.encoding, "surrogateescape")
        exc = cm.exception
        self.assertEqual(exc.object[exc.start:exc.end], '\uD800\uDFFF')
    def test_surrogatepass_handler(self):
        # "surrogatepass" encodes and decodes lone surrogates verbatim.
        self.assertEqual("abc\ud800def".encode(self.encoding, "surrogatepass"),
                         self.BOM + b"abc\xed\xa0\x80def")
        self.assertEqual("\U00010fff\uD800".encode(self.encoding, "surrogatepass"),
                         self.BOM + b"\xf0\x90\xbf\xbf\xed\xa0\x80")
        self.assertEqual("[\uD800\uDC80]".encode(self.encoding, "surrogatepass"),
                         self.BOM + b'[\xed\xa0\x80\xed\xb2\x80]')
        self.assertEqual(b"abc\xed\xa0\x80def".decode(self.encoding, "surrogatepass"),
                         "abc\ud800def")
        self.assertEqual(b"\xf0\x90\xbf\xbf\xed\xa0\x80".decode(self.encoding, "surrogatepass"),
                         "\U00010fff\uD800")
        self.assertTrue(codecs.lookup_error("surrogatepass"))
        with self.assertRaises(UnicodeDecodeError):
            b"abc\xed\xa0".decode(self.encoding, "surrogatepass")
        with self.assertRaises(UnicodeDecodeError):
            b"abc\xed\xa0z".decode(self.encoding, "surrogatepass")
    def test_incremental_errors(self):
        # Test that the incremental decoder can fail with final=False.
        # See issue #24214
        cases = [b'\x80', b'\xBF', b'\xC0', b'\xC1', b'\xF5', b'\xF6', b'\xFF']
        for prefix in (b'\xC2', b'\xDF', b'\xE0', b'\xE0\xA0', b'\xEF',
                       b'\xEF\xBF', b'\xF0', b'\xF0\x90', b'\xF0\x90\x80',
                       b'\xF4', b'\xF4\x8F', b'\xF4\x8F\xBF'):
            for suffix in b'\x7F', b'\xC0':
                cases.append(prefix + suffix)
        cases.extend((b'\xE0\x80', b'\xE0\x9F', b'\xED\xA0\x80',
                      b'\xED\xBF\xBF', b'\xF0\x80', b'\xF0\x8F', b'\xF4\x90'))
        for data in cases:
            with self.subTest(data=data):
                dec = codecs.getincrementaldecoder(self.encoding)()
                self.assertRaises(UnicodeDecodeError, dec.decode, data)
class UTF7Test(ReadTest, unittest.TestCase):
    """Tests for the UTF-7 codec (RFC 2152 modified base64 sections)."""
    encoding = "utf-7"
    def test_ascii(self):
        # Set D (directly encoded characters)
        set_d = ('ABCDEFGHIJKLMNOPQRSTUVWXYZ'
                 'abcdefghijklmnopqrstuvwxyz'
                 '0123456789'
                 '\'(),-./:?')
        self.assertEqual(set_d.encode(self.encoding), set_d.encode('ascii'))
        self.assertEqual(set_d.encode('ascii').decode(self.encoding), set_d)
        # Set O (optional direct characters) also passes through unchanged.
        set_o = ' !"#$%&*;<=>@[]^_`{|}'
        self.assertEqual(set_o.encode(self.encoding), set_o.encode('ascii'))
        self.assertEqual(set_o.encode('ascii').decode(self.encoding), set_o)
        # +
        self.assertEqual('a+b'.encode(self.encoding), b'a+-b')
        self.assertEqual(b'a+-b'.decode(self.encoding), 'a+b')
        # White spaces
        ws = ' \t\n\r'
        self.assertEqual(ws.encode(self.encoding), ws.encode('ascii'))
        self.assertEqual(ws.encode('ascii').decode(self.encoding), ws)
        # Other ASCII characters
        other_ascii = ''.join(sorted(set(bytes(range(0x80)).decode()) -
                                     set(set_d + set_o + '+' + ws)))
        self.assertEqual(other_ascii.encode(self.encoding),
                         b'+AAAAAQACAAMABAAFAAYABwAIAAsADAAOAA8AEAARABIAEwAU'
                         b'ABUAFgAXABgAGQAaABsAHAAdAB4AHwBcAH4Afw-')
    def test_partial(self):
        # Entry k is the expected incremental-decoder output after k+1
        # bytes; base64 sections are only emitted once unambiguous.
        self.check_partial(
            'a+-b\x00c\x80d\u0100e\U00010000f',
            [
                'a',
                'a',
                'a+',
                'a+-',
                'a+-b',
                'a+-b',
                'a+-b',
                'a+-b',
                'a+-b',
                'a+-b\x00',
                'a+-b\x00c',
                'a+-b\x00c',
                'a+-b\x00c',
                'a+-b\x00c',
                'a+-b\x00c',
                'a+-b\x00c\x80',
                'a+-b\x00c\x80d',
                'a+-b\x00c\x80d',
                'a+-b\x00c\x80d',
                'a+-b\x00c\x80d',
                'a+-b\x00c\x80d',
                'a+-b\x00c\x80d\u0100',
                'a+-b\x00c\x80d\u0100e',
                'a+-b\x00c\x80d\u0100e',
                'a+-b\x00c\x80d\u0100e',
                'a+-b\x00c\x80d\u0100e',
                'a+-b\x00c\x80d\u0100e',
                'a+-b\x00c\x80d\u0100e',
                'a+-b\x00c\x80d\u0100e',
                'a+-b\x00c\x80d\u0100e',
                'a+-b\x00c\x80d\u0100e\U00010000',
                'a+-b\x00c\x80d\u0100e\U00010000f',
            ]
        )
    def test_errors(self):
        # (raw bytes, expected "replace"-handler output); "strict" must raise.
        tests = [
            (b'\xffb', '\ufffdb'),
            (b'a\xffb', 'a\ufffdb'),
            (b'a\xff\xffb', 'a\ufffd\ufffdb'),
            (b'a+IK', 'a\ufffd'),
            (b'a+IK-b', 'a\ufffdb'),
            (b'a+IK,b', 'a\ufffdb'),
            (b'a+IKx', 'a\u20ac\ufffd'),
            (b'a+IKx-b', 'a\u20ac\ufffdb'),
            (b'a+IKwgr', 'a\u20ac\ufffd'),
            (b'a+IKwgr-b', 'a\u20ac\ufffdb'),
            (b'a+IKwgr,', 'a\u20ac\ufffd'),
            (b'a+IKwgr,-b', 'a\u20ac\ufffd-b'),
            (b'a+IKwgrB', 'a\u20ac\u20ac\ufffd'),
            (b'a+IKwgrB-b', 'a\u20ac\u20ac\ufffdb'),
            (b'a+/,+IKw-b', 'a\ufffd\u20acb'),
            (b'a+//,+IKw-b', 'a\ufffd\u20acb'),
            (b'a+///,+IKw-b', 'a\uffff\ufffd\u20acb'),
            (b'a+////,+IKw-b', 'a\uffff\ufffd\u20acb'),
            (b'a+IKw-b\xff', 'a\u20acb\ufffd'),
            (b'a+IKw\xffb', 'a\u20ac\ufffdb'),
            (b'a+@b', 'a\ufffdb'),
        ]
        for raw, expected in tests:
            with self.subTest(raw=raw):
                self.assertRaises(UnicodeDecodeError, codecs.utf_7_decode,
                                  raw, 'strict', True)
                self.assertEqual(raw.decode('utf-7', 'replace'), expected)
    def test_nonbmp(self):
        # Non-BMP code points travel as surrogate pairs inside base64 runs.
        self.assertEqual('\U000104A0'.encode(self.encoding), b'+2AHcoA-')
        self.assertEqual('\ud801\udca0'.encode(self.encoding), b'+2AHcoA-')
        self.assertEqual(b'+2AHcoA-'.decode(self.encoding), '\U000104A0')
        self.assertEqual(b'+2AHcoA'.decode(self.encoding), '\U000104A0')
        self.assertEqual('\u20ac\U000104A0'.encode(self.encoding), b'+IKzYAdyg-')
        self.assertEqual(b'+IKzYAdyg-'.decode(self.encoding), '\u20ac\U000104A0')
        self.assertEqual(b'+IKzYAdyg'.decode(self.encoding), '\u20ac\U000104A0')
        self.assertEqual('\u20ac\u20ac\U000104A0'.encode(self.encoding),
                         b'+IKwgrNgB3KA-')
        self.assertEqual(b'+IKwgrNgB3KA-'.decode(self.encoding),
                         '\u20ac\u20ac\U000104A0')
        self.assertEqual(b'+IKwgrNgB3KA'.decode(self.encoding),
                         '\u20ac\u20ac\U000104A0')
    def test_lone_surrogates(self):
        # (raw bytes, expected "replace"-handler output for lone surrogates).
        tests = [
            (b'a+2AE-b', 'a\ud801b'),
            (b'a+2AE\xffb', 'a\ufffdb'),
            (b'a+2AE', 'a\ufffd'),
            (b'a+2AEA-b', 'a\ufffdb'),
            (b'a+2AH-b', 'a\ufffdb'),
            (b'a+IKzYAQ-b', 'a\u20ac\ud801b'),
            (b'a+IKzYAQ\xffb', 'a\u20ac\ufffdb'),
            (b'a+IKzYAQA-b', 'a\u20ac\ufffdb'),
            (b'a+IKzYAd-b', 'a\u20ac\ufffdb'),
            (b'a+IKwgrNgB-b', 'a\u20ac\u20ac\ud801b'),
            (b'a+IKwgrNgB\xffb', 'a\u20ac\u20ac\ufffdb'),
            (b'a+IKwgrNgB', 'a\u20ac\u20ac\ufffd'),
            (b'a+IKwgrNgBA-b', 'a\u20ac\u20ac\ufffdb'),
        ]
        for raw, expected in tests:
            with self.subTest(raw=raw):
                self.assertEqual(raw.decode('utf-7', 'replace'), expected)
class UTF16ExTest(unittest.TestCase):
    """Tests for the low-level codecs.utf_16_ex_decode helper."""
    def test_errors(self):
        # A lone byte cannot be decoded under "strict".
        with self.assertRaises(UnicodeDecodeError):
            codecs.utf_16_ex_decode(b"\xff", "strict", 0, True)
    def test_bad_args(self):
        # The data argument is mandatory.
        with self.assertRaises(TypeError):
            codecs.utf_16_ex_decode()
class ReadBufferTest(unittest.TestCase):
    """Tests for codecs.readbuffer_encode, which copies a buffer to bytes."""
    def test_array(self):
        # Any object exposing the buffer protocol is accepted.
        import array
        buf = array.array("b", b"spam")
        self.assertEqual(codecs.readbuffer_encode(buf), (b"spam", 4))
    def test_empty(self):
        self.assertEqual(codecs.readbuffer_encode(""), (b"", 0))
    def test_bad_args(self):
        with self.assertRaises(TypeError):
            codecs.readbuffer_encode()
        with self.assertRaises(TypeError):
            codecs.readbuffer_encode(42)
class UTF8SigTest(UTF8Test, unittest.TestCase):
    """Tests for utf-8-sig, which writes and strips one leading BOM."""
    encoding = "utf-8-sig"
    BOM = codecs.BOM_UTF8
    def test_partial(self):
        # Entry k is the expected incremental output after k+1 bytes.  The
        # leading three-byte BOM is stripped; a second BOM decodes as U+FEFF.
        self.check_partial(
            "\ufeff\x00\xff\u07ff\u0800\uffff\U00010000",
            [
                "",
                "",
                "", # First BOM has been read and skipped
                "",
                "",
                "\ufeff", # Second BOM has been read and emitted
                "\ufeff\x00", # "\x00" read and emitted
                "\ufeff\x00", # First byte of encoded "\xff" read
                "\ufeff\x00\xff", # Second byte of encoded "\xff" read
                "\ufeff\x00\xff", # First byte of encoded "\u07ff" read
                "\ufeff\x00\xff\u07ff", # Second byte of encoded "\u07ff" read
                "\ufeff\x00\xff\u07ff",
                "\ufeff\x00\xff\u07ff",
                "\ufeff\x00\xff\u07ff\u0800",
                "\ufeff\x00\xff\u07ff\u0800",
                "\ufeff\x00\xff\u07ff\u0800",
                "\ufeff\x00\xff\u07ff\u0800\uffff",
                "\ufeff\x00\xff\u07ff\u0800\uffff",
                "\ufeff\x00\xff\u07ff\u0800\uffff",
                "\ufeff\x00\xff\u07ff\u0800\uffff",
                "\ufeff\x00\xff\u07ff\u0800\uffff\U00010000",
            ]
        )
    def test_bug1601501(self):
        # SF bug #1601501: check that the codec works with a buffer
        self.assertEqual(str(b"\xef\xbb\xbf", "utf-8-sig"), "")
    def test_bom(self):
        # A BOM-prefixed encoding must round-trip through the incremental
        # decoder.
        d = codecs.getincrementaldecoder("utf-8-sig")()
        s = "spam"
        self.assertEqual(d.decode(s.encode("utf-8-sig")), s)
    def test_stream_bom(self):
        # Reading in chunks of any size must strip exactly one leading BOM.
        unistring = "ABC\u00A1\u2200XYZ"
        bytestring = codecs.BOM_UTF8 + b"ABC\xC2\xA1\xE2\x88\x80XYZ"
        reader = codecs.getreader("utf-8-sig")
        for sizehint in [None] + list(range(1, 11)) + \
                        [64, 128, 256, 512, 1024]:
            istream = reader(io.BytesIO(bytestring))
            ostream = io.StringIO()
            while 1:
                if sizehint is not None:
                    data = istream.read(sizehint)
                else:
                    data = istream.read()
                if not data:
                    break
                ostream.write(data)
            got = ostream.getvalue()
            self.assertEqual(got, unistring)
    def test_stream_bare(self):
        # BOM-less input must decode unchanged for any chunk size.
        unistring = "ABC\u00A1\u2200XYZ"
        bytestring = b"ABC\xC2\xA1\xE2\x88\x80XYZ"
        reader = codecs.getreader("utf-8-sig")
        for sizehint in [None] + list(range(1, 11)) + \
                        [64, 128, 256, 512, 1024]:
            istream = reader(io.BytesIO(bytestring))
            ostream = io.StringIO()
            while 1:
                if sizehint is not None:
                    data = istream.read(sizehint)
                else:
                    data = istream.read()
                if not data:
                    break
                ostream.write(data)
            got = ostream.getvalue()
            self.assertEqual(got, unistring)
class EscapeDecodeTest(unittest.TestCase):
    """Tests for codecs.escape_decode (Python byte-string escape sequences)."""
    def test_empty(self):
        self.assertEqual(codecs.escape_decode(b""), (b"", 0))
        self.assertEqual(codecs.escape_decode(bytearray()), (b"", 0))
    def test_raw(self):
        # Every byte other than a backslash passes through unchanged.
        decode = codecs.escape_decode
        for b in range(256):
            b = bytes([b])
            if b != b'\\':
                self.assertEqual(decode(b + b'0'), (b + b'0', 2))
    def test_escape(self):
        decode = codecs.escape_decode
        check = coding_checker(self, decode)
        check(b"[\\\n]", b"[]")
        check(br'[\"]', b'["]')
        check(br"[\']", b"[']")
        check(br"[\\]", b"[\\]")
        check(br"[\a]", b"[\x07]")
        check(br"[\b]", b"[\x08]")
        check(br"[\t]", b"[\x09]")
        check(br"[\n]", b"[\x0a]")
        check(br"[\v]", b"[\x0b]")
        check(br"[\f]", b"[\x0c]")
        check(br"[\r]", b"[\x0d]")
        check(br"[\7]", b"[\x07]")
        check(br"[\78]", b"[\x078]")
        check(br"[\41]", b"[!]")
        check(br"[\418]", b"[!8]")
        check(br"[\101]", b"[A]")
        check(br"[\1010]", b"[A0]")
        check(br"[\501]", b"[A]")
        check(br"[\x41]", b"[A]")
        check(br"[\x410]", b"[A0]")
        # Unrecognized escapes are kept verbatim but raise DeprecationWarning.
        for i in range(97, 123):
            b = bytes([i])
            if b not in b'abfnrtvx':
                with self.assertWarns(DeprecationWarning):
                    check(b"\\" + b, b"\\" + b)
            with self.assertWarns(DeprecationWarning):
                check(b"\\" + b.upper(), b"\\" + b.upper())
        with self.assertWarns(DeprecationWarning):
            check(br"\8", b"\\8")
        with self.assertWarns(DeprecationWarning):
            check(br"\9", b"\\9")
        with self.assertWarns(DeprecationWarning):
            check(b"\\\xfa", b"\\\xfa")
    def test_errors(self):
        # Truncated \x escapes: "strict" raises; ignore/replace recover.
        decode = codecs.escape_decode
        self.assertRaises(ValueError, decode, br"\x")
        self.assertRaises(ValueError, decode, br"[\x]")
        self.assertEqual(decode(br"[\x]\x", "ignore"), (b"[]", 6))
        self.assertEqual(decode(br"[\x]\x", "replace"), (b"[?]?", 6))
        self.assertRaises(ValueError, decode, br"\x0")
        self.assertRaises(ValueError, decode, br"[\x0]")
        self.assertEqual(decode(br"[\x0]\x0", "ignore"), (b"[]", 8))
        self.assertEqual(decode(br"[\x0]\x0", "replace"), (b"[?]?", 8))
# From RFC 3492
# Each entry is a (unicode string, expected Punycode bytes) pair taken from
# the RFC's sample strings.
punycode_testcases = [
    # A Arabic (Egyptian):
    ("\u0644\u064A\u0647\u0645\u0627\u0628\u062A\u0643\u0644"
     "\u0645\u0648\u0634\u0639\u0631\u0628\u064A\u061F",
     b"egbpdaj6bu4bxfgehfvwxn"),
    # B Chinese (simplified):
    ("\u4ED6\u4EEC\u4E3A\u4EC0\u4E48\u4E0D\u8BF4\u4E2D\u6587",
     b"ihqwcrb4cv8a8dqg056pqjye"),
    # C Chinese (traditional):
    ("\u4ED6\u5011\u7232\u4EC0\u9EBD\u4E0D\u8AAA\u4E2D\u6587",
     b"ihqwctvzc91f659drss3x8bo0yb"),
    # D Czech: Pro<ccaron>prost<ecaron>nemluv<iacute><ccaron>esky
    ("\u0050\u0072\u006F\u010D\u0070\u0072\u006F\u0073\u0074"
     "\u011B\u006E\u0065\u006D\u006C\u0075\u0076\u00ED\u010D"
     "\u0065\u0073\u006B\u0079",
     b"Proprostnemluvesky-uyb24dma41a"),
    # E Hebrew:
    ("\u05DC\u05DE\u05D4\u05D4\u05DD\u05E4\u05E9\u05D5\u05D8"
     "\u05DC\u05D0\u05DE\u05D3\u05D1\u05E8\u05D9\u05DD\u05E2"
     "\u05D1\u05E8\u05D9\u05EA",
     b"4dbcagdahymbxekheh6e0a7fei0b"),
    # F Hindi (Devanagari):
    ("\u092F\u0939\u0932\u094B\u0917\u0939\u093F\u0928\u094D"
     "\u0926\u0940\u0915\u094D\u092F\u094B\u0902\u0928\u0939"
     "\u0940\u0902\u092C\u094B\u0932\u0938\u0915\u0924\u0947"
     "\u0939\u0948\u0902",
     b"i1baa7eci9glrd9b2ae1bj0hfcgg6iyaf8o0a1dig0cd"),
    #(G) Japanese (kanji and hiragana):
    ("\u306A\u305C\u307F\u3093\u306A\u65E5\u672C\u8A9E\u3092"
     "\u8A71\u3057\u3066\u304F\u308C\u306A\u3044\u306E\u304B",
     b"n8jok5ay5dzabd5bym9f0cm5685rrjetr6pdxa"),
    # (H) Korean (Hangul syllables):
    ("\uC138\uACC4\uC758\uBAA8\uB4E0\uC0AC\uB78C\uB4E4\uC774"
     "\uD55C\uAD6D\uC5B4\uB97C\uC774\uD574\uD55C\uB2E4\uBA74"
     "\uC5BC\uB9C8\uB098\uC88B\uC744\uAE4C",
     b"989aomsvi5e83db1d2a355cv1e0vak1dwrv93d5xbh15a0dt30a5j"
     b"psd879ccm6fea98c"),
    # (I) Russian (Cyrillic):
    ("\u043F\u043E\u0447\u0435\u043C\u0443\u0436\u0435\u043E"
     "\u043D\u0438\u043D\u0435\u0433\u043E\u0432\u043E\u0440"
     "\u044F\u0442\u043F\u043E\u0440\u0443\u0441\u0441\u043A"
     "\u0438",
     b"b1abfaaepdrnnbgefbaDotcwatmq2g4l"),
    # (J) Spanish: Porqu<eacute>nopuedensimplementehablarenEspa<ntilde>ol
    ("\u0050\u006F\u0072\u0071\u0075\u00E9\u006E\u006F\u0070"
     "\u0075\u0065\u0064\u0065\u006E\u0073\u0069\u006D\u0070"
     "\u006C\u0065\u006D\u0065\u006E\u0074\u0065\u0068\u0061"
     "\u0062\u006C\u0061\u0072\u0065\u006E\u0045\u0073\u0070"
     "\u0061\u00F1\u006F\u006C",
     b"PorqunopuedensimplementehablarenEspaol-fmd56a"),
    # (K) Vietnamese:
    # T<adotbelow>isaoh<odotbelow>kh<ocirc>ngth<ecirchookabove>ch\
    # <ihookabove>n<oacute>iti<ecircacute>ngVi<ecircdotbelow>t
    ("\u0054\u1EA1\u0069\u0073\u0061\u006F\u0068\u1ECD\u006B"
     "\u0068\u00F4\u006E\u0067\u0074\u0068\u1EC3\u0063\u0068"
     "\u1EC9\u006E\u00F3\u0069\u0074\u0069\u1EBF\u006E\u0067"
     "\u0056\u0069\u1EC7\u0074",
     b"TisaohkhngthchnitingVit-kjcr8268qyxafd2f1b9g"),
    #(L) 3<nen>B<gumi><kinpachi><sensei>
    ("\u0033\u5E74\u0042\u7D44\u91D1\u516B\u5148\u751F",
     b"3B-ww4c5e180e575a65lsy2b"),
    # (M) <amuro><namie>-with-SUPER-MONKEYS
    ("\u5B89\u5BA4\u5948\u7F8E\u6075\u002D\u0077\u0069\u0074"
     "\u0068\u002D\u0053\u0055\u0050\u0045\u0052\u002D\u004D"
     "\u004F\u004E\u004B\u0045\u0059\u0053",
     b"-with-SUPER-MONKEYS-pc58ag80a8qai00g7n9n"),
    # (N) Hello-Another-Way-<sorezore><no><basho>
    ("\u0048\u0065\u006C\u006C\u006F\u002D\u0041\u006E\u006F"
     "\u0074\u0068\u0065\u0072\u002D\u0057\u0061\u0079\u002D"
     "\u305D\u308C\u305E\u308C\u306E\u5834\u6240",
     b"Hello-Another-Way--fc4qua05auwb3674vfr0b"),
    # (O) <hitotsu><yane><no><shita>2
    ("\u3072\u3068\u3064\u5C4B\u6839\u306E\u4E0B\u0032",
     b"2-u9tlzr9756bt3uc0v"),
    # (P) Maji<de>Koi<suru>5<byou><mae>
    ("\u004D\u0061\u006A\u0069\u3067\u004B\u006F\u0069\u3059"
     "\u308B\u0035\u79D2\u524D",
     b"MajiKoi5-783gue6qz075azm5e"),
    # (Q) <pafii>de<runba>
    ("\u30D1\u30D5\u30A3\u30FC\u0064\u0065\u30EB\u30F3\u30D0",
     b"de-jg4avhby1noc0d"),
    # (R) <sono><supiido><de>
    ("\u305D\u306E\u30B9\u30D4\u30FC\u30C9\u3067",
     b"d9juau41awczczp"),
    # (S) -> $1.00 <-
    ("\u002D\u003E\u0020\u0024\u0031\u002E\u0030\u0030\u0020"
     "\u003C\u002D",
     b"-> $1.00 <--")
    ]
# Import-time sanity check: report any entry that is not a two-element pair.
for i in punycode_testcases:
    if len(i)!=2:
        print(repr(i))
class PunycodeTest(unittest.TestCase):
    """Drive the RFC 3492 sample strings through the "punycode" codec."""
    def test_encode(self):
        for uni, puny in punycode_testcases:
            # Need to convert both strings to lower case, since
            # some of the extended encodings use upper case, but our
            # code produces only lower case. Converting just puny to
            # lower is also insufficient, since some of the input characters
            # are upper case.
            produced = str(uni.encode("punycode"), "ascii")
            reference = str(puny, "ascii")
            self.assertEqual(produced.lower(), reference.lower())
    def test_decode(self):
        for uni, puny in punycode_testcases:
            self.assertEqual(uni, puny.decode("punycode"))
            # A round trip through ASCII must decode identically.
            roundtripped = puny.decode("ascii").encode("ascii")
            self.assertEqual(uni, roundtripped.decode("punycode"))
    def test_decode_invalid(self):
        # '&' is outside the Punycode alphabet: strict raises, ignore skips.
        testcases = [
            (b"xn--w&", "strict", UnicodeError()),
            (b"xn--w&", "ignore", "xn-"),
        ]
        for puny, errors, expected in testcases:
            with self.subTest(puny=puny, errors=errors):
                if isinstance(expected, Exception):
                    self.assertRaises(UnicodeError, puny.decode, "punycode", errors)
                else:
                    self.assertEqual(puny.decode("punycode", errors), expected)
# From http://www.gnu.org/software/libidn/draft-josefsson-idn-test-vectors.html
# Each entry is (UTF-8 input, expected UTF-8 nameprep output); an expected
# value of None means the input is prohibited, and a (None, None) entry
# marks a vector that is skipped on this build.
nameprep_tests = [
    # 3.1 Map to nothing.
    (b'foo\xc2\xad\xcd\x8f\xe1\xa0\x86\xe1\xa0\x8bbar'
     b'\xe2\x80\x8b\xe2\x81\xa0baz\xef\xb8\x80\xef\xb8\x88\xef'
     b'\xb8\x8f\xef\xbb\xbf',
     b'foobarbaz'),
    # 3.2 Case folding ASCII U+0043 U+0041 U+0046 U+0045.
    (b'CAFE',
     b'cafe'),
    # 3.3 Case folding 8bit U+00DF (german sharp s).
    # The original test case is bogus; it says \xc3\xdf
    (b'\xc3\x9f',
     b'ss'),
    # 3.4 Case folding U+0130 (turkish capital I with dot).
    (b'\xc4\xb0',
     b'i\xcc\x87'),
    # 3.5 Case folding multibyte U+0143 U+037A.
    (b'\xc5\x83\xcd\xba',
     b'\xc5\x84 \xce\xb9'),
    # 3.6 Case folding U+2121 U+33C6 U+1D7BB.
    # XXX: skip this as it fails in UCS-2 mode
    #('\xe2\x84\xa1\xe3\x8f\x86\xf0\x9d\x9e\xbb',
    # 'telc\xe2\x88\x95kg\xcf\x83'),
    (None, None),
    # 3.7 Normalization of U+006a U+030c U+00A0 U+00AA.
    (b'j\xcc\x8c\xc2\xa0\xc2\xaa',
     b'\xc7\xb0 a'),
    # 3.8 Case folding U+1FB7 and normalization.
    (b'\xe1\xbe\xb7',
     b'\xe1\xbe\xb6\xce\xb9'),
    # 3.9 Self-reverting case folding U+01F0 and normalization.
    # The original test case is bogus, it says `\xc7\xf0'
    (b'\xc7\xb0',
     b'\xc7\xb0'),
    # 3.10 Self-reverting case folding U+0390 and normalization.
    (b'\xce\x90',
     b'\xce\x90'),
    # 3.11 Self-reverting case folding U+03B0 and normalization.
    (b'\xce\xb0',
     b'\xce\xb0'),
    # 3.12 Self-reverting case folding U+1E96 and normalization.
    (b'\xe1\xba\x96',
     b'\xe1\xba\x96'),
    # 3.13 Self-reverting case folding U+1F56 and normalization.
    (b'\xe1\xbd\x96',
     b'\xe1\xbd\x96'),
    # 3.14 ASCII space character U+0020.
    (b' ',
     b' '),
    # 3.15 Non-ASCII 8bit space character U+00A0.
    (b'\xc2\xa0',
     b' '),
    # 3.16 Non-ASCII multibyte space character U+1680.
    (b'\xe1\x9a\x80',
     None),
    # 3.17 Non-ASCII multibyte space character U+2000.
    (b'\xe2\x80\x80',
     b' '),
    # 3.18 Zero Width Space U+200b.
    (b'\xe2\x80\x8b',
     b''),
    # 3.19 Non-ASCII multibyte space character U+3000.
    (b'\xe3\x80\x80',
     b' '),
    # 3.20 ASCII control characters U+0010 U+007F.
    (b'\x10\x7f',
     b'\x10\x7f'),
    # 3.21 Non-ASCII 8bit control character U+0085.
    (b'\xc2\x85',
     None),
    # 3.22 Non-ASCII multibyte control character U+180E.
    (b'\xe1\xa0\x8e',
     None),
    # 3.23 Zero Width No-Break Space U+FEFF.
    (b'\xef\xbb\xbf',
     b''),
    # 3.24 Non-ASCII control character U+1D175.
    (b'\xf0\x9d\x85\xb5',
     None),
    # 3.25 Plane 0 private use character U+F123.
    (b'\xef\x84\xa3',
     None),
    # 3.26 Plane 15 private use character U+F1234.
    (b'\xf3\xb1\x88\xb4',
     None),
    # 3.27 Plane 16 private use character U+10F234.
    (b'\xf4\x8f\x88\xb4',
     None),
    # 3.28 Non-character code point U+8FFFE.
    (b'\xf2\x8f\xbf\xbe',
     None),
    # 3.29 Non-character code point U+10FFFF.
    (b'\xf4\x8f\xbf\xbf',
     None),
    # 3.30 Surrogate code U+DF42.
    (b'\xed\xbd\x82',
     None),
    # 3.31 Non-plain text character U+FFFD.
    (b'\xef\xbf\xbd',
     None),
    # 3.32 Ideographic description character U+2FF5.
    (b'\xe2\xbf\xb5',
     None),
    # 3.33 Display property character U+0341.
    (b'\xcd\x81',
     b'\xcc\x81'),
    # 3.34 Left-to-right mark U+200E.
    (b'\xe2\x80\x8e',
     None),
    # 3.35 Deprecated U+202A.
    (b'\xe2\x80\xaa',
     None),
    # 3.36 Language tagging character U+E0001.
    (b'\xf3\xa0\x80\x81',
     None),
    # 3.37 Language tagging character U+E0042.
    (b'\xf3\xa0\x81\x82',
     None),
    # 3.38 Bidi: RandALCat character U+05BE and LCat characters.
    (b'foo\xd6\xbebar',
     None),
    # 3.39 Bidi: RandALCat character U+FD50 and LCat characters.
    (b'foo\xef\xb5\x90bar',
     None),
    # 3.40 Bidi: RandALCat character U+FB38 and LCat characters.
    (b'foo\xef\xb9\xb6bar',
     b'foo \xd9\x8ebar'),
    # 3.41 Bidi: RandALCat without trailing RandALCat U+0627 U+0031.
    (b'\xd8\xa71',
     None),
    # 3.42 Bidi: RandALCat character U+0627 U+0031 U+0628.
    (b'\xd8\xa71\xd8\xa8',
     b'\xd8\xa71\xd8\xa8'),
    # 3.43 Unassigned code point U+E0002.
    # Skip this test as we allow unassigned
    #(b'\xf3\xa0\x80\x82',
    # None),
    (None, None),
    # 3.44 Larger test (shrinking).
    # Original test case reads \xc3\xdf
    (b'X\xc2\xad\xc3\x9f\xc4\xb0\xe2\x84\xa1j\xcc\x8c\xc2\xa0\xc2'
     b'\xaa\xce\xb0\xe2\x80\x80',
     b'xssi\xcc\x87tel\xc7\xb0 a\xce\xb0 '),
    # 3.45 Larger test (expanding).
    # Original test case reads \xc3\x9f
    (b'X\xc3\x9f\xe3\x8c\x96\xc4\xb0\xe2\x84\xa1\xe2\x92\x9f\xe3\x8c'
     b'\x80',
     b'xss\xe3\x82\xad\xe3\x83\xad\xe3\x83\xa1\xe3\x83\xbc\xe3'
     b'\x83\x88\xe3\x83\xabi\xcc\x87tel\x28d\x29\xe3\x82'
     b'\xa2\xe3\x83\x91\xe3\x83\xbc\xe3\x83\x88')
    ]
class NameprepTest(unittest.TestCase):
    """Drive the IDNA nameprep implementation through the libidn vectors."""
    def test_nameprep(self):
        """Check every nameprep test vector.

        Uses subTest() so each failing vector is reported individually,
        instead of the old pattern of catching every Exception and
        re-raising a single opaque support.TestFailed -- which hid the
        assertion diff and stopped at the first failure.
        """
        from encodings.idna import nameprep
        for pos, (orig, prepped) in enumerate(nameprep_tests):
            if orig is None:
                # (None, None) marks a vector skipped on this build.
                continue
            # The Unicode strings are given in UTF-8
            orig = str(orig, "utf-8", "surrogatepass")
            with self.subTest(test="3.%d" % (pos + 1)):
                if prepped is None:
                    # Input contains prohibited characters.
                    self.assertRaises(UnicodeError, nameprep, orig)
                else:
                    prepped = str(prepped, "utf-8", "surrogatepass")
                    self.assertEqual(nameprep(orig), prepped)
class IDNACodecTest(unittest.TestCase):
    """Exercise the "idna" codec through all its API surfaces: one-shot
    str/bytes conversion, stream readers, and incremental codecs."""
    def test_builtin_decode(self):
        for raw, expected in (
            (b"python.org", "python.org"),
            (b"python.org.", "python.org."),
            (b"xn--pythn-mua.org", "pyth\xf6n.org"),
            (b"xn--pythn-mua.org.", "pyth\xf6n.org."),
        ):
            self.assertEqual(str(raw, "idna"), expected)
    def test_builtin_encode(self):
        for text, expected in (
            ("python.org", b"python.org"),
            ("python.org.", b"python.org."),
            ("pyth\xf6n.org", b"xn--pythn-mua.org"),
            ("pyth\xf6n.org.", b"xn--pythn-mua.org."),
        ):
            self.assertEqual(text.encode("idna"), expected)
    def test_stream(self):
        # Reading past the end of the stream yields the empty string.
        stream_reader = codecs.getreader("idna")(io.BytesIO(b"abc"))
        stream_reader.read(3)
        self.assertEqual(stream_reader.read(), "")
    def test_incremental_decode(self):
        # Decoding one byte at a time must match a one-shot decode.
        for raw, expected in (
            (b"python.org", "python.org"),
            (b"python.org.", "python.org."),
            (b"xn--pythn-mua.org.", "pyth\xf6n.org."),
            (b"xn--pythn-mua.org.", "pyth\xf6n.org."),
        ):
            pieces = (bytes([byte]) for byte in raw)
            self.assertEqual("".join(codecs.iterdecode(pieces, "idna")),
                             expected)
        # A label is only emitted once its trailing dot has been seen.
        decoder = codecs.getincrementaldecoder("idna")()
        self.assertEqual(decoder.decode(b"xn--xam"), "")
        self.assertEqual(decoder.decode(b"ple-9ta.o"), "\xe4xample.")
        self.assertEqual(decoder.decode(b"rg"), "")
        self.assertEqual(decoder.decode(b"", True), "org")
        decoder.reset()
        self.assertEqual(decoder.decode(b"xn--xam"), "")
        self.assertEqual(decoder.decode(b"ple-9ta.o"), "\xe4xample.")
        self.assertEqual(decoder.decode(b"rg."), "org.")
        self.assertEqual(decoder.decode(b"", True), "")
    def test_incremental_encode(self):
        for text, expected in (
            ("python.org", b"python.org"),
            ("python.org.", b"python.org."),
            ("pyth\xf6n.org.", b"xn--pythn-mua.org."),
            ("pyth\xf6n.org.", b"xn--pythn-mua.org."),
        ):
            self.assertEqual(b"".join(codecs.iterencode(text, "idna")),
                             expected)
        # A label is only flushed once its trailing dot has been seen.
        encoder = codecs.getincrementalencoder("idna")()
        self.assertEqual(encoder.encode("\xe4x"), b"")
        self.assertEqual(encoder.encode("ample.org"), b"xn--xample-9ta.")
        self.assertEqual(encoder.encode("", True), b"org")
        encoder.reset()
        self.assertEqual(encoder.encode("\xe4x"), b"")
        self.assertEqual(encoder.encode("ample.org."), b"xn--xample-9ta.org.")
        self.assertEqual(encoder.encode("", True), b"")
    def test_errors(self):
        # "strict" works both ways; all other handlers are rejected by the
        # idna codec.
        "python.org".encode("idna", "strict")
        b"python.org".decode("idna", "strict")
        for errors in ("ignore", "replace", "backslashreplace",
                       "surrogateescape"):
            self.assertRaises(Exception, "python.org".encode, "idna", errors)
            self.assertRaises(Exception,
                              b"python.org".decode, "idna", errors)
class CodecsModuleTest(unittest.TestCase):
    """Tests for the module-level helper functions of the codecs module."""

    def test_decode(self):
        self.assertEqual(codecs.decode(b'\xe4\xf6\xfc', 'latin-1'),
                         '\xe4\xf6\xfc')
        self.assertRaises(TypeError, codecs.decode)
        # With no encoding argument the default (UTF-8) is used.
        self.assertEqual(codecs.decode(b'abc'), 'abc')
        self.assertRaises(UnicodeDecodeError, codecs.decode, b'\xff', 'ascii')

        # test keywords
        self.assertEqual(codecs.decode(obj=b'\xe4\xf6\xfc', encoding='latin-1'),
                         '\xe4\xf6\xfc')
        self.assertEqual(codecs.decode(b'[\xff]', 'ascii', errors='ignore'),
                         '[]')

    def test_encode(self):
        self.assertEqual(codecs.encode('\xe4\xf6\xfc', 'latin-1'),
                         b'\xe4\xf6\xfc')
        self.assertRaises(TypeError, codecs.encode)
        self.assertRaises(LookupError, codecs.encode, "foo", "__spam__")
        # With no encoding argument the default (UTF-8) is used.
        self.assertEqual(codecs.encode('abc'), b'abc')
        self.assertRaises(UnicodeEncodeError, codecs.encode, '\xffff', 'ascii')

        # test keywords
        self.assertEqual(codecs.encode(obj='\xe4\xf6\xfc', encoding='latin-1'),
                         b'\xe4\xf6\xfc')
        self.assertEqual(codecs.encode('[\xff]', 'ascii', errors='ignore'),
                         b'[]')

    def test_register(self):
        self.assertRaises(TypeError, codecs.register)
        # The search function argument must be callable.
        self.assertRaises(TypeError, codecs.register, 42)

    def test_unregister(self):
        name = "nonexistent_codec_name"
        search_function = mock.Mock()
        codecs.register(search_function)
        # The Mock's return value is not a valid CodecInfo, so the lookup
        # fails (with TypeError) only *after* consulting the function.
        self.assertRaises(TypeError, codecs.lookup, name)
        search_function.assert_called_with(name)
        search_function.reset_mock()

        codecs.unregister(search_function)
        # Once unregistered, the search function must no longer be consulted.
        self.assertRaises(LookupError, codecs.lookup, name)
        search_function.assert_not_called()

    def test_lookup(self):
        self.assertRaises(TypeError, codecs.lookup)
        self.assertRaises(LookupError, codecs.lookup, "__spam__")
        # A whitespace-only name never resolves to a codec.
        self.assertRaises(LookupError, codecs.lookup, " ")

    def test_getencoder(self):
        self.assertRaises(TypeError, codecs.getencoder)
        self.assertRaises(LookupError, codecs.getencoder, "__spam__")

    def test_getdecoder(self):
        self.assertRaises(TypeError, codecs.getdecoder)
        self.assertRaises(LookupError, codecs.getdecoder, "__spam__")

    def test_getreader(self):
        self.assertRaises(TypeError, codecs.getreader)
        self.assertRaises(LookupError, codecs.getreader, "__spam__")

    def test_getwriter(self):
        self.assertRaises(TypeError, codecs.getwriter)
        self.assertRaises(LookupError, codecs.getwriter, "__spam__")

    def test_lookup_issue1813(self):
        # Issue #1813: under Turkish locales, lookup of some codecs failed
        # because 'I' is lowercased as "ı" (dotless i)
        oldlocale = locale.setlocale(locale.LC_CTYPE)
        self.addCleanup(locale.setlocale, locale.LC_CTYPE, oldlocale)
        try:
            locale.setlocale(locale.LC_CTYPE, 'tr_TR')
        except locale.Error:
            # Unsupported locale on this system
            self.skipTest('test needs Turkish locale')
        c = codecs.lookup('ASCII')
        self.assertEqual(c.name, 'ascii')

    def test_all(self):
        # The public surface listed here must match __all__ exactly, and
        # every listed name must actually exist on the module.
        api = (
            "encode", "decode",
            "register", "CodecInfo", "Codec", "IncrementalEncoder",
            "IncrementalDecoder", "StreamReader", "StreamWriter", "lookup",
            "getencoder", "getdecoder", "getincrementalencoder",
            "getincrementaldecoder", "getreader", "getwriter",
            "register_error", "lookup_error",
            "strict_errors", "replace_errors", "ignore_errors",
            "xmlcharrefreplace_errors", "backslashreplace_errors",
            "namereplace_errors",
            "open", "EncodedFile",
            "iterencode", "iterdecode",
            "BOM", "BOM_BE", "BOM_LE",
            "BOM_UTF8", "BOM_UTF16", "BOM_UTF16_BE", "BOM_UTF16_LE",
            "BOM_UTF32", "BOM_UTF32_BE", "BOM_UTF32_LE",
            "BOM32_BE", "BOM32_LE", "BOM64_BE", "BOM64_LE",    # Undocumented
            "StreamReaderWriter", "StreamRecoder",
        )
        self.assertCountEqual(api, codecs.__all__)
        for api in codecs.__all__:
            getattr(codecs, api)

    def test_open(self):
        self.addCleanup(os_helper.unlink, os_helper.TESTFN)
        for mode in ('w', 'r', 'r+', 'w+', 'a', 'a+'):
            with self.subTest(mode), \
                    codecs.open(os_helper.TESTFN, mode, 'ascii') as file:
                self.assertIsInstance(file, codecs.StreamReaderWriter)

    def test_undefined(self):
        # The 'undefined' codec refuses every operation, for every input
        # and every error handler.
        self.assertRaises(UnicodeError, codecs.encode, 'abc', 'undefined')
        self.assertRaises(UnicodeError, codecs.decode, b'abc', 'undefined')
        self.assertRaises(UnicodeError, codecs.encode, '', 'undefined')
        self.assertRaises(UnicodeError, codecs.decode, b'', 'undefined')
        for errors in ('strict', 'ignore', 'replace', 'backslashreplace'):
            self.assertRaises(UnicodeError,
                codecs.encode, 'abc', 'undefined', errors)
            self.assertRaises(UnicodeError,
                codecs.decode, b'abc', 'undefined', errors)

    def test_file_closes_if_lookup_error_raised(self):
        # A bad encoding must not leak the already-opened file object.
        mock_open = mock.mock_open()
        with mock.patch('builtins.open', mock_open) as file:
            with self.assertRaises(LookupError):
                codecs.open(os_helper.TESTFN, 'wt', 'invalid-encoding')

            file().close.assert_called()
class StreamReaderTest(unittest.TestCase):
    """Exercise the StreamReader class returned by codecs.getreader()."""

    def setUp(self):
        # UTF-8 bytes for two Hangul syllables separated by a newline.
        self.reader = codecs.getreader('utf-8')
        self.stream = io.BytesIO(b'\xed\x95\x9c\n\xea\xb8\x80')

    def test_readlines(self):
        wrapped = self.reader(self.stream)
        self.assertEqual(wrapped.readlines(), ['\ud55c\n', '\uae00'])
class EncodedFileTest(unittest.TestCase):
    """Tests for the codecs.EncodedFile transcoding wrapper."""

    def test_basic(self):
        # Reading transcodes the underlying UTF-8 bytes to UTF-16-LE.
        raw = io.BytesIO(b'\xed\x95\x9c\n\xea\xb8\x80')
        wrapped = codecs.EncodedFile(raw, 'utf-16-le', 'utf-8')
        self.assertEqual(wrapped.read(), b'\\\xd5\n\x00\x00\xae')

        # Writing transcodes UTF-8 input to Latin-1 on the way out.
        raw = io.BytesIO()
        wrapped = codecs.EncodedFile(raw, 'utf-8', 'latin-1')
        wrapped.write(b'\xc3\xbc')
        self.assertEqual(raw.getvalue(), b'\xfc')
# Every text encoding shipped with CPython; each entry is exercised by
# BasicUnicodeTest through the stateless, stream and incremental interfaces.
all_unicode_encodings = [
    "ascii",
    "big5",
    "big5hkscs",
    "charmap",
    "cp037",
    "cp1006",
    "cp1026",
    "cp1125",
    "cp1140",
    "cp1250",
    "cp1251",
    "cp1252",
    "cp1253",
    "cp1254",
    "cp1255",
    "cp1256",
    "cp1257",
    "cp1258",
    "cp424",
    "cp437",
    "cp500",
    "cp720",
    "cp737",
    "cp775",
    "cp850",
    "cp852",
    "cp855",
    "cp856",
    "cp857",
    "cp858",
    "cp860",
    "cp861",
    "cp862",
    "cp863",
    "cp864",
    "cp865",
    "cp866",
    "cp869",
    "cp874",
    "cp875",
    "cp932",
    "cp949",
    "cp950",
    "euc_jis_2004",
    "euc_jisx0213",
    "euc_jp",
    "euc_kr",
    "gb18030",
    "gb2312",
    "gbk",
    "hp_roman8",
    "hz",
    "idna",
    "iso2022_jp",
    "iso2022_jp_1",
    "iso2022_jp_2",
    "iso2022_jp_2004",
    "iso2022_jp_3",
    "iso2022_jp_ext",
    "iso2022_kr",
    "iso8859_1",
    "iso8859_10",
    "iso8859_11",
    "iso8859_13",
    "iso8859_14",
    "iso8859_15",
    "iso8859_16",
    "iso8859_2",
    "iso8859_3",
    "iso8859_4",
    "iso8859_5",
    "iso8859_6",
    "iso8859_7",
    "iso8859_8",
    "iso8859_9",
    "johab",
    "koi8_r",
    "koi8_t",
    "koi8_u",
    "kz1048",
    "latin_1",
    "mac_cyrillic",
    "mac_greek",
    "mac_iceland",
    "mac_latin2",
    "mac_roman",
    "mac_turkish",
    "palmos",
    "ptcp154",
    "punycode",
    "raw_unicode_escape",
    "shift_jis",
    "shift_jis_2004",
    "shift_jisx0213",
    "tis_620",
    "unicode_escape",
    "utf_16",
    "utf_16_be",
    "utf_16_le",
    "utf_7",
    "utf_8",
]

# These two codecs only exist on Windows builds of CPython.
if hasattr(codecs, "mbcs_encode"):
    all_unicode_encodings.append("mbcs")
if hasattr(codecs, "oem_encode"):
    all_unicode_encodings.append("oem")

# The following encoding is not tested, because it's not supposed
# to work:
#    "undefined"

# The following encodings don't work in stateful mode
broken_unicode_with_stateful = [
    "punycode",
]
class BasicUnicodeTest(unittest.TestCase, MixInCheckStateHandling):
    """Smoke-test every encoding in all_unicode_encodings.

    Each codec is exercised through the stateless encoder/decoder, the
    stream reader/writer, the incremental interfaces and
    iterencode()/iterdecode() (where available).
    """

    def test_basics(self):
        s = "abc123"  # all codecs should be able to encode these
        for encoding in all_unicode_encodings:
            name = codecs.lookup(encoding).name
            if encoding.endswith("_codec"):
                name += "_codec"
            elif encoding == "latin_1":
                name = "latin_1"
            # Aside from the exceptions above, the canonical codec name must
            # match the requested name up to '-'/'_' normalization.
            self.assertEqual(encoding.replace("_", "-"), name.replace("_", "-"))

            (b, size) = codecs.getencoder(encoding)(s)
            self.assertEqual(size, len(s), "encoding=%r" % encoding)
            (chars, size) = codecs.getdecoder(encoding)(b)
            self.assertEqual(chars, s, "encoding=%r" % encoding)

            if encoding not in broken_unicode_with_stateful:
                # check stream reader/writer
                q = Queue(b"")
                writer = codecs.getwriter(encoding)(q)
                encodedresult = b""
                for c in s:
                    # Write one character at a time to force the writer to
                    # work in the smallest possible increments.
                    writer.write(c)
                    chunk = q.read()
                    self.assertTrue(type(chunk) is bytes, type(chunk))
                    encodedresult += chunk

                q = Queue(b"")
                reader = codecs.getreader(encoding)(q)
                decodedresult = ""
                for c in encodedresult:
                    # Feed the reader byte by byte; partial sequences must be
                    # buffered internally instead of raising.
                    q.write(bytes([c]))
                    decodedresult += reader.read()
                self.assertEqual(decodedresult, s, "encoding=%r" % encoding)

            if encoding not in broken_unicode_with_stateful:
                # check incremental decoder/encoder and iterencode()/iterdecode()
                try:
                    encoder = codecs.getincrementalencoder(encoding)()
                except LookupError:  # no IncrementalEncoder
                    pass
                else:
                    # check incremental decoder/encoder
                    encodedresult = b""
                    for c in s:
                        encodedresult += encoder.encode(c)
                    encodedresult += encoder.encode("", True)
                    decoder = codecs.getincrementaldecoder(encoding)()
                    decodedresult = ""
                    for c in encodedresult:
                        decodedresult += decoder.decode(bytes([c]))
                    decodedresult += decoder.decode(b"", True)
                    self.assertEqual(decodedresult, s,
                                     "encoding=%r" % encoding)

                    # check iterencode()/iterdecode()
                    result = "".join(codecs.iterdecode(
                            codecs.iterencode(s, encoding), encoding))
                    self.assertEqual(result, s, "encoding=%r" % encoding)

                    # check iterencode()/iterdecode() with empty string
                    result = "".join(codecs.iterdecode(
                            codecs.iterencode("", encoding), encoding))
                    self.assertEqual(result, "")

                if encoding not in ("idna", "mbcs"):
                    # check incremental decoder/encoder with errors argument
                    try:
                        encoder = codecs.getincrementalencoder(encoding)("ignore")
                    except LookupError:  # no IncrementalEncoder
                        pass
                    else:
                        encodedresult = b"".join(encoder.encode(c) for c in s)
                        decoder = codecs.getincrementaldecoder(encoding)("ignore")
                        decodedresult = "".join(decoder.decode(bytes([c]))
                                                for c in encodedresult)
                        self.assertEqual(decodedresult, s,
                                         "encoding=%r" % encoding)

    @support.cpython_only
    def test_basics_capi(self):
        """Same incremental round-trip as test_basics, via the C API."""
        s = "abc123"  # all codecs should be able to encode these
        for encoding in all_unicode_encodings:
            if encoding not in broken_unicode_with_stateful:
                # check incremental decoder/encoder (fetched via the C API)
                try:
                    cencoder = _testcapi.codec_incrementalencoder(encoding)
                except LookupError:  # no IncrementalEncoder
                    pass
                else:
                    # check C API
                    encodedresult = b""
                    for c in s:
                        encodedresult += cencoder.encode(c)
                    encodedresult += cencoder.encode("", True)
                    cdecoder = _testcapi.codec_incrementaldecoder(encoding)
                    decodedresult = ""
                    for c in encodedresult:
                        decodedresult += cdecoder.decode(bytes([c]))
                    decodedresult += cdecoder.decode(b"", True)
                    self.assertEqual(decodedresult, s,
                                     "encoding=%r" % encoding)

                if encoding not in ("idna", "mbcs"):
                    # check incremental decoder/encoder with errors argument
                    try:
                        cencoder = _testcapi.codec_incrementalencoder(encoding, "ignore")
                    except LookupError:  # no IncrementalEncoder
                        pass
                    else:
                        encodedresult = b"".join(cencoder.encode(c) for c in s)
                        cdecoder = _testcapi.codec_incrementaldecoder(encoding, "ignore")
                        decodedresult = "".join(cdecoder.decode(bytes([c]))
                                                for c in encodedresult)
                        self.assertEqual(decodedresult, s,
                                         "encoding=%r" % encoding)

    def test_seek(self):
        # all codecs should be able to encode these
        s = "%s\n%s\n" % (100*"abc123", 100*"def456")
        for encoding in all_unicode_encodings:
            if encoding == "idna":  # FIXME: See SF bug #1163178
                continue
            if encoding in broken_unicode_with_stateful:
                continue
            reader = codecs.getreader(encoding)(io.BytesIO(s.encode(encoding)))
            for t in range(5):
                # Test that calling seek resets the internal codec state and buffers
                reader.seek(0, 0)
                data = reader.read()
                self.assertEqual(s, data)

    def test_bad_decode_args(self):
        for encoding in all_unicode_encodings:
            decoder = codecs.getdecoder(encoding)
            self.assertRaises(TypeError, decoder)
            if encoding not in ("idna", "punycode"):
                # An int is not a bytes-like object.
                self.assertRaises(TypeError, decoder, 42)

    def test_bad_encode_args(self):
        for encoding in all_unicode_encodings:
            encoder = codecs.getencoder(encoding)
            self.assertRaises(TypeError, encoder)

    def test_encoding_map_type_initialized(self):
        from encodings import cp1140
        # This used to crash, we are only verifying there's no crash.
        table_type = type(cp1140.encoding_table)
        self.assertEqual(table_type, table_type)

    def test_decoder_state(self):
        # Check that getstate() and setstate() handle the state properly
        u = "abc123"
        for encoding in all_unicode_encodings:
            if encoding not in broken_unicode_with_stateful:
                self.check_state_handling_decode(encoding, u, u.encode(encoding))
                self.check_state_handling_encode(encoding, u, u.encode(encoding))
class CharmapTest(unittest.TestCase):
    """Tests for codecs.charmap_decode() with its three map flavours:
    str maps, int->str dict maps and int->int dict maps.
    """

    def test_decode_with_string_map(self):
        # A str map translates byte value b to the character at index b.
        decode = codecs.charmap_decode
        self.assertEqual(decode(b"\x00\x01\x02", "strict", "abc"),
                         ("abc", 3))
        self.assertEqual(decode(b"\x00\x01\x02", "strict", "\U0010FFFFbc"),
                         ("\U0010FFFFbc", 3))

        # Bytes past the end of the map, or mapped to U+FFFE, are undefined.
        for bad_map in ("ab", "ab\ufffe"):
            self.assertRaises(UnicodeDecodeError,
                              decode, b"\x00\x01\x02", "strict", bad_map)
            self.assertEqual(decode(b"\x00\x01\x02", "replace", bad_map),
                             ("ab\ufffd", 3))
            self.assertEqual(decode(b"\x00\x01\x02", "backslashreplace",
                                    bad_map),
                             ("ab\\x02", 3))
            self.assertEqual(decode(b"\x00\x01\x02", "ignore", bad_map),
                             ("ab", 3))

        # An empty map with "ignore" consumes everything, producing nothing.
        allbytes = bytes(range(256))
        self.assertEqual(decode(allbytes, "ignore", ""),
                         ("", len(allbytes)))

    def test_decode_with_int2str_map(self):
        # A dict map may produce multi-character or empty replacements.
        decode = codecs.charmap_decode
        self.assertEqual(decode(b"\x00\x01\x02", "strict",
                                {0: 'a', 1: 'b', 2: 'c'}),
                         ("abc", 3))
        self.assertEqual(decode(b"\x00\x01\x02", "strict",
                                {0: 'Aa', 1: 'Bb', 2: 'Cc'}),
                         ("AaBbCc", 3))
        self.assertEqual(decode(b"\x00\x01\x02", "strict",
                                {0: '\U0010FFFF', 1: 'b', 2: 'c'}),
                         ("\U0010FFFFbc", 3))
        self.assertEqual(decode(b"\x00\x01\x02", "strict",
                                {0: 'a', 1: 'b', 2: ''}),
                         ("ab", 3))

        # A missing key, a None value, or a "\ufffe" value (Issue #14850)
        # all mean "undefined mapping".
        for bad_map in ({0: 'a', 1: 'b'},
                        {0: 'a', 1: 'b', 2: None},
                        {0: 'a', 1: 'b', 2: '\ufffe'}):
            self.assertRaises(UnicodeDecodeError,
                              decode, b"\x00\x01\x02", "strict", bad_map)
            self.assertEqual(decode(b"\x00\x01\x02", "replace", bad_map),
                             ("ab\ufffd", 3))
            self.assertEqual(decode(b"\x00\x01\x02", "backslashreplace",
                                    bad_map),
                             ("ab\\x02", 3))
            self.assertEqual(decode(b"\x00\x01\x02", "ignore", bad_map),
                             ("ab", 3))

        allbytes = bytes(range(256))
        self.assertEqual(decode(allbytes, "ignore", {}),
                         ("", len(allbytes)))

        # Targets outside range(0x110000) are a TypeError, not a decode error.
        for bad_target in (-2, 999999999):
            self.assertRaisesRegex(TypeError,
                "character mapping must be in range\\(0x110000\\)",
                decode,
                b"\x00\x01\x02", "strict", {0: "A", 1: 'Bb', 2: bad_target})

    def test_decode_with_int2int_map(self):
        # A dict map may also give targets as integer code points.
        decode = codecs.charmap_decode
        a, b, c = ord('a'), ord('b'), ord('c')
        self.assertEqual(decode(b"\x00\x01\x02", "strict",
                                {0: a, 1: b, 2: c}),
                         ("abc", 3))

        # Issue #15379: code points beyond the BMP are accepted.
        self.assertEqual(decode(b"\x00\x01\x02", "strict",
                                {0: 0x10FFFF, 1: b, 2: c}),
                         ("\U0010FFFFbc", 3))
        self.assertEqual(decode(b"\x00\x01\x02", "strict",
                                {0: sys.maxunicode, 1: b, 2: c}),
                         (chr(sys.maxunicode) + "bc", 3))
        self.assertRaises(TypeError,
                          decode, b"\x00\x01\x02", "strict",
                          {0: sys.maxunicode + 1, 1: b, 2: c})

        # Missing keys and 0xFFFE targets mean "undefined mapping".
        for bad_map in ({0: a, 1: b},
                        {0: a, 1: b, 2: 0xFFFE}):
            self.assertRaises(UnicodeDecodeError,
                              decode, b"\x00\x01\x02", "strict", bad_map)
            self.assertEqual(decode(b"\x00\x01\x02", "replace", bad_map),
                             ("ab\ufffd", 3))
            self.assertEqual(decode(b"\x00\x01\x02", "backslashreplace",
                                    bad_map),
                             ("ab\\x02", 3))
            self.assertEqual(decode(b"\x00\x01\x02", "ignore", bad_map),
                             ("ab", 3))
class WithStmtTest(unittest.TestCase):
    """Verify the codecs wrapper objects work as context managers."""

    def test_encodedfile(self):
        raw = io.BytesIO(b"\xc3\xbc")
        with codecs.EncodedFile(raw, "latin-1", "utf-8") as wrapped:
            self.assertEqual(wrapped.read(), b"\xfc")
        # Leaving the with-block must close the underlying stream.
        self.assertTrue(raw.closed)

    def test_streamreaderwriter(self):
        raw = io.BytesIO(b"\xc3\xbc")
        info = codecs.lookup("utf-8")
        with codecs.StreamReaderWriter(raw, info.streamreader,
                                       info.streamwriter, 'strict') as srw:
            self.assertEqual(srw.read(), "\xfc")
class TypesTest(unittest.TestCase):
    """Check the input types accepted by the low-level codec functions."""

    def test_decode_unicode(self):
        # Most low-level decoders operate on bytes-like objects only and
        # must reject str input with a TypeError.
        decoders = [
            codecs.utf_7_decode,
            codecs.utf_8_decode,
            codecs.utf_16_le_decode,
            codecs.utf_16_be_decode,
            codecs.utf_16_ex_decode,
            codecs.utf_32_decode,
            codecs.utf_32_le_decode,
            codecs.utf_32_be_decode,
            codecs.utf_32_ex_decode,
            codecs.latin_1_decode,
            codecs.ascii_decode,
            codecs.charmap_decode,
        ]
        if hasattr(codecs, "mbcs_decode"):
            decoders.append(codecs.mbcs_decode)
        for decode in decoders:
            self.assertRaises(TypeError, decode, "xxx")

    def test_unicode_escape(self):
        # Escape-decoding a unicode string is supported and gives the same
        # result as decoding the equivalent ASCII bytes string.
        for decode in (codecs.unicode_escape_decode,
                       codecs.raw_unicode_escape_decode):
            self.assertEqual(decode(r"\u1234"), ("\u1234", 6))
            self.assertEqual(decode(br"\u1234"), ("\u1234", 6))

            self.assertRaises(UnicodeDecodeError, decode, br"\U00110000")
            self.assertEqual(decode(r"\U00110000", "replace"),
                             ("\ufffd", 10))
            self.assertEqual(decode(r"\U00110000", "backslashreplace"),
                             (r"\x5c\x55\x30\x30\x31\x31\x30\x30\x30\x30", 10))
class UnicodeEscapeTest(ReadTest, unittest.TestCase):
    """Tests for the bytes<->str 'unicode-escape' codec."""
    encoding = "unicode-escape"
    # Disable the inherited lone-surrogate check from ReadTest -- lone
    # surrogates are representable in escaped form, so it does not apply.
    test_lone_surrogates = None

    def test_empty(self):
        self.assertEqual(codecs.unicode_escape_encode(""), (b"", 0))
        self.assertEqual(codecs.unicode_escape_decode(b""), ("", 0))

    def test_raw_encode(self):
        # Printable ASCII (except the backslash itself) passes through
        # unescaped.
        encode = codecs.unicode_escape_encode
        for b in range(32, 127):
            if b != b'\\'[0]:
                self.assertEqual(encode(chr(b)), (bytes([b]), 1))

    def test_raw_decode(self):
        # Any byte that is not a backslash decodes to itself.
        decode = codecs.unicode_escape_decode
        for b in range(256):
            if b != b'\\'[0]:
                self.assertEqual(decode(bytes([b]) + b'0'), (chr(b) + '0', 2))

    def test_escape_encode(self):
        encode = codecs.unicode_escape_encode
        check = coding_checker(self, encode)
        check('\t', br'\t')
        check('\n', br'\n')
        check('\r', br'\r')
        check('\\', br'\\')
        # Other control characters and non-ASCII use \xNN escapes.
        for b in range(32):
            if chr(b) not in '\t\n\r':
                check(chr(b), ('\\x%02x' % b).encode())
        for b in range(127, 256):
            check(chr(b), ('\\x%02x' % b).encode())
        check('\u20ac', br'\u20ac')
        check('\U0001d120', br'\U0001d120')

    def test_escape_decode(self):
        decode = codecs.unicode_escape_decode
        check = coding_checker(self, decode)
        # Line continuation, simple escapes, octal and hex escapes.
        check(b"[\\\n]", "[]")
        check(br'[\"]', '["]')
        check(br"[\']", "[']")
        check(br"[\\]", r"[\]")
        check(br"[\a]", "[\x07]")
        check(br"[\b]", "[\x08]")
        check(br"[\t]", "[\x09]")
        check(br"[\n]", "[\x0a]")
        check(br"[\v]", "[\x0b]")
        check(br"[\f]", "[\x0c]")
        check(br"[\r]", "[\x0d]")
        check(br"[\7]", "[\x07]")
        check(br"[\78]", "[\x078]")
        check(br"[\41]", "[!]")
        check(br"[\418]", "[!8]")
        check(br"[\101]", "[A]")
        check(br"[\1010]", "[A0]")
        check(br"[\x41]", "[A]")
        check(br"[\x410]", "[A0]")
        check(br"\u20ac", "\u20ac")
        check(br"\U0001d120", "\U0001d120")
        # Unrecognized escapes are passed through, but emit a
        # DeprecationWarning about their eventual removal.
        for i in range(97, 123):
            b = bytes([i])
            if b not in b'abfnrtuvx':
                with self.assertWarns(DeprecationWarning):
                    check(b"\\" + b, "\\" + chr(i))
            if b.upper() not in b'UN':
                with self.assertWarns(DeprecationWarning):
                    check(b"\\" + b.upper(), "\\" + chr(i-32))
        with self.assertWarns(DeprecationWarning):
            check(br"\8", "\\8")
        with self.assertWarns(DeprecationWarning):
            check(br"\9", "\\9")
        with self.assertWarns(DeprecationWarning):
            check(b"\\\xfa", "\\\xfa")

    def test_decode_errors(self):
        # Truncated \x, \u and \U escapes raise, and the error handlers
        # skip/replace the whole malformed sequence.
        decode = codecs.unicode_escape_decode
        for c, d in (b'x', 2), (b'u', 4), (b'U', 4):
            for i in range(d):
                self.assertRaises(UnicodeDecodeError, decode,
                                  b"\\" + c + b"0"*i)
                self.assertRaises(UnicodeDecodeError, decode,
                                  b"[\\" + c + b"0"*i + b"]")
                data = b"[\\" + c + b"0"*i + b"]\\" + c + b"0"*i
                self.assertEqual(decode(data, "ignore"), ("[]", len(data)))
                self.assertEqual(decode(data, "replace"),
                                 ("[\ufffd]\ufffd", len(data)))
        # \U escapes above the Unicode range are also errors.
        self.assertRaises(UnicodeDecodeError, decode, br"\U00110000")
        self.assertEqual(decode(br"\U00110000", "ignore"), ("", 10))
        self.assertEqual(decode(br"\U00110000", "replace"), ("\ufffd", 10))

    def test_partial(self):
        # Feed the encoded form byte by byte: a character must only appear
        # once its full escape sequence has been seen.
        self.check_partial(
            "\x00\t\n\r\\\xff\uffff\U00010000",
            [
                '',
                '',
                '',
                '\x00',
                '\x00',
                '\x00\t',
                '\x00\t',
                '\x00\t\n',
                '\x00\t\n',
                '\x00\t\n\r',
                '\x00\t\n\r',
                '\x00\t\n\r\\',
                '\x00\t\n\r\\',
                '\x00\t\n\r\\',
                '\x00\t\n\r\\',
                '\x00\t\n\r\\\xff',
                '\x00\t\n\r\\\xff',
                '\x00\t\n\r\\\xff',
                '\x00\t\n\r\\\xff',
                '\x00\t\n\r\\\xff',
                '\x00\t\n\r\\\xff',
                '\x00\t\n\r\\\xff\uffff',
                '\x00\t\n\r\\\xff\uffff',
                '\x00\t\n\r\\\xff\uffff',
                '\x00\t\n\r\\\xff\uffff',
                '\x00\t\n\r\\\xff\uffff',
                '\x00\t\n\r\\\xff\uffff',
                '\x00\t\n\r\\\xff\uffff',
                '\x00\t\n\r\\\xff\uffff',
                '\x00\t\n\r\\\xff\uffff',
                '\x00\t\n\r\\\xff\uffff',
                '\x00\t\n\r\\\xff\uffff\U00010000',
            ]
        )
class RawUnicodeEscapeTest(ReadTest, unittest.TestCase):
    """Tests for the bytes<->str 'raw-unicode-escape' codec."""
    encoding = "raw-unicode-escape"
    # Disable the inherited lone-surrogate check from ReadTest -- lone
    # surrogates are representable in escaped form, so it does not apply.
    test_lone_surrogates = None

    def test_empty(self):
        self.assertEqual(codecs.raw_unicode_escape_encode(""), (b"", 0))
        self.assertEqual(codecs.raw_unicode_escape_decode(b""), ("", 0))

    def test_raw_encode(self):
        # Every Latin-1 character passes through as its own byte.
        encode = codecs.raw_unicode_escape_encode
        for b in range(256):
            self.assertEqual(encode(chr(b)), (bytes([b]), 1))

    def test_raw_decode(self):
        # Every byte decodes to the corresponding Latin-1 character.
        decode = codecs.raw_unicode_escape_decode
        for b in range(256):
            self.assertEqual(decode(bytes([b]) + b'0'), (chr(b) + '0', 2))

    def test_escape_encode(self):
        # Only \u and \U escapes are produced; a backslash followed by
        # anything else is emitted verbatim.
        encode = codecs.raw_unicode_escape_encode
        check = coding_checker(self, encode)
        for b in range(256):
            if b not in b'uU':
                check('\\' + chr(b), b'\\' + bytes([b]))
        check('\u20ac', br'\u20ac')
        check('\U0001d120', br'\U0001d120')

    def test_escape_decode(self):
        # Only \u and \U escapes are interpreted; any other backslash
        # sequence decodes literally.
        decode = codecs.raw_unicode_escape_decode
        check = coding_checker(self, decode)
        for b in range(256):
            if b not in b'uU':
                check(b'\\' + bytes([b]), '\\' + chr(b))
        check(br"\u20ac", "\u20ac")
        check(br"\U0001d120", "\U0001d120")

    def test_decode_errors(self):
        # Truncated \u and \U escapes raise, and the error handlers
        # skip/replace the whole malformed sequence.
        decode = codecs.raw_unicode_escape_decode
        for c, d in (b'u', 4), (b'U', 4):
            for i in range(d):
                self.assertRaises(UnicodeDecodeError, decode,
                                  b"\\" + c + b"0"*i)
                self.assertRaises(UnicodeDecodeError, decode,
                                  b"[\\" + c + b"0"*i + b"]")
                data = b"[\\" + c + b"0"*i + b"]\\" + c + b"0"*i
                self.assertEqual(decode(data, "ignore"), ("[]", len(data)))
                self.assertEqual(decode(data, "replace"),
                                 ("[\ufffd]\ufffd", len(data)))
        # \U escapes above the Unicode range are also errors.
        self.assertRaises(UnicodeDecodeError, decode, br"\U00110000")
        self.assertEqual(decode(br"\U00110000", "ignore"), ("", 10))
        self.assertEqual(decode(br"\U00110000", "replace"), ("\ufffd", 10))

    def test_partial(self):
        # Feed the encoded form byte by byte: a character must only appear
        # once its full escape sequence has been seen.
        self.check_partial(
            "\x00\t\n\r\\\xff\uffff\U00010000",
            [
                '\x00',
                '\x00\t',
                '\x00\t\n',
                '\x00\t\n\r',
                '\x00\t\n\r',
                '\x00\t\n\r\\\xff',
                '\x00\t\n\r\\\xff',
                '\x00\t\n\r\\\xff',
                '\x00\t\n\r\\\xff',
                '\x00\t\n\r\\\xff',
                '\x00\t\n\r\\\xff',
                '\x00\t\n\r\\\xff\uffff',
                '\x00\t\n\r\\\xff\uffff',
                '\x00\t\n\r\\\xff\uffff',
                '\x00\t\n\r\\\xff\uffff',
                '\x00\t\n\r\\\xff\uffff',
                '\x00\t\n\r\\\xff\uffff',
                '\x00\t\n\r\\\xff\uffff',
                '\x00\t\n\r\\\xff\uffff',
                '\x00\t\n\r\\\xff\uffff',
                '\x00\t\n\r\\\xff\uffff',
                '\x00\t\n\r\\\xff\uffff\U00010000',
            ]
        )
class EscapeEncodeTest(unittest.TestCase):
    """Tests for the bytes->bytes codecs.escape_encode() helper."""

    def test_escape_encode(self):
        # (input, (escaped output, number of input bytes consumed))
        cases = (
            (b'', (b'', 0)),
            (b'foobar', (b'foobar', 6)),
            (b'spam\0eggs', (b'spam\\x00eggs', 9)),
            (b'a\'b', (b"a\\'b", 3)),
            (b'b\\c', (b'b\\\\c', 3)),
            (b'c\nd', (b'c\\nd', 3)),
            (b'd\re', (b'd\\re', 3)),
            (b'f\x7fg', (b'f\\x7fg', 3)),
        )
        for data, expected in cases:
            with self.subTest(data=data):
                self.assertEqual(codecs.escape_encode(data), expected)
        # Only exact bytes objects are accepted, not str or bytearray.
        self.assertRaises(TypeError, codecs.escape_encode, 'spam')
        self.assertRaises(TypeError, codecs.escape_encode, bytearray(b'spam'))
class SurrogateEscapeTest(unittest.TestCase):
    """Tests for the 'surrogateescape' error handler (PEP 383)."""

    def _check_roundtrip(self, raw, text, encoding):
        # Undecodable bytes must map to lone low surrogates and back again.
        self.assertEqual(raw.decode(encoding, "surrogateescape"), text)
        self.assertEqual(text.encode(encoding, "surrogateescape"), raw)

    def test_utf8(self):
        # A stray 0x80 byte becomes U+DC80.
        self._check_roundtrip(b"foo\x80bar", "foo\udc80bar", "utf-8")
        # A UTF-8-encoded surrogate is itself invalid UTF-8, so each of
        # its bytes is escaped individually.
        self._check_roundtrip(b"\xed\xb0\x80", "\udced\udcb0\udc80", "utf-8")

    def test_ascii(self):
        # Any byte >= 0x80 is invalid ASCII.
        self._check_roundtrip(b"foo\x80bar", "foo\udc80bar", "ascii")

    def test_charmap(self):
        # \xa5 has no mapping in iso-8859-3.
        self._check_roundtrip(b"foo\xa5bar", "foo\udca5bar", "iso-8859-3")

    def test_latin1(self):
        # Issue #6373: encode-only direction for Latin-1.
        self.assertEqual(
            "\udce4\udceb\udcef\udcf6\udcfc".encode("latin-1",
                                                    "surrogateescape"),
            b"\xe4\xeb\xef\xf6\xfc")
class BomTest(unittest.TestCase):
    """Check BOM handling of the writable files from codecs.open()."""

    def test_seek0(self):
        data = "1234567890"
        # All encodings whose StreamWriter emits a BOM (or may, for the
        # endianness-neutral variants).
        tests = ("utf-16",
                 "utf-16-le",
                 "utf-16-be",
                 "utf-32",
                 "utf-32-le",
                 "utf-32-be")
        self.addCleanup(os_helper.unlink, os_helper.TESTFN)
        for encoding in tests:
            # Check if the BOM is written only once
            with codecs.open(os_helper.TESTFN, 'w+', encoding=encoding) as f:
                f.write(data)
                f.write(data)
                f.seek(0)
                self.assertEqual(f.read(), data * 2)
                f.seek(0)
                self.assertEqual(f.read(), data * 2)

            # Check that the BOM is written after a seek(0)
            with codecs.open(os_helper.TESTFN, 'w+', encoding=encoding) as f:
                f.write(data[0])
                self.assertNotEqual(f.tell(), 0)
                f.seek(0)
                f.write(data)
                f.seek(0)
                self.assertEqual(f.read(), data)

            # (StreamWriter) Check that the BOM is written after a seek(0)
            with codecs.open(os_helper.TESTFN, 'w+', encoding=encoding) as f:
                f.writer.write(data[0])
                self.assertNotEqual(f.writer.tell(), 0)
                f.writer.seek(0)
                f.writer.write(data)
                f.seek(0)
                self.assertEqual(f.read(), data)

            # Check that the BOM is not written after a seek() at a position
            # different than the start
            with codecs.open(os_helper.TESTFN, 'w+', encoding=encoding) as f:
                f.write(data)
                f.seek(f.tell())
                f.write(data)
                f.seek(0)
                self.assertEqual(f.read(), data * 2)

            # (StreamWriter) Check that the BOM is not written after a seek()
            # at a position different than the start
            with codecs.open(os_helper.TESTFN, 'w+', encoding=encoding) as f:
                f.writer.write(data)
                f.writer.seek(f.writer.tell())
                f.writer.write(data)
                f.seek(0)
                self.assertEqual(f.read(), data * 2)
# The bytes-to-bytes transform codecs exercised by TransformCodecTest.
bytes_transform_encodings = [
    "base64_codec",
    "uu_codec",
    "quopri_codec",
    "hex_codec",
]

# Alternative spellings that must resolve to the same codec (issue #7475).
transform_aliases = {
    "base64_codec": ["base64", "base_64"],
    "uu_codec": ["uu"],
    "quopri_codec": ["quopri", "quoted_printable", "quotedprintable"],
    "hex_codec": ["hex"],
    "rot_13": ["rot13"],
}

# zlib and bz2 support is optional at build time, so only test those
# transform codecs when the underlying modules are importable.
try:
    import zlib
except ImportError:
    zlib = None
else:
    bytes_transform_encodings.append("zlib_codec")
    transform_aliases["zlib_codec"] = ["zip", "zlib"]
try:
    import bz2
except ImportError:
    pass
else:
    bytes_transform_encodings.append("bz2_codec")
    transform_aliases["bz2_codec"] = ["bz2"]
class TransformCodecTest(unittest.TestCase):
    """Tests for the bytes-to-bytes transform codecs (base64, hex, ...)."""

    def test_basics(self):
        binput = bytes(range(256))
        for encoding in bytes_transform_encodings:
            with self.subTest(encoding=encoding):
                # generic codecs interface
                (o, size) = codecs.getencoder(encoding)(binput)
                self.assertEqual(size, len(binput))
                (i, size) = codecs.getdecoder(encoding)(o)
                self.assertEqual(size, len(o))
                self.assertEqual(i, binput)

    def test_read(self):
        for encoding in bytes_transform_encodings:
            with self.subTest(encoding=encoding):
                sin = codecs.encode(b"\x80", encoding)
                reader = codecs.getreader(encoding)(io.BytesIO(sin))
                sout = reader.read()
                self.assertEqual(sout, b"\x80")

    def test_readline(self):
        for encoding in bytes_transform_encodings:
            with self.subTest(encoding=encoding):
                sin = codecs.encode(b"\x80", encoding)
                reader = codecs.getreader(encoding)(io.BytesIO(sin))
                sout = reader.readline()
                self.assertEqual(sout, b"\x80")

    def test_buffer_api_usage(self):
        # We check all the transform codecs accept memoryview input
        # for encoding and decoding
        # and also that they roundtrip correctly
        original = b"12345\x80"
        for encoding in bytes_transform_encodings:
            with self.subTest(encoding=encoding):
                data = original
                view = memoryview(data)
                data = codecs.encode(data, encoding)
                view_encoded = codecs.encode(view, encoding)
                self.assertEqual(view_encoded, data)
                view = memoryview(data)
                data = codecs.decode(data, encoding)
                self.assertEqual(data, original)
                view_decoded = codecs.decode(view, encoding)
                self.assertEqual(view_decoded, data)

    def test_text_to_binary_denylists_binary_transforms(self):
        # Check binary -> binary codecs give a good error for str input
        bad_input = "bad input type"
        for encoding in bytes_transform_encodings:
            with self.subTest(encoding=encoding):
                fmt = (r"{!r} is not a text encoding; "
                       r"use codecs.encode\(\) to handle arbitrary codecs")
                msg = fmt.format(encoding)
                with self.assertRaisesRegex(LookupError, msg) as failure:
                    bad_input.encode(encoding)
                # The denylist error must not chain any other exception.
                self.assertIsNone(failure.exception.__cause__)

    def test_text_to_binary_denylists_text_transforms(self):
        # Check str.encode gives a good error message for str -> str codecs
        msg = (r"^'rot_13' is not a text encoding; "
               r"use codecs.encode\(\) to handle arbitrary codecs")
        with self.assertRaisesRegex(LookupError, msg):
            "just an example message".encode("rot_13")

    def test_binary_to_text_denylists_binary_transforms(self):
        # Check bytes.decode and bytearray.decode give a good error
        # message for binary -> binary codecs
        data = b"encode first to ensure we meet any format restrictions"
        for encoding in bytes_transform_encodings:
            with self.subTest(encoding=encoding):
                encoded_data = codecs.encode(data, encoding)
                fmt = (r"{!r} is not a text encoding; "
                       r"use codecs.decode\(\) to handle arbitrary codecs")
                msg = fmt.format(encoding)
                with self.assertRaisesRegex(LookupError, msg):
                    encoded_data.decode(encoding)
                with self.assertRaisesRegex(LookupError, msg):
                    bytearray(encoded_data).decode(encoding)

    def test_binary_to_text_denylists_text_transforms(self):
        # Check str -> str codec gives a good error for binary input
        for bad_input in (b"immutable", bytearray(b"mutable")):
            with self.subTest(bad_input=bad_input):
                msg = (r"^'rot_13' is not a text encoding; "
                       r"use codecs.decode\(\) to handle arbitrary codecs")
                with self.assertRaisesRegex(LookupError, msg) as failure:
                    bad_input.decode("rot_13")
                self.assertIsNone(failure.exception.__cause__)

    @unittest.skipUnless(zlib, "Requires zlib support")
    def test_custom_zlib_error_is_wrapped(self):
        # Check zlib codec gives a good error for malformed input
        msg = "^decoding with 'zlib_codec' codec failed"
        with self.assertRaisesRegex(Exception, msg) as failure:
            codecs.decode(b"hello", "zlib_codec")
        # The wrapper exception must chain the original as __cause__.
        self.assertIsInstance(failure.exception.__cause__,
                              type(failure.exception))

    def test_custom_hex_error_is_wrapped(self):
        # Check hex codec gives a good error for malformed input
        msg = "^decoding with 'hex_codec' codec failed"
        with self.assertRaisesRegex(Exception, msg) as failure:
            codecs.decode(b"hello", "hex_codec")
        self.assertIsInstance(failure.exception.__cause__,
                              type(failure.exception))

    # Unfortunately, the bz2 module throws OSError, which the codec
    # machinery currently can't wrap :(

    # Ensure codec aliases from http://bugs.python.org/issue7475 work
    def test_aliases(self):
        for codec_name, aliases in transform_aliases.items():
            expected_name = codecs.lookup(codec_name).name
            for alias in aliases:
                with self.subTest(alias=alias):
                    info = codecs.lookup(alias)
                    self.assertEqual(info.name, expected_name)

    def test_quopri_stateless(self):
        # Should encode with quotetabs=True
        encoded = codecs.encode(b"space tab\teol \n", "quopri-codec")
        self.assertEqual(encoded, b"space=20tab=09eol=20\n")
        # But should still support unescaped tabs and spaces
        unescaped = b"space tab eol\n"
        self.assertEqual(codecs.decode(unescaped, "quopri-codec"), unescaped)

    def test_uu_invalid(self):
        # Missing "begin" line
        self.assertRaises(ValueError, codecs.decode, b"", "uu-codec")
# The codec system tries to wrap exceptions in order to ensure the error
# mentions the operation being performed and the codec involved. We
# currently *only* want this to happen for relatively stateless
# exceptions, where the only significant information they contain is their
# type and a single str argument.

# Use a local codec registry to avoid appearing to leak objects when
# registering multiple search functions
_TEST_CODECS = {}


def _get_test_codec(codec_name):
    # Search function registered via codecs.register(); returns None for
    # unknown names so the remaining search functions are consulted.
    return _TEST_CODECS.get(codec_name)
class ExceptionChainingTest(unittest.TestCase):
    """Check how exceptions raised inside a codec are wrapped and chained."""

    def setUp(self):
        self.codec_name = 'exception_chaining_test'
        codecs.register(_get_test_codec)
        self.addCleanup(codecs.unregister, _get_test_codec)

        # We store the object to raise on the instance because of a bad
        # interaction between the codec caching (which means we can't
        # recreate the codec entry) and regrtest refleak hunting (which
        # runs the same test instance multiple times). This means we
        # need to ensure the codecs call back in to the instance to find
        # out which exception to raise rather than binding them in a
        # closure to an object that may change on the next run
        self.obj_to_raise = RuntimeError
def tearDown(self):
_TEST_CODECS.pop(self.codec_name, None)
# Issue #22166: Also pop from caches to avoid appearance of ref leaks
encodings._cache.pop(self.codec_name, None)
def set_codec(self, encode, decode):
codec_info = codecs.CodecInfo(encode, decode,
name=self.codec_name)
_TEST_CODECS[self.codec_name] = codec_info
@contextlib.contextmanager
def assertWrapped(self, operation, exc_type, msg):
full_msg = r"{} with {!r} codec failed \({}: {}\)".format(
operation, self.codec_name, exc_type.__name__, msg)
with self.assertRaisesRegex(exc_type, full_msg) as caught:
yield caught
self.assertIsInstance(caught.exception.__cause__, exc_type)
self.assertIsNotNone(caught.exception.__cause__.__traceback__)
def raise_obj(self, *args, **kwds):
# Helper to dynamically change the object raised by a test codec
raise self.obj_to_raise
def check_wrapped(self, obj_to_raise, msg, exc_type=RuntimeError):
self.obj_to_raise = obj_to_raise
self.set_codec(self.raise_obj, self.raise_obj)
with self.assertWrapped("encoding", exc_type, msg):
"str_input".encode(self.codec_name)
with self.assertWrapped("encoding", exc_type, msg):
codecs.encode("str_input", self.codec_name)
with self.assertWrapped("decoding", exc_type, msg):
b"bytes input".decode(self.codec_name)
with self.assertWrapped("decoding", exc_type, msg):
codecs.decode(b"bytes input", self.codec_name)
def test_raise_by_type(self):
self.check_wrapped(RuntimeError, "")
def test_raise_by_value(self):
msg = "This should be wrapped"
self.check_wrapped(RuntimeError(msg), msg)
def test_raise_grandchild_subclass_exact_size(self):
msg = "This should be wrapped"
class MyRuntimeError(RuntimeError):
__slots__ = ()
self.check_wrapped(MyRuntimeError(msg), msg, MyRuntimeError)
def test_raise_subclass_with_weakref_support(self):
msg = "This should be wrapped"
class MyRuntimeError(RuntimeError):
pass
self.check_wrapped(MyRuntimeError(msg), msg, MyRuntimeError)
def check_not_wrapped(self, obj_to_raise, msg):
def raise_obj(*args, **kwds):
raise obj_to_raise
self.set_codec(raise_obj, raise_obj)
with self.assertRaisesRegex(RuntimeError, msg):
"str input".encode(self.codec_name)
with self.assertRaisesRegex(RuntimeError, msg):
codecs.encode("str input", self.codec_name)
with self.assertRaisesRegex(RuntimeError, msg):
b"bytes input".decode(self.codec_name)
with self.assertRaisesRegex(RuntimeError, msg):
codecs.decode(b"bytes input", self.codec_name)
def test_init_override_is_not_wrapped(self):
class CustomInit(RuntimeError):
def __init__(self):
pass
self.check_not_wrapped(CustomInit, "")
def test_new_override_is_not_wrapped(self):
class CustomNew(RuntimeError):
def __new__(cls):
return super().__new__(cls)
self.check_not_wrapped(CustomNew, "")
def test_instance_attribute_is_not_wrapped(self):
msg = "This should NOT be wrapped"
exc = RuntimeError(msg)
exc.attr = 1
self.check_not_wrapped(exc, "^{}$".format(msg))
def test_non_str_arg_is_not_wrapped(self):
self.check_not_wrapped(RuntimeError(1), "1")
def test_multiple_args_is_not_wrapped(self):
msg_re = r"^\('a', 'b', 'c'\)$"
self.check_not_wrapped(RuntimeError('a', 'b', 'c'), msg_re)
# http://bugs.python.org/issue19609
def test_codec_lookup_failure_not_wrapped(self):
msg = "^unknown encoding: {}$".format(self.codec_name)
# The initial codec lookup should not be wrapped
with self.assertRaisesRegex(LookupError, msg):
"str input".encode(self.codec_name)
with self.assertRaisesRegex(LookupError, msg):
codecs.encode("str input", self.codec_name)
with self.assertRaisesRegex(LookupError, msg):
b"bytes input".decode(self.codec_name)
with self.assertRaisesRegex(LookupError, msg):
codecs.decode(b"bytes input", self.codec_name)
def test_unflagged_non_text_codec_handling(self):
# The stdlib non-text codecs are now marked so they're
# pre-emptively skipped by the text model related methods
# However, third party codecs won't be flagged, so we still make
# sure the case where an inappropriate output type is produced is
# handled appropriately
def encode_to_str(*args, **kwds):
return "not bytes!", 0
def decode_to_bytes(*args, **kwds):
return b"not str!", 0
self.set_codec(encode_to_str, decode_to_bytes)
# No input or output type checks on the codecs module functions
encoded = codecs.encode(None, self.codec_name)
self.assertEqual(encoded, "not bytes!")
decoded = codecs.decode(None, self.codec_name)
self.assertEqual(decoded, b"not str!")
# Text model methods should complain
fmt = (r"^{!r} encoder returned 'str' instead of 'bytes'; "
r"use codecs.encode\(\) to encode to arbitrary types$")
msg = fmt.format(self.codec_name)
with self.assertRaisesRegex(TypeError, msg):
"str_input".encode(self.codec_name)
fmt = (r"^{!r} decoder returned 'bytes' instead of 'str'; "
r"use codecs.decode\(\) to decode to arbitrary types$")
msg = fmt.format(self.codec_name)
with self.assertRaisesRegex(TypeError, msg):
b"bytes input".decode(self.codec_name)
@unittest.skipUnless(sys.platform == 'win32',
                     'code pages are specific to Windows')
class CodePageTest(unittest.TestCase):
    """Exercise the Windows code page codecs (codecs.code_page_encode /
    codecs.code_page_decode) including their error handlers."""

    # Windows code page number of UTF-8
    CP_UTF8 = 65001

    def test_invalid_code_page(self):
        self.assertRaises(ValueError, codecs.code_page_encode, -1, 'a')
        self.assertRaises(ValueError, codecs.code_page_decode, -1, b'a')
        self.assertRaises(OSError, codecs.code_page_encode, 123, 'a')
        self.assertRaises(OSError, codecs.code_page_decode, 123, b'a')

    def test_code_page_name(self):
        # Error messages must name the code page ("cp932", "CP_UTF8", ...).
        self.assertRaisesRegex(UnicodeEncodeError, 'cp932',
            codecs.code_page_encode, 932, '\xff')
        self.assertRaisesRegex(UnicodeDecodeError, 'cp932',
            codecs.code_page_decode, 932, b'\x81\x00', 'strict', True)
        self.assertRaisesRegex(UnicodeDecodeError, 'CP_UTF8',
            codecs.code_page_decode, self.CP_UTF8, b'\xff', 'strict', True)

    def check_decode(self, cp, tests):
        # Each item is (raw_bytes, error_handler, expected_str); an
        # expected value of None means UnicodeDecodeError is required.
        for raw, errors, expected in tests:
            if expected is not None:
                try:
                    decoded = codecs.code_page_decode(cp, raw, errors, True)
                except UnicodeDecodeError as err:
                    self.fail('Unable to decode %a from "cp%s" with '
                              'errors=%r: %s' % (raw, cp, errors, err))
                self.assertEqual(decoded[0], expected,
                                 '%a.decode("cp%s", %r)=%a != %a'
                                 % (raw, cp, errors, decoded[0], expected))
                # assert 0 <= decoded[1] <= len(raw)
                self.assertGreaterEqual(decoded[1], 0)
                self.assertLessEqual(decoded[1], len(raw))
            else:
                self.assertRaises(UnicodeDecodeError,
                    codecs.code_page_decode, cp, raw, errors, True)

    def check_encode(self, cp, tests):
        # Mirror of check_decode for the encode direction.
        for text, errors, expected in tests:
            if expected is not None:
                try:
                    encoded = codecs.code_page_encode(cp, text, errors)
                except UnicodeEncodeError as err:
                    self.fail('Unable to encode %a to "cp%s" with '
                              'errors=%r: %s' % (text, cp, errors, err))
                self.assertEqual(encoded[0], expected,
                                 '%a.encode("cp%s", %r)=%a != %a'
                                 % (text, cp, errors, encoded[0], expected))
                self.assertEqual(encoded[1], len(text))
            else:
                self.assertRaises(UnicodeEncodeError,
                    codecs.code_page_encode, cp, text, errors)

    def test_cp932(self):
        self.check_encode(932, (
            ('abc', 'strict', b'abc'),
            ('\uff44\u9a3e', 'strict', b'\x82\x84\xe9\x80'),
            # test error handlers
            ('\xff', 'strict', None),
            ('[\xff]', 'ignore', b'[]'),
            ('[\xff]', 'replace', b'[y]'),
            ('[\u20ac]', 'replace', b'[?]'),
            ('[\xff]', 'backslashreplace', b'[\\xff]'),
            ('[\xff]', 'namereplace',
             b'[\\N{LATIN SMALL LETTER Y WITH DIAERESIS}]'),
            # BUGFIX: this literal had its "&#255;" reference entity-decoded
            # into a raw non-ASCII char (a SyntaxError inside b'...');
            # restored the XML numeric character reference.
            ('[\xff]', 'xmlcharrefreplace', b'[&#255;]'),
            ('\udcff', 'strict', None),
            ('[\udcff]', 'surrogateescape', b'[\xff]'),
            ('[\udcff]', 'surrogatepass', None),
        ))
        self.check_decode(932, (
            (b'abc', 'strict', 'abc'),
            (b'\x82\x84\xe9\x80', 'strict', '\uff44\u9a3e'),
            # invalid bytes
            (b'[\xff]', 'strict', None),
            (b'[\xff]', 'ignore', '[]'),
            (b'[\xff]', 'replace', '[\ufffd]'),
            (b'[\xff]', 'backslashreplace', '[\\xff]'),
            (b'[\xff]', 'surrogateescape', '[\udcff]'),
            (b'[\xff]', 'surrogatepass', None),
            (b'\x81\x00abc', 'strict', None),
            (b'\x81\x00abc', 'ignore', '\x00abc'),
            (b'\x81\x00abc', 'replace', '\ufffd\x00abc'),
            (b'\x81\x00abc', 'backslashreplace', '\\x81\x00abc'),
        ))

    def test_cp1252(self):
        self.check_encode(1252, (
            ('abc', 'strict', b'abc'),
            ('\xe9\u20ac', 'strict', b'\xe9\x80'),
            ('\xff', 'strict', b'\xff'),
            # test error handlers
            ('\u0141', 'strict', None),
            ('\u0141', 'ignore', b''),
            ('\u0141', 'replace', b'L'),
            ('\udc98', 'surrogateescape', b'\x98'),
            ('\udc98', 'surrogatepass', None),
        ))
        self.check_decode(1252, (
            (b'abc', 'strict', 'abc'),
            (b'\xe9\x80', 'strict', '\xe9\u20ac'),
            (b'\xff', 'strict', '\xff'),
        ))

    def test_cp_utf7(self):
        cp = 65000
        self.check_encode(cp, (
            ('abc', 'strict', b'abc'),
            ('\xe9\u20ac', 'strict',  b'+AOkgrA-'),
            ('\U0010ffff', 'strict',  b'+2//f/w-'),
            ('\udc80', 'strict', b'+3IA-'),
            ('\ufffd', 'strict', b'+//0-'),
        ))
        self.check_decode(cp, (
            (b'abc', 'strict', 'abc'),
            (b'+AOkgrA-', 'strict', '\xe9\u20ac'),
            (b'+2//f/w-', 'strict', '\U0010ffff'),
            (b'+3IA-', 'strict', '\udc80'),
            (b'+//0-', 'strict', '\ufffd'),
            # invalid bytes
            (b'[+/]', 'strict', '[]'),
            (b'[\xff]', 'strict', '[\xff]'),
        ))

    def test_multibyte_encoding(self):
        self.check_decode(932, (
            (b'\x84\xe9\x80', 'ignore', '\u9a3e'),
            (b'\x84\xe9\x80', 'replace', '\ufffd\u9a3e'),
        ))
        self.check_decode(self.CP_UTF8, (
            (b'\xff\xf4\x8f\xbf\xbf', 'ignore', '\U0010ffff'),
            (b'\xff\xf4\x8f\xbf\xbf', 'replace', '\ufffd\U0010ffff'),
        ))
        self.check_encode(self.CP_UTF8, (
            ('[\U0010ffff\uDC80]', 'ignore', b'[\xf4\x8f\xbf\xbf]'),
            ('[\U0010ffff\uDC80]', 'replace', b'[\xf4\x8f\xbf\xbf?]'),
        ))

    def test_code_page_decode_flags(self):
        # Issue #36312: For some code pages (e.g. UTF-7) flags for
        # MultiByteToWideChar() must be set to 0.
        if support.verbose:
            sys.stdout.write('\n')
        for cp in (50220, 50221, 50222, 50225, 50227, 50229,
                   *range(57002, 57011+1), 65000):
            # On small versions of Windows like Windows IoT
            # not all codepages are present.
            # A missing codepage causes an OSError exception
            # so check for the codepage before decoding
            if is_code_page_present(cp):
                self.assertEqual(codecs.code_page_decode(cp, b'abc'), ('abc', 3), f'cp{cp}')
            else:
                if support.verbose:
                    print(f"  skipping cp={cp}")
        self.assertEqual(codecs.code_page_decode(42, b'abc'),
                         ('\uf061\uf062\uf063', 3))

    def test_incremental(self):
        # A truncated lead byte decodes to nothing until more data arrives.
        decoded = codecs.code_page_decode(932, b'\x82', 'strict', False)
        self.assertEqual(decoded, ('', 0))

        decoded = codecs.code_page_decode(932,
                                          b'\xe9\x80\xe9', 'strict',
                                          False)
        self.assertEqual(decoded, ('\u9a3e', 2))

        decoded = codecs.code_page_decode(932,
                                          b'\xe9\x80\xe9\x80', 'strict',
                                          False)
        self.assertEqual(decoded, ('\u9a3e\u9a3e', 4))

        decoded = codecs.code_page_decode(932,
                                          b'abc', 'strict',
                                          False)
        self.assertEqual(decoded, ('abc', 3))

    def test_mbcs_alias(self):
        # Check that looking up our 'default' codepage will return
        # mbcs when we don't have a more specific one available
        with mock.patch('_winapi.GetACP', return_value=123):
            codec = codecs.lookup('cp123')
            self.assertEqual(codec.name, 'mbcs')

    @support.bigmemtest(size=2**31, memuse=7, dry_run=False)
    def test_large_input(self, size):
        # Test input longer than INT_MAX.
        # Input should contain undecodable bytes before and after
        # the INT_MAX limit.
        encoded = (b'01234567' * ((size//8)-1) +
                   b'\x85\x86\xea\xeb\xec\xef\xfc\xfd\xfe\xff')
        self.assertEqual(len(encoded), size+2)
        decoded = codecs.code_page_decode(932, encoded, 'surrogateescape', True)
        self.assertEqual(decoded[1], len(encoded))
        del encoded
        self.assertEqual(len(decoded[0]), decoded[1])
        self.assertEqual(decoded[0][:10], '0123456701')
        self.assertEqual(decoded[0][-20:],
                         '6701234567'
                         '\udc85\udc86\udcea\udceb\udcec'
                         '\udcef\udcfc\udcfd\udcfe\udcff')

    @support.bigmemtest(size=2**31, memuse=6, dry_run=False)
    def test_large_utf8_input(self, size):
        # Test input longer than INT_MAX.
        # Input should contain a decodable multi-byte character
        # surrounding INT_MAX
        encoded = (b'0123456\xed\x84\x80' * (size//8))
        self.assertEqual(len(encoded), size // 8 * 10)
        decoded = codecs.code_page_decode(65001, encoded, 'ignore', True)
        self.assertEqual(decoded[1], len(encoded))
        del encoded
        self.assertEqual(len(decoded[0]), size)
        self.assertEqual(decoded[0][:10], '0123456\ud10001')
        self.assertEqual(decoded[0][-11:], '56\ud1000123456\ud100')
class ASCIITest(unittest.TestCase):
    """Behavior of the built-in 'ascii' codec, including error handlers."""

    def test_encode(self):
        self.assertEqual('abc123'.encode('ascii'), b'abc123')

    def test_encode_error(self):
        # BUGFIX: the xmlcharrefreplace expectation had its "&#...;"
        # references entity-decoded into raw non-ASCII characters, which is
        # not even legal inside a bytes literal; restored the references.
        for data, error_handler, expected in (
            ('[\x80\xff\u20ac]', 'ignore', b'[]'),
            ('[\x80\xff\u20ac]', 'replace', b'[???]'),
            ('[\x80\xff\u20ac]', 'xmlcharrefreplace', b'[&#128;&#255;&#8364;]'),
            ('[\x80\xff\u20ac\U000abcde]', 'backslashreplace',
             b'[\\x80\\xff\\u20ac\\U000abcde]'),
            ('[\udc80\udcff]', 'surrogateescape', b'[\x80\xff]'),
        ):
            with self.subTest(data=data, error_handler=error_handler,
                              expected=expected):
                self.assertEqual(data.encode('ascii', error_handler),
                                 expected)

    def test_encode_surrogateescape_error(self):
        with self.assertRaises(UnicodeEncodeError):
            # the first character can be decoded, but not the second
            '\udc80\xff'.encode('ascii', 'surrogateescape')

    def test_decode(self):
        self.assertEqual(b'abc'.decode('ascii'), 'abc')

    def test_decode_error(self):
        for data, error_handler, expected in (
            (b'[\x80\xff]', 'ignore', '[]'),
            (b'[\x80\xff]', 'replace', '[\ufffd\ufffd]'),
            (b'[\x80\xff]', 'surrogateescape', '[\udc80\udcff]'),
            (b'[\x80\xff]', 'backslashreplace', '[\\x80\\xff]'),
        ):
            with self.subTest(data=data, error_handler=error_handler,
                              expected=expected):
                self.assertEqual(data.decode('ascii', error_handler),
                                 expected)
class Latin1Test(unittest.TestCase):
    """Behavior of the built-in 'latin1' codec, including error handlers."""

    def test_encode(self):
        for data, expected in (
            ('abc', b'abc'),
            ('\x80\xe9\xff', b'\x80\xe9\xff'),
        ):
            with self.subTest(data=data, expected=expected):
                self.assertEqual(data.encode('latin1'), expected)

    def test_encode_errors(self):
        # BUGFIX: the xmlcharrefreplace expectation had its "&#...;"
        # references entity-decoded into raw characters (invalid in a
        # bytes literal); restored the numeric references
        # (&#8364; == U+20AC, &#56448; == U+DC80).
        for data, error_handler, expected in (
            ('[\u20ac\udc80]', 'ignore', b'[]'),
            ('[\u20ac\udc80]', 'replace', b'[??]'),
            ('[\u20ac\U000abcde]', 'backslashreplace',
             b'[\\u20ac\\U000abcde]'),
            ('[\u20ac\udc80]', 'xmlcharrefreplace', b'[&#8364;&#56448;]'),
            ('[\udc80\udcff]', 'surrogateescape', b'[\x80\xff]'),
        ):
            with self.subTest(data=data, error_handler=error_handler,
                              expected=expected):
                self.assertEqual(data.encode('latin1', error_handler),
                                 expected)

    def test_encode_surrogateescape_error(self):
        with self.assertRaises(UnicodeEncodeError):
            # the first character can be decoded, but not the second
            '\udc80\u20ac'.encode('latin1', 'surrogateescape')

    def test_decode(self):
        for data, expected in (
            (b'abc', 'abc'),
            (b'[\x80\xff]', '[\x80\xff]'),
        ):
            with self.subTest(data=data, expected=expected):
                self.assertEqual(data.decode('latin1'), expected)
class StreamRecoderTest(unittest.TestCase):
    """Tests for codecs.StreamRecoder / codecs.EncodedFile wrappers."""

    def test_writelines(self):
        bio = io.BytesIO()
        codec = codecs.lookup('ascii')
        sr = codecs.StreamRecoder(bio, codec.encode, codec.decode,
                                  encodings.ascii.StreamReader, encodings.ascii.StreamWriter)
        sr.writelines([b'a', b'b'])
        self.assertEqual(bio.getvalue(), b'ab')

    def test_write(self):
        bio = io.BytesIO()
        codec = codecs.lookup('latin1')
        # Recode from Latin-1 to utf-8.
        sr = codecs.StreamRecoder(bio, codec.encode, codec.decode,
                                  encodings.utf_8.StreamReader, encodings.utf_8.StreamWriter)
        text = 'àñé'
        sr.write(text.encode('latin1'))
        self.assertEqual(bio.getvalue(), text.encode('utf-8'))

    def test_seeking_read(self):
        # seek() must reset the internal decode buffer so re-reading works.
        bio = io.BytesIO('line1\nline2\nline3\n'.encode('utf-16-le'))
        sr = codecs.EncodedFile(bio, 'utf-8', 'utf-16-le')
        self.assertEqual(sr.readline(), b'line1\n')
        sr.seek(0)
        self.assertEqual(sr.readline(), b'line1\n')
        self.assertEqual(sr.readline(), b'line2\n')
        self.assertEqual(sr.readline(), b'line3\n')
        self.assertEqual(sr.readline(), b'')

    def test_seeking_write(self):
        bio = io.BytesIO('123456789\n'.encode('utf-16-le'))
        sr = codecs.EncodedFile(bio, 'utf-8', 'utf-16-le')
        # Test that seek() only resets its internal buffer when offset
        # and whence are zero.
        sr.seek(2)
        sr.write(b'\nabc\n')
        self.assertEqual(sr.readline(), b'789\n')
        sr.seek(0)
        self.assertEqual(sr.readline(), b'1\n')
        self.assertEqual(sr.readline(), b'abc\n')
        self.assertEqual(sr.readline(), b'789\n')
@unittest.skipIf(_testcapi is None, 'need _testcapi module')
class LocaleCodecTest(unittest.TestCase):
    """Exercise _testcapi.EncodeLocaleEx/DecodeLocaleEx against the
    behavior of str.encode/bytes.decode for the filesystem encoding."""

    # NOTE(review): assumes the C helpers use the filesystem encoding —
    # grounded by the comparisons against self.ENCODING below.
    ENCODING = sys.getfilesystemencoding()
    # Sample texts: ASCII, Latin-1 range, full UCS range, lone surrogates.
    STRINGS = ("ascii", "ulatin1:\xa7\xe9",
               "u255:\xff",
               "UCS:\xe9\u20ac\U0010ffff",
               "surrogates:\uDC80\uDCFF")
    BYTES_STRINGS = (b"blatin1:\xa7\xe9", b"b255:\xff")
    SURROGATES = "\uDC80\uDCFF"

    def encode(self, text, errors="strict"):
        return _testcapi.EncodeLocaleEx(text, 0, errors)

    def check_encode_strings(self, errors):
        # The C encoder must agree with str.encode for every sample,
        # raising RuntimeError (with pos/reason) where Python raises
        # UnicodeEncodeError.
        for text in self.STRINGS:
            with self.subTest(text=text):
                try:
                    expected = text.encode(self.ENCODING, errors)
                except UnicodeEncodeError:
                    with self.assertRaises(RuntimeError) as cm:
                        self.encode(text, errors)
                    errmsg = str(cm.exception)
                    self.assertRegex(errmsg, r"encode error: pos=[0-9]+, reason=")
                else:
                    encoded = self.encode(text, errors)
                    self.assertEqual(encoded, expected)

    def test_encode_strict(self):
        self.check_encode_strings("strict")

    def test_encode_surrogateescape(self):
        self.check_encode_strings("surrogateescape")

    def test_encode_surrogatepass(self):
        try:
            self.encode('', 'surrogatepass')
        except ValueError as exc:
            if str(exc) == 'unsupported error handler':
                self.skipTest(f"{self.ENCODING!r} encoder doesn't support "
                              f"surrogatepass error handler")
            else:
                raise

        self.check_encode_strings("surrogatepass")

    def test_encode_unsupported_error_handler(self):
        with self.assertRaises(ValueError) as cm:
            self.encode('', 'backslashreplace')
        self.assertEqual(str(cm.exception), 'unsupported error handler')

    def decode(self, encoded, errors="strict"):
        return _testcapi.DecodeLocaleEx(encoded, 0, errors)

    def check_decode_strings(self, errors):
        is_utf8 = (self.ENCODING == "utf-8")
        if is_utf8:
            encode_errors = 'surrogateescape'
        else:
            encode_errors = 'strict'

        # Build the set of byte strings to decode from the sample texts.
        strings = list(self.BYTES_STRINGS)
        for text in self.STRINGS:
            try:
                encoded = text.encode(self.ENCODING, encode_errors)
                if encoded not in strings:
                    strings.append(encoded)
            except UnicodeEncodeError:
                encoded = None

            if is_utf8:
                encoded2 = text.encode(self.ENCODING, 'surrogatepass')
                if encoded2 != encoded:
                    strings.append(encoded2)

        for encoded in strings:
            with self.subTest(encoded=encoded):
                try:
                    expected = encoded.decode(self.ENCODING, errors)
                except UnicodeDecodeError:
                    with self.assertRaises(RuntimeError) as cm:
                        self.decode(encoded, errors)
                    errmsg = str(cm.exception)
                    self.assertTrue(errmsg.startswith("decode error: "), errmsg)
                else:
                    decoded = self.decode(encoded, errors)
                    self.assertEqual(decoded, expected)

    def test_decode_strict(self):
        self.check_decode_strings("strict")

    def test_decode_surrogateescape(self):
        self.check_decode_strings("surrogateescape")

    def test_decode_surrogatepass(self):
        try:
            self.decode(b'', 'surrogatepass')
        except ValueError as exc:
            if str(exc) == 'unsupported error handler':
                self.skipTest(f"{self.ENCODING!r} decoder doesn't support "
                              f"surrogatepass error handler")
            else:
                raise

        self.check_decode_strings("surrogatepass")

    def test_decode_unsupported_error_handler(self):
        with self.assertRaises(ValueError) as cm:
            self.decode(b'', 'backslashreplace')
        self.assertEqual(str(cm.exception), 'unsupported error handler')
class Rot13Test(unittest.TestCase):
    """Sanity checks for the 'rot-13' transform codec."""

    def test_encode(self):
        self.assertEqual(codecs.encode("Caesar liked ciphers", 'rot-13'),
                         'Pnrfne yvxrq pvcuref')

    def test_decode(self):
        self.assertEqual(codecs.decode('Rg gh, Oehgr?', 'rot-13'),
                         'Et tu, Brute?')

    def test_incremental_encode(self):
        # The incremental encoder must produce the same transform.
        inc_encoder = codecs.getincrementalencoder('rot-13')()
        self.assertEqual(inc_encoder.encode('ABBA nag Cheryl Baker'),
                         'NOON ant Purely Onxre')

    def test_incremental_decode(self):
        inc_decoder = codecs.getincrementaldecoder('rot-13')()
        self.assertEqual(inc_decoder.decode('terra Ares envy tha'),
                         'green Nerf rail gun')
class Rot13UtilTest(unittest.TestCase):
    """Check the file-to-file rot13() helper in encodings.rot_13."""

    def test_rot13_func(self):
        # The helper streams from one text file object to another.
        src = io.StringIO('Gb or, be abg gb or, gung vf gur dhrfgvba')
        dst = io.StringIO()
        encodings.rot_13.rot13(src, dst)
        dst.seek(0)
        self.assertEqual(
            dst.read(),
            'To be, or not to be, that is the question')
class CodecNameNormalizationTest(unittest.TestCase):
    """Check how codec names are normalized before registry lookup."""

    def test_codecs_lookup(self):
        FOUND = (1, 2, 3, 4)
        NOT_FOUND = (None, None, None, None)
        def search_function(encoding):
            # Only the already-normalized form "aaa_8" is recognized.
            if encoding == "aaa_8":
                return FOUND
            else:
                return NOT_FOUND

        codecs.register(search_function)
        self.addCleanup(codecs.unregister, search_function)
        # Runs of hyphens/spaces collapse to "_"; non-ASCII chars are
        # dropped; "." is preserved (and therefore does not match).
        self.assertEqual(FOUND, codecs.lookup('aaa_8'))
        self.assertEqual(FOUND, codecs.lookup('AAA-8'))
        self.assertEqual(FOUND, codecs.lookup('AAA---8'))
        self.assertEqual(FOUND, codecs.lookup('AAA   8'))
        self.assertEqual(FOUND, codecs.lookup('aaa\xe9\u20ac-8'))
        self.assertEqual(NOT_FOUND, codecs.lookup('AAA.8'))
        self.assertEqual(NOT_FOUND, codecs.lookup('AAA...8'))
        self.assertEqual(NOT_FOUND, codecs.lookup('BBB-8'))
        self.assertEqual(NOT_FOUND, codecs.lookup('BBB.8'))
        self.assertEqual(NOT_FOUND, codecs.lookup('a\xe9\u20ac-8'))

    def test_encodings_normalize_encoding(self):
        # encodings.normalize_encoding() ignores non-ASCII characters.
        normalize = encodings.normalize_encoding
        self.assertEqual(normalize('utf_8'), 'utf_8')
        self.assertEqual(normalize('utf\xE9\u20AC\U0010ffff-8'), 'utf_8')
        self.assertEqual(normalize('utf 8'), 'utf_8')
        # encodings.normalize_encoding() doesn't convert
        # characters to lower case.
        self.assertEqual(normalize('UTF 8'), 'UTF_8')
        self.assertEqual(normalize('utf.8'), 'utf.8')
        self.assertEqual(normalize('utf...8'), 'utf...8')
if __name__ == "__main__":
    # Run all test cases in this module when executed as a script.
    unittest.main()
| true | true |
f73110ecdff79d7c029c0dd0d895ef71ea68326b | 12,233 | py | Python | loopy/transform/instruction.py | benSepanski/loopy | 5db582d579eb65ce58b93e2c53feb1d48404cf2d | [
"MIT"
] | null | null | null | loopy/transform/instruction.py | benSepanski/loopy | 5db582d579eb65ce58b93e2c53feb1d48404cf2d | [
"MIT"
] | null | null | null | loopy/transform/instruction.py | benSepanski/loopy | 5db582d579eb65ce58b93e2c53feb1d48404cf2d | [
"MIT"
] | null | null | null | from __future__ import division, absolute_import
__copyright__ = "Copyright (C) 2012 Andreas Kloeckner"
__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import six # noqa
from loopy.diagnostic import LoopyError
from loopy.kernel import LoopKernel
from loopy.kernel.function_interface import (ScalarCallable, CallableKernel)
from loopy.program import Program, iterate_over_kernels_if_given_program
# {{{ find_instructions
def find_instructions_in_single_kernel(kernel, insn_match):
    """Return the list of instructions of *kernel* selected by *insn_match*.

    *insn_match* may be anything understood by
    :func:`loopy.match.parse_match`.
    """
    assert isinstance(kernel, LoopKernel)
    from loopy.match import parse_match
    matcher = parse_match(insn_match)

    result = []
    for insn in kernel.instructions:
        if matcher(kernel, insn):
            result.append(insn)
    return result
def find_instructions(program, insn_match):
    """Collect instructions matching *insn_match* from every kernel callable
    of *program*; scalar callables contribute nothing."""
    assert isinstance(program, Program)

    result = []
    for clbl in program.callables_table.values():
        if isinstance(clbl, CallableKernel):
            result.extend(find_instructions_in_single_kernel(
                clbl.subkernel, insn_match))
        elif not isinstance(clbl, ScalarCallable):
            raise NotImplementedError("Unknown callable type %s." % (
                type(clbl)))

    return result
# }}}
# {{{ map_instructions
def map_instructions(kernel, insn_match, f):
    """Return a copy of *kernel* in which *f* has been applied to every
    instruction matched by *insn_match*; other instructions are unchanged."""
    from loopy.match import parse_match
    matcher = parse_match(insn_match)

    new_insns = [
            f(insn) if matcher(kernel, insn) else insn
            for insn in kernel.instructions]

    return kernel.copy(instructions=new_insns)
# }}}
# {{{ set_instruction_priority
@iterate_over_kernels_if_given_program
def set_instruction_priority(kernel, insn_match, priority):
    """Set the priority of instructions matching *insn_match* to *priority*.

    *insn_match* may be any instruction id match understood by
    :func:`loopy.match.parse_match`.
    """
    return map_instructions(
            kernel, insn_match, lambda insn: insn.copy(priority=priority))
# }}}
# {{{ add_dependency
@iterate_over_kernels_if_given_program
def add_dependency(kernel, insn_match, depends_on):
    """Add the instruction dependency *dependency* to the instructions matched
    by *insn_match*.

    *insn_match* and *depends_on* may be any instruction id match understood by
    :func:`loopy.match.parse_match`.

    .. versionchanged:: 2016.3

        Third argument renamed to *depends_on* for clarity, allowed to
        be not just ID but also match expression.
    """

    # Fast path: *depends_on* literally names an existing instruction id;
    # otherwise treat it as a match expression and resolve it to ids.
    if isinstance(depends_on, str) and depends_on in kernel.id_to_insn:
        added_deps = frozenset([depends_on])
    else:
        added_deps = frozenset(
                dep.id for dep in find_instructions_in_single_kernel(kernel,
                    depends_on))

    if not added_deps:
        raise LoopyError("no instructions found matching '%s' "
                "(to add as dependencies)" % depends_on)

    # Mutable cell: lets the mapped callback record whether it ever ran.
    matched = [False]

    def add_dep(insn):
        new_deps = insn.depends_on
        matched[0] = True
        if new_deps is None:
            new_deps = added_deps
        else:
            new_deps = new_deps | added_deps

        return insn.copy(depends_on=new_deps)

    result = map_instructions(kernel, insn_match, add_dep)

    if not matched[0]:
        raise LoopyError("no instructions found matching '%s' "
                "(to which dependencies would be added)" % insn_match)

    return result
# }}}
# {{{ remove_instructions
def remove_instructions(kernel, insn_ids):
    """Return a new kernel with instructions in *insn_ids* removed.

    Dependencies across (one, for now) deleted instructions are propagated.
    Behavior is undefined for now for chains of dependencies within the
    set of deleted instructions.

    This also updates *no_sync_with* for all instructions.
    """

    if not insn_ids:
        return kernel

    assert isinstance(insn_ids, set)
    id_to_insn = kernel.id_to_insn

    new_insns = []
    for insn in kernel.instructions:
        if insn.id in insn_ids:
            continue

        # transitively propagate dependencies
        # (only one level for now)
        if insn.depends_on is None:
            depends_on = frozenset()
        else:
            depends_on = insn.depends_on

        # Keep surviving deps; replace each deleted dep by its own deps.
        new_deps = depends_on - insn_ids

        for dep_id in depends_on & insn_ids:
            new_deps = new_deps | id_to_insn[dep_id].depends_on

        # update no_sync_with
        new_no_sync_with = frozenset((insn_id, scope)
                for insn_id, scope in insn.no_sync_with
                if insn_id not in insn_ids)

        new_insns.append(
                insn.copy(depends_on=new_deps, no_sync_with=new_no_sync_with))

    return kernel.copy(
            instructions=new_insns)
# }}}
# {{{ replace_instruction_ids
def replace_instruction_ids(kernel, replacements):
    """Return a copy of *kernel* with instruction-id references rewritten.

    :arg replacements: mapping from an old instruction id to an iterable of
        replacement ids; every *depends_on* and *no_sync_with* reference to
        an old id is fanned out to all of its replacements. Instructions
        with no affected references are reused unchanged.
    """
    new_insns = []

    for insn in kernel.instructions:
        changed = False
        new_depends_on = []
        new_no_sync_with = []

        for dep in insn.depends_on:
            if dep in replacements:
                new_depends_on.extend(replacements[dep])
                changed = True
            else:
                new_depends_on.append(dep)

        for insn_id, scope in insn.no_sync_with:
            if insn_id in replacements:
                new_no_sync_with.extend(
                        (repl, scope) for repl in replacements[insn_id])
                changed = True
            else:
                new_no_sync_with.append((insn_id, scope))

        new_insns.append(
                insn.copy(
                    depends_on=frozenset(new_depends_on),
                    no_sync_with=frozenset(new_no_sync_with))
                if changed else insn)

    return kernel.copy(instructions=new_insns)
# }}}
# {{{ tag_instructions
@iterate_over_kernels_if_given_program
def tag_instructions(kernel, new_tag, within=None):
    """Return a copy of *kernel* with *new_tag* added to the tag set of
    every instruction matched by *within* (any match understood by
    :func:`loopy.match.parse_match`; *None* matches all instructions)."""
    from loopy.match import parse_match
    matcher = parse_match(within)

    new_insns = [
            insn.copy(tags=insn.tags | frozenset([new_tag]))
            if matcher(kernel, insn)
            else insn
            for insn in kernel.instructions]

    return kernel.copy(instructions=new_insns)
# }}}
# {{{ add nosync
@iterate_over_kernels_if_given_program
def add_nosync(kernel, scope, source, sink, bidirectional=False, force=False,
        empty_ok=False):
    """Add a *no_sync_with* directive between *source* and *sink*.
    *no_sync_with* is only added if *sink* depends on *source* or
    if the instruction pair is in a conflicting group.

    This function does not check for the presence of a memory dependency.

    :arg kernel: The kernel
    :arg source: Either a single instruction id, or any instruction id
        match understood by :func:`loopy.match.parse_match`.
    :arg sink: Either a single instruction id, or any instruction id
        match understood by :func:`loopy.match.parse_match`.
    :arg scope: A valid *no_sync_with* scope. See
        :attr:`loopy.InstructionBase.no_sync_with` for allowable scopes.
    :arg bidirectional: A :class:`bool`. If *True*, add a *no_sync_with*
        to both the source and sink instructions, otherwise the directive
        is only added to the sink instructions.
    :arg force: A :class:`bool`. If *True*, add a *no_sync_with* directive
        even without the presence of a dependency edge or conflicting
        instruction group.
    :arg empty_ok: If *True*, do not complain even if no *nosync* tags were
        added as a result of the transformation.

    :return: The updated kernel

    .. versionchanged:: 2018.1

        If the transformation adds no *nosync* directives, it will complain.
        This used to silently pass. This behavior can be restored using
        *empty_ok*.
    """
    assert isinstance(kernel, LoopKernel)

    # Resolve *source*/*sink*: either a literal instruction id, or a match
    # expression that may select several instructions.
    if isinstance(source, str) and source in kernel.id_to_insn:
        sources = frozenset([source])
    else:
        sources = frozenset(
                source.id for source in find_instructions_in_single_kernel(
                    kernel, source))

    if isinstance(sink, str) and sink in kernel.id_to_insn:
        sinks = frozenset([sink])
    else:
        sinks = frozenset(
                sink.id for sink in find_instructions_in_single_kernel(
                    kernel, sink))

    if not sources and not empty_ok:
        raise LoopyError("No match found for source specification '%s'." % source)
    if not sinks and not empty_ok:
        raise LoopyError("No match found for sink specification '%s'." % sink)

    def insns_in_conflicting_groups(insn1_id, insn2_id):
        # True if either instruction is in a group the other conflicts with.
        insn1 = kernel.id_to_insn[insn1_id]
        insn2 = kernel.id_to_insn[insn2_id]
        return (
                bool(insn1.groups & insn2.conflicts_with_groups)
                or
                bool(insn2.groups & insn1.conflicts_with_groups))

    from collections import defaultdict
    nosync_to_add = defaultdict(set)  # insn id -> {(other id, scope)}

    # Unless *force* is given, only pairs connected by a (transitive)
    # dependency or a group conflict receive a nosync edge.
    rec_dep_map = kernel.recursive_insn_dep_map()
    for sink in sinks:
        for source in sources:
            needs_nosync = force or (
                    source in rec_dep_map[sink]
                    or insns_in_conflicting_groups(source, sink))
            if not needs_nosync:
                continue

            nosync_to_add[sink].add((source, scope))
            if bidirectional:
                nosync_to_add[source].add((sink, scope))

    if not nosync_to_add and not empty_ok:
        raise LoopyError("No nosync annotations were added as a result "
                "of this call. add_nosync will (by default) only add them to "
                "accompany existing depencies or group exclusions. Maybe you want "
                "to pass force=True?")

    new_instructions = list(kernel.instructions)
    for i, insn in enumerate(new_instructions):
        if insn.id in nosync_to_add:
            new_instructions[i] = insn.copy(no_sync_with=insn.no_sync_with
                    | frozenset(nosync_to_add[insn.id]))

    return kernel.copy(instructions=new_instructions)
# }}}
# {{{ uniquify_instruction_ids
@iterate_over_kernels_if_given_program
def uniquify_instruction_ids(kernel):
    """Converts any ids that are :class:`loopy.UniqueName` or *None* into
    unique strings.

    This function does *not* deduplicate existing instruction ids.
    """

    from loopy.kernel.creation import UniqueName

    # Seed the generator with every id that is already a concrete string,
    # so freshly generated names cannot collide with them.
    taken_ids = set(
            insn.id for insn in kernel.instructions
            if insn.id is not None and not isinstance(insn.id, UniqueName))

    from pytools import UniqueNameGenerator
    id_gen = UniqueNameGenerator(taken_ids)

    new_instructions = []
    for insn in kernel.instructions:
        if isinstance(insn.id, UniqueName):
            insn = insn.copy(id=id_gen(insn.id.name))
        elif insn.id is None:
            insn = insn.copy(id=id_gen("insn"))
        new_instructions.append(insn)

    return kernel.copy(instructions=new_instructions)
# }}}
# vim: foldmethod=marker
| 31.366667 | 83 | 0.666721 | from __future__ import division, absolute_import
__copyright__ = "Copyright (C) 2012 Andreas Kloeckner"
__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import six
from loopy.diagnostic import LoopyError
from loopy.kernel import LoopKernel
from loopy.kernel.function_interface import (ScalarCallable, CallableKernel)
from loopy.program import Program, iterate_over_kernels_if_given_program
def find_instructions_in_single_kernel(kernel, insn_match):
    """Return the instructions of *kernel* selected by the match
    expression *insn_match*."""
    assert isinstance(kernel, LoopKernel)
    from loopy.match import parse_match
    matches = parse_match(insn_match)
    found = []
    for insn in kernel.instructions:
        if matches(kernel, insn):
            found.append(insn)
    return found
def find_instructions(program, insn_match):
    """Collect the instructions matching *insn_match* from every kernel
    callable contained in *program*."""
    assert isinstance(program, Program)
    found = []
    for clbl in program.callables_table.values():
        if isinstance(clbl, CallableKernel):
            found.extend(find_instructions_in_single_kernel(
                clbl.subkernel, insn_match))
        elif isinstance(clbl, ScalarCallable):
            # Scalar callables carry no instructions.
            continue
        else:
            raise NotImplementedError("Unknown callable type %s." % (
                type(clbl)))
    return found
def map_instructions(kernel, insn_match, f):
    """Return a copy of *kernel* with *f* applied to every instruction
    matching *insn_match*; non-matching instructions are kept as-is."""
    from loopy.match import parse_match
    matches = parse_match(insn_match)
    new_insns = [
            f(insn) if matches(kernel, insn) else insn
            for insn in kernel.instructions]
    return kernel.copy(instructions=new_insns)
@iterate_over_kernels_if_given_program
def set_instruction_priority(kernel, insn_match, priority):
    """Set the priority of every instruction matching *insn_match* to
    *priority*."""
    return map_instructions(
            kernel, insn_match, lambda insn: insn.copy(priority=priority))
@iterate_over_kernels_if_given_program
def add_dependency(kernel, insn_match, depends_on):
    """Add the instruction(s) selected by *depends_on* as dependencies of
    every instruction matching *insn_match*.

    *depends_on* is either an existing instruction id or a match expression.

    :raises LoopyError: if either specification matches no instructions.
    """
    if isinstance(depends_on, str) and depends_on in kernel.id_to_insn:
        # Fast path: *depends_on* literally names an existing instruction.
        added_deps = frozenset([depends_on])
    else:
        added_deps = frozenset(
            dep.id for dep in find_instructions_in_single_kernel(kernel,
                depends_on))
    if not added_deps:
        raise LoopyError("no instructions found matching '%s' "
                "(to add as dependencies)" % depends_on)
    # Mutable cell so the closure below can report whether it ever ran.
    matched = [False]
    def add_dep(insn):
        new_deps = insn.depends_on
        matched[0] = True
        if new_deps is None:
            new_deps = added_deps
        else:
            new_deps = new_deps | added_deps
        return insn.copy(depends_on=new_deps)
    result = map_instructions(kernel, insn_match, add_dep)
    if not matched[0]:
        raise LoopyError("no instructions found matching '%s' "
                "(to which dependencies would be added)" % insn_match)
    return result
def remove_instructions(kernel, insn_ids):
    """Return a copy of *kernel* without the instructions whose ids are in
    the set *insn_ids*.

    A surviving instruction that depended on a removed one inherits the
    removed instruction's direct dependencies instead; references to removed
    ids are also dropped from *no_sync_with*.
    """
    if not insn_ids:
        return kernel
    assert isinstance(insn_ids, set)
    id_to_insn = kernel.id_to_insn
    kept = []
    for insn in kernel.instructions:
        if insn.id in insn_ids:
            continue
        deps = insn.depends_on if insn.depends_on is not None else frozenset()
        # Splice out removed dependencies, grafting in their own deps.
        new_deps = deps - insn_ids
        for removed_id in deps & insn_ids:
            new_deps = new_deps | id_to_insn[removed_id].depends_on
        new_nosync = frozenset(
                (other_id, scope)
                for other_id, scope in insn.no_sync_with
                if other_id not in insn_ids)
        kept.append(insn.copy(depends_on=new_deps, no_sync_with=new_nosync))
    return kernel.copy(instructions=kept)
def replace_instruction_ids(kernel, replacements):
    """Return a copy of *kernel* in which instruction ids found in the
    mapping *replacements* (old id -> iterable of new ids) are substituted
    inside every instruction's *depends_on* and *no_sync_with* fields."""
    new_insns = []
    for insn in kernel.instructions:
        touched = False
        deps = []
        for dep in insn.depends_on:
            repl = replacements.get(dep)
            if repl is None:
                deps.append(dep)
            else:
                deps.extend(repl)
                touched = True
        nosync = []
        for other_id, scope in insn.no_sync_with:
            repl = replacements.get(other_id)
            if repl is None:
                nosync.append((other_id, scope))
            else:
                nosync.extend((new_id, scope) for new_id in repl)
                touched = True
        if touched:
            insn = insn.copy(
                    depends_on=frozenset(deps),
                    no_sync_with=frozenset(nosync))
        new_insns.append(insn)
    return kernel.copy(instructions=new_insns)
@iterate_over_kernels_if_given_program
def tag_instructions(kernel, new_tag, within=None):
    """Attach *new_tag* to every instruction matched by *within*
    (all instructions when *within* is None)."""
    from loopy.match import parse_match
    matches = parse_match(within)
    new_insns = [
            insn.copy(tags=insn.tags | frozenset([new_tag]))
            if matches(kernel, insn) else insn
            for insn in kernel.instructions]
    return kernel.copy(instructions=new_insns)
@iterate_over_kernels_if_given_program
def add_nosync(kernel, scope, source, sink, bidirectional=False, force=False,
        empty_ok=False):
    """Add a *no_sync_with* annotation (at synchronization *scope*) from each
    matched sink instruction to each matched source instruction.

    *source* and *sink* may each be a literal instruction id or a match
    expression.  By default only pairs that already have a (transitive)
    dependency or conflicting groups are annotated; ``force=True`` annotates
    every pair.  ``bidirectional=True`` also annotates source -> sink.

    :raises LoopyError: when a specification matches nothing, or when no
        annotation would be added (both suppressed by ``empty_ok=True``).
    """
    assert isinstance(kernel, LoopKernel)
    if isinstance(source, str) and source in kernel.id_to_insn:
        sources = frozenset([source])
    else:
        sources = frozenset(
            source.id for source in find_instructions_in_single_kernel(
                kernel, source))
    if isinstance(sink, str) and sink in kernel.id_to_insn:
        sinks = frozenset([sink])
    else:
        sinks = frozenset(
            sink.id for sink in find_instructions_in_single_kernel(
                kernel, sink))
    if not sources and not empty_ok:
        raise LoopyError("No match found for source specification '%s'." % source)
    if not sinks and not empty_ok:
        raise LoopyError("No match found for sink specification '%s'." % sink)
    def insns_in_conflicting_groups(insn1_id, insn2_id):
        # True if either instruction's groups conflict with the other's.
        insn1 = kernel.id_to_insn[insn1_id]
        insn2 = kernel.id_to_insn[insn2_id]
        return (
            bool(insn1.groups & insn2.conflicts_with_groups)
            or
            bool(insn2.groups & insn1.conflicts_with_groups))
    from collections import defaultdict
    nosync_to_add = defaultdict(set)
    rec_dep_map = kernel.recursive_insn_dep_map()
    for sink in sinks:
        for source in sources:
            # Only annotate pairs that actually need it (existing transitive
            # dependency or a group conflict), unless forced.
            needs_nosync = force or (
                source in rec_dep_map[sink]
                or insns_in_conflicting_groups(source, sink))
            if not needs_nosync:
                continue
            nosync_to_add[sink].add((source, scope))
            if bidirectional:
                nosync_to_add[source].add((sink, scope))
    if not nosync_to_add and not empty_ok:
        raise LoopyError("No nosync annotations were added as a result "
            "of this call. add_nosync will (by default) only add them to "
            "accompany existing depencies or group exclusions. Maybe you want "
            "to pass force=True?")
    new_instructions = list(kernel.instructions)
    for i, insn in enumerate(new_instructions):
        if insn.id in nosync_to_add:
            new_instructions[i] = insn.copy(no_sync_with=insn.no_sync_with
                    | frozenset(nosync_to_add[insn.id]))
    return kernel.copy(instructions=new_instructions)
@iterate_over_kernels_if_given_program
def uniquify_instruction_ids(kernel):
from loopy.kernel.creation import UniqueName
insn_ids = set(
insn.id for insn in kernel.instructions
if insn.id is not None and not isinstance(insn.id, UniqueName))
from pytools import UniqueNameGenerator
insn_id_gen = UniqueNameGenerator(insn_ids)
new_instructions = []
for insn in kernel.instructions:
if insn.id is None:
new_instructions.append(
insn.copy(id=insn_id_gen("insn")))
elif isinstance(insn.id, UniqueName):
new_instructions.append(
insn.copy(id=insn_id_gen(insn.id.name)))
else:
new_instructions.append(insn)
return kernel.copy(instructions=new_instructions)
| true | true |
f731121b0a99cb58789c4b1de9a36c3004b171ee | 442 | py | Python | Inventory/views.py | DivyaKarunakaran/Inventory | ac883f087a5204832349e0fe3ed3692bc3d413c2 | [
"bzip2-1.0.6"
] | null | null | null | Inventory/views.py | DivyaKarunakaran/Inventory | ac883f087a5204832349e0fe3ed3692bc3d413c2 | [
"bzip2-1.0.6"
] | null | null | null | Inventory/views.py | DivyaKarunakaran/Inventory | ac883f087a5204832349e0fe3ed3692bc3d413c2 | [
"bzip2-1.0.6"
] | null | null | null | from django.shortcuts import render
from django.http import Http404
from Inventory.models import Item
def index(request):
    """Render the inventory front page, listing items still in stock
    (i.e. whose amount is non-zero)."""
    in_stock = Item.objects.exclude(amount=0)
    context = {'items': in_stock}
    return render(request, 'Inventory/index.html', context)
def item_detail(request, id):
    """Render the detail page for a single item.

    Raises :class:`Http404` when no item with the given id exists.
    """
    try:
        item = Item.objects.get(id=id)
    except Item.DoesNotExist:
        raise Http404('This item does not exist')
    context = {'item': item}
    return render(request, 'Inventory/item_detail.html', context)
| 24.555556 | 53 | 0.751131 | from django.shortcuts import render
from django.http import Http404
from Inventory.models import Item
def index(request):
items=Item.objects.exclude(amount=0)
return render(request,'Inventory/index.html',{
'items':items,})
def item_detail(request, id):
try:
item=Item.objects.get(id=id)
except Item.DoesNotExist:
raise Http404('This item does not exist')
return render(request,'Inventory/item_detail.html',{
'item':item,
})
| true | true |
f731144c606f8f088d759bd0b3022acc7a14317e | 6,296 | py | Python | ckan/lib/formatters.py | okfde/ckankrzn | df4c1ed624f6751ac2a8f03527ff19e448d27dfb | [
"Apache-2.0"
] | 4 | 2017-06-12T15:18:30.000Z | 2019-10-11T15:12:43.000Z | ckan/lib/formatters.py | okfde/ckankrzn | df4c1ed624f6751ac2a8f03527ff19e448d27dfb | [
"Apache-2.0"
] | 64 | 2017-05-14T22:15:53.000Z | 2020-03-08T15:26:49.000Z | ckan/lib/formatters.py | okfde/ckankrzn | df4c1ed624f6751ac2a8f03527ff19e448d27dfb | [
"Apache-2.0"
] | 2 | 2018-09-08T08:02:25.000Z | 2020-04-24T13:02:06.000Z | # encoding: utf-8
import datetime
import pytz
from babel import numbers
import ckan.lib.i18n as i18n
from ckan.common import _, ungettext
##################################################
# #
# Month translations #
# #
##################################################
# Each month name is wrapped in a function so the gettext call is deferred
# until render time, i.e. the locale active for the current request is used.
def _month_jan():
    return _('January')
def _month_feb():
    return _('February')
def _month_mar():
    return _('March')
def _month_apr():
    return _('April')
def _month_may():
    return _('May')
def _month_june():
    return _('June')
def _month_july():
    return _('July')
def _month_aug():
    return _('August')
def _month_sept():
    return _('September')
def _month_oct():
    return _('October')
def _month_nov():
    return _('November')
def _month_dec():
    return _('December')
# _MONTH_FUNCTIONS provides an easy way to get a localised month via
# _MONTH_FUNCTIONS[month]() where months are zero based ie jan = 0, dec = 11
_MONTH_FUNCTIONS = [_month_jan, _month_feb, _month_mar, _month_apr,
                    _month_may, _month_june, _month_july, _month_aug,
                    _month_sept, _month_oct, _month_nov, _month_dec]
def localised_nice_date(datetime_, show_date=False, with_hours=False):
    ''' Returns a friendly localised unicode representation of a datetime.
    :param datetime_: The date to format
    :type datetime_: datetime
    :param show_date: Show date not 2 days ago etc
    :type show_date: bool
    :param with_hours: should the `hours:mins` be shown for dates
    :type with_hours: bool
    :rtype: string
    '''
    def months_between(date1, date2):
        # Number of whole months from date1 to date2 (order-insensitive).
        if date1 > date2:
            date1, date2 = date2, date1
        m1 = date1.year * 12 + date1.month
        m2 = date2.year * 12 + date2.month
        months = m2 - m1
        if date1.day > date2.day:
            months -= 1
        elif date1.day == date2.day:
            # Compare time-of-day in seconds.  BUG FIX: minutes must be
            # weighted by 60, otherwise e.g. 10:59:00 and 10:01:58 compared
            # equal (59 + 0 == 1 + 58).
            seconds1 = date1.hour * 3600 + date1.minute * 60 + date1.second
            seconds2 = date2.hour * 3600 + date2.minute * 60 + date2.second
            if seconds1 > seconds2:
                months -= 1
        return months
    if not show_date:
        now = datetime.datetime.utcnow()
        # Make *now* comparable to *datetime_* (both aware, same tz).
        if datetime_.tzinfo is not None:
            now = now.replace(tzinfo=datetime_.tzinfo)
        else:
            now = now.replace(tzinfo=pytz.utc)
            datetime_ = datetime_.replace(tzinfo=pytz.utc)
        date_diff = now - datetime_
        days = date_diff.days
        if days < 1 and now > datetime_:
            # less than one day
            seconds = date_diff.seconds
            if seconds < 3600:
                # less than one hour
                if seconds < 60:
                    return _('Just now')
                else:
                    # BUG FIX: use floor division — Python 3 true division
                    # would produce e.g. "1.5 minutes ago".
                    return ungettext('{mins} minute ago', '{mins} minutes ago',
                                     seconds // 60).format(mins=seconds // 60)
            else:
                return ungettext('{hours} hour ago', '{hours} hours ago',
                                 seconds // 3600).format(hours=seconds // 3600)
        # more than one day
        months = months_between(datetime_, now)
        if months < 1:
            return ungettext('{days} day ago', '{days} days ago',
                             days).format(days=days)
        if months < 13:
            return ungettext('{months} month ago', '{months} months ago',
                             months).format(months=months)
        return ungettext('over {years} year ago', 'over {years} years ago',
                         months // 12).format(years=months // 12)
    # actual date
    details = {
        'min': datetime_.minute,
        'hour': datetime_.hour,
        'day': datetime_.day,
        'year': datetime_.year,
        'month': _MONTH_FUNCTIONS[datetime_.month - 1](),
        'timezone': datetime_.tzname(),
    }
    if with_hours:
        return (
            # NOTE: This is for translating dates like `April 24, 2013, 10:45 (Europe/Zurich)`
            _('{month} {day}, {year}, {hour:02}:{min:02} ({timezone})') \
            .format(**details))
    else:
        return (
            # NOTE: This is for translating dates like `April 24, 2013`
            _('{month} {day}, {year}').format(**details))
def localised_number(number):
    ''' Returns a localised unicode representation of number '''
    # Delegates to Babel so grouping separators follow the active locale.
    return numbers.format_number(number, locale=i18n.get_lang())
def localised_filesize(number):
    ''' Returns a localised unicode representation of a number in bytes, MiB
    etc '''
    def rnd(number, divisor):
        # round to 1 decimal place
        return localised_number(float(number * 10 / divisor) / 10)
    # Check the largest unit first and fall through to smaller ones.
    if number >= 1024 ** 4:
        return _('{tebibytes} TiB').format(tebibytes=rnd(number, 1024 ** 4))
    if number >= 1024 ** 3:
        return _('{gibibytes} GiB').format(gibibytes=rnd(number, 1024 ** 3))
    if number >= 1024 ** 2:
        return _('{mebibytes} MiB').format(mebibytes=rnd(number, 1024 ** 2))
    if number >= 1024:
        return _('{kibibytes} KiB').format(kibibytes=rnd(number, 1024))
    return _('{bytes} bytes').format(bytes=localised_number(number))
def localised_SI_number(number):
    ''' Returns a localised unicode representation of a number in SI format
    eg 14700 becomes 14.7k '''
    def rnd(number, divisor):
        # round to 1 decimal place
        return localised_number(float(number * 10 / divisor) / 10)
    # Check the largest magnitude first and fall through to smaller ones.
    if number >= 1000 ** 8:
        return _('{y}Y').format(y=rnd(number, 1000 ** 8))
    if number >= 1000 ** 7:
        return _('{z}Z').format(z=rnd(number, 1000 ** 7))
    if number >= 1000 ** 6:
        return _('{e}E').format(e=rnd(number, 1000 ** 6))
    if number >= 1000 ** 5:
        return _('{p}P').format(p=rnd(number, 1000 ** 5))
    if number >= 1000 ** 4:
        return _('{t}T').format(t=rnd(number, 1000 ** 4))
    if number >= 1000 ** 3:
        return _('{g}G').format(g=rnd(number, 1000 ** 3))
    if number >= 1000 ** 2:
        return _('{m}M').format(m=rnd(number, 1000 ** 2))
    if number >= 1000:
        return _('{k}k').format(k=rnd(number, 1000))
    return _('{n}').format(n=localised_number(number))
| 30.563107 | 94 | 0.560038 |
import datetime
import pytz
from babel import numbers
import ckan.lib.i18n as i18n
from ckan.common import _, ungettext
ys)
if months < 13:
return ungettext('{months} month ago', '{months} months ago',
months).format(months=months)
return ungettext('over {years} year ago', 'over {years} years ago',
months / 12).format(years=months / 12)
details = {
'min': datetime_.minute,
'hour': datetime_.hour,
'day': datetime_.day,
'year': datetime_.year,
'month': _MONTH_FUNCTIONS[datetime_.month - 1](),
'timezone': datetime_.tzname(),
}
if with_hours:
return (
_('{month} {day}, {year}, {hour:02}:{min:02} ({timezone})') \
.format(**details))
else:
return (
_('{month} {day}, {year}').format(**details))
def localised_number(number):
return numbers.format_number(number, locale=i18n.get_lang())
def localised_filesize(number):
def rnd(number, divisor):
return localised_number(float(number * 10 / divisor) / 10)
if number < 1024:
return _('{bytes} bytes').format(bytes=localised_number(number))
elif number < 1024 ** 2:
return _('{kibibytes} KiB').format(kibibytes=rnd(number, 1024))
elif number < 1024 ** 3:
return _('{mebibytes} MiB').format(mebibytes=rnd(number, 1024 ** 2))
elif number < 1024 ** 4:
return _('{gibibytes} GiB').format(gibibytes=rnd(number, 1024 ** 3))
else:
return _('{tebibytes} TiB').format(tebibytes=rnd(number, 1024 ** 4))
def localised_SI_number(number):
def rnd(number, divisor):
return localised_number(float(number * 10 / divisor) / 10)
if number < 1000:
return _('{n}').format(n=localised_number(number))
elif number < 1000 ** 2:
return _('{k}k').format(k=rnd(number, 1000))
elif number < 1000 ** 3:
return _('{m}M').format(m=rnd(number, 1000 ** 2))
elif number < 1000 ** 4:
return _('{g}G').format(g=rnd(number, 1000 ** 3))
elif number < 1000 ** 5:
return _('{t}T').format(t=rnd(number, 1000 ** 4))
elif number < 1000 ** 6:
return _('{p}P').format(p=rnd(number, 1000 ** 5))
elif number < 1000 ** 7:
return _('{e}E').format(e=rnd(number, 1000 ** 6))
elif number < 1000 ** 8:
return _('{z}Z').format(z=rnd(number, 1000 ** 7))
else:
return _('{y}Y').format(y=rnd(number, 1000 ** 8))
| true | true |
f731159d8d119b22890a43bc26246da1964a17db | 3,103 | py | Python | alipay/aop/api/domain/AlipayOpenMiniAmpeTracerSyncModel.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | null | null | null | alipay/aop/api/domain/AlipayOpenMiniAmpeTracerSyncModel.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | null | null | null | alipay/aop/api/domain/AlipayOpenMiniAmpeTracerSyncModel.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayOpenMiniAmpeTracerSyncModel(object):
    """Request model for the AMPE tracer sync API.

    All fields are optional (None by default); nested models exposing a
    ``to_alipay_dict`` method are serialised recursively.  Only truthy
    fields are included in the wire dict.
    """

    # Serialisable field names, in wire-dict order.  The per-field
    # copy-paste bodies of to_alipay_dict/from_alipay_dict are replaced
    # by loops over this tuple.
    _FIELD_NAMES = ('device_id', 'product_id', 'spm_a', 'spm_b', 'spm_c',
                    'spm_d')

    def __init__(self):
        self._device_id = None
        self._product_id = None
        self._spm_a = None
        self._spm_b = None
        self._spm_c = None
        self._spm_d = None

    @property
    def device_id(self):
        return self._device_id

    @device_id.setter
    def device_id(self, value):
        self._device_id = value

    @property
    def product_id(self):
        return self._product_id

    @product_id.setter
    def product_id(self, value):
        self._product_id = value

    @property
    def spm_a(self):
        return self._spm_a

    @spm_a.setter
    def spm_a(self, value):
        self._spm_a = value

    @property
    def spm_b(self):
        return self._spm_b

    @spm_b.setter
    def spm_b(self, value):
        self._spm_b = value

    @property
    def spm_c(self):
        return self._spm_c

    @spm_c.setter
    def spm_c(self, value):
        self._spm_c = value

    @property
    def spm_d(self):
        return self._spm_d

    @spm_d.setter
    def spm_d(self, value):
        self._spm_d = value

    def to_alipay_dict(self):
        """Serialise truthy fields to a plain dict, recursing into nested
        models via their ``to_alipay_dict`` method."""
        params = dict()
        for name in self._FIELD_NAMES:
            value = getattr(self, name)
            if not value:
                # Falsy fields (None, '') are omitted, matching the
                # original per-field `if self.x:` checks.
                continue
            if hasattr(value, 'to_alipay_dict'):
                value = value.to_alipay_dict()
            params[name] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a model from a wire dict; returns None for a falsy input."""
        if not d:
            return None
        o = AlipayOpenMiniAmpeTracerSyncModel()
        for name in AlipayOpenMiniAmpeTracerSyncModel._FIELD_NAMES:
            if name in d:
                setattr(o, name, d[name])
        return o
| 26.75 | 71 | 0.548179 |
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayOpenMiniAmpeTracerSyncModel(object):
def __init__(self):
self._device_id = None
self._product_id = None
self._spm_a = None
self._spm_b = None
self._spm_c = None
self._spm_d = None
@property
def device_id(self):
return self._device_id
@device_id.setter
def device_id(self, value):
self._device_id = value
@property
def product_id(self):
return self._product_id
@product_id.setter
def product_id(self, value):
self._product_id = value
@property
def spm_a(self):
return self._spm_a
@spm_a.setter
def spm_a(self, value):
self._spm_a = value
@property
def spm_b(self):
return self._spm_b
@spm_b.setter
def spm_b(self, value):
self._spm_b = value
@property
def spm_c(self):
return self._spm_c
@spm_c.setter
def spm_c(self, value):
self._spm_c = value
@property
def spm_d(self):
return self._spm_d
@spm_d.setter
def spm_d(self, value):
self._spm_d = value
def to_alipay_dict(self):
params = dict()
if self.device_id:
if hasattr(self.device_id, 'to_alipay_dict'):
params['device_id'] = self.device_id.to_alipay_dict()
else:
params['device_id'] = self.device_id
if self.product_id:
if hasattr(self.product_id, 'to_alipay_dict'):
params['product_id'] = self.product_id.to_alipay_dict()
else:
params['product_id'] = self.product_id
if self.spm_a:
if hasattr(self.spm_a, 'to_alipay_dict'):
params['spm_a'] = self.spm_a.to_alipay_dict()
else:
params['spm_a'] = self.spm_a
if self.spm_b:
if hasattr(self.spm_b, 'to_alipay_dict'):
params['spm_b'] = self.spm_b.to_alipay_dict()
else:
params['spm_b'] = self.spm_b
if self.spm_c:
if hasattr(self.spm_c, 'to_alipay_dict'):
params['spm_c'] = self.spm_c.to_alipay_dict()
else:
params['spm_c'] = self.spm_c
if self.spm_d:
if hasattr(self.spm_d, 'to_alipay_dict'):
params['spm_d'] = self.spm_d.to_alipay_dict()
else:
params['spm_d'] = self.spm_d
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayOpenMiniAmpeTracerSyncModel()
if 'device_id' in d:
o.device_id = d['device_id']
if 'product_id' in d:
o.product_id = d['product_id']
if 'spm_a' in d:
o.spm_a = d['spm_a']
if 'spm_b' in d:
o.spm_b = d['spm_b']
if 'spm_c' in d:
o.spm_c = d['spm_c']
if 'spm_d' in d:
o.spm_d = d['spm_d']
return o
| true | true |
f7311738dee08b938940c7168a1cd11f92e41b95 | 338 | py | Python | cn/opencv/chapter01/01Read.py | Jasonandy/Python-X | 2f02b9a17bd5495dd1f8746b191f11ec2d7bccbe | [
"Apache-2.0"
] | null | null | null | cn/opencv/chapter01/01Read.py | Jasonandy/Python-X | 2f02b9a17bd5495dd1f8746b191f11ec2d7bccbe | [
"Apache-2.0"
] | null | null | null | cn/opencv/chapter01/01Read.py | Jasonandy/Python-X | 2f02b9a17bd5495dd1f8746b191f11ec2d7bccbe | [
"Apache-2.0"
] | 2 | 2019-06-18T05:53:26.000Z | 2019-06-19T03:26:02.000Z | """
read picture
"""
import cv2
def read_picture(path):
    """
    Read the image at *path* and display it in a named window until any
    key is pressed; the window is destroyed afterwards.
    """
    image = cv2.imread(path)
    window_name = "OPEN_CV_READ_IMG"
    cv2.namedWindow(window_name)
    cv2.imshow(window_name, image)
    # Block until a key press, then tear the window down.
    cv2.waitKey(0)
    cv2.destroyAllWindows()
if __name__ == '__main__':
    # Demo: display the bundled Lena test image.
    path = "../media/lena/lena.jpg"
    read_picture(path)
| 15.363636 | 39 | 0.615385 | import cv2
def read_picture(path):
img = cv2.imread(path)
cv2.namedWindow("OPEN_CV_READ_IMG")
cv2.imshow("OPEN_CV_READ_IMG", img)
cv2.waitKey(0)
cv2.destroyAllWindows()
if __name__ == '__main__':
path = "../media/lena/lena.jpg"
read_picture(path)
| true | true |
f73117825794ffc13c74bb610938ec6e7e2bbad0 | 1,828 | py | Python | pymenu/elements.py | feftio/pymenu | 846b916ce55548c53f43f3642d69e6c64fd9d774 | [
"MIT"
] | null | null | null | pymenu/elements.py | feftio/pymenu | 846b916ce55548c53f43f3642d69e6c64fd9d774 | [
"MIT"
] | null | null | null | pymenu/elements.py | feftio/pymenu | 846b916ce55548c53f43f3642d69e6c64fd9d774 | [
"MIT"
] | null | null | null | from __future__ import annotations
import typing as t
from abc import ABC, abstractmethod
from pymenu.listener import GroupListener, Listener, ListenerInterface
from pymenu.triggers import Trigger
from rich import print
class ElementInterface(ListenerInterface, ABC):
    """Contract for anything that can be rendered inside a menu."""
    @abstractmethod
    def render(self) -> None:
        pass
class Element(ElementInterface, Listener):
    """A single renderable element that can react to triggers."""
    pass
class GroupElement(ElementInterface, GroupListener):
    """Base class for elements that hold child elements."""
    def __init__(self, childs: t.Tuple[ElementInterface]):
        self.childs = childs
class Group(GroupElement):
    """A container element that renders each of its children in order."""
    def __init__(self, *childs: t.Tuple[ElementInterface]):
        super().__init__(childs)
    def render(self) -> None:
        for element in self.childs:
            element.render()
class Item(Element):
    """A menu entry with an optional label and an optional action fired by
    *triggers*."""
    def __init__(self, label: t.Optional[str] = None, action: t.Optional[t.Callable] = None, triggers: t.Optional[Trigger] = None):
        self.label: t.Optional[str] = label
        self.listener(action, triggers)
    def render(self) -> None:
        # Label-less items render nothing (useful for purely reactive items).
        if self.label is None:
            return
        print(self.label)
class Hidden(Element):
    """An invisible element: carries a listener but renders nothing."""
    def __init__(self, action: t.Optional[t.Callable] = None, triggers: t.Optional[t.Tuple[Trigger]] = None):
        self.listener(action, triggers)
    def render(self) -> None:
        pass
class Redirect(Item):
    """A menu entry intended to navigate to the menu named *to*."""
    def __init__(self, to: str, label: t.Optional[str] = None, triggers: t.Optional[t.Tuple[Trigger]] = None):
        super().__init__(label, self._action, triggers)
        self.to = to
    def _action(self) -> None:
        # NOTE(review): navigation is not implemented yet.
        pass
class Back(Item):
    """A menu entry intended to return to the previous menu."""
    def __init__(self, label: t.Optional[str] = None, triggers: t.Optional[t.Tuple[Trigger]] = None):
        super().__init__(label, self._action, triggers)
    def _action(self) -> None:
        # NOTE(review): back-navigation is not implemented yet.
        pass
| 26.882353 | 131 | 0.669037 | from __future__ import annotations
import typing as t
from abc import ABC, abstractmethod
from pymenu.listener import GroupListener, Listener, ListenerInterface
from pymenu.triggers import Trigger
from rich import print
class ElementInterface(ListenerInterface, ABC):
@abstractmethod
def render(self) -> None:
pass
class Element(ElementInterface, Listener):
pass
class GroupElement(ElementInterface, GroupListener):
def __init__(self, childs: t.Tuple[ElementInterface]):
self.childs = childs
class Group(GroupElement):
def __init__(self, *childs: t.Tuple[ElementInterface]):
super().__init__(childs)
def render(self) -> None:
for element in self.childs:
element.render()
class Item(Element):
def __init__(self, label: t.Optional[str] = None, action: t.Optional[t.Callable] = None, triggers: t.Optional[Trigger] = None):
self.label: t.Optional[str] = label
self.listener(action, triggers)
def render(self) -> None:
if self.label is None:
return
print(self.label)
class Hidden(Element):
def __init__(self, action: t.Optional[t.Callable] = None, triggers: t.Optional[t.Tuple[Trigger]] = None):
self.listener(action, triggers)
def render(self) -> None:
pass
class Redirect(Item):
def __init__(self, to: str, label: t.Optional[str] = None, triggers: t.Optional[t.Tuple[Trigger]] = None):
super().__init__(label, self._action, triggers)
self.to = to
def _action(self) -> None:
pass
class Back(Item):
def __init__(self, label: t.Optional[str] = None, triggers: t.Optional[t.Tuple[Trigger]] = None):
super().__init__(label, self._action, triggers)
def _action(self) -> None:
pass
| true | true |
f7311956fa5e3a221660dc56d6dcaaef28e32f55 | 1,662 | py | Python | tests/test_calculator.py | amard33p/minimal-pytest-project | 45844a70d8c8a6499a038a6eb99bf7b7b78ccdd8 | [
"Apache-2.0"
] | 2 | 2020-12-08T14:35:19.000Z | 2022-01-18T21:35:14.000Z | tests/test_calculator.py | amard33p/minimal-pytest-project | 45844a70d8c8a6499a038a6eb99bf7b7b78ccdd8 | [
"Apache-2.0"
] | null | null | null | tests/test_calculator.py | amard33p/minimal-pytest-project | 45844a70d8c8a6499a038a6eb99bf7b7b78ccdd8 | [
"Apache-2.0"
] | 1 | 2021-09-13T12:36:32.000Z | 2021-09-13T12:36:32.000Z | import pytest
import mymath.calculator
from mymath.calculator import add, div, filesum, fileconcat, approx_eq
# Simple tests
# ----------------------------------------------------
def test_add():
    # Smoke test: simple integer addition.
    assert add(1, 2) == 3
def test_div():
    # Exact division, including a zero numerator.
    assert div(4, 2) == 2
    assert div(0, 2) == 0
# Catching exceptions
# ------------------------------------------------------------------------------
def test_div_by_zero():
    # Division by zero must raise ValueError with a descriptive message.
    with pytest.raises(ValueError) as ex:
        div(1, 0)
    assert str(ex.value) == 'Cannot divide by zero!'
# Tests organized in class
# ------------------------------------------------------------------------------
class TestCalculator:
    """The same calculator checks as above, grouped in a test class."""
    def test_add(self):
        assert add(1, 2) == 3
    def test_add_zero(self):
        # Zero is the additive identity on either side.
        assert add(0, 0) == 0
        assert add(1, 0) == 1
        assert add(0, 2) == 2
    def test_div(self):
        assert div(4, 2) == 2
# Fixtures
# ------------------------------------------------------------------------------
@pytest.fixture(scope="function")
def numbers_file():
    """Yield an open handle to the sample numbers file and close it on
    teardown.

    BUG FIX: the previous version defined a ``fin()`` closure but never
    registered it (e.g. via ``request.addfinalizer``), so the file handle
    was leaked after every test.  A yield fixture closes it reliably.
    """
    f = open("tests/data/numbers.txt")
    yield f
    f.close()
def test_filesum(numbers_file):
    # Expects tests/data/numbers.txt to sum to 6 (presumably the
    # digits 1, 2, 3 — verify against the fixture file).
    assert filesum(numbers_file) == 6
def test_fileconcat(numbers_file):
    # Concatenating the same digits should yield the number 123.
    assert fileconcat(numbers_file) == 123
# Monkey patching, Mocking
# ------------------------------------------------------------------------------
def test_approx_eq(monkeypatch):
    """Monkeypatch the calculator's eps() so approx_eq tolerates a gap of 2."""
    def mock_eps(machine):
        return 2
    assert approx_eq(1, 1)
    # Patch by dotted path so the module under test sees the replacement.
    monkeypatch.setattr('mymath.calculator.eps', mock_eps)
    assert approx_eq(1, 2)
| 23.742857 | 80 | 0.493983 | import pytest
import mymath.calculator
from mymath.calculator import add, div, filesum, fileconcat, approx_eq
def test_add():
assert add(1, 2) == 3
def test_div():
assert div(4, 2) == 2
assert div(0, 2) == 0
def test_div_by_zero():
with pytest.raises(ValueError) as ex:
div(1, 0)
assert str(ex.value) == 'Cannot divide by zero!'
class TestCalculator:
def test_add(self):
assert add(1, 2) == 3
def test_add_zero(self):
assert add(0, 0) == 0
assert add(1, 0) == 1
assert add(0, 2) == 2
def test_div(self):
assert div(4, 2) == 2
@pytest.fixture(scope="function")
def numbers_file():
f = open("tests/data/numbers.txt")
def fin():
f.close()
return f
def test_filesum(numbers_file):
assert filesum(numbers_file) == 6
def test_fileconcat(numbers_file):
assert fileconcat(numbers_file) == 123
def test_approx_eq(monkeypatch):
def mock_eps(machine):
return 2
assert approx_eq(1, 1)
monkeypatch.setattr('mymath.calculator.eps', mock_eps)
assert approx_eq(1, 2)
| true | true |
f731198351b74c2e46960cec248a5589d6459d40 | 18,893 | py | Python | redis_in_action/redis_action_ch05.py | ftconan/python3 | eb63ba33960072f792ecce6db809866b38c402f8 | [
"MIT"
] | 1 | 2018-12-19T22:07:56.000Z | 2018-12-19T22:07:56.000Z | redis_in_action/redis_action_ch05.py | ftconan/python3 | eb63ba33960072f792ecce6db809866b38c402f8 | [
"MIT"
] | 12 | 2020-03-14T05:32:26.000Z | 2022-03-12T00:08:49.000Z | redis_in_action/redis_action_ch05.py | ftconan/python3 | eb63ba33960072f792ecce6db809866b38c402f8 | [
"MIT"
] | 1 | 2018-12-19T22:08:00.000Z | 2018-12-19T22:08:00.000Z | """
@author: magician
@file: redis_action_ch05.py
@date: 2021/11/22
"""
import bisect
import contextlib
import csv
import functools
import json
import logging
import random
import threading
import time
import unittest
import uuid
import redis
from datetime import datetime
# Flag flipped to True to stop the long-running daemon loops below.
QUIT = False
# How many time slices to retain per counter precision.
SAMPLE_COUNT = 100
config_connection = None

# Map logging levels to the lowercase severity names embedded in Redis key
# names; the update() below also maps each name to itself so callers may
# pass either a logging level or a plain string.
SEVERITY = {
    logging.DEBUG: 'debug',
    logging.INFO: 'info',
    # BUG FIX: was the typo 'waring', which made logging.WARNING and the
    # string 'warning' map to *different* key names.
    logging.WARNING: 'warning',
    logging.ERROR: 'error',
    logging.CRITICAL: 'critical',
}
SEVERITY.update((name, name) for name in list(SEVERITY.values()))

# Counter slice widths in seconds: 1s, 5s, 1min, 5min, 1h, 5h, 1 day.
PRECISION = [1, 5, 60, 300, 3600, 18000, 86400]

LAST_CHECKED = None
IS_UNDER_MAINTENANCE = False
CONFIGS = {}
CHECKED = {}
REDIS_CONNECTIONS = {}
def to_bytes(x):
    """
    Normalise *x* to bytes.
    @param x: value to normalise
    @return: UTF-8 encoded bytes for str input; anything else unchanged
    """
    if isinstance(x, str):
        return x.encode()
    return x
def to_str(x):
    """
    Normalise *x* to str.
    @param x: value to normalise
    @return: decoded str for bytes input; anything else unchanged
    """
    if isinstance(x, bytes):
        return x.decode()
    return x
def log_recent(conn, name, message, severity=logging.INFO, pipe=None):
    """
    Prepend *message* (timestamped) to the capped "recent" log list for
    *name*/*severity*, keeping at most the 100 newest entries.
    @param conn: redis connection, used only when *pipe* is not supplied
    @param name: log name, embedded in the key
    @param message: message text
    @param severity: logging level or severity name
    @param pipe: optional pipeline to queue the commands on
    """
    sev = str(SEVERITY.get(severity, severity)).lower()
    key = 'recent:%s:%s' % (name, sev)
    stamped = time.asctime() + ' ' + message
    # NOTE: an *empty* redis pipeline is falsy, so `pipe or ...` also
    # replaces an empty passed-in pipeline — original behaviour kept.
    pipe = pipe or conn.pipeline()
    pipe.lpush(key, stamped)
    pipe.ltrim(key, 0, 99)
    pipe.execute()
def log_common(conn, name, message, severity=logging.INFO, timeout=5):
    """
    Record *message* in the hourly "common messages" zset and in the recent
    log, rotating the zset once per hour (previous hour kept under
    ``<dest>:last``, its start timestamp under ``<dest>:pstart``).
    @param conn: redis connection
    @param name: log name
    @param message: message text
    @param severity: logging level or severity name
    @param timeout: how long to keep retrying on WATCH contention
    """
    severity = str(SEVERITY.get(severity, severity)).lower()
    destination = 'common:%s:%s' % (name, severity)
    start_key = destination + ':start'
    pipe = conn.pipeline()
    end = time.time() + timeout
    while time.time() < end:
        try:
            # WATCH the hour marker so a concurrent rotation retries us.
            pipe.watch(start_key)
            now = datetime.utcnow().timetuple()
            hour_start = datetime(*now[:4]).isoformat()
            existing = pipe.get(start_key)
            pipe.multi()
            if existing and existing < to_bytes(hour_start):
                # A new hour has begun: archive last hour's data.
                pipe.rename(destination, destination + ':last')
                # BUG FIX: the second rename must move *start_key* —
                # *destination* was already renamed away on the line above.
                pipe.rename(start_key, destination + ':pstart')
                pipe.set(start_key, hour_start)
            elif not existing:
                pipe.set(start_key, hour_start)
            pipe.zincrby(destination, 1, message)
            # Reuse the pipeline so the recent log rides the same MULTI.
            log_recent(pipe, name, message, severity, pipe)
            return
        except redis.exceptions.WatchError:
            continue
def update_counter(conn, name, count=1, now=None):
    """
    Add *count* to counter *name* in the current time slice of every
    precision, and register each precision counter in the 'known:' index.
    @param conn: redis connection
    @param name: counter name
    @param count: increment (default 1)
    @param now: unix timestamp override (defaults to time.time())
    """
    now = now or time.time()
    pipe = conn.pipeline()
    for prec in PRECISION:
        # Start of the time slice this event falls into.
        pnow = int(now / prec) * prec
        hash = '%s:%s' % (prec, name)
        pipe.zadd('known:', {hash: 0})
        # BUG FIX: key must be 'count:' + hash (no space after the colon);
        # the stray space made get_counter()/clean_counters() read a
        # different key than the one written here.
        pipe.hincrby('count:' + hash, pnow, count)
    pipe.execute()
def get_counter(conn, name, precision):
    """
    Fetch all (timestamp, count) samples for one counter at one precision,
    sorted by timestamp.
    @param conn: redis connection
    @param name: counter name
    @param precision: slice width in seconds
    @return: sorted list of (int timestamp, int count) tuples
    """
    hash = "%s:%s" % (precision, name)
    data = conn.hgetall('count:' + hash)
    return sorted((int(ts), int(cnt)) for ts, cnt in data.items())
def clean_counters(conn):
    """
    Daemon loop: periodically trim samples older than SAMPLE_COUNT slices
    from every known counter, removing counters whose hashes become empty.

    Runs until the module-level QUIT flag is set.  Coarser-precision
    counters are cleaned proportionally less often.
    @param conn: redis connection
    """
    pipe = conn.pipeline(True)
    passes = 0
    while not QUIT:
        start = time.time()
        index = 0
        while index < conn.zcard('known:'):
            # BUG FIX: fetch the member at *index* with ZRANGE; ZCARD only
            # returns the set's cardinality and accepts no range arguments.
            hash = conn.zrange('known:', index, index)
            index += 1
            if not hash:
                break
            hash = hash[0]
            prec = int(hash.partition(b':')[0])
            # Clean counters with >= 1 minute precision only every
            # (prec // 60)'th pass.
            bprec = int(prec // 60) or 1
            if passes % bprec:
                continue
            hkey = 'count:' + to_str(hash)
            cutoff = time.time() - SAMPLE_COUNT * prec
            samples = list(map(int, conn.hkeys(hkey)))
            samples.sort()
            remove = bisect.bisect_right(samples, cutoff)
            if remove:
                conn.hdel(hkey, *samples[:remove])
                if remove == len(samples):
                    try:
                        # Only drop the counter from 'known:' if nobody
                        # wrote to it between the hdel and this check.
                        pipe.watch(hkey)
                        if not pipe.hlen(hkey):
                            pipe.multi()
                            pipe.zrem('known:', hash)
                            pipe.execute()
                            index -= 1
                        else:
                            pipe.unwatch()
                    except redis.exceptions.WatchError:
                        pass
        passes += 1
        # Aim for roughly one pass per minute, sleeping at least 1 second.
        duration = min(int(time.time() - start) + 1, 60)
        time.sleep(max(60 - duration, 1))
def update_stats(conn, context, type, value, timeout=5):
    """Record one observation of *value* in the hourly stats zset.

    Maintains min/max/count/sum/sumsq members in ``stats:<context>:<type>``,
    rotating the previous hour's data to ``:last`` / ``:pstart`` when a new
    hour begins.  Returns the [count, sum, sumsq] scores after the update,
    or None if *timeout* seconds pass without winning the WATCH race.
    """
    destination = 'stats:%s:%s' % (context, type)
    start_key = destination + ':start'
    pipe = conn.pipeline(True)
    end = time.time() + timeout
    while time.time() < end:
        try:
            pipe.watch(start_key)
            now = datetime.utcnow().timetuple()
            hour_start = datetime(*now[:4]).isoformat()
            existing = pipe.get(start_key)
            pipe.multi()
            if not existing:
                pipe.set(start_key, hour_start)
            elif to_str(existing) < hour_start:
                # A new hour has begun: archive the previous hour's data.
                pipe.rename(destination, destination + ':last')
                pipe.rename(start_key, destination + ':pstart')
                pipe.set(start_key, hour_start)
            tkey1 = str(uuid.uuid4())
            tkey2 = str(uuid.uuid4())
            pipe.zadd(tkey1, {'min': value})
            # BUG FIX: the 'max' seed must go into tkey2.  It was written
            # into tkey1, so the max-aggregated union below merged an empty
            # set and the running maximum was never updated.
            pipe.zadd(tkey2, {'max': value})
            pipe.zunionstore(destination, [destination, tkey1], aggregate='min')
            pipe.zunionstore(destination, [destination, tkey2], aggregate='max')
            pipe.delete(tkey1, tkey2)
            pipe.zincrby(destination, 1, 'count')
            pipe.zincrby(destination, value, 'sum')
            pipe.zincrby(destination, value * value, 'sumsq')
            # The last three queued commands are count, sum, sumsq.
            return pipe.execute()[-3:]
        except redis.exceptions.WatchError:
            continue
def get_stats(conn, context, type):
    """Fetch the aggregate statistics zset for ``<context>:<type>``.

    Returns a dict keyed by the raw (bytes) member names with the stored
    scores, plus two derived entries: ``b'average'`` and the sample
    standard deviation ``b'stddev'``.
    """
    key = 'stats:%s:%s' % (context, type)
    stats = dict(conn.zrange(key, 0, -1, withscores=True))
    count = stats[b'count']
    total = stats[b'sum']
    stats[b'average'] = total / count
    # Sample variance numerator: sumsq - sum^2 / n
    variance_num = stats[b'sumsq'] - total ** 2 / count
    stats[b'stddev'] = (variance_num / (count - 1 or 1)) ** 0.5
    return stats
@contextlib.contextmanager
def access_time(conn, context):
    """Context manager: time the wrapped body and record the duration.

    On normal exit, the elapsed wall-clock time is fed into the
    'AccessTime' stats for *context*, and the context's running average is
    written into the capped ``slowest:AccessTime`` zset (top 100 kept).
    """
    began = time.time()
    yield
    elapsed = time.time() - began
    totals = update_stats(conn, context, 'AccessTime', elapsed)
    mean = totals[1] / totals[0]  # sum / count
    pipe = conn.pipeline(True)
    pipe.zadd('slowest:AccessTime', {context: mean})
    pipe.zremrangebyrank('slowest:AccessTime', 0, -101)
    pipe.execute()
def process_view(conn, callback):
    """Invoke *callback* (a view function) while timing it.

    The timing is recorded under the current ``request.path`` via
    :func:`access_time`; the callback's return value is passed through.
    """
    with access_time(conn, request.path):
        result = callback()
    return result
def ip_to_score(ip_address):
    """Map a dotted-quad IPv4 string to its 32-bit integer value.

    Example: '1.2.3.4' -> 16909060.
    """
    total = 0
    for octet in ip_address.split('.'):
        total = (total << 8) + int(octet, 10)
    return total
def import_ips_to_redis(conn, filename):
    """Load the GeoLiteCity "Blocks" CSV into the ``ip2cityid:`` zset.

    Each data row's starting IP (dotted quad or raw integer) becomes the
    score of a ``<city_id>_<row#>`` member, so a reverse range query can
    find the range an address belongs to.  Header rows are skipped.
    """
    # BUG FIX: csv.reader requires a text-mode file object in Python 3;
    # opening with 'rb' raises "iterator should return strings, not bytes".
    # newline='' is the documented mode for csv input files.
    csv_file = csv.reader(open(filename, 'r', newline=''))
    for count, row in enumerate(csv_file):
        start_ip = row[0] if row else ''
        if 'i' in start_ip.lower():
            # Header rows ("startIpNum", ...) contain the letter 'i'.
            continue
        if '.' in start_ip:
            start_ip = ip_to_score(start_ip)
        elif start_ip.isdigit():
            start_ip = int(start_ip, 10)
        else:
            continue  # not a usable starting address
        # Append the row index so duplicate city ids remain unique members.
        city_id = row[2] + '_' + str(count)
        conn.zadd('ip2cityid:', {city_id: start_ip})
def import_cities_to_redis(conn, filename):
    """Load the GeoLiteCity "Location" CSV into the ``cityid2city:`` hash.

    Valid data rows have a numeric city id in column 0; header and
    copyright rows are skipped.  Each entry maps the city id to a JSON
    [city, country, region] triple.
    """
    # BUG FIX: open in text mode with the file's latin-1 encoding —
    # csv.reader cannot consume a binary stream in Python 3, and the rows
    # then arrive already decoded (the per-field .decode() is gone).
    for row in csv.reader(open(filename, 'r', encoding='latin-1', newline='')):
        # BUG FIX: keep rows whose id *is* numeric.  The 'not' was missing,
        # which skipped every valid data row and kept only the junk rows
        # (cf. the reference implementation in the comment block below).
        if len(row) < 4 or not row[0].isdigit():
            continue
        city_id = row[0]
        country = row[1]
        region = row[2]
        city = row[3]
        conn.hset('cityid2city:', city_id, json.dumps([city, country, region]))
def find_city_by_ip(conn, ip_address):
    """Return the [city, country, region] list for *ip_address*, or None.

    Accepts either a dotted-quad string or an already-converted integer
    score.  The owning range is the member with the highest starting-IP
    score at or below the address.
    """
    if isinstance(ip_address, str):
        ip_address = ip_to_score(ip_address)
    city_id = conn.zrevrangebyscore('ip2cityid:', ip_address, 0, start=0, num=1)
    if not city_id:
        return None
    # BUG FIX: redis-py returns bytes members; decode before splitting off
    # the uniquifying "_<row>" suffix (bytes.partition('_') with a str
    # separator raises TypeError).
    city_id = to_str(city_id[0]).partition('_')[0]
    return json.loads(conn.hget('cityid2city:', city_id))
def is_under_maintenance(conn):
    """Return the maintenance flag, refreshed from Redis at most once/sec.

    The value of the ``is-under-maintenance`` key is cached in module
    globals for up to one second to avoid hammering Redis on every call.
    """
    global LAST_CHECKED, IS_UNDER_MAINTENANCE
    cache_is_stale = (not LAST_CHECKED) or LAST_CHECKED < time.time() - 1
    if cache_is_stale:
        LAST_CHECKED = time.time()
        IS_UNDER_MAINTENANCE = bool(conn.get('is-under-maintenance'))
    return IS_UNDER_MAINTENANCE
def set_config(conn, type, component, config):
    """Store *config* as JSON under the key ``config:<type>:<component>``."""
    key = 'config:%s:%s' % (type, component)
    conn.set(key, json.dumps(config))
def get_config(conn, type, component, wait=1):
    """Return the cached config dict for ``config:<type>:<component>``.

    Re-reads the JSON value from Redis at most once every *wait* seconds;
    between refreshes the module-level CONFIGS cache is returned.
    """
    key = 'config:%s:%s' % (type, component)
    last_checked = CHECKED.get(key)
    if (not last_checked) or last_checked < time.time() - wait:
        CHECKED[key] = time.time()
        fetched = json.loads(conn.get(key) or '{}')
        fetched = {str(field): fetched[field] for field in fetched}
        if fetched != CONFIGS.get(key):
            CONFIGS[key] = fetched
    return CONFIGS.get(key)
def redis_connection(component, wait=1):
    """Decorator factory: inject a configured Redis connection.

    The wrapped function receives, as its first argument, a redis.Redis
    client built from the stored config for *component*.  The connection
    is (re)built whenever get_config() reports a changed configuration.
    """
    key = 'config:redis:' + component

    def wrapper(function):
        @functools.wraps(function)
        def call(*args, **kwargs):
            previous = CONFIGS.get(key, object())
            current = get_config(config_connection, 'redis', component, wait)
            if current != previous:
                # Config changed since our snapshot; reconnect.
                REDIS_CONNECTIONS[key] = redis.Redis(**current)
            return function(REDIS_CONNECTIONS.get(key), *args, **kwargs)
        return call
    return wrapper
# --------------- Below this line are helpers to test the code ----------------
class request:
    """Minimal stand-in for a web framework's request object.

    Tests assign ``request.path`` as a class attribute before calling
    process_view(), which reads it to label the access-time statistics.
    """
    pass
# # a faster version with pipelines for actual testing
# def import_ips_to_redis(conn, filename):
# csv_file = csv.reader(open(filename, 'rb'))
# pipe = conn.pipeline(False)
# for count, row in enumerate(csv_file):
# start_ip = row[0] if row else ''
# if 'i' in start_ip.lower():
# continue
# if '.' in start_ip:
# start_ip = ip_to_score(start_ip)
# elif start_ip.isdigit():
# start_ip = int(start_ip, 10)
# else:
# continue
#
# city_id = row[2] + '_' + str(count)
# pipe.zadd('ip2cityid:', {city_id: start_ip})
# if not (count + 1) % 1000:
# pipe.execute()
# pipe.execute()
#
#
# def import_cities_to_redis(conn, filename):
# pipe = conn.pipeline(False)
# for count, row in enumerate(csv.reader(open(filename, 'rb'))):
# if len(row) < 4 or not row[0].isdigit():
# continue
# row = [i.decode('latin-1') for i in row]
# city_id = row[0]
# country = row[1]
# region = row[2]
# city = row[3]
# pipe.hset('cityid2city:', city_id,
# json.dumps([city, region, country]))
# if not (count + 1) % 1000:
# pipe.execute()
# pipe.execute()
class TestCh05(unittest.TestCase):
    """Integration tests for the chapter's helpers.

    Requires a live Redis server; each test runs against database 15
    (flushed in setUp/tearDown) with the hard-coded password below.
    These are demonstration tests from the book and print their progress.
    """
    def setUp(self):
        global config_connection
        import redis
        # db=15 is used as a scratch database and wiped before each test.
        self.conn = config_connection = redis.Redis(db=15, password='123456')
        self.conn.flushdb()
    def tearDown(self):
        self.conn.flushdb()
        del self.conn
        # Restore the module-level state mutated by individual tests.
        global config_connection, QUIT, SAMPLE_COUNT
        config_connection = None
        QUIT = False
        SAMPLE_COUNT = 100
        print()
        print()
    def test_log_recent(self):
        import pprint
        conn = self.conn
        print("Let's write a few logs to the recent log")
        for msg in range(5):
            log_recent(conn, 'test', 'this is message %s' % msg)
        recent = conn.lrange('recent:test:info', 0, -1)
        print("The current recent message log has this many messages:", len(recent))
        print("Those messages include:")
        pprint.pprint(recent[:10])
        self.assertTrue(len(recent) >= 5)
    def test_log_common(self):
        import pprint
        conn = self.conn
        print("Let's write some items to the common log")
        # message-N is logged N times, so scores should reflect frequency.
        for count in range(1, 6):
            for i in range(count):
                log_common(conn, 'test', "message-%s" % count)
        common = conn.zrevrange('common:test:info', 0, -1, withscores=True)
        print("The current number of common messages is:", len(common))
        print("Those common messages are:")
        pprint.pprint(common)
        self.assertTrue(len(common) >= 5)
    def test_counters(self):
        import pprint
        global QUIT, SAMPLE_COUNT
        conn = self.conn
        print("Let's update some counters for now and a little in the future")
        now = time.time()
        for delta in range(10):
            update_counter(conn, 'test', count=random.randrange(1, 5), now=now + delta)
        counter = get_counter(conn, 'test', 1)
        print("We have some per-second counters:", len(counter))
        self.assertTrue(len(counter) >= 10)
        counter = get_counter(conn, 'test', 5)
        print("We have some per-5-second counters:", len(counter))
        print("These counters include:")
        pprint.pprint(counter[:10])
        self.assertTrue(len(counter) >= 2)
        print()
        # Monkeypatch time.time two days ahead so every sample looks stale.
        tt = time.time
        def new_tt():
            return tt() + 2 * 86400
        time.time = new_tt
        print("Let's clean out some counters by setting our sample count to 0")
        SAMPLE_COUNT = 0
        t = threading.Thread(target=clean_counters, args=(conn,))
        t.setDaemon(1)  # to make sure it dies if we ctrl+C quit
        t.start()
        time.sleep(1)
        QUIT = True
        time.time = tt  # restore the real clock
        counter = get_counter(conn, 'test', 86400)
        print("Did we clean out all of the counters?", not counter)
        self.assertFalse(counter)
    def test_stats(self):
        import pprint
        conn = self.conn
        print("Let's add some data for our statistics!")
        for i in range(5):
            r = update_stats(conn, 'temp', 'example', random.randrange(5, 15))
        print("We have some aggregate statistics:", r)
        rr = get_stats(conn, 'temp', 'example')
        print("Which we can also fetch manually:")
        pprint.pprint(rr)
        self.assertTrue(rr[b'count'] >= 5)
    def test_access_time(self):
        import pprint
        conn = self.conn
        print("Let's calculate some access times...")
        for i in range(10):
            with access_time(conn, "req-%s" % i):
                time.sleep(.5 + random.random())
        print("The slowest access times are:")
        atimes = conn.zrevrange('slowest:AccessTime', 0, -1, withscores=True)
        pprint.pprint(atimes[:10])
        self.assertTrue(len(atimes) >= 10)
        print()
        def cb():
            time.sleep(1 + random.random())
        print("Let's use the callback version...")
        for i in range(5):
            # process_view reads the path from the request stand-in class.
            request.path = 'cbreq-%s' % i
            process_view(conn, cb)
        print("The slowest access times are:")
        atimes = conn.zrevrange('slowest:AccessTime', 0, -1, withscores=True)
        pprint.pprint(atimes[:10])
        self.assertTrue(len(atimes) >= 10)
    def test_ip_lookup(self):
        conn = self.conn
        # Skip gracefully when the GeoLiteCity CSV fixtures are absent.
        try:
            open('GeoLiteCity-Blocks.csv', 'rb')
            open('GeoLiteCity-Location.csv', 'rb')
        except:
            print("********")
            print("You do not have the GeoLiteCity database available, aborting test")
            print("Please have the following two files in the current path:")
            print("GeoLiteCity-Blocks.csv")
            print("GeoLiteCity-Location.csv")
            print("********")
            return
        print("Importing IP addresses to Redis... (this may take a while)")
        import_ips_to_redis(conn, 'GeoLiteCity-Blocks.csv')
        ranges = conn.zcard('ip2cityid:')
        print("Loaded ranges into Redis:", ranges)
        self.assertTrue(ranges > 1000)
        print()
        print("Importing Location lookups to Redis... (this may take a while)")
        import_cities_to_redis(conn, 'GeoLiteCity-Location.csv')
        cities = conn.hlen('cityid2city:')
        print("Loaded city lookups into Redis:", cities)
        self.assertTrue(cities > 1000)
        print()
        print("Let's lookup some locations!")
        rr = random.randrange
        for i in range(5):
            print(find_city_by_ip(conn, '%s.%s.%s.%s' % (rr(1, 255), rr(256), rr(256), rr(256))))
    def test_is_under_maintenance(self):
        print("Are we under maintenance (we shouldn't be)?", is_under_maintenance(self.conn))
        self.conn.set('is-under-maintenance', 'yes')
        print("We cached this, so it should be the same:", is_under_maintenance(self.conn))
        time.sleep(1)
        print("But after a sleep, it should change:", is_under_maintenance(self.conn))
        print("Cleaning up...")
        self.conn.delete('is-under-maintenance')
        time.sleep(1)
        print("Should be False again:", is_under_maintenance(self.conn))
    def test_config(self):
        print("Let's set a config and then get a connection from that config...")
        set_config(self.conn, 'redis', 'test', {'db': 15})
        @redis_connection('test')
        def test(conn2):
            return bool(conn2.info())
        print("We can run commands from the configured connection:", test())
if __name__ == '__main__':
    # Run the chapter's integration tests (requires a local Redis, db 15).
    unittest.main()
| 27.743025 | 97 | 0.568094 | import bisect
import contextlib
import csv
import functools
import json
import logging
import random
import threading
import time
import unittest
import uuid
import redis
from datetime import datetime
QUIT = False
SAMPLE_COUNT = 100
config_connection = None
SEVERITY = {
logging.DEBUG: 'debug',
logging.INFO: 'info',
logging.WARNING: 'waring',
logging.ERROR: 'error',
logging.CRITICAL: 'critical',
}
SEVERITY.update((name, name) for name in list(SEVERITY.values()))
PRECISION = [1, 5, 60, 300, 3600, 18000, 86400]
LAST_CHECKED = None
IS_UNDER_MAINTENANCE = False
CONFIGS = {}
CHECKED = {}
REDIS_CONNECTIONS = {}
def to_bytes(x):
return x.encode() if isinstance(x, str) else x
def to_str(x):
return x.decode() if isinstance(x, bytes) else x
def log_recent(conn, name, message, severity=logging.INFO, pipe=None):
severity = str(SEVERITY.get(severity, severity)).lower()
destination = 'recent:%s:%s' % (name, severity)
message = time.asctime() + ' ' + message
pipe = pipe or conn.pipeline()
pipe.lpush(destination, message)
pipe.ltrim(destination, 0, 99)
pipe.execute()
def log_common(conn, name, message, severity=logging.INFO, timeout=5):
severity = str(SEVERITY.get(severity, severity)).lower()
destination = 'common:%s:%s' % (name, severity)
start_key = destination + ':start'
pipe = conn.pipeline()
end = time.time() + timeout
while time.time() < end:
try:
pipe.watch(start_key)
now = datetime.utcnow().timetuple()
hour_start = datetime(*now[:4]).isoformat()
existing = pipe.get(start_key)
pipe.multi()
if existing and existing < to_bytes(hour_start):
pipe.rename(destination, destination + ':last')
pipe.rename(destination, destination + ':pstart')
pipe.set(start_key, hour_start)
elif not existing:
pipe.set(start_key, hour_start)
pipe.zincrby(destination, 1, message)
log_recent(pipe, name, message, severity, pipe)
return
except redis.exceptions.WatchError:
continue
def update_counter(conn, name, count=1, now=None):
now = now or time.time()
pipe = conn.pipeline()
for prec in PRECISION:
pnow = int(now / prec) * prec
hash = '%s:%s' % (prec, name)
pipe.zadd('known:', {hash: 0})
pipe.hincrby('count: ' + hash, pnow, count)
pipe.execute()
def get_counter(conn, name, precision):
hash = "%s:%s" % (precision, name)
data = conn.hgetall('count:' + hash)
to_return = []
for key, value in data.items():
to_return.append((int(key), int(value)))
to_return.sort()
return to_return
def clean_counters(conn):
pipe = conn.pipeline(True)
passes = 0
while not QUIT:
start = time.time()
index = 0
while index < conn.zcard('known:'):
hash = conn.zcard('known:', index, index)
index += 1
if not hash:
break
hash = hash[0]
prec = int(hash.partition(b':')[0])
bprec = int(prec // 60) or 1
if passes % bprec:
continue
hkey = 'count:' + to_str(hash)
cutoff = time.time() - SAMPLE_COUNT * prec
samples = list(map(int, conn.hkeys(hkey)))
samples.sort()
remove = bisect.bisect_right(samples, cutoff)
if remove:
conn.hdel(hkey, *samples[:remove])
if remove == len(samples):
try:
pipe.watch(hkey)
if not pipe.hlen(hkey):
pipe.multi()
pipe.zrem('known:', hash)
pipe.execute()
index -= 1
else:
pipe.unwatch()
except redis.exceptions.WatchError:
pass
passes += 1
duration = min(int(time.time() - start) + 1, 60)
time.sleep(max(60 - duration, 1))
def update_stats(conn, context, type, value, timeout=5):
destination = 'stats:%s:%s' % (context, type)
start_key = destination + ':start'
pipe = conn.pipeline(True)
end = time.time() + timeout
while time.time() < end:
try:
pipe.watch(start_key)
now = datetime.utcnow().timetuple()
hour_start = datetime(*now[:4]).isoformat()
existing = pipe.get(start_key)
pipe.multi()
if not existing:
pipe.set(start_key, hour_start)
elif to_str(existing) < hour_start:
pipe.rename(destination, destination + ':last')
pipe.rename(start_key, destination + ':pstart')
pipe.set(start_key, hour_start)
tkey1 = str(uuid.uuid4())
tkey2 = str(uuid.uuid4())
pipe.zadd(tkey1, {'min': value})
pipe.zadd(tkey1, {'max': value})
pipe.zunionstore(destination, [destination, tkey1], aggregate='min')
pipe.zunionstore(destination, [destination, tkey2], aggregate='max')
pipe.delete(tkey1, tkey2)
pipe.zincrby(destination, 1, 'count')
pipe.zincrby(destination, value, 'sum')
pipe.zincrby(destination, value * value, 'sumsq')
return pipe.execute()[-3:]
except redis.exceptions.WatchError:
continue
def get_stats(conn, context, type):
key = 'stats:%s:%s' % (context, type)
data = dict(conn.zrange(key, 0, -1, withscores=True))
data[b'average'] = data[b'sum'] / data[b'count']
numerator = data[b'sumsq'] - data[b'sum'] ** 2 / data[b'count']
data[b'stddev'] = (numerator / (data[b'count'] - 1 or 1)) ** 0.5
return data
@contextlib.contextmanager
def access_time(conn, context):
start = time.time()
yield
delta = time.time() - start
stats = update_stats(conn, context, 'AccessTime', delta)
average = stats[1] / stats[0]
pipe = conn.pipeline(True)
pipe.zadd('slowest:AccessTime', {context: average})
pipe.zremrangebyrank('slowest:AccessTime', 0, -101)
pipe.execute()
def process_view(conn, callback):
with access_time(conn, request.path):
return callback()
def ip_to_score(ip_address):
score = 0
for v in ip_address.split('.'):
score = score * 256 + int(v, 10)
return score
def import_ips_to_redis(conn, filename):
csv_file = csv.reader(open(filename, 'rb'))
for count, row in enumerate(csv_file):
start_ip = row[0] if row else ''
if 'i' in start_ip.lower():
continue
if '.' in start_ip:
start_ip = ip_to_score(start_ip)
elif start_ip.isdigit():
start_ip = int(start_ip, 10)
else:
continue
city_id = row[2] + '_' + str(count)
conn.zadd('ip2cityid:', {city_id: start_ip})
def import_cities_to_redis(conn, filename):
for row in csv.reader(open(filename, 'rb')):
if len(row) < 4 or row[0].isdigit():
continue
row = [i.decode('latin-1') for i in row]
city_id = row[0]
country = row[1]
region = row[2]
city = row[3]
conn.hset('cityid2city:', city_id, json.dumps([city, country, region]))
def find_city_by_ip(conn, ip_address):
if isinstance(ip_address, str):
ip_address = ip_to_score(ip_address)
city_id = conn.zrevrangebyscore('ip2cityid:', ip_address, 0, start=0, num=1)
if not city_id:
return None
city_id = city_id[0].partition('_')[0]
return json.loads(conn.hget('cityid2city:', city_id))
def is_under_maintenance(conn):
global LAST_CHECKED, IS_UNDER_MAINTENANCE
if (not LAST_CHECKED) or LAST_CHECKED < time.time() - 1:
LAST_CHECKED = time.time()
IS_UNDER_MAINTENANCE = bool(conn.get('is-under-maintenance'))
return IS_UNDER_MAINTENANCE
def set_config(conn, type, component, config):
conn.set('config:%s:%s' % (type, component), json.dumps(config))
def get_config(conn, type, component, wait=1):
key = 'config:%s:%s' % (type, component)
ch = CHECKED.get(key)
if (not ch) or ch < time.time() - wait:
CHECKED[key] = time.time()
config = json.loads(conn.get(key) or '{}')
config = dict((str(k), config[k]) for k in config)
old_config = CONFIGS.get(key)
if config != old_config:
CONFIGS[key] = config
return CONFIGS.get(key)
def redis_connection(component, wait=1):
key = 'config:redis:' + component
def wrapper(function):
@functools.wraps(function)
def call(*args, **kwargs):
old_config = CONFIGS.get(key, object())
config = get_config(config_connection, 'redis', component, wait)
if config != old_config:
REDIS_CONNECTIONS[key] = redis.Redis(**config)
return function(REDIS_CONNECTIONS.get(key), *args, **kwargs)
return call
return wrapper
class request:
pass
5(unittest.TestCase):
def setUp(self):
global config_connection
import redis
self.conn = config_connection = redis.Redis(db=15, password='123456')
self.conn.flushdb()
def tearDown(self):
self.conn.flushdb()
del self.conn
global config_connection, QUIT, SAMPLE_COUNT
config_connection = None
QUIT = False
SAMPLE_COUNT = 100
print()
print()
def test_log_recent(self):
import pprint
conn = self.conn
print("Let's write a few logs to the recent log")
for msg in range(5):
log_recent(conn, 'test', 'this is message %s' % msg)
recent = conn.lrange('recent:test:info', 0, -1)
print("The current recent message log has this many messages:", len(recent))
print("Those messages include:")
pprint.pprint(recent[:10])
self.assertTrue(len(recent) >= 5)
def test_log_common(self):
import pprint
conn = self.conn
print("Let's write some items to the common log")
for count in range(1, 6):
for i in range(count):
log_common(conn, 'test', "message-%s" % count)
common = conn.zrevrange('common:test:info', 0, -1, withscores=True)
print("The current number of common messages is:", len(common))
print("Those common messages are:")
pprint.pprint(common)
self.assertTrue(len(common) >= 5)
def test_counters(self):
import pprint
global QUIT, SAMPLE_COUNT
conn = self.conn
print("Let's update some counters for now and a little in the future")
now = time.time()
for delta in range(10):
update_counter(conn, 'test', count=random.randrange(1, 5), now=now + delta)
counter = get_counter(conn, 'test', 1)
print("We have some per-second counters:", len(counter))
self.assertTrue(len(counter) >= 10)
counter = get_counter(conn, 'test', 5)
print("We have some per-5-second counters:", len(counter))
print("These counters include:")
pprint.pprint(counter[:10])
self.assertTrue(len(counter) >= 2)
print()
tt = time.time
def new_tt():
return tt() + 2 * 86400
time.time = new_tt
print("Let's clean out some counters by setting our sample count to 0")
SAMPLE_COUNT = 0
t = threading.Thread(target=clean_counters, args=(conn,))
t.setDaemon(1)
t.start()
time.sleep(1)
QUIT = True
time.time = tt
counter = get_counter(conn, 'test', 86400)
print("Did we clean out all of the counters?", not counter)
self.assertFalse(counter)
def test_stats(self):
import pprint
conn = self.conn
print("Let's add some data for our statistics!")
for i in range(5):
r = update_stats(conn, 'temp', 'example', random.randrange(5, 15))
print("We have some aggregate statistics:", r)
rr = get_stats(conn, 'temp', 'example')
print("Which we can also fetch manually:")
pprint.pprint(rr)
self.assertTrue(rr[b'count'] >= 5)
def test_access_time(self):
import pprint
conn = self.conn
print("Let's calculate some access times...")
for i in range(10):
with access_time(conn, "req-%s" % i):
time.sleep(.5 + random.random())
print("The slowest access times are:")
atimes = conn.zrevrange('slowest:AccessTime', 0, -1, withscores=True)
pprint.pprint(atimes[:10])
self.assertTrue(len(atimes) >= 10)
print()
def cb():
time.sleep(1 + random.random())
print("Let's use the callback version...")
for i in range(5):
request.path = 'cbreq-%s' % i
process_view(conn, cb)
print("The slowest access times are:")
atimes = conn.zrevrange('slowest:AccessTime', 0, -1, withscores=True)
pprint.pprint(atimes[:10])
self.assertTrue(len(atimes) >= 10)
def test_ip_lookup(self):
conn = self.conn
try:
open('GeoLiteCity-Blocks.csv', 'rb')
open('GeoLiteCity-Location.csv', 'rb')
except:
print("********")
print("You do not have the GeoLiteCity database available, aborting test")
print("Please have the following two files in the current path:")
print("GeoLiteCity-Blocks.csv")
print("GeoLiteCity-Location.csv")
print("********")
return
print("Importing IP addresses to Redis... (this may take a while)")
import_ips_to_redis(conn, 'GeoLiteCity-Blocks.csv')
ranges = conn.zcard('ip2cityid:')
print("Loaded ranges into Redis:", ranges)
self.assertTrue(ranges > 1000)
print()
print("Importing Location lookups to Redis... (this may take a while)")
import_cities_to_redis(conn, 'GeoLiteCity-Location.csv')
cities = conn.hlen('cityid2city:')
print("Loaded city lookups into Redis:", cities)
self.assertTrue(cities > 1000)
print()
print("Let's lookup some locations!")
rr = random.randrange
for i in range(5):
print(find_city_by_ip(conn, '%s.%s.%s.%s' % (rr(1, 255), rr(256), rr(256), rr(256))))
def test_is_under_maintenance(self):
print("Are we under maintenance (we shouldn't be)?", is_under_maintenance(self.conn))
self.conn.set('is-under-maintenance', 'yes')
print("We cached this, so it should be the same:", is_under_maintenance(self.conn))
time.sleep(1)
print("But after a sleep, it should change:", is_under_maintenance(self.conn))
print("Cleaning up...")
self.conn.delete('is-under-maintenance')
time.sleep(1)
print("Should be False again:", is_under_maintenance(self.conn))
def test_config(self):
print("Let's set a config and then get a connection from that config...")
set_config(self.conn, 'redis', 'test', {'db': 15})
@redis_connection('test')
def test(conn2):
return bool(conn2.info())
print("We can run commands from the configured connection:", test())
if __name__ == '__main__':
unittest.main()
| true | true |
f7311bee17a55636231d33e9456da8fd2182c8c0 | 4,311 | py | Python | benchmark/startQiskit1590.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | benchmark/startQiskit1590.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | benchmark/startQiskit1590.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | # qubit number=5
# total number=50
import cirq
import qiskit
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2,floor, sqrt, pi
import numpy as np
import networkx as nx
def build_oracle(n: int, f) -> QuantumCircuit:
    """Build the phase oracle Zf for boolean function *f* on *n* qubits.

    For every n-bit input string on which f returns "1", the basis state
    is flipped in phase: qubits whose bit is "0" are X-conjugated so the
    multi-controlled phase (mcu1 with lambda = pi, i.e. a multi-control Z)
    fires exactly on that bit pattern, then the X gates are undone.
    """
    # implement the oracle O_f^\pm
    # NOTE: use U1 gate (P gate) with \lambda = 180 ==> CZ gate
    # or multi_control_Z_gate (issue #127)
    controls = QuantumRegister(n, "ofc")
    oracle = QuantumCircuit(controls, name="Zf")

    for i in range(2 ** n):
        rep = np.binary_repr(i, n)
        if f(rep) == "1":
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
            # oracle.h(controls[n])
            if n >= 2:
                # Multi-controlled phase of pi == multi-controlled Z.
                oracle.mcu1(pi, controls[1:], controls[0])
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
    # oracle.barrier()
    return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
    """Build the auto-generated n-qubit benchmark circuit for oracle *f*.

    Structure: an initial Hadamard layer plus a fixed gate sequence, then
    floor(sqrt(2**n) * pi / 4) applications of the Zf oracle (a
    Grover-style iteration count), a second fixed gate sequence, and a
    measurement of every qubit into the classical register.  The
    ``# number=NN`` comments are provenance markers from the generator
    that produced this circuit — do not reorder gates by hand.
    """
    # circuit begin
    input_qubit = QuantumRegister(n,"qc")
    classical = ClassicalRegister(n, "qm")
    prog = QuantumCircuit(input_qubit, classical)
    prog.h(input_qubit[0]) # number=3
    prog.h(input_qubit[1]) # number=4
    prog.h(input_qubit[2]) # number=5
    prog.h(input_qubit[3]) # number=6
    prog.h(input_qubit[4]) # number=21
    prog.h(input_qubit[0]) # number=44
    prog.cz(input_qubit[3],input_qubit[0]) # number=45
    prog.h(input_qubit[0]) # number=46
    prog.cx(input_qubit[3],input_qubit[0]) # number=47
    prog.z(input_qubit[3]) # number=48
    prog.cx(input_qubit[3],input_qubit[0]) # number=49
    prog.cx(input_qubit[3],input_qubit[0]) # number=34
    prog.rx(0.11938052083641225,input_qubit[1]) # number=36
    Zf = build_oracle(n, f)

    # Grover-style number of oracle repetitions.
    repeat = floor(sqrt(2 ** n) * pi / 4)
    for i in range(repeat):
        prog.append(Zf.to_gate(), [input_qubit[i] for i in range(n)])
    prog.h(input_qubit[0]) # number=1
    prog.rx(1.4765485471872026,input_qubit[2]) # number=35
    prog.h(input_qubit[1]) # number=2
    prog.h(input_qubit[2]) # number=7
    prog.h(input_qubit[3]) # number=8
    prog.cx(input_qubit[1],input_qubit[0]) # number=41
    prog.x(input_qubit[0]) # number=42
    prog.cx(input_qubit[1],input_qubit[0]) # number=43
    prog.x(input_qubit[4]) # number=30
    prog.x(input_qubit[1]) # number=10
    prog.x(input_qubit[2]) # number=11
    prog.rx(0.45238934211692994,input_qubit[3]) # number=38
    prog.y(input_qubit[1]) # number=39
    prog.rx(-2.5258404934861938,input_qubit[1]) # number=25
    prog.h(input_qubit[3]) # number=29
    prog.cx(input_qubit[0],input_qubit[3]) # number=22
    prog.x(input_qubit[3]) # number=23
    prog.cx(input_qubit[0],input_qubit[3]) # number=24
    if n>=2:
        prog.mcu1(pi,input_qubit[1:],input_qubit[0])

    prog.x(input_qubit[0]) # number=13
    prog.rx(-0.0722566310325653,input_qubit[4]) # number=37
    prog.x(input_qubit[1]) # number=14
    prog.cx(input_qubit[0],input_qubit[2]) # number=26
    prog.x(input_qubit[2]) # number=27
    prog.h(input_qubit[4]) # number=40
    prog.cx(input_qubit[0],input_qubit[2]) # number=28
    prog.x(input_qubit[3]) # number=16
    prog.h(input_qubit[0]) # number=17
    prog.h(input_qubit[1]) # number=18
    prog.h(input_qubit[2]) # number=19
    prog.h(input_qubit[3]) # number=20
    # circuit end

    for i in range(n):
        prog.measure(input_qubit[i], classical[i])
    return prog
if __name__ == '__main__':
    # Oracle marks only the all-zeros string.
    key = "00000"
    f = lambda rep: str(int(rep == key))
    prog = make_circuit(5,f)
    # Simulate the circuit and collect measurement counts.
    backend = BasicAer.get_backend('qasm_simulator')

    sample_shot =7924
    info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
    # Transpile against a mocked device to record the compiled depth.
    backend = FakeVigo()
    circuit1 = transpile(prog,backend,optimization_level=2)

    # Dump counts, transpiled depth, and the circuit itself for analysis.
    writefile = open("../data/startQiskit1590.csv","w")
    print(info,file=writefile)
    print("results end", file=writefile)
    print(circuit1.depth(),file=writefile)
    print(circuit1,file=writefile)
    writefile.close()
| 32.413534 | 82 | 0.617954 |
import cirq
import qiskit
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2,floor, sqrt, pi
import numpy as np
import networkx as nx
def build_oracle(n: int, f) -> QuantumCircuit:
controls = QuantumRegister(n, "ofc")
oracle = QuantumCircuit(controls, name="Zf")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
if n >= 2:
oracle.mcu1(pi, controls[1:], controls[0])
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
input_qubit = QuantumRegister(n,"qc")
classical = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classical)
prog.h(input_qubit[0])
prog.h(input_qubit[1])
prog.h(input_qubit[2])
prog.h(input_qubit[3])
prog.h(input_qubit[4])
prog.h(input_qubit[0])
prog.cz(input_qubit[3],input_qubit[0])
prog.h(input_qubit[0])
prog.cx(input_qubit[3],input_qubit[0])
prog.z(input_qubit[3])
prog.cx(input_qubit[3],input_qubit[0])
prog.cx(input_qubit[3],input_qubit[0])
prog.rx(0.11938052083641225,input_qubit[1])
Zf = build_oracle(n, f)
repeat = floor(sqrt(2 ** n) * pi / 4)
for i in range(repeat):
prog.append(Zf.to_gate(), [input_qubit[i] for i in range(n)])
prog.h(input_qubit[0])
prog.rx(1.4765485471872026,input_qubit[2])
prog.h(input_qubit[1])
prog.h(input_qubit[2])
prog.h(input_qubit[3])
prog.cx(input_qubit[1],input_qubit[0])
prog.x(input_qubit[0])
prog.cx(input_qubit[1],input_qubit[0])
prog.x(input_qubit[4])
prog.x(input_qubit[1])
prog.x(input_qubit[2])
prog.rx(0.45238934211692994,input_qubit[3])
prog.y(input_qubit[1])
prog.rx(-2.5258404934861938,input_qubit[1])
prog.h(input_qubit[3])
prog.cx(input_qubit[0],input_qubit[3])
prog.x(input_qubit[3])
prog.cx(input_qubit[0],input_qubit[3])
if n>=2:
prog.mcu1(pi,input_qubit[1:],input_qubit[0])
prog.x(input_qubit[0])
prog.rx(-0.0722566310325653,input_qubit[4])
prog.x(input_qubit[1])
prog.cx(input_qubit[0],input_qubit[2])
prog.x(input_qubit[2])
prog.h(input_qubit[4])
prog.cx(input_qubit[0],input_qubit[2])
prog.x(input_qubit[3])
prog.h(input_qubit[0])
prog.h(input_qubit[1])
prog.h(input_qubit[2])
prog.h(input_qubit[3])
for i in range(n):
prog.measure(input_qubit[i], classical[i])
return prog
if __name__ == '__main__':
key = "00000"
f = lambda rep: str(int(rep == key))
prog = make_circuit(5,f)
backend = BasicAer.get_backend('qasm_simulator')
sample_shot =7924
info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
backend = FakeVigo()
circuit1 = transpile(prog,backend,optimization_level=2)
writefile = open("../data/startQiskit1590.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.depth(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
| true | true |
f7311d85c66f7c08923fda4bf9edb27d6e1b385e | 305 | py | Python | data/multilingual/Latn.VMW/Serif_12/pdf_to_json_test_Latn.VMW_Serif_12.py | antoinecarme/pdf_to_json_tests | d57a024fde862e698d916a1178f285883d7a3b2f | [
"BSD-3-Clause"
] | 1 | 2021-09-19T19:47:35.000Z | 2021-09-19T19:47:35.000Z | data/multilingual/Latn.VMW/Serif_12/pdf_to_json_test_Latn.VMW_Serif_12.py | antoinecarme/pdf_to_json_tests | d57a024fde862e698d916a1178f285883d7a3b2f | [
"BSD-3-Clause"
] | null | null | null | data/multilingual/Latn.VMW/Serif_12/pdf_to_json_test_Latn.VMW_Serif_12.py | antoinecarme/pdf_to_json_tests | d57a024fde862e698d916a1178f285883d7a3b2f | [
"BSD-3-Clause"
] | null | null | null | import pdf_to_json as p2j
import json
url = "file:data/multilingual/Latn.VMW/Serif_12/udhr_Latn.VMW_Serif_12.pdf"
lConverter = p2j.pdf_to_json.pdf_to_json_converter()
lConverter.mImageHashOnly = True
lDict = lConverter.convert(url)
print(json.dumps(lDict, indent=4, ensure_ascii=False, sort_keys=True))
| 30.5 | 75 | 0.813115 | import pdf_to_json as p2j
import json
url = "file:data/multilingual/Latn.VMW/Serif_12/udhr_Latn.VMW_Serif_12.pdf"
lConverter = p2j.pdf_to_json.pdf_to_json_converter()
lConverter.mImageHashOnly = True
lDict = lConverter.convert(url)
print(json.dumps(lDict, indent=4, ensure_ascii=False, sort_keys=True))
| true | true |
f7311e1a59e2934d6f9c806d49f878f1efa58c9f | 475 | py | Python | bootcamp/articles/migrations/0008_auto_20180321_1336.py | ngaurav/j | 99dc01f153155b287f419b7af357e1f7d694466d | [
"MIT"
] | null | null | null | bootcamp/articles/migrations/0008_auto_20180321_1336.py | ngaurav/j | 99dc01f153155b287f419b7af357e1f7d694466d | [
"MIT"
] | null | null | null | bootcamp/articles/migrations/0008_auto_20180321_1336.py | ngaurav/j | 99dc01f153155b287f419b7af357e1f7d694466d | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2018-03-21 13:36
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: makes Article.slug optional.

    dependencies = [
        # Must be applied after the previous articles migration.
        ('articles', '0007_auto_20180113_2139'),
    ]

    operations = [
        migrations.AlterField(
            model_name='article',
            name='slug',
            # Slug may now be blank or NULL; max length stays 80.
            field=models.SlugField(blank=True, max_length=80, null=True),
        ),
    ]
| 22.619048 | 73 | 0.623158 |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: makes Article.slug optional.

    dependencies = [
        # Must be applied after the previous articles migration.
        ('articles', '0007_auto_20180113_2139'),
    ]

    operations = [
        migrations.AlterField(
            model_name='article',
            name='slug',
            # Slug may now be blank or NULL; max length stays 80.
            field=models.SlugField(blank=True, max_length=80, null=True),
        ),
    ]
| true | true |
f7311e2ccb16402f340b2a257b71cbb087285749 | 7,221 | py | Python | XLMMacroDeobfuscator/xls_wrapper.py | wmetcalf/XLMMacroDeobfuscator | 1a854d8effb4cf5d2e02f10bbb16d58c64c1ebe8 | [
"Apache-2.0"
] | null | null | null | XLMMacroDeobfuscator/xls_wrapper.py | wmetcalf/XLMMacroDeobfuscator | 1a854d8effb4cf5d2e02f10bbb16d58c64c1ebe8 | [
"Apache-2.0"
] | null | null | null | XLMMacroDeobfuscator/xls_wrapper.py | wmetcalf/XLMMacroDeobfuscator | 1a854d8effb4cf5d2e02f10bbb16d58c64c1ebe8 | [
"Apache-2.0"
] | null | null | null | from XLMMacroDeobfuscator.excel_wrapper import ExcelWrapper
from XLMMacroDeobfuscator.boundsheet import Boundsheet
from XLMMacroDeobfuscator.boundsheet import Cell
from win32com.client import Dispatch
import pywintypes
from enum import Enum
import os
import re
class XlCellType(Enum):
    # Values mirror Excel's XlCellType enumeration (used with
    # Range.SpecialCells) — NOTE(review): currently unreferenced in this file.
    xlCellTypeFormulas = -4123
    xlCellTypeConstants = 2
class XLSWrapper(ExcelWrapper):
    """ExcelWrapper backed by a live Excel COM instance (win32com).

    The workbook is opened in a real Excel process, and XLM (Excel 4.0)
    macro sheets, defined names and cell information are read through the
    COM automation API.
    """

    # XlSheetType constant for an Excel 4.0 macro sheet.
    XLEXCEL4MACROSHEET = 3

    def __init__(self, xls_doc_path):
        self._excel = Dispatch("Excel.Application")
        self.xls_workbook = self._excel.Workbooks.Open(xls_doc_path)
        self.xls_workbook_name = os.path.basename(xls_doc_path)
        self._macrosheets = None          # lazy cache for get_macrosheets()
        self._defined_names = None        # lazy cache for get_defined_name()
        self.xl_international_flags = {}  # per-flag lookup cache
        self._international_flags = None  # raw Application.International tuple

    def get_xl_international_char(self, flag_name):
        """Returns the locale-dependent setting for flag_name.

        flag_name is an enum member whose .value is the 1-based index into
        Excel's Application.International array.
        """
        if flag_name not in self.xl_international_flags:
            if self._international_flags is None:
                self._international_flags = self._excel.Application.International
            # flag value starts at 1, list index starts at 0
            self.xl_international_flags[flag_name] = \
                self._international_flags[flag_name.value - 1]
        return self.xl_international_flags[flag_name]

    def get_defined_names(self):
        """Returns {lowercased name: address} for all workbook defined names."""
        result = {}
        for name_obj in self.xls_workbook.Excel4MacroSheets.Application.Names:
            result[name_obj.NameLocal.lower()] = \
                str(name_obj.RefersToLocal).strip('=')
        return result

    def get_defined_name(self, name, full_match=True):
        """Looks up a defined name (case-insensitive).

        With full_match, returns the single address string (or [] when the
        name is absent, preserving the historical behavior); otherwise
        returns a list of (name, address) pairs whose name starts with the
        given prefix.
        """
        name = name.lower()
        if self._defined_names is None:
            self._defined_names = self.get_defined_names()

        if full_match:
            return self._defined_names.get(name, [])

        return [(defined_name, cell_address)
                for defined_name, cell_address in self._defined_names.items()
                if defined_name.startswith(name)]

    def load_cells(self, macrosheet, xls_sheet):
        """Populates macrosheet with Cell objects read from xls_sheet."""
        cells = {}
        # Fix: hoist the offsets out of the first try block — the second
        # (values) pass needs them, and the original raised NameError if the
        # formula pass failed before they were assigned.
        col_offset = xls_sheet.UsedRange.Column
        row_offset = xls_sheet.UsedRange.Row

        try:
            # Disable screen updates while bulk-reading; much faster.
            self._excel.Application.ScreenUpdating = False
            formulas = xls_sheet.UsedRange.Formula
            if formulas is not None:
                for row_no, row in enumerate(formulas):
                    for col_no, col in enumerate(row):
                        if not col:
                            continue
                        cell = Cell()
                        cell.sheet = macrosheet
                        # Strings starting with '=' are formulas; everything
                        # else is a literal value.
                        if len(col) > 1 and col.startswith('='):
                            cell.formula = col
                        else:
                            cell.value = col
                        cell.row = row_offset + row_no
                        cell.column = Cell.convert_to_column_name(
                            col_offset + col_no)
                        cells[(col_offset + col_no, row_offset + row_no)] = cell
        except pywintypes.com_error as error:
            print('CELL(Formula): ' + str(error.args[2]))
        finally:
            # Fix: always restore screen updating, even after a COM error
            # (the original left it disabled on failure).
            self._excel.Application.ScreenUpdating = True

        try:
            values = xls_sheet.UsedRange.Value
            if values is not None:
                for row_no, row in enumerate(values):
                    for col_no, col in enumerate(row):
                        if not col:
                            continue
                        key = (col_offset + col_no, row_offset + row_no)
                        if key in cells:
                            # Attach the computed value to the formula cell.
                            cells[key].value = col
                        else:
                            cell = Cell()
                            cell.sheet = macrosheet
                            cell.value = col
                            cell.row = row_offset + row_no
                            cell.column = Cell.convert_to_column_name(
                                col_offset + col_no)
                            cells[key] = cell
        except pywintypes.com_error as error:
            print('CELL(Constant): ' + str(error.args[2]))

        for cell in cells.values():
            macrosheet.add_cell(cell)

    def get_macrosheets(self):
        """Returns {sheet name: Boundsheet} for all XLM macro sheets (cached)."""
        if self._macrosheets is None:
            self._macrosheets = {}
            for sheet in self.xls_workbook.Excel4MacroSheets:
                macrosheet = Boundsheet(sheet.name, 'Macrosheet')
                self.load_cells(macrosheet, sheet)
                self._macrosheets[sheet.name] = macrosheet
        return self._macrosheets

    def get_workbook_name(self):
        return self.xls_workbook_name

    def get_cell_info(self, sheet_name, col, row, type_ID):
        """Implements a subset of the XLM GET.CELL() info types.

        Returns (data, not_exist, not_implemented); both flags are always
        False here.
        """
        sheet = self._excel.Excel4MacroSheets(sheet_name)
        cell = col + row
        data = None
        # Fix: convert once; the original's unknown-type branch applied %d to
        # the raw (possibly str) type_ID, which raised TypeError.
        info_type = int(type_ID)
        if info_type == 2:
            data = sheet.Range(cell).Row
            print(data)
        elif info_type == 3:
            data = sheet.Range(cell).Column
            print(data)
        elif info_type == 8:
            data = sheet.Range(cell).HorizontalAlignment
        elif info_type == 17:
            data = sheet.Range(cell).Height
        elif info_type == 19:
            data = sheet.Range(cell).Font.Size
        elif info_type == 20:
            data = sheet.Range(cell).Font.Bold
        elif info_type == 21:
            data = sheet.Range(cell).Font.Italic
        elif info_type == 23:
            data = sheet.Range(cell).Font.Strikethrough
        elif info_type == 24:
            data = sheet.Range(cell).Font.ColorIndex
        elif info_type == 50:
            data = sheet.Range(cell).VerticalAlignment
        else:
            print("Unknown info_type (%d) at cell %s" % (info_type, cell))

        return data, False, False
if __name__ == '__main__':
    # Ad-hoc manual test: dump the macro sheets of a known sample workbook.
    path = r"tmp\xls\edd554502033d78ac18e4bd917d023da2fd64843c823c1be8bc273f48a5f3f5f.xls"
    path = os.path.abspath(path)
    excel_doc = XLSWrapper(path)
    try:
        macrosheets = excel_doc.get_macrosheets()
        # Defined names starting with 'auto_open' mark XLM entry points.
        auto_open_labels = excel_doc.get_defined_name('auto_open', full_match=False)
        for label in auto_open_labels:
            print('auto_open: {}->{}'.format(label[0], label[1]))
        for macrosheet_name in macrosheets:
            print('SHEET: {}\t{}'.format(macrosheets[macrosheet_name].name,
                                         macrosheets[macrosheet_name].type))
            # Print formula cells first, then constant-only cells.
            for formula_loc, info in macrosheets[macrosheet_name].cells.items():
                if info.formula is not None:
                    print('{}\t{}\t{}'.format(formula_loc, info.formula, info.value))
            for formula_loc, info in macrosheets[macrosheet_name].cells.items():
                if info.formula is None:
                    print('{}\t{}\t{}'.format(formula_loc, info.formula, info.value))
    finally:
        # Always shut down the COM Excel instance, even if parsing failed.
        excel_doc._excel.Application.DisplayAlerts = False
        excel_doc._excel.Application.Quit()
| 36.654822 | 99 | 0.568481 | from XLMMacroDeobfuscator.excel_wrapper import ExcelWrapper
from XLMMacroDeobfuscator.boundsheet import Boundsheet
from XLMMacroDeobfuscator.boundsheet import Cell
from win32com.client import Dispatch
import pywintypes
from enum import Enum
import os
import re
class XlCellType(Enum):
xlCellTypeFormulas = -4123
xlCellTypeConstants = 2
class XLSWrapper(ExcelWrapper):
XLEXCEL4MACROSHEET = 3
def __init__(self, xls_doc_path):
self._excel = Dispatch("Excel.Application")
self.xls_workbook = self._excel.Workbooks.Open(xls_doc_path)
self.xls_workbook_name = os.path.basename(xls_doc_path)
self._macrosheets = None
self._defined_names = None
self.xl_international_flags = {}
self._international_flags = None
def get_xl_international_char(self, flag_name):
if flag_name not in self.xl_international_flags:
if self._international_flags is None:
self._international_flags = self._excel.Application.International
self.xl_international_flags[flag_name] = self._international_flags[flag_name.value - 1]
result = self.xl_international_flags[flag_name]
return result
def get_defined_names(self):
result = {}
name_objects = self.xls_workbook.Excel4MacroSheets.Application.Names
for name_obj in name_objects:
result[name_obj.NameLocal.lower()] = str(name_obj.RefersToLocal).strip('=')
return result
def get_defined_name(self, name, full_match=True):
result = []
name = name.lower()
if self._defined_names is None:
self._defined_names = self.get_defined_names()
if full_match:
if name in self._defined_names:
result = self._defined_names[name]
else:
for defined_name, cell_address in self._defined_names.items():
if defined_name.startswith(name):
result.append((defined_name, cell_address))
return result
def load_cells(self, macrosheet, xls_sheet):
cells = {}
try:
self._excel.Application.ScreenUpdating = False
col_offset = xls_sheet.UsedRange.Column
row_offset = xls_sheet.UsedRange.Row
formulas = xls_sheet.UsedRange.Formula
if formulas is not None:
for row_no, row in enumerate(formulas):
for col_no, col in enumerate(row):
if col:
cell = Cell()
cell.sheet = macrosheet
if len(col)>1 and col.startswith('='):
cell.formula = col
else:
cell.value = col
row_addr = row_offset + row_no
col_addr = col_offset + col_no
cell.row = row_addr
cell.column = Cell.convert_to_column_name(col_addr)
cells[(col_addr, row_addr)] = cell
self._excel.Application.ScreenUpdating = True
except pywintypes.com_error as error:
print('CELL(Formula): ' + str(error.args[2]))
try:
values= xls_sheet.UsedRange.Value
if values is not None:
for row_no, row in enumerate(values):
for col_no, col in enumerate(row):
if col:
row_addr = row_offset + row_no
col_addr = col_offset + col_no
if (col_addr, row_addr) in cells:
cell = cells[(col_addr, row_addr)]
cell.value = col
else:
cell = Cell()
cell.sheet = macrosheet
cell.value = col
cell.row = row_addr
cell.column = Cell.convert_to_column_name(col_addr)
cells[(col_addr, row_addr)] = cell
except pywintypes.com_error as error:
print('CELL(Constant): ' + str(error.args[2]))
for cell in cells:
macrosheet.add_cell(cells[cell])
def get_macrosheets(self):
if self._macrosheets is None:
self._macrosheets = {}
for sheet in self.xls_workbook.Excel4MacroSheets:
macrosheet = Boundsheet(sheet.name, 'Macrosheet')
self.load_cells(macrosheet, sheet)
self._macrosheets[sheet.name] = macrosheet
return self._macrosheets
def get_workbook_name(self):
return self.xls_workbook_name
def get_cell_info(self, sheet_name, col, row, type_ID):
sheet = self._excel.Excel4MacroSheets(sheet_name)
cell = col + row
data = None
if int(type_ID) == 2:
data = sheet.Range(col + row).Row
print(data)
elif int(type_ID) == 3:
data = sheet.Range(cell).Column
print(data)
elif int(type_ID) == 8:
data = sheet.Range(cell).HorizontalAlignment
elif int(type_ID) == 17:
data = sheet.Range(cell).Height
elif int(type_ID) == 19:
data = sheet.Range(cell).Font.Size
elif int(type_ID) == 20:
data = sheet.Range(cell).Font.Bold
elif int(type_ID) == 21:
data = sheet.Range(cell).Font.Italic
elif int(type_ID) == 23:
data = sheet.Range(cell).Font.Strikethrough
elif int(type_ID) == 24:
data = sheet.Range(cell).Font.ColorIndex
elif int(type_ID) == 50:
data = sheet.Range(cell).VerticalAlignment
else:
print("Unknown info_type (%d) at cell %s" % (type_ID, cell))
return data, False, False
if __name__ == '__main__':
path = r"tmp\xls\edd554502033d78ac18e4bd917d023da2fd64843c823c1be8bc273f48a5f3f5f.xls"
path = os.path.abspath(path)
excel_doc = XLSWrapper(path)
try:
macrosheets = excel_doc.get_macrosheets()
auto_open_labels = excel_doc.get_defined_name('auto_open', full_match=False)
for label in auto_open_labels:
print('auto_open: {}->{}'.format(label[0], label[1]))
for macrosheet_name in macrosheets:
print('SHEET: {}\t{}'.format(macrosheets[macrosheet_name].name,
macrosheets[macrosheet_name].type))
for formula_loc, info in macrosheets[macrosheet_name].cells.items():
if info.formula is not None:
print('{}\t{}\t{}'.format(formula_loc, info.formula, info.value))
for formula_loc, info in macrosheets[macrosheet_name].cells.items():
if info.formula is None:
print('{}\t{}\t{}'.format(formula_loc, info.formula, info.value))
finally:
excel_doc._excel.Application.DisplayAlerts = False
excel_doc._excel.Application.Quit()
| true | true |
f7311e5292fc49af1408b0183fe1700dc5f1512b | 4,995 | py | Python | .tox/scenario/lib/python2.7/site-packages/futurist/tests/test_executors.py | bdrich/neutron-lbaas | b4711abfe0207c4fdd5d7fb7ecbf017e753abbfd | [
"Apache-2.0"
] | null | null | null | .tox/scenario/lib/python2.7/site-packages/futurist/tests/test_executors.py | bdrich/neutron-lbaas | b4711abfe0207c4fdd5d7fb7ecbf017e753abbfd | [
"Apache-2.0"
] | null | null | null | .tox/scenario/lib/python2.7/site-packages/futurist/tests/test_executors.py | bdrich/neutron-lbaas | b4711abfe0207c4fdd5d7fb7ecbf017e753abbfd | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
import testscenarios
from testtools import testcase
import futurist
from futurist.tests import base
# Module level functions need to be used since the process pool
# executor can not access instance or lambda level functions (since those
# are not pickleable).
def returns_one():
    """Trivial happy-path task submitted to executors under test."""
    return 1
def blows_up():
    """Task that always fails; used to exercise failure accounting."""
    raise RuntimeError("no worky")
def delayed(wait_secs):
    """Task that sleeps for wait_secs seconds; used for runtime statistics."""
    time.sleep(wait_secs)
class TestExecutors(testscenarios.TestWithScenarios, base.TestCase):
    """Runs every test method once per executor flavor via testscenarios."""

    # Each scenario supplies the executor class, its constructor kwargs, and
    # whether the executor supports restart() after shutdown().
    scenarios = [
        ('sync', {'executor_cls': futurist.SynchronousExecutor,
                  'restartable': True, 'executor_kwargs': {}}),
        ('green_sync', {'executor_cls': futurist.SynchronousExecutor,
                        'restartable': True,
                        'executor_kwargs': {'green': True}}),
        ('green', {'executor_cls': futurist.GreenThreadPoolExecutor,
                   'restartable': False, 'executor_kwargs': {}}),
        ('thread', {'executor_cls': futurist.ThreadPoolExecutor,
                    'restartable': False, 'executor_kwargs': {}}),
        ('process', {'executor_cls': futurist.ProcessPoolExecutor,
                     'restartable': False, 'executor_kwargs': {}}),
    ]

    def setUp(self):
        """Creates a fresh executor of the scenario's flavor."""
        super(TestExecutors, self).setUp()
        self.executor = self.executor_cls(**self.executor_kwargs)

    def tearDown(self):
        """Shuts the executor down so no workers leak between tests."""
        super(TestExecutors, self).tearDown()
        self.executor.shutdown()
        self.executor = None

    def test_run_one(self):
        fut = self.executor.submit(returns_one)
        self.assertEqual(1, fut.result())
        self.assertTrue(fut.done())

    def test_blows_up(self):
        # A failing task surfaces via result() and exception(), not submit().
        fut = self.executor.submit(blows_up)
        self.assertRaises(RuntimeError, fut.result)
        self.assertIsInstance(fut.exception(), RuntimeError)

    def test_gather_stats(self):
        self.executor.submit(blows_up)
        self.executor.submit(delayed, 0.2)
        self.executor.submit(returns_one)
        self.executor.shutdown()

        self.assertEqual(3, self.executor.statistics.executed)
        self.assertEqual(1, self.executor.statistics.failures)
        self.assertGreaterEqual(self.executor.statistics.runtime,
                                # It appears that the thread run loop
                                # may call this before 0.2 seconds (or 0.2
                                # will not be represented as a float correctly)
                                # is really up so accommodate for that
                                # happening...
                                0.199)

    def test_post_shutdown_raises(self):
        # Submitting to a shut-down executor must fail.
        executor = self.executor_cls(**self.executor_kwargs)
        executor.shutdown()
        self.assertRaises(RuntimeError, executor.submit, returns_one)

    def test_restartable(self):
        if not self.restartable:
            raise testcase.TestSkipped("not restartable")
        else:
            executor = self.executor_cls(**self.executor_kwargs)
            fut = executor.submit(returns_one)
            self.assertEqual(1, fut.result())
            executor.shutdown()
            self.assertEqual(1, executor.statistics.executed)

            self.assertRaises(RuntimeError, executor.submit, returns_one)

            # restart() should reset statistics and accept new work.
            executor.restart()
            self.assertEqual(0, executor.statistics.executed)
            fut = executor.submit(returns_one)
            self.assertEqual(1, fut.result())
            self.assertEqual(1, executor.statistics.executed)
            executor.shutdown()

    def test_alive(self):
        # Executors act as context managers; exiting shuts them down.
        with self.executor_cls(**self.executor_kwargs) as executor:
            self.assertTrue(executor.alive)
        self.assertFalse(executor.alive)

    def test_done_callback(self):
        happy_completed = []
        unhappy_completed = []

        def on_done(fut):
            # Sort completed futures by outcome.
            if fut.exception():
                unhappy_completed.append(fut)
            else:
                happy_completed.append(fut)

        # Alternate succeeding and failing tasks: 5 of each.
        for i in range(0, 10):
            if i % 2 == 0:
                fut = self.executor.submit(returns_one)
            else:
                fut = self.executor.submit(blows_up)
            fut.add_done_callback(on_done)

        self.executor.shutdown()
        self.assertEqual(10, len(happy_completed) + len(unhappy_completed))
        self.assertEqual(5, len(unhappy_completed))
        self.assertEqual(5, len(happy_completed))
| 35.678571 | 79 | 0.626226 |
import time
import testscenarios
from testtools import testcase
import futurist
from futurist.tests import base
def returns_one():
return 1
def blows_up():
raise RuntimeError("no worky")
def delayed(wait_secs):
time.sleep(wait_secs)
class TestExecutors(testscenarios.TestWithScenarios, base.TestCase):
scenarios = [
('sync', {'executor_cls': futurist.SynchronousExecutor,
'restartable': True, 'executor_kwargs': {}}),
('green_sync', {'executor_cls': futurist.SynchronousExecutor,
'restartable': True,
'executor_kwargs': {'green': True}}),
('green', {'executor_cls': futurist.GreenThreadPoolExecutor,
'restartable': False, 'executor_kwargs': {}}),
('thread', {'executor_cls': futurist.ThreadPoolExecutor,
'restartable': False, 'executor_kwargs': {}}),
('process', {'executor_cls': futurist.ProcessPoolExecutor,
'restartable': False, 'executor_kwargs': {}}),
]
def setUp(self):
super(TestExecutors, self).setUp()
self.executor = self.executor_cls(**self.executor_kwargs)
def tearDown(self):
super(TestExecutors, self).tearDown()
self.executor.shutdown()
self.executor = None
def test_run_one(self):
fut = self.executor.submit(returns_one)
self.assertEqual(1, fut.result())
self.assertTrue(fut.done())
def test_blows_up(self):
fut = self.executor.submit(blows_up)
self.assertRaises(RuntimeError, fut.result)
self.assertIsInstance(fut.exception(), RuntimeError)
def test_gather_stats(self):
self.executor.submit(blows_up)
self.executor.submit(delayed, 0.2)
self.executor.submit(returns_one)
self.executor.shutdown()
self.assertEqual(3, self.executor.statistics.executed)
self.assertEqual(1, self.executor.statistics.failures)
self.assertGreaterEqual(self.executor.statistics.runtime,
0.199)
def test_post_shutdown_raises(self):
executor = self.executor_cls(**self.executor_kwargs)
executor.shutdown()
self.assertRaises(RuntimeError, executor.submit, returns_one)
def test_restartable(self):
if not self.restartable:
raise testcase.TestSkipped("not restartable")
else:
executor = self.executor_cls(**self.executor_kwargs)
fut = executor.submit(returns_one)
self.assertEqual(1, fut.result())
executor.shutdown()
self.assertEqual(1, executor.statistics.executed)
self.assertRaises(RuntimeError, executor.submit, returns_one)
executor.restart()
self.assertEqual(0, executor.statistics.executed)
fut = executor.submit(returns_one)
self.assertEqual(1, fut.result())
self.assertEqual(1, executor.statistics.executed)
executor.shutdown()
def test_alive(self):
with self.executor_cls(**self.executor_kwargs) as executor:
self.assertTrue(executor.alive)
self.assertFalse(executor.alive)
def test_done_callback(self):
happy_completed = []
unhappy_completed = []
def on_done(fut):
if fut.exception():
unhappy_completed.append(fut)
else:
happy_completed.append(fut)
for i in range(0, 10):
if i % 2 == 0:
fut = self.executor.submit(returns_one)
else:
fut = self.executor.submit(blows_up)
fut.add_done_callback(on_done)
self.executor.shutdown()
self.assertEqual(10, len(happy_completed) + len(unhappy_completed))
self.assertEqual(5, len(unhappy_completed))
self.assertEqual(5, len(happy_completed))
| true | true |
f7311e8c0c66339dabaf7cf0082947fc52f84663 | 16,658 | py | Python | Website/FlaskWebsite/env/Lib/site-packages/google/protobuf/internal/type_checkers.py | amirpaia/election-campaign-dynamics | b2b32c627cb79c7eb60e458511210308b7ff4035 | [
"CC0-1.0"
] | 6 | 2022-02-04T18:12:24.000Z | 2022-03-21T23:57:12.000Z | Website/FlaskWebsite/env/Lib/site-packages/google/protobuf/internal/type_checkers.py | amirpaia/election-campaign-dynamics | b2b32c627cb79c7eb60e458511210308b7ff4035 | [
"CC0-1.0"
] | null | null | null | Website/FlaskWebsite/env/Lib/site-packages/google/protobuf/internal/type_checkers.py | amirpaia/election-campaign-dynamics | b2b32c627cb79c7eb60e458511210308b7ff4035 | [
"CC0-1.0"
] | 1 | 2022-02-08T03:53:23.000Z | 2022-02-08T03:53:23.000Z | # Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Provides type checking routines.
This module defines type checking utilities in the forms of dictionaries:
VALUE_CHECKERS: A dictionary of field types and a value validation object.
TYPE_TO_BYTE_SIZE_FN: A dictionary with field types and a size computing
function.
TYPE_TO_SERIALIZE_METHOD: A dictionary with field types and serialization
function.
FIELD_TYPE_TO_WIRE_TYPE: A dictionary with field typed and their
corresponding wire types.
TYPE_TO_DESERIALIZE_METHOD: A dictionary with field types and deserialization
function.
"""
__author__ = 'robinson@google.com (Will Robinson)'
import ctypes
import numbers
from google.protobuf.internal import api_implementation
from google.protobuf.internal import decoder
from google.protobuf.internal import encoder
from google.protobuf.internal import wire_format
from google.protobuf import descriptor
_FieldDescriptor = descriptor.FieldDescriptor
def TruncateToFourByteFloat(original):
  """Rounds `original` to the nearest IEEE-754 single-precision value."""
  truncated = ctypes.c_float(original)
  return truncated.value
def ToShortestFloat(original):
  """Returns the shortest float that has same value in wire."""
  # Every 4-byte float needs between 6 and 9 significant digits, so start
  # at 6 and add digits until the candidate round-trips through single
  # precision back to the original value.  (Using '.9g' directly would keep
  # noise digits: 0.9 would print as 0.899999976.)
  precision = 6
  candidate = float('%.*g' % (precision, original))
  while TruncateToFourByteFloat(candidate) != original:
    precision += 1
    candidate = float('%.*g' % (precision, original))
  return candidate
def SupportsOpenEnums(field_descriptor):
  """Returns True if the field's containing message uses proto3 syntax.

  proto3 enums are "open": any int32 value may be stored, not only the
  declared enum values.
  """
  syntax = field_descriptor.containing_type.syntax
  return syntax == "proto3"
def GetTypeChecker(field):
  """Returns a type checker for a message field of the specified types.

  Args:
    field: FieldDescriptor object for this field.

  Returns:
    An instance of TypeChecker which can be used to verify the types
    of values assigned to a field of the specified type.
  """
  is_proto_string = (field.cpp_type == _FieldDescriptor.CPPTYPE_STRING and
                     field.type == _FieldDescriptor.TYPE_STRING)
  if is_proto_string:
    # Only true `string` fields get the unicode checker; `bytes` fields
    # share CPPTYPE_STRING but fall through to _VALUE_CHECKERS below.
    return UnicodeValueChecker()
  if field.cpp_type == _FieldDescriptor.CPPTYPE_ENUM:
    if not SupportsOpenEnums(field):
      # Closed (proto2) enums only accept declared values.
      return EnumValueChecker(field.enum_type)
    # Open (proto3) enums accept any int32 value.
    return _VALUE_CHECKERS[_FieldDescriptor.CPPTYPE_INT32]
  return _VALUE_CHECKERS[field.cpp_type]
# None of the typecheckers below make any attempt to guard against people
# subclassing builtin types and doing weird things. We're not trying to
# protect against malicious clients here, just people accidentally shooting
# themselves in the foot in obvious ways.
class TypeChecker(object):

  """Type checker used to catch type errors as early as possible
  when the client is setting scalar fields in protocol messages.
  """

  def __init__(self, *acceptable_types):
    self._acceptable_types = acceptable_types

  def CheckValue(self, proposed_value):
    """Type check the provided value and return it.

    The returned value might have been normalized to another type.
    """
    if not isinstance(proposed_value, self._acceptable_types):
      raise TypeError('%.1024r has type %s, but expected one of: %s' %
                      (proposed_value, type(proposed_value),
                       self._acceptable_types))
    # bool and float fields also accept other numeric types; coerce so the
    # stored value has the canonical type.
    if self._acceptable_types and self._acceptable_types[0] in (bool, float):
      return self._acceptable_types[0](proposed_value)
    return proposed_value
class TypeCheckerWithDefault(TypeChecker):
  """TypeChecker that additionally knows the field's default value.

  Used by _VALUE_CHECKERS for types whose checker needs no range logic
  (double, bool, bytes).
  """

  def __init__(self, default_value, *acceptable_types):
    # default_value: value DefaultValue() returns for an unset field.
    TypeChecker.__init__(self, *acceptable_types)
    self._default_value = default_value

  def DefaultValue(self):
    """Returns the default value for an unset field of this type."""
    return self._default_value
# IntValueChecker and its subclasses perform integer type-checks
# and bounds-checks.
class IntValueChecker(object):

  """Base checker for integer fields; performs type-check and range check.

  Subclasses supply the inclusive _MIN/_MAX bounds.
  """

  def CheckValue(self, proposed_value):
    """Validates proposed_value and returns it normalized to a plain int."""
    if not isinstance(proposed_value, numbers.Integral):
      raise TypeError('%.1024r has type %s, but expected one of: %s' %
                      (proposed_value, type(proposed_value), (int,)))
    as_int = int(proposed_value)
    if not self._MIN <= as_int <= self._MAX:
      raise ValueError('Value out of range: %d' % proposed_value)
    # Forcing all values to int keeps alternate implementations where the
    # distinction is more significant (e.g. the C++ implementation) simpler.
    return as_int

  def DefaultValue(self):
    return 0
class EnumValueChecker(object):

  """Checker used for closed enum fields.  Performs type and range checks."""

  def __init__(self, enum_type):
    # enum_type: an EnumDescriptor providing values_by_number and values.
    self._enum_type = enum_type

  def CheckValue(self, proposed_value):
    """Validates that proposed_value is a declared value of this enum."""
    if isinstance(proposed_value, numbers.Integral):
      if int(proposed_value) in self._enum_type.values_by_number:
        # Return the caller's value unchanged; it is already integral.
        return proposed_value
      raise ValueError('Unknown enum value: %d' % proposed_value)
    raise TypeError('%.1024r has type %s, but expected one of: %s' %
                    (proposed_value, type(proposed_value), (int,)))

  def DefaultValue(self):
    """Returns the number of the first declared enum value."""
    return self._enum_type.values[0].number
class UnicodeValueChecker(object):

  """Checker used for string fields.

  Always returns a unicode value, even if the input is of type str.
  """

  def CheckValue(self, proposed_value):
    """Returns proposed_value as unicode, validating its UTF-8 encodability."""
    if not isinstance(proposed_value, (bytes, str)):
      raise TypeError('%.1024r has type %s, but expected one of: %s' %
                      (proposed_value, type(proposed_value), (bytes, str)))

    if isinstance(proposed_value, bytes):
      # bytes input must be valid UTF-8; decode to a unicode string.
      try:
        return proposed_value.decode('utf-8')
      except UnicodeDecodeError:
        raise ValueError('%.1024r has type bytes, but isn\'t valid UTF-8 '
                         'encoding. Non-UTF-8 strings must be converted to '
                         'unicode objects before being added.' %
                         (proposed_value))

    # str input must itself be encodable as UTF-8 (rejects lone surrogates).
    try:
      proposed_value.encode('utf8')
    except UnicodeEncodeError:
      raise ValueError('%.1024r isn\'t a valid unicode string and '
                       'can\'t be encoded in UTF-8.' %
                       (proposed_value))
    return proposed_value

  def DefaultValue(self):
    return u""
class Int32ValueChecker(IntValueChecker):
  # Inclusive bounds of a signed 32-bit integer.  (The original note about
  # ints vs. longs dates from Python 2 and no longer applies.)
  _MIN = -2147483648
  _MAX = 2147483647
class Uint32ValueChecker(IntValueChecker):
  # Inclusive bounds of an unsigned 32-bit integer.
  _MIN = 0
  _MAX = (1 << 32) - 1
class Int64ValueChecker(IntValueChecker):
  # Inclusive bounds of a signed 64-bit integer.
  _MIN = -(1 << 63)
  _MAX = (1 << 63) - 1
class Uint64ValueChecker(IntValueChecker):
  # Inclusive bounds of an unsigned 64-bit integer.
  _MIN = 0
  _MAX = (1 << 64) - 1
# Largest finite IEEE-754 single-precision value (about
# 3.4028234663852886e+38).  Doubles beyond +/-_FLOAT_MAX cannot be stored in
# 4 bytes and are saturated to +/-inf by FloatValueChecker below.
_FLOAT_MAX = float.fromhex('0x1.fffffep+127')
_FLOAT_MIN = -_FLOAT_MAX
_INF = float('inf')
_NEG_INF = float('-inf')
class FloatValueChecker(object):

  """Checker used for float fields.  Performs type-check and range check.

  Values exceeding a 32-bit float will be converted to inf/-inf.
  """

  def CheckValue(self, proposed_value):
    """Check and convert proposed_value to float."""
    if not isinstance(proposed_value, numbers.Real):
      raise TypeError('%.1024r has type %s, but expected one of: numbers.Real' %
                      (proposed_value, type(proposed_value)))
    as_double = float(proposed_value)
    # Saturate out-of-range doubles to +/-inf; this matches the C++ proto
    # SafeDoubleToFloat logic.
    if as_double > _FLOAT_MAX:
      return _INF
    if as_double < _FLOAT_MIN:
      return _NEG_INF
    return TruncateToFourByteFloat(as_double)

  def DefaultValue(self):
    return 0.0
# Type-checkers for all scalar CPPTYPEs.
# Maps a descriptor C++ type constant to an object exposing
# CheckValue()/DefaultValue().
_VALUE_CHECKERS = {
    _FieldDescriptor.CPPTYPE_INT32: Int32ValueChecker(),
    _FieldDescriptor.CPPTYPE_INT64: Int64ValueChecker(),
    _FieldDescriptor.CPPTYPE_UINT32: Uint32ValueChecker(),
    _FieldDescriptor.CPPTYPE_UINT64: Uint64ValueChecker(),
    _FieldDescriptor.CPPTYPE_DOUBLE: TypeCheckerWithDefault(
        0.0, float, numbers.Real),
    _FieldDescriptor.CPPTYPE_FLOAT: FloatValueChecker(),
    _FieldDescriptor.CPPTYPE_BOOL: TypeCheckerWithDefault(
        False, bool, numbers.Integral),
    # NOTE: CPPTYPE_STRING defaults to the bytes checker; GetTypeChecker()
    # substitutes UnicodeValueChecker for true TYPE_STRING fields.
    _FieldDescriptor.CPPTYPE_STRING: TypeCheckerWithDefault(b'', bytes),
    }
# Map from field type to a function F, such that F(field_num, value)
# gives the total byte size for a value of the given type. This
# byte size includes tag information and any other additional space
# associated with serializing "value".
TYPE_TO_BYTE_SIZE_FN = {
_FieldDescriptor.TYPE_DOUBLE: wire_format.DoubleByteSize,
_FieldDescriptor.TYPE_FLOAT: wire_format.FloatByteSize,
_FieldDescriptor.TYPE_INT64: wire_format.Int64ByteSize,
_FieldDescriptor.TYPE_UINT64: wire_format.UInt64ByteSize,
_FieldDescriptor.TYPE_INT32: wire_format.Int32ByteSize,
_FieldDescriptor.TYPE_FIXED64: wire_format.Fixed64ByteSize,
_FieldDescriptor.TYPE_FIXED32: wire_format.Fixed32ByteSize,
_FieldDescriptor.TYPE_BOOL: wire_format.BoolByteSize,
_FieldDescriptor.TYPE_STRING: wire_format.StringByteSize,
_FieldDescriptor.TYPE_GROUP: wire_format.GroupByteSize,
_FieldDescriptor.TYPE_MESSAGE: wire_format.MessageByteSize,
_FieldDescriptor.TYPE_BYTES: wire_format.BytesByteSize,
_FieldDescriptor.TYPE_UINT32: wire_format.UInt32ByteSize,
_FieldDescriptor.TYPE_ENUM: wire_format.EnumByteSize,
_FieldDescriptor.TYPE_SFIXED32: wire_format.SFixed32ByteSize,
_FieldDescriptor.TYPE_SFIXED64: wire_format.SFixed64ByteSize,
_FieldDescriptor.TYPE_SINT32: wire_format.SInt32ByteSize,
_FieldDescriptor.TYPE_SINT64: wire_format.SInt64ByteSize
}
# Maps from field types to encoder constructors.
TYPE_TO_ENCODER = {
_FieldDescriptor.TYPE_DOUBLE: encoder.DoubleEncoder,
_FieldDescriptor.TYPE_FLOAT: encoder.FloatEncoder,
_FieldDescriptor.TYPE_INT64: encoder.Int64Encoder,
_FieldDescriptor.TYPE_UINT64: encoder.UInt64Encoder,
_FieldDescriptor.TYPE_INT32: encoder.Int32Encoder,
_FieldDescriptor.TYPE_FIXED64: encoder.Fixed64Encoder,
_FieldDescriptor.TYPE_FIXED32: encoder.Fixed32Encoder,
_FieldDescriptor.TYPE_BOOL: encoder.BoolEncoder,
_FieldDescriptor.TYPE_STRING: encoder.StringEncoder,
_FieldDescriptor.TYPE_GROUP: encoder.GroupEncoder,
_FieldDescriptor.TYPE_MESSAGE: encoder.MessageEncoder,
_FieldDescriptor.TYPE_BYTES: encoder.BytesEncoder,
_FieldDescriptor.TYPE_UINT32: encoder.UInt32Encoder,
_FieldDescriptor.TYPE_ENUM: encoder.EnumEncoder,
_FieldDescriptor.TYPE_SFIXED32: encoder.SFixed32Encoder,
_FieldDescriptor.TYPE_SFIXED64: encoder.SFixed64Encoder,
_FieldDescriptor.TYPE_SINT32: encoder.SInt32Encoder,
_FieldDescriptor.TYPE_SINT64: encoder.SInt64Encoder,
}
# Maps from field types to sizer constructors.
TYPE_TO_SIZER = {
_FieldDescriptor.TYPE_DOUBLE: encoder.DoubleSizer,
_FieldDescriptor.TYPE_FLOAT: encoder.FloatSizer,
_FieldDescriptor.TYPE_INT64: encoder.Int64Sizer,
_FieldDescriptor.TYPE_UINT64: encoder.UInt64Sizer,
_FieldDescriptor.TYPE_INT32: encoder.Int32Sizer,
_FieldDescriptor.TYPE_FIXED64: encoder.Fixed64Sizer,
_FieldDescriptor.TYPE_FIXED32: encoder.Fixed32Sizer,
_FieldDescriptor.TYPE_BOOL: encoder.BoolSizer,
_FieldDescriptor.TYPE_STRING: encoder.StringSizer,
_FieldDescriptor.TYPE_GROUP: encoder.GroupSizer,
_FieldDescriptor.TYPE_MESSAGE: encoder.MessageSizer,
_FieldDescriptor.TYPE_BYTES: encoder.BytesSizer,
_FieldDescriptor.TYPE_UINT32: encoder.UInt32Sizer,
_FieldDescriptor.TYPE_ENUM: encoder.EnumSizer,
_FieldDescriptor.TYPE_SFIXED32: encoder.SFixed32Sizer,
_FieldDescriptor.TYPE_SFIXED64: encoder.SFixed64Sizer,
_FieldDescriptor.TYPE_SINT32: encoder.SInt32Sizer,
_FieldDescriptor.TYPE_SINT64: encoder.SInt64Sizer,
}
# Maps from field type to a decoder constructor.
TYPE_TO_DECODER = {
_FieldDescriptor.TYPE_DOUBLE: decoder.DoubleDecoder,
_FieldDescriptor.TYPE_FLOAT: decoder.FloatDecoder,
_FieldDescriptor.TYPE_INT64: decoder.Int64Decoder,
_FieldDescriptor.TYPE_UINT64: decoder.UInt64Decoder,
_FieldDescriptor.TYPE_INT32: decoder.Int32Decoder,
_FieldDescriptor.TYPE_FIXED64: decoder.Fixed64Decoder,
_FieldDescriptor.TYPE_FIXED32: decoder.Fixed32Decoder,
_FieldDescriptor.TYPE_BOOL: decoder.BoolDecoder,
_FieldDescriptor.TYPE_STRING: decoder.StringDecoder,
_FieldDescriptor.TYPE_GROUP: decoder.GroupDecoder,
_FieldDescriptor.TYPE_MESSAGE: decoder.MessageDecoder,
_FieldDescriptor.TYPE_BYTES: decoder.BytesDecoder,
_FieldDescriptor.TYPE_UINT32: decoder.UInt32Decoder,
_FieldDescriptor.TYPE_ENUM: decoder.EnumDecoder,
_FieldDescriptor.TYPE_SFIXED32: decoder.SFixed32Decoder,
_FieldDescriptor.TYPE_SFIXED64: decoder.SFixed64Decoder,
_FieldDescriptor.TYPE_SINT32: decoder.SInt32Decoder,
_FieldDescriptor.TYPE_SINT64: decoder.SInt64Decoder,
}
# Maps from field type to expected wiretype.
FIELD_TYPE_TO_WIRE_TYPE = {
_FieldDescriptor.TYPE_DOUBLE: wire_format.WIRETYPE_FIXED64,
_FieldDescriptor.TYPE_FLOAT: wire_format.WIRETYPE_FIXED32,
_FieldDescriptor.TYPE_INT64: wire_format.WIRETYPE_VARINT,
_FieldDescriptor.TYPE_UINT64: wire_format.WIRETYPE_VARINT,
_FieldDescriptor.TYPE_INT32: wire_format.WIRETYPE_VARINT,
_FieldDescriptor.TYPE_FIXED64: wire_format.WIRETYPE_FIXED64,
_FieldDescriptor.TYPE_FIXED32: wire_format.WIRETYPE_FIXED32,
_FieldDescriptor.TYPE_BOOL: wire_format.WIRETYPE_VARINT,
_FieldDescriptor.TYPE_STRING:
wire_format.WIRETYPE_LENGTH_DELIMITED,
_FieldDescriptor.TYPE_GROUP: wire_format.WIRETYPE_START_GROUP,
_FieldDescriptor.TYPE_MESSAGE:
wire_format.WIRETYPE_LENGTH_DELIMITED,
_FieldDescriptor.TYPE_BYTES:
wire_format.WIRETYPE_LENGTH_DELIMITED,
_FieldDescriptor.TYPE_UINT32: wire_format.WIRETYPE_VARINT,
_FieldDescriptor.TYPE_ENUM: wire_format.WIRETYPE_VARINT,
_FieldDescriptor.TYPE_SFIXED32: wire_format.WIRETYPE_FIXED32,
_FieldDescriptor.TYPE_SFIXED64: wire_format.WIRETYPE_FIXED64,
_FieldDescriptor.TYPE_SINT32: wire_format.WIRETYPE_VARINT,
_FieldDescriptor.TYPE_SINT64: wire_format.WIRETYPE_VARINT,
}
| 40.530414 | 81 | 0.742466 |
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
__author__ = 'robinson@google.com (Will Robinson)'
import ctypes
import numbers
from google.protobuf.internal import api_implementation
from google.protobuf.internal import decoder
from google.protobuf.internal import encoder
from google.protobuf.internal import wire_format
from google.protobuf import descriptor
_FieldDescriptor = descriptor.FieldDescriptor
def TruncateToFourByteFloat(original):
return ctypes.c_float(original).value
def ToShortestFloat(original):
# All 4 byte floats have between 6 and 9 significant digits, so we
# start with 6 as the lower bound.
# It has to be iterative because use '.9g' directly can not get rid
# of the noises for most values. For example if set a float_field=0.9
# use '.9g' will print 0.899999976.
precision = 6
rounded = float('{0:.{1}g}'.format(original, precision))
while TruncateToFourByteFloat(rounded) != original:
precision += 1
rounded = float('{0:.{1}g}'.format(original, precision))
return rounded
def SupportsOpenEnums(field_descriptor):
return field_descriptor.containing_type.syntax == "proto3"
def GetTypeChecker(field):
if (field.cpp_type == _FieldDescriptor.CPPTYPE_STRING and
field.type == _FieldDescriptor.TYPE_STRING):
return UnicodeValueChecker()
if field.cpp_type == _FieldDescriptor.CPPTYPE_ENUM:
if SupportsOpenEnums(field):
# When open enums are supported, any int32 can be assigned.
return _VALUE_CHECKERS[_FieldDescriptor.CPPTYPE_INT32]
else:
return EnumValueChecker(field.enum_type)
return _VALUE_CHECKERS[field.cpp_type]
# None of the typecheckers below make any attempt to guard against people
# subclassing builtin types and doing weird things. We're not trying to
class TypeChecker(object):
def __init__(self, *acceptable_types):
self._acceptable_types = acceptable_types
def CheckValue(self, proposed_value):
if not isinstance(proposed_value, self._acceptable_types):
message = ('%.1024r has type %s, but expected one of: %s' %
(proposed_value, type(proposed_value), self._acceptable_types))
raise TypeError(message)
if self._acceptable_types:
if self._acceptable_types[0] in (bool, float):
return self._acceptable_types[0](proposed_value)
return proposed_value
class TypeCheckerWithDefault(TypeChecker):
def __init__(self, default_value, *acceptable_types):
TypeChecker.__init__(self, *acceptable_types)
self._default_value = default_value
def DefaultValue(self):
return self._default_value
class IntValueChecker(object):
def CheckValue(self, proposed_value):
if not isinstance(proposed_value, numbers.Integral):
message = ('%.1024r has type %s, but expected one of: %s' %
(proposed_value, type(proposed_value), (int,)))
raise TypeError(message)
if not self._MIN <= int(proposed_value) <= self._MAX:
raise ValueError('Value out of range: %d' % proposed_value)
proposed_value = int(proposed_value)
return proposed_value
def DefaultValue(self):
return 0
class EnumValueChecker(object):
def __init__(self, enum_type):
self._enum_type = enum_type
def CheckValue(self, proposed_value):
if not isinstance(proposed_value, numbers.Integral):
message = ('%.1024r has type %s, but expected one of: %s' %
(proposed_value, type(proposed_value), (int,)))
raise TypeError(message)
if int(proposed_value) not in self._enum_type.values_by_number:
raise ValueError('Unknown enum value: %d' % proposed_value)
return proposed_value
def DefaultValue(self):
return self._enum_type.values[0].number
class UnicodeValueChecker(object):
def CheckValue(self, proposed_value):
if not isinstance(proposed_value, (bytes, str)):
message = ('%.1024r has type %s, but expected one of: %s' %
(proposed_value, type(proposed_value), (bytes, str)))
raise TypeError(message)
if isinstance(proposed_value, bytes):
try:
proposed_value = proposed_value.decode('utf-8')
except UnicodeDecodeError:
raise ValueError('%.1024r has type bytes, but isn\'t valid UTF-8 '
'encoding. Non-UTF-8 strings must be converted to '
'unicode objects before being added.' %
(proposed_value))
else:
try:
proposed_value.encode('utf8')
except UnicodeEncodeError:
raise ValueError('%.1024r isn\'t a valid unicode string and '
'can\'t be encoded in UTF-8.'%
(proposed_value))
return proposed_value
def DefaultValue(self):
return u""
class Int32ValueChecker(IntValueChecker):
# We're sure to use ints instead of longs here since comparison may be more
_MIN = -2147483648
_MAX = 2147483647
class Uint32ValueChecker(IntValueChecker):
_MIN = 0
_MAX = (1 << 32) - 1
class Int64ValueChecker(IntValueChecker):
_MIN = -(1 << 63)
_MAX = (1 << 63) - 1
class Uint64ValueChecker(IntValueChecker):
_MIN = 0
_MAX = (1 << 64) - 1
_FLOAT_MAX = float.fromhex('0x1.fffffep+127')
_FLOAT_MIN = -_FLOAT_MAX
_INF = float('inf')
_NEG_INF = float('-inf')
class FloatValueChecker(object):
def CheckValue(self, proposed_value):
if not isinstance(proposed_value, numbers.Real):
message = ('%.1024r has type %s, but expected one of: numbers.Real' %
(proposed_value, type(proposed_value)))
raise TypeError(message)
converted_value = float(proposed_value)
if converted_value > _FLOAT_MAX:
return _INF
if converted_value < _FLOAT_MIN:
return _NEG_INF
return TruncateToFourByteFloat(converted_value)
def DefaultValue(self):
return 0.0
_VALUE_CHECKERS = {
_FieldDescriptor.CPPTYPE_INT32: Int32ValueChecker(),
_FieldDescriptor.CPPTYPE_INT64: Int64ValueChecker(),
_FieldDescriptor.CPPTYPE_UINT32: Uint32ValueChecker(),
_FieldDescriptor.CPPTYPE_UINT64: Uint64ValueChecker(),
_FieldDescriptor.CPPTYPE_DOUBLE: TypeCheckerWithDefault(
0.0, float, numbers.Real),
_FieldDescriptor.CPPTYPE_FLOAT: FloatValueChecker(),
_FieldDescriptor.CPPTYPE_BOOL: TypeCheckerWithDefault(
False, bool, numbers.Integral),
_FieldDescriptor.CPPTYPE_STRING: TypeCheckerWithDefault(b'', bytes),
}
TYPE_TO_BYTE_SIZE_FN = {
_FieldDescriptor.TYPE_DOUBLE: wire_format.DoubleByteSize,
_FieldDescriptor.TYPE_FLOAT: wire_format.FloatByteSize,
_FieldDescriptor.TYPE_INT64: wire_format.Int64ByteSize,
_FieldDescriptor.TYPE_UINT64: wire_format.UInt64ByteSize,
_FieldDescriptor.TYPE_INT32: wire_format.Int32ByteSize,
_FieldDescriptor.TYPE_FIXED64: wire_format.Fixed64ByteSize,
_FieldDescriptor.TYPE_FIXED32: wire_format.Fixed32ByteSize,
_FieldDescriptor.TYPE_BOOL: wire_format.BoolByteSize,
_FieldDescriptor.TYPE_STRING: wire_format.StringByteSize,
_FieldDescriptor.TYPE_GROUP: wire_format.GroupByteSize,
_FieldDescriptor.TYPE_MESSAGE: wire_format.MessageByteSize,
_FieldDescriptor.TYPE_BYTES: wire_format.BytesByteSize,
_FieldDescriptor.TYPE_UINT32: wire_format.UInt32ByteSize,
_FieldDescriptor.TYPE_ENUM: wire_format.EnumByteSize,
_FieldDescriptor.TYPE_SFIXED32: wire_format.SFixed32ByteSize,
_FieldDescriptor.TYPE_SFIXED64: wire_format.SFixed64ByteSize,
_FieldDescriptor.TYPE_SINT32: wire_format.SInt32ByteSize,
_FieldDescriptor.TYPE_SINT64: wire_format.SInt64ByteSize
}
TYPE_TO_ENCODER = {
_FieldDescriptor.TYPE_DOUBLE: encoder.DoubleEncoder,
_FieldDescriptor.TYPE_FLOAT: encoder.FloatEncoder,
_FieldDescriptor.TYPE_INT64: encoder.Int64Encoder,
_FieldDescriptor.TYPE_UINT64: encoder.UInt64Encoder,
_FieldDescriptor.TYPE_INT32: encoder.Int32Encoder,
_FieldDescriptor.TYPE_FIXED64: encoder.Fixed64Encoder,
_FieldDescriptor.TYPE_FIXED32: encoder.Fixed32Encoder,
_FieldDescriptor.TYPE_BOOL: encoder.BoolEncoder,
_FieldDescriptor.TYPE_STRING: encoder.StringEncoder,
_FieldDescriptor.TYPE_GROUP: encoder.GroupEncoder,
_FieldDescriptor.TYPE_MESSAGE: encoder.MessageEncoder,
_FieldDescriptor.TYPE_BYTES: encoder.BytesEncoder,
_FieldDescriptor.TYPE_UINT32: encoder.UInt32Encoder,
_FieldDescriptor.TYPE_ENUM: encoder.EnumEncoder,
_FieldDescriptor.TYPE_SFIXED32: encoder.SFixed32Encoder,
_FieldDescriptor.TYPE_SFIXED64: encoder.SFixed64Encoder,
_FieldDescriptor.TYPE_SINT32: encoder.SInt32Encoder,
_FieldDescriptor.TYPE_SINT64: encoder.SInt64Encoder,
}
TYPE_TO_SIZER = {
_FieldDescriptor.TYPE_DOUBLE: encoder.DoubleSizer,
_FieldDescriptor.TYPE_FLOAT: encoder.FloatSizer,
_FieldDescriptor.TYPE_INT64: encoder.Int64Sizer,
_FieldDescriptor.TYPE_UINT64: encoder.UInt64Sizer,
_FieldDescriptor.TYPE_INT32: encoder.Int32Sizer,
_FieldDescriptor.TYPE_FIXED64: encoder.Fixed64Sizer,
_FieldDescriptor.TYPE_FIXED32: encoder.Fixed32Sizer,
_FieldDescriptor.TYPE_BOOL: encoder.BoolSizer,
_FieldDescriptor.TYPE_STRING: encoder.StringSizer,
_FieldDescriptor.TYPE_GROUP: encoder.GroupSizer,
_FieldDescriptor.TYPE_MESSAGE: encoder.MessageSizer,
_FieldDescriptor.TYPE_BYTES: encoder.BytesSizer,
_FieldDescriptor.TYPE_UINT32: encoder.UInt32Sizer,
_FieldDescriptor.TYPE_ENUM: encoder.EnumSizer,
_FieldDescriptor.TYPE_SFIXED32: encoder.SFixed32Sizer,
_FieldDescriptor.TYPE_SFIXED64: encoder.SFixed64Sizer,
_FieldDescriptor.TYPE_SINT32: encoder.SInt32Sizer,
_FieldDescriptor.TYPE_SINT64: encoder.SInt64Sizer,
}
TYPE_TO_DECODER = {
_FieldDescriptor.TYPE_DOUBLE: decoder.DoubleDecoder,
_FieldDescriptor.TYPE_FLOAT: decoder.FloatDecoder,
_FieldDescriptor.TYPE_INT64: decoder.Int64Decoder,
_FieldDescriptor.TYPE_UINT64: decoder.UInt64Decoder,
_FieldDescriptor.TYPE_INT32: decoder.Int32Decoder,
_FieldDescriptor.TYPE_FIXED64: decoder.Fixed64Decoder,
_FieldDescriptor.TYPE_FIXED32: decoder.Fixed32Decoder,
_FieldDescriptor.TYPE_BOOL: decoder.BoolDecoder,
_FieldDescriptor.TYPE_STRING: decoder.StringDecoder,
_FieldDescriptor.TYPE_GROUP: decoder.GroupDecoder,
_FieldDescriptor.TYPE_MESSAGE: decoder.MessageDecoder,
_FieldDescriptor.TYPE_BYTES: decoder.BytesDecoder,
_FieldDescriptor.TYPE_UINT32: decoder.UInt32Decoder,
_FieldDescriptor.TYPE_ENUM: decoder.EnumDecoder,
_FieldDescriptor.TYPE_SFIXED32: decoder.SFixed32Decoder,
_FieldDescriptor.TYPE_SFIXED64: decoder.SFixed64Decoder,
_FieldDescriptor.TYPE_SINT32: decoder.SInt32Decoder,
_FieldDescriptor.TYPE_SINT64: decoder.SInt64Decoder,
}
FIELD_TYPE_TO_WIRE_TYPE = {
_FieldDescriptor.TYPE_DOUBLE: wire_format.WIRETYPE_FIXED64,
_FieldDescriptor.TYPE_FLOAT: wire_format.WIRETYPE_FIXED32,
_FieldDescriptor.TYPE_INT64: wire_format.WIRETYPE_VARINT,
_FieldDescriptor.TYPE_UINT64: wire_format.WIRETYPE_VARINT,
_FieldDescriptor.TYPE_INT32: wire_format.WIRETYPE_VARINT,
_FieldDescriptor.TYPE_FIXED64: wire_format.WIRETYPE_FIXED64,
_FieldDescriptor.TYPE_FIXED32: wire_format.WIRETYPE_FIXED32,
_FieldDescriptor.TYPE_BOOL: wire_format.WIRETYPE_VARINT,
_FieldDescriptor.TYPE_STRING:
wire_format.WIRETYPE_LENGTH_DELIMITED,
_FieldDescriptor.TYPE_GROUP: wire_format.WIRETYPE_START_GROUP,
_FieldDescriptor.TYPE_MESSAGE:
wire_format.WIRETYPE_LENGTH_DELIMITED,
_FieldDescriptor.TYPE_BYTES:
wire_format.WIRETYPE_LENGTH_DELIMITED,
_FieldDescriptor.TYPE_UINT32: wire_format.WIRETYPE_VARINT,
_FieldDescriptor.TYPE_ENUM: wire_format.WIRETYPE_VARINT,
_FieldDescriptor.TYPE_SFIXED32: wire_format.WIRETYPE_FIXED32,
_FieldDescriptor.TYPE_SFIXED64: wire_format.WIRETYPE_FIXED64,
_FieldDescriptor.TYPE_SINT32: wire_format.WIRETYPE_VARINT,
_FieldDescriptor.TYPE_SINT64: wire_format.WIRETYPE_VARINT,
}
| true | true |
f7311f4832eeb38d5c97a3363066842f0f43dfd1 | 3,243 | py | Python | youtube_dl/extractor/libsyn.py | MOODesign/Youtube-videos-Download | 730c0d12a06f349907481570f1f2890251f7a181 | [
"Unlicense"
] | 24 | 2017-03-17T10:27:12.000Z | 2022-02-16T05:55:50.000Z | youtube_dl/extractor/libsyn.py | MOODesign/Youtube-videos-Download | 730c0d12a06f349907481570f1f2890251f7a181 | [
"Unlicense"
] | 7 | 2017-07-26T08:15:27.000Z | 2018-09-20T12:56:53.000Z | youtube_dl/extractor/libsyn.py | MOODesign/Youtube-videos-Download | 730c0d12a06f349907481570f1f2890251f7a181 | [
"Unlicense"
] | 3 | 2017-03-17T10:27:13.000Z | 2019-01-28T01:19:17.000Z | # coding: utf-8
from __future__ import unicode_literals
import json
import re
from .common import InfoExtractor
from ..utils import (
parse_duration,
unified_strdate,
)
class LibsynIE(InfoExtractor):
    """Extractor for the Libsyn HTML5 podcast player embed pages.

    Matches URLs of the form
    ``http(s)://html5-player.libsyn.com/embed/episode/id/<numeric id>``
    and extracts the episode audio (two mirrors of the same MP3).
    """
    _VALID_URL = r'(?P<mainurl>https?://html5-player\.libsyn\.com/embed/episode/id/(?P<id>[0-9]+))'
    _TESTS = [{
        'url': 'http://html5-player.libsyn.com/embed/episode/id/6385796/',
        'md5': '2a55e75496c790cdeb058e7e6c087746',
        'info_dict': {
            'id': '6385796',
            'ext': 'mp3',
            'title': "Champion Minded - Developing a Growth Mindset",
            'description': 'In this episode, Allistair talks about the importance of developing a growth mindset, not only in sports, but in life too.',
            'upload_date': '20180320',
            'thumbnail': 're:^https?://.*',
        },
    }, {
        'url': 'https://html5-player.libsyn.com/embed/episode/id/3727166/height/75/width/200/theme/standard/direction/no/autoplay/no/autonext/no/thumbnail/no/preload/no/no_addthis/no/',
        'md5': '6c5cb21acd622d754d3b1a92b582ce42',
        'info_dict': {
            'id': '3727166',
            'ext': 'mp3',
            'title': 'Clients From Hell Podcast - How a Sex Toy Company Kickstarted my Freelance Career',
            'upload_date': '20150818',
            'thumbnail': 're:^https?://.*',
        }
    }]
    def _real_extract(self, url):
        """Download the embed page and build the info dict for one episode."""
        m = re.match(self._VALID_URL, url)
        video_id = m.group('id')
        # Re-fetch from the bare embed URL, dropping any trailing player
        # options (height/width/theme/...) present in the input URL.
        url = m.group('mainurl')
        webpage = self._download_webpage(url, video_id)
        # Podcast (show) name is optional; episode title is mandatory.
        podcast_title = self._search_regex(
            r'<h3>([^<]+)</h3>', webpage, 'podcast title', default=None)
        if podcast_title:
            podcast_title = podcast_title.strip()
        episode_title = self._search_regex(
            r'(?:<div class="episode-title">|<h4>)([^<]+)</', webpage, 'episode title')
        if episode_title:
            episode_title = episode_title.strip()
        # "Show - Episode" when the show name is known, else just the episode.
        title = '%s - %s' % (podcast_title, episode_title) if podcast_title else episode_title
        description = self._html_search_regex(
            r'<p\s+id="info_text_body">(.+?)</p>', webpage,
            'description', default=None)
        if description:
            # Strip non-breaking and normal spaces
            description = description.replace('\u00A0', ' ').strip()
        release_date = unified_strdate(self._search_regex(
            r'<div class="release_date">Released: ([^<]+)<', webpage, 'release date', fatal=False))
        # Media metadata lives in an inline JS object assigned to playlistItem.
        data_json = self._search_regex(r'var\s+playlistItem\s*=\s*(\{.*?\});\n', webpage, 'JSON data block')
        data = json.loads(data_json)
        # Two mirrors of the same audio file; 'main' first, Libsyn CDN second.
        formats = [{
            'url': data['media_url'],
            'format_id': 'main',
        }, {
            'url': data['media_url_libsyn'],
            'format_id': 'libsyn',
        }]
        thumbnail = data.get('thumbnail_url')
        duration = parse_duration(data.get('duration'))
        return {
            'id': video_id,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'upload_date': release_date,
            'duration': duration,
            'formats': formats,
        }
| 36.852273 | 185 | 0.573235 |
from __future__ import unicode_literals
import json
import re
from .common import InfoExtractor
from ..utils import (
parse_duration,
unified_strdate,
)
class LibsynIE(InfoExtractor):
_VALID_URL = r'(?P<mainurl>https?://html5-player\.libsyn\.com/embed/episode/id/(?P<id>[0-9]+))'
_TESTS = [{
'url': 'http://html5-player.libsyn.com/embed/episode/id/6385796/',
'md5': '2a55e75496c790cdeb058e7e6c087746',
'info_dict': {
'id': '6385796',
'ext': 'mp3',
'title': "Champion Minded - Developing a Growth Mindset",
'description': 'In this episode, Allistair talks about the importance of developing a growth mindset, not only in sports, but in life too.',
'upload_date': '20180320',
'thumbnail': 're:^https?://.*',
},
}, {
'url': 'https://html5-player.libsyn.com/embed/episode/id/3727166/height/75/width/200/theme/standard/direction/no/autoplay/no/autonext/no/thumbnail/no/preload/no/no_addthis/no/',
'md5': '6c5cb21acd622d754d3b1a92b582ce42',
'info_dict': {
'id': '3727166',
'ext': 'mp3',
'title': 'Clients From Hell Podcast - How a Sex Toy Company Kickstarted my Freelance Career',
'upload_date': '20150818',
'thumbnail': 're:^https?://.*',
}
}]
def _real_extract(self, url):
m = re.match(self._VALID_URL, url)
video_id = m.group('id')
url = m.group('mainurl')
webpage = self._download_webpage(url, video_id)
podcast_title = self._search_regex(
r'<h3>([^<]+)</h3>', webpage, 'podcast title', default=None)
if podcast_title:
podcast_title = podcast_title.strip()
episode_title = self._search_regex(
r'(?:<div class="episode-title">|<h4>)([^<]+)</', webpage, 'episode title')
if episode_title:
episode_title = episode_title.strip()
title = '%s - %s' % (podcast_title, episode_title) if podcast_title else episode_title
description = self._html_search_regex(
r'<p\s+id="info_text_body">(.+?)</p>', webpage,
'description', default=None)
if description:
description = description.replace('\u00A0', ' ').strip()
release_date = unified_strdate(self._search_regex(
r'<div class="release_date">Released: ([^<]+)<', webpage, 'release date', fatal=False))
data_json = self._search_regex(r'var\s+playlistItem\s*=\s*(\{.*?\});\n', webpage, 'JSON data block')
data = json.loads(data_json)
formats = [{
'url': data['media_url'],
'format_id': 'main',
}, {
'url': data['media_url_libsyn'],
'format_id': 'libsyn',
}]
thumbnail = data.get('thumbnail_url')
duration = parse_duration(data.get('duration'))
return {
'id': video_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'upload_date': release_date,
'duration': duration,
'formats': formats,
}
| true | true |
f7311facdc21a3212e9fa9d8178b074821aeaa20 | 1,401 | py | Python | mediagoblin/tools/request.py | stenwt/mediagoblin-quickstart-openshift | 4a728c4b3b988c59eb9a43ad1ae1ca5edf8bc3c2 | [
"CC0-1.0"
] | 1 | 2016-02-10T18:22:42.000Z | 2016-02-10T18:22:42.000Z | mediagoblin/tools/request.py | stenwt/mediagoblin-quickstart-openshift | 4a728c4b3b988c59eb9a43ad1ae1ca5edf8bc3c2 | [
"CC0-1.0"
] | 1 | 2016-04-19T13:03:17.000Z | 2016-04-19T13:03:17.000Z | mediagoblin/tools/request.py | stenwt/mediagoblin-quickstart-openshift | 4a728c4b3b988c59eb9a43ad1ae1ca5edf8bc3c2 | [
"CC0-1.0"
] | null | null | null | # GNU MediaGoblin -- federated, autonomous media hosting
# Copyright (C) 2011, 2012 MediaGoblin contributors. See AUTHORS.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
from mediagoblin.db.models import User
_log = logging.getLogger(__name__)
def setup_user_in_request(request):
    """
    Examine a request and tack on a request.user parameter if that's
    appropriate.

    Sets ``request.user`` to the User for the session's ``user_id``, or to
    None when the session carries no user id.  If the id points at a
    nonexistent user, the (stale) session is invalidated.
    """
    # dict.has_key() was removed in Python 3; the ``in`` operator is the
    # portable membership test for session mappings.
    if 'user_id' not in request.session:
        request.user = None
        return

    request.user = User.query.get(request.session['user_id'])

    if not request.user:
        # Something's wrong... this user doesn't exist? Invalidate
        # this session.
        # Logger.warn is a deprecated alias of Logger.warning.
        _log.warning("Killing session for user id %r", request.session['user_id'])
        request.session.invalidate()
| 35.025 | 79 | 0.723769 |
import logging
from mediagoblin.db.models import User
_log = logging.getLogger(__name__)
def setup_user_in_request(request):
if not request.session.has_key('user_id'):
request.user = None
return
request.user = User.query.get(request.session['user_id'])
if not request.user:
_log.warn("Killing session for user id %r", request.session['user_id'])
request.session.invalidate()
| true | true |
f731200874a2e61790ea940792017eb5feadab06 | 13,142 | py | Python | survol/lib_export_json.py | rchateauneu/survol | ba66d3ec453b2d9dd3a8dabc6d53f71aa9ba8c78 | [
"BSD-3-Clause"
] | 9 | 2017-10-05T23:36:23.000Z | 2021-08-09T15:40:03.000Z | survol/lib_export_json.py | rchateauneu/survol | ba66d3ec453b2d9dd3a8dabc6d53f71aa9ba8c78 | [
"BSD-3-Clause"
] | 21 | 2018-01-02T09:33:03.000Z | 2018-08-27T11:09:52.000Z | survol/lib_export_json.py | rchateauneu/survol | ba66d3ec453b2d9dd3a8dabc6d53f71aa9ba8c78 | [
"BSD-3-Clause"
] | 4 | 2018-06-23T09:05:45.000Z | 2021-01-22T15:36:50.000Z | import sys
import six
import os
import json
import logging
import lib_kbase
import lib_patterns
import lib_naming
import lib_util
from lib_properties import pc
import lib_exports
_node_json_number = 0
class NodeJson:
    """This models a node as it will be saved to Json.

    Wraps one RDF node with the label, CSS-ish display attributes and URLs
    needed by the D3 Javascript interface.
    """
    # TODO: This creates a useless layer of lookup that could be suppressed.
    def __init__(self,rdf_node):
        # Each instance gets a sequential index from a module-global counter.
        # NOTE(review): not thread-safe; presumably fine for one-request-per-
        # process CGI execution -- confirm before reusing in threaded servers.
        global _node_json_number
        subj_str = str(rdf_node)
        # Human-readable label plus the graphic (ontology) class of the node.
        entity_label, entity_graphic_class, entity_id = lib_naming.ParseEntityUri(
            subj_str, long_display=False, force_entity_ip_addr=None)
        self.m_label = entity_label.strip()
        self.m_class = entity_graphic_class
        # Per-class display parameters; only the fill color is kept here.
        array_graph_params = lib_patterns.TypeToGraphParams(self.m_class)
        # "Graphic_shape","Graphic_colorfill","Graphic_colorbg","Graphic_border","Graphic_is_rounded"
        self.m_color = array_graph_params[1]
        # TODO: Display the doc in the module with FromModuleToDoc(importedMod,filDfltText):
        # Tooltip information: a list of strings plus a property->value dict,
        # filled in later while walking the graph triples.
        self.m_info_list = [entity_graphic_class]
        self.m_info_dict = dict()
        self.m_index = _node_json_number
        the_survol_url = lib_util.survol_unescape(rdf_node)
        self.m_survol_url = the_survol_url
        self.m_survol_universal_alias = lib_exports.NodeToUniversalAlias(rdf_node)
        _node_json_number += 1 # One more node.
# Only some scripts and urls are exported to Json.
# The most frequent should come first.
# root=http://mymachine:8000/survol
# url=http://mymachine:8000/survol/class_type_all.py?xid=com.
# url=http://mymachine:8000/survol/objtypes.py
# This must be a tuple because of startswith.
_urls_for_json = (
"/entity.py",
"/entity_wmi.py",
"/entity_wbem.py",
"/entity_info_only.py",
"/objtypes.py",
"/class_type_all.py",
"/class_wbem.py",
"/class_wmi.py",
# TODO: Maybe pass portal_wbem.py and portal_wmi.py ??
)
def _script_for_json(url):
    """Tell whether an URL may appear as a node in the D3 JSON graph.

    URLs served by this Survol instance are accepted only when their script
    path is one of the instance/class scripts whitelisted in _urls_for_json
    (e.g. entity.py, objtypes.py).  Scripts which merely return a graph of
    information about an instance are excluded here; they are shown in a
    node's contextual menu instead.  URLs pointing at other servers are
    always accepted.
    """
    if not url.startswith(lib_util.uriRoot):
        # Foreign (non-Survol) scripts are OK.
        return True
    # The script path starts right after this instance's URL root;
    # str.startswith takes the whitelist tuple plus that start offset.
    return url.startswith(_urls_for_json, len(lib_util.uriRoot))
def _write_json_header(buf_json, with_content_length=False):
    """
    This writes to the output a JSON content with the appropriate HTTP header.
    It for example used by the Javascript interface, to get a contextual menu.

    :param buf_json: The JSON document, already serialized to a string.
    :param with_content_length: If True, also emit a Content-Length header
        (only used for error replies; see the caveats below).

    What must be avoided: Cross-Origin Request Blocked:
    The Same Origin Policy disallows reading the remote resource at
    http://192.168.0.17/Survol/survol/sources_types/enumerate_CIM_Process.py?xid=.&mode=json.
    (Reason: CORS header 'Access-Control-Allow-Origin' missing)
    https://stackoverflow.com/questions/5027705/error-in-chrome-content-type-is-not-allowed-by-access-control-allow-headers
    The body of the reply is base-64 encoded.
    """
    # Permissive CORS headers so the D3 page can fetch from any origin.
    arr_headers = [
        ('Access-Control-Allow-Origin', '*'),
        ('Access-Control-Allow-Methods', 'POST,GET,OPTIONS'),
        ('Access-Control-Allow-Headers', 'Origin, X-Requested-With, Content-Type, Accept'),
    ]

    # It is difficult to calculate the length because the output is encoded
    # in Base64, which takes more room than JSon. And also, at least on Windows,
    # each line gets an extra character ("\n\r" ?).
    # So it is confusing.
    # The reason for adding the length is: When an error is detected, sometimes a second error
    # comes immediately after the one, even if the thread (or process ?) quits.
    #
    # Also, with Chrome and Android, sometimes it is not happy with the length,
    # even if we checked it. It works without the length, except if this is an error message.
    if with_content_length:
        # One extra byte per line to compensate for CR/LF expansion (see above).
        num_lines = buf_json.count("\n")
        len_buf = len(buf_json) + num_lines
        arr_headers.append(('Content-Length', str(len_buf)))

    lib_util.WrtHeader('application/json', arr_headers)

    # No text conversion.
    lib_util.WrtAsUtf(buf_json)
def write_json_error(message):
    """
    This is called only by ErrorMessageHtml when an error is detected and the output format is JSON,
    for the D3 Survol interface.
    After that, the calling function makes an exit.

    :param message: Plain-text error message shown by the Javascript client.

    The error message is formatted in the standard for returning errors.
    http://labs.omniti.com/labs/jsend
    """
    logging.warning("WriteJsonError message="+message)

    # JSend failure envelope: {"status": "error", "message": ...}.
    json_err = {"status": "error", "message": message}

    # The only case where Content-Length is added.
    _write_json_header(json.dumps(json_err, indent=2), True)
def output_rdf_graph_as_json_d3(page_title, error_msg, parameters, grph):
    """
    Transforms a RDF graph into a JSON document.

    This returns a graph made of Json objects which are suitable for
    visualisation in the Javascript interface to Survol, which is based on D3.

    :param page_title: Title of the page, copied into the output document.
    :param error_msg: Not used by this function.
    :param parameters: Not used by this function.
    :param grph: The RDF graph, iterated as (subject, predicate, object) triples.
    """
    # Must be reset to zero between several executions, when run by WSGI:
    # NodeJson numbers each node with this module-level counter.
    global _node_json_number
    _node_json_number = 0

    # Wraps a RDF node into a NodeJson. It contains a cache because the same
    # nodes may appear several times in the triples.
    def node_to_json_obj(the_nod):
        try:
            return node_to_json_obj.dictNod2Json[the_nod]
        except KeyError:
            json_obj = NodeJson(the_nod)
            node_to_json_obj.dictNod2Json[the_nod] = json_obj
            return json_obj
    node_to_json_obj.dictNod2Json = dict()

    links = []
    for subj, pred, obj in grph:
        # This applies only to entity.py : In rendering based on Json, scripts are not displayed as nodes,
        # but in hierarchical menus. The node must not appear at all.
        # TODO: Should probably also eliminate pc.property_rdf_data_nolist2 etc ... See lib_client.
        if pred == pc.property_script:
            logging.debug("continue subj=%s obj=%s",subj,obj)
            continue
        # Normal data scripts are not accepted. This should apply only to file_directory.py and file_to_mime.py
        if not _script_for_json(subj):
            continue
        if not _script_for_json(obj):
            continue
        subj_obj = node_to_json_obj(subj)
        subj_id = subj_obj.m_survol_url
        prop_nam = lib_exports.PropToShortPropNam(pred)
        # TODO: BUG: If several nodes for the same properties, only the last one is kept.
        if lib_kbase.IsLink(obj):
            # A node-to-node relation: becomes an edge in the D3 graph.
            obj_obj = node_to_json_obj(obj)
            obj_id = obj_obj.m_survol_url
            links.extend([{'source': subj_id, 'target': obj_id, 'survol_link_prop': prop_nam}])
            # TODO: Add the name corresponding to the URL, in m_info_dict so that some elements
            # of the tooltip would be clickable. On the other hand, one just need to merge
            # the nodes relative to the object, by right-clicking.
        elif lib_kbase.IsLiteral(obj):
            if pred == pc.property_information:
                # Free-form information strings go into the node's tooltip list.
                try:
                    subj_obj.m_info_list.append(str(obj.value))
                except UnicodeEncodeError:
                    # 'ascii' codec can't encode character u'\xf3' in position 17: ordinal not in range(128)
                    # https://stackoverflow.com/questions/9942594/unicodeencodeerror-ascii-codec-cant-encode-character-u-xa0-in-position-20
                    subj_obj.m_info_list.append(obj.value.encode('utf-8'))
            else:
                if isinstance(obj.value, six.integer_types) or isinstance(obj.value, six.string_types):
                    subj_obj.m_info_dict[prop_nam] = obj.value
                else:
                    # If the value cannot be serializable to JSON, store its type name instead.
                    subj_obj.m_info_dict[prop_nam] = type(obj.value).__name__
        else:
            raise Exception(__file__ + " Cannot happen here")

    # Now, this creates the nodes sent as json objects.
    # They are placed at the index given by their creation order (m_index).
    num_nodes = len(node_to_json_obj.dictNod2Json)
    nodes = [None] * num_nodes
    for nod in node_to_json_obj.dictNod2Json:
        nod_obj = node_to_json_obj.dictNod2Json[nod]
        nod_titl = nod_obj.m_label
        nod_id = nod_obj.m_index
        # The URL must not contain any HTML entities when in a XML or SVG document,
        # and therefore must be escaped. Therefore they have to be unescaped when transmitted in JSON.
        # This is especially needed for RabbitMQ because the parameter defining its connection name
        # has the form: "Url=LOCALHOST:12345,Connection=127.0.0.1:51748 -> 127.0.0.1:5672"
        # HTTP_MIME_URL
        the_survol_nam = lib_util.survol_unescape(nod_titl) # MUST UNESCAPE HTML ENTITIES !
        # TODO: Use the same object for lookup and Json.
        nodes[nod_id] = {
            'id' : nod_obj.m_survol_url, # Required by D3
            'name' : the_survol_nam,
            # Theoretically, this URL should be HTML unescaped then CGI escaped.
            'survol_url' : nod_obj.m_survol_url, # Duplicate of 'id'
            'survol_universal_alias' : nod_obj.m_survol_universal_alias,
            'survol_fill' : nod_obj.m_color,
            'entity_class' : nod_obj.m_class, # TODO: Maybe not needed because also in the URL ?
            'survol_info_list' : nod_obj.m_info_list,
            'survol_info_dict' : nod_obj.m_info_dict
        }

    # This is the graph displayed by D3.
    graph = {
        "page_title": page_title,
        "nodes": nodes,
        "links": links}
    _write_json_header(json.dumps(graph, indent=2))
def output_rdf_graph_as_json_menu(page_title, error_msg, parameters, grph):
    """
    Returns a tree of scripts, usable as the contextual menu of a node
    displayed in the D3 Javascript interface to Survol.

    The RDF content is already created: only the nodes related to scripts are
    kept. This does not return a network but a tree, to be displayed in a
    contextual menu, so its layout is completely different from a normal RDF
    graph transformed into JSON.

    TODO: It would be faster to keep only the tree of scripts. The script
    "entity.py" should have a different output when mode=json.
    TODO: Should add WBEM and WMI ?

    Input example: "http://127.0.0.1:8000/survol/entity.py?xid=CIM_Process.Handle=3812&mode=json"

    :param page_title: Not used by this function.
    :param error_msg: Not used by this function.
    :param parameters: Not used by this function.
    :param grph: The RDF graph, iterated as (subject, predicate, object) triples.
    """
    # Sub-scripts of each node: only directories of scripts have any.
    children_per_node = {}
    # Script nodes which have a parent directory.
    nodes_having_parent = set()
    # Nodes which have children: the menu roots are those without a parent.
    parent_nodes = set()
    # Display name of each node.
    name_per_node = dict()

    for rdf_subject, rdf_predicate, rdf_object in grph:
        if rdf_predicate == pc.property_script:
            children_per_node.setdefault(rdf_subject, []).append(rdf_object)
            if lib_kbase.IsLiteral(rdf_object):
                # The literal is the name of a subdirectory containing scripts.
                name_per_node[rdf_object] = rdf_object
            nodes_having_parent.add(rdf_object)
            parent_nodes.add(rdf_subject)
        elif rdf_predicate == pc.property_information:
            if not lib_kbase.IsLiteral(rdf_object):
                raise Exception("Cannot happen here also")
            name_per_node[rdf_subject] = rdf_object.value
        # Any other predicate is ignored.

    # Scripts without a parent directory: displayed at the top of the menu.
    root_nodes = parent_nodes - nodes_having_parent

    def build_subtree(node_list, depth=0):
        """Recursively builds the JSON menu for the given nodes."""
        subtree = {}
        for rdf_node in node_list:
            json_node = {
                "name": name_per_node.get(rdf_node, "No name"),
                "url": rdf_node}
            # A leaf script has no sub-items.
            children = children_per_node.get(rdf_node)
            if children is not None:
                json_node["items"] = build_subtree(children, depth + 1)
            subtree[rdf_node] = json_node
        return subtree

    menu_tree = build_subtree(root_nodes)

    # There is only one top-level element: unwrap its items.
    top_items = {}
    for top_key in menu_tree:
        top_items = menu_tree[top_key]["items"]
        break

    # Writes the content to the HTTP client. The output result must be sorted.
    _write_json_header(json.dumps(top_items, sort_keys=True, indent=2))
| 39.465465 | 139 | 0.666489 | import sys
import six
import os
import json
import logging
import lib_kbase
import lib_patterns
import lib_naming
import lib_util
from lib_properties import pc
import lib_exports
_node_json_number = 0
class NodeJson:
    """
    Wraps one RDF node in a form ready for JSON (D3) serialization.

    Each instance receives a sequential index (m_index), taken from the
    module-level counter _node_json_number, so that nodes can later be stored
    in a list in creation order.
    """
    def __init__(self,rdf_node):
        global _node_json_number
        subj_str = str(rdf_node)
        # Human-readable label, graphic class and id of the entity.
        entity_label, entity_graphic_class, entity_id = lib_naming.ParseEntityUri(
            subj_str, long_display=False, force_entity_ip_addr=None)
        self.m_label = entity_label.strip()
        self.m_class = entity_graphic_class
        # Element [1] is sent as the node's fill color ('survol_fill') —
        # meaning of the other elements not visible here; TODO confirm.
        array_graph_params = lib_patterns.TypeToGraphParams(self.m_class)
        self.m_color = array_graph_params[1]
        # Tooltip data: starts with the class name; more strings are appended
        # later from pc.property_information triples.
        self.m_info_list = [entity_graphic_class]
        self.m_info_dict = dict()
        # Creation order, used as the node's position in the output list.
        self.m_index = _node_json_number
        # HTML entities must be unescaped before being transmitted in JSON.
        the_survol_url = lib_util.survol_unescape(rdf_node)
        self.m_survol_url = the_survol_url
        self.m_survol_universal_alias = lib_exports.NodeToUniversalAlias(rdf_node)
        _node_json_number += 1
# Whitelist of Survol script paths which may appear as nodes in a JSON (D3)
# output. Checked by _script_for_json just after the URL root.
_urls_for_json = (
    "/entity.py",
    "/entity_wmi.py",
    "/entity_wbem.py",
    "/entity_info_only.py",
    "/objtypes.py",
    "/class_type_all.py",
    "/class_wbem.py",
    "/class_wmi.py",
)
def _script_for_json(url):
    """Tells whether this URL may appear as a node in a JSON (D3) output.

    URLs which do not start with the Survol root are always accepted;
    Survol URLs are accepted only when their script path, immediately after
    the root, is one of the whitelisted entry points (_urls_for_json).
    """
    root = lib_util.uriRoot
    if not url.startswith(root):
        # Not a Survol URL: nothing to filter.
        return True
    # Check the script path right after the URL root.
    return url.startswith(_urls_for_json, len(root))
def _write_json_header(buf_json, with_content_length=False):
    """
    Writes the HTTP header for a JSON document, then the document itself.

    The CORS headers allow the Javascript (D3) interface to fetch this
    content from another origin.

    :param buf_json: The JSON document, already serialized to a string.
    :param with_content_length: If True, also sends a Content-Length header.
        Used only for error messages (see write_json_error).
    """
    arr_headers = [
        ('Access-Control-Allow-Origin', '*'),
        ('Access-Control-Allow-Methods', 'POST,GET,OPTIONS'),
        ('Access-Control-Allow-Headers', 'Origin, X-Requested-With, Content-Type, Accept'),
    ]
    if with_content_length:
        # Approximate length: one extra byte per line-feed — presumably to
        # account for "\r\n" line endings on some platforms; TODO confirm.
        num_lines = buf_json.count("\n")
        len_buf = len(buf_json) + num_lines
        arr_headers.append(('Content-Length', str(len_buf)))
    lib_util.WrtHeader('application/json', arr_headers)
    # No text conversion.
    lib_util.WrtAsUtf(buf_json)
def write_json_error(message):
logging.warning("WriteJsonError message="+message)
json_err = {"status": "error", "message": message}
_write_json_header(json.dumps(json_err, indent=2), True)
def output_rdf_graph_as_json_d3(page_title, error_msg, parameters, grph):
global _node_json_number
_node_json_number = 0
def node_to_json_obj(the_nod):
try:
return node_to_json_obj.dictNod2Json[the_nod]
except KeyError:
json_obj = NodeJson(the_nod)
node_to_json_obj.dictNod2Json[the_nod] = json_obj
return json_obj
node_to_json_obj.dictNod2Json = dict()
links = []
for subj, pred, obj in grph:
if pred == pc.property_script:
logging.debug("continue subj=%s obj=%s",subj,obj)
continue
if not _script_for_json(subj):
continue
if not _script_for_json(obj):
continue
subj_obj = node_to_json_obj(subj)
subj_id = subj_obj.m_survol_url
prop_nam = lib_exports.PropToShortPropNam(pred)
if lib_kbase.IsLink(obj):
obj_obj = node_to_json_obj(obj)
obj_id = obj_obj.m_survol_url
links.extend([{'source': subj_id, 'target': obj_id, 'survol_link_prop': prop_nam}])
elif lib_kbase.IsLiteral(obj):
if pred == pc.property_information:
try:
subj_obj.m_info_list.append(str(obj.value))
except UnicodeEncodeError:
# https://stackoverflow.com/questions/9942594/unicodeencodeerror-ascii-codec-cant-encode-character-u-xa0-in-position-20
subj_obj.m_info_list.append(obj.value.encode('utf-8'))
else:
if isinstance(obj.value, six.integer_types) or isinstance(obj.value, six.string_types):
subj_obj.m_info_dict[prop_nam] = obj.value
else:
# If the value cannot be serializable to JSON.
subj_obj.m_info_dict[prop_nam] = type(obj.value).__name__
else:
raise Exception(__file__ + " Cannot happen here")
# Now, this creates the nodes sent as json objects.
num_nodes = len(node_to_json_obj.dictNod2Json)
nodes = [None] * num_nodes
for nod in node_to_json_obj.dictNod2Json:
nod_obj = node_to_json_obj.dictNod2Json[nod]
nod_titl = nod_obj.m_label
nod_id = nod_obj.m_index
# The URL must not contain any HTML entities when in a XML or SVG document,
# and therefore must be escaped. Therefore they have to be unescaped when transmitted in JSON.
# This is especially needed for RabbitMQ because the parameter defining its connection name
# has the form: "Url=LOCALHOST:12345,Connection=127.0.0.1:51748 -> 127.0.0.1:5672"
# HTTP_MIME_URL
the_survol_nam = lib_util.survol_unescape(nod_titl) # MUST UNESCAPE HTML ENTITIES !
# TODO: Use the same object for lookup and Json.
nodes[nod_id] = {
'id' : nod_obj.m_survol_url, # Required by D3
'name' : the_survol_nam,
# Theoretically, this URL should be HTML unescaped then CGI escaped.
'survol_url' : nod_obj.m_survol_url, # Duplicate of 'id'
'survol_universal_alias' : nod_obj.m_survol_universal_alias,
'survol_fill' : nod_obj.m_color,
'entity_class' : nod_obj.m_class, # TODO: Maybe not needed because also in the URL ?
'survol_info_list' : nod_obj.m_info_list,
'survol_info_dict' : nod_obj.m_info_dict
}
# This is the graph displayed by D3.
graph = {
"page_title": page_title,
"nodes": nodes,
"links": links}
_write_json_header(json.dumps(graph, indent=2))
def output_rdf_graph_as_json_menu(page_title, error_msg, parameters, grph):
# TODO: Should add WBEM and WMI ?
# For each node, the subscripts. Therefore it can only be a directory.
nodes_to_items = {}
# Nodes of scripts which have a parent.
nodes_with_parent = set()
# Later used to calculate the list of scripts which do not have a parent
# directory: They will be displayed at the top of the contextual menu.
subject_nodes = set()
# The name of each node.
nodes_to_names = dict()
for subj, pred, obj in grph:
if pred == pc.property_script:
try:
nodes_to_items[subj].append(obj)
except KeyError:
nodes_to_items[subj] = [obj]
if lib_kbase.IsLiteral(obj):
# This is the name of a subdirectory containing scripts.
nodes_to_names[obj] = obj
nodes_with_parent.add(obj)
subject_nodes.add(subj)
elif pred == pc.property_information:
if lib_kbase.IsLiteral(obj):
nodes_to_names[subj] = obj.value
else:
raise Exception("Cannot happen here also")
else:
pass
top_level_nodes = subject_nodes - nodes_with_parent
# The output result must be sorted.
def add_stuff(the_nod_list, depth=0):
list_json_items = {}
for one_rdf_nod in the_nod_list:
one_json_nod = {
"name": nodes_to_names.get(one_rdf_nod, "No name"),
"url": one_rdf_nod}
# This should be the sort key.
# Maybe it does not have subitems.
try:
lst_item = nodes_to_items[one_rdf_nod]
one_json_nod["items"] = add_stuff(lst_item, depth+1)
except KeyError:
pass
list_json_items[one_rdf_nod] = one_json_nod
return list_json_items
menu_json = add_stuff(top_level_nodes)
# There is only one top-level element.
one_menu_val = {}
for one_menu_key in menu_json:
one_menu_val = menu_json[one_menu_key]["items"]
break
# Writes the content to the HTTP client.
_write_json_header(json.dumps(one_menu_val, sort_keys=True, indent=2))
| true | true |
f73121b08999df30d985f558b20f79d8fc680663 | 8,052 | py | Python | ietf/group/tests_js.py | hassanakbar4/ietfdb-git | b899ee18604e878fb4133ef38cfeb6af781ce116 | [
"BSD-3-Clause"
] | null | null | null | ietf/group/tests_js.py | hassanakbar4/ietfdb-git | b899ee18604e878fb4133ef38cfeb6af781ce116 | [
"BSD-3-Clause"
] | null | null | null | ietf/group/tests_js.py | hassanakbar4/ietfdb-git | b899ee18604e878fb4133ef38cfeb6af781ce116 | [
"BSD-3-Clause"
] | 1 | 2021-10-05T12:49:27.000Z | 2021-10-05T12:49:27.000Z | # Copyright The IETF Trust 2021, All Rights Reserved
# -*- coding: utf-8 -*-
import datetime
import debug # pyflakes:ignore
from ietf.doc.factories import WgDraftFactory
from ietf.group.factories import GroupFactory, RoleFactory, DatedGroupMilestoneFactory
from ietf.utils.jstest import IetfSeleniumTestCase, ifSeleniumEnabled, selenium_enabled
if selenium_enabled():
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions
@ifSeleniumEnabled
class MilestoneTests(IetfSeleniumTestCase):
    """Selenium tests for the group milestone editing pages.

    Exercises the add/edit milestone forms through a real browser, including
    the select2 draft-search widget and the Review/Save two-step submit.
    """

    def setUp(self):
        # One group with a chair who is allowed to edit its milestones.
        super(MilestoneTests, self).setUp()
        self.wait = WebDriverWait(self.driver, 2)
        self.group = GroupFactory()
        self.chair = RoleFactory(group=self.group, name_id='chair').person

    def _search_draft_and_locate_result(self, draft_input, search_string, draft):
        """Search for a draft and get the search result element.

        Types search_string into the select2 input, waits for the result list,
        and returns the single result element whose text contains draft.name.
        Fails the test if there is not exactly one match.
        """
        draft_input.send_keys(search_string)
        result_selector = 'ul.select2-results > li > div.select2-result-label'
        self.wait.until(
            expected_conditions.text_to_be_present_in_element(
                (By.CSS_SELECTOR, result_selector),
                draft.name
            ))
        results = self.driver.find_elements_by_css_selector(result_selector)
        matching_results = [r for r in results if draft.name in r.text]
        self.assertEqual(len(matching_results), 1)
        return matching_results[0]

    def _click_milestone_submit_button(self, label):
        """Click the milestones-form submit button, asserting its label first."""
        submit_button_selector = 'form#milestones-form button[type="submit"]'
        submit_button = self.wait.until(
            expected_conditions.element_to_be_clickable((By.CSS_SELECTOR, submit_button_selector))
        )
        self.assertIn(label, submit_button.text)
        self.scroll_to_element(submit_button)
        submit_button.click()

    def _assert_milestone_changed(self):
        """Wait for milestone to be marked as changed and assert that this succeeded."""
        milestone_selector = 'form#milestones-form .milestone'
        try:
            found_expected_text = self.wait.until(
                expected_conditions.text_to_be_present_in_element(
                    (By.CSS_SELECTOR, milestone_selector),
                    'Changed'
                )
            )
        except TimeoutException:
            found_expected_text = False
        self.assertTrue(found_expected_text, 'Milestone never marked as "changed"')
        # Return the milestone row so callers can inspect its text.
        return self.driver.find_element_by_css_selector(milestone_selector)

    def test_add_milestone(self):
        """Add a brand new milestone through the edit-milestones page."""
        draft = WgDraftFactory()
        WgDraftFactory.create_batch(3)  # some drafts to ignore
        description = 'some description'
        due_date = datetime.date.today() + datetime.timedelta(days=60)
        assert(len(draft.name) > 5)
        # Search with only the tail of the name to exercise partial matching.
        draft_search_string = draft.name[-5:]

        self.login(self.chair.user.username)
        url = self.absreverse('ietf.group.milestones.edit_milestones;current',
                              kwargs=dict(acronym=self.group.acronym))
        self.driver.get(url)
        add_milestone_button = self.wait.until(
            expected_conditions.element_to_be_clickable(
                (By.CSS_SELECTOR, 'button.add-milestone')
            ))
        self.scroll_to_element(add_milestone_button)
        add_milestone_button.click()
        edit_div = self.wait.until(
            expected_conditions.visibility_of_element_located(
                (By.CSS_SELECTOR, 'form#milestones-form div.edit-milestone')
            ))
        desc_input = edit_div.find_element_by_css_selector('input[id$="_desc"]')
        due_input = edit_div.find_element_by_css_selector('input[id$="_due"]')
        draft_input = edit_div.find_element_by_css_selector(
            'div.select2-container[id$="id_docs"] input.select2-input'
        )
        # fill in the edit milestone form
        desc_input.send_keys(description)
        due_input.send_keys(due_date.strftime('%m %Y\n'))  # \n closes the date selector
        self._search_draft_and_locate_result(draft_input, draft_search_string, draft).click()
        self._click_milestone_submit_button('Review')
        result_row = self._assert_milestone_changed()
        self.assertIn(description, result_row.text)
        self._click_milestone_submit_button('Save')
        # Wait for page to return to group page
        self.wait.until(
            expected_conditions.text_to_be_present_in_element(
                (By.CSS_SELECTOR, 'div#content h1'),
                self.group.name
            )
        )
        self.assertIn('1 new milestone', self.driver.page_source)
        # Verify the milestone was actually persisted with the entered data.
        self.assertEqual(self.group.groupmilestone_set.count(), 1)
        gms = self.group.groupmilestone_set.first()
        self.assertEqual(gms.desc, description)
        self.assertEqual(gms.due.strftime('%m %Y'), due_date.strftime('%m %Y'))
        self.assertEqual(list(gms.docs.all()), [draft])

    def test_edit_milestone(self):
        """Edit an existing milestone: change its due date and linked draft."""
        milestone = DatedGroupMilestoneFactory(group=self.group)
        draft = WgDraftFactory()
        WgDraftFactory.create_batch(3)  # some drafts to ignore
        assert(len(draft.name) > 5)
        draft_search_string = draft.name[-5:]

        url = self.absreverse('ietf.group.milestones.edit_milestones;current',
                              kwargs=dict(acronym=self.group.acronym))
        self.login(self.chair.user.username)
        self.driver.get(url)
        # should only be one milestone row - test will fail later if we somehow get the wrong one
        edit_element = self.wait.until(
            expected_conditions.element_to_be_clickable(
                (By.CSS_SELECTOR, 'form#milestones-form div.milestonerow')
            )
        )
        edit_element.click()
        # find the description field corresponding to our milestone
        desc_field = self.wait.until(
            expected_conditions.visibility_of_element_located(
                (By.CSS_SELECTOR, 'input[value="%s"]' % milestone.desc)
            )
        )
        # Get the prefix used to identify inputs related to this milestone
        prefix = desc_field.get_attribute('id')[:-4]  # -4 to strip off 'desc', leave '-'
        due_field = self.driver.find_element_by_id(prefix + 'due')
        hidden_drafts_field = self.driver.find_element_by_id(prefix + 'docs')
        draft_input = self.driver.find_element_by_css_selector(
            'div.select2-container[id*="%s"] input.select2-input' % prefix
        )
        # Sanity-check the form was pre-populated from the existing milestone.
        self.assertEqual(due_field.get_attribute('value'), milestone.due.strftime('%B %Y'))
        self.assertEqual(hidden_drafts_field.get_attribute('value'),
                         ','.join([str(doc.pk) for doc in milestone.docs.all()]))
        # modify the fields
        new_due_date = (milestone.due + datetime.timedelta(days=31)).strftime('%m %Y')
        due_field.clear()
        due_field.send_keys(new_due_date + '\n')
        self._search_draft_and_locate_result(draft_input, draft_search_string, draft).click()
        self._click_milestone_submit_button('Review')
        self._assert_milestone_changed()
        self._click_milestone_submit_button('Save')
        # Wait for page to return to group page
        self.wait.until(
            expected_conditions.text_to_be_present_in_element(
                (By.CSS_SELECTOR, 'div#content h1'),
                self.group.name
            )
        )
        # Verify the changes were persisted.
        expected_desc = milestone.desc
        expected_due_date = new_due_date
        expected_docs = [draft]
        self.assertEqual(self.group.groupmilestone_set.count(), 1)
        gms = self.group.groupmilestone_set.first()
        self.assertEqual(gms.desc, expected_desc)
        self.assertEqual(gms.due.strftime('%m %Y'), expected_due_date)
        self.assertCountEqual(expected_docs, gms.docs.all())
| 42.603175 | 98 | 0.656607 |
import datetime
import debug
from ietf.doc.factories import WgDraftFactory
from ietf.group.factories import GroupFactory, RoleFactory, DatedGroupMilestoneFactory
from ietf.utils.jstest import IetfSeleniumTestCase, ifSeleniumEnabled, selenium_enabled
if selenium_enabled():
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions
@ifSeleniumEnabled
class MilestoneTests(IetfSeleniumTestCase):
def setUp(self):
super(MilestoneTests, self).setUp()
self.wait = WebDriverWait(self.driver, 2)
self.group = GroupFactory()
self.chair = RoleFactory(group=self.group, name_id='chair').person
def _search_draft_and_locate_result(self, draft_input, search_string, draft):
draft_input.send_keys(search_string)
result_selector = 'ul.select2-results > li > div.select2-result-label'
self.wait.until(
expected_conditions.text_to_be_present_in_element(
(By.CSS_SELECTOR, result_selector),
draft.name
))
results = self.driver.find_elements_by_css_selector(result_selector)
matching_results = [r for r in results if draft.name in r.text]
self.assertEqual(len(matching_results), 1)
return matching_results[0]
def _click_milestone_submit_button(self, label):
submit_button_selector = 'form#milestones-form button[type="submit"]'
submit_button = self.wait.until(
expected_conditions.element_to_be_clickable((By.CSS_SELECTOR, submit_button_selector))
)
self.assertIn(label, submit_button.text)
self.scroll_to_element(submit_button)
submit_button.click()
def _assert_milestone_changed(self):
milestone_selector = 'form#milestones-form .milestone'
try:
found_expected_text = self.wait.until(
expected_conditions.text_to_be_present_in_element(
(By.CSS_SELECTOR, milestone_selector),
'Changed'
)
)
except TimeoutException:
found_expected_text = False
self.assertTrue(found_expected_text, 'Milestone never marked as "changed"')
return self.driver.find_element_by_css_selector(milestone_selector)
def test_add_milestone(self):
draft = WgDraftFactory()
WgDraftFactory.create_batch(3)
description = 'some description'
due_date = datetime.date.today() + datetime.timedelta(days=60)
assert(len(draft.name) > 5)
draft_search_string = draft.name[-5:]
self.login(self.chair.user.username)
url = self.absreverse('ietf.group.milestones.edit_milestones;current',
kwargs=dict(acronym=self.group.acronym))
self.driver.get(url)
add_milestone_button = self.wait.until(
expected_conditions.element_to_be_clickable(
(By.CSS_SELECTOR, 'button.add-milestone')
))
self.scroll_to_element(add_milestone_button)
add_milestone_button.click()
edit_div = self.wait.until(
expected_conditions.visibility_of_element_located(
(By.CSS_SELECTOR, 'form#milestones-form div.edit-milestone')
))
desc_input = edit_div.find_element_by_css_selector('input[id$="_desc"]')
due_input = edit_div.find_element_by_css_selector('input[id$="_due"]')
draft_input = edit_div.find_element_by_css_selector(
'div.select2-container[id$="id_docs"] input.select2-input'
)
desc_input.send_keys(description)
due_input.send_keys(due_date.strftime('%m %Y\n'))
self._search_draft_and_locate_result(draft_input, draft_search_string, draft).click()
self._click_milestone_submit_button('Review')
result_row = self._assert_milestone_changed()
self.assertIn(description, result_row.text)
self._click_milestone_submit_button('Save')
self.wait.until(
expected_conditions.text_to_be_present_in_element(
(By.CSS_SELECTOR, 'div#content h1'),
self.group.name
)
)
self.assertIn('1 new milestone', self.driver.page_source)
self.assertEqual(self.group.groupmilestone_set.count(), 1)
gms = self.group.groupmilestone_set.first()
self.assertEqual(gms.desc, description)
self.assertEqual(gms.due.strftime('%m %Y'), due_date.strftime('%m %Y'))
self.assertEqual(list(gms.docs.all()), [draft])
def test_edit_milestone(self):
milestone = DatedGroupMilestoneFactory(group=self.group)
draft = WgDraftFactory()
WgDraftFactory.create_batch(3)
assert(len(draft.name) > 5)
draft_search_string = draft.name[-5:]
url = self.absreverse('ietf.group.milestones.edit_milestones;current',
kwargs=dict(acronym=self.group.acronym))
self.login(self.chair.user.username)
self.driver.get(url)
edit_element = self.wait.until(
expected_conditions.element_to_be_clickable(
(By.CSS_SELECTOR, 'form#milestones-form div.milestonerow')
)
)
edit_element.click()
desc_field = self.wait.until(
expected_conditions.visibility_of_element_located(
(By.CSS_SELECTOR, 'input[value="%s"]' % milestone.desc)
)
)
prefix = desc_field.get_attribute('id')[:-4]
due_field = self.driver.find_element_by_id(prefix + 'due')
hidden_drafts_field = self.driver.find_element_by_id(prefix + 'docs')
draft_input = self.driver.find_element_by_css_selector(
'div.select2-container[id*="%s"] input.select2-input' % prefix
)
self.assertEqual(due_field.get_attribute('value'), milestone.due.strftime('%B %Y'))
self.assertEqual(hidden_drafts_field.get_attribute('value'),
','.join([str(doc.pk) for doc in milestone.docs.all()]))
new_due_date = (milestone.due + datetime.timedelta(days=31)).strftime('%m %Y')
due_field.clear()
due_field.send_keys(new_due_date + '\n')
self._search_draft_and_locate_result(draft_input, draft_search_string, draft).click()
self._click_milestone_submit_button('Review')
self._assert_milestone_changed()
self._click_milestone_submit_button('Save')
self.wait.until(
expected_conditions.text_to_be_present_in_element(
(By.CSS_SELECTOR, 'div#content h1'),
self.group.name
)
)
expected_desc = milestone.desc
expected_due_date = new_due_date
expected_docs = [draft]
self.assertEqual(self.group.groupmilestone_set.count(), 1)
gms = self.group.groupmilestone_set.first()
self.assertEqual(gms.desc, expected_desc)
self.assertEqual(gms.due.strftime('%m %Y'), expected_due_date)
self.assertCountEqual(expected_docs, gms.docs.all())
| true | true |
f73122345854561a45917cbc6b8d22293f08b06b | 1,681 | py | Python | src/pygetwindow/__init__.py | EMOholcicka/PyGetWindow | 55743692fadd5faca330f1d5f9aa1b4ade20d786 | [
"BSD-3-Clause"
] | 1 | 2018-12-18T15:15:21.000Z | 2018-12-18T15:15:21.000Z | src/pygetwindow/__init__.py | EMOholcicka/PyGetWindow | 55743692fadd5faca330f1d5f9aa1b4ade20d786 | [
"BSD-3-Clause"
] | 3 | 2019-01-17T01:55:16.000Z | 2019-02-21T16:27:35.000Z | src/pygetwindow/__init__.py | EMOholcicka/PyGetWindow | 55743692fadd5faca330f1d5f9aa1b4ade20d786 | [
"BSD-3-Clause"
] | 1 | 2019-01-16T21:51:08.000Z | 2019-01-16T21:51:08.000Z | # PyGetWindow
# A cross-platform module to find information about the windows on the screen.
"""
# Work in progress
# Useful info:
#https://stackoverflow.com/questions/373020/finding-the-current-active-window-in-mac-os-x-using-python
#https://stackoverflow.com/questions/7142342/get-window-position-size-with-python
win32 api and ctypes on Windows
cocoa api and pyobjc on Mac
Xlib on linux
Possible Future Features:
get/click menu (win32: GetMenuItemCount, GetMenuItemInfo, GetMenuItemID, GetMenu, GetMenuItemRect)
"""
__version__ = '0.0.4'
import sys
import collections
class PyGetWindowException(Exception):
    """Base exception class for errors raised by PyGetWindow."""
    pass
def pointInRect(x, y, left, top, width, height):
    """Return True if (x, y) lies strictly inside the given rectangle.

    The comparisons are strict, so points exactly on any edge of the
    rectangle are considered outside.
    """
    inside_horizontally = left < x < left + width
    inside_vertically = top < y < top + height
    return inside_horizontally and inside_vertically
# Select the platform-specific implementation at import time.
if sys.platform == 'darwin':
    # TODO - implement mac
    raise NotImplementedError('PyGetWindow currently does not support macOS. If you have Appkit/Cocoa knowledge, please contribute! https://github.com/asweigart/pygetwindow')
elif sys.platform == 'win32':
    # Windows is the only platform implemented so far.
    from ._pygetwindow_win import Win32Window, getActiveWindow, getWindowsAt, getWindowsWithTitle, getAllWindows, getAllTitles
    Window = Win32Window
else:
    raise NotImplementedError('PyGetWindow currently does not support Linux. If you have Xlib knowledge, please contribute! https://github.com/asweigart/pygetwindow')
# NOTE: `Rect` is a named tuple for use in Python, while structs.RECT represents
# the win32 RECT struct. PyRect's Rect class is used for handling changing
# geometry of rectangular areas.
Rect = collections.namedtuple('Rect', 'left top right bottom')
# (x, y) screen coordinate of a point.
Point = collections.namedtuple('Point', 'x y')
# Width/height pair of a window or screen region.
Size = collections.namedtuple('Size', 'width height')
__version__ = '0.0.4'
import sys
import collections
class PyGetWindowException(Exception):
pass
def pointInRect(x, y, left, top, width, height):
return left < x < left + width and top < y < top + height
if sys.platform == 'darwin':
raise NotImplementedError('PyGetWindow currently does not support macOS. If you have Appkit/Cocoa knowledge, please contribute! https://github.com/asweigart/pygetwindow')
elif sys.platform == 'win32':
from ._pygetwindow_win import Win32Window, getActiveWindow, getWindowsAt, getWindowsWithTitle, getAllWindows, getAllTitles
Window = Win32Window
else:
raise NotImplementedError('PyGetWindow currently does not support Linux. If you have Xlib knowledge, please contribute! https://github.com/asweigart/pygetwindow')
# geometry of rectangular areas.
Rect = collections.namedtuple('Rect', 'left top right bottom')
Point = collections.namedtuple('Point', 'x y')
Size = collections.namedtuple('Size', 'width height') | true | true |
f731233587b1356960a9308c19c84399ba7b1cad | 8,353 | py | Python | manuscript/migrations/0007_auto__add_field_title_num_volumes__add_field_title_editor__add_field_t.py | adamsc64/django-manuscript | 9d17a8a93ddaa789a269dc5683b78f2be84778c1 | [
"MIT"
] | 1 | 2015-06-18T07:21:51.000Z | 2015-06-18T07:21:51.000Z | manuscript/migrations/0007_auto__add_field_title_num_volumes__add_field_title_editor__add_field_t.py | adamsc64/django-manuscript | 9d17a8a93ddaa789a269dc5683b78f2be84778c1 | [
"MIT"
] | null | null | null | manuscript/migrations/0007_auto__add_field_title_num_volumes__add_field_title_editor__add_field_t.py | adamsc64/django-manuscript | 9d17a8a93ddaa789a269dc5683b78f2be84778c1 | [
"MIT"
] | 1 | 2021-11-23T09:21:31.000Z | 2021-11-23T09:21:31.000Z | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: adds publication metadata (volumes, editor,
    publisher, place) and page/title links to the manuscript.Title model."""
    def forwards(self, orm):
        """Apply: add the seven new columns to the manuscript_title table."""
        # Adding field 'Title.num_volumes'
        db.add_column('manuscript_title', 'num_volumes', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True), keep_default=False)
        # Adding field 'Title.editor'
        db.add_column('manuscript_title', 'editor', self.gf('django.db.models.fields.CharField')(default='', max_length=70, blank=True), keep_default=False)
        # Adding field 'Title.publisher'
        db.add_column('manuscript_title', 'publisher', self.gf('django.db.models.fields.CharField')(default='', max_length=70, blank=True), keep_default=False)
        # Adding field 'Title.place_of_publication'
        db.add_column('manuscript_title', 'place_of_publication', self.gf('django.db.models.fields.CharField')(default='', max_length=70, blank=True), keep_default=False)
        # Adding field 'Title.title_page'
        db.add_column('manuscript_title', 'title_page', self.gf('django.db.models.fields.related.OneToOneField')(blank=True, related_name='title_page_of', unique=True, null=True, to=orm['manuscript.Page']), keep_default=False)
        # Adding field 'Title.copyright_page'
        db.add_column('manuscript_title', 'copyright_page', self.gf('django.db.models.fields.related.OneToOneField')(blank=True, related_name='copyright_page_of', unique=True, null=True, to=orm['manuscript.Page']), keep_default=False)
        # Adding field 'Title.original_publication_title'
        db.add_column('manuscript_title', 'original_publication_title', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['manuscript.Title'], unique=True, null=True, blank=True), keep_default=False)
    def backwards(self, orm):
        """Revert: drop the columns added in forwards() (FK columns use the _id suffix)."""
        # Deleting field 'Title.num_volumes'
        db.delete_column('manuscript_title', 'num_volumes')
        # Deleting field 'Title.editor'
        db.delete_column('manuscript_title', 'editor')
        # Deleting field 'Title.publisher'
        db.delete_column('manuscript_title', 'publisher')
        # Deleting field 'Title.place_of_publication'
        db.delete_column('manuscript_title', 'place_of_publication')
        # Deleting field 'Title.title_page'
        db.delete_column('manuscript_title', 'title_page_id')
        # Deleting field 'Title.copyright_page'
        db.delete_column('manuscript_title', 'copyright_page_id')
        # Deleting field 'Title.original_publication_title'
        db.delete_column('manuscript_title', 'original_publication_title_id')
    # Frozen ORM snapshot used by South to build the `orm` object above.
    models = {
        'manuscript.author': {
            'Meta': {'object_name': 'Author'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
            'old_id': ('django.db.models.fields.IntegerField', [], {'null': 'True'})
        },
        'manuscript.chapter': {
            'Meta': {'object_name': 'Chapter'},
            'heading': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'old_id': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '70', 'blank': 'True'}),
            'start_page_no': ('django.db.models.fields.IntegerField', [], {}),
            'title': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['manuscript.Title']"}),
            'xml_chapter_id': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True'})
        },
        'manuscript.compositeparagraph': {
            'Meta': {'object_name': 'CompositeParagraph'},
            'chapter': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['manuscript.Chapter']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'number': ('django.db.models.fields.IntegerField', [], {}),
            'pages': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['manuscript.Page']", 'symmetrical': 'False'}),
            'text': ('django.db.models.fields.TextField', [], {})
        },
        'manuscript.page': {
            'Meta': {'unique_together': "(('title', 'number'),)", 'object_name': 'Page'},
            'display': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'number': ('django.db.models.fields.IntegerField', [], {}),
            'scan': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
            'title': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['manuscript.Title']"})
        },
        'manuscript.paragraph': {
            'Meta': {'ordering': "('_order',)", 'object_name': 'Paragraph'},
            '_order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'chapter': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['manuscript.Chapter']"}),
            'composite': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['manuscript.CompositeParagraph']", 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'number': ('django.db.models.fields.IntegerField', [], {}),
            'old_id': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
            'old_page_number': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
            'page': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['manuscript.Page']"}),
            'split': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
            'text': ('django.db.models.fields.TextField', [], {})
        },
        'manuscript.sitecopytext': {
            'Meta': {'object_name': 'SiteCopyText'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'index': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'value': ('django.db.models.fields.TextField', [], {'default': "''"})
        },
        'manuscript.title': {
            'Meta': {'object_name': 'Title'},
            'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['manuscript.Author']"}),
            'copyright_page': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'copyright_page_of'", 'unique': 'True', 'null': 'True', 'to': "orm['manuscript.Page']"}),
            'editor': ('django.db.models.fields.CharField', [], {'max_length': '70', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'num_volumes': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'old_id': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
            'original_publication_title': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['manuscript.Title']", 'unique': 'True', 'null': 'True', 'blank': 'True'}),
            'pages': ('django.db.models.fields.IntegerField', [], {}),
            'place_of_publication': ('django.db.models.fields.CharField', [], {'max_length': '70', 'blank': 'True'}),
            'publication_year': ('django.db.models.fields.CharField', [], {'max_length': '15', 'blank': 'True'}),
            'publisher': ('django.db.models.fields.CharField', [], {'max_length': '70', 'blank': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'unique': 'True', 'max_length': '70', 'blank': 'True'}),
            'text': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
            'title_page': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'title_page_of'", 'unique': 'True', 'null': 'True', 'to': "orm['manuscript.Page']"}),
            'volume': ('django.db.models.fields.IntegerField', [], {})
        }
    }
    complete_apps = ['manuscript']
| 64.253846 | 234 | 0.600024 |
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: adds publication metadata (volumes, editor,
    publisher, place) and page/title links to the manuscript.Title model."""
    def forwards(self, orm):
        """Apply: add the seven new columns to the manuscript_title table."""
        db.add_column('manuscript_title', 'num_volumes', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True), keep_default=False)
        db.add_column('manuscript_title', 'editor', self.gf('django.db.models.fields.CharField')(default='', max_length=70, blank=True), keep_default=False)
        db.add_column('manuscript_title', 'publisher', self.gf('django.db.models.fields.CharField')(default='', max_length=70, blank=True), keep_default=False)
        db.add_column('manuscript_title', 'place_of_publication', self.gf('django.db.models.fields.CharField')(default='', max_length=70, blank=True), keep_default=False)
        db.add_column('manuscript_title', 'title_page', self.gf('django.db.models.fields.related.OneToOneField')(blank=True, related_name='title_page_of', unique=True, null=True, to=orm['manuscript.Page']), keep_default=False)
        db.add_column('manuscript_title', 'copyright_page', self.gf('django.db.models.fields.related.OneToOneField')(blank=True, related_name='copyright_page_of', unique=True, null=True, to=orm['manuscript.Page']), keep_default=False)
        db.add_column('manuscript_title', 'original_publication_title', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['manuscript.Title'], unique=True, null=True, blank=True), keep_default=False)
    def backwards(self, orm):
        """Revert: drop the columns added in forwards() (FK columns use the _id suffix)."""
        db.delete_column('manuscript_title', 'num_volumes')
        db.delete_column('manuscript_title', 'editor')
        db.delete_column('manuscript_title', 'publisher')
        db.delete_column('manuscript_title', 'place_of_publication')
        db.delete_column('manuscript_title', 'title_page_id')
        db.delete_column('manuscript_title', 'copyright_page_id')
        db.delete_column('manuscript_title', 'original_publication_title_id')
    # Frozen ORM snapshot used by South to build the `orm` object above.
    models = {
        'manuscript.author': {
            'Meta': {'object_name': 'Author'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
            'old_id': ('django.db.models.fields.IntegerField', [], {'null': 'True'})
        },
        'manuscript.chapter': {
            'Meta': {'object_name': 'Chapter'},
            'heading': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'old_id': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '70', 'blank': 'True'}),
            'start_page_no': ('django.db.models.fields.IntegerField', [], {}),
            'title': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['manuscript.Title']"}),
            'xml_chapter_id': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True'})
        },
        'manuscript.compositeparagraph': {
            'Meta': {'object_name': 'CompositeParagraph'},
            'chapter': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['manuscript.Chapter']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'number': ('django.db.models.fields.IntegerField', [], {}),
            'pages': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['manuscript.Page']", 'symmetrical': 'False'}),
            'text': ('django.db.models.fields.TextField', [], {})
        },
        'manuscript.page': {
            'Meta': {'unique_together': "(('title', 'number'),)", 'object_name': 'Page'},
            'display': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'number': ('django.db.models.fields.IntegerField', [], {}),
            'scan': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
            'title': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['manuscript.Title']"})
        },
        'manuscript.paragraph': {
            'Meta': {'ordering': "('_order',)", 'object_name': 'Paragraph'},
            '_order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'chapter': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['manuscript.Chapter']"}),
            'composite': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['manuscript.CompositeParagraph']", 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'number': ('django.db.models.fields.IntegerField', [], {}),
            'old_id': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
            'old_page_number': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
            'page': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['manuscript.Page']"}),
            'split': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
            'text': ('django.db.models.fields.TextField', [], {})
        },
        'manuscript.sitecopytext': {
            'Meta': {'object_name': 'SiteCopyText'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'index': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'value': ('django.db.models.fields.TextField', [], {'default': "''"})
        },
        'manuscript.title': {
            'Meta': {'object_name': 'Title'},
            'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['manuscript.Author']"}),
            'copyright_page': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'copyright_page_of'", 'unique': 'True', 'null': 'True', 'to': "orm['manuscript.Page']"}),
            'editor': ('django.db.models.fields.CharField', [], {'max_length': '70', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'num_volumes': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'old_id': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
            'original_publication_title': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['manuscript.Title']", 'unique': 'True', 'null': 'True', 'blank': 'True'}),
            'pages': ('django.db.models.fields.IntegerField', [], {}),
            'place_of_publication': ('django.db.models.fields.CharField', [], {'max_length': '70', 'blank': 'True'}),
            'publication_year': ('django.db.models.fields.CharField', [], {'max_length': '15', 'blank': 'True'}),
            'publisher': ('django.db.models.fields.CharField', [], {'max_length': '70', 'blank': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'unique': 'True', 'max_length': '70', 'blank': 'True'}),
            'text': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
            'title_page': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'title_page_of'", 'unique': 'True', 'null': 'True', 'to': "orm['manuscript.Page']"}),
            'volume': ('django.db.models.fields.IntegerField', [], {})
        }
    }
    complete_apps = ['manuscript']
| true | true |
f731236ba73b3b9e12b15dec7926dc5c3b69bd93 | 1,359 | py | Python | jpype/JavaLib.py | Hi-Fi/robotframework-after-jython | 1c4956f49fa28bad1156d8243456f8f102eeb8cb | [
"MIT"
] | null | null | null | jpype/JavaLib.py | Hi-Fi/robotframework-after-jython | 1c4956f49fa28bad1156d8243456f8f102eeb8cb | [
"MIT"
] | null | null | null | jpype/JavaLib.py | Hi-Fi/robotframework-after-jython | 1c4956f49fa28bad1156d8243456f8f102eeb8cb | [
"MIT"
] | null | null | null | """Main library."""
from typing import Optional
# Import module
import jpype
# Enable Java imports
import jpype.imports
# Pull in types
from jpype.types import *
import importlib
class JavaLib:
    """Robot Framework dynamic-library bridge that proxies keyword calls
    to a Java library class through JPype.

    Implements the dynamic library API: ``get_keyword_names``,
    ``run_keyword`` and ``get_keyword_documentation``.
    """

    # The docstring above is now the first statement in the class body;
    # previously it sat after ROBOT_LIBRARY_SCOPE, where it was a no-op
    # string expression rather than the class docstring.
    ROBOT_LIBRARY_SCOPE = "GLOBAL"

    def __init__(
        self,
        library: str,
        classpath: Optional[str] = None):
        """Start the JVM (unless one is already running) and instantiate
        the wrapped Java library.

        :param library: importable name of the Java library class.
        :param classpath: optional ':'-separated JVM classpath entries.
        """
        if jpype.isJVMStarted():
            print("JVM running")
        else:
            # `classpath` defaults to None; the old unconditional
            # `classpath.split(":")` raised AttributeError in that case.
            if classpath:
                jpype.startJVM(classpath=classpath.split(":"))
            else:
                jpype.startJVM()
        # With jpype.imports enabled the Java class is importable like a
        # Python module; presumably the imported object is callable —
        # TODO confirm against an actual Java library class.
        JavaLibrary = importlib.import_module(library)
        self.javaLibrary = JavaLibrary()

    def get_keyword_names(self):
        """Return the Java library's keyword names as native Python strings."""
        # The Java side returns Java strings; convert each element so Robot
        # Framework receives plain `str` objects.
        return [str(keyword) for keyword in self.javaLibrary.getKeywordNames()]

    def run_keyword(self, keyword: str, args, kwargs):
        """Execute *keyword* on the Java library, converting the positional
        and named arguments to Java collection types."""
        import java
        return self.javaLibrary.runKeyword(JString(keyword), java.util.ArrayList(args), java.util.HashMap(kwargs))

    def get_keyword_documentation(self, keyword: str):
        """Return the keyword's documentation, or '' if the lookup fails."""
        try:
            # AnnotationLibrary returns java.lang.String
            documentation = str(self.javaLibrary.getKeywordDocumentation(keyword))
        except Exception:
            # A bare `except:` would also swallow KeyboardInterrupt and
            # SystemExit; keep the best-effort fallback for real errors only.
            documentation = ""
        return documentation
| 27.18 | 114 | 0.656365 |
from typing import Optional
import jpype
import jpype.imports
from jpype.types import *
import importlib
class JavaLib:
    """Robot Framework dynamic-library bridge to a Java library via JPype."""
    ROBOT_LIBRARY_SCOPE = "GLOBAL"
    def __init__(
        self,
        library: str,
        classpath: Optional[str] = None):
        """Start the JVM if needed and instantiate the wrapped Java library.

        NOTE(review): `classpath` defaults to None, yet `.split(":")` is
        called on it unconditionally — this crashes unless a classpath is
        always supplied; confirm and guard.
        """
        if jpype.isJVMStarted():
            print("JVM running")
        else:
            jpype.startJVM(classpath=classpath.split(":"))
        JavaLibrary = importlib.import_module(library)
        self.javaLibrary = JavaLibrary()
    def get_keyword_names(self):
        """Return the Java library's keyword names as native Python strings."""
        keywords = []
        # Convert Java strings to plain `str` for Robot Framework.
        for keyword in self.javaLibrary.getKeywordNames():
            keywords.append(str(keyword))
        return keywords
    def run_keyword(self, keyword: str, args, kwargs):
        """Execute *keyword*, converting args/kwargs to Java collections."""
        import java
        return self.javaLibrary.runKeyword(JString(keyword), java.util.ArrayList(args), java.util.HashMap(kwargs))
    def get_keyword_documentation(self, keyword: str):
        """Return the keyword's documentation, or '' when the lookup fails."""
        try:
            documentation = str(self.javaLibrary.getKeywordDocumentation(keyword))
        # NOTE(review): bare except also catches KeyboardInterrupt/SystemExit.
        except:
            documentation = ""
        return documentation
| true | true |
f7312400eaec87ffea3fdf898814764925b356f6 | 2,267 | py | Python | cogs/pin.py | kdrsrt/mecha | a32047fe6bdc776cad73d95a716c7bfa8f0516ed | [
"MIT"
] | null | null | null | cogs/pin.py | kdrsrt/mecha | a32047fe6bdc776cad73d95a716c7bfa8f0516ed | [
"MIT"
] | null | null | null | cogs/pin.py | kdrsrt/mecha | a32047fe6bdc776cad73d95a716c7bfa8f0516ed | [
"MIT"
] | null | null | null | from discord.ext.commands import Cog, command
from discord import Embed, File
from discord.ext import commands
import os, discord
class Pin(Cog):
    """Cog that pins/unpins messages when a 📌 reaction is added/removed.

    In one specific guild the 'sabitleyici' role grants pin rights; in every
    other guild the member's channel-level manage-messages permission is used.
    """

    # Guild where pinning is granted by role membership instead of permission.
    PIN_ROLE_GUILD_ID = 699224778824745003
    PIN_ROLE_NAME = 'sabitleyici'

    def __init__(self, bot):
        self.bot = bot
        # Reaction emoji that triggers pinning/unpinning.
        self.emoji = "📌"

    def _member_can_pin(self, channel, member, guild_id):
        """Return True if *member* may (un)pin messages in *channel*."""
        if int(guild_id) == self.PIN_ROLE_GUILD_ID:
            return any(role.name == self.PIN_ROLE_NAME for role in member.roles)
        # Use the named permission attribute rather than the previous
        # `list(channel.permissions_for(member))[13][1]`, which silently
        # breaks if discord.py ever reorders its permission flags.
        return channel.permissions_for(member).manage_messages

    @Cog.listener()
    async def on_raw_reaction_add(self, payload):
        if payload.emoji.name != self.emoji:
            return
        channel = self.bot.get_channel(payload.channel_id)
        if not self._member_can_pin(channel, payload.member, payload.guild_id):
            return
        message = await channel.fetch_message(payload.message_id)
        if not message.pinned:
            await message.pin()
            # Delete the newest channel message — presumably the "message
            # pinned" system notification Discord just posted.
            async for notification in channel.history(limit=1):
                await notification.delete()

    @Cog.listener()
    async def on_raw_reaction_remove(self, payload):
        if payload.emoji.name != self.emoji:
            return
        channel = self.bot.get_channel(payload.channel_id)
        # Raw remove payloads carry no Member object, so resolve it by id.
        member = discord.utils.get(self.bot.get_all_members(), id=payload.user_id)
        if not self._member_can_pin(channel, member, payload.guild_id):
            return
        message = await channel.fetch_message(payload.message_id)
        if message.pinned:
            await message.unpin()
| 44.45098 | 87 | 0.539038 | from discord.ext.commands import Cog, command
from discord import Embed, File
from discord.ext import commands
import os, discord
class Pin(Cog):
    """Cog that pins/unpins messages via the 📌 reaction.

    In guild 699224778824745003 the 'sabitleyici' role grants pin rights;
    in other guilds the member's channel permissions are consulted.
    """
    def __init__(self, bot):
        self.bot = bot
        # Reaction emoji that triggers pinning/unpinning.
        self.emoji = "📌"
    @Cog.listener()
    async def on_raw_reaction_add(self, payload):
        """Pin the reacted message if the reacting member is allowed to."""
        if payload.emoji.name == self.emoji:
            channel = self.bot.get_channel(payload.channel_id)
            if int(payload.guild_id) == 699224778824745003:
                member_roles = list(a.name for a in payload.member.roles)
                if 'sabitleyici' in member_roles:
                    message = await channel.fetch_message(payload.message_id)
                    if not message.pinned:
                        await message.pin()
                        # Delete the newest channel message — presumably the
                        # "message pinned" system notification; confirm.
                        async for x in channel.history(limit = 1):
                            await x.delete()
            else:
                # NOTE(review): relies on tuple #13 of the permission
                # iteration being the manage-messages flag — fragile;
                # prefer `.manage_messages`.
                pers = list(a for a in channel.permissions_for(payload.member))
                if pers[13][1] == True:
                    message = await channel.fetch_message(payload.message_id)
                    if not message.pinned:
                        await message.pin()
                        async for x in channel.history(limit = 1):
                            await x.delete()
    @Cog.listener()
    async def on_raw_reaction_remove(self, payload):
        """Unpin the message when an authorised member removes their 📌."""
        if payload.emoji.name == self.emoji:
            channel = self.bot.get_channel(payload.channel_id)
            # Raw remove payloads carry no Member object; resolve it by id.
            member = discord.utils.get(self.bot.get_all_members(), id=payload.user_id)
            if int(payload.guild_id) == 699224778824745003:
                member_roles = list(a.name for a in member.roles)
                # NOTE(review): debug print left in; consider removing.
                print(member_roles)
                if 'sabitleyici' in member_roles:
                    message = await channel.fetch_message(payload.message_id)
                    if message.pinned:
                        await message.unpin()
            else:
                pers = list(a for a in channel.permissions_for(member))
                if pers[13][1] == True:
                    message = await channel.fetch_message(payload.message_id)
                    if message.pinned:
                        await message.unpin()
| true | true |
f731245784b2ec4ad02c78b9de42af4e227ee2c1 | 900 | py | Python | test/test_dist.py | bachew/mollusc | 9ae0eff4455b55314c2b3fe153c51403e2affa1c | [
"MIT"
] | null | null | null | test/test_dist.py | bachew/mollusc | 9ae0eff4455b55314c2b3fe153c51403e2affa1c | [
"MIT"
] | 3 | 2017-11-20T06:46:47.000Z | 2019-12-06T07:45:59.000Z | test/test_dist.py | bachew/mollusc | 9ae0eff4455b55314c2b3fe153c51403e2affa1c | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from mollusc.dist import Twine
class TestTwine(object):
    """Unit tests for Twine.get_command argument assembly."""

    def test_register_command(self):
        """register: flags come first, then the single package file."""
        client = Twine(username='registrar', password='reg1strar')
        command = client.get_command('register', 'package.whl', {'-c': 'test register'})
        expected = ['twine', 'register']
        expected += ['--repository-url', Twine.DEFAULT_REPO_URL]
        expected += ['-u', 'registrar']
        expected += ['-c', 'test register']
        expected.append('package.whl')
        assert command == expected

    def test_upload_command(self):
        """upload: multiple package files are appended after the flags."""
        client = Twine(username='uploader', password='upl0ader')
        command = client.get_command('upload', ['package.whl', 'package.tar.gz'], {'-c': 'test upload'})
        expected = ['twine', 'upload']
        expected += ['--repository-url', Twine.DEFAULT_REPO_URL]
        expected += ['-u', 'uploader']
        expected += ['-c', 'test upload']
        expected += ['package.whl', 'package.tar.gz']
        assert command == expected
| 33.333333 | 105 | 0.53 |
from mollusc.dist import Twine
class TestTwine(object):
    """Unit tests for Twine.get_command argument assembly."""
    def test_register_command(self):
        """register: flags come first, then the single package file."""
        twine = Twine(username='registrar', password='reg1strar')
        assert twine.get_command('register', 'package.whl', {'-c': 'test register'}) == [
            'twine',
            'register',
            '--repository-url', Twine.DEFAULT_REPO_URL,
            '-u', 'registrar',
            '-c', 'test register',
            'package.whl'
        ]
    def test_upload_command(self):
        """upload: multiple package files are appended after the flags."""
        twine = Twine(username='uploader', password='upl0ader')
        assert twine.get_command('upload', ['package.whl', 'package.tar.gz'], {'-c': 'test upload'}) == [
            'twine',
            'upload',
            '--repository-url', Twine.DEFAULT_REPO_URL,
            '-u', 'uploader',
            '-c', 'test upload',
            'package.whl', 'package.tar.gz'
        ]
| true | true |
f731257080db5facc50d1cd0d6a59693883c2335 | 107 | py | Python | slack/forms.py | pandabearcoder/pythonph | f0a1b93cd3f6234f1eb2d8eae83a8ad8b6741006 | [
"MIT"
] | 23 | 2015-02-26T04:01:02.000Z | 2021-11-09T01:48:09.000Z | slack/forms.py | pandabearcoder/pythonph | f0a1b93cd3f6234f1eb2d8eae83a8ad8b6741006 | [
"MIT"
] | 32 | 2015-04-27T14:17:16.000Z | 2022-03-11T23:12:03.000Z | slack/forms.py | pandabearcoder/pythonph | f0a1b93cd3f6234f1eb2d8eae83a8ad8b6741006 | [
"MIT"
] | 27 | 2015-02-16T17:00:18.000Z | 2022-03-29T01:01:46.000Z | from django import forms
class SlackInviteForm(forms.Form):
    """Single-field form collecting the email address to send a Slack invite to."""
    email = forms.EmailField(label="Email")
| 15.285714 | 43 | 0.747664 | from django import forms
class SlackInviteForm(forms.Form):
    """Single-field form collecting the email address to send a Slack invite to."""
    email = forms.EmailField(label="Email")
| true | true |
f73126514631ade1b408efeff6b45a1afa8ead8e | 124 | py | Python | src/opencmiss/__init__.py | rchristie/opencmiss.argon | c5cf8f313e31fc2f9d647a64ce8694cbb4f9e9cf | [
"Apache-2.0"
] | null | null | null | src/opencmiss/__init__.py | rchristie/opencmiss.argon | c5cf8f313e31fc2f9d647a64ce8694cbb4f9e9cf | [
"Apache-2.0"
] | 2 | 2016-01-15T04:17:35.000Z | 2016-02-26T04:01:02.000Z | src/opencmiss/__init__.py | rchristie/opencmiss.argon | c5cf8f313e31fc2f9d647a64ce8694cbb4f9e9cf | [
"Apache-2.0"
] | 6 | 2015-11-29T20:57:16.000Z | 2021-06-08T04:02:26.000Z |
# OpenCMISS Python package initialisation file.
from pkgutil import extend_path
# Classic pkgutil-style namespace package: merge `opencmiss.*` subpackages
# contributed by every distribution on sys.path into this package's __path__.
__path__ = extend_path(__path__, __name__)
| 24.8 | 47 | 0.830645 |
from pkgutil import extend_path
# pkgutil-style namespace package: merge `opencmiss.*` subpackages from
# every distribution on sys.path into this package's __path__.
__path__ = extend_path(__path__, __name__)
| true | true |
f73126570233a0b7d7357c728250600918f9ec85 | 126 | py | Python | mscreen/autodocktools_prepare_py3k/bhtree/__init__.py | e-mayo/mscreen | a50f0b2f7104007c730baa51b4ec65c891008c47 | [
"MIT"
] | 9 | 2021-03-06T04:24:28.000Z | 2022-01-03T09:53:07.000Z | bhtree/__init__.py | e-mayo/autodocktools-prepare-py3k | 2dd2316837bcb7c19384294443b2855e5ccd3e01 | [
"BSD-3-Clause"
] | 3 | 2021-03-07T05:37:16.000Z | 2021-09-19T15:06:54.000Z | bhtree/__init__.py | e-mayo/autodocktools-prepare-py3k | 2dd2316837bcb7c19384294443b2855e5ccd3e01 | [
"BSD-3-Clause"
] | 4 | 2019-08-28T23:11:39.000Z | 2021-11-27T08:43:36.000Z | from .bhtreelib import *
# Version tag consumed by the MGLTools packaging machinery.
__MGLTOOLSVersion__ = '1-4alpha3'
# Dependency declarations — presumably read by the MGLTools installer,
# with critical dependencies required and non-critical optional; confirm.
CRITICAL_DEPENDENCIES = ['mglutil']
NONCRITICAL_DEPENDENCIES = []
| 21 | 35 | 0.777778 | from .bhtreelib import *
# Version tag consumed by the MGLTools packaging machinery.
__MGLTOOLSVersion__ = '1-4alpha3'
# Dependency declarations — presumably read by the MGLTools installer,
# with critical dependencies required and non-critical optional; confirm.
CRITICAL_DEPENDENCIES = ['mglutil']
NONCRITICAL_DEPENDENCIES = []
| true | true |
f73126e4c01e01f20bce82c5853d0aa6eb16c032 | 656 | py | Python | app/utils/prediction_utils.py | Ukasz09/Clothing-recognition | 9332b0d3eac59782c0e8a72078ba97d67805d512 | [
"MIT"
] | 3 | 2020-06-11T12:38:28.000Z | 2020-11-01T13:26:47.000Z | app/utils/prediction_utils.py | Ukasz09/Clothing-recognition | 9332b0d3eac59782c0e8a72078ba97d67805d512 | [
"MIT"
] | null | null | null | app/utils/prediction_utils.py | Ukasz09/Clothing-recognition | 9332b0d3eac59782c0e8a72078ba97d67805d512 | [
"MIT"
] | null | null | null | import time
import datetime
import numpy as np
# NOTE(review): these module-level timestamps are never read anywhere in
# this module — presumably leftovers from removed timing helpers.
__start_time = time.time()
__end_time = time.time()
def calc_accuracy(predicted_labels, real_labels):
    """Return classification accuracy as a percentage in [0, 100].

    :param predicted_labels: sequence of predicted class labels
    :param real_labels: sequence of ground-truth labels, assumed same length
    :return: percentage of positions where the two sequences agree
    :raises ZeroDivisionError: if *predicted_labels* is empty
    """
    # Pairwise comparison via zip instead of index arithmetic.
    correct_qty = sum(
        1 for predicted, real in zip(predicted_labels, real_labels)
        if predicted == real)
    return correct_qty * 100 / len(predicted_labels)
def predict_labels(pyx):
    """
    :param pyx: matrix with probability distribution p(y|x) for every class and *X_test* object
    :return: list with predicted class labels (argmax over the class axis)
    """
    # Single vectorized argmax over the class axis instead of a Python-level
    # loop calling np.argmax once per row; assumes rows have equal length.
    return list(np.argmax(np.asarray(pyx), axis=1))
def convert_time(sec):
    """Format a duration given in seconds as a H:MM:SS style string."""
    duration = datetime.timedelta(seconds=sec)
    return str(duration)
| 24.296296 | 95 | 0.696646 | import time
import datetime
import numpy as np
# NOTE(review): these module-level timestamps are never read anywhere in
# this module — presumably leftovers from removed timing helpers.
__start_time = time.time()
__end_time = time.time()
def calc_accuracy(predicted_labels, real_labels):
    """Return the percentage of positions where predictions match labels.

    Assumes both sequences have the same length; raises ZeroDivisionError
    for empty input.
    """
    correct_qty = 0
    for i in range(len(predicted_labels)):
        if predicted_labels[i] == real_labels[i]:
            correct_qty += 1
    return correct_qty * 100 / len(predicted_labels)
def predict_labels(pyx):
    """Return the argmax class index for every row of the p(y|x) matrix *pyx*."""
    return [np.argmax(row, axis=0) for row in pyx]
def convert_time(sec):
    """Format a duration in seconds as str(timedelta), e.g. '1:01:01'."""
    return str(datetime.timedelta(seconds=sec))
| true | true |
f73127413286956e4030d84fb610da04ec29aad6 | 8,529 | py | Python | saleor/discount/models.py | dnordio/saleor | 323963748e6a2702265ec6635b930a234abde4f5 | [
"BSD-3-Clause"
] | 1 | 2019-05-02T17:24:05.000Z | 2019-05-02T17:24:05.000Z | saleor/discount/models.py | valentine217/saleor | 323963748e6a2702265ec6635b930a234abde4f5 | [
"BSD-3-Clause"
] | null | null | null | saleor/discount/models.py | valentine217/saleor | 323963748e6a2702265ec6635b930a234abde4f5 | [
"BSD-3-Clause"
] | 1 | 2019-05-23T07:30:50.000Z | 2019-05-23T07:30:50.000Z | from datetime import date
from decimal import Decimal
from functools import partial
from django.conf import settings
from django.db import models
from django.db.models import F, Q
from django.utils.translation import pgettext, pgettext_lazy
from django_countries.fields import CountryField
from django_prices.models import MoneyField
from django_prices.templatetags.prices_i18n import amount
from prices import Money, fixed_discount, percentage_discount
from ..core.utils.translations import TranslationProxy
from . import DiscountValueType, VoucherType
class NotApplicable(ValueError):
    """Raised when a discount cannot be applied to a checkout.

    Typically the order value is below the minimum required price; that
    minimum is exposed through the `min_amount_spent` attribute.
    """

    def __init__(self, msg, min_amount_spent=None):
        self.min_amount_spent = min_amount_spent
        super().__init__(msg)
class VoucherQueryset(models.QuerySet):
    def active(self, date):
        """Vouchers usable on *date*: started, not ended, not used up."""
        return self.filter(
            Q(usage_limit__isnull=True) | Q(used__lt=F('usage_limit')),
            Q(end_date__isnull=True) | Q(end_date__gte=date),
            start_date__lte=date)
    def expired(self, date):
        """Vouchers no longer usable on *date*: exhausted or past their end date."""
        return self.filter(
            Q(used__gte=F('usage_limit')) | Q(end_date__lt=date),
            start_date__lt=date)
class Voucher(models.Model):
    """A discount code that can be applied to a checkout.

    `type` decides what the discount targets (order value, shipping, or
    specific products/collections/categories); the discount itself is a
    fixed amount or a percentage, per `discount_value_type`.
    """

    type = models.CharField(
        max_length=20, choices=VoucherType.CHOICES, default=VoucherType.VALUE)
    name = models.CharField(max_length=255, null=True, blank=True)
    code = models.CharField(max_length=12, unique=True, db_index=True)
    usage_limit = models.PositiveIntegerField(null=True, blank=True)
    used = models.PositiveIntegerField(default=0, editable=False)
    start_date = models.DateField(default=date.today)
    end_date = models.DateField(null=True, blank=True)
    # this field indicates if discount should be applied per order or
    # individually to every item
    apply_once_per_order = models.BooleanField(default=False)
    discount_value_type = models.CharField(
        max_length=10, choices=DiscountValueType.CHOICES,
        default=DiscountValueType.FIXED)
    discount_value = models.DecimalField(
        max_digits=settings.DEFAULT_MAX_DIGITS,
        decimal_places=settings.DEFAULT_DECIMAL_PLACES)
    # not mandatory fields, usage depends on type
    countries = CountryField(multiple=True, blank=True)
    min_amount_spent = MoneyField(
        currency=settings.DEFAULT_CURRENCY,
        max_digits=settings.DEFAULT_MAX_DIGITS,
        decimal_places=settings.DEFAULT_DECIMAL_PLACES, null=True, blank=True)
    products = models.ManyToManyField('product.Product', blank=True)
    collections = models.ManyToManyField('product.Collection', blank=True)
    categories = models.ManyToManyField('product.Category', blank=True)

    objects = VoucherQueryset.as_manager()
    translated = TranslationProxy()

    def __str__(self):
        """Human-readable description built from the discount and its target."""
        if self.name:
            return self.name
        discount = '%s %s' % (
            self.discount_value, self.get_discount_value_type_display())
        if self.type == VoucherType.SHIPPING:
            if self.is_free:
                return pgettext('Voucher type', 'Free shipping')
            return pgettext(
                'Voucher type',
                '%(discount)s off shipping') % {'discount': discount}
        if self.type == VoucherType.PRODUCT:
            # .count() issues a SQL COUNT instead of hydrating every related
            # row just to take len() of the result.
            products = self.products.count()
            if products:
                return pgettext(
                    'Voucher type',
                    '%(discount)s off %(product_num)d products') % {
                        'discount': discount,
                        'product_num': products}
        if self.type == VoucherType.COLLECTION:
            collections = self.collections.count()
            if collections:
                return pgettext(
                    'Voucher type',
                    '%(discount)s off %(collections_num)d collections') % {
                        'discount': discount,
                        'collections_num': collections}
        if self.type == VoucherType.CATEGORY:
            categories = self.categories.count()
            if categories:
                return pgettext(
                    'Voucher type',
                    '%(discount)s off %(categories_num)d categories') % {
                        'discount': discount,
                        'categories_num': categories}
        return pgettext(
            'Voucher type', '%(discount)s off') % {'discount': discount}

    @property
    def is_free(self):
        """True when the voucher grants a full 100% (percentage) discount."""
        return (
            self.discount_value == Decimal(100) and
            self.discount_value_type == DiscountValueType.PERCENTAGE)

    def get_discount(self):
        """Return a callable that applies this voucher's discount to a price.

        :raises NotImplementedError: for unknown discount value types.
        """
        if self.discount_value_type == DiscountValueType.FIXED:
            discount_amount = Money(
                self.discount_value, settings.DEFAULT_CURRENCY)
            return partial(fixed_discount, discount=discount_amount)
        if self.discount_value_type == DiscountValueType.PERCENTAGE:
            return partial(percentage_discount, percentage=self.discount_value)
        raise NotImplementedError('Unknown discount type')

    def get_discount_amount_for(self, price):
        """Return the gross amount this voucher takes off *price*, capped so
        the discount never exceeds the price itself."""
        discount = self.get_discount()
        gross_price = price.gross
        gross_after_discount = discount(gross_price)
        if gross_after_discount.amount < 0:
            return gross_price
        return gross_price - gross_after_discount

    def validate_min_amount_spent(self, value):
        """Raise NotApplicable if *value* is below the required minimum spend."""
        min_amount_spent = self.min_amount_spent
        if min_amount_spent and value.gross < min_amount_spent:
            msg = pgettext(
                'Voucher not applicable',
                'This offer is only valid for orders over %(amount)s.')
            raise NotApplicable(
                msg % {'amount': amount(min_amount_spent)},
                min_amount_spent=min_amount_spent)
class SaleQueryset(models.QuerySet):
    def active(self, date):
        """Sales running on *date*: already started and not yet ended."""
        return self.filter(
            Q(end_date__isnull=True) | Q(end_date__gte=date),
            start_date__lte=date)
    def expired(self, date):
        """Sales whose end date has already passed by *date*."""
        return self.filter(
            end_date__lt=date, start_date__lt=date)
class VoucherTranslation(models.Model):
    """Translated display name for a Voucher, one row per language."""
    language_code = models.CharField(max_length=10)
    name = models.CharField(max_length=255, null=True, blank=True)
    voucher = models.ForeignKey(
        Voucher, related_name='translations', on_delete=models.CASCADE)
    class Meta:
        # At most one translation per (language, voucher) pair.
        unique_together = (('language_code', 'voucher'),)
class Sale(models.Model):
    """A discount (fixed amount or percentage) attached to sets of products,
    categories and collections, active between start_date and end_date."""
    name = models.CharField(max_length=255)
    # Whether `value` is interpreted as a fixed amount or a percentage.
    type = models.CharField(
        max_length=10, choices=DiscountValueType.CHOICES,
        default=DiscountValueType.FIXED)
    value = models.DecimalField(
        max_digits=settings.DEFAULT_MAX_DIGITS,
        decimal_places=settings.DEFAULT_DECIMAL_PLACES,
        default=0)
    products = models.ManyToManyField('product.Product', blank=True)
    categories = models.ManyToManyField('product.Category', blank=True)
    collections = models.ManyToManyField('product.Collection', blank=True)
    start_date = models.DateField(default=date.today)
    end_date = models.DateField(null=True, blank=True)
    objects = SaleQueryset.as_manager()
    translated = TranslationProxy()
    class Meta:
        app_label = 'discount'
        permissions = ((
            'manage_discounts', pgettext_lazy(
                'Permission description', 'Manage sales and vouchers.')),)
    def __repr__(self):
        return 'Sale(name=%r, value=%r, type=%s)' % (
            str(self.name), self.value, self.get_type_display())
    def __str__(self):
        return self.name
    def get_discount(self):
        """Return a callable that applies this sale's discount to a price.

        :raises NotImplementedError: for unknown discount value types.
        """
        if self.type == DiscountValueType.FIXED:
            discount_amount = Money(self.value, settings.DEFAULT_CURRENCY)
            return partial(fixed_discount, discount=discount_amount)
        if self.type == DiscountValueType.PERCENTAGE:
            return partial(percentage_discount, percentage=self.value)
        raise NotImplementedError('Unknown discount type')
class SaleTranslation(models.Model):
    """Translated display name for a Sale, one row per language."""
    language_code = models.CharField(max_length=10)
    name = models.CharField(max_length=255, null=True, blank=True)
    sale = models.ForeignKey(
        Sale, related_name='translations', on_delete=models.CASCADE)
    class Meta:
        # At most one translation per (language, sale) pair.
        unique_together = (('language_code', 'sale'),)
| 39.486111 | 79 | 0.663735 | from datetime import date
from decimal import Decimal
from functools import partial
from django.conf import settings
from django.db import models
from django.db.models import F, Q
from django.utils.translation import pgettext, pgettext_lazy
from django_countries.fields import CountryField
from django_prices.models import MoneyField
from django_prices.templatetags.prices_i18n import amount
from prices import Money, fixed_discount, percentage_discount
from ..core.utils.translations import TranslationProxy
from . import DiscountValueType, VoucherType
class NotApplicable(ValueError):
def __init__(self, msg, min_amount_spent=None):
super().__init__(msg)
self.min_amount_spent = min_amount_spent
class VoucherQueryset(models.QuerySet):
def active(self, date):
return self.filter(
Q(usage_limit__isnull=True) | Q(used__lt=F('usage_limit')),
Q(end_date__isnull=True) | Q(end_date__gte=date),
start_date__lte=date)
def expired(self, date):
return self.filter(
Q(used__gte=F('usage_limit')) | Q(end_date__lt=date),
start_date__lt=date)
class Voucher(models.Model):
type = models.CharField(
max_length=20, choices=VoucherType.CHOICES, default=VoucherType.VALUE)
name = models.CharField(max_length=255, null=True, blank=True)
code = models.CharField(max_length=12, unique=True, db_index=True)
usage_limit = models.PositiveIntegerField(null=True, blank=True)
used = models.PositiveIntegerField(default=0, editable=False)
start_date = models.DateField(default=date.today)
end_date = models.DateField(null=True, blank=True)
apply_once_per_order = models.BooleanField(default=False)
discount_value_type = models.CharField(
max_length=10, choices=DiscountValueType.CHOICES,
default=DiscountValueType.FIXED)
discount_value = models.DecimalField(
max_digits=settings.DEFAULT_MAX_DIGITS,
decimal_places=settings.DEFAULT_DECIMAL_PLACES)
countries = CountryField(multiple=True, blank=True)
min_amount_spent = MoneyField(
currency=settings.DEFAULT_CURRENCY,
max_digits=settings.DEFAULT_MAX_DIGITS,
decimal_places=settings.DEFAULT_DECIMAL_PLACES, null=True, blank=True)
products = models.ManyToManyField('product.Product', blank=True)
collections = models.ManyToManyField('product.Collection', blank=True)
categories = models.ManyToManyField('product.Category', blank=True)
objects = VoucherQueryset.as_manager()
translated = TranslationProxy()
def __str__(self):
if self.name:
return self.name
discount = '%s %s' % (
self.discount_value, self.get_discount_value_type_display())
if self.type == VoucherType.SHIPPING:
if self.is_free:
return pgettext('Voucher type', 'Free shipping')
return pgettext(
'Voucher type',
'%(discount)s off shipping') % {'discount': discount}
if self.type == VoucherType.PRODUCT:
products = len(self.products.all())
if products:
return pgettext(
'Voucher type',
'%(discount)s off %(product_num)d products') % {
'discount': discount,
'product_num': products}
if self.type == VoucherType.COLLECTION:
collections = len(self.collections.all())
if collections:
return pgettext(
'Voucher type',
'%(discount)s off %(collections_num)d collections') % {
'discount': discount,
'collections_num': collections}
if self.type == VoucherType.CATEGORY:
categories = len(self.categories.all())
if categories:
return pgettext(
'Voucher type',
'%(discount)s off %(categories_num)d categories') % {
'discount': discount,
'categories_num': categories}
return pgettext(
'Voucher type', '%(discount)s off') % {'discount': discount}
@property
def is_free(self):
return (
self.discount_value == Decimal(100) and
self.discount_value_type == DiscountValueType.PERCENTAGE)
def get_discount(self):
if self.discount_value_type == DiscountValueType.FIXED:
discount_amount = Money(
self.discount_value, settings.DEFAULT_CURRENCY)
return partial(fixed_discount, discount=discount_amount)
if self.discount_value_type == DiscountValueType.PERCENTAGE:
return partial(percentage_discount, percentage=self.discount_value)
raise NotImplementedError('Unknown discount type')
def get_discount_amount_for(self, price):
discount = self.get_discount()
gross_price = price.gross
gross_after_discount = discount(gross_price)
if gross_after_discount.amount < 0:
return gross_price
return gross_price - gross_after_discount
def validate_min_amount_spent(self, value):
min_amount_spent = self.min_amount_spent
if min_amount_spent and value.gross < min_amount_spent:
msg = pgettext(
'Voucher not applicable',
'This offer is only valid for orders over %(amount)s.')
raise NotApplicable(
msg % {'amount': amount(min_amount_spent)},
min_amount_spent=min_amount_spent)
class SaleQueryset(models.QuerySet):
def active(self, date):
return self.filter(
Q(end_date__isnull=True) | Q(end_date__gte=date),
start_date__lte=date)
def expired(self, date):
return self.filter(
end_date__lt=date, start_date__lt=date)
class VoucherTranslation(models.Model):
language_code = models.CharField(max_length=10)
name = models.CharField(max_length=255, null=True, blank=True)
voucher = models.ForeignKey(
Voucher, related_name='translations', on_delete=models.CASCADE)
class Meta:
unique_together = (('language_code', 'voucher'),)
class Sale(models.Model):
name = models.CharField(max_length=255)
type = models.CharField(
max_length=10, choices=DiscountValueType.CHOICES,
default=DiscountValueType.FIXED)
value = models.DecimalField(
max_digits=settings.DEFAULT_MAX_DIGITS,
decimal_places=settings.DEFAULT_DECIMAL_PLACES,
default=0)
products = models.ManyToManyField('product.Product', blank=True)
categories = models.ManyToManyField('product.Category', blank=True)
collections = models.ManyToManyField('product.Collection', blank=True)
start_date = models.DateField(default=date.today)
end_date = models.DateField(null=True, blank=True)
objects = SaleQueryset.as_manager()
translated = TranslationProxy()
class Meta:
app_label = 'discount'
permissions = ((
'manage_discounts', pgettext_lazy(
'Permission description', 'Manage sales and vouchers.')),)
def __repr__(self):
return 'Sale(name=%r, value=%r, type=%s)' % (
str(self.name), self.value, self.get_type_display())
def __str__(self):
return self.name
def get_discount(self):
if self.type == DiscountValueType.FIXED:
discount_amount = Money(self.value, settings.DEFAULT_CURRENCY)
return partial(fixed_discount, discount=discount_amount)
if self.type == DiscountValueType.PERCENTAGE:
return partial(percentage_discount, percentage=self.value)
raise NotImplementedError('Unknown discount type')
class SaleTranslation(models.Model):
language_code = models.CharField(max_length=10)
name = models.CharField(max_length=255, null=True, blank=True)
sale = models.ForeignKey(
Sale, related_name='translations', on_delete=models.CASCADE)
class Meta:
unique_together = (('language_code', 'sale'),)
| true | true |
f7312755a82193a6713ea6ec20053148ab734cac | 1,585 | py | Python | migrations/versions/f0793141fd6b_added_notifications.py | dyachoksa/flask-microblog | f956ba0199ab3fd4226806a9c1e0b3b38092c3d4 | [
"MIT"
] | null | null | null | migrations/versions/f0793141fd6b_added_notifications.py | dyachoksa/flask-microblog | f956ba0199ab3fd4226806a9c1e0b3b38092c3d4 | [
"MIT"
] | null | null | null | migrations/versions/f0793141fd6b_added_notifications.py | dyachoksa/flask-microblog | f956ba0199ab3fd4226806a9c1e0b3b38092c3d4 | [
"MIT"
] | null | null | null | """Added notifications
Revision ID: f0793141fd6b
Revises: 9ecc68fdc92d
Create Date: 2020-05-02 17:17:31.252794
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = "f0793141fd6b"
down_revision = "9ecc68fdc92d"
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table(
"notification",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("name", sa.String(length=128), nullable=True),
sa.Column("user_id", sa.Integer(), nullable=True),
sa.Column("timestamp", sa.Float(), nullable=True),
sa.Column("payload_json", sa.Text(), nullable=True),
sa.ForeignKeyConstraint(["user_id"], ["user.id"],),
sa.PrimaryKeyConstraint("id"),
)
op.create_index(
op.f("ix_notification_name"), "notification", ["name"], unique=False
)
op.create_index(
op.f("ix_notification_timestamp"), "notification", ["timestamp"], unique=False
)
op.create_index(
op.f("ix_notification_user_id"), "notification", ["user_id"], unique=False
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f("ix_notification_user_id"), table_name="notification")
op.drop_index(op.f("ix_notification_timestamp"), table_name="notification")
op.drop_index(op.f("ix_notification_name"), table_name="notification")
op.drop_table("notification")
# ### end Alembic commands ###
| 31.7 | 86 | 0.666877 | from alembic import op
import sqlalchemy as sa
revision = "f0793141fd6b"
down_revision = "9ecc68fdc92d"
branch_labels = None
depends_on = None
def upgrade():
umn("user_id", sa.Integer(), nullable=True),
sa.Column("timestamp", sa.Float(), nullable=True),
sa.Column("payload_json", sa.Text(), nullable=True),
sa.ForeignKeyConstraint(["user_id"], ["user.id"],),
sa.PrimaryKeyConstraint("id"),
)
op.create_index(
op.f("ix_notification_name"), "notification", ["name"], unique=False
)
op.create_index(
op.f("ix_notification_timestamp"), "notification", ["timestamp"], unique=False
)
op.create_index(
op.f("ix_notification_user_id"), "notification", ["user_id"], unique=False
)
op_table("notification")
| true | true |
f73127aea2857de2faadcfe865b063892f3787a7 | 1,823 | py | Python | elfsample.py | TheMindVirus/pico-uf22elf | ee5d95208851e6eba4b21675cae66fdf07176d0e | [
"MIT"
] | null | null | null | elfsample.py | TheMindVirus/pico-uf22elf | ee5d95208851e6eba4b21675cae66fdf07176d0e | [
"MIT"
] | null | null | null | elfsample.py | TheMindVirus/pico-uf22elf | ee5d95208851e6eba4b21675cae66fdf07176d0e | [
"MIT"
] | null | null | null | data = b""
data += b"\x7F\x45\x4C\x46\x01\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00"
data += b"\x02\x00\x28\x00\x01\x00\x00\x00\x60\x00\x00\x00\x40\x00\x00\x00"
data += b"\xB0\x00\x00\x00\x00\x00\x00\x00\x34\x00\x20\x00\x01\x00\x28\x00"
data += b"\x04\x00\x03\x00"
data += b"\x00" * (0x40 - len(data))
data += b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x08"
data += b"\x90\x00\x00\x00\x90\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00"
data += b"\x00" * (0x60 - len(data))
data += b"\x0D\x20\xA0\xE3\x14\x10\x8F\xE2\x01\x00\xA0\xE3\x04\x70\xA0\xE3"
data += b"\x00\x00\x00\xEF\x01\x00\xA0\xE3\x01\x70\xA0\xE3\x00\x00\x00\xEF"
data += b"\x00" * (0x80 - len(data))
data += b"\x48\x65\x6C\x6C\x6F\x20\x57\x6F\x72\x6C\x64\x21\x0A\x00" # Hello World!
data += b"\x00" * (0x90 - len(data))
data += b"\x00\x2E\x73\x68\x73\x74\x72\x74\x61\x62\x00\x2E\x74\x65\x78\x74" #.shstrtab
data += b"\x00\x2E\x72\x6F\x64\x61\x74\x61\x00"
data += b"\x00" * (0xB0 - len(data))
data += b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
data += b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
data += b"\x00\x00\x00\x00\x00\x00\x00\x00\x0B\x00\x00\x00\x01\x00\x00\x00"
data += b"\x06\x00\x00\x00\x60\x00\x00\x08\x60\x00\x00\x00\x20\x00\x00\x00"
data += b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
data += b"\x11\x00\x00\x00\x01\x00\x00\x00\x02\x00\x00\x00\x80\x00\x00\x08"
data += b"\x00\x00\x00\x00\x0D\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
data += b"\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x03\x00\x00\x00"
data += b"\x00\x00\x00\x00\x00\x00\x00\x00\x90\x00\x00\x00\x19\x00\x00\x00"
data += b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
file = open("sample.elf", "wb")
file.write(data)
file.close()
print("Done!")
| 41.431818 | 86 | 0.668129 | data = b""
data += b"\x7F\x45\x4C\x46\x01\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00"
data += b"\x02\x00\x28\x00\x01\x00\x00\x00\x60\x00\x00\x00\x40\x00\x00\x00"
data += b"\xB0\x00\x00\x00\x00\x00\x00\x00\x34\x00\x20\x00\x01\x00\x28\x00"
data += b"\x04\x00\x03\x00"
data += b"\x00" * (0x40 - len(data))
data += b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x08"
data += b"\x90\x00\x00\x00\x90\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00"
data += b"\x00" * (0x60 - len(data))
data += b"\x0D\x20\xA0\xE3\x14\x10\x8F\xE2\x01\x00\xA0\xE3\x04\x70\xA0\xE3"
data += b"\x00\x00\x00\xEF\x01\x00\xA0\xE3\x01\x70\xA0\xE3\x00\x00\x00\xEF"
data += b"\x00" * (0x80 - len(data))
data += b"\x48\x65\x6C\x6C\x6F\x20\x57\x6F\x72\x6C\x64\x21\x0A\x00"
data += b"\x00" * (0x90 - len(data))
data += b"\x00\x2E\x73\x68\x73\x74\x72\x74\x61\x62\x00\x2E\x74\x65\x78\x74"
data += b"\x00\x2E\x72\x6F\x64\x61\x74\x61\x00"
data += b"\x00" * (0xB0 - len(data))
data += b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
data += b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
data += b"\x00\x00\x00\x00\x00\x00\x00\x00\x0B\x00\x00\x00\x01\x00\x00\x00"
data += b"\x06\x00\x00\x00\x60\x00\x00\x08\x60\x00\x00\x00\x20\x00\x00\x00"
data += b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
data += b"\x11\x00\x00\x00\x01\x00\x00\x00\x02\x00\x00\x00\x80\x00\x00\x08"
data += b"\x00\x00\x00\x00\x0D\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
data += b"\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x03\x00\x00\x00"
data += b"\x00\x00\x00\x00\x00\x00\x00\x00\x90\x00\x00\x00\x19\x00\x00\x00"
data += b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
file = open("sample.elf", "wb")
file.write(data)
file.close()
print("Done!")
| true | true |
f731283f2aec8441075c7506a401da140cdbead6 | 20,464 | py | Python | test/functional/test_framework/test_framework.py | AtomicLemon/bitcoinflex | fe02bd48be01e08a047ef8d5821eb247a0681306 | [
"MIT"
] | null | null | null | test/functional/test_framework/test_framework.py | AtomicLemon/bitcoinflex | fe02bd48be01e08a047ef8d5821eb247a0681306 | [
"MIT"
] | null | null | null | test/functional/test_framework/test_framework.py | AtomicLemon/bitcoinflex | fe02bd48be01e08a047ef8d5821eb247a0681306 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2014-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Base class for RPC testing."""
from enum import Enum
import logging
import optparse
import os
import pdb
import shutil
import sys
import tempfile
import time
from .authproxy import JSONRPCException
from . import coverage
from .test_node import TestNode
from .util import (
MAX_NODES,
PortSeed,
assert_equal,
check_json_precision,
connect_nodes_bi,
disconnect_nodes,
get_datadir_path,
initialize_datadir,
p2p_port,
set_node_times,
sync_blocks,
sync_mempools,
)
# Outcome of a test run; main() maps these onto the TEST_EXIT_* process
# exit codes. Built with the Enum functional API; members and values are
# identical to the class-syntax definition.
TestStatus = Enum("TestStatus", [("PASSED", 1), ("FAILED", 2), ("SKIPPED", 3)])
# Process exit codes reported by BitcoinTestFramework.main() via sys.exit().
TEST_EXIT_PASSED = 0
TEST_EXIT_FAILED = 1
TEST_EXIT_SKIPPED = 77  # exit status conventionally treated as "skipped" by test harnesses
class BitcoinTestFramework():
    """Base class for a bitcoinflex test script.

    Individual bitcoinflex test scripts should subclass this class and
    override the set_test_params() and run_test() methods.

    Individual tests can also override the following methods to customize
    the test setup:

    - add_options()
    - setup_chain()
    - setup_network()
    - setup_nodes()

    The __init__() and main() methods should not be overridden.

    This class also contains various public and private helper methods."""
    def __init__(self):
        """Set test framework defaults.

        Do not override this method. Instead, override set_test_params()."""
        # Start from an empty chain (True) or the cached 200-block chain (False).
        self.setup_clean_chain = False
        # Populated with TestNode objects by add_nodes()/_initialize_chain().
        self.nodes = []
        # 0 means mocktime is disabled; see enable_mocktime().
        self.mocktime = 0
        # Tests that can drive all RPCs through bitcoin-cli set this to True.
        self.supports_cli = False
        self.set_test_params()
        assert hasattr(self, "num_nodes"), "Test must set self.num_nodes in set_test_params()"
    def main(self):
        """Main entry point. This should not be overridden by the subclass test scripts."""
        # Parse command-line options common to every functional test.
        parser = optparse.OptionParser(usage="%prog [options]")
        parser.add_option("--nocleanup", dest="nocleanup", default=False, action="store_true",
                          help="Leave bitcoinflexds and test.* datadir on exit or error")
        parser.add_option("--noshutdown", dest="noshutdown", default=False, action="store_true",
                          help="Don't stop bitcoinflexds after the test execution")
        parser.add_option("--srcdir", dest="srcdir", default=os.path.normpath(os.path.dirname(os.path.realpath(__file__))+"/../../../src"),
                          help="Source directory containing bitcoinflexd/bitcoinflex-cli (default: %default)")
        parser.add_option("--cachedir", dest="cachedir", default=os.path.normpath(os.path.dirname(os.path.realpath(__file__)) + "/../../cache"),
                          help="Directory for caching pregenerated datadirs")
        parser.add_option("--tmpdir", dest="tmpdir", help="Root directory for datadirs")
        parser.add_option("-l", "--loglevel", dest="loglevel", default="INFO",
                          help="log events at this level and higher to the console. Can be set to DEBUG, INFO, WARNING, ERROR or CRITICAL. Passing --loglevel DEBUG will output all logs to console. Note that logs at all levels are always written to the test_framework.log file in the temporary test directory.")
        parser.add_option("--tracerpc", dest="trace_rpc", default=False, action="store_true",
                          help="Print out all RPC calls as they are made")
        parser.add_option("--portseed", dest="port_seed", default=os.getpid(), type='int',
                          help="The seed to use for assigning port numbers (default: current process id)")
        parser.add_option("--coveragedir", dest="coveragedir",
                          help="Write tested RPC commands into this directory")
        parser.add_option("--configfile", dest="configfile",
                          help="Location of the test framework config file")
        parser.add_option("--pdbonfailure", dest="pdbonfailure", default=False, action="store_true",
                          help="Attach a python debugger if test fails")
        parser.add_option("--usecli", dest="usecli", default=False, action="store_true",
                          help="use bitcoin-cli instead of RPC for all commands")
        self.add_options(parser)
        (self.options, self.args) = parser.parse_args()
        # Seed port assignment so parallel test runs don't collide on ports.
        PortSeed.n = self.options.port_seed
        # Put the freshly-built binaries (and qt tools) first on PATH.
        os.environ['PATH'] = self.options.srcdir + ":" + self.options.srcdir + "/qt:" + os.environ['PATH']
        check_json_precision()
        self.options.cachedir = os.path.abspath(self.options.cachedir)
        # Set up temp directory and start logging
        if self.options.tmpdir:
            self.options.tmpdir = os.path.abspath(self.options.tmpdir)
            os.makedirs(self.options.tmpdir, exist_ok=False)
        else:
            self.options.tmpdir = tempfile.mkdtemp(prefix="test")
        self._start_logging()
        success = TestStatus.FAILED
        try:
            if self.options.usecli and not self.supports_cli:
                raise SkipTest("--usecli specified but test does not support using CLI")
            self.setup_chain()
            self.setup_network()
            # NOTE(review): fixed sleep after network setup, presumably to let
            # nodes settle before the test starts — confirm whether needed.
            time.sleep(5)
            self.run_test()
            success = TestStatus.PASSED
        except JSONRPCException as e:
            self.log.exception("JSONRPC error")
        except SkipTest as e:
            self.log.warning("Test Skipped: %s" % e.message)
            success = TestStatus.SKIPPED
        except AssertionError as e:
            self.log.exception("Assertion failed")
        except KeyError as e:
            self.log.exception("Key error")
        except Exception as e:
            self.log.exception("Unexpected exception caught during testing")
        except KeyboardInterrupt as e:
            self.log.warning("Exiting after keyboard interrupt")
        if success == TestStatus.FAILED and self.options.pdbonfailure:
            print("Testcase failed. Attaching python debugger. Enter ? for help")
            pdb.set_trace()
        if not self.options.noshutdown:
            self.log.info("Stopping nodes")
            if self.nodes:
                self.stop_nodes()
        else:
            # --noshutdown: leave daemons running and stop TestNode from
            # killing them at interpreter exit.
            for node in self.nodes:
                node.cleanup_on_exit = False
            self.log.info("Note: bitcoinflexds were not stopped and may still be running")
        if not self.options.nocleanup and not self.options.noshutdown and success != TestStatus.FAILED:
            self.log.info("Cleaning up")
            shutil.rmtree(self.options.tmpdir)
        else:
            self.log.warning("Not cleaning up dir %s" % self.options.tmpdir)
        # Map the test outcome onto a process exit code.
        if success == TestStatus.PASSED:
            self.log.info("Tests successful")
            exit_code = TEST_EXIT_PASSED
        elif success == TestStatus.SKIPPED:
            self.log.info("Test skipped")
            exit_code = TEST_EXIT_SKIPPED
        else:
            self.log.error("Test failed. Test logging available at %s/test_framework.log", self.options.tmpdir)
            self.log.error("Hint: Call {} '{}' to consolidate all logs".format(os.path.normpath(os.path.dirname(os.path.realpath(__file__)) + "/../combine_logs.py"), self.options.tmpdir))
            exit_code = TEST_EXIT_FAILED
        logging.shutdown()
        sys.exit(exit_code)
    # Methods to override in subclass test scripts.
    def set_test_params(self):
        """Tests must override this method to change default values for number of nodes, topology, etc"""
        raise NotImplementedError
    def add_options(self, parser):
        """Override this method to add command-line options to the test"""
        pass
    def setup_chain(self):
        """Override this method to customize blockchain setup"""
        self.log.info("Initializing test directory " + self.options.tmpdir)
        if self.setup_clean_chain:
            self._initialize_chain_clean()
        else:
            self._initialize_chain()
    def setup_network(self):
        """Override this method to customize test network topology"""
        self.setup_nodes()
        # Connect the nodes as a "chain". This allows us
        # to split the network between nodes 1 and 2 to get
        # two halves that can work on competing chains.
        for i in range(self.num_nodes - 1):
            connect_nodes_bi(self.nodes, i, i + 1)
        self.sync_all()
    def setup_nodes(self):
        """Override this method to customize test node setup"""
        extra_args = None
        if hasattr(self, "extra_args"):
            extra_args = self.extra_args
        self.add_nodes(self.num_nodes, extra_args)
        self.start_nodes()
    def run_test(self):
        """Tests must override this method to define test logic"""
        raise NotImplementedError
    # Public helper methods. These can be accessed by the subclass test scripts.
    def add_nodes(self, num_nodes, extra_args=None, rpchost=None, timewait=None, binary=None):
        """Instantiate TestNode objects (does not start the daemons)."""
        if extra_args is None:
            extra_args = [[]] * num_nodes
        if binary is None:
            binary = [None] * num_nodes
        assert_equal(len(extra_args), num_nodes)
        assert_equal(len(binary), num_nodes)
        for i in range(num_nodes):
            self.nodes.append(TestNode(i, self.options.tmpdir, extra_args[i], rpchost, timewait=timewait, binary=binary[i], stderr=None, mocktime=self.mocktime, coverage_dir=self.options.coveragedir, use_cli=self.options.usecli))
    def start_node(self, i, *args, **kwargs):
        """Start a bitcoinflexd and wait until its RPC interface is up."""
        node = self.nodes[i]
        node.start(*args, **kwargs)
        node.wait_for_rpc_connection()
        # NOTE(review): fixed sleep after RPC comes up — confirm necessity.
        time.sleep(10)
        if self.options.coveragedir is not None:
            coverage.write_all_rpc_commands(self.options.coveragedir, node.rpc)
    def start_nodes(self, extra_args=None, *args, **kwargs):
        """Start multiple bitcoinflexds and wait for their RPC interfaces."""
        if extra_args is None:
            extra_args = [None] * self.num_nodes
        assert_equal(len(extra_args), self.num_nodes)
        try:
            for i, node in enumerate(self.nodes):
                node.start(extra_args[i], *args, **kwargs)
            for node in self.nodes:
                node.wait_for_rpc_connection()
        except:
            # If one node failed to start, stop the others
            self.stop_nodes()
            raise
        # NOTE(review): fixed sleep after all RPCs are up — confirm necessity.
        time.sleep(10)
        if self.options.coveragedir is not None:
            for node in self.nodes:
                coverage.write_all_rpc_commands(self.options.coveragedir, node.rpc)
    def stop_node(self, i):
        """Stop a bitcoinflexd test node and wait for the process to exit."""
        self.nodes[i].stop_node()
        self.nodes[i].wait_until_stopped()
    def stop_nodes(self):
        """Stop multiple bitcoinflexd test nodes"""
        for node in self.nodes:
            # Issue the stop RPC to every node first so they shut down in parallel
            node.stop_node()
        for node in self.nodes:
            # Then wait for each process to actually exit
            time.sleep(5)
            node.wait_until_stopped()
    def restart_node(self, i, extra_args=None):
        """Stop and start a test node"""
        self.stop_node(i)
        self.start_node(i, extra_args)
    def assert_start_raises_init_error(self, i, extra_args=None, expected_msg=None, *args, **kwargs):
        """Assert that starting node i fails during initialization.

        If expected_msg is given, it must appear in the daemon's stderr output;
        otherwise an AssertionError is raised. Also raises if the node starts
        successfully."""
        with tempfile.SpooledTemporaryFile(max_size=2**16) as log_stderr:
            try:
                self.start_node(i, extra_args, stderr=log_stderr, *args, **kwargs)
                self.stop_node(i)
            except Exception as e:
                assert 'bitcoinflexd exited' in str(e)  # node must have shutdown
                self.nodes[i].running = False
                self.nodes[i].process = None
                if expected_msg is not None:
                    log_stderr.seek(0)
                    stderr = log_stderr.read().decode('utf-8')
                    if expected_msg not in stderr:
                        raise AssertionError("Expected error \"" + expected_msg + "\" not found in:\n" + stderr)
            else:
                # The node started cleanly, which is itself a failure here.
                if expected_msg is None:
                    assert_msg = "bitcoinflexd should have exited with an error"
                else:
                    assert_msg = "bitcoinflexd should have exited with expected error " + expected_msg
                raise AssertionError(assert_msg)
    def wait_for_node_exit(self, i, timeout):
        """Block until node i's process exits, or raise after timeout seconds."""
        self.nodes[i].process.wait(timeout)
    def split_network(self):
        """
        Split the network of four nodes into nodes 0/1 and 2/3.
        """
        disconnect_nodes(self.nodes[1], 2)
        disconnect_nodes(self.nodes[2], 1)
        self.sync_all([self.nodes[:2], self.nodes[2:]])
    def join_network(self):
        """
        Join the (previously split) network halves together.
        """
        connect_nodes_bi(self.nodes, 1, 2)
        self.sync_all()
    def sync_all(self, node_groups=None):
        """Sync blocks and mempools within each node group (default: all nodes)."""
        if not node_groups:
            node_groups = [self.nodes]
        for group in node_groups:
            sync_blocks(group)
            sync_mempools(group)
    def enable_mocktime(self):
        """Enable mocktime for the script.

        mocktime may be needed for scripts that use the cached version of the
        blockchain. If the cached version of the blockchain is used without
        mocktime then the mempools will not sync due to IBD.

        For backward compatibility of the python scripts with previous
        versions of the cache, this helper sets mocktime to the fixed
        timestamp 1454124732 + (201 * 10 * 60)."""
        self.mocktime = 1454124732 + (201 * 10 * 60)
    def disable_mocktime(self):
        """Disable mocktime (nodes use real wall-clock time again)."""
        self.mocktime = 0
    # Private helper methods. These should not be accessed by the subclass test scripts.
    def _start_logging(self):
        # Add logger and logging handlers
        self.log = logging.getLogger('TestFramework')
        self.log.setLevel(logging.DEBUG)
        # Create file handler to log all messages
        fh = logging.FileHandler(self.options.tmpdir + '/test_framework.log')
        fh.setLevel(logging.DEBUG)
        # Create console handler to log messages to stderr. By default this logs only error messages, but can be configured with --loglevel.
        ch = logging.StreamHandler(sys.stdout)
        # User can provide log level as a number or string (eg DEBUG). loglevel was caught as a string, so try to convert it to an int
        ll = int(self.options.loglevel) if self.options.loglevel.isdigit() else self.options.loglevel.upper()
        ch.setLevel(ll)
        # Format logs the same as bitcoinflexd's debug.log with microprecision (so log files can be concatenated and sorted)
        formatter = logging.Formatter(fmt='%(asctime)s.%(msecs)03d000 %(name)s (%(levelname)s): %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
        formatter.converter = time.gmtime
        fh.setFormatter(formatter)
        ch.setFormatter(formatter)
        # add the handlers to the logger
        self.log.addHandler(fh)
        self.log.addHandler(ch)
        if self.options.trace_rpc:
            rpc_logger = logging.getLogger("BitcoinRPC")
            rpc_logger.setLevel(logging.DEBUG)
            rpc_handler = logging.StreamHandler(sys.stdout)
            rpc_handler.setLevel(logging.DEBUG)
            rpc_logger.addHandler(rpc_handler)
    def _initialize_chain(self):
        """Initialize a pre-mined blockchain for use by the test.

        Create a cache of a 200-block-long chain (with wallet) for MAX_NODES.
        Afterward, create num_nodes copies from the cache."""
        assert self.num_nodes <= MAX_NODES
        create_cache = False
        for i in range(MAX_NODES):
            if not os.path.isdir(get_datadir_path(self.options.cachedir, i)):
                create_cache = True
                break
        if create_cache:
            self.log.debug("Creating data directories from cached datadir")
            # find and delete old cache directories if any exist
            for i in range(MAX_NODES):
                if os.path.isdir(get_datadir_path(self.options.cachedir, i)):
                    shutil.rmtree(get_datadir_path(self.options.cachedir, i))
            # Create cache directories, run bitcoinds:
            for i in range(MAX_NODES):
                datadir = initialize_datadir(self.options.cachedir, i)
                args = [os.getenv("BITCOIND", "bitcoinflexd"), "-spendzeroconfchange=1", "-server", "-keypool=1", "-datadir=" + datadir, "-discover=0"]
                if i > 0:
                    # Every node after the first connects to node 0
                    args.append("-connect=127.0.0.1:" + str(p2p_port(0)))
                self.nodes.append(TestNode(i, self.options.cachedir, extra_args=[], rpchost=None, timewait=None, binary=None, stderr=None, mocktime=self.mocktime, coverage_dir=None))
                self.nodes[i].args = args
                self.start_node(i)
            # Wait for RPC connections to be ready
            for node in self.nodes:
                node.wait_for_rpc_connection()
            # Create a 200-block-long chain; each of the 4 first nodes
            # gets 25 mature blocks and 25 immature.
            # Note: To preserve compatibility with older versions of
            # initialize_chain, only 4 nodes will generate coins.
            #
            # blocks are created with timestamps 60 seconds apart (see the
            # block_time += 60 below), starting from mocktime - 201 minutes
            self.enable_mocktime()
            block_time = self.mocktime - (201 * 60)
            for i in range(2):
                for peer in range(4):
                    for j in range(25):
                        set_node_times(self.nodes, block_time)
                        self.nodes[peer].generate(1)
                        block_time += 60
                    # Must sync before next peer starts generating blocks
                    sync_blocks(self.nodes)
            # Shut them down, and clean up cache directories:
            self.stop_nodes()
            self.nodes = []
            self.disable_mocktime()
            def cache_path(n, *paths):
                # Path inside node n's cached regtest datadir
                return os.path.join(get_datadir_path(self.options.cachedir, n), "regtest", *paths)
            for i in range(MAX_NODES):
                # Keep only the chain/wallet state; drop logs, pid files, etc.
                for entry in os.listdir(cache_path(i)):
                    if entry not in ['wallet.dat', 'chainstate', 'blocks', 'sporks', 'zerocoin', 'backups']:
                        os.remove(cache_path(i, entry))
        for i in range(self.num_nodes):
            from_dir = get_datadir_path(self.options.cachedir, i)
            to_dir = get_datadir_path(self.options.tmpdir, i)
            shutil.copytree(from_dir, to_dir)
            initialize_datadir(self.options.tmpdir, i)  # Overwrite port/rpcport in bitcoin.conf
    def _initialize_chain_clean(self):
        """Initialize empty blockchain for use by the test.

        Create an empty blockchain and num_nodes wallets.
        Useful if a test case wants complete control over initialization."""
        for i in range(self.num_nodes):
            initialize_datadir(self.options.tmpdir, i)
class ComparisonTestFramework(BitcoinTestFramework):
    """Test framework for p2p comparison testing.

    Launches one bitcoinflexd binary under test plus reference binaries:

    - 1 binary: test binary only
    - 2 binaries: 1 test binary, 1 ref binary
    - n>2 binaries: 1 test binary, n-1 ref binaries
    """
    def set_test_params(self):
        self.setup_clean_chain = True
        self.num_nodes = 2
    def add_options(self, parser):
        # Both options default to the BITCOIND env var, falling back to the
        # binary name on PATH.
        default_binary = os.getenv("BITCOIND", "bitcoinflexd")
        parser.add_option("--testbinary", dest="testbinary", default=default_binary,
                          help="bitcoinflexd binary to test")
        parser.add_option("--refbinary", dest="refbinary", default=default_binary,
                          help="bitcoinflexd binary to use for reference nodes (if any)")
    def setup_network(self):
        # A test may provide its own extra_args; otherwise whitelist
        # localhost on every node.
        extra_args = getattr(self, "extra_args", [['-whitelist=127.0.0.1']] * self.num_nodes)
        binaries = [self.options.testbinary]
        binaries.extend([self.options.refbinary] * (self.num_nodes - 1))
        self.add_nodes(self.num_nodes, extra_args, binary=binaries)
        self.start_nodes()
class SkipTest(Exception):
    """Raised by a test to signal that it should be skipped rather than failed.

    main() catches this exception, logs e.message and exits with the
    "skipped" exit code instead of reporting a failure.
    """
    def __init__(self, message):
        # Forward the message to Exception so str(e)/repr(e) and tracebacks
        # show the skip reason (previously args was empty and str(e) was "").
        super().__init__(message)
        # Kept for backward compatibility: main() reads e.message directly.
        self.message = message
| 42.020534 | 310 | 0.622215 |
from enum import Enum
import logging
import optparse
import os
import pdb
import shutil
import sys
import tempfile
import time
from .authproxy import JSONRPCException
from . import coverage
from .test_node import TestNode
from .util import (
MAX_NODES,
PortSeed,
assert_equal,
check_json_precision,
connect_nodes_bi,
disconnect_nodes,
get_datadir_path,
initialize_datadir,
p2p_port,
set_node_times,
sync_blocks,
sync_mempools,
)
# Outcome of a test run; mapped to the TEST_EXIT_* process exit codes.
# Functional Enum API; members and values match the class-syntax form.
TestStatus = Enum("TestStatus", [("PASSED", 1), ("FAILED", 2), ("SKIPPED", 3)])
# Process exit codes reported by BitcoinTestFramework.main() via sys.exit().
TEST_EXIT_PASSED = 0
TEST_EXIT_FAILED = 1
TEST_EXIT_SKIPPED = 77  # exit status conventionally treated as "skipped" by test harnesses
class BitcoinTestFramework():
    """Base class for a bitcoinflexd functional test.

    Individual tests subclass this, override set_test_params() and
    run_test(), and call main(), which drives option parsing, node
    setup, test execution and teardown.
    """

    def __init__(self):
        """Set framework defaults and let the subclass set its parameters."""
        self.setup_clean_chain = False
        self.nodes = []
        self.mocktime = 0
        self.supports_cli = False
        self.set_test_params()

        assert hasattr(self, "num_nodes"), "Test must set self.num_nodes in set_test_params()"

    def main(self):
        """Parse options, run the test, and exit with a pass/fail/skip code."""
        parser = optparse.OptionParser(usage="%prog [options]")
        parser.add_option("--nocleanup", dest="nocleanup", default=False, action="store_true",
                          help="Leave bitcoinflexds and test.* datadir on exit or error")
        parser.add_option("--noshutdown", dest="noshutdown", default=False, action="store_true",
                          help="Don't stop bitcoinflexds after the test execution")
        parser.add_option("--srcdir", dest="srcdir", default=os.path.normpath(os.path.dirname(os.path.realpath(__file__))+"/../../../src"),
                          help="Source directory containing bitcoinflexd/bitcoinflex-cli (default: %default)")
        parser.add_option("--cachedir", dest="cachedir", default=os.path.normpath(os.path.dirname(os.path.realpath(__file__)) + "/../../cache"),
                          help="Directory for caching pregenerated datadirs")
        parser.add_option("--tmpdir", dest="tmpdir", help="Root directory for datadirs")
        parser.add_option("-l", "--loglevel", dest="loglevel", default="INFO",
                          help="log events at this level and higher to the console. Can be set to DEBUG, INFO, WARNING, ERROR or CRITICAL. Passing --loglevel DEBUG will output all logs to console. Note that logs at all levels are always written to the test_framework.log file in the temporary test directory.")
        parser.add_option("--tracerpc", dest="trace_rpc", default=False, action="store_true",
                          help="Print out all RPC calls as they are made")
        parser.add_option("--portseed", dest="port_seed", default=os.getpid(), type='int',
                          help="The seed to use for assigning port numbers (default: current process id)")
        parser.add_option("--coveragedir", dest="coveragedir",
                          help="Write tested RPC commands into this directory")
        parser.add_option("--configfile", dest="configfile",
                          help="Location of the test framework config file")
        parser.add_option("--pdbonfailure", dest="pdbonfailure", default=False, action="store_true",
                          help="Attach a python debugger if test fails")
        parser.add_option("--usecli", dest="usecli", default=False, action="store_true",
                          help="use bitcoin-cli instead of RPC for all commands")
        self.add_options(parser)
        (self.options, self.args) = parser.parse_args()

        PortSeed.n = self.options.port_seed

        # Make the node binaries (and their Qt variants) resolvable via PATH.
        os.environ['PATH'] = self.options.srcdir + ":" + self.options.srcdir + "/qt:" + os.environ['PATH']

        check_json_precision()

        self.options.cachedir = os.path.abspath(self.options.cachedir)

        # Set up temp directory and start logging
        if self.options.tmpdir:
            self.options.tmpdir = os.path.abspath(self.options.tmpdir)
            # exist_ok=False: refuse to reuse a directory from a previous run.
            os.makedirs(self.options.tmpdir, exist_ok=False)
        else:
            self.options.tmpdir = tempfile.mkdtemp(prefix="test")
        self._start_logging()

        success = TestStatus.FAILED

        try:
            if self.options.usecli and not self.supports_cli:
                raise SkipTest("--usecli specified but test does not support using CLI")
            self.setup_chain()
            self.setup_network()
            time.sleep(5)
            self.run_test()
            success = TestStatus.PASSED
        except JSONRPCException as e:
            self.log.exception("JSONRPC error")
        except SkipTest as e:
            self.log.warning("Test Skipped: %s" % e.message)
            success = TestStatus.SKIPPED
        except AssertionError as e:
            self.log.exception("Assertion failed")
        except KeyError as e:
            self.log.exception("Key error")
        except Exception as e:
            self.log.exception("Unexpected exception caught during testing")
        except KeyboardInterrupt as e:
            self.log.warning("Exiting after keyboard interrupt")

        if success == TestStatus.FAILED and self.options.pdbonfailure:
            print("Testcase failed. Attaching python debugger. Enter ? for help")
            pdb.set_trace()

        if not self.options.noshutdown:
            self.log.info("Stopping nodes")
            if self.nodes:
                self.stop_nodes()
        else:
            # Leave the daemons running; tell TestNode not to kill them on GC.
            for node in self.nodes:
                node.cleanup_on_exit = False
            self.log.info("Note: bitcoinflexds were not stopped and may still be running")

        # Only remove the datadirs on success and when a full shutdown happened.
        if not self.options.nocleanup and not self.options.noshutdown and success != TestStatus.FAILED:
            self.log.info("Cleaning up")
            shutil.rmtree(self.options.tmpdir)
        else:
            self.log.warning("Not cleaning up dir %s" % self.options.tmpdir)

        if success == TestStatus.PASSED:
            self.log.info("Tests successful")
            exit_code = TEST_EXIT_PASSED
        elif success == TestStatus.SKIPPED:
            self.log.info("Test skipped")
            exit_code = TEST_EXIT_SKIPPED
        else:
            self.log.error("Test failed. Test logging available at %s/test_framework.log", self.options.tmpdir)
            self.log.error("Hint: Call {} '{}' to consolidate all logs".format(os.path.normpath(os.path.dirname(os.path.realpath(__file__)) + "/../combine_logs.py"), self.options.tmpdir))
            exit_code = TEST_EXIT_FAILED
        logging.shutdown()
        sys.exit(exit_code)

    # Methods to override in subclass test scripts.
    def set_test_params(self):
        """Override to set test parameters (at minimum self.num_nodes)."""
        raise NotImplementedError

    def add_options(self, parser):
        """Override to add test-specific command line options."""
        pass

    def setup_chain(self):
        """Prepare the datadirs/blockchain state the test starts from."""
        self.log.info("Initializing test directory " + self.options.tmpdir)
        if self.setup_clean_chain:
            self._initialize_chain_clean()
        else:
            self._initialize_chain()

    def setup_network(self):
        """Start the nodes and connect them in a linear chain topology."""
        self.setup_nodes()

        # Connect the nodes as a "chain". This allows us
        # to split the network between nodes 1 and 2 to get
        # two halves that can work on competing chains.
        for i in range(self.num_nodes - 1):
            connect_nodes_bi(self.nodes, i, i + 1)
        self.sync_all()

    def setup_nodes(self):
        """Create and start self.num_nodes test nodes."""
        extra_args = None
        if hasattr(self, "extra_args"):
            extra_args = self.extra_args
        self.add_nodes(self.num_nodes, extra_args)
        self.start_nodes()

    def run_test(self):
        """Override to implement the actual test logic."""
        raise NotImplementedError

    # Public helper methods. These can be accessed by the subclass test scripts.
    def add_nodes(self, num_nodes, extra_args=None, rpchost=None, timewait=None, binary=None):
        """Instantiate TestNode objects (does not start the daemon processes)."""
        if extra_args is None:
            extra_args = [[]] * num_nodes
        if binary is None:
            binary = [None] * num_nodes
        assert_equal(len(extra_args), num_nodes)
        assert_equal(len(binary), num_nodes)
        for i in range(num_nodes):
            self.nodes.append(TestNode(i, self.options.tmpdir, extra_args[i], rpchost, timewait=timewait, binary=binary[i], stderr=None, mocktime=self.mocktime, coverage_dir=self.options.coveragedir, use_cli=self.options.usecli))

    def start_node(self, i, *args, **kwargs):
        """Start node i and wait until its RPC interface is reachable."""
        node = self.nodes[i]

        node.start(*args, **kwargs)
        node.wait_for_rpc_connection()
        time.sleep(10)

        if self.options.coveragedir is not None:
            coverage.write_all_rpc_commands(self.options.coveragedir, node.rpc)

    def start_nodes(self, extra_args=None, *args, **kwargs):
        """Start all nodes; if any fails to start, stop them all and re-raise."""
        if extra_args is None:
            extra_args = [None] * self.num_nodes
        assert_equal(len(extra_args), self.num_nodes)
        try:
            for i, node in enumerate(self.nodes):
                node.start(extra_args[i], *args, **kwargs)
            for node in self.nodes:
                node.wait_for_rpc_connection()
        except:
            # If one node failed to start, stop the others
            # (bare except is deliberate: re-raised below, so even
            # KeyboardInterrupt triggers the cleanup first).
            self.stop_nodes()
            raise
        time.sleep(10)

        if self.options.coveragedir is not None:
            for node in self.nodes:
                coverage.write_all_rpc_commands(self.options.coveragedir, node.rpc)

    def stop_node(self, i):
        """Stop node i and wait for its process to exit."""
        self.nodes[i].stop_node()
        self.nodes[i].wait_until_stopped()

    def stop_nodes(self):
        """Stop all running nodes."""
        for node in self.nodes:
            # Issue RPC to stop nodes
            node.stop_node()

        for node in self.nodes:
            # Wait for nodes to stop
            time.sleep(5)
            node.wait_until_stopped()

    def restart_node(self, i, extra_args=None):
        """Stop and restart node i, optionally with different extra args."""
        self.stop_node(i)
        self.start_node(i, extra_args)

    def assert_start_raises_init_error(self, i, extra_args=None, expected_msg=None, *args, **kwargs):
        """Assert that starting node i fails during init.

        If expected_msg is given, it must also appear in the captured stderr.
        """
        with tempfile.SpooledTemporaryFile(max_size=2**16) as log_stderr:
            try:
                self.start_node(i, extra_args, stderr=log_stderr, *args, **kwargs)
                self.stop_node(i)
            except Exception as e:
                assert 'bitcoinflexd exited' in str(e) # node must have shutdown
                self.nodes[i].running = False
                self.nodes[i].process = None
                if expected_msg is not None:
                    log_stderr.seek(0)
                    stderr = log_stderr.read().decode('utf-8')
                    if expected_msg not in stderr:
                        raise AssertionError("Expected error \"" + expected_msg + "\" not found in:\n" + stderr)
            else:
                # Startup succeeded: the node did NOT fail as required.
                if expected_msg is None:
                    assert_msg = "bitcoinflexd should have exited with an error"
                else:
                    assert_msg = "bitcoinflexd should have exited with expected error " + expected_msg
                raise AssertionError(assert_msg)

    def wait_for_node_exit(self, i, timeout):
        """Block until node i's process terminates (raises on timeout)."""
        self.nodes[i].process.wait(timeout)

    def split_network(self):
        """Split the network into two halves: nodes [0, 1] and [2, ...]."""
        disconnect_nodes(self.nodes[1], 2)
        disconnect_nodes(self.nodes[2], 1)
        self.sync_all([self.nodes[:2], self.nodes[2:]])

    def join_network(self):
        """Reconnect the halves created by split_network() and sync."""
        connect_nodes_bi(self.nodes, 1, 2)
        self.sync_all()

    def sync_all(self, node_groups=None):
        """Sync blocks and mempools within each group of nodes."""
        if not node_groups:
            node_groups = [self.nodes]

        for group in node_groups:
            sync_blocks(group)
            sync_mempools(group)

    def enable_mocktime(self):
        """Pin node clocks to a fixed historic timestamp for determinism."""
        self.mocktime = 1454124732 + (201 * 10 * 60)

    def disable_mocktime(self):
        """Return the nodes to the real system clock."""
        self.mocktime = 0

    # Private helper methods. These should not be accessed by the subclass test scripts.
    def _start_logging(self):
        """Configure file + console logging for the test run."""
        # Add logger and logging handlers
        self.log = logging.getLogger('TestFramework')
        self.log.setLevel(logging.DEBUG)
        # Create file handler to log all messages
        fh = logging.FileHandler(self.options.tmpdir + '/test_framework.log')
        fh.setLevel(logging.DEBUG)
        # Create console handler to log messages to stderr. By default this logs only error messages, but can be configured with --loglevel.
        ch = logging.StreamHandler(sys.stdout)
        # User can provide log level as a number or string (eg DEBUG). loglevel was caught as a string, so try to convert it to an int
        ll = int(self.options.loglevel) if self.options.loglevel.isdigit() else self.options.loglevel.upper()
        ch.setLevel(ll)
        # Format logs the same as bitcoinflexd's debug.log with microprecision (so log files can be concatenated and sorted)
        formatter = logging.Formatter(fmt='%(asctime)s.%(msecs)03d000 %(name)s (%(levelname)s): %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
        formatter.converter = time.gmtime
        fh.setFormatter(formatter)
        ch.setFormatter(formatter)
        self.log.addHandler(fh)
        self.log.addHandler(ch)

        if self.options.trace_rpc:
            rpc_logger = logging.getLogger("BitcoinRPC")
            rpc_logger.setLevel(logging.DEBUG)
            rpc_handler = logging.StreamHandler(sys.stdout)
            rpc_handler.setLevel(logging.DEBUG)
            rpc_logger.addHandler(rpc_handler)

    def _initialize_chain(self):
        """Build (or reuse) cached pre-mined datadirs and copy them into tmpdir."""
        assert self.num_nodes <= MAX_NODES
        create_cache = False
        for i in range(MAX_NODES):
            if not os.path.isdir(get_datadir_path(self.options.cachedir, i)):
                # Any missing cache dir forces a full cache rebuild.
                create_cache = True
                break

        if create_cache:
            self.log.debug("Creating data directories from cached datadir")

            # Remove any stale/partial cache directories first.
            for i in range(MAX_NODES):
                if os.path.isdir(get_datadir_path(self.options.cachedir, i)):
                    shutil.rmtree(get_datadir_path(self.options.cachedir, i))

            for i in range(MAX_NODES):
                datadir = initialize_datadir(self.options.cachedir, i)
                args = [os.getenv("BITCOIND", "bitcoinflexd"), "-spendzeroconfchange=1", "-server", "-keypool=1", "-datadir=" + datadir, "-discover=0"]
                if i > 0:
                    args.append("-connect=127.0.0.1:" + str(p2p_port(0)))
                self.nodes.append(TestNode(i, self.options.cachedir, extra_args=[], rpchost=None, timewait=None, binary=None, stderr=None, mocktime=self.mocktime, coverage_dir=None))
                self.nodes[i].args = args
                self.start_node(i)

            for node in self.nodes:
                node.wait_for_rpc_connection()

            # Generate 200 blocks (2 rounds x 4 peers x 25 blocks each),
            # advancing the mock clock 60s per block.
            self.enable_mocktime()
            block_time = self.mocktime - (201 * 60)
            for i in range(2):
                for peer in range(4):
                    for j in range(25):
                        set_node_times(self.nodes, block_time)
                        self.nodes[peer].generate(1)
                        block_time += 60
                    # Must sync before the next peer starts generating blocks.
                    sync_blocks(self.nodes)

            # Shut the nodes down and strip run-specific files from the cache.
            self.stop_nodes()
            self.nodes = []
            self.disable_mocktime()

            def cache_path(n, *paths):
                # Path inside cached node n's regtest directory.
                return os.path.join(get_datadir_path(self.options.cachedir, n), "regtest", *paths)

            for i in range(MAX_NODES):
                for entry in os.listdir(cache_path(i)):
                    # Keep only chain/wallet state; drop logs, pids, etc.
                    if entry not in ['wallet.dat', 'chainstate', 'blocks', 'sporks', 'zerocoin', 'backups']:
                        os.remove(cache_path(i, entry))

        for i in range(self.num_nodes):
            from_dir = get_datadir_path(self.options.cachedir, i)
            to_dir = get_datadir_path(self.options.tmpdir, i)
            shutil.copytree(from_dir, to_dir)
            # Rewrite the per-node config (ports etc.) for this test run.
            initialize_datadir(self.options.tmpdir, i)

    def _initialize_chain_clean(self):
        """Create empty datadirs only; the test starts from a blank chain."""
        for i in range(self.num_nodes):
            initialize_datadir(self.options.tmpdir, i)
class ComparisonTestFramework(BitcoinTestFramework):
    """Framework for p2p comparison testing: node 0 runs the binary
    under test and the remaining nodes run a reference binary."""

    def set_test_params(self):
        # Defaults: two nodes starting from a clean chain.
        self.num_nodes = 2
        self.setup_clean_chain = True

    def add_options(self, parser):
        # Binaries to compare; both default to the BITCOIND env variable.
        parser.add_option("--testbinary", dest="testbinary",
                          default=os.getenv("BITCOIND", "bitcoinflexd"),
                          help="bitcoinflexd binary to test")
        parser.add_option("--refbinary", dest="refbinary",
                          default=os.getenv("BITCOIND", "bitcoinflexd"),
                          help="bitcoinflexd binary to use for reference nodes (if any)")

    def setup_network(self):
        """Start node 0 on the test binary and the rest on the reference binary."""
        extra_args = [['-whitelist=127.0.0.1']] * self.num_nodes
        if hasattr(self, "extra_args"):
            extra_args = self.extra_args
        self.add_nodes(self.num_nodes, extra_args,
                       binary=[self.options.testbinary] +
                       [self.options.refbinary] * (self.num_nodes - 1))
        self.start_nodes()
class SkipTest(Exception):
    """Raised to skip a test; main() logs it and exits TEST_EXIT_SKIPPED."""
    def __init__(self, message):
        # Logged by the framework when the skip is reported.
        self.message = message
| true | true |
f731292a1a4cec34ff126f57ab5b89f88bdd9de5 | 2,733 | py | Python | test/test.py | QITI/Halftones | 8d0692b88711e858a93b90941abd6cb794a592a3 | [
"OLDAP-2.6",
"Python-2.0"
] | 4 | 2020-10-11T13:36:54.000Z | 2021-10-08T07:10:03.000Z | test/test.py | QITI/Halftones | 8d0692b88711e858a93b90941abd6cb794a592a3 | [
"OLDAP-2.6",
"Python-2.0"
] | null | null | null | test/test.py | QITI/Halftones | 8d0692b88711e858a93b90941abd6cb794a592a3 | [
"OLDAP-2.6",
"Python-2.0"
] | 2 | 2019-10-04T16:54:47.000Z | 2020-05-18T13:34:17.000Z | import halftones
from scipy.misc import *
gray = imread('lena1.jpg', True)
# halftones
jarvis = halftones.halftone.error_diffusion_jarvis(gray)
floyd_steinberg = halftones.halftone.error_diffusion_floyd_steinberg(gray)
stucki = halftones.halftone.error_diffusion_stucki(gray)
burkes = halftones.halftone.error_diffusion_burkes(gray)
sierra3 = halftones.halftone.error_diffusion_sierra3(gray)
sierra2 = halftones.halftone.error_diffusion_sierra2(gray)
sierra_simple = halftones.halftone.error_diffusion_sierra_simple(gray)
atkinson = halftones.halftone.error_diffusion_atkinson(gray)
shiaufan = halftones.halftone.error_diffusion_shiaufan(gray)
combinatorial3x3 = halftones.halftone.ordered_combinatorial3(gray)
combinatorial2x2 = halftones.halftone.ordered_combinatorial2(gray)
combinatorial4x4 = halftones.halftone.ordered_combinatorial4(gray)
OD = halftones.halftone.ordered_dithering_generalized_bayer(gray, 3)
diagonal_matrix = halftones.halftone.ordered_dithering_diagonal_ordered_matrix(gray)
clustered_dots = halftones.halftone.ordered_dithering_clustered_dots(gray)
central_white_points = halftones.halftone.ordered_dithering_central_white_point(gray)
balanced_centered_points = halftones.halftone.ordered_dithering_balanced_centered_point(gray)
dispersed_dots = halftones.halftone.ordered_dithering_dispersed_dots(gray)
# inverse halftones
inverseJarvis = halftones.inverse_halftone.inverse_fbih(jarvis)
inverseComb2x2 = halftones.inverse_halftone.inverse_halftone_ordered_combinatorial2(combinatorial2x2)
inverseComb3x3 = halftones.inverse_halftone.inverse_halftone_ordered_combinatorial3(combinatorial3x3)
inverseComb4x4 = halftones.inverse_halftone.inverse_halftone_ordered_combinatorial4(combinatorial4x4)
inverseOD = halftones.inverse_halftone.inverse_ordered_dithering_generalized(OD, 3)
# save some figures
imsave('halftone_ordered_dither_bayer.png', OD)
imsave('halftone_jarvis.png', jarvis)
imsave('halftone_stucki.png', stucki)
imsave('halftone_burkes.png', burkes)
imsave('halftone_sierra3.png', sierra3)
imsave('halftone_sierra2.png', sierra2)
imsave('halftone_sierra_simple.png', sierra_simple)
imsave('halftone_atkinson.png', atkinson)
imsave('halftone_shiaufan.png', shiaufan)
imsave('halftone_floyd_steinberg.png', floyd_steinberg)
imsave('halftone_ordered_dither_diagonal_matrix.png', diagonal_matrix)
imsave('halftone_ordered_dither_clustered_dots.png', clustered_dots)
imsave('halftone_ordered_dither_central_white_points.png', central_white_points)
imsave('halftone_ordered_dither_balanced_centered_points.png', balanced_centered_points)
imsave('halftone_ordered_dither_dispersed_dots.png', dispersed_dots)
imsave('inverse_ordered_dither.png', inverseOD)
imsave('inverse_jarvis.png', inverseJarvis)
# Exercises every halftoning and inverse-halftoning routine of the
# ``halftones`` package on one grayscale test image and saves the results.
import halftones
# NOTE(review): wildcard import; only imread/imsave are used. Both were
# removed from modern SciPy, so this script needs an old SciPy release.
from scipy.misc import *

# Load the test image; second argument True presumably flattens to
# grayscale -- confirm against old scipy.misc.imread docs.
gray = imread('lena1.jpg', True)

# Error-diffusion halftones.
jarvis = halftones.halftone.error_diffusion_jarvis(gray)
floyd_steinberg = halftones.halftone.error_diffusion_floyd_steinberg(gray)
stucki = halftones.halftone.error_diffusion_stucki(gray)
burkes = halftones.halftone.error_diffusion_burkes(gray)
sierra3 = halftones.halftone.error_diffusion_sierra3(gray)
sierra2 = halftones.halftone.error_diffusion_sierra2(gray)
sierra_simple = halftones.halftone.error_diffusion_sierra_simple(gray)
atkinson = halftones.halftone.error_diffusion_atkinson(gray)
shiaufan = halftones.halftone.error_diffusion_shiaufan(gray)

# Ordered-dithering halftones.
combinatorial3x3 = halftones.halftone.ordered_combinatorial3(gray)
combinatorial2x2 = halftones.halftone.ordered_combinatorial2(gray)
combinatorial4x4 = halftones.halftone.ordered_combinatorial4(gray)
OD = halftones.halftone.ordered_dithering_generalized_bayer(gray, 3)
diagonal_matrix = halftones.halftone.ordered_dithering_diagonal_ordered_matrix(gray)
clustered_dots = halftones.halftone.ordered_dithering_clustered_dots(gray)
central_white_points = halftones.halftone.ordered_dithering_central_white_point(gray)
balanced_centered_points = halftones.halftone.ordered_dithering_balanced_centered_point(gray)
dispersed_dots = halftones.halftone.ordered_dithering_dispersed_dots(gray)

# Inverse halftones reconstructed from the halftoned images above.
inverseJarvis = halftones.inverse_halftone.inverse_fbih(jarvis)
inverseComb2x2 = halftones.inverse_halftone.inverse_halftone_ordered_combinatorial2(combinatorial2x2)
inverseComb3x3 = halftones.inverse_halftone.inverse_halftone_ordered_combinatorial3(combinatorial3x3)
inverseComb4x4 = halftones.inverse_halftone.inverse_halftone_ordered_combinatorial4(combinatorial4x4)
inverseOD = halftones.inverse_halftone.inverse_ordered_dithering_generalized(OD, 3)

# Save a subset of the results as PNGs in the working directory.
imsave('halftone_ordered_dither_bayer.png', OD)
imsave('halftone_jarvis.png', jarvis)
imsave('halftone_stucki.png', stucki)
imsave('halftone_burkes.png', burkes)
imsave('halftone_sierra3.png', sierra3)
imsave('halftone_sierra2.png', sierra2)
imsave('halftone_sierra_simple.png', sierra_simple)
imsave('halftone_atkinson.png', atkinson)
imsave('halftone_shiaufan.png', shiaufan)
imsave('halftone_floyd_steinberg.png', floyd_steinberg)
imsave('halftone_ordered_dither_diagonal_matrix.png', diagonal_matrix)
imsave('halftone_ordered_dither_clustered_dots.png', clustered_dots)
imsave('halftone_ordered_dither_central_white_points.png', central_white_points)
imsave('halftone_ordered_dither_balanced_centered_points.png', balanced_centered_points)
imsave('halftone_ordered_dither_dispersed_dots.png', dispersed_dots)
imsave('inverse_ordered_dither.png', inverseOD)
imsave('inverse_jarvis.png', inverseJarvis)
| true | true |
f7312a9191b2633265f3e838996158b974786d78 | 4,679 | py | Python | desitrip/py/desitrip/scripts/process.py | EquinoxOmega0/timedomain | 092dcda58ed380cdb41f02c1c7af33ac19c52b63 | [
"MIT"
] | 4 | 2021-02-24T15:02:35.000Z | 2022-01-18T19:24:27.000Z | desitrip/py/desitrip/scripts/process.py | MatthewPortman/timedomain | b9c6c2e6804d7dde56311d9402769be545d505d0 | [
"MIT"
] | 35 | 2020-11-06T17:51:08.000Z | 2021-10-14T01:47:16.000Z | desitrip/py/desitrip/scripts/process.py | MatthewPortman/timedomain | b9c6c2e6804d7dde56311d9402769be545d505d0 | [
"MIT"
] | 10 | 2020-03-13T20:34:15.000Z | 2021-09-23T13:35:27.000Z | #!/usr/bin/env python
"""Apply the DESITrIP CNN classifier to observed spectra,
chosen by tile ID and date.
"""
from desispec.io import read_spectra, write_spectra
from desispec.spectra import Spectra
from desitarget.cmx.cmx_targetmask import cmx_mask
from desitrip.preproc import rebin_flux, rescale_flux
from astropy.io import fits
from astropy.table import Table, vstack, hstack
from glob import glob
from datetime import date
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
import os
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from tensorflow import keras
p = ArgumentParser(description='DESITrIP data processing',
formatter_class=ArgumentDefaultsHelpFormatter)
p.add_argument('--tile', type=int, default=0,
help='Tile ID for processing.')
p.add_argument('--date', default=date.today().strftime('%Y%m%d'),
help='Date of observation [YYYYMMDD]')
p.add_argument('--tfmodel', default=None,
help='TensorFlow model HDF5 definition')
args = p.parse_args()
# Access redux folder.
redux='/global/project/projectdirs/desi/spectro/redux/daily/tiles'
prefix_in='/'.join([redux, '{:05d}'.format(args.tile), args.date])
if not os.path.isdir(prefix_in):
raise SystemExit('{} does not exist.'.format(prefix_in))
# Set up BGS target bit selection.
cmx_bgs_bits = '|'.join([_ for _ in cmx_mask.names() if 'BGS' in _])
# List zbest and coadd files.
zbfiles = sorted(glob('{}/zbest*.fits'.format(prefix_in)))
cafiles = sorted(glob('{}/coadd*.fits'.format(prefix_in)))
if args.tfmodel is not None:
classifier = keras.models.load_model(args.tfmodel)
# Loop through zbest and coadd files for each petal.
# Extract the fibermaps, ZBEST tables, and spectra.
# Keep only BGS targets passing basic event selection.
allzbest = None
allfmap = None
allwave = None
allflux = None
allivar = None
allmask = None
allres = None
for cafile, zbfile in zip(cafiles, zbfiles):
# Access data per petal.
zbest = Table.read(zbfile, 'ZBEST')
fibermap = Table.read(zbfile, 'FIBERMAP')
pspectra = read_spectra(cafile)
# Apply standard event selection.
isTGT = fibermap['OBJTYPE'] == 'TGT'
isGAL = zbest['SPECTYPE'] == 'GALAXY'
isBGS = fibermap['CMX_TARGET'] & cmx_mask.mask(cmx_bgs_bits) != 0
select = isTGT & isGAL & isBGS
# Accumulate spectrum data.
if allzbest is None:
allzbest = zbest[select]
allfmap = fibermap[select]
allwave = pspectra.wave['brz']
allflux = pspectra.flux['brz'][select]
allivar = pspectra.ivar['brz'][select]
allmask = pspectra.mask['brz'][select]
allres = pspectra.resolution_data['brz'][select]
else:
allzbest = vstack([allzbest, zbest[select]])
allfmap = vstack([allfmap, fibermap[select]])
allflux = np.vstack([allflux, pspectra.flux['brz'][select]])
allivar = np.vstack([allivar, pspectra.ivar['brz'][select]])
allmask = np.vstack([allmask, pspectra.mask['brz'][select]])
allres = np.vstack([allres, pspectra.resolution_data['brz'][select]])
# Apply the DESITrIP preprocessing to selected spectra.
rewave, reflux, reivar = rebin_flux(allwave, allflux, allivar, allzbest['Z'],
minwave=2500., maxwave=9500., nbins=150,
log=True, clip=True)
rsflux = rescale_flux(reflux)
# Run the classification.
if args.tfmodel is not None:
pred = classifier.predict(rsflux)
# Create output: selected target spectra.
selected_spectra = Spectra(bands=['brz'],
wave={'brz' : allwave},
flux={'brz' : allflux},
ivar={'brz' : allivar},
mask={'brz' : allmask},
resolution_data={'brz' : allres},
fibermap=allfmap)
write_spectra('selected-{}-{}.fits'.format(args.tile, args.date), selected_spectra)
# Append preprocess spectra to output.
hx = fits.HDUList()
hdu_rewave = fits.PrimaryHDU(rewave)
hdu_rewave.header['EXTNAME'] = 'REWAVE'
hdu_rewave.header['BUNIT'] = 'Angstrom'
hdu_rewave.header['AIRORVAC'] = ('vac', 'Vacuum wavelengths')
hx.append(hdu_rewave)
hdu_reflux = fits.ImageHDU(reflux)
hdu_reflux.header['EXTNAME'] = 'REFLUX'
hx.append(hdu_reflux)
hdu_rsflux = fits.ImageHDU(rsflux)
hdu_rsflux.header['EXTNAME'] = 'RSFLUX'
hx.append(hdu_rsflux)
hdu_classify = fits.ImageHDU(pred)
hdu_classify.header['EXTNAME'] = 'OBJCLASS'
hx.append(hdu_classify)
hx.append(fits.BinTableHDU(allzbest))
hx.writeto('reduced-{}-{}.fits'.format(args.tile, args.date), overwrite=True)
| 34.404412 | 83 | 0.674717 |
# Apply the DESITrIP CNN classifier to observed spectra, chosen by
# tile ID and date.
from desispec.io import read_spectra, write_spectra
from desispec.spectra import Spectra
from desitarget.cmx.cmx_targetmask import cmx_mask

from desitrip.preproc import rebin_flux, rescale_flux

from astropy.io import fits
from astropy.table import Table, vstack, hstack
from glob import glob
from datetime import date
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter

import os
import numpy as np

import matplotlib as mpl
import matplotlib.pyplot as plt

from tensorflow import keras

p = ArgumentParser(description='DESITrIP data processing',
                   formatter_class=ArgumentDefaultsHelpFormatter)
p.add_argument('--tile', type=int, default=0,
               help='Tile ID for processing.')
p.add_argument('--date', default=date.today().strftime('%Y%m%d'),
               help='Date of observation [YYYYMMDD]')
p.add_argument('--tfmodel', default=None,
               help='TensorFlow model HDF5 definition')
args = p.parse_args()

# Locate the daily reduction folder for the requested tile and date.
redux='/global/project/projectdirs/desi/spectro/redux/daily/tiles'
prefix_in='/'.join([redux, '{:05d}'.format(args.tile), args.date])
if not os.path.isdir(prefix_in):
    raise SystemExit('{} does not exist.'.format(prefix_in))

# BGS target bit selection: every commissioning mask bit containing "BGS".
cmx_bgs_bits = '|'.join([_ for _ in cmx_mask.names() if 'BGS' in _])

# Per-petal redshift-fit and coadded-spectra files.
zbfiles = sorted(glob('{}/zbest*.fits'.format(prefix_in)))
cafiles = sorted(glob('{}/coadd*.fits'.format(prefix_in)))

# NOTE(review): when --tfmodel is omitted, `pred` below is never assigned
# but is still used in fits.ImageHDU(pred) at the end -> NameError.
if args.tfmodel is not None:
    classifier = keras.models.load_model(args.tfmodel)

# Accumulators for the selected spectra across all petals.
allzbest = None
allfmap = None
allwave = None
allflux = None
allivar = None
allmask = None
allres = None

for cafile, zbfile in zip(cafiles, zbfiles):
    # Read one petal's redshift fits, fibermap and spectra.
    zbest = Table.read(zbfile, 'ZBEST')
    fibermap = Table.read(zbfile, 'FIBERMAP')
    pspectra = read_spectra(cafile)

    # Standard event selection: BGS galaxy targets only.
    isTGT = fibermap['OBJTYPE'] == 'TGT'
    isGAL = zbest['SPECTYPE'] == 'GALAXY'
    isBGS = fibermap['CMX_TARGET'] & cmx_mask.mask(cmx_bgs_bits) != 0
    select = isTGT & isGAL & isBGS

    # Accumulate the selected rows/spectra across petals.
    if allzbest is None:
        allzbest = zbest[select]
        allfmap = fibermap[select]
        allwave = pspectra.wave['brz']
        allflux = pspectra.flux['brz'][select]
        allivar = pspectra.ivar['brz'][select]
        allmask = pspectra.mask['brz'][select]
        allres = pspectra.resolution_data['brz'][select]
    else:
        allzbest = vstack([allzbest, zbest[select]])
        allfmap = vstack([allfmap, fibermap[select]])
        allflux = np.vstack([allflux, pspectra.flux['brz'][select]])
        allivar = np.vstack([allivar, pspectra.ivar['brz'][select]])
        allmask = np.vstack([allmask, pspectra.mask['brz'][select]])
        allres = np.vstack([allres, pspectra.resolution_data['brz'][select]])

# DESITrIP preprocessing: rebin to 150 log-wavelength bins and rescale.
rewave, reflux, reivar = rebin_flux(allwave, allflux, allivar, allzbest['Z'],
                                    minwave=2500., maxwave=9500., nbins=150,
                                    log=True, clip=True)
rsflux = rescale_flux(reflux)

# Run the classification (only when a model was loaded above).
if args.tfmodel is not None:
    pred = classifier.predict(rsflux)

# Write the selected target spectra to a FITS file.
selected_spectra = Spectra(bands=['brz'],
                           wave={'brz' : allwave},
                           flux={'brz' : allflux},
                           ivar={'brz' : allivar},
                           mask={'brz' : allmask},
                           resolution_data={'brz' : allres},
                           fibermap=allfmap)

write_spectra('selected-{}-{}.fits'.format(args.tile, args.date), selected_spectra)

# Write the preprocessed spectra (and classification) to a second file.
hx = fits.HDUList()

hdu_rewave = fits.PrimaryHDU(rewave)
hdu_rewave.header['EXTNAME'] = 'REWAVE'
hdu_rewave.header['BUNIT'] = 'Angstrom'
hdu_rewave.header['AIRORVAC'] = ('vac', 'Vacuum wavelengths')
hx.append(hdu_rewave)

hdu_reflux = fits.ImageHDU(reflux)
hdu_reflux.header['EXTNAME'] = 'REFLUX'
hx.append(hdu_reflux)

hdu_rsflux = fits.ImageHDU(rsflux)
hdu_rsflux.header['EXTNAME'] = 'RSFLUX'
hx.append(hdu_rsflux)

# NOTE(review): `pred` is undefined here when --tfmodel was not given.
hdu_classify = fits.ImageHDU(pred)
hdu_classify.header['EXTNAME'] = 'OBJCLASS'
hx.append(hdu_classify)

hx.append(fits.BinTableHDU(allzbest))

hx.writeto('reduced-{}-{}.fits'.format(args.tile, args.date), overwrite=True)
| true | true |
f7312aa4fd65012229519ec351624dc9f6a06ed9 | 3,060 | py | Python | ethtx_ce/backend/processors/abi_processor/balances.py | daifoundation/ethtx_ce | 5aa0de0e33f6af569e840554b024116c9a21d54b | [
"Apache-2.0"
] | 1 | 2021-07-26T11:05:24.000Z | 2021-07-26T11:05:24.000Z | ethtx_ce/backend/processors/abi_processor/balances.py | daifoundation/ethtx_ce | 5aa0de0e33f6af569e840554b024116c9a21d54b | [
"Apache-2.0"
] | null | null | null | ethtx_ce/backend/processors/abi_processor/balances.py | daifoundation/ethtx_ce | 5aa0de0e33f6af569e840554b024116c9a21d54b | [
"Apache-2.0"
] | 1 | 2021-07-26T11:05:32.000Z | 2021-07-26T11:05:32.000Z | # Copyright 2021 DAI Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at: http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import defaultdict
from typing import List
from .abc import ABISubmoduleAbc
from ...models.decoded_model import DecodedTransfer, DecodedBalance, AddressInfo
# The 0x000...000 pseudo-address used for mints/burns; transfers touching
# it are excluded from per-holder balance accounting.
ZERO_ADDRESS = "0x" + 40 * "0"
class ABIBalancesDecoder(ABISubmoduleAbc):
    """Aggregates decoded token transfers into net per-holder balances."""

    def decode(self, transfers: List[DecodedTransfer]) -> List:
        """Return one DecodedBalance per holder with non-zero net token changes.

        Mint/burn legs (the zero address) are ignored on their side of the
        transfer; holders appear in order of first participation.
        """
        holder_names = {}                              # address -> display name
        token_info = {}                                # token address -> (standard, symbol)
        sheet = defaultdict(lambda: defaultdict(int))  # holder -> token -> net amount

        # Single pass: record holders/tokens and accumulate signed amounts.
        for t in transfers:
            sender, receiver = t.from_address, t.to_address
            if sender.address != ZERO_ADDRESS:
                holder_names[sender.address] = sender.name
                sheet[sender.address][t.token_address] -= t.value
            if receiver.address != ZERO_ADDRESS:
                holder_names[receiver.address] = receiver.name
                sheet[receiver.address][t.token_address] += t.value
            token_info[t.token_address] = (t.token_standard, t.token_symbol)

        # Emit only holders that end up with at least one non-zero balance.
        balances = []
        for address, name in holder_names.items():
            tokens = []
            for token_address, amount in sheet[address].items():
                if amount:
                    token_standard, token_symbol = token_info[token_address]
                    tokens.append(
                        dict(
                            token_address=token_address,
                            token_symbol=token_symbol,
                            token_standard=token_standard,
                            balance=amount,
                        )
                    )
            if tokens:
                balances.append(
                    DecodedBalance(
                        holder=AddressInfo(address, name), tokens=tokens
                    )
                )

        return balances
| 39.230769 | 88 | 0.605556 |
from collections import defaultdict
from typing import List
from .abc import ABISubmoduleAbc
from ...models.decoded_model import DecodedTransfer, DecodedBalance, AddressInfo
# The 0x000...000 mint/burn pseudo-address; excluded from balance holders.
ZERO_ADDRESS = "0x" + 40 * "0"
class ABIBalancesDecoder(ABISubmoduleAbc):
    """Aggregates decoded token transfers into net per-holder balances."""

    def decode(self, transfers: List[DecodedTransfer]) -> List:
        """Return one DecodedBalance per holder with non-zero net token changes."""
        # Holder address -> display name (insertion order = first appearance).
        balance_holders = dict()
        # Token address -> (standard, symbol).
        balance_tokens = dict()
        for transfer in transfers:
            # The zero address marks mints/burns and is never a balance holder.
            if transfer.from_address.address != ZERO_ADDRESS:
                balance_holders[
                    transfer.from_address.address
                ] = transfer.from_address.name
            if transfer.to_address.address != ZERO_ADDRESS:
                balance_holders[transfer.to_address.address] = transfer.to_address.name
            balance_tokens[transfer.token_address] = (
                transfer.token_standard,
                transfer.token_symbol,
            )

        # Holder -> token -> signed net amount.
        balance_sheet: dict = {address: defaultdict(int) for address in balance_holders}
        for transfer in transfers:
            if transfer.from_address.address != ZERO_ADDRESS:
                balance_sheet[transfer.from_address.address][
                    transfer.token_address
                ] -= transfer.value
            if transfer.to_address.address != ZERO_ADDRESS:
                balance_sheet[transfer.to_address.address][
                    transfer.token_address
                ] += transfer.value

        # Emit only holders with at least one non-zero token balance.
        balances = []
        for holder_address in balance_holders:
            tokens = []
            for token_address in balance_sheet[holder_address]:
                if balance_sheet[holder_address][token_address]:
                    token_standard, token_symbol = balance_tokens[token_address]
                    tokens.append(
                        dict(
                            token_address=token_address,
                            token_symbol=token_symbol,
                            token_standard=token_standard,
                            balance=balance_sheet[holder_address][token_address],
                        )
                    )
            if tokens:
                holder_name = balance_holders[holder_address]
                balances.append(
                    DecodedBalance(
                        holder=AddressInfo(holder_address, holder_name), tokens=tokens
                    )
                )

        return balances
| true | true |
f7312b9e3d53282b3517fa0a599b4e1dc17e254b | 827 | py | Python | ehome/utils/response_code.py | gavinliu4011/eHome | 2fb06a40ba7092835bd0904145086868cb9d45ed | [
"Apache-2.0"
] | 4 | 2018-07-12T11:49:05.000Z | 2020-03-23T15:14:15.000Z | ehome/utils/response_code.py | gavinliu4011/eHome | 2fb06a40ba7092835bd0904145086868cb9d45ed | [
"Apache-2.0"
] | null | null | null | ehome/utils/response_code.py | gavinliu4011/eHome | 2fb06a40ba7092835bd0904145086868cb9d45ed | [
"Apache-2.0"
] | null | null | null | class RET:
    """String status codes for eHome API responses ('0' means success)."""

    OK = '0'

    # 40xx: database / data errors
    DBERR = '4001'
    NODATA = '4002'
    DATAEXIST = '4003'
    DATAERR = '4004'

    # 41xx: session, login, parameter and user errors
    SESSIONERR = '4101'
    LOGINERR = '4102'
    PARAMERR = '4103'
    USERERR = '4104'
    ROLEERR = '4105'
    PWDERR = '4106'

    # 42xx: request restrictions (illegal/rate-limited request, IP)
    REQERR = '4201'
    IPERR = '4202'

    # 43xx: third-party system and file IO errors
    THIRDERR = '4301'
    IOERR = '4302'

    # 45xx: server-side errors
    SERVERERR = '4500'
    # NOTE(review): "UNKOWNERR" looks like a typo of "UNKNOWN"; kept as-is
    # because external code may reference this attribute name.
    UNKOWNERR = '4501'


# Human-readable (Chinese) message for each status code.  These are runtime
# strings returned to API clients, so they are intentionally left untranslated.
error_map = {
    RET.OK: '成功',
    RET.DBERR: '数据库查询错误',
    RET.NODATA: '无数据',
    RET.DATAEXIST: '数据已存在',
    RET.DATAERR: '数据错误',
    RET.SESSIONERR: '用户未登录',
    RET.LOGINERR: '用户登录失败',
    RET.PARAMERR: '参数错误',
    RET.USERERR: '用户不存在或未激活',
    RET.ROLEERR: '用户身份错误',
    RET.PWDERR: '密码错误',
    RET.REQERR: '非法请求或请求次数受限',
    RET.IPERR: 'IP受限',
    RET.THIRDERR: '第三方系统错误',
    RET.IOERR: '文件读写错误',
    RET.SERVERERR: '内部错误',
    RET.UNKOWNERR: '未知错误',
}
| 20.675 | 30 | 0.553809 | class RET:
    """String status codes for eHome API responses ('0' means success)."""

    OK = '0'

    # 40xx: database / data errors
    DBERR = '4001'
    NODATA = '4002'
    DATAEXIST = '4003'
    DATAERR = '4004'

    # 41xx: session, login, parameter and user errors
    SESSIONERR = '4101'
    LOGINERR = '4102'
    PARAMERR = '4103'
    USERERR = '4104'
    ROLEERR = '4105'
    PWDERR = '4106'

    # 42xx: request restrictions (illegal/rate-limited request, IP)
    REQERR = '4201'
    IPERR = '4202'

    # 43xx: third-party system and file IO errors
    THIRDERR = '4301'
    IOERR = '4302'

    # 45xx: server-side errors
    SERVERERR = '4500'
    # NOTE(review): "UNKOWNERR" is a likely typo of "UNKNOWN"; kept for
    # compatibility with any external references.
    UNKOWNERR = '4501'


# Chinese user-facing message for each code; runtime strings, not translated.
error_map = {
    RET.OK: '成功',
    RET.DBERR: '数据库查询错误',
    RET.NODATA: '无数据',
    RET.DATAEXIST: '数据已存在',
    RET.DATAERR: '数据错误',
    RET.SESSIONERR: '用户未登录',
    RET.LOGINERR: '用户登录失败',
    RET.PARAMERR: '参数错误',
    RET.USERERR: '用户不存在或未激活',
    RET.ROLEERR: '用户身份错误',
    RET.PWDERR: '密码错误',
    RET.REQERR: '非法请求或请求次数受限',
    RET.IPERR: 'IP受限',
    RET.THIRDERR: '第三方系统错误',
    RET.IOERR: '文件读写错误',
    RET.SERVERERR: '内部错误',
    RET.UNKOWNERR: '未知错误',
}
| true | true |
f7312e1e9ac00ea57e3041da27b8c0ce92fd33e4 | 3,146 | py | Python | manabi/apps/books/models.py | aehlke/manabi | 1dfdd4ecb9c1214b6a70268be0dcfeda9da8754b | [
"MIT"
] | 14 | 2015-10-03T07:34:28.000Z | 2021-09-20T07:10:29.000Z | manabi/apps/books/models.py | aehlke/manabi | 1dfdd4ecb9c1214b6a70268be0dcfeda9da8754b | [
"MIT"
] | 23 | 2019-10-25T08:47:23.000Z | 2022-01-30T02:00:45.000Z | manabi/apps/books/models.py | aehlke/manabi | 1dfdd4ecb9c1214b6a70268be0dcfeda9da8754b | [
"MIT"
] | 7 | 2016-10-04T08:10:36.000Z | 2021-09-20T07:10:33.000Z | from functools import wraps
from urllib.error import URLError
from django.db import models
from django.urls import reverse
#from amazonproduct import API as AmazonAPI
from manabi.apps.utils.slugs import slugify
from django.conf import settings
#TODO-OLD find different way.
#amazon_api = AmazonAPI(settings.AWS_KEY, settings.AWS_SECRET_KEY, 'us')
class DeckedTextbookManager(models.Manager):
    """Manager restricted to textbooks that have at least one shared, active deck."""

    def get_query_set(self):
        """Return distinct textbooks linked to an active, shared deck."""
        base_qs = super(DeckedTextbookManager, self).get_query_set()
        return base_qs.filter(deck__active=True, deck__shared=True).distinct()
def uses_amazon_api(func):
    """Decorator for Textbook methods that require an Amazon product lookup.

    Fails fast with a clear error before the wrapped method runs when the
    instance has no usable ISBN, since no product lookup is possible then.
    """
    @wraps(func)
    def wrapped(self, *args, **kwargs):
        # A falsy (empty) ISBN cannot be looked up.  ValueError is a subclass
        # of Exception, so existing `except Exception` handlers keep working
        # while the error type becomes more specific than the old bare
        # `raise Exception(...)`.
        if not self.isbn:
            raise ValueError('Textbook has no ISBN.')
        return func(self, *args, **kwargs)
    return wrapped
class Textbook(models.Model):
    """A textbook referenced by flashcard decks, identified by ISBN.

    Product details (title, author, cover images) are fetched on demand via
    the Amazon product API.  NOTE(review): `_item_lookup` is currently
    stubbed to return None (see TODO below), so the Amazon-backed methods
    raise AttributeError until the API client is restored.
    """

    # Default manager, plus a restricted one exposing only textbooks that
    # have at least one active, shared deck.
    objects = models.Manager()
    decked_objects = DeckedTextbookManager()

    slug = models.SlugField(blank=True)  # Defaults to max_length=50
    isbn = models.CharField(max_length=13)
    custom_title = models.CharField(max_length=200, blank=True,
        help_text='Set this to override the Amazon product name.')
    #TODO-OLD student level field

    class Meta:
        app_label = 'flashcards'

    def __unicode__(self):
        """Label as "<title> [<isbn>]", or just the ISBN if the lookup
        fails with a URLError."""
        try:
            return self.get_basic_info()['title'] + ' [{0}]'.format(self.isbn)
        except URLError:
            return 'ISBN: {0}'.format(self.isbn)

    def save(self, *args, **kwargs):
        """Derive `slug` from the (possibly Amazon-sourced) title, then save."""
        title = self.get_basic_info()['title']
        self.slug = slugify(title)
        super(Textbook, self).save(*args, **kwargs)

    @property
    def shared_decks(self):
        """Queryset of this textbook's decks that are active and shared."""
        return self.deck_set.filter(
            active=True, shared=True)

    def get_absolute_url(self):
        """Return the canonical detail URL, with or without a slug segment.

        BUG FIX: `django.urls.reverse()` takes `args`/`kwargs` as keyword
        arguments.  The previous positional form `reverse(name, (), {...})`
        bound `()` to `urlconf` and the dict to `args`, so reversal could
        never match the named URL patterns.
        """
        if self.slug:
            return reverse('book_detail_with_slug', kwargs={
                'object_id': self.id,
                'slug': self.slug,
            })
        return reverse('book_detail_without_slug', kwargs={
            'object_id': self.id,
        })

    @property
    def cleaned_isbn(self):
        """ISBN with surrounding whitespace and dashes removed."""
        return self.isbn.strip().replace('-', '')

    def _item_lookup(self, **kwargs):
        """Look up this book on Amazon by ISBN.  Currently disabled."""
        # Stubbed out: returns None, which makes the callers below fail with
        # AttributeError when they dereference the result.
        return
        #TODO-OLD fix
        return amazon_api.item_lookup(
            self.cleaned_isbn, IdType='ISBN', SearchIndex='Books', **kwargs)

    @uses_amazon_api
    def get_image_urls(self):
        """Return available cover image URLs keyed by size.

        Returns:
            dict: {'small' | 'medium' | 'large': url}
        """
        urls = {}
        root = self._item_lookup(ResponseGroup='Images')
        for size in ('Small', 'Medium', 'Large'):
            urls[size.lower()] = getattr(root.Items.Item, size + 'Image').URL.pyval
        return urls

    @uses_amazon_api
    def get_basic_info(self):
        """Return basic product info as {'author': ..., 'title': ...}.

        `custom_title`, when set, overrides the Amazon product title.
        """
        root = self._item_lookup(ResponseGroup='Small')
        attribs = root.Items.Item.ItemAttributes
        return {
            'author': attribs.Author.pyval,
            'title': self.custom_title or attribs.Title.pyval,
        }
| 28.862385 | 83 | 0.608392 | from functools import wraps
from urllib.error import URLError
from django.db import models
from django.urls import reverse
from manabi.apps.utils.slugs import slugify
from django.conf import settings
class DeckedTextbookManager(models.Manager):
    """Manager whose queryset contains only textbooks with a shared, active deck."""

    def get_query_set(self):
        """Filter the default queryset down to decked (active and shared) textbooks."""
        return (
            super(DeckedTextbookManager, self)
            .get_query_set()
            .filter(deck__active=True, deck__shared=True)
            .distinct()
        )
def uses_amazon_api(func):
    """Guard decorator for methods that need an Amazon product lookup.

    Raises before the wrapped method runs when the instance's ISBN is falsy,
    because a lookup without an ISBN cannot succeed.
    """
    @wraps(func)
    def wrapped(self, *args, **kwargs):
        if not self.isbn:
            # ValueError instead of bare Exception: more specific, and still
            # caught by any existing `except Exception` handlers.
            raise ValueError('Textbook has no ISBN.')
        return func(self, *args, **kwargs)
    return wrapped
class Textbook(models.Model):
    """A textbook identified by ISBN, enriched on demand from Amazon.

    NOTE(review): `_item_lookup` is stubbed to return None, so the methods
    decorated with `uses_amazon_api` currently raise AttributeError when they
    dereference its result.
    """

    objects = models.Manager()
    # Restricted manager: only textbooks with at least one active, shared deck.
    decked_objects = DeckedTextbookManager()

    slug = models.SlugField(blank=True)  # SlugField defaults to max_length=50
    isbn = models.CharField(max_length=13)
    custom_title = models.CharField(max_length=200, blank=True,
        help_text='Set this to override the Amazon product name.')

    class Meta:
        app_label = 'flashcards'

    def __unicode__(self):
        """Label as "<title> [<isbn>]"; fall back to the ISBN on URLError."""
        try:
            return self.get_basic_info()['title'] + ' [{0}]'.format(self.isbn)
        except URLError:
            return 'ISBN: {0}'.format(self.isbn)

    def save(self, *args, **kwargs):
        """Populate `slug` from the resolved title before saving."""
        title = self.get_basic_info()['title']
        self.slug = slugify(title)
        super(Textbook, self).save(*args, **kwargs)

    @property
    def shared_decks(self):
        """This textbook's active, shared decks."""
        return self.deck_set.filter(
            active=True, shared=True)

    def get_absolute_url(self):
        """Canonical detail URL for this textbook.

        BUG FIX: `reverse()` expects `args`/`kwargs` as keyword arguments;
        the old positional `reverse(name, (), {...})` form passed `()` as
        `urlconf` and the dict as `args`, so it could never reverse the URL.
        """
        if self.slug:
            return reverse('book_detail_with_slug', kwargs={
                'object_id': self.id,
                'slug': self.slug,
            })
        return reverse('book_detail_without_slug', kwargs={
            'object_id': self.id,
        })

    @property
    def cleaned_isbn(self):
        """ISBN stripped of whitespace and dashes."""
        return self.isbn.strip().replace('-', '')

    def _item_lookup(self, **kwargs):
        """Amazon product lookup by ISBN — disabled; returns None for now."""
        return
        # Unreachable until the Amazon API client is restored:
        return amazon_api.item_lookup(
            self.cleaned_isbn, IdType='ISBN', SearchIndex='Books', **kwargs)

    @uses_amazon_api
    def get_image_urls(self):
        """Return {'small'|'medium'|'large': url} for available cover images."""
        urls = {}
        root = self._item_lookup(ResponseGroup='Images')
        for size in ('Small', 'Medium', 'Large'):
            urls[size.lower()] = getattr(root.Items.Item, size + 'Image').URL.pyval
        return urls

    @uses_amazon_api
    def get_basic_info(self):
        """Return {'author': ..., 'title': ...}; `custom_title` wins if set."""
        root = self._item_lookup(ResponseGroup='Small')
        attribs = root.Items.Item.ItemAttributes
        return {
            'author': attribs.Author.pyval,
            'title': self.custom_title or attribs.Title.pyval,
        }
| true | true |
f7312ef87c220e201a4ce0da73de12af13f9c8a5 | 17,506 | py | Python | packages/python/plotly/plotly/validators/_surface.py | labaran1/plotly.py | 7ec751e8fed4a570c11ea4bea2231806389d62eb | [
"MIT"
] | null | null | null | packages/python/plotly/plotly/validators/_surface.py | labaran1/plotly.py | 7ec751e8fed4a570c11ea4bea2231806389d62eb | [
"MIT"
] | null | null | null | packages/python/plotly/plotly/validators/_surface.py | labaran1/plotly.py | 7ec751e8fed4a570c11ea4bea2231806389d62eb | [
"MIT"
] | null | null | null | import _plotly_utils.basevalidators
class SurfaceValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(self, plotly_name="surface", parent_name="", **kwargs):
super(SurfaceValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Surface"),
data_docs=kwargs.pop(
"data_docs",
"""
autocolorscale
Determines whether the colorscale is a default
palette (`autocolorscale: true`) or the palette
determined by `colorscale`. In case
`colorscale` is unspecified or `autocolorscale`
is true, the default palette will be chosen
according to whether numbers in the `color`
array are all positive, all negative or mixed.
cauto
Determines whether or not the color domain is
computed with respect to the input data (here z
or surfacecolor) or the bounds set in `cmin`
and `cmax` Defaults to `false` when `cmin` and
`cmax` are set by the user.
cmax
Sets the upper bound of the color domain. Value
should have the same units as z or surfacecolor
and if set, `cmin` must be set as well.
cmid
Sets the mid-point of the color domain by
scaling `cmin` and/or `cmax` to be equidistant
to this point. Value should have the same units
as z or surfacecolor. Has no effect when
`cauto` is `false`.
cmin
Sets the lower bound of the color domain. Value
should have the same units as z or surfacecolor
and if set, `cmax` must be set as well.
coloraxis
Sets a reference to a shared color axis.
References to these shared color axes are
"coloraxis", "coloraxis2", "coloraxis3", etc.
Settings for these shared color axes are set in
the layout, under `layout.coloraxis`,
`layout.coloraxis2`, etc. Note that multiple
color scales can be linked to the same color
axis.
colorbar
:class:`plotly.graph_objects.surface.ColorBar`
instance or dict with compatible properties
colorscale
Sets the colorscale. The colorscale must be an
array containing arrays mapping a normalized
value to an rgb, rgba, hex, hsl, hsv, or named
color string. At minimum, a mapping for the
lowest (0) and highest (1) values are required.
For example, `[[0, 'rgb(0,0,255)'], [1,
'rgb(255,0,0)']]`. To control the bounds of the
colorscale in color space, use `cmin` and
`cmax`. Alternatively, `colorscale` may be a
palette name string of the following list: Blac
kbody,Bluered,Blues,Cividis,Earth,Electric,Gree
ns,Greys,Hot,Jet,Picnic,Portland,Rainbow,RdBu,R
eds,Viridis,YlGnBu,YlOrRd.
connectgaps
Determines whether or not gaps (i.e. {nan} or
missing values) in the `z` data are filled in.
contours
:class:`plotly.graph_objects.surface.Contours`
instance or dict with compatible properties
customdata
Assigns extra data each datum. This may be
useful when listening to hover, click and
selection events. Note that, "scatter" traces
also appends customdata items in the markers
DOM elements
customdatasrc
Sets the source reference on Chart Studio Cloud
for `customdata`.
hidesurface
Determines whether or not a surface is drawn.
For example, set `hidesurface` to False
`contours.x.show` to True and `contours.y.show`
to True to draw a wire frame plot.
hoverinfo
Determines which trace information appear on
hover. If `none` or `skip` are set, no
information is displayed upon hovering. But, if
`none` is set, click and hover events are still
fired.
hoverinfosrc
Sets the source reference on Chart Studio Cloud
for `hoverinfo`.
hoverlabel
:class:`plotly.graph_objects.surface.Hoverlabel
` instance or dict with compatible properties
hovertemplate
Template string used for rendering the
information that appear on hover box. Note that
this will override `hoverinfo`. Variables are
inserted using %{variable}, for example "y:
%{y}" as well as %{xother}, {%_xother},
{%_xother_}, {%xother_}. When showing info for
several points, "xother" will be added to those
with different x positions from the first
point. An underscore before or after
"(x|y)other" will add a space on that side,
only when this field is shown. Numbers are
formatted using d3-format's syntax
%{variable:d3-format}, for example "Price:
%{y:$.2f}". https://github.com/d3/d3-format/tre
e/v1.4.5#d3-format for details on the
formatting syntax. Dates are formatted using
d3-time-format's syntax %{variable|d3-time-
format}, for example "Day: %{2019-01-01|%A}".
https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format for details on
the date formatting syntax. The variables
available in `hovertemplate` are the ones
emitted as event data described at this link
https://plotly.com/javascript/plotlyjs-
events/#event-data. Additionally, every
attributes that can be specified per-point (the
ones that are `arrayOk: true`) are available.
Anything contained in tag `<extra>` is
displayed in the secondary box, for example
"<extra>{fullData.name}</extra>". To hide the
secondary box completely, use an empty tag
`<extra></extra>`.
hovertemplatesrc
Sets the source reference on Chart Studio Cloud
for `hovertemplate`.
hovertext
Same as `text`.
hovertextsrc
Sets the source reference on Chart Studio Cloud
for `hovertext`.
ids
Assigns id labels to each datum. These ids for
object constancy of data points during
animation. Should be an array of strings, not
numbers or any other type.
idssrc
Sets the source reference on Chart Studio Cloud
for `ids`.
legendgroup
Sets the legend group for this trace. Traces
part of the same legend group hide/show at the
same time when toggling legend items.
legendgrouptitle
:class:`plotly.graph_objects.surface.Legendgrou
ptitle` instance or dict with compatible
properties
legendrank
Sets the legend rank for this trace. Items and
groups with smaller ranks are presented on
top/left side while with `*reversed*
`legend.traceorder` they are on bottom/right
side. The default legendrank is 1000, so that
you can use ranks less than 1000 to place
certain items before all unranked items, and
ranks greater than 1000 to go after all
unranked items.
lighting
:class:`plotly.graph_objects.surface.Lighting`
instance or dict with compatible properties
lightposition
:class:`plotly.graph_objects.surface.Lightposit
ion` instance or dict with compatible
properties
meta
Assigns extra meta information associated with
this trace that can be used in various text
attributes. Attributes such as trace `name`,
graph, axis and colorbar `title.text`,
annotation `text` `rangeselector`,
`updatemenues` and `sliders` `label` text all
support `meta`. To access the trace `meta`
values in an attribute in the same trace,
simply use `%{meta[i]}` where `i` is the index
or key of the `meta` item in question. To
access trace `meta` in layout attributes, use
`%{data[n[.meta[i]}` where `i` is the index or
key of the `meta` and `n` is the trace index.
metasrc
Sets the source reference on Chart Studio Cloud
for `meta`.
name
Sets the trace name. The trace name appear as
the legend item and on hover.
opacity
Sets the opacity of the surface. Please note
that in the case of using high `opacity` values
for example a value greater than or equal to
0.5 on two surfaces (and 0.25 with four
surfaces), an overlay of multiple transparent
surfaces may not perfectly be sorted in depth
by the webgl API. This behavior may be improved
in the near future and is subject to change.
opacityscale
Sets the opacityscale. The opacityscale must be
an array containing arrays mapping a normalized
value to an opacity value. At minimum, a
mapping for the lowest (0) and highest (1)
values are required. For example, `[[0, 1],
[0.5, 0.2], [1, 1]]` means that higher/lower
values would have higher opacity values and
those in the middle would be more transparent
Alternatively, `opacityscale` may be a palette
name string of the following list: 'min',
'max', 'extremes' and 'uniform'. The default is
'uniform'.
reversescale
Reverses the color mapping if true. If true,
`cmin` will correspond to the last color in the
array and `cmax` will correspond to the first
color.
scene
Sets a reference between this trace's 3D
coordinate system and a 3D scene. If "scene"
(the default value), the (x,y,z) coordinates
refer to `layout.scene`. If "scene2", the
(x,y,z) coordinates refer to `layout.scene2`,
and so on.
showlegend
Determines whether or not an item corresponding
to this trace is shown in the legend.
showscale
Determines whether or not a colorbar is
displayed for this trace.
stream
:class:`plotly.graph_objects.surface.Stream`
instance or dict with compatible properties
surfacecolor
Sets the surface color values, used for setting
a color scale independent of `z`.
surfacecolorsrc
Sets the source reference on Chart Studio Cloud
for `surfacecolor`.
text
Sets the text elements associated with each z
value. If trace `hoverinfo` contains a "text"
flag and "hovertext" is not set, these elements
will be seen in the hover labels.
textsrc
Sets the source reference on Chart Studio Cloud
for `text`.
uid
Assign an id to this trace, Use this to provide
object constancy between traces during
animations and transitions.
uirevision
Controls persistence of some user-driven
changes to the trace: `constraintrange` in
`parcoords` traces, as well as some `editable:
true` modifications such as `name` and
`colorbar.title`. Defaults to
`layout.uirevision`. Note that other user-
driven trace attribute changes are controlled
by `layout` attributes: `trace.visible` is
controlled by `layout.legend.uirevision`,
`selectedpoints` is controlled by
`layout.selectionrevision`, and
`colorbar.(x|y)` (accessible with `config:
{editable: true}`) is controlled by
`layout.editrevision`. Trace changes are
tracked by `uid`, which only falls back on
trace index if no `uid` is provided. So if your
app can add/remove traces before the end of the
`data` array, such that the same trace has a
different index, you can still preserve user-
driven changes if you give each trace a `uid`
that stays with it as it moves.
visible
Determines whether or not this trace is
visible. If "legendonly", the trace is not
drawn, but can appear as a legend item
(provided that the legend itself is visible).
x
Sets the x coordinates.
xcalendar
Sets the calendar system to use with `x` date
data.
xhoverformat
Sets the hover text formatting rulefor `x`
using d3 formatting mini-languages which are
very similar to those in Python. For numbers,
see: https://github.com/d3/d3-format/tree/v1.4.
5#d3-format. And for dates see:
https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two
items to d3's date formatter: "%h" for half of
the year as a decimal number as well as "%{n}f"
for fractional seconds with n digits. For
example, *2016-10-13 09:15:23.456* with
tickformat "%H~%M~%S.%2f" would display
*09~15~23.46*By default the values are
formatted using `xaxis.hoverformat`.
xsrc
Sets the source reference on Chart Studio Cloud
for `x`.
y
Sets the y coordinates.
ycalendar
Sets the calendar system to use with `y` date
data.
yhoverformat
Sets the hover text formatting rulefor `y`
using d3 formatting mini-languages which are
very similar to those in Python. For numbers,
see: https://github.com/d3/d3-format/tree/v1.4.
5#d3-format. And for dates see:
https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two
items to d3's date formatter: "%h" for half of
the year as a decimal number as well as "%{n}f"
for fractional seconds with n digits. For
example, *2016-10-13 09:15:23.456* with
tickformat "%H~%M~%S.%2f" would display
*09~15~23.46*By default the values are
formatted using `yaxis.hoverformat`.
ysrc
Sets the source reference on Chart Studio Cloud
for `y`.
z
Sets the z coordinates.
zcalendar
Sets the calendar system to use with `z` date
data.
zhoverformat
Sets the hover text formatting rulefor `z`
using d3 formatting mini-languages which are
very similar to those in Python. For numbers,
see: https://github.com/d3/d3-format/tree/v1.4.
5#d3-format. And for dates see:
https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two
items to d3's date formatter: "%h" for half of
the year as a decimal number as well as "%{n}f"
for fractional seconds with n digits. For
example, *2016-10-13 09:15:23.456* with
tickformat "%H~%M~%S.%2f" would display
*09~15~23.46*By default the values are
formatted using `zaxis.hoverformat`.
zsrc
Sets the source reference on Chart Studio Cloud
for `z`.
""",
),
**kwargs,
)
| 49.451977 | 72 | 0.542214 | import _plotly_utils.basevalidators
class SurfaceValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(self, plotly_name="surface", parent_name="", **kwargs):
super(SurfaceValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Surface"),
data_docs=kwargs.pop(
"data_docs",
"""
autocolorscale
Determines whether the colorscale is a default
palette (`autocolorscale: true`) or the palette
determined by `colorscale`. In case
`colorscale` is unspecified or `autocolorscale`
is true, the default palette will be chosen
according to whether numbers in the `color`
array are all positive, all negative or mixed.
cauto
Determines whether or not the color domain is
computed with respect to the input data (here z
or surfacecolor) or the bounds set in `cmin`
and `cmax` Defaults to `false` when `cmin` and
`cmax` are set by the user.
cmax
Sets the upper bound of the color domain. Value
should have the same units as z or surfacecolor
and if set, `cmin` must be set as well.
cmid
Sets the mid-point of the color domain by
scaling `cmin` and/or `cmax` to be equidistant
to this point. Value should have the same units
as z or surfacecolor. Has no effect when
`cauto` is `false`.
cmin
Sets the lower bound of the color domain. Value
should have the same units as z or surfacecolor
and if set, `cmax` must be set as well.
coloraxis
Sets a reference to a shared color axis.
References to these shared color axes are
"coloraxis", "coloraxis2", "coloraxis3", etc.
Settings for these shared color axes are set in
the layout, under `layout.coloraxis`,
`layout.coloraxis2`, etc. Note that multiple
color scales can be linked to the same color
axis.
colorbar
:class:`plotly.graph_objects.surface.ColorBar`
instance or dict with compatible properties
colorscale
Sets the colorscale. The colorscale must be an
array containing arrays mapping a normalized
value to an rgb, rgba, hex, hsl, hsv, or named
color string. At minimum, a mapping for the
lowest (0) and highest (1) values are required.
For example, `[[0, 'rgb(0,0,255)'], [1,
'rgb(255,0,0)']]`. To control the bounds of the
colorscale in color space, use `cmin` and
`cmax`. Alternatively, `colorscale` may be a
palette name string of the following list: Blac
kbody,Bluered,Blues,Cividis,Earth,Electric,Gree
ns,Greys,Hot,Jet,Picnic,Portland,Rainbow,RdBu,R
eds,Viridis,YlGnBu,YlOrRd.
connectgaps
Determines whether or not gaps (i.e. {nan} or
missing values) in the `z` data are filled in.
contours
:class:`plotly.graph_objects.surface.Contours`
instance or dict with compatible properties
customdata
Assigns extra data each datum. This may be
useful when listening to hover, click and
selection events. Note that, "scatter" traces
also appends customdata items in the markers
DOM elements
customdatasrc
Sets the source reference on Chart Studio Cloud
for `customdata`.
hidesurface
Determines whether or not a surface is drawn.
For example, set `hidesurface` to False
`contours.x.show` to True and `contours.y.show`
to True to draw a wire frame plot.
hoverinfo
Determines which trace information appear on
hover. If `none` or `skip` are set, no
information is displayed upon hovering. But, if
`none` is set, click and hover events are still
fired.
hoverinfosrc
Sets the source reference on Chart Studio Cloud
for `hoverinfo`.
hoverlabel
:class:`plotly.graph_objects.surface.Hoverlabel
` instance or dict with compatible properties
hovertemplate
Template string used for rendering the
information that appear on hover box. Note that
this will override `hoverinfo`. Variables are
inserted using %{variable}, for example "y:
%{y}" as well as %{xother}, {%_xother},
{%_xother_}, {%xother_}. When showing info for
several points, "xother" will be added to those
with different x positions from the first
point. An underscore before or after
"(x|y)other" will add a space on that side,
only when this field is shown. Numbers are
formatted using d3-format's syntax
%{variable:d3-format}, for example "Price:
%{y:$.2f}". https://github.com/d3/d3-format/tre
e/v1.4.5#d3-format for details on the
formatting syntax. Dates are formatted using
d3-time-format's syntax %{variable|d3-time-
format}, for example "Day: %{2019-01-01|%A}".
https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format for details on
the date formatting syntax. The variables
available in `hovertemplate` are the ones
emitted as event data described at this link
https://plotly.com/javascript/plotlyjs-
events/#event-data. Additionally, every
attributes that can be specified per-point (the
ones that are `arrayOk: true`) are available.
Anything contained in tag `<extra>` is
displayed in the secondary box, for example
"<extra>{fullData.name}</extra>". To hide the
secondary box completely, use an empty tag
`<extra></extra>`.
hovertemplatesrc
Sets the source reference on Chart Studio Cloud
for `hovertemplate`.
hovertext
Same as `text`.
hovertextsrc
Sets the source reference on Chart Studio Cloud
for `hovertext`.
ids
Assigns id labels to each datum. These ids for
object constancy of data points during
animation. Should be an array of strings, not
numbers or any other type.
idssrc
Sets the source reference on Chart Studio Cloud
for `ids`.
legendgroup
Sets the legend group for this trace. Traces
part of the same legend group hide/show at the
same time when toggling legend items.
legendgrouptitle
:class:`plotly.graph_objects.surface.Legendgrou
ptitle` instance or dict with compatible
properties
legendrank
Sets the legend rank for this trace. Items and
groups with smaller ranks are presented on
top/left side while with `*reversed*
`legend.traceorder` they are on bottom/right
side. The default legendrank is 1000, so that
you can use ranks less than 1000 to place
certain items before all unranked items, and
ranks greater than 1000 to go after all
unranked items.
lighting
:class:`plotly.graph_objects.surface.Lighting`
instance or dict with compatible properties
lightposition
:class:`plotly.graph_objects.surface.Lightposit
ion` instance or dict with compatible
properties
meta
Assigns extra meta information associated with
this trace that can be used in various text
attributes. Attributes such as trace `name`,
graph, axis and colorbar `title.text`,
annotation `text` `rangeselector`,
`updatemenues` and `sliders` `label` text all
support `meta`. To access the trace `meta`
values in an attribute in the same trace,
simply use `%{meta[i]}` where `i` is the index
or key of the `meta` item in question. To
access trace `meta` in layout attributes, use
`%{data[n[.meta[i]}` where `i` is the index or
key of the `meta` and `n` is the trace index.
metasrc
Sets the source reference on Chart Studio Cloud
for `meta`.
name
Sets the trace name. The trace name appear as
the legend item and on hover.
opacity
Sets the opacity of the surface. Please note
that in the case of using high `opacity` values
for example a value greater than or equal to
0.5 on two surfaces (and 0.25 with four
surfaces), an overlay of multiple transparent
surfaces may not perfectly be sorted in depth
by the webgl API. This behavior may be improved
in the near future and is subject to change.
opacityscale
Sets the opacityscale. The opacityscale must be
an array containing arrays mapping a normalized
value to an opacity value. At minimum, a
mapping for the lowest (0) and highest (1)
values are required. For example, `[[0, 1],
[0.5, 0.2], [1, 1]]` means that higher/lower
values would have higher opacity values and
those in the middle would be more transparent
Alternatively, `opacityscale` may be a palette
name string of the following list: 'min',
'max', 'extremes' and 'uniform'. The default is
'uniform'.
reversescale
Reverses the color mapping if true. If true,
`cmin` will correspond to the last color in the
array and `cmax` will correspond to the first
color.
scene
Sets a reference between this trace's 3D
coordinate system and a 3D scene. If "scene"
(the default value), the (x,y,z) coordinates
refer to `layout.scene`. If "scene2", the
(x,y,z) coordinates refer to `layout.scene2`,
and so on.
showlegend
Determines whether or not an item corresponding
to this trace is shown in the legend.
showscale
Determines whether or not a colorbar is
displayed for this trace.
stream
:class:`plotly.graph_objects.surface.Stream`
instance or dict with compatible properties
surfacecolor
Sets the surface color values, used for setting
a color scale independent of `z`.
surfacecolorsrc
Sets the source reference on Chart Studio Cloud
for `surfacecolor`.
text
Sets the text elements associated with each z
value. If trace `hoverinfo` contains a "text"
flag and "hovertext" is not set, these elements
will be seen in the hover labels.
textsrc
Sets the source reference on Chart Studio Cloud
for `text`.
uid
Assign an id to this trace, Use this to provide
object constancy between traces during
animations and transitions.
uirevision
Controls persistence of some user-driven
changes to the trace: `constraintrange` in
`parcoords` traces, as well as some `editable:
true` modifications such as `name` and
`colorbar.title`. Defaults to
`layout.uirevision`. Note that other user-
driven trace attribute changes are controlled
by `layout` attributes: `trace.visible` is
controlled by `layout.legend.uirevision`,
`selectedpoints` is controlled by
`layout.selectionrevision`, and
`colorbar.(x|y)` (accessible with `config:
{editable: true}`) is controlled by
`layout.editrevision`. Trace changes are
tracked by `uid`, which only falls back on
trace index if no `uid` is provided. So if your
app can add/remove traces before the end of the
`data` array, such that the same trace has a
different index, you can still preserve user-
driven changes if you give each trace a `uid`
that stays with it as it moves.
visible
Determines whether or not this trace is
visible. If "legendonly", the trace is not
drawn, but can appear as a legend item
(provided that the legend itself is visible).
x
Sets the x coordinates.
xcalendar
Sets the calendar system to use with `x` date
data.
xhoverformat
Sets the hover text formatting rulefor `x`
using d3 formatting mini-languages which are
very similar to those in Python. For numbers,
see: https://github.com/d3/d3-format/tree/v1.4.
5#d3-format. And for dates see:
https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two
items to d3's date formatter: "%h" for half of
the year as a decimal number as well as "%{n}f"
for fractional seconds with n digits. For
example, *2016-10-13 09:15:23.456* with
tickformat "%H~%M~%S.%2f" would display
*09~15~23.46*By default the values are
formatted using `xaxis.hoverformat`.
xsrc
Sets the source reference on Chart Studio Cloud
for `x`.
y
Sets the y coordinates.
ycalendar
Sets the calendar system to use with `y` date
data.
yhoverformat
Sets the hover text formatting rulefor `y`
using d3 formatting mini-languages which are
very similar to those in Python. For numbers,
see: https://github.com/d3/d3-format/tree/v1.4.
5#d3-format. And for dates see:
https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two
items to d3's date formatter: "%h" for half of
the year as a decimal number as well as "%{n}f"
for fractional seconds with n digits. For
example, *2016-10-13 09:15:23.456* with
tickformat "%H~%M~%S.%2f" would display
*09~15~23.46*By default the values are
formatted using `yaxis.hoverformat`.
ysrc
Sets the source reference on Chart Studio Cloud
for `y`.
z
Sets the z coordinates.
zcalendar
Sets the calendar system to use with `z` date
data.
zhoverformat
Sets the hover text formatting rulefor `z`
using d3 formatting mini-languages which are
very similar to those in Python. For numbers,
see: https://github.com/d3/d3-format/tree/v1.4.
5#d3-format. And for dates see:
https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two
items to d3's date formatter: "%h" for half of
the year as a decimal number as well as "%{n}f"
for fractional seconds with n digits. For
example, *2016-10-13 09:15:23.456* with
tickformat "%H~%M~%S.%2f" would display
*09~15~23.46*By default the values are
formatted using `zaxis.hoverformat`.
zsrc
Sets the source reference on Chart Studio Cloud
for `z`.
""",
),
**kwargs,
)
| true | true |
f731303f5f0120e8733ff5fb197045d0cce54435 | 34 | py | Python | env/lib/python3.9/site-packages/spline/__init__.py | AdnanKhan27/nicstestbed | d3136e23fda8bd09706eb55d9a8c44ff0ad90730 | [
"MIT"
] | 30 | 2017-12-05T11:12:06.000Z | 2021-11-06T18:27:58.000Z | env/lib/python3.9/site-packages/spline/__init__.py | AdnanKhan27/nicstestbed | d3136e23fda8bd09706eb55d9a8c44ff0ad90730 | [
"MIT"
] | 112 | 2017-10-15T12:13:38.000Z | 2021-01-12T22:29:58.000Z | env/lib/python3.9/site-packages/spline/__init__.py | AdnanKhan27/nicstestbed | d3136e23fda8bd09706eb55d9a8c44ff0ad90730 | [
"MIT"
] | 6 | 2018-08-12T17:01:52.000Z | 2021-08-17T06:05:24.000Z | """Package tool."""
VERSION = 0.1
| 11.333333 | 19 | 0.588235 | VERSION = 0.1
| true | true |
f73130757b4a0554d8ee6445c089aeb67d3e3931 | 4,942 | py | Python | tensor_rl/agents/bandits/LinUCBAgentClass.py | umd-huang-lab/reinforcement-learning-via-spectral-methods | c7bd04d7eea6869807ed70af76960dcc542b0a82 | [
"MIT"
] | null | null | null | tensor_rl/agents/bandits/LinUCBAgentClass.py | umd-huang-lab/reinforcement-learning-via-spectral-methods | c7bd04d7eea6869807ed70af76960dcc542b0a82 | [
"MIT"
] | null | null | null | tensor_rl/agents/bandits/LinUCBAgentClass.py | umd-huang-lab/reinforcement-learning-via-spectral-methods | c7bd04d7eea6869807ed70af76960dcc542b0a82 | [
"MIT"
] | null | null | null | '''
Basic LinUCB implementation.
'''
# Python imports.
import numpy as np
from collections import defaultdict
# Other imports.
from tensor_rl.agents.AgentClass import Agent
class LinUCBAgent(Agent):
    '''
    Linear UCB contextual bandit agent.

    From:
        Lihong Li, et al. "A Contextual-Bandit Approach to Personalized
        News Article Recommendation." In Proceedings of the 19th
        International Conference on World Wide Web (WWW), 2010.
    '''

    def __init__(self, actions, name="LinUCB", rand_init=True, context_size=1, alpha=1.5):
        '''
        Args:
            actions (list): Contains a string for each action.
            name (str)
            rand_init (bool): If True, theta is initialized uniformly at
                random; otherwise with zeros.
            context_size (int)
            alpha (float): Uncertainty parameter.
        '''
        Agent.__init__(self, name, actions)
        self.alpha = alpha
        self.context_size = context_size
        self.prev_context = None
        self.step_number = 0
        self.rand_init = rand_init
        self._init_action_model(rand_init)

    def get_parameters(self):
        '''
        Returns:
            (dict) key=param_name (str) --> val=param_val (object).
        '''
        param_dict = defaultdict(int)

        param_dict["rand_init"] = self.rand_init
        param_dict["context_size"] = self.context_size
        param_dict["alpha"] = self.alpha

        return param_dict

    def _init_action_model(self, rand_init=True):
        '''
        Summary:
            Initializes per-action model parameters:
            act (design matrix A), act_inv (A^-1), theta (weights), b (reward vector).
        '''
        self.model = {'act': {}, 'act_inv': {}, 'theta': {}, 'b': {}}
        for action_id in range(len(self.actions)):
            self.model['act'][action_id] = np.identity(self.context_size)
            self.model['act_inv'][action_id] = np.identity(self.context_size)
            if rand_init:
                self.model['theta'][action_id] = np.random.random((self.context_size, 1))
            else:
                self.model['theta'][action_id] = np.zeros((self.context_size, 1))
            self.model['b'][action_id] = np.zeros((self.context_size, 1))

    def _compute_score(self, context):
        '''
        Args:
            context (list)

        Returns:
            (dict):
                K (str): action
                V (float): score
        '''
        a_inv = self.model['act_inv']
        theta = self.model['theta']

        estimated_reward = {}
        uncertainty = {}
        score_dict = {}
        # BUGFIX(cleanup): removed unused local `max_score = 0` present in the
        # original implementation; the maximum is computed in `act()`.
        for action_id in range(len(self.actions)):
            action_context = np.reshape(context[action_id], (-1, 1))
            # UCB score = point estimate + exploration bonus.
            estimated_reward[action_id] = float(theta[action_id].T.dot(action_context))
            uncertainty[action_id] = float(self.alpha * np.sqrt(action_context.T.dot(a_inv[action_id]).dot(action_context)))
            score_dict[action_id] = estimated_reward[action_id] + uncertainty[action_id]

        return score_dict

    def update(self, reward):
        '''
        Args:
            reward (float)

        Summary:
            Updates self.model according to self.prev_context, self.prev_action, @reward.
        '''
        action_id = self.actions.index(self.prev_action)
        action_context = np.reshape(self.prev_context[action_id], (-1, 1))
        # Rank-one update of the design matrix, then recompute its inverse
        # and the ridge-regression weights theta = A^-1 b.
        self.model['act'][action_id] += action_context.dot(action_context.T)
        self.model['act_inv'][action_id] = np.linalg.inv(self.model['act'][action_id])
        self.model['b'][action_id] += reward * action_context
        self.model['theta'][action_id] = self.model['act_inv'][action_id].dot(self.model['b'][action_id])

    def act(self, context, reward):
        '''
        Args:
            context (iterable)
            reward (float)

        Returns:
            (str): action.
        '''
        # Update previous context-action pair.
        if self.prev_action is not None:
            self.update(reward)

        # Compute score.
        context = self._pre_process_context(context)
        score = self._compute_score(context)

        # Compute best action (random fallback covers the never-taken case).
        best_action = np.random.choice(self.actions)
        max_score = float("-inf")
        for action_id in range(len(self.actions)):
            if score[action_id] > max_score:
                max_score = score[action_id]
                best_action = self.actions[action_id]

        # Update prev pointers.
        self.prev_action = best_action
        self.prev_context = context
        self.step_number += 1

        return best_action

    def _pre_process_context(self, context):
        '''Normalize @context into a dict mapping action_id -> feature vector.'''
        if context.get_num_feats() == 1:
            # If there's no context (that is, we're just in a regular bandit).
            context = context.features()

        if not hasattr(context[0], '__iter__'):
            # If we only have a single context, replicate it for every action.
            new_context = {}
            for action_id in range(len(self.actions)):
                new_context[action_id] = context
            context = new_context

        return context
import numpy as np
from collections import defaultdict
from tensor_rl.agents.AgentClass import Agent
class LinUCBAgent(Agent):
def __init__(self, actions, name="LinUCB", rand_init=True, context_size=1, alpha=1.5):
Agent.__init__(self, name, actions)
self.alpha = alpha
self.context_size = context_size
self.prev_context = None
self.step_number = 0
self.rand_init = rand_init
self._init_action_model(rand_init)
def get_parameters(self):
param_dict = defaultdict(int)
param_dict["rand_init"] = self.rand_init
param_dict["context_size"] = self.context_size
param_dict["alpha"] = self.alpha
return param_dict
def _init_action_model(self, rand_init=True):
self.model = {'act': {}, 'act_inv': {}, 'theta': {}, 'b': {}}
for action_id in range(len(self.actions)):
self.model['act'][action_id] = np.identity(self.context_size)
self.model['act_inv'][action_id] = np.identity(self.context_size)
if rand_init:
self.model['theta'][action_id] = np.random.random((self.context_size, 1))
else:
self.model['theta'][action_id] = np.zeros((self.context_size, 1))
self.model['b'][action_id] = np.zeros((self.context_size,1))
def _compute_score(self, context):
a_inv = self.model['act_inv']
theta = self.model['theta']
estimated_reward = {}
uncertainty = {}
score_dict = {}
max_score = 0
for action_id in range(len(self.actions)):
action_context = np.reshape(context[action_id], (-1, 1))
estimated_reward[action_id] = float(theta[action_id].T.dot(action_context))
uncertainty[action_id] = float(self.alpha * np.sqrt(action_context.T.dot(a_inv[action_id]).dot(action_context)))
score_dict[action_id] = estimated_reward[action_id] + uncertainty[action_id]
return score_dict
def update(self, reward):
action_id = self.actions.index(self.prev_action)
action_context = np.reshape(self.prev_context[action_id], (-1, 1))
self.model['act'][action_id] += action_context.dot(action_context.T)
self.model['act_inv'][action_id] = np.linalg.inv(self.model['act'][action_id])
self.model['b'][action_id] += reward * action_context
self.model['theta'][action_id] = self.model['act_inv'][action_id].dot(self.model['b'][action_id])
def act(self, context, reward):
if self.prev_action is not None:
self.update(reward)
context = self._pre_process_context(context)
score = self._compute_score(context)
best_action = np.random.choice(self.actions)
max_score = float("-inf")
for action_id in range(len(self.actions)):
if score[action_id] > max_score:
max_score = score[action_id]
best_action = self.actions[action_id]
self.prev_action = best_action
self.prev_context = context
self.step_number += 1
return best_action
def _pre_process_context(self, context):
if context.get_num_feats() == 1:
context = context.features()
if not hasattr(context[0], '__iter__'):
new_context = {}
for action_id in range(len(self.actions)):
new_context[action_id] = context
context = new_context
return context
| true | true |
f731324a891f6c3b18ab949c169e91cd701cd440 | 4,071 | py | Python | tests/ast_parser/analyzers/test_module_analyzer.py | FredHappyface/handsdown | 097cfd5addbed22ba8ab21d4657da24459b09667 | [
"MIT"
] | 47 | 2019-10-18T13:59:20.000Z | 2022-03-21T21:46:30.000Z | tests/ast_parser/analyzers/test_module_analyzer.py | FredHappyface/handsdown | 097cfd5addbed22ba8ab21d4657da24459b09667 | [
"MIT"
] | 15 | 2019-10-24T13:42:02.000Z | 2022-03-22T19:25:49.000Z | tests/ast_parser/analyzers/test_module_analyzer.py | FredHappyface/handsdown | 097cfd5addbed22ba8ab21d4657da24459b09667 | [
"MIT"
] | 7 | 2019-11-22T12:24:57.000Z | 2022-01-29T13:18:51.000Z | # pylint: disable=missing-docstring
import unittest
from unittest.mock import MagicMock
import handsdown.ast_parser.smart_ast as ast
from handsdown.ast_parser.analyzers.module_analyzer import ModuleAnalyzer
class TestModuleAnalyzer(unittest.TestCase):
    # Unit tests for ModuleAnalyzer: an ast.NodeVisitor that collects
    # imports, functions, classes, attributes and `__all__` names.

    def test_init(self):
        # A fresh analyzer starts with every collection empty.
        analyzer = ModuleAnalyzer()
        self.assertEqual(analyzer.all_names, [])
        self.assertEqual(analyzer.import_nodes, [])
        self.assertEqual(analyzer.function_nodes, [])
        self.assertEqual(analyzer.attribute_nodes, [])
        self.assertEqual(analyzer.class_nodes, [])

    def test_visit_Import(self):
        # `import ...` nodes are appended to import_nodes.
        analyzer = ModuleAnalyzer()
        node = "import_node"
        self.assertIsNone(analyzer.visit_Import(node))
        self.assertEqual(len(analyzer.import_nodes), 1)
        self.assertEqual(analyzer.import_nodes[0], node)

    def test_visit_ImportFrom(self):
        # `from ... import ...` nodes go to the same import_nodes list.
        analyzer = ModuleAnalyzer()
        node = "import_from_node"
        self.assertIsNone(analyzer.visit_ImportFrom(node))
        self.assertEqual(len(analyzer.import_nodes), 1)
        self.assertEqual(analyzer.import_nodes[0], "import_from_node")

    def test_visit_ClassDef(self):
        # Public classes are recorded; underscore-prefixed ones are skipped.
        analyzer = ModuleAnalyzer()
        node = MagicMock()
        node.name = "MyClass"
        self.assertIsNone(analyzer.visit_ClassDef(node))
        self.assertEqual(len(analyzer.class_nodes), 1)
        self.assertEqual(analyzer.class_nodes[0], node)

        node.name = "_PrivateClass"
        self.assertIsNone(analyzer.visit_ClassDef(node))
        self.assertEqual(len(analyzer.class_nodes), 1)

    def test_visit_FunctionDef(self):
        # Public functions are recorded; underscore-prefixed ones are skipped.
        analyzer = ModuleAnalyzer()
        node = MagicMock()
        node.name = "my_func"
        self.assertIsNone(analyzer.visit_FunctionDef(node))
        self.assertEqual(len(analyzer.function_nodes), 1)
        self.assertEqual(analyzer.function_nodes[0], node)

        node.name = "_private_func"
        self.assertIsNone(analyzer.visit_FunctionDef(node))
        self.assertEqual(len(analyzer.function_nodes), 1)

    def test_visit_AsyncFunctionDef(self):
        # Async defs follow the same public/private rule as plain defs.
        analyzer = ModuleAnalyzer()
        node = MagicMock()
        node.name = "my_func"
        self.assertIsNone(analyzer.visit_AsyncFunctionDef(node))
        self.assertEqual(len(analyzer.function_nodes), 1)
        self.assertEqual(analyzer.function_nodes[0], node)

        node.name = "_private_func"
        self.assertIsNone(analyzer.visit_AsyncFunctionDef(node))
        self.assertEqual(len(analyzer.function_nodes), 1)

    def test_visit_Assign(self):
        # Single-target public assignments are recorded once; multi-target,
        # non-Name and private targets are ignored; `__all__ = [...]` fills
        # all_names from its str/bytes elements instead.
        analyzer = ModuleAnalyzer()
        node = MagicMock()
        node.mock_add_spec(ast.Assign)
        node.value = "value"
        target = MagicMock()
        target.mock_add_spec(ast.Name)
        target.id = "attr"
        node.targets = [target]
        self.assertIsNone(analyzer.visit_Assign(node))
        self.assertEqual(len(analyzer.attribute_nodes), 1)
        self.assertEqual(analyzer.attribute_nodes[0], node)

        # multiple targets (a = b = ...) are not treated as attributes
        node.targets = [target, target]
        self.assertIsNone(analyzer.visit_Assign(node))
        self.assertEqual(len(analyzer.attribute_nodes), 1)

        # non-Name targets (subscripts, attributes) are ignored
        node.targets = ["not_name_target"]
        self.assertIsNone(analyzer.visit_Assign(node))
        self.assertEqual(len(analyzer.attribute_nodes), 1)

        # private attributes are ignored
        target.id = "_private_attr"
        node.targets = [target]
        self.assertIsNone(analyzer.visit_Assign(node))
        self.assertEqual(len(analyzer.attribute_nodes), 1)

        # __all__: str and bytes entries are collected, others skipped
        target.id = "__all__"
        node.targets = [target]
        name_1 = MagicMock()
        name_1.mock_add_spec(ast.Str)
        name_1.s = "MyClass"
        name_2 = MagicMock()
        name_2.mock_add_spec(ast.Str)
        name_2.s = b"my_func"
        value = MagicMock()
        value.mock_add_spec(ast.List)
        value.elts = [name_1, name_2, "not_name"]
        node.value = value
        self.assertIsNone(analyzer.visit_Assign(node))
        self.assertEqual(len(analyzer.attribute_nodes), 1)
        self.assertEqual(analyzer.all_names, ["MyClass", "my_func"])
| 37.348624 | 73 | 0.671825 |
import unittest
from unittest.mock import MagicMock
import handsdown.ast_parser.smart_ast as ast
from handsdown.ast_parser.analyzers.module_analyzer import ModuleAnalyzer
class TestModuleAnalyzer(unittest.TestCase):
def test_init(self):
analyzer = ModuleAnalyzer()
self.assertEqual(analyzer.all_names, [])
self.assertEqual(analyzer.import_nodes, [])
self.assertEqual(analyzer.function_nodes, [])
self.assertEqual(analyzer.attribute_nodes, [])
self.assertEqual(analyzer.class_nodes, [])
def test_visit_Import(self):
analyzer = ModuleAnalyzer()
node = "import_node"
self.assertIsNone(analyzer.visit_Import(node))
self.assertEqual(len(analyzer.import_nodes), 1)
self.assertEqual(analyzer.import_nodes[0], node)
def test_visit_ImportFrom(self):
analyzer = ModuleAnalyzer()
node = "import_from_node"
self.assertIsNone(analyzer.visit_ImportFrom(node))
self.assertEqual(len(analyzer.import_nodes), 1)
self.assertEqual(analyzer.import_nodes[0], "import_from_node")
def test_visit_ClassDef(self):
analyzer = ModuleAnalyzer()
node = MagicMock()
node.name = "MyClass"
self.assertIsNone(analyzer.visit_ClassDef(node))
self.assertEqual(len(analyzer.class_nodes), 1)
self.assertEqual(analyzer.class_nodes[0], node)
node.name = "_PrivateClass"
self.assertIsNone(analyzer.visit_ClassDef(node))
self.assertEqual(len(analyzer.class_nodes), 1)
def test_visit_FunctionDef(self):
analyzer = ModuleAnalyzer()
node = MagicMock()
node.name = "my_func"
self.assertIsNone(analyzer.visit_FunctionDef(node))
self.assertEqual(len(analyzer.function_nodes), 1)
self.assertEqual(analyzer.function_nodes[0], node)
node.name = "_private_func"
self.assertIsNone(analyzer.visit_FunctionDef(node))
self.assertEqual(len(analyzer.function_nodes), 1)
def test_visit_AsyncFunctionDef(self):
analyzer = ModuleAnalyzer()
node = MagicMock()
node.name = "my_func"
self.assertIsNone(analyzer.visit_AsyncFunctionDef(node))
self.assertEqual(len(analyzer.function_nodes), 1)
self.assertEqual(analyzer.function_nodes[0], node)
node.name = "_private_func"
self.assertIsNone(analyzer.visit_AsyncFunctionDef(node))
self.assertEqual(len(analyzer.function_nodes), 1)
def test_visit_Assign(self):
analyzer = ModuleAnalyzer()
node = MagicMock()
node.mock_add_spec(ast.Assign)
node.value = "value"
target = MagicMock()
target.mock_add_spec(ast.Name)
target.id = "attr"
node.targets = [target]
self.assertIsNone(analyzer.visit_Assign(node))
self.assertEqual(len(analyzer.attribute_nodes), 1)
self.assertEqual(analyzer.attribute_nodes[0], node)
node.targets = [target, target]
self.assertIsNone(analyzer.visit_Assign(node))
self.assertEqual(len(analyzer.attribute_nodes), 1)
node.targets = ["not_name_target"]
self.assertIsNone(analyzer.visit_Assign(node))
self.assertEqual(len(analyzer.attribute_nodes), 1)
target.id = "_private_attr"
node.targets = [target]
self.assertIsNone(analyzer.visit_Assign(node))
self.assertEqual(len(analyzer.attribute_nodes), 1)
target.id = "__all__"
node.targets = [target]
name_1 = MagicMock()
name_1.mock_add_spec(ast.Str)
name_1.s = "MyClass"
name_2 = MagicMock()
name_2.mock_add_spec(ast.Str)
name_2.s = b"my_func"
value = MagicMock()
value.mock_add_spec(ast.List)
value.elts = [name_1, name_2, "not_name"]
node.value = value
self.assertIsNone(analyzer.visit_Assign(node))
self.assertEqual(len(analyzer.attribute_nodes), 1)
self.assertEqual(analyzer.all_names, ["MyClass", "my_func"])
| true | true |
f73132b3321a9a9b53f0f68a1273c7a905986331 | 18,983 | py | Python | test/generic/pointers/test_pointer_tensor.py | harshkasyap/PySyft | 4575a50f38b78728dafe2615aad9145dae17b085 | [
"Apache-2.0"
] | null | null | null | test/generic/pointers/test_pointer_tensor.py | harshkasyap/PySyft | 4575a50f38b78728dafe2615aad9145dae17b085 | [
"Apache-2.0"
] | null | null | null | test/generic/pointers/test_pointer_tensor.py | harshkasyap/PySyft | 4575a50f38b78728dafe2615aad9145dae17b085 | [
"Apache-2.0"
] | null | null | null | import torch
import torch as th
import syft
from syft.frameworks.torch.tensors.interpreters.additive_shared import AdditiveSharingTensor
from syft.frameworks.torch.tensors.interpreters.precision import FixedPrecisionTensor
from syft.generic.pointers.pointer_tensor import PointerTensor
import pytest
def test_init(workers):
    """A PointerTensor can be built directly and rendered as a string."""
    alice, me = workers["alice"], workers["me"]
    ptr = PointerTensor(id=1000, location=alice, owner=me)
    str(ptr)
def test_create_pointer():
    """Creating a pointer from a plain local tensor must not raise."""
    torch.Tensor([1, 2]).create_pointer()
def test_send_default_garbage_collector_true(workers):
    """
    Remote tensor should be garbage collected by default on
    deletion of the Pointer tensor pointing to remote tensor
    """
    alice = workers["alice"]

    ptr = torch.Tensor([-1, 2]).send(alice)
    assert ptr.child.garbage_collect_data
def test_send_garbage_collect_data_false(workers):
    """
    Remote tensor should be not garbage collected on
    deletion of the Pointer tensor pointing to remote tensor
    """
    alice = workers["alice"]

    ptr = torch.Tensor([-1, 2]).send(alice)
    ptr.garbage_collection = False
    assert ptr.child.garbage_collect_data is False
def test_send_gc_false(workers):
    """
    Remote tensor should be not garbage collected on
    deletion of the Pointer tensor pointing to remote tensor
    """
    alice = workers["alice"]

    ptr = torch.Tensor([-1, 2]).send(alice)
    ptr.gc = False

    # all three views of the flag must agree
    assert ptr.child.garbage_collect_data is False
    assert ptr.gc is False, "property GC is not in sync"
    assert ptr.garbage_collection is False, "property garbage_collection is not in sync"
def test_send_gc_true(workers):
    """
    Remote tensor by default is garbage collected on
    deletion of Pointer Tensor
    """
    alice = workers["alice"]

    ptr = torch.Tensor([-1, 2]).send(alice)
    assert ptr.gc
def test_send_disable_gc(workers):
    """Pointer tensor should be not garbage collected."""
    alice = workers["alice"]

    ptr = torch.Tensor([-1, 2]).send(alice).disable_gc

    # .disable_gc must clear every garbage-collection flag at once
    assert ptr.child.garbage_collect_data is False
    assert ptr.gc is False, "property GC is not in sync"
    assert ptr.garbage_collection is False, "property garbage_collection is not in sync"
def test_send_get(workers):
    """Test several send get usages

    Covers a simple round trip, variable overwriting, and double sends
    (explicit and chained) through two workers; every .get() must return
    the original values.
    """
    bob = workers["bob"]
    alice = workers["alice"]

    # simple send
    x = torch.Tensor([1, 2])
    x_ptr = x.send(bob)
    x_back = x_ptr.get()
    assert (x == x_back).all()

    # send with variable overwriting
    x = torch.Tensor([1, 2])
    x = x.send(bob)
    x_back = x.get()
    assert (torch.Tensor([1, 2]) == x_back).all()

    # double send: first .get() returns the inner pointer, second the data
    x = torch.Tensor([1, 2])
    x_ptr = x.send(bob)
    x_ptr_ptr = x_ptr.send(alice)
    x_ptr_back = x_ptr_ptr.get()
    x_back_back = x_ptr_back.get()
    assert (x == x_back_back).all()

    # double send with variable overwriting
    x = torch.Tensor([1, 2])
    x = x.send(bob)
    x = x.send(alice)
    x = x.get()
    x_back = x.get()
    assert (torch.Tensor([1, 2]) == x_back).all()

    # chained double send
    x = torch.Tensor([1, 2])
    x = x.send(bob).send(alice)
    x_back = x.get().get()
    assert (torch.Tensor([1, 2]) == x_back).all()
def test_inplace_send_get(workers):
    """In-place send_/get_ must preserve both the syft id and the Python
    object identity across the round trip (no new wrapper is allocated)."""
    bob = workers["bob"]

    tensor = torch.tensor([1.0, -1.0, 3.0, 4.0])
    tensor_ptr = tensor.send_(bob)

    assert tensor_ptr.id == tensor.id
    assert id(tensor_ptr) == id(tensor)

    tensor_back = tensor_ptr.get_()

    assert tensor_back.id == tensor_ptr.id
    assert tensor_back.id == tensor.id
    assert id(tensor_back) == id(tensor)
    # NOTE(review): the assertion below duplicates the previous line.
    assert id(tensor_back) == id(tensor)
    assert (tensor_back == tensor).all()
def test_repeated_send(workers):
    """Tests that repeated calls to .send(bob) works gracefully.
    Previously garbage collection deleted the remote object
    when .send() was called twice. This test ensures the fix still
    works."""
    bob = workers["bob"]

    # create tensor
    x = torch.Tensor([1, 2])

    # send tensor to bob
    x_ptr = x.send(bob)

    # send tensor again (the first pointer is dropped here; its GC must
    # not delete the freshly re-sent remote object)
    x_ptr = x.send(bob)

    # ensure bob has tensor
    assert x.id in bob.object_store._objects
def test_remote_autograd(workers):
    """Tests the ability to backpropagate gradients on a remote
    worker."""
    bob = workers["bob"]

    # TEST: simple remote grad calculation

    # create a tensor
    x = torch.tensor([1, 2, 3, 4.0], requires_grad=True)

    # send tensor to bob
    x = x.send(bob)

    # do some calculation
    y = (x + x).sum()

    # backpropagate on remote machine
    y.backward()

    # check that remote gradient is correct: d(sum(x + x))/dx == 2
    x_grad = bob.object_store.get_obj(x.id_at_location).grad
    x_grad_target = torch.ones(4).float() + 1
    assert (x_grad == x_grad_target).all()

    # TEST: Ensure remote grad calculation gets properly serded

    # create tensor
    x = torch.tensor([1, 2, 3, 4.0], requires_grad=True).send(bob)

    # compute function
    y = x.sum()

    # backpropagate
    y.backward()

    # get the gradient created from backpropagation manually
    x_grad = bob.object_store.get_obj(x.id_at_location).grad

    # get the entire x tensor (should bring the grad too)
    x = x.get()

    # make sure that the grads match
    assert (x.grad == x_grad).all()
def test_gradient_send_recv(workers):
    """Tests that gradients are properly sent and received along
    with their tensors."""
    bob = workers["bob"]

    # create a tensor
    x = torch.tensor([1, 2, 3, 4.0], requires_grad=True)

    # create gradient on tensor (seed the backward pass with 1.0)
    x.sum().backward(th.tensor(1.0))

    # save gradient
    orig_grad = x.grad

    # send and get back
    t = x.send(bob).get()

    # check that gradient was properly serde
    assert (t.grad == orig_grad).all()
def test_method_on_attribute(workers):
bob = workers["bob"]
# create remote object with children
x = torch.Tensor([1, 2, 3])
x = syft.LoggingTensor().on(x).send(bob)
# call method on data tensor directly
x.child.point_to_attr = "child.child"
y = x.add(x)
assert isinstance(y.get(), torch.Tensor)
# call method on loggingtensor directly
x.child.point_to_attr = "child"
y = x.add(x)
y = y.get()
assert isinstance(y.child, syft.LoggingTensor)
# # call method on zeroth attribute
# x.child.point_to_attr = ""
# y = x.add(x)
# y = y.get()
#
# assert isinstance(y, torch.Tensor)
# assert isinstance(y.child, syft.LoggingTensor)
# assert isinstance(y.child.child, torch.Tensor)
# call .get() on pinter to attribute (should error)
x.child.point_to_attr = "child"
try:
x.get()
except syft.exceptions.CannotRequestObjectAttribute as e:
assert True
def test_grad_pointer(workers):
    """Tests the automatic creation of a .grad pointer when
    calling .send() on a tensor with requires_grad==True"""
    bob = workers["bob"]

    ptr = torch.tensor([1, 2, 3.0], requires_grad=True).send(bob)
    loss = (ptr + ptr).sum()
    loss.backward()

    # d(sum(x + x))/dx == 2 for every element
    remote_grad = bob.object_store.get_obj(ptr.id_at_location).grad
    assert (remote_grad == torch.tensor([2, 2, 2.0])).all()
def test_move(workers):
alice, bob, james, me = workers["alice"], workers["bob"], workers["james"], workers["me"]
x = torch.tensor([1, 2, 3, 4, 5]).send(bob)
assert x.id_at_location in bob.object_store._objects
assert x.id_at_location not in alice.object_store._objects
p = x.move(alice)
assert x.id_at_location not in bob.object_store._objects
assert x.id_at_location in alice.object_store._objects
x = torch.tensor([1.0, 2, 3, 4, 5], requires_grad=True).send(bob)
assert x.id_at_location in bob.object_store._objects
assert x.id_at_location not in alice.object_store._objects
p = x.move(alice)
assert x.id_at_location not in bob.object_store._objects
assert x.id_at_location in alice.object_store._objects
alice.clear_objects()
bob.clear_objects()
x = torch.tensor([1.0, 2, 3, 4, 5]).send(bob)
p = x.move(alice)
assert len(alice.object_store._tensors) == 1
# Test .move on remote objects
james.clear_objects()
x = th.tensor([1.0]).send(james)
remote_x = james.object_store.get_obj(x.id_at_location)
remote_ptr = remote_x.send(bob)
assert remote_ptr.id in james.object_store._objects.keys()
remote_ptr2 = remote_ptr.move(alice)
assert remote_ptr2.id in james.object_store._objects.keys()
# Test .move back to myself
alice.clear_objects()
bob.clear_objects()
t = torch.tensor([1.0, 2, 3, 4, 5])
x = t.send(bob)
y = x.move(alice)
z = y.move(me)
assert (z == t).all()
# Move object to same location
alice.clear_objects()
t = torch.tensor([1.0, 2, 3, 4, 5]).send(bob)
t = t.move(bob)
assert torch.all(torch.eq(t.get(), torch.tensor([1.0, 2, 3, 4, 5])))
def test_combine_pointers(workers):
    """
    Ensure that the sy.combine_pointers works as expected
    """

    bob = workers["bob"]
    alice = workers["alice"]

    x = th.tensor([1, 2, 3, 4, 5]).send(bob)
    y = th.tensor([1, 2, 3, 4, 5]).send(alice)

    a = x.combine(y)
    b = a + a

    # gathering with sum_results=True adds both workers' results together
    c = b.get(sum_results=True)
    assert (c == th.tensor([4, 8, 12, 16, 20])).all()

    b = a + a
    c = b.get(sum_results=False)
    assert len(c) == 2
    # BUGFIX: the original asserted `(...).all` without calling it — a bound
    # method is always truthy, so the check was a no-op.
    assert (c[0] == th.tensor([2, 4, 6, 8, 10])).all()
    assert (c[1] == th.tensor([2, 4, 6, 8, 10])).all()
def test_remote_to_cpu_device(workers):
    """Ensure remote .to cpu works"""
    bob = workers["bob"]

    ptr = th.tensor([1, 2, 3, 4, 5]).send(bob)
    ptr.to(torch.device("cpu"))
def test_get_remote_shape(workers):
    """Test pointer.shape functionality"""
    bob = workers["bob"]

    # tensor directly sent: shape stored at sending
    ptr = th.tensor([1, 2, 3, 4, 5]).send(bob)
    assert ptr.shape == torch.Size([5])

    # result of an operation: need to make a call to the remote worker
    result = ptr + ptr
    assert result.shape == torch.Size([5])
def test_get_remote_ndim(workers):
    """Test pointer.ndim functionality"""
    bob = workers["bob"]

    remote = th.rand(2, 3, 4).send(bob)
    assert remote.ndim == 3
def test_remote_T(workers):
    """Test pointer.T functionality"""
    bob = workers["bob"]

    local = th.rand(2, 3, 4)
    remote = local.send(bob)
    remote_t = remote.T

    # the transpose is a new remote object; the original is untouched
    assert remote.shape == torch.Size([2, 3, 4])
    assert remote_t.shape == torch.Size([4, 3, 2])
    assert (remote.get() == local).all()
    assert (remote_t.get() == local.T).all()
def test_remote_function_with_multi_ouput(workers):
    """
    Functions like .split return several tensors, registration and response
    must be made carefully in this case

    NOTE(review): the function name has a typo ("ouput"); kept as-is so
    existing `pytest -k` selections keep matching.
    """
    bob = workers["bob"]

    # torch.split returns a tuple of pointers
    tensor = torch.tensor([1, 2, 3, 4.0])
    ptr = tensor.send(bob)
    r_ptr = torch.split(ptr, 2)
    assert (r_ptr[0].get() == torch.tensor([1, 2.0])).all()

    # torch.max with a dim returns a (values, indices) pair of pointers
    tensor = torch.tensor([1, 2, 3, 4.0])
    ptr = tensor.send(bob)
    max_value, argmax_idx = torch.max(ptr, 0)

    assert max_value.get().item() == 4.0
    assert argmax_idx.get().item() == 3
def test_inplace_binary_method_with_non_pointers(workers):
    """Under very specific conditions, ie inplace methods containing a
    single argument which is a Tensor, we allow automatic sending of
    this tensor. This is helpful to facilitate utilizing python code
    of other library for remote execution"""
    alice = workers["alice"]

    remote = th.tensor([1.0, 2]).send(alice)
    local = th.tensor([1.0, 1])
    remote += local
    assert (remote.get() == th.tensor([2.0, 3])).all()
def test_raising_error_when_item_func_called(workers):
    """.item() cannot be evaluated locally on a pointer and must raise."""
    ptr = PointerTensor(id=1000, location=workers["alice"], owner=workers["me"])
    with pytest.raises(RuntimeError):
        ptr.item()
def test_fix_prec_on_pointer_tensor(workers):
    """
    Ensure .fix_precision() works as expected.
    Also check that fix_precision() is not inplace.
    """
    bob = workers["bob"]

    tensor = torch.tensor([1, 2, 3, 4.0])
    ptr = tensor.send(bob)

    # fixing precision through the pointer creates a new remote object
    ptr_fp = ptr.fix_precision()

    remote_tensor = bob.object_store.get_obj(ptr.id_at_location)
    remote_fp_tensor = bob.object_store.get_obj(ptr_fp.id_at_location)

    # check that fix_precision is not inplace
    assert (remote_tensor == tensor).all()

    assert isinstance(ptr.child, PointerTensor)
    assert isinstance(remote_fp_tensor.child, FixedPrecisionTensor)
def test_fix_prec_on_pointer_of_pointer(workers):
    """
    Ensure .fix_precision() works along a chain of pointers.
    """
    bob = workers["bob"]
    alice = workers["alice"]

    tensor = torch.tensor([1, 2, 3, 4.0])
    ptr = tensor.send(bob)
    ptr = ptr.send(alice)

    ptr = ptr.fix_precision()

    # the fix_precision call is forwarded through alice down to bob
    alice_tensor = alice.object_store.get_obj(ptr.id_at_location)
    remote_tensor = bob.object_store.get_obj(alice_tensor.id_at_location)

    assert isinstance(ptr.child, PointerTensor)
    assert isinstance(remote_tensor.child, FixedPrecisionTensor)
def test_float_prec_on_pointer_tensor(workers):
    """
    Ensure .float_precision() works as expected.
    """
    bob = workers["bob"]

    tensor = torch.tensor([1, 2, 3, 4.0])
    ptr = tensor.send(bob)

    # fix then undo: the remote object must be a plain tensor again
    ptr = ptr.fix_precision()
    ptr = ptr.float_precision()
    remote_tensor = bob.object_store.get_obj(ptr.id_at_location)

    assert isinstance(ptr.child, PointerTensor)
    assert isinstance(remote_tensor, torch.Tensor)
def test_float_prec_on_pointer_of_pointer(workers):
    """
    Ensure .float_precision() works along a chain of pointers.
    """
    bob = workers["bob"]
    alice = workers["alice"]

    tensor = torch.tensor([1, 2, 3, 4.0])
    ptr = tensor.send(bob)
    ptr = ptr.send(alice)

    # fix then undo through the pointer chain
    ptr = ptr.fix_precision()
    ptr = ptr.float_precision()

    alice_tensor = alice.object_store.get_obj(ptr.id_at_location)
    remote_tensor = bob.object_store.get_obj(alice_tensor.id_at_location)

    assert isinstance(ptr.child, PointerTensor)
    assert isinstance(remote_tensor, torch.Tensor)
def test_share_get(workers):
    """
    Ensure .share() works as expected.
    """
    bob = workers["bob"]
    alice = workers["alice"]
    charlie = workers["charlie"]

    tensor = torch.tensor([1, 2, 3])
    ptr = tensor.send(bob)

    # sharing through the pointer: bob's object becomes additively shared
    # between charlie and alice
    ptr = ptr.share(charlie, alice)
    remote_tensor = bob.object_store.get_obj(ptr.id_at_location)

    assert isinstance(ptr.child, PointerTensor)
    assert isinstance(remote_tensor.child, AdditiveSharingTensor)
def test_registration_of_action_on_pointer_of_pointer(workers):
    """
    Ensure actions along a chain of pointers are registered as expected.
    """
    bob = workers["bob"]
    alice = workers["alice"]

    tensor = torch.tensor([1, 2, 3, 4.0])
    ptr = tensor.send(bob)
    ptr = ptr.send(alice)
    ptr_action = ptr + ptr

    # each worker holds the original object plus the addition result
    assert len(alice.object_store._tensors) == 2
    assert len(bob.object_store._tensors) == 2
def test_setting_back_grad_to_origin_after_send(workers):
    """
    Calling .backward() on a tensor sent using `.send(..., requires_grad=True)`
    should update the origin tensor gradient
    """
    me = workers["me"]
    alice = workers["alice"]

    with me.registration_enabled():
        x = th.tensor([1.0, 2.0, 3, 4, 5], requires_grad=True)
        y = x + x
        me.register_obj(y)  # registration on the local worker is sometimes buggy

        y_ptr = y.send(alice, requires_grad=True)
        z_ptr = y_ptr * 2

        # z = sum(4 * x), so dz/dx == 4 everywhere
        z = z_ptr.sum()
        z.backward()

        assert (x.grad == th.tensor([4.0, 4.0, 4.0, 4.0, 4.0])).all()
def test_setting_back_grad_to_origin_after_move(workers):
    """
    Calling .backward() on a tensor moved using `.move(..., requires_grad=True)`
    should update the origin tensor gradient
    """
    me = workers["me"]
    bob = workers["bob"]
    alice = workers["alice"]

    with me.registration_enabled():
        x = th.tensor([1.0, 2.0, 3, 4, 5], requires_grad=True)
        y = x + x
        me.register_obj(y)  # registration on the local worker is sometimes buggy

        y_ptr = y.send(alice, requires_grad=True)
        z_ptr = y_ptr * 2

        # move the intermediate result to a second worker before backward
        z_ptr2 = z_ptr.move(bob, requires_grad=True)
        # z = sum(4 * x), so dz/dx == 4 everywhere
        z = z_ptr2.sum()
        z.backward()

        assert (x.grad == th.tensor([4.0, 4.0, 4.0, 4.0, 4.0])).all()
def test_remote_grad_fn(workers):
"""
Test that grad_fn can be accessed remotely
"""
alice = workers["alice"]
t = th.tensor([1.0, 1], requires_grad=True)
p = t.sum()
p.backward()
expected_type = type(p.grad_fn)
x = th.tensor([1.0, 1], requires_grad=True).send(alice)
p = x.sum()
p.backward()
p_grad_fn = p.child.grad_fn.child
assert isinstance(p_grad_fn, syft.PointerTensor)
remote_grad_fn = alice._objects[p_grad_fn.id_at_location]
assert type(remote_grad_fn.grad_fn) == expected_type
def test_iadd(workers):
    """In-place addition between two pointers leaves exactly the two
    remote tensors registered on the worker (no extra copies)."""
    alice = workers["alice"]
    left = torch.ones(1, 5).send(alice)
    right = torch.ones(1, 5).send(alice)
    right += left
    assert len(alice.object_store._objects) == 2
def test_inplace_ops_on_remote_long_tensor(workers):
alice = workers["alice"]
t = torch.LongTensor([2])
p = t.send_(alice) * 2
p.get_()
assert p == torch.LongTensor([4])
def test_iterable_pointer(workers):
    """Iterating a pointer tensor yields per-row PointerTensors, and the
    temporary remote rows are garbage collected once the local references
    go away."""
    alice = workers["alice"]
    t = torch.Tensor([[1, 2], [4, 5], [7, 8]])
    p = t.send(alice)
    # Only the sent tensor lives on alice
    assert len(alice.object_store) == 1
    for idx, tensor in enumerate(p):
        # Sent tensor + the single row currently being iterated
        assert len(alice.object_store) == 2
        assert isinstance(tensor, PointerTensor)
        assert torch.all(tensor.get() == t[idx])
    # Loop temporaries collected -> back to just the sent tensor
    assert len(alice.object_store) == 1
    l = []
    for idx, tensor in enumerate(p):
        l.append(tensor)
    # Sent tensor + all three rows kept alive through l
    assert len(alice.object_store) == 4
    del l
    del tensor
    assert len(alice.object_store) == 1
    for idx, tensor in enumerate(p[:, 1]):
        # Should be 3 because p[:, 1] will create another tensor on alice side
        assert len(alice.object_store) == 3
        assert isinstance(tensor, PointerTensor)
        assert torch.all(tensor.get() == t[:, 1][idx])
def test_register_hook_on_remote_tensor_or_modules(workers):
alice = workers["alice"]
# we need to set a storage object on the local worker
with syft.local_worker.registration_enabled():
## Tensor hook
flag = []
def hook_function(inputs, outputs):
flag.append(True) # pragma: no cover
p = th.tensor([1.0, 2], requires_grad=True).send(alice)
p.register_hook(hook_function)
assert len(flag) == 0
p.sum().backward()
assert len(flag) == 1
## Module hook
flag = []
def hook_function(model, inputs, outputs):
flag.append(True) # pragma: no cover
x = th.tensor([1.0, 2])
model = torch.nn.Linear(2, 1)
model.register_backward_hook(hook_function)
loss = model(x)
assert len(flag) == 0
loss.backward()
assert len(flag) == 1
| 26.661517 | 95 | 0.645788 | import torch
import torch as th
import syft
from syft.frameworks.torch.tensors.interpreters.additive_shared import AdditiveSharingTensor
from syft.frameworks.torch.tensors.interpreters.precision import FixedPrecisionTensor
from syft.generic.pointers.pointer_tensor import PointerTensor
import pytest
def test_init(workers):
alice, me = workers["alice"], workers["me"]
pointer = PointerTensor(id=1000, location=alice, owner=me)
pointer.__str__()
def test_create_pointer():
x = torch.Tensor([1, 2])
x.create_pointer()
def test_send_default_garbage_collector_true(workers):
alice = workers["alice"]
x = torch.Tensor([-1, 2])
x_ptr = x.send(alice)
assert x_ptr.child.garbage_collect_data
def test_send_garbage_collect_data_false(workers):
alice = workers["alice"]
x = torch.Tensor([-1, 2])
x_ptr = x.send(alice)
x_ptr.garbage_collection = False
assert x_ptr.child.garbage_collect_data is False
def test_send_gc_false(workers):
alice = workers["alice"]
x = torch.Tensor([-1, 2])
x_ptr = x.send(alice)
x_ptr.gc = False
assert x_ptr.child.garbage_collect_data is False
assert x_ptr.gc is False, "property GC is not in sync"
assert x_ptr.garbage_collection is False, "property garbage_collection is not in sync"
def test_send_gc_true(workers):
alice = workers["alice"]
x = torch.Tensor([-1, 2])
x_ptr = x.send(alice)
assert x_ptr.gc
def test_send_disable_gc(workers):
alice = workers["alice"]
x = torch.Tensor([-1, 2])
x_ptr = x.send(alice).disable_gc
assert x_ptr.child.garbage_collect_data is False
assert x_ptr.gc is False, "property GC is not in sync"
assert x_ptr.garbage_collection is False, "property garbage_collection is not in sync"
def test_send_get(workers):
bob = workers["bob"]
alice = workers["alice"]
x = torch.Tensor([1, 2])
x_ptr = x.send(bob)
x_back = x_ptr.get()
assert (x == x_back).all()
x = torch.Tensor([1, 2])
x = x.send(bob)
x_back = x.get()
assert (torch.Tensor([1, 2]) == x_back).all()
x = torch.Tensor([1, 2])
x_ptr = x.send(bob)
x_ptr_ptr = x_ptr.send(alice)
x_ptr_back = x_ptr_ptr.get()
x_back_back = x_ptr_back.get()
assert (x == x_back_back).all()
x = torch.Tensor([1, 2])
x = x.send(bob)
x = x.send(alice)
x = x.get()
x_back = x.get()
assert (torch.Tensor([1, 2]) == x_back).all()
x = torch.Tensor([1, 2])
x = x.send(bob).send(alice)
x_back = x.get().get()
assert (torch.Tensor([1, 2]) == x_back).all()
def test_inplace_send_get(workers):
bob = workers["bob"]
tensor = torch.tensor([1.0, -1.0, 3.0, 4.0])
tensor_ptr = tensor.send_(bob)
assert tensor_ptr.id == tensor.id
assert id(tensor_ptr) == id(tensor)
tensor_back = tensor_ptr.get_()
assert tensor_back.id == tensor_ptr.id
assert tensor_back.id == tensor.id
assert id(tensor_back) == id(tensor)
assert id(tensor_back) == id(tensor)
assert (tensor_back == tensor).all()
def test_repeated_send(workers):
bob = workers["bob"]
x = torch.Tensor([1, 2])
x_ptr = x.send(bob)
x_ptr = x.send(bob)
assert x.id in bob.object_store._objects
def test_remote_autograd(workers):
bob = workers["bob"]
x = torch.tensor([1, 2, 3, 4.0], requires_grad=True)
x = x.send(bob)
y = (x + x).sum()
y.backward()
x_grad = bob.object_store.get_obj(x.id_at_location).grad
x_grad_target = torch.ones(4).float() + 1
assert (x_grad == x_grad_target).all()
x = torch.tensor([1, 2, 3, 4.0], requires_grad=True).send(bob)
y = x.sum()
y.backward()
x_grad = bob.object_store.get_obj(x.id_at_location).grad
x = x.get()
assert (x.grad == x_grad).all()
def test_gradient_send_recv(workers):
bob = workers["bob"]
x = torch.tensor([1, 2, 3, 4.0], requires_grad=True)
x.sum().backward(th.tensor(1.0))
orig_grad = x.grad
t = x.send(bob).get()
assert (t.grad == orig_grad).all()
def test_method_on_attribute(workers):
bob = workers["bob"]
x = torch.Tensor([1, 2, 3])
x = syft.LoggingTensor().on(x).send(bob)
x.child.point_to_attr = "child.child"
y = x.add(x)
assert isinstance(y.get(), torch.Tensor)
x.child.point_to_attr = "child"
y = x.add(x)
y = y.get()
assert isinstance(y.child, syft.LoggingTensor)
x.child.point_to_attr = "child"
try:
x.get()
except syft.exceptions.CannotRequestObjectAttribute as e:
assert True
def test_grad_pointer(workers):
bob = workers["bob"]
x = torch.tensor([1, 2, 3.0], requires_grad=True).send(bob)
y = (x + x).sum()
y.backward()
assert (bob.object_store.get_obj(x.id_at_location).grad == torch.tensor([2, 2, 2.0])).all()
def test_move(workers):
alice, bob, james, me = workers["alice"], workers["bob"], workers["james"], workers["me"]
x = torch.tensor([1, 2, 3, 4, 5]).send(bob)
assert x.id_at_location in bob.object_store._objects
assert x.id_at_location not in alice.object_store._objects
p = x.move(alice)
assert x.id_at_location not in bob.object_store._objects
assert x.id_at_location in alice.object_store._objects
x = torch.tensor([1.0, 2, 3, 4, 5], requires_grad=True).send(bob)
assert x.id_at_location in bob.object_store._objects
assert x.id_at_location not in alice.object_store._objects
p = x.move(alice)
assert x.id_at_location not in bob.object_store._objects
assert x.id_at_location in alice.object_store._objects
alice.clear_objects()
bob.clear_objects()
x = torch.tensor([1.0, 2, 3, 4, 5]).send(bob)
p = x.move(alice)
assert len(alice.object_store._tensors) == 1
james.clear_objects()
x = th.tensor([1.0]).send(james)
remote_x = james.object_store.get_obj(x.id_at_location)
remote_ptr = remote_x.send(bob)
assert remote_ptr.id in james.object_store._objects.keys()
remote_ptr2 = remote_ptr.move(alice)
assert remote_ptr2.id in james.object_store._objects.keys()
alice.clear_objects()
bob.clear_objects()
t = torch.tensor([1.0, 2, 3, 4, 5])
x = t.send(bob)
y = x.move(alice)
z = y.move(me)
assert (z == t).all()
alice.clear_objects()
t = torch.tensor([1.0, 2, 3, 4, 5]).send(bob)
t = t.move(bob)
assert torch.all(torch.eq(t.get(), torch.tensor([1.0, 2, 3, 4, 5])))
def test_combine_pointers(workers):
bob = workers["bob"]
alice = workers["alice"]
x = th.tensor([1, 2, 3, 4, 5]).send(bob)
y = th.tensor([1, 2, 3, 4, 5]).send(alice)
a = x.combine(y)
b = a + a
c = b.get(sum_results=True)
assert (c == th.tensor([4, 8, 12, 16, 20])).all()
b = a + a
c = b.get(sum_results=False)
assert len(c) == 2
assert (c[0] == th.tensor([2, 4, 6, 8, 10])).all
def test_remote_to_cpu_device(workers):
device = torch.device("cpu")
bob = workers["bob"]
x = th.tensor([1, 2, 3, 4, 5]).send(bob)
x.to(device)
def test_get_remote_shape(workers):
bob = workers["bob"]
x = th.tensor([1, 2, 3, 4, 5]).send(bob)
assert x.shape == torch.Size([5])
y = x + x
assert y.shape == torch.Size([5])
def test_get_remote_ndim(workers):
bob = workers["bob"]
x = th.rand(2, 3, 4).send(bob)
assert x.ndim == 3
def test_remote_T(workers):
bob = workers["bob"]
x = th.rand(2, 3, 4)
bob_x = x.send(bob)
bob_xT = bob_x.T
assert bob_x.shape == torch.Size([2, 3, 4])
assert bob_xT.shape == torch.Size([4, 3, 2])
assert (bob_x.get() == x).all()
assert (bob_xT.get() == x.T).all()
def test_remote_function_with_multi_ouput(workers):
bob = workers["bob"]
tensor = torch.tensor([1, 2, 3, 4.0])
ptr = tensor.send(bob)
r_ptr = torch.split(ptr, 2)
assert (r_ptr[0].get() == torch.tensor([1, 2.0])).all()
tensor = torch.tensor([1, 2, 3, 4.0])
ptr = tensor.send(bob)
max_value, argmax_idx = torch.max(ptr, 0)
assert max_value.get().item() == 4.0
assert argmax_idx.get().item() == 3
def test_inplace_binary_method_with_non_pointers(workers):
alice = workers["alice"]
p = th.tensor([1.0, 2]).send(alice)
x = th.tensor([1.0, 1])
p += x
assert (p.get() == th.tensor([2.0, 3])).all()
def test_raising_error_when_item_func_called(workers):
pointer = PointerTensor(id=1000, location=workers["alice"], owner=workers["me"])
with pytest.raises(RuntimeError):
pointer.item()
def test_fix_prec_on_pointer_tensor(workers):
bob = workers["bob"]
tensor = torch.tensor([1, 2, 3, 4.0])
ptr = tensor.send(bob)
ptr_fp = ptr.fix_precision()
remote_tensor = bob.object_store.get_obj(ptr.id_at_location)
remote_fp_tensor = bob.object_store.get_obj(ptr_fp.id_at_location)
assert (remote_tensor == tensor).all()
assert isinstance(ptr.child, PointerTensor)
assert isinstance(remote_fp_tensor.child, FixedPrecisionTensor)
def test_fix_prec_on_pointer_of_pointer(workers):
bob = workers["bob"]
alice = workers["alice"]
tensor = torch.tensor([1, 2, 3, 4.0])
ptr = tensor.send(bob)
ptr = ptr.send(alice)
ptr = ptr.fix_precision()
alice_tensor = alice.object_store.get_obj(ptr.id_at_location)
remote_tensor = bob.object_store.get_obj(alice_tensor.id_at_location)
assert isinstance(ptr.child, PointerTensor)
assert isinstance(remote_tensor.child, FixedPrecisionTensor)
def test_float_prec_on_pointer_tensor(workers):
bob = workers["bob"]
tensor = torch.tensor([1, 2, 3, 4.0])
ptr = tensor.send(bob)
ptr = ptr.fix_precision()
ptr = ptr.float_precision()
remote_tensor = bob.object_store.get_obj(ptr.id_at_location)
assert isinstance(ptr.child, PointerTensor)
assert isinstance(remote_tensor, torch.Tensor)
def test_float_prec_on_pointer_of_pointer(workers):
bob = workers["bob"]
alice = workers["alice"]
tensor = torch.tensor([1, 2, 3, 4.0])
ptr = tensor.send(bob)
ptr = ptr.send(alice)
ptr = ptr.fix_precision()
ptr = ptr.float_precision()
alice_tensor = alice.object_store.get_obj(ptr.id_at_location)
remote_tensor = bob.object_store.get_obj(alice_tensor.id_at_location)
assert isinstance(ptr.child, PointerTensor)
assert isinstance(remote_tensor, torch.Tensor)
def test_share_get(workers):
bob = workers["bob"]
alice = workers["alice"]
charlie = workers["charlie"]
tensor = torch.tensor([1, 2, 3])
ptr = tensor.send(bob)
ptr = ptr.share(charlie, alice)
remote_tensor = bob.object_store.get_obj(ptr.id_at_location)
assert isinstance(ptr.child, PointerTensor)
assert isinstance(remote_tensor.child, AdditiveSharingTensor)
def test_registration_of_action_on_pointer_of_pointer(workers):
bob = workers["bob"]
alice = workers["alice"]
tensor = torch.tensor([1, 2, 3, 4.0])
ptr = tensor.send(bob)
ptr = ptr.send(alice)
ptr_action = ptr + ptr
assert len(alice.object_store._tensors) == 2
assert len(bob.object_store._tensors) == 2
def test_setting_back_grad_to_origin_after_send(workers):
me = workers["me"]
alice = workers["alice"]
with me.registration_enabled():
x = th.tensor([1.0, 2.0, 3, 4, 5], requires_grad=True)
y = x + x
me.register_obj(y)
y_ptr = y.send(alice, requires_grad=True)
z_ptr = y_ptr * 2
z = z_ptr.sum()
z.backward()
assert (x.grad == th.tensor([4.0, 4.0, 4.0, 4.0, 4.0])).all()
def test_setting_back_grad_to_origin_after_move(workers):
me = workers["me"]
bob = workers["bob"]
alice = workers["alice"]
with me.registration_enabled():
x = th.tensor([1.0, 2.0, 3, 4, 5], requires_grad=True)
y = x + x
me.register_obj(y)
y_ptr = y.send(alice, requires_grad=True)
z_ptr = y_ptr * 2
z_ptr2 = z_ptr.move(bob, requires_grad=True)
z = z_ptr2.sum()
z.backward()
assert (x.grad == th.tensor([4.0, 4.0, 4.0, 4.0, 4.0])).all()
def test_remote_grad_fn(workers):
alice = workers["alice"]
t = th.tensor([1.0, 1], requires_grad=True)
p = t.sum()
p.backward()
expected_type = type(p.grad_fn)
x = th.tensor([1.0, 1], requires_grad=True).send(alice)
p = x.sum()
p.backward()
p_grad_fn = p.child.grad_fn.child
assert isinstance(p_grad_fn, syft.PointerTensor)
remote_grad_fn = alice._objects[p_grad_fn.id_at_location]
assert type(remote_grad_fn.grad_fn) == expected_type
def test_iadd(workers):
alice = workers["alice"]
a = torch.ones(1, 5)
b = torch.ones(1, 5)
a_pt = a.send(alice)
b_pt = b.send(alice)
b_pt += a_pt
assert len(alice.object_store._objects) == 2
def test_inplace_ops_on_remote_long_tensor(workers):
alice = workers["alice"]
t = torch.LongTensor([2])
p = t.send_(alice) * 2
p.get_()
assert p == torch.LongTensor([4])
def test_iterable_pointer(workers):
alice = workers["alice"]
t = torch.Tensor([[1, 2], [4, 5], [7, 8]])
p = t.send(alice)
assert len(alice.object_store) == 1
for idx, tensor in enumerate(p):
assert len(alice.object_store) == 2
assert isinstance(tensor, PointerTensor)
assert torch.all(tensor.get() == t[idx])
assert len(alice.object_store) == 1
l = []
for idx, tensor in enumerate(p):
l.append(tensor)
assert len(alice.object_store) == 4
del l
del tensor
assert len(alice.object_store) == 1
for idx, tensor in enumerate(p[:, 1]):
assert len(alice.object_store) == 3
assert isinstance(tensor, PointerTensor)
assert torch.all(tensor.get() == t[:, 1][idx])
def test_register_hook_on_remote_tensor_or_modules(workers):
    """Hooks registered on a remote tensor and on a local module fire
    exactly once when .backward() runs."""
    alice = workers["alice"]
    with syft.local_worker.registration_enabled():
        # Tensor hook
        flag = []  # was `g = []`: hook_function and the asserts read `flag` (NameError)

        def hook_function(inputs, outputs):
            flag.append(True)

        p = th.tensor([1.0, 2], requires_grad=True).send(alice)
        p.register_hook(hook_function)
        assert len(flag) == 0
        p.sum().backward()
        assert len(flag) == 1
        # Module hook
        flag = []  # was `g = []`: same mismatch as above

        def hook_function(model, inputs, outputs):
            flag.append(True)

        x = th.tensor([1.0, 2])
        model = torch.nn.Linear(2, 1)
        model.register_backward_hook(hook_function)
        loss = model(x)
        assert len(flag) == 0
        loss.backward()
        assert len(flag) == 1
| true | true |
f73133716abf93447f9a681574d785a552cadd2f | 877 | py | Python | setup.py | jomido/jogger | d105a5d701c7958bb5ad072af4c23477e82cd363 | [
"MIT"
] | 6 | 2015-08-06T00:54:48.000Z | 2022-02-03T13:55:33.000Z | setup.py | jomido/jogger | d105a5d701c7958bb5ad072af4c23477e82cd363 | [
"MIT"
] | null | null | null | setup.py | jomido/jogger | d105a5d701c7958bb5ad072af4c23477e82cd363 | [
"MIT"
] | 1 | 2015-05-19T11:45:34.000Z | 2015-05-19T11:45:34.000Z | from setuptools import setup, find_packages
setup(
name='jogger',
version='0.1.1',
description='Navigate log files.',
long_description=(
open('README.md').read()
),
url='http://github.com/jomido/jogger/',
license='MIT',
author='Jonathan Dobson',
author_email='jon.m.dobson@gmail.com',
packages=[
'jogger'
],
include_package_data=True,
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Topic :: Text Processing',
'Topic :: System :: Logging'
],
) | 29.233333 | 54 | 0.586089 | from setuptools import setup, find_packages
setup(
name='jogger',
version='0.1.1',
description='Navigate log files.',
long_description=(
open('README.md').read()
),
url='http://github.com/jomido/jogger/',
license='MIT',
author='Jonathan Dobson',
author_email='jon.m.dobson@gmail.com',
packages=[
'jogger'
],
include_package_data=True,
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Topic :: Text Processing',
'Topic :: System :: Logging'
],
) | true | true |
f73134a89fd7d8c446480e1eb914cc01383bee8e | 4,859 | py | Python | sdk/keyvault/azure-mgmt-keyvault/azure/mgmt/keyvault/v2018_02_14/operations/_private_link_resources_operations.py | vbarbaresi/azure-sdk-for-python | 397ba46c51d001ff89c66b170f5576cf8f49c05f | [
"MIT"
] | 8 | 2021-01-13T23:44:08.000Z | 2021-03-17T10:13:36.000Z | sdk/keyvault/azure-mgmt-keyvault/azure/mgmt/keyvault/v2018_02_14/operations/_private_link_resources_operations.py | vbarbaresi/azure-sdk-for-python | 397ba46c51d001ff89c66b170f5576cf8f49c05f | [
"MIT"
] | 2 | 2021-11-03T06:10:36.000Z | 2021-12-01T06:29:39.000Z | sdk/keyvault/azure-mgmt-keyvault/azure/mgmt/keyvault/v2018_02_14/operations/_private_link_resources_operations.py | vbarbaresi/azure-sdk-for-python | 397ba46c51d001ff89c66b170f5576cf8f49c05f | [
"MIT"
] | 1 | 2021-12-18T20:01:22.000Z | 2021-12-18T20:01:22.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class PrivateLinkResourcesOperations(object):
"""PrivateLinkResourcesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.keyvault.v2018_02_14.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list_by_vault(
self,
resource_group_name, # type: str
vault_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "models.PrivateLinkResourceListResult"
"""Gets the private link resources supported for the key vault.
:param resource_group_name: Name of the resource group that contains the key vault.
:type resource_group_name: str
:param vault_name: The name of the key vault.
:type vault_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PrivateLinkResourceListResult, or the result of cls(response)
:rtype: ~azure.mgmt.keyvault.v2018_02_14.models.PrivateLinkResourceListResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.PrivateLinkResourceListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-02-14"
accept = "application/json"
# Construct URL
url = self.list_by_vault.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vaultName': self._serialize.url("vault_name", vault_name, 'str', pattern=r'^[a-zA-Z0-9-]{3,24}$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('PrivateLinkResourceListResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_by_vault.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.KeyVault/vaults/{vaultName}/privateLinkResources'} # type: ignore
| 46.27619 | 191 | 0.686149 |
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models
if TYPE_CHECKING:
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class PrivateLinkResourcesOperations(object):
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list_by_vault(
self,
resource_group_name,
vault_name,
**kwargs
):
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-02-14"
accept = "application/json"
url = self.list_by_vault.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vaultName': self._serialize.url("vault_name", vault_name, 'str', pattern=r'^[a-zA-Z0-9-]{3,24}$'),
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
header_parameters = {}
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('PrivateLinkResourceListResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_by_vault.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.KeyVault/vaults/{vaultName}/privateLinkResources'}
| true | true |
f73134cfd4c3af902e244125da2780eb7d8fee85 | 16,270 | py | Python | libgoods/libgoods/noaa_coops.py | NOAA-ORR-ERD/GnomeTools | a9ff592bd9c7ed098f6081367aa35eae525c9774 | [
"Unlicense"
] | 2 | 2017-02-15T20:45:42.000Z | 2020-10-09T16:00:00.000Z | libgoods/libgoods/noaa_coops.py | NOAA-ORR-ERD/GnomeTools | a9ff592bd9c7ed098f6081367aa35eae525c9774 | [
"Unlicense"
] | 10 | 2015-06-25T23:42:11.000Z | 2021-06-22T16:19:19.000Z | libgoods/libgoods/noaa_coops.py | NOAA-ORR-ERD/GnomeTools | a9ff592bd9c7ed098f6081367aa35eae525c9774 | [
"Unlicense"
] | 15 | 2016-01-11T20:49:10.000Z | 2020-10-15T18:02:20.000Z | #!/usr/bin/env python
from __future__ import print_function
import datetime
try:
from urllib.request import urlopen, Request #py3
except ImportError:
from urllib2 import urlopen, Request #py2
import requests
from netCDF4 import Dataset
import os, glob
'''
Methods for generating ordered filelist for a time series of CO-OPS data
(Nowcasts + Forecasts) based on user specified start and end dates. If end
date is unspecified or greater than datetime.utcnow() the latest forecast
will be automatically be appended.
Notes on COOPS naming and aggregations:
Nowcast and forecast files are created four times a day. Output is hourly in
individual files. So each update generates 6 nowcast files and 48 forecast files
The update cycle time will be the last model output timestep in the nowcast
files and the first timestep in the forecast files
Example filenames from one update cycle (20141027.t15z):
Nowcast:
nos.ngofs.fields.n000.20141027.t15z.nc
nos.ngofs.fields.n001.20141027.t15z.nc
...
nos.ngofs.fields.n006.20141027.t15z.nc
Forecast:
nos.ngofs.fields.f000.20141027.t15z.nc
nos.ngofs.fields.f002.20141027.t15z.nc
...
nos.ngofs.fields.f048.20141027.t15z.nc
So to make a time series, use subsequent nowcasts updates strung together sequentially
by update date/time then by n0001-n005 (leave off the last one as it overlaps with
the next set of files)
Similarly append the forecast that is the same update cycle as the most recent nowcast
Confusing? Yes. Run the code and look at the output, the hard work is already done :)
!!!!!Important note: this is for newer ROMS and FVCOM models only. The POM models
still have old file structure with more than one time step per file
'''
# def specify_bnd_types(grid,segs,ss_land_nodes=[]):
# '''
# The node values were determined by plotting grid, they
# are not included in the model output
# Land_bnd_segs are needed to get the boundary right for subset grids only
# They are obtained by tri_grid remap_bry_nodes method
# '''
# if grid.lower() == 'ngofs':
# ow = list(range(1,180))
# elif grid.lower() == 'nwgofs':
# ow = list(range(1,207))
# elif grid.lower() == 'negofs':
# ow = list(range(1,139))
# elif grid.lower() == 'creofs':
# ow = [68408,68409,68410,68411,68412,68414,68604,68605,68606,68607,68608,68791,68792,68793,68962,68963,68964,68965,69130,69131,69132,69133,69303,69304,69305,69479,69481,69669,69670,69671,69672,69674,69675,69866,69867,69868,69869,69870,70062,70063,70064,70065,70271,70272,70489,70490,70704,70705,70927,70928,71144,71346,71520,71683,71844,72001,72154,72281,72377,72462,72532,72583,72631,72676,72720,72765,72810,72851,72897,72939,72981,73023,73061,73099,73138,73178,73215,73251,73283,73313,73346,73381,73417,73453,73454,73481,73502,73523]
# elif grid.lower() == 'sfbofs':
# ow = [1,2,3,4,5,97,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,144,51,52,53,54,55,150,56,57,58,59,60,61,62,63,64,65,66,162,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91]
# elif grid.lower() == 'gom3':
# ow = list(range(1,120))
# else:
# ow = [1,10000]
# seg_types= []
# if len(ss_land_nodes) > 0: #subset
# for seg in segs:
# if seg[0] in ss_land_nodes and seg[1] in ss_land_nodes:
# seg_types.append(0)
# else:
# seg_types.append(1)
# else:
# for seg in segs:
# if seg[0] in ow and seg[1] in ow:
# seg_types.append(1)
# else:
# seg_types.append(0)
# return seg_types
def make_server_filelist(model,hour0,start,end=None,test_exist=False):
    '''
    Create a list of model file urls for an aggregated time series based on
    user specified start and end dates

    Args:
        model (string): The COOPS OFS (e.g. NGOFS)
        hour0 (int): The first hour that the model is updated on
            For triangular grid models this is typically 3
            For ROMS models this is typically 0
        start (datetime.date): Look for model output beginning
            on this date
        end (datetime.date): Look for model output ending before
            this date (if None or > datetime.utcnow() append latest forecast,
            it will not be truncated so it may go beyond end date)
        test_exist (bool): Set to True when accessing files from COOPS server
            and want to check existence before operating on them
    Returns:
        flist (list): List of urls
    '''
    flist = []
    stem = 'https://opendap.co-ops.nos.noaa.gov/thredds/dodsC/NOAA/' + model.upper() + '/MODELS/'
    # First nowcast cycle: hour0 UTC on the start date
    sdate = datetime.datetime.combine(start,datetime.time(hour0,0))
    # NOTE(review): date - timedelta(hours=8) is a no-op (date arithmetic
    # ignores sub-day components), so this branch fires for any end date on
    # or after today (UTC); presumably the 8 h server lag was meant to be
    # applied in datetime space as on the next line -- confirm intent.
    if end is None or end > datetime.datetime.utcnow().date() - datetime.timedelta(hours=8):
        # Open-ended request: stop at "now minus 8 hours" (allows for the
        # latency of the latest cycle) and append the matching forecast.
        edate = datetime.datetime.utcnow() - datetime.timedelta(hours=8)
        append_fc = 1
    else:
        edate = datetime.datetime.combine(end,datetime.time(hour0,0))
        append_fc = 0
    while sdate <= edate:
        ym = str(sdate.year) + str(sdate.month).zfill(2)
        ymd = ym + str(sdate.day).zfill(2)
        h = str(sdate.hour).zfill(2)
        # Hour-zero nowcast file of this cycle; make_agg expands it to the
        # n000..n005 series (overlapping n006 is deliberately dropped)
        fname = stem + ym + '/nos.' + model.lower() + '.fields.n000.' + ymd + '.t' + h + 'z.nc'
        agg = make_agg(fname,type='nc')
        flist.extend(agg)
        sdate = sdate + datetime.timedelta(days=.25) #nowcast files are 6 hourly
    #check files exist by looking for 404 error
    if test_exist:
        flist = [f for f in flist if test_server_existence(f + '.html')]
    if append_fc:
        # Derive the forecast cycle from the newest nowcast file.
        # NOTE(review): assumes flist[-1] is an n005 file; if test_exist
        # filtered trailing files out, split('n005.') returns the whole
        # name unchanged and a bogus forecast url is built -- confirm.
        last_nc = flist[-1].split('/')[-1].split('n005.')[-1]
        fc_file0 = stem + ym + '/nos.' + model.lower() + '.fields.f000.' + last_nc
        fc_flist = make_agg(fc_file0)
        flist.extend(fc_flist)
    return flist
def sort_local_files(local_dir,model):
    '''
    Create a filelist for an aggregated time series in local directory

    Only nowcast cycles with all six hourly files present locally are
    included, plus the 49-file forecast paired with the newest nowcast
    cycle when all of its files exist.

    Args:
        local_dir (str): Directory to scan for *n000* hour-zero files
        model (str): Unused in the body; kept for signature symmetry with
            make_server_filelist
    Returns:
        tuple: (flist, status); flist is the ordered list of filepaths and
            status is nc_complete + fc_complete (0-2). NOTE(review):
            nc_complete reflects only the LAST n000 file examined and
            glob order is platform dependent -- confirm callers only treat
            status == 2 as "fully complete".

    NOTE(review): raises IndexError at flist[-1] when no complete nowcast
    set is found; presumably callers guarantee at least one -- confirm.
    '''
    nc0_files = glob.glob(os.path.join(local_dir,'*n000*'))
    flist = []
    for f in nc0_files:
        nc_complete = True #Add nc files if all 6 hours are there
        agg = make_agg(f,'nc')
        # NOTE: inner loop reuses the name f, shadowing the outer loop var
        for f in agg:
            if not os.path.exists(f):
                nc_complete = False
        if nc_complete:
            flist.extend(agg)
    # Forecast cycle paired with the newest complete nowcast (n005 file)
    fc0_file = flist[-1].replace('n005','f000')
    fc_complete = True #Add fc files if all 49 hours are there
    agg = make_agg(fc0_file,'fc')
    for f in agg:
        if not os.path.exists(f):
            fc_complete = False
    if fc_complete:
        flist.extend(agg)
    return flist, nc_complete + fc_complete
def make_agg(fc_file0,type='fc'):
    '''
    Expand an hour-zero OFS filename into the per-hour file list.

    Args:
        fc_file0 (str): Path/url of the hour-000 file, e.g.
            'nos.ngofs.fields.n000.20141027.t15z.nc'.  The first
            occurrence of '000' is replaced with each hour number.
        type (str): 'fc' -> f000..f048 (49 files, full forecast);
            'nc' -> n000..n005 (6 files -- the last nowcast hour is left
            off because it overlaps the first hour of the next cycle);
            'spec' -> hours 3 and 6 only (no hour-zero file).

    Returns:
        list: Ordered list of filenames

    Raises:
        ValueError: If type is not 'fc', 'nc' or 'spec' (previously this
            printed a message and then crashed with NameError).
    '''
    if type == 'fc':
        num_files = 48
    elif type == 'nc':
        # here we leave off the last file in order to make best time series of nowcast files
        # there is a one hour overlap between adjacent nowcasts
        num_files = 5
    elif type == 'spec':
        num_files = [3,6]
    else:
        raise ValueError("type must be 'fc', 'nc' or 'spec', got %r" % (type,))
    # Split on the FIRST '000' only: dates such as 20001027 also contain
    # '000', and an unbounded split would yield 3 parts and fail to unpack.
    a,b = fc_file0.split('000', 1)
    if not type == 'spec':
        agg = [fc_file0,]
        for h in range(1,num_files+1):
            agg.append(a + str(h).zfill(3) + b)
    else:
        agg = [a + str(h).zfill(3) + b for h in num_files]
    return agg
def test_existence(url):
    '''
    Return True if url can be opened, False otherwise.

    Any failure (HTTP error, unreachable host, malformed url, ...) is
    reported and swallowed so callers can simply filter a candidate list.

    Was a bare ``except:`` which also trapped SystemExit/KeyboardInterrupt;
    Request() was also built outside the try, so a malformed url crashed
    instead of returning False.
    '''
    try:
        req = Request(url)
        urlopen(req)
        exists = True
    except Exception:
        print('Not found: ', url)
        exists = False
    return exists
def test_server_existence(url):
    '''Return True if an HTTP GET of ``url`` answers with status 200.'''
    response = requests.get(url)
    found = response.status_code == 200
    if not found:
        print('Not found: ', url)
    return found
def download_and_save(url,output_dir):
    '''
    Copy the time/u/v variables of a remote (OPeNDAP) dataset to a local file.

    The output file keeps the remote file's basename and is written into
    ``output_dir``.  All dimensions are copied; only the 'time', 'u' and 'v'
    variables (with their attributes) are transferred.
    '''
    nc_in = Dataset(url)
    try:
        fname = url.split('/')[-1]
        nc_out = Dataset(os.path.join(output_dir,fname),'w')
        try:
            #Copy dimensions
            for dname, the_dim in nc_in.dimensions.items():
                nc_out.createDimension(dname, len(the_dim))
            for var in ['time','u','v']:
                varin = nc_in.variables[var]
                varout = nc_out.createVariable(var, varin.datatype, varin.dimensions)
                varout[:] = varin[:]
                # copy every variable attribute (units, long_name, ...)
                for name in varin.ncattrs():
                    setattr(varout, name, getattr(varin, name))
        finally:
            nc_out.close()  # fix: close even if the copy fails part-way
    finally:
        nc_in.close()  # fix: close the remote handle on any error
def ofs_info(ofs):
    '''
    Return an HTML description blurb for the given Operational Forecast System.

    Args:
        ofs (str): OFS identifier (case-insensitive), e.g. 'CBOFS', 'NGOFS'.

    Returns:
        str: HTML snippet describing the model, or '' for unknown systems.
    '''
    ofs = ofs.upper()
    if ofs == 'CREOFS':
        info = '''
        The <a href="http://tidesandcurrents.noaa.gov/ofs/creofs/creofs.html" target="_blank">
        Columbia River Estuary Operational Forecast System (CREOFS)</a> was
        jointly developed by the <a href="http://www.ohsu.edu/xd/" target="_blank">
        Oregon Health & Science University (OHSU)</a>,
        the <a href="http://www.nauticalcharts.noaa.gov/" target="_blank">
        NOAA/National Ocean Service's (NOS) Office of Coast Survey </a> and
        <a href="http://tidesandcurrents.noaa.gov/" target="_blank">
        Center for Operational Oceanographic Products and Services (CO-OPS)</a>,
        and the <a href="http://mag.ncep.noaa.gov" target="_blank">
        NOAA/National Weather Service's (NWS) National Centers
        for Environmental Prediction (NCEP) Central Operations (NCO)</a>.
        The CREOFS model domain includes the upper and lower Columbia River and Estuary.
        For detailed model information, visit the NOAA CO-OPS
        <a href="http://tidesandcurrents.noaa.gov/ofs/creofs/creofs_info.html" target="_blank">
        model information page</a>.
        '''
    elif any(x in ofs for x in ['NGOFS','NEGOFS','NWGOFS']):
        info = '''
        A <a href="http://tidesandcurrents.noaa.gov/ofs/ngofs/ngofs.html" target="_blank">
        Northern Gulf of Mexico Operational Forecast System (NGOFS)</a>
        including two nested Northeast and Northwest Gulf of Mexico Operational
        Forecast Systems (NEGOFS/NWGOFS)
        has been developed to serve the maritime user community.
        NGOFS was developed in a joint project of the
        <a href="http://www.nauticalcharts.noaa.gov/" target="_blank">
        NOAA/National Ocean Service's (NOS) Office of Coast Survey </a>,
        the <a href="http://tidesandcurrents.noaa.gov/" target="_blank">
        NOAA/NOS Center for Operational Oceanographic Products and Services (CO-OPS)</a>,
        the <a href="http://mag.ncep.noaa.gov" target="_blank">
        NOAA/National Weather Service's (NWS) National Centers
        for Environmental Prediction (NCEP) Central Operations (NCO)</a>, and the
        <a href="http://fvcom.smast.umassd.edu/" target="_blank">
        University of Massachusetts, Dartmouth </a> using the Finite Volume Coastal Ocean
        Model (FVCOM). NGOFS generates water level, current, temperature and salinity
        nowcast and forecast guidance four times per day.
        For detailed model information, visit the NOAA CO-OPS
        <a href="http://tidesandcurrents.noaa.gov/ofs/ngofs/ngofs_info.html" target="_blank">
        model information page.</a>
        '''
    elif any(x in ofs for x in ['DBOFS','TBOFS','CBOFS']):
        info = '''
        The <a href="http://tidesandcurrents.noaa.gov/ofs/cbofs/cbofs.html" target="_blank">
        Chesapeake Bay Operational Forecast System (CBOFS)</a> was developed by
        the <a href="http://www.nauticalcharts.noaa.gov/" target="_blank">
        NOAA/National Ocean Service/Office of Coast Survey</a> in a joint project
        with the <a href="http://tidesandcurrents.noaa.gov/" target="_blank">
        NOAA/NOS/Center for Operational Oceanographic Products and Services
        (CO-OPS)</a> and the <a href="http://mag.ncep.noaa.gov" target="_blank">
        NOAA/National Weather Service/National Centers for
        Environmental Prediction (NCEP) Central Operations (NCO)</a> using
        <a href="http://www.myroms.org/" target="_blank">Rutgers
        University's Regional Ocean Modeling System (ROMS)</a>.
        CBOFS generates water level, current, temperature and salinity nowcast
        and forecast guidance four times per day.
        For detailed model information, visit the NOAA CO-OPS
        <a href="http://tidesandcurrents.noaa.gov/ofs/cbofs/cbofs_info.html" target="_blank">
        model information page.</a>
        '''
        # DBOFS and TBOFS reuse the CBOFS blurb with the names swapped in.
        if ofs == 'DBOFS':
            info = info.replace('cbofs','dbofs')
            info = info.replace('CBOFS','DBOFS')
            info = info.replace('Chesapeake','Delaware')
        elif ofs == 'TBOFS':
            info = info.replace('cbofs','tbofs')
            info = info.replace('CBOFS','TBOFS')
            info = info.replace('Chesapeake','Tampa')
    elif ofs == 'SFBOFS':
        info = '''
        A <a href="http://tidesandcurrents.noaa.gov/ofs/sfbofs/sfbofs.html" target="_blank">
        San Francisco Bay Operational Forecast System (SFBOFS)</a>
        has been developed to serve the San Francisco Bay maritime communities.
        SFBOFS was jointly developed by <a href="http://www.nauticalcharts.noaa.gov/" target="_blank">
        NOAA/National Ocean Service's (NOS) Office of Coast Survey </a>,
        the <a href="http://tidesandcurrents.noaa.gov/" target="_blank">
        NOAA/NOS Center for Operational Oceanographic Products and Services (CO-OPS)</a>,
        the <a href="http://mag.ncep.noaa.gov" target="_blank">
        NOAA/National Weather Service's (NWS) National Centers
        for Environmental Prediction (NCEP) Central Operations (NCO)</a>, and the
        <a href="http://fvcom.smast.umassd.edu/" target="_blank">
        University of Massachusetts, Dartmouth </a> using the Finite Volume Coastal Ocean
        Model (FVCOM).The NWS and NOS work together to run SFBOFS operationally.
        For detailed model information, visit the NOAA CO-OPS
        <a href="http://tidesandcurrents.noaa.gov/ofs/sfbofs/sfbofs_info.html" target="_blank">
        model information page.</a>
        '''
    elif ofs == 'LEOFS':
        info = '''
        The upgraded <a href="http://tidesandcurrents.noaa.gov/ofs/leofs/leofs.html" target="_blank">
        Lake Erie Operational Forecast System (LEOFS)</a> was jointly developed by the
        <a href="http://tidesandcurrents.noaa.gov/" target="_blank">
        NOAA/NOS Center for Operational Oceanographic Products and Services (CO-OPS)</a>
        and <a href="http://www.nauticalcharts.noaa.gov/" target="_blank">
        Office of Coast Survey</a>, <a href="http://www.glerl.noaa.gov/" target="_blank">
        the Great Lakes Environmental Research Laboratory (GLERL)</a>,
        the <a href="http://mag.ncep.noaa.gov" target="_blank">
        NOAA/National Weather Service's (NWS) National Centers
        for Environmental Prediction (NCEP) Central Operations (NCO)</a>,
        and the<a href="http://fvcom.smast.umassd.edu/" target="_blank">
        University of Massachusetts, Dartmouth </a> using the Finite Volume Coastal Ocean
        Model (FVCOM). The NWS and NOS work together to run LEOFS operationally.
        For detailed model information, visit the NOAA CO-OPS
        <a href="http://tidesandcurrents.noaa.gov/ofs/leofs/leofs_info.html" target="_blank">
        model information page.</a>
        '''
    else:
        # Unknown system: empty blurb.
        return ''
    return info
#if __name__ == "__main__":
# ofs = 'ngofs'
# hour0 = 3
# #sdate = datetime.date.today()-datetime.timedelta(days=14)
# sdate = datetime.date(2014,10,28)
# flist = make_server_filelist(ofs,3,sdate)
# output_dir = 'C:\\Users\\amy.macfadyen\\Documents\\Projects\\goods\\trunk\\static\\ocean_models\\COOPS\\NGOFS'
# for f in flist:
# nc = Dataset(f)
# t = nc.variables['time']
# ts = num2date(t[:],t.units)
# print ts, '...writing'
# download_and_save(f,output_dir)
| 42.041344 | 544 | 0.631776 |
from __future__ import print_function
import datetime
try:
from urllib.request import urlopen, Request
except ImportError:
from urllib2 import urlopen, Request
import requests
from netCDF4 import Dataset
import os, glob
# The node values were determined by plotting grid, they
# are not included in the model output
# Land_bnd_segs are needed to get the boundary right for subset grids only
# They are obtained by tri_grid remap_bry_nodes method
# '''
def make_server_filelist(model,hour0,start,end=None,test_exist=False):
    '''
    Build a list of OPeNDAP URLs covering an aggregated NOAA CO-OPS time series.

    Walks 6-hourly nowcast cycles from ``start`` through ``end`` (or up to
    roughly "now" when ``end`` is omitted or too recent) and, when the series
    reaches the present, appends the latest forecast cycle.

    Args:
        model (str): OFS model name, e.g. 'ngofs' (case-insensitive).
        hour0 (int): Hour of the first nowcast cycle on ``start``.
        start (datetime.date): First day of the series.
        end (datetime.date): Optional last day; None means "up to now".
        test_exist (bool): If True, drop URLs whose .html page is not served.

    Returns:
        list: OPeNDAP dataset URLs in chronological order.
    '''
    flist = []
    stem = 'https://opendap.co-ops.nos.noaa.gov/thredds/dodsC/NOAA/' + model.upper() + '/MODELS/'
    sdate = datetime.datetime.combine(start,datetime.time(hour0,0))
    # NOTE(review): subtracting timedelta(hours=8) from a *date* truncates to
    # whole days (i.e. subtracts nothing) -- confirm the intended cutoff.
    if end is None or end > datetime.datetime.utcnow().date() - datetime.timedelta(hours=8):
        edate = datetime.datetime.utcnow() - datetime.timedelta(hours=8)
        append_fc = 1
    else:
        edate = datetime.datetime.combine(end,datetime.time(hour0,0))
        append_fc = 0
    while sdate <= edate:
        ym = str(sdate.year) + str(sdate.month).zfill(2)
        ymd = ym + str(sdate.day).zfill(2)
        h = str(sdate.hour).zfill(2)
        fname = stem + ym + '/nos.' + model.lower() + '.fields.n000.' + ymd + '.t' + h + 'z.nc'
        agg = make_agg(fname,type='nc')
        flist.extend(agg)
        sdate = sdate + datetime.timedelta(days=.25)  # nowcast files are 6 hourly
    if test_exist:
        # Probe the THREDDS .html page for each candidate URL (404 -> drop).
        flist = [f for f in flist if test_server_existence(f + '.html')]
    if append_fc:
        # NOTE(review): assumes the while loop ran at least once; otherwise
        # ``ym`` is unbound and ``flist[-1]`` raises -- verify callers.
        last_nc = flist[-1].split('/')[-1].split('n005.')[-1]
        fc_file0 = stem + ym + '/nos.' + model.lower() + '.fields.f000.' + last_nc
        fc_flist = make_agg(fc_file0)
        flist.extend(fc_flist)
    return flist
def sort_local_files(local_dir,model):
    '''
    Create a filelist for an aggregated time series in a local directory.

    Gathers each complete 6-hour nowcast cycle (n000-n005) and, when one
    exists, the matching complete forecast cycle (f000-f048).

    Args:
        local_dir (str): Directory to scan.
        model (str): Model name (unused; kept for interface parity).

    Returns:
        tuple: (flist, status) -- list of paths and the int sum
            nc_complete + fc_complete (0, 1 or 2) for the last cycle checked.
    '''
    nc0_files = glob.glob(os.path.join(local_dir,'*n000*'))
    flist = []
    nc_complete = False  # fix: was unbound when no n000 files were found
    for nc0 in nc0_files:  # fix: do not reuse one variable for both loops
        agg = make_agg(nc0,'nc')
        nc_complete = True  # add nc files only if all 6 hours are there
        for path in agg:
            if not os.path.exists(path):
                nc_complete = False
        if nc_complete:
            flist.extend(agg)
    fc_complete = False
    if flist:  # fix: guard the previously unconditional flist[-1]
        fc0_file = flist[-1].replace('n005','f000')
        fc_complete = True  # add fc files only if the whole cycle is there
        agg = make_agg(fc0_file,'fc')
        for path in agg:
            if not os.path.exists(path):
                fc_complete = False
        if fc_complete:
            flist.extend(agg)
    return flist, nc_complete + fc_complete
def make_agg(fc_file0,type='fc'):
    '''
    Expand a cycle's hour-000 file name into the whole cycle's file list.

    Args:
        fc_file0 (str): Name/URL of the hour-000 file; must contain '000'.
        type (str): 'fc' -> hours 0..48, 'nc' -> hours 0..5 (the sixth
            nowcast hour is dropped; adjacent nowcasts overlap by one hour),
            'spec' -> hours 3 and 6 only, without the 000 file.

    Returns:
        list: File names for the requested cycle.

    Raises:
        ValueError: If ``type`` is not 'fc', 'nc' or 'spec'.
    '''
    if type == 'fc':
        num_files = 48
    elif type == 'nc':
        num_files = 5
    elif type == 'spec':
        num_files = [3,6]
    else:
        # fix: the original printed and then hit a NameError on num_files;
        # raise a descriptive error immediately instead.
        raise ValueError("type must be 'fc', 'nc' or 'spec', got %r" % (type,))
    # split only on the first '000' so later occurrences cannot break unpacking
    a,b = fc_file0.split('000', 1)
    if not type == 'spec':
        agg = [fc_file0,]
        for h in range(1,num_files+1):
            agg.append(a + str(h).zfill(3) + b)
    else:
        agg = [a + str(h).zfill(3) + b for h in num_files]
    return agg
def test_existence(url):
    '''
    Check that ``url`` can be opened via urllib.

    Returns:
        bool: True on success, False on any error (the URL is printed).
    '''
    req = Request(url)
    try:
        urlopen(req)
        exists = True
    except Exception:  # fix: bare except also caught KeyboardInterrupt/SystemExit
        print('Not found: ', url)
        exists = False
    return exists
def test_server_existence(url):
    '''Probe ``url`` with an HTTP GET; return True only on a 200 response.'''
    status = requests.get(url).status_code
    if status == 200:
        return True
    print('Not found: ', url)
    return False
def download_and_save(url,output_dir):
    '''
    Copy the time/u/v variables of a remote (OPeNDAP) dataset to a local file.

    The local file keeps the remote basename and lands in ``output_dir``;
    every dimension is copied, plus the 'time', 'u' and 'v' variables with
    all of their attributes.
    '''
    nc_in = Dataset(url)
    try:
        fname = url.split('/')[-1]
        nc_out = Dataset(os.path.join(output_dir,fname),'w')
        try:
            for dname, the_dim in nc_in.dimensions.items():
                nc_out.createDimension(dname, len(the_dim))
            for var in ['time','u','v']:
                varin = nc_in.variables[var]
                varout = nc_out.createVariable(var, varin.datatype, varin.dimensions)
                varout[:] = varin[:]
                for name in varin.ncattrs():
                    setattr(varout, name, getattr(varin, name))
        finally:
            nc_out.close()  # fix: was leaked if the copy raised
    finally:
        nc_in.close()  # fix: was leaked if opening the output failed
def ofs_info(ofs):
    '''
    Return an HTML blurb describing the given Operational Forecast System.

    Args:
        ofs (str): OFS identifier (case-insensitive), e.g. 'CBOFS'.

    Returns:
        str: HTML description, or '' when the system is not recognized.
    '''
    ofs = ofs.upper()
    if ofs == 'CREOFS':
        info = '''
        The <a href="http://tidesandcurrents.noaa.gov/ofs/creofs/creofs.html" target="_blank">
        Columbia River Estuary Operational Forecast System (CREOFS)</a> was
        jointly developed by the <a href="http://www.ohsu.edu/xd/" target="_blank">
        Oregon Health & Science University (OHSU)</a>,
        the <a href="http://www.nauticalcharts.noaa.gov/" target="_blank">
        NOAA/National Ocean Service's (NOS) Office of Coast Survey </a> and
        <a href="http://tidesandcurrents.noaa.gov/" target="_blank">
        Center for Operational Oceanographic Products and Services (CO-OPS)</a>,
        and the <a href="http://mag.ncep.noaa.gov" target="_blank">
        NOAA/National Weather Service's (NWS) National Centers
        for Environmental Prediction (NCEP) Central Operations (NCO)</a>.
        The CREOFS model domain includes the upper and lower Columbia River and Estuary.
        For detailed model information, visit the NOAA CO-OPS
        <a href="http://tidesandcurrents.noaa.gov/ofs/creofs/creofs_info.html" target="_blank">
        model information page</a>.
        '''
    elif any(x in ofs for x in ['NGOFS','NEGOFS','NWGOFS']):
        info = '''
        A <a href="http://tidesandcurrents.noaa.gov/ofs/ngofs/ngofs.html" target="_blank">
        Northern Gulf of Mexico Operational Forecast System (NGOFS)</a>
        including two nested Northeast and Northwest Gulf of Mexico Operational
        Forecast Systems (NEGOFS/NWGOFS)
        has been developed to serve the maritime user community.
        NGOFS was developed in a joint project of the
        <a href="http://www.nauticalcharts.noaa.gov/" target="_blank">
        NOAA/National Ocean Service's (NOS) Office of Coast Survey </a>,
        the <a href="http://tidesandcurrents.noaa.gov/" target="_blank">
        NOAA/NOS Center for Operational Oceanographic Products and Services (CO-OPS)</a>,
        the <a href="http://mag.ncep.noaa.gov" target="_blank">
        NOAA/National Weather Service's (NWS) National Centers
        for Environmental Prediction (NCEP) Central Operations (NCO)</a>, and the
        <a href="http://fvcom.smast.umassd.edu/" target="_blank">
        University of Massachusetts, Dartmouth </a> using the Finite Volume Coastal Ocean
        Model (FVCOM). NGOFS generates water level, current, temperature and salinity
        nowcast and forecast guidance four times per day.
        For detailed model information, visit the NOAA CO-OPS
        <a href="http://tidesandcurrents.noaa.gov/ofs/ngofs/ngofs_info.html" target="_blank">
        model information page.</a>
        '''
    elif any(x in ofs for x in ['DBOFS','TBOFS','CBOFS']):
        info = '''
        The <a href="http://tidesandcurrents.noaa.gov/ofs/cbofs/cbofs.html" target="_blank">
        Chesapeake Bay Operational Forecast System (CBOFS)</a> was developed by
        the <a href="http://www.nauticalcharts.noaa.gov/" target="_blank">
        NOAA/National Ocean Service/Office of Coast Survey</a> in a joint project
        with the <a href="http://tidesandcurrents.noaa.gov/" target="_blank">
        NOAA/NOS/Center for Operational Oceanographic Products and Services
        (CO-OPS)</a> and the <a href="http://mag.ncep.noaa.gov" target="_blank">
        NOAA/National Weather Service/National Centers for
        Environmental Prediction (NCEP) Central Operations (NCO)</a> using
        <a href="http://www.myroms.org/" target="_blank">Rutgers
        University's Regional Ocean Modeling System (ROMS)</a>.
        CBOFS generates water level, current, temperature and salinity nowcast
        and forecast guidance four times per day.
        For detailed model information, visit the NOAA CO-OPS
        <a href="http://tidesandcurrents.noaa.gov/ofs/cbofs/cbofs_info.html" target="_blank">
        model information page.</a>
        '''
        # DBOFS and TBOFS reuse the CBOFS text with the names substituted.
        if ofs == 'DBOFS':
            info = info.replace('cbofs','dbofs')
            info = info.replace('CBOFS','DBOFS')
            info = info.replace('Chesapeake','Delaware')
        elif ofs == 'TBOFS':
            info = info.replace('cbofs','tbofs')
            info = info.replace('CBOFS','TBOFS')
            info = info.replace('Chesapeake','Tampa')
    elif ofs == 'SFBOFS':
        info = '''
        A <a href="http://tidesandcurrents.noaa.gov/ofs/sfbofs/sfbofs.html" target="_blank">
        San Francisco Bay Operational Forecast System (SFBOFS)</a>
        has been developed to serve the San Francisco Bay maritime communities.
        SFBOFS was jointly developed by <a href="http://www.nauticalcharts.noaa.gov/" target="_blank">
        NOAA/National Ocean Service's (NOS) Office of Coast Survey </a>,
        the <a href="http://tidesandcurrents.noaa.gov/" target="_blank">
        NOAA/NOS Center for Operational Oceanographic Products and Services (CO-OPS)</a>,
        the <a href="http://mag.ncep.noaa.gov" target="_blank">
        NOAA/National Weather Service's (NWS) National Centers
        for Environmental Prediction (NCEP) Central Operations (NCO)</a>, and the
        <a href="http://fvcom.smast.umassd.edu/" target="_blank">
        University of Massachusetts, Dartmouth </a> using the Finite Volume Coastal Ocean
        Model (FVCOM).The NWS and NOS work together to run SFBOFS operationally.
        For detailed model information, visit the NOAA CO-OPS
        <a href="http://tidesandcurrents.noaa.gov/ofs/sfbofs/sfbofs_info.html" target="_blank">
        model information page.</a>
        '''
    elif ofs == 'LEOFS':
        info = '''
        The upgraded <a href="http://tidesandcurrents.noaa.gov/ofs/leofs/leofs.html" target="_blank">
        Lake Erie Operational Forecast System (LEOFS)</a> was jointly developed by the
        <a href="http://tidesandcurrents.noaa.gov/" target="_blank">
        NOAA/NOS Center for Operational Oceanographic Products and Services (CO-OPS)</a>
        and <a href="http://www.nauticalcharts.noaa.gov/" target="_blank">
        Office of Coast Survey</a>, <a href="http://www.glerl.noaa.gov/" target="_blank">
        the Great Lakes Environmental Research Laboratory (GLERL)</a>,
        the <a href="http://mag.ncep.noaa.gov" target="_blank">
        NOAA/National Weather Service's (NWS) National Centers
        for Environmental Prediction (NCEP) Central Operations (NCO)</a>,
        and the<a href="http://fvcom.smast.umassd.edu/" target="_blank">
        University of Massachusetts, Dartmouth </a> using the Finite Volume Coastal Ocean
        Model (FVCOM). The NWS and NOS work together to run LEOFS operationally.
        For detailed model information, visit the NOAA CO-OPS
        <a href="http://tidesandcurrents.noaa.gov/ofs/leofs/leofs_info.html" target="_blank">
        model information page.</a>
        '''
    else:
        # Unknown system: empty blurb.
        return ''
    return info
| true | true |
f73135263aa5d42f8ba22ba5ad4466b2c6a05dc0 | 2,549 | py | Python | python2/koans/about_modules.py | rameshugar/koans | 35f2407dac045040bfd54ebe9f95ce77fd8a1b23 | [
"MIT"
] | null | null | null | python2/koans/about_modules.py | rameshugar/koans | 35f2407dac045040bfd54ebe9f95ce77fd8a1b23 | [
"MIT"
] | null | null | null | python2/koans/about_modules.py | rameshugar/koans | 35f2407dac045040bfd54ebe9f95ce77fd8a1b23 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This is very different to AboutModules in Ruby Koans
# Our AboutMultipleInheritance class is a little more comparable
#
from runner.koan import *
from another_local_module import *
from local_module_with_all_defined import *
class AboutModules(Koan):
    """Koan exercises about Python's module/import system.

    Each test demonstrates one import mechanism: plain ``import``,
    ``from ... import``, wildcard imports, underscore-prefixed privacy,
    and ``__all__`` export lists.  This is a Python 2 koans file.
    """

    def test_importing_other_python_scripts_as_modules(self):
        import local_module  # local_module.py
        duck = local_module.Duck()
        self.assertEqual("Daffy", duck.name)
    def test_importing_attributes_from_classes_using_from_keyword(self):
        from local_module import Duck
        duck = Duck()  # no module qualifier needed this time
        self.assertEqual("Daffy", duck.name)
    def test_we_can_import_multiple_items_at_once(self):
        import jims, joes
        jims_dog = jims.Dog()
        joes_dog = joes.Dog()
        self.assertEqual("jims dog", jims_dog.identify())
        self.assertEqual("joes dog", joes_dog.identify())
    def test_importing_all_module_attributes_at_once(self):
        """
        importing all attributes at once is done like so:
            from another_local_module import *
        The import wildcard cannot be used from within classes or functions.
        """
        goose = Goose()
        hamster = Hamster()
        self.assertEqual("Mr Stabby", goose.name)
        self.assertEqual("Phil", hamster.name)
    def test_modules_hide_attributes_prefixed_by_underscores(self):
        try:
            private_squirrel = _SecretSquirrel()
        except NameError as ex:
            # NOTE(review): ex[0] is Python 2 exception indexing; Python 3
            # would need ex.args[0].
            self.assertMatch("global name '_SecretSquirrel' is not defined", ex[0])
    def test_private_attributes_are_still_accessible_in_modules(self):
        from local_module import Duck  # local_module.py
        duck = Duck()
        self.assertEqual("password", duck._password)
        # module level attribute hiding doesn't affect class attributes
        # (unless the class itself is hidden).
    def test_a_modules_XallX_statement_limits_what_wildcards_will_match(self):
        """Examine results of from local_module_with_all_defined import *"""
        # 'Goat' is on the __all__ list
        goat = Goat()
        self.assertEqual("George", goat.name)
        # How about velociraptors?
        lizard = _Velociraptor()
        self.assertEqual("Cuddles", lizard.name)
        # SecretDuck? Never heard of her!
        try:
            duck = SecretDuck()
        except NameError as ex:
            self.assertMatch("global name 'SecretDuck' is not defined", ex[0])
| 32.265823 | 83 | 0.673597 |
from runner.koan import *
from another_local_module import *
from local_module_with_all_defined import *
class AboutModules(Koan):
    """Koan exercises covering Python's import mechanisms.

    Demonstrates plain imports, ``from ... import``, wildcard imports,
    underscore-based module privacy, and ``__all__`` lists.
    """

    def test_importing_other_python_scripts_as_modules(self):
        import local_module
        duck = local_module.Duck()
        self.assertEqual("Daffy", duck.name)
    def test_importing_attributes_from_classes_using_from_keyword(self):
        from local_module import Duck
        duck = Duck()
        self.assertEqual("Daffy", duck.name)
    def test_we_can_import_multiple_items_at_once(self):
        import jims, joes
        jims_dog = jims.Dog()
        joes_dog = joes.Dog()
        self.assertEqual("jims dog", jims_dog.identify())
        self.assertEqual("joes dog", joes_dog.identify())
    def test_importing_all_module_attributes_at_once(self):
        """Goose and Hamster arrive via ``from another_local_module import *``."""
        goose = Goose()
        hamster = Hamster()
        self.assertEqual("Mr Stabby", goose.name)
        self.assertEqual("Phil", hamster.name)
    def test_modules_hide_attributes_prefixed_by_underscores(self):
        try:
            private_squirrel = _SecretSquirrel()
        except NameError as ex:
            # NOTE(review): ex[0] is Python 2 exception indexing.
            self.assertMatch("global name '_SecretSquirrel' is not defined", ex[0])
    def test_private_attributes_are_still_accessible_in_modules(self):
        from local_module import Duck
        duck = Duck()
        self.assertEqual("password", duck._password)
        # (unless the class itself is hidden).
    def test_a_modules_XallX_statement_limits_what_wildcards_will_match(self):
        # 'Goat' is on the __all__ list
        goat = Goat()
        self.assertEqual("George", goat.name)
        # How about velociraptors?
        lizard = _Velociraptor()
        self.assertEqual("Cuddles", lizard.name)
        # SecretDuck? Never heard of her!
        try:
            duck = SecretDuck()
        except NameError as ex:
            self.assertMatch("global name 'SecretDuck' is not defined", ex[0])
| true | true |
f7313861d3e6c67d85bfeb4b43faf3df2789455b | 8,380 | py | Python | data/torch_151_data/sampler.py | jihuacao/Putil | b753fc94bea4cbda00f483681c55f0e9f54adef2 | [
"Apache-2.0"
] | 1 | 2018-12-09T06:09:29.000Z | 2018-12-09T06:09:29.000Z | data/torch_151_data/sampler.py | jihuacao/Putil | b753fc94bea4cbda00f483681c55f0e9f54adef2 | [
"Apache-2.0"
] | null | null | null | data/torch_151_data/sampler.py | jihuacao/Putil | b753fc94bea4cbda00f483681c55f0e9f54adef2 | [
"Apache-2.0"
] | null | null | null | import torch
from torch._six import int_classes as _int_classes
class Sampler(object):
    r"""Abstract base class for every sampler.

    Subclasses must implement :meth:`__iter__`, yielding indices into a
    dataset, and should implement :meth:`__len__` returning how many indices
    the iterator produces.

    .. note:: :class:`~torch.utils.data.DataLoader` does not strictly require
              :meth:`__len__`, but any code that asks a
              :class:`~torch.utils.data.DataLoader` for its length relies on it.
    """

    def __init__(self, data_source):
        # The base class keeps no state; subclasses decide what to store.
        pass

    def __iter__(self):
        raise NotImplementedError
# NOTE [ Lack of Default `__len__` in Python Abstract Base Classes ]
#
# Many times we have an abstract class representing a collection/iterable of
# data, e.g., `torch.utils.data.Sampler`, with its subclasses optionally
# implementing a `__len__` method. In such cases, we must make sure to not
# provide a default implementation, because both straightforward default
# implementations have their issues:
#
# + `return NotImplemented`:
# Calling `len(subclass_instance)` raises:
# TypeError: 'NotImplementedType' object cannot be interpreted as an integer
#
# + `raise NotImplementedError()`:
# This prevents triggering some fallback behavior. E.g., the built-in
# `list(X)` tries to call `len(X)` first, and executes a different code
# path if the method is not found or `NotImplemented` is returned, while
#   raising a `NotImplementedError` will propagate and make the call
#   fail where it could have used `__iter__` to complete the call.
#
# Thus, the only two sensible things to do are
#
# + **not** provide a default `__len__`.
#
# + raise a `TypeError` instead, which is what Python uses when users call
# a method that is not defined on an object.
# (@ssnl verifies that this works on at least Python 3.7.)
class SequentialSampler(Sampler):
    r"""Yields indices ``0 .. len(data_source) - 1`` in ascending order.

    Arguments:
        data_source (Dataset): dataset to sample from
    """

    def __init__(self, data_source):
        self.data_source = data_source

    def __iter__(self):
        total = len(self.data_source)
        return iter(range(total))

    def __len__(self):
        return len(self.data_source)
class RandomSampler(Sampler):
    r"""Draws elements at random.

    Without replacement the whole dataset is visited in a shuffled order;
    with replacement the caller may choose how many draws to make via
    :attr:`num_samples`.

    Arguments:
        data_source (Dataset): dataset to sample from
        replacement (bool): samples are drawn with replacement if ``True``, default=``False``
        num_samples (int): number of samples to draw, default=`len(dataset)`. This argument
            is supposed to be specified only when `replacement` is ``True``.
    """

    def __init__(self, data_source, replacement=False, num_samples=None):
        self.data_source = data_source
        self.replacement = replacement
        self._num_samples = num_samples

        # Validate arguments up front; the messages match the upstream API.
        if not isinstance(self.replacement, bool):
            raise ValueError("replacement should be a boolean value, but got "
                             "replacement={}".format(self.replacement))
        if self._num_samples is not None and not replacement:
            raise ValueError("With replacement=False, num_samples should not be specified, "
                             "since a random permute will be performed.")
        draw_count = self.num_samples
        if not isinstance(draw_count, int) or draw_count <= 0:
            raise ValueError("num_samples should be a positive integer "
                             "value, but got num_samples={}".format(draw_count))

    @property
    def num_samples(self):
        # The dataset size might change at runtime, so resolve it lazily.
        return len(self.data_source) if self._num_samples is None else self._num_samples

    def __iter__(self):
        population = len(self.data_source)
        if self.replacement:
            draws = torch.randint(high=population, size=(self.num_samples,),
                                  dtype=torch.int64)
            return iter(draws.tolist())
        return iter(torch.randperm(population).tolist())

    def __len__(self):
        return self.num_samples
class SubsetRandomSampler(Sampler):
    r"""Yields the given indices in a freshly shuffled order, without replacement.

    Arguments:
        indices (sequence): a sequence of indices
    """

    def __init__(self, indices):
        self.indices = indices

    def __iter__(self):
        # Permute the positions, then map them back onto the stored indices.
        order = torch.randperm(len(self.indices))
        return (self.indices[pos] for pos in order)

    def __len__(self):
        return len(self.indices)
class WeightedRandomSampler(Sampler):
    r"""Draws indices from ``[0, len(weights))`` with the given relative weights.

    Args:
        weights (sequence) : a sequence of weights, not necessary summing up to one
        num_samples (int): number of samples to draw
        replacement (bool): if ``True``, samples are drawn with replacement.
            If not, they are drawn without replacement, which means that when a
            sample index is drawn for a row, it cannot be drawn again for that row.

    Example:
        >>> list(WeightedRandomSampler([0.1, 0.9, 0.4, 0.7, 3.0, 0.6], 5, replacement=True))
        [4, 4, 1, 4, 5]
        >>> list(WeightedRandomSampler([0.9, 0.4, 0.05, 0.2, 0.3, 0.1], 5, replacement=False))
        [0, 1, 4, 3, 2]
    """

    def __init__(self, weights, num_samples, replacement=True):
        count_is_valid = (isinstance(num_samples, _int_classes)
                          and not isinstance(num_samples, bool)
                          and num_samples > 0)
        if not count_is_valid:
            raise ValueError("num_samples should be a positive integer "
                             "value, but got num_samples={}".format(num_samples))
        if not isinstance(replacement, bool):
            raise ValueError("replacement should be a boolean value, but got "
                             "replacement={}".format(replacement))
        self.weights = torch.as_tensor(weights, dtype=torch.double)
        self.num_samples = num_samples
        self.replacement = replacement

    def __iter__(self):
        draws = torch.multinomial(self.weights, self.num_samples, self.replacement)
        return iter(draws.tolist())

    def __len__(self):
        return self.num_samples
class BatchSampler(Sampler):
    r"""Groups the indices produced by another sampler into mini-batches.

    Args:
        sampler (Sampler): Base sampler supplying individual indices.
        batch_size (int): Size of mini-batch.
        drop_last (bool): If ``True``, the sampler will drop the last batch if
            its size would be less than ``batch_size``

    Example:
        >>> list(BatchSampler(SequentialSampler(range(10)), batch_size=3, drop_last=False))
        [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]
        >>> list(BatchSampler(SequentialSampler(range(10)), batch_size=3, drop_last=True))
        [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
    """

    def __init__(self, sampler, batch_size, drop_last):
        if not isinstance(sampler, Sampler):
            raise ValueError("sampler should be an instance of "
                             "torch.utils.data.Sampler, but got sampler={}"
                             .format(sampler))
        size_is_valid = (isinstance(batch_size, _int_classes)
                         and not isinstance(batch_size, bool)
                         and batch_size > 0)
        if not size_is_valid:
            raise ValueError("batch_size should be a positive integer value, "
                             "but got batch_size={}".format(batch_size))
        if not isinstance(drop_last, bool):
            raise ValueError("drop_last should be a boolean value, but got "
                             "drop_last={}".format(drop_last))
        self.sampler = sampler
        self.batch_size = batch_size
        self.drop_last = drop_last

    def __iter__(self):
        batch = []
        for idx in self.sampler:
            batch.append(idx)
            if len(batch) == self.batch_size:
                yield batch
                batch = []
        # Emit any trailing partial batch unless drop_last was requested.
        if batch and not self.drop_last:
            yield batch

    def __len__(self):
        full_batches, remainder = divmod(len(self.sampler), self.batch_size)
        if self.drop_last:
            return full_batches
        return full_batches + (1 if remainder else 0)
| 39.342723 | 100 | 0.631742 | import torch
from torch._six import int_classes as _int_classes
class Sampler(object):
    r"""Base class shared by all samplers.

    A concrete sampler implements :meth:`__iter__` to yield dataset indices
    and usually :meth:`__len__` for the number of indices produced.
    """

    def __init__(self, data_source):
        # No state is kept here; subclasses decide what to remember.
        pass

    def __iter__(self):
        raise NotImplementedError
class SequentialSampler(Sampler):
    r"""Iterates over ``data_source`` indices in ascending order."""

    def __init__(self, data_source):
        self.data_source = data_source

    def __iter__(self):
        return iter(range(len(self.data_source)))

    def __len__(self):
        return len(self.data_source)
class RandomSampler(Sampler):
    r"""Samples elements at random, with or without replacement.

    Args:
        data_source (Dataset): dataset to sample from.
        replacement (bool): draw with replacement when ``True``.
        num_samples (int): number of draws; only valid with replacement.
    """

    def __init__(self, data_source, replacement=False, num_samples=None):
        self.data_source = data_source
        self.replacement = replacement
        self._num_samples = num_samples

        if not isinstance(self.replacement, bool):
            raise ValueError("replacement should be a boolean value, but got "
                             "replacement={}".format(self.replacement))
        if self._num_samples is not None and not replacement:
            raise ValueError("With replacement=False, num_samples should not be specified, "
                             "since a random permute will be performed.")
        requested = self.num_samples
        if not isinstance(requested, int) or requested <= 0:
            raise ValueError("num_samples should be a positive integer "
                             "value, but got num_samples={}".format(requested))

    @property
    def num_samples(self):
        # Fall back to the live dataset size when no explicit count was given.
        if self._num_samples is not None:
            return self._num_samples
        return len(self.data_source)

    def __iter__(self):
        population = len(self.data_source)
        if not self.replacement:
            return iter(torch.randperm(population).tolist())
        picks = torch.randint(high=population, size=(self.num_samples,),
                              dtype=torch.int64)
        return iter(picks.tolist())

    def __len__(self):
        return self.num_samples
class SubsetRandomSampler(Sampler):
    r"""Yields the stored indices in a shuffled order, without replacement."""

    def __init__(self, indices):
        self.indices = indices

    def __iter__(self):
        shuffled = torch.randperm(len(self.indices))
        return (self.indices[j] for j in shuffled)

    def __len__(self):
        return len(self.indices)
class WeightedRandomSampler(Sampler):
    r"""Samples indices in ``[0, len(weights))`` proportionally to the weights.

    Args:
        weights (sequence): relative weights; need not sum to one.
        num_samples (int): how many indices to draw.
        replacement (bool): draw with replacement when ``True``.
    """

    def __init__(self, weights, num_samples, replacement=True):
        valid_count = (isinstance(num_samples, _int_classes)
                       and not isinstance(num_samples, bool)
                       and num_samples > 0)
        if not valid_count:
            raise ValueError("num_samples should be a positive integer "
                             "value, but got num_samples={}".format(num_samples))
        if not isinstance(replacement, bool):
            raise ValueError("replacement should be a boolean value, but got "
                             "replacement={}".format(replacement))
        self.weights = torch.as_tensor(weights, dtype=torch.double)
        self.num_samples = num_samples
        self.replacement = replacement

    def __iter__(self):
        chosen = torch.multinomial(self.weights, self.num_samples, self.replacement)
        return iter(chosen.tolist())

    def __len__(self):
        return self.num_samples
class BatchSampler(Sampler):
    r"""Wraps another sampler and yields its indices grouped into mini-batches.

    Args:
        sampler (Sampler): base sampler producing single indices.
        batch_size (int): number of indices per batch.
        drop_last (bool): discard a trailing partial batch when ``True``.
    """

    def __init__(self, sampler, batch_size, drop_last):
        if not isinstance(sampler, Sampler):
            raise ValueError("sampler should be an instance of "
                             "torch.utils.data.Sampler, but got sampler={}"
                             .format(sampler))
        batch_size_ok = (isinstance(batch_size, _int_classes)
                         and not isinstance(batch_size, bool)
                         and batch_size > 0)
        if not batch_size_ok:
            raise ValueError("batch_size should be a positive integer value, "
                             "but got batch_size={}".format(batch_size))
        if not isinstance(drop_last, bool):
            raise ValueError("drop_last should be a boolean value, but got "
                             "drop_last={}".format(drop_last))
        self.sampler = sampler
        self.batch_size = batch_size
        self.drop_last = drop_last

    def __iter__(self):
        pending = []
        for index in self.sampler:
            pending.append(index)
            if len(pending) == self.batch_size:
                yield pending
                pending = []
        if len(pending) > 0 and not self.drop_last:
            yield pending

    def __len__(self):
        total = len(self.sampler)
        if self.drop_last:
            return total // self.batch_size
        # Ceiling division keeps a final partial batch.
        return (total + self.batch_size - 1) // self.batch_size
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.