hexsha stringlengths 40 40 | size int64 3 1.03M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 972 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 972 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 972 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 3 1.03M | avg_line_length float64 1.13 941k | max_line_length int64 2 941k | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a79d1926c3d415d6b4196e18d7693533ac9c03ef | 35,050 | py | Python | mathics/builtin/linalg.py | pqtoan/mathics | a08db9475c12585b5bd1701fd42b627062b6ad1e | [
"Apache-2.0"
] | null | null | null | mathics/builtin/linalg.py | pqtoan/mathics | a08db9475c12585b5bd1701fd42b627062b6ad1e | [
"Apache-2.0"
] | null | null | null | mathics/builtin/linalg.py | pqtoan/mathics | a08db9475c12585b5bd1701fd42b627062b6ad1e | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Linear algebra
"""
from __future__ import unicode_literals
from __future__ import absolute_import
import six
from six.moves import range
from six.moves import zip
import sympy
from mpmath import mp
from mathics.builtin.base import Builtin
from mathics.core.convert import from_sympy
from mathics.core.expression import Expression, Integer, Symbol, Real
def matrix_data(m):
    """Extract sympy values from a Mathics 'List' expression.

    Returns a list of row lists for a rectangular matrix, a flat list for
    a vector, or None when the expression is neither a matrix nor a
    vector (or when any element fails to convert to sympy).
    """
    if not m.has_form('List', None):
        return None
    leaves = m.leaves
    row_flags = [leaf.has_form('List', None) for leaf in leaves]
    if all(row_flags):
        # Matrix case: every leaf is itself a list (one row).
        rows = [[entry.to_sympy() for entry in row.leaves] for row in leaves]
        if all(None not in row for row in rows):
            return rows
    elif not any(row_flags):
        # Vector case: no leaf is a list.
        vector = [entry.to_sympy() for entry in leaves]
        if None not in vector:
            return vector
    # Mixed/ragged structure or unconvertible entries.
    return None
def to_sympy_matrix(data, **kwargs):
    """Build a sympy Matrix from a Mathics expression or a plain list.

    Returns None when the data cannot be shaped into a sympy Matrix.
    """
    source = data if isinstance(data, list) else matrix_data(data)
    try:
        return sympy.Matrix(source)
    except (TypeError, AssertionError, ValueError):
        return None
def to_mpmath_matrix(data, **kwargs):
    """Build an mpmath matrix from a Mathics expression or a plain list.

    Returns None when the input is not a rectangular list of lists.
    """
    def nested_strings(m):
        # Only a list of lists qualifies; entries are passed as strings so
        # mpmath parses them at its own working precision.
        if not m.has_form('List', None):
            return None
        if not all(leaf.has_form('List', None) for leaf in m.leaves):
            return None
        return [[str(entry) for entry in row.leaves] for row in m.leaves]

    source = data if isinstance(data, list) else nested_strings(data)
    try:
        return mp.matrix(source)
    except (TypeError, AssertionError, ValueError):
        return None
class Det(Builtin):
    """
    <dl>
    <dt>'Det[$m$]'
        <dd>computes the determinant of the matrix $m$.
    </dl>

    >> Det[{{1, 1, 0}, {1, 0, 1}, {0, 1, 1}}]
     = -2

    Symbolic determinant:
    >> Det[{{a, b, c}, {d, e, f}, {g, h, i}}]
     = a e i - a f h - b d i + b f g + c d h - c e g
    """

    def apply(self, m, evaluation):
        'Det[m_]'
        sym_matrix = to_sympy_matrix(m)
        # Only a non-empty square matrix has a determinant.
        if sym_matrix is None or sym_matrix.rows != sym_matrix.cols or sym_matrix.rows == 0:
            return evaluation.message('Det', 'matsq', m)
        return from_sympy(sym_matrix.det())
class Cross(Builtin):
    """
    <dl>
    <dt>'Cross[$a$, $b$]'
        <dd>computes the vector cross product of $a$ and $b$.
    </dl>

    >> Cross[{x1, y1, z1}, {x2, y2, z2}]
     = {y1 z2 - y2 z1, -x1 z2 + x2 z1, x1 y2 - x2 y1}

    >> Cross[{x, y}]
     = {-y, x}

    >> Cross[{1, 2}, {3, 4, 5}]
     : The arguments are expected to be vectors of equal length, and the number of arguments is expected to be 1 less than their length.
     = Cross[{1, 2}, {3, 4, 5}]
    """

    rules = {
        'Cross[{x_, y_}]': '{-y, x}',
    }

    messages = {
        'nonn1': ('The arguments are expected to be vectors of equal length, '
                  'and the number of arguments is expected to be 1 less than '
                  'their length.'),
    }

    # TODO Vectors of length other than 3

    def apply(self, a, b, evaluation):
        'Cross[a_, b_]'
        a = to_sympy_matrix(a)
        b = to_sympy_matrix(b)
        # Bug fix: to_sympy_matrix returns None for non-vector arguments;
        # the previous code then raised AttributeError on None.cross(...).
        # Report the same user-facing message as for shape mismatches.
        if a is None or b is None:
            return evaluation.message('Cross', 'nonn1')
        try:
            res = a.cross(b)
        except sympy.ShapeError:
            return evaluation.message('Cross', 'nonn1')
        return from_sympy(res)
class VectorAngle(Builtin):
    """
    <dl>
    <dt>'VectorAngle[$u$, $v$]'
        <dd>gives the angles between vectors $u$ and $v$
    </dl>

    >> VectorAngle[{1, 0}, {0, 1}]
     = Pi / 2

    >> VectorAngle[{1, 2}, {3, 1}]
     = Pi / 4

    >> VectorAngle[{1, 1, 0}, {1, 0, 1}]
     = Pi / 3

    #> VectorAngle[{0, 1}, {0, 1}]
     = 0
    """

    # Implemented purely by rewrite: angle = arccos(u.v / (|u| |v|)).
    rules = {
        'VectorAngle[u_, v_]': 'ArcCos[u.v / (Norm[u] Norm[v])]',
    }
class Inverse(Builtin):
    """
    <dl>
    <dt>'Inverse[$m$]'
        <dd>computes the inverse of the matrix $m$.
    </dl>

    >> Inverse[{{1, 2, 0}, {2, 3, 0}, {3, 4, 1}}]
     = {{-3, 2, 0}, {2, -1, 0}, {1, -2, 1}}
    >> Inverse[{{1, 0}, {0, 0}}]
     : The matrix {{1, 0}, {0, 0}} is singular.
     = Inverse[{{1, 0}, {0, 0}}]

    >> Inverse[{{1, 0, 0}, {0, Sqrt[3]/2, 1/2}, {0,-1 / 2, Sqrt[3]/2}}]
     = {{1, 0, 0}, {0, Sqrt[3] / 2, -1 / 2}, {0, 1 / 2, Sqrt[3] / 2}}
    """

    messages = {
        'sing': "The matrix `1` is singular.",
    }

    def apply(self, m, evaluation):
        'Inverse[m_]'
        sym_matrix = to_sympy_matrix(m)
        # Only non-empty square matrices can be inverted.
        if sym_matrix is None or sym_matrix.rows != sym_matrix.cols or sym_matrix.rows == 0:
            return evaluation.message('Inverse', 'matsq', m)
        # A vanishing determinant means no inverse exists.
        if sym_matrix.det() == 0:
            return evaluation.message('Inverse', 'sing', m)
        return from_sympy(sym_matrix.inv())
class SingularValueDecomposition(Builtin):
    """
    <dl>
    <dt>'SingularValueDecomposition[$m$]'
        <dd>calculates the singular value decomposition for the matrix $m$.
    </dl>

    'SingularValueDecomposition' returns $u$, $s$, $w$ such that $m$=$u$ $s$ $v$,
    $u$\'$u$=1, $v$\'$v$=1, and $s$ is diagonal.

    >> SingularValueDecomposition[{{1.5, 2.0}, {2.5, 3.0}}]
     = {{{0.538954, 0.842335}, {0.842335, -0.538954}}, {{4.63555, 0.}, {0., 0.107862}}, {{0.628678, 0.777666}, {-0.777666, 0.628678}}}

    #> SingularValueDecomposition[{{3/2, 2}, {5/2, 3}}]
     : Symbolic SVD is not implemented, performing numerically.
     = {{{0.538954, 0.842335}, {0.842335, -0.538954}}, {{4.63555, 0.}, {0., 0.107862}}, {{0.628678, 0.777666}, {-0.777666, 0.628678}}}

    #> SingularValueDecomposition[{1, {2}}]
     : Argument {1, {2}} at position 1 is not a non-empty rectangular matrix.
     = SingularValueDecomposition[{1, {2}}]
    """

    # Sympy lacks symbolic SVD
    """
    >> SingularValueDecomposition[{{1, 2}, {2, 3}, {3, 4}}]
     = {{-11 / 6, -1 / 3, 7 / 6}, {4 / 3, 1 / 3, -2 / 3}}

    >> SingularValueDecomposition[{{1, 2, 0}, {2, 3, 0}, {3, 4, 1}}]
     = {{-3, 2, 0}, {2, -1, 0}, {1, -2, 1}}
    """

    messages = {
        'nosymb': "Symbolic SVD is not implemented, performing numerically.",
        'matrix': "Argument `1` at position `2` is not a non-empty rectangular matrix.",
    }

    def apply(self, m, evaluation):
        'SingularValueDecomposition[m_]'
        mp_matrix = to_mpmath_matrix(m)
        if mp_matrix is None:
            return evaluation.message('SingularValueDecomposition', 'matrix', m, 1)
        if all(not leaf.is_inexact() for row in m.leaves for leaf in row.leaves):
            # symbolic argument (not implemented) -- warn, then fall
            # through and compute numerically anyway.
            evaluation.message('SingularValueDecomposition', 'nosymb')
        U, S, V = mp.svd(mp_matrix)
        S = mp.diag(S)
        return Expression('List',
                          Expression('List', *U.tolist()),
                          Expression('List', *S.tolist()),
                          Expression('List', *V.tolist()))
class QRDecomposition(Builtin):
    """
    <dl>
    <dt>'QRDecomposition[$m$]'
        <dd>computes the QR decomposition of the matrix $m$.
    </dl>

    >> QRDecomposition[{{1, 2}, {3, 4}, {5, 6}}]
     = {{{Sqrt[35] / 35, 3 Sqrt[35] / 35, Sqrt[35] / 7}, {13 Sqrt[210] / 210, 2 Sqrt[210] / 105, -Sqrt[210] / 42}}, {{Sqrt[35], 44 Sqrt[35] / 35}, {0, 2 Sqrt[210] / 35}}}

    #> QRDecomposition[{{1, 2, 3, 4}, {1, 4, 9, 16}, {1, 8, 27, 64}}]
     : Sympy is unable to perform the QR decomposition.
     = QRDecomposition[{{1, 2, 3, 4}, {1, 4, 9, 16}, {1, 8, 27, 64}}]

    #> QRDecomposition[{1, {2}}]
     : Argument {1, {2}} at position 1 is not a non-empty rectangular matrix.
     = QRDecomposition[{1, {2}}]
    """

    messages = {
        'sympy': 'Sympy is unable to perform the QR decomposition.',
        'matrix': "Argument `1` at position `2` is not a non-empty rectangular matrix.",
    }

    def apply(self, m, evaluation):
        'QRDecomposition[m_]'
        sym_matrix = to_sympy_matrix(m)
        if sym_matrix is None:
            return evaluation.message('QRDecomposition', 'matrix', m, 1)
        try:
            Q, R = sym_matrix.QRdecomposition()
        except sympy.matrices.MatrixError:
            return evaluation.message('QRDecomposition', 'sympy')
        # Mathematica's convention returns the transpose of sympy's Q.
        return Expression('List', from_sympy(Q.transpose()), from_sympy(R))
class PseudoInverse(Builtin):
    """
    <dl>
    <dt>'PseudoInverse[$m$]'
        <dd>computes the Moore-Penrose pseudoinverse of the matrix $m$.
        If $m$ is invertible, the pseudoinverse equals the inverse.
    </dl>

    >> PseudoInverse[{{1, 2}, {2, 3}, {3, 4}}]
     = {{-11 / 6, -1 / 3, 7 / 6}, {4 / 3, 1 / 3, -2 / 3}}

    >> PseudoInverse[{{1, 2, 0}, {2, 3, 0}, {3, 4, 1}}]
     = {{-3, 2, 0}, {2, -1, 0}, {1, -2, 1}}

    >> PseudoInverse[{{1.0, 2.5}, {2.5, 1.0}}]
     = {{-0.190476, 0.47619}, {0.47619, -0.190476}}

    #> PseudoInverse[{1, {2}}]
     : Argument {1, {2}} at position 1 is not a non-empty rectangular matrix.
     = PseudoInverse[{1, {2}}]
    """

    messages = {
        'matrix': "Argument `1` at position `2` is not a non-empty rectangular matrix.",
    }

    def apply(self, m, evaluation):
        'PseudoInverse[m_]'
        sym_matrix = to_sympy_matrix(m)
        if sym_matrix is None:
            return evaluation.message('PseudoInverse', 'matrix', m, 1)
        # sympy handles both square and rectangular matrices here.
        return from_sympy(sym_matrix.pinv())
class LeastSquares(Builtin):
    """
    <dl>
    <dt>'LeastSquares[$m$, $b$]'
        <dd>computes the least squares solution to $m$ $x$ = $b$, finding
        an $x$ that solves for $b$ optimally.
    </dl>

    >> LeastSquares[{{1, 2}, {2, 3}, {5, 6}}, {1, 5, 3}]
     = {-28 / 13, 31 / 13}

    >> Simplify[LeastSquares[{{1, 2}, {2, 3}, {5, 6}}, {1, x, 3}]]
     = {12 / 13 - 8 x / 13, -4 / 13 + 7 x / 13}

    >> LeastSquares[{{1, 1, 1}, {1, 1, 2}}, {1, 3}]
     : Solving for underdetermined system not implemented.
     = LeastSquares[{{1, 1, 1}, {1, 1, 2}}, {1, 3}]

    ## Inconsistent system - ideally we'd print a different message
    #> LeastSquares[{{1, 1, 1}, {1, 1, 1}}, {1, 0}]
     : Solving for underdetermined system not implemented.
     = LeastSquares[{{1, 1, 1}, {1, 1, 1}}, {1, 0}]

    #> LeastSquares[{1, {2}}, {1, 2}]
     : Argument {1, {2}} at position 1 is not a non-empty rectangular matrix.
     = LeastSquares[{1, {2}}, {1, 2}]
    #> LeastSquares[{{1, 2}, {3, 4}}, {1, {2}}]
     : Argument {1, {2}} at position 2 is not a non-empty rectangular matrix.
     = LeastSquares[{{1, 2}, {3, 4}}, {1, {2}}]
    """

    messages = {
        'underdetermined': "Solving for underdetermined system not implemented.",
        'matrix': "Argument `1` at position `2` is not a non-empty rectangular matrix.",
    }

    def apply(self, m, b, evaluation):
        'LeastSquares[m_, b_]'
        sym_matrix = to_sympy_matrix(m)
        if sym_matrix is None:
            return evaluation.message('LeastSquares', 'matrix', m, 1)
        sym_b = to_sympy_matrix(b)
        if sym_b is None:
            return evaluation.message('LeastSquares', 'matrix', b, 2)
        try:
            # sympy's default method here is Cholesky.
            solution = sym_matrix.solve_least_squares(sym_b)
        except NotImplementedError:
            return evaluation.message('LeastSquares', 'underdetermined')
        return from_sympy(solution)
class LinearSolve(Builtin):
    """
    <dl>
    <dt>'LinearSolve[$matrix$, $right$]'
        <dd>solves the linear equation system '$matrix$ . $x$ = $right$'
        and returns one corresponding solution $x$.
    </dl>

    >> LinearSolve[{{1, 1, 0}, {1, 0, 1}, {0, 1, 1}}, {1, 2, 3}]
     = {0, 1, 2}
    Test the solution:
    >> {{1, 1, 0}, {1, 0, 1}, {0, 1, 1}} . {0, 1, 2}
     = {1, 2, 3}
    If there are several solutions, one arbitrary solution is returned:
    >> LinearSolve[{{1, 2, 3}, {4, 5, 6}, {7, 8, 9}}, {1, 1, 1}]
     = {-1, 1, 0}
    Infeasible systems are reported:
    >> LinearSolve[{{1, 2, 3}, {4, 5, 6}, {7, 8, 9}}, {1, -2, 3}]
     : Linear equation encountered that has no solution.
     = LinearSolve[{{1, 2, 3}, {4, 5, 6}, {7, 8, 9}}, {1, -2, 3}]

    #> LinearSolve[{1, {2}}, {1, 2}]
     : Argument {1, {2}} at position 1 is not a non-empty rectangular matrix.
     = LinearSolve[{1, {2}}, {1, 2}]
    #> LinearSolve[{{1, 2}, {3, 4}}, {1, {2}}]
     : Argument {1, {2}} at position 2 is not a non-empty rectangular matrix.
     = LinearSolve[{{1, 2}, {3, 4}}, {1, {2}}]
    """

    messages = {
        'lslc': ("Coefficient matrix and target vector(s) or matrix "
                 "do not have the same dimensions."),
        'nosol': "Linear equation encountered that has no solution.",
        'matrix': "Argument `1` at position `2` is not a non-empty rectangular matrix.",
    }

    def apply(self, m, b, evaluation):
        'LinearSolve[m_, b_]'
        matrix = matrix_data(m)
        if matrix is None:
            return evaluation.message('LinearSolve', 'matrix', m, 1)
        # Leave the expression unevaluated if b is not a list at all.
        if not b.has_form('List', None):
            return
        if len(b.leaves) != len(matrix):
            return evaluation.message('LinearSolve', 'lslc')
        # Build the augmented matrix [A | b] for sympy's solver.
        system = [mm + [v] for mm, v in zip(matrix, b.leaves)]
        system = to_sympy_matrix(system)
        if system is None:
            return evaluation.message('LinearSolve', 'matrix', b, 2)
        # One Dummy symbol per unknown (cols - 1 excludes the augmented column).
        syms = [sympy.Dummy('LinearSolve_var%d' % k)
                for k in range(system.cols - 1)]
        sol = sympy.solve_linear_system(system, *syms)
        if sol:
            # substitute 0 for variables that are not in result dictionary
            free_vars = dict((sym, sympy.Integer(
                0)) for sym in syms if sym not in sol)
            sol.update(free_vars)
            # Pinned variables may still reference free ones; substitute the
            # zeros into those expressions to obtain one concrete solution.
            sol = [(sol[sym] if sym in free_vars else sol[sym].subs(free_vars))
                   for sym in syms]
            return from_sympy(sol)
        else:
            return evaluation.message('LinearSolve', 'nosol')
class FittedModel(Builtin):
    # Result wrapper produced by LinearModelFit. It carries a list of
    # property rules: indexing with a string ("BestFit", "Response", ...)
    # looks the property up; calling it with any other argument applies
    # the fitted "Function" to that argument. The MakeBoxes rule controls
    # how a FittedModel is displayed (shows its "BestFit" expression).
    rules = {
        'FittedModel[x_List][s_String]': 's /. x',
        'FittedModel[x_List][y_]': '("Function" /. x)[y]',
        'MakeBoxes[FittedModel[x_List], f_]':
        '''
            RowBox[{"FittedModel[",
                Replace[Temporary["BestFit" /. x, f], Temporary -> MakeBoxes, 1, Heads -> True],
                "]"}]
            ''',
    }
class DesignMatrix(Builtin):
    """
    <dl>
    <dt>'DesignMatrix[$m$, $f$, $x$]'
        <dd>returns the design matrix.
    </dl>

    >> DesignMatrix[{{2, 1}, {3, 4}, {5, 3}, {7, 6}}, x, x]
     = {{1, 2}, {1, 3}, {1, 5}, {1, 7}}

    >> DesignMatrix[{{2, 1}, {3, 4}, {5, 3}, {7, 6}}, f[x], x]
     = {{1, f[2]}, {1, f[3]}, {1, f[5]}, {1, f[7]}}
    """

    # The first two rules normalize the arguments to the list/list form;
    # the last rule substitutes each data row's predictors into the basis
    # functions and prepends the constant column 1.
    rules = {
        'DesignMatrix[m_, f_List, x_?AtomQ]': 'DesignMatrix[m, {f}, ConstantArray[x, Length[f]]]',
        'DesignMatrix[m_, f_, x_?AtomQ]': 'DesignMatrix[m, {f}, {x}]',
        'DesignMatrix[m_, f_List, x_List]':
        'Prepend[MapThread[Function[{ff, xx, rr}, ff /. xx -> rr], {f, x, Most[#]}], 1]& /@ m',
    }
class LinearModelFit(Builtin):
    """
    <dl>
    <dt>'LinearModelFit[$m$, $f$, $x$]'
        <dd>returns the design matrix.
    </dl>

    >> m = LinearModelFit[{{2, 1}, {3, 4}, {5, 3}, {7, 6}}, x, x];
    >> m["BasisFunctions"]
     = {1, x}

    >> m["BestFit"]
     = 0.186441 + 0.779661 x

    >> m["BestFitParameters"]
     = {0.186441, 0.779661}

    >> m["DesignMatrix"]
     = {{1, 2}, {1, 3}, {1, 5}, {1, 7}}

    >> m["Function"]
     = 0.186441 + 0.779661 #1&

    >> m["Response"]
     = {1, 4, 3, 6}

    >> m["FitResiduals"]
     = {-0.745763, 1.47458, -1.08475, 0.355932}

    >> m = LinearModelFit[{{2, 2, 1}, {3, 2, 4}, {5, 6, 3}, {7, 9, 6}}, {Sin[x], Cos[y]}, {x, y}];
    >> m["BasisFunctions"]
     = {1, Sin[x], Cos[y]}

    >> m["Function"]
     = 3.33077 - 5.65221 Cos[#2] - 5.01042 Sin[#1]&

    >> m = LinearModelFit[{{{1, 4}, {1, 5}, {1, 7}}, {1, 2, 3}}];
    >> m["BasisFunctions"]
     = {#1, #2}

    >> m["FitResiduals"]
     = {-0.142857, 0.214286, -0.0714286}
    """

    # see the paper "Regression by linear combination of basis functions" by Risi Kondor for a good
    # summary of the math behind this

    # Rule overview:
    #  1. Normalize scalar f/x to list form.
    #  2. Reduce the (data, basis, vars) form to the (design matrix,
    #     response vector) form, rewriting basis functions in terms of
    #     Slot[] arguments.
    #  3. Core rule: compute parameters via the normal equations
    #     (X'X)^-1 X' y and assemble a FittedModel with all properties.
    #  4. Default basis when only {matrix, response} is given: one slot
    #     per design-matrix column.
    rules = {
        'LinearModelFit[data_, f_, x_?AtomQ]':
        'LinearModelFit[data, {f}, {x}]',
        'LinearModelFit[data_, f_List, x_List] /; Length[f] == Length[x]':
        '''
            LinearModelFit[{DesignMatrix[data, f, x], Part[data, ;;, -1]},
                Prepend[MapThread[#1 /. #2 -> #3&, {f, x, Table[Slot[i], {i, Length[f]}]}], 1],
                "BasisFunctions" -> Prepend[f, 1], "NumberOfSlots" -> Length[f]]
            ''',
        'LinearModelFit[{m_?MatrixQ, v_}, f_, options___]':  # f is a Slot[] version of BasisFunctions
        '''
            Module[{m1 = N[m], v1 = N[v], bf = "BasisFunctions" /. Join[{options}, {"BasisFunctions" -> f}]},
                Module[{t1 = Transpose[m1], n = "NumberOfSlots" /. Join[{options}, {"NumberOfSlots" -> Length[f]}]},
                    Module[{parameters = Dot[Dot[Inverse[Dot[t1, m1]], t1], v1]},
                        Module[{function = Replace[Temporary[Total[f * parameters]],
                            Temporary -> Function, 1, Heads -> True], (* work around Function's Hold *)},
                            FittedModel[{
                                "BasisFunctions" -> bf,
                                "BestFit" -> Total[bf * parameters],
                                "BestFitParameters" -> parameters,
                                "DesignMatrix" -> m,
                                "Function" -> function,
                                "Response" -> v,
                                "FitResiduals" -> MapThread[#2 - (function @@ Take[#1, -n])&, {m1, v1}]
                            }]
                        ]
                    ]
                ]
            ]
            ''',
        'LinearModelFit[{m_?MatrixQ, v_}]':
        'LinearModelFit[{m, v}, Table[Slot[i], {i, Length[First[m]]}]]',
    }
class NullSpace(Builtin):
    """
    <dl>
    <dt>'NullSpace[$matrix$]'
        <dd>returns a list of vectors that span the nullspace of $matrix$.
    </dl>

    >> NullSpace[{{1, 2, 3}, {4, 5, 6}, {7, 8, 9}}]
     = {{1, -2, 1}}

    >> A = {{1, 1, 0}, {1, 0, 1}, {0, 1, 1}};
    >> NullSpace[A]
     = {}
    >> MatrixRank[A]
     = 3

    #> NullSpace[{1, {2}}]
     : Argument {1, {2}} at position 1 is not a non-empty rectangular matrix.
     = NullSpace[{1, {2}}]
    """

    messages = {
        'matrix': "Argument `1` at position `2` is not a non-empty rectangular matrix.",
    }

    def apply(self, m, evaluation):
        'NullSpace[m_]'
        sym_matrix = to_sympy_matrix(m)
        if sym_matrix is None:
            return evaluation.message('NullSpace', 'matrix', m, 1)
        # sympy yields n x 1 column matrices; flatten each into a vector.
        basis = [list(column) for column in sym_matrix.nullspace()]
        return from_sympy(basis)
class RowReduce(Builtin):
    """
    <dl>
    <dt>'RowReduce[$matrix$]'
        <dd>returns the reduced row-echelon form of $matrix$.
    </dl>

    >> RowReduce[{{1, 0, a}, {1, 1, b}}]
     = {{1, 0, a}, {0, 1, -a + b}}

    >> RowReduce[{{1, 2, 3}, {4, 5, 6}, {7, 8, 9}}] // MatrixForm
     = 1   0   -1
     .
     . 0   1   2
     .
     . 0   0   0

    #> RowReduce[{{1, 0}, {0}}]
     : Argument {{1, 0}, {0}} at position 1 is not a non-empty rectangular matrix.
     = RowReduce[{{1, 0}, {0}}]
    """

    messages = {
        'matrix': "Argument `1` at position `2` is not a non-empty rectangular matrix.",
    }

    def apply(self, m, evaluation):
        'RowReduce[m_]'
        sym_matrix = to_sympy_matrix(m)
        if sym_matrix is None:
            return evaluation.message('RowReduce', 'matrix', m, 1)
        # rref() returns (reduced matrix, pivot columns); keep the matrix.
        rref_matrix, _pivots = sym_matrix.rref()
        return from_sympy(rref_matrix)
class MatrixRank(Builtin):
    """
    <dl>
    <dt>'MatrixRank[$matrix$]'
        <dd>returns the rank of $matrix$.
    </dl>

    >> MatrixRank[{{1, 2, 3}, {4, 5, 6}, {7, 8, 9}}]
     = 2
    >> MatrixRank[{{1, 1, 0}, {1, 0, 1}, {0, 1, 1}}]
     = 3
    >> MatrixRank[{{a, b}, {3 a, 3 b}}]
     = 1

    #> MatrixRank[{{1, 0}, {0}}]
     : Argument {{1, 0}, {0}} at position 1 is not a non-empty rectangular matrix.
     = MatrixRank[{{1, 0}, {0}}]
    """

    messages = {
        'matrix': "Argument `1` at position `2` is not a non-empty rectangular matrix.",
    }

    def apply(self, m, evaluation):
        'MatrixRank[m_]'
        sym_matrix = to_sympy_matrix(m)
        if sym_matrix is None:
            return evaluation.message('MatrixRank', 'matrix', m, 1)
        # The rank equals the number of pivot columns in the RREF.
        _reduced, pivots = sym_matrix.rref()
        return Integer(len(pivots))
class Eigenvalues(Builtin):
    """
    <dl>
    <dt>'Eigenvalues[$m$]'
        <dd>computes the eigenvalues of the matrix $m$.
    </dl>

    >> Eigenvalues[{{1, 1, 0}, {1, 0, 1}, {0, 1, 1}}] // Sort
     = {-1, 1, 2}

    >> Eigenvalues[{{Cos[theta],Sin[theta],0},{-Sin[theta],Cos[theta],0},{0,0,1}}] // Sort
     = {1, Cos[theta] + Sqrt[-1 + Cos[theta] ^ 2], Cos[theta] - Sqrt[-1 + Cos[theta] ^ 2]}

    >> Eigenvalues[{{7, 1}, {-4, 3}}]
     = {5, 5}

    #> Eigenvalues[{{1, 0}, {0}}]
     : Argument {{1, 0}, {0}} at position 1 is not a non-empty rectangular matrix.
     = Eigenvalues[{{1, 0}, {0}}]
    """

    messages = {
        'matrix': "Argument `1` at position `2` is not a non-empty rectangular matrix.",
    }

    def apply(self, m, evaluation):
        'Eigenvalues[m_]'
        matrix = to_sympy_matrix(m)
        if matrix is None:
            return evaluation.message('Eigenvalues', 'matrix', m, 1)
        if matrix.cols != matrix.rows or matrix.cols == 0:
            return evaluation.message('Eigenvalues', 'matsq', m)
        # eigenvals() maps eigenvalue -> algebraic multiplicity.
        eigenvalues = matrix.eigenvals()
        try:
            # Sort by decreasing absolute value; the -value tiebreaker puts
            # the positive member of a +/- pair first.
            eigenvalues = sorted(six.iteritems(eigenvalues),
                                 key=lambda v_c: (abs(v_c[0]), -v_c[0]), reverse=True)
        except TypeError as e:
            # Symbolic eigenvalues may not be orderable; fall back to
            # sympy's own ordering instead of failing.
            if not str(e).startswith('cannot determine truth value of'):
                raise e
            eigenvalues = list(eigenvalues.items())
        # Repeat each eigenvalue according to its multiplicity.
        return from_sympy([v for (v, c) in eigenvalues for _ in range(c)])
class Eigensystem(Builtin):
    """
    <dl>
    <dt>'Eigensystem[$m$]'
        <dd>returns the list '{Eigenvalues[$m$], Eigenvectors[$m$]}'.
    </dl>

    >> Eigensystem[{{1, 1, 0}, {1, 0, 1}, {0, 1, 1}}]
     = {{2, -1, 1}, {{1, 1, 1}, {1, -2, 1}, {-1, 0, 1}}}
    """

    # Pure rewrite rule; both builtins use the same sorting so the
    # eigenvalue order matches the eigenvector order.
    rules = {
        'Eigensystem[m_]': '{Eigenvalues[m], Eigenvectors[m]}'
    }
class MatrixPower(Builtin):
    """
    <dl>
    <dt>'MatrixPower[$m$, $n$]'
        <dd>computes the $n$th power of a matrix $m$.
    </dl>

    >> MatrixPower[{{1, 2}, {1, 1}}, 10]
     = {{3363, 4756}, {2378, 3363}}

    >> MatrixPower[{{1, 2}, {2, 5}}, -3]
     = {{169, -70}, {-70, 29}}

    #> MatrixPower[{{0, x}, {0, 0}}, n]
     = {{0 ^ n, n x 0 ^ (-1 + n)}, {0, 0 ^ n}}

    #> MatrixPower[{{1, 0}, {0}}, 2]
     : Argument {{1, 0}, {0}} at position 1 is not a non-empty rectangular matrix.
     = MatrixPower[{{1, 0}, {0}}, 2]
    """

    messages = {
        'matrixpowernotimplemented': ('Matrix power not implemented for matrix `1`.'),
        'matrix': "Argument `1` at position `2` is not a non-empty rectangular matrix.",
    }

    def apply(self, m, power, evaluation):
        'MatrixPower[m_, power_]'
        base = to_sympy_matrix(m)
        if base is None:
            return evaluation.message('MatrixPower', 'matrix', m, 1)
        exponent = power.to_sympy()
        # Leave the expression unevaluated for unconvertible exponents.
        if exponent is None:
            return
        try:
            result = base ** exponent
        except NotImplementedError:
            return evaluation.message('MatrixPower', 'matrixpowernotimplemented', m)
        return from_sympy(result)
class MatrixExp(Builtin):
    """
    <dl>
    <dt>'MatrixExp[$m$]'
        <dd>computes the exponential of the matrix $m$.
    </dl>

    >> MatrixExp[{{0, 2}, {0, 1}}]
     = {{1, -2 + 2 E}, {0, E}}

    >> MatrixExp[{{1.5, 0.5}, {0.5, 2.0}}]
     = {{5.16266, 3.02952}, {3.02952, 8.19218}}

    #> MatrixExp[{{a, 0}, {0, b}}]
     = {{E ^ a, 0}, {0, E ^ b}}

    #> MatrixExp[{{1, 0}, {0}}]
     : Argument {{1, 0}, {0}} at position 1 is not a non-empty rectangular matrix.
     = MatrixExp[{{1, 0}, {0}}]
    """

    messages = {
        # Bug fix: this message used to say "Matrix power ..." (copied from
        # MatrixPower); it reports a failed matrix *exponential*.
        'matrixexpnotimplemented': ('Matrix exponential not implemented for matrix `1`.'),
        'matrix': "Argument `1` at position `2` is not a non-empty rectangular matrix.",
    }

    # TODO fix precision

    def apply(self, m, evaluation):
        'MatrixExp[m_]'
        sympy_m = to_sympy_matrix(m)
        if sympy_m is None:
            return evaluation.message('MatrixExp', 'matrix', m, 1)
        try:
            res = sympy_m.exp()
        except NotImplementedError:
            return evaluation.message('MatrixExp', 'matrixexpnotimplemented', m)
        return from_sympy(res)
class Norm(Builtin):
    """
    <dl>
    <dt>'Norm[$m$, $l$]'</dt>
        <dd>computes the l-norm of matrix m (currently only works for vectors!).</dd>
    <dt>'Norm[$m$]'</dt>
        <dd>computes the 2-norm of matrix m (currently only works for vectors!).</dd>
    </dl>

    >> Norm[{1, 2, 3, 4}, 2]
     = Sqrt[30]

    >> Norm[{10, 100, 200}, 1]
     = 310

    >> Norm[{a, b, c}]
     = Sqrt[Abs[a] ^ 2 + Abs[b] ^ 2 + Abs[c] ^ 2]

    >> Norm[{-100, 2, 3, 4}, Infinity]
     = 100

    >> Norm[1 + I]
     = Sqrt[2]

    #> Norm[{1, {2, 3}}]
     : The first Norm argument should be a number, vector, or matrix.
     = Norm[{1, {2, 3}}]

    #> Norm[{x, y}]
     = Sqrt[Abs[x] ^ 2 + Abs[y] ^ 2]
    #> Norm[{x, y}, p]
     = (Abs[x] ^ p + Abs[y] ^ p) ^ (1 / p)
    #> Norm[{x, y}, 0]
     : The second argument of Norm, 0, should be a symbol, Infinity, or an integer or real number not less than 1 for vector p-norms; or 1, 2, Infinity, or "Frobenius" for matrix norms.
     = Norm[{x, y}, 0]
    #> Norm[{x, y}, 0.5]
     : The second argument of Norm, 0.5, should be a symbol, Infinity, or an integer or real number not less than 1 for vector p-norms; or 1, 2, Infinity, or "Frobenius" for matrix norms.
     = Norm[{x, y}, 0.5]
    #> Norm[{}]
     = Norm[{}]
    #> Norm[0]
     = 0
    """

    rules = {
        'Norm[m_?NumberQ]': 'Abs[m]',
        'Norm[m_?VectorQ, DirectedInfinity[1]]': 'Max[Abs[m]]',
    }

    messages = {
        'nvm': 'The first Norm argument should be a number, vector, or matrix.',
        'ptype': (
            'The second argument of Norm, `1`, should be a symbol, Infinity, '
            'or an integer or real number not less than 1 for vector p-norms; '
            'or 1, 2, Infinity, or "Frobenius" for matrix norms.'),
        'normnotimplemented': 'Norm is not yet implemented for matrices.',
    }

    def apply_single(self, m, evaluation):
        'Norm[m_]'
        # One-argument form defaults to the 2-norm.
        return self.apply(m, Integer(2), evaluation)

    def apply(self, m, l, evaluation):
        'Norm[m_, l_]'
        # p may be a symbol, or a number >= 1; anything else is rejected.
        p_ok = isinstance(l, Symbol) or (
            isinstance(l, (Real, Integer)) and l.to_python() >= 1)
        if not p_ok:
            return evaluation.message('Norm', 'ptype', l)
        sym_p = l.to_sympy()
        if sym_p is None:
            return
        sym_matrix = to_sympy_matrix(m)
        if sym_matrix is None:
            return evaluation.message('Norm', 'nvm')
        # The empty vector stays unevaluated.
        if len(sym_matrix) == 0:
            return
        try:
            result = sym_matrix.norm(sym_p)
        except NotImplementedError:
            return evaluation.message('Norm', 'normnotimplemented')
        return from_sympy(result)
class Normalize(Builtin):
    """
    <dl>
    <dt>'Normalize[$v$]'
        <dd>calculates the normalized vector $v$.
    <dt>'Normalize[$z$]'
        <dd>calculates the normalized complex number $z$.
    </dl>

    >> Normalize[{1, 1, 1, 1}]
     = {1 / 2, 1 / 2, 1 / 2, 1 / 2}

    >> Normalize[1 + I]
     = (1 / 2 + I / 2) Sqrt[2]

    #> Normalize[0]
     = 0

    #> Normalize[{0}]
     = {0}

    #> Normalize[{}]
     = {}
    """

    # Divide by the norm unless it is zero; a zero norm (and the
    # trailing default clause) returns the input unchanged.
    rules = {
        'Normalize[v_]': 'Module[{norm = Norm[v]}, If[norm == 0, v, v / norm, v]]',
    }
class Eigenvectors(Builtin):
    """
    <dl>
    <dt>'Eigenvectors[$m$]'
        <dd>computes the eigenvectors of the matrix $m$.
    </dl>

    >> Eigenvectors[{{1, 1, 0}, {1, 0, 1}, {0, 1, 1}}]
     = {{1, 1, 1}, {1, -2, 1}, {-1, 0, 1}}
    >> Eigenvectors[{{1, 0, 0}, {0, 1, 0}, {0, 0, 0}}]
     = {{0, 1, 0}, {1, 0, 0}, {0, 0, 1}}
    >> Eigenvectors[{{2, 0, 0}, {0, -1, 0}, {0, 0, 0}}]
     = {{1, 0, 0}, {0, 1, 0}, {0, 0, 1}}
    >> Eigenvectors[{{0.1, 0.2}, {0.8, 0.5}}]
     = {{0.309017, 1.}, {-0.809017, 1.}}

    #> Eigenvectors[{{-2, 1, -1}, {-3, 2, 1}, {-1, 1, 0}}]
     = {{1 / 3, 7 / 3, 1}, {1, 1, 0}, {0, 0, 0}}
    """

    messages = {
        'eigenvecnotimplemented': (
            "Eigenvectors is not yet implemented for the matrix `1`."),
    }

    # TODO: Normalise the eigenvectors

    def apply(self, m, evaluation):
        'Eigenvectors[m_]'
        matrix = to_sympy_matrix(m)
        if matrix is None or matrix.cols != matrix.rows or matrix.cols == 0:
            return evaluation.message('Eigenvectors', 'matsq', m)
        # sympy raises an error for some matrices that Mathematica can compute.
        try:
            eigenvects = matrix.eigenvects()
        except NotImplementedError:
            return evaluation.message(
                'Eigenvectors', 'eigenvecnotimplemented', m)
        # The eigenvectors are given in the same order as the eigenvalues.
        # Robustness fix: symbolic eigenvalues may not be orderable; mirror
        # the TypeError fallback used in Eigenvalues so both builtins agree
        # and a symbolic matrix no longer raises an uncaught TypeError.
        try:
            eigenvects = sorted(
                eigenvects,
                key=lambda val_c_vect: (abs(val_c_vect[0]), -val_c_vect[0]),
                reverse=True)
        except TypeError as e:
            if not str(e).startswith('cannot determine truth value of'):
                raise
            # keep sympy's own ordering
        result = []
        for val, count, basis in eigenvects:
            # Select the i'th basis vector, convert matrix to vector,
            # and convert from sympy
            vects = [from_sympy(list(b)) for b in basis]
            # This follows Mathematica convention better; higher indexed pivots
            # are outputted first. e.g. {{0,1},{1,0}} instead of {{1,0},{0,1}}
            vects.reverse()
            # Add the vectors to results
            result.extend(vects)
        # Pad with zero vectors when the eigenspaces are defective.
        result.extend([Expression('List', *(
            [0] * matrix.rows))] * (matrix.rows - len(result)))
        return Expression('List', *result)
def _norm_calc(head, u, v, evaluation):
    """Evaluate Expression(head, u, v) with messages suppressed.

    Returns the evaluated expression, or None (after issuing Norm::nvm)
    when evaluation left the expression unchanged -- i.e. the arguments
    were not usable as numbers/vectors/matrices.
    """
    candidate = Expression(head, u, v)
    saved_quiet = evaluation.quiet_all
    try:
        evaluation.quiet_all = True
        evaluated = candidate.evaluate(evaluation)
    finally:
        # Always restore the caller's message-suppression state.
        evaluation.quiet_all = saved_quiet
    if evaluated.same(candidate):
        evaluation.message('Norm', 'nvm')
        return None
    return evaluated
class EuclideanDistance(Builtin):
    """
    <dl>
    <dt>'EuclideanDistance[$u$, $v$]'
        <dd>returns the euclidean distance between $u$ and $v$.
    </dl>

    >> EuclideanDistance[-7, 5]
     = 12

    >> EuclideanDistance[{-1, -1}, {1, 1}]
     = 2 Sqrt[2]

    >> EuclideanDistance[{a, b}, {c, d}]
     = Sqrt[Abs[a - c] ^ 2 + Abs[b - d] ^ 2]
    """

    def apply(self, u, v, evaluation):
        'EuclideanDistance[u_, v_]'
        # Norm of the difference; _norm_calc reports bad arguments itself.
        difference = _norm_calc('Subtract', u, v, evaluation)
        if difference is None:
            return
        return Expression('Norm', difference)
class SquaredEuclideanDistance(Builtin):
    """
    <dl>
    <dt>'SquaredEuclideanDistance[$u$, $v$]'
        <dd>returns squared the euclidean distance between $u$ and $v$.
    </dl>

    >> SquaredEuclideanDistance[-7, 5]
     = 144

    >> SquaredEuclideanDistance[{-1, -1}, {1, 1}]
     = 8
    """

    def apply(self, u, v, evaluation):
        'SquaredEuclideanDistance[u_, v_]'
        # Square of the norm of the difference.
        difference = _norm_calc('Subtract', u, v, evaluation)
        if difference is None:
            return
        return Expression('Power', Expression('Norm', difference), 2)
class ManhattanDistance(Builtin):
    """
    <dl>
    <dt>'ManhattanDistance[$u$, $v$]'
        <dd>returns the Manhattan distance between $u$ and $v$, which is the number of horizontal or vertical
        moves in the gridlike Manhattan city layout to get from $u$ to $v$.
    </dl>

    >> ManhattanDistance[-7, 5]
     = 12

    >> ManhattanDistance[{-1, -1}, {1, 1}]
     = 4
    """

    def apply(self, u, v, evaluation):
        'ManhattanDistance[u_, v_]'
        # Sum of absolute componentwise differences.
        difference = _norm_calc('Subtract', u, v, evaluation)
        if difference is None:
            return
        return Expression('Total', Expression('Abs', difference))
class ChessboardDistance(Builtin):
    """
    <dl>
    <dt>'ChessboardDistance[$u$, $v$]'
        <dd>returns the chessboard distance (also known as Chebyshev distance) between $u$ and $v$, which is
        the number of moves a king on a chessboard needs to get from square $u$ to square $v$.
    </dl>

    >> ChessboardDistance[-7, 5]
     = 12

    >> ChessboardDistance[{-1, -1}, {1, 1}]
     = 2
    """

    def apply(self, u, v, evaluation):
        'ChessboardDistance[u_, v_]'
        # Largest absolute componentwise difference.
        difference = _norm_calc('Subtract', u, v, evaluation)
        if difference is None:
            return
        return Expression('Max', Expression('Abs', difference))
class CanberraDistance(Builtin):
    """
    <dl>
    <dt>'CanberraDistance[$u$, $v$]'
        <dd>returns the canberra distance between $u$ and $v$, which is a weighted version of the Manhattan
        distance.
    </dl>

    >> CanberraDistance[-7, 5]
     = 1

    >> CanberraDistance[{-1, -1}, {1, 1}]
     = 2
    """

    def apply(self, u, v, evaluation):
        'CanberraDistance[u_, v_]'
        # Total of |u - v| / (|u| + |v|), computed componentwise.
        difference = _norm_calc('Subtract', u, v, evaluation)
        if difference is None:
            return
        denominator = Expression(
            'Plus', Expression('Abs', u), Expression('Abs', v))
        return Expression(
            'Total',
            Expression('Divide', Expression('Abs', difference), denominator))
class BrayCurtisDistance(Builtin):
    """
    <dl>
    <dt>'BrayCurtisDistance[$u$, $v$]'
        <dd>returns the Bray Curtis distance between $u$ and $v$.
    </dl>

    >> BrayCurtisDistance[-7, 5]
     = 6

    >> BrayCurtisDistance[{-1, -1}, {10, 10}]
     = 11 / 9
    """

    def apply(self, u, v, evaluation):
        'BrayCurtisDistance[u_, v_]'
        # Total[|u - v|] / Total[|u + v|].
        difference = _norm_calc('Subtract', u, v, evaluation)
        if difference is None:
            return
        numerator = Expression('Total', Expression('Abs', difference))
        denominator = Expression(
            'Total', Expression('Abs', Expression('Plus', u, v)))
        return Expression('Divide', numerator, denominator)
class CosineDistance(Builtin):
    """
    <dl>
    <dt>'CosineDistance[$u$, $v$]'
        <dd>returns the cosine distance between $u$ and $v$.
    </dl>

    >> N[CosineDistance[{7, 9}, {71, 89}]]
     = 0.0000759646

    >> CosineDistance[{a, b}, {c, d}]
     = 1 + (-a c - b d) / (Sqrt[Abs[a] ^ 2 + Abs[b] ^ 2] Sqrt[Abs[c] ^ 2 + Abs[d] ^ 2])
    """

    def apply(self, u, v, evaluation):
        'CosineDistance[u_, v_]'
        # 1 - (u.v) / (|u| |v|); _norm_calc validates the dot product.
        dot = _norm_calc('Dot', u, v, evaluation)
        if dot is None:
            return
        norm_product = Expression(
            'Times', Expression('Norm', u), Expression('Norm', v))
        return Expression(
            'Subtract', 1, Expression('Divide', dot, norm_product))
| 29.982891 | 187 | 0.520428 |
918a05990f8dc9db1d2e935f7ff793252c3efb0f | 40,425 | py | Python | Cython/Compiler/FusedNode.py | shreyanavigyan/cython | 217a93e7b139ab49815ea9ca34142b3e2a116a08 | [
"Apache-2.0"
] | 6 | 2021-05-25T13:15:13.000Z | 2022-01-05T06:15:26.000Z | Cython/Compiler/FusedNode.py | shreyanavigyan/cython | 217a93e7b139ab49815ea9ca34142b3e2a116a08 | [
"Apache-2.0"
] | 238 | 2020-02-12T21:01:15.000Z | 2020-10-01T11:46:25.000Z | Cython/Compiler/FusedNode.py | shreyanavigyan/cython | 217a93e7b139ab49815ea9ca34142b3e2a116a08 | [
"Apache-2.0"
] | 1 | 2021-09-07T12:03:18.000Z | 2021-09-07T12:03:18.000Z | from __future__ import absolute_import
import copy
from . import (ExprNodes, PyrexTypes, MemoryView,
ParseTreeTransforms, StringEncoding, Errors)
from .ExprNodes import CloneNode, ProxyNode, TupleNode
from .Nodes import FuncDefNode, CFuncDefNode, StatListNode, DefNode
from ..Utils import OrderedSet
from .Errors import error, CannotSpecialize
class FusedCFuncDefNode(StatListNode):
"""
This node replaces a function with fused arguments. It deep-copies the
function for every permutation of fused types, and allocates a new local
scope for it. It keeps track of the original function in self.node, and
the entry of the original function in the symbol table is given the
'fused_cfunction' attribute which points back to us.
Then when a function lookup occurs (to e.g. call it), the call can be
dispatched to the right function.
node FuncDefNode the original function
nodes [FuncDefNode] list of copies of node with different specific types
py_func DefNode the fused python function subscriptable from
Python space
__signatures__ A DictNode mapping signature specialization strings
to PyCFunction nodes
resulting_fused_function PyCFunction for the fused DefNode that delegates
to specializations
fused_func_assignment Assignment of the fused function to the function name
defaults_tuple TupleNode of defaults (letting PyCFunctionNode build
defaults would result in many different tuples)
specialized_pycfuncs List of synthesized pycfunction nodes for the
specializations
code_object CodeObjectNode shared by all specializations and the
fused function
fused_compound_types All fused (compound) types (e.g. floating[:])
"""
__signatures__ = None
resulting_fused_function = None
fused_func_assignment = None
defaults_tuple = None
decorators = None
child_attrs = StatListNode.child_attrs + [
'__signatures__', 'resulting_fused_function', 'fused_func_assignment']
def __init__(self, node, env):
super(FusedCFuncDefNode, self).__init__(node.pos)
self.nodes = []
self.node = node
is_def = isinstance(self.node, DefNode)
if is_def:
# self.node.decorators = []
self.copy_def(env)
else:
self.copy_cdef(env)
# Perform some sanity checks. If anything fails, it's a bug
for n in self.nodes:
assert not n.entry.type.is_fused
assert not n.local_scope.return_type.is_fused
if node.return_type.is_fused:
assert not n.return_type.is_fused
if not is_def and n.cfunc_declarator.optional_arg_count:
assert n.type.op_arg_struct
node.entry.fused_cfunction = self
# Copy the nodes as AnalyseDeclarationsTransform will prepend
# self.py_func to self.stats, as we only want specialized
# CFuncDefNodes in self.nodes
self.stats = self.nodes[:]
def copy_def(self, env):
"""
Create a copy of the original def or lambda function for specialized
versions.
"""
fused_compound_types = PyrexTypes.unique(
[arg.type for arg in self.node.args if arg.type.is_fused])
fused_types = self._get_fused_base_types(fused_compound_types)
permutations = PyrexTypes.get_all_specialized_permutations(fused_types)
self.fused_compound_types = fused_compound_types
if self.node.entry in env.pyfunc_entries:
env.pyfunc_entries.remove(self.node.entry)
for cname, fused_to_specific in permutations:
copied_node = copy.deepcopy(self.node)
# keep signature object identity for special casing in DefNode.analyse_declarations()
copied_node.entry.signature = self.node.entry.signature
self._specialize_function_args(copied_node.args, fused_to_specific)
copied_node.return_type = self.node.return_type.specialize(
fused_to_specific)
copied_node.analyse_declarations(env)
# copied_node.is_staticmethod = self.node.is_staticmethod
# copied_node.is_classmethod = self.node.is_classmethod
self.create_new_local_scope(copied_node, env, fused_to_specific)
self.specialize_copied_def(copied_node, cname, self.node.entry,
fused_to_specific, fused_compound_types)
PyrexTypes.specialize_entry(copied_node.entry, cname)
copied_node.entry.used = True
env.entries[copied_node.entry.name] = copied_node.entry
if not self.replace_fused_typechecks(copied_node):
break
self.orig_py_func = self.node
self.py_func = self.make_fused_cpdef(self.node, env, is_def=True)
def copy_cdef(self, env):
"""
Create a copy of the original c(p)def function for all specialized
versions.
"""
permutations = self.node.type.get_all_specialized_permutations()
# print 'Node %s has %d specializations:' % (self.node.entry.name,
# len(permutations))
# import pprint; pprint.pprint([d for cname, d in permutations])
# Prevent copying of the python function
self.orig_py_func = orig_py_func = self.node.py_func
self.node.py_func = None
if orig_py_func:
env.pyfunc_entries.remove(orig_py_func.entry)
fused_types = self.node.type.get_fused_types()
self.fused_compound_types = fused_types
new_cfunc_entries = []
for cname, fused_to_specific in permutations:
copied_node = copy.deepcopy(self.node)
# Make the types in our CFuncType specific.
try:
type = copied_node.type.specialize(fused_to_specific)
except CannotSpecialize:
# unlike for the argument types, specializing the return type can fail
error(copied_node.pos, "Return type is a fused type that cannot "
"be determined from the function arguments")
self.py_func = None # this is just to let the compiler exit gracefully
return
entry = copied_node.entry
type.specialize_entry(entry, cname)
# Reuse existing Entries (e.g. from .pxd files).
for i, orig_entry in enumerate(env.cfunc_entries):
if entry.cname == orig_entry.cname and type.same_as_resolved_type(orig_entry.type):
copied_node.entry = env.cfunc_entries[i]
if not copied_node.entry.func_cname:
copied_node.entry.func_cname = entry.func_cname
entry = copied_node.entry
type = entry.type
break
else:
new_cfunc_entries.append(entry)
copied_node.type = type
entry.type, type.entry = type, entry
entry.used = (entry.used or
self.node.entry.defined_in_pxd or
env.is_c_class_scope or
entry.is_cmethod)
if self.node.cfunc_declarator.optional_arg_count:
self.node.cfunc_declarator.declare_optional_arg_struct(
type, env, fused_cname=cname)
copied_node.return_type = type.return_type
self.create_new_local_scope(copied_node, env, fused_to_specific)
# Make the argument types in the CFuncDeclarator specific
self._specialize_function_args(copied_node.cfunc_declarator.args,
fused_to_specific)
# If a cpdef, declare all specialized cpdefs (this
# also calls analyse_declarations)
copied_node.declare_cpdef_wrapper(env)
if copied_node.py_func:
env.pyfunc_entries.remove(copied_node.py_func.entry)
self.specialize_copied_def(
copied_node.py_func, cname, self.node.entry.as_variable,
fused_to_specific, fused_types)
if not self.replace_fused_typechecks(copied_node):
break
# replace old entry with new entries
try:
cindex = env.cfunc_entries.index(self.node.entry)
except ValueError:
env.cfunc_entries.extend(new_cfunc_entries)
else:
env.cfunc_entries[cindex:cindex+1] = new_cfunc_entries
if orig_py_func:
self.py_func = self.make_fused_cpdef(orig_py_func, env,
is_def=False)
else:
self.py_func = orig_py_func
def _get_fused_base_types(self, fused_compound_types):
"""
Get a list of unique basic fused types, from a list of
(possibly) compound fused types.
"""
base_types = []
seen = set()
for fused_type in fused_compound_types:
fused_type.get_fused_types(result=base_types, seen=seen)
return base_types
def _specialize_function_args(self, args, fused_to_specific):
for arg in args:
if arg.type.is_fused:
arg.type = arg.type.specialize(fused_to_specific)
if arg.type.is_memoryviewslice:
arg.type.validate_memslice_dtype(arg.pos)
if arg.annotation:
# TODO might be nice if annotations were specialized instead?
# (Or might be hard to do reliably)
arg.annotation.untyped = True
def create_new_local_scope(self, node, env, f2s):
"""
Create a new local scope for the copied node and append it to
self.nodes. A new local scope is needed because the arguments with the
fused types are already in the local scope, and we need the specialized
entries created after analyse_declarations on each specialized version
of the (CFunc)DefNode.
f2s is a dict mapping each fused type to its specialized version
"""
node.create_local_scope(env)
node.local_scope.fused_to_specific = f2s
# This is copied from the original function, set it to false to
# stop recursion
node.has_fused_arguments = False
self.nodes.append(node)
def specialize_copied_def(self, node, cname, py_entry, f2s, fused_compound_types):
"""Specialize the copy of a DefNode given the copied node,
the specialization cname and the original DefNode entry"""
fused_types = self._get_fused_base_types(fused_compound_types)
type_strings = [
PyrexTypes.specialization_signature_string(fused_type, f2s)
for fused_type in fused_types
]
node.specialized_signature_string = '|'.join(type_strings)
node.entry.pymethdef_cname = PyrexTypes.get_fused_cname(
cname, node.entry.pymethdef_cname)
node.entry.doc = py_entry.doc
node.entry.doc_cname = py_entry.doc_cname
def replace_fused_typechecks(self, copied_node):
"""
Branch-prune fused type checks like
if fused_t is int:
...
Returns whether an error was issued and whether we should stop in
in order to prevent a flood of errors.
"""
num_errors = Errors.num_errors
transform = ParseTreeTransforms.ReplaceFusedTypeChecks(
copied_node.local_scope)
transform(copied_node)
if Errors.num_errors > num_errors:
return False
return True
def _fused_instance_checks(self, normal_types, pyx_code, env):
"""
Generate Cython code for instance checks, matching an object to
specialized types.
"""
for specialized_type in normal_types:
# all_numeric = all_numeric and specialized_type.is_numeric
pyx_code.context.update(
py_type_name=specialized_type.py_type_name(),
specialized_type_name=specialized_type.specialization_string,
)
pyx_code.put_chunk(
u"""
if isinstance(arg, {{py_type_name}}):
dest_sig[{{dest_sig_idx}}] = '{{specialized_type_name}}'; break
""")
def _dtype_name(self, dtype):
if dtype.is_typedef:
return '___pyx_%s' % dtype
return str(dtype).replace(' ', '_')
def _dtype_type(self, dtype):
if dtype.is_typedef:
return self._dtype_name(dtype)
return str(dtype)
def _sizeof_dtype(self, dtype):
if dtype.is_pyobject:
return 'sizeof(void *)'
else:
return "sizeof(%s)" % self._dtype_type(dtype)
def _buffer_check_numpy_dtype_setup_cases(self, pyx_code):
"Setup some common cases to match dtypes against specializations"
if pyx_code.indenter("if kind in b'iu':"):
pyx_code.putln("pass")
pyx_code.named_insertion_point("dtype_int")
pyx_code.dedent()
if pyx_code.indenter("elif kind == b'f':"):
pyx_code.putln("pass")
pyx_code.named_insertion_point("dtype_float")
pyx_code.dedent()
if pyx_code.indenter("elif kind == b'c':"):
pyx_code.putln("pass")
pyx_code.named_insertion_point("dtype_complex")
pyx_code.dedent()
if pyx_code.indenter("elif kind == b'O':"):
pyx_code.putln("pass")
pyx_code.named_insertion_point("dtype_object")
pyx_code.dedent()
match = "dest_sig[{{dest_sig_idx}}] = '{{specialized_type_name}}'"
no_match = "dest_sig[{{dest_sig_idx}}] = None"
def _buffer_check_numpy_dtype(self, pyx_code, specialized_buffer_types, pythran_types):
"""
Match a numpy dtype object to the individual specializations.
"""
self._buffer_check_numpy_dtype_setup_cases(pyx_code)
for specialized_type in pythran_types+specialized_buffer_types:
final_type = specialized_type
if specialized_type.is_pythran_expr:
specialized_type = specialized_type.org_buffer
dtype = specialized_type.dtype
pyx_code.context.update(
itemsize_match=self._sizeof_dtype(dtype) + " == itemsize",
signed_match="not (%s_is_signed ^ dtype_signed)" % self._dtype_name(dtype),
dtype=dtype,
specialized_type_name=final_type.specialization_string)
dtypes = [
(dtype.is_int, pyx_code.dtype_int),
(dtype.is_float, pyx_code.dtype_float),
(dtype.is_complex, pyx_code.dtype_complex)
]
for dtype_category, codewriter in dtypes:
if dtype_category:
cond = '{{itemsize_match}} and (<Py_ssize_t>arg.ndim) == %d' % (
specialized_type.ndim,)
if dtype.is_int:
cond += ' and {{signed_match}}'
if final_type.is_pythran_expr:
cond += ' and arg_is_pythran_compatible'
if codewriter.indenter("if %s:" % cond):
#codewriter.putln("print 'buffer match found based on numpy dtype'")
codewriter.putln(self.match)
codewriter.putln("break")
codewriter.dedent()
def _buffer_parse_format_string_check(self, pyx_code, decl_code,
specialized_type, env):
"""
For each specialized type, try to coerce the object to a memoryview
slice of that type. This means obtaining a buffer and parsing the
format string.
TODO: separate buffer acquisition from format parsing
"""
dtype = specialized_type.dtype
if specialized_type.is_buffer:
axes = [('direct', 'strided')] * specialized_type.ndim
else:
axes = specialized_type.axes
memslice_type = PyrexTypes.MemoryViewSliceType(dtype, axes)
memslice_type.create_from_py_utility_code(env)
pyx_code.context.update(
coerce_from_py_func=memslice_type.from_py_function,
dtype=dtype)
decl_code.putln(
"{{memviewslice_cname}} {{coerce_from_py_func}}(object, int)")
pyx_code.context.update(
specialized_type_name=specialized_type.specialization_string,
sizeof_dtype=self._sizeof_dtype(dtype))
pyx_code.put_chunk(
u"""
# try {{dtype}}
if itemsize == -1 or itemsize == {{sizeof_dtype}}:
memslice = {{coerce_from_py_func}}(arg, 0)
if memslice.memview:
__PYX_XCLEAR_MEMVIEW(&memslice, 1)
# print 'found a match for the buffer through format parsing'
%s
break
else:
__pyx_PyErr_Clear()
""" % self.match)
def _buffer_checks(self, buffer_types, pythran_types, pyx_code, decl_code, env):
"""
Generate Cython code to match objects to buffer specializations.
First try to get a numpy dtype object and match it against the individual
specializations. If that fails, try naively to coerce the object
to each specialization, which obtains the buffer each time and tries
to match the format string.
"""
# The first thing to find a match in this loop breaks out of the loop
pyx_code.put_chunk(
u"""
""" + (u"arg_is_pythran_compatible = False" if pythran_types else u"") + u"""
if ndarray is not None:
if isinstance(arg, ndarray):
dtype = arg.dtype
""" + (u"arg_is_pythran_compatible = True" if pythran_types else u"") + u"""
elif __pyx_memoryview_check(arg):
arg_base = arg.base
if isinstance(arg_base, ndarray):
dtype = arg_base.dtype
else:
dtype = None
else:
dtype = None
itemsize = -1
if dtype is not None:
itemsize = dtype.itemsize
kind = ord(dtype.kind)
dtype_signed = kind == 'i'
""")
pyx_code.indent(2)
if pythran_types:
pyx_code.put_chunk(
u"""
# Pythran only supports the endianness of the current compiler
byteorder = dtype.byteorder
if byteorder == "<" and not __Pyx_Is_Little_Endian():
arg_is_pythran_compatible = False
elif byteorder == ">" and __Pyx_Is_Little_Endian():
arg_is_pythran_compatible = False
if arg_is_pythran_compatible:
cur_stride = itemsize
shape = arg.shape
strides = arg.strides
for i in range(arg.ndim-1, -1, -1):
if (<Py_ssize_t>strides[i]) != cur_stride:
arg_is_pythran_compatible = False
break
cur_stride *= <Py_ssize_t> shape[i]
else:
arg_is_pythran_compatible = not (arg.flags.f_contiguous and (<Py_ssize_t>arg.ndim) > 1)
""")
pyx_code.named_insertion_point("numpy_dtype_checks")
self._buffer_check_numpy_dtype(pyx_code, buffer_types, pythran_types)
pyx_code.dedent(2)
for specialized_type in buffer_types:
self._buffer_parse_format_string_check(
pyx_code, decl_code, specialized_type, env)
def _buffer_declarations(self, pyx_code, decl_code, all_buffer_types, pythran_types):
"""
If we have any buffer specializations, write out some variable
declarations and imports.
"""
decl_code.put_chunk(
u"""
ctypedef struct {{memviewslice_cname}}:
void *memview
void __PYX_XCLEAR_MEMVIEW({{memviewslice_cname}} *, int have_gil)
bint __pyx_memoryview_check(object)
""")
pyx_code.local_variable_declarations.put_chunk(
u"""
cdef {{memviewslice_cname}} memslice
cdef Py_ssize_t itemsize
cdef bint dtype_signed
cdef char kind
itemsize = -1
""")
if pythran_types:
pyx_code.local_variable_declarations.put_chunk(u"""
cdef bint arg_is_pythran_compatible
cdef Py_ssize_t cur_stride
""")
pyx_code.imports.put_chunk(
u"""
cdef type ndarray
ndarray = __Pyx_ImportNumPyArrayTypeIfAvailable()
""")
seen_typedefs = set()
seen_int_dtypes = set()
for buffer_type in all_buffer_types:
dtype = buffer_type.dtype
dtype_name = self._dtype_name(dtype)
if dtype.is_typedef:
if dtype_name not in seen_typedefs:
seen_typedefs.add(dtype_name)
decl_code.putln(
'ctypedef %s %s "%s"' % (dtype.resolve(), dtype_name,
dtype.empty_declaration_code()))
if buffer_type.dtype.is_int:
if str(dtype) not in seen_int_dtypes:
seen_int_dtypes.add(str(dtype))
pyx_code.context.update(dtype_name=dtype_name,
dtype_type=self._dtype_type(dtype))
pyx_code.local_variable_declarations.put_chunk(
u"""
cdef bint {{dtype_name}}_is_signed
{{dtype_name}}_is_signed = not (<{{dtype_type}}> -1 > 0)
""")
def _split_fused_types(self, arg):
"""
Specialize fused types and split into normal types and buffer types.
"""
specialized_types = PyrexTypes.get_specialized_types(arg.type)
# Prefer long over int, etc by sorting (see type classes in PyrexTypes.py)
specialized_types.sort()
seen_py_type_names = set()
normal_types, buffer_types, pythran_types = [], [], []
has_object_fallback = False
for specialized_type in specialized_types:
py_type_name = specialized_type.py_type_name()
if py_type_name:
if py_type_name in seen_py_type_names:
continue
seen_py_type_names.add(py_type_name)
if py_type_name == 'object':
has_object_fallback = True
else:
normal_types.append(specialized_type)
elif specialized_type.is_pythran_expr:
pythran_types.append(specialized_type)
elif specialized_type.is_buffer or specialized_type.is_memoryviewslice:
buffer_types.append(specialized_type)
return normal_types, buffer_types, pythran_types, has_object_fallback
def _unpack_argument(self, pyx_code):
pyx_code.put_chunk(
u"""
# PROCESSING ARGUMENT {{arg_tuple_idx}}
if {{arg_tuple_idx}} < len(<tuple>args):
arg = (<tuple>args)[{{arg_tuple_idx}}]
elif kwargs is not None and '{{arg.name}}' in <dict>kwargs:
arg = (<dict>kwargs)['{{arg.name}}']
else:
{{if arg.default}}
arg = (<tuple>defaults)[{{default_idx}}]
{{else}}
{{if arg_tuple_idx < min_positional_args}}
raise TypeError("Expected at least %d argument%s, got %d" % (
{{min_positional_args}}, {{'"s"' if min_positional_args != 1 else '""'}}, len(<tuple>args)))
{{else}}
raise TypeError("Missing keyword-only argument: '%s'" % "{{arg.default}}")
{{endif}}
{{endif}}
""")
def _fused_signature_index(self, pyx_code):
"""
Generate Cython code for constructing a persistent nested dictionary index of
fused type specialization signatures.
"""
pyx_code.put_chunk(
u"""
if not _fused_sigindex:
for sig in <dict>signatures:
sigindex_node = _fused_sigindex
*sig_series, last_type = sig.strip('()').split('|')
for sig_type in sig_series:
if sig_type not in sigindex_node:
sigindex_node[sig_type] = sigindex_node = {}
else:
sigindex_node = sigindex_node[sig_type]
sigindex_node[last_type] = sig
"""
)
def make_fused_cpdef(self, orig_py_func, env, is_def):
"""
This creates the function that is indexable from Python and does
runtime dispatch based on the argument types. The function gets the
arg tuple and kwargs dict (or None) and the defaults tuple
as arguments from the Binding Fused Function's tp_call.
"""
from . import TreeFragment, Code, UtilityCode
fused_types = self._get_fused_base_types([
arg.type for arg in self.node.args if arg.type.is_fused])
context = {
'memviewslice_cname': MemoryView.memviewslice_cname,
'func_args': self.node.args,
'n_fused': len(fused_types),
'min_positional_args':
self.node.num_required_args - self.node.num_required_kw_args
if is_def else
sum(1 for arg in self.node.args if arg.default is None),
'name': orig_py_func.entry.name,
}
pyx_code = Code.PyxCodeWriter(context=context)
decl_code = Code.PyxCodeWriter(context=context)
decl_code.put_chunk(
u"""
cdef extern from *:
void __pyx_PyErr_Clear "PyErr_Clear" ()
type __Pyx_ImportNumPyArrayTypeIfAvailable()
int __Pyx_Is_Little_Endian()
""")
decl_code.indent()
pyx_code.put_chunk(
u"""
def __pyx_fused_cpdef(signatures, args, kwargs, defaults, _fused_sigindex={}):
# FIXME: use a typed signature - currently fails badly because
# default arguments inherit the types we specify here!
cdef list search_list
cdef dict sn, sigindex_node
dest_sig = [None] * {{n_fused}}
if kwargs is not None and not kwargs:
kwargs = None
cdef Py_ssize_t i
# instance check body
""")
pyx_code.indent() # indent following code to function body
pyx_code.named_insertion_point("imports")
pyx_code.named_insertion_point("func_defs")
pyx_code.named_insertion_point("local_variable_declarations")
fused_index = 0
default_idx = 0
all_buffer_types = OrderedSet()
seen_fused_types = set()
for i, arg in enumerate(self.node.args):
if arg.type.is_fused:
arg_fused_types = arg.type.get_fused_types()
if len(arg_fused_types) > 1:
raise NotImplementedError("Determination of more than one fused base "
"type per argument is not implemented.")
fused_type = arg_fused_types[0]
if arg.type.is_fused and fused_type not in seen_fused_types:
seen_fused_types.add(fused_type)
context.update(
arg_tuple_idx=i,
arg=arg,
dest_sig_idx=fused_index,
default_idx=default_idx,
)
normal_types, buffer_types, pythran_types, has_object_fallback = self._split_fused_types(arg)
self._unpack_argument(pyx_code)
# 'unrolled' loop, first match breaks out of it
if pyx_code.indenter("while 1:"):
if normal_types:
self._fused_instance_checks(normal_types, pyx_code, env)
if buffer_types or pythran_types:
env.use_utility_code(Code.UtilityCode.load_cached("IsLittleEndian", "ModuleSetupCode.c"))
self._buffer_checks(buffer_types, pythran_types, pyx_code, decl_code, env)
if has_object_fallback:
pyx_code.context.update(specialized_type_name='object')
pyx_code.putln(self.match)
else:
pyx_code.putln(self.no_match)
pyx_code.putln("break")
pyx_code.dedent()
fused_index += 1
all_buffer_types.update(buffer_types)
all_buffer_types.update(ty.org_buffer for ty in pythran_types)
if arg.default:
default_idx += 1
if all_buffer_types:
self._buffer_declarations(pyx_code, decl_code, all_buffer_types, pythran_types)
env.use_utility_code(Code.UtilityCode.load_cached("Import", "ImportExport.c"))
env.use_utility_code(Code.UtilityCode.load_cached("ImportNumPyArray", "ImportExport.c"))
self._fused_signature_index(pyx_code)
pyx_code.put_chunk(
u"""
sigindex_matches = []
sigindex_candidates = [_fused_sigindex]
for dst_type in dest_sig:
found_matches = []
found_candidates = []
# Make two seperate lists: One for signature sub-trees
# with at least one definite match, and another for
# signature sub-trees with only ambiguous matches
# (where `dest_sig[i] is None`).
if dst_type is None:
for sn in sigindex_matches:
found_matches.extend(sn.values())
for sn in sigindex_candidates:
found_candidates.extend(sn.values())
else:
for search_list in (sigindex_matches, sigindex_candidates):
for sn in search_list:
if dst_type in sn:
found_matches.append(sn[dst_type])
sigindex_matches = found_matches
sigindex_candidates = found_candidates
if not (found_matches or found_candidates):
break
candidates = sigindex_matches
if not candidates:
raise TypeError("No matching signature found")
elif len(candidates) > 1:
raise TypeError("Function call with ambiguous argument types")
else:
return (<dict>signatures)[candidates[0]]
""")
fragment_code = pyx_code.getvalue()
# print decl_code.getvalue()
# print fragment_code
from .Optimize import ConstantFolding
fragment = TreeFragment.TreeFragment(
fragment_code, level='module', pipeline=[ConstantFolding()])
ast = TreeFragment.SetPosTransform(self.node.pos)(fragment.root)
UtilityCode.declare_declarations_in_scope(
decl_code.getvalue(), env.global_scope())
ast.scope = env
# FIXME: for static methods of cdef classes, we build the wrong signature here: first arg becomes 'self'
ast.analyse_declarations(env)
py_func = ast.stats[-1] # the DefNode
self.fragment_scope = ast.scope
if isinstance(self.node, DefNode):
py_func.specialized_cpdefs = self.nodes[:]
else:
py_func.specialized_cpdefs = [n.py_func for n in self.nodes]
return py_func
def update_fused_defnode_entry(self, env):
copy_attributes = (
'name', 'pos', 'cname', 'func_cname', 'pyfunc_cname',
'pymethdef_cname', 'doc', 'doc_cname', 'is_member',
'scope'
)
entry = self.py_func.entry
for attr in copy_attributes:
setattr(entry, attr,
getattr(self.orig_py_func.entry, attr))
self.py_func.name = self.orig_py_func.name
self.py_func.doc = self.orig_py_func.doc
env.entries.pop('__pyx_fused_cpdef', None)
if isinstance(self.node, DefNode):
env.entries[entry.name] = entry
else:
env.entries[entry.name].as_variable = entry
env.pyfunc_entries.append(entry)
self.py_func.entry.fused_cfunction = self
for node in self.nodes:
if isinstance(self.node, DefNode):
node.fused_py_func = self.py_func
else:
node.py_func.fused_py_func = self.py_func
node.entry.as_variable = entry
self.synthesize_defnodes()
self.stats.append(self.__signatures__)
def analyse_expressions(self, env):
"""
Analyse the expressions. Take care to only evaluate default arguments
once and clone the result for all specializations
"""
for fused_compound_type in self.fused_compound_types:
for fused_type in fused_compound_type.get_fused_types():
for specialization_type in fused_type.types:
if specialization_type.is_complex:
specialization_type.create_declaration_utility_code(env)
if self.py_func:
self.__signatures__ = self.__signatures__.analyse_expressions(env)
self.py_func = self.py_func.analyse_expressions(env)
self.resulting_fused_function = self.resulting_fused_function.analyse_expressions(env)
self.fused_func_assignment = self.fused_func_assignment.analyse_expressions(env)
self.defaults = defaults = []
for arg in self.node.args:
if arg.default:
arg.default = arg.default.analyse_expressions(env)
defaults.append(ProxyNode(arg.default))
else:
defaults.append(None)
for i, stat in enumerate(self.stats):
stat = self.stats[i] = stat.analyse_expressions(env)
if isinstance(stat, FuncDefNode) and stat is not self.py_func:
# the dispatcher specifically doesn't want its defaults overriding
for arg, default in zip(stat.args, defaults):
if default is not None:
arg.default = CloneNode(default).coerce_to(arg.type, env)
if self.py_func:
args = [CloneNode(default) for default in defaults if default]
self.defaults_tuple = TupleNode(self.pos, args=args)
self.defaults_tuple = self.defaults_tuple.analyse_types(env, skip_children=True).coerce_to_pyobject(env)
self.defaults_tuple = ProxyNode(self.defaults_tuple)
self.code_object = ProxyNode(self.specialized_pycfuncs[0].code_object)
fused_func = self.resulting_fused_function.arg
fused_func.defaults_tuple = CloneNode(self.defaults_tuple)
fused_func.code_object = CloneNode(self.code_object)
for i, pycfunc in enumerate(self.specialized_pycfuncs):
pycfunc.code_object = CloneNode(self.code_object)
pycfunc = self.specialized_pycfuncs[i] = pycfunc.analyse_types(env)
pycfunc.defaults_tuple = CloneNode(self.defaults_tuple)
return self
def synthesize_defnodes(self):
"""
Create the __signatures__ dict of PyCFunctionNode specializations.
"""
if isinstance(self.nodes[0], CFuncDefNode):
nodes = [node.py_func for node in self.nodes]
else:
nodes = self.nodes
# For the moment, fused functions do not support METH_FASTCALL
for node in nodes:
node.entry.signature.use_fastcall = False
signatures = [StringEncoding.EncodedString(node.specialized_signature_string)
for node in nodes]
keys = [ExprNodes.StringNode(node.pos, value=sig)
for node, sig in zip(nodes, signatures)]
values = [ExprNodes.PyCFunctionNode.from_defnode(node, binding=True)
for node in nodes]
self.__signatures__ = ExprNodes.DictNode.from_pairs(self.pos, zip(keys, values))
self.specialized_pycfuncs = values
for pycfuncnode in values:
pycfuncnode.is_specialization = True
def generate_function_definitions(self, env, code):
if self.py_func:
self.py_func.pymethdef_required = True
self.fused_func_assignment.generate_function_definitions(env, code)
for stat in self.stats:
if isinstance(stat, FuncDefNode) and stat.entry.used:
code.mark_pos(stat.pos)
stat.generate_function_definitions(env, code)
def generate_execution_code(self, code):
# Note: all def function specialization are wrapped in PyCFunction
# nodes in the self.__signatures__ dictnode.
for default in self.defaults:
if default is not None:
default.generate_evaluation_code(code)
if self.py_func:
self.defaults_tuple.generate_evaluation_code(code)
self.code_object.generate_evaluation_code(code)
for stat in self.stats:
code.mark_pos(stat.pos)
if isinstance(stat, ExprNodes.ExprNode):
stat.generate_evaluation_code(code)
else:
stat.generate_execution_code(code)
if self.__signatures__:
self.resulting_fused_function.generate_evaluation_code(code)
code.putln(
"((__pyx_FusedFunctionObject *) %s)->__signatures__ = %s;" %
(self.resulting_fused_function.result(),
self.__signatures__.result()))
self.__signatures__.generate_giveref(code)
self.__signatures__.generate_post_assignment_code(code)
self.__signatures__.free_temps(code)
self.fused_func_assignment.generate_execution_code(code)
# Dispose of results
self.resulting_fused_function.generate_disposal_code(code)
self.resulting_fused_function.free_temps(code)
self.defaults_tuple.generate_disposal_code(code)
self.defaults_tuple.free_temps(code)
self.code_object.generate_disposal_code(code)
self.code_object.free_temps(code)
for default in self.defaults:
if default is not None:
default.generate_disposal_code(code)
default.free_temps(code)
def annotate(self, code):
for stat in self.stats:
stat.annotate(code)
| 42.285565 | 120 | 0.582066 |
75578bc84815a5fea9a1b7cacdbf74cddb3e1d0c | 671 | py | Python | tests/analysis/init_imports.py | joshlemon/plaso | 9f8e05f21fa23793bfdade6af1d617e9dd092531 | [
"Apache-2.0"
] | 1 | 2020-10-29T18:23:25.000Z | 2020-10-29T18:23:25.000Z | tests/analysis/init_imports.py | joshlemon/plaso | 9f8e05f21fa23793bfdade6af1d617e9dd092531 | [
"Apache-2.0"
] | null | null | null | tests/analysis/init_imports.py | joshlemon/plaso | 9f8e05f21fa23793bfdade6af1d617e9dd092531 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests that all analysis plugins are imported correctly."""
from __future__ import unicode_literals
import unittest
from tests import test_lib
class AnalysisImportTest(test_lib.ImportCheckTestCase):
"""Tests that analysis plugin classes are imported correctly."""
_IGNORABLE_FILES = frozenset([
'logger.py', 'manager.py', 'definitions.py', 'mediator.py',
'interface.py'])
def testAnalysisPluginsImported(self):
"""Tests that all parsers are imported."""
self._AssertFilesImportedInInit(
test_lib.ANALYSIS_PATH, self._IGNORABLE_FILES)
if __name__ == '__main__':
unittest.main()
| 24.851852 | 66 | 0.725782 |
8878c41bae9497ca1658abf482f679c1313ddb34 | 9,823 | py | Python | lynx_code/ecc_fast.py | enkrypter/Lynx-wallet | 166b7e5810f017a6e12bf96e54b0d44767b2a901 | [
"MIT"
] | 2 | 2019-09-19T10:57:19.000Z | 2019-10-29T20:39:26.000Z | lynx_code/ecc_fast.py | enkrypter/Lynx-wallet | 166b7e5810f017a6e12bf96e54b0d44767b2a901 | [
"MIT"
] | 4 | 2019-11-17T17:40:13.000Z | 2020-01-22T12:13:02.000Z | lynx_code/ecc_fast.py | enkrypter/Lynx-wallet | 166b7e5810f017a6e12bf96e54b0d44767b2a901 | [
"MIT"
] | 2 | 2020-03-17T21:42:56.000Z | 2020-05-02T14:10:40.000Z | # taken (with minor modifications) from pycoin
# https://github.com/richardkiss/pycoin/blob/01b1787ed902df23f99a55deb00d8cd076a906fe/pycoin/ecdsa/native/secp256k1.py
import os
import sys
import traceback
import ctypes
from ctypes.util import find_library
from ctypes import (
byref, c_byte, c_int, c_uint, c_char_p, c_size_t, c_void_p, create_string_buffer, CFUNCTYPE, POINTER
)
import ecdsa
from .logging import get_logger
_logger = get_logger(__name__)
SECP256K1_FLAGS_TYPE_MASK = ((1 << 8) - 1)
SECP256K1_FLAGS_TYPE_CONTEXT = (1 << 0)
SECP256K1_FLAGS_TYPE_COMPRESSION = (1 << 1)
# /** The higher bits contain the actual data. Do not use directly. */
SECP256K1_FLAGS_BIT_CONTEXT_VERIFY = (1 << 8)
SECP256K1_FLAGS_BIT_CONTEXT_SIGN = (1 << 9)
SECP256K1_FLAGS_BIT_COMPRESSION = (1 << 8)
# /** Flags to pass to secp256k1_context_create. */
SECP256K1_CONTEXT_VERIFY = (SECP256K1_FLAGS_TYPE_CONTEXT | SECP256K1_FLAGS_BIT_CONTEXT_VERIFY)
SECP256K1_CONTEXT_SIGN = (SECP256K1_FLAGS_TYPE_CONTEXT | SECP256K1_FLAGS_BIT_CONTEXT_SIGN)
SECP256K1_CONTEXT_NONE = (SECP256K1_FLAGS_TYPE_CONTEXT)
SECP256K1_EC_COMPRESSED = (SECP256K1_FLAGS_TYPE_COMPRESSION | SECP256K1_FLAGS_BIT_COMPRESSION)
SECP256K1_EC_UNCOMPRESSED = (SECP256K1_FLAGS_TYPE_COMPRESSION)
def load_library():
    """Load libsecp256k1 via ctypes and prepare it for use.

    Returns the configured CDLL handle (with a ready-to-use, randomized
    context stored on it as ``.ctx``), or None when the library cannot
    be used.
    """
    # Pick the platform-specific shared-library name.
    if sys.platform == 'darwin':
        library_path = 'libsecp256k1.0.dylib'
    elif sys.platform in ('windows', 'win32'):
        library_path = 'libsecp256k1.dll'
    else:
        library_path = 'libsecp256k1.so.0'
    # NOTE(review): ctypes raises OSError when the library is missing rather
    # than returning a falsy handle, so the `if not secp256k1` guard below
    # likely never fires; the caller's except clause handles that case.
    secp256k1 = ctypes.cdll.LoadLibrary(library_path)
    if not secp256k1:
        _logger.warning('libsecp256k1 library failed to load')
        return None
    try:
        # Declare argument/return types for every symbol used later so that
        # ctypes marshals pointers and integers correctly.
        secp256k1.secp256k1_context_create.argtypes = [c_uint]
        secp256k1.secp256k1_context_create.restype = c_void_p
        secp256k1.secp256k1_context_randomize.argtypes = [c_void_p, c_char_p]
        secp256k1.secp256k1_context_randomize.restype = c_int
        secp256k1.secp256k1_ec_pubkey_create.argtypes = [c_void_p, c_void_p, c_char_p]
        secp256k1.secp256k1_ec_pubkey_create.restype = c_int
        secp256k1.secp256k1_ecdsa_sign.argtypes = [c_void_p, c_char_p, c_char_p, c_char_p, c_void_p, c_void_p]
        secp256k1.secp256k1_ecdsa_sign.restype = c_int
        secp256k1.secp256k1_ecdsa_verify.argtypes = [c_void_p, c_char_p, c_char_p, c_char_p]
        secp256k1.secp256k1_ecdsa_verify.restype = c_int
        secp256k1.secp256k1_ec_pubkey_parse.argtypes = [c_void_p, c_char_p, c_char_p, c_size_t]
        secp256k1.secp256k1_ec_pubkey_parse.restype = c_int
        secp256k1.secp256k1_ec_pubkey_serialize.argtypes = [c_void_p, c_char_p, c_void_p, c_char_p, c_uint]
        secp256k1.secp256k1_ec_pubkey_serialize.restype = c_int
        secp256k1.secp256k1_ecdsa_signature_parse_compact.argtypes = [c_void_p, c_char_p, c_char_p]
        secp256k1.secp256k1_ecdsa_signature_parse_compact.restype = c_int
        secp256k1.secp256k1_ecdsa_signature_normalize.argtypes = [c_void_p, c_char_p, c_char_p]
        secp256k1.secp256k1_ecdsa_signature_normalize.restype = c_int
        secp256k1.secp256k1_ecdsa_signature_serialize_compact.argtypes = [c_void_p, c_char_p, c_char_p]
        secp256k1.secp256k1_ecdsa_signature_serialize_compact.restype = c_int
        secp256k1.secp256k1_ec_pubkey_tweak_mul.argtypes = [c_void_p, c_char_p, c_char_p]
        secp256k1.secp256k1_ec_pubkey_tweak_mul.restype = c_int
        # Create one context usable for both signing and verification and
        # stash it on the handle for the patched functions to reuse.
        secp256k1.ctx = secp256k1.secp256k1_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY)
        # Randomize the context for side-channel protection.
        r = secp256k1.secp256k1_context_randomize(secp256k1.ctx, os.urandom(32))
        if r:
            return secp256k1
        else:
            _logger.warning('secp256k1_context_randomize failed')
            return None
    except (OSError, AttributeError):
        # A symbol was missing or a call failed at the ABI level.
        _logger.warning('libsecp256k1 library was found and loaded but there was an error when using it')
        return None
class _patched_functions:
    """Namespace holding the original and libsecp256k1-backed ecdsa entry
    points plus flags tracking the patch state; never instantiated.
    """
    # Set True once orig_*/fast_* attributes have been populated.
    prepared_to_patch = False
    # Set True while python-ecdsa internals point at the fast functions.
    monkey_patching_active = False
def _prepare_monkey_patching_of_python_ecdsa_internals_with_libsecp256k1():
    """Build libsecp256k1-backed replacements for python-ecdsa's sign,
    verify and point-multiplication, storing both originals and
    replacements on _patched_functions without activating them.
    """
    if not _libsecp256k1:
        return
    # save original functions so that we can undo patching (needed for tests)
    _patched_functions.orig_sign = staticmethod(ecdsa.ecdsa.Private_key.sign)
    _patched_functions.orig_verify = staticmethod(ecdsa.ecdsa.Public_key.verifies)
    _patched_functions.orig_mul = staticmethod(ecdsa.ellipticcurve.Point.__mul__)
    curve_secp256k1 = ecdsa.ecdsa.curve_secp256k1
    curve_order = ecdsa.curves.SECP256k1.order
    point_at_infinity = ecdsa.ellipticcurve.INFINITY
    def mul(self: ecdsa.ellipticcurve.Point, other: int):
        """Scalar multiplication self * other via secp256k1_ec_pubkey_tweak_mul."""
        if self.curve() != curve_secp256k1:
            # this operation is not on the secp256k1 curve; use original implementation
            return _patched_functions.orig_mul(self, other)
        other %= curve_order
        if self == point_at_infinity or other == 0:
            return point_at_infinity
        pubkey = create_string_buffer(64)
        # Uncompressed SEC encoding: 0x04 || X (32 bytes) || Y (32 bytes).
        public_pair_bytes = b'\4' + self.x().to_bytes(32, byteorder="big") + self.y().to_bytes(32, byteorder="big")
        r = _libsecp256k1.secp256k1_ec_pubkey_parse(
            _libsecp256k1.ctx, pubkey, public_pair_bytes, len(public_pair_bytes))
        if not r:
            # NOTE(review): returning False here instead of a Point (or
            # raising) looks inconsistent with the other failure path just
            # below, which returns point_at_infinity — confirm intent.
            return False
        r = _libsecp256k1.secp256k1_ec_pubkey_tweak_mul(_libsecp256k1.ctx, pubkey, other.to_bytes(32, byteorder="big"))
        if not r:
            return point_at_infinity
        # Serialize back to uncompressed form and rebuild a python-ecdsa Point.
        pubkey_serialized = create_string_buffer(65)
        pubkey_size = c_size_t(65)
        _libsecp256k1.secp256k1_ec_pubkey_serialize(
            _libsecp256k1.ctx, pubkey_serialized, byref(pubkey_size), pubkey, SECP256K1_EC_UNCOMPRESSED)
        x = int.from_bytes(pubkey_serialized[1:33], byteorder="big")
        y = int.from_bytes(pubkey_serialized[33:], byteorder="big")
        return ecdsa.ellipticcurve.Point(curve_secp256k1, x, y, curve_order)
    def sign(self: ecdsa.ecdsa.Private_key, hash: int, random_k: int):
        """Sign with libsecp256k1; the caller-supplied nonce is ignored."""
        # note: random_k is ignored
        if self.public_key.curve != curve_secp256k1:
            # this operation is not on the secp256k1 curve; use original implementation
            return _patched_functions.orig_sign(self, hash, random_k)
        secret_exponent = self.secret_multiplier
        # Passing NULL selects the library's default nonce function.
        nonce_function = None
        sig = create_string_buffer(64)
        sig_hash_bytes = hash.to_bytes(32, byteorder="big")
        _libsecp256k1.secp256k1_ecdsa_sign(
            _libsecp256k1.ctx, sig, sig_hash_bytes, secret_exponent.to_bytes(32, byteorder="big"), nonce_function, None)
        compact_signature = create_string_buffer(64)
        _libsecp256k1.secp256k1_ecdsa_signature_serialize_compact(_libsecp256k1.ctx, compact_signature, sig)
        r = int.from_bytes(compact_signature[:32], byteorder="big")
        s = int.from_bytes(compact_signature[32:], byteorder="big")
        return ecdsa.ecdsa.Signature(r, s)
    def verify(self: ecdsa.ecdsa.Public_key, hash: int, signature: ecdsa.ecdsa.Signature):
        """Verify via libsecp256k1, normalizing the signature to low-S first."""
        if self.curve != curve_secp256k1:
            # this operation is not on the secp256k1 curve; use original implementation
            return _patched_functions.orig_verify(self, hash, signature)
        sig = create_string_buffer(64)
        input64 = signature.r.to_bytes(32, byteorder="big") + signature.s.to_bytes(32, byteorder="big")
        r = _libsecp256k1.secp256k1_ecdsa_signature_parse_compact(_libsecp256k1.ctx, sig, input64)
        if not r:
            return False
        # In-place normalization: secp256k1_ecdsa_verify rejects high-S sigs.
        r = _libsecp256k1.secp256k1_ecdsa_signature_normalize(_libsecp256k1.ctx, sig, sig)
        public_pair_bytes = b'\4' + self.point.x().to_bytes(32, byteorder="big") + self.point.y().to_bytes(32, byteorder="big")
        pubkey = create_string_buffer(64)
        r = _libsecp256k1.secp256k1_ec_pubkey_parse(
            _libsecp256k1.ctx, pubkey, public_pair_bytes, len(public_pair_bytes))
        if not r:
            return False
        return 1 == _libsecp256k1.secp256k1_ecdsa_verify(_libsecp256k1.ctx, sig, hash.to_bytes(32, byteorder="big"), pubkey)
    # save new functions so that we can (re-)do patching
    _patched_functions.fast_sign = sign
    _patched_functions.fast_verify = verify
    _patched_functions.fast_mul = mul
    _patched_functions.prepared_to_patch = True
def do_monkey_patching_of_python_ecdsa_internals_with_libsecp256k1():
    """Activate the libsecp256k1-backed replacements inside python-ecdsa.

    Requires _prepare_monkey_patching_... to have run first; silently
    falls back to pure python-ecdsa when the native library is absent.
    """
    if not _libsecp256k1:
        # FIXME logging 'verbosity' is not yet initialised
        _logger.info('libsecp256k1 library not available, falling back to python-ecdsa. '
                     'This means signing operations will be slower.')
        return
    if not _patched_functions.prepared_to_patch:
        raise Exception("can't patch python-ecdsa without preparations")
    fast = _patched_functions
    ecdsa.ecdsa.Private_key.sign = fast.fast_sign
    ecdsa.ecdsa.Public_key.verifies = fast.fast_verify
    ecdsa.ellipticcurve.Point.__mul__ = fast.fast_mul
    # ecdsa.ellipticcurve.Point.__add__ = ... # TODO??
    _patched_functions.monkey_patching_active = True
def undo_monkey_patching_of_python_ecdsa_internals_with_libsecp256k1():
    """Restore python-ecdsa's original sign/verify/mul (used by tests)."""
    if not _libsecp256k1:
        return
    if not _patched_functions.prepared_to_patch:
        raise Exception("can't patch python-ecdsa without preparations")
    saved = _patched_functions
    ecdsa.ecdsa.Private_key.sign = saved.orig_sign
    ecdsa.ecdsa.Public_key.verifies = saved.orig_verify
    ecdsa.ellipticcurve.Point.__mul__ = saved.orig_mul
    _patched_functions.monkey_patching_active = False
def is_using_fast_ecc():
    """Return True while python-ecdsa is patched with libsecp256k1."""
    active = _patched_functions.monkey_patching_active
    return active
# Attempt to load the native libsecp256k1 at import time; on failure the
# module keeps _libsecp256k1 = None and the patch helpers fall back to
# pure python-ecdsa.
try:
    _libsecp256k1 = load_library()
except Exception:  # narrowed from a bare `except:` that also swallowed SystemExit/KeyboardInterrupt
    _libsecp256k1 = None
_prepare_monkey_patching_of_python_ecdsa_internals_with_libsecp256k1()
| 44.049327 | 127 | 0.738064 |
66f7ee4e8c500f40d47faec7872b5363856e10a2 | 4,626 | py | Python | supvisors/tests/test_ttypes.py | julien6387/supvisors | 4e32bce566dec2cf9e9a213a3698178030eb869b | [
"Apache-2.0"
] | 66 | 2017-01-05T11:28:34.000Z | 2022-03-04T08:42:01.000Z | supvisors/tests/test_ttypes.py | julien6387/supvisors | 4e32bce566dec2cf9e9a213a3698178030eb869b | [
"Apache-2.0"
] | 36 | 2016-12-30T10:46:58.000Z | 2022-01-09T22:56:10.000Z | supvisors/tests/test_ttypes.py | julien6387/supvisors | 4e32bce566dec2cf9e9a213a3698178030eb869b | [
"Apache-2.0"
] | 12 | 2017-03-04T04:53:51.000Z | 2022-01-28T13:03:22.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
# ======================================================================
# Copyright 2016 Julien LE CLEACH
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ======================================================================
import pytest
from unittest.mock import Mock
from supvisors.ttypes import *
def test_AddressStates():
    """Check the AddressStates enumeration member names and values."""
    member_names = ['UNKNOWN', 'CHECKING', 'RUNNING', 'SILENT', 'ISOLATING', 'ISOLATED']
    assert member_names == AddressStates._member_names_
    assert list(range(6)) == list(AddressStates._value2member_map_.keys())
def test_ApplicationStates():
    """Check the ApplicationStates enumeration member names and values."""
    member_names = ['STOPPED', 'STARTING', 'RUNNING', 'STOPPING']
    assert ApplicationStates._member_names_ == member_names
    assert list(range(4)) == list(ApplicationStates._value2member_map_.keys())
def test_StartingStrategies():
    """Check the StartingStrategies enumeration member names and values."""
    member_names = ['CONFIG', 'LESS_LOADED', 'MOST_LOADED', 'LOCAL']
    assert member_names == StartingStrategies._member_names_
    assert list(range(4)) == list(StartingStrategies._value2member_map_.keys())
def test_ConciliationStrategies():
    """Check the ConciliationStrategies enumeration member names and values."""
    member_names = ['SENICIDE', 'INFANTICIDE', 'USER', 'STOP', 'RESTART', 'RUNNING_FAILURE']
    assert member_names == ConciliationStrategies._member_names_
    assert list(range(6)) == list(ConciliationStrategies._value2member_map_.keys())
def test_StartingFailureStrategies():
    """Check the StartingFailureStrategies enumeration member names and values."""
    member_names = ['ABORT', 'STOP', 'CONTINUE']
    assert member_names == StartingFailureStrategies._member_names_
    assert list(range(3)) == list(StartingFailureStrategies._value2member_map_.keys())
def test_RunningFailureStrategies():
    """Check the RunningFailureStrategies enumeration member names and values."""
    member_names = ['CONTINUE', 'RESTART_PROCESS', 'STOP_APPLICATION', 'RESTART_APPLICATION']
    assert member_names == RunningFailureStrategies._member_names_
    assert list(range(4)) == list(RunningFailureStrategies._value2member_map_.keys())
def test_SupvisorsStates():
    """Check the SupvisorsStates enumeration member names and values."""
    member_names = ['INITIALIZATION', 'DEPLOYMENT', 'OPERATION', 'CONCILIATION', 'RESTARTING',
                    'SHUTTING_DOWN', 'SHUTDOWN']
    assert member_names == SupvisorsStates._member_names_
    assert list(range(7)) == list(SupvisorsStates._value2member_map_.keys())
def test_exception():
    """Check that InvalidTransition carries its message through str()."""
    message = 'invalid transition'
    with pytest.raises(InvalidTransition) as exc:
        raise InvalidTransition(message)
    assert str(exc.value) == message
def test_SupvisorsFaults():
    """Check the SupvisorsFaults enumeration member names and values."""
    member_names = ['SUPVISORS_CONF_ERROR', 'BAD_SUPVISORS_STATE']
    assert member_names == SupvisorsFaults._member_names_
    assert list(range(100, 102)) == list(SupvisorsFaults._value2member_map_.keys())
def test_process_event():
    """ Test the ProcessEvent classes. """
    # attribute called 'name' cannot be mocked at creation
    process = Mock(config=Mock(), group=None)
    process.config.name = 'dummy_process'
    # test ProcessEvent creation
    event = ProcessEvent(process)
    assert isinstance(event, Event)
    assert event.process is process
    # test payload with no group
    assert event.payload() == 'processname:dummy_process groupname: '
    # test payload with a group assigned to the process
    process.group = Mock(config=Mock())
    process.group.config.name = 'dummy_group'
    assert event.payload() == 'processname:dummy_process groupname:dummy_group '
    # test ProcessAddedEvent creation
    event = ProcessAddedEvent(process)
    assert isinstance(event, ProcessEvent)
    assert event.payload() == 'processname:dummy_process groupname:dummy_group '
    # test ProcessRemovedEvent creation
    event = ProcessRemovedEvent(process)
    assert isinstance(event, ProcessEvent)
    assert event.payload() == 'processname:dummy_process groupname:dummy_group '
00e22277ce6d93cabea7f6993fd8cf07f070c702 | 982 | py | Python | old_logen/pylogen/Browser.py | leuschel/logen | 0ea806f54628162615e25177c3ed98f6b2c27935 | [
"Apache-2.0"
] | 14 | 2015-10-16T11:35:30.000Z | 2021-05-12T15:31:16.000Z | old_logen/pylogen/Browser.py | leuschel/logen | 0ea806f54628162615e25177c3ed98f6b2c27935 | [
"Apache-2.0"
] | null | null | null | old_logen/pylogen/Browser.py | leuschel/logen | 0ea806f54628162615e25177c3ed98f6b2c27935 | [
"Apache-2.0"
] | 5 | 2015-10-16T12:44:41.000Z | 2019-10-02T02:45:38.000Z | import os
import sys
class Browser:
    """ Runs the users browser and displays the page
    for html previewing
    """
    def __init__(self, pref):
        # pref is a preferences object exposing get_preference/set_preference.
        self.pref = pref
        browser_cmd = pref.get_preference('browser')
        if browser_cmd is None or browser_cmd == '':
            # Default to Windows Explorer when no browser is configured.
            if sys.platform == "win32":
                pref.set_preference('browser', 'explorer')
    def openpage(self, page):
        # Open *page* in the configured browser; returns True on launch,
        # False when no browser preference is set.
        browser_cmd = self.pref.get_preference('browser')
        if browser_cmd is None or browser_cmd == '':
            print "Please specify a browser for preview to autoopen exported html"
            return False
        if sys.platform == "win32":
            # Normalize to backslash separators for Windows shells.
            page = page.replace("/", "\\")
        # NOTE(review): building a shell command from an unescaped path is a
        # command-injection risk; consider subprocess with an argument list.
        cmd = ('%s "%s"' % (browser_cmd , page))
        os.system(cmd)
        return True
if __name__ == "__main__":
    print "Testing Browser:"
    # NOTE(review): Browser.__init__ requires a `pref` argument, so this
    # call raises TypeError — this manual test appears broken; confirm.
    b = Browser();
    b.openpage("C:\Documents and Settings\Steve\Desktop\print.html")
e34affb8776de4dc2f088c2857b9daaed3ba4d74 | 1,575 | py | Python | test/bench.py | 00xc/pylzf | 44f836ebf76a7a84b0d1ba114d7336c90213b266 | [
"BSD-2-Clause"
] | null | null | null | test/bench.py | 00xc/pylzf | 44f836ebf76a7a84b0d1ba114d7336c90213b266 | [
"BSD-2-Clause"
] | null | null | null | test/bench.py | 00xc/pylzf | 44f836ebf76a7a84b0d1ba114d7336c90213b266 | [
"BSD-2-Clause"
] | null | null | null | import random
import time
import pylzf
import zlib
def mean(x):
    """Return the arithmetic mean of the values in *x*."""
    total = sum(x)
    return total / len(x)
if __name__ == '__main__':
    # Benchmark pylzf against zlib(level=1) on random word sequences of
    # increasing length, reporting timing and space savings.
    NTESTS = 20
    with open("/usr/share/dict/american-english", "r") as f:
        words = [w.rstrip() for w in f.readlines()]
    for NWORDS in [100, 1_000, 10_000, 100_000, 500_000]:
        sizes = []
        elapsed = []
        for _ in range(NTESTS):
            data = " ".join(random.choice(words) for _ in range(NWORDS)).encode()
            t0 = time.perf_counter()
            result = pylzf.compress(data)
            t0 = time.perf_counter() - t0
            try:
                dcmpr = pylzf.decompress(result)
                assert(data == dcmpr)
            # NOTE(review): bare except; also, if decompress() itself raises,
            # `dcmpr` is unbound and this print raises NameError — confirm.
            except:
                print(len(data), "->", len(result), "->", len(dcmpr))
            sizes.append((len(data), len(result)))
            elapsed.append(t0)
        print("[PYLZF {:6d} words] Total time: {:9.3f} ms | Average time/test: {:7.3f} ms | Avg. space saved: {} %".format(
            NWORDS,
            round(sum(elapsed)*1000, 3),
            round(mean(elapsed)*1000, 3),
            round(mean([(1 - (d[1]/d[0]))*100 for d in sizes]), 3)
        ))
        sizes = []
        elapsed = []
        for _ in range(NTESTS):
            data = " ".join(random.choice(words) for _ in range(NWORDS)).encode()
            t0 = time.perf_counter()
            result = zlib.compress(data, level=1)
            t0 = time.perf_counter() - t0
            assert(data == zlib.decompress(result))
            sizes.append((len(data), len(result)))
            elapsed.append(t0)
        print("[ZLIB {:7d} words] Total time: {:9.3f} ms | Average time/test: {:7.3f} ms | Avg. space saved: {} %".format(
            NWORDS,
            round(sum(elapsed)*1000, 3),
            round(mean(elapsed)*1000, 3),
            round(mean([(1 - (d[1]/d[0]))*100 for d in sizes]), 3)
        ))
e6c49d01e5b621ace4e417d414ec074297b7aafd | 9,446 | py | Python | saleor/graphql/account/mutations/jwt.py | aldian/saleor | c3c42998055daf262bbb1c339f254007519a1d9b | [
"CC-BY-4.0"
] | 2 | 2021-01-31T00:28:42.000Z | 2021-01-31T12:30:46.000Z | saleor/graphql/account/mutations/jwt.py | aldian/saleor | c3c42998055daf262bbb1c339f254007519a1d9b | [
"CC-BY-4.0"
] | null | null | null | saleor/graphql/account/mutations/jwt.py | aldian/saleor | c3c42998055daf262bbb1c339f254007519a1d9b | [
"CC-BY-4.0"
] | 1 | 2021-02-18T13:56:47.000Z | 2021-02-18T13:56:47.000Z | from typing import Optional
import graphene
import jwt
from django.core.exceptions import ValidationError
from django.middleware.csrf import _compare_masked_tokens # type: ignore
from django.middleware.csrf import _get_new_csrf_token
from django.utils import timezone
from django.utils.crypto import get_random_string
from graphene.types.generic import GenericScalar
from ....account import models
from ....account.error_codes import AccountErrorCode
from ....core.jwt import (
JWT_REFRESH_TOKEN_COOKIE_NAME,
JWT_REFRESH_TYPE,
PERMISSIONS_FIELD,
create_access_token,
create_refresh_token,
get_user_from_payload,
jwt_decode,
)
from ....core.permissions import get_permissions_from_names
from ...core.mutations import BaseMutation
from ...core.types.common import AccountError
from ..types import User
def get_payload(token):
    """Decode a JWT and return its payload.

    PyJWT errors are translated into ValidationError with the matching
    account error code; specific errors are matched before the generic
    InvalidTokenError base class.
    """
    try:
        return jwt_decode(token)
    except jwt.ExpiredSignatureError:
        raise ValidationError(
            "Signature has expired", code=AccountErrorCode.JWT_SIGNATURE_EXPIRED.value
        )
    except jwt.DecodeError:
        raise ValidationError(
            "Error decoding signature", code=AccountErrorCode.JWT_DECODE_ERROR.value
        )
    except jwt.InvalidTokenError:
        raise ValidationError(
            "Invalid token", code=AccountErrorCode.JWT_INVALID_TOKEN.value
        )
def get_user(payload):
    """Resolve the user referenced by a decoded JWT payload.

    Raises ValidationError when no user can be resolved; when the payload
    carries a permissions list, it is materialized onto the user as
    effective_permissions.
    """
    user = None
    try:
        user = get_user_from_payload(payload)
    except Exception:
        pass
    if not user:
        raise ValidationError(
            "Invalid token", code=AccountErrorCode.JWT_INVALID_TOKEN.value
        )
    permissions = payload.get(PERMISSIONS_FIELD)
    if permissions is not None:
        user.effective_permissions = get_permissions_from_names(permissions)
    return user
class CreateToken(BaseMutation):
    """Mutation that authenticates a user and returns token and user data."""

    class Arguments:
        email = graphene.String(required=True, description="Email of a user.")
        password = graphene.String(required=True, description="Password of a user.")

    class Meta:
        description = "Create JWT token."
        error_type_class = AccountError
        error_type_field = "account_errors"

    token = graphene.String(description="JWT token, required to authenticate.")
    refresh_token = graphene.String(
        description="JWT refresh token, required to re-generate access token."
    )
    csrf_token = graphene.String(
        description="CSRF token required to re-generate access token."
    )
    user = graphene.Field(User, description="A user instance.")

    @classmethod
    def _retrieve_user_from_credentials(cls, email, password) -> Optional[models.User]:
        """Return the active user matching email/password, or None."""
        user = models.User.objects.filter(email=email, is_active=True).first()
        if user and user.check_password(password):
            return user
        return None

    @classmethod
    def get_user(cls, _info, data):
        """Resolve the user from credentials; raise ValidationError on failure."""
        user = cls._retrieve_user_from_credentials(data["email"], data["password"])
        if not user:
            raise ValidationError(
                {
                    "email": ValidationError(
                        "Please, enter valid credentials",
                        code=AccountErrorCode.INVALID_CREDENTIALS.value,
                    )
                }
            )
        return user

    @classmethod
    def perform_mutation(cls, root, info, **data):
        """Authenticate, mint access + refresh tokens, and record the login."""
        user = cls.get_user(info, data)
        access_token = create_access_token(user)
        csrf_token = _get_new_csrf_token()
        # The CSRF token is embedded into the refresh token so cookie-based
        # refresh can be validated later.
        refresh_token = create_refresh_token(user, {"csrfToken": csrf_token})
        # Expose the refresh token on the request so middleware can set the
        # http-only cookie; cache the user for this request.
        info.context.refresh_token = refresh_token
        info.context._cached_user = user
        user.last_login = timezone.now()
        user.save(update_fields=["last_login"])
        return cls(
            errors=[],
            user=user,
            token=access_token,
            refresh_token=refresh_token,
            csrf_token=csrf_token,
        )
class RefreshToken(BaseMutation):
    """Mutation that refresh user token and returns token and user data."""

    token = graphene.String(description="JWT token, required to authenticate.")
    user = graphene.Field(User, description="A user instance.")

    class Arguments:
        refresh_token = graphene.String(required=False, description="Refresh token.")
        csrf_token = graphene.String(
            required=False,
            description=(
                "CSRF token required to refresh token. This argument is "
                "required when refreshToken is provided as a cookie."
            ),
        )

    class Meta:
        description = (
            "Refresh JWT token. Mutation tries to take refreshToken from the input."
            "If it fails it will try to take refreshToken from the http-only cookie -"
            f"{JWT_REFRESH_TOKEN_COOKIE_NAME}. csrfToken is required when refreshToken "
            "is provided as a cookie."
        )
        error_type_class = AccountError
        error_type_field = "account_errors"

    @classmethod
    def get_refresh_token_payload(cls, refresh_token):
        """Decode the refresh token, re-keying errors under 'refreshToken'."""
        try:
            payload = get_payload(refresh_token)
        except ValidationError as e:
            raise ValidationError({"refreshToken": e})
        return payload

    @classmethod
    def get_refresh_token(cls, info, data):
        """Return the refresh token from input, falling back to the cookie."""
        request = info.context
        refresh_token = request.COOKIES.get(JWT_REFRESH_TOKEN_COOKIE_NAME, None)
        refresh_token = data.get("refresh_token") or refresh_token
        return refresh_token

    @classmethod
    def clean_refresh_token(cls, refresh_token):
        """Validate presence and type of the refresh token; return its payload."""
        if not refresh_token:
            raise ValidationError(
                {
                    "refreshToken": ValidationError(
                        "Missing refreshToken",
                        code=AccountErrorCode.JWT_MISSING_TOKEN.value,
                    )
                }
            )
        payload = cls.get_refresh_token_payload(refresh_token)
        if payload["type"] != JWT_REFRESH_TYPE:
            raise ValidationError(
                {
                    "refreshToken": ValidationError(
                        "Incorrect refreshToken",
                        code=AccountErrorCode.JWT_INVALID_TOKEN.value,
                    )
                }
            )
        return payload

    @classmethod
    def clean_csrf_token(cls, csrf_token, payload):
        """Compare the provided CSRF token with the one baked into the payload."""
        # NOTE(review): csrf_token may be None here when the argument was
        # omitted; confirm _compare_masked_tokens handles None gracefully.
        is_valid = _compare_masked_tokens(csrf_token, payload["csrfToken"])
        if not is_valid:
            raise ValidationError(
                {
                    "csrfToken": ValidationError(
                        "Invalid csrf token",
                        code=AccountErrorCode.JWT_INVALID_CSRF_TOKEN.value,
                    )
                }
            )

    @classmethod
    def get_user(cls, payload):
        """Resolve the payload's user, re-keying errors under 'refreshToken'."""
        try:
            user = get_user(payload)
        except ValidationError as e:
            raise ValidationError({"refreshToken": e})
        return user

    @classmethod
    def perform_mutation(cls, root, info, **data):
        refresh_token = cls.get_refresh_token(info, data)
        payload = cls.clean_refresh_token(refresh_token)
        # None when we got refresh_token from cookie.
        if not data.get("refresh_token"):
            csrf_token = data.get("csrf_token")
            cls.clean_csrf_token(csrf_token, payload)
        # NOTE(review): this calls the module-level get_user(), not
        # cls.get_user(), so errors are not wrapped under 'refreshToken'
        # and the classmethod above appears unused here — confirm intent.
        user = get_user(payload)
        token = create_access_token(user)
        return cls(errors=[], user=user, token=token)
class VerifyToken(BaseMutation):
    """Mutation that confirms if token is valid and also returns user data."""

    user = graphene.Field(User, description="User assigned to token.")
    is_valid = graphene.Boolean(
        required=True,
        default_value=False,
        description="Determine if token is valid or not.",
    )
    payload = GenericScalar(description="JWT payload.")

    class Arguments:
        token = graphene.String(required=True, description="JWT token to validate.")

    class Meta:
        description = "Verify JWT token."
        error_type_class = AccountError
        error_type_field = "account_errors"

    @classmethod
    def get_payload(cls, token):
        """Decode the token, re-keying validation errors under 'token'."""
        try:
            payload = get_payload(token)
        except ValidationError as e:
            raise ValidationError({"token": e})
        return payload

    @classmethod
    def get_user(cls, payload):
        """Resolve the payload's user, re-keying validation errors under 'token'."""
        try:
            user = get_user(payload)
        except ValidationError as e:
            raise ValidationError({"token": e})
        return user

    @classmethod
    def perform_mutation(cls, root, info, **data):
        # is_valid is only True when both decode and user resolution succeed;
        # otherwise a ValidationError surfaces via the mutation error field.
        token = data["token"]
        payload = cls.get_payload(token)
        user = cls.get_user(payload)
        return cls(errors=[], user=user, is_valid=True, payload=payload)
class DeactivateAllUserTokens(BaseMutation):
    """Rotate the authenticated user's JWT signing key.

    Regenerating jwt_token_key presumably invalidates every token issued
    with the previous key — confirm against the token validation logic.
    """

    class Meta:
        description = "Deactivate all JWT tokens of the currently authenticated user."
        error_type_class = AccountError
        error_type_field = "account_errors"

    @classmethod
    def check_permissions(cls, context):
        # Only authenticated users may rotate their own key.
        return context.user.is_authenticated

    @classmethod
    def perform_mutation(cls, root, info, **data):
        user = info.context.user
        user.jwt_token_key = get_random_string()
        user.save(update_fields=["jwt_token_key"])
        return cls()
| 33.496454 | 88 | 0.636248 |
116fd9c5f89ed996555f17ae505328f486a6a448 | 42,295 | py | Python | plugwise/helper.py | plugwise/python-plugwise | be50323e29bb0cf493af49c60111f51d624866d9 | [
"MIT"
] | 3 | 2020-10-29T15:06:03.000Z | 2022-03-01T10:35:40.000Z | plugwise/helper.py | plugwise/python-plugwise | be50323e29bb0cf493af49c60111f51d624866d9 | [
"MIT"
] | 154 | 2020-10-18T11:02:02.000Z | 2022-03-31T10:24:19.000Z | plugwise/helper.py | plugwise/python-plugwise | be50323e29bb0cf493af49c60111f51d624866d9 | [
"MIT"
] | 3 | 2020-10-28T18:02:43.000Z | 2021-09-02T15:20:51.000Z | """Use of this source code is governed by the MIT license found in the LICENSE file.
Plugwise Smile protocol helpers.
"""
import asyncio
import datetime as dt
import logging
import aiohttp
import async_timeout
from dateutil import tz
from dateutil.parser import parse
from defusedxml import ElementTree as etree
from munch import Munch
# Time related
import pytz
from .constants import (
APPLIANCES,
ATTR_ICON,
ATTR_ID,
ATTR_NAME,
ATTR_STATE,
ATTR_TYPE,
ATTR_UNIT_OF_MEASUREMENT,
BINARY_SENSORS,
COOLING_ICON,
DEVICE_MEASUREMENTS,
DOMAIN_OBJECTS,
ENERGY_KILO_WATT_HOUR,
ENERGY_WATT_HOUR,
FLAME_ICON,
HEATER_CENTRAL_MEASUREMENTS,
HEATING_ICON,
HOME_MEASUREMENTS,
IDLE_ICON,
LOCATIONS,
POWER_WATT,
SENSORS,
SWITCH_GROUP_TYPES,
SWITCHES,
THERMOSTAT_CLASSES,
)
from .exceptions import (
DeviceTimeoutError,
InvalidAuthentication,
InvalidXMLError,
ResponseError,
)
from .util import (
determine_selected,
escape_illegal_xml_characters,
format_measure,
in_between,
version_to_model,
)
_LOGGER = logging.getLogger(__name__)
# Map the Smile schedule's two-letter day abbreviations to Python weekday
# numbers (datetime.weekday(): Monday == 0 ... Sunday == 6).
DAYS = {
    "mo": 0,
    "tu": 1,
    "we": 2,
    "th": 3,
    "fr": 4,
    "sa": 5,
    "su": 6,
}
def device_state_updater(data, devs, d_id, d_dict):
    """Refresh the device_state sensor of device *d_id* in *devs*.

    Part of async_update(): recomputes the combined state/icon via
    update_device_state() and stores it on the matching sensor entry.
    """
    for position, sensor in enumerate(d_dict["sensors"]):
        if sensor[ATTR_ID] != "device_state":
            continue
        state, icon = update_device_state(data, d_dict)
        target = devs[d_id]["sensors"][position]
        target[ATTR_STATE] = state
        target[ATTR_ICON] = icon
def update_device_state(data, d_dict):
    """Derive the combined device state and icon for the device_state sensor.

    Combines the device's dhw_state binary sensor with the heating/cooling
    flags in *data*; returns [state, icon]. Precedence: cooling overrides
    heating, which overrides dhw-only, and dhw is merged into either.
    """
    dhw_active = False
    state = "idle"
    icon = IDLE_ICON
    # A truthy dhw_state binary sensor marks domestic-hot-water heating.
    for item in d_dict["binary_sensors"]:  # was enumerate() with unused index
        if item[ATTR_ID] == "dhw_state" and item[ATTR_STATE]:
            state = "dhw-heating"
            icon = FLAME_ICON
            dhw_active = True
    # .get() collapses the original "key present AND truthy" double check.
    if data.get("heating_state"):
        state = "heating"
        icon = HEATING_ICON
        if dhw_active:
            state = "dhw and heating"
    if data.get("cooling_state"):
        state = "cooling"
        icon = COOLING_ICON
        if dhw_active:
            state = "dhw and cooling"
    return [state, icon]
def pw_notification_updater(devs, d_id, d_dict, notifs):
    """Refresh the plugwise_notification binary_sensor of device *d_id*.

    Part of async_update(): the sensor is on whenever any notification
    is present in *notifs*.
    """
    has_notifications = notifs != {}
    for position, sensor in enumerate(d_dict["binary_sensors"]):
        if sensor[ATTR_ID] == "plugwise_notification":
            devs[d_id]["binary_sensors"][position][ATTR_STATE] = has_notifications
def update_helper(data, devs, d_dict, d_id, e_type, key):
    """Copy data[key] into the matching *e_type* entity of device *d_id*.

    Part of async_update(): the device dict must declare the entity before
    the registry entry in *devs* is touched.
    """
    declared = any(entry[ATTR_ID] == key for entry in d_dict[e_type])
    if not declared:
        return
    for entity in devs[d_id][e_type]:
        if entity[ATTR_ID] == key:
            entity[ATTR_STATE] = data[key]
def check_model(name, v_name):
    """Resolve a display model name before falling back to version_to_model.

    Plugwise-branded "ThermoTouch" devices are reported as "Anna"; for
    everything else the firmware version mapping is consulted, keeping the
    raw name when the mapping yields "Unknown".
    """
    if v_name in ("Plugwise", "Plugwise B.V.") and name == "ThermoTouch":
        return "Anna"
    model = version_to_model(name)
    return model if model != "Unknown" else name
def schemas_schedule_temp(schedules):
    """Helper-function for schemas().
    Obtain the schedule temperature of the schema/schedule.

    *schedules* maps period strings of the form "[mo 06:30,tu 22:00)" to a
    temperature; returns the temperature of the first period covering the
    current weekday and time, or implicitly None when nothing matches.
    """
    for period, temp in schedules.items():
        # Split "[day HH:MM,day HH:MM)" into its two boundary moments.
        moment_1, moment_2 = period.split(",")
        moment_1 = moment_1.replace("[", "").split(" ")
        moment_2 = moment_2.replace(")", "").split(" ")
        # Weekday number per DAYS; the fallback "None" string never equals
        # a weekday int, so unknown abbreviations simply never match.
        result_1 = DAYS.get(moment_1[0], "None")
        result_2 = DAYS.get(moment_2[0], "None")
        # NOTE(review): now() is evaluated again inside the condition below;
        # around midnight the two reads could disagree — confirm acceptable.
        now = dt.datetime.now().time()
        start = dt.datetime.strptime(moment_1[1], "%H:%M").time()
        end = dt.datetime.strptime(moment_2[1], "%H:%M").time()
        if (
            result_1 == dt.datetime.now().weekday()
            or result_2 == dt.datetime.now().weekday()
        ):
            if in_between(now, start, end):
                return temp
def types_finder(data):
    """Detect location types present in *data*'s point_log entries.

    Scans for each HOME_MEASUREMENTS key: outdoor_temperature always adds
    its type; other measurements add theirs only when their log references
    an electricity_point_meter with an id.
    """
    found_types = set()
    for measure, attrs in HOME_MEASUREMENTS.items():
        # Single XPath lookup (the original called data.find() twice).
        log = data.find(f".//logs/point_log[type='{measure}']")
        if log is None:
            continue
        if measure == "outdoor_temperature":
            found_types.add(attrs[ATTR_TYPE])
        meter = log.find(".//electricity_point_meter")
        if meter is not None and meter.get("id"):
            found_types.add(attrs[ATTR_TYPE])
    return found_types
def power_data_local_format(attrs, key_string, val):
    """Convert a raw meter reading into its display value.

    Watt readings are rounded to whole watts, and cumulative electricity
    readings are re-formatted as kWh; everything else uses the entity's
    own unit.
    """
    unit = attrs[ATTR_UNIT_OF_MEASUREMENT]
    formatted = format_measure(val, unit)
    # Round only HOME_MEASUREMENT POWER_WATT values here; do not move this
    # into the shared util format_measure function!
    if unit == POWER_WATT:
        formatted = int(round(float(val)))
    if "electricity" in key_string and "cumulative" in key_string:
        formatted = format_measure(val, ENERGY_KILO_WATT_HOUR)
    return formatted
def power_data_energy_diff(measurement, net_string, f_val, direct_data):
    """Accumulate a net energy figure in *direct_data*.

    Consumed electricity adds to the running total under *net_string*,
    produced electricity subtracts; non-electricity measurements and
    interval keys are passed through untouched. Float totals are rounded
    to three decimals.
    """
    if "electricity" not in measurement or "interval" in net_string:
        return direct_data
    sign = -1 if "produced" in measurement else 1
    direct_data.setdefault(net_string, 0)
    if isinstance(f_val, int):
        direct_data[net_string] += sign * f_val
    else:
        direct_data[net_string] += float(sign * f_val)
        direct_data[net_string] = float(f"{round(direct_data[net_string], 3):.3f}")
    return direct_data
class SmileComm:
    """HTTP transport to a Plugwise Smile/Stretch gateway over aiohttp."""

    def __init__(
        self,
        host,
        password,
        username,
        port,
        timeout,
        websession,
    ):
        """Set the constructor for this class.

        When no websession is supplied, one is created — directly when an
        event loop is already running, otherwise via run_until_complete.
        """
        if not websession:

            async def _create_session() -> aiohttp.ClientSession:
                return aiohttp.ClientSession()  # pragma: no cover

            loop = asyncio.get_event_loop()
            if loop.is_running():
                self._websession = aiohttp.ClientSession()
            else:
                self._websession = loop.run_until_complete(
                    _create_session()
                )  # pragma: no cover
        else:
            self._websession = websession
        # Basic-auth credentials and base URL reused by every request.
        self._auth = aiohttp.BasicAuth(username, password=password)
        self._endpoint = f"http://{host}:{str(port)}"
        self._timeout = timeout

    async def _request_validate(self, resp, method):
        """Helper-function for _request(): validate the returned data.

        Returns the parsed XML tree, or None for the accepted-but-empty
        responses; raises on auth, empty/error bodies and parse failures.
        """
        # Command accepted gives empty body with status 202
        if resp.status == 202:
            return
        # Cornercase for stretch not responding with 202
        if method == "put" and resp.status == 200:
            return
        if resp.status == 401:
            raise InvalidAuthentication
        result = await resp.text()
        if not result or "<error>" in result:
            _LOGGER.error("Smile response empty or error in %s", result)
            raise ResponseError
        try:
            # Encode to ensure utf8 parsing
            xml = etree.XML(escape_illegal_xml_characters(result).encode())
        except etree.ParseError:
            _LOGGER.error("Smile returns invalid XML for %s", self._endpoint)
            raise InvalidXMLError
        return xml

    async def _request(
        self,
        command,
        retry=3,
        method="get",
        data=None,
        headers=None,
    ):
        """Get/put/delete data from a give URL.

        NOTE(review): the timeout retry below re-issues with only
        (command, retry - 1), so method/data/headers fall back to their
        defaults — a timed-out PUT is retried as a GET; confirm intent.
        """
        resp = None
        url = f"{self._endpoint}{command}"
        try:
            async with async_timeout.timeout(self._timeout):
                if method == "get":
                    # Work-around for Stretchv2, should not hurt the other smiles
                    headers = {"Accept-Encoding": "gzip"}
                    resp = await self._websession.get(
                        url, auth=self._auth, headers=headers
                    )
                if method == "put":
                    headers = {"Content-type": "text/xml"}
                    resp = await self._websession.put(
                        url, data=data, headers=headers, auth=self._auth
                    )
                if method == "delete":
                    resp = await self._websession.delete(url, auth=self._auth)
        except asyncio.TimeoutError:
            if retry < 1:
                _LOGGER.error("Timed out sending command to Plugwise: %s", command)
                raise DeviceTimeoutError
            return await self._request(command, retry - 1)
        return await self._request_validate(resp, method)

    async def close_connection(self):
        """Close the Plugwise connection."""
        await self._websession.close()
class SmileHelper:
    """Parsing helpers that turn Smile/Stretch XML into Plugwise data structures."""
def _locations_legacy(self):
"""Helper-function for _all_locations().
Create locations for legacy devices.
"""
appliances = set()
self._home_location = 0
# Add Anna appliances
for appliance in self._appliances.findall("./appliance"):
appliances.add(appliance.attrib["id"])
if self.smile_type == "thermostat":
self._loc_data[0] = {
"name": "Legacy Anna",
"types": {"temperature"},
"members": appliances,
}
if self.smile_type == "stretch":
self._loc_data[0] = {
"name": "Legacy Stretch",
"types": {"power"},
"members": appliances,
}
def _locations_specials(self, loc, location):
"""Helper-function for _all_locations().
Correct location info in special cases.
"""
if loc.name == "Home":
self._home_location = loc.id
loc.types.add("home")
for location_type in types_finder(location):
loc.types.add(location_type)
# Replace location-name for P1 legacy, can contain privacy-related info
if self._smile_legacy and self.smile_type == "power":
loc.name = "Home"
self._home_location = loc.id
loc.types.add("home")
loc.types.add("power")
return loc
    def _all_locations(self):
        """Collect all locations.

        Fills self._loc_data ({location_id: {name, types, members}}) from
        the LOCATIONS XML; legacy devices without locations get a synthetic
        location 0 via _locations_legacy().
        """
        self._loc_data = {}
        loc = Munch()  # scratch object; relevant fields are reassigned each iteration

        # Legacy Anna without outdoor_temp and Stretches have no locations, create one containing all appliances
        if len(self._locations) == 0 and self._smile_legacy:
            self._locations_legacy()
            return

        for location in self._locations.findall("./location"):
            loc.name = location.find("name").text
            loc.id = location.attrib["id"]

            # Filter the valid single location for P1 legacy: services not empty
            locator = ".//services"
            if (
                self._smile_legacy
                and self.smile_type == "power"
                and len(location.find(locator)) == 0
            ):
                continue

            loc.types = set()
            loc.members = set()

            # Group of appliances
            locator = ".//appliances/appliance"
            if location.find(locator) is not None:
                for member in location.findall(locator):
                    loc.members.add(member.attrib["id"])

            # Specials
            loc = self._locations_specials(loc, location)

            self._loc_data[loc.id] = {
                "name": loc.name,
                "types": loc.types,
                "members": loc.members,
            }

        return
    def _get_module_data(self, appliance, locator, mod_type):
        """Helper-function for _energy_device_info_finder() and _appliance_info_finder().

        Collect requested info from MODULES.

        Returns [vendor_name, vendor_model, hardware_version, firmware_version],
        or four Nones when the linked module cannot be resolved.
        """
        appl_search = appliance.find(locator)
        if appl_search is not None:
            link_id = appl_search.attrib["id"]
            # NOTE(review): the trailing "...." appears intended to navigate
            # back up from the matched node to the enclosing module element —
            # confirm against the MODULES XML layout before changing it.
            module = self._modules.find(f".//{mod_type}[@id='{link_id}']....")
            if module is not None:
                v_name = module.find("vendor_name").text
                v_model = module.find("vendor_model").text
                hw_version = module.find("hardware_version").text
                fw_version = module.find("firmware_version").text
                return [v_name, v_model, hw_version, fw_version]
        return [None, None, None, None]
    def _energy_device_info_finder(self, appliance, appl):
        """Helper-function for _appliance_info_finder().

        Collect energy device info (Circle, Plug, Stealth): firmware, model and vendor name.
        Mutates appl in place; the caller ignores the return value (which is
        implicitly None when neither branch applies).
        """
        if self._stretch_v2 or self._stretch_v3:
            locator = ".//services/electricity_point_meter"
            mod_type = "electricity_point_meter"
            module_data = self._get_module_data(appliance, locator, mod_type)
            appl.v_name = module_data[0]
            if appl.model != "Group Switch":
                appl.model = None
                if module_data[2] is not None:
                    # Strip the dashes from the hardware version before
                    # mapping it to a model name.
                    hw_version = module_data[2].replace("-", "")
                    appl.model = version_to_model(hw_version)
            appl.fw = module_data[3]
            return appl

        if self.smile_type != "stretch" and "plug" in appl.types:
            locator = ".//logs/point_log/electricity_point_meter"
            mod_type = "electricity_point_meter"
            module_data = self._get_module_data(appliance, locator, mod_type)
            appl.v_name = module_data[0]
            appl.model = version_to_model(module_data[1])
            appl.fw = module_data[3]
            return appl
    def _appliance_info_finder(self, appliance, appl):
        """Collect device info (Smile/Stretch, Thermostats, Auxiliary): firmware, model and vendor name.

        Returns the updated appl, or None for a heater_central appliance
        when no active heating/cooling device is present.
        """
        # Find gateway and heater_central devices
        if appl.pwclass == "gateway":
            self.gateway_id = appliance.attrib["id"]
            appl.fw = self.smile_version[0]
            appl.model = appl.name = self.smile_name
            appl.v_name = "Plugwise B.V."
            return appl

        if appl.pwclass in THERMOSTAT_CLASSES:
            locator = ".//logs/point_log[type='thermostat']/thermostat"
            mod_type = "thermostat"
            module_data = self._get_module_data(appliance, locator, mod_type)
            appl.v_name = module_data[0]
            appl.model = check_model(module_data[1], appl.v_name)
            appl.fw = module_data[3]
            return appl

        if appl.pwclass == "heater_central":
            # Remove heater_central when no active device present
            if not self._active_device_present:
                return None

            self._heater_id = appliance.attrib["id"]
            appl.name = "Auxiliary"
            locator1 = ".//logs/point_log[type='flame_state']/boiler_state"
            locator2 = ".//services/boiler_state"
            mod_type = "boiler_state"
            # Try the point_log location first, fall back to services.
            module_data = self._get_module_data(appliance, locator1, mod_type)
            if module_data == [None, None, None, None]:
                module_data = self._get_module_data(appliance, locator2, mod_type)
            appl.v_name = module_data[0]
            appl.model = check_model(module_data[1], appl.v_name)
            if appl.model is None:
                # _cp_state (a compressor_state point_log was found) marks a
                # device that can also cool.
                appl.model = (
                    "Generic heater/cooler" if self._cp_state else "Generic heater"
                )
            return appl

        # Handle stretches
        self._energy_device_info_finder(appliance, appl)

        # Cornercase just return existing dict-object
        return appl  # pragma: no cover
    def _appliance_types_finder(self, appliance, appl):
        """Helper-function for _all_appliances() - determine type(s) per appliance."""
        # Appliance with location (i.e. a device)
        if appliance.find("location") is not None:
            appl.location = appliance.find("location").attrib["id"]
            for appl_type in types_finder(appliance):
                appl.types.add(appl_type)
        else:
            # Preset all types applicable to home
            # NOTE(review): this binds the home-location types set by
            # reference, so the add() calls below also mutate
            # self._loc_data[self._home_location]["types"] — confirm intended.
            appl.types = self._loc_data[self._home_location]["types"]

        # Determine appliance_type from functionality
        relay_func = appliance.find(".//actuator_functionalities/relay_functionality")
        relay_act = appliance.find(".//actuators/relay")
        thermo_func = appliance.find(
            ".//actuator_functionalities/thermostat_functionality"
        )
        if relay_func is not None or relay_act is not None:
            appl.types.add("plug")
        elif thermo_func is not None:
            appl.types.add("thermostat")

        return appl
    def _all_appliances(self):
        """Collect all appliances with relevant info.

        Fills self._appl_data ({appliance_id: {class, fw, location, model,
        name, types, vendor}}) and sets self.gateway_id plus
        self._active_device_present.
        """
        self._appl_data = {}
        self._cp_state = None

        self._all_locations()

        # For legacy P1
        if self._smile_legacy and self.smile_type == "power":
            # Inject home_location as device id for legacy so
            # appl_data can use the location id as device id.
            self._appl_data[self._home_location] = {
                "class": "gateway",
                "fw": self.smile_version[0],
                "location": self._home_location,
                "model": "Smile P1",
                "name": "P1",
                "types": {"power", "home"},
                "vendor": "Plugwise B.V.",
            }
            self.gateway_id = self._home_location
            return

        # The presence of either indicates a local active device, e.g. heat-pump or gas-fired heater
        self._cp_state = self._appliances.find(
            ".//logs/point_log[type='compressor_state']"
        )
        fl_state = self._appliances.find(".//logs/point_log[type='flame_state']")
        bl_state = self._appliances.find(".//services/boiler_state")
        self._active_device_present = (
            self._cp_state is not None or fl_state is not None or bl_state is not None
        )

        for appliance in self._appliances.findall("./appliance"):
            appl = Munch()
            appl.pwclass = appliance.find("type").text
            # Nothing useful in opentherm so skip it
            if appl.pwclass == "open_therm_gateway":
                continue

            appl.location = None
            appl.types = set()
            appl.id = appliance.attrib["id"]
            appl.name = appliance.find("name").text
            # Default model: the class name in title case, e.g. "Zone Thermostat".
            appl.model = appl.pwclass.replace("_", " ").title()
            appl.fw = None
            appl.v_name = None

            # Determine types for this appliance
            appl = self._appliance_types_finder(appliance, appl)

            # Determine class for this appliance
            appl = self._appliance_info_finder(appliance, appl)
            # Skip on heater_central when no active device present
            if not appl:
                continue

            self._appl_data[appl.id] = {
                "class": appl.pwclass,
                "fw": appl.fw,
                "location": appl.location,
                "model": appl.model,
                "name": appl.name,
                "types": appl.types,
                "vendor": appl.v_name,
            }
            # Drop non-legacy thermostats that are not assigned to a location.
            if (
                not self._smile_legacy
                and appl.pwclass == "thermostat"
                and appl.location is None
            ):
                self._appl_data.pop(appl.id)

        # For legacy Anna gateway and heater_central is the same device
        if self._smile_legacy and self.smile_type == "thermostat":
            self.gateway_id = self._heater_id
def _match_locations(self):
"""Helper-function for _scan_thermostats().
Update locations with present appliance-types.
"""
matched_locations = {}
self._all_appliances()
for location_id, location_details in self._loc_data.items():
for dummy, appliance_details in self._appl_data.items():
if appliance_details["location"] == location_id:
for appl_type in appliance_details["types"]:
location_details["types"].add(appl_type)
matched_locations[location_id] = location_details
return matched_locations
def _presets_legacy(self):
""" Helper-function for presets() - collect Presets for a legacy Anna."""
preset_dictionary = {}
for directive in self._domain_objects.findall("rule/directives/when/then"):
if directive is not None and "icon" in directive.keys():
# Ensure list of heating_setpoint, cooling_setpoint
preset_dictionary[directive.attrib["icon"]] = [
float(directive.attrib["temperature"]),
0,
]
return preset_dictionary
    def _presets(self, loc_id):
        """Collect Presets for a Thermostat based on location_id.

        Returns {preset_name: [heating_setpoint, cooling_setpoint]}; presets
        that only define a single "setpoint" get 0 as cooling value.
        """
        presets = {}
        tag = "zone_setpoint_and_state_based_on_preset"
        if self._smile_legacy:
            return self._presets_legacy()

        rule_ids = self._rule_ids_by_tag(tag, loc_id)
        if rule_ids is None:
            # Fall back to matching the rule by name.
            rule_ids = self._rule_ids_by_name("Thermostat presets", loc_id)

        # NOTE(review): rule_ids can still be None here, which would raise a
        # TypeError below — confirm one of the two lookups always matches.
        for rule_id in rule_ids:
            directives = self._domain_objects.find(f'rule[@id="{rule_id}"]/directives')

            for directive in directives:
                preset = directive.find("then").attrib
                keys, dummy = zip(*preset.items())
                if str(keys[0]) == "setpoint":
                    presets[directive.attrib["preset"]] = [float(preset["setpoint"]), 0]
                else:
                    presets[directive.attrib["preset"]] = [
                        float(preset["heating_setpoint"]),
                        float(preset["cooling_setpoint"]),
                    ]

        return presets
def _rule_ids_by_name(self, name, loc_id):
"""Helper-function for _presets().
Obtain the rule_id from the given name and location_id.
"""
schema_ids = {}
locator = f'.//contexts/context/zone/location[@id="{loc_id}"]'
for rule in self._domain_objects.findall(f'.//rule[name="{name}"]'):
if rule.find(locator) is not None:
schema_ids[rule.attrib["id"]] = loc_id
if schema_ids != {}:
return schema_ids
def _rule_ids_by_tag(self, tag, loc_id):
"""Helper-function for _presets(), _schemas() and _last_active_schema().
Obtain the rule_id from the given template_tag and location_id.
"""
schema_ids = {}
locator1 = f'.//template[@tag="{tag}"]'
locator2 = f'.//contexts/context/zone/location[@id="{loc_id}"]'
for rule in self._domain_objects.findall(".//rule"):
if rule.find(locator1) is not None:
if rule.find(locator2) is not None:
schema_ids[rule.attrib["id"]] = loc_id
if schema_ids != {}:
return schema_ids
    def _appliance_measurements(self, appliance, data, measurements):
        """Helper-function for _get_appliance_data() - collect appliance measurement data.

        measurements is an iterable of (measurement, attrs) pairs; point_log
        values land under the (possibly renamed) measurement key, interval_log
        values under "<measurement>_interval".
        """
        for measurement, attrs in measurements:
            p_locator = f'.//logs/point_log[type="{measurement}"]/period/measurement'
            if appliance.find(p_locator) is not None:
                if self._smile_legacy:
                    if measurement == "domestic_hot_water_state":
                        continue

                measure = appliance.find(p_locator).text
                # Fix for Adam + Anna: there is a pressure-measurement with an unrealistic value,
                # this measurement appears at power-on and is never updated, therefore remove.
                if (
                    measurement == "central_heater_water_pressure"
                    and float(measure) > 3.5
                ):
                    continue

                try:
                    # Optional rename of the output key via ATTR_NAME.
                    measurement = attrs[ATTR_NAME]
                except KeyError:
                    pass

                data[measurement] = format_measure(
                    measure, attrs[ATTR_UNIT_OF_MEASUREMENT]
                )

            # NOTE(review): after a rename above, this interval locator uses
            # the renamed key, not the original XML type — confirm intended.
            i_locator = f'.//logs/interval_log[type="{measurement}"]/period/measurement'
            if appliance.find(i_locator) is not None:
                name = f"{measurement}_interval"
                measure = appliance.find(i_locator).text
                data[name] = format_measure(measure, ENERGY_WATT_HOUR)

        return data
    def _get_appliance_data(self, d_id):
        """Helper-function for smile.py: _get_device_data().

        Collect the appliance-data based on device id.
        Determined from APPLIANCES, for legacy from DOMAIN_OBJECTS.
        """
        data = {}
        # P1 legacy has no APPLIANCES, also not present in DOMAIN_OBJECTS
        if self._smile_legacy and self.smile_type == "power":
            return data

        search = self._appliances
        if self._smile_legacy and self.smile_type != "stretch":
            search = self._domain_objects

        appliances = search.findall(f'.//appliance[@id="{d_id}"]')
        for appliance in appliances:
            measurements = DEVICE_MEASUREMENTS.items()
            if self._active_device_present:
                # Include heater-central measurements only when an active
                # heating device was detected.
                measurements = {
                    **DEVICE_MEASUREMENTS,
                    **HEATER_CENTRAL_MEASUREMENTS,
                }.items()

            data = self._appliance_measurements(appliance, data, measurements)
            data.update(self._get_lock_state(appliance))

        # Fix for Adam + Anna: heating_state also present under Anna, remove
        if "temperature" in data:
            data.pop("heating_state", None)

        return data
def _rank_thermostat(
self, thermo_matching, loc_id, appliance_id, appliance_details
):
"""Helper-function for _scan_thermostats().
Rank the thermostat based on appliance_details: master or slave."""
appl_class = appliance_details["class"]
if (
loc_id == appliance_details["location"]
or (self._smile_legacy and not appliance_details["location"])
) and appl_class in thermo_matching:
# Pre-elect new master
if thermo_matching[appl_class] > self._thermo_locs[loc_id]["master_prio"]:
# Demote former master
if self._thermo_locs[loc_id]["master"] is not None:
self._thermo_locs[loc_id]["slaves"].add(
self._thermo_locs[loc_id]["master"]
)
# Crown master
self._thermo_locs[loc_id]["master_prio"] = thermo_matching[appl_class]
self._thermo_locs[loc_id]["master"] = appliance_id
else:
self._thermo_locs[loc_id]["slaves"].add(appliance_id)
return appl_class
def _scan_thermostats(self, debug_text="missing text"):
"""Helper-function for smile.py: get_all_devices() and single_master_thermostat().
Update locations with thermostat ranking results.
"""
self._thermo_locs = self._match_locations()
thermo_matching = {
"thermostat": 3,
"zone_thermometer": 2,
"zone_thermostat": 2,
"thermostatic_radiator_valve": 1,
}
high_prio = 0
for loc_id, location_details in self._thermo_locs.items():
self._thermo_locs[loc_id] = location_details
if loc_id != self._home_location:
self._thermo_locs[loc_id].update(
{"master": None, "master_prio": 0, "slaves": set()}
)
elif self._smile_legacy:
self._thermo_locs[loc_id].update(
{"master": None, "master_prio": 0, "slaves": set()}
)
for appliance_id, appliance_details in self._appl_data.items():
appl_class = self._rank_thermostat(
thermo_matching, loc_id, appliance_id, appliance_details
)
# Find highest ranking thermostat
if appl_class in thermo_matching:
if thermo_matching[appl_class] > high_prio:
high_prio = thermo_matching[appl_class]
def _temperature_uri_legacy(self):
"""Helper-function for _temperature_uri().
Determine the location-set_temperature uri - from APPLIANCES.
"""
locator = ".//appliance[type='thermostat']"
appliance_id = self._appliances.find(locator).attrib["id"]
return f"{APPLIANCES};id={appliance_id}/thermostat"
def _temperature_uri(self, loc_id):
"""Helper-function for smile.py: set_temperature().
Determine the location-set_temperature uri - from LOCATIONS."""
if self._smile_legacy:
return self._temperature_uri_legacy()
locator = f'location[@id="{loc_id}"]/actuator_functionalities/thermostat_functionality'
thermostat_functionality_id = self._locations.find(locator).attrib["id"]
return f"{LOCATIONS};id={loc_id}/thermostat;id={thermostat_functionality_id}"
    def _group_switches(self):
        """Helper-function for smile.py: get_all_devices().

        Collect switching- or pump-group info.
        Returns {group_id: device-dict} for groups of a SWITCH_GROUP_TYPES type.
        """
        switch_groups = {}
        # P1 and Anna don't have switch groups
        if self.smile_type == "power" or self.smile_name == "Anna":
            return switch_groups

        search = self._domain_objects
        appliances = search.findall("./appliance")
        groups = search.findall("./group")

        for group in groups:
            group_appl = {}
            members = []
            group_id = group.attrib["id"]
            group_name = group.find("name").text
            group_type = group.find("type").text
            if self.smile_type == "stretch":
                # Stretch: the group lists its member appliances directly.
                group_appliance = group.findall("appliances/appliance")
                for dummy in group_appliance:
                    members.append(dummy.attrib["id"])
            else:
                # Other smiles: each appliance references the group it is in.
                for appliance in appliances:
                    if appliance.find("./groups/group") is not None:
                        appl_id = appliance.attrib["id"]
                        apl_gr_id = appliance.find("./groups/group").attrib["id"]
                        if apl_gr_id == group_id:
                            members.append(appl_id)

            if group_type in SWITCH_GROUP_TYPES:
                group_appl[group_id] = {
                    "class": group_type,
                    "fw": None,
                    "location": None,
                    "members": members,
                    "model": "Group Switch",
                    "name": group_name,
                    "types": {"switch_group"},
                    "vendor": "Plugwise",
                }

            switch_groups.update(group_appl)

        return switch_groups
def _heating_valves(self):
"""Helper-function for smile.py: _device_data_adam().
Collect amount of open valves indicating active direct heating.
For cases where the heat is provided from an external shared source (city heating).
"""
loc_found = 0
open_valve_count = 0
for appliance in self._appliances.findall(".//appliance"):
locator = './/logs/point_log[type="valve_position"]/period/measurement'
if appliance.find(locator) is not None:
loc_found += 1
measure = appliance.find(locator).text
if float(measure) > 0.0:
open_valve_count += 1
return None if loc_found == 0 else open_valve_count
    def _power_data_peak_value(self, loc):
        """Helper-function for _power_data_from_location().

        Look up one (measurement, log_type, peak_select) combination in
        loc.logs; sets loc.found, loc.key_string, loc.net_string and
        loc.f_val on the passed-in loc object and returns it.
        """
        loc.found = True
        no_tariffs = False

        # Only once try to find P1 Legacy values
        if loc.logs.find(loc.locator) is None and self.smile_type == "power":
            no_tariffs = True
            # P1 Legacy: avoid doubling the net_electricity_..._point value by skipping one peak-list option
            if loc.peak_select == "nl_offpeak":
                loc.found = False
                return loc

            # Retry without the tariff-indicator attribute.
            loc.locator = (
                f'.//{loc.log_type}[type="{loc.measurement}"]/period/measurement'
            )

        # Locator not found
        if loc.logs.find(loc.locator) is None:
            loc.found = False
            return loc

        peak = loc.peak_select.split("_")[1]
        if peak == "offpeak":
            peak = "off_peak"
        log_found = loc.log_type.split("_")[0]
        loc.key_string = f"{loc.measurement}_{peak}_{log_found}"
        # P1 with fw 2.x does not have tariff indicators for point_log values
        if no_tariffs:
            loc.key_string = f"{loc.measurement}_{log_found}"
        # Gas has no peak/off-peak tariff; drop the peak part of the key.
        if "gas" in loc.measurement:
            loc.key_string = f"{loc.measurement}_{log_found}"
        loc.net_string = f"net_electricity_{log_found}"
        val = loc.logs.find(loc.locator).text
        loc.f_val = power_data_local_format(loc.attrs, loc.key_string, val)

        return loc
    def _power_data_from_location(self, loc_id):
        """Helper-function for smile.py: _get_device_data().

        Collect the power-data based on Location ID.
        Returns a dict of formatted values, or None when the location has
        no logs or nothing was found.
        """
        direct_data = {}
        loc = Munch()
        search = self._domain_objects
        t_string = "tariff"
        if self.smile_type == "power":
            # P1: use data from LOCATIONS
            search = self._locations
            if self._smile_legacy:
                t_string = "tariff_indicator"

        loc.logs = search.find(f'.//location[@id="{loc_id}"]/logs')
        if loc.logs is None:
            return

        log_list = ["point_log", "cumulative_log", "interval_log"]
        peak_list = ["nl_peak", "nl_offpeak"]

        # Try every (measurement, log-type, tariff) combination; the helper
        # marks loc.found for the ones actually present in the XML.
        for loc.measurement, loc.attrs in HOME_MEASUREMENTS.items():
            for loc.log_type in log_list:
                for loc.peak_select in peak_list:
                    loc.locator = (
                        f'.//{loc.log_type}[type="{loc.measurement}"]/period/'
                        f'measurement[@{t_string}="{loc.peak_select}"]'
                    )
                    loc = self._power_data_peak_value(loc)
                    if not loc.found:
                        continue

                    direct_data = power_data_energy_diff(
                        loc.measurement, loc.net_string, loc.f_val, direct_data
                    )
                    direct_data[loc.key_string] = loc.f_val

        if direct_data != {}:
            return direct_data
def _preset(self, loc_id):
"""Helper-function for smile.py: device_data_climate().
Collect the active preset based on Location ID.
"""
if self._smile_legacy:
active_rule = self._domain_objects.find(
"rule[active='true']/directives/when/then"
)
if active_rule is None or "icon" not in active_rule.keys():
return
return active_rule.attrib["icon"]
locator = f'.//location[@id="{loc_id}"]/preset'
preset = self._domain_objects.find(locator)
if preset is not None:
return preset.text
    def _schemas_legacy(self):
        """Helper-function for _schemas().

        Collect available schemas/schedules for the legacy thermostat.
        Returns (available, selected, schedule_temperature); the latter is
        always None for legacy devices.
        """
        available = []
        name = None
        schedule_temperature = None
        schemas = {}
        selected = None

        # The schedule is the (single) rule whose name is not a preset.
        for schema in self._domain_objects.findall(".//rule"):
            rule_name = schema.find("name").text
            if rule_name:
                if "preset" not in rule_name:
                    name = rule_name

        log_type = "schedule_state"
        locator = f"appliance[type='thermostat']/logs/point_log[type='{log_type}']/period/measurement"
        active = False
        if self._domain_objects.find(locator) is not None:
            active = self._domain_objects.find(locator).text == "on"

        if name is not None:
            schemas[name] = active

        available, selected = determine_selected(available, selected, schemas)

        return available, selected, schedule_temperature
    def _schemas(self, loc_id):
        """Helper-function for smile.py: _device_data_climate().

        Obtain the available schemas/schedules based on the Location ID.
        Returns (available, selected, schedule_temperature).
        """
        available = []
        rule_ids = {}
        schemas = {}
        schedule_temperature = None
        selected = None

        # Legacy schemas
        if self._smile_legacy:  # Only one schedule allowed
            return self._schemas_legacy()

        # Current schemas
        tag = "zone_preset_based_on_time_and_presence_with_override"
        rule_ids = self._rule_ids_by_tag(tag, loc_id)
        if rule_ids is None:
            return available, selected, schedule_temperature

        for rule_id, dummy in rule_ids.items():
            name = self._domain_objects.find(f'rule[@id="{rule_id}"]/name').text
            active = (
                self._domain_objects.find(f'rule[@id="{rule_id}"]/active').text
                == "true"
            )
            schemas[name] = active
            schedules = {}
            locator = f'rule[@id="{rule_id}"]/directives'
            directives = self._domain_objects.find(locator)
            for directive in directives:
                schedule = directive.find("then").attrib
                keys, dummy = zip(*schedule.items())
                # A directive either references a preset or carries a
                # direct setpoint.
                if str(keys[0]) == "preset":
                    schedules[directive.attrib["time"]] = float(
                        self._presets(loc_id)[schedule["preset"]][0]
                    )
                else:
                    schedules[directive.attrib["time"]] = float(schedule["setpoint"])

            schedule_temperature = schemas_schedule_temp(schedules)

        available, selected = determine_selected(available, selected, schemas)

        return available, selected, schedule_temperature
    def _last_active_schema(self, loc_id):
        """Helper-function for smile.py: _device_data_climate().

        Determine the last active schema/schedule based on the Location ID,
        i.e. the matching rule with the most recent modified_date.
        Returns the schema name or None.
        """
        epoch = dt.datetime(1970, 1, 1, tzinfo=pytz.utc)
        rule_ids = {}
        schemas = {}
        last_modified = None

        tag = "zone_preset_based_on_time_and_presence_with_override"

        rule_ids = self._rule_ids_by_tag(tag, loc_id)
        if rule_ids is None:
            return

        for rule_id, dummy in rule_ids.items():
            schema_name = self._domain_objects.find(f'rule[@id="{rule_id}"]/name').text
            schema_date = self._domain_objects.find(
                f'rule[@id="{rule_id}"]/modified_date'
            ).text
            schema_time = parse(schema_date)
            # Store seconds-since-epoch so the schemas can be sorted by age.
            schemas[schema_name] = (schema_time - epoch).total_seconds()

        if schemas != {}:
            last_modified = sorted(schemas.items(), key=lambda kv: kv[1])[-1][0]

        return last_modified
def _object_value(self, obj_type, obj_id, measurement):
"""Helper-function for smile.py: _get_device_data() and _device_data_anna().
Obtain the value/state for the given object.
"""
search = self._domain_objects
locator = (
f'.//{obj_type}[@id="{obj_id}"]/logs/point_log'
f'[type="{measurement}"]/period/measurement'
)
if search.find(locator) is not None:
val = format_measure(search.find(locator).text, None)
return val
return None
    def _get_lock_state(self, xml):
        """Helper-function for _get_appliance_data().

        Adam & Stretches: obtain the relay-switch lock state.
        Returns {"lock": <state>} when present, otherwise an empty dict.
        """
        data = {}
        actuator = "actuator_functionalities"
        func_type = "relay_functionality"
        # Stretch v2 uses an older XML layout for the relay actuator.
        # NOTE(review): smile_version[1] appears to be a parsed version object
        # (it has .major) — confirm its type where smile_version is assigned.
        if self.smile_type == "stretch" and self.smile_version[1].major == 2:
            actuator = "actuators"
            func_type = "relay"
        # The lock is skipped for central_heating_pump and valve_actuator.
        appl_class = xml.find("type").text
        if appl_class not in ["central_heating_pump", "valve_actuator"]:
            locator = f".//{actuator}/{func_type}/lock"
            if xml.find(locator) is not None:
                measure = xml.find(locator).text
                data["lock"] = format_measure(measure, None)

        return data
    def _create_lists_from_data(self, data, bs_list, s_list, sw_list):
        """Helper-function for smile.py: _all_device_data().

        Create lists of binary_sensors, sensors, switches from the relevant data.
        Matched keys are removed from data and the matching descriptor (with
        its state updated) is appended to the corresponding output list.
        """
        for key, value in list(data.items()):
            # NOTE(review): the appended items are the module-level descriptor
            # dicts themselves (BINARY_SENSORS/SENSORS/SWITCHES); setting
            # ATTR_STATE mutates those shared constants — confirm intended.
            for item in BINARY_SENSORS:
                if item[ATTR_ID] == key:
                    data.pop(item[ATTR_ID])
                    # Binary sensors are only exposed when an active heating
                    # device is present; the key is dropped either way.
                    if self._active_device_present:
                        item[ATTR_STATE] = value
                        bs_list.append(item)
            for item in SENSORS:
                if item[ATTR_ID] == key:
                    data.pop(item[ATTR_ID])
                    item[ATTR_STATE] = value
                    s_list.append(item)
            for item in SWITCHES:
                if item[ATTR_ID] == key:
                    data.pop(item[ATTR_ID])
                    item[ATTR_STATE] = value
                    sw_list.append(item)
| 36.304721 | 112 | 0.57482 |
e59da581e64f4711670f7c14ebd424fc506001cd | 1,695 | py | Python | setup.py | rcook/sniptool | 8d88bec0d817cd1bba19f12af9d26fd6f458c9f1 | [
"MIT"
] | null | null | null | setup.py | rcook/sniptool | 8d88bec0d817cd1bba19f12af9d26fd6f458c9f1 | [
"MIT"
] | null | null | null | setup.py | rcook/sniptool | 8d88bec0d817cd1bba19f12af9d26fd6f458c9f1 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
###################################################
# Copyright (C) 2017, All rights reserved.
##################################################
import os
import re
from setuptools import setup
_PROJECT_NAME = "sniptool"
def _read_properties(project_name):
    """Parse dunder assignments (__key__ = "value") from <project_name>/__init__.py."""
    init_path = os.path.abspath(os.path.join(project_name, "__init__.py"))
    pattern = re.compile("^\\s*__(?P<key>.*)__\\s*=\\s*\"(?P<value>.*)\"\\s*$")
    with open(init_path, "rt") as init_file:
        properties = {}
        for line in init_file:
            match = pattern.match(line)
            if match is not None:
                properties[match.group("key")] = match.group("value")
        return properties
# Read the dunder metadata (__project_name__, __version__, __description__)
# from the package's __init__.py so it is defined in exactly one place.
_PROPERTIES = _read_properties(_PROJECT_NAME)
project_name = _PROPERTIES["project_name"]
version = _PROPERTIES["version"]
description = _PROPERTIES["description"]

setup(
    name=project_name,
    version=version,
    description=description,
    # README.md is converted for the long_description via setuptools-markdown.
    setup_requires=["setuptools-markdown"],
    long_description_markdown_filename="README.md",
    classifiers=[
        "Development Status :: 3 - Alpha",
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python :: 2.7",
    ],
    url="https://github.com/rcook/sniptool",
    author="Richard Cook",
    author_email="rcook@rcook.org",
    license="MIT",
    packages=[project_name],
    install_requires=[
        "inflection",
        "Jinja2",
        "pyperclip",
        "pyprelude",
        "pysimplevcs"
    ],
    entry_points={
        "console_scripts": [
            # Console script "<name>" dispatches to <name>.__main__._main.
            "{0} = {0}.__main__:_main".format(project_name)
        ]
    },
    include_package_data=True,
    test_suite="{}.tests.suite".format(project_name),
    zip_safe=False)
| 27.786885 | 77 | 0.581121 |
6b74471de944a1432be8b49e96d6c7ed805d9ec9 | 290 | py | Python | packages/urls.py | shamanu4/test_project | 8ec52b5ab88c7bae4e469dc04fe64630e2f081fa | [
"MIT"
] | 1 | 2019-07-26T09:56:38.000Z | 2019-07-26T09:56:38.000Z | packages/urls.py | shamanu4/test_project | 8ec52b5ab88c7bae4e469dc04fe64630e2f081fa | [
"MIT"
] | 6 | 2020-06-05T19:00:20.000Z | 2022-03-11T23:29:35.000Z | packages/urls.py | vintkor/cryptotrade | cd27b5d58e4149cf9ad5e035983fcec566369833 | [
"MIT"
] | null | null | null | from django.urls import path
from .views import (
PackageListView,
ByPackageFormView,
)
# URL namespace for reversing these routes, e.g. "package:packages-list".
app_name = 'package'
urlpatterns = [
    path('', PackageListView.as_view(), name='packages-list'),
    path('buy-package/<int:package_id>', ByPackageFormView.as_view(), name='buy-package'),
]
| 24.166667 | 90 | 0.703448 |
efcb58b91815ead6f61a7aa258c12f6d2802a2b3 | 3,388 | py | Python | speakers/migrations/0006_add_description_field.py | andyzsf/ConMan | e8d4aa9eeda7a85b39d8d897dbdba43de3cee9c1 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | speakers/migrations/0006_add_description_field.py | andyzsf/ConMan | e8d4aa9eeda7a85b39d8d897dbdba43de3cee9c1 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | speakers/migrations/0006_add_description_field.py | andyzsf/ConMan | e8d4aa9eeda7a85b39d8d897dbdba43de3cee9c1 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
from south.db import db
from django.db import models
from speakers.models import *
class Migration:
    """South migration: add Presentation.description and allow Presentation.cat to be empty."""

    def forwards(self, orm):
        """Apply the schema changes."""
        # Adding field 'Presentation.description'
        db.add_column('speakers_presentation', 'description', models.CharField(max_length=255, null=True, blank=True))
        # Changing field 'Presentation.cat'
        db.alter_column('speakers_presentation', 'cat_id', models.ForeignKey(orm.Category, null=True, blank=True))

    def backwards(self, orm):
        """Revert the schema changes."""
        # Deleting field 'Presentation.description'
        db.delete_column('speakers_presentation', 'description')
        # Changing field 'Presentation.cat'
        db.alter_column('speakers_presentation', 'cat_id', models.ForeignKey(orm.Category, blank=True))

    # Frozen model definitions consumed by South's fake ORM at migration time.
    models = {
        'speakers.category': {
            'description': ('models.TextField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
            'id': ('models.AutoField', [], {'primary_key': 'True'}),
            'name': ('models.CharField', [], {'max_length': '150', 'db_index': 'True'})
        },
        'speakers.presentation': {
            'audiences': ('models.ManyToManyField', ['AudienceType'], {}),
            'cat': ('models.ForeignKey', ['Category'], {'null': 'True', 'blank': 'True'}),
            'description': ('models.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'end': ('models.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('models.AutoField', [], {'primary_key': 'True'}),
            'location': ('models.ForeignKey', ['Room'], {'null': 'True', 'blank': 'True'}),
            'long_abstract': ('models.TextField', [], {'null': 'True', 'blank': 'True'}),
            'presenter': ('models.ManyToManyField', ['UserProfile'], {}),
            'score': ('models.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'short_abstract': ('models.TextField', [], {'max_length': '5000'}),
            'slides': ('models.FileField', [], {'null': 'True', 'upload_to': '"slides"', 'blank': 'True'}),
            'start': ('models.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'status': ('models.ForeignKey', ['Status'], {'default': 'get_status'}),
            'title': ('models.CharField', [], {'max_length': '150', 'db_index': 'True'})
        },
        'common.userprofile': {
            '_stub': True,
            'id': ('models.AutoField', [], {'primary_key': 'True'})
        },
        'speakers.status': {
            'id': ('models.AutoField', [], {'primary_key': 'True'}),
            'name': ('models.CharField', [], {'db_index': 'True', 'max_length': '70'})
        },
        'speakers.room': {
            'here': ('models.ImageField', [], {'null': 'True', 'upload_to': "'here'", 'blank': 'True'}),
            'id': ('models.AutoField', [], {'primary_key': 'True'}),
            'name': ('models.CharField', [], {'max_length': '70'})
        },
        'speakers.audiencetype': {
            'description': ('models.TextField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
            'id': ('models.AutoField', [], {'primary_key': 'True'}),
            'name': ('models.CharField', [], {'max_length': '150', 'db_index': 'True'})
        }
    }

    complete_apps = ['speakers']
| 47.71831 | 118 | 0.523613 |
a2958c6786b5abb384d798bbeed1fbd8c1a82189 | 1,085 | py | Python | Dataset/Leetcode/train/101/550.py | kkcookies99/UAST | fff81885aa07901786141a71e5600a08d7cb4868 | [
"MIT"
] | null | null | null | Dataset/Leetcode/train/101/550.py | kkcookies99/UAST | fff81885aa07901786141a71e5600a08d7cb4868 | [
"MIT"
] | null | null | null | Dataset/Leetcode/train/101/550.py | kkcookies99/UAST | fff81885aa07901786141a71e5600a08d7cb4868 | [
"MIT"
] | null | null | null | class Solution:
def XXX(self, root: TreeNode) -> bool:
def ishuiwen(p):
index1 = 0
index2 = len(p) - 1
while index1 < len(p) and index2 >= 0 and p[index1] == p[index2]:
index1 += 1
index2 -= 1
if index2 > index1:
return False
else:
return True
if not root:
return True
from collections import deque
d = deque([root])
ret = []
while d:
tmp = []
for _ in range(len(d)):
a = d.popleft()
if not a:
tmp.append(None)
continue
else:
tmp.append(a.val)
if a.left:
d.append(a.left)
else:
d.append(None)
if a.right:
d.append(a.right)
else:
d.append(None)
ret.append(tmp)
return all([ishuiwen(i) for i in ret])
| 28.552632 | 77 | 0.376037 |
81b3cdcdfc743bb3a5b4017b74538e1a0fc37718 | 2,515 | py | Python | IA 2/Perceptron1/Perceptron.py | CesarSG/actividades | 60c42a1bb181c5b75377ac8302a45bac95d37bfb | [
"MIT"
] | null | null | null | IA 2/Perceptron1/Perceptron.py | CesarSG/actividades | 60c42a1bb181c5b75377ac8302a45bac95d37bfb | [
"MIT"
] | null | null | null | IA 2/Perceptron1/Perceptron.py | CesarSG/actividades | 60c42a1bb181c5b75377ac8302a45bac95d37bfb | [
"MIT"
] | null | null | null | import os
import sys
import csv
import numpy as np
import matplotlib.pyplot as plt
class Perceptron:
def __init__(self, x, w, d, heta, max_epoch):
self._x = x
self._w = w
self._d = d
self._heta = heta
self._max_epoch = max_epoch
self._trained = False
def run(self):
self.perceptron()
def outputs(self):
result = np.dot(self._x, self._w)
outputs = []
with open('outputs-perceptron.csv', 'w', newline='') as csvfile:
filewriter = csv.writer(csvfile)
for res in result:
output = self.step(res)
outputs.append(output)
filewriter.writerow([output])
return outputs
def prodError(self, errors):
error = (errors**2)
return error
def plotError(self, historial):
epoch = list(range(len(historial)))
plt.plot(epoch, historial)
plt.show()
def plot(self, w, x, des):
m = (-w[1]/w[2])
b = -(w[0]/w[2])
y_0 = (m*0)+(b)
y_1 = (m*1)+(b)
x1, y1 = [0, 1], [y_0, y_1]
plt.plot(x1, y1)
for i, d in zip(x, des):
if d > 0:
plt.plot(i[1], i[2], 'bo')
else:
plt.plot(i[1], i[2], 'ro')
plt.draw()
plt.pause(0.5)
plt.clf()
def step(self, y):
return 1/(1 + np.exp(-y))
def train(self, result):
i = 0
errors = 0
for pattern, res in zip(self._x, result):
y = self.step(res)
error = self._d[i] - y
i += 1
if(error != 0):
self._w += self._heta * error * pattern * y * (1-y)
errors += 1
return errors
def perceptron(self):
historial = []
for epoch in range(self._max_epoch):
print("Número de epoca: ", epoch)
result = np.dot(self._x, self._w)
print("\t", result)
errors = self.train(result)
prod = self.prodError(errors)
historial.append(prod)
self.plot(self._w, self._x, self._d)
print("\t Errores: ", errors)
print("------------------")
if(errors == 0):
self._trained = True
if(self._trained):
input()
break
plt.close()
self.plotError(historial)
outputs = self.outputs()
print("Outputs: ", outputs)
| 23.287037 | 72 | 0.468787 |
d577eae77a7946bbe6867c2d394702ff351c0c99 | 3,090 | py | Python | test/code/transformers/test_meta_features_extractor.py | IooHooI/RECOMMENDATION_SYSTEMS_2 | 0ca2f17a9b268d094cbae8b13beb2b97696eff3e | [
"Unlicense"
] | null | null | null | test/code/transformers/test_meta_features_extractor.py | IooHooI/RECOMMENDATION_SYSTEMS_2 | 0ca2f17a9b268d094cbae8b13beb2b97696eff3e | [
"Unlicense"
] | 8 | 2020-01-28T22:30:25.000Z | 2022-02-10T00:01:53.000Z | test/code/transformers/test_meta_features_extractor.py | IooHooI/RECOMMENDATION_SYSTEMS_2 | 0ca2f17a9b268d094cbae8b13beb2b97696eff3e | [
"Unlicense"
] | null | null | null | import os
import unittest
import numpy as np
import pandas as pd
import tensorflow as tf
from tffm import TFFMClassifier
from sklearn.pipeline import Pipeline
from sklearn.impute import SimpleImputer
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import KBinsDiscretizer
from sklearn.model_selection import train_test_split
from source.code.transformers.metafeaturesextractor import MetaFeaturesExtractor
data_directory = '../../../data/datasets/'
train = pd.read_csv(os.path.join(data_directory, 'train.csv'), engine='python')
songs = pd.read_csv(os.path.join(data_directory, 'songs.csv'))
members = pd.read_csv(os.path.join(data_directory, 'members.csv'))
members.registration_init_time = pd.to_datetime(members.registration_init_time, format='%Y%m%d')
members.expiration_date = pd.to_datetime(members.expiration_date, format='%Y%m%d')
X, y = train[train.columns[:-1]], train[train.columns[-1]]
class TestPipeline(unittest.TestCase):
def test_case_1(self):
categorical_features = [
'source_system_tab',
'source_screen_name',
'city',
'gender'
]
categorical_features_lang = [
'language'
]
numerical_features = [
'bd',
'song_length',
'days_registered'
]
num_features_pipeline = Pipeline([
('impute', SimpleImputer(missing_values=np.nan, strategy='mean')),
('discretize', KBinsDiscretizer(n_bins=4, encode='onehot-dense'))
])
cat_features_pipeline = Pipeline([
('impute', SimpleImputer(missing_values=np.nan, strategy='constant', fill_value='missing')),
('onehot', OneHotEncoder(handle_unknown='ignore', sparse=False))
])
cat_features_pipeline_lang = Pipeline([
('impute', SimpleImputer(missing_values=np.nan, strategy='constant', fill_value=-1)),
('onehot', OneHotEncoder(handle_unknown='ignore', sparse=False))
])
preprocessor = ColumnTransformer(transformers=[
('num', num_features_pipeline, numerical_features),
('cat', cat_features_pipeline, categorical_features),
('cat_lang', cat_features_pipeline_lang, categorical_features_lang)
])
unified_pipeline = Pipeline(steps=[
('add_meta_info', MetaFeaturesExtractor(user_meta=members, item_meta=songs)),
('preprocessing', preprocessor)
])
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.90, random_state=42, stratify=y)
X_train = unified_pipeline.fit_transform(X_train, y_train)
self.assertTrue(len(X_train) > 0)
model = TFFMClassifier(
order=6,
rank=10,
optimizer=tf.train.AdamOptimizer(learning_rate=0.001),
n_epochs=100,
batch_size=-1,
init_std=0.001,
input_type='dense'
)
model.fit(X_train, y_train.values, show_progress=True)
| 33.225806 | 110 | 0.660518 |
df8e08dd7dccf4b60330ebbc106426606d2a99a1 | 4,252 | py | Python | pysap/plugins/astro/deconvolve/deconvolve.py | SylvainLan/pysap | 38c0d43f61adb952c3768d33f1ca41c5769fef2b | [
"CECILL-B"
] | null | null | null | pysap/plugins/astro/deconvolve/deconvolve.py | SylvainLan/pysap | 38c0d43f61adb952c3768d33f1ca41c5769fef2b | [
"CECILL-B"
] | null | null | null | pysap/plugins/astro/deconvolve/deconvolve.py | SylvainLan/pysap | 38c0d43f61adb952c3768d33f1ca41c5769fef2b | [
"CECILL-B"
] | null | null | null | # -*- coding: utf-8 -*-
##########################################################################
# pySAP - Copyright (C) CEA, 2017 - 2018
# Distributed under the terms of the CeCILL-B license, as published by
# the CEA-CNRS-INRIA. Refer to the LICENSE file or to
# http://www.cecill.info/licences/Licence_CeCILL-B_V1-en.html
# for details.
##########################################################################
"""
CONDA-VU Galaxy Image Deconvolution
"""
# System import
from __future__ import print_function
from builtins import range, zip
import pysap
from pysap.numerics.linear import WaveletConvolve2
from pysap.plugins.astro.deconvolve.wavelet_filters import get_cospy_filters
from pysap.utils import condatvu_logo
# Third party import
import numpy as np
from modopt.base.np_adjust import rotate
from modopt.opt.algorithms import Condat
from modopt.opt.cost import costObj
from modopt.opt.gradient import GradBasic
from modopt.opt.proximity import Positivity, SparseThreshold
from modopt.opt.reweight import cwbReweight
from modopt.math.convolve import convolve
from modopt.math.stats import sigma_mad
from modopt.signal.wavelet import filter_convolve
def psf_convolve(data, psf, psf_rot=False):
"""PSF Convolution
Parameters
----------
data : np.ndarray
Input data, 2D image
psf : np.ndarray
Input PSF, 2D image
psf_rot : bool, optional
Option to rotate the input PSF (default is False)
Returns
-------
np.ndarray convolved image
"""
if psf_rot:
psf = rotate(psf)
return convolve(data, psf)
def get_weights(data, psf, filters, wave_thresh_factor=np.array([3, 3, 4])):
"""Get Sparsity Weights
Parameters
----------
data : np.ndarray
Input data, 2D image
psf : np.ndarray
Input PSF, 2D image
filters : np.ndarray
Wavelet filters
wave_thresh_factor : np.ndarray, optional
Threshold factors for each wavelet scale (default is
np.array([3, 3, 4]))
Returns
-------
np.ndarray weights
"""
noise_est = sigma_mad(data)
filter_conv = filter_convolve(np.rot90(psf, 2), filters)
filter_norm = np.array([np.linalg.norm(a) * b * np.ones(data.shape)
for a, b in zip(filter_conv, wave_thresh_factor)])
return noise_est * filter_norm
def sparse_deconv_condatvu(data, psf, n_iter=300, n_reweights=1):
"""Sparse Deconvolution with Condat-Vu
Parameters
----------
data : np.ndarray
Input data, 2D image
psf : np.ndarray
Input PSF, 2D image
n_iter : int, optional
Maximum number of iterations
n_reweights : int, optional
Number of reweightings
Returns
-------
np.ndarray deconvolved image
"""
# Print the algorithm set-up
print(condatvu_logo())
# Define the wavelet filters
filters = (get_cospy_filters(data.shape,
transform_name='LinearWaveletTransformATrousAlgorithm'))
# Set the reweighting scheme
reweight = cwbReweight(get_weights(data, psf, filters))
# Set the initial variable values
primal = np.ones(data.shape)
dual = np.ones(filters.shape)
# Set the gradient operators
grad_op = GradBasic(data, lambda x: psf_convolve(x, psf),
lambda x: psf_convolve(x, psf, psf_rot=True))
# Set the linear operator
linear_op = WaveletConvolve2(filters)
# Set the proximity operators
prox_op = Positivity()
prox_dual_op = SparseThreshold(linear_op, reweight.weights)
# Set the cost function
cost_op = costObj([grad_op, prox_op, prox_dual_op], tolerance=1e-6,
cost_interval=1, plot_output=True, verbose=False)
# Set the optimisation algorithm
alg = Condat(primal, dual, grad_op, prox_op, prox_dual_op, linear_op,
cost_op, rho=0.8, sigma=0.5, tau=0.5, auto_iterate=False)
# Run the algorithm
alg.iterate(max_iter=n_iter)
# Implement reweigting
for rw_num in range(n_reweights):
print(' - Reweighting: {}'.format(rw_num + 1))
reweight.reweight(linear_op.op(alg.x_final))
alg.iterate(max_iter=n_iter)
# Return the final result
return alg.x_final
| 27.61039 | 78 | 0.650517 |
ba2ad3d282fcc080abe7e565b8cf2631ce6fdad6 | 1,339 | py | Python | perfkitbenchmarker/linux_packages/amdblis.py | msidana/PerfKitBenchmarker | 2784642d3e6b20b3f474c4e27edb1ef163804f66 | [
"Apache-2.0"
] | 1 | 2018-08-28T19:33:21.000Z | 2018-08-28T19:33:21.000Z | perfkitbenchmarker/linux_packages/amdblis.py | msidana/PerfKitBenchmarker | 2784642d3e6b20b3f474c4e27edb1ef163804f66 | [
"Apache-2.0"
] | null | null | null | perfkitbenchmarker/linux_packages/amdblis.py | msidana/PerfKitBenchmarker | 2784642d3e6b20b3f474c4e27edb1ef163804f66 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing AMD BLIS installation and cleanup functions."""
from perfkitbenchmarker.linux_packages import INSTALL_DIR
AMDBLIS_DIR = '%s/amdblis' % INSTALL_DIR
GIT_REPO = 'https://github.com/amd/blis'
GIT_TAG = '1.3'
def _Install(vm):
"""Installs the AMD BLIS package on the VM."""
vm.Install('build_tools')
vm.Install('fortran')
vm.RemoteCommand('git clone {0} {1}'.format(GIT_REPO, AMDBLIS_DIR))
vm.RemoteCommand('cd {0} && git checkout {1}'.format(AMDBLIS_DIR, GIT_TAG))
vm.RemoteCommand(
'cd {0} && ./configure --enable-cblas zen'.format(AMDBLIS_DIR))
vm.RemoteCommand('cd {0} && make -j'.format(AMDBLIS_DIR))
def Install(vm):
"""Installs the AMD BLIS package on the VM."""
_Install(vm)
| 36.189189 | 77 | 0.73413 |
f7f316e5dd64a7d086b94a7eb910318f5913d9c2 | 7,460 | py | Python | python/oneflow/compatible/single_client/test/ops/test_hardsigmoid.py | wangyuyue/oneflow | 0a71c22fe8355392acc8dc0e301589faee4c4832 | [
"Apache-2.0"
] | 3,285 | 2020-07-31T05:51:22.000Z | 2022-03-31T15:20:16.000Z | python/oneflow/compatible/single_client/test/ops/test_hardsigmoid.py | wangyuyue/oneflow | 0a71c22fe8355392acc8dc0e301589faee4c4832 | [
"Apache-2.0"
] | 2,417 | 2020-07-31T06:28:58.000Z | 2022-03-31T23:04:14.000Z | python/oneflow/compatible/single_client/test/ops/test_hardsigmoid.py | wangyuyue/oneflow | 0a71c22fe8355392acc8dc0e301589faee4c4832 | [
"Apache-2.0"
] | 520 | 2020-07-31T05:52:42.000Z | 2022-03-29T02:38:11.000Z | """
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import random
import unittest
from collections import OrderedDict
from typing import Dict
import numpy as np
from test_util import GenArgList
import oneflow.compatible.single_client.unittest
from oneflow.compatible import single_client as flow
from oneflow.compatible.single_client import typing as tp
def _compare_hardsigmoid_with_np(
input_shape, device_type, value_type, machine_ids, device_counts
):
if value_type[1] == flow.float16:
input_1 = np.random.uniform(-3.5, 3.5, size=input_shape).astype(np.float16)
input_1 += np.random.randn(*input_shape).astype(np.float16)
input_1 = np.array(input_1, dtype=value_type[0])
else:
input_1 = np.random.uniform(-3.5, 3.5, size=input_shape).astype(value_type[0])
input_1 += np.random.randn(*input_shape).astype(value_type[0])
assert device_type in ["cpu", "gpu"]
flow.clear_default_session()
if device_type == "cpu":
flow.config.cpu_device_num(device_counts)
else:
flow.config.gpu_device_num(device_counts)
func_config = flow.FunctionConfig()
func_config.default_placement_scope(flow.scope.placement(device_type, machine_ids))
if value_type[1] == flow.float16:
func_config.default_data_type(flow.float32)
else:
func_config.default_data_type(value_type[1])
def np_hardsigmoid(input):
input_shape = input.shape
input = input.flatten()
elem_cnt = input.size
_zero = np.zeros_like(input)
for i in range(elem_cnt):
if input[i] >= 3:
_zero[i] = 1
elif input[i] <= -3:
_zero[i] = 0
else:
_zero[i] = input[i] / 6 + 0.5
np_hsigmoid_out = np.reshape(_zero, newshape=input_shape)
return np.array(np_hsigmoid_out).astype(value_type[0])
np_out_hardsigmoid = np_hardsigmoid(input_1)
def np_diff(input):
input_shape = input.shape
input = input.flatten()
elem_cnt = input.size
diff = np.zeros(shape=(elem_cnt,), dtype=value_type[0])
for i in range(elem_cnt):
if input[i] > -3 and input[i] < 3:
diff[i] = 1 / 6
diff = np.reshape(diff, newshape=input_shape)
return diff
_np_grad = np_diff(input_1)
def assert_prediction_grad(blob: tp.Numpy):
if value_type[1] == flow.float16:
assert np.allclose(blob, _np_grad, atol=0.001)
else:
assert np.allclose(blob, _np_grad, atol=1e-05)
if value_type[1] == flow.float16:
@flow.global_function(type="train", function_config=func_config)
def oneflow_hardsigmoid(
of_input_1: tp.Numpy.Placeholder(shape=input_1.shape, dtype=flow.float32)
) -> tp.Numpy:
with flow.scope.placement(device_type, "0:0"):
v = flow.get_variable(
shape=input_1.shape,
dtype=flow.float32,
initializer=flow.zeros_initializer(),
name="x_var",
)
x_var = of_input_1 + v
x_f16 = flow.cast(x_var, flow.float16)
of_hardsigmoid_out_f16 = flow.nn.hardsigmoid(x_f16)
of_hardsigmoid_out_f32 = flow.cast(of_hardsigmoid_out_f16, flow.float32)
with flow.scope.placement(device_type, "0:0"):
flow.optimizer.SGD(
flow.optimizer.PiecewiseConstantScheduler([], [0.001]), momentum=0
).minimize(of_hardsigmoid_out_f32)
flow.watch_diff(x_var, assert_prediction_grad)
return of_hardsigmoid_out_f32
else:
@flow.global_function(type="train", function_config=func_config)
def oneflow_hardsigmoid(
of_input_1: tp.Numpy.Placeholder(shape=input_1.shape, dtype=value_type[1])
) -> tp.Numpy:
with flow.scope.placement(device_type, "0:0"):
v = flow.get_variable(
shape=input_1.shape,
dtype=value_type[1],
initializer=flow.zeros_initializer(),
name="x_var",
)
x_var = of_input_1 + v
flow.watch_diff(x_var, assert_prediction_grad)
of_hardsigmoid_out = flow.nn.hardsigmoid(x_var)
with flow.scope.placement(device_type, "0:0"):
flow.optimizer.SGD(
flow.optimizer.PiecewiseConstantScheduler([], [0.001]), momentum=0
).minimize(of_hardsigmoid_out)
return of_hardsigmoid_out
of_out_hardsigmoid = oneflow_hardsigmoid(input_1)
if value_type[1] == flow.float16:
assert np.allclose(of_out_hardsigmoid, np_out_hardsigmoid, atol=0.01)
else:
assert np.allclose(of_out_hardsigmoid, np_out_hardsigmoid, atol=1e-05)
def _gen_arg_dict(shape, device_type, value_type, machine_ids, device_counts):
arg_dict = OrderedDict()
arg_dict["input_shape"] = [shape]
arg_dict["device_type"] = [device_type]
if value_type == "float" and device_type == "cpu":
arg_dict["value_type"] = [
(np.float32, flow.float32),
(np.float64, flow.float64),
]
else:
arg_dict["value_type"] = [
(np.float32, flow.float16),
(np.float32, flow.float32),
(np.float64, flow.float64),
]
arg_dict["machine_ids"] = [machine_ids]
arg_dict["device_counts"] = [device_counts]
return arg_dict
@flow.unittest.skip_unless_1n1d()
class Testhardsigmoid1n1d(flow.unittest.TestCase):
def test_hardsigmoid_cpu(test_case):
arg_dict = _gen_arg_dict(
shape=(3, 16),
device_type="cpu",
value_type="float",
machine_ids="0:0",
device_counts=1,
)
for arg in GenArgList(arg_dict):
_compare_hardsigmoid_with_np(*arg)
@unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
def test_hardsigmoid_gpu(test_case):
arg_dict = _gen_arg_dict(
shape=(16, 16),
device_type="gpu",
value_type="float",
machine_ids="0:0",
device_counts=1,
)
for arg in GenArgList(arg_dict):
_compare_hardsigmoid_with_np(*arg)
@flow.unittest.skip_unless_1n2d()
class Testhardsigmoid1n2d(flow.unittest.TestCase):
@unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
def test_hardsigmoid_gpu_1n2d(test_case):
arg_dict = _gen_arg_dict(
shape=(4, 8, 16),
device_type="gpu",
value_type="float",
machine_ids="0:0-1",
device_counts=2,
)
for arg in GenArgList(arg_dict):
_compare_hardsigmoid_with_np(*arg)
if __name__ == "__main__":
unittest.main()
| 36.213592 | 87 | 0.627882 |
d5cc193a29594f13667d2dd24b2381c01c72962a | 870 | py | Python | newdle/providers/free_busy/random.py | Biboba/newdle | 89ea84ebf3259ccc8670c72b718b68a0fc1ccd66 | [
"MIT"
] | null | null | null | newdle/providers/free_busy/random.py | Biboba/newdle | 89ea84ebf3259ccc8670c72b718b68a0fc1ccd66 | [
"MIT"
] | null | null | null | newdle/providers/free_busy/random.py | Biboba/newdle | 89ea84ebf3259ccc8670c72b718b68a0fc1ccd66 | [
"MIT"
] | null | null | null | import datetime
from random import Random
def _to_tuple(t):
return (t.hour, t.minute)
def fetch_free_busy(date, tz, uid):
rnd = Random(date.isoformat() + uid)
if rnd.randint(0, 1):
start = rnd.randint(5, 21)
end = rnd.randint(start + 1, 23)
start_time = _to_tuple(datetime.time(start))
end_time = _to_tuple(datetime.time(end))
return [[start_time, end_time]]
else:
start = rnd.randint(7, 10)
end = rnd.randint(start + 1, start + 3)
start2 = rnd.randint(14, 16)
end2 = rnd.randint(start2 + 1, start2 + 5)
start_time = _to_tuple(datetime.time(start))
end_time = _to_tuple(datetime.time(end))
start_time2 = _to_tuple(datetime.time(start2))
end_time2 = _to_tuple(datetime.time(end2))
return [(start_time, end_time), (start_time2, end_time2)]
| 32.222222 | 65 | 0.624138 |
67f10cfcc02612f0e287ded417f7aeee8f8ec815 | 9,817 | py | Python | src/python/nimbusml/internal/core/linear_model/averagedperceptronbinaryclassifier.py | montehoover/NimbusML | f6be39ce9359786976429bab0ccd837e849b4ba5 | [
"MIT"
] | 134 | 2018-11-01T22:15:24.000Z | 2019-05-04T11:30:08.000Z | src/python/nimbusml/internal/core/linear_model/averagedperceptronbinaryclassifier.py | montehoover/NimbusML | f6be39ce9359786976429bab0ccd837e849b4ba5 | [
"MIT"
] | 226 | 2019-05-07T19:00:44.000Z | 2021-01-06T07:59:48.000Z | src/python/nimbusml/internal/core/linear_model/averagedperceptronbinaryclassifier.py | montehoover/NimbusML | f6be39ce9359786976429bab0ccd837e849b4ba5 | [
"MIT"
] | 43 | 2019-05-15T20:19:42.000Z | 2022-03-30T10:26:07.000Z | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
# --------------------------------------------------------------------------------------------
# - Generated by tools/entrypoint_compiler.py: do not edit by hand
"""
AveragedPerceptronBinaryClassifier
"""
__all__ = ["AveragedPerceptronBinaryClassifier"]
from ...core.loss.loss_factory import check_loss, create_loss
from ...entrypoints.trainers_averagedperceptronbinaryclassifier import \
trainers_averagedperceptronbinaryclassifier
from ...utils.utils import trace
from ..base_pipeline_item import BasePipelineItem, DefaultSignatureWithRoles
class AveragedPerceptronBinaryClassifier(
BasePipelineItem,
DefaultSignatureWithRoles):
"""
Machine Learning Averaged Perceptron Binary Classifier
.. remarks::
Perceptron is a classification algorithm that makes its predictions
based on a linear function. I.e., for an instance with feature values
*f0, f1,..., f_D-1*, , the prediction is given by the sign of
*sigma[0,D-1] ( w_i * f_i)*, where *w_0, w_1,...,w_D-1* are the
weights
computed by the algorithm.
Perceptron is an online algorithm, i.e., it processes the instances
in
the training set one at a time. The weights are initialized to be 0,
or
some random values. Then, for each example in the training set, the
value of *sigma[0, D-1] (w_i * f_i)* is computed. If this value has
the
same sign as the label of the current example, the weights remain the
same. If they have opposite signs, the weights vector is updated by
either subtracting or adding (if the label is negative or positive,
respectively) the feature vector of the current example, multiplied
by a
factor *0 < a <= 1*, called the learning rate. In a generalization of
this algorithm, the weights are updated by adding the feature vector
multiplied by the learning rate, and by the gradient of some loss
function (in the specific case described above, the loss is hinge-
loss,
whose gradient is 1 when it is non-zero).
In Averaged Perceptron (AKA voted-perceptron), the weight vectors are
stored, together with a weight that counts the number of iterations
it
survived (this is equivalent to storing the weight vector after every
iteration, regardless of whether it was updated or not). The
prediction
is then calculated by taking the weighted average of all the sums
*sigma[0, D-1] (w_i * f_i)* or the different weight vectors.
**Reference**
`Wikipedia entry for Perceptron
<https://en.wikipedia.org/wiki/Perceptron>`_
`Large Margin Classification Using the Perceptron Algorithm
<https://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.48.8200>`_
`Discriminative Training Methods for Hidden Markov Models
<https://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.18.6725>`_
:param normalize: Specifies the type of automatic normalization used:
* ``"Auto"``: if normalization is needed, it is performed
automatically. This is the default choice.
* ``"No"``: no normalization is performed.
* ``"Yes"``: normalization is performed.
* ``"Warn"``: if normalization is needed, a warning
message is displayed, but normalization is not performed.
Normalization rescales disparate data ranges to a standard scale.
Feature
scaling insures the distances between data points are proportional
and
enables various optimization methods such as gradient descent to
converge
much faster. If normalization is performed, a ``MaxMin`` normalizer
is
used. It normalizes values in an interval [a, b] where ``-1 <= a <=
0``
and ``0 <= b <= 1`` and ``b - a = 1``. This normalizer preserves
sparsity by mapping zero to zero.
:param caching: Whether trainer should cache input training data.
:param loss: The default is :py:class:`'hinge' <nimbusml.loss.Hinge>`. Other
choices are :py:class:`'exp' <nimbusml.loss.Exp>`, :py:class:`'log'
<nimbusml.loss.Log>`, and :py:class:`'smoothed_hinge'
<nimbusml.loss.SmoothedHinge>`. For more information, please see the
documentation page about losses, [Loss](xref:nimbusml.loss).
:param learning_rate: Determines the size of the step taken in the
direction of the gradient in each step of the learning process. This
determines how fast or slow the learner converges on the optimal
solution. If the step size is too big, you might overshoot the optimal
solution. If the step size is too small, training takes longer to
converge to the best solution.
:param decrease_learning_rate: Decrease learning rate.
:param l2_regularization: L2 Regularization Weight.
:param number_of_iterations: Number of iterations.
:param initial_weights_diameter: Sets the initial weights diameter that
specifies the range from which values are drawn for the initial
weights. These weights are initialized randomly from within this range.
For example, if the diameter is specified to be ``d``, then the weights
are uniformly distributed between ``-d/2`` and ``d/2``. The default
value is ``0``, which specifies that all the weights are set to zero.
:param reset_weights_after_x_examples: Number of examples after which
weights will be reset to the current average.
:param lazy_update: Instead of updating averaged weights on every example,
only update when loss is nonzero.
:param recency_gain: Extra weight given to more recent updates.
:param recency_gain_multiplicative: Whether Recency Gain is multiplicative
(vs. additive).
:param averaged: Do averaging?.
:param averaged_tolerance: The inexactness tolerance for averaging.
:param initial_weights: Initial Weights and bias, comma-separated.
:param shuffle: Whether to shuffle for each training iteration.
:param params: Additional arguments sent to compute engine.
.. seealso::
:py:func:`LogisticRegressionClassifier
<nimbusml.linear_model.LogisticRegressionClassifier>`,
`Types </nimbusml/concepts/types#column-types>`_
.. index:: models, classification, perceptron
Example:
.. literalinclude:: /../nimbusml/examples/AveragedPerceptronBinaryClassifier.py
:language: python
"""
@trace
def __init__(
self,
normalize='Auto',
caching='Auto',
loss='hinge',
learning_rate=1.0,
decrease_learning_rate=False,
l2_regularization=0.0,
number_of_iterations=1,
initial_weights_diameter=0.0,
reset_weights_after_x_examples=None,
lazy_update=True,
recency_gain=0.0,
recency_gain_multiplicative=False,
averaged=True,
averaged_tolerance=0.01,
initial_weights=None,
shuffle=True,
**params):
BasePipelineItem.__init__(
self, type='classifier', **params)
self.normalize = normalize
self.caching = caching
self.loss = loss
check_loss(
'ClassificationLossFunction',
self.__class__.__name__,
self.loss)
self.learning_rate = learning_rate
self.decrease_learning_rate = decrease_learning_rate
self.l2_regularization = l2_regularization
self.number_of_iterations = number_of_iterations
self.initial_weights_diameter = initial_weights_diameter
self.reset_weights_after_x_examples = reset_weights_after_x_examples
self.lazy_update = lazy_update
self.recency_gain = recency_gain
self.recency_gain_multiplicative = recency_gain_multiplicative
self.averaged = averaged
self.averaged_tolerance = averaged_tolerance
self.initial_weights = initial_weights
self.shuffle = shuffle
@property
def _entrypoint(self):
return trainers_averagedperceptronbinaryclassifier
@trace
def _get_node(self, **all_args):
algo_args = dict(
feature_column_name=self._getattr_role(
'feature_column_name',
all_args),
label_column_name=self._getattr_role(
'label_column_name',
all_args),
normalize_features=self.normalize,
caching=self.caching,
loss_function=create_loss(
'ClassificationLossFunction',
self.__class__.__name__,
self.loss),
learning_rate=self.learning_rate,
decrease_learning_rate=self.decrease_learning_rate,
l2_regularization=self.l2_regularization,
number_of_iterations=self.number_of_iterations,
initial_weights_diameter=self.initial_weights_diameter,
reset_weights_after_x_examples=self.reset_weights_after_x_examples,
lazy_update=self.lazy_update,
recency_gain=self.recency_gain,
recency_gain_multiplicative=self.recency_gain_multiplicative,
averaged=self.averaged,
averaged_tolerance=self.averaged_tolerance,
initial_weights=self.initial_weights,
shuffle=self.shuffle)
all_args.update(algo_args)
return self._entrypoint(**all_args)
| 41.247899 | 94 | 0.659061 |
a8848902d898ef316f5852f7f7c50056672bced7 | 153 | py | Python | example/registration/admin.py | westofpluto/django_custom_auth_user | e8dd1bbbdf943982d68a3183b4931a34b2b2c3f5 | [
"MIT"
] | null | null | null | example/registration/admin.py | westofpluto/django_custom_auth_user | e8dd1bbbdf943982d68a3183b4931a34b2b2c3f5 | [
"MIT"
] | null | null | null | example/registration/admin.py | westofpluto/django_custom_auth_user | e8dd1bbbdf943982d68a3183b4931a34b2b2c3f5 | [
"MIT"
] | null | null | null | # -*- coding: utf-8
# Core
from django.contrib import admin
# Models
from registration.models import User
# Register admins
admin.site.register(User)
| 13.909091 | 36 | 0.751634 |
9532002765597a35e465cf553d23d3535bc93a35 | 2,788 | py | Python | weGame/main.py | yang-lile/TAN-ZHE | e10ddfd74e2537b3d6840799be441c10a32c8661 | [
"MIT"
] | 1 | 2019-10-25T15:05:24.000Z | 2019-10-25T15:05:24.000Z | weGame/main.py | yang-lile/TAN-ZHE | e10ddfd74e2537b3d6840799be441c10a32c8661 | [
"MIT"
] | null | null | null | weGame/main.py | yang-lile/TAN-ZHE | e10ddfd74e2537b3d6840799be441c10a32c8661 | [
"MIT"
] | null | null | null | #
# @Time : 2019/9/23 下午2:43
# @Author : 礼lua
# @Modify Log : insert L54-L71
# @todoList : 1.use manGame.run to run the game
#
import pygame, sys
# import pygame constant
from pygame.locals import *
from globalValue import GL
def main():# 你可能没学过,但不难理解,这是一个主函数,整个程序的入口
# pygame init
# it is a start of game
pygame.init()
# define size of font
themeFont = pygame.font.Font(GL.cusFontAddress, 100)
themeName = themeFont.render("姑姑咕咕", True, GL.BLACK)
# try centered
themeRec = themeName.get_rect()
themeRec.center = (GL.scrLength/2, GL.scrWidth/4)
# define size of item
# itemFont = pygame.font.Font(cusFontAddress, 30)
from Button import Button
btItem = [0]*5 # ???我也不知道怎么办
btItem[0] = Button("src/itemImage/开始游戏up.png", "src/itemImage/开始游戏down.png", (GL.scrLength/2, 300))
btItem[1] = Button("src/itemImage/继续游戏up.png", "src/itemImage/继续游戏down.png", (GL.scrLength/2, 370))
btItem[2] = Button("src/itemImage/帮助up.png", "src/itemImage/帮助down.png", (GL.scrLength/2, 440))
btItem[3] = Button("src/itemImage/关于up.png", "src/itemImage/关于down.png", (GL.scrLength/2, 510))
btItem[4] = Button("src/itemImage/退出up.png", "src/itemImage/退出down.png", (GL.scrLength/2, 580))
# define screen background picture
bg = pygame.image.load("src/bgpicture.png")
GL.screen.blit(bg, (0,0))
# load game name to screen
GL.screen.blit(themeName, themeRec)
# load item
for i in range(0,5):
btItem[i].render()
# keep running of program
while True:
# monitor events
for event in pygame.event.get():
# quit event
if event.type in (QUIT, ):
# quit the game but I don't know why need two statement
pygame.quit()
sys.exit()
for i in range(0,5):
flag = btItem[i].render()
if flag and event.type == MOUSEBUTTONDOWN:
if i == 0:
from mainGame import run
run()
elif i == 1:
print("读档中。。。")
print("继续游戏~")
elif i == 2:
print("帮助~")
print("反正也没人看/")
elif i == 3:
print("开发人员:")
print("孙Sir")
print("张Sir")
print("杨头子")
elif i == 4:
pygame.quit()
sys.exit()
else:
print("Bug!!!")
pygame.display.update()
if __name__ == "__main__":
# 这是判断是否当前代码为主函数,
# 即pyhton编译的程序
# 若不是,则文件内的代码不会执行
main()
| 34 | 103 | 0.518293 |
09ec2cbdcdf1fa2940228bc0318401fad4df2e98 | 11,450 | py | Python | src/python/fig_plot/fig_plot.py | Magic-wei/easy_plot | f5bbc9c7a4793d05ead951540c891a3ff729c5cc | [
"MIT"
] | 5 | 2018-12-26T14:55:36.000Z | 2021-06-28T00:56:33.000Z | src/python/fig_plot/fig_plot.py | Magic-wei/easy_plot | f5bbc9c7a4793d05ead951540c891a3ff729c5cc | [
"MIT"
] | 1 | 2019-02-13T02:24:47.000Z | 2019-03-23T16:26:32.000Z | src/python/fig_plot/fig_plot.py | Magic-wei/easy_plot | f5bbc9c7a4793d05ead951540c891a3ff729c5cc | [
"MIT"
] | null | null | null | #! /usr/bin/env python
# -*- coding: utf-8 -*-
'''
---------------------------------
Figure Plot
Author: Wei Wang
Date: 07/01/2019
---------------------------------
'''
import os
import glob
import pandas as pd
import numpy as np # for easy generation of arrays
import matplotlib.pyplot as plt
import matplotlib as mpl
# --- parameter settings -------------------------------------------------
# Resolve everything relative to this script's location so the tool can be
# launched from any working directory.
script_path = os.path.split(os.path.realpath(__file__))[0] # os.getcwd() returns the path in your terminal
(base_path, script_dir) = os.path.split(script_path)
# Run data lives under <repo>/data/fig_plot; each run folder holds a state.csv.
search_path = os.path.join(script_path, '..','..','..','data', 'fig_plot')
filename = os.path.join('*', 'state.csv')
global_path_file = os.path.join(script_path,'..','..','..','data','fig_plot','global_path.txt')
# print('search_path', search_path)
# print('global_path_file', global_path_file)
# When True, every figure is additionally written to disk (PNG) next to this script.
save_enable = False
image_path = script_path
fig_dpi = 100; # inches = pixels / dpi
figsize_inch = list(np.array([800, 600]) / fig_dpi)
# dynamic rc settings: global font / line / tick styling applied to all figures
mpl.rcParams['font.size'] = 18
# mpl.rcParams['font.family'] = 'sans'
mpl.rcParams['font.style'] = 'normal'
mpl.rc('axes', titlesize=18, labelsize=18)
mpl.rc('lines', linewidth=2.5, markersize=5)
mpl.rcParams['xtick.labelsize'] = 16
mpl.rcParams['ytick.labelsize'] = 16
# customized colors: 0-255 RGB triples scaled to matplotlib's 0-1 floats
green_lv1 = list(np.array([229, 245, 249]) / 255.0)
green_lv2 = list(np.array([153, 216, 201]) / 255.0)
green_lv3 = list(np.array([44, 162, 95]) / 255.0)
gray = list(np.array([99, 99, 99]) / 255.0)
# --- import data --------------------------------------------------------
## read the global (reference) path from a space-delimited txt file
global_path = pd.read_csv(global_path_file, delimiter=' ', header=None)
global_path.columns = ["point_id", "position_x", "position_y"]
# print(global_path.head())
## read vehicle states: one state.csv per run directory, collected in `dataset`
dataset = []
file_list = glob.glob(os.path.join(search_path, filename))
# print('file_list has:', file_list)
print(os.path.join(search_path, filename))
for file in file_list:
    raw_data = pd.read_csv(file)
    dataset.append(raw_data)
    print(file, '-> dataset[%d]' %(len(dataset)))
# --- path comparison: global path vs. every recorded vehicle path -------
fig = plt.figure(figsize=figsize_inch, dpi=fig_dpi, frameon=False)
ax = fig.add_subplot(111)
line_global, = ax.plot(global_path['position_x'], global_path['position_y'],
                       linestyle='-', color=gray, linewidth=1.5, label='global path')
lines = []
lines.append(line_global)
i = 0
for data in dataset:
    i += 1
    line_tmp, = ax.plot(data['position_x'], data['position_y'],
                        linestyle='--', linewidth=1.5, label='vehicle path # %d'%(i))
    lines.append(line_tmp)
# Equal aspect ratio so x/y distances are comparable on screen.
plt.axis('equal')
ax.set_adjustable('box')
# Axis limits: pad each side by ~10% of the data range, rounded to 10 m.
x_min = np.min(global_path["position_x"])
x_max = np.max(global_path["position_x"])
y_min = np.min(global_path["position_y"])
y_max = np.max(global_path["position_y"])
rx = x_max - x_min
ry = y_max - y_min
scale = 10
resolution = 10
x_min_scale = np.round((x_min - rx/scale)/resolution) * resolution
x_max_scale = np.round((x_max + rx/scale)/resolution) * resolution
y_min_scale = np.round((y_min - ry/scale)/resolution) * resolution
y_max_scale = np.round((y_max + ry/scale)/resolution) * resolution
# ax.set_xlim(x_min_scale, x_max_scale)
# ax.set_ylim(y_min_scale, y_max_scale)
ax.axis([x_min_scale, x_max_scale, y_min_scale, y_max_scale])
plt.xticks(np.linspace(x_min_scale, x_max_scale, 5))
plt.yticks(np.linspace(y_min_scale, y_max_scale, 5))
## axis labels and legend
plt.xlabel('x')
plt.ylabel('y')
# plt.title('path comparison')
plt.legend(handles=lines, loc='best', frameon=False, ncol=1)
## grid
plt.grid(True, color=gray, alpha=0.3)
## save (PNG enabled; other formats kept commented out)
if save_enable:
    save_name = 'path_comparison'
    # plt.savefig(os.path.join(script_path, '%s.svg'%(save_name)))
    plt.savefig(os.path.join(script_path, '%s.png'%(save_name)))
    # plt.savefig(os.path.join(script_path, '%s.eps'%(save_name)))
    # plt.savefig(os.path.join(script_path, '%s.pdf'%(save_name)))
# --- steering angle: desired vs. actual, one figure per run -------------
i = 0
for data in dataset:
    i += 1
    time_cur = data['time_cur']
    des_steering_angle = data['des_steering_angle']
    cur_steering_angle = data['cur_steering_angle']
    # Heuristic unit fix: values all below 0.6 are presumably radians and get
    # converted to degrees -- TODO confirm the 0.6 threshold is safe.
    if max(des_steering_angle) < 0.6:
        des_steering_angle = des_steering_angle * 180 / np.pi;
        cur_steering_angle = cur_steering_angle * 180 / np.pi;
    fig = plt.figure(figsize=figsize_inch, dpi=fig_dpi, frameon=False)
    ax = fig.add_subplot(111)
    line_des, = ax.plot(time_cur, des_steering_angle,
                        linestyle='-', color=green_lv2, label='desired steering angle')
    line_cur, = ax.plot(time_cur, cur_steering_angle,
                        linestyle='--', color=green_lv3, label='actual steering angle')
    lines = [line_des, line_cur]
    # Axis limits: pad by ~10% of the data range, rounded to 1 unit.
    x_min = np.min(time_cur)
    x_max = np.max(time_cur)
    y_min = np.min([np.min(des_steering_angle), np.min(cur_steering_angle)])
    y_max = np.max([np.max(des_steering_angle), np.max(cur_steering_angle)])
    rx = x_max - x_min
    ry = y_max - y_min
    scale = 10
    resolution = 1
    x_min_scale = x_min
    x_max_scale = np.round((x_max + rx/scale)/resolution) * resolution
    y_min_scale = np.round((y_min - ry/scale)/resolution) * resolution
    y_max_scale = np.round((y_max + ry/scale)/resolution) * resolution
    ax.axis([x_min_scale, x_max_scale, y_min_scale, y_max_scale])
    plt.xticks(np.linspace(x_min_scale, x_max_scale, 5))
    plt.yticks(np.linspace(y_min_scale, y_max_scale, 5))
    ## axis labels and legend
    plt.xlabel('time (s)')
    # NOTE(review): 'sterring' typo in the displayed label text -- code left
    # unchanged here since the string is user-visible output.
    plt.ylabel('sterring angle (deg)')
    # plt.title('steering angle')
    plt.legend(loc='best', frameon=False, ncol=1)
    ## grid
    plt.grid(True, color=gray, alpha=0.3)
    ## save (PNG enabled; other formats kept commented out)
    if save_enable:
        save_name = 'steering_angle_%d'%(i)
        # plt.savefig(os.path.join(script_path, '%s.svg'%(save_name)))
        plt.savefig(os.path.join(script_path, '%s.png'%(save_name)))
        # plt.savefig(os.path.join(script_path, '%s.eps'%(save_name)))
        # plt.savefig(os.path.join(script_path, '%s.pdf'%(save_name)))
# --- tracking error: lateral (left axis) and heading (right axis) -------
i = 0
for data in dataset:
    i += 1
    time_cur = data['time_cur']
    lateral_error = data['lateral_error']
    heading_error = data['heading_error']
    fig = plt.figure(figsize=figsize_inch, dpi=fig_dpi, frameon=False)
    ax = fig.add_subplot(111)
    line_1, = ax.plot(time_cur, lateral_error,
                      linestyle='-', color=green_lv2, label='lateral error')
    # Left y-axis limits: pad by ~10% of the lateral-error range.
    y_min = np.min(lateral_error)
    y_max = np.max(lateral_error)
    ry = y_max - y_min
    scale = 10
    resolution = 1
    y_min_scale = np.round((y_min - ry/scale)/resolution) * resolution
    y_max_scale = np.round((y_max + ry/scale)/resolution) * resolution
    ax.set_ylim([y_min_scale, y_max_scale])
    # This plt.yticks call targets the current axes, which is still `ax`.
    plt.yticks(np.linspace(y_min_scale, y_max_scale, 5))
    # Second y-axis sharing the same x-axis for the heading error.
    ax2 = ax.twinx()
    line_2, = ax2.plot(time_cur, heading_error,
                       linestyle='--', color=green_lv3, label='heading error')
    # Right y-axis / shared x-axis limits, same 10%-padding scheme.
    x_min = np.min(time_cur)
    x_max = np.max(time_cur)
    y_min = np.min(heading_error)
    y_max = np.max(heading_error)
    rx = x_max - x_min
    ry = y_max - y_min
    scale = 10
    resolution = 1
    x_min_scale = x_min
    x_max_scale = np.round((x_max + rx/scale)/resolution) * resolution
    y_min_scale = np.round((y_min - ry/scale)/resolution) * resolution
    y_max_scale = np.round((y_max + ry/scale)/resolution) * resolution
    ax.set_xlim([x_min_scale, x_max_scale])
    ax2.set_ylim([y_min_scale, y_max_scale])
    # After twinx(), the pyplot "current axes" is ax2, so these tick calls
    # apply to the right-hand axis -- the ordering here is load-bearing.
    plt.xticks(np.linspace(x_min_scale, x_max_scale, 5))
    plt.yticks(np.linspace(y_min_scale, y_max_scale, 5))
    ## axis labels and legends (one legend per axis)
    ax.set_ylabel('lateral error (m)')
    ax2.set_ylabel('heading error (deg)')
    ax.set_xlabel('time (s)')
    # plt.title('tracking error')
    ax.legend(loc=1, frameon=False, ncol=1)
    ax2.legend(loc=2, frameon=False, ncol=1)
    ## save (PNG enabled; other formats kept commented out)
    if save_enable:
        save_name = 'tracking_error_%d'%(i)
        # plt.savefig(os.path.join(script_path, '%s.svg'%(save_name)))
        plt.savefig(os.path.join(script_path, '%s.png'%(save_name)))
        # plt.savefig(os.path.join(script_path, '%s.eps'%(save_name)))
        # plt.savefig(os.path.join(script_path, '%s.pdf'%(save_name)))
# --- velocity: longitudinal and lateral speed, one figure per run -------
for idx, run in enumerate(dataset, start=1):
    t = run['time_cur']
    vx = run['linear_v_x']
    vy = run['linear_v_y']
    time_cost = run['time_cost']
    fig = plt.figure(figsize=figsize_inch, dpi=fig_dpi, frameon=False)
    ax = fig.add_subplot(111)
    vx_line, = ax.plot(t, vx, linestyle='-', color=green_lv2,
                       label='longitudinal velocity')
    vy_line, = ax.plot(t, vy, linestyle='--', color=green_lv3,
                       label='lateral velocity')
    handles = [vx_line, vy_line]
    # Axis limits: pad the time axis by 10% of its span; snap the velocity
    # axis to multiples of 0.5 m/s with the floor pinned at zero.
    t_lo = np.min(t)
    t_hi = np.max(t)
    v_hi = np.max([np.max(vx), np.max(vy)])
    pad = (t_hi - t_lo) / 10
    step = 0.5
    x_lo = t_lo
    x_hi = np.round((t_hi + pad)/step) * step
    y_lo = np.round((0.)/step) * step
    y_hi = np.round(v_hi/step) * step
    ax.axis([x_lo, x_hi, y_lo, y_hi])
    plt.xticks(np.linspace(x_lo, x_hi, 5))
    plt.yticks(np.linspace(y_lo, y_hi, 5))
    ## axis labels and legend
    plt.xlabel('time (s)')
    plt.ylabel('velocity (m/s)')
    plt.legend(loc='best', frameon=False, ncol=1)
    ## grid
    plt.grid(True, color=gray, alpha=0.3)
    ## save (PNG only)
    if save_enable:
        plt.savefig(os.path.join(script_path, 'velocity_%d.png' % idx))
# --- lateral acceleration, one figure per run ---------------------------
i = 0
for data in dataset:
    i += 1
    time_cur = data['time_cur']
    linear_acc_y = data['linear_acc_y']
    fig = plt.figure(figsize=figsize_inch, dpi=fig_dpi, frameon=False)
    ax = fig.add_subplot(111)
    line_acc, = ax.plot(time_cur, linear_acc_y,
                        linestyle='-', color=green_lv2, label='lateral acceleration')
    # Axis limits: pad by ~10% of the data range, rounded to 1 unit.
    x_min = np.min(time_cur)
    x_max = np.max(time_cur)
    y_min = np.min(linear_acc_y)
    y_max = np.max(linear_acc_y)
    rx = x_max - x_min
    ry = y_max - y_min
    scale = 10
    resolution = 1
    x_min_scale = x_min
    x_max_scale = np.round((x_max + rx/scale)/resolution) * resolution
    y_min_scale = np.round((y_min - ry/scale)/resolution) * resolution
    y_max_scale = np.round((y_max + ry/scale)/resolution) * resolution
    ax.axis([x_min_scale, x_max_scale, y_min_scale, y_max_scale])
    plt.xticks(np.linspace(x_min_scale, x_max_scale, 5))
    plt.yticks(np.linspace(y_min_scale, y_max_scale, 5))
    ## axis labels and legend
    plt.xlabel('time (s)')
    # Fixed label: the original '(% m/s^2 %)' rendered the percent signs
    # literally; use mathtext so the exponent is typeset as m/s².
    plt.ylabel('acceleration ($m/s^2$)')
    # plt.title('acceleration')
    plt.legend(loc='best', frameon=False, ncol=1)
    ## grid
    plt.grid(True, color=gray, alpha=0.3)
    ## save (PNG only)
    if save_enable:
        save_name = 'acceleration_%d'%(i)
        plt.savefig(os.path.join(script_path, '%s.png'%(save_name)))

# Show all generated figures at once.
plt.show()
b503c6cff5f85e73bad09f5a0316f160dbfdfc3c | 1,013 | py | Python | Copyright_contract.py | JunbeomGwak/final_project | 1a90fd4254de22907387c1e45cf2caaa9e0fc907 | [
"MIT"
] | null | null | null | Copyright_contract.py | JunbeomGwak/final_project | 1a90fd4254de22907387c1e45cf2caaa9e0fc907 | [
"MIT"
] | null | null | null | Copyright_contract.py | JunbeomGwak/final_project | 1a90fd4254de22907387c1e45cf2caaa9e0fc907 | [
"MIT"
] | null | null | null | import solcx
from logzero import logger
from web3 import Web3
from solcx import compile_files
# Connect to a local Ethereum JSON-RPC node -- presumably Ganache, whose
# default port is 7545 (TODO confirm) -- and sign from its first account.
w3 = Web3(Web3.HTTPProvider('http://127.0.0.1:7545'))
w3.eth.defaultAccount = w3.eth.accounts[0]
# Fetch the Solidity 0.8.0 compiler needed by compile_files().
print('install solcx 0.8.0..')
solcx.install_solc('0.8.0')
def deploy(contract_file, contract_name):
    """Compile a Solidity source file, deploy `contract_name` from it, and
    return a contract instance bound to the deployed address."""
    compiled = compile_files([contract_file])
    # compile_files keys its output as "<path>:<contract name>".
    iface = compiled[f'{contract_file}:{contract_name}']
    factory = w3.eth.contract(abi=iface['abi'],
                              bytecode='0x' + iface['bin'],
                              bytecode_runtime=iface['bin-runtime'])
    # Send the constructor transaction and wait for its receipt.
    tx_hash = factory.constructor().transact()
    logger.info(f'tx_hash: {tx_hash}')
    tx_receipt = w3.eth.getTransactionReceipt(tx_hash)
    logger.info(f'Copyright_tx_receipt: {tx_receipt}')
    contract_address = tx_receipt['contractAddress']
    logger.info(f'Copyright_contract_address: {contract_address}')
    # Bind the factory to the freshly deployed address.
    contract_instance = factory(contract_address)
    logger.info(f'Copyright_contract_instance: {contract_instance}')
    return contract_instance
| 30.69697 | 71 | 0.754195 |
e2f6b4be25549e24bfeffb3b68e5eeff017d052c | 1,425 | py | Python | manage.py | lawja/FavColor | 6c8f1a7af498021143cc7a8ac7f66dd6a5cda179 | [
"Apache-2.0"
] | null | null | null | manage.py | lawja/FavColor | 6c8f1a7af498021143cc7a8ac7f66dd6a5cda179 | [
"Apache-2.0"
] | null | null | null | manage.py | lawja/FavColor | 6c8f1a7af498021143cc7a8ac7f66dd6a5cda179 | [
"Apache-2.0"
] | null | null | null | '''
manage.py
Server start module
'''
import os
from app import create_app
from flask_script import Manager, Shell, Command, Option
from pymongo import MongoClient
from flask_moment import Moment
# Build the Flask app from the FLASK_CONFIG env var (falls back to 'default').
app = create_app(os.getenv('FLASK_CONFIG') or 'default')
# NOTE(review): debug=True is forced unconditionally -- should not ship to
# production; confirm whether the config object is meant to control this.
app.debug = True
moment = Moment(app)
manager = Manager(app)
# MongoDB connection (mLab-hosted); credentials come from the app config.
mongo_url = ('mongodb://%s:%s@ds028310.mlab.com'
             ':28310/tudev_checkout' % (app.config['DB_USER'],
                                        app.config['DB_PASS']))
# connect=False defers the actual connection until first use (fork-safe).
client = MongoClient(mongo_url, connect=False)
def make_shell_context():
    """Namespace made available inside the `shell` manager command."""
    return {'app': app}
@manager.option('-h', '--host', dest='host', default='0.0.0.0')
@manager.option('-p', '--port', dest='port', type=int, default=1337)
@manager.option('-w', '--workers', dest='workers', type=int, default=4)
def gunicorn(host, port, workers):
    """Start the Server with Gunicorn"""
    from gunicorn.app.base import Application

    class GunicornServer(Application):
        # Minimal Application subclass: hand Gunicorn its config and the
        # WSGI callable to serve.
        def init(self, parser, opts, args):
            config = {'bind': '%s:%s' % (host, port),
                      'workers': workers,
                      'timeout': 120}
            return config

        def load(self):
            # The module-level Flask app is the WSGI callable.
            return app

    server = GunicornServer()
    return server.run()
# Register the interactive shell command with the preloaded context above.
manager.add_command("shell", Shell(make_context=make_shell_context))

# Dispatch CLI commands only when this file is executed directly.
if __name__ == '__main__':
    manager.run()
| 25 | 71 | 0.626667 |
e27ed7d30713e126ed69749eba4c40dcd148b170 | 2,795 | py | Python | hummingbot/core/utils/kill_switch.py | cardosofede/hummingbot | d1df085bb879a06a7dc77d4fdc8ff6f13d8726ca | [
"Apache-2.0"
] | 542 | 2021-12-17T22:34:31.000Z | 2022-03-31T14:36:23.000Z | hummingbot/core/utils/kill_switch.py | cardosofede/hummingbot | d1df085bb879a06a7dc77d4fdc8ff6f13d8726ca | [
"Apache-2.0"
] | 291 | 2021-12-17T20:07:53.000Z | 2022-03-31T11:07:23.000Z | hummingbot/core/utils/kill_switch.py | cardosofede/hummingbot | d1df085bb879a06a7dc77d4fdc8ff6f13d8726ca | [
"Apache-2.0"
] | 220 | 2021-12-17T12:41:23.000Z | 2022-03-31T23:03:22.000Z | import asyncio
import logging
from decimal import Decimal
from typing import Optional
from hummingbot.client.config.global_config_map import global_config_map
from hummingbot.logger import HummingbotLogger
from hummingbot.core.utils.async_utils import safe_ensure_future
class KillSwitch:
    """Watches overall bot profitability and stops the application once it
    crosses the configured kill-switch threshold (loss or gain)."""

    ks_logger: Optional[HummingbotLogger] = None

    @classmethod
    def logger(cls) -> HummingbotLogger:
        # Create the module logger lazily on first access.
        if cls.ks_logger is None:
            cls.ks_logger = logging.getLogger(__name__)
        return cls.ks_logger

    def __init__(self,
                 hummingbot_application: "HummingbotApplication"):  # noqa F821
        self._hummingbot_application = hummingbot_application
        self._kill_switch_enabled: bool = global_config_map.get("kill_switch_enabled").value
        # Config value is a percentage; convert to a fraction.
        rate_pct = global_config_map.get("kill_switch_rate").value or "0.0"
        self._kill_switch_rate: Decimal = Decimal(rate_pct) / Decimal(100)
        self._started = False
        self._update_interval = 10.0
        self._check_profitability_task: Optional[asyncio.Task] = None
        self._profitability: Optional[Decimal] = None

    async def check_profitability_loop(self):
        """Poll profitability every update interval; stop the bot when the
        configured threshold is crossed, then exit the loop."""
        while True:
            try:
                if self._kill_switch_enabled:
                    profitability: Decimal = await self._hummingbot_application.calculate_profitability()
                    self._profitability = profitability
                    zero = Decimal("0.0")
                    # Negative threshold -> stop on losses at/below it;
                    # positive threshold -> stop on gains at/above it.
                    loss_trigger = self._kill_switch_rate < zero and profitability <= self._kill_switch_rate
                    gain_trigger = self._kill_switch_rate > zero and profitability >= self._kill_switch_rate
                    if loss_trigger or gain_trigger:
                        self.logger().info("Kill switch threshold reached. Stopping the bot...")
                        self._hummingbot_application.notify(
                            f"\n[Kill switch triggered]\n"
                            f"Current profitability "
                            f"is {profitability}. Stopping the bot...")
                        self._hummingbot_application.stop()
                        break
            except asyncio.CancelledError:
                raise
            except Exception as e:
                self.logger().error(f"Error calculating profitability: {e}", exc_info=True)
            await asyncio.sleep(self._update_interval)

    def start(self):
        """Schedule the watcher from synchronous code."""
        safe_ensure_future(self.start_loop())

    async def start_loop(self):
        """(Re)start the profitability watcher task."""
        self.stop()  # cancel any previous watcher first
        self._check_profitability_task = safe_ensure_future(self.check_profitability_loop())
        self._started = True

    def stop(self):
        """Cancel the watcher task if it is still running."""
        task = self._check_profitability_task
        if task is not None and not task.done():
            task.cancel()
        self._started = False
| 42.348485 | 111 | 0.633631 |
a4f7b80b67b3fb64ea8e2f771442caac0adaf301 | 1,166 | py | Python | flag_create.py | mitre-cyber-academy/2016-Binary-200 | 73722ddde921bffec0b70a42a1db142c223f81a2 | [
"Apache-2.0"
] | null | null | null | flag_create.py | mitre-cyber-academy/2016-Binary-200 | 73722ddde921bffec0b70a42a1db142c223f81a2 | [
"Apache-2.0"
] | null | null | null | flag_create.py | mitre-cyber-academy/2016-Binary-200 | 73722ddde921bffec0b70a42a1db142c223f81a2 | [
"Apache-2.0"
] | null | null | null | #! /usr/bin/env python
#----------------------------------------
# Script to convert flags into
# rustled string for dank_asm challenge
# for the MITRE CTF 2016
#
# Written by: kierk
# Contact at: apellitieri@mitre.org
#----------------------------------------
#----------------------------------------
# For usage example try:
# ezpZ_f1Ag
# result: g~wQOp,dI
#
# NOTE: Not all flags work as some will
# produce unprintable bytes
# NOTE2: If the flag is != 9 chars you
# need to edit the MIPS addi -10
# instruction to be negative len+1
#----------------------------------------
from __future__ import print_function
def rustle(ctf_flag):
    """Return the 'rustled' form of *ctf_flag* (any length).

    Mirrors the MIPS routine: the i-th character (0-based) is XORed with a
    running accumulator t2 that takes the values 2, 4, 7, 11, 16, ...
    (t2 grows by t1, which itself increments each step).
    """
    t1 = 1
    t2 = 1
    out = []
    for ch in ctf_flag:
        t2 = t2 + t1
        t1 = t1 + 1
        out.append(chr(t2 ^ ord(ch)))
    return ''.join(out)


if __name__ == '__main__':
    # Prompt only when run as a script, so the module stays importable
    # (the original prompted at import time).
    # NOTE: raw_input is Python 2; under Python 3 use input() instead.
    ctf_flag = raw_input('What flag would you like to rustle: ')
    print("Rustled Flag: ", end="")
    print(rustle(ctf_flag))
a238f66c6fc3741c20a576d3a979e79147030ee5 | 3,441 | py | Python | flankers/youtubestore.py | SpaceAppsXploration/semantic-data-chronos | f7908f50892781154d623f46303812e88fdecb57 | [
"Apache-2.0"
] | 1 | 2017-06-15T17:51:19.000Z | 2017-06-15T17:51:19.000Z | flankers/youtubestore.py | SpaceAppsXploration/semantic-data-chronos | f7908f50892781154d623f46303812e88fdecb57 | [
"Apache-2.0"
] | null | null | null | flankers/youtubestore.py | SpaceAppsXploration/semantic-data-chronos | f7908f50892781154d623f46303812e88fdecb57 | [
"Apache-2.0"
] | null | null | null | import webapp2
import urllib
import json
from apiclient.discovery import build
from optparse import OptionParser
from config.config import _DEBUG
from config.youtube_secret import _KEY, YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION
from datastore.models import WebResource
# Default search parameters for the YouTube Data API `search.list` call.
# NOTE(review): publishedAfter == publishedBefore defines an empty time
# window, and the handler below hardcodes its own publishedAfter anyway --
# confirm which values are actually intended.
params = {
    'part': 'id, snippet',
    'q': 'space+exploration+documentary',
    'publishedAfter': '2005-01-01T00:00:00Z',
    'publishedBefore': '2005-01-01T00:00:00Z',
    'order': 'relevance'
}
### Advanced operators: https://support.google.com/youtube/answer/2685977?hl=en
# 'q': '##"some tag"' (double # operator)
# tags: space, science, space race, space exploration, astrophysics, astronomy, quantum physics
class YoutubeStore(webapp2.RequestHandler):
    """
    A very basic fetch-and-store handler for the YouTube Data API.

    GET runs a single search request and persists every returned video
    through the WebResource datastore model.
    """
    def get(self):

        def build_client():
            """
            Build a YouTube Data API client using apiclient.

            :return: an authenticated service object
            """
            youtube = build(
                YOUTUBE_API_SERVICE_NAME,
                YOUTUBE_API_VERSION,
                developerKey=_KEY
            )
            return youtube

        def fetch_data(client, q):
            """
            Create and execute one search request against the service.

            NOTE(review): publishedAfter is hardcoded here and overrides the
            value in the module-level `params` dict -- confirm which is
            intended before changing either.

            :param client: a client object
            :param q: a query string
            :return: dict() with the API response
            """
            data = client.search().list(
                q=q,
                part=params['part'],
                maxResults=10,
                publishedAfter='2015-10-10T00:00:00Z'
            ).execute()
            return data

        def store_video(obj):
            """
            Persist a single video object in the datastore model.

            :param obj: dict() of a video object
            """
            WebResource.store_youtube_video(obj)

        def store_response(resp):
            """
            Store every item contained in the API response.

            (A leftover Python 2 debug `print` statement -- a syntax error
            under Python 3 -- was removed from here.)

            :param resp: the response dict()
            """
            for video in resp['items']:
                store_video(video)

        client = build_client()
        response = fetch_data(client, params['q'])
        store_response(response)
        # note: pagination via response['nextPageToken'] is not implemented
        return None
'''
{
"kind": "youtube#searchResult",
"etag": "\"oyKLwABI4napfYXnGO8jtXfIsfc/MK_9fr0vi4jxlpvMPSqUZ7cnXXE\"",
"id": {
"kind": "youtube#video",
"videoId": "ZtaKWt26dNs"
},
"snippet": {
"publishedAt": "2015-02-20T19:14:29.000Z",
"channelId": "UCIR_LPmEQ9QHR0yB2lxgaxQ",
"title": "Space Exploration - \"Our Universe\" (Episode 01) [2015 Documentary]",
"description": "On this episode of Space Exploration - \"Our Universe\" [2015 Documentary], ongoing journey to discovery celestial structures in outer space.Extreme space ...",
"thumbnails": {
"default": {
"url": "https://i.ytimg.com/vi/ZtaKWt26dNs/default.jpg"
},
"medium": {
"url": "https://i.ytimg.com/vi/ZtaKWt26dNs/mqdefault.jpg"
},
"high": {
"url": "https://i.ytimg.com/vi/ZtaKWt26dNs/hqdefault.jpg"
}
},
"channelTitle": "NewerDocumentaries",
"liveBroadcastContent": "none"
}
}
'''
# WSGI entry point: route the cron URL to the fetch-and-store handler.
application = webapp2.WSGIApplication([
    webapp2.Route('/cron/storeyoutube', YoutubeStore),
], debug=_DEBUG)
| 28.915966 | 180 | 0.586748 |
dc66c162effc23e9c870fcf5c66157a2da386766 | 7,672 | py | Python | env_backend.py | mizolotu/SmartMantsinen | ffb0401a3bd9449df8124c09e57025bd13d2ad82 | [
"MIT"
] | null | null | null | env_backend.py | mizolotu/SmartMantsinen | ffb0401a3bd9449df8124c09e57025bd13d2ad82 | [
"MIT"
] | null | null | null | env_backend.py | mizolotu/SmartMantsinen | ffb0401a3bd9449df8124c09e57025bd13d2ad82 | [
"MIT"
] | null | null | null | import sys, requests, os
from time import sleep
# gains and thresholds
attempts_max = 10 # maximum number of attempts to register
exec_freq = 1000 # Hz, target rate for the per-step callback work below
sleep_interval = 3 # seconds between registration attempts

# HTTP endpoints of the local training server (see server counterpart).
http_url = 'http://127.0.0.1:5000'
id_uri = 'id'
state_uri = 'post_state'
action_uri = 'get_action'
signals_uri = 'signals'
def post_id(id):
    """Register this backend id with the HTTP server.

    Returns the server's configuration timestamp, or None when the
    request fails for any reason.
    """
    endpoint = '{0}/{1}'.format(http_url, id_uri)
    try:
        response = requests.post(endpoint, json={'id': id})
        config_time = response.json()['t_config']
    except Exception:
        config_time = None
    return config_time
def get_signals(id):
    """Fetch the signal configuration for this backend id.

    :param id: backend process id used as the registration key
    :return: tuple (signals dict, config timestamp); ({}, None) on failure
    """
    uri = '{0}/{1}'.format(http_url, signals_uri)
    try:
        r = requests.get(uri, json={'id': id})
        jdata = r.json()
        signals = jdata['signals']
        config_time = jdata['t_config']
    except Exception:
        # Was a bare `except:`, which also swallows SystemExit and
        # KeyboardInterrupt; narrowed so the process stays interruptible.
        signals = {}
        config_time = None
    return signals, config_time
def post_state_and_reward(id, state, reward, conditional, t):
    """Report the current state, reward and conditional values to the server.

    :param t: current simulation time attached to the posted state
    :return: the server-side receive timestamp, or None when the post fails
    """
    uri = '{0}/{1}'.format(http_url, state_uri)
    r_time = None
    try:
        r = requests.post(uri, json={'id': id, 'state': state, 'reward': reward, 'conditional': conditional, 't_state_simulation': t})
        r_time = r.json()['t_state_real']
    except Exception as e:
        # Bug fix: the original did `print(Exception)`, which prints the
        # exception *class* object rather than the caught error instance.
        print(e)
    return r_time
def get_action(id):
    """Poll the server for the next action for this backend id.

    :return: tuple (action, conditional_input, action submission real time,
             action duration in simulation time); ([], [], None, None) when
             the request fails
    """
    uri = '{0}/{1}'.format(http_url, action_uri)
    try:
        r = requests.get(uri, json={'id': id})
        jdata = r.json()
        action = jdata['action']
        conditional_input = jdata['conditional']
        action_submission_real_time = jdata['t_action_real']
        action_duration_simulation_time = jdata['t_action_simulation']
    except Exception:
        # Was a bare `except:`; narrowed so Ctrl-C/SystemExit still propagate.
        action = []
        conditional_input = []
        action_submission_real_time = None
        action_duration_simulation_time = None
    return action, conditional_input, action_submission_real_time, action_duration_simulation_time
def initScript():
    """One-time setup hook. Registers this backend with the training server
    and initialises the shared GObject.data slots used by callScript.

    NOTE(review): GObject (like GDict/GSolver below) is not defined in this
    file -- presumably injected by the simulator's scripting runtime; confirm.
    """
    # Register with the server, retrying up to attempts_max times.
    backend_id = os.getpid()
    id_time = None
    n_attempts = 0
    print('Trying to register...')
    while id_time is None:
        id_time = post_id(backend_id)
        sleep(sleep_interval)
        n_attempts += 1
        if n_attempts >= attempts_max:
            sys.exit(1)
    print('Successfully registerd with id {0}!'.format(backend_id))
    GObject.data['id'] = backend_id
    # Signals are fetched lazily in callScript once the server provides them.
    GObject.data['signals'] = None
    # Currently executing action and its conditional inputs.
    GObject.data['current_action'] = None
    GObject.data['current_conditional'] = None
    # Bookkeeping timestamps (simulation time unless named "real").
    GObject.data['last_iteration_simulation_time'] = 0
    GObject.data['last_action_real_time'] = 0
    GObject.data['current_action_end_simulation_time'] = 0
    GObject.data['last_state_sent_simulation_time'] = 0
    # Whether an action is currently being applied.
    GObject.data['in_action'] = False
def callScript(deltaTime, simulationTime):
    """Per-step simulator hook: throttled to exec_freq, it exchanges
    state/reward/action messages with the training server and applies the
    current action to the simulator's input signals.
    """
    # Throttle: only do work if at least 1/exec_freq seconds of simulation
    # time have elapsed since the last processed iteration.
    time_passed = False
    t_now = simulationTime
    t_last = GObject.data['last_iteration_simulation_time']
    t_delta = t_now - t_last
    if t_delta > 1.0 / exec_freq:
        time_passed = True
    # report state and reward if the required time passed
    if time_passed:
        # Signals already configured: normal action/state exchange.
        if GObject.data['signals'] is not None:
            # query an action
            print('querying new action')
            action, conditional_input, action_submission_real_time, action_duration_simulation_time = get_action(GObject.data['id'])
            # A strictly newer real-time stamp means this is a new action.
            if action_submission_real_time is not None and action_submission_real_time > GObject.data['last_action_real_time']:
                # Save the action and the simulation time at which it expires.
                GObject.data['current_action'] = action
                GObject.data['current_conditional'] = conditional_input
                GObject.data['last_action_real_time'] = action_submission_real_time
                GObject.data['current_action_end_simulation_time'] = simulationTime + action_duration_simulation_time
                GObject.data['in_action'] = True
                # Report the observation that accompanies the new action.
                state_values = [x.value() for x in GObject.data['state_objects']]
                reward_values = [x.value() for x in GObject.data['reward_objects']]
                conditional_values = [x.value() for x in GObject.data['conditional_objects']]
                post_state_and_reward(GObject.data['id'], state_values, reward_values, conditional_values, t_now)
                GObject.data['last_state_sent_simulation_time'] = simulationTime
            # Apply the stored action while it has not yet expired.
            if simulationTime <= GObject.data['current_action_end_simulation_time']:
                if action is not None:
                    print(f"executing action {GObject.data['current_action']} for {GObject.data['current_action_end_simulation_time'] - simulationTime} seconds")
                    # Write each action component to its input signal.
                    assert len(GObject.data['current_action']) == len(GObject.data['signals']['input'])
                    for i, input_signal in enumerate(GObject.data['signals']['input']):
                        GDict[input_signal].setInputValue(GObject.data['current_action'][i])
                    assert len(GObject.data['current_conditional']) == len(GObject.data['signals']['conditional'])
                    for i, input_signal in enumerate(GObject.data['signals']['conditional']):
                        GDict[input_signal].setInputValue(GObject.data['current_conditional'][i])
            else:
                # Action expired: keep reporting state so the server can react.
                state_values = [x.value() for x in GObject.data['state_objects']]
                reward_values = [x.value() for x in GObject.data['reward_objects']]
                conditional_values = [x.value() for x in GObject.data['conditional_objects']]
                post_state_and_reward(GObject.data['id'], state_values, reward_values, conditional_values, t_now)
                GObject.data['last_state_sent_simulation_time'] = simulationTime
        # Signals not yet configured: try to fetch them from the server.
        else:
            signals, signals_time = get_signals(GObject.data['id'])
            if signals != {}:
                GObject.data['signals'] = signals
                # Resolve signal names to solver parameter objects once.
                # State = input signals' InputValue plus output+reward values.
                GObject.data['state_objects'] = [
                    GSolver.getParameter(input_signal, 'InputValue') for input_signal in GObject.data['signals']['input']
                ] + [
                    GSolver.getParameter(output_signal, 'value') for output_signal in GObject.data['signals']['output'] + GObject.data['signals']['reward']
                ]
                GObject.data['reward_objects'] = [
                    GSolver.getParameter(output_signal, 'value') for output_signal in GObject.data['signals']['reward']
                ]
                GObject.data['conditional_objects'] = [
                    GSolver.getParameter(item, 'InputValue') for item in GObject.data['signals']['conditional']
                ]
                # Post the very first observation to kick off the exchange.
                state_values = [x.value() for x in GObject.data['state_objects']]
                reward_values = [x.value() for x in GObject.data['reward_objects']]
                conditional_values = [x.value() for x in GObject.data['conditional_objects']]
                post_state_and_reward(GObject.data['id'], state_values, reward_values, conditional_values, t_now)
                GObject.data['last_state_sent_simulation_time'] = simulationTime
    # Save the last iteration time for the throttling check above.
    GObject.data['last_iteration_simulation_time'] = simulationTime
8af9e1f9e821cbcd3e568b4fc014f99f5f907657 | 2,599 | py | Python | randydata/data_layout.py | JohannesRanderath/randydata | 4f67224019959fda0b02bf5baac7f2440c36ba1e | [
"MIT"
] | null | null | null | randydata/data_layout.py | JohannesRanderath/randydata | 4f67224019959fda0b02bf5baac7f2440c36ba1e | [
"MIT"
] | 2 | 2022-03-29T10:44:48.000Z | 2022-03-30T16:51:10.000Z | randydata/data_layout.py | JohannesRanderath/randydata | 4f67224019959fda0b02bf5baac7f2440c36ba1e | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""
Simple tools to help format data.
"""
from IPython.display import HTML, display
def make_table(headings: [], data: [[]], columns: bool = False, row_headings: [] = None):
"""
Creates table from given headings and data.
headings should be a list of strings.
Data should be a list of tuples or arrays with length of len(headings)
If columns is given and True, data consists of tuples / arrays of columns, else it's elements are interpreted as columns.
In row mode (default) each element of data should be of len(headings), in column mode, there should be len(headings) elements of equal length in data.
Will return False if data input is bad.
:param headings: Row of column headings
:param data: list of table rows (list of lists)
:param columns: If True, data is interpreted as list of columns, instead of rows.
:param row_headings: If given, a column with given contents as headings is added as first column.
:return: No return. Table is displayed using IPython.display.
"""
if not (type(headings) in [type([]), type(())] and type(data) in [type([]), type(())] and (
not row_headings or type(row_headings) in [type([]), type(())])):
print("1")
return False
if columns:
if row_headings and not len(row_headings) == len(data[0]):
print(data[0], len(data[0]), len(row_headings))
print(2)
return False
if not len(data) == len(headings):
print(3)
return False
rows = len(data[0])
for column in data:
if not len(column) == rows:
print(4)
return False
formatted_data = []
for i in range(0, rows):
formatted_data.append([el[i] for el in data])
data = formatted_data
else:
if row_headings and not len(row_headings) == len(data):
print(5)
return False
for row in data:
if not len(row) == len(headings):
print(row)
print(6)
return False
table = "<table><tr>"
if row_headings:
table += "<th></th>"
for heading in headings:
table += "<th>" + str(heading) + "</th>"
table += "</tr>"
for i, row in enumerate(data):
table += "<tr>"
if row_headings:
table += "<td><b>" + row_headings[i] + "</td>"
for element in row:
table += "<td>" + str(element) + "</td>"
table += "</tr>"
table += "</table>"
display(HTML(table))
| 36.605634 | 154 | 0.573682 |
16e1c4d11255804d6092e5967956dc583ae585fb | 16,641 | py | Python | zero/imagenet/train_vanilla.py | kanesoban/ZeRO-Offload-experiments | 81e839b45687f6def09c6888abae60f3976663f1 | [
"Apache-2.0"
] | null | null | null | zero/imagenet/train_vanilla.py | kanesoban/ZeRO-Offload-experiments | 81e839b45687f6def09c6888abae60f3976663f1 | [
"Apache-2.0"
] | null | null | null | zero/imagenet/train_vanilla.py | kanesoban/ZeRO-Offload-experiments | 81e839b45687f6def09c6888abae60f3976663f1 | [
"Apache-2.0"
] | null | null | null | import argparse
import os
import random
import shutil
import time
import warnings
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.multiprocessing as mp
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
# All lowercase, public, callable attributes of torchvision.models — i.e. the
# model constructor functions — used as the valid choices for --arch.
model_names = sorted(name for name in models.__dict__
    if name.islower() and not name.startswith("__")
    and callable(models.__dict__[name]))
# Command-line interface for the ImageNet training script (mirrors the
# reference PyTorch ImageNet example).
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
parser.add_argument('data', metavar='DIR',
                    help='path to dataset')
parser.add_argument('-a', '--arch', metavar='ARCH', default='resnet18',
                    choices=model_names,
                    help='model architecture: ' +
                        ' | '.join(model_names) +
                        ' (default: resnet18)')
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
                    help='number of data loading workers (default: 4)')
parser.add_argument('--epochs', default=90, type=int, metavar='N',
                    help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
                    help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch-size', default=256, type=int,
                    metavar='N',
                    help='mini-batch size (default: 256), this is the total '
                         'batch size of all GPUs on the current node when '
                         'using Data Parallel or Distributed Data Parallel')
parser.add_argument('--lr', '--learning-rate', default=0.1, type=float,
                    metavar='LR', help='initial learning rate', dest='lr')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
                    help='momentum')
parser.add_argument('--wd', '--weight-decay', default=1e-4, type=float,
                    metavar='W', help='weight decay (default: 1e-4)',
                    dest='weight_decay')
parser.add_argument('-p', '--print-freq', default=10, type=int,
                    metavar='N', help='print frequency (default: 10)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
                    help='path to latest checkpoint (default: none)')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
                    help='evaluate model on validation set')
parser.add_argument('--pretrained', dest='pretrained', action='store_true',
                    help='use pre-trained model')
parser.add_argument('--world-size', default=-1, type=int,
                    help='number of nodes for distributed training')
parser.add_argument('--rank', default=-1, type=int,
                    help='node rank for distributed training')
parser.add_argument('--dist-url', default='tcp://224.66.41.62:23456', type=str,
                    help='url used to set up distributed training')
parser.add_argument('--dist-backend', default='nccl', type=str,
                    help='distributed backend')
parser.add_argument('--seed', default=None, type=int,
                    help='seed for initializing training. ')
parser.add_argument('--gpu', default=None, type=int,
                    help='GPU id to use.')
parser.add_argument('--multiprocessing-distributed', action='store_true',
                    help='Use multi-processing distributed training to launch '
                         'N processes per node, which has N GPUs. This is the '
                         'fastest way to use PyTorch for either single node or '
                         'multi node data parallel training')
# Best top-1 validation accuracy seen so far; module-level so that
# main_worker() can update it via `global best_acc1`.
best_acc1 = 0
def main():
    """Entry point: parse CLI arguments, seed RNGs, and launch training.

    Spawns one main_worker process per GPU when --multiprocessing-distributed
    is given; otherwise runs a single main_worker in-process.
    """
    args = parser.parse_args()
    if args.seed is not None:
        random.seed(args.seed)
        torch.manual_seed(args.seed)
        cudnn.deterministic = True
        warnings.warn('You have chosen to seed training. '
                      'This will turn on the CUDNN deterministic setting, '
                      'which can slow down your training considerably! '
                      'You may see unexpected behavior when restarting '
                      'from checkpoints.')
    if args.gpu is not None:
        warnings.warn('You have chosen a specific GPU. This will completely '
                      'disable data parallelism.')
    # In "env://" mode the world size comes from the environment, not the CLI.
    if args.dist_url == "env://" and args.world_size == -1:
        args.world_size = int(os.environ["WORLD_SIZE"])
    args.distributed = args.world_size > 1 or args.multiprocessing_distributed
    ngpus_per_node = torch.cuda.device_count()
    if args.multiprocessing_distributed:
        # Since we have ngpus_per_node processes per node, the total world_size
        # needs to be adjusted accordingly
        args.world_size = ngpus_per_node * args.world_size
        # Use torch.multiprocessing.spawn to launch distributed processes: the
        # main_worker process function
        mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args))
    else:
        # Simply call main_worker function
        main_worker(args.gpu, ngpus_per_node, args)
def main_worker(gpu, ngpus_per_node, args):
    """Set up one training worker and run the full train/validate loop.

    Args:
        gpu: GPU index assigned to this worker (None means "any"/CPU path).
        ngpus_per_node: number of GPUs on this node (used to derive global
            rank and to split batch size/workers in DDP mode).
        args: parsed argparse namespace from the module-level parser.

    Side effects: may initialize the process group, mutates `args`
    (gpu/rank/batch_size/workers), updates the global `best_acc1`, and writes
    checkpoint files via save_checkpoint().
    """
    global best_acc1
    args.gpu = gpu
    if args.gpu is not None:
        print("Use GPU: {} for training".format(args.gpu))
    if args.distributed:
        if args.dist_url == "env://" and args.rank == -1:
            args.rank = int(os.environ["RANK"])
        if args.multiprocessing_distributed:
            # For multiprocessing distributed training, rank needs to be the
            # global rank among all the processes
            args.rank = args.rank * ngpus_per_node + gpu
        dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
                                world_size=args.world_size, rank=args.rank)
    # create model
    if args.pretrained:
        print("=> using pre-trained model '{}'".format(args.arch))
        model = models.__dict__[args.arch](pretrained=True)
    else:
        print("=> creating model '{}'".format(args.arch))
        model = models.__dict__[args.arch]()
    if not torch.cuda.is_available():
        print('using CPU, this will be slow')
    elif args.distributed:
        # For multiprocessing distributed, DistributedDataParallel constructor
        # should always set the single device scope, otherwise,
        # DistributedDataParallel will use all available devices.
        if args.gpu is not None:
            torch.cuda.set_device(args.gpu)
            model.cuda(args.gpu)
            # When using a single GPU per process and per
            # DistributedDataParallel, we need to divide the batch size
            # ourselves based on the total number of GPUs we have
            args.batch_size = int(args.batch_size / ngpus_per_node)
            args.workers = int((args.workers + ngpus_per_node - 1) / ngpus_per_node)
            model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
        else:
            model.cuda()
            # DistributedDataParallel will divide and allocate batch_size to all
            # available GPUs if device_ids are not set
            model = torch.nn.parallel.DistributedDataParallel(model)
    elif args.gpu is not None:
        torch.cuda.set_device(args.gpu)
        model = model.cuda(args.gpu)
    else:
        # DataParallel will divide and allocate batch_size to all available GPUs
        if args.arch.startswith('alexnet') or args.arch.startswith('vgg'):
            # alexnet/vgg: only the conv features are parallelized (their large
            # classifier heads stay on one device).
            model.features = torch.nn.DataParallel(model.features)
            model.cuda()
        else:
            model = torch.nn.DataParallel(model).cuda()
    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda(args.gpu)
    optimizer = torch.optim.SGD(model.parameters(), args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)
    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            if args.gpu is None:
                checkpoint = torch.load(args.resume)
            else:
                # Map model to be loaded to specified single gpu.
                loc = 'cuda:{}'.format(args.gpu)
                checkpoint = torch.load(args.resume, map_location=loc)
            args.start_epoch = checkpoint['epoch']
            best_acc1 = checkpoint['best_acc1']
            if args.gpu is not None:
                # best_acc1 may be from a checkpoint from a different GPU
                best_acc1 = best_acc1.to(args.gpu)
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})"
                  .format(args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))
    cudnn.benchmark = True
    # Data loading code
    traindir = os.path.join(args.data, 'train')
    valdir = os.path.join(args.data, 'val')
    # Standard ImageNet per-channel normalization constants.
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    train_dataset = datasets.ImageFolder(
        traindir,
        transforms.Compose([
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ]))
    if args.distributed:
        train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
    else:
        train_sampler = None
    train_loader = torch.utils.data.DataLoader(
        train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None),
        num_workers=args.workers, pin_memory=True, sampler=train_sampler)
    val_loader = torch.utils.data.DataLoader(
        datasets.ImageFolder(valdir, transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
        ])),
        batch_size=args.batch_size, shuffle=False,
        num_workers=args.workers, pin_memory=True)
    if args.evaluate:
        validate(val_loader, model, criterion, args)
        return
    for epoch in range(args.start_epoch, args.epochs):
        if args.distributed:
            # Reshuffles each epoch so every rank sees a different partition.
            train_sampler.set_epoch(epoch)
        adjust_learning_rate(optimizer, epoch, args)
        # train for one epoch
        train(train_loader, model, criterion, optimizer, epoch, args)
        # evaluate on validation set
        acc1 = validate(val_loader, model, criterion, args)
        # remember best acc@1 and save checkpoint
        is_best = acc1 > best_acc1
        best_acc1 = max(acc1, best_acc1)
        # Only rank 0 on each node writes checkpoints in multiprocessing mode.
        if not args.multiprocessing_distributed or (args.multiprocessing_distributed
                and args.rank % ngpus_per_node == 0):
            save_checkpoint({
                'epoch': epoch + 1,
                'arch': args.arch,
                'state_dict': model.state_dict(),
                'best_acc1': best_acc1,
                'optimizer': optimizer.state_dict(),
            }, is_best)
def train(train_loader, model, criterion, optimizer, epoch, args):
    """Run one training epoch: forward, loss, backward, SGD step per batch.

    Progress (timing, loss, top-1/top-5 accuracy) is printed every
    args.print_freq batches via ProgressMeter.
    """
    batch_time = AverageMeter('Time', ':6.3f')
    data_time = AverageMeter('Data', ':6.3f')
    losses = AverageMeter('Loss', ':.4e')
    top1 = AverageMeter('Acc@1', ':6.2f')
    top5 = AverageMeter('Acc@5', ':6.2f')
    progress = ProgressMeter(
        len(train_loader),
        [batch_time, data_time, losses, top1, top5],
        prefix="Epoch: [{}]".format(epoch))
    # switch to train mode
    model.train()
    end = time.time()
    for i, (images, target) in enumerate(train_loader):
        # measure data loading time
        data_time.update(time.time() - end)
        if args.gpu is not None:
            images = images.cuda(args.gpu, non_blocking=True)
        if torch.cuda.is_available():
            target = target.cuda(args.gpu, non_blocking=True)
        # compute output
        output = model(images)
        loss = criterion(output, target)
        # measure accuracy and record loss
        acc1, acc5 = accuracy(output, target, topk=(1, 5))
        losses.update(loss.item(), images.size(0))
        top1.update(acc1[0], images.size(0))
        top5.update(acc5[0], images.size(0))
        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()
        if i % args.print_freq == 0:
            progress.display(i)
def validate(val_loader, model, criterion, args):
    """Evaluate the model on the validation set.

    Runs in eval mode with gradients disabled and returns the average
    top-1 accuracy over the whole validation set.
    """
    batch_time = AverageMeter('Time', ':6.3f')
    losses = AverageMeter('Loss', ':.4e')
    top1 = AverageMeter('Acc@1', ':6.2f')
    top5 = AverageMeter('Acc@5', ':6.2f')
    progress = ProgressMeter(
        len(val_loader),
        [batch_time, losses, top1, top5],
        prefix='Test: ')
    # switch to evaluate mode
    model.eval()
    with torch.no_grad():
        end = time.time()
        for i, (images, target) in enumerate(val_loader):
            if args.gpu is not None:
                images = images.cuda(args.gpu, non_blocking=True)
            if torch.cuda.is_available():
                target = target.cuda(args.gpu, non_blocking=True)
            # compute output
            output = model(images)
            loss = criterion(output, target)
            # measure accuracy and record loss
            acc1, acc5 = accuracy(output, target, topk=(1, 5))
            losses.update(loss.item(), images.size(0))
            top1.update(acc1[0], images.size(0))
            top5.update(acc5[0], images.size(0))
            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()
            if i % args.print_freq == 0:
                progress.display(i)
        # TODO: this should also be done with the ProgressMeter
        print(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'
              .format(top1=top1, top5=top5))
    return top1.avg
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
    """Serialize *state* to *filename*; when *is_best* is true, also copy
    that checkpoint to ``model_best.pth.tar``."""
    torch.save(state, filename)
    if not is_best:
        return
    shutil.copyfile(filename, 'model_best.pth.tar')
class AverageMeter(object):
    """Track the most recent value plus a running sum, count and average."""

    def __init__(self, name, fmt=':f'):
        self.name = name
        self.fmt = fmt
        self.reset()

    def reset(self):
        """Zero out all accumulated statistics."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record *val* observed *n* times and refresh the running mean."""
        self.val = val
        self.count += n
        self.sum += val * n
        self.avg = self.sum / self.count

    def __str__(self):
        template = '{name} {val%s} ({avg%s})' % (self.fmt, self.fmt)
        return template.format(**self.__dict__)
class ProgressMeter(object):
    """Print 'prefix [batch/total]' followed by each meter's summary line."""

    def __init__(self, num_batches, meters, prefix=""):
        self.batch_fmtstr = self._get_batch_fmtstr(num_batches)
        self.meters = meters
        self.prefix = prefix

    def display(self, batch):
        """Print one tab-separated progress line for *batch*."""
        parts = [self.prefix + self.batch_fmtstr.format(batch)]
        parts.extend(str(meter) for meter in self.meters)
        print('\t'.join(parts))

    def _get_batch_fmtstr(self, num_batches):
        # Build e.g. '[{:3d}/100]' so the batch index is right-aligned to
        # the width of the total batch count.
        width = len(str(num_batches // 1))
        cell = '{:%dd}' % width
        return '[' + cell + '/' + cell.format(num_batches) + ']'
def adjust_learning_rate(optimizer, epoch, args):
    """Step decay: divide the base LR (args.lr) by 10 for every 30 epochs."""
    decayed = args.lr * (0.1 ** (epoch // 30))
    for group in optimizer.param_groups:
        group['lr'] = decayed
def accuracy(output, target, topk=(1,)):
    """Return the top-k accuracies (as percentages) for each k in *topk*.

    *output* is a (batch, classes) score tensor and *target* a (batch,)
    tensor of class indices; each result is a 1-element tensor.
    """
    with torch.no_grad():
        k_max = max(topk)
        n = target.size(0)
        # Indices of the k_max highest-scoring classes; transpose so each
        # column corresponds to one sample, rows ordered best-first.
        pred = output.topk(k_max, 1, True, True)[1].t()
        hits = pred.eq(target.view(1, -1).expand_as(pred))
        return [
            hits[:k].reshape(-1).float().sum(0, keepdim=True).mul_(100.0 / n)
            for k in topk
        ]
# Standard script entry-point guard.
if __name__ == '__main__':
    main()
| 38.610209 | 91 | 0.604231 |
cc1feef0ef012bcb321010990c3aafee7ace5eba | 3,681 | py | Python | src/build_tools/tweak_macinstaller_script.py | soleilpqd/mozc | 4767ce2f2b6a63f1f139daea6e98bc7a564d5e4e | [
"BSD-3-Clause"
] | 1 | 2017-09-01T20:55:40.000Z | 2017-09-01T20:55:40.000Z | src/build_tools/tweak_macinstaller_script.py | soleilpqd/mozc | 4767ce2f2b6a63f1f139daea6e98bc7a564d5e4e | [
"BSD-3-Clause"
] | null | null | null | src/build_tools/tweak_macinstaller_script.py | soleilpqd/mozc | 4767ce2f2b6a63f1f139daea6e98bc7a564d5e4e | [
"BSD-3-Clause"
] | 2 | 2020-02-12T15:24:27.000Z | 2020-02-22T13:36:21.000Z | # -*- coding: utf-8 -*-
# Copyright 2010-2016, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Fix @@@...@@@ format of variables in the installer script templates for Mac.
% python tweak_macinstaller_script.py --output=out.txt --input=in.txt \
--version_file=version.txt [--build_type=dev] \
"""
__author__ = "mukai"
import logging
import optparse
import mozc_version
def _ReplaceVariables(data, environment):
"""Replace all occurrence of the variable in data by the value.
Args:
data: the original data string
environment: an iterable of (variable name, its value) pairs
Returns:
the data string which replaces the variables by the value.
"""
result = data
for (k, v) in environment:
result = result.replace(k, v)
return result
def ParseOptions():
    """Build the optparse command-line parser and parse sys.argv.

    Returns:
      The optparse options object (unset flags default to None).
    """
    opt_parser = optparse.OptionParser()
    # Each flag's destination is simply the flag name without leading dashes.
    for flag in ('--version_file', '--output', '--input', '--build_type'):
        opt_parser.add_option(flag, dest=flag.lstrip('-'))
    (options, _) = opt_parser.parse_args()
    return options
def main():
    """The main function.

    Reads the installer template named by --input, substitutes the
    @@@...@@@ variables derived from --version_file and --build_type,
    and writes the result to --output. Exits with -1 when a required
    flag is missing.
    """
    options = ParseOptions()
    required_flags = ['version_file', 'output', 'input']
    for flag in required_flags:
        if getattr(options, flag) is None:
            logging.error('--%s is not specified.', flag)
            exit(-1)
    version = mozc_version.MozcVersion(options.version_file)
    if options.build_type == 'dev':
        omaha_tag = 'external-dev'
    else:
        omaha_tag = 'external-stable'
    # This definition is copied from tools/scons/script.py
    variables = [
        ('@@@MOZC_VERSION@@@', version.GetVersionString()),
        ('@@@MOZC_PRODUCT_ID@@@', 'com.google.JapaneseIME'),
        ('@@@MOZC_APP_PATH@@@', '/Library/Input Methods/GoogleJapaneseInput.app'),
        ('@@@MOZC_APPLICATIONS_DIR@@@',
         '/Applications/GoogleJapaneseInput.localized'),
        ('@@@MOZC_OMAHA_TAG@@@', omaha_tag),
        ('@@@MOZC_PACKAGE_NAME@@@', 'GoogleJapaneseInput.pkg'),
    ]
    # Fix: use context managers so both file handles are closed (and the
    # output flushed) deterministically; the original chained
    # open(...).read()/write() and relied on refcounting to close them.
    with open(options.input) as template_file:
        template = template_file.read()
    with open(options.output, 'w') as output_file:
        output_file.write(_ReplaceVariables(template, variables))
| 33.770642 | 80 | 0.717468 |
9f920e63d95ae75be6ce3f081137e449e922a59b | 1,124 | py | Python | constants.py | tinshade/lambda_twitter | 5c598ca069402fc2661e7288334e987d2734b7f1 | [
"Apache-2.0"
] | null | null | null | constants.py | tinshade/lambda_twitter | 5c598ca069402fc2661e7288334e987d2734b7f1 | [
"Apache-2.0"
] | null | null | null | constants.py | tinshade/lambda_twitter | 5c598ca069402fc2661e7288334e987d2734b7f1 | [
"Apache-2.0"
] | null | null | null | import os
#TWITTER KEYS
API_KEY = os.getenv('API_KEY')
API_SECRET_KEY = os.getenv('API_SECRET_KEY')
ACCESS_TOKEN = os.getenv('ACCESS_TOKEN')
ACCESS_TOKEN_SECRET = os.getenv('ACCESS_TOKEN_SECRET')
BEARER_TOKEN = os.getenv('BEARER_TOKEN')
CLIENT_ID = os.getenv("CLIENT_ID")
CLIENT_SECRET = os.getenv("CLIENT_SECRET")
#TESTING DATA
VALIDATE_TWEET = {
'tweet': {"type":'string', "minlength":2, "maxlength": 244, "required":True, "nullable": False}
}
LOREM_TEXT = "Lorem Ipsum is simply dummy text of the printing and typesetting industry. Lorem Ipsum has been the industry's standard dummy text ever since the 1500s, when an unknown printer took a galley of type and scrambled it to make a type specimen book. It has survived not only five centuries, but also the leap into electronic typesetting, remaining essentially unchanged. It was popularised in the 1960s with the release of Letraset sheets containing Lorem Ipsum passages, and more recently with desktop publishing software like Aldus PageMaker including versions of Lorem Ipsum."
SAMPLE_TWEET = "Testing lambdas!"
FALLBACK_QUOTE = "Welp! That was a 404 :/"
| 46.833333 | 589 | 0.77758 |
f3e7f0474bf726c3e09779e65fcc71ebe4cd05b6 | 5,565 | py | Python | fanficfare/adapters/adapter_quotevcom.py | AlexRiina/FanFicFare | 2cd6f53f766e74052c6ca7ab5c2eabff24f59742 | [
"Apache-2.0"
] | 3 | 2020-11-10T16:43:43.000Z | 2021-04-09T07:12:31.000Z | fanficfare/adapters/adapter_quotevcom.py | AlexRiina/FanFicFare | 2cd6f53f766e74052c6ca7ab5c2eabff24f59742 | [
"Apache-2.0"
] | 5 | 2021-11-18T00:20:38.000Z | 2021-11-18T00:21:40.000Z | fanficfare/adapters/adapter_quotevcom.py | AlexRiina/FanFicFare | 2cd6f53f766e74052c6ca7ab5c2eabff24f59742 | [
"Apache-2.0"
] | 1 | 2021-04-08T12:25:09.000Z | 2021-04-08T12:25:09.000Z | # -*- coding: utf-8 -*-
# Copyright 2019 FanFicFare team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import
import re
import datetime
import logging
logger = logging.getLogger(__name__)
from .. import exceptions
# py2 vs py3 transition
from ..six import text_type as unicode
from ..six.moves.urllib import parse as urlparse
from .base_adapter import BaseSiteAdapter
from ..htmlcleanup import stripHTML
# Canonical site domain; also stored as the story's 'siteabbrev' metadata.
SITE_DOMAIN = 'quotev.com'
# %s placeholder is filled with the numeric story id.
STORY_URL_TEMPLATE = 'https://www.quotev.com/story/%s'
def getClass():
    """Return this module's adapter class (FanFicFare adapter entry point)."""
    return QuotevComAdapter
class QuotevComAdapter(BaseSiteAdapter):
    """FanFicFare site adapter that scrapes story metadata and chapter text
    from quotev.com story pages."""
    def __init__(self, config, url):
        BaseSiteAdapter.__init__(self, config, url)
        # Story id is the 5th path segment, e.g.
        # https://www.quotev.com/story/<id> -> split('/')[4] == <id>.
        story_id = url.split('/')[4]
        self._setURL(STORY_URL_TEMPLATE % story_id)
        self.story.setMetadata('storyId', story_id)
        self.story.setMetadata('siteabbrev', SITE_DOMAIN)
    @staticmethod
    def getSiteDomain():
        """Return the domain this adapter handles."""
        return SITE_DOMAIN
    @classmethod
    def getSiteExampleURLs(cls):
        """Return an example story URL shown to users."""
        return STORY_URL_TEMPLATE % '1234'
    def getSiteURLPattern(self):
        """Build a regex matching story URLs, tolerating http/https and an
        optional www. prefix."""
        pattern = re.escape(STORY_URL_TEMPLATE.rsplit('%', 1)[0]) + r'(.+?)($|&|/)'
        pattern = pattern.replace(r'https', r'https?')
        pattern = pattern.replace(r'www\.', r'(www\.)?')
        return pattern
    def extractChapterUrlsAndMetadata(self):
        """Fetch the story page and populate title, authors, description,
        cover, tags, dates, status, counters and the chapter list.

        Raises:
            exceptions.StoryDoesNotExist: when the page lacks the expected
                'result' div (story missing or page layout not recognized).
        """
        data = self.get_request(self.url)
        soup = self.make_soup(data)
        element = soup.find('div', {'class': 'result'})
        if not element:
            raise exceptions.StoryDoesNotExist(self.url)
        title = element.find('h1')
        self.story.setMetadata('title', title.get_text())
        authdiv = soup.find('div', {'class':"quizAuthorList"})
        if authdiv:
            for a in authdiv.find_all('a'):
                self.story.addToList('author', a.get_text())
                self.story.addToList('authorId', a['href'].split('/')[-1])
                self.story.addToList('authorUrl', urlparse.urljoin(self.url, a['href']))
        # Fall back to a placeholder author when the page lists none.
        if not self.story.getList('author'):
            self.story.addToList('author','Anonymous')
            self.story.addToList('authorUrl','https://www.quotev.com')
            self.story.addToList('authorId','0')
        self.setDescription(self.url, soup.find('div', id='qdesct'))
        imgmeta = soup.find('meta',{'property':"og:image" })
        if imgmeta:
            # Remember the cover URL so getChapterText can skip duplicates.
            self.coverurl = self.setCoverImage(self.url, urlparse.urljoin(self.url, imgmeta['content']))[1]
        for a in soup.find_all('a', {'href': re.compile('/fiction(/c)?/')}):
            self.story.addToList('category', a.get_text())
        for a in soup.find_all('a', {'href': re.compile('/search/')}):
            self.story.addToList('searchtags', a.get_text())
        # First <time> holds the publish timestamp ('ts' attribute, epoch
        # seconds); a second one, when present, is the last-update time.
        elements = soup.find_all('time') # , {'class': 'q_time'}
        self.story.setMetadata('datePublished', datetime.datetime.fromtimestamp(float(elements[0]['ts'])))
        if len(elements) > 1:
            self.story.setMetadata('dateUpdated', datetime.datetime.fromtimestamp(float(elements[1]['ts'])))
        metadiv = elements[0].parent.parent
        if u'· completed' in stripHTML(metadiv):
            self.story.setMetadata('status', 'Completed')
        else:
            self.story.setMetadata('status', 'In-Progress')
        # Scrape numeric counters (pages, readers, reads, ...) configured via
        # the 'extra_valid_entries' option out of the metadata div's HTML.
        metahtml = unicode(metadiv).replace(u'\n',' ')
        # logger.debug(metahtml)
        for entry in self.getConfigList('extra_valid_entries'):
            # if entry in metahtml:
            # logger.debug("should find")
            # logger.debug(r".*?([0-9,]+) +%s.*?"%entry)
            m = re.match((r".*?([0-9,]+) +%s.*?"%entry),metahtml)
            if m:
                val = m.group(1)
                # logger.debug(val)
                self.story.setMetadata(entry, val.replace(',', '').replace('.', ''))
        favspans = soup.find('a',{'id':'fav_btn'}).find_all('span')
        if len(favspans) > 1:
            self.story.setMetadata('favorites', stripHTML(favspans[-1]).replace(',', ''))
        commentspans = soup.find('a',{'id':'comment_btn'}).find_all('span')
        #print("commentspans:%s"%commentspans)
        if len(commentspans) > 0:
            self.story.setMetadata('comments', stripHTML(commentspans[0]).replace(',', ''))
        # Chapter list: every non-javascript link in the 'rselect' div.
        for a in soup.find('div', id='rselect')('a'):
            if 'javascript' not in a['href']:
                self.add_chapter(a.get_text(), urlparse.urljoin(self.url, a['href']))
    def getChapterText(self, url):
        """Fetch one chapter page and return its sanitized HTML content."""
        data = self.get_request(url)
        soup = self.make_soup(data)
        rescontent = soup.find('div', id='rescontent')
        # attempt to find and include chapter specific images.
        img = soup.find('div',{'id':'quizHeader'}).find('img')
        #print("img['src'](%s) != self.coverurl(%s)"%(img['src'],self.coverurl))
        if img['src'] != self.coverurl:
            rescontent.insert(0,img)
        # Strip anchors but keep their inner text/content.
        for a in rescontent('a'):
            a.unwrap()
        return self.utf8FromSoup(url, rescontent)
| 36.854305 | 108 | 0.615633 |
c8abbe167173be8d55a08ee4d670a73cfc781c9b | 3,029 | py | Python | .tox/scenario/lib/python2.7/site-packages/oslo_db/tests/sqlalchemy/test_types.py | bdrich/neutron-lbaas | b4711abfe0207c4fdd5d7fb7ecbf017e753abbfd | [
"Apache-2.0"
] | null | null | null | .tox/scenario/lib/python2.7/site-packages/oslo_db/tests/sqlalchemy/test_types.py | bdrich/neutron-lbaas | b4711abfe0207c4fdd5d7fb7ecbf017e753abbfd | [
"Apache-2.0"
] | null | null | null | .tox/scenario/lib/python2.7/site-packages/oslo_db/tests/sqlalchemy/test_types.py | bdrich/neutron-lbaas | b4711abfe0207c4fdd5d7fb7ecbf017e753abbfd | [
"Apache-2.0"
] | null | null | null | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for JSON SQLAlchemy types."""
from sqlalchemy import Column, Integer
from sqlalchemy.ext.declarative import declarative_base
from oslo_db import exception as db_exc
from oslo_db.sqlalchemy import models
from oslo_db.sqlalchemy import test_base
from oslo_db.sqlalchemy import types
# Declarative base shared by the test models in this module.
BASE = declarative_base()
class JsonTable(BASE, models.ModelBase):
    """Test model exercising the three JSON-encoded column types."""
    __tablename__ = 'test_json_types'
    id = Column(Integer, primary_key=True)
    # Stores a dict serialized as JSON text.
    jdict = Column(types.JsonEncodedDict)
    # Stores a list serialized as JSON text.
    jlist = Column(types.JsonEncodedList)
    # Stores any JSON-serializable value.
    json = Column(types.JsonEncodedType)
class JsonTypesTestCase(test_base.DbTestCase):
    """Round-trip and type-enforcement tests for the JSON column types."""
    def setUp(self):
        super(JsonTypesTestCase, self).setUp()
        # Create the test table for this test and drop it on cleanup.
        JsonTable.__table__.create(self.engine)
        self.addCleanup(JsonTable.__table__.drop, self.engine)
        self.session = self.sessionmaker()
        self.addCleanup(self.session.close)
    def test_default_value(self):
        """Unset columns come back as [], {} and None respectively."""
        with self.session.begin():
            JsonTable(id=1).save(self.session)
        obj = self.session.query(JsonTable).filter_by(id=1).one()
        self.assertEqual([], obj.jlist)
        self.assertEqual({}, obj.jdict)
        self.assertIsNone(obj.json)
    def test_dict(self):
        """A dict (with nested list) round-trips through JsonEncodedDict."""
        test = {'a': 42, 'b': [1, 2, 3]}
        with self.session.begin():
            JsonTable(id=1, jdict=test).save(self.session)
        obj = self.session.query(JsonTable).filter_by(id=1).one()
        self.assertEqual(test, obj.jdict)
    def test_list(self):
        """A heterogeneous list round-trips through JsonEncodedList."""
        test = [1, True, "hello", {}]
        with self.session.begin():
            JsonTable(id=1, jlist=test).save(self.session)
        obj = self.session.query(JsonTable).filter_by(id=1).one()
        self.assertEqual(test, obj.jlist)
    def test_dict_type_check(self):
        """Storing a list in the dict column raises DBError."""
        self.assertRaises(db_exc.DBError,
                          JsonTable(id=1, jdict=[]).save, self.session)
    def test_list_type_check(self):
        """Storing a dict in the list column raises DBError."""
        self.assertRaises(db_exc.DBError,
                          JsonTable(id=1, jlist={}).save, self.session)
    def test_generic(self):
        """JsonEncodedType round-trips each basic JSON value kind."""
        tested = [
            "string",
            42,
            True,
            None,
            [1, 2, 3],
            {'a': 'b'}
        ]
        for i, test in enumerate(tested):
            with self.session.begin():
                JsonTable(id=i, json=test).save(self.session)
            obj = self.session.query(JsonTable).filter_by(id=i).one()
            self.assertEqual(test, obj.json)
a4f0ca05702a78635f6f4b1c973557721a6cf014 | 9,780 | py | Python | dhcp_option_util.py | fernandordguez/dhcp_option_encoding | 2014461da5f2340aa13252f8931f01c91273fe06 | [
"BSD-2-Clause"
] | 1 | 2021-07-27T18:23:07.000Z | 2021-07-27T18:23:07.000Z | dhcp_option_util.py | fernandordguez/dhcp_option_encoding | 2014461da5f2340aa13252f8931f01c91273fe06 | [
"BSD-2-Clause"
] | null | null | null | dhcp_option_util.py | fernandordguez/dhcp_option_encoding | 2014461da5f2340aa13252f8931f01c91273fe06 | [
"BSD-2-Clause"
] | 1 | 2021-09-21T11:35:10.000Z | 2021-09-21T11:35:10.000Z | #!/usr/bin/env python3
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
"""
------------------------------------------------------------------------
Description:
Demonstration script for Hex Encoding/Decoding of DHCP Options
using the bloxone module
Requirements:
bloxone
yaml
Author: Chris Marrison
Date Last Updated: 20210809
Copyright 2021 Chris Marrison / Infoblox
Redistribution and use in source and binary forms,
with or without modification, are permitted provided
that the following conditions are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
------------------------------------------------------------------------
"""
__version__ = '0.1.1'
__author__ = 'Chris Marrison'
__email__ = 'chris@infoblox.com'
import bloxone
import argparse
import logging
from bloxone.dhcputils import dhcp_decode
import yaml
from pprint import pprint
import os
def parseargs():
    """Define the command-line interface and return the parsed namespace.

    --vendor, --suboptions, --data_only and --data_seq are mutually
    exclusive operating modes.
    """
    ap = argparse.ArgumentParser(description='DHCP Option Encoding '
                                             'and Decoding Utility')
    mode = ap.add_mutually_exclusive_group()
    ap.add_argument('-c', '--config', type=str, default='vendor_dict.yaml',
                    help="Path to vendor file")
    mode.add_argument('--vendor', type=str, default='',
                      help="Vendor Identifier")
    mode.add_argument('--suboptions', type=str, default='',
                      help="Sub Options to encode")
    ap.add_argument('--prefix', type=str, default='',
                    help="Optional prefix for use with --suboptions")
    ap.add_argument('--decode', type=str, default='',
                    help="Hex string to decode")
    mode.add_argument('--data_only', action="store_true",
                      help="Decode hex as 'string' or specified by --type")
    ap.add_argument('--type', type=str, default='string',
                    help="Optional data_type for --decode --data_only")
    mode.add_argument('--data_seq', type=str, default='',
                      help="Encode set of <type>:<data> as single hex str")
    ap.add_argument('--dump', type=str, default='', help="Dump Vendor")
    return ap.parse_args()
def dump_vendor(vendor):
    """Print the YAML definition for *vendor*, or a not-found message.

    Relies on the module-level `definitions` object (vendor dictionary).
    """
    global definitions
    global dhcp_encoder
    if not definitions.included(vendor):
        print(f'Vendor: {vendor} not found.')
    else:
        print(f'Vendor: ')
        print(yaml.dump(definitions.dump_vendor_def(vendor)))
    return
def process_vendor(vendor):
    """Encode *vendor*'s sub-options (with its option prefix) and print the
    resulting hex string; silently does nothing for unknown vendors."""
    global definitions
    global dhcp_encoder
    if not definitions.included(vendor):
        return
    sub_opts = definitions.sub_options(vendor)
    vendor_prefix = definitions.vendor_prefix(vendor)
    if sub_opts:
        hex_value = vendor_prefix + dhcp_encoder.encode_dhcp_option(sub_opts)
        print(f'Vendor: {vendor}, Encoding: {hex_value}')
    else:
        print(f'Vendor: {vendor} has no sub-options to encode')
    return
def process_suboptions(sub_options, prefix=''):
    """Encode a "<code>:<type>:<data>,..." specification to a hex string.

    Args:
        sub_options: comma-separated sub-option triplets.
        prefix: optional hex prefix prepended to the final encoding.

    The encoding is printed only when every item parsed successfully;
    a malformed item aborts parsing, an unsupported type is reported
    and skipped.
    """
    global dhcp_encoder
    raw_items = sub_options.split(',')
    parsed_defs = []
    for raw in raw_items:
        fields = raw.split(':')
        if len(fields) != 3:
            print('--suboptions data incorrect format')
            print('Format is "<code>:<type>:<data>,<code>:<type>:<data>,..."')
            break
        if fields[1] in dhcp_encoder.opt_types:
            parsed_defs.append({'code': fields[0],
                                'type': fields[1],
                                'data': fields[2]})
        else:
            print(f'Option type: {fields[1]} is not supported')
            print(f'Supported types: {dhcp_encoder.opt_types}')
    # Only encode when every single item was parsed successfully.
    if len(parsed_defs) == len(raw_items):
        encoding = dhcp_encoder.encode_dhcp_option(parsed_defs)
        if prefix:
            encoding = prefix + encoding
        print(f'Encoded sub-options: {encoding}')
    return
def opttypes_to_list(sub_options):
    '''
    Parse a "<code>:<type>,..." string into decoder sub-option definitions.

    Args:
        sub_options (str): comma-separated "<code>:<type>" pairs.

    Returns:
        list of dict: one {'code': ..., 'type': ...} per valid pair.
        A malformed pair stops parsing; an unsupported type is reported
        and skipped.
    '''
    global dhcp_decoder
    suboptions_def = []
    options = sub_options.split(',')
    for option in options:
        subopt = option.split(':')
        if len(subopt) >= 2:
            if subopt[1] in dhcp_decoder.opt_types:
                suboptions_def.append({'code': subopt[0],
                                       'type': subopt[1]})
            else:
                print(f'Option type: {subopt[1]} is not supported')
                # Fix: this function works with the decoder; the old code
                # printed dhcp_encoder.opt_types, a global it never declared.
                print(f'Supported types: {dhcp_decoder.opt_types}')
        else:
            print('--suboptions for decode hex incorrect format')
            print('Format is "<code>:<type>,<code>:<type>,..."')
            break
    return suboptions_def
def process_all():
    """Encode and print the sub-options for every vendor in the config."""
    global definitions
    global dhcp_encoder
    all_vendors = definitions.vendors()
    for vendor_id in all_vendors:
        process_vendor(vendor_id)
    return
def encode_data_set(data_set):
    '''
    Encode a comma-separated "<type>:<data>" sequence as one hex string.

    Args:
        data_set (str): items of the form "<type>:<data>" joined by commas
            (the value of the --data_seq CLI option).

    Prints the concatenated hex encoding and its length. A malformed item
    aborts parsing; an unsupported type is reported and skipped.
    '''
    global dhcp_encoder
    hex_str = ''
    for item in data_set.split(','):
        data_item = item.split(':')
        if len(data_item) == 2:
            if data_item[0] in dhcp_encoder.opt_types:
                # The option code is irrelevant when encoding raw data,
                # so a dummy code of 1 is used.
                data = { 'code': 1,
                         'type': data_item[0],
                         'data': data_item[1]}
                hex_str += dhcp_encoder.encode_data(data)
            else:
                print(f'Option type: {data_item[0]} is not supported')
                print(f'Supported types: {dhcp_encoder.opt_types}')
        else:
            # Fix: the CLI option is called --data_seq (see parseargs);
            # the old message referred to a non-existent --data_set.
            print('--data_seq for decode hex incorrect format')
            print('Format is "<type>:<data>,<type>:<data>,..."')
            break
    # Fix: "dequence" -> "sequence" typo in the output message.
    print(f'Encoded data sequence: {hex_str}')
    print(f'Length (hex): {dhcp_encoder.hex_length(hex_str)}')
    return
def decode_hex(hex_str, opt_defs=None, data_only=False, data_type='string'):
    """Decode a hex string, as sub-options and/or as a single data value.

    Args:
        hex_str: hex-encoded option data; must contain whole octets.
        opt_defs: sub-option definitions (list of dicts, or a
            "<code>:<type>,..." string which is parsed first).
        data_only: when True, skip sub-option decoding entirely.
        data_type: type used for the data-only interpretation.
    """
    global definitions
    global dhcp_decoder
    # Guard clause: an odd number of hex digits means incomplete octets.
    if (len(hex_str) % 2) != 0:
        logging.error(f'Hex string contains incomplete octets, decoding halted')
        return
    if not data_only:
        if opt_defs:
            if not isinstance(opt_defs, list):
                opt_defs = opttypes_to_list(opt_defs)
        else:
            opt_defs = []
        print()
        if opt_defs:
            print('Decoding as sub-options, using definitions:')
        else:
            print('Attempting to decode as sub-options:')
        decoded = dhcp_decoder.decode_dhcp_option(hex_str, sub_opt_defs=opt_defs)
        dhcp_decoder.output_decoded_options(decoded)
    # Always also show the data-only interpretation.
    print()
    print(f'Decoding as data-only, using type: {data_type}')
    decoded = dhcp_decoder.decode_data(hex_str, data_type=data_type)
    print(repr(decoded))
    return
def main():
    '''
    Entry point: parse the CLI options and dispatch to the matching
    encode/decode helper.

    Returns:
        int: process exit code (currently always 0).
    '''
    exitcode = 0
    options = parseargs()
    # The helper functions communicate through these module-level singletons.
    global definitions
    global dhcp_encoder
    global dhcp_decoder
    dhcp_encoder = bloxone.dhcp_encode()
    dhcp_decoder = bloxone.dhcp_decode()
    # Check for direct options
    if options.suboptions and not options.decode:
        process_suboptions(options.suboptions, prefix=options.prefix)
    elif options.decode:
        if options.suboptions:
            # Decode using explicitly supplied "<code>:<type>" definitions.
            decode_hex(options.decode, opt_defs=options.suboptions)
        elif options.vendor:
            # Decode using the sub-option definitions from the vendor file.
            definitions = bloxone.DHCP_OPTION_DEFS(options.config)
            decode_hex(options.decode, opt_defs=definitions.sub_options(options.vendor))
        else:
            decode_hex(options.decode,
                       data_type=options.type,
                       data_only=options.data_only)
    elif options.data_seq:
        encode_data_set(options.data_seq)
    else:
        # No direct options: drive everything from the vendor config file.
        definitions = bloxone.DHCP_OPTION_DEFS(options.config)
        # Process using config file based on options
        if options.dump:
            dump_vendor(options.dump)
        elif options.vendor:
            process_vendor(options.vendor)
        else:
            process_all()
    return exitcode
### Main ###
# Script entry point: run the CLI and hand its status code to the caller.
if __name__ == '__main__':
    exitcode = main()
exit(exitcode) | 32.065574 | 147 | 0.607975 |
865b47cb38065ce3346ecf0c9724270a49f9a883 | 1,035 | py | Python | tgappcategories/model/__init__.py | axant/tgapp-categories | 82be90b92276f06f87f2b0fddf45e310cc70962e | [
"MIT"
] | null | null | null | tgappcategories/model/__init__.py | axant/tgapp-categories | 82be90b92276f06f87f2b0fddf45e310cc70962e | [
"MIT"
] | null | null | null | tgappcategories/model/__init__.py | axant/tgapp-categories | 82be90b92276f06f87f2b0fddf45e310cc70962e | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import logging
import tg
from tgext.pluggable import PluggableSession
log = logging.getLogger('tgappcategories')
DBSession = PluggableSession()
provider = None
Category = None
CategoryImage = None
def init_model(app_session):
    """Bind the pluggable DBSession proxy to the application's real session."""
    DBSession.configure(app_session)
def configure_models():
    """Select the ORM backend (SQLAlchemy or Ming) from the TG configuration.

    Sets the module-level ``provider``, ``Category`` and ``CategoryImage``
    names; raises ValueError when neither backend is enabled.
    """
    global provider, Category, CategoryImage
    if tg.config.get('use_sqlalchemy', False):
        log.info('Configuring TgappCategories for SQLAlchemy')
        # Category/CategoryImage are declared global above, so these imports
        # rebind the module-level names, not function locals.
        from tgappcategories.model.sqla.models import Category, CategoryImage
        from sprox.sa.provider import SAORMProvider
        provider = SAORMProvider(session=DBSession, engine=False)
    elif tg.config.get('use_ming', False):
        log.info('Configuring TgappCategories for Ming')
        from tgappcategories.model.ming.models import Category, CategoryImage
        from sprox.mg.provider import MingProvider
        provider = MingProvider(DBSession)
    else:
        raise ValueError('TgappCategories should be used with sqlalchemy or ming')
| 30.441176 | 82 | 0.738164 |
7acf7775781ebdf7a4310452734a7fc73c20a4fe | 4,735 | py | Python | kedro/contrib/io/bioinformatics/sequence_dataset.py | yhzqb/kedro | 619d7f0ccb51895d3bb43d30e3dee9d4d0cebcab | [
"Apache-2.0"
] | 1 | 2021-11-19T05:36:47.000Z | 2021-11-19T05:36:47.000Z | kedro/contrib/io/bioinformatics/sequence_dataset.py | yhzqb/kedro | 619d7f0ccb51895d3bb43d30e3dee9d4d0cebcab | [
"Apache-2.0"
] | null | null | null | kedro/contrib/io/bioinformatics/sequence_dataset.py | yhzqb/kedro | 619d7f0ccb51895d3bb43d30e3dee9d4d0cebcab | [
"Apache-2.0"
] | 1 | 2021-11-19T05:36:49.000Z | 2021-11-19T05:36:49.000Z | # Copyright 2018-2019 QuantumBlack Visual Analytics Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND
# NONINFRINGEMENT. IN NO EVENT WILL THE LICENSOR OR OTHER CONTRIBUTORS
# BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF, OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# The QuantumBlack Visual Analytics Limited ("QuantumBlack") name and logo
# (either separately or in combination, "QuantumBlack Trademarks") are
# trademarks of QuantumBlack. The License does not grant you any right or
# license to the QuantumBlack Trademarks. You may not use the QuantumBlack
# Trademarks or any confusingly similar mark as a trademark for your product,
# or use the QuantumBlack Trademarks in any other manner that might cause
# confusion in the marketplace, including but not limited to in advertising,
# on websites, or on software.
#
# See the License for the specific language governing permissions and
# limitations under the License.
"""BioSequenceLocalDataSet loads and saves data to/from bio-sequence objects to
file.
"""
from os.path import isfile
from pathlib import Path
from typing import Any, Dict, List, Optional
from Bio import SeqIO
from kedro.contrib.io import DefaultArgumentsMixIn
from kedro.io import AbstractDataSet
class BioSequenceLocalDataSet(DefaultArgumentsMixIn, AbstractDataSet):
    """``BioSequenceLocalDataSet`` loads and saves data to a sequence file.
    Example:
    ::
        >>> raw_sequence_list = [
        >>> '>gi|2765658|emb|Z78533.1|CIZ78533'
        >>> 'C.irapeanum 5.8S rRNA gene and ITS1 and ITS2 DNA'
        >>> 'CGTAACAAGGTTTCCGTAGGTGAACCTGCGGAAGGATCATTGATGAGACCGTGGAATAAA'
        >>> 'CGATCGAGTGAATCCGGAGGACCGGTGTACTCAGCTCACCGGGGGCATTGCTCCCGTGGT'
        >>> 'GACCCTGATTTGTTGTTGGGCCGCCTCGGGAGCGTCCATGGCGGGTTTGAACCTCTAGCC'
        >>> 'CGGCGCAGTTTGGGCGCCAAGCCATATGAAAGCATCACCGGCGAATGGCATTGTCTTCCC'
        >>> 'CAAAACCCGGAGCGGCGGCGTGCTGTCGCGTGCCCAATGAATTTTGATGACTCTCGCAAA'
        >>> 'CGGGAATCTTGGCTCTTTGCATCGGATGGAAGGACGCAGCGAAATGCGATAAGTGGTGTG'
        >>> 'AATTGCAAGATCCCGTGAACCATCGAGTCTTTTGAACGCAAGTTGCGCCCGAGGCCATCA'
        >>> 'GGCTAAGGGCACGCCTGCTTGGGCGTCGCGCTTCGTCTCTCTCCTGCCAATGCTTGCCCG'
        >>> 'GCATACAGCCAGGCCGGCGTGGTGCGGATGTGAAAGATTGGCCCCTTGTGCCTAGGTGCG'
        >>> 'GCGGGTCCAAGAGCTGGTGTTTTGATGGCCCGGAACCCGGCAAGAGGTGGACGGATGCTG'
        >>> 'GCAGCAGCTGCCGTGCGAATCCCCCATGTTGTCGTGCTTGTCGGACAGGCAGGAGAACCC'
        >>> 'TTCCGAACCCCAATGGAGGGCGGTTGACCGCCATTCGGATGTGACCCCAGGTCAGGCGGG'
        >>> 'GGCACCCGCTGAGTTTACGC']
        >>> data_set = BioSequenceLocalDataSet(filepath="ls_orchid.fasta",
        >>>                                    load_args={"format": "fasta"},
        >>>                                    save_args={"format": "fasta"})
        >>> data_set.save(raw_sequence_list)
        >>> sequence_list = data_set.load()
        >>> assert raw_sequence_list.equals(sequence_list)
    """
    def _describe(self) -> Dict[str, Any]:
        # Configuration snapshot returned for the data set's description.
        return dict(
            filepath=self._filepath,
            load_args=self._load_args,
            save_args=self._save_args,
        )
    def __init__(
        self,
        filepath: str,
        load_args: Optional[Dict[str, Any]] = None,
        save_args: Optional[Dict[str, Any]] = None,
    ) -> None:
        """
        Creates a new instance of ``BioSequenceLocalDataSet`` pointing
        to a concrete filepath.
        Args:
            filepath: path to sequence file
            load_args: Options for loading sequence files. Here you can find
                all supported file formats: https://biopython.org/wiki/SeqIO
            save_args: args supported by Biopython are 'handle' and 'format'.
                Handle by default is equal to ``filepath``.
        """
        self._filepath = filepath
        # The mix-in stores these as self._load_args / self._save_args
        # (see _describe above).
        super().__init__(load_args, save_args)
    def _load(self) -> List:
        # Materialise the lazy SeqIO iterator so the result is a plain list.
        return list(SeqIO.parse(self._filepath, **self._load_args))
    def _save(self, data: list) -> None:
        save_path = Path(self._filepath)
        # Create missing parent directories before Biopython writes the file.
        save_path.parent.mkdir(parents=True, exist_ok=True)
        SeqIO.write(data, handle=str(save_path), **self._save_args)
    def _exists(self) -> bool:
        return isfile(self._filepath)
| 42.657658 | 79 | 0.691235 |
b3a4b3f30bda7d3b4d6710b9eae50b90e95ff4b1 | 187 | py | Python | python_basics/data_type/str_test/test1.py | panc-test/python-study | fb172ed4a4f7fb521de9a005cd55115ad63a5b6d | [
"MIT"
] | 1 | 2019-11-06T03:34:50.000Z | 2019-11-06T03:34:50.000Z | python_basics/data_type/str_test/test1.py | panc-test/python-study | fb172ed4a4f7fb521de9a005cd55115ad63a5b6d | [
"MIT"
] | null | null | null | python_basics/data_type/str_test/test1.py | panc-test/python-study | fb172ed4a4f7fb521de9a005cd55115ad63a5b6d | [
"MIT"
] | null | null | null | """
str -字符串基本操作
1、创建字符串
2、访问字符串
3、操作字符串
"""
# Create strings
str0 = '' # create an empty string
str1 = 'aaa' # method 1: build a string with single quotes ''
str2 = "222" # method 2: build a string with double quotes ""
str3 = "'123'" # method 3: single quotes '' nested inside double quotes ""
| 14.384615 | 40 | 0.572193 |
44d72c24634cee53dc3d78b3de30d21abfe42590 | 439 | py | Python | backend/chat/migrations/0004_message_imagepath.py | CSCI34284/group4_project | ee87cb5fd0b194fea4f7e2baed2a790c0bc1950f | [
"MIT"
] | 1 | 2019-10-25T14:56:50.000Z | 2019-10-25T14:56:50.000Z | backend/chat/migrations/0004_message_imagepath.py | CSCI34284/group4_project | ee87cb5fd0b194fea4f7e2baed2a790c0bc1950f | [
"MIT"
] | 8 | 2020-02-12T03:09:18.000Z | 2021-06-10T19:31:18.000Z | backend/chat/migrations/0004_message_imagepath.py | CSCI34284/group4_project | ee87cb5fd0b194fea4f7e2baed2a790c0bc1950f | [
"MIT"
] | 2 | 2019-10-18T18:20:28.000Z | 2019-12-05T19:38:57.000Z | # Generated by Django 2.2.7 on 2019-11-26 00:22
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: adds the Message.imagePath image field.
    dependencies = [
        ('chat', '0003_auto_20191125_2156'),
    ]
    operations = [
        migrations.AddField(
            model_name='message',
            name='imagePath',
            # default=0 backfills existing rows; preserve_default=False tells
            # Django not to keep that default in the model state afterwards.
            field=models.ImageField(default=0, upload_to='images/'),
            preserve_default=False,
        ),
    ]
| 21.95 | 68 | 0.599089 |
8aaee8b68238c1755aae8a5a19a6bd40c0d32ad3 | 100 | py | Python | gnosis/xml/pickle/test/test_pass_3.py | LehmRob/od-conv | 1ffed758f27e906a1e36f5d137180a1808a82c0a | [
"MIT"
] | 3 | 2018-09-29T14:14:28.000Z | 2022-01-05T03:45:50.000Z | library/CanFestival/objdictgen/gnosis/xml/pickle/test/test_pass_3.py | Lembed/STM32duino-gcc-Projects | 67829e9cd1388601daf9815b0561da557e0b9f82 | [
"MIT"
] | 1 | 2017-06-17T08:15:28.000Z | 2017-06-17T08:15:28.000Z | library/CanFestival/objdictgen/gnosis/xml/pickle/test/test_pass_3.py | Lembed/STM32duino-gcc-Projects | 67829e9cd1388601daf9815b0561da557e0b9f82 | [
"MIT"
] | 1 | 2019-12-08T15:11:55.000Z | 2019-12-08T15:11:55.000Z | # used for sanity checking the test harness
# a "pass" test that prints something
print "** OK **"
| 20 | 43 | 0.7 |
6d5c0788b272f9f0d07a2876810ea3d43df65858 | 1,012 | py | Python | moduller/osmod-l.py | onselaydin/pytry | 314aa50b6f8535e275dc8a2edd0c21637fb5a745 | [
"Apache-2.0"
] | null | null | null | moduller/osmod-l.py | onselaydin/pytry | 314aa50b6f8535e275dc8a2edd0c21637fb5a745 | [
"Apache-2.0"
] | null | null | null | moduller/osmod-l.py | onselaydin/pytry | 314aa50b6f8535e275dc8a2edd0c21637fb5a745 | [
"Apache-2.0"
] | null | null | null | import os
from datetime import datetime
for klasör_yolu,klasör_isimleri,dosya_isimler in os.walk("C:/Users/user/Desktop"):
    # Walk the Desktop tree; os.walk yields (path, dirnames, filenames).
    for i in klasör_isimleri:
        if (i.startswith("kr")):
            # Print every directory name that starts with "kr".
            print(i)
#from datetime import datetime
# print(dir(os))
# print(os.getcwd())
# os.chdir("C:/Users/user/Desktop/")
# print(os.getcwd())
# print(os.listdir())
#os.mkdir("Deneme1")
#os.makedirs("Deneme2/Deneme3")
#os.rmdir("Deneme2/Deneme3")
# os.rename("Deneme1","Deneme2")
# os.removedirs("Deneme2/Deneme3")
# os.rename("test.txt","test2.txt")
# print(os.stat("test2.txt"))
# degistirilme = os.stat("test2.txt").st_mtime
# print(datetime.fromtimestamp(degistirilme))
"""for klasör_yolu,klasör_isimleri,dosya_isimleri in os.walk("C:/Users/user/Desktop"):
print("Current Path",klasör_yolu)
print("Directories",klasör_isimleri)
print("Dosyalar",dosya_isimleri)
print("**********************************")"""
| 16.866667 | 87 | 0.614625 |
227d40028e22e683004ae8c8503523cb2e966dc5 | 400 | py | Python | psychopy_tobii_controller/constants.py | Toonwire/infancy_eye_tracking | 7b96a9d832f60f83fd5098ada2117ab1d0f56fed | [
"MIT"
] | null | null | null | psychopy_tobii_controller/constants.py | Toonwire/infancy_eye_tracking | 7b96a9d832f60f83fd5098ada2117ab1d0f56fed | [
"MIT"
] | null | null | null | psychopy_tobii_controller/constants.py | Toonwire/infancy_eye_tracking | 7b96a9d832f60f83fd5098ada2117ab1d0f56fed | [
"MIT"
] | null | null | null | #
# Tobii controller for PsychoPy
#
# author: Hiroyuki Sogo
# Distributed under the terms of the GNU General Public License v3 (GPLv3).
#
TimeStamp = 0
GazePointXLeft = 1
GazePointYLeft = 2
PupilLeft = 3
ValidityLeft = 4
GazePointXRight = 5
GazePointYRight = 6
PupilRight = 7
ValidityRight = 8
GazePointX = 9
GazePointY = 10
FixStart = 0
FixEnd = 1
FixX = 2
FixY = 3
EventTime = 0
EventText = 1
| 14.814815 | 75 | 0.73 |
d725bf44f1254278448c6888f806ddea635065c4 | 7,870 | py | Python | halotools/mock_observables/pairwise_velocities/tests/test_los_pvd_vs_rp.py | mclaughlin6464/halotools_old | 96fbdf5fc156160f19ccd4ae3ee964f831d26fa6 | [
"BSD-3-Clause"
] | null | null | null | halotools/mock_observables/pairwise_velocities/tests/test_los_pvd_vs_rp.py | mclaughlin6464/halotools_old | 96fbdf5fc156160f19ccd4ae3ee964f831d26fa6 | [
"BSD-3-Clause"
] | null | null | null | halotools/mock_observables/pairwise_velocities/tests/test_los_pvd_vs_rp.py | mclaughlin6464/halotools_old | 96fbdf5fc156160f19ccd4ae3ee964f831d26fa6 | [
"BSD-3-Clause"
] | null | null | null | """
"""
from __future__ import absolute_import, division, print_function
import numpy as np
from astropy.tests.helper import pytest
from astropy.utils.misc import NumpyRNGContext
from ..los_pvd_vs_rp import los_pvd_vs_rp
from ...tests.cf_helpers import generate_locus_of_3d_points
__all__ = ('test_los_pvd_vs_rp_correctness1', 'test_los_pvd_vs_rp_correctness2',
'test_los_pvd_vs_rp_correctness3',
'test_los_pvd_vs_rp_auto_consistency', 'test_los_pvd_vs_rp_cross_consistency')
fixed_seed = 43
@pytest.mark.slow
def test_los_pvd_vs_rp_correctness1():
    """ This function tests that the
    `~halotools.mock_observables.los_pvd_vs_rp` function returns correct
    results for a controlled distribution of points whose line-of-sight velocity
    can be simply calculated.
    For this test, the configuration is two tight localizations of points,
    the first at (0.5, 0.5, 0.1), the second at (0.5, 0.35, 0.25).
    The first set of points is moving with random uniform z-velocities;
    the second set of points is at rest.
    PBCs are set to infinity in this test.
    For every velocity in sample1, since we can count pairs analytically
    for this configuration we know exactly how many appearances of each
    velocity there will be, so we can calculate np.std on the exact
    same set of points as the marked pair-counter should operate on.
    """
    npts = 100
    xc1, yc1, zc1 = 0.5, 0.5, 0.1
    xc2, yc2, zc2 = 0.5, 0.35, 0.25
    sample1 = generate_locus_of_3d_points(npts, xc=xc1, yc=yc1, zc=zc1, seed=fixed_seed)
    sample2 = generate_locus_of_3d_points(npts, xc=xc2, yc=yc2, zc=zc2, seed=fixed_seed)
    velocities1 = np.zeros(npts*3).reshape(npts, 3)
    velocities2 = np.zeros(npts*3).reshape(npts, 3)
    with NumpyRNGContext(fixed_seed):
        velocities1[:, 2] = np.random.uniform(0, 1, npts)
    rp_bins, pi_max = np.array([0.001, 0.1, 0.3]), 0.2
    s1s2 = los_pvd_vs_rp(sample1, velocities1, rp_bins, pi_max,
        sample2=sample2, velocities2=velocities2, do_auto=False)
    # Every point of sample1 pairs with every point of sample2, so each
    # z-velocity appears npts times in the analytic comparison set.
    correct_cross_pvd = np.std(np.repeat(velocities1[:, 2], npts))
    assert np.allclose(s1s2[0], 0, rtol=0.1)
    assert np.allclose(s1s2[1], correct_cross_pvd, rtol=0.001)
@pytest.mark.slow
def test_los_pvd_vs_rp_correctness2():
    """ This function tests that the
    `~halotools.mock_observables.los_pvd_vs_rp` function returns correct
    results for a controlled distribution of points whose line-of-sight velocity
    can be simply calculated.
    For this test, the configuration is two tight localizations of points,
    the first at (0.5, 0.5, 0.1), the second at (0.5, 0.35, 0.95).
    The first set of points is moving with random uniform z-velocities;
    the second set of points is at rest.
    PBCs are operative in this test.
    For every velocity in sample1, since we can count pairs analytically
    for this configuration we know exactly how many appearances of each
    velocity there will be, so we can calculate np.std on the exact
    same set of points as the marked pair-counter should operate on.
    """
    npts = 100
    xc1, yc1, zc1 = 0.5, 0.5, 0.1
    xc2, yc2, zc2 = 0.5, 0.35, 0.95
    sample1 = generate_locus_of_3d_points(npts, xc=xc1, yc=yc1, zc=zc1, seed=fixed_seed)
    sample2 = generate_locus_of_3d_points(npts, xc=xc2, yc=yc2, zc=zc2, seed=fixed_seed)
    velocities1 = np.zeros(npts*3).reshape(npts, 3)
    velocities2 = np.zeros(npts*3).reshape(npts, 3)
    with NumpyRNGContext(fixed_seed):
        velocities1[:, 2] = np.random.uniform(0, 1, npts)
    rp_bins, pi_max = np.array([0.001, 0.1, 0.3]), 0.2
    # period=1 turns on PBCs, wrapping z=0.1 and z=0.95 to within pi_max.
    s1s2 = los_pvd_vs_rp(sample1, velocities1, rp_bins, pi_max,
        sample2=sample2, velocities2=velocities2, do_auto=False, period=1)
    correct_cross_pvd = np.std(np.repeat(velocities1[:, 2], npts))
    assert np.allclose(s1s2[0], 0, rtol=0.1)
    assert np.allclose(s1s2[1], correct_cross_pvd, rtol=0.001)
@pytest.mark.slow
def test_los_pvd_vs_rp_correctness3():
    """ This function tests that the
    `~halotools.mock_observables.los_pvd_vs_rp` function returns correct
    results for a controlled distribution of points whose line-of-sight velocity
    can be simply calculated.
    For this test, the configuration is two tight localizations of points,
    the first at (0.5, 0.5, 0.1), the second at (0.5, 0.35, 0.95).
    The first set of points is moving with random uniform z-velocities;
    the second set of points is at rest.
    PBCs are operative in this test.
    For every velocity in sample1, since we can count pairs analytically
    for this configuration we know exactly how many appearances of each
    velocity there will be, so we can calculate np.std on the exact
    same set of points as the marked pair-counter should operate on.
    This is the same test as test_los_pvd_vs_rp_correctness2, only here we
    bundle the two sets of points into the same sample.
    """
    npts = 100
    xc1, yc1, zc1 = 0.5, 0.5, 0.1
    xc2, yc2, zc2 = 0.5, 0.35, 0.95
    sample1 = generate_locus_of_3d_points(npts, xc=xc1, yc=yc1, zc=zc1, seed=fixed_seed)
    sample2 = generate_locus_of_3d_points(npts, xc=xc2, yc=yc2, zc=zc2, seed=fixed_seed)
    velocities1 = np.zeros(npts*3).reshape(npts, 3)
    velocities2 = np.zeros(npts*3).reshape(npts, 3)
    with NumpyRNGContext(fixed_seed):
        velocities1[:, 2] = np.random.uniform(0, 1, npts)
    # Bundle both loci into a single sample for an auto-correlation run.
    sample = np.concatenate((sample1, sample2))
    velocities = np.concatenate((velocities1, velocities2))
    rp_bins, pi_max = np.array([0.001, 0.1, 0.3]), 0.2
    s1s1 = los_pvd_vs_rp(sample, velocities, rp_bins, pi_max, period=1)
    correct_cross_pvd = np.std(np.repeat(velocities1[:, 2], npts))
    assert np.allclose(s1s1[1], correct_cross_pvd, rtol=0.001)
@pytest.mark.slow
def test_los_pvd_vs_rp_auto_consistency():
    """ Verify that we get self-consistent auto-correlation results
    regardless of whether we ask for cross-correlations.
    """
    npts = 100
    xc1, yc1, zc1 = 0.5, 0.5, 0.1
    xc2, yc2, zc2 = 0.5, 0.5, 0.95
    sample1 = generate_locus_of_3d_points(npts, xc=xc1, yc=yc1, zc=zc1, seed=fixed_seed)
    sample2 = generate_locus_of_3d_points(npts, xc=xc2, yc=yc2, zc=zc2, seed=fixed_seed)
    velocities1 = np.zeros(npts*3).reshape(npts, 3)
    velocities2 = np.zeros(npts*3).reshape(npts, 3)
    with NumpyRNGContext(fixed_seed):
        velocities1[:, 2] = np.random.uniform(0, 1, npts)
    rp_bins, pi_max = np.array([0.001, 0.1, 0.3]), 0.2
    s1s1a, s1s2a, s2s2a = los_pvd_vs_rp(sample1, velocities1, rp_bins, pi_max,
        sample2=sample2, velocities2=velocities2)
    s1s1b, s2s2b = los_pvd_vs_rp(sample1, velocities1, rp_bins, pi_max,
        sample2=sample2, velocities2=velocities2,
        do_cross=False)
    # Auto terms must not depend on whether the cross term was requested.
    assert np.allclose(s1s1a, s1s1b, rtol=0.001)
    assert np.allclose(s2s2a, s2s2b, rtol=0.001)
@pytest.mark.slow
def test_los_pvd_vs_rp_cross_consistency():
    """ Verify that we get self-consistent cross-correlation results
    regardless of whether we ask for auto-correlations.
    """
    npts = 100
    xc1, yc1, zc1 = 0.5, 0.5, 0.1
    xc2, yc2, zc2 = 0.5, 0.5, 0.95
    sample1 = generate_locus_of_3d_points(npts, xc=xc1, yc=yc1, zc=zc1, seed=fixed_seed)
    sample2 = generate_locus_of_3d_points(npts, xc=xc2, yc=yc2, zc=zc2, seed=fixed_seed)
    velocities1 = np.zeros(npts*3).reshape(npts, 3)
    velocities2 = np.zeros(npts*3).reshape(npts, 3)
    with NumpyRNGContext(fixed_seed):
        velocities1[:, 2] = np.random.uniform(0, 1, npts)
    rp_bins, pi_max = np.array([0.001, 0.1, 0.3]), 0.2
    s1s1a, s1s2a, s2s2a = los_pvd_vs_rp(sample1, velocities1, rp_bins, pi_max,
        sample2=sample2, velocities2=velocities2)
    s1s2b = los_pvd_vs_rp(sample1, velocities1, rp_bins, pi_max,
        sample2=sample2, velocities2=velocities2,
        do_auto=False)
    # The cross term must not depend on whether the auto terms were requested.
    assert np.allclose(s1s2a, s1s2b, rtol=0.001)
| 37.47619 | 88 | 0.705591 |
9ffcfb72f5decfec20d2461f4dac5f0c92f898f0 | 840 | py | Python | chapter_8/find_duplicate_str.py | not-sponsored/Guide-to-Data-Structures-and-Algorithms-Exercises | a905298c594a826e558cd1c94876b632db5d4d11 | [
"Fair"
] | null | null | null | chapter_8/find_duplicate_str.py | not-sponsored/Guide-to-Data-Structures-and-Algorithms-Exercises | a905298c594a826e558cd1c94876b632db5d4d11 | [
"Fair"
] | null | null | null | chapter_8/find_duplicate_str.py | not-sponsored/Guide-to-Data-Structures-and-Algorithms-Exercises | a905298c594a826e558cd1c94876b632db5d4d11 | [
"Fair"
] | null | null | null | # Answer to exercise 2 in chapter 8
#
# Exercise from: A Common-Sense Guide to Data Structures and Algorithms
# Level Up Your Core Programming Skills
# by Jay Wengrow and edited by Brian MacDonald
from collections import defaultdict
import sys
def find_duplicate(strings: list):
    """Report and return the first repeated item of a comma-joined string.

    Scans the comma-separated items of *strings* in order, prints the
    first value seen twice, and returns it ('' when there is none).
    """
    items = [str(x) for x in strings.split(',')]
    seen = set()
    first_repeat = ''
    for item in items:
        if item in seen:
            first_repeat = item
            print(f'The duplicate string is "{item}"')
            break
        seen.add(item)
    return first_repeat
if __name__ == "__main__":
    # Runs only when executed as a script, not on import.
    # example usage:
    # $python find_duplicate_str.py a,b,c,d,a,e,f
    # -> The duplicate string is "a"
    # argv[1] is a list of strings separated by commas with no spaces
    find_duplicate(sys.argv[1])
| 26.25 | 73 | 0.710714 |
5e06ffbfae51d08a310a35671dacaaa4e53635df | 629 | py | Python | Scripting/Scripts/ParseMultipleFilesRemoveStringCharacters.py | SGIvanov/LodeRunnerMachineLearning | faf26c9560dded715f64a6ca062e30ee273ad7d1 | [
"MIT"
] | 1 | 2022-02-04T08:00:02.000Z | 2022-02-04T08:00:02.000Z | Scripting/Scripts/ParseMultipleFilesRemoveStringCharacters.py | SGIvanov/LodeRunnerMachineLearning | faf26c9560dded715f64a6ca062e30ee273ad7d1 | [
"MIT"
] | null | null | null | Scripting/Scripts/ParseMultipleFilesRemoveStringCharacters.py | SGIvanov/LodeRunnerMachineLearning | faf26c9560dded715f64a6ca062e30ee273ad7d1 | [
"MIT"
] | null | null | null | f_inRoot = 'D:\\LastYear\\Licenta\\Scripting\\Levels\\'
f_outRoot = 'D:\\LastYear\\Licenta\\Scripting\\LevelsOut\\'
f_Name = 'Level'
f_outReplacer = 'Out'
extension = '.txt'
for i in range (1,150):
f_in = f_inRoot + f_Name + str(i) + extension
f_out = f_outRoot + f_Name + f_outReplacer + str(i) + extension
with open(f_in, 'r') as fin,open(f_out,'w') as fout:
for lineno, line in enumerate(fin, 1):
line = line.replace("+","");
if(lineno !=16):
line = line.split("\"")[1] + "\n"
else:
line = line.split("\"")[1]
fout.write(line) | 39.3125 | 67 | 0.550079 |
df7ee0dfc9d2d3be4ba97033813307d5d6c9e548 | 743 | py | Python | examples/addons/iterdxf_add_to_new_drawing.py | hh-wu/ezdxf | 62509ba39b826ee9b36f19c0a5abad7f3518186a | [
"MIT"
] | null | null | null | examples/addons/iterdxf_add_to_new_drawing.py | hh-wu/ezdxf | 62509ba39b826ee9b36f19c0a5abad7f3518186a | [
"MIT"
] | null | null | null | examples/addons/iterdxf_add_to_new_drawing.py | hh-wu/ezdxf | 62509ba39b826ee9b36f19c0a5abad7f3518186a | [
"MIT"
] | null | null | null | # Copyright (c) 2020, Manfred Moitzi
# License: MIT License
import time
from pathlib import Path
from collections import Counter
import ezdxf
from ezdxf.addons import iterdxf
DIR = Path('~/Desktop/Outbox').expanduser()
BIGFILE = Path(r'D:\Source\dxftest\GKB-R2010.dxf')
# BIGFILE = Path(r'D:\Source\dxftest\ACAD_R2000.dxf')
doc = ezdxf.new()
msp = doc.modelspace()
print('Modelspace Iterator:')
counter = Counter()
t0 = time.perf_counter()
for entity in iterdxf.modelspace(BIGFILE):
counter[entity.dxftype()] += 1
try:
msp.add_foreign_entity(entity)
except ezdxf.DXFValueError:
pass
ta = time.perf_counter() - t0
print(f'Processing time: {ta:.2f}s')
print('Saving as ezdxf.dxf')
doc.saveas(DIR / 'ezdxf.dxf')
| 23.21875 | 53 | 0.713324 |
2c24e6dddf13ab51673e7db09708ecf28d0d678a | 44 | py | Python | test.py | be-panther/Bow | 62f5df020d445152e298e76e787cbaa0bb2f7fd8 | [
"Apache-2.0"
] | 1 | 2019-03-28T08:21:41.000Z | 2019-03-28T08:21:41.000Z | test.py | be-panther/Bow | 62f5df020d445152e298e76e787cbaa0bb2f7fd8 | [
"Apache-2.0"
] | null | null | null | test.py | be-panther/Bow | 62f5df020d445152e298e76e787cbaa0bb2f7fd8 | [
"Apache-2.0"
] | null | null | null | print('hello')
print('hello')
print('hello') | 14.666667 | 14 | 0.681818 |
b73e5444b22e41207fb6a9634f93e66a68401bb1 | 3,309 | py | Python | homeassistant/components/demo/alarm_control_panel.py | MrDelik/core | 93a66cc357b226389967668441000498a10453bb | [
"Apache-2.0"
] | 30,023 | 2016-04-13T10:17:53.000Z | 2020-03-02T12:56:31.000Z | homeassistant/components/demo/alarm_control_panel.py | MrDelik/core | 93a66cc357b226389967668441000498a10453bb | [
"Apache-2.0"
] | 24,710 | 2016-04-13T08:27:26.000Z | 2020-03-02T12:59:13.000Z | homeassistant/components/demo/alarm_control_panel.py | MrDelik/core | 93a66cc357b226389967668441000498a10453bb | [
"Apache-2.0"
] | 11,956 | 2016-04-13T18:42:31.000Z | 2020-03-02T09:32:12.000Z | """Demo platform that has two fake alarm control panels."""
from __future__ import annotations
import datetime
from homeassistant.components.manual.alarm_control_panel import ManualAlarm
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
CONF_ARMING_TIME,
CONF_DELAY_TIME,
CONF_TRIGGER_TIME,
STATE_ALARM_ARMED_AWAY,
STATE_ALARM_ARMED_CUSTOM_BYPASS,
STATE_ALARM_ARMED_HOME,
STATE_ALARM_ARMED_NIGHT,
STATE_ALARM_ARMED_VACATION,
STATE_ALARM_DISARMED,
STATE_ALARM_TRIGGERED,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType
async def async_setup_platform(
hass: HomeAssistant,
config: ConfigType,
async_add_entities: AddEntitiesCallback,
discovery_info: DiscoveryInfoType | None = None,
) -> None:
"""Set up the Demo alarm control panel platform."""
async_add_entities(
[
ManualAlarm(
hass,
"Security",
"1234",
None,
True,
False,
{
STATE_ALARM_ARMED_AWAY: {
CONF_ARMING_TIME: datetime.timedelta(seconds=5),
CONF_DELAY_TIME: datetime.timedelta(seconds=0),
CONF_TRIGGER_TIME: datetime.timedelta(seconds=10),
},
STATE_ALARM_ARMED_HOME: {
CONF_ARMING_TIME: datetime.timedelta(seconds=5),
CONF_DELAY_TIME: datetime.timedelta(seconds=0),
CONF_TRIGGER_TIME: datetime.timedelta(seconds=10),
},
STATE_ALARM_ARMED_NIGHT: {
CONF_ARMING_TIME: datetime.timedelta(seconds=5),
CONF_DELAY_TIME: datetime.timedelta(seconds=0),
CONF_TRIGGER_TIME: datetime.timedelta(seconds=10),
},
STATE_ALARM_ARMED_VACATION: {
CONF_ARMING_TIME: datetime.timedelta(seconds=5),
CONF_DELAY_TIME: datetime.timedelta(seconds=0),
CONF_TRIGGER_TIME: datetime.timedelta(seconds=10),
},
STATE_ALARM_DISARMED: {
CONF_DELAY_TIME: datetime.timedelta(seconds=0),
CONF_TRIGGER_TIME: datetime.timedelta(seconds=10),
},
STATE_ALARM_ARMED_CUSTOM_BYPASS: {
CONF_ARMING_TIME: datetime.timedelta(seconds=5),
CONF_DELAY_TIME: datetime.timedelta(seconds=0),
CONF_TRIGGER_TIME: datetime.timedelta(seconds=10),
},
STATE_ALARM_TRIGGERED: {
CONF_ARMING_TIME: datetime.timedelta(seconds=5)
},
},
)
]
)
async def async_setup_entry(
    hass: HomeAssistant,
    config_entry: ConfigEntry,
    async_add_entities: AddEntitiesCallback,
) -> None:
    """Set up the Demo config entry."""
    # Config-entry setup just delegates to the platform setup with an
    # empty YAML config; the demo panels are defined entirely in code.
    await async_setup_platform(hass, {}, async_add_entities)
| 38.034483 | 75 | 0.584769 |
9ed92299fbee2078ed318bfe38ec85e16fafb716 | 3,166 | py | Python | tests/bugs/core_0629_test.py | FirebirdSQL/firebird-qa | 96af2def7f905a06f178e2a80a2c8be4a4b44782 | [
"MIT"
] | 1 | 2022-02-05T11:37:13.000Z | 2022-02-05T11:37:13.000Z | tests/bugs/core_0629_test.py | FirebirdSQL/firebird-qa | 96af2def7f905a06f178e2a80a2c8be4a4b44782 | [
"MIT"
] | 1 | 2021-09-03T11:47:00.000Z | 2021-09-03T12:42:10.000Z | tests/bugs/core_0629_test.py | FirebirdSQL/firebird-qa | 96af2def7f905a06f178e2a80a2c8be4a4b44782 | [
"MIT"
] | 1 | 2021-06-30T14:14:16.000Z | 2021-06-30T14:14:16.000Z | #coding:utf-8
#
# id: bugs.core_0629
# title: Grouping on derived fields processing NULL data kills IB
# decription:
# tracker_id: CORE-0629
# min_versions: ['2.5.0']
# versions: 2.5
# qmid: None
import pytest
from firebird.qa import db_factory, isql_act, Action
# version: 2.5
# resources: None
# No textual substitutions are needed when comparing actual vs. expected output.
substitutions_1 = []

# The database starts empty; all DDL/DML happens inside the test script below.
init_script_1 = """"""

db_1 = db_factory(sql_dialect=3, init=init_script_1)

# ISQL script for CORE-629: grouping on derived / computed columns that may be
# NULL (date differences, computed-by expressions, casts) used to crash the
# server. Each section rebuilds the view/table and groups on such a column.
test_script_1 = """
set list on;
create or alter view v_test as select 1 id from rdb$database;
commit;
recreate table test(
id integer not null,
dt_beg date,
dt_end date,
constraint pk_test primary key (id)
);
commit;
create or alter view v_test as
select id, extract(year from dt_beg) - extract(year from dt_end) dy
from test;
commit;
insert into test values(1, '01.01.2015', null);
insert into test values(2, '01.01.2015', '01.01.2015');
insert into test values(3, null, null);
insert into test values(4, null, null);
insert into test values(5, '01.01.2015', '31.12.2014');
commit;
select dy from v_test group by dy;
commit;
-------------------------------------------
create or alter view v_test as select 1 id from rdb$database;
commit;
recreate table test
(
a integer,
b date,
c computed by (extract(day from b)-extract(day from b))
);
commit;
insert into test(a, b) values(1, DATE '2015-05-24');
insert into test(a, b) values(1, null);
commit;
select c from test group by c;
commit;
create or alter view v_test as select b-b as dd from test;
commit;
select dd from v_test group by dd;
commit;
create or alter view v_test as select b-0 as dd from test;
select dd from v_test group by dd;
create or alter view v_test
as select cast(b as timestamp) as dt from test;
select dt from v_test group by dt;
------------
create or alter view v_test as select 1 id from rdb$database;
commit;
recreate table test(a int, b time, c computed by(cast(b as time)));
commit;
insert into test(a, b) values(1, '15:00:29.191');
insert into test(a, b) values(1, null);
commit;
select c from test group by c;
"""

act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1)

# Expected (whitespace-normalised) output: each GROUP BY must return its
# distinct values, including a <null> group, without crashing the engine.
expected_stdout_1 = """
DY <null>
DY 0
DY 1
C <null>
C 0
DD <null>
DD 0
DD <null>
DD 2015-05-24
DT <null>
DT 2015-05-24 00:00:00.0000
C <null>
C 15:00:29.1910
"""
@pytest.mark.version('>=2.5')
def test_1(act_1: Action):
    # Run the ISQL script against the test database and compare the
    # whitespace-normalised output with the expected listing.
    act_1.expected_stdout = expected_stdout_1
    act_1.execute()
    assert act_1.clean_stdout == act_1.clean_expected_stdout
| 25.532258 | 72 | 0.541377 |
4905d0aee134a872cb1077d613fc479dbfa7b7c8 | 5,130 | py | Python | dcekit/optimization/fast_opt_svr_hyperparams.py | hkaneko1985/dcek | 13d9228b2dc2fd87c2e08a01721e1b1b220f2e19 | [
"MIT"
] | 25 | 2019-08-23T12:39:14.000Z | 2022-03-30T08:58:15.000Z | dcekit/optimization/fast_opt_svr_hyperparams.py | hkaneko1985/dcek | 13d9228b2dc2fd87c2e08a01721e1b1b220f2e19 | [
"MIT"
] | 2 | 2022-01-06T11:21:21.000Z | 2022-01-18T22:11:12.000Z | dcekit/optimization/fast_opt_svr_hyperparams.py | hkaneko1985/dcek | 13d9228b2dc2fd87c2e08a01721e1b1b220f2e19 | [
"MIT"
] | 16 | 2019-12-12T08:20:48.000Z | 2022-01-26T00:34:31.000Z | # -*- coding: utf-8 -*- %reset -f
"""
@author: Hiromasa Kaneko
"""
import sys
import numpy as np
from sklearn import svm
from sklearn.model_selection import GridSearchCV
from ..validation import make_midknn_dataset
def fast_opt_svr_hyperparams(x, y, cs, epsilons, gammas, validation_method, parameter):
    """
    Fast optimization of SVR hyperparameters.

    Instead of a full 3D grid search, gamma is first pre-optimized by
    maximizing the variance of the RBF gram matrix; epsilon, C and gamma are
    then optimized one after another (1D searches) using either k-fold
    cross-validation or midknn as the validation strategy.

    Parameters
    ----------
    x : numpy.array or pandas.DataFrame
        (autoscaled) m x n matrix of X-variables of training data,
        m is the number of training sammples and
        n is the number of X-variables
    y : numpy.array or pandas.DataFrame
        (autoscaled) m x 1 vector of a Y-variable of training data
    cs : numpy.array or list
        vector of candidates of C
    epsilons : numpy.array or list
        vector of candidates of epsilons
    gammas : numpy.array or list
        vector of candidates of gammas
    validation_method : 'cv' or 'midknn'
        if 'cv', cross-validation is used, and if 'midknn', midknn is used
    parameter : int
        "fold_number"-fold cross-validation in cross-validation, and k in midknn

    Returns
    -------
    optimal_c : float
        optimized C
    optimal_epsilon : float
        optimized epsilon
    optimal_gamma : float
        optimized gamma
    """
    if validation_method not in ('cv', 'midknn'):
        sys.exit('\'{0}\' is unknown. Please check \'validation_method\'.'.format(validation_method))

    x = np.array(x)
    y = np.array(y)
    cs = np.array(cs)
    epsilons = np.array(epsilons)
    gammas = np.array(gammas)

    def _best_by_cv(fixed_params, search_name, candidates):
        # Grid-search a single hyperparameter with k-fold cross-validation,
        # keeping the other SVR parameters fixed.
        model = GridSearchCV(svm.SVR(kernel='rbf', **fixed_params),
                             {search_name: candidates}, cv=parameter)
        model.fit(x, y)
        return model.best_params_[search_name]

    def _best_by_midknn(candidates, model_factory):
        # Return the candidate with the highest r2 on the midknn samples.
        # np.argmax picks the first maximum, matching the original
        # np.where(r2 == max(r2))[0][0] selection.
        r2_midknns = []
        for candidate in candidates:
            model = model_factory(candidate)
            model.fit(x, y)
            estimated_y_midknn = np.ndarray.flatten(model.predict(x_midknn))
            r2_midknns.append(float(1 - sum((y_midknn - estimated_y_midknn) ** 2) /
                                    sum((y_midknn - y_midknn.mean()) ** 2)))
        return candidates[int(np.argmax(r2_midknns))]

    print('1/4 ... pre-optimization of gamma')
    optimal_gamma = maximize_variance_of_gram_matrix(x, gammas)

    if validation_method == 'midknn':
        # make midknn data points (used by _best_by_midknn via closure)
        x_midknn, y_midknn = make_midknn_dataset(x, y, parameter)

    # Optimize epsilon
    print('2/4 ... optimization of epsilon')
    if validation_method == 'cv':
        optimal_epsilon = _best_by_cv({'C': 3, 'gamma': optimal_gamma},
                                      'epsilon', epsilons)
    else:
        optimal_epsilon = _best_by_midknn(
            epsilons,
            lambda e: svm.SVR(kernel='rbf', C=3, epsilon=e, gamma=optimal_gamma))

    # Optimize C
    print('3/4 ... optimization of c')
    if validation_method == 'cv':
        optimal_c = _best_by_cv({'epsilon': optimal_epsilon, 'gamma': optimal_gamma},
                                'C', cs)
    else:
        optimal_c = _best_by_midknn(
            cs,
            lambda c: svm.SVR(kernel='rbf', C=c, epsilon=optimal_epsilon,
                              gamma=optimal_gamma))

    # Re-optimize gamma with the chosen C and epsilon
    print('4/4 ... optimization of gamma')
    if validation_method == 'cv':
        optimal_gamma = _best_by_cv({'epsilon': optimal_epsilon, 'C': optimal_c},
                                    'gamma', gammas)
    else:
        optimal_gamma = _best_by_midknn(
            gammas,
            lambda g: svm.SVR(kernel='rbf', C=optimal_c, epsilon=optimal_epsilon,
                              gamma=g))

    return optimal_c, optimal_epsilon, optimal_gamma
def maximize_variance_of_gram_matrix(x, gammas):
    """
    Return the gamma from ``gammas`` whose RBF gram matrix over ``x`` has the
    largest sample variance.

    Parameters
    ----------
    x : numpy.array
        m x n matrix of (autoscaled) X-variables.
    gammas : numpy.array or list
        Candidate values for the RBF kernel width gamma.

    Returns
    -------
    optimal_gamma : float
        The candidate gamma maximizing the variance of the gram matrix
        (first maximiser in case of ties).
    """
    variance_of_gram_matrix = []
    for svr_gamma in gammas:
        # K_ij = exp(-gamma * ||x_i - x_j||^2); broadcasting builds all pairs
        gram_matrix = np.exp(
            -svr_gamma * ((x[:, np.newaxis] - x) ** 2).sum(axis=2))
        variance_of_gram_matrix.append(gram_matrix.var(ddof=1))
    # argmax returns the first maximum, identical to np.where(v == max(v))[0][0]
    return gammas[int(np.argmax(variance_of_gram_matrix))]
| 41.04 | 125 | 0.62924 |
b2fe2fba146d070100d267a8377e0fe877d193f9 | 541 | py | Python | lodes_downloader/__main__.py | karstendick/lodes-downloader | 8be425050a92fd74b0d1babf912a7b6121032a0d | [
"MIT"
] | null | null | null | lodes_downloader/__main__.py | karstendick/lodes-downloader | 8be425050a92fd74b0d1babf912a7b6121032a0d | [
"MIT"
] | null | null | null | lodes_downloader/__main__.py | karstendick/lodes-downloader | 8be425050a92fd74b0d1babf912a7b6121032a0d | [
"MIT"
] | null | null | null | import requests
from bs4 import BeautifulSoup
import wget
# subdirectories = ['rac', 'wac', 'od']
PA_ROOT = 'https://lehd.ces.census.gov/data/lodes/LODES7/pa/wac/'
def main():
    """Scrape the PA WAC directory listing and download every CSV it links to.

    Downloads are saved into the local ``data/`` directory.
    """
    pa_root_response = requests.get(PA_ROOT)
    pa_root_html = pa_root_response.text
    soup = BeautifulSoup(pa_root_html, 'html.parser')
    for link in soup.find_all('a'):
        href = link.get('href')
        # <a> tags without an href attribute yield None; ``'csv' in None``
        # would raise TypeError, so guard before the substring test.
        if href and 'csv' in href:
            print(PA_ROOT + href)
            wget.download(PA_ROOT + href, out='data/')


if __name__ == "__main__":
    main()
4f85b45925a536d6f3223a9af7a9ce2306b9933e | 2,365 | py | Python | aioflo/water.py | dmulcahey/aioflo | 0c7e2befc1bc38cd4274f5e75627e4b8b516a93a | [
"MIT"
] | 9 | 2020-05-21T02:31:00.000Z | 2021-10-21T21:24:06.000Z | aioflo/water.py | dmulcahey/aioflo | 0c7e2befc1bc38cd4274f5e75627e4b8b516a93a | [
"MIT"
] | 48 | 2020-01-16T17:22:47.000Z | 2022-03-01T18:06:20.000Z | aioflo/water.py | dmulcahey/aioflo | 0c7e2befc1bc38cd4274f5e75627e4b8b516a93a | [
"MIT"
] | 1 | 2020-07-24T11:41:52.000Z | 2020-07-24T11:41:52.000Z | """Define /water endpoints."""
from datetime import datetime
from typing import Awaitable, Callable
from .const import API_V2_BASE
from .util import raise_on_invalid_argument
INTERVAL_DAILY = "1d"
INTERVAL_HOURLY = "1h"
INTERVAL_MONTHLY = "1m"
INTERVALS = set([INTERVAL_DAILY, INTERVAL_HOURLY, INTERVAL_MONTHLY])
class Water:  # pylint: disable=too-few-public-methods
    """Define an object to handle the /water endpoints."""

    def __init__(self, request: Callable[..., Awaitable]) -> None:
        """Initialize with the shared authenticated request coroutine."""
        self._request: Callable[..., Awaitable] = request

    async def get_consumption_info(
        self,
        location_id: str,
        start: datetime,
        end: datetime,
        interval: str = INTERVAL_HOURLY,
    ) -> dict:
        """Return water consumption data for a location over a time range.

        :param location_id: A Flo location UUID
        :type location_id: ``str``
        :param start: The start datetime of the range to examine
        :type start: ``datetime.datetime``
        :param end: The end datetime of the range to examine
        :type end: ``datetime.datetime``
        :param interval: The aggregation interval (one of ``INTERVALS``)
        :type interval: ``str``
        :rtype: ``dict``
        """
        # Reject unknown aggregation intervals before hitting the API:
        raise_on_invalid_argument(interval, INTERVALS)
        return await self._request(
            "get",
            f"{API_V2_BASE}/water/consumption",
            params={
                "endDate": end.isoformat(),
                "interval": interval,
                "locationId": location_id,
                "startDate": start.isoformat(),
            },
        )

    async def get_metrics(
        self,
        device_mac_address: str,
        start: datetime,
        end: datetime,
        interval: str = INTERVAL_HOURLY,
    ) -> dict:
        """Return water metrics for a device over a time range.

        :param device_mac_address: The device MAC address (colons allowed)
        :type device_mac_address: ``str``
        :param start: The start datetime of the range to examine
        :type start: ``datetime.datetime``
        :param end: The end datetime of the range to examine
        :type end: ``datetime.datetime``
        :param interval: The aggregation interval (one of ``INTERVALS``)
        :type interval: ``str``
        :rtype: ``dict``
        """
        raise_on_invalid_argument(interval, INTERVALS)
        return await self._request(
            "get",
            f"{API_V2_BASE}/water/metrics",
            params={
                "endDate": end.isoformat(),
                "interval": interval,
                # the API expects the MAC address without separators
                "macAddress": device_mac_address.replace(":", ""),
                "startDate": start.isoformat(),
            },
        )
b921e1bfe7718177ab6eacbf84da7a882943a933 | 23,368 | py | Python | src/c3s_sm/interface.py | pstradio/c3s_sm | 83b8c833a54e3decd47efbd980806bb3ed9f9b60 | [
"MIT"
] | null | null | null | src/c3s_sm/interface.py | pstradio/c3s_sm | 83b8c833a54e3decd47efbd980806bb3ed9f9b60 | [
"MIT"
] | 7 | 2018-11-20T13:56:39.000Z | 2020-07-03T19:18:51.000Z | src/c3s_sm/interface.py | pstradio/c3s_sm | 83b8c833a54e3decd47efbd980806bb3ed9f9b60 | [
"MIT"
] | 6 | 2018-10-19T12:20:16.000Z | 2022-02-02T08:48:43.000Z | # The MIT License (MIT)
#
# Copyright (c) 2018, TU Wien
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
'''
Readers for the C3S soil moisture products daily, dekadal (10-daily) and monthly
images as well as for timeseries generated using this module
'''
import pandas as pd
import os
import netCDF4 as nc
import numpy as np
from datetime import timedelta
from dateutil.relativedelta import relativedelta
from netCDF4 import num2date
from smecv_grid.grid import SMECV_Grid_v052
from pygeobase.object_base import Image
from pygeobase.io_base import ImageBase
from pygeobase.io_base import MultiTemporalImageBase
from pygeogrids.netcdf import load_grid
import warnings
from netCDF4 import Dataset
from pynetcf.time_series import GriddedNcOrthoMultiTs
from datetime import datetime
from parse import parse
try:
import xarray as xr
xr_supported = True
except ImportError:
xr_supported = False
fntempl = "C3S-SOILMOISTURE-L3S-SSM{unit}-{prod}-{temp}-{datetime}-{cdr}-{vers}.{subvers}.nc"
class C3SImg(ImageBase):
    """
    Class to read a single C3S image (for one time stamp)
    """

    def __init__(self,
                 filename,
                 parameters=None,
                 mode='r',
                 subgrid=SMECV_Grid_v052(None),
                 flatten=False,
                 fillval=None):
        """
        Parameters
        ----------
        filename : str
            Path to the file to read
        parameters : str or Iterable, optional (default: 'sm')
            Names of parameters in the file to read.
            If None are passed, all are read.
        mode : str, optional (default: 'r')
            Netcdf file mode, choosing something different to r may delete data.
        subgrid : SMECV_Grid_v052
            A subgrid of points to read. All other GPIS are masked (2d reading)
            or ignored (when flattened).
        flatten: bool, optional (default: False)
            If set then the data is read into 1D arrays. This is used to e.g
            reshuffle the data for a subset of points.
        fillval : float or dict or None, optional (default: np.nan)
            Fill Value for masked pixels, if a dict is passed, this can be
            set for each parameter individually, otherwise it applies to all.
            Note that choosing np.nan can lead to a change in dtype for some
            (int) parameters. None will use the fill value from the netcdf file
        """
        self.path = os.path.dirname(filename)
        self.fname = os.path.basename(filename)
        super(C3SImg, self).__init__(os.path.join(self.path, self.fname), mode=mode)
        # normalize ``parameters`` to a list; an empty list means
        # "read all data variables" (resolved in _read_flat_img)
        if parameters is None:
            parameters = []
        if type(parameters) != list:
            parameters = [parameters]
        self.parameters = parameters
        self.subgrid = subgrid  # subset to read
        self.grid = SMECV_Grid_v052(None)  # global input image
        self.flatten = flatten
        self.image_missing = False
        self.img = None  # to be loaded
        self.glob_attrs = None
        # fill values can be given per parameter (dict) or for all parameters;
        # ``None`` entries are later replaced by the netcdf _FillValue
        if isinstance(fillval, dict):
            self.fillval = fillval
            for p in self.parameters:
                if p not in self.fillval:
                    self.fillval[p] = None
        else:
            self.fillval = {p: fillval for p in self.parameters}

    def _read_flat_img(self) -> (dict, dict, dict, datetime):
        """
        Reads a single C3S image, flat with gpi0 as first element

        Returns
        -------
        param_img : dict
            Variable name -> flattened 1d data array (fill values applied).
        param_meta : dict
            Variable name -> netcdf variable attributes.
        global_attrs : dict
            Global netcdf attributes plus the image time stamp.
        timestamp : datetime
            Time stamp read from the file (the file contains exactly one).
        """
        with Dataset(self.filename, mode='r') as ds:
            timestamp = num2date(ds['time'], ds['time'].units,
                                 only_use_cftime_datetimes=True,
                                 only_use_python_datetimes=False)
            assert len(timestamp) == 1, "Found more than 1 time stamps in image"
            timestamp = timestamp[0]
            param_img = {}
            param_meta = {}
            if len(self.parameters) == 0:
                # all data vars, exclude coord vars
                self.parameters = [k for k in ds.variables.keys()
                                   if k not in ds.dimensions.keys()]
            parameters = list(self.parameters)
            for parameter in parameters:
                metadata = {}
                param = ds.variables[parameter]
                data = param[:][0]  # there is only 1 time stamp in the image
                self.shape = (data.shape[0], data.shape[1])
                # read long name, FillValue and unit
                for attr in param.ncattrs():
                    metadata[attr] = param.getncattr(attr)
                if parameter in self.fillval:
                    if self.fillval[parameter] is None:
                        # fall back to the fill value stored in the file
                        self.fillval[parameter] = data.fill_value
                    # promote dtype so the custom fill value fits
                    # (e.g. NaN in an int variable)
                    common_dtype = np.find_common_type(
                        array_types=[data.dtype],
                        scalar_types=[type(self.fillval[parameter])])
                    self.fillval[parameter] = np.array([self.fillval[parameter]],
                                                       dtype=common_dtype)[0]
                    data = data.astype(common_dtype)
                    data = data.filled(self.fillval[parameter])
                else:
                    self.fillval[parameter] = data.fill_value
                    data = data.filled()
                # flip north/south so the flattened array starts at gpi 0
                data = np.flipud(data)
                data = data.flatten()
                metadata['image_missing'] = 0
                param_img[parameter] = data
                param_meta[parameter] = metadata
            global_attrs = ds.__dict__
            global_attrs['timestamp'] = str(timestamp)
        return param_img, param_meta, global_attrs, timestamp

    def _mask_and_reshape(self,
                          data: dict) -> dict:
        """
        Takes the grid and drops points that are not active.
        for flattened arrays that means that only the active gpis are kept.
        for 2 arrays inactive gpis are set to the parameter's fill value.

        Parameters
        ----------
        data: dict
            Variable names and flattened image data.

        Returns
        -------
        dat : dict
            Masked, reshaped data.
        """
        # check if flatten. if flatten, dont crop and dont reshape
        # if not flatten, reshape based on grid shape.
        # select active gpis
        for param, dat in data.items():
            if self.flatten:
                dat = dat[self.subgrid.activegpis]
            else:
                exclude = (~np.isin(self.grid.gpis, self.subgrid.activegpis))
                dat[exclude] = self.fillval[param]
                if len(self.shape) != 2:
                    raise ValueError(
                        "Reading 2d image needs grid with 2d shape"
                        "You can either use the global grid without subsets,"
                        "or make sure that you create a subgrid from bbox in"
                        "an area where no gpis are missing.")
                dat = dat.reshape(self.shape)
            data[param] = dat
        return data

    def read(self, timestamp=None):
        """
        Read a single C3S image, if it exists, otherwise fill an empty image.

        Parameters
        ----------
        timestamp : datetime, optional (default: None)
            Time stamp of the image, if this is passed, it is compared to
            the time stamp from the loaded file and must match
        """
        data, var_meta, glob_meta, img_timestamp = self._read_flat_img()
        if timestamp is not None:
            if img_timestamp is None:
                img_timestamp = timestamp
            assert img_timestamp == timestamp, "Time stamps do not match"
        # when flattened, this drops already all non-active gpis
        data = self._mask_and_reshape(data)
        if self.flatten:
            return Image(self.subgrid.activearrlon,
                         self.subgrid.activearrlat,
                         data,
                         var_meta,
                         timestamp)
        else:
            # also cut 2d case to active area:
            # find the row/col slices covering the subgrid's bounding box
            min_lat, min_lon = self.subgrid.activearrlat.min(), \
                               self.subgrid.activearrlon.min()
            max_lat, max_lon = self.subgrid.activearrlat.max(), \
                               self.subgrid.activearrlon.max()
            corners = self.grid.gpi2rowcol([
                self.grid.find_nearest_gpi(min_lon, min_lat)[0],  # llc
                self.grid.find_nearest_gpi(max_lon, min_lat)[0],  # lrc
                self.grid.find_nearest_gpi(max_lon, max_lat)[0],  # urc
            ])
            rows = slice(corners[0][0], corners[0][2] + 1)
            cols = slice(corners[1][0], corners[1][1] + 1)
            return Image(self.grid.arrlon.reshape(*self.shape)[rows, cols],
                         np.flipud(self.grid.arrlat.reshape(*self.shape)[rows, cols]),
                         {k: np.flipud(v[rows, cols]) for k, v in data.items()},
                         var_meta,
                         timestamp)

    def write(self, *args, **kwargs):
        # writing images is not supported; required by the ImageBase interface
        pass

    def close(self, *args, **kwargs):
        # no persistent file handle is kept open; required by the interface
        pass

    def flush(self, *args, **kwargs):
        # nothing to flush; required by the ImageBase interface
        pass
class C3S_Nc_Img_Stack(MultiTemporalImageBase):
    """
    Class for reading multiple images and iterate over them.
    """

    def __init__(self,
                 data_path,
                 parameters='sm',
                 subgrid=SMECV_Grid_v052(None),
                 flatten=False,
                 solve_ambiguity='sort_last',
                 fntempl=fntempl,
                 subpath_templ=None,
                 fillval=None):
        """
        Parameters
        ----------
        data_path : str
            Path to directory where C3S images are stored
        parameters : list or str, optional (default: 'sm')
            Variables to read from the image files.
        subgrid : pygeogrids.CellGrid, optional (default: SMECV_Grid_v052(None))
            Subset of the image to read
        flatten : bool, optional (default: False)
            Flatten the read image to a 1D array instead of a 2D array
        solve_ambiguity : str, optional (default: 'sort_last')
            Method to solve ambiguous time stamps, e.g. if a reprocessing
            was performed.
            - error: raises error in case of ambiguity
            - sort_last (default): uses the last file when sorted by file
              name, in case that multiple files are found.
            - sort_first: uses the first file when sorted by file name
              in case that multiple files are found.
        fntempl: str, optional
            Filename template to parse datetime from.
        subpath_templ : list or None, optional (default: None)
            List of subdirectory names to build file paths. e.g. ['%Y'] if files
            in collected by years.
        fillval : float or dict or None, optional (default: None)
            Fill Value for masked pixels, if a dict is passed, this can be
            set for each parameter individually, otherwise it applies to all.
            Note that choosing np.nan can lead to a change in dtype for some
            parameters (int to float).
            None will use the fill value from the netcdf file
        """
        self.data_path = data_path
        ioclass_kwargs = {'parameters': parameters,
                          'subgrid': subgrid,
                          'flatten': flatten,
                          'fillval': fillval}
        # derive the product/version/etc. fields from an existing file name
        self.fname_args = self._parse_filename(fntempl)
        self.solve_ambiguity = solve_ambiguity
        # wildcard the fields that may vary between reprocessing runs
        fn_args = self.fname_args.copy()
        fn_args['subvers'] = '*'
        fn_args['cdr'] = '*'
        filename_templ = fntempl.format(**fn_args)
        super(C3S_Nc_Img_Stack, self).__init__(path=data_path,
                                               ioclass=C3SImg,
                                               fname_templ=filename_templ,
                                               datetime_format="%Y%m%d%H%M%S",
                                               subpath_templ=subpath_templ,
                                               exact_templ=False,
                                               ioclass_kws=ioclass_kwargs)

    def _build_filename(self, timestamp: datetime, custom_templ: str = None,
                        str_param: dict = None):
        """
        This function uses _search_files to find the correct
        filename and checks if the search was unambiguous.

        Parameters
        ----------
        timestamp: datetime
            datetime for given filename
        custom_templ : string, optional
            If given the fname_templ is not used but the custom_templ. This
            is convenient for some datasets where not all file names follow
            the same convention and where the read_image function can choose
            between templates based on some condition.
        str_param : dict, optional
            If given then this dict will be applied to the fname_templ using
            the fname_templ.format(**str_param) notation before the resulting
            string is put into datetime.strftime.
        """
        filename = self._search_files(timestamp, custom_templ=custom_templ,
                                      str_param=str_param)
        if len(filename) == 0:
            raise IOError("No file found for {:}".format(timestamp.ctime()))
        if len(filename) > 1:
            # multiple matches (e.g. from a reprocessing run): resolve
            # according to the configured ambiguity strategy
            filename = sorted(filename)
            if self.solve_ambiguity == 'sort_last':
                warnings.warn(f'Ambiguous file for {str(timestamp)} found.'
                              f' Sort and use last: {filename[-1]}, skipped {filename[:-1]}')
                filename = [filename[-1]]
            elif self.solve_ambiguity == 'sort_first':
                warnings.warn(f'Ambiguous file for {str(timestamp)} found.'
                              f' Sort and use first: {filename[0]}')
                filename = [filename[0]]
            else:
                raise IOError(
                    "File search is ambiguous {:}".format(filename))
        return filename[0]

    def _parse_filename(self, template):
        """
        Search a file in the passed directory and use the filename template to
        to read settings.

        Parameters
        ----------
        template : str
            Template for all files in the passed directory.

        Returns
        -------
        parse_result : dict
            Parsed content of filename string from filename template.
        """
        # use the first file that matches the template; the 'datetime' field
        # is kept as a placeholder so the template can be re-formatted later
        for curr, subdirs, files in os.walk(self.data_path):
            for f in files:
                file_args = parse(template, f)
                if file_args is None:
                    continue
                else:
                    file_args = file_args.named
                    file_args['datetime'] = '{datetime}'
                    return file_args
        raise IOError('No file name in passed directory fits to template')

    def tstamps_for_daterange(self, start_date, end_date):
        """
        Return dates in the passed period, with respect to the temp resolution
        of the images in the path.

        Parameters
        ----------
        start_date: datetime
            start of date range
        end_date: datetime
            end of date range

        Returns
        -------
        timestamps : list
            list of datetime objects of each available image between
            start_date and end_date
        """
        # step size depends on the temporal resolution encoded in the filename
        if self.fname_args['temp'] == 'MONTHLY':
            next = lambda date: date + relativedelta(months=1)
        elif self.fname_args['temp'] == 'DAILY':
            next = lambda date: date + relativedelta(days=1)
        elif self.fname_args['temp'] == 'DEKADAL':
            next = lambda date: date + relativedelta(days=10)
        else:
            raise NotImplementedError
        timestamps = [start_date]
        while next(timestamps[-1]) <= end_date:
            timestamps.append(next(timestamps[-1]))
        return timestamps

    def read(self, timestamp, **kwargs):
        """
        Return an image for a specific timestamp.

        Parameters
        ----------
        timestamp : datetime.datetime
            Time stamp.

        Returns
        -------
        image : object
            pygeobase.object_base.Image object
        """
        try:
            img = self._assemble_img(timestamp, **kwargs)
            return img
        except IOError:
            warnings.warn(f'Could not load image for {timestamp}.')
            raise IOError
class C3STs(GriddedNcOrthoMultiTs):
    """
    Module for reading C3S time series in netcdf format.
    """

    def __init__(self, ts_path, grid_path=None, remove_nans=False, drop_tz=True,
                 **kwargs):
        """
        Class for reading C3S SM time series after reshuffling.

        Parameters
        ----------
        ts_path : str
            Directory where the netcdf time series files are stored
        grid_path : str, optional (default: None)
            Path to grid file, that is used to organize the location of time
            series to read. If None is passed, grid.nc is searched for in the
            ts_path.
        remove_nans : bool or dict, optional (default: False)
            Replace fill values in SM time series. Either
            - dict of form {parameter: {val_to_replace: replacement_val}, ... }
            - dict of form {parameter : val_to_set_NaN ...}
            - True to replace -9999 with nan anywhere
            - False to do nothing
        drop_tz: bool, optional (default: True)
            Drop time zone information from time series

        Optional keyword arguments that are passed to the Gridded Base:
        ------------------------------------------------------------------------
            parameters : list, optional (default: None)
                Specific variable names to read, if None are selected, all are read.
            offsets : dict, optional (default:None)
                Offsets (values) that are added to the parameters (keys)
            scale_factors : dict, optional (default:None)
                Offset (value) that the parameters (key) is multiplied with
            ioclass_kws: dict
                Optional keyword arguments to pass to OrthoMultiTs class:
                ----------------------------------------------------------------
                    read_bulk : boolean, optional (default:False)
                        if set to True the data of all locations is read into memory,
                        and subsequent calls to read_ts read from the cache and not from disk
                        this makes reading complete files faster#
                    read_dates : boolean, optional (default:False)
                        if false dates will not be read automatically but only on specific
                        request useable for bulk reading because currently the netCDF
                        num2date routine is very slow for big datasets
        """
        if isinstance(remove_nans, dict):
            # normalize plain {param: value} entries to {param: {value: NaN}}
            for var, is_should in remove_nans.copy().items():
                if not isinstance(is_should, dict):
                    remove_nans[var] = {is_should: np.nan}
        self.remove_nans = remove_nans
        if grid_path is None:
            grid_path = os.path.join(ts_path, "grid.nc")
        grid = load_grid(grid_path)
        self.drop_tz = drop_tz
        super(C3STs, self).__init__(ts_path, grid=grid, **kwargs)

    def _read_gp(self, gpi, **kwargs):
        """Read a single point from passed gpi or from passed lon, lat """
        # override the _read_gp function from parent class, to add dropna functionality
        ts = super(C3STs, self)._read_gp(gpi, **kwargs)
        if ts is None:
            return None
        if self.remove_nans:
            if self.remove_nans == True:
                ts = ts.replace(-9999.0000, np.nan)
            else:
                ts = ts.replace(self.remove_nans)
        # attach or strip time zone information as configured
        if not self.drop_tz:
            ts.index = ts.index.tz_localize('UTC')
        else:
            if (hasattr(ts.index, 'tz') and (ts.index.tz is not None)):
                ts.index = ts.index.tz_convert(None)
        return ts

    def read_cell(self, cell, var='sm') -> pd.DataFrame:
        """
        Read all time series for a single variable in the selected cell.

        Parameters
        -------
        cell: int
            Cell number as in the c3s grid
        var : str, optional (default: 'sm')
            Name of the variable to read.
        """
        file_path = os.path.join(self.path, '{}.nc'.format("%04d" % (cell,)))
        with nc.Dataset(file_path) as ncfile:
            loc_id = ncfile.variables['location_id'][:]
            time = ncfile.variables['time'][:]
            unit_time = ncfile.variables['time'].units
            # convert numeric offsets to absolute time stamps
            delta = lambda t: timedelta(t)
            vfunc = np.vectorize(delta)
            since = pd.Timestamp(unit_time.split('since ')[1])
            time = since + vfunc(time)
            variable = ncfile.variables[var][:]
            variable = np.transpose(variable)
            # one column per location, indexed by time
            data = pd.DataFrame(variable, columns=loc_id, index=time)
        if self.remove_nans:
            if self.remove_nans == True:
                data = data.replace(-9999, np.nan)
            else:
                data = data.replace(self.remove_nans)
        return data

    def iter_ts(self, **kwargs):
        # iteration over all time series is not supported for this reader
        pass

    def write_ts(self, *args, **kwargs):
        # writing time series is not supported for this reader
        pass
if __name__ == '__main__':
    # One-off maintenance script: rename daily passive-product image files
    # from the old naming scheme to the new CDR naming scheme.
    root = r"R:\Projects\C3S_312b\08_scratch\v202012_ts2img\060_daily_images\passive"
    old_template = r"C3S-SOILMOISTURE-L3S-SSMV-PASSIVE-{dt}000000-fv202012.nc"
    new_template = "C3S-SOILMOISTURE-L3S-SSMV-PASSIVE-DAILY-{dt}000000-TCDR-v202012.0.0.nc"
    for year in os.listdir(root):
        for f in os.listdir(os.path.join(root, year)):
            parsed = parse(old_template, f)
            if parsed is None:
                # skip files that do not match the old pattern (e.g. files
                # already renamed on a previous run) instead of crashing
                continue
            dt = parsed['dt']
            os.rename(os.path.join(root, year, f),
                      os.path.join(root, year, new_template.format(dt=dt)))
| 38.561056 | 93 | 0.563848 |
21edbb7663115c4110d8325ea4a7afa6b96c5732 | 5,056 | py | Python | modules/sr/robot/vision/vectors.py | 13ros27/competition-simulator | 4dfea0e92c12fa9e9656ce273db3d240aee34bc4 | [
"MIT"
] | 4 | 2020-06-12T18:00:45.000Z | 2021-02-17T14:16:59.000Z | modules/sr/robot/vision/vectors.py | 13ros27/competition-simulator | 4dfea0e92c12fa9e9656ce273db3d240aee34bc4 | [
"MIT"
] | 158 | 2020-04-29T17:59:31.000Z | 2021-11-13T00:00:21.000Z | modules/sr/robot/vision/vectors.py | 13ros27/competition-simulator | 4dfea0e92c12fa9e9656ce273db3d240aee34bc4 | [
"MIT"
] | 10 | 2020-06-12T16:19:46.000Z | 2021-07-07T21:06:48.000Z | """
Vector utilities.
"""
import math
from typing import Union, Iterable, overload
# between vectors considered the same
DEGREES_TOLERANCE = 10
class Vector:
    """
    An arbitrary length vector of floating point values.

    Beyond the usual container behaviour this supports scalar multiplication
    and division, vector addition/subtraction, negation and vector
    multiplication (dot product).
    """

    def __init__(self, data: Iterable[float]) -> None:
        self.data = tuple(data)

    def magnitude(self) -> float:
        # Euclidean (L2) norm of the vector.
        return math.sqrt(sum(component ** 2 for component in self.data))

    def __eq__(self, other: object) -> bool:
        if isinstance(other, Vector):
            return self.data == other.data
        return NotImplemented

    def __hash__(self) -> int:
        return hash(self.data)

    def __repr__(self) -> str:
        return 'Vector({!r})'.format(self.data)

    def __len__(self) -> int:
        return len(self.data)

    def __round__(self, precision: int) -> 'Vector':
        return Vector(round(component, precision) for component in self.data)

    def __neg__(self) -> 'Vector':
        return self * -1

    def __add__(self, other: 'Vector') -> 'Vector':
        if not isinstance(other, Vector):
            return NotImplemented  # type: ignore[unreachable]
        if len(self) != len(other):
            raise ValueError("Dimension mismatch: cannot add {} to {}".format(
                len(self),
                len(other),
            ))
        return Vector(left + right for left, right in zip(self.data, other.data))

    def __sub__(self, other: 'Vector') -> 'Vector':
        if not isinstance(other, Vector):
            return NotImplemented  # type: ignore[unreachable]
        return self + (-other)

    @overload
    def __mul__(self, other: float) -> 'Vector':
        """
        Multiply vector by scalar.
        """
        ...

    @overload
    def __mul__(self, other: 'Vector') -> float:
        """
        Dot product between two vectors of equal length.

        Given vectors A and B, ``A · B == ||A|| ||B|| cos(theta)`` where
        theta is the angle between them.
        """
        ...

    def __mul__(self, value: 'Union[Vector, float]') -> 'Union[Vector, float]':
        if isinstance(value, (float, int)):
            # Scalar multiplication.
            return Vector(value * component for component in self.data)
        if not isinstance(value, Vector):
            return NotImplemented  # type: ignore[unreachable]
        if len(self) != len(value):
            raise ValueError("Dimension mismatch: cannot multiply {} by {}".format(
                len(self),
                len(value),
            ))
        # Dot product.
        return sum(left * right for left, right in zip(self.data, value.data))

    __rmul__ = __mul__

    def __truediv__(self, other: float) -> 'Vector':
        if not isinstance(other, (float, int)):
            return NotImplemented  # type: ignore[unreachable]
        return Vector(component / other for component in self.data)
def cross_product(vec_a: Vector, vec_b: Vector) -> Vector:
    """
    Cross product of two 3-vectors.

    Given vectors A and B, ``A × B == ||A|| ||B|| sin(theta)`` where
    theta is the angle between them.
    """
    a_x, a_y, a_z = vec_a.data
    b_x, b_y, b_z = vec_b.data
    components = (
        a_y * b_z - a_z * b_y,
        a_z * b_x - a_x * b_z,
        a_x * b_y - a_y * b_x,
    )
    return Vector(components)
def dot_product(vec_a: Vector, vec_b: Vector) -> float:
    """
    Dot product of two equal-length vectors.

    Given vectors A and B, ``A · B == ||A|| ||B|| cos(theta)`` where
    theta is the angle between them.
    """
    # Vector.__mul__ implements the dot product for Vector operands.
    return vec_a * vec_b
ZERO_3VECTOR = Vector((0, 0, 0))
def angle_between(vec_a: Vector, vec_b: Vector) -> float:
    """
    Determine the angle between two vectors, in degrees.
    This is calculated using the definition of the dot product and
    knowing the size of the vectors.

    Raises ValueError for non-three-dimensional or zero vectors, since
    the angle is undefined in both cases.
    """
    if len(vec_a) != 3 or len(vec_b) != 3:
        raise ValueError(
            "Can only find angle between three-dimensional vectors, not {!r} and {!r}".format(
                vec_a,
                vec_b,
            ),
        )
    if ZERO_3VECTOR in (vec_a, vec_b):
        # Fixed message: the guard rejects *zero* vectors (compared against
        # ZERO_3VECTOR), not "empty" ones as the old text claimed.
        raise ValueError("Cannot find the angle between a zero vector and another")
    dp = dot_product(vec_a, vec_b)
    mod_ab = vec_a.magnitude() * vec_b.magnitude()
    cos_theta = dp / mod_ab
    if abs(cos_theta) > 1:
        # Round small floating point rounding errors to avoid a math domain
        # error from math.acos, without masking genuine errors.
        cos_theta = round(cos_theta, 15)
    theta_rads = math.acos(cos_theta)
    theta_degrees = math.degrees(theta_rads)
    return theta_degrees
def are_same_direction(vec_a: Vector, vec_b: Vector) -> bool:
    """Whether the two vectors point (almost) the same way.

    Zero vectors have no direction, so they never match anything.
    """
    if ZERO_3VECTOR in (vec_a, vec_b):
        return False
    return angle_between(vec_a, vec_b) < DEGREES_TOLERANCE
def unit_vector(direction_vector: Vector) -> Vector:
    """Return *direction_vector* scaled to magnitude 1.

    The zero vector has no direction and is returned unchanged.
    """
    length = direction_vector.magnitude()
    return direction_vector / length if length else direction_vector
| 27.478261 | 94 | 0.598299 |
a38abf90f03f41677b395b672f25055bbeb7ffd5 | 8,783 | py | Python | ci/infra/testrunner/platforms/terraform.py | mjura/skuba | 8794e47265056e980f884846187aae1ec8455b5b | [
"Apache-2.0"
] | null | null | null | ci/infra/testrunner/platforms/terraform.py | mjura/skuba | 8794e47265056e980f884846187aae1ec8455b5b | [
"Apache-2.0"
] | null | null | null | ci/infra/testrunner/platforms/terraform.py | mjura/skuba | 8794e47265056e980f884846187aae1ec8455b5b | [
"Apache-2.0"
] | null | null | null | import hcl
import json
import os
import subprocess
from timeout_decorator import timeout
from utils import (Format, step, Utils)
class Terraform:
    """Driver around the ``terraform`` CLI used to provision and tear down
    the test infrastructure of one platform.

    Platform-specific subclasses are expected to override the
    ``_env_setup_cmd``, ``_cleanup_platform`` and ``_get_platform_logs``
    hooks.
    """

    def __init__(self, conf, platform):
        """``conf`` is the testrunner configuration; ``platform`` is the
        name of the platform sub-directory under ``terraform.tfdir``."""
        self.conf = conf
        self.utils = Utils(conf)
        self.tfdir = os.path.join(self.conf.terraform.tfdir, platform)
        # JSON dump of `terraform output`; also serves as the "a deployment
        # already exists" marker checked by _check_tf_deployed().
        self.tfjson_path = os.path.join(conf.workspace, "tfout.json")
        self.state = None  # lazily-loaded terraform.tfstate contents

    def _env_setup_cmd(self):
        """Returns the command for setting up the platform environment"""
        return ""

    def _cleanup_platform(self):
        """Platform specific cleanup. Expected to be overridden by platforms"""

    def _get_platform_logs(self):
        """Platform specific logs to collect. Expected to be overridden by platforms"""
        return False

    def cleanup(self):
        """Best-effort teardown: destroy the platform, then remove the
        generated terraform artifacts.

        Every step is attempted even if earlier ones fail; a single
        exception summarising the failure is raised at the end.
        """
        cleanup_failure = False
        try:
            self._cleanup_platform()
        except Exception as ex:
            cleanup_failure = True
            print(Format.alert("Received the following error {}".format(ex)))
            print("Attempting to finish cleanup")

        dirs = [os.path.join(self.conf.workspace, "tfout"),
                self.tfjson_path]
        for tmp_dir in dirs:
            try:
                self.utils.runshellcommand("rm -rf {}".format(tmp_dir))
            except Exception as ex:
                cleanup_failure = True
                print("Received the following error {}".format(ex))
                print("Attempting to finish cleanup")

        if cleanup_failure:
            raise Exception(Format.alert("Failure(s) during cleanup"))

    @timeout(600)
    @step
    def gather_logs(self):
        """Collect cloud-init and kubelet logs from every master and worker
        node into ``conf.log_dir``. Returns truthy when any collection
        failed."""
        logging_errors = False

        node_ips = {"master": self.get_nodes_ipaddrs("master"),
                    "worker": self.get_nodes_ipaddrs("worker")}
        logs = {"files": ["/var/run/cloud-init/status.json",
                          "/var/log/cloud-init-output.log",
                          "/var/log/cloud-init.log"],
                "dirs": ["/var/log/pods"],
                "services": ["kubelet"]}

        if not os.path.isdir(self.conf.log_dir):
            os.mkdir(self.conf.log_dir)
            print(f"Created log dir {self.conf.log_dir}")

        for node_type in node_ips:
            for ip_address in node_ips[node_type]:
                node_log_dir = self._create_node_log_dir(ip_address, node_type, self.conf.log_dir)
                logging_error = self.utils.collect_remote_logs(ip_address, logs, node_log_dir)
                if logging_error:
                    logging_errors = logging_error

        platform_log_error = self._get_platform_logs()
        if platform_log_error:
            logging_errors = platform_log_error

        return logging_errors

    @step
    def provision(self, num_master=-1, num_worker=-1):
        """ Create and apply terraform plan.

        ``num_master``/``num_worker`` override the configured node counts
        when set to a non-negative value.
        """
        if num_master > -1 or num_worker > -1:
            print("Overriding number of nodes")
            if num_master > -1:
                self.conf.master.count = num_master
                print(" Masters:{} ".format(num_master))
            if num_worker > -1:
                self.conf.worker.count = num_worker
                print(" Workers:{} ".format(num_worker))

        print("Init terraform")
        self._check_tf_deployed()
        self.utils.setup_ssh()
        init_cmd = "terraform init"
        if self.conf.terraform.plugin_dir:
            print("Installing plugins from {}".format(self.conf.terraform.plugin_dir))
            init_cmd = init_cmd+" -plugin-dir="+self.conf.terraform.plugin_dir
        self._runshellcommandterraform(init_cmd)
        self._runshellcommandterraform("terraform version")
        self._generate_tfvars_file()
        plan_cmd = ("{env_setup};"
                    " terraform plan "
                    " -out {workspace}/tfout".format(
                        env_setup=self._env_setup_cmd(),
                        workspace=self.conf.workspace))
        apply_cmd = ("{env_setup};"
                     "terraform apply -auto-approve {workspace}/tfout".format(
                         env_setup=self._env_setup_cmd(),
                         workspace=self.conf.workspace))

        # TODO: define the number of retries as a configuration parameter
        for retry in range(1, 5):
            print(Format.alert("Run terraform plan - execution # {}".format(retry)))
            self._runshellcommandterraform(plan_cmd)
            print(Format.alert("Run terraform apply - execution # {}".format(retry)))
            try:
                self._runshellcommandterraform(apply_cmd)
                break
            except Exception:
                print("Failed terraform apply n. %d" % retry)
                if retry == 4:
                    print(Format.alert("Failed Openstack Terraform deployment"))
                    raise
            finally:
                # Dump `terraform output` after every attempt, even failures.
                self._fetch_terraform_output()

    @staticmethod
    def _create_node_log_dir(ip_address, node_type, log_dir_path):
        """Create (if needed) and return the per-node log directory."""
        node_log_dir_path = os.path.join(log_dir_path, f"{node_type}_{ip_address.replace('.', '_')}")
        if not os.path.isdir(node_log_dir_path):
            os.mkdir(node_log_dir_path)
            print(f"Created log dir {node_log_dir_path}")
        return node_log_dir_path

    def _load_tfstate(self):
        """Read and cache terraform.tfstate; subsequent calls reuse it."""
        if self.state is None:
            fn = os.path.join(self.tfdir, "terraform.tfstate")
            print("Reading {}".format(fn))
            with open(fn) as f:
                self.state = json.load(f)

    def get_lb_ipaddr(self):
        """Return the load-balancer IP from the terraform state outputs."""
        self._load_tfstate()
        return self.state["modules"][0]["outputs"]["ip_load_balancer"]["value"]

    def get_nodes_ipaddrs(self, role):
        """Return the list of node IPs for ``role`` ("master" or "worker")."""
        self._load_tfstate()
        if role not in ("master", "worker"):
            raise ValueError("Invalid role: {}".format(role))
        role_key = "ip_"+role+"s"
        return self.state["modules"][0]["outputs"][role_key]["value"]

    @step
    def _fetch_terraform_output(self):
        """Dump `terraform output -json` to ``self.tfjson_path``."""
        cmd = ("{env_setup};"
               "terraform output -json >"
               "{json_f}".format(
                   env_setup=self._env_setup_cmd(),
                   json_f=self.tfjson_path))
        self._runshellcommandterraform(cmd)

    def _generate_tfvars_file(self):
        """Generate terraform tfvars file from the configured template
        (JSON or HCL), with testrunner overrides applied."""
        tfvars_template = os.path.join(self.tfdir, self.conf.terraform.tfvars)
        tfvars_final = os.path.join(self.tfdir, "terraform.tfvars.json")

        with open(tfvars_template) as f:
            if '.json' in os.path.basename(tfvars_template).lower():
                tfvars = json.load(f)
            else:
                tfvars = hcl.load(f)

        self._update_tfvars(tfvars)

        with open(tfvars_final, "w") as f:
            json.dump(tfvars, f)

    def _update_tfvars(self, tfvars):
        """Merge testrunner-derived values into ``tfvars`` in place.

        Only keys already present in the template are overridden; lists are
        extended and dicts updated rather than replaced.
        """
        new_vars = {
            "internal_net": self.conf.terraform.internal_net,
            "stack_name": self.conf.terraform.stack_name,
            "username": self.conf.nodeuser,
            "masters": self.conf.master.count,
            "workers": self.conf.worker.count,
            "authorized_keys": [self.utils.authorized_keys()]
        }

        for k, v in new_vars.items():
            if tfvars.get(k) is not None:
                if isinstance(v, list):
                    tfvars[k] = tfvars[k] + v
                elif isinstance(v, dict):
                    tfvars[k].update(v)
                else:
                    tfvars[k] = v

        # Update mirror urls
        repos = tfvars.get("repositories")
        if self.conf.terraform.mirror and repos is not None:
            for name, url in repos.items():
                tfvars["repositories"][name] = url.replace("download.suse.de", self.conf.terraform.mirror)

    def _runshellcommandterraform(self, cmd, env=None):
        """Running terraform command in {terraform.tfdir}/{platform}"""
        cwd = self.tfdir
        # Build a fresh environment dict per call. The previous `env={}`
        # default was a shared mutable default argument that was mutated
        # (SSH_AUTH_SOCK/PATH written into it) on every invocation.
        env = {} if env is None else dict(env)
        # Terraform needs PATH and SSH_AUTH_SOCK
        sock_fn = self.utils.ssh_sock_fn()
        env["SSH_AUTH_SOCK"] = sock_fn
        env["PATH"] = os.environ['PATH']
        print(Format.alert("$ {} > {}".format(cwd, cmd)))
        subprocess.check_call(cmd, cwd=cwd, shell=True, env=env)

    def _check_tf_deployed(self):
        """Fail fast when a previous deployment's tfout.json still exists."""
        if os.path.exists(self.tfjson_path):
            # Dropped the stray "{}" placeholder that was printed literally
            # because .format() was never called on this message.
            raise Exception(Format.alert("tf file found. Please run cleanup and try again"))

    # TODO: this function is currently not used. Identify points where it should
    # be invoked
    def _verify_tf_dependency(self):
        """Fail when no deployment exists (tfout.json missing)."""
        if not os.path.exists(self.tfjson_path):
            # Same stray-"{}" fix as in _check_tf_deployed above.
            raise Exception(Format.alert("tf file not found. Please run terraform and try again"))
| 36.595833 | 106 | 0.580439 |
6ae2bfe9690305fe8239169ce4e1cac0913037a7 | 3,215 | py | Python | testaskjunoace.py | jpvelsamy/hotdog | df45cdc0b9e6abfecd16a43f75f1671e51cbc47c | [
"Apache-2.0"
] | null | null | null | testaskjunoace.py | jpvelsamy/hotdog | df45cdc0b9e6abfecd16a43f75f1671e51cbc47c | [
"Apache-2.0"
] | null | null | null | testaskjunoace.py | jpvelsamy/hotdog | df45cdc0b9e6abfecd16a43f75f1671e51cbc47c | [
"Apache-2.0"
] | null | null | null | import logging
import numpy as np
import pandas as pd
import sklearn
from sklearn.model_selection import train_test_split
from tensorflow import keras
from tensorflow.keras import layers
logger = logging.getLogger("ACE")  # module-level logger used by TestAskJunoACE
class TestAskJunoACE:
    """Experimental Keras regression models over an ad-campaign CSV export.

    Both ``fit_1`` and ``fit_2`` train the same small dense network to
    predict the last CSV column ("cpr") and write the predictions merged
    with the input data to a hard-coded desktop path.
    """
    def __init__(self):
        # K-fold settings; not referenced by fit_1/fit_2 below.
        self.k_fold_count = 4
        self.num_epochs = 500
        self.all_mae_histories = []
    def fit_1(self, file_name):
        """Train on *file_name* and dump merged predictions to outcome.csv."""
        names = ["reach", "impressions", "results", "amount", "frequency", "clicks", "cpc", "ctr", "cpreach", "cpm",
                 "engagement", "cpr"]
        data = pd.read_csv(file_name, engine='c', dtype='float64', names=names, header=0, skiprows=0)
        # Standardise every column to mean 0 / std 1.
        # NOTE(review): statistics are computed over the whole file before the
        # train/test split, so held-out rows leak into the scaling — confirm
        # whether that is acceptable here.
        mean = data.mean(axis=0)
        data -= mean
        std = data.std(axis=0)
        data /= std
        x = data.iloc[:, 0:10]  # first 10 columns as features ("engagement" and "cpr" excluded)
        y = data.iloc[:, -1]  # target column: "cpr"
        x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.25)
        model = keras.Sequential([
            layers.Dense(64, activation="relu", input_shape=(x_train.shape[1],)),
            layers.Dense(64, activation="relu"),
            layers.Dense(1)
        ])
        model.compile(optimizer="rmsprop", loss="mse", metrics=["mae"])
        model.fit(x_train, y_train, epochs=130, batch_size=16, verbose=0)
        test_mse_score, test_mae_score = model.evaluate(x_test, y_test)
        logger.info(f'mse score #{test_mse_score}, mae score #{test_mae_score}')
        #https://stackoverflow.com/questions/40729162/merging-results-from-model-predict-with-original-pandas-dataframe
        y_hats = model.predict(x_test)
        # NOTE(review): y_test is a pandas Series here; assigning the 'preds'
        # label and selecting y_test[['preds']] below look DataFrame-intended —
        # verify this produces the expected merge input.
        y_test['preds'] = y_hats
        df_out = pd.merge(data, y_test[['preds']], how='left', left_index=True, right_index=True)
        df_out.to_csv('/home/jpvel/Desktop/outcome.csv', float_format='%.2f')
    def fit_2(self, file_name):
        """Variant of fit_1; differs only in how predictions are merged/saved."""
        names = ["reach", "impressions", "results", "amount", "frequency", "clicks", "cpc", "ctr", "cpreach", "cpm",
                 "engagement", "cpr"]
        data = pd.read_csv(file_name, engine='c', dtype='float64', names=names, header=0, skiprows=0)
        # Same whole-dataset standardisation as fit_1 (see leakage note there).
        mean = data.mean(axis=0)
        data -= mean
        std = data.std(axis=0)
        data /= std
        x = data.iloc[:, 0:10]
        y = data.iloc[:, -1]
        x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.25)
        model = keras.Sequential([
            layers.Dense(64, activation="relu", input_shape=(x_train.shape[1],)),
            layers.Dense(64, activation="relu"),
            layers.Dense(1)
        ])
        model.compile(optimizer="rmsprop", loss="mse", metrics=["mae"])
        model.fit(x_train, y_train, epochs=130, batch_size=16, verbose=0)
        test_mse_score, test_mae_score = model.evaluate(x_test, y_test)
        logger.info(f'mse score #{test_mse_score}, mae score #{test_mae_score}')
        #https://stackoverflow.com/questions/40729162/merging-results-from-model-predict-with-original-pandas-dataframe
        outcome = model.predict(x_test)
        # NOTE(review): same Series-vs-DataFrame concern as in fit_1.
        y_test['preds'] = outcome
        df_out = pd.merge(data, y_test, how='left', left_index=True, right_index=True)
        logger.info(df_out.head(10))
        df_out.to_csv('/home/jpvel/Desktop/outcome2.csv', float_format='%.2f')
5bc5504fb354eddcdf7d5fb926fdfa7bb14e61a1 | 3,207 | py | Python | lehmer.py | mateuszchudyk/lehmer | 89b90d63afb3f7fef5525988f3982315288dfc0c | [
"MIT"
] | null | null | null | lehmer.py | mateuszchudyk/lehmer | 89b90d63afb3f7fef5525988f3982315288dfc0c | [
"MIT"
] | null | null | null | lehmer.py | mateuszchudyk/lehmer | 89b90d63afb3f7fef5525988f3982315288dfc0c | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""Implementation of Lehmer Code.
Permutation - an array of integers. For the given interval, the array has to contains all numbers
for the interval and each of the number has to appear only once.
GitHub: https://github.com/mateuszchudyk/lehmer
"""
__author__ = "Mateusz Chudyk"
__license__ = "MIT"
def static_var(varname, value):
    """Decorator factory emulating a C-style function-static variable.

    Attaches attribute *varname*, initialised to *value*, to the decorated
    function and returns the function unchanged.
    """
    def decorate(func):
        setattr(func, varname, value)
        return func
    return decorate


@static_var("lut", [1])
def factorial(n):
    """Return ``n!``, memoised in a lookup table stored on the function."""
    lut = factorial.lut
    while len(lut) <= n:
        lut.append(lut[-1] * len(lut))
    return lut[n]


def encode(permutation):
    """Return Lehmer Code of the given permutation.

    ``False`` is returned when the input is not a valid permutation of a
    contiguous integer interval (kept for backward compatibility).
    """
    if not permutation:
        return False
    lowest = min(permutation)
    counts = [0] * (max(permutation) - lowest + 1)
    for value in permutation:
        counts[value - lowest] += 1
    if any(count != 1 for count in counts):
        return False
    size = len(permutation)
    code = 0
    for i, value in enumerate(permutation):
        # Number of later elements smaller than this one = the i-th digit
        # of the factorial-number-system representation.
        smaller_later = sum(1 for later in permutation[i + 1:] if later < value)
        code += smaller_later * factorial(size - 1 - i)
    return code


def decode(length, lehmer):
    """Return the permutation of ``0..length-1`` with the given Lehmer Code."""
    digits = [(lehmer % factorial(length - i)) // factorial(length - 1 - i)
              for i in range(length)]
    remaining = list(range(length))
    # Each digit selects (and removes) the digit-th smallest unused number.
    return [remaining.pop(digit) for digit in digits]
def test():
    """Self-test: exercise factorial, encode and decode against known values."""
    assert factorial(0) == 1
    assert factorial(1) == 1
    assert factorial(2) == 2
    assert factorial(3) == 6
    assert factorial(4) == 24
    # NOTE: encode([]) actually returns False (invalid permutation); the
    # assertion holds because False == 0 in Python.
    assert encode([]) == 0
    assert encode([0]) == 0
    assert encode([0, 1]) == 0
    assert encode([1, 0]) == 1
    assert encode([0, 1, 2, 3]) == 0
    assert encode([3, 1, 0, 2]) == 20
    assert encode([3, 2, 1, 0]) == 23
    assert decode(1, 0) == [0]
    assert decode(2, 1) == [1, 0]
    assert decode(3, 5) == [2, 1, 0]
    assert decode(4, 0) == [0, 1, 2, 3]
    assert decode(4, 20) == [3, 1, 0, 2]
    assert decode(4, 23) == [3, 2, 1, 0]
    assert decode(5, 119) == [4, 3, 2, 1, 0]
    assert decode(6, 719) == [5, 4, 3, 2, 1, 0]
    assert decode(7, 5039) == [6, 5, 4, 3, 2, 1, 0]
    assert decode(8, 40319) == [7, 6, 5, 4, 3, 2, 1, 0]
    assert decode(9, 362879) == [8, 7, 6, 5, 4, 3, 2, 1, 0]
    assert decode(10, 3628799) == [9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
    assert decode(11, 39916799) == [10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
    assert decode(12, 479001599) == [11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
# Run the self-test when executed as a script.
if __name__ == "__main__":
    test()
| 30.542857 | 103 | 0.569379 |
48719d9d0909166a536f93dfda26f85c6df73d8a | 559 | py | Python | packs/mistral/actions/get_results/get_task_results.py | jonico/st2contrib | 149c9c553f24360d91a14fef7ea6146707de75fd | [
"Apache-2.0"
] | 164 | 2015-01-17T16:08:33.000Z | 2021-08-03T02:34:07.000Z | packs/mistral/actions/get_results/get_task_results.py | jonico/st2contrib | 149c9c553f24360d91a14fef7ea6146707de75fd | [
"Apache-2.0"
] | 442 | 2015-01-01T11:19:01.000Z | 2017-09-06T23:26:17.000Z | packs/mistral/actions/get_results/get_task_results.py | jonico/st2contrib | 149c9c553f24360d91a14fef7ea6146707de75fd | [
"Apache-2.0"
] | 202 | 2015-01-13T00:37:40.000Z | 2020-11-07T11:30:10.000Z | import requests
from st2actions.runners.pythonrunner import Action
class TaskResults(Action):
    """StackStorm action fetching the result of a single Mistral task."""

    # Base URL for the Mistral tasks endpoint, built once from the pack
    # config on first use and shared across invocations.
    TASKS_BASE_URL = None

    def run(self, task_id):
        """Return the decoded JSON body for *task_id*."""
        response = requests.get(self._get_tasks_url(task_id))
        return response.json()

    def _get_tasks_url(self, task_id):
        """Build (lazily, once) the tasks base URL and append *task_id*."""
        if not TaskResults.TASKS_BASE_URL:
            TaskResults.TASKS_BASE_URL = (
                self.config['host'] + self.config['api_version'] + '/tasks/'
            )
        return TaskResults.TASKS_BASE_URL + task_id
3203eff6b673aa71a082ce6922bd9ba5f13f42ba | 15,272 | py | Python | uhd_restpy/testplatform/sessions/ixnetwork/globals/protocolstack/egtps5s8sgwglobals/egtps5s8sgwglobals.py | Vibaswan/ixnetwork_restpy | 239fedc7050890746cbabd71ea1e91c68d9e5cad | [
"MIT"
] | null | null | null | uhd_restpy/testplatform/sessions/ixnetwork/globals/protocolstack/egtps5s8sgwglobals/egtps5s8sgwglobals.py | Vibaswan/ixnetwork_restpy | 239fedc7050890746cbabd71ea1e91c68d9e5cad | [
"MIT"
] | null | null | null | uhd_restpy/testplatform/sessions/ixnetwork/globals/protocolstack/egtps5s8sgwglobals/egtps5s8sgwglobals.py | Vibaswan/ixnetwork_restpy | 239fedc7050890746cbabd71ea1e91c68d9e5cad | [
"MIT"
] | null | null | null | # MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from uhd_restpy.base import Base
from uhd_restpy.files import Files
class EgtpS5S8SgwGlobals(Base):
    """
    The EgtpS5S8SgwGlobals class encapsulates a list of egtpS5S8SgwGlobals resources that are managed by the user.
    A list of resources can be retrieved from the server using the EgtpS5S8SgwGlobals.find() method.
    The list can be managed by using the EgtpS5S8SgwGlobals.add() and EgtpS5S8SgwGlobals.remove() methods.
    """
    # NOTE(review): this class follows the repetitive attribute-map pattern of
    # the surrounding uhd_restpy SDM bindings and appears machine-generated —
    # prefer regenerating over hand-editing (confirm with the generator).
    __slots__ = ()
    _SDM_NAME = 'egtpS5S8SgwGlobals'
    # Maps Python-side attribute names to the server-side REST attribute names.
    _SDM_ATT_MAP = {
        'EnableDynamicQosCtrl': 'enableDynamicQosCtrl',
        'EnableGatewayArp': 'enableGatewayArp',
        'EnablePartialNegotiation': 'enablePartialNegotiation',
        'GatewayArpRequestRate': 'gatewayArpRequestRate',
        'MaxMbrUAndD': 'maxMbrUAndD',
        'MaxOutstandingGatewayArpRequests': 'maxOutstandingGatewayArpRequests',
        'MaxOutstandingReleases': 'maxOutstandingReleases',
        'MaxOutstandingRequests': 'maxOutstandingRequests',
        'ObjectId': 'objectId',
        'SendOneArpFromEachInterface': 'sendOneArpFromEachInterface',
        'SetupRateInitial': 'setupRateInitial',
        'TeardownRateInitial': 'teardownRateInitial',
        'TsSpec': 'tsSpec',
        'UseMaxRatesForDcp': 'useMaxRatesForDcp',
    }
    def __init__(self, parent):
        super(EgtpS5S8SgwGlobals, self).__init__(parent)
    @property
    def GlobalEgtpApnS5S8(self):
        """
        Returns
        -------
        - obj(uhd_restpy.testplatform.sessions.ixnetwork.globals.protocolstack.egtps5s8sgwglobals.globalegtpapns5s8.globalegtpapns5s8.GlobalEgtpApnS5S8): An instance of the GlobalEgtpApnS5S8 class
        Raises
        ------
        - ServerError: The server has encountered an uncategorized error condition
        """
        from uhd_restpy.testplatform.sessions.ixnetwork.globals.protocolstack.egtps5s8sgwglobals.globalegtpapns5s8.globalegtpapns5s8 import GlobalEgtpApnS5S8
        return GlobalEgtpApnS5S8(self)
    @property
    def GlobalTrafficProfileS5S8(self):
        """
        Returns
        -------
        - obj(uhd_restpy.testplatform.sessions.ixnetwork.globals.protocolstack.egtps5s8sgwglobals.globaltrafficprofiles5s8.globaltrafficprofiles5s8.GlobalTrafficProfileS5S8): An instance of the GlobalTrafficProfileS5S8 class
        Raises
        ------
        - ServerError: The server has encountered an uncategorized error condition
        """
        from uhd_restpy.testplatform.sessions.ixnetwork.globals.protocolstack.egtps5s8sgwglobals.globaltrafficprofiles5s8.globaltrafficprofiles5s8 import GlobalTrafficProfileS5S8
        return GlobalTrafficProfileS5S8(self)
    @property
    def EnableDynamicQosCtrl(self):
        """
        Returns
        -------
        - bool: Enable Dynamic QoS Enforcement
        """
        return self._get_attribute(self._SDM_ATT_MAP['EnableDynamicQosCtrl'])
    @EnableDynamicQosCtrl.setter
    def EnableDynamicQosCtrl(self, value):
        self._set_attribute(self._SDM_ATT_MAP['EnableDynamicQosCtrl'], value)
    @property
    def EnableGatewayArp(self):
        """
        Returns
        -------
        - bool: When enabled, every IP address will ARP the specified gateway.
        """
        return self._get_attribute(self._SDM_ATT_MAP['EnableGatewayArp'])
    @EnableGatewayArp.setter
    def EnableGatewayArp(self, value):
        self._set_attribute(self._SDM_ATT_MAP['EnableGatewayArp'], value)
    @property
    def EnablePartialNegotiation(self):
        """
        Returns
        -------
        - bool:
        """
        return self._get_attribute(self._SDM_ATT_MAP['EnablePartialNegotiation'])
    @EnablePartialNegotiation.setter
    def EnablePartialNegotiation(self, value):
        self._set_attribute(self._SDM_ATT_MAP['EnablePartialNegotiation'], value)
    @property
    def GatewayArpRequestRate(self):
        """
        Returns
        -------
        - number: Maximum ARP request rate
        """
        return self._get_attribute(self._SDM_ATT_MAP['GatewayArpRequestRate'])
    @GatewayArpRequestRate.setter
    def GatewayArpRequestRate(self, value):
        self._set_attribute(self._SDM_ATT_MAP['GatewayArpRequestRate'], value)
    @property
    def MaxMbrUAndD(self):
        """
        Returns
        -------
        - number:
        """
        return self._get_attribute(self._SDM_ATT_MAP['MaxMbrUAndD'])
    @MaxMbrUAndD.setter
    def MaxMbrUAndD(self, value):
        self._set_attribute(self._SDM_ATT_MAP['MaxMbrUAndD'], value)
    @property
    def MaxOutstandingGatewayArpRequests(self):
        """
        Returns
        -------
        - number: Threshold at which the plugin begins throttling back the number of new ARP requests sent out.
        """
        return self._get_attribute(self._SDM_ATT_MAP['MaxOutstandingGatewayArpRequests'])
    @MaxOutstandingGatewayArpRequests.setter
    def MaxOutstandingGatewayArpRequests(self, value):
        self._set_attribute(self._SDM_ATT_MAP['MaxOutstandingGatewayArpRequests'], value)
    @property
    def MaxOutstandingReleases(self):
        """
        Returns
        -------
        - number:
        """
        return self._get_attribute(self._SDM_ATT_MAP['MaxOutstandingReleases'])
    @MaxOutstandingReleases.setter
    def MaxOutstandingReleases(self, value):
        self._set_attribute(self._SDM_ATT_MAP['MaxOutstandingReleases'], value)
    @property
    def MaxOutstandingRequests(self):
        """
        Returns
        -------
        - number:
        """
        return self._get_attribute(self._SDM_ATT_MAP['MaxOutstandingRequests'])
    @MaxOutstandingRequests.setter
    def MaxOutstandingRequests(self, value):
        self._set_attribute(self._SDM_ATT_MAP['MaxOutstandingRequests'], value)
    @property
    def ObjectId(self):
        """
        Returns
        -------
        - str: Unique identifier for this object
        """
        return self._get_attribute(self._SDM_ATT_MAP['ObjectId'])
    @property
    def SendOneArpFromEachInterface(self):
        """
        Returns
        -------
        - bool: When set, each interface will send one ARP request.
        """
        return self._get_attribute(self._SDM_ATT_MAP['SendOneArpFromEachInterface'])
    @SendOneArpFromEachInterface.setter
    def SendOneArpFromEachInterface(self, value):
        self._set_attribute(self._SDM_ATT_MAP['SendOneArpFromEachInterface'], value)
    @property
    def SetupRateInitial(self):
        """
        Returns
        -------
        - number: Initial setup rate
        """
        return self._get_attribute(self._SDM_ATT_MAP['SetupRateInitial'])
    @SetupRateInitial.setter
    def SetupRateInitial(self, value):
        self._set_attribute(self._SDM_ATT_MAP['SetupRateInitial'], value)
    @property
    def TeardownRateInitial(self):
        """
        Returns
        -------
        - number: Initial teardown rate
        """
        return self._get_attribute(self._SDM_ATT_MAP['TeardownRateInitial'])
    @TeardownRateInitial.setter
    def TeardownRateInitial(self, value):
        self._set_attribute(self._SDM_ATT_MAP['TeardownRateInitial'], value)
    @property
    def TsSpec(self):
        """
        Returns
        -------
        - str:
        """
        return self._get_attribute(self._SDM_ATT_MAP['TsSpec'])
    @TsSpec.setter
    def TsSpec(self, value):
        self._set_attribute(self._SDM_ATT_MAP['TsSpec'], value)
    @property
    def UseMaxRatesForDcp(self):
        """
        Returns
        -------
        - bool: Use default rates (DCP mode)
        """
        return self._get_attribute(self._SDM_ATT_MAP['UseMaxRatesForDcp'])
    @UseMaxRatesForDcp.setter
    def UseMaxRatesForDcp(self, value):
        self._set_attribute(self._SDM_ATT_MAP['UseMaxRatesForDcp'], value)
    def update(self, EnableDynamicQosCtrl=None, EnableGatewayArp=None, EnablePartialNegotiation=None, GatewayArpRequestRate=None, MaxMbrUAndD=None, MaxOutstandingGatewayArpRequests=None, MaxOutstandingReleases=None, MaxOutstandingRequests=None, SendOneArpFromEachInterface=None, SetupRateInitial=None, TeardownRateInitial=None, TsSpec=None, UseMaxRatesForDcp=None):
        """Updates egtpS5S8SgwGlobals resource on the server.
        Args
        ----
        - EnableDynamicQosCtrl (bool): Enable Dynamic QoS Enforcement
        - EnableGatewayArp (bool): When enabled, every IP address will ARP the specified gateway.
        - EnablePartialNegotiation (bool):
        - GatewayArpRequestRate (number): Maximum ARP request rate
        - MaxMbrUAndD (number):
        - MaxOutstandingGatewayArpRequests (number): Threshold at which the plugin begins throttling back the number of new ARP requests sent out.
        - MaxOutstandingReleases (number):
        - MaxOutstandingRequests (number):
        - SendOneArpFromEachInterface (bool): When set, each interface will send one ARP request.
        - SetupRateInitial (number): Initial setup rate
        - TeardownRateInitial (number): Initial teardown rate
        - TsSpec (str):
        - UseMaxRatesForDcp (bool): Use default rates (DCP mode)
        Raises
        ------
        - ServerError: The server has encountered an uncategorized error condition
        """
        return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))
    def add(self, EnableDynamicQosCtrl=None, EnableGatewayArp=None, EnablePartialNegotiation=None, GatewayArpRequestRate=None, MaxMbrUAndD=None, MaxOutstandingGatewayArpRequests=None, MaxOutstandingReleases=None, MaxOutstandingRequests=None, SendOneArpFromEachInterface=None, SetupRateInitial=None, TeardownRateInitial=None, TsSpec=None, UseMaxRatesForDcp=None):
        """Adds a new egtpS5S8SgwGlobals resource on the server and adds it to the container.
        Args
        ----
        - EnableDynamicQosCtrl (bool): Enable Dynamic QoS Enforcement
        - EnableGatewayArp (bool): When enabled, every IP address will ARP the specified gateway.
        - EnablePartialNegotiation (bool):
        - GatewayArpRequestRate (number): Maximum ARP request rate
        - MaxMbrUAndD (number):
        - MaxOutstandingGatewayArpRequests (number): Threshold at which the plugin begins throttling back the number of new ARP requests sent out.
        - MaxOutstandingReleases (number):
        - MaxOutstandingRequests (number):
        - SendOneArpFromEachInterface (bool): When set, each interface will send one ARP request.
        - SetupRateInitial (number): Initial setup rate
        - TeardownRateInitial (number): Initial teardown rate
        - TsSpec (str):
        - UseMaxRatesForDcp (bool): Use default rates (DCP mode)
        Returns
        -------
        - self: This instance with all currently retrieved egtpS5S8SgwGlobals resources using find and the newly added egtpS5S8SgwGlobals resources available through an iterator or index
        Raises
        ------
        - ServerError: The server has encountered an uncategorized error condition
        """
        return self._create(self._map_locals(self._SDM_ATT_MAP, locals()))
    def remove(self):
        """Deletes all the contained egtpS5S8SgwGlobals resources in this instance from the server.
        Raises
        ------
        - NotFoundError: The requested resource does not exist on the server
        - ServerError: The server has encountered an uncategorized error condition
        """
        self._delete()
    def find(self, EnableDynamicQosCtrl=None, EnableGatewayArp=None, EnablePartialNegotiation=None, GatewayArpRequestRate=None, MaxMbrUAndD=None, MaxOutstandingGatewayArpRequests=None, MaxOutstandingReleases=None, MaxOutstandingRequests=None, ObjectId=None, SendOneArpFromEachInterface=None, SetupRateInitial=None, TeardownRateInitial=None, TsSpec=None, UseMaxRatesForDcp=None):
        """Finds and retrieves egtpS5S8SgwGlobals resources from the server.
        All named parameters are evaluated on the server using regex. The named parameters can be used to selectively retrieve egtpS5S8SgwGlobals resources from the server.
        To retrieve an exact match ensure the parameter value starts with ^ and ends with $
        By default the find method takes no parameters and will retrieve all egtpS5S8SgwGlobals resources from the server.
        Args
        ----
        - EnableDynamicQosCtrl (bool): Enable Dynamic QoS Enforcement
        - EnableGatewayArp (bool): When enabled, every IP address will ARP the specified gateway.
        - EnablePartialNegotiation (bool):
        - GatewayArpRequestRate (number): Maximum ARP request rate
        - MaxMbrUAndD (number):
        - MaxOutstandingGatewayArpRequests (number): Threshold at which the plugin begins throttling back the number of new ARP requests sent out.
        - MaxOutstandingReleases (number):
        - MaxOutstandingRequests (number):
        - ObjectId (str): Unique identifier for this object
        - SendOneArpFromEachInterface (bool): When set, each interface will send one ARP request.
        - SetupRateInitial (number): Initial setup rate
        - TeardownRateInitial (number): Initial teardown rate
        - TsSpec (str):
        - UseMaxRatesForDcp (bool): Use default rates (DCP mode)
        Returns
        -------
        - self: This instance with matching egtpS5S8SgwGlobals resources retrieved from the server available through an iterator or index
        Raises
        ------
        - ServerError: The server has encountered an uncategorized error condition
        """
        return self._select(self._map_locals(self._SDM_ATT_MAP, locals()))
    def read(self, href):
        """Retrieves a single instance of egtpS5S8SgwGlobals data from the server.
        Args
        ----
        - href (str): An href to the instance to be retrieved
        Returns
        -------
        - self: This instance with the egtpS5S8SgwGlobals resources from the server available through an iterator or index
        Raises
        ------
        - NotFoundError: The requested resource does not exist on the server
        - ServerError: The server has encountered an uncategorized error condition
        """
        return self._read(href)
85f5630f9eb393f3f0c95f1f846853bbe04df7ca | 3,629 | py | Python | raiden_contracts/tests/test_token.py | loredanacirstea/raiden-contracts | 1510812e54dc0e98583d17b5a8e629a3119751ac | [
"MIT"
] | null | null | null | raiden_contracts/tests/test_token.py | loredanacirstea/raiden-contracts | 1510812e54dc0e98583d17b5a8e629a3119751ac | [
"MIT"
] | null | null | null | raiden_contracts/tests/test_token.py | loredanacirstea/raiden-contracts | 1510812e54dc0e98583d17b5a8e629a3119751ac | [
"MIT"
] | null | null | null | import pytest
from eth_tester.exceptions import TransactionFailed
from raiden_contracts.constants import (
CONTRACT_HUMAN_STANDARD_TOKEN,
CONTRACT_CUSTOM_TOKEN,
)
from raiden_contracts.utils.bytecode import runtime_hexcode
def test_token_mint(web3, custom_token, get_accounts):
    """ Use the mint() function of the custom token contract """
    (A, B) = get_accounts(2)
    token = custom_token
    multiplier = custom_token.functions.multiplier().call()
    supply = token.functions.totalSupply().call()
    token_pre_balance = web3.eth.getBalance(token.address)
    # mint() credits the caller's own account.
    tokens_a = 50 * multiplier
    token.functions.mint(tokens_a).transact({'from': A})
    assert token.functions.balanceOf(A).call() == tokens_a
    assert token.functions.balanceOf(B).call() == 0
    assert token.functions.totalSupply().call() == supply + tokens_a
    # Minting must not change the contract's ETH balance.
    assert web3.eth.getBalance(token.address) == token_pre_balance
    # mintFor() credits an arbitrary beneficiary (B) while A sends the tx.
    tokens_b = 50 * multiplier
    token.functions.mintFor(tokens_b, B).transact({'from': A})
    assert token.functions.balanceOf(A).call() == tokens_a
    assert token.functions.balanceOf(B).call() == tokens_b
    assert token.functions.totalSupply().call() == supply + tokens_a + tokens_b
    assert web3.eth.getBalance(token.address) == token_pre_balance
def test_approve_transfer(web3, custom_token, get_accounts):
    """ Use the approve() function of the custom token contract """
    (A, B) = get_accounts(2)
    token = custom_token
    token.functions.mint(50).transact({'from': A})
    initial_balance_A = token.functions.balanceOf(A).call()
    initial_balance_B = token.functions.balanceOf(B).call()
    to_transfer = 20
    token.functions.approve(B, to_transfer).transact({'from': A})
    # B pulls the approved amount from A via transferFrom().
    token.functions.transferFrom(A, B, to_transfer).transact({'from': B})
    assert token.functions.balanceOf(B).call() == initial_balance_B + to_transfer
    assert token.functions.balanceOf(A).call() == initial_balance_A - to_transfer
    # The allowance is fully consumed by the transfer above.
    assert custom_token.functions.allowance(A, B).call() == 0
    assert custom_token.functions.approve(B, 25).transact({'from': A})
    assert custom_token.functions.allowance(A, B).call() == 25
    # No allowance was ever granted to the token contract itself.
    assert custom_token.functions.allowance(A, token.address).call() == 0
def test_token_transfer_funds(web3, custom_token, get_accounts, txn_gas):
    """ transferFunds() should fail when the ETH balance of the contract is zero """
    (A, B) = get_accounts(2)
    contract = custom_token
    # Sanity checks on the deployed token.
    assert contract.functions.multiplier().call() > 0
    assert contract.functions.totalSupply().call() > 0
    owner = contract.functions.owner_address().call()

    assert web3.eth.getBalance(contract.address) == 0
    with pytest.raises(TransactionFailed):
        contract.functions.transferFunds().transact({'from': owner})

    # Minting tokens must not change the contract's ETH balance.
    contract.functions.mint(50).transact({'from': A})
    assert web3.eth.getBalance(contract.address) == 0
def test_custom_token(custom_token, web3, contracts_manager):
    """The deployed CustomToken bytecode must match the compiled artifact."""
    deployed = web3.eth.getCode(custom_token.address).hex()
    expected = runtime_hexcode(contracts_manager, CONTRACT_CUSTOM_TOKEN)
    assert deployed == expected
def test_human_standard_token(human_standard_token, web3, contracts_manager):
    """The deployed HumanStandardToken bytecode must match the compiled artifact."""
    deployed = web3.eth.getCode(human_standard_token.address).hex()
    expected = runtime_hexcode(contracts_manager, CONTRACT_HUMAN_STANDARD_TOKEN)
    assert deployed == expected
| 42.197674 | 89 | 0.736842 |
85c0d18fcf111ef3fa59c710b7e2f1c9b1da1eb9 | 329 | py | Python | tests/system/mqttbeat.py | plinde/mqttbeat | 64e3bf19a45686a5cc6aecae58d9d2e140e7c915 | [
"Apache-2.0"
] | 19 | 2017-07-20T11:39:44.000Z | 2021-11-08T11:18:05.000Z | tests/system/mqttbeat.py | plinde/mqttbeat | 64e3bf19a45686a5cc6aecae58d9d2e140e7c915 | [
"Apache-2.0"
] | 8 | 2017-07-20T21:13:17.000Z | 2020-01-10T08:08:34.000Z | tests/system/mqttbeat.py | plinde/mqttbeat | 64e3bf19a45686a5cc6aecae58d9d2e140e7c915 | [
"Apache-2.0"
] | 16 | 2017-08-19T14:58:18.000Z | 2022-01-20T10:09:06.000Z | import sys
sys.path.append('../../vendor/github.com/elastic/beats/libbeat/tests/system')
from beat.beat import TestCase
class BaseTest(TestCase):
    """Base class for mqttbeat system tests: points libbeat's test harness
    at the mqttbeat binary and build output once per test class."""

    @classmethod
    def setUpClass(cls):
        # Fixed: the classmethod receiver was named `self`; `cls` is the
        # conventional (and less misleading) name — the attributes are set
        # on the class, not on an instance.
        cls.beat_name = "mqttbeat"
        cls.build_path = "../../build/system-tests/"
        cls.beat_path = "../../mqttbeat.test"
| 25.307692 | 77 | 0.662614 |
aa85d983c84fed61169afdfabd938f86c232db65 | 512 | py | Python | discord_bot/commands/silent_close.py | ehefk/djqueue | 3d3cd55bd4d66c16bdbb20ad87eca4626ca7c794 | [
"Unlicense"
] | 1 | 2021-04-30T03:26:58.000Z | 2021-04-30T03:26:58.000Z | discord_bot/commands/silent_close.py | ehefk/djqueue | 3d3cd55bd4d66c16bdbb20ad87eca4626ca7c794 | [
"Unlicense"
] | null | null | null | discord_bot/commands/silent_close.py | ehefk/djqueue | 3d3cd55bd4d66c16bdbb20ad87eca4626ca7c794 | [
"Unlicense"
async def Main(self, message, command, arguments):
    """Close the currently open (or locked) queue and announce it in the
    public channel; otherwise tell the invoker there is nothing to close."""
    channel = await self.fetch_channel(self.secrets["PublicChannel"])
    # The same filter selects the queue to find and the document to replace.
    open_or_locked = {'$or': [{"Status": "Open"}, {"Status": "Locked"}]}
    queue = self.mongo.db["QueueHistory"].find_one(open_or_locked)
    if not queue:
        await message.channel.send("The Queue is not open!")
        return
    queue["Status"] = "Closed"
    self.mongo.db["QueueHistory"].replace_one(open_or_locked, queue)
    await channel.send("The Queue is now closed!")
| 42.666667 | 109 | 0.621094 |
b903400301ee5aae75e6a47ab962cca16940e2b0 | 2,127 | py | Python | tests/argparse/test_advanced.py | FredHappyface/Python.Cli2Gui | 688b2b552a22dfbaec4199bdd47eeef6d8649274 | [
"MIT"
] | 2 | 2020-03-13T15:12:27.000Z | 2020-03-19T05:06:10.000Z | tests/argparse/test_advanced.py | FredHappyface/Python.Cli2Gui | 688b2b552a22dfbaec4199bdd47eeef6d8649274 | [
"MIT"
] | 1 | 2020-03-13T15:16:21.000Z | 2020-03-16T10:33:01.000Z | tests/argparse/test_advanced.py | FredHappyface/Python.Cli2Gui | 688b2b552a22dfbaec4199bdd47eeef6d8649274 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""Tests an advanced parser
"""
from __future__ import annotations
import argparse
import sys
from pathlib import Path
THISDIR = str(Path(__file__).resolve().parent)
sys.path.insert(0, str(Path(THISDIR).parent.parent))
from cli2gui import Cli2Gui
def handle(args):
    """Echo the parsed argument namespace (used as the Cli2Gui run callback)."""
    sys.stdout.write("%s\n" % (args,))
@Cli2Gui(
    run_function=handle,
    menu={
        "File": THISDIR + "/file.md",
        "Another File": THISDIR + "/another_file.md",
    },
)
def cli():
    """Build the demo parser and dispatch the parsed args to handle().

    NOTE: argument registration order is preserved deliberately — it
    determines the order options appear in --help output.
    """
    arg_parser = argparse.ArgumentParser("Simple Parser")
    # Positional and file
    arg_parser.add_argument("positional", help="positional arg")
    arg_parser.add_argument("positional-file", type=argparse.FileType("r"),
                            help="positional arg for a file")
    arg_parser.add_argument("--optional", help="optional arg")
    # Store true, false, store, count, choices
    arg_parser.add_argument("--store-true", action="store_true", help="optional arg store true")
    arg_parser.add_argument("--store-false", action="store_false", help="optional arg store false")
    arg_parser.add_argument("--store", action="store", help="optional arg store")
    arg_parser.add_argument("--count", action="count", help="optional arg count")
    arg_parser.add_argument("--choices", action="store",
                            choices=["choice1", "choice2"],
                            help="optional arg store with choices")
    arg_parser.add_argument("--somefile", type=argparse.FileType("r"), required=False)
    section = arg_parser.add_argument_group(
        "choose one of the following",
        "use the following arguments to change the look of the image",
    )
    exclusive = section.add_mutually_exclusive_group()
    exclusive.add_argument("--mxg-true", action="store_true", help="mutually exclusive arg store true")
    exclusive.add_argument("--mxg-false", action="store_false", help="mutually exclusive arg store false")
    exclusive.add_argument("--mxg", action="store", help="mutually exclusive arg store")
    exclusive.add_argument("--mxg-count", action="count", help="mutually exclusive arg count")
    exclusive.add_argument("--mxg-choices", action="store",
                           choices=["choice1", "choice2"],
                           help="mutually exclusive arg store with choices")
    handle(arg_parser.parse_args())
# Launch the (GUI-wrapped) CLI. Runs on import as well — there is no
# __main__ guard, matching the original behavior of this test script.
cli()
| 27.269231 | 97 | 0.715092 |
1c6cc51d501d7a86f15c93d5717a1224217cfa09 | 2,067 | py | Python | flask/lib/python3.4/site-packages/setuptools/command/install_scripts.py | ddayguerrero/blogme | e6ee6a47310c382648eefd96634630c3bceb864f | [
"MIT"
] | 7 | 2017-04-26T12:28:22.000Z | 2021-02-09T18:59:50.000Z | flask/lib/python3.4/site-packages/setuptools/command/install_scripts.py | ddayguerrero/blogme | e6ee6a47310c382648eefd96634630c3bceb864f | [
"MIT"
] | 4 | 2017-10-24T22:44:01.000Z | 2017-10-24T22:44:19.000Z | flask/lib/python3.4/site-packages/setuptools/command/install_scripts.py | ddayguerrero/blogme | e6ee6a47310c382648eefd96634630c3bceb864f | [
"MIT"
] | 8 | 2017-06-01T08:42:16.000Z | 2020-07-23T12:30:19.000Z | from distutils.command.install_scripts import install_scripts \
as _install_scripts
from pkg_resources import Distribution, PathMetadata, ensure_directory
import os
from distutils import log
class install_scripts(_install_scripts):
    """Do normal script install, plus any egg_info wrapper scripts"""

    def initialize_options(self):
        """Extend the distutils options with the ``no_ep`` flag (suppresses
        installing entry-point wrapper scripts)."""
        _install_scripts.initialize_options(self)
        self.no_ep = False

    def run(self):
        """Run the standard script install, then generate and write the
        entry-point wrapper scripts for this distribution."""
        from setuptools.command.easy_install import get_script_args
        from setuptools.command.easy_install import sys_executable
        self.run_command("egg_info")
        if self.distribution.scripts:
            _install_scripts.run(self)  # run first to set up self.outfiles
        else:
            self.outfiles = []
        if self.no_ep:
            # don't install entry point scripts into .egg file!
            return
        # Build a Distribution describing the project being installed so
        # get_script_args can produce its console/gui wrapper scripts.
        ei_cmd = self.get_finalized_command("egg_info")
        dist = Distribution(
            ei_cmd.egg_base, PathMetadata(ei_cmd.egg_base, ei_cmd.egg_info),
            ei_cmd.egg_name, ei_cmd.egg_version,
        )
        bs_cmd = self.get_finalized_command('build_scripts')
        executable = getattr(bs_cmd, 'executable', sys_executable)
        is_wininst = getattr(
            self.get_finalized_command("bdist_wininst"), '_is_running', False
        )
        for args in get_script_args(dist, executable, is_wininst):
            self.write_script(*args)

    def write_script(self, script_name, contents, mode="t", *ignored):
        """Write an executable file to the scripts directory"""
        from setuptools.command.easy_install import chmod, current_umask
        log.info("Installing %s script to %s", script_name, self.install_dir)
        target = os.path.join(self.install_dir, script_name)
        self.outfiles.append(target)
        mask = current_umask()
        if not self.dry_run:
            ensure_directory(target)
            # Fixed: use a context manager so the file handle is released
            # even if write() raises (the original leaked it on failure).
            with open(target, "w" + mode) as f:
                f.write(contents)
            chmod(target, 0x1FF - mask)  # 0777
| 38.277778 | 77 | 0.662796 |
41bd1529eba4cfaf343cc662afcbf44edcde3557 | 1,248 | py | Python | setup.py | captain-kark/module_resources | d85453ff4f5022127874a5842449d95bb5eda234 | [
"MIT"
] | null | null | null | setup.py | captain-kark/module_resources | d85453ff4f5022127874a5842449d95bb5eda234 | [
"MIT"
] | 8 | 2019-07-24T11:18:36.000Z | 2019-08-05T06:43:32.000Z | setup.py | captain-kark/module_resources | d85453ff4f5022127874a5842449d95bb5eda234 | [
"MIT"
] | 1 | 2021-07-18T06:30:32.000Z | 2021-07-18T06:30:32.000Z | import os
import pathlib
import pkg_resources
from setuptools import setup
# this script sets the version number in CI builds, and also at install time by the user
VERSION = None
TRAVIS_TAG = os.getenv('TRAVIS_TAG')
TRAVIS_BUILD_NUMBER = os.getenv('TRAVIS_BUILD_NUMBER')
if TRAVIS_TAG or TRAVIS_BUILD_NUMBER:
    # CI build: prefer the git tag, otherwise synthesize 0.0.<build number>.
    VERSION = TRAVIS_TAG or f"0.0.{TRAVIS_BUILD_NUMBER}"
else:
    # not a CI build, user is installing package
    VERSION = pkg_resources.require('module-resources')[0].version
setup(
    name="module-resources",
    version=VERSION,
    description="Import non-python files in a project directory as python namedtuple objects.",
    long_description=(pathlib.Path(__file__).parent / "README.md").read_text(),
    long_description_content_type="text/markdown",
    url="https://github.com/captain-kark/module_resources",
    author="Andrew Yurisich",
    author_email="andrew.yurisich@gmail.com",
    license="MIT",
    classifiers=[
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.7",
    ],
    packages=["module_resources"],
    extras_require={
        'yaml': ['pyyaml']
    }
)
| 34.666667 | 95 | 0.710737 |
582e29de2a1dabf05ac552a2ed61a7f75c720a02 | 6,276 | py | Python | GR/swsh.py | paralab/SymPyGR | 3aa4164a64773b9015b83744cd104550ae465e8a | [
"MIT"
] | 7 | 2019-08-29T20:41:39.000Z | 2022-03-26T17:47:16.000Z | GR/swsh.py | paralab/SymPyGR | 3aa4164a64773b9015b83744cd104550ae465e8a | [
"MIT"
] | 2 | 2019-02-01T22:20:48.000Z | 2019-05-24T20:39:33.000Z | GR/swsh.py | paralab/SymPyGR | 3aa4164a64773b9015b83744cd104550ae465e8a | [
"MIT"
] | 1 | 2018-12-18T19:36:13.000Z | 2018-12-18T19:36:13.000Z | ##########################################################################
# module: swsh (spin weighted sperical harmonic)
# author: Milinda Fernando
# email: milinda@cs.utah.edu
# python module to generate code for far-field energy ratiation extraction (Gravitational Waves).
# (c) 2016 University of Utah, All rights reserved.
##########################################################################
import dendro as dendro
from sympy import *
from sympy.tensor.array import *
from sympy.functions.special.tensor_functions import KroneckerDelta
from sympy.utilities import numbered_symbols
from sympy.printing import print_ccode
from sympy.printing.dot import dotprint
import math as math
import numpy as np
import sys as sys
# declare the variables to evaluate spin weighted spherical harmonics.
theta=symbols('theta') #dendro.scalar("dtheta","[pp]")
phi=symbols('phi') #dendro.scalar("dphi","[pp]")
s,l,m,t,C1,C2=symbols('s,l,m,t,C1,C2')
#sYlm=dendro.scalar("sYml","[pp]")
# Summation bounds for the Wigner-d based formulation (only used by the
# commented-out `ldms` variant below, not by the active sYlm expression).
C1=Max(0,m-s)
C2=Min(l+m,l-s)
# $d^{l}_{ms}(\theta)$
#ldms=Sum((((-1)**t)*sqrt(factorial(l+m)*factorial(l-m)*factorial(l+s)*factorial(l-s))/(factorial(l+m-t)*factorial(l-s-t)*factorial(t)*factorial(t+s-m))) * ((cos(theta)/2.0)**(2*l+m-s-2*t))* ((sin(theta)/2.0)**(2*t+s-m)),(t,C1,C2))
#sYlm=((-1)**s)*sqrt((2*l+1)/(4*pi))*ldms.doit()*exp(I*m*phi)
# Symbolic closed form of the spin-weighted spherical harmonic sYlm(theta,phi);
# presumably the Goldberg et al. summation convention — TODO confirm against
# the reference used by the Dendro-GR wave-extraction module.
sYlm=((-1)**(m)) *sqrt((factorial(l+m)*factorial(l-m)*(2*l+1))/(4*pi*factorial(l+s)*factorial(l-s)))*(sin(theta/2)**(2*l)) * Sum(((factorial(l-s)*factorial(l+s))/(factorial(t)*factorial(l-s-t)*factorial(t+s-m)*factorial(l-t+m))*((-1)**(l-t-s))*exp(I*m*phi)*(cot(theta/2)**(2*t+s-m))),(t,0,l-s))
#pprint(sYlm.subs({s:1,l:1,m:0,theta:0,phi:pi}).evalf())
#Y=sqrt(3/(8*pi))*sin(theta) swsh function for s=1 l=1 m=0;
# Uniform theta/phi grid step sizes — only consumed by the disabled
# grid-based generator at the bottom of this file.
dtheta=((1e-1)*(math.pi))
dphi=((1e-1)*2*math.pi)
thetaSz = math.ceil(math.pi/dtheta)
phiSz = math.ceil(2*math.pi/dphi)
# Spin weight -2 and modes (l=2, m=0..2) — consistent with the module's
# stated purpose of gravitational-wave (far-field radiation) extraction.
S=-2
L=[2]
M=[0,1,2]
# The generated C header is emitted here; redirect to write into a file.
codeOutput=sys.stdout
## generate header files for the swsh evaluated at each point.
print("//%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%" ,file=codeOutput )
print("//",file=codeOutput)
print("//Automatic code generated by sympy based swsh module in Dendro-GR ",file=codeOutput)
print("//python module to generate code for far-field energy ratiation extraction (Gravitational Waves).",file=codeOutput)
print("//(c) 2016 University of Utah, All rights reserved.",file=codeOutput)
print("//",file=codeOutput)
print("//%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%" ,file=codeOutput )
##########################################################################
# Standard C include guard for the generated header.
print("#ifndef DENDRO_SWSH_H",file=codeOutput)
print("#define DENDRO_SWSH_H",file=codeOutput)
#evaluates the SWSH functions at the lebedev quadrature points.
inFilePrefix="/home/milinda/lebedev/lebedev_" # path to the lebedev quadrature point files.
precision=["025"]
numPts=[230]
varNamePrefix="m2Y"
print("namespace swsh { " ,file=codeOutput)
# For each (l, m) mode, evaluate sYlm at every Lebedev quadrature point and
# emit the real/imaginary parts as static C arrays into the generated header.
for li in L :
    for mj in M:
        values_real=[]
        values_imag=[]
        for p in precision:
            print("// spin weighted sperical harmonic basis evaluated at the %s lebedev quadrature points " %(p),file=codeOutput)
            inFileName=inFilePrefix+p+".txt"
            lebedev_phi=[]
            lebedev_theta=[]
            lebedev_weight=[]
            # Fixed: the quadrature file was opened without ever being
            # closed (leaked once per iteration); a context manager
            # guarantees the handle is released.
            with open(inFileName,'r') as inFile:
                for line in inFile:
                    data=line.split()
                    # Input columns: phi (degrees, shifted by 180), theta
                    # (degrees), quadrature weight — all converted to radians
                    # and stored as fixed-precision strings.
                    lebedev_phi.append(str("%.20f"%(((float(data[0])+180)*math.pi)/180.0)))
                    lebedev_theta.append(str("%.20f" %(float(data[1])*math.pi/180.0)) )
                    lebedev_weight.append(str("%.20f" %(float(data[2]))))
            for pts in range(0,len(lebedev_theta)):
                val=complex(sYlm.subs({s:S,l:li,m:mj,theta:float(lebedev_theta[pts]),phi:float(lebedev_phi[pts])}).evalf())
                values_real.append(str("%.20f" %(val.real) ))
                values_imag.append(str("%.20f" %(val.imag) ))
        varName=varNamePrefix+str(li).replace('-','m')+"_"+str(mj).replace('-','m')+"_REAL"
        print("static double %s [] = { " %(varName),file=codeOutput)
        print(", ".join(values_real),file=codeOutput)
        # Fixed: the closing brace was printed to stdout unconditionally,
        # which only happened to work because codeOutput *is* stdout; now the
        # whole header goes to the same stream.
        print("};",file=codeOutput)
        varName=varNamePrefix+str(li).replace('-','m')+"_"+str(mj).replace('-','m')+"_IMAG"
        print("static double %s [] = { " %(varName),file=codeOutput)
        print(", ".join(values_imag),file=codeOutput)
        print("};",file=codeOutput)
print("}// end of namespace swsh" ,file=codeOutput)
print("#endif",file=codeOutput)
# NOTE(review): the triple-quoted block below is code the author disabled
# (an earlier uniform theta/phi-grid variant of the table generator). It is a
# bare string literal: evaluated and discarded at import time, kept only as
# reference. Left byte-identical.
'''
# evaluates the SWSH functions at points define by the theta and phi step size.
print("",file=codeOutput)
print("#define NUM_THETA_PTS %d" %(thetaSz),file=codeOutput)
print("#define NUM_PHI_PTS %d" %(phiSz),file=codeOutput)
print("",file=codeOutput);
varNamePrefix="m2Y"
print("namespace swsh { " ,file=codeOutput)
for li in L :
    for mj in M:
        values_real=[]
        values_imag=[]
        for itheta in np.arange(0,math.pi,dtheta):
            for jphi in np.arange(0,2*math.pi,dphi):
                val=complex(sYlm.subs({s:1,l:li,m:mj,theta:itheta,phi:jphi}).evalf())
                #val1=Y.subs({s:1,l:li,m:mj,theta:itheta,phi:jphi}).evalf()
                #print("(%.6f,%.6f) l: %d m: %d sYml : %.10f Y:%.10f diff: %.10f" %(itheta,jphi,li,mj,val,val1,abs(val1-val))) #pprint(val)
                values_real.append(str("%.20f" %(val.real) ))
                values_imag.append(str("%.20f" %(val.imag) ))
        varName=varNamePrefix+str(li).replace('-','m')+"_"+str(mj).replace('-','m')+"_REAL"
        print("static double %s [] = { " %(varName),file=codeOutput)
        print(", ".join(values_real),file=codeOutput)
        print("};")
        varName=varNamePrefix+str(li).replace('-','m')+"_"+str(mj).replace('-','m')+"_IMAG"
        print("static double %s [] = { " %(varName),file=codeOutput)
        print(", ".join(values_imag),file=codeOutput)
        print("};")
print("}// end of namespace swsh" ,file=codeOutput)
print("#endif",file=codeOutput)
'''
| 39.225 | 294 | 0.570427 |
ee6d9b8eabe57c2fc61c46d4acb37c8ba4c03429 | 429 | py | Python | 1_Fundamental Python Programming/1_1_Python Fundamentals/IDE.py | NishkarshRaj/Python-Programming | 29506d8d7ed80730ce5cafd5945f83c81f003e68 | [
"Unlicense"
] | 6 | 2019-07-15T14:54:52.000Z | 2021-11-26T22:34:44.000Z | 1_Fundamental Python Programming/1_1_Python Fundamentals/IDE.py | NishkarshRaj/Python-Programming | 29506d8d7ed80730ce5cafd5945f83c81f003e68 | [
"Unlicense"
] | 1 | 2019-07-15T14:53:35.000Z | 2019-07-15T14:53:35.000Z | 1_Fundamental Python Programming/1_1_Python Fundamentals/IDE.py | NishkarshRaj/Python-Programming | 29506d8d7ed80730ce5cafd5945f83c81f003e68 | [
"Unlicense"
] | 2 | 2019-08-05T01:30:10.000Z | 2020-05-04T18:36:46.000Z | 4*5; ''' Works on normal IDE but not here but wont give any interpretation error'''
# A bare expression statement: evaluated and discarded, produces no output.
41 - 31
print("Print Statement")
print("It has inbuilt linefeed by default, we will learn how to override the default print configurations later")
print("print", "statement", "has", "inbuilt", "spaces", "within the comma separated strings")
print("Output can also be written using " + "concatenation but spaces have to be then added accordingly")
| 61.285714 | 114 | 0.752914 |
8ffbe8b375d251c87050b3a45c6b9adc9fa6094c | 91 | py | Python | openFlow/ofptHello.py | wsharif/thesis | 96a9bd6c86ed027c3eeee231b2eae1c14394d728 | [
"MIT"
] | null | null | null | openFlow/ofptHello.py | wsharif/thesis | 96a9bd6c86ed027c3eeee231b2eae1c14394d728 | [
"MIT"
] | null | null | null | openFlow/ofptHello.py | wsharif/thesis | 96a9bd6c86ed027c3eeee231b2eae1c14394d728 | [
"MIT"
] | null | null | null | from ofptHeader import ofptHeader
def ofptHello():
    """Build an OFPT_HELLO message: a bare OpenFlow header with type 0."""
    return ofptHeader(0)
b14f3b141e7f3a9d51db9ef15f931ecef8bcf99b | 4,189 | py | Python | ML/Pytorch/more_advanced/image_captioning/get_loader.py | xuyannus/Machine-Learning-Collection | 6d5dcd18d4e40f90e77355d56a2902e4c617ecbe | [
"MIT"
] | 3,094 | 2020-09-20T04:34:31.000Z | 2022-03-31T23:59:46.000Z | ML/Pytorch/more_advanced/image_captioning/get_loader.py | xkhainguyen/Machine-Learning-Collection | 425d196e9477dbdbbd7cc0d19d29297571746ab5 | [
"MIT"
] | 79 | 2020-09-24T08:54:17.000Z | 2022-03-30T14:45:08.000Z | ML/Pytorch/more_advanced/image_captioning/get_loader.py | xkhainguyen/Machine-Learning-Collection | 425d196e9477dbdbbd7cc0d19d29297571746ab5 | [
"MIT"
] | 1,529 | 2020-09-20T16:21:21.000Z | 2022-03-31T21:16:25.000Z | import os # when loading file paths
import pandas as pd # for lookup in annotation file
import spacy # for tokenizer
import torch
from torch.nn.utils.rnn import pad_sequence # pad batch
from torch.utils.data import DataLoader, Dataset
from PIL import Image # Load img
import torchvision.transforms as transforms
# We want to convert text -> numerical values
# 1. We need a Vocabulary mapping each word to a index
# 2. We need to setup a Pytorch dataset to load the data
# 3. Setup padding of every batch (all examples should be
# of same seq_len and setup dataloader)
# Note that loading the image is very easy compared to the text!
# Download with: python -m spacy download en
# NOTE(review): "en" is the spaCy 2.x shortcut name; spaCy 3.x renamed the
# small English model to "en_core_web_sm" — confirm against the pinned
# spacy version before upgrading.
spacy_eng = spacy.load("en")
class Vocabulary:
    """Word <-> index mapping with a minimum-frequency cutoff.

    Indices 0-3 are reserved for <PAD>/<SOS>/<EOS>/<UNK>; real words get
    indices from 4 upward, in the order they reach the threshold.
    """

    def __init__(self, freq_threshold):
        self.itos = {0: "<PAD>", 1: "<SOS>", 2: "<EOS>", 3: "<UNK>"}
        self.stoi = {"<PAD>": 0, "<SOS>": 1, "<EOS>": 2, "<UNK>": 3}
        self.freq_threshold = freq_threshold

    def __len__(self):
        return len(self.itos)

    @staticmethod
    def tokenizer_eng(text):
        """Lower-cased spaCy tokens for an English sentence."""
        return [tok.text.lower() for tok in spacy_eng.tokenizer(text)]

    def build_vocabulary(self, sentence_list):
        """Register every word that occurs at least `freq_threshold` times."""
        frequencies = {}
        next_index = 4
        for sentence in sentence_list:
            for word in self.tokenizer_eng(sentence):
                frequencies[word] = frequencies.get(word, 0) + 1
                # Register the word exactly once: the moment it hits the threshold.
                if frequencies[word] == self.freq_threshold:
                    self.stoi[word] = next_index
                    self.itos[next_index] = word
                    next_index += 1

    def numericalize(self, text):
        """Map a sentence to token indices, falling back to <UNK>."""
        unk = self.stoi["<UNK>"]
        return [self.stoi.get(token, unk) for token in self.tokenizer_eng(text)]
class FlickrDataset(Dataset):
    """Flickr8k-style dataset pairing each image with a numericalized caption."""

    def __init__(self, root_dir, captions_file, transform=None, freq_threshold=5):
        self.root_dir = root_dir
        self.df = pd.read_csv(captions_file)
        self.transform = transform
        # One row per (image file name, raw caption string) pair.
        self.imgs = self.df["image"]
        self.captions = self.df["caption"]
        # Build the vocabulary from every caption in the annotation file.
        self.vocab = Vocabulary(freq_threshold)
        self.vocab.build_vocabulary(self.captions.tolist())

    def __len__(self):
        return len(self.df)

    def __getitem__(self, index):
        img_id = self.imgs[index]
        image = Image.open(os.path.join(self.root_dir, img_id)).convert("RGB")
        if self.transform is not None:
            image = self.transform(image)
        # Caption becomes <SOS> + token indices + <EOS>.
        tokens = [self.vocab.stoi["<SOS>"]]
        tokens += self.vocab.numericalize(self.captions[index])
        tokens.append(self.vocab.stoi["<EOS>"])
        return image, torch.tensor(tokens)
class MyCollate:
    """Batch collator: stacks images along a new batch dim and pads the
    variable-length caption tensors with `pad_idx`."""

    def __init__(self, pad_idx):
        self.pad_idx = pad_idx

    def __call__(self, batch):
        images = torch.cat([sample[0].unsqueeze(0) for sample in batch], dim=0)
        captions = [sample[1] for sample in batch]
        # (max_len, batch) layout since batch_first=False.
        padded = pad_sequence(captions, batch_first=False, padding_value=self.pad_idx)
        return images, padded
def get_loader(
    root_folder,
    annotation_file,
    transform,
    batch_size=32,
    num_workers=8,
    shuffle=True,
    pin_memory=True,
):
    """Build a (DataLoader, FlickrDataset) pair; captions are padded per
    batch with the dataset's <PAD> index."""
    dataset = FlickrDataset(root_folder, annotation_file, transform=transform)
    pad_token = dataset.vocab.stoi["<PAD>"]
    data_loader = DataLoader(
        dataset=dataset,
        batch_size=batch_size,
        num_workers=num_workers,
        shuffle=shuffle,
        pin_memory=pin_memory,
        collate_fn=MyCollate(pad_idx=pad_token),
    )
    return data_loader, dataset
if __name__ == "__main__":
    # Smoke test: build the loader against a local Flickr8k copy and print
    # the per-batch tensor shapes. Requires flickr8k/ to exist locally.
    transform = transforms.Compose(
        [transforms.Resize((224, 224)), transforms.ToTensor(),]
    )
    loader, dataset = get_loader(
        "flickr8k/images/", "flickr8k/captions.txt", transform=transform
    )
    for idx, (imgs, captions) in enumerate(loader):
        print(imgs.shape)
        print(captions.shape)
| 29.293706 | 86 | 0.633087 |
ab0392aaed0b659e2c4cf92b1c4740f930fcbd14 | 4,415 | py | Python | docs/examples/use_cases/tensorflow/efficientdet/dataset/create_pascal_tfrecord_test.py | lbhm/DALI | d478d768c55069351a78d6bdcebed7632ca21ecb | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | docs/examples/use_cases/tensorflow/efficientdet/dataset/create_pascal_tfrecord_test.py | lbhm/DALI | d478d768c55069351a78d6bdcebed7632ca21ecb | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | docs/examples/use_cases/tensorflow/efficientdet/dataset/create_pascal_tfrecord_test.py | lbhm/DALI | d478d768c55069351a78d6bdcebed7632ca21ecb | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test for create_pascal_tfrecord.py."""
import os
from absl import logging
import numpy as np
import PIL.Image
import six
import tensorflow as tf
from dataset import create_pascal_tfrecord
class CreatePascalTFRecordTest(tf.test.TestCase):
    """Unit tests for dataset.create_pascal_tfrecord.dict_to_tf_example."""

    def _assertProtoEqual(self, proto_field, expectation):
        """Helper function to assert if a proto field equals some value.
        Args:
          proto_field: The protobuf field to compare.
          expectation: The expected value of the protobuf field.
        """
        # list() materializes the repeated field directly instead of a manual
        # element-by-element comprehension.
        proto_list = list(proto_field)
        self.assertListEqual(proto_list, expectation)

    def test_dict_to_tf_example(self):
        # Write a random 256x256 RGB image into this test's temp dir.
        image_file_name = "2012_12.jpg"
        image_data = np.random.rand(256, 256, 3)
        save_path = os.path.join(self.get_temp_dir(), image_file_name)
        image = PIL.Image.fromarray(image_data, "RGB")
        image.save(save_path)
        # Minimal PASCAL-VOC style annotation: one difficult 'person' box.
        data = {
            "folder": "",
            "filename": image_file_name,
            "size": {
                "height": 256,
                "width": 256,
            },
            "object": [
                {
                    "difficult": 1,
                    "bndbox": {
                        "xmin": 64,
                        "ymin": 64,
                        "xmax": 192,
                        "ymax": 192,
                    },
                    "name": "person",
                    "truncated": 0,
                    "pose": "",
                },
            ],
        }
        label_map_dict = {
            "background": 0,
            "person": 1,
            "notperson": 2,
        }
        example = create_pascal_tfrecord.dict_to_tf_example(
            data, self.get_temp_dir(), label_map_dict, image_subdirectory=""
        )
        self._assertProtoEqual(
            example.features.feature["image/height"].int64_list.value, [256]
        )
        self._assertProtoEqual(
            example.features.feature["image/width"].int64_list.value, [256]
        )
        self._assertProtoEqual(
            example.features.feature["image/filename"].bytes_list.value,
            [six.b(image_file_name)],
        )
        self._assertProtoEqual(
            example.features.feature["image/source_id"].bytes_list.value,
            [six.b(str(1))],
        )
        self._assertProtoEqual(
            example.features.feature["image/format"].bytes_list.value, [six.b("jpeg")]
        )
        # Box coordinates are emitted normalized by the image size:
        # 64/256 = 0.25 and 192/256 = 0.75.
        self._assertProtoEqual(
            example.features.feature["image/object/bbox/xmin"].float_list.value, [0.25]
        )
        self._assertProtoEqual(
            example.features.feature["image/object/bbox/ymin"].float_list.value, [0.25]
        )
        self._assertProtoEqual(
            example.features.feature["image/object/bbox/xmax"].float_list.value, [0.75]
        )
        self._assertProtoEqual(
            example.features.feature["image/object/bbox/ymax"].float_list.value, [0.75]
        )
        self._assertProtoEqual(
            example.features.feature["image/object/class/text"].bytes_list.value,
            [six.b("person")],
        )
        self._assertProtoEqual(
            example.features.feature["image/object/class/label"].int64_list.value, [1]
        )
        self._assertProtoEqual(
            example.features.feature["image/object/difficult"].int64_list.value, [1]
        )
        self._assertProtoEqual(
            example.features.feature["image/object/truncated"].int64_list.value, [0]
        )
        self._assertProtoEqual(
            example.features.feature["image/object/view"].bytes_list.value, [six.b("")]
        )
if __name__ == "__main__":
    # Reduce log noise, then hand control to the TF test runner.
    logging.set_verbosity(logging.WARNING)
    tf.test.main()
| 34.492188 | 87 | 0.574858 |
7908bdf8227c02f4c6de44c83bc393fb992675d5 | 1,507 | py | Python | model-optimizer/extensions/middle/UselessMerge.py | undeadinu/dldt | fbc7a4a710c24def8ab199926a7da90a0394b87d | [
"Apache-2.0"
] | 1 | 2019-03-22T06:35:55.000Z | 2019-03-22T06:35:55.000Z | model-optimizer/extensions/middle/UselessMerge.py | undeadinu/dldt | fbc7a4a710c24def8ab199926a7da90a0394b87d | [
"Apache-2.0"
] | null | null | null | model-optimizer/extensions/middle/UselessMerge.py | undeadinu/dldt | fbc7a4a710c24def8ab199926a7da90a0394b87d | [
"Apache-2.0"
] | 1 | 2019-06-11T06:20:42.000Z | 2019-06-11T06:20:42.000Z | """
Copyright (c) 2018 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging as log
import networkx as nx
from extensions.middle.ConstSwitchResolver import ConstSwitchEraser
from mo.graph.graph import erase_node
from mo.middle.replacement import MiddleReplacementPattern
class UselessMergeEraser(MiddleReplacementPattern):
    """Removes a Merge node that has at most one input, together with its
    output data node: such a Merge is an identity and carries no information."""
    enabled = True

    def run_after(self):
        """This pass must run after constant Switch nodes were resolved."""
        return [ConstSwitchEraser]

    def pattern(self):
        # Match an op node 'merge' (op=Merge) directly feeding data node 'merge_data'.
        return dict(
            nodes=[('merge', dict(kind='op', op='Merge')),
                   ('merge_data', dict(kind='data'))],
            edges=[('merge', 'merge_data')]
        )

    def replace_pattern(self, graph: nx.MultiDiGraph, match: dict):
        if len(graph.in_edges(match['merge'].id)) <= 1:
            # Capture the ids before erasing so the log message does not read
            # attributes of already-removed nodes (fixes fragile post-erase
            # access in the original; also fixes the message typos).
            merge_id = match['merge'].id
            merge_data_id = match['merge_data'].id
            erase_node(match['merge'])
            erase_node(match['merge_data'])
            log.info("Useless Merge op and data nodes were deleted op='{}' data='{}'"
                     "".format(merge_id, merge_data_id))
| 33.488889 | 83 | 0.68215 |
595bc00f65793d4c0363b32b2da4e533253543a5 | 832 | py | Python | src/build/lib/binance_f/model/balancev2.py | Han1018/Cryptocurrency-Automated-Trading | 52a5b6d15eb9b2a3a69cc53bd159f6eec073614d | [
"MIT"
] | 1 | 2021-04-12T09:15:01.000Z | 2021-04-12T09:15:01.000Z | binance_f/model/balancev2.py | rmdpadula/Binance_Futures | 1297a0a0be7b396e5a3980c8ae68f18ca492cb9a | [
"MIT"
] | 1 | 2021-07-20T15:25:11.000Z | 2021-07-20T15:25:11.000Z | binance_f/model/balancev2.py | rmdpadula/Binance_Futures | 1297a0a0be7b396e5a3980c8ae68f18ca492cb9a | [
"MIT"
class BalanceV2:
    """Plain data holder for one asset row of the v2 futures balance endpoint."""

    def __init__(self):
        # Defaults mirror an empty API row: empty strings / zero amounts.
        self.accountAlias = ""
        self.asset = ""
        self.balance = 0.0
        self.crossWalletBalance = 0.0
        self.crossUnPnl = 0.0
        self.availableBalance = 0.0
        self.maxWithdrawAmount = 0.0

    @staticmethod
    def json_parse(json_data):
        """Populate a BalanceV2 from a JSON wrapper exposing
        get_string/get_float accessors."""
        parsed = BalanceV2()
        parsed.accountAlias = json_data.get_string("accountAlias")
        parsed.asset = json_data.get_string("asset")
        # The numeric fields are read in the same order as the original.
        for field in ("balance", "crossWalletBalance", "crossUnPnl",
                      "availableBalance", "maxWithdrawAmount"):
            setattr(parsed, field, json_data.get_float(field))
        return parsed
86013e17b5fc0e6e507b1eeb106e135d86a1dcb7 | 1,602 | py | Python | setup.py | Dafvid/klaus | beea95327245bf6465eb824b9d46c5371d36d885 | [
"ISC"
] | null | null | null | setup.py | Dafvid/klaus | beea95327245bf6465eb824b9d46c5371d36d885 | [
"ISC"
] | null | null | null | setup.py | Dafvid/klaus | beea95327245bf6465eb824b9d46c5371d36d885 | [
"ISC"
] | null | null | null | # encoding: utf-8
import glob
from setuptools import setup
def install_data_files_hack():
    """Point every install scheme's 'data' dir at 'purelib' so data_files
    are installed next to the package instead of the default data root."""
    # This is a clever hack to circumvent distutil's data_files
    # policy "install once, find never". Definitely a TODO!
    # -- https://groups.google.com/group/comp.lang.python/msg/2105ee4d9e8042cb
    from distutils.command.install import INSTALL_SCHEMES
    for scheme in INSTALL_SCHEMES.values():
        scheme['data'] = scheme['purelib']
# Apply the data_files workaround before setup() records install schemes.
install_data_files_hack()
requires = ['flask', 'pygments', 'dulwich>=0.9.6', 'httpauth', 'humanize']
try:
    import argparse  # not available for Python 2.6
except ImportError:
    # argparse only entered the stdlib in 2.7, so pull it from PyPI on 2.6.
    requires.append('argparse')
setup(
    name='klaus',
    version='0.4.9',
    author='Jonas Haag',
    author_email='jonas@lophus.org',
    packages=['klaus', 'klaus.contrib'],
    scripts=['bin/klaus'],
    include_package_data=True,
    zip_safe=False,
    url='https://github.com/jonashaag/klaus',
    description='The first Git web viewer that Just Works™.',
    # Reuse the module docstring as the long description.
    long_description=__doc__,
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Topic :: Internet :: WWW/HTTP :: Dynamic Content",
        "Topic :: Internet :: WWW/HTTP :: WSGI :: Application",
        "Topic :: Software Development :: Version Control",
        "Environment :: Web Environment",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: ISC License (ISCL)",
        "Programming Language :: Python",
        "Programming Language :: Python :: 2.6",
        "Programming Language :: Python :: 2.7",
    ],
    install_requires=requires,
)
| 30.226415 | 78 | 0.655431 |
a0d964dc6d5fc6e3f52f765dbceca52e233d36a8 | 597 | py | Python | chevah/compat/tests/__init__.py | chevah/compat | d22e5f551a628f8a1652c9f2eea306e17930cb8f | [
"BSD-3-Clause"
] | 5 | 2016-12-03T22:54:50.000Z | 2021-11-17T11:17:39.000Z | chevah/compat/tests/__init__.py | chevah/compat | d22e5f551a628f8a1652c9f2eea306e17930cb8f | [
"BSD-3-Clause"
] | 76 | 2015-01-22T16:00:31.000Z | 2022-02-09T22:13:34.000Z | chevah/compat/tests/__init__.py | chevah/compat | d22e5f551a628f8a1652c9f2eea306e17930cb8f | [
"BSD-3-Clause"
] | 1 | 2016-12-10T15:57:31.000Z | 2016-12-10T15:57:31.000Z | # Copyright (c) 2011 Adi Roiban.
# See LICENSE for details.
"""
Tests for the system compatibility module.
"""
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from chevah.compat.testing import mk
def setup_package():
    """
    Package-level fixture, executed once before any test in the suite.
    """
    # Create the shared temporary folder that filesystem tests rely on.
    mk.fs.setUpTemporaryFolder()
def teardown_package():
    """
    Package-level fixture, executed once after the whole suite finishes.
    """
    # Drop the shared temporary folder, then assert nothing was left behind.
    mk.fs.tearDownTemporaryFolder()
    mk.fs.checkCleanTemporaryFolders()
| 22.111111 | 42 | 0.720268 |
8dbc651d964fbed4da7597097fa53c3723132958 | 9,756 | py | Python | read.py | OneStone2/mcmc_growth | 85180b132311affc18a72a3fb717cb26ebcf614a | [
"MIT"
] | 2 | 2016-07-01T13:29:16.000Z | 2016-07-06T14:30:30.000Z | read.py | OneStone2/mcmc_growth | 85180b132311affc18a72a3fb717cb26ebcf614a | [
"MIT"
] | null | null | null | read.py | OneStone2/mcmc_growth | 85180b132311affc18a72a3fb717cb26ebcf614a | [
"MIT"
] | null | null | null | """
Contains utilities to read and clean FIA files
"""
from pandas import DataFrame, read_csv
import pandas as pd
from collections import defaultdict
import numpy as np
import math
import bisect
import urllib2
import os
import sys
def chunk_report(bytes_so_far, chunk_size, total_size):
    """Write an in-place download progress line to stdout.

    Emits a trailing newline once the download is complete.  The
    ``chunk_size`` argument is accepted for hook compatibility but unused.
    """
    pct = round(float(bytes_so_far) / total_size * 100, 2)
    sys.stdout.write("Downloaded %d of %d bytes (%0.2f%%)\r" %
                     (bytes_so_far, total_size, pct))
    if bytes_so_far >= total_size:
        sys.stdout.write('\n')
def chunk_read(response, chunk_size=8192, report_hook=None):
    """Read *response* to completion in chunks and return it as one string.

    Parameters:
        response: urlopen-style object with ``read(n)`` and ``info()``
            (``info().getheader('Content-Length')`` must be available).
        chunk_size: number of bytes requested per ``read`` call.
        report_hook: optional callable ``(bytes_so_far, chunk_size,
            total_size)`` invoked after every chunk (progress reporting).

    Fix: the original accumulated into a list with ``data += chunk``, which
    extends the list one character at a time; collecting whole chunks and
    joining once is linear and clearer.  ``while 1`` -> ``while True``.
    """
    total_size = int(response.info().getheader('Content-Length').strip())
    bytes_so_far = 0
    chunks = []
    while True:
        chunk = response.read(chunk_size)
        if not chunk:
            break
        bytes_so_far += len(chunk)
        chunks.append(chunk)
        if report_hook:
            report_hook(bytes_so_far, chunk_size, total_size)
    return "".join(chunks)
def check(row):
    """
    Checks for human intervention in a plot.

    Returns True when any disturbance column records code 80, or any
    treatment column records code 10, 30 or 50 (the union of the cases
    tested by check_n, check_p and check_f).

    Fix: collapses the original 12-branch if-chain into membership tests;
    behavior is identical.
    """
    if any(row[col] == 80.0 for col in ('DSTRBCD1', 'DSTRBCD2', 'DSTRBCD3')):
        return True
    return any(row[col] in (10.0, 30.0, 50.0)
               for col in ('TRTCD1', 'TRTCD2', 'TRTCD3'))
def check_n(row):
    """
    Checks for negative human intervention in a plot.

    Returns 1 when any disturbance column records code 80 or any
    treatment column records code 10, otherwise 0 (integer flag, used
    directly as the ``human_n`` plot attribute).
    """
    if any(row[col] == 80.0 for col in ('DSTRBCD1', 'DSTRBCD2', 'DSTRBCD3')):
        return 1
    return int(any(row[col] == 10.0 for col in ('TRTCD1', 'TRTCD2', 'TRTCD3')))
def check_p(row):
    """
    Checks for recent planting in a plot.

    Returns 1 when any treatment column records code 30, otherwise 0
    (integer flag, used directly as the ``human_p`` plot attribute).
    """
    return int(any(row[col] == 30.0 for col in ('TRTCD1', 'TRTCD2', 'TRTCD3')))
def check_f(row):
    """
    Checks for positive human intervention in a plot.

    Returns 1 when any treatment column records code 50, otherwise 0
    (integer flag, used directly as the ``human_f`` plot attribute).
    """
    return int(any(row[col] == 50.0 for col in ('TRTCD1', 'TRTCD2', 'TRTCD3')))
class Plot(object):
    """
    Contains all the subplots/trees measured for a particular plot.
    Internally contains a dataframe of all the trees and their subplots.
    Has methods to compute total BA/TPA and species importance values.
    """
    def __init__(self, trees, plot, dstrb, py):
        # trees: per-tree DataFrame for this plot/year (columns from TREES_COLS)
        # plot:  one row of the plot table (provides LAT/LON)
        # dstrb: one row of the condition table (disturbance/treatment codes)
        # py:    combined plot-year key (plot_id * 10000 + inventory year)
        self.df = trees
        self.py = py
        # Count of trees with missing TPA_UNADJ (used later as a quality filter).
        self.na = self.df.TPA_UNADJ.isnull().sum()
        # NOTE(review): fillna without inplace=True returns a new frame which is
        # discarded here, so this line has no effect — confirm intent.
        self.df.fillna(0)
        # Trees per acre, basal area and above+below-ground carbon, each
        # expanded by the per-tree TPA factor.
        self.tpa = self.df['TPA_UNADJ'].sum()
        self.ba = ((math.pi * (self.df['DIA']/2) ** 2) * self.df['TPA_UNADJ']).sum()
        self.carb = ((self.df['CARBON_AG'] + self.df['CARBON_BG']) * self.df['TPA_UNADJ']).sum()
        self.lon = plot.loc['LON']
        self.lat = plot['LAT']
        # Integer flags for the three kinds of human intervention (see check_*).
        self.human_n = check_n(dstrb)
        self.human_p = check_p(dstrb)
        self.human_f = check_f(dstrb)
    def calc_iv(self):
        """
        Calculates all importance values for species in this plot.

        Each species' importance value is the mean of its relative TPA,
        relative basal area, and relative subplot frequency.
        Returns:
        {
        'iv<spp1>': <impval1>,
        ...
        }
        """
        #Element 0 is TPA
        #Element 1 is BA
        #Element 2 is subplots
        param_dd = defaultdict(lambda: [0, 0, set()])
        grouped = self.df.groupby('SPCD')
        for name, group in grouped:
            param_dd[name][0] = group['TPA_UNADJ'].sum()
            param_dd[name][1] = ((math.pi * (group['DIA'] / 2) ** 2) * group['TPA_UNADJ']).sum()
            param_dd[name][2].update(group['SUBP'])
        # Frequency: fraction of this plot's subplots each species occurs on.
        total_subp = set()
        total_subp.update(self.df['SUBP'])
        sum_freq = 0.0
        for spp in param_dd:
            param_dd[spp][2] = len(param_dd[spp][2])/float(len(total_subp))
            sum_freq += param_dd[spp][2]
        # Normalize all three measures so each sums to 1 across species.
        for spp in param_dd:
            param_dd[spp][0] /= self.tpa
            param_dd[spp][1] /= self.ba
            param_dd[spp][2] /= sum_freq
        iv_dd = dict()
        for spp in param_dd:
            # Importance value = mean of the three relative measures.
            iv_dd['iv'+str(spp)] = sum(param_dd[spp][x] for x in np.arange(3)) / 3.0
        return iv_dd
    def plot_stats(self):
        """
        Returns a dictionary of all the plot stats (summary attributes plus
        the per-species importance values from calc_iv).
        Can be used as a row in the dataframe used for clustering.
        """
        stats = {
            'py': self.py,
            'carb': self.carb,
            'samples': len(self.df.index),
            'na': self.na,
            'lon': self.lon,
            'lat': self.lat,
            'human_p': self.human_p,
            'human_n': self.human_n,
            'human_f': self.human_f
        }
        stats.update(self.calc_iv())
        return stats
def parse(state, online=True):
    """
    Takes the raw FIA files for *state* and yields Plot objects, one per
    (plot, inventory year) group.

    When ``online`` is True the TREE/PLOT/COND CSVs are downloaded from the
    FIA site through a scratch file named 'temp'; otherwise they are read
    from the local ``data/`` directory.  This is a generator (Python 2 —
    note the print statements).
    """
    TREES_COLS = [
        'INVYR', 'PLOT', 'STATUSCD', 'CARBON_AG', 'CARBON_BG',
        'TPA_UNADJ', 'DIA', 'PREVDIA', 'DIACALC', 'SPCD', 'SUBP'
    ]
    PLOT_COLS = ['INVYR', 'PLOT', 'LAT', 'LON']
    DSTRB_COLS = [
        'PLOT', 'INVYR', 'DSTRBCD1', 'DSTRBCD2', 'DSTRBCD3',
        'TRTCD1', 'TRTCD2', 'TRTCD3'
    ]
    if online:
        TREES_WEB = "http://apps.fs.fed.us/fiadb-downloads/CSV/"+state+"_TREE.csv"
        PLOT_WEB = "http://apps.fs.fed.us/fiadb-downloads/CSV/"+state+"_PLOT.csv"
        DSTRB_WEB = "http://apps.fs.fed.us/fiadb-downloads/CSV/"+state+"_COND.csv"
        # Download each CSV with progress reporting, stage it in 'temp',
        # then load only the columns of interest.
        response = urllib2.urlopen(TREES_WEB)
        print TREES_WEB
        csv = chunk_read(response, report_hook=chunk_report)
        f = open('temp', 'w')
        f.write(csv)
        f.close()
        trees_df = pd.read_csv('temp', usecols=TREES_COLS)
        response = urllib2.urlopen(PLOT_WEB)
        print PLOT_WEB
        csv = chunk_read(response, report_hook=chunk_report)
        f = open('temp', 'w')
        f.write(csv)
        f.close()
        plot_df = pd.read_csv('temp', usecols=PLOT_COLS)
        response = urllib2.urlopen(DSTRB_WEB)
        print DSTRB_WEB
        csv = chunk_read(response, report_hook=chunk_report)
        f = open('temp', 'w')
        f.write(csv)
        f.close()
        dstrb_df = pd.read_csv('temp', usecols=DSTRB_COLS)
        os.remove('temp')
    else:
        TREES_FILE = 'data/'+state+'_TREE.csv'
        PLOT_FILE = 'data/'+state+'_PLOT.csv'
        DSTRB_FILE = 'data/'+state+'_COND.csv'
        trees_df = pd.read_csv(TREES_FILE, usecols=TREES_COLS)
        plot_df = pd.read_csv(PLOT_FILE, usecols=PLOT_COLS)
        dstrb_df = pd.read_csv(DSTRB_FILE, usecols=DSTRB_COLS)
    # Keep live trees only (STATUSCD == 1).
    trees_df = trees_df[trees_df.STATUSCD == 1]
    # Backfill missing diameters: prefer DIA, then DIACALC, then PREVDIA.
    trees_df.DIA.fillna(trees_df.DIACALC, inplace=True)
    trees_df.drop('DIACALC', axis=1, inplace=True)
    trees_df.DIA.fillna(trees_df.PREVDIA, inplace=True)
    trees_df.drop('PREVDIA', axis=1, inplace=True)
    grouped = trees_df.groupby(['PLOT', 'INVYR'])
    for name, group in grouped:
        # name is (plot_id, year); py key packs both into a single int.
        yield Plot(
            group,
            plot_df[(plot_df.INVYR == name[1]) & (plot_df.PLOT == name[0])].iloc[0],
            dstrb_df[(dstrb_df.INVYR == name[1]) & (dstrb_df.PLOT == name[0])].iloc[0],
            name[0] * 10000 + name[1]
        )
def cluster_prep_file(plots, state):
    """
    Given an iterable of Plot objects, write their per-plot statistics to
    ``data/<state>_1.csv`` (NaNs replaced by 0) and return that path.
    """
    out_filename = 'data/' + state + '_1.csv'
    stats_frame = pd.DataFrame([p.plot_stats() for p in plots])
    stats_frame.fillna(0).to_csv(out_filename, index=False)
    return out_filename
def clean(state, b):
    """
    Cleans the data for usage in the analysis.

    Reads ``data/<state>_1.csv``, filters out low-quality rows, keeps only
    the most important species columns, and writes ``data/<state>_2a.csv``.
    When ``b`` is truthy, additionally renumbers plots so that sequences
    interrupted by human intervention become distinct plots, written to
    ``data/<state>_2b.csv``.
    """
    in_file = 'data/'+state+'_1.csv'
    out_file = 'data/'+state+'_2a.csv'
    data_points = pd.read_csv(in_file)
    #Remove entries before the year 1999 (year is the low 4 digits of py)
    MIN_YR = 1999
    data_points = data_points[data_points['py'] % 10000 >= MIN_YR]
    #Remove entries with few trees
    MIN_TREES = 5
    data_points = data_points[data_points['samples'] - data_points['na'] >= MIN_TREES]
    #Remove entries with too many invalid trees
    NA_THRESHOLD = 5
    data_points = data_points[data_points['na'] < NA_THRESHOLD]
    #Keep only most important species
    MIN_IV = 0.7
    keep_cols = [col for col in list(data_points) if not col.startswith('iv')]
    col_iv = [col for col in list(data_points) if col.startswith('iv')]
    # Rank species columns by total importance value across all rows.
    sorted_iv = data_points[col_iv].apply(sum, axis=0).sort_values(ascending=False)
    cutoff = bisect.bisect_left(np.cumsum(sorted_iv), len(data_points.index) * MIN_IV) +1
    #Add 1 to the cutoff so the total IV is guaranteed to be over MIN_IV
    for i in np.arange(cutoff):
        keep_cols.append(sorted_iv.index[i])
    data_points = data_points.loc[:, keep_cols]
    #Drop samples and na. They're not necessary anymore
    data_points = data_points.drop(['samples', 'na'], axis=1)
    data_points.to_csv(out_file, index=False)
    if b:
        out_file = 'data/'+state+'_2b.csv'
        #Re-number the plots so that human interventions are not applied:
        #a new synthetic plot id starts whenever the original plot changes
        #or any intervention flag is set on the row.
        cur_np = 1
        prev_id = data_points.loc[data_points.index[0], 'py'] // 10000
        for i, row in data_points.iterrows():
            if (prev_id != row['py'] // 10000) or (row['human_n'] == 1) or (row['human_p'] == 1) or (row['human_f'] == 1):
                cur_np += 1
            prev_id = row['py'] // 10000
            data_points.loc[i, 'py'] = int(cur_np * 10000 + row['py'] % 10000)
        data_points.to_csv(out_file, index=False)
| 32.41196 | 122 | 0.582718 |
d0f8b9b18c5924b91b28e55a186ebdbca0b8e241 | 632 | py | Python | venv/bin/rst2pseudoxml.py | Nitrokey/nitrokey-app2 | ab66bbceb854e1f18987b0331528e86e3e7ff702 | [
"Apache-2.0"
] | 1 | 2021-11-23T12:54:35.000Z | 2021-11-23T12:54:35.000Z | venv/bin/rst2pseudoxml.py | Nitrokey/nitrokey-app2 | ab66bbceb854e1f18987b0331528e86e3e7ff702 | [
"Apache-2.0"
] | null | null | null | venv/bin/rst2pseudoxml.py | Nitrokey/nitrokey-app2 | ab66bbceb854e1f18987b0331528e86e3e7ff702 | [
"Apache-2.0"
] | null | null | null | #!/home/niklas/git/nitrokey-app2/venv/bin/python3
# $Id: rst2pseudoxml.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
A minimal front end to the Docutils Publisher, producing pseudo-XML.
"""
try:
import locale
locale.setlocale(locale.LC_ALL, '')
except:
pass
from docutils.core import publish_cmdline, default_description
description = ('Generates pseudo-XML from standalone reStructuredText '
'sources (for testing purposes). ' + default_description)
publish_cmdline(description=description)
| 26.333333 | 73 | 0.742089 |
23f9e7bd21dae11ff92798249fd128390ae9a931 | 10,184 | py | Python | xdfile/xwordinfo2xd.py | jmviz/xd | f905e5c61b2835073b19cc3fa0d6917432fa7ece | [
"MIT"
] | 179 | 2016-03-05T03:14:56.000Z | 2022-02-12T22:48:55.000Z | xdfile/xwordinfo2xd.py | jmviz/xd | f905e5c61b2835073b19cc3fa0d6917432fa7ece | [
"MIT"
] | 24 | 2016-02-14T07:43:42.000Z | 2021-12-14T01:09:54.000Z | xdfile/xwordinfo2xd.py | jmviz/xd | f905e5c61b2835073b19cc3fa0d6917432fa7ece | [
"MIT"
] | 25 | 2016-02-19T20:35:03.000Z | 2022-01-31T09:15:44.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import re
from lxml import html, etree
from datetime import datetime
from xdfile.utils import info, debug, error
import xdfile
SPLIT_REBUS_TITLES = "CRYPTOCROSSWORD TIC-TAC-TOE".split()
class XWordInfoParseError(Exception):
    """Raised when an XWordInfo HTML page cannot be parsed into a puzzle."""
    pass
# content is unicode()
def parse_xwordinfo(content, filename):
    """Parse an XWordInfo HTML page (bytes) into an xdfile puzzle object.

    Extracts title/author/editor/copyright/date headers, the grid (with
    rebus and shaded/circled-cell handling), and the clue lists.  Raises
    XWordInfoParseError when no clue divs are found or clues are malformed.
    """
    content = content.decode('utf-8')
    # Words XWordInfo uses for rebus squares, mapped to single characters.
    REBUS_LONG_HANDS = {'NINE': '9',
                        'EIGHT': '8',
                        'SEVEN': '7',
                        'SIX': '6',
                        'FIVE': '5',
                        'FOUR': '4',
                        'THREE': '3',
                        'TWO': '2',
                        'ONE': '1',
                        'ZERO': '0',
                        'AUGHT': '0',
                        'AMPERSAND': '&',
                        'AND': '&',
                        'ASTERISK': '*',
                        'PERCENT': '%',
                        'STAR': '*',
                        'AT': '@',
                        'DOLLAR': '$',
                        'PLUS': '+',
                        'CENT': 'c',
                        # 'DASH': '-',
                        # 'DOT': '●'
                        }
    # Pool of single characters assigned to rebus values not covered above;
    # consumed from the end via pop().
    rsh = 'zyxwvutsrqponmlkjihgfedcba♚♛♜♝♞♟⚅⚄⚃⚂⚁⚀♣♦♥♠Фθиλπφя+&%$@?*0987654321'
    REBUS_SHORT_HANDS = list(rsh)
    # Convert inline HTML markup to xd's brace markup before parsing,
    # and normalize curly quotes.  Order matters: tags first, then quotes.
    content = content.replace("<b>", "{*")
    content = content.replace("</b>", "*}")
    content = content.replace("<i>", "{/")
    content = content.replace("</i>", "/}")
    content = content.replace("<em>", "{/")
    content = content.replace("</em>", "/}")
    content = content.replace("<u>", "{_")
    content = content.replace("</u>", "_}")
    content = content.replace("<strike>", "{-")
    content = content.replace("</strike>", "-}")
    content = content.replace("’", "'")
    content = content.replace('“', '"')
    # content = content.replace('–', '-')
    # Older pages prefix element ids with "CPHContent_".
    if "CPHContent_" in content:
        xwiprefix = '#CPHContent_'
    else:
        xwiprefix = '#'
    root = html.fromstring(content)
    ## debug("ROOT: %s" % root)
    special_type = ''
    rebus = {}
    rebus_order = []
    xd = xdfile.xdfile('', filename)
    # get crossword info
    title = root.cssselect('#PuzTitle')[0].text.strip()
    try:
        subtitle = root.cssselect(xwiprefix + 'SubTitle')[0].text.strip()
        subtitle = ' [%s]' % subtitle
    except:
        subtitle = ""
    author = root.cssselect('.aegrid div')[1].text.strip()
    editor = root.cssselect('.aegrid div')[3].text.strip()
    copyright = root.cssselect(xwiprefix + 'Copyright')[0].text.strip()
    xd.set_header("Title", '%s%s' % (title, subtitle))
    xd.set_header("Author", author)
    xd.set_header("Editor", editor)
    xd.set_header("Copyright", copyright)
    # nyt title normally has date as e.g. January 1, 2020
    date_re = "(January|February|March|April|May|June|July|August|September|October|November|December)\s+\d{1,2},\s+\d{4}"
    try:
        m = re.search(date_re, subtitle if subtitle else title)
        date_string = m.group(0)
        date = datetime.strptime(date_string, "%B %d, %Y")
        xd.set_header("Date", date.strftime("%Y-%m-%d"))
    except:
        # No parseable date in title/subtitle; leave Date header unset.
        pass
    _process_notes(xd, xwiprefix, root)  # add header for notes, if any
    puzzle_table = root.cssselect(xwiprefix + 'PuzTable tr') or root.cssselect('#PuzTable tr')
    for row in puzzle_table:
        row_data = ""
        for cell in row.cssselect('td'):
            # check if the cell is special - with a shade or a circle
            cell_class = cell.get('class')
            cell_type = ''
            if cell_class == 'shade':
                cell_type = 'shaded'
            elif cell_class == 'bigcircle':
                cell_type = 'circle'
            letter = cell.cssselect('div.letter')
            letter = (len(letter) and letter[0].text) or xdfile.BLOCK_CHAR
            # handle rebuses
            if letter == xdfile.BLOCK_CHAR:
                subst = cell.cssselect('div.subst2')
                subst = (len(subst) and subst[0].text) or ''
                if not subst:
                    subst = cell.cssselect('div.subst')
                    if subst:
                        if title in SPLIT_REBUS_TITLES:
                            # These puzzles write the rebus letters "/"-separated.
                            subst = "/".join(list(subst[0].text))
                        else:
                            subst = subst[0].text
                    else:  # check if color rebus
                        cell_string = etree.tostring(cell).decode('utf-8')
                        m = re.search("background-color:([A-Z]+);", cell_string)
                        if m:
                            subst = m.group(1)
                        else:
                            subst = ''
                if subst:
                    if subst not in rebus:
                        # First sighting of this rebus value: assign it a
                        # single character, preferring the long-hand table.
                        if subst in REBUS_LONG_HANDS:
                            rebus_val = REBUS_LONG_HANDS[subst]
                            if rebus_val in REBUS_SHORT_HANDS:
                                REBUS_SHORT_HANDS.remove(rebus_val)
                        else:
                            rebus_val = REBUS_SHORT_HANDS.pop()
                        rebus[subst] = rebus_val
                        rebus_order.append(subst)
                    letter = rebus[subst]
            if cell_type:
                # the special cell's letter should be represented in lower case
                letter = letter.lower()
                if not special_type:
                    # hopefully there shouldn't be both shades and circles in
                    # the same puzzle - if that is the case, only the last value
                    # will be put up in the header
                    special_type = cell_type
            row_data += letter
        xd.grid.append(row_data)
    if len(rebus):
        rebus = ["%s=%s" % (rebus[x], x.upper()) for x in rebus_order]
        xd.set_header("Rebus", ','.join(rebus))
    if special_type:
        xd.set_header("Special", special_type)
    across_div = root.cssselect('#ACluesPan') or root.cssselect(xwiprefix + 'ACluesPan')
    down_div = root.cssselect('#DCluesPan') or root.cssselect(xwiprefix + 'DCluesPan')
    if across_div and down_div:  # normal puzzle
        _process_clues(xd, 'A', across_div)  # add across clues
        _process_clues(xd, 'D', down_div)  # add down clues
    elif across_div:  # uniclue puzzle?
        _process_uniclues(xd, across_div)
    else:
        raise XWordInfoParseError("No clue divs found.")
    return xd
def _process_notes(xd, xwiprefix, root):
    """Set xd's "Notes" header from the page's notepad div, if present.

    For uniclue puzzles without a notepad, a fixed explanatory note is used.
    """
    note_div = root.cssselect('#notepad') or root.cssselect(xwiprefix + 'NotepadDiv')
    if note_div:
        # Serialize, turn breaks/labels into newlines, then re-parse to get
        # plain text; finally collapse newlines into spaces.
        note_div_string = etree.tostring(note_div[0]).decode('utf-8')
        note_div_string = note_div_string.replace("<br/>", "\n")
        note_div_string = note_div_string.replace("{*Notepad:*}", "\n")
        # NOTE(review): the literal below looks like a mis-rendered character
        # (possibly an entity such as "&#13;") — confirm against upstream.
        note_div_string = note_div_string.replace(" ", "\n")
        note_div_string = note_div_string.strip()
        note_div = html.fromstring(note_div_string)
        note_text = note_div.text_content()
        note_text = note_text.replace("\n", " ")
        note_text = note_text.strip()
        xd.set_header("Notes", note_text)
    elif root.cssselect(xwiprefix + 'UnicluePan'):
        note_text = ("This was published as a uniclue puzzle in print. " +
                     "All the clues appear in a single list, combining Across and Down. " +
                     "When two answers share a number, they also share a clue.")
        xd.set_header("Notes", note_text)
def _process_clues(xd, clueprefix, clues_div):
    """Append one direction's clues ('A' or 'D') from *clues_div* to *xd*.

    The div alternates number-div / clue-div pairs; each clue text ends
    with " : ANSWER" and contains exactly one anchor holding the answer.
    """
    direction = 'Across' if clueprefix == 'A' else 'Down'
    error_text = 'Parsing %s clues failed. ' % direction
    numclue_divs = clues_div[0].cssselect('.numclue div')
    if len(numclue_divs) % 2:
        raise XWordInfoParseError(error_text +
                                  'Either the number of numbers does not match the ' +
                                  'number of clues, or there are additional unknown divs.')
    # Walk (number, clue) pairs in document order.
    for num_div, clue_div in zip(numclue_divs[::2], numclue_divs[1::2]):
        num = num_div.text
        clue_end = clue_div.text.rfind(' :')
        if clue_end < 0:
            raise XWordInfoParseError(error_text +
                                      'Malformed clue for number %s.' % num)
        clue = clue_div.text[:clue_end]
        anchors = clue_div.cssselect('a')
        if len(anchors) != 1:
            raise XWordInfoParseError(error_text +
                                      'Not exactly one anchor in clue div for number %s.' % num)
        xd.clues.append(((clueprefix, num), clue, anchors[0].text))
def _process_uniclues(xd, clues_div):
    """Append clues from a uniclue list (Across and Down combined) to *xd*.

    Answers are matched to grid positions by consuming xd.iteranswers() in
    order; Down clues are buffered so all Across clues come first.
    """
    error_text = 'Parsing suspected uniclues failed. '
    grid_answers = xd.iteranswers()
    down_clues = []
    numclue_divs = clues_div[0].cssselect('.numclue div')
    if len(numclue_divs) % 2 != 0:
        raise XWordInfoParseError(error_text +
                                  'Either the number of numbers does not match the ' +
                                  'number of clues, or there are additional unknown divs.')
    for i in range(0, len(numclue_divs), 2):
        num = numclue_divs[i].text
        clue_div = numclue_divs[i + 1]
        clue_end = clue_div.text.rfind(' :')
        if clue_end < 0:
            raise XWordInfoParseError(error_text +
                                      'Malformed clue for number %s.' % num)
        clue = clue_div.text[:clue_end]
        anchor = clue_div.cssselect('a')
        # One anchor = one answer; two anchors = an Across and a Down that
        # share this number (and this clue).
        if not anchor or len(anchor) > 2:
            raise XWordInfoParseError(error_text +
                                      'Neither 1 nor 2 anchors in clue div for number %s.' % num)
        for a in anchor:
            answer = a.text
            # Consume the next grid answer; its direction decides the list.
            direction, grid_num, _ = next(grid_answers)
            if direction == 'A':
                xd.clues.append(((direction, grid_num), clue, answer))
            else:
                down_clues.append(((direction, grid_num), clue, answer))
    # Emit buffered Down clues after all Across clues.
    for clue in down_clues:
        xd.clues.append(clue)
if __name__ == "__main__":
    # CLI: parse each file/glob given on the command line and print the
    # resulting puzzle in xd text format.
    import sys
    from .utils import find_files
    for fn, contents in find_files(*sys.argv[1:]):
        xd = parse_xwordinfo(contents, fn)
        print("--- %s ---" % fn)
        print(xd.to_unicode())
| 38.870229 | 122 | 0.534564 |
27158d062a9d07a0582cefd29bafac243d1e9d25 | 218 | py | Python | example/jam_mei_academia/cadastros_basicos/views.py | PqES/forkuptool | ae7caf33642245618ff59cdc6ff7eb94ea125034 | [
"MIT"
] | null | null | null | example/jam_mei_academia/cadastros_basicos/views.py | PqES/forkuptool | ae7caf33642245618ff59cdc6ff7eb94ea125034 | [
"MIT"
] | null | null | null | example/jam_mei_academia/cadastros_basicos/views.py | PqES/forkuptool | ae7caf33642245618ff59cdc6ff7eb94ea125034 | [
"MIT"
] | 1 | 2021-03-17T17:25:37.000Z | 2021-03-17T17:25:37.000Z | from django.http import HttpResponse
from django.shortcuts import render
def index(request):
    """Render the application's base admin template."""
    # Previously returned a plain HttpResponse used to verify the app
    # configuration (workbook page 57, original text in Portuguese):
    #return HttpResponse("Teste de configuração da aplicação - apostila página 57.")
    return render(request,"admin/base.html")
| 31.142857 | 81 | 0.798165 |
74095717bc6150279d16cbef4bd79458118d40d8 | 21,213 | py | Python | pettingzoo/sisl/pursuit/pursuit_base.py | MarioJayakumar/PettingZoo | 0673d44c33ae1843a773babf5e6595baf8214664 | [
"MIT"
] | null | null | null | pettingzoo/sisl/pursuit/pursuit_base.py | MarioJayakumar/PettingZoo | 0673d44c33ae1843a773babf5e6595baf8214664 | [
"MIT"
] | null | null | null | pettingzoo/sisl/pursuit/pursuit_base.py | MarioJayakumar/PettingZoo | 0673d44c33ae1843a773babf5e6595baf8214664 | [
"MIT"
] | null | null | null | import glob
import os
from os.path import join
from subprocess import call
import numpy as np
from gym import spaces
from gym.utils import seeding
import pygame
from .utils import agent_utils
from .utils.agent_layer import AgentLayer
from .utils.controllers import RandomPolicy, SingleActionPolicy
from .utils import two_d_maps
class Pursuit():
def __init__(self, seed=0, **kwargs):
"""
In evade purusit a set of pursuers must 'tag' a set of evaders
Required arguments:
xs, ys: World size
reward_mech: local or global reward mechanism
n_evaders
n_pursuers
obs_range: how far each agent can see
Optional arguments:
Ally layer: list of pursuers
Opponent layer: list of evaders
pursuer controller: stationary policy of ally pursuers
evader controller: stationary policy of opponent evaders
catchr: reward for 'tagging' a single evader
caughtr: reward for getting 'tagged' by a pursuer
train_pursuit: flag indicating if we are simulating pursuers or evaders
max_frames: after how many frames should the game end
n_catch: how surrounded evader needs to be, before removal
random_opponents: randomized number of evaders on reset
max_opponents: maximum number of random evaders on reset
freeze_evaders: toggle evaders move or not
term_pursuit: reward for pursuer who catches an evader
urgency_reward: reward added in each step
train_pursuit: toggles whether pursuers are rewarded or evaders
surround: toggles surround condition for evader removal
constraint_window: window in which agents can randomly spawn
"""
self.xs = kwargs.pop('xs', 16)
self.ys = kwargs.pop('ys', 16)
xs = self.xs
ys = self.ys
self.map_matrix = two_d_maps.rectangle_map(self.xs, self.ys)
self.max_frames = kwargs.pop("max_frames", 500)
self.seed(seed)
self._reward_mech = kwargs.pop('reward_mech', 'local')
self.n_evaders = kwargs.pop('n_evaders', 30)
self.n_pursuers = kwargs.pop('n_pursuers', 8)
self.num_agents = self.n_pursuers
self.latest_reward_state = [0 for _ in range(self.num_agents)]
self.latest_done_state = [False for _ in range(self.num_agents)]
self.latest_obs = [None for _ in range(self.num_agents)]
# can see 7 grids around them by default
self.obs_range = kwargs.pop('obs_range', 7)
# assert self.obs_range % 2 != 0, "obs_range should be odd"
self.obs_offset = int((self.obs_range - 1) / 2)
self.pursuers = agent_utils.create_agents(
self.n_pursuers, self.map_matrix, self.obs_range, self.np_random)
self.evaders = agent_utils.create_agents(
self.n_evaders, self.map_matrix, self.obs_range, self.np_random)
self.pursuer_layer = kwargs.pop(
'ally_layer', AgentLayer(xs, ys, self.pursuers))
self.evader_layer = kwargs.pop(
'opponent_layer', AgentLayer(xs, ys, self.evaders))
self.layer_norm = kwargs.pop('layer_norm', 10)
self.n_catch = kwargs.pop('n_catch', 2)
self.random_opponents = kwargs.pop('random_opponents', False)
self.max_opponents = kwargs.pop('max_opponents', 10)
n_act_purs = self.pursuer_layer.get_nactions(0)
n_act_ev = self.evader_layer.get_nactions(0)
self.freeze_evaders = kwargs.pop('freeze_evaders', False)
if self.freeze_evaders:
self.evader_controller = kwargs.pop(
'evader_controller', SingleActionPolicy(4))
self.pursuer_controller = kwargs.pop(
'pursuer_controller', SingleActionPolicy(4))
else:
self.evader_controller = kwargs.pop(
'evader_controller', RandomPolicy(n_act_purs, self.np_random))
self.pursuer_controller = kwargs.pop(
'pursuer_controller', RandomPolicy(n_act_ev, self.np_random))
self.current_agent_layer = np.zeros((xs, ys), dtype=np.int32)
self.catchr = kwargs.pop('catchr', 0.01)
self.caughtr = kwargs.pop('caughtr', -0.01)
self.term_pursuit = kwargs.pop('term_pursuit', 5.0)
self.urgency_reward = kwargs.pop('urgency_reward', 0.0)
self.ally_actions = np.zeros(n_act_purs, dtype=np.int32)
self.opponent_actions = np.zeros(n_act_ev, dtype=np.int32)
self.train_pursuit = kwargs.pop('train_pursuit', True)
if self.train_pursuit:
self.low = np.array([0.0 for i in range(3 * self.obs_range**2)])
self.high = np.array([1.0 for i in range(3 * self.obs_range**2)])
self.action_space = [spaces.Discrete(
n_act_purs) for _ in range(self.n_pursuers)]
self.observation_space = [spaces.Box(low=0, high=5, shape=(
self.obs_range, self.obs_range), dtype=np.float32) for _ in range(self.n_pursuers)]
self.local_obs = np.zeros(
(self.n_pursuers, self.obs_range, self.obs_range)) # Nagents X 3 X xsize X ysize
self.act_dims = [n_act_purs for i in range(self.n_pursuers)]
else:
self.low = np.array([0.0 for i in range(3 * self.obs_range**2)])
self.high = np.array([1.0 for i in range(3 * self.obs_range**2)])
self.action_space = [spaces.Discrete(
n_act_ev) for _ in range(self.n_evaders)]
self.observation_space = [spaces.Box(low=0, high=5, shape=(
self.obs_range, self.obs_range), dtype=np.float32) for _ in range(self.n_evaders)]
self.local_obs = np.zeros(
(self.n_evaders, self.obs_range, self.obs_range)) # Nagents X 3 X xsize X ysize
self.act_dims = [n_act_purs for i in range(self.n_evaders)]
self.pursuers_gone = np.array([False for i in range(self.n_pursuers)])
self.evaders_gone = np.array([False for i in range(self.n_evaders)])
self.surround = kwargs.pop('surround', True)
self.constraint_window = kwargs.pop('constraint_window', 1.0)
self.surround_mask = np.array([[-1, 0], [1, 0], [0, 1], [0, -1]])
self.model_state = np.zeros(
(4,) + self.map_matrix.shape, dtype=np.float32)
self.renderOn = False
self.pixel_scale = 30
self.clock = pygame.time.Clock()
self.frames = 0
self.reset()
def close(self):
if self.renderOn:
pygame.event.pump()
pygame.display.quit()
pygame.quit()
#################################################################
# The functions below are the interface with MultiAgentSiulator #
#################################################################
@property
def agents(self):
return self.pursuers
@property
def reward_mech(self):
return self._reward_mech
def seed(self, seed=None):
self.np_random, seed_ = seeding.np_random(seed)
return [seed_]
def get_param_values(self):
return self.__dict__
def reset(self):
self.pursuers_gone.fill(False)
self.evaders_gone.fill(False)
if self.random_opponents:
if self.train_pursuit:
self.n_evaders = self.np_random.randint(1, self.max_opponents)
else:
self.n_pursuers = self.np_random.randint(1, self.max_opponents)
x_window_start = self.np_random.uniform(0.0, 1.0 - self.constraint_window)
y_window_start = self.np_random.uniform(0.0, 1.0 - self.constraint_window)
xlb, xub = int(self.xs * x_window_start), int(self.xs * (x_window_start + self.constraint_window))
ylb, yub = int(self.ys * y_window_start), int(self.ys * (y_window_start + self.constraint_window))
constraints = [[xlb, xub], [ylb, yub]]
self.pursuers = agent_utils.create_agents(self.n_pursuers, self.map_matrix, self.obs_range, self.np_random,
randinit=True, constraints=constraints)
self.pursuer_layer = AgentLayer(self.xs, self.ys, self.pursuers)
self.evaders = agent_utils.create_agents(self.n_evaders, self.map_matrix, self.obs_range, self.np_random,
randinit=True, constraints=constraints)
self.evader_layer = AgentLayer(self.xs, self.ys, self.evaders)
self.latest_reward_state = [0 for _ in range(self.num_agents)]
self.latest_done_state = [False for _ in range(self.num_agents)]
self.latest_obs = [None for _ in range(self.num_agents)]
self.model_state[0] = self.map_matrix
self.model_state[1] = self.pursuer_layer.get_state_matrix()
self.model_state[2] = self.evader_layer.get_state_matrix()
self.frames = 0
self.renderOn = False
return self.safely_observe(0)
def step(self, action, agent_id, is_last):
if self.train_pursuit:
agent_layer = self.pursuer_layer
opponent_layer = self.evader_layer
opponent_controller = self.evader_controller
else:
agent_layer = self.evader_layer
opponent_layer = self.pursuer_layer
opponent_controller = self.pursuer_controller
self.latest_reward_state = self.reward()
# actual action application
agent_layer.move_agent(agent_id, action)
if is_last:
ev_remove, pr_remove, pursuers_who_remove = self.remove_agents()
for i in range(opponent_layer.n_agents()):
# controller input should be an observation, but doesn't matter right now
a = opponent_controller.act(self.model_state)
opponent_layer.move_agent(i, a)
self.latest_reward_state += self.term_pursuit * pursuers_who_remove
self.latest_reward_state += self.urgency_reward
self.model_state[0] = self.map_matrix
self.model_state[1] = self.pursuer_layer.get_state_matrix()
self.model_state[2] = self.evader_layer.get_state_matrix()
if self.reward_mech == 'global' and is_last:
meanVal = self.latest_reward_state.mean()
self.latest_reward_state = [
meanVal for _ in range(len(self.latest_reward_state))]
if self.renderOn:
self.clock.tick(15)
else:
self.clock.tick(2000)
self.frames = self.frames + 1
def draw_model_state(self):
# -1 is building pixel flag
x_len, y_len = self.model_state[0].shape
for x in range(x_len):
for y in range(y_len):
pos = pygame.Rect(
self.pixel_scale * x, self.pixel_scale * y, self.pixel_scale, self.pixel_scale)
col = (0, 0, 0)
if self.model_state[0][x][y] == -1:
col = (255, 255, 255)
pygame.draw.rect(self.screen, col, pos)
def draw_pursuers_observations(self):
for i in range(self.pursuer_layer.n_agents()):
x, y = self.pursuer_layer.get_position(i)
patch = pygame.Surface(
(self.pixel_scale * self.obs_range, self.pixel_scale * self.obs_range))
patch.set_alpha(128)
patch.fill((255, 152, 72))
ofst = self.obs_range / 2.0
self.screen.blit(
patch, (self.pixel_scale * (x - ofst + 1 / 2), self.pixel_scale * (y - ofst + 1 / 2)))
def draw_pursuers(self):
for i in range(self.pursuer_layer.n_agents()):
x, y = self.pursuer_layer.get_position(i)
center = (int(self.pixel_scale * x + self.pixel_scale / 2),
int(self.pixel_scale * y + self.pixel_scale / 2))
col = (255, 0, 0)
pygame.draw.circle(self.screen, col, center, int(self.pixel_scale / 3))
def draw_evaders_observations(self):
for i in range(self.evader_layer.n_agents()):
x, y = self.evader_layer.get_position(i)
patch = pygame.Surface(
(self.pixel_scale * self.obs_range, self.pixel_scale * self.obs_range))
patch.set_alpha(128)
patch.fill((0, 154, 205))
ofst = self.obs_range / 2.0
self.screen.blit(
patch, (self.pixel_scale * (x - ofst), self.pixel_scale * (y - ofst)))
def draw_evaders(self):
for i in range(self.evader_layer.n_agents()):
x, y = self.evader_layer.get_position(i)
center = (int(self.pixel_scale * x + self.pixel_scale / 2),
int(self.pixel_scale * y + self.pixel_scale / 2))
col = (0, 0, 255)
pygame.draw.circle(self.screen, col, center, int(self.pixel_scale / 3))
def render(self):
if not self.renderOn:
pygame.display.init()
self.screen = pygame.display.set_mode(
(self.pixel_scale * self.xs, self.pixel_scale * self.ys))
self.renderOn = True
self.draw_model_state()
if self.train_pursuit:
self.draw_pursuers_observations()
else:
self.draw_evaders_observations()
self.draw_evaders()
self.draw_pursuers()
pygame.display.flip()
def animate(self, act_fn, nsteps, file_name, rate=1.5, verbose=False):
"""
Save an animation to an mp4 file.
"""
# run sim loop
o = self.reset()
file_path = "/".join(file_name.split("/")[0:-1])
temp_name = join(file_path, "temp_0.png")
# generate .pngs
self.save_image(temp_name)
removed = 0
for i in range(nsteps):
a = act_fn(o)
o, r, done, info = self.step(a)
temp_name = join(file_path, "temp_" + str(i + 1) + ".png")
self.save_image(temp_name)
removed += info['removed']
if done:
break
# use ffmpeg to create .pngs to .mp4 movie
ffmpeg_cmd = "ffmpeg -framerate " + str(rate) + " -i " + join(
file_path, "temp_%d.png") + " -c:v libx264 -pix_fmt yuv420p " + file_name
call(ffmpeg_cmd.split())
# clean-up by removing .pngs
map(os.remove, glob.glob(join(file_path, "temp_*.png")))
def save_image(self, file_name):
self.render()
capture = pygame.surfarray.array3d(self.screen)
xl, xh = -self.obs_offset - 1, self.xs + self.obs_offset + 1
yl, yh = -self.obs_offset - 1, self.ys + self.obs_offset + 1
window = pygame.Rect(xl, yl, xh, yh)
subcapture = capture.subsurface(window)
pygame.image.save(subcapture, file_name)
def reward(self):
es = self.evader_layer.get_state_matrix() # evader positions
rewards = [
self.catchr * np.sum(es[np.clip(
self.pursuer_layer.get_position(
i)[0] + self.surround_mask[:, 0], 0, self.xs - 1
), np.clip(
self.pursuer_layer.get_position(i)[1] + self.surround_mask[:, 1], 0, self.ys - 1)])
for i in range(self.n_pursuers)
]
return np.array(rewards)
@property
def is_terminal(self):
# ev = self.evader_layer.get_state_matrix() # evader positions
# if np.sum(ev) == 0.0:
if self.evader_layer.n_agents() == 0:
return True
return False
    def update_ally_controller(self, controller):
        """Replace the controller that drives the ally agents."""
        self.ally_controller = controller
    def update_opponent_controller(self, controller):
        """Replace the controller that drives the opponent agents."""
        self.opponent_controller = controller
    def n_agents(self):
        """Return the number of pursuer agent slots."""
        return self.pursuer_layer.n_agents()
def safely_observe(self, i):
if self.train_pursuit:
agent_layer = self.pursuer_layer
else:
agent_layer = self.evader_layer
obs = self.collect_obs(agent_layer)
return obs[i]
def collect_obs(self, agent_layer):
if self.train_pursuit:
gone_flags = self.pursuers_gone
else:
gone_flags = self.evaders_gone
obs = []
nage = 0
for i in range(self.n_agents()):
if gone_flags[i]:
obs.append(None)
else:
o = self.collect_obs_by_idx(agent_layer, nage)
obs.append(o)
nage += 1
return obs
    def collect_obs_by_idx(self, agent_layer, agent_idx):
        """Build the local observation window for the agent at *agent_idx*.

        Encodes, per cell: 0 empty, 1 pursuer, 2 evader, 3 pursuer+evader,
        4 wall, then normalises by ``self.layer_norm``.
        """
        # returns a flattened array of all the observations
        xp, yp = agent_layer.get_position(agent_idx)
        xlo, xhi, ylo, yhi, xolo, xohi, yolo, yohi = self.obs_clip(xp, yp)
        raw_model_state = np.abs(
            self.model_state[0:3, xlo:xhi, ylo:yhi])
        # need to compile all 3 layers into a single layer
        # 0 is empty
        # 1 is a pursuer
        # 2 is an evader
        # 3 is both pursuer and evader
        # 4 is a wall
        self.local_obs[agent_idx, xolo:xohi, yolo:yohi] = raw_model_state[0] * (4)
        self.local_obs[agent_idx, xolo:xohi, yolo:yohi] += raw_model_state[1]
        self.local_obs[agent_idx, xolo:xohi, yolo:yohi] += raw_model_state[2] * 2
        # NOTE(review): this divides the *entire* buffer, so slots written on
        # earlier calls get renormalised again -- confirm this is intended.
        self.local_obs = self.local_obs / self.layer_norm
        return self.local_obs[agent_idx]
def obs_clip(self, x, y):
xld = x - self.obs_offset
xhd = x + self.obs_offset
yld = y - self.obs_offset
yhd = y + self.obs_offset
xlo, xhi, ylo, yhi = (np.clip(xld, 0, self.xs - 1), np.clip(xhd, 0, self.xs - 1),
np.clip(yld, 0, self.ys - 1), np.clip(yhd, 0, self.ys - 1))
xolo, yolo = abs(np.clip(xld, -self.obs_offset, 0)
), abs(np.clip(yld, -self.obs_offset, 0))
xohi, yohi = xolo + (xhi - xlo), yolo + (yhi - ylo)
return xlo, xhi + 1, ylo, yhi + 1, xolo, xohi + 1, yolo, yohi + 1
    def remove_agents(self):
        """
        Remove agents that are caught. Return tuple (n_evader_removed, n_pursuer_removed, purs_sur)
        purs_sur: bool array, which pursuers surrounded an evader
        """
        n_pursuer_removed = 0
        n_evader_removed = 0
        removed_evade = []
        removed_pursuit = []

        ai = 0  # index into the compacted evader layer (skips gone evaders)
        rems = 0  # evaders flagged for removal so far; keeps later indices valid
        xpur, ypur = np.nonzero(self.model_state[1])
        # NOTE(review): np.bool is deprecated/removed in recent numpy; bool
        # or np.bool_ would be the forward-compatible spelling.
        purs_sur = np.zeros(self.n_pursuers, dtype=np.bool)
        for i in range(self.n_evaders):
            if self.evaders_gone[i]:
                continue
            x, y = self.evader_layer.get_position(ai)
            if self.surround:
                # Surround mode: caught when every open neighbouring cell
                # holds at least one pursuer.
                pos_that_catch = self.surround_mask + \
                    self.evader_layer.get_position(ai)
                truths = np.array(
                    [np.equal([xi, yi], pos_that_catch).all(axis=1) for xi, yi in zip(xpur, ypur)])
                if np.sum(truths.any(axis=0)) == self.need_to_surround(x, y):
                    removed_evade.append(ai - rems)
                    self.evaders_gone[i] = True
                    rems += 1
                    tt = truths.any(axis=1)
                    # Credit every pursuer standing on a catching cell.
                    for j in range(self.n_pursuers):
                        xpp, ypp = self.pursuer_layer.get_position(j)
                        tes = np.concatenate(
                            (xpur[tt], ypur[tt])).reshape(2, len(xpur[tt]))
                        tem = tes.T == np.array([xpp, ypp])
                        if np.any(np.all(tem, axis=1)):
                            purs_sur[j] = True
                ai += 1
            else:
                # Non-surround mode: caught when n_catch pursuers share the cell.
                if self.model_state[1, x, y] >= self.n_catch:
                    # add prob remove?
                    removed_evade.append(ai - rems)
                    self.evaders_gone[i] = True
                    rems += 1
                    for j in range(self.n_pursuers):
                        xpp, ypp = self.pursuer_layer.get_position(j)
                        if xpp == x and ypp == y:
                            purs_sur[j] = True
                ai += 1

        ai = 0
        # NOTE(review): this loop reads each pursuer's position but never
        # appends to removed_pursuit, so pursuers are never removed; the
        # probabilistic-removal idea in the comment was never implemented.
        for i in range(self.pursuer_layer.n_agents()):
            if self.pursuers_gone[i]:
                continue
            x, y = self.pursuer_layer.get_position(i)
            # can remove pursuers probabilitcally here?
        for ridx in removed_evade:
            self.evader_layer.remove_agent(ridx)
            n_evader_removed += 1
        for ridx in removed_pursuit:
            self.pursuer_layer.remove_agent(ridx)
            n_pursuer_removed += 1
        return n_evader_removed, n_pursuer_removed, purs_sur
    def need_to_surround(self, x, y):
        """
        Compute the number of surrounding grid cells in x,y position that are open
        (no wall or obstacle)
        """
        tosur = 4
        # Cells on a map edge expose fewer sides.
        if x == 0 or x == (self.xs - 1):
            tosur -= 1
        if y == 0 or y == (self.ys - 1):
            tosur -= 1
        neighbors = self.surround_mask + np.array([x, y])
        for n in neighbors:
            xn, yn = n
            # NOTE(review): the strict inequalities also skip neighbours in
            # row/column 0 (valid cells), not only out-of-bounds ones --
            # confirm whether `0 <= xn` was intended.
            if not 0 < xn < self.xs or not 0 < yn < self.ys:
                continue
            if self.model_state[0][xn, yn] == -1:
                # Blocked by an obstacle; one fewer side needs covering.
                tosur -= 1
        return tosur
| 39.576493 | 115 | 0.582756 |
f1f30b400c4ce4453f354406633f769b52dd0b69 | 292 | py | Python | textblob/__init__.py | casatir/TextBlob | 6c5e92efa4fdb9aa9425338a0c0fd120d4521dda | [
"MIT"
] | 1 | 2021-12-09T18:37:24.000Z | 2021-12-09T18:37:24.000Z | textblob/__init__.py | Neucro/TextBlob | 6c5e92efa4fdb9aa9425338a0c0fd120d4521dda | [
"MIT"
] | 1 | 2021-03-26T00:30:46.000Z | 2021-03-26T00:30:46.000Z | textblob/__init__.py | Neucro/TextBlob | 6c5e92efa4fdb9aa9425338a0c0fd120d4521dda | [
"MIT"
] | 1 | 2021-11-03T12:47:13.000Z | 2021-11-03T12:47:13.000Z | import os
from .blob import TextBlob, Word, Sentence, Blobber, WordList
__version__ = '0.16.0'
__license__ = 'MIT'
__author__ = 'Steven Loria'
# Absolute path to the installed textblob package directory.
PACKAGE_DIR = os.path.dirname(os.path.abspath(__file__))
# Names re-exported as the package's public API (defined in textblob.blob).
__all__ = [
    'TextBlob',
    'Word',
    'Sentence',
    'Blobber',
    'WordList',
]
e51461c5238d7e3c6df7073591f94c267d552f82 | 1,577 | py | Python | packages/tests/stuff/data/valid_gppi/odahuflow_model/entrypoint.py | odahu/odahuflow | 58c3220a266a61bb893cf79c4b994569e3445097 | [
"ECL-2.0",
"Apache-2.0"
] | 12 | 2020-10-13T15:39:52.000Z | 2021-10-11T17:13:42.000Z | packages/tests/stuff/data/valid_gppi/odahuflow_model/entrypoint.py | odahu/odahuflow | 58c3220a266a61bb893cf79c4b994569e3445097 | [
"ECL-2.0",
"Apache-2.0"
] | 475 | 2019-11-18T12:40:47.000Z | 2022-03-29T21:17:38.000Z | packages/tests/stuff/data/valid_gppi/odahuflow_model/entrypoint.py | odahu/odahuflow | 58c3220a266a61bb893cf79c4b994569e3445097 | [
"ECL-2.0",
"Apache-2.0"
] | 4 | 2020-02-25T11:26:10.000Z | 2021-03-10T12:01:00.000Z | import functools
from typing import Tuple, List, Dict, Any, Optional, Type
def init() -> str:
    """
    Initialize the model and report its prediction mode.

    :return: prediction type (matrix or objects)
    """
    banner = 'init'
    print(banner)
    return 'matrix'
def predict_on_matrix(input_matrix: List[List[Any]], provided_columns_names: Optional[List[str]] = None) \
        -> Tuple[List[List[Any]], Tuple[str, ...]]:  # pylint: disable=unused-argument
    """
    Make prediction on a Matrix of values.

    :param input_matrix: data for prediction
    :param provided_columns_names: (Optional). Name of columns for provided matrix.
    :return: result matrix and result column names
    """
    result_rows = [[42]]
    result_columns = ('result',)
    return result_rows, result_columns
@functools.lru_cache()
def info() -> Tuple[List[Dict[str, Any]], List[Dict[str, Any]]]:
    """
    Describe the model's input and output schemas.

    :return: OpenAPI specifications as an (input, output) pair
    """
    input_sample = [
        {'name': "a", 'type': "string", 'required': True, 'example': "1"},
        {'name': "b", 'type': "string", 'example': "2"},
    ]
    output_sample = [
        {'name': "integer", 'type': "string", 'required': True, 'example': '42'},
    ]
    return input_sample, output_sample
def get_output_json_serializer() -> Optional[Type]:
    """
    Return the JSON serializer class to be used for model output.

    :return: serializer type, or ``None`` to use the default serializer
    """
    # This model needs no custom serialization.
    return None
| 23.537313 | 106 | 0.566265 |
653d22535e43b40381bb3b3add869cfe9c35c8b5 | 2,295 | py | Python | sfmutils/result.py | NGTmeaty/sfm-utils | 47ac6b8a894f5b02d947d76c74aa61d59cb5d48d | [
"MIT"
] | 2 | 2016-05-08T06:44:13.000Z | 2016-05-16T15:07:22.000Z | sfmutils/result.py | NGTmeaty/sfm-utils | 47ac6b8a894f5b02d947d76c74aa61d59cb5d48d | [
"MIT"
] | 13 | 2015-12-02T22:00:22.000Z | 2021-10-29T21:01:01.000Z | sfmutils/result.py | NGTmeaty/sfm-utils | 47ac6b8a894f5b02d947d76c74aa61d59cb5d48d | [
"MIT"
] | 4 | 2020-05-27T05:05:05.000Z | 2021-02-12T22:28:47.000Z | STATUS_SUCCESS = "completed success"
STATUS_FAILURE = "completed failure"
STATUS_RUNNING = "running"
STATUS_PAUSED = "paused"
STATUS_STOPPING = "stopping"
class BaseResult:
"""
Keeps track of results.
Subclasses should implement _result_name and _addl_str.
"""
def __init__(self):
self.success = True
self.started = None
self.ended = None
self.infos = []
self.warnings = []
self.errors = []
def __nonzero__(self):
return 1 if self.success else 0
@property
def _result_name(self):
"""
Returns name of the result for including in __str__.
For example, "Harvest"
"""
return ""
def _addl_str(self):
"""
Additional text to add to __str__.
"""
return ""
def __str__(self):
str_text = "{} response is {}.".format(self._result_name, self.success)
if self.started:
str_text += " Started: {}".format(self.started)
if self.ended:
str_text += " Ended: {}".format(self.ended)
str_text += self._str_messages(self.infos, "Informational")
str_text += self._str_messages(self.warnings, "Warning")
str_text += self._str_messages(self.errors, "Error")
str_text += self._addl_str()
return str_text
@staticmethod
def _str_messages(messages, name):
msg_str = ""
if messages:
msg_str += " {} messages are:".format(name)
for (i, msg) in enumerate(messages, start=1):
msg_str += "({}) [{}] {}".format(i, msg.code, msg.message)
return msg_str
class Msg:
    """
    An informational, warning, or error message to be included in a result.
    """

    def __init__(self, code, message, **kwargs):
        """
        :param code: code, which should be some sort of a constant
        :param message: a textual description of the message
        :param kwargs: additional fields and values to be included in the message
        """
        assert code
        assert message
        self.code = code
        self.message = message
        self.extras = kwargs

    def to_map(self):
        """
        Return the message as a dict suitable for serialization.

        Works on a copy of ``extras`` so repeated calls do not pollute the
        message's own state with the ``code``/``message`` keys.
        """
        m = dict(self.extras)
        m['code'] = self.code
        m['message'] = self.message
        return m
| 26.686047 | 81 | 0.581264 |
a7089055bfd4f866bcd8ebdbef4eb0f53696fc61 | 5,609 | py | Python | waveshare-driver/lib/waveshare_epd/epd4in2bc.py | jameslawler/pivsmp | 4c16f416d72b155facb6cad4dcf2e7f012bd9495 | [
"MIT"
] | 1 | 2021-10-29T21:08:29.000Z | 2021-10-29T21:08:29.000Z | waveshare-driver/lib/waveshare_epd/epd4in2bc.py | jameslawler/pivsmp | 4c16f416d72b155facb6cad4dcf2e7f012bd9495 | [
"MIT"
] | null | null | null | waveshare-driver/lib/waveshare_epd/epd4in2bc.py | jameslawler/pivsmp | 4c16f416d72b155facb6cad4dcf2e7f012bd9495 | [
"MIT"
] | null | null | null | # *****************************************************************************
# * | File : epd4in2bc.py
# * | Author : Waveshare team
# * | Function : Electronic paper driver
# * | Info :
# *----------------
# * | This version: V4.0
# * | Date : 2019-06-20
# # | Info : python demo
# -----------------------------------------------------------------------------
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documnetation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS OR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import logging
from . import epdconfig
# Display resolution
EPD_WIDTH = 400   # panel width in pixels
EPD_HEIGHT = 300  # panel height in pixels
class EPD:
    """Driver for the Waveshare 4.2inch three-colour (black/white/red) e-paper panel."""

    def __init__(self):
        # Pin assignments come from the board configuration module.
        self.reset_pin = epdconfig.RST_PIN
        self.dc_pin = epdconfig.DC_PIN
        self.busy_pin = epdconfig.BUSY_PIN
        self.cs_pin = epdconfig.CS_PIN
        self.width = EPD_WIDTH
        self.height = EPD_HEIGHT

    # Hardware reset
    def reset(self):
        """Pulse the reset pin to hardware-reset the panel controller."""
        epdconfig.digital_write(self.reset_pin, 1)
        epdconfig.delay_ms(200)
        epdconfig.digital_write(self.reset_pin, 0)
        epdconfig.delay_ms(10)
        epdconfig.digital_write(self.reset_pin, 1)
        epdconfig.delay_ms(200)

    def send_command(self, command):
        """Send one command byte over SPI (DC low selects command mode)."""
        epdconfig.digital_write(self.dc_pin, 0)
        epdconfig.digital_write(self.cs_pin, 0)
        epdconfig.spi_writebyte([command])
        epdconfig.digital_write(self.cs_pin, 1)

    def send_data(self, data):
        """Send one data byte over SPI (DC high selects data mode)."""
        epdconfig.digital_write(self.dc_pin, 1)
        epdconfig.digital_write(self.cs_pin, 0)
        epdconfig.spi_writebyte([data])
        epdconfig.digital_write(self.cs_pin, 1)

    def ReadBusy(self):
        """Block until the panel reports idle on the BUSY pin."""
        logging.debug("e-Paper busy")
        while(epdconfig.digital_read(self.busy_pin) == 0):      # 0: idle, 1: busy
            epdconfig.delay_ms(100)
        logging.debug("e-Paper busy release")

    def init(self):
        """Power on and configure the panel; return 0 on success, -1 on failure."""
        if (epdconfig.module_init() != 0):
            return -1
        self.reset()
        self.send_command(0x06) # BOOSTER_SOFT_START
        self.send_data (0x17)
        self.send_data (0x17)
        self.send_data (0x17) # 07 0f 17 1f 27 2F 37 2f
        self.send_command(0x04) # POWER_ON
        self.ReadBusy()
        self.send_command(0x00) # PANEL_SETTING
        self.send_data(0x0F) # LUT from OTP
        return 0

    def getbuffer(self, image):
        """Convert a PIL image to the panel's packed 1-bit buffer.

        Accepts a width x height image (horizontal) or height x width
        (vertical; rotated into place). Bits default to 0xFF (white) and
        black pixels clear the corresponding bit.
        """
        # logging.debug("bufsiz = ",int(self.width/8) * self.height)
        buf = [0xFF] * (int(self.width/8) * self.height)
        image_monocolor = image.convert('1')
        imwidth, imheight = image_monocolor.size
        pixels = image_monocolor.load()
        # logging.debug("imwidth = %d, imheight = %d",imwidth,imheight)
        if(imwidth == self.width and imheight == self.height):
            logging.debug("Horizontal")
            for y in range(imheight):
                for x in range(imwidth):
                    # Set the bits for the column of pixels at the current position.
                    if pixels[x, y] == 0:
                        buf[int((x + y * self.width) / 8)] &= ~(0x80 >> (x % 8))
        elif(imwidth == self.height and imheight == self.width):
            logging.debug("Vertical")
            for y in range(imheight):
                for x in range(imwidth):
                    # Rotate 90 degrees so the image lands in panel orientation.
                    newx = y
                    newy = self.height - x - 1
                    if pixels[x, y] == 0:
                        buf[int((newx + newy*self.width) / 8)] &= ~(0x80 >> (y % 8))
        return buf

    def display(self, imageblack, imagered):
        """Push black (0x10) and red (0x13) buffers, then trigger a refresh."""
        self.send_command(0x10)
        for i in range(0, int(self.width * self.height / 8)):
            self.send_data(imageblack[i])
        self.send_command(0x13)
        for i in range(0, int(self.width * self.height / 8)):
            self.send_data(imagered[i])
        self.send_command(0x12)  # DISPLAY_REFRESH
        self.ReadBusy()

    def Clear(self):
        """Blank both colour planes to white and refresh the panel."""
        self.send_command(0x10)
        for i in range(0, int(self.width * self.height / 8)):
            self.send_data(0xFF)
        self.send_command(0x13)
        for i in range(0, int(self.width * self.height / 8)):
            self.send_data(0xFF)
        self.send_command(0x12)
        self.ReadBusy()

    def sleep(self):
        """Power the panel down into deep sleep and release the interface."""
        self.send_command(0x02) # POWER_OFF
        self.ReadBusy()
        self.send_command(0x07) # DEEP_SLEEP
        self.send_data(0xA5) # check code

        epdconfig.module_exit()
| 37.644295 | 85 | 0.567481 |
0f86b8fab39ab1da368cd89ef2639133c236e8b1 | 400 | py | Python | costar_task_plan/python/costar_task_plan/robotics/tom/task.py | cpaxton/costar_plan | be5c12f9d0e9d7078e6a5c283d3be059e7f3d040 | [
"Apache-2.0"
] | 66 | 2018-10-31T04:58:53.000Z | 2022-03-17T02:32:25.000Z | costar_task_plan/python/costar_task_plan/robotics/tom/task.py | cpaxton/costar_plan | be5c12f9d0e9d7078e6a5c283d3be059e7f3d040 | [
"Apache-2.0"
] | 8 | 2018-10-23T21:19:25.000Z | 2018-12-03T02:08:41.000Z | costar_task_plan/python/costar_task_plan/robotics/tom/task.py | cpaxton/costar_plan | be5c12f9d0e9d7078e6a5c283d3be059e7f3d040 | [
"Apache-2.0"
] | 25 | 2018-10-19T00:54:17.000Z | 2021-10-10T08:28:15.000Z |
# Create the task model for the oranges task.
# This will create a number of oranges and instantiate all the appropriate
# DMP option distributions with pre and post condiitons, so that we can
# test things out in the toy sim.
def GetOrangesTask(num_oranges=1):
    """Create the task model for the TOM oranges task.

    :param num_oranges: number of oranges to instantiate (must be >= 1)
    :raises RuntimeError: if fewer than one orange is requested
    """
    if num_oranges < 1:
        # The original implicit string concatenation was missing a space
        # ("...one orangeto be able...").
        raise RuntimeError('Must have at least one orange '
                           'to be able to plan the TOM oranges task.')
| 28.571429 | 74 | 0.73 |
6ce85e9179ca177dae7ddd1037384bfac7b1b686 | 6,366 | py | Python | bundlebuilder/models/entity.py | CiscoSecurity/tr-05-ctim-bundle-builder | fe4f1e877112aebc5f79ec04b6c74dc9e9f0113a | [
"MIT"
] | 3 | 2019-12-11T00:21:26.000Z | 2020-11-02T18:16:36.000Z | bundlebuilder/models/entity.py | CiscoSecurity/tr-05-ctim-bundle-builder | fe4f1e877112aebc5f79ec04b6c74dc9e9f0113a | [
"MIT"
] | null | null | null | bundlebuilder/models/entity.py | CiscoSecurity/tr-05-ctim-bundle-builder | fe4f1e877112aebc5f79ec04b6c74dc9e9f0113a | [
"MIT"
] | 2 | 2020-06-29T18:40:37.000Z | 2020-07-31T20:16:47.000Z | from abc import (
ABCMeta,
abstractmethod,
)
from hashlib import sha256
from inspect import (
Signature,
Parameter,
)
from itertools import chain
from typing import (
Optional,
Any,
List,
Iterator,
Tuple,
)
from uuid import uuid4
from inflection import underscore
from marshmallow.exceptions import (
ValidationError as MarshmallowValidationError
)
from marshmallow.schema import Schema
from ..constants import SCHEMA_VERSION
from ..exceptions import (
SchemaError,
ValidationError as BundleBuilderValidationError,
)
class EntitySchema(Schema):
    """Base marshmallow schema for CTIM entities."""

    class Meta:
        # Preserve declared-field order so serialized JSON is deterministic.
        ordered = True
class EntityMeta(ABCMeta):
    """Metaclass wiring each entity class to its CTIM type name and schema.

    Derives ``type`` from the class name when not given, enforces that
    ``schema`` is an ``EntitySchema`` subclass, and exposes the schema's
    fields as the class's keyword-only ``__signature__``.
    """

    def __init__(cls, cls_name, cls_bases, cls_dict):
        cls_type = cls_dict.get('type')
        if cls_type is None:
            cls.type = underscore(cls_name)  # CamelCase -> snake_case

        cls_schema = cls_dict.get('schema')
        if cls_schema is None:
            # If there is no schema then make the class kind of abstract by not
            # allowing to instantiate it although allowing inheritance from it.
            def __init__(self, **kwargs):
                raise SchemaError(
                    f'{cls}.schema must be a subclass of {EntitySchema}.'
                )

            # Synthesize a placeholder schema whose constructor always raises.
            cls.schema = type(
                f'{cls_name}Schema',
                (Schema,),
                {
                    '__init__': __init__,
                    '__module__': cls.__module__,
                },
            )

            super().__init__(cls_name, cls_bases, cls_dict)
            return

        if not (
            isinstance(cls_schema, type) and
            issubclass(cls_schema, EntitySchema)
        ):
            raise SchemaError(
                f'{cls}.schema must be a subclass of {EntitySchema}.'
            )

        # Advertise the schema's fields as keyword-only constructor parameters
        # (helps introspection / IDE completion for entity classes).
        cls.__signature__ = Signature([
            Parameter(field_name, Parameter.KEYWORD_ONLY, annotation=field)
            for field_name, field in cls_schema().declared_fields.items()
        ])

        super().__init__(cls_name, cls_bases, cls_dict)
class BaseEntity(metaclass=EntityMeta):
    """Abstract base class for arbitrary CTIM entities."""

    def __init__(self, **data):
        """Validate *data* against the class schema and store it as ``self.json``."""
        try:
            self.json = self.schema().load(data)
        except MarshmallowValidationError as error:
            # Re-raise as this library's own validation error type.
            raise BundleBuilderValidationError(*error.args) from error

        self._initialize_missing_fields()

    def __getattr__(self, field: str) -> Optional[Any]:
        # Fall back to the JSON payload for unknown attributes (None if absent).
        return self.json.get(field)

    def __getitem__(self, field: str) -> Any:
        # Dict-style access; raises KeyError for missing fields.
        return self.json[field]

    def __str__(self):
        return self.__class__.__name__

    def get(self, field: str, default: Any = None) -> Any:
        """Return the value of *field* from the JSON payload, or *default*."""
        return self.json.get(field, default)

    @abstractmethod
    def _initialize_missing_fields(self) -> None:
        """Populate fields the user did not supply; implemented by subclasses."""
        pass
class PrimaryEntity(BaseEntity):
    """Abstract base class for top-level CTIM entities."""

    def _initialize_missing_fields(self) -> None:
        """Fill in type/schema/source defaults, then derive the ID and XIDs."""
        self.json['type'] = self.type
        self.json['schema_version'] = SCHEMA_VERSION

        # Use a dynamic import to break the circular dependency.
        from ..session import get_session
        session = get_session()

        external_id_prefix = session.external_id_prefix
        self.json.setdefault('source', session.source)
        self.json.setdefault('source_uri', session.source_uri)

        # This isn't really a part of the CTIM JSON payload, so extract it out.
        external_id_salt_values: List[str] = sorted(
            self.json.pop('external_id_salt_values', [])
        )

        # Generate and set a transient ID and a list of XIDs only after all the
        # other attributes are already set properly.
        self.json['id'] = self._generate_transient_id(external_id_prefix)
        self.json['external_ids'] = (
            self._generate_external_ids(
                external_id_prefix,
                external_id_salt_values,
            ) + self.json.get('external_ids', [])
        )

        # Make the automatically populated fields be listed before the ones
        # manually specified by the user.
        self.json = {
            'type': self.json.pop('type'),
            'schema_version': self.json.pop('schema_version'),
            'source': self.json.pop('source'),
            'source_uri': self.json.pop('source_uri'),
            'id': self.json.pop('id'),
            'external_ids': self.json.pop('external_ids'),
            **self.json
        }

    def _generate_transient_id(self, external_id_prefix: str) -> str:
        """Build a random, per-instance transient ID for this entity."""
        return 'transient:{prefix}-{type}-{uuid}'.format(
            prefix=external_id_prefix,
            type=self.type,
            uuid=uuid4().hex,
        )

    def _generate_external_ids(
        self,
        external_id_prefix: str,
        external_id_salt_values: List[str],
    ) -> List[str]:
        """Build deterministic external IDs by SHA-256-hashing the seed values."""
        return [
            '{prefix}-{type}-{sha256}'.format(
                prefix=external_id_prefix,
                type=self.type,
                sha256=sha256(
                    bytes(external_id_deterministic_value, 'utf-8')
                ).hexdigest(),
            )
            for external_id_deterministic_value
            in self._generate_external_id_deterministic_values(
                external_id_prefix,
                external_id_salt_values,
            )
        ]

    def _generate_external_id_deterministic_values(
        self,
        external_id_prefix: str,
        external_id_salt_values: List[str],
    ) -> Iterator[str]:
        """Yield the '|'-joined value strings that get hashed into XIDs."""
        for external_id_seed_values in (
            self._generate_external_id_seed_values()
        ):
            # Chain together all the values available.
            # Filter out any empty values.
            # Join up all the values left.
            yield '|'.join(
                filter(
                    bool,
                    chain(
                        (external_id_prefix,) + external_id_seed_values,
                        external_id_salt_values,
                    )
                )
            )

    @abstractmethod
    def _generate_external_id_seed_values(self) -> Iterator[Tuple[str]]:
        """Yield tuples of values that deterministically identify this entity."""
        pass
class SecondaryEntity(BaseEntity):
    """Abstract base class for in-line CTIM entities."""

    def _initialize_missing_fields(self) -> None:
        # In-line entities carry no ID/source fields, so nothing to add.
        pass
| 29.472222 | 79 | 0.591737 |
4c48c82d2c2fdd693b770e7f91b7f345409f0667 | 12,863 | py | Python | src/pathme/reactome/rdf_sparql.py | brucetony/PathMe | c7d758ff76f6787a4eb349b95f9c06bf1afb0754 | [
"Apache-2.0"
] | null | null | null | src/pathme/reactome/rdf_sparql.py | brucetony/PathMe | c7d758ff76f6787a4eb349b95f9c06bf1afb0754 | [
"Apache-2.0"
] | null | null | null | src/pathme/reactome/rdf_sparql.py | brucetony/PathMe | c7d758ff76f6787a4eb349b95f9c06bf1afb0754 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""This module contains the methods that run SPARQL queries to convert the Reactome Pathways to BEL."""
import logging
import os
from collections import defaultdict
from typing import Any, Dict, List, Set, Tuple, Union
import rdflib
from pathme.constants import REACTOME_BEL
from pathme.reactome.convert_to_bel import convert_to_bel
from pathme.utils import get_pathway_statitics, parse_rdf, query_result_to_dict
from pybel import to_pickle
from rdflib import URIRef
from rdflib.namespace import DC, DCTERMS, Namespace, OWL, RDF, RDFS, SKOS, XSD
from tqdm import tqdm
from ..constants import REACTOME_SPECIES_TO_ID
log = logging.getLogger(__name__)
"""SPARQL string queries"""
#: SPARQL prefixes.
PREFIXES = {
'owl': OWL,
'xsd': XSD,
'rdfs': RDFS,
'rdf': RDF,
'dcterms': DCTERMS,
'dc': DC,
'skos': SKOS,
'foaf': Namespace('http://xmlns.com/foaf/0.1/'),
'dbpedia': Namespace('http://dbpedia.org/property/'),
'dbpedia2': Namespace('http://dbpedia.org/'),
'biopax3': Namespace('http://www.biopax.org/release/biopax-level3.owl#'),
}
#: SPARQL query string to get all the primary types of entries (Pathway, BiochemicalReaction) in a pathway network.
GET_ALL_TYPES = """
SELECT DISTINCT (STRAFTER(STR(?rdf_type), str(biopax3:)) AS ?entry_type)
WHERE {
?uri_id rdf:type ?rdf_type .
}
"""
#: SPARQL query string to get pathway URIs and names in the RDF file.
GET_ALL_PATHWAYS = """
SELECT DISTINCT ?uri_id ?name
WHERE {
?uri_id rdf:type biopax3:Pathway .
?uri_id biopax3:displayName ?name .
}
"""
#: SPARQL query string to get all components of a pathway (predicate biopax3:pathwayComponent).
GET_ALL_PATHWAY_COMPONENTS = """
SELECT DISTINCT ?uri_id ?name ?comment (STRAFTER(STR(?uri_type), str(biopax3:)) AS ?component_type)
WHERE {
?pathway biopax3:pathwayComponent ?uri_id .
?uri_id rdf:type ?uri_type .
optional {?uri_id biopax3:displayName ?name .}
optional {?uri_id biopax3:comment ?comment .}
}
"""
#: SPARQL query string to get all participants in an interaction and its controlType (ACTIVATION or INHIBITION).
GET_INTERACTION_PARTICIPANTS_AND_TYPE = """
SELECT DISTINCT
(STRAFTER(STR(?component), '#') AS ?identifier)
?reactant
?product
(STR(?control_type) AS ?interaction_type)
WHERE {
?component biopax3:left ?reactant .
?component biopax3:right ?product .
optional {?control biopax3:controlled ?component .}
optional {?control biopax3:controlType ?control_type }
}
"""
#: SPARQL query to get all the possible metadate (optional statements) of an entity (Protein, Dna, Pathway...).
GET_ENTITY_METADATA = """
SELECT DISTINCT
(STRAFTER (STR(?uri_type), str(biopax3:)) AS ?entity_type)
(STRAFTER(STR(?entity), '#') AS ?identifier)
(STR(?entity) AS ?uri_id)
(STRAFTER(STR(?entity), '#') AS ?reactome_id)
(STR(?entity) AS ?uri_reactome_id)
(STR(?entity_reference) AS ?uri_id)
?name
?cell_locat
?display_name
?complex_components
?comment
WHERE {
?entity rdf:type ?uri_type .
optional {?entity biopax3:comment ?comment .}
optional {?entity biopax3:entityReference ?entity_reference .}
optional {?entity biopax3:name ?name .}
optional {?entity biopax3:displayName ?display_name .}
optional {?entity biopax3:cellularLocation ?cell_locat .}
optional {?entity biopax3:organism ?organism .}
optional {?entity biopax3:component ?complex_components .}
}
"""
"""Queries managers"""
def _get_all_entry_types(rdf_graph: rdflib.Graph) -> Set[str]:
    """Get all entries primary types.

    :param rdf_graph: parsed Reactome RDF graph
    :return: set with all entry primary types (e.g. 'Pathway', 'BiochemicalReaction')
    """
    query_result = rdf_graph.query(GET_ALL_TYPES, initNs=PREFIXES)
    entry_types = set()
    for row in query_result:
        entry_types.add(str(row.entry_type))
    return entry_types
def _get_pathway_metadata(pathway_uri: str, rdf_graph: rdflib.Graph) -> Dict[str, Dict[str, Dict[str, str]]]:
    """Get metadata for a pathway entry.

    :param pathway_uri: URI reference of the queried graph
    :param rdf_graph: RDF Reactome Universe graph object
    :returns: Metadata of a pathway as a dictionary, if empty 'unknown' will be assigned by default
    """
    query_result = rdf_graph.query(
        GET_ENTITY_METADATA,
        initNs=PREFIXES,
        initBindings={'entity': pathway_uri},
    )
    # These attributes must always be present in the resulting dict.
    required_attributes = ['display_name', 'identifier', 'uri_id', 'uri_reactome_id', 'comment']
    return query_result_to_dict(query_result, attr_empty=required_attributes, id_dict=False)
def _get_entity_metadata(entity: rdflib.URIRef, rdf_graph: rdflib.Graph) -> Dict[str, Union[str, Set[str]]]:
    """Get the metadata for an entity (Protein, Dna, Complex...).

    Recurses into complex components so a Complex entity carries fully
    resolved metadata for each of its members.

    :param entity: URI reference of the queried entity
    :param rdf_graph: RDF Reactome Universe graph object
    :returns: Metadata of a pathway as a dictionary, if empty 'unknown' will be assigned by default
    """
    entity_metadata = query_result_to_dict(rdf_graph.query(
        GET_ENTITY_METADATA,
        initNs=PREFIXES,
        initBindings={'entity': entity}
    ),
        attr_empty=['entity_type'],
        id_dict=False
    )

    # Complexes might contain multiple components entities so we iterate over the complex components to fetch that information
    if entity_metadata['entity_type'] == 'Complex':

        complex_components = entity_metadata.get('complex_components')
        entity_metadata['complex_components'] = []

        # A single component arrives as a plain string; several arrive as an
        # iterable -- handle both shapes.
        if isinstance(complex_components, str):
            complex_component = _get_entity_metadata(URIRef(complex_components), rdf_graph)
            entity_metadata['complex_components'].append(complex_component)

        elif complex_components:
            for complex_component in complex_components:
                complex_component = _get_entity_metadata(URIRef(complex_component), rdf_graph)
                entity_metadata['complex_components'].append(complex_component)

    return entity_metadata
def _get_reaction_participants(component_uri: str, component, rdf_graph: rdflib.Graph) -> Tuple[
    Dict[Union[str, Set[str]], Dict[str, Union[str, Set[str]]]], Dict[Any, Dict[str, Any]]]:
    """Get reaction participants (nodes and interactions) for a given reaction.

    :param component_uri: URI reference of the queried reaction component
    :param component: Reaction component metadata
    :param rdf_graph: RDF Reactome Universe graph object
    :return: returns the reaction participants as entities (Proteins, Complex, SmallMolecule...) and proteins (the reaction link)
    """
    interactions = {}
    nodes = {}

    spaqrl_reaction_participants = rdf_graph.query(
        GET_INTERACTION_PARTICIPANTS_AND_TYPE,
        initNs=PREFIXES,
        initBindings={'component': component_uri}
    )

    for interaction in spaqrl_reaction_participants:
        # Resolve full metadata for both ends of the interaction.
        reactant_metadata = _get_entity_metadata(interaction.reactant, rdf_graph)
        product_metadata = _get_entity_metadata(interaction.product, rdf_graph)

        reactant_id = reactant_metadata['identifier']
        product_id = product_metadata['identifier']

        nodes[reactant_id] = reactant_metadata
        nodes[product_id] = product_metadata

        if interaction.identifier not in interactions:
            interactions[interaction.identifier] = {'metadata': component}

        if 'participants' not in interactions[interaction.identifier]:
            # First pair seen: stored as a simple (reactant, product) tuple.
            interactions[interaction.identifier]['participants'] = (reactant_id, product_id)
        else:
            interaction_participants = interactions[interaction.identifier]['participants']

            if isinstance(interaction_participants, tuple):
                # Second pair seen: upgrade the tuple into reactants/products sets.
                interactions[interaction.identifier]['participants'] = {
                    'reactants': {interaction_participants[0], reactant_id},
                    'products': {interaction_participants[1], product_id}
                }
            else:
                interactions[interaction.identifier]['participants']['reactants'].add(reactant_id)
                interactions[interaction.identifier]['participants']['products'].add(product_id)

        interactions[interaction.identifier]['metadata']['interaction_type'] = str(interaction.interaction_type)

    return nodes, interactions
def _get_pathway_components(pathway_uri: rdflib.URIRef, rdf_graph: rdflib.Graph) -> Tuple[
    Dict[str, Dict[str, Union[str, Set[str]]]], List[Dict[str, Union[str, Set[str]]]]]:
    """Get components (nodes and interactions) for a given pathway.

    :param pathway_uri: URI reference of the queried pathway
    :param rdf_graph: RDF Reactome Universe graph object
    :return: returns the pathway components as entities (Proteins, Complex, SmallMolecule...) and proteins (their links)
    """
    interactions = {}
    nodes = {}

    spaqrl_pathway_components = rdf_graph.query(
        GET_ALL_PATHWAY_COMPONENTS,
        initNs=PREFIXES,
        initBindings={'pathway': pathway_uri}
    )

    pathway_components = query_result_to_dict(spaqrl_pathway_components)

    for component_uri, component in pathway_components.items():
        if component['component_type'] == 'BiochemicalReaction':
            # Reactions contribute both participant nodes and edges.
            component_nodes, component_interactions = _get_reaction_participants(component_uri, component, rdf_graph)
            nodes.update(component_nodes)
            interactions.update(component_interactions)

        elif component['component_type'] == 'Pathway':
            # Sub-pathways become plain nodes (no edges of their own here).
            pathway_metadata = _get_pathway_metadata(component_uri, rdf_graph)
            nodes[pathway_metadata['uri_reactome_id']] = pathway_metadata

    return nodes, list(interactions.values())
def get_reactome_statistics(resource_file, hgnc_manager, chebi_manager):
    """Get types statistics for Reactome.

    :param str resource_file: path to the Reactome RDF file
    :param bio2bel_hgnc.Manager hgnc_manager: HGNC Manager
    :param bio2bel_chebi.Manager chebi_manager: ChEBI Manager
    :return: aggregated node/edge type counts over all pathways
    """
    log.info('Parsing Reactome RDF file')
    rdf_graph = parse_rdf(resource_file, format='xml')

    # The species name is encoded in the file name (e.g. 'Homo_sapiens.owl').
    species_name = os.path.basename(resource_file).split(".")[0]
    species_id = REACTOME_SPECIES_TO_ID[species_name]

    spaqrl_all_pathways = rdf_graph.query(GET_ALL_PATHWAYS, initNs=PREFIXES)

    global_statistics = defaultdict(lambda: defaultdict(int))

    for pathway_uri, pathway_title in tqdm(spaqrl_all_pathways, desc='Generating Reactome Statistics'):

        nodes, edges = _get_pathway_components(pathway_uri, rdf_graph)
        pathway_metadata = _get_pathway_metadata(pathway_uri, rdf_graph)

        nodes_types = [
            node['entity_type'] for node in nodes.values()
        ]
        edges_types = [
            edge['metadata']['interaction_type'] for edge in edges
        ]

        bel_graph = convert_to_bel(nodes, edges, pathway_metadata, hgnc_manager, chebi_manager, species_id)

        # Accumulate per-pathway counts into the running global totals.
        global_statistics, pathway_statistics = get_pathway_statitics(
            nodes_types, edges_types, bel_graph, global_statistics=global_statistics
        )

    return global_statistics
def reactome_pathway_to_bel(pathway_uri, rdf_graph, hgnc_manager, chebi_manager, species):
    """Convert a Reactome pathway to BEL.

    :param pathway_uri: URI reference of the pathway to convert
    :param rdflib.Graph rdf_graph: parsed Reactome RDF graph
    :param bio2bel_hgnc.Manager hgnc_manager: Bio2BEL HGNC Manager
    :param bio2bel_chebi.Manager chebi_manager: Bio2BEL ChEBI Manager
    :param species: Reactome species identifier
    :rtype: pybel.BELGraph
    """
    pathway_metadata = _get_pathway_metadata(pathway_uri, rdf_graph)
    nodes, interactions = _get_pathway_components(pathway_uri, rdf_graph)

    return convert_to_bel(nodes, interactions, pathway_metadata, hgnc_manager, chebi_manager, species)
def reactome_to_bel(resource_file, hgnc_manager, chebi_manager, export_folder=REACTOME_BEL):
    """Export every Reactome pathway in *resource_file* as a pickled BEL graph.

    :param str resource_file: the single Reactome RDF file
    :param bio2bel_hgnc.Manager hgnc_manager: HGNC manager
    :param bio2bel_chebi.Manager chebi_manager: ChEBI manager
    :param str export_folder: directory receiving one ``<id>.pickle`` per pathway
    """
    log.info('Parsing Reactome RDF file')
    rdf_graph = parse_rdf(resource_file, format='xml')
    species_name = os.path.basename(resource_file).split(".")[0]
    species_id = REACTOME_SPECIES_TO_ID[species_name]
    pathways_uris_to_names = rdf_graph.query(GET_ALL_PATHWAYS, initNs=PREFIXES)
    progress = tqdm(pathways_uris_to_names,
                    desc=f'Exporting Reactome BEL for species {species_id}')
    for pathway_uri, pathway_name in progress:
        # The pathway identifier sits at the end of the URI; the suffix after
        # the dot (probably the pathway version) is stripped as well.
        identifier = pathway_uri.split('/')[-1].split('.')[0]
        destination = os.path.join(export_folder, '{}.pickle'.format(identifier))
        if os.path.exists(destination):
            # BEL pickle already generated on a previous run.
            continue
        bel_graph = reactome_pathway_to_bel(pathway_uri, rdf_graph, hgnc_manager, chebi_manager, species_id)
        to_pickle(bel_graph, destination)
| 37.069164 | 129 | 0.703802 |
f66327e73e348909a904449c521d2f1ff394025d | 4,455 | py | Python | zPE/GUI/io_encap.py | T-Tony-T/mainframe-env-simulator | 9ca8b726b5962502d53c7e8483c5e4fd89ce5ac6 | [
"BSD-3-Clause"
] | 3 | 2015-07-20T20:11:38.000Z | 2019-07-17T01:53:50.000Z | zPE/GUI/io_encap.py | T-Tony-T/mainframe-env-simulator | 9ca8b726b5962502d53c7e8483c5e4fd89ce5ac6 | [
"BSD-3-Clause"
] | null | null | null | zPE/GUI/io_encap.py | T-Tony-T/mainframe-env-simulator | 9ca8b726b5962502d53c7e8483c5e4fd89ce5ac6 | [
"BSD-3-Clause"
] | 2 | 2019-11-14T14:40:09.000Z | 2021-01-21T21:58:58.000Z | # this is the zPE IO encapsulation for GUI
import os, sys
# this module implements the following APIs:
#
# is_binary(fn_list): test if the fn_list corresponding to a binary file
#
# is_file(fn_list): test if the fn_list corresponding to a file
# is_dir(fn_list): test if the fn_list corresponding to a directory
#
# norm_path_list(fn_list): return the normalized absolute path
# norm_path(full_path): same as above; take a string as argument
#
# new_file(fn_list): create the file unless the fn_list corresponding to a file
# new_dir(fn_list): create the dir unless the fn_list corresponding to a directory
#
# open_file(fn_list, mode): open the file with the indicated mode
# list_dir(dir_name): list the file(s) in the indicated directory, create an empty one if not exsits
#
# fetch(buff): read content from the corresponding file to the zEditBuffer
# flush(buff): write content from the zEditBuffer to the corresponding file
#
def is_binary(fn_list):
    """Return True when *fn_list* names an existing binary file.

    Raises ValueError when the path does not point at a regular file.
    """
    if not is_file(fn_list):
        raise ValueError
    return __IS_BINARY(os.path.join(* fn_list))
def is_file(fn_list):
    """Return True when the joined path components point at a regular file."""
    path = os.path.join(*fn_list)
    return os.path.isfile(path)
def is_dir(fn_list):
    """Return True when the joined path components point at a directory."""
    path = os.path.join(*fn_list)
    return os.path.isdir(path)
def norm_path_list(fn_list):
    """Join the path components and return their normalized absolute path."""
    joined = os.path.join(* fn_list)
    return norm_path(joined)
def norm_path(full_path):
    """Return the canonical absolute form of *full_path*, or '' for empty input."""
    if not full_path:
        return ''  # indicates no path is given
    expanded = os.path.expanduser(full_path)   # expand ~ / ~user first
    resolved = os.path.realpath(expanded)      # follow and eliminate symlinks
    # abspath normalizes to standard form; normcase lowercases on Windows.
    return os.path.normcase(os.path.abspath(resolved))
def new_file(fn_list):
    """Create an empty file at *fn_list*.

    Raises IOError when a file or a folder already occupies the name.
    """
    if is_file(fn_list):
        raise IOError('File already exists.')
    elif is_dir(fn_list):
        raise IOError('File name conflict with a folder.')
    # Close the handle immediately; the original leaked the open file object.
    open_file(fn_list, 'w').close()
def new_dir(fn_list):
    """Create a directory at *fn_list*; raise IOError when the name is taken."""
    if is_file(fn_list):
        raise IOError('Folder name conflict with a file.')
    if is_dir(fn_list):
        raise IOError('Folder already exists.')
    target = os.path.join(*fn_list)
    __CREATE_DIR(target)
def open_file(fn_list, mode):
    """Open the target file in *mode*, creating any missing parent directories.

    *fn_list* may be a ready-made path string or a sequence of components.
    """
    path = fn_list if isinstance(fn_list, str) else os.path.join(* fn_list)
    __CREATE_DIR(os.path.dirname(path))
    return open(path, mode)
def list_dir(dir_path):
    """Return the entries of *dir_path*, creating the directory when absent."""
    __CREATE_DIR(dir_path)
    entries = os.listdir(dir_path)
    return entries
def fetch(buff):
    """Fetch the corresponding file into the indicated MainWindowBuffer.

    Returns False when the buffer has no path, True on success.
    Raises TypeError for binary files and UnicodeError for non-UTF-8 text.
    """
    if buff.path is None:  # fix: identity test instead of '== None'
        return False
    if is_binary(buff.path):
        raise TypeError('Cannot fetch content out of a binary file.')
    fp = open_file(buff.path, 'r')
    try:
        tb = buff.buffer
        try:
            tb.set_text(fp.read().decode('utf8'))
        except Exception:
            # Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit
            # are no longer swallowed; any decode failure still surfaces as
            # the documented UnicodeError.
            raise UnicodeError('File is not in UTF-8 encoding! Convert it to UTF-8 first.')
    finally:
        fp.close()  # fix: the handle was previously leaked
    return True
def flush(buff):
    """Flush the indicated MainWindowBuffer to its corresponding file.

    Returns False when the buffer has no path or the file cannot be opened,
    True on success.
    """
    if buff.path is None:  # fix: identity test instead of '== None'
        return False
    try:
        fp = open_file(buff.path, 'w')
    except Exception:
        # Best-effort open, as before; but no longer a bare 'except:' that
        # would also swallow KeyboardInterrupt/SystemExit.
        return False
    try:
        tb = buff.buffer
        fp.write(tb.get_text(tb.get_start_iter(), tb.get_end_iter(), True))
    finally:
        fp.close()  # fix: the handle was previously leaked
    return True
# supporting function
def __CREATE_DIR(path):
    '''creates (recursively) the target directory if not exists'''
    if not path:
        # fix: dirname of a bare file name is '' and makedirs('') raises;
        # nothing needs creating in that case.
        return
    # EAFP instead of check-then-create: closes the race where another
    # process creates the directory between an isdir() test and makedirs().
    try:
        os.makedirs(path)
    except OSError:
        if not os.path.isdir(path):
            raise
def __IS_BINARY(filename):
    """Return true if the given filename is binary.
    @raise EnvironmentError: if the file does not exist or cannot be accessed.
    @attention: found @ http://bytes.com/topic/python/answers/21222-determine-file-type-binary-text on 6/08/2010
    @author: Trent Mick <TrentM@ActiveState.com>
    @author: Jorge Orpinel <jorge@orpinel.com>"""
    fin = open(filename, 'rb')
    try:
        CHUNKSIZE = 1024
        while 1:
            chunk = fin.read(CHUNKSIZE)
            # fix: the file is opened in binary mode, so chunks are bytes on
            # Python 3 and "'\0' in chunk" raised TypeError there; the b''
            # literal behaves identically on Python 2.
            if b'\0' in chunk:  # found null byte
                return True
            if len(chunk) < CHUNKSIZE:
                break  # done
    finally:
        fin.close()
    return False
| 30.724138 | 119 | 0.644669 |
280c5c5856ccff93d7609b6f00c9a27513bab075 | 1,500 | py | Python | webgame/sqlstore/test/NetMessages.py | breezechen/zevent | f86219b3ce4afbb516b22c41ce125b0f916c490c | [
"Apache-2.0"
] | 1 | 2021-11-19T06:14:16.000Z | 2021-11-19T06:14:16.000Z | webgame/sqlstore/test/NetMessages.py | geniejl/zevent | 5dd3de57adf1deecc8f008db387ebfa460767fda | [
"Apache-2.0"
] | null | null | null | webgame/sqlstore/test/NetMessages.py | geniejl/zevent | 5dd3de57adf1deecc8f008db387ebfa460767fda | [
"Apache-2.0"
] | 2 | 2017-07-15T02:40:52.000Z | 2021-11-19T06:14:18.000Z | # -*- coding: utf-8 -*-
class Packets:
DEF_MSGTYPE_CONFIRM = 0x0000
DEF_MSGTYPE_REJECT = 0x0001
MSGID_REQUEST_LOGIN = 0x00000001
MSGID_RESPONSE_LOGIN = 0x10000001
MSGID_REQUEST_ENTERGAME = 0x00000002
MSGID_RESPONSE_ENTERGAME = 0x10000002
MSGID_REQUEST_LEAVEGAME = 0x00000004
MSGID_RESPONSE_LEAVEGAME = 0x10000004
MSGID_REQUEST_NEWACCOUNT = 0x00000008
MSGID_RESPONSE_NEWACCOUNT = 0x10000008
MSGID_REQUEST_NEWCHARACTER = 0x00000010
MSGID_RESPONSE_NEWCHARACTER = 0x10000010
MSGID_REQUEST_GETCHARLIST = 0x00000011
MSGID_RESPONSE_GETCHARLIST = 0x10000011
MSGID_REQUEST_BINDGS = 0x00000111
MSGID_RESPONSE_BINDGS = 0x10000111
#for gs
MSGID_REQUEST_REGGS = 0x01000001
MSGID_RESPONSE_REGGS = 0x11000001
#转发消息
MSGID_REQUEST_DATA2GS = 0x0000000F
MSGID_RESPONSE_DATA2GS = 0x1000000F
MSGID_REQUEST_DATA2CLIENTS = 0x000000FF
#sqlstore
MSGID_REQUEST_EXECSQL = 0x0000002F
MSGID_RESPONSE_EXECSQL = 0x1000002F
MSGID_REQUEST_EXECPROC = 0x0000003F
MSGID_RESPONSE_EXECPROC = 0x1000003F
MSGID_REQUEST_QUERY = 0x0000004F
MSGID_RESPONSE_QUERY = 0x1000004F
#disconnect
MSGID_NOTIFY_DISCONNECT = 0x11111111
DEF_LOGRESMSGTYPE_PASSWORDMISMATCH = 0x0001
DEF_LOGRESMSGTYPE_NOTEXISTINGACCOUNT = 0x0002
DEF_LOGRESMSGTYPE_NOTEXISTINGCHARACTER = 0x003
DEF_ENTERGAMEMSGTYPE_NEW = 0x0F1C
DEF_ENTERGAMERESTYPE_PLAYING = 0x0F20
DEF_ENTERGAMERESTYPE_REJECT = 0x0F21
DEF_ENTERGAMERESTYPE_CONFIRM = 0x0F22
DEF_ENTERGAMERESTYPE_FORCEDISCONN = 0x0F23
| 26.785714 | 48 | 0.825333 |
5acce67eee2b5fc1ae9fcb668c5f3d04de57f90c | 1,720 | py | Python | mi/idk/platform/package_driver.py | ronkyo/mi-instrument | ad6188ea9d7d941292f078a58718d09b46cc4fed | [
"BSD-2-Clause"
] | 1 | 2018-09-14T23:28:29.000Z | 2018-09-14T23:28:29.000Z | mi/idk/platform/package_driver.py | ronkyo/mi-instrument | ad6188ea9d7d941292f078a58718d09b46cc4fed | [
"BSD-2-Clause"
] | 33 | 2017-04-25T19:53:45.000Z | 2022-03-18T17:42:18.000Z | mi/idk/platform/package_driver.py | ronkyo/mi-instrument | ad6188ea9d7d941292f078a58718d09b46cc4fed | [
"BSD-2-Clause"
] | 31 | 2015-03-04T01:01:09.000Z | 2020-10-28T14:42:12.000Z | """
@file coi-services/mi.idk.platform/package_driver.py
@author Emily Hahn
@brief Main script class for running the package_driver process
"""
import os
import sys
import subprocess
from mi.core.log import get_logger ; log = get_logger()
import mi.idk.package_driver
from mi.idk.exceptions import InvalidParameters
from mi.idk import prompt
from mi.idk.platform.metadata import Metadata
from mi.idk.platform.nose_test import NoseTest
from mi.idk.platform.driver_generator import DriverGenerator
from mi.idk.platform.egg_generator import EggGenerator
REPODIR = '/tmp/repoclone'
class PackageDriver(mi.idk.package_driver.PackageDriver):
def _driver_prefix(self):
return "platform"
def archive_file(self):
return "%s-%s-driver.zip" % (self.metadata.driver_name,
self.metadata.version)
def build_name(self):
return "platform_%s" % self.metadata.driver_name
def get_metadata(self):
# get which platform agent is selected from the current metadata, use
# this to get metadata from the cloned repo
tmp_metadata = Metadata()
log.debug("Current Metadata: %s", tmp_metadata.serialize())
# read metadata from the cloned repo
self.metadata = Metadata(tmp_metadata.driver_path,
REPODIR + '/marine-integrations')
log.debug("Result Metadata: %s", self.metadata.serialize())
return self.metadata
def get_nose_test(self):
return NoseTest(self.metadata, log_file=self.log_path())
def get_driver_generator(self):
return DriverGenerator(self.metadata)
def get_egg_generator(self):
return EggGenerator(self.metadata)
| 31.272727 | 77 | 0.702326 |
976c6cff54c2072bcf1e4f787a7c4075ea5a61ba | 5,534 | py | Python | tools/apachize.py | orlyatomics/orly | d413f999f51a8e553832dab4e3baa7ca68928840 | [
"Apache-2.0"
] | 69 | 2015-01-06T05:12:57.000Z | 2021-11-06T20:34:10.000Z | tools/apachize.py | waderly/orly | 9d7660ea9d07591f8cc6b1b92d8e6c3b8b78eeee | [
"Apache-2.0"
] | 5 | 2015-07-09T02:21:50.000Z | 2021-08-13T11:10:26.000Z | tools/apachize.py | waderly/orly | 9d7660ea9d07591f8cc6b1b92d8e6c3b8b78eeee | [
"Apache-2.0"
] | 15 | 2015-01-23T13:34:05.000Z | 2020-06-15T16:46:50.000Z | #!/usr/bin/python2
#
# Copyright 2010-2014 OrlyAtomics, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import argparse, os, re, sys
COPYRIGHT_PATTERN = re.compile(R'copyright[ \t]*\d+(\-\d*)?[ \t]*orly', re.I)
APACHE_PATTERN = re.compile(R'Licensed under the Apache License')
APACHE_NOTICE_LINES = [
'Copyright 2010-2014 OrlyAtomics, Inc.',
'',
'Licensed under the Apache License, Version 2.0 (the "License");',
'you may not use this file except in compliance with the License.',
'You may obtain a copy of the License at',
'',
' http://www.apache.org/licenses/LICENSE-2.0',
'',
'Unless required by applicable law or agreed to in writing, software',
'distributed under the License is distributed on an "AS IS" BASIS,',
'WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.',
'See the License for the specific language governing permissions and',
'limitations under the License.' ]
Root = ''
IgnoredRelPaths = []
IgnoredExts = []
def CHandler(rel_path, text):
    """Rewrite the leading C-style comment block of *text*.

    Returns the rewritten file text with the old copyright line replaced by
    the Apache notice, or None when the file does not start with a comment
    whose last line matches COPYRIGHT_PATTERN.
    """
    if not text.startswith('/*'):
        return None
    close = text.find('*/')
    if close < 0:
        return None
    header = text[:close].splitlines()
    if not COPYRIGHT_PATTERN.search(header[-1]):
        return None
    # Drop the old copyright line and the opening '/*' line; a one-line
    # header degenerates to a single empty body line.
    body = header[1:-1]
    if len(header) == 1:
        body = ['']
    pieces = ['/* <%s> ' % rel_path] + body + [' ' + line for line in APACHE_NOTICE_LINES]
    return '%s %s' % ('\n'.join(pieces), text[close:])
def ScriptHandler(rel_path, text):
    """Splice the Apache notice into a script's leading '#' comment block.

    Returns the rewritten text, or None when the file has no shebang or no
    copyright line inside the initial comment block.
    """
    lines = text.splitlines()
    if not lines[0].startswith('#!'):
        return None
    replaced = False
    for idx, line in enumerate(lines[1:], 1):
        if not line.startswith('#'):
            break  # left the leading comment block without finding a notice
        if COPYRIGHT_PATTERN.search(line):
            lines[idx:idx + 1] = ['# ' + notice for notice in APACHE_NOTICE_LINES]
            replaced = True
            break
    if not replaced:
        return None
    return '\n'.join(lines)
def NoHandler(rel_path, text): return None
# Maps file extension -> rewrite handler; extensions not listed are skipped.
HANDLERS = {
    '.cc': CHandler,
    '.h': CHandler,
    '.orly': CHandler,
    '.nycr': CHandler,
    '.sh': ScriptHandler,
    '.py': ScriptHandler,
}
def main():
    """Walk the tree rooted at --root and splice the Apache notice into every
    source file that still carries the old copyright line."""
    def ParseArgs():
        # Populates the module-level Root/IgnoredRelPaths/IgnoredExts globals.
        global Root, IgnoredRelPaths, IgnoredExts
        parser = argparse.ArgumentParser(description='Update Orly source files to include Apache license.')
        parser.add_argument('root', help='The root of the tree to scan for source files.')
        parser.add_argument('--ignore_rel_path', help='A relative path to ignore while scanning for source files.', action='append')
        parser.add_argument('--ignore_ext', help='A file extension to ignore while scanning for source files.', action='append')
        args = parser.parse_args()
        Root = os.path.realpath(args.root)
        IgnoredRelPaths = args.ignore_rel_path or []
        IgnoredRelPaths.append('.git')
        IgnoredRelPaths = frozenset(IgnoredRelPaths)
        IgnoredExts = args.ignore_ext or []
        IgnoredExts = frozenset(IgnoredExts)
    ParseArgs()
    root_prefix_len = len(Root) + 1
    for abs_path, dir_names, file_names in os.walk(Root):
        # Strip the root prefix from the abs path to make the rel path.
        rel_path = abs_path[root_prefix_len:]
        # Drop dir names that should be ignored (mutating dir_names in place
        # prunes os.walk's descent).
        filtered_dir_names = []
        for dir_name in dir_names:
            if os.path.join(rel_path, dir_name) not in IgnoredRelPaths:
                filtered_dir_names.append(dir_name)
        dir_names[:] = filtered_dir_names
        # Scan files.
        for file_name in file_names:
            # Strip any '.hold' ext (possibly repeated) to find the real extension.
            base = file_name
            while True:
                base, ext = os.path.splitext(base)
                if ext != '.hold':
                    break
            # If the ext should be ignored, skip the file.
            if ext in IgnoredExts:
                continue
            ex = None
            is_handled = False
            try:
                # Read the text of the file into memory.
                file_rel_path = os.path.join(rel_path, file_name)
                file_abs_path = os.path.join(Root, file_rel_path)
                text = open(file_abs_path, 'r').read()
                # If the file is empty, skip the file.
                if not text:
                    continue
                # If the text of the file doesn't match our copyright pattern, skip the file.
                if not COPYRIGHT_PATTERN.search(text):
                    continue
                # If the text already matches the Apache pattern, skip the file.
                if APACHE_PATTERN.search(text):
                    continue
                # If we can transform the text, write it back out.
                handler = HANDLERS.get(ext)
                text = handler(file_rel_path, text) if handler else None
                if text is not None:
                    # Write to a sibling temp file, then rename over the original.
                    temp_abs_path = file_abs_path + '.safe'
                    open(temp_abs_path, 'w').write(text)
                    os.rename(temp_abs_path, file_abs_path) # This *should* be atomic on Linux. Right... ?
                    is_handled = True
            # NOTE(review): Python-2-only except syntax; any failure is recorded
            # in 'ex' and reported below rather than aborting the walk.
            except Exception, ex:
                pass
            # Report our results: 'fixed' to stdout, 'check' (skipped/failed) to stderr.
            print(
                '%s %r%s' % ('fixed' if is_handled else 'check', file_abs_path, ' %r' % ex if ex else ''),
                file=sys.stdout if is_handled else sys.stderr)
if __name__ == '__main__':
    # Script entry point: process exit status is main()'s return value (None -> 0).
    exit(main())
| 33.539394 | 128 | 0.663896 |
bfd5720f50676135a2d8ae991a82639099f509e2 | 12,251 | py | Python | release/scripts/startup/bl_ui/properties_physics_common.py | naetherm/Bforartists | 4d78856b76544b9eeb49e7dd388b4cf41d58d7e4 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | 3 | 2019-09-16T10:29:19.000Z | 2022-02-11T14:43:18.000Z | release/scripts/startup/bl_ui/properties_physics_common.py | naetherm/Bforartists | 4d78856b76544b9eeb49e7dd388b4cf41d58d7e4 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | release/scripts/startup/bl_ui/properties_physics_common.py | naetherm/Bforartists | 4d78856b76544b9eeb49e7dd388b4cf41d58d7e4 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
import bpy
from bpy.types import (
Panel,
)
from bpy.app.translations import contexts as i18n_contexts
class PhysicButtonsPanel:
    """Mixin for physics property panels.

    Subclasses must provide a COMPAT_ENGINES set; the panel is shown only for
    an active object under a compatible render engine.
    """
    bl_space_type = 'PROPERTIES'
    bl_region_type = 'WINDOW'
    bl_context = "physics"

    @classmethod
    def poll(cls, context):
        obj = context.object
        return (obj) and context.engine in cls.COMPAT_ENGINES
def physics_add(layout, md, name, type, typeicon, toggles):
    """Draw a one-row add/remove control for the physics modifier *md*.

    When *md* exists the row offers removal (plus optional render/viewport
    toggles); otherwise it offers an "add" operator preset to *type*.
    """
    row = layout.row(align=True)
    if not md:
        # No modifier yet: show the add button carrying the modifier type.
        props = row.operator(
            "object.modifier_add",
            text=name,
            text_ctxt=i18n_contexts.default,
            icon=typeicon,
        )
        props.type = type
        return
    row.context_pointer_set("modifier", md)
    row.operator(
        "object.modifier_remove",
        text=name,
        text_ctxt=i18n_contexts.default,
        icon='X',
    )
    if toggles:
        row.prop(md, "show_render", text="")
        row.prop(md, "show_viewport", text="")
def physics_add_special(layout, data, name, addop, removeop, typeicon):
    """Draw a single add-or-remove button depending on whether *data* exists."""
    row = layout.row(align=True)
    op, icon = (removeop, 'X') if data else (addop, typeicon)
    row.operator(op, text=name, text_ctxt=i18n_contexts.default, icon=icon)
class PHYSICS_PT_add(PhysicButtonsPanel, Panel):
    """Headerless panel listing the physics systems that can be enabled for
    the active object."""
    bl_label = ""
    bl_options = {'HIDE_HEADER'}
    COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH'}

    def draw(self, context):
        layout = self.layout

        row = layout.row(align=True)
        row.alignment = 'LEFT'
        row.label(text="Enable physics for:")

        flow = layout.grid_flow(row_major=True, columns=0, even_columns=True, even_rows=False, align=True)

        obj = context.object

        col = flow.column()

        # Force field has its own toggle operator rather than a modifier.
        if obj.field.type == 'NONE':
            col.operator("object.forcefield_toggle", text="Force Field", icon='FORCE_FORCE')
        else:
            col.operator("object.forcefield_toggle", text="Force Field", icon='X')

        # Mesh-only modifier-based systems.
        if obj.type == 'MESH':
            physics_add(col, context.collision, "Collision", 'COLLISION', 'MOD_PHYSICS', False)
            physics_add(col, context.cloth, "Cloth", 'CLOTH', 'MOD_CLOTH', True)
            physics_add(col, context.dynamic_paint, "Dynamic Paint", 'DYNAMIC_PAINT', 'MOD_DYNAMICPAINT', True)

        col = flow.column()

        # Soft body works on any deformable object type.
        if obj.type in {'MESH', 'LATTICE', 'CURVE', 'SURFACE', 'FONT'}:
            physics_add(col, context.soft_body, "Soft Body", 'SOFT_BODY', 'MOD_SOFT', True)

        if obj.type == 'MESH':
            physics_add(col, context.fluid, "Fluid", 'FLUID_SIMULATION', 'MOD_FLUIDSIM', True)
            physics_add(col, context.smoke, "Smoke", 'SMOKE', 'MOD_SMOKE', True)

            physics_add_special(
                col, obj.rigid_body, "Rigid Body",
                "rigidbody.object_add",
                "rigidbody.object_remove",
                'RIGID_BODY'
            )

        # all types of objects can have rigid body constraint.
        physics_add_special(
            col, obj.rigid_body_constraint, "Rigid Body Constraint",
            "rigidbody.constraint_add",
            "rigidbody.constraint_remove",
            'RIGID_BODY_CONSTRAINT'
        )
# cache-type can be 'PSYS' 'HAIR' 'SMOKE' etc.
def point_cache_ui(self, cache, enabled, cachetype):
    """Draw the shared point-cache UI (cache list, frame range, disk cache and
    bake controls) for the given cache; *cachetype* tunes which widgets show."""
    layout = self.layout
    layout.use_property_split = True

    layout.context_pointer_set("point_cache", cache)

    is_saved = bpy.data.is_saved

    # NOTE: TODO temporarily used until the animate properties are properly skipped.
    layout.use_property_decorate = False  # No animation (remove this later on).

    # Rigid body has a single world-level cache; everything else shows a list.
    if not cachetype == 'RIGID_BODY':
        row = layout.row()
        row.template_list(
            "UI_UL_list", "point_caches", cache, "point_caches",
            cache.point_caches, "active_index", rows=1,
        )
        col = row.column(align=True)
        col.operator("ptcache.add", icon='ADD', text="")
        col.operator("ptcache.remove", icon='REMOVE', text="")

    if cachetype in {'PSYS', 'HAIR', 'SMOKE'}:
        col = layout.column()

        if cachetype == 'SMOKE':
            col.prop(cache, "use_library_path", text="Use Library Path")

        col.prop(cache, "use_external")

        if cache.use_external:
            col = layout.column()
            col.prop(cache, "index", text="Index")
            col.prop(cache, "filepath", text="Path")

            cache_info = cache.info
            if cache_info:
                col = layout.column()
                col.alignment = 'RIGHT'
                col.label(text=cache_info)
        else:
            # Internal caches for these types need the file saved on disk.
            if cachetype in {'SMOKE', 'DYNAMIC_PAINT'}:
                if not is_saved:
                    col = layout.column(align=True)
                    col.alignment = 'RIGHT'
                    col.label(text="Cache is disabled until the file is saved")

                    layout.enabled = False

    if not cache.use_external or cachetype == 'SMOKE':
        col = layout.column(align=True)

        if cachetype not in {'PSYS', 'DYNAMIC_PAINT'}:
            col.enabled = enabled

        col.prop(cache, "frame_start", text="Simulation Start")
        col.prop(cache, "frame_end")

        if cachetype not in {'SMOKE', 'CLOTH', 'DYNAMIC_PAINT', 'RIGID_BODY'}:
            col.prop(cache, "frame_step")

        cache_info = cache.info
        if cachetype != 'SMOKE' and cache_info:  # avoid empty space.
            col = layout.column(align=True)
            col.alignment = 'RIGHT'
            col.label(text=cache_info)

        can_bake = True

        if cachetype not in {'SMOKE', 'DYNAMIC_PAINT', 'RIGID_BODY'}:
            if not is_saved:
                col = layout.column(align=True)
                col.alignment = 'RIGHT'
                col.label(text="Options are disabled until the file is saved")

            flow = layout.grid_flow(row_major=True, columns=0, even_columns=True, even_rows=False, align=True)
            flow.enabled = enabled and is_saved

            col = flow.column(align=True)
            col.use_property_split = False
            col.prop(cache, "use_disk_cache")

            subcol = col.column()
            subcol.use_property_split = False
            subcol.active = cache.use_disk_cache
            subcol.prop(cache, "use_library_path", text="Use Library Path")

            col = flow.column()
            col.active = cache.use_disk_cache
            col.prop(cache, "compression", text="Compression")

            # Linked data can only bake through a disk cache.
            if cache.id_data.library and not cache.use_disk_cache:
                can_bake = False

                col = layout.column(align=True)
                col.alignment = 'RIGHT'

                col.separator()

                col.label(text="Linked object baking requires Disk Cache to be enabled")
        else:
            layout.separator()

        flow = layout.grid_flow(row_major=True, columns=0, even_columns=True, even_rows=False, align=False)

        col = flow.column()
        col.active = can_bake

        if cache.is_baked is True:
            col.operator("ptcache.free_bake", text="Delete Bake")
        else:
            col.operator("ptcache.bake", text="Bake").bake = True

        sub = col.row()
        sub.enabled = (cache.is_frame_skip or cache.is_outdated) and enabled
        sub.operator("ptcache.bake", text="Calculate To Frame").bake = False

        sub = col.column()
        sub.enabled = enabled
        sub.operator("ptcache.bake_from_cache", text="Current Cache to Bake")

        col = flow.column()
        col.operator("ptcache.bake_all", text="Bake All Dynamics").bake = True
        col.operator("ptcache.free_bake_all", text="Delete All Bakes")
        col.operator("ptcache.bake_all", text="Update All To Frame").bake = False
def effector_weights_ui(self, weights, weight_type):
    """Draw the effector weight sliders, grouped into grid-flow columns."""
    layout = self.layout
    layout.use_property_split = True

    # NOTE: TODO temporarily used until the animate properties are properly skipped.
    layout.use_property_decorate = False  # No animation (remove this later on).

    layout.prop(weights, "collection")

    flow = layout.grid_flow(row_major=True, columns=0, even_columns=True, even_rows=False, align=True)

    column_groups = (
        ("gravity", "all", "force", "vortex"),
        ("magnetic", "harmonic", "charge", "lennardjones"),
        ("wind", "curve_guide", "texture"),
    )
    for group in column_groups:
        col = flow.column()
        for prop_name in group:
            col.prop(weights, prop_name, slider=True)

    # The smoke-flow weight is hidden inside a smoke simulation itself;
    # it lands in the third column, matching the grouping above.
    if weight_type != 'SMOKE':
        col.prop(weights, "smokeflow", slider=True)

    col = flow.column()
    for prop_name in ("turbulence", "drag", "boid"):
        col.prop(weights, prop_name, slider=True)
def basic_force_field_settings_ui(self, field):
    """Draw the strength/noise settings shared by all force-field types;
    several widgets switch on field.type."""
    layout = self.layout
    layout.use_property_split = True

    # Nothing to draw for a missing or disabled field.
    if not field or field.type == 'NONE':
        return

    flow = layout.grid_flow(row_major=True, columns=0, even_columns=True, even_rows=False, align=True)

    col = flow.column()

    # Drag exposes its linear coefficient where other types show "strength".
    if field.type == 'DRAG':
        col.prop(field, "linear_drag", text="Linear")
    else:
        col.prop(field, "strength")

    # Type-specific extras.
    if field.type == 'TURBULENCE':
        col.prop(field, "size")
        col.prop(field, "flow")
    elif field.type == 'HARMONIC':
        col.prop(field, "harmonic_damping", text="Damping")
        col.prop(field, "rest_length")
    elif field.type == 'VORTEX' and field.shape != 'POINT':
        col.prop(field, "inflow")
    elif field.type == 'DRAG':
        col.prop(field, "quadratic_drag", text="Quadratic")
    else:
        col.prop(field, "flow")

    col.prop(field, "apply_to_location", text="Affect Location")
    col.prop(field, "apply_to_rotation", text="Affect Rotation")

    col = flow.column()
    sub = col.column(align=True)
    sub.prop(field, "noise", text="Noise Amount")
    sub.prop(field, "seed", text="Seed")

    if field.type == 'TURBULENCE':
        col.prop(field, "use_global_coords", text="Global")
    elif field.type == 'HARMONIC':
        col.prop(field, "use_multiple_springs")

    if field.type == 'FORCE':
        col.prop(field, "use_gravity_falloff", text="Gravitation")

    col.prop(field, "use_absorption")
def basic_force_field_falloff_ui(self, field):
    """Draw the falloff settings shared by all force-field types."""
    layout = self.layout

    if not field or field.type == 'NONE':
        return

    flow = layout.grid_flow(row_major=True, columns=0, even_columns=True, even_rows=False, align=True)

    col = flow.column()
    col.prop(field, "z_direction")
    col.prop(field, "falloff_power", text="Power")

    # Min and max distance clamps follow the same toggle-plus-slider pattern.
    clamp_specs = (
        ("use_min_distance", "Use Minimum", "distance_min", "Min Distance"),
        ("use_max_distance", "Use Maximum", "distance_max", "Max Distance"),
    )
    for toggle, toggle_label, dist, dist_label in clamp_specs:
        col = flow.column()
        col.prop(field, toggle, text=toggle_label)

        sub = col.column(align=True)
        sub.active = getattr(field, toggle)
        sub.prop(field, dist, text=dist_label)
# Panels to register with Blender.
classes = (
    PHYSICS_PT_add,
)

if __name__ == "__main__":  # only for live edit.
    from bpy.utils import register_class
    for cls in classes:
        register_class(cls)
| 32.932796 | 111 | 0.625418 |
78704170cc151afc877417a78e8db9ebb21746cf | 15,394 | py | Python | dreamcoder/domains/regex/regexPrimitives.py | Hitoshi-Nakanishi/ec2 | b65905cdfb1a2451ad553fd15b937a7fc48fa9cf | [
"Unlicense"
] | null | null | null | dreamcoder/domains/regex/regexPrimitives.py | Hitoshi-Nakanishi/ec2 | b65905cdfb1a2451ad553fd15b937a7fc48fa9cf | [
"Unlicense"
] | null | null | null | dreamcoder/domains/regex/regexPrimitives.py | Hitoshi-Nakanishi/ec2 | b65905cdfb1a2451ad553fd15b937a7fc48fa9cf | [
"Unlicense"
] | null | null | null | import sys
from dreamcoder.program import Primitive
from dreamcoder.grammar import Grammar
from dreamcoder.type import arrow, tpregex
from string import printable
try:
from pregex import pregex
except:
print("Failure to load pregex. This is only acceptable if using pypy", file=sys.stderr)
# evaluation to regular regex form. then I can unflatten using Luke's stuff.
def _kleene(x):
    """Kleene star over *x* with continuation probability 0.25."""
    star = pregex.KleeneStar(x, p=0.25)
    return star
def _plus(x):
    """One-or-more repetitions of *x* with continuation probability 0.25."""
    repeated = pregex.Plus(x, p=0.25)
    return repeated
def _maybe(x):
    """Optional occurrence of *x*."""
    optional = pregex.Maybe(x)
    return optional
# maybe should be reversed#"(" + x + "|" + y + ")"
def _alt(x):
    """Curried alternation: ``_alt(x)(y)`` builds ``Alt([x, y])``."""
    def with_second(y):
        return pregex.Alt([x, y])
    return with_second
def _concat(x):
    """Curried concatenation: ``_concat(x)(y)`` builds ``Concat([x, y])``."""
    def with_second(y):
        return pregex.Concat([x, y])
    return with_second
# For sketch:
def _kleene_5(x):
    """Kleene star over *x* with pregex's default probability (sketch variant)."""
    star = pregex.KleeneStar(x)
    return star
def _plus_5(x):
    """One-or-more repetitions of *x* with pregex's default probability (sketch variant)."""
    repeated = pregex.Plus(x)
    return repeated
# Printable characters that cannot appear verbatim in a primitive name,
# paired with the human-readable name used instead (e.g. "string_bang").
disallowed = [
    ("#", "hash"),
    ("!", "bang"),
    ('"', "double_quote"),
    ("$", "dollar"),
    ("%", "percent"),
    ("&", "ampersand"),
    ("'", "single_quote"),
    (")", "left_paren"),
    ("(", "right_paren"),
    ("*", "astrisk"),
    ("+", "plus"),
    (",", "comma"),
    ("-", "dash"),
    (".", "period"),
    ("/", "slash"),
    (":", "colon"),
    (";", "semicolon"),
    ("<", "less_than"),
    ("=", "equal"),
    (">", "greater_than"),
    ("?", "question_mark"),
    ("@", "at"),
    ("[", "left_bracket"),
    ("\\", "backslash"),
    ("]", "right_bracket"),
    ("^", "carrot"),
    ("_", "underscore"),
    ("`", "backtick"),
    ("|", "bar"),
    ("}", "right_brace"),
    ("{", "left_brace"),
    ("~", "tilde"),
    (" ", "space"),
    ("\t", "tab"),
]
# Just the characters themselves, for membership tests in the builders below.
disallowed_list = [char for char, _ in disallowed]
class PRC:  # PregexContinuation
    """Continuation-passing wrapper around a pregex piece.

    An instance represents a regex fragment awaiting the rest of the regex:
    calling it with a continuation produces the concatenation of this
    fragment with that continuation's result.  Constructor-style fragments
    (arity >= 1) first collect their sub-regex arguments, each obtained by
    applying the incoming continuation to the empty string.
    """
    def __init__(self, f, arity=0, args=[]):
        # f: a pregex value (arity 0) or a pregex constructor (arity >= 1).
        # args: constructor arguments collected so far.
        # NOTE(review): mutable default is safe only because args is never
        # mutated in place (a new list is built below) — confirm if changed.
        self.f = f
        self.arity = arity
        self.args = args
    def __call__(self, pre):
        # pre: either the already-built tail regex (when saturated) or the
        # next continuation to absorb as a constructor argument.
        if self.arity == len(self.args):
            if self.arity == 0:
                return pregex.Concat([self.f, pre])
            elif self.arity == 1:
                return pregex.Concat([self.f(*self.args), pre])
            else:
                return pregex.Concat([self.f(self.args), pre])  # this line is bad, need brackets around input to f if f is Alt
        else:
            # Still collecting: realize the argument by closing the
            # continuation with an empty string, and return a new PRC.
            return PRC(self.f, self.arity, args=self.args + [pre(pregex.String(""))])
def concatPrimitives():
    """Return the concatenation-style primitive set.

    Every primitive has type ``tpregex -> tpregex`` and is wrapped in a PRC
    continuation; characters in ``disallowed`` get their spelled-out names.
    """
    return (
        [
            Primitive("string_" + i, arrow(tpregex, tpregex), PRC(pregex.String(i)))
            for i in printable[:-4]
            if i not in disallowed_list
        ]
        + [Primitive("string_" + name, arrow(tpregex, tpregex), PRC(pregex.String(char))) for char, name in disallowed]
        + [
            Primitive("r_dot", arrow(tpregex, tpregex), PRC(pregex.dot)),
            Primitive("r_d", arrow(tpregex, tpregex), PRC(pregex.d)),
            Primitive("r_s", arrow(tpregex, tpregex), PRC(pregex.s)),
            Primitive("r_w", arrow(tpregex, tpregex), PRC(pregex.w)),
            Primitive("r_l", arrow(tpregex, tpregex), PRC(pregex.l)),
            Primitive("r_u", arrow(tpregex, tpregex), PRC(pregex.u)),
            # todo
            Primitive("r_kleene", arrow(arrow(tpregex, tpregex), arrow(tpregex, tpregex)), PRC(pregex.KleeneStar, 1)),
            Primitive("r_plus", arrow(arrow(tpregex, tpregex), arrow(tpregex, tpregex)), PRC(pregex.Plus, 1)),
            Primitive("r_maybe", arrow(arrow(tpregex, tpregex), arrow(tpregex, tpregex)), PRC(pregex.Maybe, 1)),
            Primitive(
                "r_alt", arrow(arrow(tpregex, tpregex), arrow(tpregex, tpregex), arrow(tpregex, tpregex)), PRC(pregex.Alt, 2)
            ),
        ]
    )
def strConstConcatPrimitives():
    """Return the concatenation-style primitive set plus an ``r_const``
    placeholder primitive (value None, filled in by the string-constant
    machinery elsewhere)."""
    return (
        [
            Primitive("string_" + i, arrow(tpregex, tpregex), PRC(pregex.String(i)))
            for i in printable[:-4]
            if i not in disallowed_list
        ]
        + [Primitive("string_" + name, arrow(tpregex, tpregex), PRC(pregex.String(char))) for char, name in disallowed]
        + [
            Primitive("r_dot", arrow(tpregex, tpregex), PRC(pregex.dot)),
            Primitive("r_d", arrow(tpregex, tpregex), PRC(pregex.d)),
            Primitive("r_s", arrow(tpregex, tpregex), PRC(pregex.s)),
            Primitive("r_w", arrow(tpregex, tpregex), PRC(pregex.w)),
            Primitive("r_l", arrow(tpregex, tpregex), PRC(pregex.l)),
            Primitive("r_u", arrow(tpregex, tpregex), PRC(pregex.u)),
            # todo
            Primitive("r_kleene", arrow(arrow(tpregex, tpregex), arrow(tpregex, tpregex)), PRC(pregex.KleeneStar, 1)),
            Primitive("r_plus", arrow(arrow(tpregex, tpregex), arrow(tpregex, tpregex)), PRC(pregex.Plus, 1)),
            Primitive("r_maybe", arrow(arrow(tpregex, tpregex), arrow(tpregex, tpregex)), PRC(pregex.Maybe, 1)),
            Primitive(
                "r_alt", arrow(arrow(tpregex, tpregex), arrow(tpregex, tpregex), arrow(tpregex, tpregex)), PRC(pregex.Alt, 2)
            ),
        ]
        + [Primitive("r_const", arrow(tpregex, tpregex), None)]
    )
def reducedConcatPrimitives():
    """Return a reduced concatenation-style primitive set: ``r_w`` and
    ``r_plus`` are deliberately excluded (see commented lines) and an
    ``r_const`` placeholder is appended."""
    # uses strConcat!!
    # [Primitive("empty_string", arrow(tpregex, tpregex), PRC(pregex.String("")))
    # ] + [
    return (
        [
            Primitive("string_" + i, arrow(tpregex, tpregex), PRC(pregex.String(i)))
            for i in printable[:-4]
            if i not in disallowed_list
        ]
        + [Primitive("string_" + name, arrow(tpregex, tpregex), PRC(pregex.String(char))) for char, name in disallowed]
        + [
            Primitive("r_dot", arrow(tpregex, tpregex), PRC(pregex.dot)),
            Primitive("r_d", arrow(tpregex, tpregex), PRC(pregex.d)),
            Primitive("r_s", arrow(tpregex, tpregex), PRC(pregex.s)),
            # Primitive("r_w", arrow(tpregex, tpregex), PRC(pregex.w)),
            Primitive("r_l", arrow(tpregex, tpregex), PRC(pregex.l)),
            Primitive("r_u", arrow(tpregex, tpregex), PRC(pregex.u)),
            # todo
            Primitive("r_kleene", arrow(arrow(tpregex, tpregex), arrow(tpregex, tpregex)), PRC(pregex.KleeneStar, 1)),
            # Primitive("r_plus", arrow(arrow(tpregex, tpregex), arrow(tpregex,tpregex)), PRC(pregex.Plus,1)),
            Primitive("r_maybe", arrow(arrow(tpregex, tpregex), arrow(tpregex, tpregex)), PRC(pregex.Maybe, 1)),
            Primitive(
                "r_alt", arrow(arrow(tpregex, tpregex), arrow(tpregex, tpregex), arrow(tpregex, tpregex)), PRC(pregex.Alt, 2)
            ),
        ]
        + [Primitive("r_const", arrow(tpregex, tpregex), None)]
    )
def sketchPrimitives():
    """Primitive set for sketch-guided search.

    Identical shape to basePrimitives, except r_kleene/r_plus are bound to
    the depth-capped _kleene_5/_plus_5 implementations.
    """
    literals = [
        Primitive("string_" + c, tpregex, pregex.String(c))
        for c in printable[:-4]
        if c not in disallowed_list
    ]
    escaped = [
        Primitive("string_" + name, tpregex, pregex.String(char))
        for char, name in disallowed
    ]
    char_classes = [
        Primitive("r_dot", tpregex, pregex.dot),
        Primitive("r_d", tpregex, pregex.d),
        Primitive("r_s", tpregex, pregex.s),
        Primitive("r_w", tpregex, pregex.w),
        Primitive("r_l", tpregex, pregex.l),
        Primitive("r_u", tpregex, pregex.u),
    ]
    unary = arrow(tpregex, tpregex)
    binary = arrow(tpregex, tpregex, tpregex)
    combinators = [
        Primitive("r_kleene", unary, _kleene_5),
        Primitive("r_plus", unary, _plus_5),
        Primitive("r_maybe", unary, _maybe),
        Primitive("r_alt", binary, _alt),
        Primitive("r_concat", binary, _concat),
    ]
    return literals + escaped + char_classes + combinators
def basePrimitives():
    """The full, uncapped primitive set: one literal per printable character,
    the escaped specials, all character classes, and all five combinators."""
    literals = [
        Primitive("string_" + c, tpregex, pregex.String(c))
        for c in printable[:-4]
        if c not in disallowed_list
    ]
    escaped = [
        Primitive("string_" + name, tpregex, pregex.String(char))
        for char, name in disallowed
    ]
    char_classes = [
        Primitive("r_dot", tpregex, pregex.dot),
        Primitive("r_d", tpregex, pregex.d),
        Primitive("r_s", tpregex, pregex.s),
        Primitive("r_w", tpregex, pregex.w),
        Primitive("r_l", tpregex, pregex.l),
        Primitive("r_u", tpregex, pregex.u),
    ]
    unary = arrow(tpregex, tpregex)
    binary = arrow(tpregex, tpregex, tpregex)
    combinators = [
        Primitive("r_kleene", unary, _kleene),
        Primitive("r_plus", unary, _plus),
        Primitive("r_maybe", unary, _maybe),
        Primitive("r_alt", binary, _alt),
        Primitive("r_concat", binary, _concat),
    ]
    return literals + escaped + char_classes + combinators
def altPrimitives():
    """Variant of basePrimitives that adds the empty-string literal and
    drops r_plus."""
    prims = [Primitive("empty_string", tpregex, pregex.String(""))]
    prims += [
        Primitive("string_" + c, tpregex, pregex.String(c))
        for c in printable[:-4]
        if c not in disallowed_list
    ]
    prims += [
        Primitive("string_" + name, tpregex, pregex.String(char))
        for char, name in disallowed
    ]
    prims += [
        Primitive("r_dot", tpregex, pregex.dot),
        Primitive("r_d", tpregex, pregex.d),
        Primitive("r_s", tpregex, pregex.s),
        Primitive("r_w", tpregex, pregex.w),
        Primitive("r_l", tpregex, pregex.l),
        Primitive("r_u", tpregex, pregex.u),
        Primitive("r_kleene", arrow(tpregex, tpregex), _kleene),
        # r_plus is intentionally omitted from this set.
        Primitive("r_maybe", arrow(tpregex, tpregex), _maybe),
        Primitive("r_alt", arrow(tpregex, tpregex, tpregex), _alt),
        Primitive("r_concat", arrow(tpregex, tpregex, tpregex), _concat),
    ]
    return prims
def alt2Primitives():
    """Variant of altPrimitives that additionally drops r_maybe (keeps the
    empty-string literal; r_plus is also absent)."""
    prims = [Primitive("empty_string", tpregex, pregex.String(""))]
    prims += [
        Primitive("string_" + c, tpregex, pregex.String(c))
        for c in printable[:-4]
        if c not in disallowed_list
    ]
    prims += [
        Primitive("string_" + name, tpregex, pregex.String(char))
        for char, name in disallowed
    ]
    prims += [
        Primitive("r_dot", tpregex, pregex.dot),
        Primitive("r_d", tpregex, pregex.d),
        Primitive("r_s", tpregex, pregex.s),
        Primitive("r_w", tpregex, pregex.w),
        Primitive("r_l", tpregex, pregex.l),
        Primitive("r_u", tpregex, pregex.u),
        Primitive("r_kleene", arrow(tpregex, tpregex), _kleene),
        # r_plus and r_maybe are intentionally omitted from this set.
        Primitive("r_alt", arrow(tpregex, tpregex, tpregex), _alt),
        Primitive("r_concat", arrow(tpregex, tpregex, tpregex), _concat),
    ]
    return prims
def easyWordsPrimitives():
    """Primitive set for word-like tasks: letter literals only
    (printable[10:62] = a-z plus A-Z), no r_dot/r_w, full combinators."""
    letters = [
        Primitive("string_" + c, tpregex, pregex.String(c))
        for c in printable[10:62]
        if c not in disallowed_list
    ]
    rest = [
        Primitive("r_d", tpregex, pregex.d),
        Primitive("r_s", tpregex, pregex.s),
        # r_w is intentionally omitted from this set.
        Primitive("r_l", tpregex, pregex.l),
        Primitive("r_u", tpregex, pregex.u),
        Primitive("r_kleene", arrow(tpregex, tpregex), _kleene),
        Primitive("r_plus", arrow(tpregex, tpregex), _plus),
        Primitive("r_maybe", arrow(tpregex, tpregex), _maybe),
        Primitive("r_alt", arrow(tpregex, tpregex, tpregex), _alt),
        Primitive("r_concat", arrow(tpregex, tpregex, tpregex), _concat),
    ]
    return letters + rest
# def _wrapper(x): return lambda y: y
# specials = [".","*","+","?","|"]
"""
>>> import pregex as pre
>>> abc = pre.CharacterClass("abc", [0.1, 0.1, 0.8], name="MyConcept")
>>> abc.sample()
'b'
>>> abc.sample()
'c'
>>> abc.sample()
'c'
>>> abc.match("c")
-0.2231435513142097
>>> abc.match("a")
-2.3025850929940455
>>> abc
MyConcept
>>> x = pre.KleeneStar(abc)
>>> x.match("aabbac")
-16.58809928020405
>>> x.sample()
''
>>> x.sample()
''
>>> x.sample()
'cbcacc'
>>> x
(KleeneStar 0.5 MyConcept)
>>> str(x)
'MyConcept*'
"""
def emp_dot(corpus):
    """Empirical '.' class: all printable chars minus the 4-char whitespace
    tail, weighted by their frequency in *corpus*."""
    chars = printable[:-4]
    return pregex.CharacterClass(chars, emp_distro_from_corpus(corpus, chars), name=".")
def emp_d(corpus):
    """Empirical digit class (printable[:10] is '0'-'9'), corpus-weighted."""
    digits = printable[:10]
    return pregex.CharacterClass(digits, emp_distro_from_corpus(corpus, digits), name="\\d")
# emp_s = pre.CharacterClass(slist, [], name="emp\\s") #may want to forgo this one.
def emp_dot_no_letter(corpus):
    """Empirical '.' class restricted to non-letters: digits plus everything
    after the letters (printable[62:]), corpus-weighted."""
    chars = printable[:10] + printable[62:]
    return pregex.CharacterClass(chars, emp_distro_from_corpus(corpus, chars), name=".")
def emp_w(corpus):
    """Empirical word-char class (printable[:62] = digits + letters), corpus-weighted."""
    chars = printable[:62]
    return pregex.CharacterClass(chars, emp_distro_from_corpus(corpus, chars), name="\\w")
def emp_l(corpus):
    """Empirical lowercase-letter class (printable[10:36]), corpus-weighted."""
    lower = printable[10:36]
    return pregex.CharacterClass(lower, emp_distro_from_corpus(corpus, lower), name="\\l")
def emp_u(corpus):
    """Empirical uppercase-letter class (printable[36:62]), corpus-weighted."""
    upper = printable[36:62]
    return pregex.CharacterClass(upper, emp_distro_from_corpus(corpus, upper), name="\\u")
def emp_distro_from_corpus(corpus, char_list):
    """Relative frequency of each char in *char_list* across the corpus.

    Counts every character of every output string (example[1]) of every
    task, then normalizes over just the chars in *char_list*.  Chars absent
    from the corpus get probability 0.
    """
    from collections import Counter
    counts = Counter()
    for task in corpus:
        for example in task.examples:
            for string in example[1]:
                counts.update(string)
    total = sum(counts[ch] for ch in char_list)
    return [counts[ch] / total for ch in char_list]
def matchEmpericalPrimitives(corpus):
    """Return a thunk building an altPrimitives-style set whose '.', \\d,
    \\w, \\l and \\u classes are replaced by corpus-fit empirical classes
    (\\s stays the stock pregex.s)."""
    def build():
        prims = [Primitive("empty_string", tpregex, pregex.String(""))]
        prims += [
            Primitive("string_" + c, tpregex, pregex.String(c))
            for c in printable[:-4]
            if c not in disallowed_list
        ]
        prims += [
            Primitive("string_" + name, tpregex, pregex.String(char))
            for char, name in disallowed
        ]
        prims += [
            Primitive("r_dot", tpregex, emp_dot(corpus)),
            Primitive("r_d", tpregex, emp_d(corpus)),
            Primitive("r_s", tpregex, pregex.s),
            Primitive("r_w", tpregex, emp_w(corpus)),
            Primitive("r_l", tpregex, emp_l(corpus)),
            Primitive("r_u", tpregex, emp_u(corpus)),
            Primitive("r_kleene", arrow(tpregex, tpregex), _kleene),
            # r_plus is intentionally omitted from this set.
            Primitive("r_maybe", arrow(tpregex, tpregex), _maybe),
            Primitive("r_alt", arrow(tpregex, tpregex, tpregex), _alt),
            Primitive("r_concat", arrow(tpregex, tpregex, tpregex), _concat),
        ]
        return prims
    return build
def matchEmpericalNoLetterPrimitives(corpus):
    """Return a thunk building a letter-free empirical primitive set: letter
    literals are filtered out, r_dot uses the no-letter empirical class, and
    r_plus/r_maybe plus all letter classes are dropped."""
    def build():
        excluded = disallowed_list + list(printable[10:62])
        prims = [Primitive("empty_string", tpregex, pregex.String(""))]
        prims += [
            Primitive("string_" + c, tpregex, pregex.String(c))
            for c in printable[:-4]
            if c not in excluded
        ]
        prims += [
            Primitive("string_" + name, tpregex, pregex.String(char))
            for char, name in disallowed
        ]
        prims += [
            Primitive("r_dot", tpregex, emp_dot_no_letter(corpus)),
            Primitive("r_d", tpregex, emp_d(corpus)),
            Primitive("r_s", tpregex, pregex.s),
            Primitive("r_kleene", arrow(tpregex, tpregex), _kleene),
            # r_plus and r_maybe are intentionally omitted from this set.
            Primitive("r_alt", arrow(tpregex, tpregex, tpregex), _alt),
            Primitive("r_concat", arrow(tpregex, tpregex, tpregex), _concat),
        ]
        return prims
    return build
if __name__ == "__main__":
    # Smoke test for the primitive sets: build the concat set, parse and run
    # one hand-written program, then sample 100 random programs from a
    # uniform grammar and print each regex together with a sample it generates.
    concatPrimitives()
    from dreamcoder.program import Program

    p = Program.parse("(lambda (r_kleene (lambda (r_maybe (lambda (string_x $0)) $0)) $0))")
    print(p)
    # Programs here are continuations: apply to the empty string to get a pregex.
    print(p.runWithArguments([pregex.String("")]))
    prims = concatPrimitives()
    g = Grammar.uniform(prims)
    for i in range(100):
        prog = g.sample(arrow(tpregex, tpregex))
        preg = prog.runWithArguments([pregex.String("")])
        print("preg:", preg.__repr__())
        print("sample:", preg.sample())
| 37.093976 | 127 | 0.58958 |
d97970d04e39ed0341edcc68b90d51de5c71dad9 | 2,605 | py | Python | blueprints/aws_ebs_storage/management/attach_to_instance.py | hciudad/cloudbolt-forge | d1109c90dcd189defa70876906d394e0c91feab5 | [
"Apache-2.0"
] | 34 | 2015-08-30T09:26:41.000Z | 2022-03-03T13:18:14.000Z | blueprints/aws_ebs_storage/management/attach_to_instance.py | hciudad/cloudbolt-forge | d1109c90dcd189defa70876906d394e0c91feab5 | [
"Apache-2.0"
] | 22 | 2015-11-23T21:24:17.000Z | 2022-01-31T04:24:24.000Z | blueprints/aws_ebs_storage/management/attach_to_instance.py | hciudad/cloudbolt-forge | d1109c90dcd189defa70876906d394e0c91feab5 | [
"Apache-2.0"
] | 43 | 2015-11-18T16:26:22.000Z | 2021-10-18T13:19:39.000Z | from common.methods import set_progress
import boto3
import time
from botocore.client import ClientError
from infrastructure.models import Environment
from resources.models import Resource
from resourcehandlers.aws.models import AWSHandler
def generate_options_for_instances(resource, **kwargs):
    """Return the EC2 instance IDs selectable for this resource's region.

    Credentials and region come from the resource's AWS handler record.
    """
    instances = []
    rh = AWSHandler.objects.get(id=resource.aws_rh_id)
    region = resource.aws_region
    ec2 = boto3.client(
        'ec2',
        region_name=region,
        aws_access_key_id=rh.serviceaccount,
        aws_secret_access_key=rh.servicepasswd,
    )
    response = ec2.describe_instances()['Reservations']
    for instance in response:
        # NOTE(review): only the first instance of each reservation is taken;
        # reservations can hold several instances — confirm this is intended.
        # describe_instances is also paginated, so only the first page is seen.
        res = instance['Instances'][0]
        instances.append(res.get('InstanceId'))
    return instances
def run(job, *args, **kwargs):
    """Attach this resource's EBS volume to a chosen EC2 instance.

    Returns a CloudBolt (status, output, error) triple.  The volume must be
    in the 'available' state; the attachment is polled until it leaves the
    'attaching' state or roughly an hour has elapsed.
    """
    resource = kwargs.get('resources').first()

    # Template-rendered action inputs (filled in by CloudBolt at run time).
    instance_id = "{{ instances }}"
    device = "{{ device }}"

    volume_id = resource.attributes.get(field__name='ebs_volume_id').value
    rh_id = resource.attributes.get(field__name='aws_rh_id').value
    region = resource.attributes.get(field__name='aws_region').value
    handler = AWSHandler.objects.get(id=rh_id)
    ec2 = boto3.resource('ec2',
                         region_name=region,
                         aws_access_key_id=handler.serviceaccount,
                         aws_secret_access_key=handler.servicepasswd,
                         )
    volume = ec2.Volume(volume_id)
    state = volume.state
    if state != 'available':
        return "FAILURE", f"Can not attach volume to instance since the volume is in '{state.upper()}' state", ""
    set_progress("Connecting to Amazon EC2...")
    try:
        response = volume.attach_to_instance(
            Device=device,
            InstanceId=instance_id
        )
        # wait until the attachment process is complete
        state = response.get('State')
        count = 0   # elapsed seconds (incremented by the 5s sleep below)
        while state == 'attaching':
            set_progress("Attaching Instance")
            count += 5
            time.sleep(5)
            # NOTE(review): boto3 resource attributes are cached after first
            # load; without volume.reload() this may keep reading a stale
            # 'attaching' state until the timeout — confirm against boto3 docs.
            state = volume.attachments[0].get('State')
            if count > 3600:
                # Attaching is taking too long
                return "FAILURE", "Failed to attach volume to instance", "Attachment taking too long."
        # Record the attachment details on the CloudBolt resource.
        resource.instance_id = instance_id
        resource.device_name = device
        resource.volume_state = volume.state
        resource.save()
    except ClientError as e:
        return "FAILURE", "Failed to attach volume to instance", f"{e}"
    return "SUCCESS", f"Volume {volume_id} has been successfully attached", ""
| 32.974684 | 113 | 0.641075 |
d201bc427dd0b34ea2c0c92d523248728b56ae9e | 243 | py | Python | forecaster/__init__.py | alvis/forecaster | 7f4f6d7b943879bf29c4f2d7aeed9606989c5b5c | [
"MIT"
] | null | null | null | forecaster/__init__.py | alvis/forecaster | 7f4f6d7b943879bf29c4f2d7aeed9606989c5b5c | [
"MIT"
] | null | null | null | forecaster/__init__.py | alvis/forecaster | 7f4f6d7b943879bf29c4f2d7aeed9606989c5b5c | [
"MIT"
] | null | null | null | """
Collection of exports.
moduleauthor:: Alvis HT Tang <alvis@hilbert.space>
"""
from .forecaster import Forecaster
from .lstm import LSTMModel
# Names exported by `from forecaster import *`.
__all__ = [
    "Forecaster",
    "LSTMModel",
]
# Package metadata.
__version__ = "1.0"
__author__ = "Alvis Tang"
3da3d0b693e3db9d790a6c4318bdc60f34e0a606 | 24,159 | py | Python | aw_nas/weights_manager/ofa_backbone.py | A-LinCui/Discriminator-Guiding-Knowledge-Distillation-MAR | e8caad8de2a559b9c9532448bdcdedd566cb2cfa | [
"MIT"
] | null | null | null | aw_nas/weights_manager/ofa_backbone.py | A-LinCui/Discriminator-Guiding-Knowledge-Distillation-MAR | e8caad8de2a559b9c9532448bdcdedd566cb2cfa | [
"MIT"
] | null | null | null | aw_nas/weights_manager/ofa_backbone.py | A-LinCui/Discriminator-Guiding-Knowledge-Distillation-MAR | e8caad8de2a559b9c9532448bdcdedd566cb2cfa | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Base class definition of OFA Backbone."""
import abc
import copy
import torch
from torch import nn
from aw_nas import Component
from aw_nas.ops import *
from aw_nas.ops.baseline_ops import MobileNetV2Block, MobileNetV3Block
from aw_nas.utils import make_divisible, feature_level_to_stage_index
from aw_nas.utils.common_utils import _get_channel_mask
class FlexibleBlock(Component, nn.Module):
    """Base class for elastic (once-for-all) blocks, registered under the
    "ofa_block" registry."""

    REGISTRY = "ofa_block"

    def __init__(self, schedule_cfg=None):
        # Initialize both bases explicitly: Component consumes the schedule
        # config, while nn.Module needs its own plain __init__ call.
        super(FlexibleBlock, self).__init__(schedule_cfg)
        nn.Module.__init__(self)

    def reset_mask(self):
        """Clear the masks on every flexible sub-layer of this block."""
        flexible_layers = (m for m in self.modules() if isinstance(m, FlexibleLayer))
        for layer in flexible_layers:
            layer.reset_mask()
class FlexibleMobileNetV2Block(MobileNetV2Block, FlexibleBlock):
    """MobileNet-V2 inverted-residual block whose expansion ratio and
    depth-wise kernel size can be shrunk at runtime via channel/kernel
    masks (once-for-all weight sharing)."""

    NAME = "mbv2_block"

    def __init__(
        self,
        expansion,
        C,
        C_out,
        stride,
        kernel_sizes=(3, 5, 7),
        do_kernel_transform=True,
        affine=True,
        activation="relu",
        schedule_cfg=None,
    ):
        # Build the sub-modules at the *largest* configuration (max expansion
        # and max kernel size); smaller sub-networks are carved out later by
        # set_mask.  Module creation order matters for state_dict layout.
        FlexibleBlock.__init__(self, schedule_cfg)
        self.activation = activation
        C_inner = make_divisible(C * expansion, 8)
        self.kernel_sizes = sorted(kernel_sizes)
        self.kernel_size = self.kernel_sizes[-1]
        self.do_kernel_transform = do_kernel_transform
        self.affine = affine
        # Expansion 1x1 conv is skipped entirely when expansion == 1.
        inv_bottleneck = None
        if expansion != 1:
            inv_bottleneck = nn.Sequential(
                FlexiblePointLinear(C, C_inner, 1, 1, 0),
                FlexibleBatchNorm2d(C_inner, affine=affine),
                get_op(activation)(),
            )
        depth_wise = nn.Sequential(
            FlexibleDepthWiseConv(
                C_inner,
                self.kernel_sizes,
                stride,
                do_kernel_transform=do_kernel_transform,
            ),
            FlexibleBatchNorm2d(C_inner, affine=affine),
            get_op(activation)(),
        )
        point_linear = nn.Sequential(
            FlexiblePointLinear(C_inner, C_out, 1, 1, 0),
            FlexibleBatchNorm2d(C_out, affine=affine),
        )
        super(FlexibleMobileNetV2Block, self).__init__(
            expansion,
            C,
            C_out,
            stride,
            self.kernel_size,
            affine,
            activation,
            inv_bottleneck,
            depth_wise,
            point_linear,
        )
        self.reset_mask()

    def set_mask(self, expansion, kernel_size):
        """Mask the block down to the given expansion/kernel size.

        The channel mask is derived from the point-linear weights; passing
        expansion=None (or the block's own expansion) keeps the full width.
        """
        mask = None
        if expansion is not None and expansion != self.expansion:
            filters = self.point_linear[0].weight.data
            mask = _get_channel_mask(filters, make_divisible(self.C *
                                                             expansion, 8))
        if self.inv_bottleneck:
            self.inv_bottleneck[0].set_mask(None, mask)
            self.inv_bottleneck[1].set_mask(mask)
        self.depth_wise[0].set_mask(mask, kernel_size)
        self.depth_wise[1].set_mask(mask)
        self.point_linear[0].set_mask(mask, None)

    def forward_rollout(self, inputs, expansion, kernel_size):
        """Forward once under a temporary (expansion, kernel) mask, then
        restore the full block."""
        self.set_mask(expansion, kernel_size)
        out = self.forward(inputs)
        self.reset_mask()
        return out

    def finalize(self):
        """Freeze the currently-masked configuration into a plain
        (non-flexible) MobileNetV2Block."""
        inv_bottleneck = None
        if self.inv_bottleneck:
            inv_bottleneck = nn.Sequential(
                *[
                    m.finalize() if isinstance(m, FlexibleLayer) else m
                    for m in self.inv_bottleneck
                ]
            )
        depth_wise = nn.Sequential(
            *[
                m.finalize() if isinstance(m, FlexibleLayer) else m
                for m in self.depth_wise
            ]
        )
        point_linear = nn.Sequential(
            *[
                m.finalize() if isinstance(m, FlexibleLayer) else m
                for m in self.point_linear
            ]
        )
        return MobileNetV2Block(
            self.expansion,
            self.C,
            self.C_out,
            self.stride,
            self.kernel_size,
            self.affine,
            self.activation,
            inv_bottleneck,
            depth_wise,
            point_linear,
        )
class FlexibleMobileNetV3Block(MobileNetV3Block, FlexibleBlock):
    """MobileNet-V3 block (inverted residual with optional squeeze-excite)
    whose expansion ratio and depth-wise kernel size can be shrunk at
    runtime via channel/kernel masks (once-for-all weight sharing)."""

    NAME = "mbv3_block"

    def __init__(self,
                 expansion,
                 C,
                 C_out,
                 stride,
                 kernel_sizes=(3, 5, 7),
                 do_kernel_transform=True,
                 affine=True,
                 activation="relu",
                 use_se=False,
                 schedule_cfg=None
                 ):
        # Build at the *largest* configuration; smaller sub-networks are
        # carved out later with set_mask.  Creation order matters for
        # state_dict layout.
        FlexibleBlock.__init__(self, schedule_cfg)
        self.expansion = expansion
        self.activation = activation
        self.C = C
        self.C_out = C_out
        self.C_inner = make_divisible(C * expansion, 8)
        self.stride = stride
        self.kernel_sizes = sorted(kernel_sizes)
        self.kernel_size = self.kernel_sizes[-1]
        self.do_kernel_transform = do_kernel_transform
        self.use_se = use_se
        self.affine = affine
        # Expansion 1x1 conv is skipped entirely when expansion == 1.
        inv_bottleneck = None
        if expansion != 1:
            inv_bottleneck = nn.Sequential(
                FlexiblePointLinear(C, self.C_inner, 1, 1, 0),
                FlexibleBatchNorm2d(self.C_inner, affine=affine),
                get_op(activation)(),
            )
        depth_wise = nn.Sequential(
            FlexibleDepthWiseConv(
                self.C_inner,
                self.kernel_sizes,
                stride,
                do_kernel_transform=do_kernel_transform,
            ),
            FlexibleBatchNorm2d(self.C_inner, affine=affine),
            get_op(activation)(),
        )
        point_linear = nn.Sequential(
            FlexiblePointLinear(self.C_inner, C_out, 1, 1, 0),
            FlexibleBatchNorm2d(C_out, affine=affine),
        )
        se = None
        if self.use_se:
            se = FlexibleSEModule(self.C_inner)
        super(FlexibleMobileNetV3Block, self).__init__(
            expansion,
            C,
            C_out,
            stride,
            self.kernel_size,
            affine,
            activation,
            use_se,
            inv_bottleneck,
            depth_wise,
            point_linear,
            se,
        )
        self.reset_mask()

    def set_mask(self, expansion, kernel_size):
        """Mask the block down to the given expansion/kernel size.

        NOTE(review): unlike FlexibleMobileNetV2Block.set_mask, there is no
        `expansion is not None` guard here; passing None would crash inside
        make_divisible — confirm callers never pass None.
        """
        mask = None
        if expansion != self.expansion:
            filters = self.point_linear[0].weight.data
            mask = _get_channel_mask(filters, make_divisible(self.C *
                                                             expansion, 8))
        if self.inv_bottleneck:
            self.inv_bottleneck[0].set_mask(None, mask)
            self.inv_bottleneck[1].set_mask(mask)
        self.depth_wise[0].set_mask(mask, kernel_size)
        self.depth_wise[1].set_mask(mask)
        self.point_linear[0].set_mask(mask, None)
        if self.se:
            self.se.set_mask(mask)

    def forward_rollout(self, inputs, expansion, kernel_size, drop_connect_rate=0.0):
        """Forward once under a temporary (expansion, kernel) mask, then
        restore the full block.  drop_connect_rate is forwarded to the base
        block's forward."""
        self.set_mask(expansion, kernel_size)
        out = self.forward(inputs, drop_connect_rate)
        self.reset_mask()
        return out

    def finalize(self):
        """Freeze the currently-masked configuration into a plain
        (non-flexible) MobileNetV3Block."""
        inv_bottleneck = None
        if self.inv_bottleneck:
            inv_bottleneck = nn.Sequential(
                *[
                    m.finalize() if isinstance(m, FlexibleLayer) else m
                    for m in self.inv_bottleneck
                ]
            )
        depth_wise = nn.Sequential(
            *[
                m.finalize() if isinstance(m, FlexibleLayer) else m
                for m in self.depth_wise
            ]
        )
        point_linear = nn.Sequential(
            *[
                m.finalize() if isinstance(m, FlexibleLayer) else m
                for m in self.point_linear
            ]
        )
        se = None
        if self.se:
            se = self.se.finalize()
        return MobileNetV3Block(
            self.expansion,
            self.C,
            self.C_out,
            self.stride,
            self.kernel_size,
            self.affine,
            self.activation,
            self.use_se,
            inv_bottleneck,
            depth_wise,
            point_linear,
            se,
        )
class BaseBackboneArch(Component, nn.Module):
    """Registry base class for once-for-all backbone architectures.

    Stores the common stage configuration (depths, strides, expansions,
    channels, kernel sizes) for subclasses to build from.
    """

    REGISTRY = "ofa_backbone"

    def __init__(
        self,
        device,
        blocks=[1, 4, 4, 4, 4, 4],
        strides=[1, 2, 2, 1, 2, 1],
        expansions=[1, 6, 6, 6, 6, 6],
        layer_channels=[16, 24, 40, 80, 96, 192, 320],
        mult_ratio=1.0,
        kernel_sizes=[3, 5, 7],
        do_kernel_transform=True,
        num_classes=10,
        cell_type="mbv2_cell",
        pretrained_path=None,
        schedule_cfg=None,
    ):
        # NOTE(review): the list defaults are shared across instances; they
        # are only read here, but mutating them downstream would leak state.
        # `cell_type` is accepted (subclasses pass their block_type through
        # this slot) but is not stored by this base class.
        super(BaseBackboneArch, self).__init__(schedule_cfg)
        nn.Module.__init__(self)
        self.device = device
        self.blocks = blocks
        self.strides = strides
        self.expansions = expansions
        self.channels = layer_channels
        self.mult_ratio = mult_ratio
        self.kernel_sizes = kernel_sizes
        self.do_kernel_transform = do_kernel_transform
        self.num_classes = num_classes
        self.pretrained_path = pretrained_path

    @abc.abstractmethod
    def make_stage(
        self, C_in, C_out, depth, stride, expansion, kernel_size, mult_ratio=1.0
    ):
        """
        Build one stage: a series of `depth` blocks (the first block carries
        the stage stride, the rest use stride 1).
        """
class MobileNetV2Arch(BaseBackboneArch):
    NAME = "mbv2_backbone"

    """
    According to the original papar MobileNet-V2, the standard architecture is:
    | input_ch | operator | t | c | n | s |
    | 3 | conv2d | - | 32 | 1 | 2 |
    | 32 | bottleneck | 1 | 16 | 1 | 1 |
    | 16 | bottleneck | 6 | 24 | 2 | 2 |
    | 24 | bottleneck | 6 | 32 | 3 | 2 |
    | 32 | bottleneck | 6 | 64 | 4 | 2 |
    | 64 | bottleneck | 6 | 96 | 3 | 1 |
    | 96 | bottleneck | 6 | 160 | 3 | 2 |
    | 160 | bottleneck | 6 | 320 | 1 | 1 |
    | 320 | conv2d 1x1 | - | 1280 | 1 | 1 |
    | 1280 | avgpool7x7 | - | - | 1 | - |
    | 1280 | conv2d 1x1 | - | k | - | |
    The first `conv2d` is called stem, and the last two `conv2d` are called
    "conv_final" and "classifier" respectively.
    However, in order to be compatible with MobileNet-V3, which has only 6
    rather than 7 bottleneck stages, we fix the last bottleneck(160 -> 320) as
    t=6, n=1, k=3.
    """

    def __init__(
        self,
        device,
        blocks=[1, 4, 4, 4, 4, 4],
        strides=[1, 2, 2, 2, 1, 2],
        expansions=[1, 6, 6, 6, 6, 6],
        layer_channels=[32, 16, 24, 32, 64, 96, 160, 320, 1280],
        mult_ratio=1.0,
        kernel_sizes=[3, 5, 7],
        do_kernel_transform=True,
        num_classes=10,
        block_type="mbv2_block",
        pretrained_path=None,
        stem_stride=2,
        schedule_cfg=None,
    ):
        super(MobileNetV2Arch, self).__init__(
            device,
            blocks,
            strides,
            expansions,
            layer_channels,
            mult_ratio,
            kernel_sizes,
            do_kernel_transform,
            num_classes,
            block_type,
            pretrained_path,
            schedule_cfg,
        )
        self.block_initializer = FlexibleBlock.get_class_(block_type)
        self.stem_stride = stem_stride
        # Apply the width multiplier, rounding channels to multiples of 8.
        self.channels = [make_divisible(c * mult_ratio, 8)
                         for c in layer_channels]
        self.stem = nn.Sequential(
            nn.Conv2d(
                3, self.channels[0], kernel_size=3, stride=self.stem_stride, padding=1, bias=False
            ),
            nn.BatchNorm2d(self.channels[0]),
            get_op("relu")(),
        )
        expect(
            blocks[0] == expansions[0] == 1,
            "The first conv layer should have single block and no expansion.",
            ValueError,
        )
        self.mult_ratio = mult_ratio
        # First stage is fixed: single block, expansion 1, kernel 3.
        self.cells = [
            self.make_stage(
                self.channels[0],
                self.channels[1],
                self.blocks[0],
                self.strides[0],
                self.expansions[0],
                [3],
            )
        ]
        for i, depth in enumerate(self.blocks[1:], 1):
            self.cells.append(
                self.make_stage(
                    self.channels[i],
                    self.channels[i + 1],
                    depth,
                    self.strides[i],
                    self.expansions[i],
                    self.kernel_sizes,
                )
            )
        self.cells = nn.ModuleList(self.cells)
        # Final fixed bottleneck (160 -> 320 in the default config).
        self.conv_head = self.block_initializer(
            6,
            self.channels[-3],
            self.channels[-2],
            1,
            self.kernel_sizes,
            self.do_kernel_transform,
            activation="relu",
            affine=True,
        )
        self.conv_final = nn.Sequential(
            FlexiblePointLinear(self.channels[-2], self.channels[-1], 1, 1, 0),
            nn.BatchNorm2d(self.channels[-1]),
        )
        self.classifier = nn.Conv2d(self.channels[-1], num_classes, 1, 1, 0)

        if self.pretrained_path:
            state_dict = torch.load(self.pretrained_path, "cpu")
            # Drop the classifier weights when the class count differs.
            if state_dict["classifier.weight"].shape[0] != self.num_classes:
                del state_dict["classifier.weight"]
                del state_dict["classifier.bias"]
                self.logger.info(
                    f"loading pretrained model from path {self.pretrained_path}...")
            self.logger.info(self.load_state_dict(state_dict, strict=False))
        self.to(self.device)

    def make_stage(self, C_in, C_out, block_num, stride, expansion, kernel_sizes):
        """Build a stage of `block_num` flexible blocks; only the first
        block carries `stride`."""
        cell = []
        for i in range(block_num):
            if i == 0:
                s = stride
            else:
                s = 1
                C_in = C_out
            cell.append(
                self.block_initializer(
                    expansion,
                    C_in,
                    C_out,
                    s,
                    kernel_sizes,
                    self.do_kernel_transform,
                    activation="relu",
                    affine=True,
                )
            )
        return nn.ModuleList(cell)

    def forward(self, inputs):
        return self.forward_rollout(inputs)

    def forward_rollout(self, inputs, rollout=None):
        """Forward pass; with a rollout, each stage runs only its first
        rollout.depth[i] blocks under per-block width/kernel masks."""
        out = self.stem(inputs)
        for i, cell in enumerate(self.cells):
            for j, block in enumerate(cell):
                if rollout is None:
                    out = block(out)
                else:
                    if j >= rollout.depth[i]:
                        break
                    out = block.forward_rollout(
                        out, rollout.width[i][j], rollout.kernel[i][j]
                    )
        out = self.conv_head(out)
        out = self.conv_final(out)
        out = F.adaptive_avg_pool2d(out, 1)
        return self.classifier(out).flatten(1)

    def finalize(self, blocks, expansions, kernel_sizes):
        """Return a deep-copied model whose cells are frozen to the given
        per-stage depths / expansions / kernel sizes."""
        cells = []
        finalized_model = copy.deepcopy(self)
        for i, cell in enumerate(self.cells):
            cells.append([])
            for j, block in enumerate(cell):
                if j >= blocks[i]:
                    break
                block.set_mask(expansions[i][j], kernel_sizes[i][j])
                cells[-1].append(block.finalize())
            cells[-1] = nn.ModuleList(cells[-1])
        finalized_model.cells = nn.ModuleList(cells)
        return finalized_model

    def extract_features(self, inputs, p_levels, rollout=None, drop_connect_rate=0.0):
        """Return (per-level feature maps, final head output) for detection-
        style consumers.

        NOTE(review): this passes drop_connect_rate into block(...) and
        block.forward_rollout(...), but FlexibleMobileNetV2Block.forward_rollout
        above takes no such argument — confirm this path is actually
        exercised with mbv2 blocks (it would raise TypeError).
        """
        out = self.stem(inputs)
        level_indexes = feature_level_to_stage_index(self.strides)
        features = []
        for i, cell in enumerate(self.cells):
            for j, block in enumerate(cell):
                if rollout is None:
                    out = block(out, drop_connect_rate)
                else:
                    if j >= rollout.depth[i]:
                        break
                    out = block.forward_rollout(
                        out, rollout.width[i][j], rollout.kernel[i][j], drop_connect_rate
                    )
            features.append(out)
        out = self.conv_head(out)
        features[-1] = out
        return [features[level_indexes[p]] for p in p_levels], out

    def get_feature_channel_num(self, p_levels):
        # NOTE(review): this definition is shadowed by the identical one
        # defined again below — dead code candidate.
        level_indexes = feature_level_to_stage_index(self.strides)
        return [self.channels[level_indexes[p]] for p in p_levels]

    def get_features(self, inputs, p_levels, rollout=None):
        """Same as extract_features but without drop-connect."""
        out = self.stem(inputs)
        level_indexes = feature_level_to_stage_index(self.strides)
        features = []
        for i, cell in enumerate(self.cells):
            for j, block in enumerate(cell):
                if rollout is None:
                    out = block(out)
                else:
                    if j >= rollout.depth[i]:
                        break
                    out = block.forward_rollout(
                        out, rollout.width[i][j], rollout.kernel[i][j]
                    )
            features.append(out)
        out = self.conv_head(out)
        features[-1] = out
        return [features[level_indexes[p]] for p in p_levels], out

    def get_feature_channel_num(self, p_levels):
        """Channel counts of the stages feeding each requested feature level.

        NOTE(review): duplicate definition — identical to the one above, so
        behavior is unchanged, but one of the two should be removed.
        """
        level_indexes = feature_level_to_stage_index(self.strides)
        return [self.channels[level_indexes[p]] for p in p_levels]
class MobileNetV3Arch(BaseBackboneArch):
    """Once-for-all MobileNet-V3 backbone: per-stage squeeze-excite flags
    and activations, h_swish stem/head, and a linear classifier."""

    NAME = "mbv3_backbone"

    def __init__(
        self,
        device,
        blocks=[1, 4, 4, 4, 4, 4],
        strides=[1, 2, 2, 2, 1, 2],
        expansions=[1, 6, 6, 6, 6, 6],
        layer_channels=[16, 16, 24, 40, 80, 112, 160, 960, 1280],
        mult_ratio=1.0,
        kernel_sizes=[3, 5, 7],
        do_kernel_transform=True,
        use_ses=[False, False, True, False, True, True],
        acts=["relu", "relu", "relu", "h_swish", "h_swish", "h_swish"],
        num_classes=10,
        block_type="mbv3_block",
        pretrained_path=None,
        stem_stride=2,
        schedule_cfg=None,
    ):
        super(MobileNetV3Arch, self).__init__(
            device,
            blocks,
            strides,
            expansions,
            layer_channels,
            mult_ratio,
            kernel_sizes,
            do_kernel_transform,
            num_classes,
            block_type,
            pretrained_path,
            schedule_cfg,
        )
        self.block_initializer = FlexibleBlock.get_class_(block_type)
        # Apply the width multiplier, rounding channels to multiples of 8.
        self.channels = [make_divisible(c * mult_ratio, 8)
                         for c in layer_channels]
        self.stem_stride = stem_stride
        self.stem = nn.Sequential(
            nn.Conv2d(
                3, self.channels[0], kernel_size=3, stride=self.stem_stride, padding=1, bias=False
            ),
            nn.BatchNorm2d(self.channels[0]),
            get_op("h_swish")(),
        )
        expect(
            blocks[0] == expansions[0] == 1,
            "The first conv layer should have single block and no expansion.",
            ValueError,
        )
        self.mult_ratio = mult_ratio
        self.use_ses = use_ses
        self.acts = acts
        # First stage is fixed: single block, expansion 1, kernel 3.
        self.cells = [
            self.make_stage(
                self.channels[0],
                self.channels[1],
                self.blocks[0],
                self.strides[0],
                self.expansions[0],
                [3],
                self.use_ses[0],
                self.acts[0],
            )
        ]
        for i, depth in enumerate(self.blocks[1:], 1):
            self.cells.append(
                self.make_stage(
                    self.channels[i],
                    self.channels[i + 1],
                    depth,
                    self.strides[i],
                    self.expansions[i],
                    self.kernel_sizes,
                    self.use_ses[i],
                    self.acts[i],
                )
            )
        self.cells = nn.ModuleList(self.cells)
        self.conv_head = nn.Sequential(
            nn.Conv2d(self.channels[-3],
                      self.channels[-2], 1, 1, 0, bias=False),
            nn.BatchNorm2d(self.channels[-2]),
            get_op("h_swish")(),
        )
        # conv_final runs after global pooling, hence no batch norm.
        self.conv_final = nn.Sequential(
            nn.Conv2d(self.channels[-2],
                      self.channels[-1], 1, 1, 0, bias=False),
            get_op("h_swish")(),
        )
        self.classifier = nn.Linear(self.channels[-1], num_classes)
        if self.pretrained_path:
            state_dict = torch.load(self.pretrained_path, "cpu")
            # Drop the classifier weights when the class count differs.
            if state_dict["classifier.weight"].shape[0] != self.num_classes:
                del state_dict["classifier.weight"]
                del state_dict["classifier.bias"]
            self.logger.info(self.load_state_dict(state_dict, strict=False))
        self.to(self.device)

    def make_stage(
        self, C_in, C_out, block_num, stride, expansion, kernel_sizes, use_se, act
    ):
        """Build a stage of `block_num` flexible blocks; only the first
        block carries `stride`."""
        cell = []
        for i in range(block_num):
            if i == 0:
                s = stride
            else:
                s = 1
                C_in = C_out
            cell.append(
                self.block_initializer(
                    expansion,
                    C_in,
                    C_out,
                    s,
                    kernel_sizes,
                    self.do_kernel_transform,
                    activation=act,
                    affine=True,
                    use_se=use_se,
                )
            )
        return nn.ModuleList(cell)

    def forward(self, inputs):
        return self.forward_rollout(inputs)

    def forward_rollout(self, inputs, rollout=None):
        """Forward pass; with a rollout, each stage runs only its first
        rollout.depth[i] blocks under per-block width/kernel masks."""
        out = self.stem(inputs)
        for i, cell in enumerate(self.cells):
            for j, block in enumerate(cell):
                if rollout is None:
                    out = block(out)
                else:
                    if j >= rollout.depth[i]:
                        break
                    out = block.forward_rollout(
                        out, rollout.width[i][j], rollout.kernel[i][j]
                    )
        out = self.conv_head(out)
        # Global average pooling before the final 1x1 conv (MobileNet-V3 head).
        out = out.mean(3, keepdim=True).mean(2, keepdim=True)
        out = self.conv_final(out)
        out = torch.flatten(out, 1)
        return self.classifier(out)

    def finalize(self, blocks, expansions, kernel_sizes):
        """Return a deep-copied model whose cells are frozen to the given
        per-stage depths / expansions / kernel sizes."""
        cells = []
        finalized_model = copy.deepcopy(self)
        for i, cell in enumerate(self.cells):
            cells.append([])
            for j, block in enumerate(cell):
                if j >= blocks[i]:
                    break
                block.set_mask(expansions[i][j], kernel_sizes[i][j])
                cells[-1].append(block.finalize())
            cells[-1] = nn.ModuleList(cells[-1])
        finalized_model.cells = nn.ModuleList(cells)
        return finalized_model

    def extract_features(self, inputs, p_levels, rollout=None, drop_connect_rate=0.0):
        """Return (per-level feature maps, final head output) for detection-
        style consumers; drop_connect_rate is forwarded to the blocks."""
        out = self.stem(inputs)
        level_indexes = feature_level_to_stage_index(self.strides)
        features = []
        for i, cell in enumerate(self.cells):
            for j, block in enumerate(cell):
                if rollout is None:
                    out = block(out, drop_connect_rate)
                else:
                    if j >= rollout.depth[i]:
                        break
                    out = block.forward_rollout(
                        out, rollout.width[i][j], rollout.kernel[i][j], drop_connect_rate
                    )
            features.append(out)
        out = self.conv_head(out)
        features[-1] = out
        return [features[level_indexes[p]] for p in p_levels], out

    def get_feature_channel_num(self, p_levels):
        # NOTE(review): indexes with `self.strides + [1]` and an offset of 1,
        # unlike the V2 variant — presumably to account for the stem channel
        # entry in layer_channels; confirm against feature_level_to_stage_index.
        level_indexes = feature_level_to_stage_index(self.strides + [1])
        return [self.channels[1 + level_indexes[p]] for p in p_levels]
| 32.959072 | 98 | 0.519103 |
e106cd9a4aa5b00c69cfc16758a34c6e4475f771 | 6,480 | py | Python | study/python/pyqt/demo/login.py | cheenwe/blog | a866b3ab98aa58e3ed4a7624fbb72c8fd8dee790 | [
"MIT"
] | 10 | 2016-09-28T03:22:41.000Z | 2020-06-16T08:42:25.000Z | study/python/pyqt/demo/login.py | cheenwe/blog | a866b3ab98aa58e3ed4a7624fbb72c8fd8dee790 | [
"MIT"
] | 12 | 2017-04-18T08:41:04.000Z | 2020-06-10T02:54:58.000Z | study/python/pyqt/demo/login.py | cheenwe/blog | a866b3ab98aa58e3ed4a7624fbb72c8fd8dee790 | [
"MIT"
] | 8 | 2016-09-28T03:03:32.000Z | 2019-09-16T04:22:01.000Z | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'mainwindow.ui'
#
# Created by: PyQt5 UI code generator 5.9.2
#
# WARNING! All changes made in this file will be lost!
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'mainwindow.ui'
#
# Created by: PyQt5 UI code generator 5.9.2
#
# WARNING! All changes made in this file will be lost!
import sys
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtWidgets import QApplication, QMainWindow, QMessageBox
from db import Db
from main import MainPage
from signup import Ui_SignupWindow
class Ui_LoginWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(480, 360)
MainWindow.setMinimumSize(QtCore.QSize(480, 360))
MainWindow.setMaximumSize(QtCore.QSize(480, 360))
MainWindow.setLayoutDirection(QtCore.Qt.LeftToRight)
MainWindow.setAutoFillBackground(False)
MainWindow.setStyleSheet("border-color: qconicalgradient(cx:0.5, cy:0.5, angle:0, stop:0 rgba(35, 40, 3, 255), stop:0.16 rgba(136, 106, 22, 255), stop:0.225 rgba(166, 140, 41, 255), stop:0.285 rgba(204, 181, 74, 255), stop:0.345 rgba(235, 219, 102, 255), stop:0.415 rgba(245, 236, 112, 255), stop:0.52 rgba(209, 190, 76, 255), stop:0.57 rgba(187, 156, 51, 255), stop:0.635 rgba(168, 142, 42, 255), stop:0.695 rgba(202, 174, 68, 255), stop:0.75 rgba(218, 202, 86, 255), stop:0.815 rgba(208, 187, 73, 255), stop:0.88 rgba(187, 156, 51, 255), stop:0.935 rgba(137, 108, 26, 255), stop:1 rgba(35, 40, 3, 255));")
self.centralWidget = QtWidgets.QWidget(MainWindow)
self.centralWidget.setObjectName("centralWidget")
self.dial = QtWidgets.QDial(self.centralWidget)
self.dial.setGeometry(QtCore.QRect(200, 10, 61, 64))
self.dial.setObjectName("dial")
self.layoutWidget = QtWidgets.QWidget(self.centralWidget)
self.layoutWidget.setGeometry(QtCore.QRect(120, 120, 261, 124))
self.layoutWidget.setObjectName("layoutWidget")
self.gridLayout = QtWidgets.QGridLayout(self.layoutWidget)
self.gridLayout.setContentsMargins(11, 11, 11, 11)
self.gridLayout.setSpacing(6)
self.gridLayout.setObjectName("gridLayout")
self.password = QtWidgets.QLineEdit(self.layoutWidget)
self.password.setWhatsThis("")
self.password.setInputMethodHints(QtCore.Qt.ImhHiddenText|QtCore.Qt.ImhNoAutoUppercase|QtCore.Qt.ImhNoPredictiveText|QtCore.Qt.ImhSensitiveData)
self.password.setEchoMode(QtWidgets.QLineEdit.Password)
self.password.setClearButtonEnabled(True)
self.password.setObjectName("password")
self.gridLayout.addWidget(self.password, 1, 0, 1, 1)
self.btnLogin = QtWidgets.QPushButton(self.layoutWidget)
self.btnLogin.setObjectName("btnLogin")
self.btnLogin.clicked.connect(self.loginBtnClick)
self.gridLayout.addWidget(self.btnLogin, 2, 0, 1, 1)
self.username = QtWidgets.QLineEdit(self.layoutWidget)
self.username.setToolTip("")
self.username.setWhatsThis("")
self.username.setAccessibleName("")
self.username.setAccessibleDescription("")
self.username.setAutoFillBackground(False)
self.username.setClearButtonEnabled(True)
self.username.setObjectName("username")
self.gridLayout.addWidget(self.username, 0, 0, 1, 1)
self.btnRegister = QtWidgets.QPushButton(self.centralWidget)
self.btnRegister.setGeometry(QtCore.QRect(120, 250, 261, 32))
self.btnRegister.setObjectName("btnRegister")
self.btnRegister.clicked.connect(self.registerBtnClick)
MainWindow.setCentralWidget(self.centralWidget)
self.statusBar = QtWidgets.QStatusBar(MainWindow)
self.statusBar.setObjectName("statusBar")
MainWindow.setStatusBar(self.statusBar)
self.menuBar = QtWidgets.QMenuBar(MainWindow)
self.menuBar.setGeometry(QtCore.QRect(0, 0, 480, 25))
self.menuBar.setObjectName("menuBar")
MainWindow.setMenuBar(self.menuBar)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
    """Install the translated (localized) strings on every visible widget."""
    tr = QtCore.QCoreApplication.translate
    MainWindow.setWindowTitle(tr("MainWindow", "登录界面"))
    self.password.setStatusTip(tr("MainWindow", " 请输入密码"))
    self.password.setPlaceholderText(tr("MainWindow", "密码"))
    self.btnLogin.setText(tr("MainWindow", "登录"))
    self.username.setStatusTip(tr("MainWindow", "请输入用户名"))
    self.username.setPlaceholderText(tr("MainWindow", "账户名"))
    self.btnRegister.setText(tr("MainWindow", "注册账号"))
    self.menuBar.setWhatsThis(tr("MainWindow", "loginPage"))
def loginBtnClick(self):
    """Validate the entered credentials and open the main page on success.

    Reads the username/password fields, checks them via ``Db.loginCheck``,
    and either transitions to the main window (clearing the inputs) or
    shows a warning dialog.
    """
    username = self.username.text()
    password = self.password.text()
    db = Db()
    # loginCheck is truthy when the username/password pair is valid.
    # The leftover debug print()s were removed: echoing the login-check
    # result to stdout leaks authentication details.
    if db.loginCheck(username, password):
        self.showMainPage()
        self.clearField()
    else:
        self.showMessage("Warning", "Invalid Username and Password")
def registerBtnClick(self):
    """Open the sign-up window so a new account can be created.

    References are kept on ``self`` so the window object is not garbage
    collected as soon as this slot returns. Removed the debug print and
    dead commented-out code.
    """
    self.signupDialog = QtWidgets.QMainWindow()
    # NOTE(review): self.ui is also reassigned by showMainPage /
    # showRegisterPage; opening several windows overwrites the previous
    # Ui object — confirm only one window is live at a time.
    self.ui = Ui_SignupWindow()
    self.ui.setupUi(self.signupDialog)
    self.signupDialog.show()
def showRegisterPage(self):
    """Build the sign-up window (it is created but not shown here)."""
    self.signupWindow = QtWidgets.QMainWindow()
    self.ui = Ui_SignupWindow()
    self.ui.setupUi(self.signupWindow)
def showMainPage(self):
    """Build the application's main page (created but not shown here)."""
    self.mainWindow = QtWidgets.QMainWindow()
    self.ui = MainPage()
    self.ui.setupUi(self.mainWindow)
def clearField(self):
    """Blank out both login inputs (e.g. after a successful login).

    Uses QLineEdit.clear() instead of setText(None): passing None relies
    on PyQt's implicit None -> null-QString conversion, while clear() is
    the explicit, documented API for emptying the field.
    """
    self.username.clear()
    self.password.clear()
def showMessage(self, title, msg):
    """Pop up a modal warning dialog.

    Args:
        title: Window title for the dialog.
        msg: Body text shown to the user.
    """
    msgBox = QMessageBox()
    msgBox.setIcon(QMessageBox.Warning)
    # QMessageBox has no setTitle() (the old call was commented out,
    # leaving `title` unused); setWindowTitle is the correct API.
    msgBox.setWindowTitle(title)
    msgBox.setText(msg)
    msgBox.setStandardButtons(QMessageBox.Ok)
    msgBox.exec_()
if __name__ == '__main__':
    # Script entry point: create the Qt application, build and show the
    # login window, then hand control to the Qt event loop.
    app = QApplication(sys.argv)
    MainWindow = QMainWindow()
    ui = Ui_LoginWindow()
    ui.setupUi(MainWindow)
    MainWindow.show()
    # exec_() blocks until the last window closes; its exit status is
    # propagated to the OS via sys.exit.
    sys.exit(app.exec_())
| 41.538462 | 615 | 0.682407 |
d9b031d080c91af033194209d31b3c13ed6c75cd | 71,378 | py | Python | tensorflow/python/keras/engine/network.py | fanshiqing/tensorflow | e7475504094b3018973740df470d2c9ee73a4fd5 | [
"Apache-2.0"
] | 2 | 2018-08-04T14:13:05.000Z | 2018-09-10T03:57:55.000Z | tensorflow/python/keras/engine/network.py | fanshiqing/tensorflow | e7475504094b3018973740df470d2c9ee73a4fd5 | [
"Apache-2.0"
] | null | null | null | tensorflow/python/keras/engine/network.py | fanshiqing/tensorflow | e7475504094b3018973740df470d2c9ee73a4fd5 | [
"Apache-2.0"
] | 1 | 2018-08-21T21:53:14.000Z | 2018-08-21T21:53:14.000Z | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=protected-access
"""A `Network` is way to compose layers: the topological form of a `Model`.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import json
import os
import weakref
import numpy as np
from six.moves import zip # pylint: disable=redefined-builtin
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.eager import context
from tensorflow.python.framework import errors
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.keras import backend
from tensorflow.python.keras.engine import base_layer
from tensorflow.python.keras.engine import saving
from tensorflow.python.keras.utils import generic_utils
from tensorflow.python.keras.utils import layer_utils
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.keras.utils.io_utils import ask_to_proceed_with_overwrite
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training.checkpointable import base as checkpointable
from tensorflow.python.training.checkpointable import data_structures
from tensorflow.python.training.checkpointable import layer_utils as checkpointable_layer_utils
from tensorflow.python.training.checkpointable import util as checkpointable_utils
from tensorflow.python.util import nest
from tensorflow.python.util import tf_inspect
# pylint: disable=g-import-not-at-top
try:
import h5py
except ImportError:
h5py = None
try:
import yaml
except ImportError:
yaml = None
# pylint: enable=g-import-not-at-top
class Network(base_layer.Layer):
"""A `Network` is a composition of layers.
It is the topological form of a "model". A `Model`
is simply a `Network` with added training routines.
"""
def __init__(self, *args, **kwargs):  # pylint: disable=super-init-not-called
  """Dispatch to graph-network or subclassed-network initialization.

  A functional ("graph") network is recognized by the presence of both
  inputs and outputs — positionally or by keyword; anything else is
  treated as a subclassed network.
  """
  graph_style = (
      len(args) == 2 or
      (len(args) == 1 and 'outputs' in kwargs) or
      ('inputs' in kwargs and 'outputs' in kwargs))
  if graph_style:
    self._init_graph_network(*args, **kwargs)
  else:
    self._init_subclassed_network(**kwargs)
# Several Network methods have "no_automatic_dependency_tracking"
# annotations. Since Network does automatic dependency tracking on attribute
# assignment, including for common data structures such as lists, by default
# we'd have quite a few empty dependencies which users don't care about (or
# would need some way to ignore dependencies automatically, which is confusing
# when applied to user code). Some attributes, such as _layers, would cause
# structural issues (_layers being the place where Layers assigned to tracked
# attributes are stored).
#
# Aside from these aesthetic and structural issues, useless dependencies on
# empty lists shouldn't cause issues; adding or removing them will not break
# checkpoints, but may cause "all Python objects matched" assertions to fail
# (in which case less strict assertions may be substituted if necessary).
@checkpointable.no_automatic_dependency_tracking
def _base_init(self, name=None):
  """Initialization shared by graph and subclassed networks.

  Sets naming, trainability/compilation flags, the bookkeeping
  containers for layers/updates/losses, and the checkpoint saver. A
  Network creates no weights of its own, so no dtype is assigned.
  """
  # The following are implemented as property functions:
  # self.trainable_weights
  # self.non_trainable_weights
  # self.input_spec
  # self.losses
  # self.updates
  self._init_set_name(name, zero_based=True)
  self._activity_regularizer = None
  # This acts just like the `trainable` attribute of any layer instance.
  # It does not affect users of the underlying layers, only users of the
  # Network instance.
  self.trainable = True
  self._is_compiled = False
  self._expects_training_arg = False
  # A list of "extra" variables assigned to attributes of this class, included
  # in self.weights and self.variables. Always empty for graph networks (but
  # included in base_init to avoid excessive special casing when retrieving
  # the value).
  self._extra_variables = []
  self.supports_masking = False
  if not hasattr(self, 'optimizer'):
    # Don't reset optimizer if already set.
    self.optimizer = None
  # Private attributes to implement compatibility with Layer.
  self._updates = []  # Used in symbolic mode only.
  self._losses = []  # Used in symbolic mode only.
  self._scope = None  # Never used.
  self._reuse = None  # Never used.
  if context.executing_eagerly():
    self._graph = None
  else:
    self._graph = ops.get_default_graph()  # Used in symbolic mode only.
  # A Network does not create weights of its own, thus has no dtype.
  self._dtype = None
  # All layers in order of horizontal graph traversal.
  # Entries are unique. Includes input and output layers.
  self._layers = []
  # Used in symbolic mode only, only in conjunction with graph-networks
  self._outbound_nodes = []
  self._inbound_nodes = []
  # Saver for object-based checkpoints; weakref avoids a reference cycle.
  self._checkpointable_saver = checkpointable_utils.CheckpointableSaver(
      weakref.ref(self))
@checkpointable.no_automatic_dependency_tracking
def _init_graph_network(self, inputs, outputs, name=None):
  """Construct a functional-API ("graph") network from inputs/outputs.

  Validates the input/output tensors, maps the layer graph connecting
  them, caches the topology (nodes and layers by depth), and records the
  input/output metadata used for feeding and for mask/shape caching.
  """
  self._call_convention = base_layer.CallConvention.EXPLICIT_INPUTS_ARGUMENT
  # Normalize and set self.inputs, self.outputs.
  if isinstance(inputs, (list, tuple)):
    self.inputs = list(inputs)  # Tensor or list of tensors.
  else:
    self.inputs = [inputs]
  if isinstance(outputs, (list, tuple)):
    self.outputs = list(outputs)
  else:
    self.outputs = [outputs]
  # User-provided argument validation.
  if context.executing_eagerly():
    # Check that all inputs/outputs are DeferredTensors.
    for tensor in self.inputs:
      if not isinstance(tensor, base_layer.DeferredTensor):  # pylint: disable=protected-access
        raise TypeError('When eager execution is enabled, '
                        'inputs must come from a call to '
                        '`tf.keras.Input` (called after '
                        'tf.enable_eager_execution()). '
                        'Received invalid input: ' + str(tensor))
    for tensor in self.outputs:
      if not isinstance(tensor, base_layer.DeferredTensor):  # pylint: disable=protected-access
        raise TypeError('When eager execution is enabled, '
                        'outputs must come from a call to '
                        'a layer (called after '
                        'tf.enable_eager_execution()). '
                        'Received invalid output: ' + str(tensor))
  # Check for redundancy in inputs.
  if len(set(self.inputs)) != len(self.inputs):
    raise ValueError('The list of inputs passed to the model '
                     'is redundant. '
                     'All inputs should only appear once.'
                     ' Found: ' + str(self.inputs))
  for x in self.inputs:
    # Check that x has appropriate `_keras_history` metadata.
    if not hasattr(x, '_keras_history'):
      cls_name = self.__class__.__name__
      raise ValueError('Input tensors to a ' + cls_name + ' ' +
                       'must come from `tf.layers.Input`. '
                       'Received: ' + str(x) +
                       ' (missing previous layer metadata).')
    # Check that x is an input tensor.
    # pylint: disable=protected-access
    layer, node_index, tensor_index = x._keras_history
    if len(layer._inbound_nodes) > 1 or (
        layer._inbound_nodes and layer._inbound_nodes[0].inbound_layers):
      cls_name = self.__class__.__name__
      logging.warning(cls_name + ' inputs must come from '
                      '`tf.layers.Input` (thus holding past layer metadata), '
                      'they cannot be the output of '
                      'a previous non-Input layer. '
                      'Here, a tensor specified as '
                      'input to "' + self.name + '" was not an Input tensor, '
                      'it was generated by layer ' + layer.name + '.\n'
                      'Note that input tensors are '
                      'instantiated via `tensor = tf.layers.Input(shape)`.\n'
                      'The tensor that caused the issue was: ' + str(x.name))
  for x in self.outputs:
    if not hasattr(x, '_keras_history'):
      cls_name = self.__class__.__name__
      raise ValueError('Output tensors to a ' + cls_name + ' must be '
                       'the output of a TensorFlow `Layer` '
                       '(thus holding past layer metadata). Found: ' + str(x))
  self._base_init(name=name)
  self._compute_previous_mask = (
      'mask' in tf_inspect.getargspec(self.call).args or
      hasattr(self, 'compute_mask'))
  # A Network does not create weights of its own, thus it is already
  # built.
  self.built = True
  self._is_graph_network = True
  self._input_layers = []
  self._output_layers = []
  self._input_coordinates = []
  self._output_coordinates = []
  # This is for performance optimization when calling the Network on new
  # inputs. Every time the Network is called on a set on input tensors,
  # we compute the output tensors, output masks and output shapes in one pass,
  # then cache them here. When any of these outputs is queried later, we
  # retrieve it from there instead of recomputing it.
  self._output_mask_cache = {}
  self._output_tensor_cache = {}
  self._output_shape_cache = {}
  # Build self._output_layers:
  for x in self.outputs:
    layer, node_index, tensor_index = x._keras_history  # pylint: disable=protected-access
    self._output_layers.append(layer)
    self._output_coordinates.append((layer, node_index, tensor_index))
  # Build self._input_layers:
  for x in self.inputs:
    layer, node_index, tensor_index = x._keras_history  # pylint: disable=protected-access
    # It's supposed to be an input layer, so only one node
    # and one tensor output.
    assert node_index == 0
    assert tensor_index == 0
    self._input_layers.append(layer)
    self._input_coordinates.append((layer, node_index, tensor_index))
  # Keep track of the network's nodes and layers.
  nodes, nodes_by_depth, layers, layers_by_depth = _map_graph_network(
      self.inputs, self.outputs)
  self._network_nodes = nodes
  self._nodes_by_depth = nodes_by_depth
  self._layers = layers
  self._layers_by_depth = layers_by_depth
  self._track_layers(layers)
  # Create the node linking internal inputs to internal outputs.
  base_layer.Node(
      outbound_layer=self,
      inbound_layers=[],
      node_indices=[],
      tensor_indices=[],
      input_tensors=self.inputs,
      output_tensors=self.outputs)
  # Fill in the output mask cache.
  masks = []
  for x in self.inputs:
    mask = x._keras_mask if hasattr(x, '_keras_mask') else None  # pylint: disable=protected-access
    masks.append(mask)
  mask_cache_key = (generic_utils.object_list_uid(self.inputs) + '_' +
                    generic_utils.object_list_uid(masks))
  masks = []
  for x in self.outputs:
    mask = x._keras_mask if hasattr(x, '_keras_mask') else None  # pylint: disable=protected-access
    masks.append(mask)
  if len(masks) == 1:
    mask = masks[0]
  else:
    mask = masks
  self._output_mask_cache[mask_cache_key] = mask
  # Build self.input_names and self.output_names.
  self.input_names = []
  self.output_names = []
  self._feed_input_names = []
  self._feed_inputs = []
  self._feed_input_shapes = []
  for i, layer in enumerate(self._input_layers):
    self.input_names.append(layer.name)
    if layer.is_placeholder:
      self._feed_input_names.append(layer.name)
      self._feed_input_shapes.append(backend.int_shape(self.inputs[i]))
      # layer.input gives an error in eager mode
      if not context.executing_eagerly():
        self._feed_inputs.append(layer.input)
  for layer in self._output_layers:
    self.output_names.append(layer.name)
@checkpointable.no_automatic_dependency_tracking
def _init_subclassed_network(self, name=None):
  """Initialize a Network defined by subclassing (imperative style)."""
  self._base_init(name=name)
  self._is_graph_network = False
  call_argspec = tf_inspect.getargspec(self.call)
  # A `training` parameter in the call signature means callers may pass
  # the training/inference flag through.
  self._expects_training_arg = 'training' in call_argspec.args
  self._call_convention = self._determine_call_convention(call_argspec)
  self.outputs = []
  self.inputs = []
  self.built = False
def _determine_call_convention(self, call_argspec):
  """Decides how `self.call()` is invoked. See base_layer.CallConvention.

  Args:
    call_argspec: An `ArgSpec` describing this network's `call` method.

  Returns:
    One of the `base_layer.CallConvention` enum values.

  Raises:
    TypeError: If `call` takes both a single positional input argument
      and an explicit `inputs` argument, making the inputs ambiguous.
  """
  if call_argspec.varargs:
    may_take_single_argument = False
  else:
    try:
      # Note: tf_inspect doesn't raise a TypeError when regular inspect would,
      # so we need to keep in mind that "getcallargs" may have returned
      # something even though we under-specified positional arguments.
      all_args = tf_inspect.getcallargs(self.call, None)
      self_args = set()
      for arg_name, obj in all_args.items():
        if obj is self:
          self_args.add(arg_name)
      may_take_single_argument = True
    except TypeError:
      may_take_single_argument = False
  if may_take_single_argument:
    # A single positional argument (plus "self") is considered equivalent to
    # an "inputs" argument.
    all_positional_args = len(call_argspec.args)
    if call_argspec.defaults is not None:
      all_positional_args -= len(call_argspec.defaults)
    non_self_positional_args = all_positional_args
    for positional_arg_name in call_argspec.args[:all_positional_args]:
      if positional_arg_name in self_args:
        non_self_positional_args -= 1
    if non_self_positional_args == 1:
      if 'inputs' in call_argspec.args[all_positional_args:]:
        raise TypeError(
            "Model.call() takes a single positional argument (to which "
            "inputs are passed by convention) and a separate 'inputs' "
            "argument. Unable to determine which arguments are inputs.")
      return base_layer.CallConvention.SINGLE_POSITIONAL_ARGUMENT
  if 'inputs' in call_argspec.args:
    return base_layer.CallConvention.EXPLICIT_INPUTS_ARGUMENT
  else:
    return base_layer.CallConvention.POSITIONAL_ARGUMENTS_ARE_INPUTS
def _track_layers(self, layers):
  """Register Checkpointable dependencies for each layer in `layers`."""
  num_weighted = 0
  for position, sublayer in enumerate(layers):
    if sublayer.weights:
      # Layers with weights get their own monotonically increasing index,
      # so weight-less layers can be inserted anywhere in the network
      # without shifting checkpoint names.
      self._track_checkpointable(
          sublayer, name='layer_with_weights-%d' % num_weighted,
          overwrite=True)
      num_weighted += 1
    # Track every layer regardless of weights, in case it has (or later
    # gains) Checkpointable dependencies of its own.
    self._track_checkpointable(
        sublayer, name='layer-%d' % position, overwrite=True)
def _no_dependency(self, value):
  """Wrap `value` so assigning it to an attribute is not tracked.

  `CheckpointableBase` defines this hook with the semantics "if a
  subclass does dependency tracking, this method exempts `value`."
  Layer uses it to exempt selected attribute assignments.

  Args:
    value: The object about to be stored on an attribute, whose value
      should not be tracked.

  Returns:
    A wrapper that suppresses tracking when assigned to an attribute;
    the underlying `value` is what actually ends up stored.
  """
  return data_structures.NoDependency(value)
def __setattr__(self, name, value):
  """Attribute assignment doubles as layer/dependency tracking.

  Layers and checkpointable structures assigned to attributes of a
  subclassed network are recorded in `self._layers`; bare Variables are
  recorded in `self._extra_variables` so they appear in `weights`.
  """
  if not getattr(self, '_setattr_tracking', True):
    # Tracking explicitly disabled: plain attribute assignment.
    super(Network, self).__setattr__(name, value)
    return
  no_dependency = isinstance(value, data_structures.NoDependency)
  value = data_structures.sticky_attribute_assignment(
      checkpointable=self, value=value, name=name)
  if isinstance(value, (
      base_layer.Layer,
      Network,
      data_structures.CheckpointableDataStructure)):
    try:
      is_graph_network = self._is_graph_network
    except AttributeError:
      # _is_graph_network is set by __init__; missing means super().__init__
      # was never called.
      raise RuntimeError('It looks like you are subclassing `Model` and you '
                         'forgot to call `super(YourClass, self).__init__()`.'
                         ' Always start with this line.')
    if not is_graph_network:
      # We need to check object identity to avoid de-duplicating empty
      # container types which compare equal.
      if not any((layer is value for layer in self._layers)):
        self._layers.append(value)
        if hasattr(value, '_use_resource_variables'):
          # In subclassed models, legacy layers (tf.layers) must always use
          # resource variables.
          value._use_resource_variables = True
  if (not no_dependency
      and isinstance(value, checkpointable.CheckpointableBase)):
    if (  # For subclassed models only, users may add extra weights/variables
        # simply by assigning them to attributes.
        not self._is_graph_network
        and isinstance(value, variables.Variable)):
      self._extra_variables.append(value)
  super(Network, self).__setattr__(name, value)
def add_variable(self, name, shape, dtype=None, initializer=None,
                 regularizer=None, trainable=True, constraint=None):
  """Unsupported on Networks; always raises NotImplementedError."""
  if self._is_graph_network:
    raise NotImplementedError('`add_variable` is not supported on Networks.')
  # Subclassed networks get a hint about the supported alternative.
  raise NotImplementedError(
      '`add_variable` is not supported on Networks. However, you may '
      'assign variables to attributes and they will show up in the weights '
      'and variables properties.')
def add_loss(self, *args, **kwargs):
  """Forward to `Layer.add_loss`; disallowed under eager execution."""
  if context.executing_eagerly():
    raise NotImplementedError('`add_loss` is not supported on Networks '
                              'when eager execution is enabled.')
  super(Network, self).add_loss(*args, **kwargs)
@property
def uses_learning_phase(self):
  """True if any output tensor is flagged as learning-phase dependent."""
  # Generator expression instead of materializing an intermediate list
  # inside any(); also allows short-circuiting on the first hit.
  return any(
      getattr(x, '_uses_learning_phase', False) for x in self.outputs)
@property
def stateful(self):
  """True if any sub-layer declares itself stateful."""
  # Generator expression instead of materializing an intermediate list
  # inside any(); short-circuits on the first stateful layer.
  return any((hasattr(layer, 'stateful') and layer.stateful)
             for layer in self.layers)
def reset_states(self):
  """Reset the internal state of every stateful sub-layer (e.g. RNNs)."""
  for sublayer in self.layers:
    is_stateful = getattr(sublayer, 'stateful', False)
    if is_stateful and hasattr(sublayer, 'reset_states'):
      sublayer.reset_states()
@property
def state_updates(self):
  """Returns the `updates` from all layers that are stateful.

  This is useful for separating training updates and state updates,
  e.g. when we need to update a layer's internal state during
  prediction.

  Returns:
    A list of update ops.
  """
  collected = []
  for sublayer in self.layers:
    if not getattr(sublayer, 'stateful', False):
      continue
    if hasattr(sublayer, 'updates'):
      collected.extend(sublayer.updates)
  return collected
def get_weights(self):
  """Retrieves the weights of the model.

  Returns:
    A flat list of Numpy arrays.
  """
  all_weights = []
  for sublayer in self.layers:
    all_weights.extend(sublayer.weights)
  # Fetch every weight in a single backend round-trip.
  return backend.batch_get_value(all_weights)
def set_weights(self, weights):
  """Sets the weights of the model.

  Arguments:
    weights: A list of Numpy arrays with shapes and types matching
        the output of `model.get_weights()`.
  """
  assignments = []
  remaining = weights
  for sublayer in self.layers:
    # Each layer consumes as many arrays as it has weight variables.
    count = len(sublayer.weights)
    head, remaining = remaining[:count], remaining[count:]
    assignments.extend(zip(sublayer.weights, head))
  # Assign every weight in a single backend round-trip.
  backend.batch_set_value(assignments)
def compute_mask(self, inputs, mask):
  """Compute output masks for `inputs`; graph networks only."""
  if not self._is_graph_network:
    return None
  inputs = generic_utils.to_list(inputs)
  masks = (generic_utils.to_list(mask) if mask is not None
           else [None] * len(inputs))
  cache_key = (generic_utils.object_list_uid(inputs)
               + '_' + generic_utils.object_list_uid(masks))
  # EAFP cache lookup: reuse a previously computed mask when available.
  try:
    return self._output_mask_cache[cache_key]
  except KeyError:
    _, output_masks = self._run_internal_graph(inputs, mask=masks)
    return output_masks
@property
def layers(self):
  """All tracked layers, with empty layer containers filtered out."""
  tracked = self._layers
  return checkpointable_layer_utils.filter_empty_layer_containers(tracked)
def get_layer(self, name=None, index=None):
  """Retrieves a layer based on either its name (unique) or index.

  If `name` and `index` are both provided, `index` will take precedence.
  Indices are based on order of horizontal graph traversal (bottom-up).

  Arguments:
      name: String, name of layer.
      index: Integer, index of layer.

  Returns:
      A layer instance.

  Raises:
      ValueError: In case of invalid layer name or index.
  """
  # TODO(fchollet): We could build a dictionary based on layer names
  # since they are constant, but we have not done that yet.
  if index is not None:
    if len(self.layers) <= index:
      raise ValueError('Was asked to retrieve layer at index ' + str(index) +
                       ' but model only has ' + str(len(self.layers)) +
                       ' layers.')
    return self.layers[index]
  if not name:
    raise ValueError('Provide either a layer name or layer index.')
  for candidate in self.layers:
    if candidate.name == name:
      return candidate
  raise ValueError('No such layer: ' + name)
@property
def _unfiltered_updates(self):
  """All updates from sub-layers, recursing into nested Networks."""
  if context.executing_eagerly():
    return []
  collected = []
  for sublayer in self.layers:
    if isinstance(sublayer, Network):
      collected.extend(sublayer._unfiltered_updates)
    else:
      collected.extend(sublayer.updates)
  return collected
@property
def _unfiltered_losses(self):
  """All losses from sub-layers, recursing into nested Networks."""
  collected = []
  for sublayer in self.layers:
    if isinstance(sublayer, Network):
      collected.extend(sublayer._unfiltered_losses)
    else:
      collected.extend(sublayer.losses)
  return collected
@property
def updates(self):
  """Retrieves the network's updates.

  Will only include updates that are either
  unconditional, or conditional on inputs to this model
  (e.g. will not include updates that were created by layers of this model
  outside of the model).

  When the network has no registered inputs, all updates are returned.
  Effectively, `network.updates` behaves like `layer.updates`.

  Concrete example:

  ```python
    bn = keras.layers.BatchNormalization()
    x1 = keras.layers.Input(shape=(10,))
    _ = bn(x1)  # This creates 2 updates.

    x2 = keras.layers.Input(shape=(10,))
    y2 = bn(x2)  # This creates 2 more updates.

    # The BN layer has now 4 updates.
    self.assertEqual(len(bn.updates), 4)

    # Let's create a model from x2 to y2.
    model = keras.models.Model(x2, y2)

    # The model does not list all updates from its underlying layers,
    # but only the updates that are relevant to it. Updates created by layers
    # outside of the model are discarded.
    self.assertEqual(len(model.updates), 2)

    # If you keep calling the model, you append to its updates, just like
    # what happens for a layer.
    x3 = keras.layers.Input(shape=(10,))
    y3 = model(x3)
    self.assertEqual(len(model.updates), 4)

    # But if you call the inner BN layer independently, you don't affect
    # the model's updates.
    x4 = keras.layers.Input(shape=(10,))
    _ = bn(x4)
    self.assertEqual(len(model.updates), 4)
  ```

  Returns:
    A list of update ops.
  """
  if context.executing_eagerly():
    return []

  if not self.trainable and not self.stateful:
    return []

  updates = self._unfiltered_updates

  # `updates` might contain irrelevant updates, so it needs to be filtered
  # with respect to inputs the model has been called on.
  relevant_inputs = []
  for i in range(0, len(self._inbound_nodes)):
    inputs = self.get_input_at(i)
    if isinstance(inputs, list):
      relevant_inputs += inputs
    else:
      relevant_inputs.append(inputs)
  if not relevant_inputs:
    # No registered inputs: behave like `layer.updates` and return all.
    return updates

  # Keep only updates reachable from this network's inputs, plus
  # unconditional updates (which apply regardless of inputs).
  reachable = tf_utils.get_reachable_from_inputs(relevant_inputs, updates)
  relevant_conditional_updates = [x for x in updates if x in reachable]
  unconditional_updates = [
      x for x in updates if x._unconditional_update]  # pylint: disable=protected-access
  # A layer could be used multiple times in a nested structure,
  # so the updates list must be de-duped.
  return list(set(
      relevant_conditional_updates + unconditional_updates + self._updates))
@property
def losses(self):
  """Retrieves the network's losses.

  Will only include losses that are either
  unconditional, or conditional on inputs to this model
  (e.g. will not include losses that depend on tensors
  that aren't inputs to this model).

  When the network has no registered inputs, all losses are returned.

  Returns:
    A list of loss tensors.
  """
  losses = self._unfiltered_losses

  if context.executing_eagerly():
    return losses

  relevant_inputs = []
  for i in range(0, len(self._inbound_nodes)):
    inputs = self.get_input_at(i)
    if isinstance(inputs, list):
      relevant_inputs += inputs
    else:
      relevant_inputs.append(inputs)
  if not relevant_inputs:
    # No registered inputs: return all collected losses unfiltered.
    return losses

  # Keep only losses reachable from this network's inputs, plus
  # unconditional losses (which apply regardless of inputs).
  reachable = tf_utils.get_reachable_from_inputs(relevant_inputs, losses)
  relevant_conditional_losses = [x for x in losses if x in reachable]
  unconditional_losses = [
      x for x in losses if x._unconditional_loss]  # pylint: disable=protected-access
  return list(set(
      relevant_conditional_losses + unconditional_losses + self._losses))
@property
def trainable_weights(self):
  # Delegates to the shared checkpointable helper so Layer and Network
  # behave consistently; includes variables assigned directly to
  # attributes of subclassed models (`_extra_variables`).
  return checkpointable_layer_utils.gather_trainable_weights(
      trainable=self.trainable,
      sub_layers=self.layers,
      extra_variables=self._extra_variables)
@property
def non_trainable_weights(self):
  # Mirror of `trainable_weights`: when the network itself is frozen
  # (self.trainable is False), the helper reports all weights here.
  return checkpointable_layer_utils.gather_non_trainable_weights(
      trainable=self.trainable,
      sub_layers=self.layers,
      extra_variables=self._extra_variables)
@property
def input_spec(self):
  """Gets the network's input specs.

  Returns:
    A list of `InputSpec` instances (one per input to the model)
        or a single instance if the model has only one input.
  """
  # Only graph networks know their input layers up front; for a
  # subclassed network nothing can be assumed.
  if not self._is_graph_network:
    return None
  specs = []
  for input_layer in self._input_layers:
    layer_spec = input_layer.input_spec
    if layer_spec is None:
      specs.append(None)
      continue
    if not isinstance(layer_spec, list):
      raise TypeError('Layer ' + input_layer.name +
                      ' has an input_spec attribute that '
                      'is not a list. We expect a list. '
                      'Found input_spec = ' + str(layer_spec))
    specs.extend(layer_spec)
  return specs[0] if len(specs) == 1 else specs
def build(self, input_shape):
  """Builds the model based on input shapes received.

  This is to be used for subclassed models, which do not know at
  instantiation time what their inputs look like.

  Args:
   input_shape: Single tuple, TensorShape, or list of shapes, where shapes
       are tuples, integers, or TensorShapes.

  Raises:
    ValueError:
      1. In case of invalid user-provided data (not of type tuple,
         list, or TensorShape).
      2. If the model requires call arguments that are agnostic
         to the input shapes (positional or kwarg in call signature).
      3. If not all layers were properly built.
      4. If float type inputs are not supported within the layers.

    In each of these cases, the user should build their model by calling it
    on real tensor data.
  """
  if self._is_graph_network:
    # Graph networks are built at construction time; nothing to do.
    self.built = True
    return

  # If subclass network
  if input_shape is None:
    raise ValueError('Input shape must be defined when calling build on a '
                     'model subclass network.')
  valid_types = (tuple, list, tensor_shape.TensorShape)
  if not isinstance(input_shape, valid_types):
    raise ValueError('Specified input shape is not one of the valid types. '
                     'Please specify a batch input shape of type tuple or '
                     'list of input shapes. User provided '
                     'input type: {}'.format(type(input_shape)))

  if input_shape and not self.inputs:
    # Build by calling `self.call` once on dummy data of the given shape.
    if isinstance(input_shape, list):
      # List of input shapes
      x = [base_layer.generate_dummy_data_from_shape(shape)
           for shape in input_shape]
    else:
      x = base_layer.generate_dummy_data_from_shape(input_shape)

    kwargs = {}
    num_call_args = len(tf_inspect.getargspec(self.call).args)
    if self._expects_training_arg and num_call_args == 3:
      # Has call signature of call(self, input, training)
      kwargs['training'] = False
    elif num_call_args > 2:
      # Has invalid call signature of call(self, input, *args, **kwargs)
      raise ValueError('Currently, you cannot build your model if it has '
                       'positional or keyword arguments that are not '
                       'inputs to the model, but are required for its '
                       '`call` method. Instead, in order to instantiate '
                       'and build your model, `call` your model on real '
                       'tensor data with all expected call arguments.')

    try:
      self.call(x, **kwargs)
    except (errors.InvalidArgumentError, TypeError):
      raise ValueError('You cannot build your model by calling `build` '
                       'if your layers do not support float type inputs. '
                       'Instead, in order to instantiate and build your '
                       'model, `call` your model on real tensor data (of '
                       'the correct dtype).')

    if self._layers:
      self._track_layers(self._layers)
  if self.layers:
    for layer in self.layers:
      if not layer.built:
        raise ValueError('Layer: {} was not built in your model. Calling '
                         '`build` manually on a subclassed model is only '
                         'allowed for models with a static topology. '
                         'In this case, you can build your model by '
                         'calling it on real tensor data.'.format(layer))
  self.built = True
def call(self, inputs, training=None, mask=None):
  """Calls the model on new inputs.

  In this case `call` just reapplies
  all ops in the graph to the new inputs
  (e.g. build a new computational graph from the provided inputs).

  Arguments:
      inputs: A tensor or list of tensors.
      training: Boolean or boolean scalar tensor, indicating whether to run
        the `Network` in training mode or inference mode.
      mask: A mask or list of masks. A mask can be
          either a tensor or None (no mask).

  Returns:
      A tensor if there is a single output, or
      a list of tensors if there are more than one outputs.
  """
  inputs = nest.flatten(inputs)
  if mask is None:
    masks = [None for _ in range(len(inputs))]
  else:
    masks = nest.flatten(mask)

  if not context.executing_eagerly():
    # Try to retrieve cached outputs if the layer has already been called
    # on these exact inputs.
    cache_key = (generic_utils.object_list_uid(inputs)
                 + '_' + generic_utils.object_list_uid(masks))
    if cache_key in self._output_tensor_cache:
      # Cache hit.
      return self._output_tensor_cache[cache_key]
  # Actually apply the network graph to the new inputs.
  outputs, _ = self._run_internal_graph(inputs,
                                        training=training,
                                        mask=masks)
  return outputs
  def compute_output_shape(self, input_shape):
    """Computes the model's output shape(s) from the given input shape(s).

    Runs a shape-only pass over the network graph: each node's
    `compute_output_shape` is called in order of decreasing depth, and the
    final result is memoized in `self._output_shape_cache`. Intermediate
    shapes are tracked in a dict keyed by strings of the form
    '<layer.name>_<node_index>_<tensor_index>'.

    Arguments:
      input_shape: A shape tuple / `TensorShape`, or a list of them (one per
        model input).

    Returns:
      A `TensorShape` if the model has a single output, otherwise a list of
      `TensorShape`s.

    Raises:
      NotImplementedError: For subclassed (non-graph) networks when not
        executing eagerly, since output shapes cannot be inferred without
        calling the model.
      ValueError: If the number of shapes provided does not match the number
        of model inputs.
    """
    if not self._is_graph_network:
      if context.executing_eagerly():
        return super(Network, self).compute_output_shape(input_shape)
      raise NotImplementedError
    # Normalize the argument into a list of shape tuples (or None), one per
    # model input.
    if isinstance(input_shape, list):
      input_shapes = []
      for shape in input_shape:
        if shape is not None:
          input_shapes.append(tuple(tensor_shape.TensorShape(shape).as_list()))
        else:
          input_shapes.append(None)
    else:
      if input_shape is not None:
        input_shapes = [tuple(tensor_shape.TensorShape(input_shape).as_list())]
      else:
        input_shapes = [None]
    if len(input_shapes) != len(self._input_layers):
      raise ValueError('Invalid input_shape argument ' + str(input_shape) +
                       ': model has ' + str(len(self._input_layers)) +
                       ' tensor inputs.')
    cache_key = generic_utils.object_list_uid(input_shapes)
    if cache_key not in self._output_shape_cache:
      # Cache miss. We have to run the network graph manually (recursive calls
      # to `compute_output_shape`).
      layers_to_output_shapes = {}
      for i in range(len(input_shapes)):
        layer = self._input_layers[i]
        input_shape = input_shapes[i]
        # It's an input layer: then `compute_output_shape` is identity,
        # and there is only one node and one tensor output.
        shape_key = layer.name + '_0_0'
        layers_to_output_shapes[shape_key] = input_shape
      depth_keys = list(self._nodes_by_depth.keys())
      depth_keys.sort(reverse=True)
      # Iterate over nodes, by depth level.
      if len(depth_keys) > 1:
        for depth in depth_keys:
          nodes = self._nodes_by_depth[depth]
          for node in nodes:
            # This is always a single layer, never a list.
            layer = node.outbound_layer
            if layer in self._input_layers:
              # We've already covered the input layers
              # a few lines above.
              continue
            # Potentially redundant list,
            # same size as node.input_tensors.
            input_shapes = []
            for j in range(len(node.inbound_layers)):
              inbound_layer = node.inbound_layers[j]
              node_index = node.node_indices[j]
              tensor_index = node.tensor_indices[j]
              shape_key = inbound_layer.name + '_%s_%s' % (node_index,
                                                           tensor_index)
              input_shape = layers_to_output_shapes[shape_key]
              input_shapes.append(input_shape)
            # Single-input layers receive a bare shape, not a 1-element list.
            if len(input_shapes) == 1:
              output_shape = layer.compute_output_shape(input_shapes[0])
            else:
              output_shape = layer.compute_output_shape(input_shapes)
            if isinstance(output_shape, list):
              output_shapes = [
                  tuple(tensor_shape.TensorShape(shape).as_list())
                  for shape in output_shape
              ]
            else:
              output_shapes = [
                  tuple(tensor_shape.TensorShape(output_shape).as_list())
              ]
            node_index = layer._inbound_nodes.index(node)  # pylint: disable=protected-access
            for j in range(len(output_shapes)):
              shape_key = layer.name + '_%s_%s' % (node_index, j)
              layers_to_output_shapes[shape_key] = output_shapes[j]
      # Read final output shapes from layers_to_output_shapes.
      output_shapes = []
      for i in range(len(self._output_layers)):
        layer, node_index, tensor_index = self._output_coordinates[i]
        shape_key = layer.name + '_%s_%s' % (node_index, tensor_index)
        output_shapes.append(layers_to_output_shapes[shape_key])
      # Store in cache.
      self._output_shape_cache[cache_key] = output_shapes
    else:
      # Cache hit.
      output_shapes = self._output_shape_cache[cache_key]
    if isinstance(output_shapes, list):
      if len(output_shapes) == 1:
        return tensor_shape.TensorShape(output_shapes[0])
      else:
        return [tensor_shape.TensorShape(shape) for shape in output_shapes]
    else:
      return tensor_shape.TensorShape(output_shapes)
  def _run_internal_graph(self, inputs, training=None, mask=None):
    """Computes output tensors for new inputs.

    # Note:
        - Expects `inputs` to be a list (potentially with 1 element).
        - Can be run on non-Keras tensors.

    Arguments:
        inputs: List of tensors
        training: Boolean learning phase.
        mask: List of masks (tensors or None).

    Returns:
        Two lists: output_tensors, output_masks. (Output shapes are computed
        as a side effect and, in graph mode, stored in
        `self._output_shape_cache`; they are not returned.)
    """
    # Note: masking support is relevant mainly for Keras.
    # It cannot be factored out without having the fully reimplement the network
    # calling logic on the Keras side. We choose to incorporate it in
    # Network because 1) it may be useful to fully support in tf.layers in
    # the future and 2) Keras is a major user of Network. If you don't
    # use masking, it does not interfere with regular behavior at all and you
    # can ignore it.
    if mask is None:
      masks = [None for _ in range(len(inputs))]
    else:
      masks = mask
    # Dictionary mapping reference tensors to tuples
    # (computed tensor, compute mask)
    # we assume a 1:1 mapping from tensor to mask
    # TODO(fchollet): raise exception when a `.compute_mask()` call
    # does not return a list the same size as `call`
    # Keys are `str(id(reference_tensor))` — identity of the symbolic tensors
    # recorded at graph-construction time.
    tensor_map = {}
    for x, y, mask in zip(self.inputs, inputs, masks):
      tensor_map[str(id(x))] = (y, mask)
    depth_keys = list(self._nodes_by_depth.keys())
    depth_keys.sort(reverse=True)
    # Walk nodes from the inputs (highest depth) towards the outputs.
    for depth in depth_keys:
      nodes = self._nodes_by_depth[depth]
      for node in nodes:
        # This is always a single layer, never a list.
        layer = node.outbound_layer
        reference_input_tensors = node.input_tensors
        reference_output_tensors = node.output_tensors
        # If all previous input tensors are available in tensor_map,
        # then call node.inbound_layer on them.
        computed_data = []  # List of tuples (input, mask).
        for x in reference_input_tensors:
          if str(id(x)) in tensor_map:
            computed_data.append(tensor_map[str(id(x))])
        if len(computed_data) == len(reference_input_tensors):
          # Call layer (reapplying ops to new inputs).
          with ops.name_scope(layer.name):
            if node.arguments:
              kwargs = node.arguments
            else:
              kwargs = {}
            if len(computed_data) == 1:
              # Single-input layer: pass a bare tensor/mask, not lists.
              computed_tensor, computed_mask = computed_data[0]
              # Ensure mask propagation if applicable.
              if 'mask' in tf_inspect.getargspec(layer.call).args:
                kwargs.setdefault('mask', computed_mask)
              if 'training' in tf_inspect.getargspec(layer.call).args:
                kwargs.setdefault('training', training)
              output_tensors = nest.flatten(
                  layer.call(computed_tensor, **kwargs))
              if hasattr(layer, 'compute_mask'):
                output_masks = layer.compute_mask(computed_tensor,
                                                  computed_mask)
                if output_masks is None:
                  output_masks = [None for _ in output_tensors]
                else:
                  output_masks = nest.flatten(output_masks)
              else:
                output_masks = [None for _ in output_tensors]
              computed_tensors = [computed_tensor]
              computed_masks = [computed_mask]
            else:
              # Multi-input layer: pass lists of tensors/masks.
              computed_tensors = [x[0] for x in computed_data]
              computed_masks = [x[1] for x in computed_data]
              if 'mask' in tf_inspect.getargspec(layer.call).args:
                kwargs.setdefault('mask', computed_masks)
              if 'training' in tf_inspect.getargspec(layer.call).args:
                kwargs.setdefault('training', training)
              output_tensors = nest.flatten(
                  layer.call(computed_tensors, **kwargs))
              if hasattr(layer, 'compute_mask'):
                output_masks = layer.compute_mask(computed_tensors,
                                                  computed_masks)
                if output_masks is None:
                  output_masks = [None for _ in output_tensors]
                else:
                  output_masks = nest.flatten(output_masks)
              else:
                output_masks = [None for _ in output_tensors]
            if not context.executing_eagerly():
              if layer.activity_regularizer is not None:
                regularization_losses = [
                    layer.activity_regularizer(x) for x in output_tensors
                ]
                # Apply activity regularizer if any:
                layer.add_loss(regularization_losses, computed_tensors)
          # Update tensor_map.
          for x, y, mask in zip(reference_output_tensors, output_tensors,
                                output_masks):
            tensor_map[str(id(x))] = (y, mask)
    # Collect the computed values for the model's reference outputs.
    output_tensors = []
    output_masks = []
    output_shapes = []
    for x in self.outputs:
      assert str(id(x)) in tensor_map, 'Could not compute output ' + str(x)
      tensor, mask = tensor_map[str(id(x))]
      output_shapes.append(backend.int_shape(x))
      output_tensors.append(tensor)
      output_masks.append(mask)
    # Single-output models return bare values rather than 1-element lists.
    # (NOTE: output_shapes/output_masks are always lists at this point, so
    # the `is not None` guards below are vestigial and never False.)
    if len(output_tensors) == 1:
      output_tensors = output_tensors[0]
      if output_shapes is not None:
        output_shapes = output_shapes[0]
      if output_masks is not None:
        output_masks = output_masks[0]
    if not context.executing_eagerly():
      # Update cache;
      # keys are based on ids on input tensors and inputs masks.
      cache_key = (generic_utils.object_list_uid(inputs)
                   + '_' + generic_utils.object_list_uid(masks))
      self._output_tensor_cache[cache_key] = output_tensors
      self._output_mask_cache[cache_key] = output_masks
      if output_shapes is not None:
        input_shapes = [backend.int_shape(x) for x in inputs]
        cache_key = generic_utils.object_list_uid(input_shapes)
        self._output_shape_cache[cache_key] = output_shapes
    return output_tensors, output_masks
  def get_config(self):
    """Returns the model's configuration as a JSON-serializable dict.

    Only supported for graph networks. The config records each layer's own
    config plus the graph topology: for every layer, the filtered list of
    inbound nodes (only nodes that belong to this network), and the
    `[layer_name, node_index, tensor_index]` coordinates of the model's
    inputs and outputs.

    Returns:
      A deep-copied config dict (so mutating the result cannot affect
      internal state).

    Raises:
      NotImplementedError: For subclassed (non-graph) networks.
    """
    if not self._is_graph_network:
      raise NotImplementedError
    config = {
        'name': self.name,
    }
    # Map original node keys to per-layer node indices after filtering out
    # nodes that are not part of this network (shared layers may carry
    # nodes from other models).
    node_conversion_map = {}
    for layer in self.layers:
      if issubclass(layer.__class__, Network):
        # Networks start with a pre-existing node
        # linking their input to output.
        kept_nodes = 1
      else:
        kept_nodes = 0
      for original_node_index, node in enumerate(layer._inbound_nodes):
        node_key = _make_node_key(layer.name, original_node_index)
        if node_key in self._network_nodes:
          node_conversion_map[node_key] = kept_nodes
          kept_nodes += 1
    layer_configs = []
    for layer in self.layers:  # From the earliest layers on.
      layer_class_name = layer.__class__.__name__
      layer_config = layer.get_config()
      filtered_inbound_nodes = []
      for original_node_index, node in enumerate(layer._inbound_nodes):
        node_key = _make_node_key(layer.name, original_node_index)
        if node_key in self._network_nodes:
          # The node is relevant to the model:
          # add to filtered_inbound_nodes.
          if node.arguments:
            try:
              # Node call kwargs are only kept if JSON-serializable.
              json.dumps(node.arguments)
              kwargs = node.arguments
            except TypeError:
              logging.warning(
                  'Layer ' + layer.name +
                  ' was passed non-serializable keyword arguments: ' +
                  str(node.arguments) + '. They will not be included '
                  'in the serialized model (and thus will be missing '
                  'at deserialization time).')
              kwargs = {}
          else:
            kwargs = {}
          if node.inbound_layers:
            node_data = []
            for i in range(len(node.inbound_layers)):
              inbound_layer = node.inbound_layers[i]
              node_index = node.node_indices[i]
              tensor_index = node.tensor_indices[i]
              node_key = _make_node_key(inbound_layer.name, node_index)
              new_node_index = node_conversion_map.get(node_key, 0)
              node_data.append(
                  [inbound_layer.name, new_node_index, tensor_index, kwargs])
            filtered_inbound_nodes.append(node_data)
      layer_configs.append({
          'name': layer.name,
          'class_name': layer_class_name,
          'config': layer_config,
          'inbound_nodes': filtered_inbound_nodes,
      })
    config['layers'] = layer_configs
    # Gather info about inputs and outputs.
    model_inputs = []
    for i in range(len(self._input_layers)):
      layer, node_index, tensor_index = self._input_coordinates[i]
      node_key = _make_node_key(layer.name, node_index)
      if node_key not in self._network_nodes:
        continue
      new_node_index = node_conversion_map[node_key]
      model_inputs.append([layer.name, new_node_index, tensor_index])
    config['input_layers'] = model_inputs
    model_outputs = []
    for i in range(len(self._output_layers)):
      layer, node_index, tensor_index = self._output_coordinates[i]
      node_key = _make_node_key(layer.name, node_index)
      if node_key not in self._network_nodes:
        continue
      new_node_index = node_conversion_map[node_key]
      model_outputs.append([layer.name, new_node_index, tensor_index])
    config['output_layers'] = model_outputs
    return copy.deepcopy(config)
  @classmethod
  def from_config(cls, config, custom_objects=None):
    """Instantiates a Model from its config (output of `get_config()`).

    Reconstruction is done in two phases: first every layer is deserialized
    and its node data enqueued; then node data is replayed (calling layers on
    tensors) in repeated passes until every node could be processed. Repeated
    passes are needed because a node's inputs may be produced by a node that
    has not been replayed yet (e.g. shared layers at different depths).

    Arguments:
        config: Model config dictionary.
        custom_objects: Optional dictionary mapping names
            (strings) to custom classes or functions to be
            considered during deserialization.

    Returns:
        A model instance.

    Raises:
        ValueError: In case of improperly formatted config dict.
    """
    # Layer instances created during
    # the graph reconstruction process
    created_layers = {}
    # Dictionary mapping layer instances to
    # node data that specifies a layer call.
    # It acts as a queue that maintains any unprocessed
    # layer call until it becomes possible to process it
    # (i.e. until the input tensors to the call all exist).
    unprocessed_nodes = {}

    def add_unprocessed_node(layer, node_data):
      # Enqueue `node_data` to be replayed later for `layer`.
      if layer not in unprocessed_nodes:
        unprocessed_nodes[layer] = [node_data]
      else:
        unprocessed_nodes[layer].append(node_data)

    def process_node(layer, node_data):
      """Deserialize a node.

      Arguments:
          layer: layer instance.
          node_data: node config dict.

      Raises:
          ValueError: In case of improperly formatted `node_data` dict.
      """
      input_tensors = []
      for input_data in node_data:
        inbound_layer_name = input_data[0]
        inbound_node_index = input_data[1]
        inbound_tensor_index = input_data[2]
        if len(input_data) == 3:
          kwargs = {}
        elif len(input_data) == 4:
          kwargs = input_data[3]
        else:
          raise ValueError('Improperly formatted model config.')
        if inbound_layer_name not in created_layers:
          # The inbound layer does not exist yet: re-enqueue for a later pass.
          add_unprocessed_node(layer, node_data)
          return
        inbound_layer = created_layers[inbound_layer_name]
        if len(inbound_layer._inbound_nodes) <= inbound_node_index:
          # The inbound node has not been replayed yet: re-enqueue.
          add_unprocessed_node(layer, node_data)
          return
        inbound_node = inbound_layer._inbound_nodes[inbound_node_index]
        input_tensors.append(inbound_node.output_tensors[inbound_tensor_index])
      # Call layer on its inputs, thus creating the node
      # and building the layer if needed.
      # (NOTE: `kwargs` carries the value from the last `input_data` entry.)
      if input_tensors:
        if len(input_tensors) == 1:
          layer(input_tensors[0], **kwargs)
        else:
          layer(input_tensors, **kwargs)

    def process_layer(layer_data):
      """Deserializes a layer, then call it on appropriate inputs.

      Arguments:
          layer_data: layer config dict.

      Raises:
          ValueError: In case of improperly formatted `layer_data` dict.
      """
      layer_name = layer_data['name']
      # Instantiate layer.
      from tensorflow.python.keras.layers import deserialize as deserialize_layer  # pylint: disable=g-import-not-at-top
      layer = deserialize_layer(layer_data, custom_objects=custom_objects)
      created_layers[layer_name] = layer
      # Gather layer inputs.
      inbound_nodes_data = layer_data['inbound_nodes']
      for node_data in inbound_nodes_data:
        # We don't process nodes (i.e. make layer calls)
        # on the fly because the inbound node may not yet exist,
        # in case of layer shared at different topological depths
        # (e.g. a model such as A(B(A(B(x)))))
        add_unprocessed_node(layer, node_data)

    # First, we create all layers and enqueue nodes to be processed
    for layer_data in config['layers']:
      process_layer(layer_data)
    # Then we process nodes in order of layer depth.
    # Nodes that cannot yet be processed (if the inbound node
    # does not yet exist) are re-enqueued, and the process
    # is repeated until all nodes are processed.
    while unprocessed_nodes:
      for layer_data in config['layers']:
        layer = created_layers[layer_data['name']]
        if layer in unprocessed_nodes:
          for node_data in unprocessed_nodes.pop(layer):
            process_node(layer, node_data)
    name = config.get('name')
    # Resolve the model's input/output tensors from their saved coordinates.
    input_tensors = []
    output_tensors = []
    for layer_data in config['input_layers']:
      layer_name, node_index, tensor_index = layer_data
      assert layer_name in created_layers
      layer = created_layers[layer_name]
      layer_output_tensors = layer._inbound_nodes[node_index].output_tensors
      input_tensors.append(layer_output_tensors[tensor_index])
    for layer_data in config['output_layers']:
      layer_name, node_index, tensor_index = layer_data
      assert layer_name in created_layers
      layer = created_layers[layer_name]
      layer_output_tensors = layer._inbound_nodes[node_index].output_tensors
      output_tensors.append(layer_output_tensors[tensor_index])
    return cls(inputs=input_tensors, outputs=output_tensors, name=name)
def save(self, filepath, overwrite=True, include_optimizer=True):
"""Saves the model to a single HDF5 file.
The savefile includes:
- The model architecture, allowing to re-instantiate the model.
- The model weights.
- The state of the optimizer, allowing to resume training
exactly where you left off.
This allows you to save the entirety of the state of a model
in a single file.
Saved models can be reinstantiated via `keras.models.load_model`.
The model returned by `load_model`
is a compiled model ready to be used (unless the saved model
was never compiled in the first place).
Arguments:
filepath: String, path to the file to save the weights to.
overwrite: Whether to silently overwrite any existing file at the
target location, or provide the user with a manual prompt.
include_optimizer: If True, save optimizer's state together.
Example:
```python
from keras.models import load_model
model.save('my_model.h5') # creates a HDF5 file 'my_model.h5'
del model # deletes the existing model
# returns a compiled model
# identical to the previous one
model = load_model('my_model.h5')
```
"""
if not self._is_graph_network:
raise NotImplementedError
from tensorflow.python.keras.models import save_model # pylint: disable=g-import-not-at-top
save_model(self, filepath, overwrite, include_optimizer)
def save_weights(self, filepath, overwrite=True, save_format=None):
"""Saves all layer weights.
Either saves in HDF5 or in TensorFlow format based on the `save_format`
argument.
When saving in HDF5 format, the weight file has:
- `layer_names` (attribute), a list of strings
(ordered names of model layers).
- For every layer, a `group` named `layer.name`
- For every such layer group, a group attribute `weight_names`,
a list of strings
(ordered names of weights tensor of the layer).
- For every weight in the layer, a dataset
storing the weight value, named after the weight tensor.
When saving in TensorFlow format, all objects referenced by the network are
saved in the same format as `tf.train.Checkpoint`, including any `Layer`
instances or `Optimizer` instances assigned to object attributes. For
networks constructed from inputs and outputs using `tf.keras.Model(inputs,
outputs)`, `Layer` instances used by the network are tracked/saved
automatically. For user-defined classes which inherit from `tf.keras.Model`,
`Layer` instances must be assigned to object attributes, typically in the
constructor. See the documentation of `tf.train.Checkpoint` and
`tf.keras.Model` for details.
Arguments:
filepath: String, path to the file to save the weights to. When saving
in TensorFlow format, this is the prefix used for checkpoint files
(multiple files are generated). Note that the '.h5' suffix causes
weights to be saved in HDF5 format.
overwrite: Whether to silently overwrite any existing file at the
target location, or provide the user with a manual prompt.
save_format: Either 'tf' or 'h5'. A `filepath` ending in '.h5' or
'.keras' will default to HDF5 if `save_format` is `None`. Otherwise
`None` defaults to 'tf'.
Raises:
ImportError: If h5py is not available when attempting to save in HDF5
format.
ValueError: For invalid/unknown format arguments.
"""
filepath_is_h5 = _is_hdf5_filepath(filepath)
if save_format is None:
if filepath_is_h5:
save_format = 'h5'
else:
save_format = 'tf'
else:
user_format = save_format.lower().strip()
if user_format in ('tensorflow', 'tf'):
save_format = 'tf'
elif user_format in ('hdf5', 'h5', 'keras'):
save_format = 'h5'
else:
raise ValueError(
'Unknown format "%s". Was expecting one of {"tf", "h5"}.' % (
save_format,))
if save_format == 'tf' and filepath_is_h5:
raise ValueError(
('save_weights got save_format="tf"/"tensorflow", but the '
'filepath ("%s") looks like an HDF5 file. Omit the ".h5"/".keras" '
'when saving in TensorFlow format.')
% filepath)
if save_format == 'h5' and h5py is None:
raise ImportError(
'`save_weights` requires h5py when saving in hdf5.')
if save_format == 'tf':
check_filepath = filepath + '.index'
else:
check_filepath = filepath
# If file exists and should not be overwritten:
if not overwrite and os.path.isfile(check_filepath):
proceed = ask_to_proceed_with_overwrite(check_filepath)
if not proceed:
return
if save_format == 'h5':
with h5py.File(filepath, 'w') as f:
saving.save_weights_to_hdf5_group(f, self.layers)
else:
if context.executing_eagerly():
session = None
else:
session = backend.get_session()
self._checkpointable_saver.save(filepath, session=session)
def load_weights(self, filepath, by_name=False):
"""Loads all layer weights, either from a TensorFlow or an HDF5 weight file.
If `by_name` is False weights are loaded based on the network's
topology. This means the architecture should be the same as when the weights
were saved. Note that layers that don't have weights are not taken into
account in the topological ordering, so adding or removing layers is fine as
long as they don't have weights.
If `by_name` is True, weights are loaded into layers only if they share the
same name. This is useful for fine-tuning or transfer-learning models where
some of the layers have changed.
Only topological loading (`by_name=False`) is supported when loading weights
from the TensorFlow format. Note that topological loading differs slightly
between TensorFlow and HDF5 formats for user-defined classes inheriting from
`tf.keras.Model`: HDF5 loads based on a flattened list of weights, while the
TensorFlow format loads based on the object-local names of attributes to
which layers are assigned in the `Model`'s constructor.
Arguments:
filepath: String, path to the weights file to load. For weight files in
TensorFlow format, this is the file prefix (the same as was passed
to `save_weights`).
by_name: Boolean, whether to load weights by name or by topological
order. Only topological loading is supported for weight files in
TensorFlow format.
Returns:
When loading a weight file in TensorFlow format, returns the same status
object as `tf.train.Checkpoint.restore`. When graph building, restore
ops are run automatically as soon as the network is built (on first call
for user-defined classes inheriting from `Model`, immediately if it is
already built).
When loading weights in HDF5 format, returns `None`.
Raises:
ImportError: If h5py is not available and the weight file is in HDF5
format.
"""
if _is_hdf5_filepath(filepath):
save_format = 'h5'
else:
try:
pywrap_tensorflow.NewCheckpointReader(filepath)
save_format = 'tf'
except errors_impl.DataLossError:
# The checkpoint is not readable in TensorFlow format. Try HDF5.
save_format = 'h5'
if save_format == 'tf':
status = self._checkpointable_saver.restore(filepath)
if by_name:
raise NotImplementedError(
'Weights may only be loaded based on topology into Models when '
'loading TensorFlow-formatted weights (got by_name=True to '
'load_weights).')
if not context.executing_eagerly():
session = backend.get_session()
# Restore existing variables (if any) immediately, and set up a
# streaming restore for any variables created in the future.
checkpointable_utils.streaming_restore(status=status, session=session)
return status
if h5py is None:
raise ImportError(
'`load_weights` requires h5py when loading weights from HDF5.')
if self._is_graph_network and not self.built:
raise NotImplementedError(
'Unable to load weights saved in HDF5 format into a subclassed '
'Model which has not created its variables yet. Call the Model '
'first, then load the weights.')
with h5py.File(filepath, 'r') as f:
if 'layer_names' not in f.attrs and 'model_weights' in f:
f = f['model_weights']
if by_name:
saving.load_weights_from_hdf5_group_by_name(f, self.layers)
else:
saving.load_weights_from_hdf5_group(f, self.layers)
def _updated_config(self):
"""Util shared between different serialization methods.
Returns:
Model config with Keras version information added.
"""
from tensorflow.python.keras import __version__ as keras_version # pylint: disable=g-import-not-at-top
config = self.get_config()
model_config = {
'class_name': self.__class__.__name__,
'config': config,
'keras_version': keras_version,
'backend': backend.backend()
}
return model_config
def to_json(self, **kwargs):
"""Returns a JSON string containing the network configuration.
To load a network from a JSON save file, use
`keras.models.model_from_json(json_string, custom_objects={})`.
Arguments:
**kwargs: Additional keyword arguments
to be passed to `json.dumps()`.
Returns:
A JSON string.
"""
def get_json_type(obj):
# If obj is any numpy type
if type(obj).__module__ == np.__name__:
return obj.item()
# If obj is a python 'type'
if type(obj).__name__ == type.__name__:
return obj.__name__
raise TypeError('Not JSON Serializable:', obj)
model_config = self._updated_config()
return json.dumps(model_config, default=get_json_type, **kwargs)
def to_yaml(self, **kwargs):
"""Returns a yaml string containing the network configuration.
To load a network from a yaml save file, use
`keras.models.model_from_yaml(yaml_string, custom_objects={})`.
`custom_objects` should be a dictionary mapping
the names of custom losses / layers / etc to the corresponding
functions / classes.
Arguments:
**kwargs: Additional keyword arguments
to be passed to `yaml.dump()`.
Returns:
A YAML string.
Raises:
ImportError: if yaml module is not found.
"""
if yaml is None:
raise ImportError(
'Requires yaml module installed (`pip install pyyaml`).')
return yaml.dump(self._updated_config(), **kwargs)
def summary(self, line_length=None, positions=None, print_fn=None):
"""Prints a string summary of the network.
Arguments:
line_length: Total length of printed lines
(e.g. set this to adapt the display to different
terminal window sizes).
positions: Relative or absolute positions of log elements
in each line. If not provided,
defaults to `[.33, .55, .67, 1.]`.
print_fn: Print function to use. Defaults to `print`.
It will be called on each line of the summary.
You can set it to a custom function
in order to capture the string summary.
Raises:
ValueError: if `summary()` is called before the model is built.
"""
if not self.built:
raise ValueError('This model has never been called, thus its weights '
'have not yet been created, so no summary can be '
'displayed. Build the model first '
'(e.g. by calling it on some data).')
layer_utils.print_summary(self,
line_length=line_length,
positions=positions,
print_fn=print_fn)
def _is_hdf5_filepath(filepath):
return filepath.endswith('.h5') or filepath.endswith('.keras')
def _make_node_key(layer_name, node_index):
return layer_name + '_ib-' + str(node_index)
def _map_graph_network(inputs, outputs):
  """Validates a network's topology and gather its layers and nodes.

  Arguments:
    inputs: List of input tensors.
    outputs: List of outputs tensors.

  Returns:
    A tuple `(network_nodes, nodes_by_depth, layers, layers_by_depth)`.
    - network_nodes: set of node-key strings (see `_make_node_key`)
      identifying the nodes relevant to this network.
    - nodes_by_depth: dict mapping ints (depth) to lists of node instances.
    - layers: list of Layer instances.
    - layers_by_depth: dict mapping ints (depth) to lists of layer instances.

  Raises:
    ValueError: In case the network is not valid (e.g. disconnected graph).
  """
  # Network_nodes: set of nodes included in the graph of layers
  # (not all nodes included in the layers are relevant to the current graph).
  network_nodes = set()  # ids of all nodes relevant to the Network
  nodes_depths = {}  # dict {node: depth value}
  layers_depths = {}  # dict {layer: depth value}
  layer_indices = {}  # dict {layer: index in traversal}
  nodes_in_decreasing_depth = []

  def build_map(tensor,
                finished_nodes,
                nodes_in_progress,
                layer,
                node_index,
                tensor_index):
    """Builds a map of the graph of layers.

    This recursively updates the map `layer_indices`,
    the list `nodes_in_decreasing_depth` and the set `network_nodes`.

    Arguments:
        tensor: Some tensor in a graph.
        finished_nodes: Set of nodes whose subgraphs have been traversed
            completely. Useful to prevent duplicated work.
        nodes_in_progress: Set of nodes that are currently active on the
            recursion stack. Useful to detect cycles.
        layer: Layer from which `tensor` comes from.
        node_index: Node index from which `tensor` comes from.
        tensor_index: Tensor_index from which `tensor` comes from.

    Raises:
        ValueError: if a cycle is detected.
    """
    node = layer._inbound_nodes[node_index]  # pylint: disable=protected-access
    # Prevent cycles.
    if node in nodes_in_progress:
      raise ValueError('The tensor ' + str(tensor) + ' at layer "' +
                       layer.name + '" is part of a cycle.')
    # Don't repeat work for shared subgraphs
    if node in finished_nodes:
      return
    node_key = _make_node_key(layer.name, node_index)
    # Update network_nodes.
    network_nodes.add(node_key)
    # Store the traversal order for layer sorting.
    if layer not in layer_indices:
      layer_indices[layer] = len(layer_indices)
    nodes_in_progress.add(node)
    # Propagate to all previous tensors connected to this node.
    for i in range(len(node.inbound_layers)):
      x = node.input_tensors[i]
      layer = node.inbound_layers[i]
      node_index = node.node_indices[i]
      tensor_index = node.tensor_indices[i]
      build_map(x, finished_nodes, nodes_in_progress, layer,
                node_index, tensor_index)
    finished_nodes.add(node)
    nodes_in_progress.remove(node)
    # Post-order append: a node is recorded only after all of its ancestors,
    # which is what makes this list ordered by decreasing depth.
    nodes_in_decreasing_depth.append(node)

  finished_nodes = set()
  nodes_in_progress = set()
  # Start the depth-first traversal from every model output.
  for x in outputs:
    layer, node_index, tensor_index = x._keras_history  # pylint: disable=protected-access
    build_map(x, finished_nodes, nodes_in_progress,
              layer=layer,
              node_index=node_index,
              tensor_index=tensor_index)

  # Assign depths: walking the post-order list in reverse visits each node
  # before its inbound nodes, so inbound depths can be bumped to depth + 1.
  for node in reversed(nodes_in_decreasing_depth):
    # If the depth is not set, the node has no outbound nodes (depth 0).
    depth = nodes_depths.setdefault(node, 0)
    # Update the depth of the corresponding layer
    previous_depth = layers_depths.get(node.outbound_layer, 0)
    # If we've seen this layer before at a higher depth,
    # we should use that depth instead of the node depth.
    # This is necessary for shared layers that have inputs at different
    # depth levels in the graph.
    depth = max(depth, previous_depth)
    layers_depths[node.outbound_layer] = depth
    nodes_depths[node] = depth
    # Update the depth of inbound nodes.
    # The "depth" of a node is the max of the depths
    # of all layers it is connected to.
    for i in range(len(node.inbound_layers)):
      inbound_layer = node.inbound_layers[i]
      node_index = node.node_indices[i]
      inbound_node = inbound_layer._inbound_nodes[node_index]  # pylint: disable=protected-access
      previous_depth = nodes_depths.get(inbound_node, 0)
      nodes_depths[inbound_node] = max(depth + 1, previous_depth)

  # Build a dict {depth: list of nodes with this depth}
  nodes_by_depth = {}
  for node, depth in nodes_depths.items():
    if depth not in nodes_by_depth:
      nodes_by_depth[depth] = []
    nodes_by_depth[depth].append(node)

  # Build a dict {depth: list of layers with this depth}
  layers_by_depth = {}
  for layer, depth in layers_depths.items():
    if depth not in layers_by_depth:
      layers_by_depth[depth] = []
    layers_by_depth[depth].append(layer)

  # Get sorted list of layer depths.
  depth_keys = list(layers_by_depth.keys())
  depth_keys.sort(reverse=True)

  # Set self.layers and self._layers_by_depth.
  layers = []
  for depth in depth_keys:
    layers_for_depth = layers_by_depth[depth]
    # Network.layers needs to have a deterministic order:
    # here we order them by traversal order.
    layers_for_depth.sort(key=lambda x: layer_indices[x])
    layers.extend(layers_for_depth)

  # Get sorted list of node depths.
  depth_keys = list(nodes_by_depth.keys())
  depth_keys.sort(reverse=True)

  # Check that all tensors required are computable.
  # computable_tensors: all tensors in the graph
  # that can be computed from the inputs provided.
  computable_tensors = []
  for x in inputs:
    computable_tensors.append(x)

  layers_with_complete_input = []  # To provide a better error msg.
  for depth in depth_keys:
    for node in nodes_by_depth[depth]:
      layer = node.outbound_layer
      if layer:
        for x in node.input_tensors:
          if x not in computable_tensors:
            raise ValueError('Graph disconnected: '
                             'cannot obtain value for tensor ' + str(x) +
                             ' at layer "' + layer.name + '". '
                             'The following previous layers '
                             'were accessed without issue: ' +
                             str(layers_with_complete_input))
        for x in node.output_tensors:
          computable_tensors.append(x)
        layers_with_complete_input.append(layer.name)

  # Ensure name unicity, which will be crucial for serialization
  # (since serialized nodes refer to layers by their name).
  all_names = [layer.name for layer in layers]
  for name in all_names:
    if all_names.count(name) != 1:
      raise ValueError('The name "' + name + '" is used ' +
                       str(all_names.count(name)) + ' times in the model. '
                       'All layer names should be unique.')
  return network_nodes, nodes_by_depth, layers, layers_by_depth
| 39.413584 | 120 | 0.656 |
d5cd68f8211c4ddae669e1d77c1105c38522daba | 246,613 | py | Python | zerver/tests/test_auth_backends.py | arpit551/zulip | 6950d8d76965371df70a735b499d17377e6db42b | [
"Apache-2.0"
] | null | null | null | zerver/tests/test_auth_backends.py | arpit551/zulip | 6950d8d76965371df70a735b499d17377e6db42b | [
"Apache-2.0"
] | null | null | null | zerver/tests/test_auth_backends.py | arpit551/zulip | 6950d8d76965371df70a735b499d17377e6db42b | [
"Apache-2.0"
] | null | null | null | import base64
import copy
import datetime
import json
import re
import time
import urllib
from contextlib import contextmanager
from typing import Any, Callable, Dict, Iterator, List, Optional, Sequence, Tuple
from unittest import mock
import jwt
import ldap
import requests
import responses
import ujson
from bs4 import BeautifulSoup
from cryptography.hazmat.primitives.ciphers.aead import AESGCM
from django.conf import settings
from django.contrib.auth import authenticate
from django.core import mail
from django.http import HttpRequest, HttpResponse
from django.test import override_settings
from django.test.client import RequestFactory
from django.urls import reverse
from django.utils.timezone import now as timezone_now
from django_auth_ldap.backend import LDAPSearch, _LDAPUser
from onelogin.saml2.auth import OneLogin_Saml2_Auth
from onelogin.saml2.response import OneLogin_Saml2_Response
from social_core.exceptions import AuthFailed, AuthStateForbidden
from social_django.storage import BaseDjangoStorage
from social_django.strategy import DjangoStrategy
from confirmation.models import Confirmation, create_confirmation_link
from zerver.lib.actions import (
do_create_realm,
do_create_user,
do_deactivate_realm,
do_deactivate_user,
do_invite_users,
do_reactivate_realm,
do_reactivate_user,
do_set_realm_property,
ensure_stream,
)
from zerver.lib.avatar import avatar_url
from zerver.lib.avatar_hash import user_avatar_path
from zerver.lib.dev_ldap_directory import generate_dev_ldap_dir
from zerver.lib.email_validation import (
get_existing_user_errors,
get_realm_email_validator,
validate_email_is_valid,
)
from zerver.lib.exceptions import RateLimited
from zerver.lib.initial_password import initial_password
from zerver.lib.mobile_auth_otp import otp_decrypt_api_key
from zerver.lib.rate_limiter import add_ratelimit_rule, remove_ratelimit_rule
from zerver.lib.request import JsonableError
from zerver.lib.storage import static_path
from zerver.lib.test_classes import ZulipTestCase
from zerver.lib.test_helpers import (
create_s3_buckets,
get_test_image_file,
load_subdomain_token,
use_s3_backend,
)
from zerver.lib.upload import MEDIUM_AVATAR_SIZE, resize_avatar
from zerver.lib.users import get_all_api_keys
from zerver.lib.utils import generate_random_token
from zerver.lib.validator import (
Validator,
check_bool,
check_dict_only,
check_int,
check_list,
check_string,
validate_login_email,
)
from zerver.models import (
CustomProfileField,
CustomProfileFieldValue,
MultiuseInvite,
PasswordTooWeakError,
PreregistrationUser,
Realm,
RealmDomain,
UserProfile,
clear_supported_auth_backends_cache,
email_to_username,
get_realm,
get_user_by_delivery_email,
)
from zerver.signals import JUST_CREATED_THRESHOLD
from zerver.views.auth import maybe_send_to_registration
from zproject.backends import (
AUTH_BACKEND_NAME_MAP,
AppleAuthBackend,
AzureADAuthBackend,
DevAuthBackend,
EmailAuthBackend,
ExternalAuthDataDict,
ExternalAuthResult,
GitHubAuthBackend,
GitLabAuthBackend,
GoogleAuthBackend,
PopulateUserLDAPError,
RateLimitedAuthenticationByUsername,
SAMLAuthBackend,
SocialAuthMixin,
ZulipAuthMixin,
ZulipDummyBackend,
ZulipLDAPAuthBackend,
ZulipLDAPConfigurationError,
ZulipLDAPException,
ZulipLDAPExceptionNoMatchingLDAPUser,
ZulipLDAPExceptionOutsideDomain,
ZulipLDAPUser,
ZulipLDAPUserPopulator,
ZulipRemoteUserBackend,
apple_auth_enabled,
check_password_strength,
dev_auth_enabled,
email_belongs_to_ldap,
get_external_method_dicts,
github_auth_enabled,
gitlab_auth_enabled,
google_auth_enabled,
password_auth_enabled,
query_ldap,
require_email_format_usernames,
saml_auth_enabled,
sync_user_from_ldap,
)
class AuthBackendTest(ZulipTestCase):
    """Exercises the authenticate() contract of each authentication backend:
    success with valid credentials, rejection of bad credentials/realms, and
    correct handling of deactivated users/realms and backend enable toggles.
    """

    def get_username(self, email_to_username: Optional[Callable[[str], str]]=None) -> str:
        """Return hamlet's login name, optionally mapped through email_to_username."""
        username = self.example_email('hamlet')
        if email_to_username is not None:
            username = email_to_username(self.example_email('hamlet'))
        return username

    def verify_backend(self, backend: Any, *, good_kwargs: Dict[str, Any], bad_kwargs: Optional[Dict[str, Any]]=None) -> None:
        """Assert `backend` authenticates hamlet with good_kwargs, rejects
        bad_kwargs, and respects user/realm deactivation and the
        server-level and realm-level backend enable switches."""
        clear_supported_auth_backends_cache()
        user_profile = self.example_user('hamlet')

        # If bad_kwargs was specified, verify auth fails in that case
        if bad_kwargs is not None:
            self.assertIsNone(backend.authenticate(**bad_kwargs))

        # Verify auth works
        result = backend.authenticate(**good_kwargs)
        self.assertEqual(user_profile, result)

        # Verify auth fails with a deactivated user
        do_deactivate_user(user_profile)
        result = backend.authenticate(**good_kwargs)
        if isinstance(backend, SocialAuthMixin):
            # Returns a redirect to login page with an error.
            self.assertEqual(result.status_code, 302)
            self.assertEqual(result.url, "/login/?is_deactivated=true")
        else:
            # Just takes you back to the login page treating as
            # invalid auth; this is correct because the form will
            # provide the appropriate validation error for deactivated
            # account.
            self.assertIsNone(result)

        # Reactivate the user and verify auth works again
        do_reactivate_user(user_profile)
        result = backend.authenticate(**good_kwargs)
        self.assertEqual(user_profile, result)

        # Verify auth fails with a deactivated realm
        do_deactivate_realm(user_profile.realm)
        self.assertIsNone(backend.authenticate(**good_kwargs))

        # Verify auth works again after reactivating the realm
        do_reactivate_realm(user_profile.realm)
        result = backend.authenticate(**good_kwargs)
        self.assertEqual(user_profile, result)

        # ZulipDummyBackend isn't a real backend so the remainder
        # doesn't make sense for it
        if isinstance(backend, ZulipDummyBackend):
            return

        # Verify auth fails if the auth backend is disabled on server
        with self.settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipDummyBackend',)):
            clear_supported_auth_backends_cache()
            self.assertIsNone(backend.authenticate(**good_kwargs))
        clear_supported_auth_backends_cache()

        # Verify auth fails if the auth backend is disabled for the realm
        for backend_name in AUTH_BACKEND_NAME_MAP.keys():
            if isinstance(backend, AUTH_BACKEND_NAME_MAP[backend_name]):
                break

        index = getattr(user_profile.realm.authentication_methods, backend_name).number
        user_profile.realm.authentication_methods.set_bit(index, False)
        user_profile.realm.save()
        if 'realm' in good_kwargs:
            # Because this test is a little unfaithful to the ordering
            # (i.e. we fetched the realm object before this function
            # was called, when in fact it should be fetched after we
            # changed the allowed authentication methods), we need to
            # propagate the changes we just made to the actual realm
            # object in good_kwargs.
            good_kwargs['realm'] = user_profile.realm
        self.assertIsNone(backend.authenticate(**good_kwargs))
        user_profile.realm.authentication_methods.set_bit(index, True)
        user_profile.realm.save()

    def test_dummy_backend(self) -> None:
        """ZulipDummyBackend authenticates only when use_dummy_backend is set."""
        realm = get_realm("zulip")
        username = self.get_username()
        self.verify_backend(ZulipDummyBackend(),
                            good_kwargs=dict(username=username,
                                             realm=realm,
                                             use_dummy_backend=True),
                            bad_kwargs=dict(username=username,
                                            realm=realm,
                                            use_dummy_backend=False))

    def setup_subdomain(self, user_profile: UserProfile) -> None:
        """Pin the user's realm to the 'zulip' subdomain."""
        realm = user_profile.realm
        realm.string_id = 'zulip'
        realm.save()

    def test_email_auth_backend(self) -> None:
        """EmailAuthBackend accepts the right password for the right realm and
        reports email_auth_disabled when email auth is turned off."""
        username = self.get_username()
        user_profile = self.example_user('hamlet')
        password = "testpassword"
        user_profile.set_password(password)
        user_profile.save()

        with mock.patch('zproject.backends.email_auth_enabled',
                        return_value=False), \
                mock.patch('zproject.backends.password_auth_enabled',
                           return_value=True):
            return_data: Dict[str, bool] = {}
            user = EmailAuthBackend().authenticate(request=mock.MagicMock(),
                                                   username=user_profile.delivery_email,
                                                   realm=get_realm("zulip"),
                                                   password=password,
                                                   return_data=return_data)
            self.assertEqual(user, None)
            self.assertTrue(return_data['email_auth_disabled'])

        # BUGFIX: this verify_backend() call was previously duplicated
        # verbatim, running the whole (expensive) verification twice for
        # no additional coverage; one call suffices.
        self.verify_backend(EmailAuthBackend(),
                            good_kwargs=dict(request=mock.MagicMock(),
                                             password=password,
                                             username=username,
                                             realm=get_realm('zulip'),
                                             return_data=dict()),
                            bad_kwargs=dict(request=mock.MagicMock(),
                                            password=password,
                                            username=username,
                                            realm=get_realm('zephyr'),
                                            return_data=dict()))

    def test_email_auth_backend_empty_password(self) -> None:
        """An empty password is always rejected, even if it is the stored one."""
        user_profile = self.example_user('hamlet')
        password = "testpassword"
        user_profile.set_password(password)
        user_profile.save()

        # First, verify authentication works with the a nonempty
        # password so we know we've set up the test correctly.
        self.assertIsNotNone(EmailAuthBackend().authenticate(request=mock.MagicMock(),
                                                             username=self.example_email('hamlet'),
                                                             password=password,
                                                             realm=get_realm("zulip")))

        # Now do the same test with the empty string as the password.
        password = ""
        with self.assertRaises(PasswordTooWeakError):
            # UserProfile.set_password protects against setting an empty password.
            user_profile.set_password(password)
        # We do want to force an empty password for this test, so we bypass the protection
        # by using Django's version of this method.
        super(UserProfile, user_profile).set_password(password)
        user_profile.save()
        self.assertIsNone(EmailAuthBackend().authenticate(request=mock.MagicMock(),
                                                          username=self.example_email('hamlet'),
                                                          password=password,
                                                          realm=get_realm("zulip")))

    def test_email_auth_backend_disabled_password_auth(self) -> None:
        """With password auth disabled for the realm, even the correct password fails."""
        user_profile = self.example_user('hamlet')
        password = "testpassword"
        user_profile.set_password(password)
        user_profile.save()
        # Verify if a realm has password auth disabled, correct password is rejected
        with mock.patch('zproject.backends.password_auth_enabled', return_value=False):
            self.assertIsNone(EmailAuthBackend().authenticate(request=mock.MagicMock(),
                                                              username=self.example_email('hamlet'),
                                                              password=password,
                                                              realm=get_realm("zulip")))

    def test_login_preview(self) -> None:
        # Test preview=true displays organization login page
        # instead of redirecting to app
        self.login('iago')
        realm = get_realm("zulip")
        result = self.client_get('/login/?preview=true')
        self.assertEqual(result.status_code, 200)
        self.assert_in_response(realm.description, result)
        self.assert_in_response(realm.name, result)
        self.assert_in_response("Log in to Zulip", result)

        data = dict(description=ujson.dumps("New realm description"),
                    name=ujson.dumps("New Zulip"))
        result = self.client_patch('/json/realm', data)
        self.assert_json_success(result)

        result = self.client_get('/login/?preview=true')
        self.assertEqual(result.status_code, 200)
        self.assert_in_response("New realm description", result)
        self.assert_in_response("New Zulip", result)

        result = self.client_get('/login/')
        self.assertEqual(result.status_code, 302)
        self.assertEqual(result.url, 'http://zulip.testserver')

    @override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipDummyBackend',))
    def test_no_backend_enabled(self) -> None:
        """With only the dummy backend, login/register pages show an error."""
        result = self.client_get('/login/')
        self.assert_in_success_response(["No authentication backends are enabled"], result)

        result = self.client_get('/register/')
        self.assert_in_success_response(["No authentication backends are enabled"], result)

    @override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.GoogleAuthBackend',))
    def test_any_backend_enabled(self) -> None:
        # testing to avoid false error messages.
        result = self.client_get('/login/')
        self.assert_not_in_success_response(["No authentication backends are enabled"], result)

        result = self.client_get('/register/')
        self.assert_not_in_success_response(["No authentication backends are enabled"], result)

    @override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',),
                       LDAP_EMAIL_ATTR="mail")
    def test_ldap_backend(self) -> None:
        """ZulipLDAPAuthBackend authenticates against the dev LDAP directory."""
        self.init_default_ldap_database()
        user_profile = self.example_user('hamlet')
        email = user_profile.delivery_email
        password = self.ldap_password('hamlet')
        self.setup_subdomain(user_profile)

        username = self.get_username()
        backend = ZulipLDAPAuthBackend()

        # Test LDAP auth fails when LDAP server rejects password
        self.assertIsNone(backend.authenticate(request=mock.MagicMock(), username=email,
                                               password="wrongpass", realm=get_realm("zulip")))

        # BUGFIX: this verify_backend() call was previously duplicated
        # verbatim; one call provides the same coverage.
        self.verify_backend(backend,
                            bad_kwargs=dict(request=mock.MagicMock(),
                                            username=username,
                                            password=password,
                                            realm=get_realm('zephyr')),
                            good_kwargs=dict(request=mock.MagicMock(),
                                             username=username,
                                             password=password,
                                             realm=get_realm('zulip')))

    def test_devauth_backend(self) -> None:
        """DevAuthBackend logs a user in by username, scoped to the realm."""
        self.verify_backend(DevAuthBackend(),
                            good_kwargs=dict(dev_auth_username=self.get_username(),
                                             realm=get_realm("zulip")),
                            bad_kwargs=dict(dev_auth_username=self.get_username(),
                                            realm=get_realm("zephyr")))

    @override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipRemoteUserBackend',))
    def test_remote_user_backend(self) -> None:
        """ZulipRemoteUserBackend trusts REMOTE_USER for the matching realm only."""
        username = self.get_username()
        self.verify_backend(ZulipRemoteUserBackend(),
                            good_kwargs=dict(remote_user=username,
                                             realm=get_realm('zulip')),
                            bad_kwargs=dict(remote_user=username,
                                            realm=get_realm('zephyr')))

    @override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipRemoteUserBackend',))
    def test_remote_user_backend_invalid_realm(self) -> None:
        # NOTE(review): this test is currently byte-identical to
        # test_remote_user_backend; presumably it was intended to exercise
        # an invalid realm in good_kwargs as well -- worth confirming the
        # original intent before changing it.
        username = self.get_username()
        self.verify_backend(ZulipRemoteUserBackend(),
                            good_kwargs=dict(remote_user=username,
                                             realm=get_realm('zulip')),
                            bad_kwargs=dict(remote_user=username,
                                            realm=get_realm('zephyr')))

    @override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipRemoteUserBackend',))
    @override_settings(SSO_APPEND_DOMAIN='zulip.com')
    def test_remote_user_backend_sso_append_domain(self) -> None:
        """With SSO_APPEND_DOMAIN, the bare username (no domain) is accepted."""
        username = self.get_username(email_to_username)
        self.verify_backend(ZulipRemoteUserBackend(),
                            good_kwargs=dict(remote_user=username,
                                             realm=get_realm("zulip")),
                            bad_kwargs=dict(remote_user=username,
                                            realm=get_realm('zephyr')))

    @override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.GitHubAuthBackend',
                                                'zproject.backends.GoogleAuthBackend'))
    def test_social_auth_backends(self) -> None:
        """Run verify_backend against the GitHub and Google social backends,
        mocking only the minimum of the python-social-auth pipeline."""
        user = self.example_user('hamlet')
        token_data_dict = {
            'access_token': 'foobar',
            'token_type': 'bearer',
        }
        github_email_data = [
            dict(email=user.delivery_email,
                 verified=True,
                 primary=True),
            dict(email="nonprimary@zulip.com",
                 verified=True),
            dict(email="ignored@example.com",
                 verified=False),
        ]
        google_email_data = dict(email=user.delivery_email,
                                 name=user.full_name,
                                 email_verified=True)
        backends_to_test: Dict[str, Any] = {
            'google': {
                'urls': [
                    # The limited process that we test here doesn't require mocking any urls.
                ],
                'backend': GoogleAuthBackend,
            },
            'github': {
                'urls': [
                    {
                        'url': "https://api.github.com/user/emails",
                        'method': responses.GET,
                        'status': 200,
                        'body': json.dumps(github_email_data),
                    },
                ],
                'backend': GitHubAuthBackend,
            },
        }

        def patched_authenticate(**kwargs: Any) -> Any:
            # This is how we pass the subdomain to the authentication
            # backend in production code, so we need to do this setup
            # here.
            if 'subdomain' in kwargs:
                backend.strategy.session_set("subdomain", kwargs["subdomain"])
                del kwargs['subdomain']

            # Because we're not simulating the full python-social-auth
            # pipeline here, we need to provide the user's choice of
            # which email to select in the partial phase of the
            # pipeline when we display an email picker for the GitHub
            # authentication backend.  We do that here.
            def return_email() -> Dict[str, str]:
                return {'email': user.delivery_email}
            backend.strategy.request_data = return_email

            result = orig_authenticate(backend, **kwargs)
            return result

        def patched_get_verified_emails(*args: Any, **kwargs: Any) -> Any:
            return google_email_data['email']

        for backend_name in backends_to_test:
            with responses.RequestsMock(assert_all_requests_are_fired=True) as requests_mock:
                urls: List[Dict[str, Any]] = backends_to_test[backend_name]['urls']
                for details in urls:
                    requests_mock.add(
                        details['method'],
                        details['url'],
                        status=details['status'],
                        body=details['body'])
                backend_class = backends_to_test[backend_name]['backend']

                backend = backend_class()
                backend.strategy = DjangoStrategy(storage=BaseDjangoStorage())
                orig_authenticate = backend_class.authenticate
                backend.authenticate = patched_authenticate
                orig_get_verified_emails = backend_class.get_verified_emails
                if backend_name == "google":
                    backend.get_verified_emails = patched_get_verified_emails

                good_kwargs = dict(backend=backend, strategy=backend.strategy,
                                   storage=backend.strategy.storage,
                                   response=token_data_dict,
                                   subdomain='zulip')
                bad_kwargs = dict(subdomain='acme')
                with mock.patch('zerver.views.auth.redirect_and_log_into_subdomain',
                                return_value=user):
                    self.verify_backend(backend,
                                        good_kwargs=good_kwargs,
                                        bad_kwargs=bad_kwargs)
                    # Repeat with a different (existing but wrong) subdomain.
                    bad_kwargs['subdomain'] = "zephyr"
                    self.verify_backend(backend,
                                        good_kwargs=good_kwargs,
                                        bad_kwargs=bad_kwargs)
                # Restore the class-level attributes we monkeypatched above,
                # so later iterations/tests see the real implementations.
                backend.authenticate = orig_authenticate
                backend.get_verified_emails = orig_get_verified_emails
class RateLimitAuthenticationTests(ZulipTestCase):
    """Tests for per-username rate limiting of authentication attempts."""

    @override_settings(RATE_LIMITING_AUTHENTICATE=True)
    def do_test_auth_rate_limiting(self,
                                   attempt_authentication_func: Callable[[HttpRequest, str, str],
                                                                         Optional[UserProfile]],
                                   username: str, correct_password: str, wrong_password: str,
                                   expected_user_profile: UserProfile) -> None:
        """Shared driver: with a temporary 2-attempts-per-10s rule, verify that
        failed attempts get blocked, that time passing unblocks them, and that
        a successful login resets the counter.

        attempt_authentication_func is called with (request, username, password)
        and should return the authenticated UserProfile or None.
        """
        # We have to mock RateLimitedAuthenticationByUsername.key to avoid key collisions
        # if tests run in parallel.
        original_key_method = RateLimitedAuthenticationByUsername.key
        salt = generate_random_token(32)

        def _mock_key(self: RateLimitedAuthenticationByUsername) -> str:
            # Prefix the real key with a per-test random salt.
            return f"{salt}:{original_key_method(self)}"

        def attempt_authentication(username: str, password: str) -> Optional[UserProfile]:
            request = HttpRequest()
            return attempt_authentication_func(request, username, password)

        # Temporary rule: at most 2 attempts per 10 seconds.
        add_ratelimit_rule(10, 2, domain='authenticate_by_username')
        with mock.patch.object(RateLimitedAuthenticationByUsername, 'key', new=_mock_key):
            try:
                start_time = time.time()
                # Freeze time so all attempts land in the same 10s window.
                with mock.patch('time.time', return_value=start_time):
                    self.assertIsNone(attempt_authentication(username, wrong_password))
                    self.assertIsNone(attempt_authentication(username, wrong_password))
                    # 2 failed attempts is the limit, so the next ones should get blocked,
                    # even with the correct password.
                    with self.assertRaises(RateLimited):
                        attempt_authentication(username, correct_password)
                    with self.assertRaises(RateLimited):
                        attempt_authentication(username, wrong_password)

                # After enough time passes, more authentication attempts can be made:
                with mock.patch('time.time', return_value=start_time + 11.0):
                    self.assertIsNone(attempt_authentication(username, wrong_password))

                    self.assertEqual(attempt_authentication(username, correct_password), expected_user_profile)  # Correct password

                    # A correct login attempt should reset the rate limits for this user profile,
                    # so the next two attempts shouldn't get limited:
                    self.assertIsNone(attempt_authentication(username, wrong_password))
                    self.assertIsNone(attempt_authentication(username, wrong_password))
                    # But the third attempt goes over the limit:
                    with self.assertRaises(RateLimited):
                        attempt_authentication(username, wrong_password)
            finally:
                # Clean up to avoid affecting other tests.
                RateLimitedAuthenticationByUsername(username).clear_history()
                remove_ratelimit_rule(10, 2, domain='authenticate_by_username')

    def test_email_auth_backend_user_based_rate_limiting(self) -> None:
        """Rate limiting applies to EmailAuthBackend attempts."""
        user_profile = self.example_user('hamlet')
        password = "testpassword"
        user_profile.set_password(password)
        user_profile.save()

        def attempt_authentication(request: HttpRequest, username: str, password: str) -> Optional[UserProfile]:
            return EmailAuthBackend().authenticate(request=request,
                                                   username=username,
                                                   realm=get_realm("zulip"),
                                                   password=password,
                                                   return_data=dict())

        self.do_test_auth_rate_limiting(attempt_authentication,
                                        user_profile.delivery_email,
                                        password, 'wrong_password',
                                        user_profile)

    @override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',),
                       LDAP_EMAIL_ATTR="mail")
    def test_ldap_backend_user_based_rate_limiting(self) -> None:
        """Rate limiting applies to ZulipLDAPAuthBackend attempts."""
        self.init_default_ldap_database()
        user_profile = self.example_user('hamlet')
        password = self.ldap_password('hamlet')

        def attempt_authentication(request: HttpRequest, username: str, password: str) -> Optional[UserProfile]:
            return ZulipLDAPAuthBackend().authenticate(request=request,
                                                       username=username,
                                                       realm=get_realm("zulip"),
                                                       password=password,
                                                       return_data=dict())

        self.do_test_auth_rate_limiting(attempt_authentication,
                                        user_profile.delivery_email,
                                        password, 'wrong_password',
                                        user_profile)

    @override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.EmailAuthBackend',
                                                'zproject.backends.ZulipLDAPAuthBackend'),
                       LDAP_EMAIL_ATTR="mail")
    def test_email_and_ldap_backends_user_based_rate_limiting(self) -> None:
        """Rate limiting is shared across backends: attempts against either
        backend count toward the same per-username limit."""
        self.init_default_ldap_database()
        user_profile = self.example_user('hamlet')
        ldap_password = self.ldap_password('hamlet')

        email_password = "email_password"
        user_profile.set_password(email_password)
        user_profile.save()

        def attempt_authentication(request: HttpRequest, username: str, password: str) -> Optional[UserProfile]:
            # Django's authenticate() tries all enabled backends in order.
            return authenticate(request=request,
                                username=username,
                                realm=get_realm("zulip"),
                                password=password,
                                return_data=dict())

        self.do_test_auth_rate_limiting(attempt_authentication,
                                        user_profile.delivery_email,
                                        email_password, 'wrong_password',
                                        user_profile)
        self.do_test_auth_rate_limiting(attempt_authentication,
                                        user_profile.delivery_email,
                                        ldap_password, 'wrong_password',
                                        user_profile)
class CheckPasswordStrengthTest(ZulipTestCase):
    def test_check_password_strength(self) -> None:
        """check_password_strength rejects empty, too-short, and too-guessable
        passwords, and accepts a sufficiently strong one."""
        with self.settings(PASSWORD_MIN_LENGTH=0, PASSWORD_MIN_GUESSES=0):
            # The empty password is rejected even with no configured minimums.
            self.assertFalse(check_password_strength(''))

        with self.settings(PASSWORD_MIN_LENGTH=6, PASSWORD_MIN_GUESSES=1000):
            # '' is empty, 'short' is below the minimum length, and 'longer'
            # meets the length requirement but is too easy to guess.
            for weak_candidate in ('', 'short', 'longer'):
                self.assertFalse(check_password_strength(weak_candidate))
            # Good password:
            self.assertTrue(check_password_strength('f657gdGGk9'))
class DesktopFlowTestingLib(ZulipTestCase):
    """Mixin with helpers for verifying the desktop-app login flow pages."""

    def verify_desktop_flow_app_page(self, response: HttpResponse) -> None:
        """Check that `response` is the intermediate "finish desktop login" page."""
        self.assertEqual(response.status_code, 200)
        self.assertIn(b"<h1>Finish desktop login</h1>", response.content)

    def verify_desktop_flow_end_page(self, response: HttpResponse, email: str,
                                     desktop_flow_otp: str) -> None:
        """Scrape the desktop-flow end page, decrypt the token it embeds with
        `desktop_flow_otp`, and verify that following the browser link logs
        in the user with the given `email`."""
        self.assertEqual(response.status_code, 200)

        # The page embeds the encrypted payload in an <input value=...> and
        # the browser continuation URL in the first <a href=...>.
        soup = BeautifulSoup(response.content, "html.parser")
        desktop_data = soup.find("input", value=True)["value"]
        browser_url = soup.find("a", href=True)["href"]

        decrypted_key = self.verify_desktop_data_and_return_key(desktop_data, desktop_flow_otp)

        self.assertEqual(browser_url, f'http://zulip.testserver/accounts/login/subdomain/{decrypted_key}')
        result = self.client_get(browser_url)
        self.assertEqual(result.status_code, 302)
        realm = get_realm("zulip")
        user_profile = get_user_by_delivery_email(email, realm)
        self.assert_logged_in_user_id(user_profile.id)

    def verify_desktop_data_and_return_key(self, desktop_data: str, desktop_flow_otp: str) -> str:
        """Decrypt the hex-encoded `desktop_data` blob with the hex OTP as the
        AES-GCM key and return the plaintext login key.

        The blob layout here is: 12-byte nonce followed by the ciphertext,
        with an empty associated-data field.
        """
        key = bytes.fromhex(desktop_flow_otp)
        data = bytes.fromhex(desktop_data)
        iv = data[:12]
        ciphertext = data[12:]
        return AESGCM(key).decrypt(iv, ciphertext, b"").decode()
class SocialAuthBase(DesktopFlowTestingLib, ZulipTestCase):
"""This is a base class for testing social-auth backends. These
methods are often overridden by subclasses:
register_extra_endpoints() - If the backend being tested calls some extra
endpoints then they can be added here.
get_account_data_dict() - Return the data returned by the user info endpoint
according to the respective backend.
"""
# Don't run base class tests, make sure to set it to False
# in subclass otherwise its tests will not run.
__unittest_skip__ = True
    def setUp(self) -> None:
        """Bind hamlet's identity and the subclass's BACKEND_CLASS to the test,
        and force-reload python-social-auth's backend cache."""
        super().setUp()
        self.user_profile = self.example_user('hamlet')
        self.email = self.user_profile.delivery_email
        self.name = self.user_profile.full_name
        self.backend = self.BACKEND_CLASS
        self.backend.strategy = DjangoStrategy(storage=BaseDjangoStorage())
        self.user_profile.backend = self.backend
        # Logger name used with assertLogs in the individual tests.
        self.logger_string = f'zulip.auth.{self.backend.name}'

        # This is a workaround for the fact that Python social auth
        # caches the set of authentication backends that are enabled
        # the first time that `social_django.utils` is imported.  See
        # https://github.com/python-social-auth/social-app-django/pull/162
        # for details.
        from social_core.backends.utils import load_backends
        load_backends(settings.AUTHENTICATION_BACKENDS, force_load=True)
def logger_output(self, output_string: str, type: str) -> str:
return f'{type.upper()}:zulip.auth.{self.backend.name}:{output_string}'
    def register_extra_endpoints(self, requests_mock: responses.RequestsMock,
                                 account_data_dict: Dict[str, str],
                                 **extra_data: Any) -> None:
        """Hook for subclasses: register any additional mocked HTTP endpoints
        the backend under test will call.  The base implementation is a no-op."""
        pass
def prepare_login_url_and_headers(
self,
subdomain: Optional[str]=None,
mobile_flow_otp: Optional[str]=None,
desktop_flow_otp: Optional[str]=None,
is_signup: bool=False,
next: str='',
multiuse_object_key: str='',
alternative_start_url: Optional[str]=None,
*,
user_agent: Optional[str]=None,
) -> Tuple[str, Dict[str, Any]]:
url = self.LOGIN_URL
if alternative_start_url is not None:
url = alternative_start_url
params = {}
headers = {}
if subdomain is not None:
headers['HTTP_HOST'] = subdomain + ".testserver"
if mobile_flow_otp is not None:
params['mobile_flow_otp'] = mobile_flow_otp
headers['HTTP_USER_AGENT'] = "ZulipAndroid"
if desktop_flow_otp is not None:
params['desktop_flow_otp'] = desktop_flow_otp
if is_signup:
url = self.SIGNUP_URL
params['next'] = next
params['multiuse_object_key'] = multiuse_object_key
if len(params) > 0:
url += f"?{urllib.parse.urlencode(params)}"
if user_agent is not None:
headers['HTTP_USER_AGENT'] = user_agent
return url, headers
def social_auth_test_finish(self, result: HttpResponse,
account_data_dict: Dict[str, str],
expect_choose_email_screen: bool,
**headers: Any) -> HttpResponse:
parsed_url = urllib.parse.urlparse(result.url)
csrf_state = urllib.parse.parse_qs(parsed_url.query)['state']
result = self.client_get(self.AUTH_FINISH_URL,
dict(state=csrf_state), **headers)
return result
def generate_access_url_payload(self, account_data_dict: Dict[str, str]) -> str:
return json.dumps({
'access_token': 'foobar',
'token_type': 'bearer',
})
    def social_auth_test(self, account_data_dict: Dict[str, str],
                         *, subdomain: Optional[str]=None,
                         mobile_flow_otp: Optional[str]=None,
                         desktop_flow_otp: Optional[str]=None,
                         is_signup: bool=False,
                         next: str='',
                         multiuse_object_key: str='',
                         expect_choose_email_screen: bool=False,
                         alternative_start_url: Optional[str]=None,
                         user_agent: Optional[str]=None,
                         **extra_data: Any) -> HttpResponse:
        """Main entrypoint for all social authentication tests.

        * account_data_dict: Dictionary containing the name/email data
          that should be returned by the social auth backend.
        * subdomain: Which organization's login page is being accessed.
        * desktop_flow_otp / mobile_flow_otp: Token to be used for
          mobile or desktop authentication flow testing.
        * is_signup: Whether we're testing the social flow for
          /register (True) or /login (False).  This is important
          because we need to verify behavior like the
          "Continue to registration" if you try to login using an
          account that doesn't exist but is allowed to signup.
        * next: Parameter passed through in production authentication
          to redirect the user to (e.g.) the specific page in the webapp
          that they clicked a link to before being presented with the login
          page.
        * expect_choose_email_screen: Some social auth backends, like
          GitHub, simultaneously authenticate for multiple email addresses.
          Set this to True if we expect to show the "Choose Email" screen
          in this test should the backend have that feature.
        * multiuse_object_key: Used when the user has clicked a multi-use
          reusable invitation link.
        * alternative_start_url: Used to test legacy mobile app behavior.
        * user_agent: What user-agent to use for the HTTP requests.
        """

        url, headers = self.prepare_login_url_and_headers(
            subdomain, mobile_flow_otp, desktop_flow_otp, is_signup, next,
            multiuse_object_key, alternative_start_url,
            user_agent=user_agent,
        )

        result = self.client_get(url, **headers)

        expected_result_url_prefix = f'http://testserver/login/{self.backend.name}/'
        if settings.SOCIAL_AUTH_SUBDOMAIN is not None:
            expected_result_url_prefix = f'http://{settings.SOCIAL_AUTH_SUBDOMAIN}.testserver/login/{self.backend.name}/'

        # Any redirect that doesn't enter the social-auth flow (e.g. a
        # config-error page) is returned to the caller as-is for inspection.
        if result.status_code != 302 or not result.url.startswith(expected_result_url_prefix):
            return result

        result = self.client_get(result.url, **headers)

        self.assertEqual(result.status_code, 302)
        assert self.AUTHORIZATION_URL in result.url

        # Carry the session cookies forward, as a real browser would.
        self.client.cookies = result.cookies

        # Next, the browser requests result["Location"], and gets
        # redirected back to the registered redirect uri.

        # We register callbacks for the key URLs on Identity Provider that
        # auth completion url will call
        with responses.RequestsMock(assert_all_requests_are_fired=False) as requests_mock:
            requests_mock.add(
                requests_mock.POST,
                self.ACCESS_TOKEN_URL,
                match_querystring=False,
                status=200,
                body=self.generate_access_url_payload(account_data_dict))
            requests_mock.add(
                requests_mock.GET,
                self.USER_INFO_URL,
                status=200,
                body=json.dumps(account_data_dict),
            )
            self.register_extra_endpoints(requests_mock, account_data_dict, **extra_data)

            result = self.social_auth_test_finish(result, account_data_dict,
                                                  expect_choose_email_screen,
                                                  **headers)
        return result
def test_social_auth_no_key(self) -> None:
account_data_dict = self.get_account_data_dict(email=self.email, name=self.name)
with self.settings(**{self.CLIENT_KEY_SETTING: None}):
result = self.social_auth_test(account_data_dict,
subdomain='zulip', next='/user_uploads/image')
self.assertEqual(result.status_code, 302)
self.assertEqual(result.url, self.CONFIG_ERROR_URL)
def test_config_error_development(self) -> None:
if hasattr(self, 'CLIENT_KEY_SETTING') and hasattr(self, 'CLIENT_SECRET_SETTING'):
with self.settings(**{self.CLIENT_KEY_SETTING: None}):
result = self.client_get(self.LOGIN_URL)
self.assertEqual(result.status_code, 302)
self.assertEqual(result.url, self.CONFIG_ERROR_URL)
result = self.client_get(result.url)
self.assert_in_success_response([self.CLIENT_KEY_SETTING.lower()], result)
self.assert_in_success_response([self.CLIENT_SECRET_SETTING.lower()], result)
self.assert_in_success_response(["zproject/dev-secrets.conf"], result)
self.assert_not_in_success_response([self.CLIENT_KEY_SETTING], result)
self.assert_not_in_success_response(["zproject/dev_settings.py"], result)
self.assert_not_in_success_response(["/etc/zulip/settings.py"], result)
self.assert_not_in_success_response(["/etc/zulip/zulip-secrets.conf"], result)
@override_settings(DEVELOPMENT=False)
def test_config_error_production(self) -> None:
if hasattr(self, 'CLIENT_KEY_SETTING') and hasattr(self, 'CLIENT_SECRET_SETTING'):
with self.settings(**{self.CLIENT_KEY_SETTING: None}):
result = self.client_get(self.LOGIN_URL)
self.assertEqual(result.status_code, 302)
self.assertEqual(result.url, self.CONFIG_ERROR_URL)
result = self.client_get(result.url)
self.assert_in_success_response([self.CLIENT_KEY_SETTING], result)
self.assert_in_success_response(["/etc/zulip/settings.py"], result)
self.assert_in_success_response([self.CLIENT_SECRET_SETTING.lower()], result)
self.assert_in_success_response(["/etc/zulip/zulip-secrets.conf"], result)
self.assert_not_in_success_response([self.CLIENT_KEY_SETTING.lower()], result)
self.assert_not_in_success_response(["zproject/dev_settings.py"], result)
self.assert_not_in_success_response(["zproject/dev-secrets.conf"], result)
def test_social_auth_success(self) -> None:
account_data_dict = self.get_account_data_dict(email=self.email, name=self.name)
result = self.social_auth_test(account_data_dict,
expect_choose_email_screen=False,
subdomain='zulip', next='/user_uploads/image')
data = load_subdomain_token(result)
self.assertEqual(data['email'], self.example_email("hamlet"))
self.assertEqual(data['full_name'], self.name)
self.assertEqual(data['subdomain'], 'zulip')
self.assertEqual(data['redirect_to'], '/user_uploads/image')
self.assertEqual(result.status_code, 302)
parsed_url = urllib.parse.urlparse(result.url)
uri = f"{parsed_url.scheme}://{parsed_url.netloc}{parsed_url.path}"
self.assertTrue(uri.startswith('http://zulip.testserver/accounts/login/subdomain/'))
    @override_settings(SOCIAL_AUTH_SUBDOMAIN=None)
    def test_when_social_auth_subdomain_is_not_set(self) -> None:
        """The flow should still work end-to-end when SOCIAL_AUTH_SUBDOMAIN is
        unset, i.e. the auth callbacks run on the realm's own subdomain."""
        account_data_dict = self.get_account_data_dict(email=self.email, name=self.name)
        result = self.social_auth_test(account_data_dict,
                                       subdomain='zulip',
                                       expect_choose_email_screen=False,
                                       next='/user_uploads/image')
        data = load_subdomain_token(result)
        self.assertEqual(data['email'], self.example_email("hamlet"))
        self.assertEqual(data['full_name'], self.name)
        self.assertEqual(data['subdomain'], 'zulip')
        self.assertEqual(data['redirect_to'], '/user_uploads/image')
        self.assertEqual(result.status_code, 302)
        parsed_url = urllib.parse.urlparse(result.url)
        uri = f"{parsed_url.scheme}://{parsed_url.netloc}{parsed_url.path}"
        self.assertTrue(uri.startswith('http://zulip.testserver/accounts/login/subdomain/'))
    def test_social_auth_deactivated_user(self) -> None:
        """A deactivated account must not be able to log in: the user is sent
        back to /login/ with is_deactivated set, and the attempt is logged."""
        user_profile = self.example_user("hamlet")
        do_deactivate_user(user_profile)
        account_data_dict = self.get_account_data_dict(email=self.email, name=self.name)
        # We expect to go through the "choose email" screen here,
        # because there won't be an existing user account we can
        # auto-select for the user.
        with self.assertLogs(self.logger_string, level='INFO') as m:
            result = self.social_auth_test(account_data_dict,
                                           expect_choose_email_screen=True,
                                           subdomain='zulip')
            self.assertEqual(result.status_code, 302)
            self.assertEqual(result.url, "/login/?is_deactivated=true")
        self.assertEqual(m.output, [self.logger_output(f"Failed login attempt for deactivated account: {user_profile.id}@zulip", 'info')])
        # TODO: verify whether we provide a clear error message
    def test_social_auth_invalid_realm(self) -> None:
        """If the realm changes mid-flow (subdomain mismatch between start and
        finish), the user is sent to the realm-finding page, not logged in."""
        account_data_dict = self.get_account_data_dict(email=self.email, name=self.name)
        with mock.patch('zerver.middleware.get_realm', return_value=get_realm("zulip")):
            # This mock.patch case somewhat hackishly arranges it so
            # that we switch realms halfway through the test
            result = self.social_auth_test(account_data_dict,
                                           subdomain='invalid', next='/user_uploads/image')
        self.assertEqual(result.status_code, 302)
        self.assertEqual(result.url, "/accounts/find/")
def test_social_auth_invalid_email(self) -> None:
account_data_dict = self.get_account_data_dict(email="invalid", name=self.name)
result = self.social_auth_test(account_data_dict,
expect_choose_email_screen=True,
subdomain='zulip', next='/user_uploads/image')
self.assertEqual(result.status_code, 302)
self.assertEqual(result.url, "/login/?next=/user_uploads/image")
def test_user_cannot_log_into_nonexisting_realm(self) -> None:
account_data_dict = self.get_account_data_dict(email=self.email, name=self.name)
result = self.social_auth_test(account_data_dict,
subdomain='nonexistent')
self.assert_in_response("There is no Zulip organization hosted at this subdomain.",
result)
self.assertEqual(result.status_code, 404)
    def test_user_cannot_log_into_wrong_subdomain(self) -> None:
        """A valid identity for one realm must not grant access on another
        realm whose domain policy excludes the email address."""
        account_data_dict = self.get_account_data_dict(email=self.email, name=self.name)
        result = self.social_auth_test(account_data_dict,
                                       expect_choose_email_screen=True,
                                       subdomain='zephyr')
        self.assertTrue(result.url.startswith("http://zephyr.testserver/accounts/login/subdomain/"))
        result = self.client_get(result.url.replace('http://zephyr.testserver', ''),
                                 subdomain="zephyr")
        self.assert_in_success_response(['Your email address, hamlet@zulip.com, is not in one of the domains ',
                                         'that are allowed to register for accounts in this organization.'], result)
    def test_social_auth_mobile_success(self) -> None:
        """The mobile flow should reject malformed OTPs and, on success,
        redirect to the zulip:// scheme with an OTP-encrypted API key; a login
        email is sent since the account is older than JUST_CREATED_THRESHOLD."""
        mobile_flow_otp = '1234abcd' * 8
        account_data_dict = self.get_account_data_dict(email=self.email, name='Full Name')
        self.assertEqual(len(mail.outbox), 0)
        # Age the account past the "just created" window so the login email fires.
        self.user_profile.date_joined = timezone_now() - datetime.timedelta(seconds=JUST_CREATED_THRESHOLD + 1)
        self.user_profile.save()
        with self.settings(SEND_LOGIN_EMAILS=True):
            # Verify that the right thing happens with an invalid-format OTP
            result = self.social_auth_test(account_data_dict, subdomain='zulip',
                                           mobile_flow_otp="1234")
            self.assert_json_error(result, "Invalid OTP")
            result = self.social_auth_test(account_data_dict, subdomain='zulip',
                                           mobile_flow_otp="invalido" * 8)
            self.assert_json_error(result, "Invalid OTP")
            # Now do it correctly
            result = self.social_auth_test(account_data_dict, subdomain='zulip',
                                           expect_choose_email_screen=False,
                                           mobile_flow_otp=mobile_flow_otp)
        self.assertEqual(result.status_code, 302)
        redirect_url = result['Location']
        parsed_url = urllib.parse.urlparse(redirect_url)
        query_params = urllib.parse.parse_qs(parsed_url.query)
        self.assertEqual(parsed_url.scheme, 'zulip')
        self.assertEqual(query_params["realm"], ['http://zulip.testserver'])
        self.assertEqual(query_params["email"], [self.example_email("hamlet")])
        # The API key is returned encrypted with the client-supplied OTP.
        encrypted_api_key = query_params["otp_encrypted_api_key"][0]
        hamlet_api_keys = get_all_api_keys(self.example_user('hamlet'))
        self.assertIn(otp_decrypt_api_key(encrypted_api_key, mobile_flow_otp), hamlet_api_keys)
        self.assertEqual(len(mail.outbox), 1)
        self.assertIn('Zulip on Android', mail.outbox[0].body)
    def test_social_auth_desktop_success(self) -> None:
        """The desktop flow should reject malformed OTPs; a browser-looking
        desktop user agent gets the intermediate app page, and the flow then
        finishes on the desktop end page."""
        desktop_flow_otp = '1234abcd' * 8
        account_data_dict = self.get_account_data_dict(email=self.email, name='Full Name')
        # Verify that the right thing happens with an invalid-format OTP
        result = self.social_auth_test(account_data_dict, subdomain='zulip',
                                       desktop_flow_otp="1234")
        self.assert_json_error(result, "Invalid OTP")
        result = self.social_auth_test(account_data_dict, subdomain='zulip',
                                       desktop_flow_otp="invalido" * 8)
        self.assert_json_error(result, "Invalid OTP")
        # Now do it correctly
        result = self.social_auth_test(
            account_data_dict,
            subdomain='zulip',
            expect_choose_email_screen=False,
            desktop_flow_otp=desktop_flow_otp,
            user_agent="ZulipElectron/5.0.0",
        )
        self.verify_desktop_flow_app_page(result)
        result = self.social_auth_test(account_data_dict, subdomain='zulip',
                                       expect_choose_email_screen=False,
                                       desktop_flow_otp=desktop_flow_otp)
        self.verify_desktop_flow_end_page(result, self.email, desktop_flow_otp)
    def test_social_auth_session_fields_cleared_correctly(self) -> None:
        """mobile_flow_otp must not leak from one auth attempt's session into
        the next attempt that didn't supply it."""
        mobile_flow_otp = '1234abcd' * 8

        def initiate_auth(mobile_flow_otp: Optional[str]=None) -> None:
            # Kick off the auth flow (two redirects deep) with optional OTP.
            url, headers = self.prepare_login_url_and_headers(subdomain='zulip',
                                                              mobile_flow_otp=mobile_flow_otp)
            result = self.client_get(url, **headers)
            self.assertEqual(result.status_code, 302)
            result = self.client_get(result.url, **headers)
            self.assertEqual(result.status_code, 302)

        # Start social auth with mobile_flow_otp param. It should get saved into the session
        # on SOCIAL_AUTH_SUBDOMAIN.
        initiate_auth(mobile_flow_otp)
        self.assertEqual(self.client.session['mobile_flow_otp'], mobile_flow_otp)
        # Make a request without mobile_flow_otp param and verify the field doesn't persist
        # in the session from the previous request.
        initiate_auth()
        self.assertEqual(self.client.session.get('mobile_flow_otp'), None)
def test_social_auth_mobile_and_desktop_flow_in_one_request_error(self) -> None:
otp = '1234abcd' * 8
account_data_dict = self.get_account_data_dict(email=self.email, name='Full Name')
result = self.social_auth_test(account_data_dict, subdomain='zulip',
expect_choose_email_screen=False,
desktop_flow_otp=otp, mobile_flow_otp=otp)
self.assert_json_error(result, "Can't use both mobile_flow_otp and desktop_flow_otp together.")
    def test_social_auth_registration_existing_account(self) -> None:
        """If the user already exists, signup flow just logs them in;
        the existing account's name is kept, not the backend-provided one."""
        email = "hamlet@zulip.com"
        name = 'Full Name'
        account_data_dict = self.get_account_data_dict(email=email, name=name)
        result = self.social_auth_test(account_data_dict,
                                       expect_choose_email_screen=True,
                                       subdomain='zulip', is_signup=True)
        data = load_subdomain_token(result)
        self.assertEqual(data['email'], self.example_email("hamlet"))
        # Verify data has the full_name consistent with the user we're logging in as.
        self.assertEqual(data['full_name'], self.example_user("hamlet").full_name)
        self.assertEqual(data['subdomain'], 'zulip')
        self.assertEqual(result.status_code, 302)
        parsed_url = urllib.parse.urlparse(result.url)
        uri = f"{parsed_url.scheme}://{parsed_url.netloc}{parsed_url.path}"
        self.assertTrue(uri.startswith('http://zulip.testserver/accounts/login/subdomain/'))
        hamlet = self.example_user("hamlet")
        # Name wasn't changed at all
        self.assertEqual(hamlet.full_name, "King Hamlet")
    def stage_two_of_registration(self, result: HttpResponse, realm: Realm, subdomain: str,
                                  email: str, name: str, expected_final_name: str,
                                  skip_registration_form: bool,
                                  mobile_flow_otp: Optional[str]=None,
                                  desktop_flow_otp: Optional[str]=None,
                                  expect_confirm_registration_page: bool=False,
                                  expect_full_name_prepopulated: bool=True) -> None:
        """Drive the second half of a social-auth signup: validate the signed
        token, follow the confirmation link, optionally fill the registration
        form, then verify the final logged-in state.

        result: the response returned by social_auth_test with is_signup.
        expected_final_name: the name the created account should end up with.
        skip_registration_form: True when the backend validates the full name
            itself, so no form is shown.
        mobile_flow_otp / desktop_flow_otp: when set, verify the respective
            flow's final handoff instead of a plain browser login.
        """
        data = load_subdomain_token(result)
        self.assertEqual(data['email'], email)
        self.assertEqual(data['full_name'], name)
        self.assertEqual(data['subdomain'], 'zulip')
        self.assertEqual(result.status_code, 302)
        parsed_url = urllib.parse.urlparse(result.url)
        uri = f"{parsed_url.scheme}://{parsed_url.netloc}{parsed_url.path}"
        self.assertTrue(uri.startswith('http://zulip.testserver/accounts/login/subdomain/'))
        result = self.client_get(result.url)

        if expect_confirm_registration_page:
            self.assertEqual(result.status_code, 200)
        else:
            self.assertEqual(result.status_code, 302)
        confirmation = Confirmation.objects.all().last()
        confirmation_key = confirmation.confirmation_key
        if expect_confirm_registration_page:
            self.assert_in_success_response(['do_confirm/' + confirmation_key], result)
            do_confirm_url = '/accounts/do_confirm/' + confirmation_key
        else:
            self.assertIn('do_confirm/' + confirmation_key, result.url)
            do_confirm_url = result.url
        result = self.client_get(do_confirm_url, name = name)
        self.assert_in_response('action="/accounts/register/"', result)
        confirmation_data = {"from_confirmation": "1",
                             "key": confirmation_key}
        result = self.client_post('/accounts/register/', confirmation_data)
        if not skip_registration_form:
            self.assert_in_response("We just need you to do one last thing", result)

            # Verify that the user is asked for name but not password
            self.assert_not_in_success_response(['id_password'], result)
            self.assert_in_success_response(['id_full_name'], result)
            if expect_full_name_prepopulated:
                # Verify the name field gets correctly pre-populated:
                self.assert_in_success_response([expected_final_name], result)

            # Click confirm registration button.
            result = self.client_post(
                '/accounts/register/',
                {'full_name': expected_final_name,
                 'key': confirmation_key,
                 'terms': True})

        # Mobile and desktop flow have additional steps:
        if mobile_flow_otp:
            self.assertEqual(result.status_code, 302)
            redirect_url = result['Location']
            parsed_url = urllib.parse.urlparse(redirect_url)
            query_params = urllib.parse.parse_qs(parsed_url.query)
            self.assertEqual(parsed_url.scheme, 'zulip')
            self.assertEqual(query_params["realm"], ['http://zulip.testserver'])
            self.assertEqual(query_params["email"], [email])
            # The new account's API key comes back encrypted with the OTP.
            encrypted_api_key = query_params["otp_encrypted_api_key"][0]
            user_api_keys = get_all_api_keys(get_user_by_delivery_email(email, realm))
            self.assertIn(otp_decrypt_api_key(encrypted_api_key, mobile_flow_otp), user_api_keys)
            return
        elif desktop_flow_otp:
            self.verify_desktop_flow_end_page(result, email, desktop_flow_otp)
            # Now the desktop app is logged in, continue with the logged in check.
        else:
            self.assertEqual(result.status_code, 302)

        user_profile = get_user_by_delivery_email(email, realm)
        self.assert_logged_in_user_id(user_profile.id)
        self.assertEqual(user_profile.full_name, expected_final_name)

        # Social-auth accounts should not have a usable password.
        self.assertFalse(user_profile.has_usable_password())
@override_settings(TERMS_OF_SERVICE=None)
def test_social_auth_registration(self) -> None:
"""If the user doesn't exist yet, social auth can be used to register an account"""
email = "newuser@zulip.com"
name = 'Full Name'
subdomain = 'zulip'
realm = get_realm("zulip")
account_data_dict = self.get_account_data_dict(email=email, name=name)
result = self.social_auth_test(account_data_dict,
expect_choose_email_screen=True,
subdomain=subdomain, is_signup=True)
self.stage_two_of_registration(result, realm, subdomain, email, name, name,
self.BACKEND_CLASS.full_name_validated)
    @override_settings(TERMS_OF_SERVICE=None)
    def test_social_auth_mobile_registration(self) -> None:
        """New-account signup through the mobile flow: registration runs with a
        mobile_flow_otp, ending in an OTP-encrypted API key handoff."""
        email = "newuser@zulip.com"
        name = 'Full Name'
        subdomain = 'zulip'
        realm = get_realm("zulip")
        mobile_flow_otp = '1234abcd' * 8
        account_data_dict = self.get_account_data_dict(email=email, name=name)

        result = self.social_auth_test(account_data_dict, subdomain='zulip',
                                       expect_choose_email_screen=True,
                                       is_signup=True,
                                       mobile_flow_otp=mobile_flow_otp)
        self.stage_two_of_registration(result, realm, subdomain, email, name, name,
                                       self.BACKEND_CLASS.full_name_validated,
                                       mobile_flow_otp=mobile_flow_otp)
    @override_settings(TERMS_OF_SERVICE=None)
    def test_social_auth_desktop_registration(self) -> None:
        """New-account signup through the desktop flow: registration runs with
        a desktop_flow_otp and ends on the desktop flow's final page."""
        email = "newuser@zulip.com"
        name = 'Full Name'
        subdomain = 'zulip'
        realm = get_realm("zulip")
        desktop_flow_otp = '1234abcd' * 8
        account_data_dict = self.get_account_data_dict(email=email, name=name)

        result = self.social_auth_test(account_data_dict, subdomain='zulip',
                                       expect_choose_email_screen=True,
                                       is_signup=True,
                                       desktop_flow_otp=desktop_flow_otp)
        self.stage_two_of_registration(result, realm, subdomain, email, name, name,
                                       self.BACKEND_CLASS.full_name_validated,
                                       desktop_flow_otp=desktop_flow_otp)
@override_settings(TERMS_OF_SERVICE=None)
def test_social_auth_registration_invitation_exists(self) -> None:
"""
This tests the registration flow in the case where an invitation for the user
was generated.
"""
email = "newuser@zulip.com"
name = 'Full Name'
subdomain = 'zulip'
realm = get_realm("zulip")
iago = self.example_user("iago")
do_invite_users(iago, [email], [])
account_data_dict = self.get_account_data_dict(email=email, name=name)
result = self.social_auth_test(account_data_dict,
expect_choose_email_screen=True,
subdomain=subdomain, is_signup=True)
self.stage_two_of_registration(result, realm, subdomain, email, name, name,
self.BACKEND_CLASS.full_name_validated)
    @override_settings(TERMS_OF_SERVICE=None)
    def test_social_auth_registration_using_multiuse_invite(self) -> None:
        """Signup into a closed (invite-required) realm fails without an
        invitation, but succeeds via a multiuse invite link."""
        email = "newuser@zulip.com"
        name = 'Full Name'
        subdomain = 'zulip'
        realm = get_realm("zulip")
        realm.invite_required = True
        realm.save()

        # Build a multiuse invite that subscribes the new user to two streams.
        stream_names = ["new_stream_1", "new_stream_2"]
        streams = []
        for stream_name in set(stream_names):
            stream = ensure_stream(realm, stream_name)
            streams.append(stream)

        referrer = self.example_user("hamlet")
        multiuse_obj = MultiuseInvite.objects.create(realm=realm, referred_by=referrer)
        multiuse_obj.streams.set(streams)
        create_confirmation_link(multiuse_obj, Confirmation.MULTIUSE_INVITE)
        multiuse_confirmation = Confirmation.objects.all().last()
        multiuse_object_key = multiuse_confirmation.confirmation_key
        account_data_dict = self.get_account_data_dict(email=email, name=name)

        # First, try to signup for closed realm without using an invitation
        result = self.social_auth_test(account_data_dict,
                                       expect_choose_email_screen=True,
                                       subdomain=subdomain, is_signup=True)
        result = self.client_get(result.url)
        # Verify that we're unable to signup, since this is a closed realm
        self.assertEqual(result.status_code, 200)
        self.assert_in_success_response(["Sign up"], result)

        # With the multiuse invite key, registration proceeds normally.
        result = self.social_auth_test(account_data_dict, subdomain=subdomain, is_signup=True,
                                       expect_choose_email_screen=True,
                                       multiuse_object_key=multiuse_object_key)
        self.stage_two_of_registration(result, realm, subdomain, email, name, name,
                                       self.BACKEND_CLASS.full_name_validated)
    def test_social_auth_registration_without_is_signup(self) -> None:
        """If `is_signup` is not set then a new account isn't created;
        the user lands on a "no account found" page instead."""
        email = "newuser@zulip.com"
        name = 'Full Name'
        account_data_dict = self.get_account_data_dict(email=email, name=name)
        result = self.social_auth_test(account_data_dict,
                                       expect_choose_email_screen=True,
                                       subdomain='zulip')
        self.assertEqual(result.status_code, 302)
        data = load_subdomain_token(result)
        self.assertEqual(data['email'], email)
        self.assertEqual(data['full_name'], name)
        self.assertEqual(data['subdomain'], 'zulip')
        self.assertEqual(result.status_code, 302)
        parsed_url = urllib.parse.urlparse(result.url)
        uri = f"{parsed_url.scheme}://{parsed_url.netloc}{parsed_url.path}"
        self.assertTrue(uri.startswith('http://zulip.testserver/accounts/login/subdomain/'))
        result = self.client_get(result.url)
        self.assertEqual(result.status_code, 200)
        self.assert_in_response("No account found for newuser@zulip.com.", result)
    def test_social_auth_registration_without_is_signup_closed_realm(self) -> None:
        """If the user doesn't exist yet in closed realm, give an error"""
        realm = get_realm("zulip")
        # Restrict the realm so only emails in its allowed domains may register.
        do_set_realm_property(realm, "emails_restricted_to_domains", True)
        email = "nonexisting@phantom.com"
        name = 'Full Name'
        account_data_dict = self.get_account_data_dict(email=email, name=name)
        result = self.social_auth_test(account_data_dict,
                                       expect_choose_email_screen=True,
                                       subdomain='zulip')
        self.assertEqual(result.status_code, 302)
        data = load_subdomain_token(result)
        self.assertEqual(data['email'], email)
        self.assertEqual(data['full_name'], name)
        self.assertEqual(data['subdomain'], 'zulip')
        self.assertEqual(result.status_code, 302)
        parsed_url = urllib.parse.urlparse(result.url)
        uri = f"{parsed_url.scheme}://{parsed_url.netloc}{parsed_url.path}"
        self.assertTrue(uri.startswith('http://zulip.testserver/accounts/login/subdomain/'))
        result = self.client_get(result.url)
        self.assertEqual(result.status_code, 200)
        # The user is shown the registration page with a domain-policy error.
        self.assert_in_response('action="/register/"', result)
        self.assert_in_response('Your email address, {}, is not '
                                'in one of the domains that are allowed to register '
                                'for accounts in this organization.'.format(email), result)
    @override_settings(TERMS_OF_SERVICE=None)
    def test_social_auth_with_ldap_populate_registration_from_confirmation(self) -> None:
        """With ZulipLDAPUserPopulator enabled, a social-auth signup takes its
        full name from LDAP when the user exists there, and otherwise falls
        back to the name provided by the social backend."""
        self.init_default_ldap_database()
        email = "newuser@zulip.com"
        name = "Full Name"
        realm = get_realm("zulip")
        subdomain = "zulip"
        ldap_user_attr_map = {'full_name': 'cn'}
        account_data_dict = self.get_account_data_dict(email=email, name=name)

        backend_path = f'zproject.backends.{self.BACKEND_CLASS.__name__}'
        with self.settings(
            POPULATE_PROFILE_VIA_LDAP=True,
            LDAP_APPEND_DOMAIN='zulip.com',
            AUTH_LDAP_USER_ATTR_MAP=ldap_user_attr_map,
            AUTHENTICATION_BACKENDS=(backend_path,
                                     'zproject.backends.ZulipLDAPUserPopulator',
                                     'zproject.backends.ZulipDummyBackend'),
        ):
            result = self.social_auth_test(account_data_dict,
                                           expect_choose_email_screen=True,
                                           subdomain=subdomain, is_signup=True)
            # Full name should get populated from ldap:
            self.stage_two_of_registration(result, realm, subdomain, email, name, "New LDAP fullname",
                                           skip_registration_form=True)

            # Now try a user that doesn't exist in ldap:
            email = self.nonreg_email("alice")
            name = "Alice Social"
            account_data_dict = self.get_account_data_dict(email=email, name=name)
            result = self.social_auth_test(account_data_dict,
                                           expect_choose_email_screen=True,
                                           subdomain=subdomain, is_signup=True)
            # Full name should get populated as provided by the social backend, because
            # this user isn't in the ldap dictionary:
            self.stage_two_of_registration(result, realm, subdomain, email, name, name,
                                           skip_registration_form=self.BACKEND_CLASS.full_name_validated)
    @override_settings(TERMS_OF_SERVICE=None)
    def test_social_auth_with_ldap_auth_registration_from_confirmation(self) -> None:
        """
        This test checks that in configurations that use the ldap authentication backend
        and a social backend, it is possible to create non-ldap users via the social backend.
        """
        self.init_default_ldap_database()
        email = self.nonreg_email("alice")
        name = "Alice Social"
        realm = get_realm("zulip")
        subdomain = "zulip"
        ldap_user_attr_map = {'full_name': 'cn'}
        account_data_dict = self.get_account_data_dict(email=email, name=name)

        backend_path = f'zproject.backends.{self.BACKEND_CLASS.__name__}'
        with self.settings(
            POPULATE_PROFILE_VIA_LDAP=True,
            LDAP_EMAIL_ATTR='mail',
            AUTH_LDAP_USER_ATTR_MAP=ldap_user_attr_map,
            AUTHENTICATION_BACKENDS=(backend_path,
                                     'zproject.backends.ZulipLDAPAuthBackend',
                                     'zproject.backends.ZulipDummyBackend'),
        ):
            # NOTE(review): account_data_dict was already built identically
            # above; this reassignment looks redundant.
            account_data_dict = self.get_account_data_dict(email=email, name=name)
            result = self.social_auth_test(account_data_dict,
                                           expect_choose_email_screen=True,
                                           subdomain=subdomain, is_signup=True)
            # Full name should get populated as provided by the social backend, because
            # this user isn't in the ldap dictionary:
            self.stage_two_of_registration(result, realm, subdomain, email, name, name,
                                           skip_registration_form=self.BACKEND_CLASS.full_name_validated)
def test_social_auth_complete(self) -> None:
with mock.patch('social_core.backends.oauth.BaseOAuth2.process_error',
side_effect=AuthFailed('Not found')):
result = self.client_get(reverse('social:complete', args=[self.backend.name]))
self.assertEqual(result.status_code, 302)
self.assertIn('login', result.url)
with mock.patch('social_core.backends.oauth.BaseOAuth2.auth_complete',
side_effect=requests.exceptions.HTTPError):
result = self.client_get(reverse('social:complete', args=[self.backend.name]))
self.assertEqual(result.status_code, 302)
self.assertIn('login', result.url)
    def test_social_auth_complete_when_base_exc_is_raised(self) -> None:
        """AuthStateForbidden (e.g. a CSRF state mismatch) during completion
        should log a warning and redirect to login, not crash."""
        with mock.patch('social_core.backends.oauth.BaseOAuth2.auth_complete',
                        side_effect=AuthStateForbidden('State forbidden')), \
                self.assertLogs(self.logger_string, level='WARNING'):
            result = self.client_get(reverse('social:complete', args=[self.backend.name]))
            self.assertEqual(result.status_code, 302)
            self.assertIn('login', result.url)
class SAMLAuthBackendTest(SocialAuthBase):
    """Exercises the SAML backend through the shared SocialAuthBase flows,
    with SAML-specific overrides where the protocol differs."""
    __unittest_skip__ = False  # Unlike the abstract base class, this runs.

    BACKEND_CLASS = SAMLAuthBackend
    LOGIN_URL = "/accounts/login/social/saml/test_idp"
    SIGNUP_URL = "/accounts/register/social/saml/test_idp"
    AUTHORIZATION_URL = "https://idp.testshib.org/idp/profile/SAML2/Redirect/SSO"
    AUTH_FINISH_URL = "/complete/saml/"
    CONFIG_ERROR_URL = "/config-error/saml"

    # We have to define our own social_auth_test as the flow of SAML authentication
    # is different from the other social backends.
    def social_auth_test(self, account_data_dict: Dict[str, str],
                         *, subdomain: Optional[str]=None,
                         mobile_flow_otp: Optional[str]=None,
                         desktop_flow_otp: Optional[str]=None,
                         is_signup: bool=False,
                         next: str='',
                         multiuse_object_key: str='',
                         user_agent: Optional[str]=None,
                         **extra_data: Any) -> HttpResponse:
        """Simulate a full SAML login: start the flow, verify the redirect to
        the IdP carries our params in RelayState, then POST a fixture-based
        SAMLResponse back to the finish URL.

        Returns early with the raw response when the flow doesn't reach the
        expected login redirect (e.g. config-error cases).
        """
        url, headers = self.prepare_login_url_and_headers(
            subdomain,
            mobile_flow_otp,
            desktop_flow_otp,
            is_signup,
            next,
            multiuse_object_key,
            user_agent=user_agent,
        )

        result = self.client_get(url, **headers)

        expected_result_url_prefix = f'http://testserver/login/{self.backend.name}/'
        if settings.SOCIAL_AUTH_SUBDOMAIN is not None:
            expected_result_url_prefix = (
                f'http://{settings.SOCIAL_AUTH_SUBDOMAIN}.testserver/login/{self.backend.name}/'
            )

        if result.status_code != 302 or not result.url.startswith(expected_result_url_prefix):
            return result

        result = self.client_get(result.url, **headers)
        self.assertEqual(result.status_code, 302)
        assert self.AUTHORIZATION_URL in result.url
        assert "samlrequest" in result.url.lower()

        self.client.cookies = result.cookies
        parsed_url = urllib.parse.urlparse(result.url)
        relay_state = urllib.parse.parse_qs(parsed_url.query)['RelayState'][0]
        # Make sure params are getting encoded into RelayState:
        data = SAMLAuthBackend.get_data_from_redis(ujson.loads(relay_state)['state_token'])
        if next:
            self.assertEqual(data['next'], next)
        if is_signup:
            self.assertEqual(data['is_signup'], '1')

        saml_response = self.generate_saml_response(**account_data_dict)
        post_params = {"SAMLResponse": saml_response, "RelayState": relay_state}
        # The mock below is necessary, so that python3-saml accepts our SAMLResponse,
        # and doesn't verify the cryptographic signatures etc., since generating
        # a perfectly valid SAMLResponse for the purpose of these tests would be too complex,
        # and we simply use one loaded from a fixture file.
        with mock.patch.object(OneLogin_Saml2_Response, 'is_valid', return_value=True):
            # We are simulating a cross-domain POST request here. Session is a Lax cookie, meaning
            # it won't be sent by the browser in this request. To simulate that effect with the django
            # test client, we flush the session before the request.
            self.client.session.flush()
            result = self.client_post(self.AUTH_FINISH_URL, post_params, **headers)

        return result
def generate_saml_response(self, email: str, name: str) -> str:
"""
The samlresponse.txt fixture has a pre-generated SAMLResponse,
with {email}, {first_name}, {last_name} placeholders, that can
be filled out with the data we want.
"""
name_parts = name.split(' ')
first_name = name_parts[0]
last_name = name_parts[1]
unencoded_saml_response = self.fixture_data("samlresponse.txt", type="saml").format(
email=email,
first_name=first_name,
last_name=last_name,
)
# SAMLResponse needs to be base64-encoded.
saml_response: str = base64.b64encode(unencoded_saml_response.encode()).decode()
return saml_response
def get_account_data_dict(self, email: str, name: str) -> Dict[str, Any]:
return dict(email=email, name=name)
def test_social_auth_no_key(self) -> None:
"""
Since in the case of SAML there isn't a direct equivalent of CLIENT_KEY_SETTING,
we override this test, to test for the case where the obligatory
SOCIAL_AUTH_SAML_ENABLED_IDPS isn't configured.
"""
account_data_dict = self.get_account_data_dict(email=self.email, name=self.name)
with self.settings(SOCIAL_AUTH_SAML_ENABLED_IDPS=None):
result = self.social_auth_test(account_data_dict,
subdomain='zulip', next='/user_uploads/image')
self.assertEqual(result.status_code, 302)
self.assertEqual(result.url, self.CONFIG_ERROR_URL)
# Test the signup path too:
result = self.social_auth_test(account_data_dict, is_signup=True,
subdomain='zulip', next='/user_uploads/image')
self.assertEqual(result.status_code, 302)
self.assertEqual(result.url, self.CONFIG_ERROR_URL)
def test_config_error_page(self) -> None:
result = self.client_get("/accounts/login/social/saml")
self.assertEqual(result.status_code, 302)
self.assertEqual(result.url, '/config-error/saml')
result = self.client_get(result.url)
self.assert_in_success_response(["SAML authentication"], result)
    def test_saml_auth_works_without_private_public_keys(self) -> None:
        # SP certificates are optional in our SAML configuration; the full
        # login flow should still succeed when they are left empty.
        with self.settings(SOCIAL_AUTH_SAML_SP_PUBLIC_CERT='', SOCIAL_AUTH_SAML_SP_PRIVATE_KEY=''):
            self.test_social_auth_success()
    def test_saml_auth_enabled(self) -> None:
        """saml_auth_enabled() should reflect the configured backends, and the
        SP metadata endpoint should advertise the configured entity ID."""
        with self.settings(AUTHENTICATION_BACKENDS=('zproject.backends.SAMLAuthBackend',)):
            self.assertTrue(saml_auth_enabled())
            result = self.client_get("/saml/metadata.xml")
            self.assert_in_success_response(
                [f'entityID="{settings.SOCIAL_AUTH_SAML_SP_ENTITY_ID}"'], result,
            )
    def test_social_auth_complete(self) -> None:
        """A structurally valid but unauthenticated SAML assertion should
        raise AuthFailed internally, log at INFO, and redirect to login."""
        with mock.patch.object(OneLogin_Saml2_Response, 'is_valid', return_value=True):
            with mock.patch.object(OneLogin_Saml2_Auth, 'is_authenticated', return_value=False), \
                    self.assertLogs(self.logger_string, level='INFO') as m:
                # This mock causes AuthFailed to be raised.
                saml_response = self.generate_saml_response(self.email, self.name)
                relay_state = ujson.dumps(dict(
                    state_token=SAMLAuthBackend.put_data_in_redis({"subdomain": "zulip"}),
                ))
                post_params = {"SAMLResponse": saml_response, "RelayState": relay_state}
                result = self.client_post('/complete/saml/', post_params)
                self.assertEqual(result.status_code, 302)
                self.assertIn('login', result.url)
            self.assertEqual(m.output, [self.logger_output("AuthFailed: Authentication failed: SAML login failed: [] (None)",
                                                           'info')])
    def test_social_auth_complete_when_base_exc_is_raised(self) -> None:
        """AuthStateForbidden raised during SAML auth_complete should be
        handled gracefully: warn in the log and redirect to the login page."""
        with mock.patch.object(OneLogin_Saml2_Response, 'is_valid', return_value=True):
            with mock.patch('social_core.backends.saml.SAMLAuth.auth_complete',
                            side_effect=AuthStateForbidden('State forbidden')), \
                    self.assertLogs(self.logger_string, level='WARNING') as m:
                saml_response = self.generate_saml_response(self.email, self.name)
                relay_state = ujson.dumps(dict(
                    state_token=SAMLAuthBackend.put_data_in_redis({"subdomain": "zulip"}),
                ))
                post_params = {"SAMLResponse": saml_response, "RelayState": relay_state}
                result = self.client_post('/complete/saml/', post_params)
                self.assertEqual(result.status_code, 302)
                self.assertIn('login', result.url)
            self.assertEqual(m.output, [self.logger_output("Wrong state parameter given.", 'warning')])
def test_social_auth_complete_bad_params(self) -> None:
# Simple GET for /complete/saml without the required parameters.
# This tests the auth_complete wrapped in our SAMLAuthBackend,
# ensuring it prevents this requests from causing an internal server error.
with self.assertLogs(self.logger_string, level='INFO') as m:
result = self.client_get('/complete/saml/')
self.assertEqual(result.status_code, 302)
self.assertIn('login', result.url)
self.assertEqual(m.output, [self.logger_output("/complete/saml/: No SAMLResponse in request.", 'info')])
# Check that POSTing the RelayState, but with missing SAMLResponse,
# doesn't cause errors either:
with self.assertLogs(self.logger_string, level='INFO') as m:
relay_state = ujson.dumps(dict(
state_token=SAMLAuthBackend.put_data_in_redis({"subdomain": "zulip"}),
))
post_params = {"RelayState": relay_state}
result = self.client_post('/complete/saml/', post_params)
self.assertEqual(result.status_code, 302)
self.assertIn('login', result.url)
self.assertEqual(m.output, [self.logger_output("/complete/saml/: No SAMLResponse in request.", 'info')])
# Now test bad SAMLResponses.
with self.assertLogs(self.logger_string, level='INFO') as m:
relay_state = ujson.dumps(dict(
state_token=SAMLAuthBackend.put_data_in_redis({"subdomain": "zulip"}),
))
post_params = {"RelayState": relay_state, 'SAMLResponse': ''}
result = self.client_post('/complete/saml/', post_params)
self.assertEqual(result.status_code, 302)
self.assertIn('login', result.url)
self.assertTrue(m.output != '')
with self.assertLogs(self.logger_string, level='INFO') as m:
relay_state = ujson.dumps(dict(
state_token=SAMLAuthBackend.put_data_in_redis({"subdomain": "zulip"}),
))
post_params = {"RelayState": relay_state, 'SAMLResponse': 'b'}
result = self.client_post('/complete/saml/', post_params)
self.assertEqual(result.status_code, 302)
self.assertIn('login', result.url)
self.assertTrue(m.output != '')
with self.assertLogs(self.logger_string, level='INFO') as m:
relay_state = ujson.dumps(dict(
state_token=SAMLAuthBackend.put_data_in_redis({"subdomain": "zulip"}),
))
post_params = {"RelayState": relay_state, 'SAMLResponse': 'dGVzdA=='} # base64 encoded 'test'
result = self.client_post('/complete/saml/', post_params)
self.assertEqual(result.status_code, 302)
self.assertIn('login', result.url)
self.assertTrue(m.output != '')
    def test_social_auth_complete_no_subdomain(self) -> None:
        """If the subdomain can't be determined for the request, the user is
        redirected to /login/ and the situation is logged."""
        with self.assertLogs(self.logger_string, level='INFO') as m:
            post_params = {"RelayState": '',
                           'SAMLResponse': self.generate_saml_response(email=self.example_email("hamlet"),
                                                                       name="King Hamlet")}
            with mock.patch.object(SAMLAuthBackend, 'choose_subdomain', return_value=None):
                result = self.client_post('/complete/saml/', post_params)
            self.assertEqual(result.status_code, 302)
            self.assertEqual('/login/', result.url)
        self.assertEqual(m.output, [self.logger_output(
            "/complete/saml/: Can't figure out subdomain for this authentication request. relayed_params: {}".format("{}"),
            'info',
        )])
    def test_social_auth_complete_wrong_issuing_idp(self) -> None:
        """A SAMLResponse whose issuer doesn't match any configured IdP must
        be rejected with a redirect to login, and the rejection logged."""
        relay_state = ujson.dumps(dict(
            state_token=SAMLAuthBackend.put_data_in_redis({"subdomain": "zulip"}),
        ))
        saml_response = self.generate_saml_response(email=self.example_email("hamlet"),
                                                    name="King Hamlet")

        # We change the entity_id of the configured test IdP, which means it won't match
        # the Entity ID in the SAMLResponse generated above.
        idps_dict = copy.deepcopy(settings.SOCIAL_AUTH_SAML_ENABLED_IDPS)
        idps_dict['test_idp']['entity_id'] = 'https://different.idp.example.com/'
        with self.settings(SOCIAL_AUTH_SAML_ENABLED_IDPS=idps_dict):
            with self.assertLogs(self.logger_string, level='INFO') as m:
                post_params = {"RelayState": relay_state,
                               "SAMLResponse": saml_response}
                result = self.client_post('/complete/saml/', post_params)
                self.assertEqual(result.status_code, 302)
                self.assertEqual('/login/', result.url)
            self.assertEqual(m.output, [self.logger_output("/complete/saml/: No valid IdP as issuer of the SAMLResponse.", "info")])
def test_social_auth_complete_valid_get_idp_bad_samlresponse(self) -> None:
"""
This tests for a hypothetical scenario where our basic parsing of the SAMLResponse
successfully returns the issuing IdP, but it fails further down the line, during proper
validation in the underlying libraries.
"""
with self.assertLogs(self.logger_string, level='INFO') as m, \
mock.patch.object(SAMLAuthBackend, 'get_issuing_idp', return_value='test_idp'):
relay_state = ujson.dumps(dict(
state_token=SAMLAuthBackend.put_data_in_redis({"subdomain": "zulip"}),
))
post_params = {"RelayState": relay_state, 'SAMLResponse': 'dGVzdA=='}
result = self.client_post('/complete/saml/', post_params)
self.assertEqual(result.status_code, 302)
self.assertIn('login', result.url)
self.assertTrue(m.output != '')
def test_social_auth_saml_bad_idp_param_on_login_page(self) -> None:
with self.assertLogs(self.logger_string, level='INFO') as m:
result = self.client_get('/login/saml/')
self.assertEqual(result.status_code, 302)
self.assertEqual('/login/', result.url)
self.assertEqual(m.output, [self.logger_output("/login/saml/ : Bad idp param: KeyError: {}.".format("'idp'"), 'info')])
with self.assertLogs(self.logger_string, level='INFO') as m:
result = self.client_get('/login/saml/?idp=bad_idp')
self.assertEqual(result.status_code, 302)
self.assertEqual('/login/', result.url)
self.assertEqual(m.output, [self.logger_output("/login/saml/ : Bad idp param: KeyError: {}.".format("'bad_idp'"), 'info')])
def test_social_auth_invalid_email(self) -> None:
"""
This test needs an override from the original class. For security reasons,
the 'next' and 'mobile_flow_otp' params don't get passed on in the session
if the authentication attempt failed. See SAMLAuthBackend.auth_complete for details.
"""
account_data_dict = self.get_account_data_dict(email="invalid", name=self.name)
result = self.social_auth_test(account_data_dict,
expect_choose_email_screen=True,
subdomain='zulip', next='/user_uploads/image')
self.assertEqual(result.status_code, 302)
self.assertEqual(result.url, "/login/")
    def test_social_auth_saml_multiple_idps_configured(self) -> None:
        """Verify both IdP login buttons render and both IdPs can complete
        authentication when two IdPs are configured at once."""
        # Setup a new SOCIAL_AUTH_SAML_ENABLED_IDPS dict with two idps.
        # We deepcopy() dictionaries around for the sake of brevity,
        # to avoid having to spell them out explicitly here.
        # The second idp's configuration is a copy of the first one,
        # with name test_idp2 and altered url. It is also configured to be
        # limited to the zulip realm, so that we get to test both types
        # of configs here.
        idps_dict = copy.deepcopy(settings.SOCIAL_AUTH_SAML_ENABLED_IDPS)
        idps_dict['test_idp2'] = copy.deepcopy(idps_dict['test_idp'])
        idps_dict['test_idp2']['url'] = 'https://idp2.example.com/idp/profile/SAML2/Redirect/SSO'
        idps_dict['test_idp2']['display_name'] = 'Second Test IdP'
        idps_dict['test_idp2']['limit_to_subdomains'] = ['zulip']
        # Run tests with multiple idps configured:
        with self.settings(SOCIAL_AUTH_SAML_ENABLED_IDPS=idps_dict):
            # Go to the login page and check that buttons to log in show up for both IdPs:
            result = self.client_get('/accounts/login/')
            self.assert_in_success_response(["Log in with Test IdP"], result)
            self.assert_in_success_response(["/accounts/login/social/saml/test_idp"], result)
            self.assert_in_success_response(["Log in with Second Test IdP"], result)
            self.assert_in_success_response(["/accounts/login/social/saml/test_idp2"], result)
            # Try successful authentication with the regular idp from all previous tests:
            self.test_social_auth_success()
            # Now test with the second idp:
            # The class-level URLs are temporarily repointed at test_idp2 so
            # that the shared test_social_auth_success() exercises it.
            original_LOGIN_URL = self.LOGIN_URL
            original_SIGNUP_URL = self.SIGNUP_URL
            original_AUTHORIZATION_URL = self.AUTHORIZATION_URL
            self.LOGIN_URL = "/accounts/login/social/saml/test_idp2"
            self.SIGNUP_URL = "/accounts/register/social/saml/test_idp2"
            self.AUTHORIZATION_URL = idps_dict['test_idp2']['url']
            try:
                self.test_social_auth_success()
            finally:
                # Restore original values at the end, regardless of what happens
                # in the block above, to avoid affecting other tests in unpredictable
                # ways.
                self.LOGIN_URL = original_LOGIN_URL
                self.SIGNUP_URL = original_SIGNUP_URL
                self.AUTHORIZATION_URL = original_AUTHORIZATION_URL
def test_social_auth_saml_idp_limited_to_subdomains_success(self) -> None:
idps_dict = copy.deepcopy(settings.SOCIAL_AUTH_SAML_ENABLED_IDPS)
idps_dict['test_idp']['limit_to_subdomains'] = ['zulip']
with self.settings(SOCIAL_AUTH_SAML_ENABLED_IDPS=idps_dict):
self.test_social_auth_success()
def test_social_auth_saml_idp_limited_to_subdomains_attempt_wrong_realm(self) -> None:
idps_dict = copy.deepcopy(settings.SOCIAL_AUTH_SAML_ENABLED_IDPS)
idps_dict['test_idp']['limit_to_subdomains'] = ['zulip']
with self.settings(SOCIAL_AUTH_SAML_ENABLED_IDPS=idps_dict):
account_data_dict = self.get_account_data_dict(email=self.email, name=self.name)
with self.assertLogs(self.logger_string, level='INFO') as m:
result = self.social_auth_test(account_data_dict, subdomain='zephyr')
self.assertEqual(result.status_code, 302)
self.assertEqual('/login/', result.url)
self.assertEqual(m.output, [self.logger_output(
'/complete/saml/: Authentication request with IdP test_idp but this provider is not enabled '
'for this subdomain zephyr.', 'info',
)])
def test_social_auth_saml_login_bad_idp_arg(self) -> None:
for action in ['login', 'register']:
result = self.client_get(f'/accounts/{action}/social/saml')
# Missing idp argument.
self.assertEqual(result.status_code, 302)
self.assertEqual(result.url, '/config-error/saml')
result = self.client_get(f'/accounts/{action}/social/saml/nonexistent_idp')
# No such IdP is configured.
self.assertEqual(result.status_code, 302)
self.assertEqual(result.url, '/config-error/saml')
result = self.client_get(f'/accounts/{action}/social/saml/')
# No matching url pattern.
self.assertEqual(result.status_code, 404)
def test_social_auth_saml_require_limit_to_subdomains(self) -> None:
idps_dict = copy.deepcopy(settings.SOCIAL_AUTH_SAML_ENABLED_IDPS)
idps_dict['test_idp2'] = copy.deepcopy(idps_dict['test_idp'])
idps_dict['test_idp2']['url'] = 'https://idp2.example.com/idp/profile/SAML2/Redirect/SSO'
idps_dict['test_idp2']['display_name'] = 'Second Test IdP'
idps_dict['test_idp2']['limit_to_subdomains'] = ['zulip']
with self.settings(SOCIAL_AUTH_SAML_ENABLED_IDPS=idps_dict,
SAML_REQUIRE_LIMIT_TO_SUBDOMAINS=True):
with self.assertLogs(self.logger_string, level="ERROR") as m:
# Initialization of the backend should validate the configured IdPs
# with respect to the SAML_REQUIRE_LIMIT_TO_SUBDOMAINS setting and remove
# the non-compliant ones.
SAMLAuthBackend()
self.assertEqual(list(settings.SOCIAL_AUTH_SAML_ENABLED_IDPS.keys()),
['test_idp2'])
self.assertEqual(m.output, [self.logger_output(
"SAML_REQUIRE_LIMIT_TO_SUBDOMAINS is enabled and the following "
"IdPs don't have limit_to_subdomains specified and will be ignored: "
"['test_idp']", 'error',
)])
def test_idp_initiated_signin_subdomain_specified(self) -> None:
post_params = {
"RelayState": '{"subdomain": "zulip"}',
"SAMLResponse": self.generate_saml_response(email=self.email, name=self.name),
}
with mock.patch.object(OneLogin_Saml2_Response, 'is_valid', return_value=True):
# We're not able to generate valid signatures in tests, so we need the mock.
result = self.client_post('/complete/saml/', post_params)
data = load_subdomain_token(result)
self.assertEqual(data['email'], self.example_email("hamlet"))
self.assertEqual(data['full_name'], self.name)
self.assertEqual(data['subdomain'], 'zulip')
self.assertEqual(result.status_code, 302)
parsed_url = urllib.parse.urlparse(result.url)
uri = f"{parsed_url.scheme}://{parsed_url.netloc}{parsed_url.path}"
self.assertTrue(uri.startswith('http://zulip.testserver/accounts/login/subdomain/'))
self.client_get(uri)
self.assert_logged_in_user_id(self.example_user("hamlet").id)
def test_choose_subdomain_invalid_subdomain_specified(self) -> None:
post_params = {
"RelayState": '{"subdomain": "invalid"}',
"SAMLResponse": self.generate_saml_response(email=self.email, name=self.name),
}
with mock.patch.object(OneLogin_Saml2_Response, 'is_valid', return_value=True):
# We're not able to generate valid signatures in tests, so we need the mock.
result = self.client_post('/complete/saml/', post_params)
self.assertEqual(result.status_code, 302)
self.assertEqual(result.url, "/accounts/find/")
def test_idp_initiated_signin_subdomain_implicit(self) -> None:
post_params = {
"RelayState": '',
"SAMLResponse": self.generate_saml_response(email=self.email, name=self.name),
}
with mock.patch.object(OneLogin_Saml2_Response, 'is_valid', return_value=True):
# We're not able to generate valid signatures in tests, so we need the mock.
result = self.client_post('http://zulip.testserver/complete/saml/', post_params)
data = load_subdomain_token(result)
self.assertEqual(data['email'], self.example_email("hamlet"))
self.assertEqual(data['full_name'], self.name)
self.assertEqual(data['subdomain'], 'zulip')
self.assertEqual(result.status_code, 302)
parsed_url = urllib.parse.urlparse(result.url)
uri = f"{parsed_url.scheme}://{parsed_url.netloc}{parsed_url.path}"
self.assertTrue(uri.startswith('http://zulip.testserver/accounts/login/subdomain/'))
self.client_get(uri)
self.assert_logged_in_user_id(self.example_user("hamlet").id)
def test_idp_initiated_signin_subdomain_implicit_no_relaystate_param(self) -> None:
post_params = {
"SAMLResponse": self.generate_saml_response(email=self.email, name=self.name),
}
with mock.patch.object(OneLogin_Saml2_Response, 'is_valid', return_value=True):
# We're not able to generate valid signatures in tests, so we need the mock.
result = self.client_post('http://zulip.testserver/complete/saml/', post_params)
data = load_subdomain_token(result)
self.assertEqual(data['email'], self.example_email("hamlet"))
self.assertEqual(data['full_name'], self.name)
self.assertEqual(data['subdomain'], 'zulip')
self.assertEqual(result.status_code, 302)
parsed_url = urllib.parse.urlparse(result.url)
uri = f"{parsed_url.scheme}://{parsed_url.netloc}{parsed_url.path}"
self.assertTrue(uri.startswith('http://zulip.testserver/accounts/login/subdomain/'))
self.client_get(uri)
self.assert_logged_in_user_id(self.example_user("hamlet").id)
def test_idp_initiated_signin_subdomain_implicit_invalid(self) -> None:
post_params = {
"RelayState": '',
"SAMLResponse": self.generate_saml_response(email=self.email, name=self.name),
}
with self.assertLogs(self.logger_string, level='INFO') as m:
with mock.patch('zproject.backends.get_subdomain', return_value='invalid'):
# Due to the quirks of our test setup, get_subdomain on all these `some_subdomain.testserver`
# requests returns 'zulip', so we need to mock it here.
result = self.client_post('http://invalid.testserver/complete/saml/', post_params)
self.assertEqual(result.status_code, 302)
self.assertEqual('/login/', result.url)
self.assertEqual(m.output, [self.logger_output(
"/complete/saml/: Can't figure out subdomain for this authentication request. relayed_params: {}",
"info",
)])
class AppleAuthMixin:
    """Shared constants and helpers for the "Sign in with Apple" test
    classes (both the regular web flow and the native app flow)."""

    BACKEND_CLASS = AppleAuthBackend
    CLIENT_KEY_SETTING = "SOCIAL_AUTH_APPLE_KEY"
    AUTHORIZATION_URL = "https://appleid.apple.com/auth/authorize"
    ACCESS_TOKEN_URL = "https://appleid.apple.com/auth/token"
    AUTH_FINISH_URL = "/complete/apple/"
    CONFIG_ERROR_URL = "/config-error/apple"

    def generate_id_token(self, account_data_dict: Dict[str, str], audience: Optional[str]=None) -> str:
        """Return a signed JWT id_token containing account_data_dict,
        signed with the test key from the settings.

        NOTE: this mutates account_data_dict (sets its 'aud' key) --
        callers in these tests don't reuse the dict afterwards.
        """
        payload = account_data_dict
        # This setup is important because python-social-auth decodes `id_token`
        # with `SOCIAL_AUTH_APPLE_CLIENT` as the `audience`
        payload['aud'] = settings.SOCIAL_AUTH_APPLE_CLIENT
        if audience is not None:
            payload['aud'] = audience
        headers = {"kid": "SOMEKID"}
        private_key = settings.APPLE_ID_TOKEN_GENERATION_KEY
        id_token = jwt.encode(payload, private_key, algorithm='RS256',
                              headers=headers).decode('utf-8')
        return id_token

    def get_account_data_dict(self, email: str, name: str) -> Dict[str, Any]:
        """Build account data in the shape Apple's id_token provides it:
        the name is a dict with firstName/lastName keys."""
        name_parts = name.split(' ')
        first_name = name_parts[0]
        last_name = ''
        # Bug fix: the original condition was `len(name_parts) > 0`, which
        # is always true (str.split always yields at least one element), so
        # a single-word name was duplicated as both first and last name.
        if len(name_parts) > 1:
            last_name = name_parts[-1]
        name_dict = {'firstName': first_name, 'lastName': last_name}
        return dict(email=email, name=name_dict, email_verified=True)
class AppleIdAuthBackendTest(AppleAuthMixin, SocialAuthBase):
    # Tests for the browser-based (web) "Sign in with Apple" flow.
    __unittest_skip__ = False
    LOGIN_URL = "/accounts/login/social/apple"
    SIGNUP_URL = "/accounts/register/social/apple"
    # This URL isn't used in the Apple auth flow, so we just set a
    # dummy value to keep SocialAuthBase common code happy.
    USER_INFO_URL = '/invalid-unused-url'
    def social_auth_test_finish(self, result: HttpResponse,
                                account_data_dict: Dict[str, str],
                                expect_choose_email_screen: bool,
                                **headers: Any) -> HttpResponse:
        # Complete the flow: extract the state token from the redirect URL
        # and POST it to the finish endpoint.  The session is flushed first,
        # so the state token alone must be sufficient.
        parsed_url = urllib.parse.urlparse(result.url)
        state = urllib.parse.parse_qs(parsed_url.query)['state']
        self.client.session.flush()
        result = self.client_post(self.AUTH_FINISH_URL,
                                  dict(state=state), **headers)
        return result
    def register_extra_endpoints(self, requests_mock: responses.RequestsMock,
                                 account_data_dict: Dict[str, str],
                                 **extra_data: Any) -> None:
        # This is an URL of an endpoint on Apple servers that returns
        # the public keys to be used for verifying the signature
        # on the JWT id_token.
        requests_mock.add(
            requests_mock.GET,
            self.BACKEND_CLASS.JWK_URL,
            status=200,
            json=json.loads(settings.APPLE_JWK),
        )
    def generate_access_url_payload(self, account_data_dict: Dict[str, str]) -> str:
        # The ACCESS_TOKEN_URL endpoint works a bit different in standard Oauth2,
        # where the token_data_dict contains some essential data. we add that data here.
        return json.dumps({
            'access_token': 'foobar',
            'expires_in': time.time() + 60*5,
            'id_token': self.generate_id_token(account_data_dict),
            'token_type': 'bearer',
        })
    def test_apple_auth_enabled(self) -> None:
        # apple_auth_enabled() should report True when AppleAuthBackend is
        # among the configured authentication backends.
        with self.settings(AUTHENTICATION_BACKENDS=('zproject.backends.AppleAuthBackend',)):
            self.assertTrue(apple_auth_enabled())
    def test_get_apple_locale(self) -> None:
        # Check the language-code -> Apple locale mapping, including the
        # en_US fallback for an unrecognized code.
        language_locale = [('ar', 'ar_SA'), ('ca', 'ca_ES'), ('cs', 'cs_CZ'),
                           ('da', 'da_DK'), ('de', 'de_DE'), ('el', 'el_GR'),
                           ('en', 'en_US'), ('es', 'es_ES'), ('fi', 'fi_FI'),
                           ('fr', 'fr_FR'), ('hr', 'hr_HR'), ('hu', 'hu_HU'),
                           ('id', 'id_ID'), ('it', 'it_IT'), ('iw', 'iw_IL'),
                           ('ja', 'ja_JP'), ('ko', 'ko_KR'), ('ms', 'ms_MY'),
                           ('nl', 'nl_NL'), ('no', 'no_NO'), ('pl', 'pl_PL'),
                           ('pt', 'pt_PT'), ('ro', 'ro_RO'), ('ru', 'ru_RU'),
                           ('sk', 'sk_SK'), ('sv', 'sv_SE'), ('th', 'th_TH'),
                           ('tr', 'tr_TR'), ('uk', 'uk_UA'), ('vi', 'vi_VI'),
                           ('zh', 'zh_CN')]
        for language_code, locale in language_locale:
            self.assertEqual(AppleAuthBackend.get_apple_locale(language_code), locale)
        # return 'en_US' if invalid `language_code` is given.
        self.assertEqual(AppleAuthBackend.get_apple_locale(':)'), 'en_US')
    def test_auth_registration_with_no_name_sent_from_apple(self) -> None:
        """
        Apple doesn't send the name in consecutive attempts if user registration
        fails the first time. This tests verifies that the social pipeline is able
        to handle the case of the backend not providing this information.
        """
        email = "newuser@zulip.com"
        subdomain = "zulip"
        realm = get_realm("zulip")
        account_data_dict = self.get_account_data_dict(email=email, name='')
        result = self.social_auth_test(account_data_dict,
                                       expect_choose_email_screen=True,
                                       subdomain=subdomain, is_signup=True)
        self.stage_two_of_registration(result, realm, subdomain, email, '', 'Full Name',
                                       skip_registration_form=False,
                                       expect_full_name_prepopulated=False)
    def test_id_token_verification_failure(self) -> None:
        # If decoding/validating the JWT id_token raises, authentication
        # must fail and redirect back to the login page.
        account_data_dict = self.get_account_data_dict(email=self.email, name=self.name)
        with self.assertLogs(self.logger_string, level='INFO') as m:
            with mock.patch("jwt.decode", side_effect=jwt.exceptions.PyJWTError):
                result = self.social_auth_test(account_data_dict,
                                               expect_choose_email_screen=True,
                                               subdomain='zulip', is_signup=True)
        self.assertEqual(result.status_code, 302)
        self.assertEqual(result.url, "/login/")
        self.assertEqual(m.output, [
            self.logger_output("AuthFailed: Authentication failed: Token validation failed", "info"),
        ])
    def test_validate_state(self) -> None:
        # Auth must fail both when no state is sent at all and when the
        # state has no matching data stored in redis.
        with self.assertLogs(self.logger_string, level='INFO') as m:
            # (1) check if auth fails if no state value is sent.
            result = self.client_post('/complete/apple/')
            self.assertEqual(result.status_code, 302)
            self.assertIn('login', result.url)
            # (2) Check if auth fails when a state sent has no valid data stored in redis.
            fake_state = "fa42e4ccdb630f0070c1daab70ad198d8786d4b639cd7a1b4db4d5a13c623060"
            result = self.client_post('/complete/apple/', {'state': fake_state})
            self.assertEqual(result.status_code, 302)
            self.assertIn('login', result.url)
        self.assertEqual(m.output, [
            self.logger_output("Sign in with Apple failed: missing state parameter.", "info"),  # (1)
            self.logger_output("Missing needed parameter state", "warning"),
            self.logger_output("Sign in with Apple failed: bad state token.", "info"),  # (2)
            self.logger_output("Wrong state parameter given.", "warning"),
        ])
class AppleAuthBackendNativeFlowTest(AppleAuthMixin, SocialAuthBase):
    """Tests for the native "Sign in with Apple" flow, in which the client
    app obtains the JWT id_token directly from Apple and then presents it
    to the Zulip server's /complete/apple/ endpoint."""

    __unittest_skip__ = False

    SIGNUP_URL = '/complete/apple/'
    LOGIN_URL = '/complete/apple/'

    def prepare_login_url_and_headers(
        self,
        subdomain: Optional[str]=None,
        mobile_flow_otp: Optional[str]=None,
        desktop_flow_otp: Optional[str]=None,
        is_signup: bool=False,
        next: str='',
        multiuse_object_key: str='',
        alternative_start_url: Optional[str]=None,
        id_token: Optional[str]=None,
        *,
        user_agent: Optional[str]=None,
    ) -> Tuple[str, Dict[str, Any]]:
        """Extend the base helper with the extra query parameters the
        native flow needs: native_flow, id_token, is_signup, subdomain."""
        url, headers = super().prepare_login_url_and_headers(
            subdomain, mobile_flow_otp, desktop_flow_otp, is_signup, next,
            multiuse_object_key, alternative_start_url=alternative_start_url,
            user_agent=user_agent,
        )

        params = {'native_flow': 'true'}
        if id_token is not None:
            params['id_token'] = id_token
        if is_signup:
            params['is_signup'] = '1'
        if subdomain:
            params['subdomain'] = subdomain
        url += f"&{urllib.parse.urlencode(params)}"
        return url, headers

    def social_auth_test(self, account_data_dict: Dict[str, str],
                         *, subdomain: Optional[str]=None,
                         mobile_flow_otp: Optional[str]=None,
                         desktop_flow_otp: Optional[str]=None,
                         is_signup: bool=False,
                         next: str='',
                         multiuse_object_key: str='',
                         alternative_start_url: Optional[str]=None,
                         skip_id_token: bool=False,
                         user_agent: Optional[str]=None,
                         **extra_data: Any) -> HttpResponse:
        """In Apple's native authentication flow, the client app authenticates
        with Apple and receives the JWT id_token, before contacting
        the Zulip server. The app sends an appropriate request with
        it to /complete/apple/ to get logged in. See the backend
        class for details.

        As a result, we need a custom social_auth_test function that
        effectively just does the second half of the flow (i.e. the
        part after the redirect from this third-party authentication
        provider) with a properly generated id_token.
        """
        if not skip_id_token:
            id_token: Optional[str] = self.generate_id_token(
                account_data_dict, settings.SOCIAL_AUTH_APPLE_BUNDLE_ID)
        else:
            id_token = None

        url, headers = self.prepare_login_url_and_headers(
            subdomain, mobile_flow_otp, desktop_flow_otp, is_signup, next,
            multiuse_object_key, alternative_start_url=self.AUTH_FINISH_URL,
            user_agent=user_agent, id_token=id_token,
        )

        with self.apple_jwk_url_mock():
            result = self.client_get(url, **headers)
        return result

    @contextmanager
    def apple_jwk_url_mock(self) -> Iterator[None]:
        # The server fetches public keys for validating the id_token
        # from Apple servers. We need to mock that URL to return our key,
        # created for these tests.
        with responses.RequestsMock(assert_all_requests_are_fired=False) as requests_mock:
            requests_mock.add(
                requests_mock.GET,
                self.BACKEND_CLASS.JWK_URL,
                status=200,
                json=json.loads(settings.APPLE_JWK),
            )
            yield

    def test_no_id_token_sent(self) -> None:
        # Omitting the id_token parameter must produce a JSON error.
        account_data_dict = self.get_account_data_dict(email=self.email, name=self.name)
        result = self.social_auth_test(account_data_dict,
                                       expect_choose_email_screen=False,
                                       subdomain='zulip', next='/user_uploads/image',
                                       skip_id_token=True)
        self.assert_json_error(result, "Missing id_token parameter")

    def test_social_auth_session_fields_cleared_correctly(self) -> None:
        """mobile_flow_otp gets stashed in the session on the auth
        subdomain; a later request without the parameter must not inherit
        the stale value from the previous request."""
        mobile_flow_otp = '1234abcd' * 8

        def initiate_auth(mobile_flow_otp: Optional[str]=None) -> None:
            # Kick off the native flow with a deliberately invalid
            # id_token; only the session side effects matter here.
            url, headers = self.prepare_login_url_and_headers(subdomain='zulip',
                                                              id_token='invalid',
                                                              mobile_flow_otp=mobile_flow_otp)
            result = self.client_get(url, **headers)
            self.assertEqual(result.status_code, 302)

        # Start Apple auth with mobile_flow_otp param. It should get saved into the session
        # on SOCIAL_AUTH_SUBDOMAIN.
        initiate_auth(mobile_flow_otp)
        self.assertEqual(self.client.session['mobile_flow_otp'], mobile_flow_otp)

        # Make a request without mobile_flow_otp param and verify the field doesn't persist
        # in the session from the previous request.
        initiate_auth()
        self.assertEqual(self.client.session.get('mobile_flow_otp'), None)

    def test_id_token_with_invalid_aud_sent(self) -> None:
        # An id_token whose `aud` doesn't match SOCIAL_AUTH_APPLE_BUNDLE_ID
        # must fail token validation.
        account_data_dict = self.get_account_data_dict(email=self.email, name=self.name)
        url, headers = self.prepare_login_url_and_headers(
            subdomain='zulip', alternative_start_url=self.AUTH_FINISH_URL,
            id_token=self.generate_id_token(account_data_dict, audience='com.different.app'),
        )
        with self.apple_jwk_url_mock(), mock.patch('logging.info') as mock_info:
            self.client_get(url, **headers)
            mock_info.assert_called_once_with('/complete/apple/: %s',
                                              'Authentication failed: Token validation failed')
        # Fixed: the original ended with a stray `return result`; test
        # methods should not return values (unittest ignores them, and
        # pytest warns about non-None returns).

    def test_social_auth_desktop_success(self) -> None:
        """
        The desktop app doesn't use the native flow currently and the desktop app flow in its
        current form happens in the browser, thus only the webflow is viable there.
        """
        pass

    def test_social_auth_no_key(self) -> None:
        """
        The basic validation of server configuration is handled on the
        /login/social/apple/ endpoint which isn't even a part of the native flow.
        """
        pass
class GitHubAuthBackendTest(SocialAuthBase):
    # Tests for the GitHub OAuth2 authentication backend.
    __unittest_skip__ = False
    BACKEND_CLASS = GitHubAuthBackend
    CLIENT_KEY_SETTING = "SOCIAL_AUTH_GITHUB_KEY"
    CLIENT_SECRET_SETTING = "SOCIAL_AUTH_GITHUB_SECRET"
    LOGIN_URL = "/accounts/login/social/github"
    SIGNUP_URL = "/accounts/register/social/github"
    AUTHORIZATION_URL = "https://github.com/login/oauth/authorize"
    ACCESS_TOKEN_URL = "https://github.com/login/oauth/access_token"
    USER_INFO_URL = "https://api.github.com/user"
    AUTH_FINISH_URL = "/complete/github/"
    CONFIG_ERROR_URL = "/config-error/github"
    # The email list most recently registered with the mocked GitHub
    # emails endpoint; social_auth_test_finish checks the "choose email"
    # screen against it.
    email_data: List[Dict[str, Any]] = []
    def social_auth_test_finish(self, result: HttpResponse,
                                account_data_dict: Dict[str, str],
                                expect_choose_email_screen: bool,
                                **headers: Any) -> HttpResponse:
        # Finish the OAuth2 flow by following the redirect's state token,
        # optionally verifying and driving the "choose email" screen.
        parsed_url = urllib.parse.urlparse(result.url)
        csrf_state = urllib.parse.parse_qs(parsed_url.query)['state']
        result = self.client_get(self.AUTH_FINISH_URL,
                                 dict(state=csrf_state), **headers)
        if expect_choose_email_screen:
            # As GitHub authenticates multiple email addresses,
            # we'll have an additional screen where the user selects
            # which email address to login using (this screen is a
            # "partial" state of the python-social-auth pipeline).
            #
            # TODO: Generalize this testing code for use with other
            # authentication backends when a new authentacation backend
            # that requires "choose email" screen;
            self.assert_in_success_response(["Select account"], result)
            # Verify that all the emails returned by GitHub auth
            # are in the "choose email" screen.
            all_emails_verified = True
            for email_data_dict in self.email_data:
                email = email_data_dict["email"]
                if email.endswith("noreply.github.com"):
                    # GitHub's synthetic noreply addresses are never shown.
                    self.assert_not_in_success_response([email], result)
                elif email_data_dict.get('verified'):
                    self.assert_in_success_response([email], result)
                else:
                    # We may change this if we provide a way to see
                    # the list of emails the user had.
                    self.assert_not_in_success_response([email], result)
                    all_emails_verified = False
            if all_emails_verified:
                self.assert_not_in_success_response(["also has unverified email"], result)
            else:
                self.assert_in_success_response(["also has unverified email"], result)
            # Submit the chosen email to complete the pipeline.
            result = self.client_get(self.AUTH_FINISH_URL,
                                     dict(state=csrf_state, email=account_data_dict['email']), **headers)
        return result
def register_extra_endpoints(self, requests_mock: responses.RequestsMock,
account_data_dict: Dict[str, str],
**extra_data: Any) -> None:
# Keeping a verified email before the primary email makes sure
# get_verified_emails puts the primary email at the start of the
# email list returned as social_associate_user_helper assumes the
# first email as the primary email.
email_data = [
dict(email="notprimary@example.com",
verified=True),
dict(email=account_data_dict["email"],
verified=True,
primary=True),
dict(email="ignored@example.com",
verified=False),
]
email_data = extra_data.get("email_data", email_data)
requests_mock.add(
requests_mock.GET,
"https://api.github.com/user/emails",
status=200,
body=json.dumps(email_data),
)
requests_mock.add(
requests_mock.GET,
"https://api.github.com/teams/zulip-webapp/members/None",
status=200,
body=json.dumps(email_data),
)
self.email_data = email_data
def get_account_data_dict(self, email: str, name: str) -> Dict[str, Any]:
return dict(email=email, name=name)
def test_social_auth_email_not_verified(self) -> None:
account_data_dict = self.get_account_data_dict(email=self.email, name=self.name)
email_data = [
dict(email=account_data_dict["email"],
verified=False,
primary=True),
]
with self.assertLogs(self.logger_string, level='WARNING') as m:
result = self.social_auth_test(account_data_dict,
subdomain='zulip',
email_data=email_data)
self.assertEqual(result.status_code, 302)
self.assertEqual(result.url, "/login/")
self.assertEqual(m.output, [self.logger_output(
"Social auth ({}) failed because user has no verified emails".format('GitHub'),
"warning",
)])
@override_settings(SOCIAL_AUTH_GITHUB_TEAM_ID='zulip-webapp')
def test_social_auth_github_team_not_member_failed(self) -> None:
account_data_dict = self.get_account_data_dict(email=self.email, name=self.name)
with mock.patch('social_core.backends.github.GithubTeamOAuth2.user_data',
side_effect=AuthFailed('Not found')), \
self.assertLogs(self.logger_string, level='INFO') as mock_info:
result = self.social_auth_test(account_data_dict,
subdomain='zulip')
self.assertEqual(result.status_code, 302)
self.assertEqual(result.url, "/login/")
self.assertEqual(mock_info.output, [self.logger_output(
"GitHub user is not member of required team", 'info',
)])
@override_settings(SOCIAL_AUTH_GITHUB_TEAM_ID='zulip-webapp')
def test_social_auth_github_team_member_success(self) -> None:
account_data_dict = self.get_account_data_dict(email=self.email, name=self.name)
with mock.patch('social_core.backends.github.GithubTeamOAuth2.user_data',
return_value=account_data_dict):
result = self.social_auth_test(account_data_dict,
expect_choose_email_screen=False,
subdomain='zulip')
data = load_subdomain_token(result)
self.assertEqual(data['email'], self.example_email("hamlet"))
self.assertEqual(data['full_name'], self.name)
self.assertEqual(data['subdomain'], 'zulip')
@override_settings(SOCIAL_AUTH_GITHUB_ORG_NAME='Zulip')
def test_social_auth_github_organization_not_member_failed(self) -> None:
account_data_dict = self.get_account_data_dict(email=self.email, name=self.name)
with mock.patch('social_core.backends.github.GithubOrganizationOAuth2.user_data',
side_effect=AuthFailed('Not found')), \
self.assertLogs(self.logger_string, level='INFO') as mock_info:
result = self.social_auth_test(account_data_dict,
subdomain='zulip')
self.assertEqual(result.status_code, 302)
self.assertEqual(result.url, "/login/")
self.assertEqual(mock_info.output, [self.logger_output(
"GitHub user is not member of required organization", "info",
)])
@override_settings(SOCIAL_AUTH_GITHUB_ORG_NAME='Zulip')
def test_social_auth_github_organization_member_success(self) -> None:
account_data_dict = self.get_account_data_dict(email=self.email, name=self.name)
with mock.patch('social_core.backends.github.GithubOrganizationOAuth2.user_data',
return_value=account_data_dict):
result = self.social_auth_test(account_data_dict,
expect_choose_email_screen=False,
subdomain='zulip')
data = load_subdomain_token(result)
self.assertEqual(data['email'], self.example_email("hamlet"))
self.assertEqual(data['full_name'], self.name)
self.assertEqual(data['subdomain'], 'zulip')
def test_github_auth_enabled(self) -> None:
with self.settings(AUTHENTICATION_BACKENDS=('zproject.backends.GitHubAuthBackend',)):
self.assertTrue(github_auth_enabled())
def test_github_oauth2_success_non_primary(self) -> None:
account_data_dict = dict(email='nonprimary@zulip.com', name="Non Primary")
email_data = [
dict(email=account_data_dict["email"],
verified=True),
dict(email='hamlet@zulip.com',
verified=True,
primary=True),
dict(email="aaron@zulip.com",
verified=True),
dict(email="ignored@example.com",
verified=False),
]
result = self.social_auth_test(account_data_dict,
subdomain='zulip', email_data=email_data,
expect_choose_email_screen=True,
next='/user_uploads/image')
data = load_subdomain_token(result)
self.assertEqual(data['email'], 'nonprimary@zulip.com')
self.assertEqual(data['full_name'], 'Non Primary')
self.assertEqual(data['subdomain'], 'zulip')
self.assertEqual(data['redirect_to'], '/user_uploads/image')
self.assertEqual(result.status_code, 302)
parsed_url = urllib.parse.urlparse(result.url)
uri = f"{parsed_url.scheme}://{parsed_url.netloc}{parsed_url.path}"
self.assertTrue(uri.startswith('http://zulip.testserver/accounts/login/subdomain/'))
def test_github_oauth2_success_single_email(self) -> None:
# If the user has a single email associated with its GitHub account,
# the choose email screen should not be shown and the first email
# should be used for user's signup/login.
account_data_dict = dict(email='not-hamlet@zulip.com', name=self.name)
email_data = [
dict(email='hamlet@zulip.com',
verified=True,
primary=True),
]
result = self.social_auth_test(account_data_dict,
subdomain='zulip',
email_data=email_data,
expect_choose_email_screen=False,
next='/user_uploads/image')
data = load_subdomain_token(result)
self.assertEqual(data['email'], self.example_email("hamlet"))
self.assertEqual(data['full_name'], self.name)
self.assertEqual(data['subdomain'], 'zulip')
self.assertEqual(data['redirect_to'], '/user_uploads/image')
self.assertEqual(result.status_code, 302)
parsed_url = urllib.parse.urlparse(result.url)
uri = f"{parsed_url.scheme}://{parsed_url.netloc}{parsed_url.path}"
self.assertTrue(uri.startswith('http://zulip.testserver/accounts/login/subdomain/'))
def test_github_oauth2_login_only_one_account_exists(self) -> None:
# In a login flow, if only one of the user's verified emails
# is associated with an existing account, the user should be
# just logged in (skipping the "choose email screen"). We
# only want that screen if the user were instead trying to
# register a new account, which they're not.
account_data_dict = self.get_account_data_dict(email=self.email, name=self.name)
email_data = [
dict(email=account_data_dict["email"],
verified=True),
dict(email="notprimary@zulip.com",
verified=True),
dict(email="verifiedemail@zulip.com",
verified=True),
]
result = self.social_auth_test(account_data_dict,
subdomain='zulip',
email_data=email_data,
expect_choose_email_screen=False,
next='/user_uploads/image')
data = load_subdomain_token(result)
self.assertEqual(data['email'], account_data_dict["email"])
self.assertEqual(data['full_name'], self.name)
self.assertEqual(data['subdomain'], 'zulip')
self.assertEqual(data['redirect_to'], '/user_uploads/image')
self.assertEqual(result.status_code, 302)
parsed_url = urllib.parse.urlparse(result.url)
uri = f"{parsed_url.scheme}://{parsed_url.netloc}{parsed_url.path}"
self.assertTrue(uri.startswith('http://zulip.testserver/accounts/login/subdomain/'))
    def test_github_oauth2_login_multiple_accounts_exist(self) -> None:
        """Login flow: multiple verified emails match existing accounts,
        so the "choose email" screen must be presented."""
        # In the login flow, if multiple of the user's verified emails
        # are associated with existing accounts, we expect the choose
        # email screen to select which account to use.
        hamlet = self.example_user("hamlet")
        account_data_dict = dict(email='hamlet@zulip.com', name="Hamlet")
        email_data = [
            dict(email=account_data_dict["email"],
                 verified=True),
            dict(email='hamlet@zulip.com',
                 verified=True,
                 primary=True),
            dict(email="aaron@zulip.com",
                 verified=True),
            # Unverified emails must never be offered as choices.
            dict(email="ignored@example.com",
                 verified=False),
        ]
        result = self.social_auth_test(account_data_dict,
                                       subdomain='zulip', email_data=email_data,
                                       expect_choose_email_screen=True,
                                       next='/user_uploads/image')
        data = load_subdomain_token(result)
        self.assertEqual(data['email'], 'hamlet@zulip.com')
        self.assertEqual(data['full_name'], hamlet.full_name)
        self.assertEqual(data['subdomain'], 'zulip')
        self.assertEqual(data['redirect_to'], '/user_uploads/image')
        self.assertEqual(result.status_code, 302)
        parsed_url = urllib.parse.urlparse(result.url)
        uri = f"{parsed_url.scheme}://{parsed_url.netloc}{parsed_url.path}"
        self.assertTrue(uri.startswith('http://zulip.testserver/accounts/login/subdomain/'))
    def test_github_oauth2_login_no_account_exists(self) -> None:
        """Login flow with no matching account: after the "choose email"
        screen, the user is offered registration instead."""
        # In the login flow, if the user has multiple verified emails,
        # none of which are associated with an existing account, the
        # choose email screen should be shown (which will lead to a
        # "continue to registration" choice).
        account_data_dict = dict(email="not-hamlet@zulip.com", name="Not Hamlet")
        email_data = [
            dict(email=account_data_dict["email"],
                 verified=True),
            dict(email="notprimary@zulip.com",
                 verified=True),
            dict(email="verifiedemail@zulip.com",
                 verified=True),
        ]
        result = self.social_auth_test(account_data_dict,
                                       subdomain='zulip',
                                       email_data=email_data,
                                       expect_choose_email_screen=True)
        email = account_data_dict['email']
        name = account_data_dict['name']
        subdomain = 'zulip'
        realm = get_realm("zulip")
        # Walk the remainder of the registration flow for the new email.
        self.stage_two_of_registration(result, realm, subdomain, email, name, name,
                                       expect_confirm_registration_page=True,
                                       skip_registration_form=False)
    def test_github_oauth2_signup_choose_existing_account(self) -> None:
        """Signup flow: choosing an email that already has an account
        simply logs the user into that account."""
        # In the sign up flow, if the user has chosen an email of an
        # existing account, the user will be logged in.
        account_data_dict = self.get_account_data_dict(email=self.email, name=self.name)
        email_data = [
            dict(email=account_data_dict["email"],
                 verified=True),
            dict(email="notprimary@zulip.com",
                 verified=True),
            dict(email="verifiedemail@zulip.com",
                 verified=True),
        ]
        result = self.social_auth_test(account_data_dict,
                                       email_data=email_data,
                                       is_signup=True,
                                       expect_choose_email_screen=True,
                                       next='/user_uploads/image')
        data = load_subdomain_token(result)
        self.assertEqual(data['email'], account_data_dict["email"])
        self.assertEqual(data['full_name'], account_data_dict["name"])
        self.assertEqual(data['subdomain'], 'zulip')
        self.assertEqual(data['redirect_to'], '/user_uploads/image')
        self.assertEqual(result.status_code, 302)
        parsed_url = urllib.parse.urlparse(result.url)
        uri = f"{parsed_url.scheme}://{parsed_url.netloc}{parsed_url.path}"
        self.assertTrue(uri.startswith('http://zulip.testserver/accounts/login/subdomain/'))
    def test_github_oauth2_signup_choose_new_email_to_register(self) -> None:
        """Signup flow: a user with several verified emails may pick one
        with no existing account and register a new account with it."""
        # In the sign up flow, if the user has multiple verified
        # emails, we show the "choose email" screen, even if the user
        # has another verified email with an existing account,
        # allowing the user to register a second account associated
        # with the second email.
        email = "newuser@zulip.com"
        name = 'Full Name'
        subdomain = 'zulip'
        realm = get_realm("zulip")
        account_data_dict = self.get_account_data_dict(email=email, name=name)
        email_data = [
            dict(email="hamlet@zulip.com",
                 verified=True),
            dict(email=email,
                 verified=True),
            dict(email="verifiedemail@zulip.com",
                 verified=True),
        ]
        result = self.social_auth_test(account_data_dict,
                                       email_data = email_data,
                                       expect_choose_email_screen=True,
                                       subdomain=subdomain, is_signup=True)
        self.stage_two_of_registration(result, realm, subdomain, email, name, name,
                                       self.BACKEND_CLASS.full_name_validated)
    def test_github_oauth2_email_no_reply_dot_github_dot_com(self) -> None:
        """A users.noreply.github.com address is filtered out of
        verified_emails, so selecting it must fail the auth attempt."""
        # As emails ending with `noreply.github.com` are excluded from
        # verified_emails, choosing it as an email should raise a `email
        # not associated` warning.
        account_data_dict = dict(email="hamlet@users.noreply.github.com", name=self.name)
        email_data = [
            dict(email="notprimary@zulip.com",
                 verified=True),
            dict(email="hamlet@zulip.com",
                 verified=True,
                 primary=True),
            dict(email="aaron@zulip.com",
                 verified=True),
            dict(email=account_data_dict["email"],
                 verified=True),
        ]
        with self.assertLogs(self.logger_string, level='WARNING') as m:
            result = self.social_auth_test(account_data_dict,
                                           subdomain='zulip',
                                           expect_choose_email_screen=True,
                                           email_data=email_data)
            # Failed auth bounces the user back to the login page.
            self.assertEqual(result.status_code, 302)
            self.assertEqual(result.url, "/login/")
        self.assertEqual(m.output, [self.logger_output(
            "Social auth (GitHub) failed because user has no verified"
            " emails associated with the account",
            "warning",
        )])
def test_github_oauth2_email_not_associated(self) -> None:
account_data_dict = dict(email='not-associated@zulip.com', name=self.name)
email_data = [
dict(email='nonprimary@zulip.com',
verified=True),
dict(email='hamlet@zulip.com',
verified=True,
primary=True),
dict(email="aaron@zulip.com",
verified=True),
]
with self.assertLogs(self.logger_string, level='WARNING') as m:
result = self.social_auth_test(account_data_dict,
subdomain='zulip',
expect_choose_email_screen=True,
email_data=email_data)
self.assertEqual(result.status_code, 302)
self.assertEqual(result.url, "/login/")
self.assertEqual(m.output, [self.logger_output(
"Social auth (GitHub) failed because user has no verified"
" emails associated with the account",
"warning",
)])
    def test_github_unverified_email_with_existing_account(self) -> None:
        """A crafted `email` GET parameter naming an unverified address
        that belongs to an existing account must be rejected."""
        # check if a user is denied to login if the user manages to
        # send an unverified email that has an existing account in
        # organisation through `email` GET parameter.
        account_data_dict = dict(email='hamlet@zulip.com', name=self.name)
        email_data = [
            dict(email='iago@zulip.com',
                 verified=True),
            # The attacker-chosen address is present but NOT verified.
            dict(email='hamlet@zulip.com',
                 verified=False),
            dict(email="aaron@zulip.com",
                 verified=True,
                 primary=True),
        ]
        with self.assertLogs(self.logger_string, level='WARNING') as m:
            result = self.social_auth_test(account_data_dict,
                                           subdomain='zulip',
                                           expect_choose_email_screen=True,
                                           email_data=email_data)
            self.assertEqual(result.status_code, 302)
            self.assertEqual(result.url, "/login/")
        self.assertEqual(m.output, [self.logger_output(
            "Social auth ({}) failed because user has no verified emails associated with the account".format("GitHub"),
            "warning",
        )])
class GitLabAuthBackendTest(SocialAuthBase):
    """Runs the shared SocialAuthBase suite against the GitLab OAuth2
    backend; only the GitLab-specific configuration and the user-info
    payload shape are defined here."""
    __unittest_skip__ = False
    # GitLab-specific backend class, settings keys and OAuth2 endpoints
    # consumed by the SocialAuthBase test machinery.
    BACKEND_CLASS = GitLabAuthBackend
    CLIENT_KEY_SETTING = "SOCIAL_AUTH_GITLAB_KEY"
    CLIENT_SECRET_SETTING = "SOCIAL_AUTH_GITLAB_SECRET"
    LOGIN_URL = "/accounts/login/social/gitlab"
    SIGNUP_URL = "/accounts/register/social/gitlab"
    AUTHORIZATION_URL = "https://gitlab.com/oauth/authorize"
    ACCESS_TOKEN_URL = "https://gitlab.com/oauth/token"
    USER_INFO_URL = "https://gitlab.com/api/v4/user"
    AUTH_FINISH_URL = "/complete/gitlab/"
    CONFIG_ERROR_URL = "/config-error/gitlab"
    def test_gitlab_auth_enabled(self) -> None:
        """gitlab_auth_enabled() reflects the configured backends."""
        with self.settings(AUTHENTICATION_BACKENDS=('zproject.backends.GitLabAuthBackend',)):
            self.assertTrue(gitlab_auth_enabled())
    def get_account_data_dict(self, email: str, name: str) -> Dict[str, Any]:
        """Return the user-info payload shape the GitLab API reports."""
        return dict(email=email, name=name, email_verified=True)
class GoogleAuthBackendTest(SocialAuthBase):
    """Runs the shared SocialAuthBase suite against the Google OAuth2
    backend, plus tests for the log_into_subdomain token-handoff
    endpoint that all social logins funnel through."""
    __unittest_skip__ = False
    # Google-specific backend class, settings keys and OAuth2 endpoints
    # consumed by the SocialAuthBase test machinery.
    BACKEND_CLASS = GoogleAuthBackend
    CLIENT_KEY_SETTING = "SOCIAL_AUTH_GOOGLE_KEY"
    CLIENT_SECRET_SETTING = "SOCIAL_AUTH_GOOGLE_SECRET"
    LOGIN_URL = "/accounts/login/social/google"
    SIGNUP_URL = "/accounts/register/social/google"
    AUTHORIZATION_URL = "https://accounts.google.com/o/oauth2/auth"
    ACCESS_TOKEN_URL = "https://accounts.google.com/o/oauth2/token"
    USER_INFO_URL = "https://www.googleapis.com/oauth2/v3/userinfo"
    AUTH_FINISH_URL = "/complete/google/"
    CONFIG_ERROR_URL = "/config-error/google"
    def get_account_data_dict(self, email: str, name: str) -> Dict[str, Any]:
        """Return the user-info payload shape Google's API reports."""
        return dict(email=email, name=name, email_verified=True)
    def test_social_auth_email_not_verified(self) -> None:
        """A Google identity without email_verified must not log in."""
        account_data_dict = dict(email=self.email, name=self.name)
        with self.assertLogs(self.logger_string, level='WARNING') as m:
            result = self.social_auth_test(account_data_dict,
                                           subdomain='zulip')
            self.assertEqual(result.status_code, 302)
            self.assertEqual(result.url, "/login/")
        self.assertEqual(m.output, [self.logger_output(
            "Social auth ({}) failed because user has no verified emails".format("Google"),
            "warning",
        )])
    def test_social_auth_mobile_success_legacy_url(self) -> None:
        """The legacy /accounts/login/google/ mobile flow: invalid OTPs
        are rejected; a valid OTP yields a zulip:// redirect carrying an
        OTP-encrypted API key, and a login email is sent."""
        mobile_flow_otp = '1234abcd' * 8
        account_data_dict = self.get_account_data_dict(email=self.email, name='Full Name')
        self.assertEqual(len(mail.outbox), 0)
        # Age the account past the just-created threshold so the login
        # notification email is triggered.
        self.user_profile.date_joined = timezone_now() - datetime.timedelta(seconds=JUST_CREATED_THRESHOLD + 1)
        self.user_profile.save()
        with self.settings(SEND_LOGIN_EMAILS=True):
            # Verify that the right thing happens with an invalid-format OTP
            result = self.social_auth_test(account_data_dict, subdomain='zulip',
                                           alternative_start_url="/accounts/login/google/",
                                           mobile_flow_otp="1234")
            self.assert_json_error(result, "Invalid OTP")
            result = self.social_auth_test(account_data_dict, subdomain='zulip',
                                           alternative_start_url="/accounts/login/google/",
                                           mobile_flow_otp="invalido" * 8)
            self.assert_json_error(result, "Invalid OTP")
            # Now do it correctly
            result = self.social_auth_test(account_data_dict, subdomain='zulip',
                                           expect_choose_email_screen=True,
                                           alternative_start_url="/accounts/login/google/",
                                           mobile_flow_otp=mobile_flow_otp)
        self.assertEqual(result.status_code, 302)
        redirect_url = result['Location']
        parsed_url = urllib.parse.urlparse(redirect_url)
        query_params = urllib.parse.parse_qs(parsed_url.query)
        self.assertEqual(parsed_url.scheme, 'zulip')
        self.assertEqual(query_params["realm"], ['http://zulip.testserver'])
        self.assertEqual(query_params["email"], [self.example_email("hamlet")])
        # The API key is encrypted with the OTP the client supplied.
        encrypted_api_key = query_params["otp_encrypted_api_key"][0]
        hamlet_api_keys = get_all_api_keys(self.example_user('hamlet'))
        self.assertIn(otp_decrypt_api_key(encrypted_api_key, mobile_flow_otp), hamlet_api_keys)
        self.assertEqual(len(mail.outbox), 1)
        self.assertIn('Zulip on Android', mail.outbox[0].body)
    def test_google_auth_enabled(self) -> None:
        """google_auth_enabled() reflects the configured backends."""
        with self.settings(AUTHENTICATION_BACKENDS=('zproject.backends.GoogleAuthBackend',)):
            self.assertTrue(google_auth_enabled())
    def get_log_into_subdomain(self, data: ExternalAuthDataDict, *, subdomain: str='zulip',
                               force_token: Optional[str]=None) -> HttpResponse:
        """Hit the log_into_subdomain endpoint with `data` stored under a
        freshly generated token (or under `force_token` if given, to test
        bad-token handling)."""
        if force_token is None:
            token = ExternalAuthResult(data_dict=data).store_data()
        else:
            token = force_token
        url_path = reverse('zerver.views.auth.log_into_subdomain', args=[token])
        return self.client_get(url_path, subdomain=subdomain)
    def test_redirect_to_next_url_for_log_into_subdomain(self) -> None:
        """log_into_subdomain must honor the stored redirect_to value."""
        def test_redirect_to_next_url(next: str='') -> HttpResponse:
            data: ExternalAuthDataDict = {
                'full_name': 'Hamlet',
                'email': self.example_email("hamlet"),
                'subdomain': 'zulip',
                'is_signup': False,
                'redirect_to': next,
            }
            user_profile = self.example_user('hamlet')
            with mock.patch(
                    'zerver.views.auth.authenticate',
                    return_value=user_profile):
                with mock.patch('zerver.views.auth.do_login'):
                    result = self.get_log_into_subdomain(data)
            return result
        res = test_redirect_to_next_url()
        self.assertEqual(res.status_code, 302)
        self.assertEqual(res.url, 'http://zulip.testserver')
        res = test_redirect_to_next_url('/user_uploads/path_to_image')
        self.assertEqual(res.status_code, 302)
        self.assertEqual(res.url, 'http://zulip.testserver/user_uploads/path_to_image')
        res = test_redirect_to_next_url('/#narrow/stream/7-test-here')
        self.assertEqual(res.status_code, 302)
        self.assertEqual(res.url, 'http://zulip.testserver/#narrow/stream/7-test-here')
    def test_log_into_subdomain_when_token_is_malformed(self) -> None:
        """A syntactically invalid token yields a 400 with a warning log."""
        data: ExternalAuthDataDict = {
            'full_name': 'Full Name',
            'email': self.example_email("hamlet"),
            'subdomain': 'zulip',
            'is_signup': False,
            'redirect_to': '',
        }
        with mock.patch("logging.warning") as mock_warn:
            result = self.get_log_into_subdomain(data, force_token='nonsense')
            mock_warn.assert_called_once_with("log_into_subdomain: Malformed token given: %s", "nonsense")
        self.assertEqual(result.status_code, 400)
    def test_log_into_subdomain_when_token_not_found(self) -> None:
        """A well-formed but unknown token yields a 400 error page."""
        data: ExternalAuthDataDict = {
            'full_name': 'Full Name',
            'email': self.example_email("hamlet"),
            'subdomain': 'zulip',
            'is_signup': False,
            'redirect_to': '',
        }
        with mock.patch("logging.warning") as mock_warn:
            token = generate_random_token(ExternalAuthResult.LOGIN_TOKEN_LENGTH)
            result = self.get_log_into_subdomain(data, force_token=token)
            mock_warn.assert_called_once_with("log_into_subdomain: Invalid token given: %s", token)
        self.assertEqual(result.status_code, 400)
        self.assert_in_response("Invalid or expired login session.", result)
    def test_prevent_duplicate_signups(self) -> None:
        """Signing up with an email that already has an account must log
        into that account rather than creating a duplicate."""
        existing_user = self.example_user('hamlet')
        existing_user.delivery_email = 'existing@zulip.com'
        existing_user.email = 'whatever@zulip.com'
        existing_user.save()
        data: ExternalAuthDataDict = {
            'full_name': 'Full Name',
            'email': 'existing@zulip.com',
            'subdomain': 'zulip',
            'is_signup': True,
            'redirect_to': '',
        }
        result = self.get_log_into_subdomain(data)
        # Should simply get logged into the existing account:
        self.assertEqual(result.status_code, 302)
        self.assert_logged_in_user_id(existing_user.id)
    def test_log_into_subdomain_when_is_signup_is_true_and_new_user(self) -> None:
        """Signup with a new email walks the confirmation/registration
        flow, asking for a name but no password (social auth)."""
        data: ExternalAuthDataDict = {
            'full_name': 'New User Name',
            'email': 'new@zulip.com',
            'subdomain': 'zulip',
            'is_signup': True,
            'redirect_to': '',
        }
        result = self.get_log_into_subdomain(data)
        self.assertEqual(result.status_code, 302)
        confirmation = Confirmation.objects.all().first()
        confirmation_key = confirmation.confirmation_key
        self.assertIn('do_confirm/' + confirmation_key, result.url)
        result = self.client_get(result.url)
        self.assert_in_response('action="/accounts/register/"', result)
        confirmation_data = {"from_confirmation": "1",
                             "full_name": data['full_name'],
                             "key": confirmation_key}
        result = self.client_post('/accounts/register/', confirmation_data, subdomain="zulip")
        self.assert_in_response("We just need you to do one last thing", result)
        # Verify that the user is asked for name but not password
        self.assert_not_in_success_response(['id_password'], result)
        self.assert_in_success_response(['id_full_name'], result)
    def test_log_into_subdomain_when_is_signup_is_false_and_new_user(self) -> None:
        """A login attempt by a not-yet-registered email shows the
        "no account found" page with a link into registration."""
        data: ExternalAuthDataDict = {
            'full_name': 'New User Name',
            'email': 'new@zulip.com',
            'subdomain': 'zulip',
            'is_signup': False,
            'redirect_to': '',
        }
        result = self.get_log_into_subdomain(data)
        self.assertEqual(result.status_code, 200)
        self.assert_in_response('No account found for', result)
        self.assert_in_response('new@zulip.com.', result)
        self.assert_in_response('action="http://zulip.testserver/accounts/do_confirm/', result)
        # Extract the do_confirm URL from the rendered page and follow it.
        url = re.findall('action="(http://zulip.testserver/accounts/do_confirm[^"]*)"', result.content.decode('utf-8'))[0]
        confirmation = Confirmation.objects.all().first()
        confirmation_key = confirmation.confirmation_key
        self.assertIn('do_confirm/' + confirmation_key, url)
        result = self.client_get(url)
        self.assert_in_response('action="/accounts/register/"', result)
        confirmation_data = {"from_confirmation": "1",
                             "full_name": data['full_name'],
                             "key": confirmation_key}
        result = self.client_post('/accounts/register/', confirmation_data, subdomain="zulip")
        self.assert_in_response("We just need you to do one last thing", result)
        # Verify that the user is asked for name but not password
        self.assert_not_in_success_response(['id_password'], result)
        self.assert_in_success_response(['id_full_name'], result)
    def test_log_into_subdomain_when_using_invite_link(self) -> None:
        """On an invite-required realm, social signup is blocked without
        an invitation; with a multiuse invite key, registration proceeds
        and the invite's stream subscriptions are applied."""
        data: ExternalAuthDataDict = {
            'full_name': 'New User Name',
            'email': 'new@zulip.com',
            'subdomain': 'zulip',
            'is_signup': True,
            'redirect_to': '',
        }
        realm = get_realm("zulip")
        realm.invite_required = True
        realm.save()
        stream_names = ["new_stream_1", "new_stream_2"]
        streams = []
        for stream_name in set(stream_names):
            stream = ensure_stream(realm, stream_name)
            streams.append(stream)
        # Without the invite link, we can't create an account due to invite_required
        result = self.get_log_into_subdomain(data)
        self.assertEqual(result.status_code, 200)
        self.assert_in_success_response(['Sign up for Zulip'], result)
        # Now confirm an invitation link works
        referrer = self.example_user("hamlet")
        multiuse_obj = MultiuseInvite.objects.create(realm=realm, referred_by=referrer)
        multiuse_obj.streams.set(streams)
        create_confirmation_link(multiuse_obj, Confirmation.MULTIUSE_INVITE)
        multiuse_confirmation = Confirmation.objects.all().last()
        multiuse_object_key = multiuse_confirmation.confirmation_key
        data["multiuse_object_key"] = multiuse_object_key
        result = self.get_log_into_subdomain(data)
        self.assertEqual(result.status_code, 302)
        confirmation = Confirmation.objects.all().last()
        confirmation_key = confirmation.confirmation_key
        self.assertIn('do_confirm/' + confirmation_key, result.url)
        result = self.client_get(result.url)
        self.assert_in_response('action="/accounts/register/"', result)
        data2 = {"from_confirmation": "1",
                 "full_name": data['full_name'],
                 "key": confirmation_key}
        result = self.client_post('/accounts/register/', data2, subdomain="zulip")
        self.assert_in_response("We just need you to do one last thing", result)
        # Verify that the user is asked for name but not password
        self.assert_not_in_success_response(['id_password'], result)
        self.assert_in_success_response(['id_full_name'], result)
        # Click confirm registration button.
        result = self.client_post(
            '/accounts/register/',
            {'full_name': 'New User Name',
             'key': confirmation_key,
             'terms': True})
        self.assertEqual(result.status_code, 302)
        # The new account must be subscribed to the invite's streams.
        new_user = get_user_by_delivery_email('new@zulip.com', realm)
        new_streams = self.get_streams(new_user)
        self.assertEqual(sorted(new_streams), stream_names)
    def test_log_into_subdomain_when_email_is_none(self) -> None:
        """A token payload without an email must be rejected with a 400."""
        data: ExternalAuthDataDict = {
            'full_name': None,
            'email': None,
            'subdomain': 'zulip',
            'is_signup': False,
            'redirect_to': '',
        }
        with mock.patch('logging.warning') as mock_warn:
            result = self.get_log_into_subdomain(data)
            self.assertEqual(result.status_code, 400)
            mock_warn.assert_called_once()
    def test_user_cannot_log_into_wrong_subdomain(self) -> None:
        """A token minted for one subdomain cannot be redeemed on another."""
        data: ExternalAuthDataDict = {
            'full_name': 'Full Name',
            'email': self.example_email("hamlet"),
            'subdomain': 'zephyr',
        }
        result = self.get_log_into_subdomain(data)
        self.assert_json_error(result, "Invalid subdomain")
class JSONFetchAPIKeyTest(ZulipTestCase):
    """Tests for /json/fetch_api_key, which requires both an active
    session and the account password."""
    def test_success(self) -> None:
        hamlet = self.example_user('hamlet')
        self.login_user(hamlet)
        payload = dict(user_profile=hamlet,
                       password=initial_password(hamlet.delivery_email))
        result = self.client_post("/json/fetch_api_key", payload)
        self.assert_json_success(result)
    def test_not_loggedin(self) -> None:
        # Correct password, but no session: must be rejected with a 401.
        hamlet = self.example_user('hamlet')
        payload = dict(user_profile=hamlet,
                       password=initial_password(hamlet.delivery_email))
        result = self.client_post("/json/fetch_api_key", payload)
        self.assert_json_error(
            result, "Not logged in: API authentication or user session required", 401)
    def test_wrong_password(self) -> None:
        # Valid session, wrong password: must be rejected with a 400.
        hamlet = self.example_user('hamlet')
        self.login_user(hamlet)
        payload = dict(user_profile=hamlet, password="wrong")
        result = self.client_post("/json/fetch_api_key", payload)
        self.assert_json_error(result, "Your username or password is incorrect.", 400)
class FetchAPIKeyTest(ZulipTestCase):
    """Tests for /api/v1/fetch_api_key (username/password -> API key)."""
    def setUp(self) -> None:
        super().setUp()
        self.user_profile = self.example_user('hamlet')
        self.email = self.user_profile.delivery_email
    def test_success(self) -> None:
        result = self.client_post("/api/v1/fetch_api_key",
                                  dict(username=self.email,
                                       password=initial_password(self.email)))
        self.assert_json_success(result)
    def test_invalid_email(self) -> None:
        # A bare username that is not an email address is rejected.
        result = self.client_post("/api/v1/fetch_api_key",
                                  dict(username='hamlet',
                                       password=initial_password(self.email)))
        self.assert_json_error(result, "Enter a valid email address.", 400)
    def test_wrong_password(self) -> None:
        result = self.client_post("/api/v1/fetch_api_key",
                                  dict(username=self.email,
                                       password="wrong"))
        self.assert_json_error(result, "Your username or password is incorrect.", 403)
    def test_password_auth_disabled(self) -> None:
        """With password auth disabled, the endpoint refuses the exchange."""
        with mock.patch('zproject.backends.password_auth_enabled', return_value=False):
            result = self.client_post("/api/v1/fetch_api_key",
                                      dict(username=self.email,
                                           password=initial_password(self.email)))
            self.assert_json_error_contains(result, "Password auth is disabled", 403)
    @override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',))
    def test_ldap_auth_email_auth_disabled_success(self) -> None:
        """The endpoint also works via LDAP credentials when only the
        LDAP backend is enabled."""
        self.init_default_ldap_database()
        with self.settings(LDAP_APPEND_DOMAIN='zulip.com'):
            result = self.client_post("/api/v1/fetch_api_key",
                                      dict(username=self.example_email('hamlet'),
                                           password=self.ldap_password('hamlet')))
            self.assert_json_success(result)
    def test_inactive_user(self) -> None:
        do_deactivate_user(self.user_profile)
        result = self.client_post("/api/v1/fetch_api_key",
                                  dict(username=self.email,
                                       password=initial_password(self.email)))
        self.assert_json_error_contains(result, "Your account has been disabled", 403)
    def test_deactivated_realm(self) -> None:
        do_deactivate_realm(self.user_profile.realm)
        result = self.client_post("/api/v1/fetch_api_key",
                                  dict(username=self.email,
                                       password=initial_password(self.email)))
        self.assert_json_error_contains(result, "This organization has been deactivated", 403)
class DevFetchAPIKeyTest(ZulipTestCase):
    """Tests for the development-only /api/v1/dev_fetch_api_key endpoint,
    which hands out an API key with no password check."""
    def setUp(self) -> None:
        super().setUp()
        self.user_profile = self.example_user('hamlet')
        self.email = self.user_profile.delivery_email
    def test_success(self) -> None:
        result = self.client_post("/api/v1/dev_fetch_api_key",
                                  dict(username=self.email))
        self.assert_json_success(result)
        data = result.json()
        self.assertEqual(data["email"], self.email)
        # The returned key must be one of the user's real API keys.
        user_api_keys = get_all_api_keys(self.user_profile)
        self.assertIn(data['api_key'], user_api_keys)
    def test_invalid_email(self) -> None:
        email = 'hamlet'
        result = self.client_post("/api/v1/dev_fetch_api_key",
                                  dict(username=email))
        self.assert_json_error_contains(result, "Enter a valid email address.", 400)
    def test_unregistered_user(self) -> None:
        email = 'foo@zulip.com'
        result = self.client_post("/api/v1/dev_fetch_api_key",
                                  dict(username=email))
        self.assert_json_error_contains(result, "This user is not registered.", 403)
    def test_inactive_user(self) -> None:
        do_deactivate_user(self.user_profile)
        result = self.client_post("/api/v1/dev_fetch_api_key",
                                  dict(username=self.email))
        self.assert_json_error_contains(result, "Your account has been disabled", 403)
    def test_deactivated_realm(self) -> None:
        do_deactivate_realm(self.user_profile.realm)
        result = self.client_post("/api/v1/dev_fetch_api_key",
                                  dict(username=self.email))
        self.assert_json_error_contains(result, "This organization has been deactivated", 403)
    def test_dev_auth_disabled(self) -> None:
        # Outside the dev environment this endpoint must not exist.
        with mock.patch('zerver.views.auth.dev_auth_enabled', return_value=False):
            result = self.client_post("/api/v1/dev_fetch_api_key",
                                      dict(username=self.email))
            self.assert_json_error_contains(result, "Dev environment not enabled.", 400)
class DevGetEmailsTest(ZulipTestCase):
    """Tests for the development-only /api/v1/dev_list_users endpoint."""
    def test_success(self) -> None:
        response = self.client_get("/api/v1/dev_list_users")
        self.assert_json_success(response)
        # The payload groups accounts into admins and regular users.
        for fragment in ("direct_admins", "direct_users"):
            self.assert_in_response(fragment, response)
    def test_dev_auth_disabled(self) -> None:
        # Outside the dev environment this endpoint must not exist.
        with mock.patch('zerver.views.auth.dev_auth_enabled', return_value=False):
            response = self.client_get("/api/v1/dev_list_users")
            self.assert_json_error_contains(response, "Dev environment not enabled.", 400)
class ExternalMethodDictsTests(ZulipTestCase):
    """Tests for get_external_method_dicts(), which builds the list of
    external authentication methods advertised to clients (ordering,
    per-realm SAML IdP filtering, and the login/register buttons)."""
    def get_configured_saml_backend_idp_names(self) -> List[str]:
        # Fix: return an actual list, matching the declared List[str]
        # return type (dict.keys() is a KeysView, not a list).
        return list(settings.SOCIAL_AUTH_SAML_ENABLED_IDPS.keys())
    def test_get_external_method_dicts_correctly_sorted(self) -> None:
        """SAML entries come first; the rest are sorted by descending
        backend sort_order."""
        with self.settings(
            AUTHENTICATION_BACKENDS=('zproject.backends.EmailAuthBackend',
                                     'zproject.backends.GitHubAuthBackend',
                                     'zproject.backends.GoogleAuthBackend',
                                     'zproject.backends.ZulipRemoteUserBackend',
                                     'zproject.backends.SAMLAuthBackend',
                                     'zproject.backends.AzureADAuthBackend'),
        ):
            external_auth_methods = get_external_method_dicts()
            # First backends in the list should be SAML:
            self.assertIn('saml:', external_auth_methods[0]['name'])
            self.assertEqual(
                [social_backend['name'] for social_backend in external_auth_methods[1:]],
                [social_backend.name for social_backend in sorted(
                    [ZulipRemoteUserBackend, GitHubAuthBackend, AzureADAuthBackend, GoogleAuthBackend],
                    key=lambda x: x.sort_order,
                    reverse=True,
                )],
            )
    def test_get_external_method_buttons(self) -> None:
        """Login and register pages render one button per external method,
        including one per configured SAML IdP."""
        with self.settings(
            AUTHENTICATION_BACKENDS=('zproject.backends.EmailAuthBackend',
                                     'zproject.backends.GitHubAuthBackend',
                                     'zproject.backends.GoogleAuthBackend',
                                     'zproject.backends.SAMLAuthBackend'),
        ):
            saml_idp_names = self.get_configured_saml_backend_idp_names()
            expected_button_id_strings = [
                'id="{}_auth_button_github"',
                'id="{}_auth_button_google"',
            ]
            for name in saml_idp_names:
                expected_button_id_strings.append(f'id="{{}}_auth_button_saml:{name}"')
            result = self.client_get("/login/")
            self.assert_in_success_response([string.format("login") for string in expected_button_id_strings],
                                            result)
            result = self.client_get("/register/")
            self.assert_in_success_response([string.format("register") for string in expected_button_id_strings],
                                            result)
    def test_get_external_method_dicts_multiple_saml_idps(self) -> None:
        """A SAML IdP restricted via limit_to_subdomains is only included
        for realms on those subdomains."""
        idps_dict = copy.deepcopy(settings.SOCIAL_AUTH_SAML_ENABLED_IDPS)
        # Create another IdP config, by copying the original one and changing some details.
        idps_dict['test_idp2'] = copy.deepcopy(idps_dict['test_idp'])
        idps_dict['test_idp2']['url'] = 'https://idp2.example.com/idp/profile/SAML2/Redirect/SSO'
        idps_dict['test_idp2']['display_name'] = 'Second Test IdP'
        idps_dict['test_idp2']['limit_to_subdomains'] = ['zephyr']
        with self.settings(
            SOCIAL_AUTH_SAML_ENABLED_IDPS=idps_dict,
            AUTHENTICATION_BACKENDS=('zproject.backends.EmailAuthBackend',
                                     'zproject.backends.GitHubAuthBackend',
                                     'zproject.backends.SAMLAuthBackend'),
        ):
            # Calling get_external_method_dicts without a realm returns all methods configured on the server:
            external_auth_methods = get_external_method_dicts()
            # 1 IdP enabled for all realms + a dict for github auth
            self.assert_length(external_auth_methods, 2)
            self.assertEqual([external_auth_methods[0]['name'], external_auth_methods[1]['name']],
                             ['saml:test_idp', 'github'])
            external_auth_methods = get_external_method_dicts(get_realm("zulip"))
            # Only test_idp enabled for the zulip realm, + github auth.
            self.assert_length(external_auth_methods, 2)
            self.assertEqual([external_auth_methods[0]['name'], external_auth_methods[1]['name']],
                             ['saml:test_idp', 'github'])
            external_auth_methods = get_external_method_dicts(get_realm("zephyr"))
            # Both idps enabled for the zephyr realm, + github auth.
            self.assert_length(external_auth_methods, 3)
            self.assertEqual({external_auth_methods[0]['name'], external_auth_methods[1]['name']},
                             {'saml:test_idp', 'saml:test_idp2'})
class FetchAuthBackends(ZulipTestCase):
    """Tests for /api/v1/server_settings, which advertises the server's
    enabled authentication methods and realm metadata to clients."""
    def assert_on_error(self, error: Optional[str]) -> None:
        # The check_* validators return an error string (or None);
        # surface any error string as a test failure.
        if error:
            raise AssertionError(error)
    def test_get_server_settings(self) -> None:
        """The response schema holds with and without a realm subdomain,
        and invalid/missing subdomains are rejected appropriately."""
        # Fix: use an immutable tuple as the default for extra_fields
        # rather than a mutable [] default argument (B006).
        def check_result(result: HttpResponse, extra_fields: Sequence[Tuple[str, Validator]] = ()) -> None:
            # Validate the common response schema plus any realm-specific
            # extra_fields the caller expects.
            authentication_methods_list = [
                ('password', check_bool),
            ]
            for backend_name_with_case in AUTH_BACKEND_NAME_MAP:
                authentication_methods_list.append((backend_name_with_case.lower(), check_bool))
            external_auth_methods = get_external_method_dicts()
            self.assert_json_success(result)
            checker = check_dict_only([
                ('authentication_methods', check_dict_only(authentication_methods_list)),
                ('external_authentication_methods', check_list(None, length=len(external_auth_methods))),
                ('email_auth_enabled', check_bool),
                ('is_incompatible', check_bool),
                ('require_email_format_usernames', check_bool),
                ('realm_uri', check_string),
                ('zulip_version', check_string),
                ('zulip_feature_level', check_int),
                ('push_notifications_enabled', check_bool),
                ('msg', check_string),
                ('result', check_string),
                *extra_fields,
            ])
            self.assert_on_error(checker("data", result.json()))
        result = self.client_get("/api/v1/server_settings", subdomain="", HTTP_USER_AGENT="")
        check_result(result)
        self.assertEqual(result.json()['external_authentication_methods'], get_external_method_dicts())
        # An incompatible client user agent must be flagged.
        result = self.client_get("/api/v1/server_settings", subdomain="", HTTP_USER_AGENT="ZulipInvalid")
        self.assertTrue(result.json()["is_incompatible"])
        with self.settings(ROOT_DOMAIN_LANDING_PAGE=False):
            result = self.client_get("/api/v1/server_settings", subdomain="", HTTP_USER_AGENT="")
            check_result(result)
        with self.settings(ROOT_DOMAIN_LANDING_PAGE=False):
            # With a realm subdomain, realm metadata fields are included.
            result = self.client_get("/api/v1/server_settings", subdomain="zulip", HTTP_USER_AGENT="")
            check_result(result, [
                ('realm_name', check_string),
                ('realm_description', check_string),
                ('realm_icon', check_string),
            ])
        # Verify invalid subdomain
        result = self.client_get("/api/v1/server_settings",
                                 subdomain="invalid")
        self.assert_json_error_contains(result, "Invalid subdomain", 400)
        with self.settings(ROOT_DOMAIN_LANDING_PAGE=True):
            # With ROOT_DOMAIN_LANDING_PAGE, homepage fails
            result = self.client_get("/api/v1/server_settings",
                                     subdomain="")
            self.assert_json_error_contains(result, "Subdomain required", 400)
class TestTwoFactor(ZulipTestCase):
    """Tests for two-factor authentication interacting with the dev
    login and the LDAP password backend."""
    def test_direct_dev_login_with_2fa(self) -> None:
        """Dev direct login works with 2FA enabled, both before and after
        the user has an OTP device; the session records the device."""
        email = self.example_email('hamlet')
        user_profile = self.example_user('hamlet')
        with self.settings(TWO_FACTOR_AUTHENTICATION_ENABLED=True):
            data = {'direct_email': email}
            result = self.client_post('/accounts/login/local/', data)
            self.assertEqual(result.status_code, 302)
            self.assert_logged_in_user_id(user_profile.id)
            # User logs in but when otp device doesn't exist.
            self.assertNotIn('otp_device_id', self.client.session.keys())
            self.create_default_device(user_profile)
            data = {'direct_email': email}
            result = self.client_post('/accounts/login/local/', data)
            self.assertEqual(result.status_code, 302)
            self.assert_logged_in_user_id(user_profile.id)
            # User logs in when otp device exists.
            self.assertIn('otp_device_id', self.client.session.keys())
    @mock.patch('two_factor.models.totp')
    def test_two_factor_login_with_ldap(self, mock_totp: mock.MagicMock) -> None:
        """Full two-step (password, then OTP token) login against the
        LDAP backend, with the TOTP generator mocked to a fixed token."""
        token = 123456
        email = self.example_email('hamlet')
        password = self.ldap_password('hamlet')
        user_profile = self.example_user('hamlet')
        user_profile.set_password(password)
        user_profile.save()
        self.create_default_device(user_profile)
        def totp(*args: Any, **kwargs: Any) -> int:
            # Always produce the known token so the second step passes.
            return token
        mock_totp.side_effect = totp
        # Setup LDAP
        self.init_default_ldap_database()
        ldap_user_attr_map = {'full_name': 'cn', 'short_name': 'sn'}
        with self.settings(
            AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',),
            TWO_FACTOR_CALL_GATEWAY='two_factor.gateways.fake.Fake',
            TWO_FACTOR_SMS_GATEWAY='two_factor.gateways.fake.Fake',
            TWO_FACTOR_AUTHENTICATION_ENABLED=True,
            POPULATE_PROFILE_VIA_LDAP=True,
            LDAP_APPEND_DOMAIN='zulip.com',
            AUTH_LDAP_USER_ATTR_MAP=ldap_user_attr_map,
        ):
            first_step_data = {"username": email,
                               "password": password,
                               "two_factor_login_view-current_step": "auth"}
            result = self.client_post("/accounts/login/", first_step_data)
            self.assertEqual(result.status_code, 200)
            second_step_data = {"token-otp_token": str(token),
                                "two_factor_login_view-current_step": "token"}
            result = self.client_post("/accounts/login/", second_step_data)
            self.assertEqual(result.status_code, 302)
            self.assertEqual(result['Location'], 'http://zulip.testserver')
            # Going to login page should redirect to `realm.uri` if user is
            # already logged in.
            result = self.client_get('/accounts/login/')
            self.assertEqual(result.status_code, 302)
            self.assertEqual(result['Location'], 'http://zulip.testserver')
class TestDevAuthBackend(ZulipTestCase):
    """Tests for the development-environment direct-login backend, reached
    via /accounts/login/local/ and the /devlogin/ realm chooser."""

    def test_login_success(self) -> None:
        hamlet = self.example_user('hamlet')
        payload = {'direct_email': hamlet.delivery_email}
        result = self.client_post('/accounts/login/local/', payload)
        self.assertEqual(result.status_code, 302)
        self.assert_logged_in_user_id(hamlet.id)

    def test_login_success_with_2fa(self) -> None:
        hamlet = self.example_user('hamlet')
        self.create_default_device(hamlet)
        payload = {'direct_email': hamlet.delivery_email}
        with self.settings(TWO_FACTOR_AUTHENTICATION_ENABLED=True):
            result = self.client_post('/accounts/login/local/', payload)
        self.assertEqual(result.status_code, 302)
        self.assertEqual(result.url, 'http://zulip.testserver/')
        self.assert_logged_in_user_id(hamlet.id)
        # The dev backend should have verified the default OTP device.
        self.assertIn('otp_device_id', list(self.client.session.keys()))

    def test_redirect_to_next_url(self) -> None:
        def do_local_login(formaction: str) -> HttpResponse:
            payload = {'direct_email': self.example_email('hamlet')}
            return self.client_post(formaction, payload)

        res = do_local_login('/accounts/login/local/')
        self.assertEqual(res.status_code, 302)
        self.assertEqual(res.url, 'http://zulip.testserver/')

        res = do_local_login('/accounts/login/local/?next=/user_uploads/path_to_image')
        self.assertEqual(res.status_code, 302)
        self.assertEqual(res.url, 'http://zulip.testserver/user_uploads/path_to_image')

        # In local Email based authentication we never make browser send the hash
        # to the backend. Rather we depend upon the browser's behaviour of persisting
        # hash anchors in between redirect requests. See below stackoverflow conversation
        # https://stackoverflow.com/questions/5283395/url-hash-is-persisting-between-redirects
        res = do_local_login('/accounts/login/local/?next=#narrow/stream/7-test-here')
        self.assertEqual(res.status_code, 302)
        self.assertEqual(res.url, 'http://zulip.testserver')

    def test_login_with_subdomain(self) -> None:
        hamlet = self.example_user('hamlet')
        payload = {'direct_email': hamlet.delivery_email}
        result = self.client_post('/accounts/login/local/', payload)
        self.assertEqual(result.status_code, 302)
        self.assert_logged_in_user_id(hamlet.id)

    def test_choose_realm(self) -> None:
        # On a realm subdomain, only that realm's users are offered.
        result = self.client_post('/devlogin/', subdomain="zulip")
        self.assertEqual(result.status_code, 200)
        self.assert_in_success_response(["Click on a user to log in to Zulip Dev!"], result)
        self.assert_in_success_response(["iago@zulip.com", "hamlet@zulip.com"], result)

        # On the root domain, users from every realm are offered.
        result = self.client_post('/devlogin/', subdomain="")
        self.assertEqual(result.status_code, 200)
        self.assert_in_success_response(["Click on a user to log in!"], result)
        self.assert_in_success_response(["iago@zulip.com", "hamlet@zulip.com"], result)
        self.assert_in_success_response(["starnine@mit.edu", "espuser@mit.edu"], result)

        result = self.client_post('/devlogin/', {'new_realm': 'all_realms'},
                                  subdomain="zephyr")
        self.assertEqual(result.status_code, 200)
        self.assert_in_success_response(["starnine@mit.edu", "espuser@mit.edu"], result)
        self.assert_in_success_response(["Click on a user to log in!"], result)
        self.assert_in_success_response(["iago@zulip.com", "hamlet@zulip.com"], result)

        # Choosing a specific realm redirects to that realm's subdomain.
        result = self.client_post('/devlogin/', {'new_realm': 'zephyr'}, subdomain="zulip")
        self.assertEqual(result.status_code, 302)
        self.assertEqual(result.url, "http://zephyr.testserver")

        result = self.client_get('/devlogin/', subdomain="zephyr")
        self.assertEqual(result.status_code, 200)
        self.assert_in_success_response(["starnine@mit.edu", "espuser@mit.edu"], result)
        self.assert_in_success_response(["Click on a user to log in to MIT!"], result)
        self.assert_not_in_success_response(["iago@zulip.com", "hamlet@zulip.com"], result)

    def test_choose_realm_with_subdomains_enabled(self) -> None:
        with mock.patch('zerver.views.auth.is_subdomain_root_or_alias', return_value=False):
            with mock.patch('zerver.views.auth.get_realm_from_request', return_value=get_realm('zulip')):
                result = self.client_get("http://zulip.testserver/devlogin/")
                self.assert_in_success_response(["iago@zulip.com", "hamlet@zulip.com"], result)
                self.assert_not_in_success_response(["starnine@mit.edu", "espuser@mit.edu"], result)
                self.assert_in_success_response(["Click on a user to log in to Zulip Dev!"], result)

            with mock.patch('zerver.views.auth.get_realm_from_request', return_value=get_realm('zephyr')):
                result = self.client_post("http://zulip.testserver/devlogin/", {'new_realm': 'zephyr'})
                self.assertEqual(result["Location"], "http://zephyr.testserver")

                result = self.client_get("http://zephyr.testserver/devlogin/")
                self.assert_not_in_success_response(["iago@zulip.com", "hamlet@zulip.com"], result)
                self.assert_in_success_response(["starnine@mit.edu", "espuser@mit.edu"], result)
                self.assert_in_success_response(["Click on a user to log in to MIT!"], result)

    def test_login_failure(self) -> None:
        # With the dev backend disabled, direct login must fail.
        payload = {'direct_email': self.example_email("hamlet")}
        with self.settings(AUTHENTICATION_BACKENDS=('zproject.backends.EmailAuthBackend',)):
            response = self.client_post('/accounts/login/local/', payload)
            self.assertRedirects(response, reverse('config_error', kwargs={'error_category_name': 'dev'}))

    def test_dev_direct_production_config_error(self) -> None:
        result = self.client_get("/config-error/dev")
        self.assertEqual(result.status_code, 200)
        self.assert_in_success_response(["DevAuthBackend"], result)

    def test_login_failure_due_to_nonexistent_user(self) -> None:
        payload = {'direct_email': 'nonexisting@zulip.com'}
        response = self.client_post('/accounts/login/local/', payload)
        self.assertRedirects(response, reverse('config_error', kwargs={'error_category_name': 'dev'}))
class TestZulipRemoteUserBackend(DesktopFlowTestingLib, ZulipTestCase):
    """Tests for REMOTE_USER-header (external SSO) authentication."""

    def test_start_remote_user_sso(self) -> None:
        """/accounts/login/start/sso/ should forward its query string to the
        SSO endpoint unchanged."""
        # Bug fix: the query string previously contained the mis-encoded
        # character '¶' (an HTML-entity mangling of '&para'); restore the
        # intended '&params=value2' in both the request and the assertion.
        result = self.client_get('/accounts/login/start/sso/?param1=value1&params=value2')
        self.assertEqual(result.status_code, 302)

        url = result.url
        parsed_url = urllib.parse.urlparse(url)
        self.assertEqual(parsed_url.path, '/accounts/login/sso/')
        self.assertEqual(parsed_url.query, 'param1=value1&params=value2')

    def test_start_remote_user_sso_with_desktop_app(self) -> None:
        # Requests from the desktop app user agent get the desktop-flow page.
        headers = dict(HTTP_USER_AGENT="ZulipElectron/5.0.0")
        result = self.client_get('/accounts/login/start/sso/', **headers)
        self.verify_desktop_flow_app_page(result)

    def test_login_success(self) -> None:
        user_profile = self.example_user('hamlet')
        email = user_profile.delivery_email
        with self.settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipRemoteUserBackend',)):
            result = self.client_get('/accounts/login/sso/', REMOTE_USER=email)
            self.assertEqual(result.status_code, 302)
            self.assert_logged_in_user_id(user_profile.id)
def test_login_success_with_sso_append_domain(self) -> None:
username = 'hamlet'
user_profile = self.example_user('hamlet')
with self.settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipRemoteUserBackend',),
SSO_APPEND_DOMAIN='zulip.com'):
result = self.client_get('/accounts/login/sso/', REMOTE_USER=username)
self.assertEqual(result.status_code, 302)
self.assert_logged_in_user_id(user_profile.id)
def test_login_failure(self) -> None:
email = self.example_email("hamlet")
result = self.client_get('/accounts/login/sso/', REMOTE_USER=email)
self.assertEqual(result.status_code, 302)
result = self.client_get(result["Location"])
self.assert_in_response("Authentication via the REMOTE_USER header is", result)
self.assert_logged_in_user_id(None)
def test_login_failure_due_to_nonexisting_user(self) -> None:
email = 'nonexisting@zulip.com'
with self.settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipRemoteUserBackend',)):
result = self.client_get('/accounts/login/sso/', REMOTE_USER=email)
self.assertEqual(result.status_code, 200)
self.assert_logged_in_user_id(None)
self.assert_in_response("No account found for", result)
def test_login_failure_due_to_invalid_email(self) -> None:
email = 'hamlet'
with self.settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipRemoteUserBackend',)):
result = self.client_get('/accounts/login/sso/', REMOTE_USER=email)
self.assert_json_error_contains(result, "Enter a valid email address.", 400)
def test_login_failure_due_to_missing_field(self) -> None:
with self.settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipRemoteUserBackend',)):
result = self.client_get('/accounts/login/sso/')
self.assertEqual(result.status_code, 302)
result = self.client_get(result["Location"])
self.assert_in_response("The REMOTE_USER header is not set.", result)
def test_login_failure_due_to_wrong_subdomain(self) -> None:
email = self.example_email("hamlet")
with self.settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipRemoteUserBackend',)):
with mock.patch('zerver.views.auth.get_subdomain', return_value='acme'):
result = self.client_get('http://testserver:9080/accounts/login/sso/',
REMOTE_USER=email)
self.assertEqual(result.status_code, 200)
self.assert_logged_in_user_id(None)
self.assert_in_response("You need an invitation to join this organization.", result)
def test_login_failure_due_to_empty_subdomain(self) -> None:
email = self.example_email("hamlet")
with self.settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipRemoteUserBackend',)):
with mock.patch('zerver.views.auth.get_subdomain', return_value=''):
result = self.client_get('http://testserver:9080/accounts/login/sso/',
REMOTE_USER=email)
self.assertEqual(result.status_code, 200)
self.assert_logged_in_user_id(None)
self.assert_in_response("You need an invitation to join this organization.", result)
def test_login_success_under_subdomains(self) -> None:
user_profile = self.example_user('hamlet')
email = user_profile.delivery_email
with mock.patch('zerver.views.auth.get_subdomain', return_value='zulip'):
with self.settings(
AUTHENTICATION_BACKENDS=('zproject.backends.ZulipRemoteUserBackend',)):
result = self.client_get('/accounts/login/sso/', REMOTE_USER=email)
self.assertEqual(result.status_code, 302)
self.assert_logged_in_user_id(user_profile.id)
@override_settings(SEND_LOGIN_EMAILS=True)
@override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipRemoteUserBackend',))
def test_login_mobile_flow_otp_success_email(self) -> None:
user_profile = self.example_user('hamlet')
email = user_profile.delivery_email
user_profile.date_joined = timezone_now() - datetime.timedelta(seconds=61)
user_profile.save()
mobile_flow_otp = '1234abcd' * 8
# Verify that the right thing happens with an invalid-format OTP
result = self.client_get('/accounts/login/sso/',
dict(mobile_flow_otp="1234"),
REMOTE_USER=email,
HTTP_USER_AGENT = "ZulipAndroid")
self.assert_logged_in_user_id(None)
self.assert_json_error_contains(result, "Invalid OTP", 400)
result = self.client_get('/accounts/login/sso/',
dict(mobile_flow_otp="invalido" * 8),
REMOTE_USER=email,
HTTP_USER_AGENT = "ZulipAndroid")
self.assert_logged_in_user_id(None)
self.assert_json_error_contains(result, "Invalid OTP", 400)
result = self.client_get('/accounts/login/sso/',
dict(mobile_flow_otp=mobile_flow_otp),
REMOTE_USER=email,
HTTP_USER_AGENT = "ZulipAndroid")
self.assertEqual(result.status_code, 302)
redirect_url = result['Location']
parsed_url = urllib.parse.urlparse(redirect_url)
query_params = urllib.parse.parse_qs(parsed_url.query)
self.assertEqual(parsed_url.scheme, 'zulip')
self.assertEqual(query_params["realm"], ['http://zulip.testserver'])
self.assertEqual(query_params["email"], [self.example_email("hamlet")])
encrypted_api_key = query_params["otp_encrypted_api_key"][0]
hamlet_api_keys = get_all_api_keys(self.example_user('hamlet'))
self.assertIn(otp_decrypt_api_key(encrypted_api_key, mobile_flow_otp), hamlet_api_keys)
self.assertEqual(len(mail.outbox), 1)
self.assertIn('Zulip on Android', mail.outbox[0].body)
@override_settings(SEND_LOGIN_EMAILS=True)
@override_settings(SSO_APPEND_DOMAIN="zulip.com")
@override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipRemoteUserBackend',))
def test_login_mobile_flow_otp_success_username(self) -> None:
user_profile = self.example_user('hamlet')
email = user_profile.delivery_email
remote_user = email_to_username(email)
user_profile.date_joined = timezone_now() - datetime.timedelta(seconds=61)
user_profile.save()
mobile_flow_otp = '1234abcd' * 8
# Verify that the right thing happens with an invalid-format OTP
result = self.client_get('/accounts/login/sso/',
dict(mobile_flow_otp="1234"),
REMOTE_USER=remote_user,
HTTP_USER_AGENT = "ZulipAndroid")
self.assert_logged_in_user_id(None)
self.assert_json_error_contains(result, "Invalid OTP", 400)
result = self.client_get('/accounts/login/sso/',
dict(mobile_flow_otp="invalido" * 8),
REMOTE_USER=remote_user,
HTTP_USER_AGENT = "ZulipAndroid")
self.assert_logged_in_user_id(None)
self.assert_json_error_contains(result, "Invalid OTP", 400)
result = self.client_get('/accounts/login/sso/',
dict(mobile_flow_otp=mobile_flow_otp),
REMOTE_USER=remote_user,
HTTP_USER_AGENT = "ZulipAndroid")
self.assertEqual(result.status_code, 302)
redirect_url = result['Location']
parsed_url = urllib.parse.urlparse(redirect_url)
query_params = urllib.parse.parse_qs(parsed_url.query)
self.assertEqual(parsed_url.scheme, 'zulip')
self.assertEqual(query_params["realm"], ['http://zulip.testserver'])
self.assertEqual(query_params["email"], [self.example_email("hamlet")])
encrypted_api_key = query_params["otp_encrypted_api_key"][0]
hamlet_api_keys = get_all_api_keys(self.example_user('hamlet'))
self.assertIn(otp_decrypt_api_key(encrypted_api_key, mobile_flow_otp), hamlet_api_keys)
self.assertEqual(len(mail.outbox), 1)
self.assertIn('Zulip on Android', mail.outbox[0].body)
@override_settings(SEND_LOGIN_EMAILS=True)
@override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipRemoteUserBackend',
'zproject.backends.ZulipDummyBackend'))
def test_login_desktop_flow_otp_success_email(self) -> None:
user_profile = self.example_user('hamlet')
email = user_profile.delivery_email
user_profile.date_joined = timezone_now() - datetime.timedelta(seconds=61)
user_profile.save()
desktop_flow_otp = '1234abcd' * 8
# Verify that the right thing happens with an invalid-format OTP
result = self.client_get('/accounts/login/sso/',
dict(desktop_flow_otp="1234"),
REMOTE_USER=email)
self.assert_logged_in_user_id(None)
self.assert_json_error_contains(result, "Invalid OTP", 400)
result = self.client_get('/accounts/login/sso/',
dict(desktop_flow_otp="invalido" * 8),
REMOTE_USER=email)
self.assert_logged_in_user_id(None)
self.assert_json_error_contains(result, "Invalid OTP", 400)
result = self.client_get('/accounts/login/sso/',
dict(desktop_flow_otp=desktop_flow_otp),
REMOTE_USER=email)
self.verify_desktop_flow_end_page(result, email, desktop_flow_otp)
@override_settings(SEND_LOGIN_EMAILS=True)
@override_settings(SSO_APPEND_DOMAIN="zulip.com")
@override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipRemoteUserBackend',
'zproject.backends.ZulipDummyBackend'))
def test_login_desktop_flow_otp_success_username(self) -> None:
user_profile = self.example_user('hamlet')
email = user_profile.delivery_email
remote_user = email_to_username(email)
user_profile.date_joined = timezone_now() - datetime.timedelta(seconds=61)
user_profile.save()
desktop_flow_otp = '1234abcd' * 8
# Verify that the right thing happens with an invalid-format OTP
result = self.client_get('/accounts/login/sso/',
dict(desktop_flow_otp="1234"),
REMOTE_USER=remote_user)
self.assert_logged_in_user_id(None)
self.assert_json_error_contains(result, "Invalid OTP", 400)
result = self.client_get('/accounts/login/sso/',
dict(desktop_flow_otp="invalido" * 8),
REMOTE_USER=remote_user)
self.assert_logged_in_user_id(None)
self.assert_json_error_contains(result, "Invalid OTP", 400)
result = self.client_get('/accounts/login/sso/',
dict(desktop_flow_otp=desktop_flow_otp),
REMOTE_USER=remote_user)
self.verify_desktop_flow_end_page(result, email, desktop_flow_otp)
def test_redirect_to(self) -> None:
"""This test verifies the behavior of the redirect_to logic in
login_or_register_remote_user."""
def test_with_redirect_to_param_set_as_next(next: str='') -> HttpResponse:
user_profile = self.example_user('hamlet')
email = user_profile.delivery_email
with self.settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipRemoteUserBackend',)):
result = self.client_get('/accounts/login/sso/?next=' + next, REMOTE_USER=email)
return result
res = test_with_redirect_to_param_set_as_next()
self.assertEqual('http://zulip.testserver', res.url)
res = test_with_redirect_to_param_set_as_next('/user_uploads/image_path')
self.assertEqual('http://zulip.testserver/user_uploads/image_path', res.url)
# Third-party domains are rejected and just send you to root domain
res = test_with_redirect_to_param_set_as_next('https://rogue.zulip-like.server/login')
self.assertEqual('http://zulip.testserver', res.url)
# In SSO based auth we never make browser send the hash to the backend.
# Rather we depend upon the browser's behaviour of persisting hash anchors
# in between redirect requests. See below stackoverflow conversation
# https://stackoverflow.com/questions/5283395/url-hash-is-persisting-between-redirects
res = test_with_redirect_to_param_set_as_next('#narrow/stream/7-test-here')
self.assertEqual('http://zulip.testserver', res.url)
class TestJWTLogin(ZulipTestCase):
    """
    JWT uses ZulipDummyBackend.
    """

    def test_login_success(self) -> None:
        payload = {'user': 'hamlet', 'realm': 'zulip.com'}
        with self.settings(JWT_AUTH_KEYS={'zulip': {'key': 'key', 'algorithms': ['HS256']}}):
            email = self.example_email("hamlet")
            realm = get_realm('zulip')
            key = settings.JWT_AUTH_KEYS['zulip']['key']
            [algorithm] = settings.JWT_AUTH_KEYS['zulip']['algorithms']
            web_token = jwt.encode(payload, key, algorithm).decode('utf8')

            user_profile = get_user_by_delivery_email(email, realm)
            data = {'json_web_token': web_token}
            result = self.client_post('/accounts/login/jwt/', data)
            self.assertEqual(result.status_code, 302)
            self.assert_logged_in_user_id(user_profile.id)

    def test_login_failure_when_user_is_missing(self) -> None:
        # A token without a 'user' claim must be rejected.
        payload = {'realm': 'zulip.com'}
        with self.settings(JWT_AUTH_KEYS={'zulip': {'key': 'key', 'algorithms': ['HS256']}}):
            key = settings.JWT_AUTH_KEYS['zulip']['key']
            [algorithm] = settings.JWT_AUTH_KEYS['zulip']['algorithms']
            web_token = jwt.encode(payload, key, algorithm).decode('utf8')
            data = {'json_web_token': web_token}
            result = self.client_post('/accounts/login/jwt/', data)
            self.assert_json_error_contains(result, "No user specified in JSON web token claims", 400)

    def test_login_failure_when_realm_is_missing(self) -> None:
        # A token without a 'realm' claim must be rejected.
        payload = {'user': 'hamlet'}
        with self.settings(JWT_AUTH_KEYS={'zulip': {'key': 'key', 'algorithms': ['HS256']}}):
            key = settings.JWT_AUTH_KEYS['zulip']['key']
            [algorithm] = settings.JWT_AUTH_KEYS['zulip']['algorithms']
            web_token = jwt.encode(payload, key, algorithm).decode('utf8')
            data = {'json_web_token': web_token}
            result = self.client_post('/accounts/login/jwt/', data)
            self.assert_json_error_contains(result, "No organization specified in JSON web token claims", 400)

    def test_login_failure_when_key_does_not_exist(self) -> None:
        data = {'json_web_token': 'not relevant'}
        result = self.client_post('/accounts/login/jwt/', data)
        self.assert_json_error_contains(result, "Auth key for this subdomain not found.", 400)

    def test_login_failure_when_key_is_missing(self) -> None:
        with self.settings(JWT_AUTH_KEYS={'zulip': {'key': 'key', 'algorithms': ['HS256']}}):
            result = self.client_post('/accounts/login/jwt/')
            self.assert_json_error_contains(result, "No JSON web token passed in request", 400)

    def test_login_failure_when_bad_token_is_passed(self) -> None:
        with self.settings(JWT_AUTH_KEYS={'zulip': {'key': 'key', 'algorithms': ['HS256']}}):
            result = self.client_post('/accounts/login/jwt/')
            self.assert_json_error_contains(result, "No JSON web token passed in request", 400)
            data = {'json_web_token': 'bad token'}
            result = self.client_post('/accounts/login/jwt/', data)
            self.assert_json_error_contains(result, "Bad JSON web token", 400)

    def test_login_failure_when_user_does_not_exist(self) -> None:
        payload = {'user': 'nonexisting', 'realm': 'zulip.com'}
        with self.settings(JWT_AUTH_KEYS={'zulip': {'key': 'key', 'algorithms': ['HS256']}}):
            key = settings.JWT_AUTH_KEYS['zulip']['key']
            [algorithm] = settings.JWT_AUTH_KEYS['zulip']['algorithms']
            web_token = jwt.encode(payload, key, algorithm).decode('utf8')
            data = {'json_web_token': web_token}
            result = self.client_post('/accounts/login/jwt/', data)
            self.assertEqual(result.status_code, 200)  # This should ideally be not 200.
            self.assert_logged_in_user_id(None)

    def test_login_failure_due_to_wrong_subdomain(self) -> None:
        payload = {'user': 'hamlet', 'realm': 'zulip.com'}
        with self.settings(JWT_AUTH_KEYS={'acme': {'key': 'key', 'algorithms': ['HS256']}}):
            with mock.patch('zerver.views.auth.get_subdomain', return_value='acme'), \
                    mock.patch('logging.warning'):
                key = settings.JWT_AUTH_KEYS['acme']['key']
                [algorithm] = settings.JWT_AUTH_KEYS['acme']['algorithms']
                web_token = jwt.encode(payload, key, algorithm).decode('utf8')

                data = {'json_web_token': web_token}
                result = self.client_post('/accounts/login/jwt/', data)
                self.assert_json_error_contains(result, "Wrong subdomain", 400)
                self.assert_logged_in_user_id(None)

    def test_login_failure_due_to_empty_subdomain(self) -> None:
        payload = {'user': 'hamlet', 'realm': 'zulip.com'}
        with self.settings(JWT_AUTH_KEYS={'': {'key': 'key', 'algorithms': ['HS256']}}):
            with mock.patch('zerver.views.auth.get_subdomain', return_value=''), \
                    mock.patch('logging.warning'):
                key = settings.JWT_AUTH_KEYS['']['key']
                [algorithm] = settings.JWT_AUTH_KEYS['']['algorithms']
                web_token = jwt.encode(payload, key, algorithm).decode('utf8')

                data = {'json_web_token': web_token}
                result = self.client_post('/accounts/login/jwt/', data)
                self.assert_json_error_contains(result, "Wrong subdomain", 400)
                self.assert_logged_in_user_id(None)

    def test_login_success_under_subdomains(self) -> None:
        payload = {'user': 'hamlet', 'realm': 'zulip.com'}
        with self.settings(JWT_AUTH_KEYS={'zulip': {'key': 'key', 'algorithms': ['HS256']}}):
            with mock.patch('zerver.views.auth.get_subdomain', return_value='zulip'):
                key = settings.JWT_AUTH_KEYS['zulip']['key']
                [algorithm] = settings.JWT_AUTH_KEYS['zulip']['algorithms']
                web_token = jwt.encode(payload, key, algorithm).decode('utf8')

                data = {'json_web_token': web_token}
                result = self.client_post('/accounts/login/jwt/', data)
                self.assertEqual(result.status_code, 302)
                user_profile = self.example_user('hamlet')
                self.assert_logged_in_user_id(user_profile.id)
class DjangoToLDAPUsernameTests(ZulipTestCase):
    """Tests for ZulipLDAPAuthBackend.django_to_ldap_username, which maps a
    Django-side username or email to the LDAP username used for binding."""

    def setUp(self) -> None:
        # Bug fix: the override previously skipped the base-class setUp,
        # so ZulipTestCase's per-test initialization never ran (the sibling
        # ZulipLDAPTestCase.setUp does call it).
        super().setUp()
        self.init_default_ldap_database()
        self.backend = ZulipLDAPAuthBackend()

    def test_django_to_ldap_username_with_append_domain(self) -> None:
        with self.settings(LDAP_APPEND_DOMAIN="zulip.com"):
            self.assertEqual(self.backend.django_to_ldap_username("hamlet"), "hamlet")
            self.assertEqual(self.backend.django_to_ldap_username("hamlet@zulip.com"), "hamlet")
            # Emails outside the configured domain are rejected.
            with self.assertRaisesRegex(ZulipLDAPExceptionOutsideDomain,
                                        'Email hamlet@example.com does not match LDAP domain zulip.com.'):
                self.backend.django_to_ldap_username("hamlet@example.com")

            # A quoted uid containing '@' should still resolve.
            self.mock_ldap.directory['uid="hamlet@test",ou=users,dc=zulip,dc=com'] = {
                "cn": ["King Hamlet"],
                "uid": ['"hamlet@test"'],
            }
            username = self.backend.django_to_ldap_username('"hamlet@test"@zulip.com')
            self.assertEqual(username, '"hamlet@test"')

            # ...including when the quoted uid itself ends with '@zulip'.
            self.mock_ldap.directory['uid="hamlet@test"@zulip,ou=users,dc=zulip,dc=com'] = {
                "cn": ["King Hamlet"],
                "uid": ['"hamlet@test"@zulip'],
            }
            username = self.backend.django_to_ldap_username('"hamlet@test"@zulip')
            self.assertEqual(username, '"hamlet@test"@zulip')

    def test_django_to_ldap_username_with_email_search(self) -> None:
        self.assertEqual(self.backend.django_to_ldap_username("hamlet"),
                         self.ldap_username("hamlet"))
        self.assertEqual(self.backend.django_to_ldap_username("hamlet@zulip.com"),
                         self.ldap_username("hamlet"))
        # If there are no matches through the email search, raise exception:
        with self.assertRaises(ZulipLDAPExceptionNoMatchingLDAPUser):
            self.backend.django_to_ldap_username("no_such_email@example.com")

        self.assertEqual(self.backend.django_to_ldap_username("aaron@zulip.com"),
                         self.ldap_username("aaron"))

        # Ambiguous email matches are treated as "no matching user" and logged.
        with mock.patch("zproject.backends.logging.warning") as mock_warn:
            with self.assertRaises(ZulipLDAPExceptionNoMatchingLDAPUser):
                self.backend.django_to_ldap_username("shared_email@zulip.com")
            mock_warn.assert_called_with("Multiple users with email %s found in LDAP.", "shared_email@zulip.com")

        # Test on a weird case of a user whose uid is an email and his actual "mail"
        # attribute is a different email address:
        self.mock_ldap.directory['uid=some_user@organization_a.com,ou=users,dc=zulip,dc=com'] = {
            "cn": ["Some User"],
            "uid": ['some_user@organization_a.com'],
            "mail": ["some_user@contactaddress.com"],
        }
        self.assertEqual(self.backend.django_to_ldap_username("some_user@contactaddress.com"),
                         "some_user@organization_a.com")
        self.assertEqual(self.backend.django_to_ldap_username("some_user@organization_a.com"),
                         "some_user@organization_a.com")

        # Configure email search for emails in the uid attribute:
        with self.settings(AUTH_LDAP_REVERSE_EMAIL_SEARCH=LDAPSearch("ou=users,dc=zulip,dc=com",
                                                                     ldap.SCOPE_ONELEVEL,
                                                                     "(uid=%(email)s)")):
            self.assertEqual(self.backend.django_to_ldap_username("newuser_email_as_uid@zulip.com"),
                             "newuser_email_as_uid@zulip.com")

            self.mock_ldap.directory['uid="hamlet@test"@zulip.com",ou=users,dc=zulip,dc=com'] = {
                "cn": ["King Hamlet"],
                "uid": ['"hamlet@test"@zulip.com'],
            }
            username = self.backend.django_to_ldap_username('"hamlet@test"@zulip.com')
            self.assertEqual(username, '"hamlet@test"@zulip.com')

    @override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.EmailAuthBackend',
                                                'zproject.backends.ZulipLDAPAuthBackend'))
    def test_authenticate_to_ldap_via_email(self) -> None:
        """
        With AUTH_LDAP_REVERSE_EMAIL_SEARCH configured, django_to_ldap_username
        should be able to translate an email to ldap username,
        and thus it should be possible to authenticate through user_profile.delivery_email.
        """
        realm = get_realm("zulip")
        user_profile = self.example_user("hamlet")
        password = "testpassword"
        user_profile.set_password(password)
        user_profile.save()

        with self.settings(LDAP_EMAIL_ATTR='mail'):
            self.assertEqual(
                authenticate(request=mock.MagicMock(), username=user_profile.delivery_email,
                             password=self.ldap_password('hamlet'), realm=realm),
                user_profile)

    @override_settings(LDAP_EMAIL_ATTR='mail', LDAP_DEACTIVATE_NON_MATCHING_USERS=True)
    def test_sync_user_from_ldap_with_email_attr(self) -> None:
        """In LDAP configurations with LDAP_EMAIL_ATTR configured and
        LDAP_DEACTIVATE_NON_MATCHING_USERS set, a possible failure
        mode if django_to_ldap_username isn't configured correctly is
        all LDAP users having their accounts deactivated. Before the
        introduction of AUTH_LDAP_REVERSE_EMAIL_SEARCH, this would happen
        even in valid LDAP configurations using LDAP_EMAIL_ATTR.

        This test confirms that such a failure mode doesn't happen with
        a valid LDAP configuration.
        """
        user_profile = self.example_user("hamlet")
        with self.settings():
            sync_user_from_ldap(user_profile, mock.Mock())
            # Syncing didn't deactivate the user:
            self.assertTrue(user_profile.is_active)
class ZulipLDAPTestCase(ZulipTestCase):
    """Shared fixture base for LDAP backend tests: initializes the fake LDAP
    directory and a ZulipLDAPAuthBackend bound to the 'zulip' realm."""

    def setUp(self) -> None:
        super().setUp()

        self.init_default_ldap_database()
        hamlet = self.example_user('hamlet')
        self.setup_subdomain(hamlet)

        self.backend = ZulipLDAPAuthBackend()
        # Internally `_realm` and `_prereg_user` attributes are automatically set
        # by the `authenticate()` method. But for testing the `get_or_build_user()`
        # method separately, we need to set them manually.
        self.backend._realm = get_realm('zulip')
        self.backend._prereg_user = None

    def setup_subdomain(self, user_profile: UserProfile) -> None:
        # Pin the user's realm to the 'zulip' subdomain.
        realm = user_profile.realm
        realm.string_id = 'zulip'
        realm.save()
class TestLDAP(ZulipLDAPTestCase):
    def test_generate_dev_ldap_dir(self) -> None:
        """generate_dev_ldap_dir's three modes ('A'/'b'/'c') produce entries
        with the expected DN shapes and attribute sets."""
        common_attrs = ['cn', 'userPassword', 'phoneNumber', 'birthDate']

        directory = generate_dev_ldap_dir('A', 10)
        self.assertEqual(len(directory), 10)
        regex = re.compile(r'(uid\=)+[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+(\,ou\=users\,dc\=zulip\,dc\=com)')
        for dn, attrs in directory.items():
            self.assertTrue(regex.match(dn))
            self.assertCountEqual(list(attrs.keys()), common_attrs + ['uid', 'thumbnailPhoto', 'userAccountControl'])

        directory = generate_dev_ldap_dir('b', 9)
        self.assertEqual(len(directory), 9)
        regex = re.compile(r'(uid\=)+[a-zA-Z0-9_.+-]+(\,ou\=users\,dc\=zulip\,dc\=com)')
        for dn, attrs in directory.items():
            self.assertTrue(regex.match(dn))
            self.assertCountEqual(list(attrs.keys()), common_attrs + ['uid', 'jpegPhoto'])

        directory = generate_dev_ldap_dir('c', 8)
        self.assertEqual(len(directory), 8)
        regex = re.compile(r'(uid\=)+[a-zA-Z0-9_.+-]+(\,ou\=users\,dc\=zulip\,dc\=com)')
        for dn, attrs in directory.items():
            self.assertTrue(regex.match(dn))
            self.assertCountEqual(list(attrs.keys()), common_attrs + ['uid', 'email'])

    @override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',))
    def test_dev_ldap_fail_login(self) -> None:
        # Tests that login with a substring of password fails. We had a bug in
        # dev LDAP environment that allowed login via password substrings.
        self.mock_ldap.directory = generate_dev_ldap_dir('B', 8)
        with self.settings(
                AUTH_LDAP_USER_SEARCH = LDAPSearch("ou=users,dc=zulip,dc=com",
                                                   ldap.SCOPE_ONELEVEL, "(uid=%(user)s)"),
                AUTH_LDAP_REVERSE_EMAIL_SEARCH = LDAPSearch("ou=users,dc=zulip,dc=com",
                                                            ldap.SCOPE_ONELEVEL, "(email=%(email)s)"),
                LDAP_APPEND_DOMAIN='zulip.com',
        ):
            user_profile = self.backend.authenticate(request=mock.MagicMock(),
                                                     username='ldapuser1', password='dapu',
                                                     realm=get_realm('zulip'))
            assert(user_profile is None)
@override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',))
def test_login_success(self) -> None:
with self.settings(LDAP_APPEND_DOMAIN='zulip.com'):
user_profile = self.backend.authenticate(request=mock.MagicMock(),
username=self.example_email("hamlet"),
password=self.ldap_password('hamlet'),
realm=get_realm('zulip'))
assert(user_profile is not None)
self.assertEqual(user_profile.delivery_email, self.example_email("hamlet"))
@override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',))
def test_login_success_with_username(self) -> None:
with self.settings(LDAP_APPEND_DOMAIN='zulip.com'):
user_profile = self.backend.authenticate(request=mock.MagicMock(),
username="hamlet", password=self.ldap_password('hamlet'),
realm=get_realm('zulip'))
assert(user_profile is not None)
self.assertEqual(user_profile, self.example_user("hamlet"))
@override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',))
def test_login_success_with_email_attr(self) -> None:
with self.settings(LDAP_EMAIL_ATTR='mail'):
username = self.ldap_username("aaron")
user_profile = self.backend.authenticate(request=mock.MagicMock(),
username=username,
password=self.ldap_password(username),
realm=get_realm('zulip'))
assert (user_profile is not None)
self.assertEqual(user_profile, self.example_user("aaron"))
@override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.EmailAuthBackend',
'zproject.backends.ZulipLDAPAuthBackend'))
def test_email_and_ldap_backends_together(self) -> None:
with self.settings(
LDAP_EMAIL_ATTR='mail',
AUTH_LDAP_REVERSE_EMAIL_SEARCH = LDAPSearch("ou=users,dc=zulip,dc=com",
ldap.SCOPE_ONELEVEL,
"(mail=%(email)s)"),
AUTH_LDAP_USERNAME_ATTR = "uid",
):
realm = get_realm('zulip')
self.assertEqual(email_belongs_to_ldap(realm, self.example_email("aaron")), True)
username = self.ldap_username("aaron")
user_profile = ZulipLDAPAuthBackend().authenticate(request=mock.MagicMock(),
username=username,
password=self.ldap_password(username),
realm=realm)
self.assertEqual(user_profile, self.example_user("aaron"))
othello = self.example_user('othello')
password = "testpassword"
othello.set_password(password)
othello.save()
self.assertEqual(email_belongs_to_ldap(realm, othello.delivery_email), False)
user_profile = EmailAuthBackend().authenticate(request=mock.MagicMock(),
username=othello.delivery_email,
password=password,
realm=realm)
self.assertEqual(user_profile, othello)
@override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',))
def test_login_failure_due_to_wrong_password(self) -> None:
    # A bad password must produce None (auth failure), never an exception.
    with self.settings(LDAP_APPEND_DOMAIN='zulip.com'):
        result = self.backend.authenticate(
            request=mock.MagicMock(),
            username=self.example_email("hamlet"),
            password='wrong',
            realm=get_realm('zulip'),
        )
        self.assertIsNone(result)
@override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',))
def test_login_failure_due_to_nonexistent_user(self) -> None:
    # An email with no matching LDAP entry must also fail cleanly.
    with self.settings(LDAP_APPEND_DOMAIN='zulip.com'):
        result = self.backend.authenticate(
            request=mock.MagicMock(),
            username='nonexistent@zulip.com',
            password="doesnt_matter",
            realm=get_realm('zulip'),
        )
        self.assertIsNone(result)
@override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',))
def test_ldap_permissions(self) -> None:
    # The LDAP backend only authenticates; it must not grant any Django
    # model/object permissions.
    backend = self.backend
    self.assertFalse(backend.has_perm(None, None))
    self.assertFalse(backend.has_module_perms(None, None))
    # assertEqual instead of assertTrue(x == y): on failure unittest
    # then shows the actual permission sets in the error message.
    self.assertEqual(backend.get_all_permissions(None, None), set())
    self.assertEqual(backend.get_group_permissions(None, None), set())
@override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',))
def test_user_email_from_ldapuser_with_append_domain(self) -> None:
    # Even an unusual, quoted LDAP username gets "@<LDAP_APPEND_DOMAIN>"
    # appended verbatim when constructing the Zulip email address.
    backend = self.backend
    with self.settings(LDAP_APPEND_DOMAIN='zulip.com'):
        username = backend.user_email_from_ldapuser('this_argument_is_ignored',
                                                    _LDAPUser(self.backend, username='"hamlet@test"'))
        self.assertEqual(username, '"hamlet@test"@zulip.com')
@override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',))
def test_get_or_build_user_when_user_exists(self) -> None:
    # Minimal stand-in for django_auth_ldap's _LDAPUser: only .attrs is read.
    class _LDAPUser:
        attrs = {'fn': ['Full Name'], 'sn': ['Short Name']}

    backend = self.backend
    email = self.example_email("hamlet")
    user_profile, created = backend.get_or_build_user(str(email), _LDAPUser())
    # Existing account: returned, not created.
    self.assertFalse(created)
    self.assertEqual(user_profile.delivery_email, email)
@override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',))
def test_get_or_build_user_when_user_does_not_exist(self) -> None:
    # Minimal stand-in for django_auth_ldap's _LDAPUser: only .attrs is read.
    class _LDAPUser:
        attrs = {'fn': ['Full Name'], 'sn': ['Short Name']}

    ldap_user_attr_map = {'full_name': 'fn', 'short_name': 'sn'}

    with self.settings(AUTH_LDAP_USER_ATTR_MAP=ldap_user_attr_map):
        backend = self.backend
        email = 'newuser@zulip.com'
        user_profile, created = backend.get_or_build_user(email, _LDAPUser())
        # Unknown email: a fresh account is built with the mapped full name.
        self.assertTrue(created)
        self.assertEqual(user_profile.delivery_email, email)
        self.assertEqual(user_profile.full_name, 'Full Name')
@override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',))
def test_get_or_build_user_when_user_has_invalid_name(self) -> None:
    # An LDAP full name containing disallowed characters must be rejected.
    class _LDAPUser:
        attrs = {'fn': ['<invalid name>'], 'sn': ['Short Name']}

    ldap_user_attr_map = {'full_name': 'fn', 'short_name': 'sn'}

    with self.settings(AUTH_LDAP_USER_ATTR_MAP=ldap_user_attr_map):
        backend = self.backend
        email = 'nonexisting@zulip.com'
        with self.assertRaisesRegex(Exception, "Invalid characters in name!"):
            backend.get_or_build_user(email, _LDAPUser())
@override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',))
def test_get_or_build_user_when_realm_is_deactivated(self) -> None:
    # Account creation must be refused once the target realm is deactivated.
    class _LDAPUser:
        attrs = {'fn': ['Full Name'], 'sn': ['Short Name']}

    ldap_user_attr_map = {'full_name': 'fn', 'short_name': 'sn'}

    with self.settings(AUTH_LDAP_USER_ATTR_MAP=ldap_user_attr_map):
        backend = self.backend
        email = 'nonexisting@zulip.com'
        do_deactivate_realm(backend._realm)
        with self.assertRaisesRegex(Exception, 'Realm has been deactivated'):
            backend.get_or_build_user(email, _LDAPUser())
@override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',))
def test_get_or_build_user_when_ldap_has_no_email_attr(self) -> None:
    # If LDAP_EMAIL_ATTR names an attribute the directory entry lacks,
    # user creation must fail with an explicit error.
    class _LDAPUser:
        attrs = {'fn': ['Full Name'], 'sn': ['Short Name']}

    nonexisting_attr = 'email'
    with self.settings(LDAP_EMAIL_ATTR=nonexisting_attr):
        backend = self.backend
        email = 'nonexisting@zulip.com'
        with self.assertRaisesRegex(Exception, 'LDAP user doesn\'t have the needed email attribute'):
            backend.get_or_build_user(email, _LDAPUser())
@override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',))
def test_get_or_build_user_email(self) -> None:
    # New-account emails coming from LDAP must still pass the realm's
    # email policies (disposable domains, '+' addresses, allowed domains).
    class _LDAPUser:
        attrs = {'fn': ['Test User']}

    ldap_user_attr_map = {'full_name': 'fn'}

    with self.settings(AUTH_LDAP_USER_ATTR_MAP=ldap_user_attr_map):
        realm = self.backend._realm
        realm.emails_restricted_to_domains = False
        realm.disallow_disposable_email_addresses = True
        realm.save()

        # mailnator.com is treated as a disposable-email domain here.
        email = 'spam@mailnator.com'
        with self.assertRaisesRegex(ZulipLDAPException, 'Email validation failed.'):
            self.backend.get_or_build_user(email, _LDAPUser())

        realm.emails_restricted_to_domains = True
        realm.save(update_fields=['emails_restricted_to_domains'])

        # '+' addresses are rejected under restricted domains.
        email = 'spam+spam@mailnator.com'
        with self.assertRaisesRegex(ZulipLDAPException, 'Email validation failed.'):
            self.backend.get_or_build_user(email, _LDAPUser())

        # acme.com is not an allowed domain for this realm.
        email = 'spam@acme.com'
        with self.assertRaisesRegex(ZulipLDAPException, "This email domain isn't allowed in this organization."):
            self.backend.get_or_build_user(email, _LDAPUser())
@override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',))
def test_get_or_build_user_when_ldap_has_no_full_name_mapping(self) -> None:
    # An empty AUTH_LDAP_USER_ATTR_MAP means we cannot derive a full
    # name; account creation must fail loudly rather than guess.
    class _LDAPUser:
        attrs = {'fn': ['Full Name'], 'sn': ['Short Name']}

    with self.settings(AUTH_LDAP_USER_ATTR_MAP={}):
        backend = self.backend
        email = 'nonexisting@zulip.com'
        with self.assertRaisesRegex(Exception, "Missing required mapping for user's full name"):
            backend.get_or_build_user(email, _LDAPUser())
@override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',))
def test_login_failure_when_domain_does_not_match(self) -> None:
    # A zulip.com address cannot authenticate when the backend is
    # configured to append acme.com.
    with self.settings(LDAP_APPEND_DOMAIN='acme.com'):
        user_profile = self.backend.authenticate(request=mock.MagicMock(),
                                                 username=self.example_email("hamlet"),
                                                 password=self.ldap_password('hamlet'),
                                                 realm=get_realm('zulip'))
        self.assertIs(user_profile, None)
@override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',))
def test_login_success_with_different_subdomain(self) -> None:
    # An LDAP user may authenticate into a realm ('acme') other than the
    # one their existing example account lives in.
    ldap_user_attr_map = {'full_name': 'cn', 'short_name': 'sn'}

    Realm.objects.create(string_id='acme')
    with self.settings(
            LDAP_APPEND_DOMAIN='zulip.com',
            AUTH_LDAP_USER_ATTR_MAP=ldap_user_attr_map):
        user_profile = self.backend.authenticate(request=mock.MagicMock(),
                                                 username=self.example_email('hamlet'),
                                                 password=self.ldap_password('hamlet'),
                                                 realm=get_realm('acme'))
        self.assertEqual(user_profile.delivery_email, self.example_email('hamlet'))
@override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',))
def test_login_success_with_valid_subdomain(self) -> None:
    # Baseline success case: correct password, matching realm.
    with self.settings(LDAP_APPEND_DOMAIN='zulip.com'):
        user_profile = self.backend.authenticate(request=mock.MagicMock(),
                                                 username=self.example_email("hamlet"),
                                                 password=self.ldap_password('hamlet'),
                                                 realm=get_realm('zulip'))
        assert(user_profile is not None)
        self.assertEqual(user_profile.delivery_email, self.example_email("hamlet"))
@override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',))
def test_login_failure_due_to_deactivated_user(self) -> None:
    # A deactivated Zulip account must not authenticate even with valid
    # LDAP credentials.
    user_profile = self.example_user("hamlet")
    do_deactivate_user(user_profile)
    with self.settings(LDAP_APPEND_DOMAIN='zulip.com'):
        user_profile = self.backend.authenticate(request=mock.MagicMock(),
                                                 username=self.example_email("hamlet"),
                                                 password=self.ldap_password('hamlet'),
                                                 realm=get_realm('zulip'))
        self.assertIs(user_profile, None)
@override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',))
@override_settings(AUTH_LDAP_USER_ATTR_MAP={
    "full_name": "cn",
    "avatar": "jpegPhoto",
})
def test_login_success_when_user_does_not_exist_with_valid_subdomain(self) -> None:
    # First login of an LDAP-only user: the account is created on the
    # fly, including syncing the avatar from the jpegPhoto attribute.
    RealmDomain.objects.create(realm=self.backend._realm, domain='acme.com')
    with self.settings(LDAP_APPEND_DOMAIN='acme.com'):
        user_profile = self.backend.authenticate(request=mock.MagicMock(),
                                                 username='newuser@acme.com',
                                                 password=self.ldap_password("newuser"),
                                                 realm=get_realm('zulip'))
        assert(user_profile is not None)
        self.assertEqual(user_profile.delivery_email, 'newuser@acme.com')
        self.assertEqual(user_profile.full_name, 'New LDAP fullname')
        self.assertEqual(user_profile.realm.string_id, 'zulip')

        # Verify avatar gets created
        self.assertEqual(user_profile.avatar_source, UserProfile.AVATAR_FROM_USER)
        result = self.client_get(avatar_url(user_profile))
        self.assertEqual(result.status_code, 200)
@override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',))
def test_login_success_when_user_does_not_exist_with_split_full_name_mapping(self) -> None:
    # first_name/last_name mapped to separate LDAP attributes are joined
    # into a single Zulip full name on account creation.
    with self.settings(
            LDAP_APPEND_DOMAIN='zulip.com',
            AUTH_LDAP_USER_ATTR_MAP={'first_name': 'sn', 'last_name': 'cn'}):
        user_profile = self.backend.authenticate(request=mock.MagicMock(),
                                                 username='newuser_splitname@zulip.com',
                                                 password=self.ldap_password("newuser_splitname"),
                                                 realm=get_realm('zulip'))
        assert(user_profile is not None)
        self.assertEqual(user_profile.delivery_email, 'newuser_splitname@zulip.com')
        self.assertEqual(user_profile.full_name, 'First Last')
        self.assertEqual(user_profile.realm.string_id, 'zulip')
class TestZulipLDAPUserPopulator(ZulipLDAPTestCase):
    """Tests for ZulipLDAPUserPopulator / sync_user_from_ldap: the LDAP
    data-sync backend that updates names, activation state, avatars and
    custom profile fields, but never authenticates logins."""

    def test_authenticate(self) -> None:
        # The populator backend must refuse to authenticate.
        backend = ZulipLDAPUserPopulator()
        result = backend.authenticate(username=self.example_email("hamlet"),
                                      password=self.ldap_password("hamlet"),
                                      realm=get_realm('zulip'))
        self.assertIs(result, None)

    def perform_ldap_sync(self, user_profile: UserProfile) -> None:
        # Helper: run a sync for user_profile and assert it reports success.
        with self.settings(LDAP_APPEND_DOMAIN='zulip.com'):
            result = sync_user_from_ldap(user_profile, mock.Mock())
            self.assertTrue(result)

    @mock.patch("zproject.backends.do_deactivate_user")
    def test_ldaperror_doesnt_deactivate_user(self, mock_deactivate: mock.MagicMock) -> None:
        """
        This is a test for a bug where failure to connect to LDAP in sync_user_from_ldap
        (e.g. due to invalid credentials) would cause the user to be deactivated if
        LDAP_DEACTIVATE_NON_MATCHING_USERS was True.
        Details: https://github.com/zulip/zulip/issues/13130
        """
        with self.settings(
                LDAP_DEACTIVATE_NON_MATCHING_USERS=True,
                LDAP_APPEND_DOMAIN='zulip.com',
                AUTH_LDAP_BIND_PASSWORD='wrongpass'):
            with self.assertRaises(ldap.INVALID_CREDENTIALS):
                sync_user_from_ldap(self.example_user('hamlet'), mock.Mock())
            mock_deactivate.assert_not_called()

            # Make sure other types of LDAPError won't cause deactivation either:
            with mock.patch.object(_LDAPUser, '_get_or_create_user', side_effect=ldap.LDAPError):
                with self.assertRaises(PopulateUserLDAPError):
                    sync_user_from_ldap(self.example_user('hamlet'), mock.Mock())
                mock_deactivate.assert_not_called()

    @override_settings(LDAP_EMAIL_ATTR="mail")
    def test_populate_user_returns_none(self) -> None:
        # populate_user() returning None is an error condition, not a no-op.
        with mock.patch.object(ZulipLDAPUser, 'populate_user', return_value=None):
            with self.assertRaises(PopulateUserLDAPError):
                sync_user_from_ldap(self.example_user('hamlet'), mock.Mock())

    def test_update_full_name(self) -> None:
        self.change_ldap_user_attr('hamlet', 'cn', 'New Name')

        self.perform_ldap_sync(self.example_user('hamlet'))
        hamlet = self.example_user('hamlet')
        self.assertEqual(hamlet.full_name, 'New Name')

    def test_update_with_hidden_emails(self) -> None:
        # Syncing must still work when the realm hides email addresses.
        hamlet = self.example_user('hamlet')
        realm = get_realm("zulip")
        do_set_realm_property(realm, 'email_address_visibility', Realm.EMAIL_ADDRESS_VISIBILITY_ADMINS)
        hamlet.refresh_from_db()

        self.change_ldap_user_attr('hamlet', 'cn', 'New Name')
        self.perform_ldap_sync(hamlet)

        hamlet.refresh_from_db()
        self.assertEqual(hamlet.full_name, 'New Name')

    def test_update_split_full_name(self) -> None:
        # first_name/last_name mapping joins the two attributes.
        self.change_ldap_user_attr('hamlet', 'cn', 'Name')
        self.change_ldap_user_attr('hamlet', 'sn', 'Full')

        with self.settings(AUTH_LDAP_USER_ATTR_MAP={'first_name': 'sn',
                                                    'last_name': 'cn'}):
            self.perform_ldap_sync(self.example_user('hamlet'))
        hamlet = self.example_user('hamlet')
        self.assertEqual(hamlet.full_name, 'Full Name')

    def test_same_full_name(self) -> None:
        # Unchanged name: no rename action should be issued.
        with mock.patch('zerver.lib.actions.do_change_full_name') as fn:
            self.perform_ldap_sync(self.example_user('hamlet'))
            fn.assert_not_called()

    def test_too_short_name(self) -> None:
        self.change_ldap_user_attr('hamlet', 'cn', 'a')

        with self.assertRaises(ZulipLDAPException):
            self.perform_ldap_sync(self.example_user('hamlet'))

    def test_deactivate_user(self) -> None:
        # userAccountControl == '2' marks the account disabled in LDAP.
        self.change_ldap_user_attr('hamlet', 'userAccountControl', '2')

        with self.settings(AUTH_LDAP_USER_ATTR_MAP={'full_name': 'cn',
                                                    'userAccountControl': 'userAccountControl'}):
            self.perform_ldap_sync(self.example_user('hamlet'))
        hamlet = self.example_user('hamlet')
        self.assertFalse(hamlet.is_active)

    @mock.patch("zproject.backends.ZulipLDAPAuthBackendBase.sync_full_name_from_ldap")
    def test_dont_sync_disabled_ldap_user(self, fake_sync: mock.MagicMock) -> None:
        # A disabled LDAP account is deactivated without syncing its name.
        self.change_ldap_user_attr('hamlet', 'userAccountControl', '2')

        with self.settings(AUTH_LDAP_USER_ATTR_MAP={'full_name': 'cn',
                                                    'userAccountControl': 'userAccountControl'}):
            self.perform_ldap_sync(self.example_user('hamlet'))
            fake_sync.assert_not_called()

    def test_reactivate_user(self) -> None:
        do_deactivate_user(self.example_user('hamlet'))

        with self.settings(AUTH_LDAP_USER_ATTR_MAP={'full_name': 'cn',
                                                    'userAccountControl': 'userAccountControl'}):
            self.perform_ldap_sync(self.example_user('hamlet'))
        hamlet = self.example_user('hamlet')
        self.assertTrue(hamlet.is_active)

    def test_user_in_multiple_realms(self) -> None:
        # Syncing one realm's account must not touch the same user's
        # account in another realm.
        test_realm = do_create_realm('test', 'test', False)
        hamlet = self.example_user('hamlet')
        email = hamlet.delivery_email
        hamlet2 = do_create_user(
            email,
            None,
            test_realm,
            hamlet.full_name,
            hamlet.short_name)

        self.change_ldap_user_attr('hamlet', 'cn', 'Second Hamlet')
        expected_call_args = [hamlet2, 'Second Hamlet', None]
        with self.settings(AUTH_LDAP_USER_ATTR_MAP={'full_name': 'cn'}):
            with mock.patch('zerver.lib.actions.do_change_full_name') as f:
                self.perform_ldap_sync(hamlet2)
                f.assert_called_once_with(*expected_call_args)

                # Get the updated model and make sure the full name is changed correctly:
                hamlet2 = get_user_by_delivery_email(email, test_realm)
                self.assertEqual(hamlet2.full_name, "Second Hamlet")
                # Now get the original hamlet and make sure he still has his name unchanged:
                hamlet = self.example_user('hamlet')
                self.assertEqual(hamlet.full_name, "King Hamlet")

    def test_user_not_found_in_ldap(self) -> None:
        with self.settings(
                LDAP_DEACTIVATE_NON_MATCHING_USERS=False,
                LDAP_APPEND_DOMAIN='zulip.com'):
            othello = self.example_user("othello")  # othello isn't in our test directory
            mock_logger = mock.MagicMock()
            result = sync_user_from_ldap(othello, mock_logger)
            mock_logger.warning.assert_called_once_with(
                "Did not find %s in LDAP.", othello.delivery_email)
            self.assertFalse(result)

            do_deactivate_user(othello)
            mock_logger = mock.MagicMock()
            result = sync_user_from_ldap(othello, mock_logger)
            self.assertEqual(mock_logger.method_calls, [])  # In this case the logger shouldn't be used.
            self.assertFalse(result)

    def test_update_user_avatar(self) -> None:
        # Hamlet has jpegPhoto set in our test directory by default.
        with mock.patch('zerver.lib.upload.upload_avatar_image') as fn, \
                self.settings(AUTH_LDAP_USER_ATTR_MAP={'full_name': 'cn',
                                                       'avatar': 'jpegPhoto'}):
            self.perform_ldap_sync(self.example_user('hamlet'))
            fn.assert_called_once()
            hamlet = self.example_user('hamlet')
            self.assertEqual(hamlet.avatar_source, UserProfile.AVATAR_FROM_USER)

            # Verify that the next time we do an LDAP sync, we don't
            # end up updating this user's avatar again if the LDAP
            # data hasn't changed.
            self.perform_ldap_sync(self.example_user('hamlet'))
            fn.assert_called_once()

        # Now verify that if we do change the jpegPhoto image, we
        # will upload a new avatar.
        self.change_ldap_user_attr('hamlet', 'jpegPhoto', static_path("images/logo/zulip-icon-512x512.png"),
                                   binary=True)
        with mock.patch('zerver.lib.upload.upload_avatar_image') as fn, \
                self.settings(AUTH_LDAP_USER_ATTR_MAP={'full_name': 'cn',
                                                       'avatar': 'jpegPhoto'}):
            self.perform_ldap_sync(self.example_user('hamlet'))
            fn.assert_called_once()
            hamlet = self.example_user('hamlet')
            self.assertEqual(hamlet.avatar_source, UserProfile.AVATAR_FROM_USER)

    @use_s3_backend
    def test_update_user_avatar_for_s3(self) -> None:
        # Same avatar-sync flow, but exercising the S3 storage backend.
        bucket = create_s3_buckets(settings.S3_AVATAR_BUCKET)[0]
        test_image_file = get_test_image_file('img.png').name
        with open(test_image_file, 'rb') as f:
            test_image_data = f.read()
        self.change_ldap_user_attr('hamlet', 'jpegPhoto', test_image_data)

        with self.settings(AUTH_LDAP_USER_ATTR_MAP={'full_name': 'cn',
                                                    'avatar': 'jpegPhoto'}):
            self.perform_ldap_sync(self.example_user('hamlet'))

        hamlet = self.example_user('hamlet')
        path_id = user_avatar_path(hamlet)
        original_image_path_id = path_id + ".original"
        medium_path_id = path_id + "-medium.png"

        original_image_key = bucket.Object(original_image_path_id)
        medium_image_key = bucket.Object(medium_path_id)

        # The original upload must be stored byte-for-byte.
        image_data = original_image_key.get()['Body'].read()
        self.assertEqual(image_data, test_image_data)

        # And a correctly resized medium variant must exist alongside it.
        test_medium_image_data = resize_avatar(test_image_data, MEDIUM_AVATAR_SIZE)
        medium_image_data = medium_image_key.get()['Body'].read()
        self.assertEqual(medium_image_data, test_medium_image_data)

        # Try to use invalid data as the image:
        self.change_ldap_user_attr('hamlet', 'jpegPhoto', b'00' + test_image_data)
        with self.settings(AUTH_LDAP_USER_ATTR_MAP={'full_name': 'cn',
                                                    'avatar': 'jpegPhoto'}):
            with mock.patch('logging.warning') as mock_warning:
                self.perform_ldap_sync(self.example_user('hamlet'))
                mock_warning.assert_called_once_with(
                    'Could not parse %s field for user %s', 'jpegPhoto', hamlet.id,
                )

    def test_deactivate_non_matching_users(self) -> None:
        with self.settings(LDAP_APPEND_DOMAIN='zulip.com',
                           LDAP_DEACTIVATE_NON_MATCHING_USERS=True):
            # othello isn't in our test directory
            result = sync_user_from_ldap(self.example_user('othello'), mock.Mock())

            self.assertTrue(result)
            othello = self.example_user('othello')
            self.assertFalse(othello.is_active)

    def test_update_custom_profile_field(self) -> None:
        with self.settings(AUTH_LDAP_USER_ATTR_MAP={'full_name': 'cn',
                                                    'custom_profile_field__phone_number': 'homePhone',
                                                    'custom_profile_field__birthday': 'birthDate'}):
            self.perform_ldap_sync(self.example_user('hamlet'))
        hamlet = self.example_user('hamlet')
        test_data = [
            {
                'field_name': 'Phone number',
                'expected_value': '123456789',
            },
            {
                'field_name': 'Birthday',
                'expected_value': '1900-09-08',
            },
        ]
        for test_case in test_data:
            field = CustomProfileField.objects.get(realm=hamlet.realm, name=test_case['field_name'])
            field_value = CustomProfileFieldValue.objects.get(user_profile=hamlet, field=field).value
            self.assertEqual(field_value, test_case['expected_value'])

    def test_update_non_existent_profile_field(self) -> None:
        # Mapping to a custom field the realm doesn't define is an error.
        with self.settings(AUTH_LDAP_USER_ATTR_MAP={'full_name': 'cn',
                                                    'custom_profile_field__non_existent': 'homePhone'}):
            with self.assertRaisesRegex(ZulipLDAPException, 'Custom profile field with name non_existent not found'):
                self.perform_ldap_sync(self.example_user('hamlet'))

    def test_update_custom_profile_field_invalid_data(self) -> None:
        # '9999' is not a valid date for the Birthday field.
        self.change_ldap_user_attr('hamlet', 'birthDate', '9999')
        with self.settings(AUTH_LDAP_USER_ATTR_MAP={'full_name': 'cn',
                                                    'custom_profile_field__birthday': 'birthDate'}):
            with self.assertRaisesRegex(ZulipLDAPException, 'Invalid data for birthday field'):
                self.perform_ldap_sync(self.example_user('hamlet'))

    def test_update_custom_profile_field_no_mapping(self) -> None:
        # Fields not present in the attr map must be left untouched.
        hamlet = self.example_user('hamlet')
        no_op_field = CustomProfileField.objects.get(realm=hamlet.realm, name='Phone number')
        expected_value = CustomProfileFieldValue.objects.get(user_profile=hamlet, field=no_op_field).value

        with self.settings(AUTH_LDAP_USER_ATTR_MAP={'full_name': 'cn',
                                                    'custom_profile_field__birthday': 'birthDate'}):
            self.perform_ldap_sync(self.example_user('hamlet'))

        actual_value = CustomProfileFieldValue.objects.get(user_profile=hamlet, field=no_op_field).value
        self.assertEqual(actual_value, expected_value)

    def test_update_custom_profile_field_no_update(self) -> None:
        # Only fields whose values actually changed should be written.
        hamlet = self.example_user('hamlet')
        phone_number_field = CustomProfileField.objects.get(realm=hamlet.realm, name='Phone number')
        birthday_field = CustomProfileField.objects.get(realm=hamlet.realm, name='Birthday')
        phone_number_field_value = CustomProfileFieldValue.objects.get(user_profile=hamlet,
                                                                       field=phone_number_field)
        # Pre-set the phone number to the LDAP value so only the birthday
        # is expected in the update call.
        phone_number_field_value.value = '123456789'
        phone_number_field_value.save(update_fields=['value'])
        expected_call_args = [hamlet, [
            {
                'id': birthday_field.id,
                'value': '1900-09-08',
            },
        ]]
        with self.settings(AUTH_LDAP_USER_ATTR_MAP={'full_name': 'cn',
                                                    'custom_profile_field__birthday': 'birthDate',
                                                    'custom_profile_field__phone_number': 'homePhone'}):
            with mock.patch('zproject.backends.do_update_user_custom_profile_data_if_changed') as f:
                self.perform_ldap_sync(self.example_user('hamlet'))
                f.assert_called_once_with(*expected_call_args)

    def test_update_custom_profile_field_not_present_in_ldap(self) -> None:
        # A mapped LDAP attribute missing from the entry leaves the
        # existing field value untouched.
        hamlet = self.example_user('hamlet')
        no_op_field = CustomProfileField.objects.get(realm=hamlet.realm, name='Birthday')
        expected_value = CustomProfileFieldValue.objects.get(user_profile=hamlet, field=no_op_field).value

        with self.settings(AUTH_LDAP_USER_ATTR_MAP={'full_name': 'cn',
                                                    'custom_profile_field__birthday': 'nonExistantAttr'}):
            self.perform_ldap_sync(self.example_user('hamlet'))

        actual_value = CustomProfileFieldValue.objects.get(user_profile=hamlet, field=no_op_field).value
        self.assertEqual(actual_value, expected_value)
class TestQueryLDAP(ZulipLDAPTestCase):
    """Tests for the query_ldap() management helper, which reports the
    values LDAP would sync for a given email address."""

    @override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.EmailAuthBackend',))
    def test_ldap_not_configured(self) -> None:
        values = query_ldap(self.example_email('hamlet'))
        self.assertEqual(values, ['LDAP backend not configured on this server.'])

    @override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',))
    def test_user_not_present(self) -> None:
        # othello doesn't have an entry in our test directory
        values = query_ldap(self.example_email('othello'))
        self.assert_length(values, 1)
        self.assertIn('No such user found', values[0])

    @override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',))
    def test_normal_query(self) -> None:
        with self.settings(AUTH_LDAP_USER_ATTR_MAP={'full_name': 'cn',
                                                    'short_name': 'sn',
                                                    'avatar': 'jpegPhoto',
                                                    'custom_profile_field__birthday': 'birthDate',
                                                    'custom_profile_field__phone_number': 'nonExistentAttr',
                                                    }):
            values = query_ldap(self.example_email('hamlet'))
        # Consistency with test_user_not_present: use assert_length
        # rather than assertEqual(len(...), ...) for clearer failures.
        self.assert_length(values, 5)
        self.assertIn('full_name: King Hamlet', values)
        self.assertIn('short_name: Hamlet', values)
        self.assertIn('avatar: (An avatar image file)', values)
        self.assertIn('custom_profile_field__birthday: 1900-09-08', values)
        self.assertIn('custom_profile_field__phone_number: LDAP field not present', values)

    @override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',))
    def test_query_email_attr(self) -> None:
        with self.settings(AUTH_LDAP_USER_ATTR_MAP={'full_name': 'cn',
                                                    'short_name': 'sn'},
                           LDAP_EMAIL_ATTR='mail'):
            # This will look up the user by email in our test dictionary,
            # should successfully find hamlet's ldap entry.
            values = query_ldap(self.example_email('hamlet'))
        self.assert_length(values, 3)
        self.assertIn('full_name: King Hamlet', values)
        self.assertIn('short_name: Hamlet', values)
        self.assertIn('email: hamlet@zulip.com', values)
class TestZulipAuthMixin(ZulipTestCase):
    """The shared auth mixin's get_user() must return None for IDs that
    don't correspond to any user."""

    def test_get_user(self) -> None:
        mixin = ZulipAuthMixin()
        nonexistent_user_id = 11111
        self.assertIsNone(mixin.get_user(nonexistent_user_id))
class TestPasswordAuthEnabled(ZulipTestCase):
    """password_auth_enabled() counts the LDAP backend as a password
    backend."""

    def test_password_auth_enabled_for_ldap(self) -> None:
        ldap_only = ('zproject.backends.ZulipLDAPAuthBackend',)
        with self.settings(AUTHENTICATION_BACKENDS=ldap_only):
            zulip_realm = Realm.objects.get(string_id='zulip')
            self.assertTrue(password_auth_enabled(zulip_realm))
class TestRequireEmailFormatUsernames(ZulipTestCase):
    """require_email_format_usernames(realm) should be False whenever an
    enabled LDAP backend can map non-email usernames to accounts (via
    LDAP_APPEND_DOMAIN or LDAP_EMAIL_ATTR), and True for email-only
    authentication."""

    def _check_require_email(self, expected: bool, **overrides: object) -> None:
        # Apply the given settings overrides and check the flag for the
        # 'zulip' realm.
        with self.settings(**overrides):
            zulip_realm = Realm.objects.get(string_id='zulip')
            self.assertEqual(require_email_format_usernames(zulip_realm), expected)

    def test_require_email_format_usernames_for_ldap_with_append_domain(
            self) -> None:
        self._check_require_email(
            False,
            AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',),
            LDAP_APPEND_DOMAIN="zulip.com")

    def test_require_email_format_usernames_for_ldap_with_email_attr(
            self) -> None:
        self._check_require_email(
            False,
            AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',),
            LDAP_EMAIL_ATTR="email")

    def test_require_email_format_usernames_for_email_only(self) -> None:
        self._check_require_email(
            True,
            AUTHENTICATION_BACKENDS=('zproject.backends.EmailAuthBackend',))

    def test_require_email_format_usernames_for_email_and_ldap_with_email_attr(
            self) -> None:
        self._check_require_email(
            False,
            AUTHENTICATION_BACKENDS=('zproject.backends.EmailAuthBackend',
                                     'zproject.backends.ZulipLDAPAuthBackend'),
            LDAP_EMAIL_ATTR="email")

    def test_require_email_format_usernames_for_email_and_ldap_with_append_email(
            self) -> None:
        self._check_require_email(
            False,
            AUTHENTICATION_BACKENDS=('zproject.backends.EmailAuthBackend',
                                     'zproject.backends.ZulipLDAPAuthBackend'),
            LDAP_APPEND_DOMAIN="zulip.com")
class TestMaybeSendToRegistration(ZulipTestCase):
    """Tests for maybe_send_to_registration(), the SSO-signup handoff
    that redirects an authenticated-but-unregistered email into the
    registration flow via a Confirmation link."""

    def test_sso_only_when_preregistration_user_does_not_exist(self) -> None:
        rf = RequestFactory()
        request = rf.get('/')
        request.session = {}
        request.user = None

        # Creating a mock Django form in order to keep the test simple.
        # This form will be returned by the mocked HomepageForm
        # and will always be valid so that the code that we want to test
        # actually runs.
        class Form:
            def is_valid(self) -> bool:
                return True

        with mock.patch('zerver.views.auth.HomepageForm', return_value=Form()):
            self.assertEqual(PreregistrationUser.objects.all().count(), 0)
            result = maybe_send_to_registration(request, self.example_email("hamlet"), is_signup=True)
            self.assertEqual(result.status_code, 302)
            confirmation = Confirmation.objects.all().first()
            confirmation_key = confirmation.confirmation_key
            self.assertIn('do_confirm/' + confirmation_key, result.url)
            # A PreregistrationUser is created as part of the handoff.
            self.assertEqual(PreregistrationUser.objects.all().count(), 1)
        result = self.client_get(result.url)
        self.assert_in_response('action="/accounts/register/"', result)
        self.assert_in_response(f'value="{confirmation_key}" name="key"', result)

    def test_sso_only_when_preregistration_user_exists(self) -> None:
        rf = RequestFactory()
        request = rf.get('/')
        request.session = {}
        request.user = None

        # Creating a mock Django form in order to keep the test simple.
        # This form will be returned by the mocked HomepageForm
        # and will always be valid so that the code that we want to test
        # actually runs.
        class Form:
            def is_valid(self) -> bool:
                return True

        email = self.example_email("hamlet")
        user = PreregistrationUser(email=email)
        user.save()

        with mock.patch('zerver.views.auth.HomepageForm', return_value=Form()):
            self.assertEqual(PreregistrationUser.objects.all().count(), 1)
            result = maybe_send_to_registration(request, email, is_signup=True)
            self.assertEqual(result.status_code, 302)
            confirmation = Confirmation.objects.all().first()
            confirmation_key = confirmation.confirmation_key
            self.assertIn('do_confirm/' + confirmation_key, result.url)
            # The existing PreregistrationUser is reused, not duplicated.
            self.assertEqual(PreregistrationUser.objects.all().count(), 1)
class TestAdminSetBackends(ZulipTestCase):
    """Tests for PATCH /json/realm 'authentication_methods': changing
    which auth backends a realm has enabled (owner-only)."""

    def test_change_enabled_backends(self) -> None:
        # Log in as admin
        self.login('iago')
        # An admin who is not an organization owner is rejected.
        result = self.client_patch("/json/realm", {
            'authentication_methods': ujson.dumps({'Email': False, 'Dev': True})})
        self.assert_json_error(result, 'Must be an organization owner')

        # desdemona is an organization owner and may change backends.
        self.login('desdemona')
        result = self.client_patch("/json/realm", {
            'authentication_methods': ujson.dumps({'Email': False, 'Dev': True})})
        self.assert_json_success(result)
        realm = get_realm('zulip')
        self.assertFalse(password_auth_enabled(realm))
        self.assertTrue(dev_auth_enabled(realm))

    def test_disable_all_backends(self) -> None:
        # Log in as admin
        self.login('desdemona')
        # Disabling every backend must be rejected and leave state intact.
        result = self.client_patch("/json/realm", {
            'authentication_methods': ujson.dumps({'Email': False, 'Dev': False})})
        self.assert_json_error(result, 'At least one authentication method must be enabled.')
        realm = get_realm('zulip')
        self.assertTrue(password_auth_enabled(realm))
        self.assertTrue(dev_auth_enabled(realm))

    def test_supported_backends_only_updated(self) -> None:
        # Log in as admin
        self.login('desdemona')
        # Set some supported and unsupported backends
        result = self.client_patch("/json/realm", {
            'authentication_methods': ujson.dumps({'Email': False, 'Dev': True, 'GitHub': False})})
        self.assert_json_success(result)
        realm = get_realm('zulip')
        # Check that unsupported backend is not enabled
        self.assertFalse(github_auth_enabled(realm))
        self.assertTrue(dev_auth_enabled(realm))
        self.assertFalse(password_auth_enabled(realm))
class EmailValidatorTestCase(ZulipTestCase):
    """Tests for login/invite email validation helpers."""

    def test_valid_email(self) -> None:
        # Must not raise for a well-formed address.
        validate_login_email(self.example_email("hamlet"))

    def test_invalid_email(self) -> None:
        with self.assertRaises(JsonableError):
            validate_login_email('hamlet')

    def test_validate_email(self) -> None:
        inviter = self.example_user('hamlet')
        cordelia = self.example_user('cordelia')

        realm = inviter.realm
        do_set_realm_property(realm, 'emails_restricted_to_domains', True)
        inviter.realm.refresh_from_db()
        # '+' addresses are rejected under restricted domains.
        error = validate_email_is_valid(
            'fred+5555@zulip.com',
            get_realm_email_validator(realm),
        )
        self.assertIn('containing + are not allowed', error)

        # An email of an existing active user is reported as taken.
        cordelia_email = cordelia.delivery_email
        errors = get_existing_user_errors(realm, {cordelia_email})
        error, is_deactivated = errors[cordelia_email]
        self.assertEqual(False, is_deactivated)
        self.assertEqual(error, 'Already has an account.')

        cordelia.is_active = False
        cordelia.save()

        # The same email of a deactivated user yields a distinct error.
        errors = get_existing_user_errors(realm, {cordelia_email})
        error, is_deactivated = errors[cordelia_email]
        self.assertEqual(True, is_deactivated)
        self.assertEqual(error, 'Account has been deactivated.')

        # An unused, valid-domain email produces no errors at all.
        errors = get_existing_user_errors(realm, {'fred-is-fine@zulip.com'})
        self.assertEqual(errors, {})
class LDAPBackendTest(ZulipTestCase):
    """Tests for LDAP configuration-error handling in the login view."""

    @override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',))
    def test_non_existing_realm(self) -> None:
        # A ZulipLDAPConfigurationError raised during login should
        # redirect to the LDAP config_error page rather than crash.
        self.init_default_ldap_database()
        user = self.example_user('hamlet')

        data = dict(
            username=user.delivery_email,
            password=initial_password(user.delivery_email),
        )
        error_type = ZulipLDAPAuthBackend.REALM_IS_NONE_ERROR
        error = ZulipLDAPConfigurationError('Realm is None', error_type)
        # Patch _authenticate_user_dn so no real LDAP bind is attempted.
        with mock.patch('zproject.backends.ZulipLDAPAuthBackend.get_or_build_user',
                        side_effect=error), \
                mock.patch('django_auth_ldap.backend._LDAPUser._authenticate_user_dn'):
            response = self.client_post('/login/', data)
            self.assertEqual(response.status_code, 302)
            self.assertEqual(response.url, reverse('config_error', kwargs={'error_category_name': 'ldap'}))
            response = self.client_get(response.url)
            self.assert_in_response('You are trying to login using LDAP '
                                    'without creating an',
                                    response)
| 51.313566 | 138 | 0.622133 |
c8be65b0d8ef95a4be1eab21572b2857ee5fe5a8 | 392 | py | Python | metaclass/Avatar.py | majianfei/practice | 345f63c2f4118617f0165700e079cb70a32eb4f8 | [
"MIT"
] | 1 | 2019-08-13T15:14:12.000Z | 2019-08-13T15:14:12.000Z | metaclass/Avatar.py | majianfei/practice | 345f63c2f4118617f0165700e079cb70a32eb4f8 | [
"MIT"
] | null | null | null | metaclass/Avatar.py | majianfei/practice | 345f63c2f4118617f0165700e079cb70a32eb4f8 | [
"MIT"
] | null | null | null | #import sys
#sys.path.append("./")
from MetaClass import MetaClass
class Avatar(metaclass=MetaClass):
    # Demo class built with the project-local MetaClass; presumably the
    # metaclass injects additional methods (func2/func3 are called in the
    # __main__ demo below but not defined here) -- TODO confirm against
    # MetaClass.py.
    def __init__(self):
        print("Avatar init")

    def func1(self):
        print("func1")
if __name__ == "__main__":
    # Demo: inspect which methods exist on the class, then exercise them.
    # func1 is defined directly; func2/func3 are expected to come from
    # MetaClass -- if it doesn't provide them, a.func2()/a.func3() would
    # raise AttributeError (TODO confirm).
    print(hasattr(Avatar, "func1"))
    print(hasattr(Avatar, "func2"))
    a = Avatar()
    a.func1()
    a.func2()
    a.func3()
| 17.818182 | 36 | 0.561224 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.