hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
bca3ee9cd68298fb1d2551d1172ec04266ae7a3e
| 8,627
|
py
|
Python
|
csdl/tests/test_implicit_expose.py
|
LSDOlab/csdl
|
04c2c5764f6ca9b865ec87ecfeaf6f22ecacc5a3
|
[
"MIT"
] | null | null | null |
csdl/tests/test_implicit_expose.py
|
LSDOlab/csdl
|
04c2c5764f6ca9b865ec87ecfeaf6f22ecacc5a3
|
[
"MIT"
] | null | null | null |
csdl/tests/test_implicit_expose.py
|
LSDOlab/csdl
|
04c2c5764f6ca9b865ec87ecfeaf6f22ecacc5a3
|
[
"MIT"
] | 1
|
2021-10-04T19:40:32.000Z
|
2021-10-04T19:40:32.000Z
|
import numpy as np
import pytest
def test_implicit_nonlinear(backend):
from csdl.examples.valid.ex_implicit_expose_apply_nonlinear_with_expose import example
exec('from {} import Simulator'.format(backend))
sim = example(eval('Simulator'))
sim['x'] = 1.9
sim.run()
np.testing.assert_almost_equal(sim['x'], np.array([1.0]))
result = sim.check_partials(out_stream=None,
compact_print=True,
method='fd')
sim.assert_check_partials(result, atol=1.e-6, rtol=1.e-6)
sim['x'] = 2.1
sim.run()
np.testing.assert_almost_equal(sim['x'], np.array([3.0]))
np.testing.assert_almost_equal(sim['t'], np.array([0.0]))
result = sim.check_partials(out_stream=None,
compact_print=True,
method='fd')
sim.assert_check_partials(result, atol=1.e-6, rtol=1.e-6)
def test_fixed_point_iteration(backend):
from csdl.examples.valid.ex_implicit_expose_fixed_point_iteration_with_expose import example
exec('from {} import Simulator'.format(backend))
sim = example(eval('Simulator'))
np.testing.assert_approx_equal(
sim['a'],
1.1241230297043157,
)
np.testing.assert_approx_equal(
sim['b'],
1.0798960718178603,
)
np.testing.assert_almost_equal(sim['c'], 0.)
np.testing.assert_approx_equal(
sim['t1'],
1.1241230297043157**2,
)
np.testing.assert_approx_equal(
sim['t2'],
1.0798960718178603**2,
)
result = sim.check_partials(out_stream=None,
compact_print=True,
method='fd')
sim.assert_check_partials(result, atol=1.e-6, rtol=1.e-6)
def test_implicit_nonlinear_with_subsystems_in_residual(backend):
from csdl.examples.valid.ex_implicit_expose_with_subsystems_with_expose import example
exec('from {} import Simulator'.format(backend))
sim = example(eval('Simulator'))
np.testing.assert_approx_equal(
sim['a'],
1.0798960718178603,
)
np.testing.assert_approx_equal(
sim['t2'],
1.0798960718178603**2,
)
np.testing.assert_approx_equal(
sim['t3'],
1.0798960718178603 - 4 + 18 - 15,
)
np.testing.assert_almost_equal(
sim['x'],
np.array([1.044583306084130]),
)
np.testing.assert_almost_equal(
sim['t4'],
np.array([1.044583306084130**2]),
)
sim['x'] = 1.9
sim.run()
np.testing.assert_approx_equal(
sim['a'],
1.0798960718178603,
)
np.testing.assert_approx_equal(
sim['t2'],
1.0798960718178603**2,
)
np.testing.assert_approx_equal(
sim['t3'],
1.0798960718178603 - 4 + 18 - 15,
)
np.testing.assert_approx_equal(
sim['x'],
np.array([2.659476838580102]),
)
np.testing.assert_approx_equal(
sim['t4'],
np.array([2.659476838580102**2]),
)
result = sim.check_partials(out_stream=None,
compact_print=True,
method='fd')
sim.assert_check_partials(result, atol=1.e-6, rtol=1.e-6)
def test_implicit_multiple_residuals(backend):
from csdl.examples.valid.ex_implicit_expose_multiple_residuals_with_expose import example
exec('from {} import Simulator'.format(backend))
sim = example(eval('Simulator'))
np.testing.assert_almost_equal(
sim['x'],
np.array([np.sqrt(3)]),
)
np.testing.assert_almost_equal(
sim['t4'],
np.array([1. + np.sqrt(3)]),
)
np.testing.assert_almost_equal(
sim['t1'],
np.array([0.0]),
)
np.testing.assert_almost_equal(
sim['t2'],
np.array([3.0]),
)
np.testing.assert_almost_equal(
sim['y'],
np.array([1.]),
)
np.testing.assert_almost_equal(
sim['t3'],
np.array([2.]),
)
result = sim.check_partials(out_stream=None,
compact_print=True,
method='fd')
sim.assert_check_partials(result, atol=1.e-6, rtol=1.e-6)
# ----------------------------------------------------------------------
def test_implicit_nonlinear_define_model_inline(backend):
from csdl.examples.valid.ex_implicit_expose_apply_nonlinear_with_expose_define_model_inline import example
exec('from {} import Simulator'.format(backend))
sim = example(eval('Simulator'))
sim['x'] = 1.9
sim.run()
np.testing.assert_almost_equal(sim['x'], np.array([1.0]))
result = sim.check_partials(out_stream=None,
compact_print=True,
method='fd')
sim.assert_check_partials(result, atol=1.e-6, rtol=1.e-6)
sim['x'] = 2.1
sim.run()
np.testing.assert_almost_equal(sim['x'], np.array([3.0]))
np.testing.assert_almost_equal(sim['t'], np.array([0.0]))
result = sim.check_partials(out_stream=None,
compact_print=True,
method='fd')
sim.assert_check_partials(result, atol=1.e-6, rtol=1.e-6)
def test_fixed_point_iteration_define_model_inline(backend):
from csdl.examples.valid.ex_implicit_expose_fixed_point_iteration_with_expose_define_model_inline import example
exec('from {} import Simulator'.format(backend))
sim = example(eval('Simulator'))
np.testing.assert_approx_equal(
sim['a'],
1.1241230297043157,
)
np.testing.assert_approx_equal(
sim['b'],
1.0798960718178603,
)
np.testing.assert_almost_equal(sim['c'], 0.)
np.testing.assert_approx_equal(
sim['t1'],
1.1241230297043157**2,
)
np.testing.assert_approx_equal(
sim['t2'],
1.0798960718178603**2,
)
result = sim.check_partials(out_stream=None,
compact_print=True,
method='fd')
sim.assert_check_partials(result, atol=1.e-6, rtol=1.e-6)
def test_implicit_nonlinear_with_subsystems_in_residual_define_model_inline(
backend):
from csdl.examples.valid.ex_implicit_expose_with_subsystems_with_expose_define_model_inline import example
exec('from {} import Simulator'.format(backend))
sim = example(eval('Simulator'))
np.testing.assert_approx_equal(
sim['a'],
1.0798960718178603,
)
np.testing.assert_approx_equal(
sim['t2'],
1.0798960718178603**2,
)
np.testing.assert_approx_equal(
sim['t3'],
1.0798960718178603 - 4 + 18 - 15,
)
np.testing.assert_almost_equal(
sim['x'],
np.array([1.044583306084130]),
)
np.testing.assert_almost_equal(
sim['t4'],
np.array([1.044583306084130**2]),
)
sim['x'] = 1.9
sim.run()
np.testing.assert_approx_equal(
sim['a'],
1.0798960718178603,
)
np.testing.assert_approx_equal(
sim['t2'],
1.0798960718178603**2,
)
np.testing.assert_approx_equal(
sim['t3'],
1.0798960718178603 - 4 + 18 - 15,
)
np.testing.assert_approx_equal(
sim['x'],
np.array([2.659476838580102]),
)
np.testing.assert_approx_equal(
sim['t4'],
np.array([2.659476838580102**2]),
)
result = sim.check_partials(out_stream=None,
compact_print=True,
method='fd')
sim.assert_check_partials(result, atol=1.e-6, rtol=1.e-6)
def test_implicit_multiple_residuals_define_model_inline(backend):
from csdl.examples.valid.ex_implicit_expose_multiple_residuals_with_expose_define_model_inline import example
exec('from {} import Simulator'.format(backend))
sim = example(eval('Simulator'))
np.testing.assert_almost_equal(
sim['x'],
np.array([np.sqrt(3)]),
)
np.testing.assert_almost_equal(
sim['t4'],
np.array([1. + np.sqrt(3)]),
)
np.testing.assert_almost_equal(
sim['t1'],
np.array([0.0]),
)
np.testing.assert_almost_equal(
sim['t2'],
np.array([3.0]),
)
np.testing.assert_almost_equal(
sim['y'],
np.array([1.]),
)
np.testing.assert_almost_equal(
sim['t3'],
np.array([2.]),
)
result = sim.check_partials(out_stream=None,
compact_print=True,
method='fd')
sim.assert_check_partials(result, atol=1.e-6, rtol=1.e-6)
| 29.851211
| 116
| 0.590124
| 1,062
| 8,627
| 4.560264
| 0.07533
| 0.089201
| 0.148668
| 0.104068
| 0.989469
| 0.989469
| 0.989469
| 0.989469
| 0.989469
| 0.989469
| 0
| 0.096355
| 0.268575
| 8,627
| 288
| 117
| 29.954861
| 0.671157
| 0.008114
| 0
| 0.768627
| 0
| 0
| 0.042314
| 0
| 0
| 0
| 0
| 0
| 0.227451
| 1
| 0.031373
| false
| 0
| 0.070588
| 0
| 0.101961
| 0.039216
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
bccfc5be0cd4a76286abb6d9c6a51b9cfedd1d30
| 20,563
|
py
|
Python
|
tests/test_write.py
|
RomCoch/lasio
|
c0abaffc1656e7acdfdb12efff37d0f2cf845c66
|
[
"MIT"
] | 1
|
2020-03-13T07:09:39.000Z
|
2020-03-13T07:09:39.000Z
|
tests/test_write.py
|
RomCoch/lasio
|
c0abaffc1656e7acdfdb12efff37d0f2cf845c66
|
[
"MIT"
] | null | null | null |
tests/test_write.py
|
RomCoch/lasio
|
c0abaffc1656e7acdfdb12efff37d0f2cf845c66
|
[
"MIT"
] | 1
|
2020-12-20T18:57:06.000Z
|
2020-12-20T18:57:06.000Z
|
import os, sys; sys.path.insert(0, os.path.dirname(os.path.dirname(__file__)))
import pytest
import numpy as np
import lasio
from lasio import read
from lasio.excel import ExcelConverter
from lasio.reader import StringIO
test_dir = os.path.dirname(__file__)
egfn = lambda fn: os.path.join(os.path.dirname(__file__), "examples", fn)
def test_write_sect_widths_12(capsys):
las = lasio.read(egfn("sample_write_sect_widths_12.las"))
las.write(sys.stdout, version=1.2)
assert capsys.readouterr()[0] == open(egfn('test_write_sect_widths_12.txt')).read()
def test_write_to_filename():
las = read(egfn("sample_write_sect_widths_12.las"))
las.write('test.las', version=1.2)
assert os.path.isfile('test.las')
os.remove('test.las')
def test_write_sect_widths_12_curves():
l = read(egfn("sample_write_sect_widths_12.las"))
s = StringIO()
l.write(s, version=1.2)
for start in ("D.M ", "A.US/M ", "B.K/M3 ", "C.V/V "):
s.seek(0)
assert "\n" + start in s.read()
def test_write_sect_widths_20_narrow():
l = read(egfn("sample_write_sect_widths_20_narrow.las"))
s = StringIO()
l.write(s, version=2)
s.seek(0)
assert s.read() == """~Version ---------------------------------------------------
VERS. 2.0 : CWLS log ASCII Standard -VERSION 2.0
WRAP. NO : ONE LINE PER DEPTH STEP
~Well ------------------------------------------------------
STRT.M 1670.0 : START DEPTH
STOP.M 1669.75 : STOP DEPTH
STEP.M -0.125 : STEP
NULL. -999.25 : NULL VALUE
COMP. ANY : COMPANY
WELL. AAAAA_2 : WELL
FLD . WILDCAT : FIELD
LOC . 12 : LOCATION
PROV. ALBERTA : PROVINCE
SRVC. LOGGING : SERVICE COMPANY ARE YOU KIDDING THIS IS A REALLY REALLY LONG STRING
DATE. 13-DEC-86 : LOG DATE
UWI . 10012340 : UNIQUE WELL ID
~Curve Information -----------------------------------------
DEPT.M : 1 DEPTH
DT .US/M 60 520 32 00 : 2 SONIC TRANSIT TIME
RHOB.K/M3 45 350 01 00 : 3 BULK DENSITY
NPHI.V/V 42 890 00 00 : 4 NEUTRON POROSITY
SFLU.OHMM 07 220 04 00 : 5 SHALLOW RESISTIVITY
SFLA.OHMM 07 222 01 00 : 6 SHALLOW RESISTIVITY
ILM .OHMM 07 120 44 00 : 7 MEDIUM RESISTIVITY
ILD .OHMM 07 120 46 00 : 8 DEEP RESISTIVITY
~Params ----------------------------------------------------
MUD . GEL CHEM : MUD TYPE
BHT .DEGC 35.5 : BOTTOM HOLE TEMPERATURE
BS .MM 200.0 : BIT SIZE
FD .K/M3 1000.0 : FLUID DENSITY
MATR. SAND : NEUTRON MATRIX
MDEN. 2710.0 : LOGGING MATRIX DENSITY
RMF .OHMM 0.216 : MUD FILTRATE RESISTIVITY
DFD .K/M3 1525.0 : DRILL FLUID DENSITY
~Other -----------------------------------------------------
Note: The logging tools became stuck at 625 metres causing the data
between 625 metres and 615 metres to be invalid.
~ASCII -----------------------------------------------------
1670.00000 123.45000 2550.00000 0.45000 123.45000 123.45000 110.20000 105.60000
1669.87500 123.45000 2550.00000 0.45000 123.45000 123.45000 110.20000 105.60000
1669.75000 123.45000 2550.00000 0.45000 123.45000 123.45000 110.20000 105.60000
"""
def test_write_sect_widths_20_wide():
l = read(egfn("sample_write_sect_widths_20_wide.las"))
s = StringIO()
l.write(s, version=2)
s.seek(0)
assert s.read() == """~Version ---------------------------------------------------
VERS. 2.0 : CWLS log ASCII Standard -VERSION 2.0
WRAP. NO : ONE LINE PER DEPTH STEP
~Well ------------------------------------------------------
STRT.M 1670.0 : START DEPTH
STOP.M 1669.75 : STOP DEPTH
STEP.M -0.125 : STEP
NULL. -999.25 : NULL VALUE
COMP. ANY OIL COMPANY INC. : COMPANY
WELL. AAAAA_2 : WELL
FLD . WILDCAT : FIELD
LOC . 12-34-12-34W5M : LOCATION
PROV. ALBERTA : PROVINCE
SRVC. The company that did this logging has a very very long name.... : SERVICE COMPANY
DATE. 13-DEC-86 : LOG DATE
UWI . 100123401234W500 : UNIQUE WELL ID
~Curve Information -----------------------------------------
DEPT.M : 1 DEPTH
DT .US/M 60 520 32 00 : 2 SONIC TRANSIT TIME
RHOB.K/M3 45 350 01 00 : 3 BULK DENSITY
NPHI.V/V 42 890 00 00 : 4 NEUTRON POROSITY
SFLU.OHMM 07 220 04 00 : 5 SHALLOW RESISTIVITY
SFLA.OHMM 07 222 01 00 : 6 SHALLOW RESISTIVITY
ILM .OHMM 07 120 44 00 : 7 MEDIUM RESISTIVITY
ILD .OHMM 07 120 46 00 : 8 DEEP RESISTIVITY
~Params ----------------------------------------------------
MUD . GEL CHEM : MUD TYPE
BHT .DEGC 35.5 : BOTTOM HOLE TEMPERATURE
BS .MM 200.0 : BIT SIZE
FD .K/M3 1000.0 : FLUID DENSITY
MATR. SAND : NEUTRON MATRIX
MDEN. 2710.0 : LOGGING MATRIX DENSITY
RMF .OHMM 0.216 : MUD FILTRATE RESISTIVITY
DFD .K/M3 1525.0 : DRILL FLUID DENSITY
~Other -----------------------------------------------------
Note: The logging tools became stuck at 625 metres causing the data
between 625 metres and 615 metres to be invalid.
~ASCII -----------------------------------------------------
1670.00000 123.45000 2550.00000 0.45000 123.45000 123.45000 110.20000 105.60000
1669.87500 123.45000 2550.00000 0.45000 123.45000 123.45000 110.20000 105.60000
1669.75000 123.45000 2550.00000 0.45000 123.45000 123.45000 110.20000 105.60000
"""
def test_write_sample_empty_params():
l = read(egfn("sample_write_empty_params.las"))
l.write(StringIO(), version=2)
def test_df_curve_addition_on_export():
l = read(egfn("sample.las"))
df = l.df()
df["ILD_COND"] = 1000 / df.ILD
l.set_data_from_df(df, truncate=False)
s = StringIO()
l.write(s, version=2, wrap=False, fmt="%.5f")
s.seek(0)
assert s.read() == """~Version ---------------------------------------------------
VERS. 2.0 : CWLS log ASCII Standard -VERSION 2.0
WRAP. NO : One line per depth step
~Well ------------------------------------------------------
STRT.M 1670.0 :
STOP.M 1669.75 :
STEP.M -0.125 :
NULL. -999.25 :
COMP. # ANY OIL COMPANY LTD. : COMPANY
WELL. ANY ET AL OIL WELL #12 : WELL
FLD . EDAM : FIELD
LOC . A9-16-49-20W3M : LOCATION
PROV. SASKATCHEWAN : PROVINCE
SRVC. ANY LOGGING COMPANY LTD. : SERVICE COMPANY
DATE. 25-DEC-1988 : LOG DATE
UWI . 100091604920W300 : UNIQUE WELL ID
~Curve Information -----------------------------------------
DEPT .M : 1 DEPTH
DT .US/M : 2 SONIC TRANSIT TIME
RHOB .K/M3 : 3 BULK DENSITY
NPHI .V/V : 4 NEUTRON POROSITY
SFLU .OHMM : 5 RXO RESISTIVITY
SFLA .OHMM : 6 SHALLOW RESISTIVITY
ILM .OHMM : 7 MEDIUM RESISTIVITY
ILD .OHMM : 8 DEEP RESISTIVITY
ILD_COND. :
~Params ----------------------------------------------------
BHT .DEGC 35.5 : BOTTOM HOLE TEMPERATURE
BS .MM 200.0 : BIT SIZE
FD .K/M3 1000.0 : FLUID DENSITY
MATR. 0.0 : NEUTRON MATRIX(0=LIME,1=SAND,2=DOLO)
MDEN. 2710.0 : LOGGING MATRIX DENSITY
RMF .OHMM 0.216 : MUD FILTRATE RESISTIVITY
DFD .K/M3 1525.0 : DRILL FLUID DENSITY
~Other -----------------------------------------------------
Note: The logging tools became stuck at 625 meters causing the data
between 625 meters and 615 meters to be invalid.
~ASCII -----------------------------------------------------
1670.00000 123.45000 2550.00000 0.45000 123.45000 123.45000 110.20000 105.60000 9.46970
1669.87500 123.45000 2550.00000 0.45000 123.45000 123.45000 110.20000 105.60000 9.46970
1669.75000 123.45000 2550.00000 0.45000 123.45000 123.45000 110.20000 105.60000 9.46970
"""
def test_write_xlsx():
l = read(egfn("sample.las"))
e = ExcelConverter(l)
xlsxfn = "test.xlsx"
e.write(xlsxfn)
os.remove(xlsxfn)
def test_export_xlsx():
l = read(egfn("sample.las"))
xlsxfn = "test2.xlsx"
l.to_excel(xlsxfn)
os.remove(xlsxfn)
def test_multi_curve_mnemonics_rewrite():
l = read(egfn('sample_issue105_a.las'))
s = StringIO()
l.write(s, version=2, wrap=False, fmt="%.5f")
s.seek(0)
assert s.read() == '''~Version ---------------------------------------------------
VERS. 2.0 : CWLS log ASCII Standard -VERSION 2.0
WRAP. NO : One line per depth step
~Well ------------------------------------------------------
STRT.M 1670.0 :
STOP.M 1669.75 :
STEP.M -0.125 :
NULL. -999.25 :
COMP. # ANY OIL COMPANY LTD. : COMPANY
WELL. ANY ET AL OIL WELL #12 : WELL
FLD . EDAM : FIELD
LOC . A9-16-49-20W3M : LOCATION
PROV. SASKATCHEWAN : PROVINCE
SRVC. ANY LOGGING COMPANY LTD. : SERVICE COMPANY
DATE. 25-DEC-1988 : LOG DATE
UWI . 100091604920W300 : UNIQUE WELL ID
~Curve Information -----------------------------------------
DEPT.M : 1 DEPTH
RHO .ohmm : curve 1,2,3
RHO .ohmm : curve 10,20,30
RHO .ohmm : curve 100,200,300
PHI . : porosity
~Params ----------------------------------------------------
BHT .DEGC 35.5 : BOTTOM HOLE TEMPERATURE
BS .MM 200.0 : BIT SIZE
FD .K/M3 1000.0 : FLUID DENSITY
MATR. 0.0 : NEUTRON MATRIX(0=LIME,1=SAND,2=DOLO)
MDEN. 2710.0 : LOGGING MATRIX DENSITY
RMF .OHMM 0.216 : MUD FILTRATE RESISTIVITY
DFD .K/M3 1525.0 : DRILL FLUID DENSITY
~Other -----------------------------------------------------
Note: The logging tools became stuck at 625 meters causing the data
between 625 meters and 615 meters to be invalid.
~ASCII -----------------------------------------------------
1670.00000 1.00000 10.00000 100.00000 0.10000
1669.87500 2.00000 20.00000 200.00000 0.20000
1669.75000 3.00000 30.00000 300.00000 0.30000
'''
def test_multi_curve_missing_mnemonics_rewrite():
l = read(egfn('sample_issue105_b.las'))
s = StringIO()
l.write(s, version=2, wrap=False, fmt="%.5f")
s.seek(0)
assert s.read() == '''~Version ---------------------------------------------------
VERS. 2.0 : CWLS log ASCII Standard -VERSION 2.0
WRAP. NO : One line per depth step
~Well ------------------------------------------------------
STRT.M 1670.0 :
STOP.M 1669.75 :
STEP.M -0.125 :
NULL. -999.25 :
COMP. # ANY OIL COMPANY LTD. : COMPANY
WELL. ANY ET AL OIL WELL #12 : WELL
FLD . EDAM : FIELD
LOC . A9-16-49-20W3M : LOCATION
PROV. SASKATCHEWAN : PROVINCE
SRVC. ANY LOGGING COMPANY LTD. : SERVICE COMPANY
DATE. 25-DEC-1988 : LOG DATE
UWI . 100091604920W300 : UNIQUE WELL ID
~Curve Information -----------------------------------------
DEPT.M : 1 DEPTH
.ohmm : curve 1,2,3
.ohmm : curve 10,20,30
.ohmm : curve 100,200,300
PHI . : porosity
~Params ----------------------------------------------------
BHT .DEGC 35.5 : BOTTOM HOLE TEMPERATURE
BS .MM 200.0 : BIT SIZE
FD .K/M3 1000.0 : FLUID DENSITY
MATR. 0.0 : NEUTRON MATRIX(0=LIME,1=SAND,2=DOLO)
MDEN. 2710.0 : LOGGING MATRIX DENSITY
RMF .OHMM 0.216 : MUD FILTRATE RESISTIVITY
DFD .K/M3 1525.0 : DRILL FLUID DENSITY
~Other -----------------------------------------------------
Note: The logging tools became stuck at 625 meters causing the data
between 625 meters and 615 meters to be invalid.
~ASCII -----------------------------------------------------
1670.00000 1.00000 10.00000 100.00000 0.10000
1669.87500 2.00000 20.00000 200.00000 0.20000
1669.75000 3.00000 30.00000 300.00000 0.30000
'''
def test_write_units():
l = read(egfn("sample.las"))
l.curves[0].unit = 'FT'
s = StringIO()
l.write(s, version=2, wrap=False, fmt="%.5f")
s.seek(0)
assert s.read() == '''~Version ---------------------------------------------------
VERS. 2.0 : CWLS log ASCII Standard -VERSION 2.0
WRAP. NO : One line per depth step
~Well ------------------------------------------------------
STRT.FT 1670.0 :
STOP.FT 1669.75 :
STEP.FT -0.125 :
NULL. -999.25 :
COMP. # ANY OIL COMPANY LTD. : COMPANY
WELL. ANY ET AL OIL WELL #12 : WELL
FLD . EDAM : FIELD
LOC . A9-16-49-20W3M : LOCATION
PROV. SASKATCHEWAN : PROVINCE
SRVC. ANY LOGGING COMPANY LTD. : SERVICE COMPANY
DATE. 25-DEC-1988 : LOG DATE
UWI . 100091604920W300 : UNIQUE WELL ID
~Curve Information -----------------------------------------
DEPT.FT : 1 DEPTH
DT .US/M : 2 SONIC TRANSIT TIME
RHOB.K/M3 : 3 BULK DENSITY
NPHI.V/V : 4 NEUTRON POROSITY
SFLU.OHMM : 5 RXO RESISTIVITY
SFLA.OHMM : 6 SHALLOW RESISTIVITY
ILM .OHMM : 7 MEDIUM RESISTIVITY
ILD .OHMM : 8 DEEP RESISTIVITY
~Params ----------------------------------------------------
BHT .DEGC 35.5 : BOTTOM HOLE TEMPERATURE
BS .MM 200.0 : BIT SIZE
FD .K/M3 1000.0 : FLUID DENSITY
MATR. 0.0 : NEUTRON MATRIX(0=LIME,1=SAND,2=DOLO)
MDEN. 2710.0 : LOGGING MATRIX DENSITY
RMF .OHMM 0.216 : MUD FILTRATE RESISTIVITY
DFD .K/M3 1525.0 : DRILL FLUID DENSITY
~Other -----------------------------------------------------
Note: The logging tools became stuck at 625 meters causing the data
between 625 meters and 615 meters to be invalid.
~ASCII -----------------------------------------------------
1670.00000 123.45000 2550.00000 0.45000 123.45000 123.45000 110.20000 105.60000
1669.87500 123.45000 2550.00000 0.45000 123.45000 123.45000 110.20000 105.60000
1669.75000 123.45000 2550.00000 0.45000 123.45000 123.45000 110.20000 105.60000
'''
def test_to_csv_units_None():
las = read(egfn("sample.las"))
las.to_csv('test.csv', units_loc=None)
csv_output = open('test.csv', 'r').readlines()
proof_output = open(egfn('sample.las_units-none.csv'), 'r').readlines()
os.remove('test.csv')
assert csv_output[0] == proof_output[0]
# assert csv_output[1] == proof_output[1]
def test_to_csv_units_line():
las = read(egfn("sample.las"))
las.to_csv('test.csv', units_loc='line')
csv_output = open('test.csv', 'r').readlines()
proof_output = open(egfn('sample.las_units-line.csv'), 'r').readlines()
os.remove('test.csv')
assert csv_output[0] == proof_output[0]
assert csv_output[1] == proof_output[1]
def test_to_csv_units_parentheses():
las = read(egfn("sample.las"))
las.to_csv('test.csv', units_loc='()')
csv_output = open('test.csv', 'r').readlines()
proof_output = open(egfn('sample.las_units-parentheses.csv'), 'r').readlines()
os.remove('test.csv')
assert csv_output[0] == proof_output[0]
def test_to_csv_units_brackets():
las = read(egfn("sample.las"))
las.to_csv('test.csv', units_loc='[]')
csv_output = open('test.csv', 'r').readlines()
proof_output = open(egfn('sample.las_units-brackets.csv'), 'r').readlines()
os.remove('test.csv')
assert csv_output[0] == proof_output[0]
# assert csv_output[1] == proof_output[1]
def test_to_csv_specify_mnemonics():
las = read(egfn("sample.las"))
las.to_csv('test.csv', mnemonics=[str(i) for i in range(len(las.curves))])
csv_output = open('test.csv', 'r').readlines()
assert csv_output[0] == '0,1,2,3,4,5,6,7\n'
os.remove('test.csv')
def test_to_csv_specify_units():
las = read(egfn("sample.las"))
las.to_csv('test.csv', units=[str(i) for i in range(len(las.curves))])
csv_output = open('test.csv', 'r').readlines()
assert csv_output[1] == '0,1,2,3,4,5,6,7\n'
os.remove('test.csv')
def test_rename_and_write_curve_mnemonic():
l = read(egfn("sample.las"))
for curve in l.curves:
if curve.mnemonic != 'DEPT':
curve.mnemonic = "New_" + curve.mnemonic
for curve in l.curves:
print('mnemonic=%s original_mnemonic=%s' % (curve.mnemonic, curve.original_mnemonic))
s = StringIO()
l.write(s, version=2)
s.seek(0)
assert s.read() == '''~Version ---------------------------------------------------
VERS. 2.0 : CWLS log ASCII Standard -VERSION 2.0
WRAP. NO : ONE LINE PER DEPTH STEP
~Well ------------------------------------------------------
STRT.M 1670.0 :
STOP.M 1669.75 :
STEP.M -0.125 :
NULL. -999.25 :
COMP. # ANY OIL COMPANY LTD. : COMPANY
WELL. ANY ET AL OIL WELL #12 : WELL
FLD . EDAM : FIELD
LOC . A9-16-49-20W3M : LOCATION
PROV. SASKATCHEWAN : PROVINCE
SRVC. ANY LOGGING COMPANY LTD. : SERVICE COMPANY
DATE. 25-DEC-1988 : LOG DATE
UWI . 100091604920W300 : UNIQUE WELL ID
~Curve Information -----------------------------------------
DEPT .M : 1 DEPTH
New_DT .US/M : 2 SONIC TRANSIT TIME
New_RHOB.K/M3 : 3 BULK DENSITY
New_NPHI.V/V : 4 NEUTRON POROSITY
New_SFLU.OHMM : 5 RXO RESISTIVITY
New_SFLA.OHMM : 6 SHALLOW RESISTIVITY
New_ILM .OHMM : 7 MEDIUM RESISTIVITY
New_ILD .OHMM : 8 DEEP RESISTIVITY
~Params ----------------------------------------------------
BHT .DEGC 35.5 : BOTTOM HOLE TEMPERATURE
BS .MM 200.0 : BIT SIZE
FD .K/M3 1000.0 : FLUID DENSITY
MATR. 0.0 : NEUTRON MATRIX(0=LIME,1=SAND,2=DOLO)
MDEN. 2710.0 : LOGGING MATRIX DENSITY
RMF .OHMM 0.216 : MUD FILTRATE RESISTIVITY
DFD .K/M3 1525.0 : DRILL FLUID DENSITY
~Other -----------------------------------------------------
Note: The logging tools became stuck at 625 meters causing the data
between 625 meters and 615 meters to be invalid.
~ASCII -----------------------------------------------------
1670.00000 123.45000 2550.00000 0.45000 123.45000 123.45000 110.20000 105.60000
1669.87500 123.45000 2550.00000 0.45000 123.45000 123.45000 110.20000 105.60000
1669.75000 123.45000 2550.00000 0.45000 123.45000 123.45000 110.20000 105.60000
'''
def test_write_large_depths():
las = lasio.read(egfn("sample.las"))
las.curves[0].data *= 10.5 + 0.1
las.write('write_large_depths.las')
las2 = lasio.read('write_large_depths.las')
os.remove('write_large_depths.las')
assert np.all(las.curves[0].data == las2.curves[0].data)
def test_write_single_step():
las = lasio.read(egfn("single_step_20.las"))
s = StringIO()
las.write(s, version=2)
s.seek(0)
assert s.read() == '''~Version ---------------------------------------------------
VERS. 2.0 : CWLS log ASCII Standard -VERSION 2.0
WRAP. NO : ONE LINE PER DEPTH STEP
~Well ------------------------------------------------------
STRT.M 1670.0 : START DEPTH
STOP.M 1670.0 : STOP DEPTH
STEP.M None : STEP
NULL. -999.25 : NULL VALUE
COMP. ANY OIL COMPANY INC. : COMPANY
WELL. AAAAA_2 : WELL
FLD . WILDCAT : FIELD
LOC . 12-34-12-34W5M : LOCATION
PROV. ALBERTA : PROVINCE
SRVC. ANY LOGGING COMPANY INC. : SERVICE COMPANY
DATE. 13-DEC-86 : LOG DATE
UWI . 100123401234W500 : UNIQUE WELL ID
~Curve Information -----------------------------------------
DEPT.M : 1 DEPTH
DT .US/M 60 520 32 00 : 2 SONIC TRANSIT TIME
RHOB.K/M3 45 350 01 00 : 3 BULK DENSITY
NPHI.V/V 42 890 00 00 : 4 NEUTRON POROSITY
SFLU.OHMM 07 220 04 00 : 5 SHALLOW RESISTIVITY
SFLA.OHMM 07 222 01 00 : 6 SHALLOW RESISTIVITY
ILM .OHMM 07 120 44 00 : 7 MEDIUM RESISTIVITY
ILD .OHMM 07 120 46 00 : 8 DEEP RESISTIVITY
~Params ----------------------------------------------------
MUD . GEL CHEM : MUD TYPE
BHT .DEGC 35.5 : BOTTOM HOLE TEMPERATURE
BS .MM 200.0 : BIT SIZE
FD .K/M3 1000.0 : FLUID DENSITY
MATR. SAND : NEUTRON MATRIX
MDEN. 2710.0 : LOGGING MATRIX DENSITY
RMF .OHMM 0.216 : MUD FILTRATE RESISTIVITY
DFD .K/M3 1525.0 : DRILL FLUID DENSITY
~Other -----------------------------------------------------
Note: The logging tools became stuck at 625 metres causing the data
between 625 metres and 615 metres to be invalid.
~ASCII -----------------------------------------------------
1670.00000 123.45000 2550.00000 0.45000 123.45000 123.45000 110.20000 105.60000
'''
| 41.625506
| 99
| 0.550309
| 2,844
| 20,563
| 3.910689
| 0.106188
| 0.034526
| 0.037403
| 0.024456
| 0.87718
| 0.840496
| 0.820446
| 0.807409
| 0.798597
| 0.798597
| 0
| 0.151633
| 0.236055
| 20,563
| 493
| 100
| 41.709939
| 0.556369
| 0.003842
| 0
| 0.763043
| 0
| 0.045652
| 0.780626
| 0.147258
| 0
| 0
| 0
| 0
| 0.041304
| 1
| 0.045652
| false
| 0
| 0.015217
| 0
| 0.06087
| 0.002174
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
bce321b6124ab1785ef783066c83831044082ea2
| 205
|
py
|
Python
|
hvad/compat/urls.py
|
aptivate/django-hvad
|
61457412eeae09b5df1c514a5b162230be125e1b
|
[
"BSD-3-Clause"
] | null | null | null |
hvad/compat/urls.py
|
aptivate/django-hvad
|
61457412eeae09b5df1c514a5b162230be125e1b
|
[
"BSD-3-Clause"
] | null | null | null |
hvad/compat/urls.py
|
aptivate/django-hvad
|
61457412eeae09b5df1c514a5b162230be125e1b
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
# Compatibility shim: expose urlencode, urlparse and unquote under the
# same names on both Python 2 and Python 3.
try:
    # Python 2: the helpers are split across two modules.
    from urllib import urlencode, unquote
    from urlparse import urlparse
except ImportError:
    # Python 3: everything lives in urllib.parse.
    from urllib.parse import urlencode, urlparse, unquote
| 25.625
| 57
| 0.726829
| 25
| 205
| 5.96
| 0.52
| 0.201342
| 0.214765
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006135
| 0.204878
| 205
| 7
| 58
| 29.285714
| 0.907975
| 0.102439
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.833333
| 0
| 0.833333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
4c1b247b35366e62cd0cc1d673134dfbd0fdc6b0
| 41,502
|
py
|
Python
|
lot/trees/migrations/0017_auto__add_carbongroup.py
|
CoyoPartners/forestplanner
|
342814619f023aa9177cd2cbcc319e89333749a2
|
[
"BSD-3-Clause"
] | 23
|
2015-10-08T15:15:19.000Z
|
2022-01-11T16:21:48.000Z
|
lot/trees/migrations/0017_auto__add_carbongroup.py
|
CoyoPartners/forestplanner
|
342814619f023aa9177cd2cbcc319e89333749a2
|
[
"BSD-3-Clause"
] | 245
|
2015-02-06T23:05:25.000Z
|
2021-09-10T23:41:54.000Z
|
lot/trees/migrations/0017_auto__add_carbongroup.py
|
CoyoPartners/forestplanner
|
342814619f023aa9177cd2cbcc319e89333749a2
|
[
"BSD-3-Clause"
] | 9
|
2016-01-09T21:47:54.000Z
|
2021-09-10T18:21:14.000Z
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'CarbonGroup'
db.create_table('trees_carbongroup', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(related_name='trees_carbongroup_related', to=orm['auth.User'])),
('name', self.gf('django.db.models.fields.CharField')(max_length='255')),
('date_created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('date_modified', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
('content_type', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='trees_carbongroup_related', null=True, to=orm['contenttypes.ContentType'])),
('object_id', self.gf('django.db.models.fields.PositiveIntegerField')(null=True, blank=True)),
('manipulators', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('geometry_orig', self.gf('django.contrib.gis.db.models.fields.PolygonField')(srid=3857, null=True, blank=True)),
('geometry_final', self.gf('django.contrib.gis.db.models.fields.PolygonField')(srid=3857, null=True, blank=True)),
('group_name', self.gf('django.db.models.fields.TextField')()),
('manager', self.gf('django.db.models.fields.related.ForeignKey')(related_name='manager_set', to=orm['auth.User'])),
('description', self.gf('django.db.models.fields.TextField')()),
('accepted_properties', self.gf('django.db.models.fields.TextField')(default='[]')),
('private', self.gf('django.db.models.fields.BooleanField')(default=False)),
))
db.send_create_signal('trees', ['CarbonGroup'])
# Adding M2M table for field sharing_groups on 'CarbonGroup'
m2m_table_name = db.shorten_name('trees_carbongroup_sharing_groups')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('carbongroup', models.ForeignKey(orm['trees.carbongroup'], null=False)),
('group', models.ForeignKey(orm['auth.group'], null=False))
))
db.create_unique(m2m_table_name, ['carbongroup_id', 'group_id'])
# Adding M2M table for field members on 'CarbonGroup'
m2m_table_name = db.shorten_name('trees_carbongroup_members')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('carbongroup', models.ForeignKey(orm['trees.carbongroup'], null=False)),
('user', models.ForeignKey(orm['auth.user'], null=False))
))
db.create_unique(m2m_table_name, ['carbongroup_id', 'user_id'])
def backwards(self, orm):
# Deleting model 'CarbonGroup'
db.delete_table('trees_carbongroup')
# Removing M2M table for field sharing_groups on 'CarbonGroup'
db.delete_table(db.shorten_name('trees_carbongroup_sharing_groups'))
# Removing M2M table for field members on 'CarbonGroup'
db.delete_table(db.shorten_name('trees_carbongroup_members'))
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'trees.carbongroup': {
'Meta': {'object_name': 'CarbonGroup'},
'accepted_properties': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'trees_carbongroup_related'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {}),
'geometry_final': ('django.contrib.gis.db.models.fields.PolygonField', [], {'srid': '3857', 'null': 'True', 'blank': 'True'}),
'geometry_orig': ('django.contrib.gis.db.models.fields.PolygonField', [], {'srid': '3857', 'null': 'True', 'blank': 'True'}),
'group_name': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'manager': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'manager_set'", 'to': "orm['auth.User']"}),
'manipulators': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'members_set'", 'symmetrical': 'False', 'to': "orm['auth.User']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': "'255'"}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'private': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'sharing_groups': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'trees_carbongroup_related'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['auth.Group']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'trees_carbongroup_related'", 'to': "orm['auth.User']"})
},
'trees.conditionvariantlookup': {
'Meta': {'object_name': 'ConditionVariantLookup'},
'cond_id': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'variant_code': ('django.db.models.fields.CharField', [], {'max_length': '2'})
},
'trees.county': {
'Meta': {'object_name': 'County'},
'cnty_fips': ('django.db.models.fields.IntegerField', [], {}),
'cntyname': ('django.db.models.fields.CharField', [], {'max_length': '23'}),
'fips': ('django.db.models.fields.IntegerField', [], {}),
'geom': ('django.contrib.gis.db.models.fields.MultiPolygonField', [], {'srid': '3857'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'polytype': ('django.db.models.fields.IntegerField', [], {}),
'soc_cnty': ('django.db.models.fields.IntegerField', [], {}),
'st_fips': ('django.db.models.fields.IntegerField', [], {}),
'stname': ('django.db.models.fields.CharField', [], {'max_length': '2'})
},
'trees.forestproperty': {
'Meta': {'object_name': 'ForestProperty'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'trees_forestproperty_related'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'geometry_final': ('django.contrib.gis.db.models.fields.MultiPolygonField', [], {'srid': '3857', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': "'255'"}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'sharing_groups': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'trees_forestproperty_related'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['auth.Group']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'trees_forestproperty_related'", 'to': "orm['auth.User']"})
},
'trees.fvsaggregate': {
'Meta': {'object_name': 'FVSAggregate'},
'after_ba': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'after_merch_bdft': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'after_merch_ft3': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'after_total_ft3': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'after_tpa': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'age': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'agl': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'bgl': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'calc_carbon': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'cedr_bf': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'cedr_hrv': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'ch_cf': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'ch_hw': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'ch_tpa': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'cond': ('django.db.models.fields.IntegerField', [], {}),
'cut_type': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'dead': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'df_bf': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'df_hrv': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'es_btl': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'firehzd': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'hw_bf': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'hw_hrv': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lg_cf': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'lg_hw': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'lg_tpa': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'lp_btl': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'merch_carbon_removed': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'merch_carbon_stored': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'mnconbf': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'mnconhrv': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'mnhw_bf': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'mnhw_hrv': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'nsodis': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'nsofrg': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'nsonest': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'offset': ('django.db.models.fields.IntegerField', [], {}),
'pine_bf': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'pine_hrv': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'pp_btl': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'removed_merch_bdft': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'removed_merch_ft3': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'removed_total_ft3': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'removed_tpa': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'rx': ('django.db.models.fields.IntegerField', [], {}),
'site': ('django.db.models.fields.IntegerField', [], {}),
'sm_cf': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'sm_hw': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'sm_tpa': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'spprich': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'sppsimp': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'sprc_bf': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'sprc_hrv': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'start_ba': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'start_merch_bdft': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'start_merch_ft3': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'start_total_ft3': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'start_tpa': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'total_stand_carbon': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'var': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'wj_bf': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'wj_hrv': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'ww_bf': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'ww_hrv': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'year': ('django.db.models.fields.FloatField', [], {})
},
'trees.fvsspecies': {
'AK': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'BM': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'CA': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'CI': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'CR': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'EC': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'EM': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'IE': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'KT': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'Meta': {'object_name': 'FVSSpecies'},
'NC': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'NI': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'PN': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'SO': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'TT': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'UT': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'WC': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'WS': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'common': ('django.db.models.fields.TextField', [], {}),
'fia': ('django.db.models.fields.CharField', [], {'max_length': '3', 'null': 'True', 'blank': 'True'}),
'fvs': ('django.db.models.fields.CharField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'scientific': ('django.db.models.fields.TextField', [], {}),
'usda': ('django.db.models.fields.CharField', [], {'max_length': '8', 'null': 'True', 'blank': 'True'})
},
'trees.fvsvariant': {
'Meta': {'object_name': 'FVSVariant'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'decision_tree_xml': ('django.db.models.fields.TextField', [], {'default': "''"}),
'fvsvariant': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'geom': ('django.contrib.gis.db.models.fields.MultiPolygonField', [], {'srid': '3857'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'trees.idbsummary': {
'Meta': {'object_name': 'IdbSummary', 'db_table': "u'idb_summary'"},
'acres': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'acres_vol': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'age_dom': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'aspect_deg': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'avgofba_ft2_ac': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'avgofdbh_in': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'avgofht_ft': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'avgofslope': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'avgoftpa': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'baa_ge_3': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'baa_ge_3_stunits': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'bac_ge_3': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'bac_prop': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'bah_ge_3': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'calc_aspect': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'calc_slope': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'cancov': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'cond_id': ('django.db.models.fields.BigIntegerField', [], {'primary_key': 'True'}),
'countofsubplot_id': ('django.db.models.fields.BigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'county_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'covcl': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'elev_ft': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'fia_forest_type_name': ('django.db.models.fields.CharField', [], {'max_length': '60', 'null': 'True', 'blank': 'True'}),
'firstofaspect_deg': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'for_type': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'for_type_name': ('django.db.models.fields.CharField', [], {'max_length': '60', 'null': 'True', 'blank': 'True'}),
'for_type_secdry': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'for_type_secdry_name': ('django.db.models.fields.CharField', [], {'max_length': '60', 'null': 'True', 'blank': 'True'}),
'forest_name': ('django.db.models.fields.CharField', [], {'max_length': '510', 'null': 'True', 'blank': 'True'}),
'fvs_variant': ('django.db.models.fields.CharField', [], {'max_length': '4', 'null': 'True', 'blank': 'True'}),
'halfstate_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'latitude_fuzz': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'longitude_fuzz': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'mai': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'ogsi': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'own_group': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'owner': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'plant_assoc_code': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'plot_id': ('django.db.models.fields.BigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'qmd_hwd_cm': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'qmd_swd_cm': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'qmd_tot_cm': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'qmda_dom': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'qmda_dom_stunits': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'qmdc_dom': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'qmdh_dom': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'sdi': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'sdi_reineke': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'site_class_fia': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'site_index_fia': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'site_species': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'sizecl': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'slope': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'stand_age': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'stand_age_even_yn': ('django.db.models.fields.CharField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
'stand_size_class': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'state_name': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'}),
'stdevofaspect_deg': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'stdevofslope': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'stndhgt': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'stndhgt_stunits': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'struccond': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'struccondr': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'sumofba_ft2': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'tph_ge_3': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'tph_ge_3_stunits': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'vegclass': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'vegclassr': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True', 'blank': 'True'})
},
'trees.myrx': {
'Meta': {'ordering': "['date_modified']", 'object_name': 'MyRx'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'trees_myrx_related'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'default': "''"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': "'255'"}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'rx': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['trees.Rx']"}),
'sharing_groups': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'trees_myrx_related'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['auth.Group']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'trees_myrx_related'", 'to': "orm['auth.User']"})
},
'trees.rx': {
'Meta': {'object_name': 'Rx'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'internal_desc': ('django.db.models.fields.TextField', [], {}),
'internal_name': ('django.db.models.fields.TextField', [], {}),
'internal_type': ('django.db.models.fields.CharField', [], {'default': "'NA'", 'max_length': '2'}),
'variant': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['trees.FVSVariant']"})
},
'trees.scenario': {
'Meta': {'ordering': "['-date_modified']", 'object_name': 'Scenario'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'trees_scenario_related'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'default': "''", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'input_age_class': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'input_property': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['trees.ForestProperty']"}),
'input_rxs': ('trees.models.JSONField', [], {'default': "'{}'", 'null': 'True', 'blank': 'True'}),
'input_target_boardfeet': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'input_target_carbon': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': "'255'"}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'output_scheduler_results': ('trees.models.JSONField', [], {'null': 'True', 'blank': 'True'}),
'sharing_groups': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'trees_scenario_related'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['auth.Group']"}),
'spatial_constraints': ('django.db.models.fields.TextField', [], {'default': "''"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'trees_scenario_related'", 'to': "orm['auth.User']"})
},
'trees.scenariostand': {
'Meta': {'object_name': 'ScenarioStand'},
'acres': ('django.db.models.fields.FloatField', [], {}),
'cond_id': ('django.db.models.fields.BigIntegerField', [], {}),
'constraint': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['trees.SpatialConstraint']", 'null': 'True'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'trees_scenariostand_related'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'geometry_final': ('django.contrib.gis.db.models.fields.PolygonField', [], {'srid': '3857', 'null': 'True', 'blank': 'True'}),
'geometry_orig': ('django.contrib.gis.db.models.fields.PolygonField', [], {'srid': '3857', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'manipulators': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': "'255'"}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'offset': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'rx': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['trees.Rx']"}),
'rx_internal_num': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'scenario': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['trees.Scenario']"}),
'sharing_groups': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'trees_scenariostand_related'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['auth.Group']"}),
'stand': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['trees.Stand']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'trees_scenariostand_related'", 'to': "orm['auth.User']"})
},
'trees.spatialconstraint': {
'Meta': {'object_name': 'SpatialConstraint'},
'category': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'default_rx': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['trees.Rx']"}),
'geom': ('django.contrib.gis.db.models.fields.PolygonField', [], {'srid': '3857'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'trees.stand': {
'Meta': {'object_name': 'Stand'},
'aspect': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'cond_id': ('django.db.models.fields.BigIntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'trees_stand_related'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
'cost': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'elevation': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'geometry_final': ('django.contrib.gis.db.models.fields.PolygonField', [], {'srid': '3857', 'null': 'True', 'blank': 'True'}),
'geometry_orig': ('django.contrib.gis.db.models.fields.PolygonField', [], {'srid': '3857', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'manipulators': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': "'255'"}),
'nn_savetime': ('django.db.models.fields.FloatField', [], {'default': '0.0'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'rast_savetime': ('django.db.models.fields.FloatField', [], {'default': '0.0'}),
'sharing_groups': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'trees_stand_related'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['auth.Group']"}),
'slope': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'strata': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['trees.Strata']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'trees_stand_related'", 'to': "orm['auth.User']"})
},
'trees.strata': {
'Meta': {'object_name': 'Strata'},
'additional_desc': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'trees_strata_related'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': "'255'"}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'search_age': ('django.db.models.fields.FloatField', [], {}),
'search_tpa': ('django.db.models.fields.FloatField', [], {}),
'sharing_groups': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'trees_strata_related'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['auth.Group']"}),
'stand_list': ('trees.models.JSONField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'trees_strata_related'", 'to': "orm['auth.User']"})
},
'trees.timberprice': {
'Meta': {'unique_together': "(('variant', 'timber_type'),)", 'object_name': 'TimberPrice'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'price': ('django.db.models.fields.FloatField', [], {}),
'timber_type': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'variant': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['trees.FVSVariant']"})
},
'trees.treelivesummary': {
'Meta': {'object_name': 'TreeliveSummary', 'db_table': "u'treelive_summary'"},
'avgofage_bh': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'avgofba_ft2_ac': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'avgofdbh_in': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'avgofht_ft': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'avgoftpa': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'calc_dbh_class': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'calc_tree_count': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'class_id': ('django.db.models.fields.BigIntegerField', [], {'primary_key': 'True'}),
'cond_id': ('django.db.models.fields.BigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'count_speciessizeclasses': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'fia_forest_type_name': ('django.db.models.fields.CharField', [], {'max_length': '60', 'blank': 'True'}),
'pct_of_totalba': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'plot_id': ('django.db.models.fields.BigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'sumofba_ft2_ac': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'sumoftpa': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'total_ba_ft2_ac': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'varname': ('django.db.models.fields.CharField', [], {'max_length': '60', 'blank': 'True'})
}
}
complete_apps = ['trees']
| 90.814004
| 222
| 0.564575
| 4,313
| 41,502
| 5.309297
| 0.079991
| 0.119481
| 0.209092
| 0.288222
| 0.879995
| 0.855365
| 0.816761
| 0.783659
| 0.725927
| 0.598105
| 0
| 0.00562
| 0.181076
| 41,502
| 457
| 223
| 90.814004
| 0.668138
| 0.007301
| 0
| 0.184932
| 0
| 0
| 0.581521
| 0.33589
| 0
| 0
| 0
| 0
| 0
| 1
| 0.004566
| false
| 0.002283
| 0.009132
| 0
| 0.020548
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
4c7125d5a967d3fdf61e06e85cc6de78ccbb65c5
| 4,994
|
py
|
Python
|
benchmark/runtime/dgl/rgcn.py
|
NucciTheBoss/pytorch_geometric
|
e220a2c08fa1b2f1672d616c22eac2a67b5c8967
|
[
"MIT"
] | 2,350
|
2021-09-12T08:32:50.000Z
|
2022-03-31T18:09:36.000Z
|
benchmark/runtime/dgl/rgcn.py
|
NucciTheBoss/pytorch_geometric
|
e220a2c08fa1b2f1672d616c22eac2a67b5c8967
|
[
"MIT"
] | 588
|
2021-09-12T08:49:08.000Z
|
2022-03-31T21:02:13.000Z
|
benchmark/runtime/dgl/rgcn.py
|
NucciTheBoss/pytorch_geometric
|
e220a2c08fa1b2f1672d616c22eac2a67b5c8967
|
[
"MIT"
] | 505
|
2021-09-13T13:13:32.000Z
|
2022-03-31T15:54:00.000Z
|
import dgl.function as fn
import torch
import torch.nn.functional as F
from torch.nn import Parameter as Param
from torch_geometric.nn.inits import uniform
class RGCNConv(torch.nn.Module):
    """Relational GCN layer (DGL implementation) with basis decomposition.

    Per-relation weight matrices are composed from ``num_bases`` shared
    basis matrices combined by the ``att`` coefficients.
    """
    def __init__(self, g, in_channels, out_channels, num_relations, num_bases):
        super().__init__()
        # g: a DGL graph.  Edges are expected to carry 'type' and 'norm'
        # features; nodes an 'id' feature when forward(x=None) is used.
        # TODO(review): confirm these feature names against the caller.
        self.g = g
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.num_relations = num_relations
        self.num_bases = num_bases
        # Basis decomposition: relation weights are att @ basis.
        self.basis = Param(torch.Tensor(num_bases, in_channels, out_channels))
        self.att = Param(torch.Tensor(num_relations, num_bases))
        self.root = Param(torch.Tensor(in_channels, out_channels))
        self.bias = Param(torch.Tensor(out_channels))
        self.reset_parameters()
    def reset_parameters(self):
        # One shared uniform bound (derived from fan-in) for all parameters.
        size = self.num_bases * self.in_channels
        uniform(size, self.basis)
        uniform(size, self.att)
        uniform(size, self.root)
        uniform(size, self.bias)
    def rgcn_reduce(self, node):
        # Sum incoming messages per node; dim=1 is the mailbox axis.
        return {'x': node.mailbox['m'].sum(dim=1)}
    def forward(self, x):
        # Materialize the per-relation weight tensor from the shared bases.
        self.w = torch.matmul(self.att, self.basis.view(self.num_bases, -1))
        self.w = self.w.view(self.num_relations, self.in_channels,
                             self.out_channels)
        if x is None:
            # Featureless mode: node ids act as one-hot inputs, so each
            # message is a single row of the flattened weight tensor.
            def msg_func(edge):
                w = self.w.view(-1, self.out_channels)
                index = edge.data['type'] * self.in_channels + edge.src['id']
                m = w.index_select(0, index) * edge.data['norm'].unsqueeze(1)
                return {'m': m}
        else:
            self.g.ndata['x'] = x
            def msg_func(edge):
                # Select the weight for each edge's relation type and apply
                # it to the source-node features; 'norm' rescales per edge.
                w = self.w.index_select(0, edge.data['type'])
                # NOTE(review): .squeeze() without dim could also drop the
                # edge axis when exactly one edge exists -- confirm upstream.
                m = torch.bmm(edge.src['x'].unsqueeze(1), w).squeeze()
                m = m * edge.data['norm'].unsqueeze(1)
                return {'m': m}
        # Message passing writes the aggregated result into ndata['x'].
        self.g.update_all(msg_func, self.rgcn_reduce)
        out = self.g.ndata.pop('x')
        if x is None:
            # One-hot input: x @ root reduces to adding rows of root.
            out = out + self.root
        else:
            out = out + torch.matmul(x, self.root)
        out = out + self.bias
        return out
class RGCN(torch.nn.Module):
    """Two-layer relational GCN built from RGCNConv layers (hidden size 16)."""
    def __init__(self, g, in_channels, out_channels, num_relations):
        super().__init__()
        hidden_channels, bases = 16, 30
        self.conv1 = RGCNConv(g, in_channels, hidden_channels,
                              num_relations, num_bases=bases)
        self.conv2 = RGCNConv(g, hidden_channels, out_channels,
                              num_relations, num_bases=bases)
    def forward(self, x):
        # The first layer runs featureless (one-hot node ids); ``x`` is
        # accepted for interface compatibility but not used directly.
        hidden = F.relu(self.conv1(None))
        return F.log_softmax(self.conv2(hidden), dim=1)
class RGCNSPMVConv(torch.nn.Module):
    """Relational GCN layer like RGCNConv, but aggregating with DGL's
    built-in ``fn.sum`` sparse reducer instead of a Python reduce func.
    """
    def __init__(self, g, in_channels, out_channels, num_relations, num_bases):
        super().__init__()
        # g: a DGL graph.  Edges are expected to carry 'type' and 'norm'
        # features; nodes an 'id' feature when forward(x=None) is used.
        # TODO(review): confirm these feature names against the caller.
        self.g = g
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.num_relations = num_relations
        self.num_bases = num_bases
        # Basis decomposition: relation weights are att @ basis.
        self.basis = Param(torch.Tensor(num_bases, in_channels, out_channels))
        self.att = Param(torch.Tensor(num_relations, num_bases))
        self.root = Param(torch.Tensor(in_channels, out_channels))
        self.bias = Param(torch.Tensor(out_channels))
        self.reset_parameters()
    def reset_parameters(self):
        # One shared uniform bound (derived from fan-in) for all parameters.
        size = self.num_bases * self.in_channels
        uniform(size, self.basis)
        uniform(size, self.att)
        uniform(size, self.root)
        uniform(size, self.bias)
    def forward(self, x):
        # Materialize the per-relation weight tensor from the shared bases.
        self.w = torch.matmul(self.att, self.basis.view(self.num_bases, -1))
        self.w = self.w.view(self.num_relations, self.in_channels,
                             self.out_channels)
        if x is None:
            # Featureless mode: node ids act as one-hot inputs, so each
            # message is a single row of the flattened weight tensor.
            def msg_func(edge):
                w = self.w.view(-1, self.out_channels)
                index = edge.data['type'] * self.in_channels + edge.src['id']
                m = w.index_select(0, index) * edge.data['norm'].unsqueeze(1)
                return {'m': m}
        else:
            self.g.ndata['x'] = x
            def msg_func(edge):
                # Select the weight for each edge's relation type and apply
                # it to the source-node features; 'norm' rescales per edge.
                w = self.w.index_select(0, edge.data['type'])
                # NOTE(review): .squeeze() without dim could also drop the
                # edge axis when exactly one edge exists -- confirm upstream.
                m = torch.bmm(edge.src['x'].unsqueeze(1), w).squeeze()
                m = m * edge.data['norm'].unsqueeze(1)
                return {'m': m}
        # fn.sum performs the aggregation with DGL's fused SPMV kernel.
        self.g.update_all(msg_func, fn.sum(msg='m', out='x'))
        out = self.g.ndata.pop('x')
        if x is None:
            # One-hot input: x @ root reduces to adding rows of root.
            out = out + self.root
        else:
            out = out + torch.matmul(x, self.root)
        out = out + self.bias
        return out
class RGCNSPMV(torch.nn.Module):
    """Two-layer relational GCN using the SPMV-based conv (hidden size 16)."""
    def __init__(self, g, in_channels, out_channels, num_relations):
        super().__init__()
        hidden_channels, bases = 16, 30
        self.conv1 = RGCNSPMVConv(g, in_channels, hidden_channels,
                                  num_relations, num_bases=bases)
        self.conv2 = RGCNSPMVConv(g, hidden_channels, out_channels,
                                  num_relations, num_bases=bases)
    def forward(self, x):
        # The first layer runs featureless (one-hot node ids); ``x`` is
        # accepted for interface compatibility but not used directly.
        hidden = F.relu(self.conv1(None))
        return F.log_softmax(self.conv2(hidden), dim=1)
| 33.293333
| 79
| 0.576292
| 677
| 4,994
| 4.063516
| 0.121123
| 0.072701
| 0.069066
| 0.061069
| 0.897492
| 0.897492
| 0.897492
| 0.897492
| 0.897492
| 0.897492
| 0
| 0.011698
| 0.298158
| 4,994
| 149
| 80
| 33.516779
| 0.773181
| 0
| 0
| 0.852174
| 0
| 0
| 0.010012
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.130435
| false
| 0
| 0.043478
| 0.008696
| 0.286957
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
d5b2acd9097ef659969904921c97ae0a14b99468
| 138
|
py
|
Python
|
binary_search/tests/test_search_insert_position.py
|
ahcode0919/python-ds-algorithms
|
0d617b78c50b6c18da40d9fa101438749bfc82e1
|
[
"MIT"
] | null | null | null |
binary_search/tests/test_search_insert_position.py
|
ahcode0919/python-ds-algorithms
|
0d617b78c50b6c18da40d9fa101438749bfc82e1
|
[
"MIT"
] | null | null | null |
binary_search/tests/test_search_insert_position.py
|
ahcode0919/python-ds-algorithms
|
0d617b78c50b6c18da40d9fa101438749bfc82e1
|
[
"MIT"
] | 3
|
2020-10-07T20:24:45.000Z
|
2020-12-16T04:53:19.000Z
|
from binary_search.search_insert_position import search_insert
def test_search_insert():
    """Target 5 sits at index 2 of the sorted list [1, 3, 5, 6]."""
    position = search_insert([1, 3, 5, 6], 5)
    assert position == 2
| 23
| 62
| 0.753623
| 22
| 138
| 4.409091
| 0.636364
| 0.494845
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.050847
| 0.144928
| 138
| 5
| 63
| 27.6
| 0.771186
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
d5c351a980b882d250f5e9a5467745719788ee8f
| 52
|
py
|
Python
|
x_access_token.py
|
Kadantte/twist.moe
|
7bdaf483192ad84f4b98502557a9bc68305afa79
|
[
"Unlicense"
] | 112
|
2018-07-13T18:16:58.000Z
|
2022-03-03T00:04:50.000Z
|
x_access_token.py
|
Kadantte/twist.moe
|
7bdaf483192ad84f4b98502557a9bc68305afa79
|
[
"Unlicense"
] | 29
|
2018-07-14T11:19:15.000Z
|
2022-01-07T10:48:21.000Z
|
x_access_token.py
|
Kadantte/twist.moe
|
7bdaf483192ad84f4b98502557a9bc68305afa79
|
[
"Unlicense"
] | 17
|
2018-10-23T12:56:07.000Z
|
2022-02-24T14:53:22.000Z
|
# Hard-coded API access token consumed by the surrounding project.
# SECURITY(review): committing a credential to source control is unsafe --
# prefer loading it from an environment variable or a secrets store.
X_ACCESS_TOKEN = "0df14814b9e590a1f26d3071a4ed7974"
| 26
| 51
| 0.884615
| 4
| 52
| 11
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.44898
| 0.057692
| 52
| 1
| 52
| 52
| 0.44898
| 0
| 0
| 0
| 0
| 0
| 0.615385
| 0.615385
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
d5c3a3cf526ea142d20d623930e6ea205ea4ff6d
| 27,903
|
py
|
Python
|
dsul/distributions.py
|
EdgarTeixeira/eul
|
8ce3e567d43b41cae0217c0cb3f953ee9e9d8565
|
[
"MIT"
] | null | null | null |
dsul/distributions.py
|
EdgarTeixeira/eul
|
8ce3e567d43b41cae0217c0cb3f953ee9e9d8565
|
[
"MIT"
] | null | null | null |
dsul/distributions.py
|
EdgarTeixeira/eul
|
8ce3e567d43b41cae0217c0cb3f953ee9e9d8565
|
[
"MIT"
] | null | null | null |
from abc import ABC, abstractmethod
from typing import NamedTuple, Optional
import numpy as np
from scipy import special
from scipy.special import beta, digamma, erf, erfinv, hyp2f1
from scipy.stats import uniform
def check_is_probability(x):
    """Validate that every value in *x* lies in the closed interval [0, 1].

    Used by the ``ppf`` implementations to reject invalid quantile inputs.
    The original body was an unimplemented stub that raised
    ``NotImplementedError``, which made every ``ppf``/``sample`` call in
    this module crash.

    Parameters
    ----------
    x : float or array-like
        Probability value(s) to check.

    Raises
    ------
    ValueError
        If any element of *x* is outside [0, 1] or is NaN.
    """
    values = np.asarray(x, dtype=float)
    # NaN compares False to everything, so it fails this validity test too.
    valid = (values >= 0.0) & (values <= 1.0)
    if not np.all(valid):
        raise ValueError("expected probabilities in [0, 1]")
class Interval:
    """Marker base type for the support of a distribution."""
class ContinuousInterval(NamedTuple):
    """Closed real interval ``[lower, upper]`` describing a support."""
    lower: float
    upper: float
class DiscreteInterval(NamedTuple):
    """Closed integer interval ``[lower, upper]`` describing a support."""
    lower: int
    upper: int
class Distribution(ABC):
    """Common interface for all probability distributions in this module."""
    @abstractmethod
    def mean(self) -> float:
        pass
    @abstractmethod
    def var(self) -> float:
        pass
    @abstractmethod
    def std(self) -> float:
        pass
    @abstractmethod
    def median(self) -> float:
        pass
    @abstractmethod
    def entropy(self) -> float:
        pass
    @abstractmethod
    def support(self) -> Interval:
        pass
    @abstractmethod
    def sample(self, size: int = 1, random_state: Optional[int] = None) -> np.ndarray:
        pass
    @abstractmethod
    def cdf(self, x: np.ndarray) -> np.ndarray:
        pass
    @abstractmethod
    def sf(self, x: np.ndarray) -> np.ndarray:
        pass
    @abstractmethod
    def ppf(self, x: np.ndarray) -> np.ndarray:
        pass
    # BUG FIX: when combined with @staticmethod, @abstractmethod must be the
    # innermost decorator (per the abc module docs); the original order
    # applied abstractmethod to the staticmethod object.  The spurious
    # ``self`` parameter is also dropped -- a staticmethod receives none.
    @staticmethod
    @abstractmethod
    def fit(dataset: np.ndarray) -> 'Distribution':
        pass
class ContinuousDistribution(Distribution):
    """Distribution over a continuum; adds the density function ``pdf``."""
    @abstractmethod
    def pdf(self, x: np.ndarray) -> np.ndarray:
        ...
class DiscreteDistribution(Distribution):
    """Distribution over integers; adds the mass function ``pmf``."""
    @abstractmethod
    def pmf(self, x: np.ndarray) -> np.ndarray:
        ...
class Uniform(ContinuousDistribution):
    """Continuous uniform distribution on ``[lower, upper]``."""
    def __init__(self, lower, upper) -> None:
        self.lower = lower
        self.upper = upper
    def mean(self) -> float:
        return (self.lower + self.upper) / 2
    def var(self) -> float:
        return pow(self.upper - self.lower, 2) / 12
    def std(self) -> float:
        return np.sqrt(self.var())
    def median(self) -> float:
        # Symmetric distribution: median equals the mean.
        return self.mean()
    def entropy(self) -> float:
        # Differential entropy in nats: ln(upper - lower).
        return np.log(self.upper - self.lower)
    def support(self) -> ContinuousInterval:
        return ContinuousInterval(self.lower, self.upper)
    def sample(self, size: int = 1, random_state: Optional[int] = None) -> np.ndarray:
        # scipy's uniform is parameterized as (loc, scale), not (low, high).
        dist = uniform(self.lower, (self.upper - self.lower))
        return dist.rvs(size, random_state=random_state)
    def pdf(self, x: np.ndarray) -> np.ndarray:
        constant = 1 / (self.upper - self.lower)
        # Constant density inside the support, zero outside.
        density = np.where((x < self.lower) | (x > self.upper), 0.0, constant)
        return density
    def cdf(self, x: np.ndarray) -> np.ndarray:
        value = (x - self.lower) / (self.upper - self.lower)
        return np.clip(value, 0.0, 1.0)
    def sf(self, x: np.ndarray) -> np.ndarray:
        return 1 - self.cdf(x)
    def ppf(self, x: np.ndarray) -> np.ndarray:
        check_is_probability(x)
        return self.lower + x * (self.upper - self.lower)
    # BUG FIX: the original declared ``fit(self, dataset)`` as a
    # staticmethod, so ``Uniform.fit(data)`` bound ``data`` to ``self`` and
    # crashed; a staticmethod takes no instance argument.
    @staticmethod
    def fit(dataset: np.ndarray) -> 'Uniform':
        return Uniform(lower=dataset.min(), upper=dataset.max())
class Normal(ContinuousDistribution):
    """Gaussian distribution parameterized by mean and standard deviation."""
    def __init__(self, mean, std) -> None:
        # Trailing underscores avoid clashing with the mean()/std() methods.
        self.mean_ = mean
        self.std_ = std
    def mean(self) -> float:
        return self.mean_
    def var(self) -> float:
        return self.std_ ** 2
    def std(self) -> float:
        return self.std_
    def median(self) -> float:
        # Symmetric distribution: median equals the mean.
        return self.mean_
    def entropy(self) -> float:
        # Differential entropy in nats: 0.5 * ln(2*pi*e*sigma^2).
        return 0.5 * np.log(2 * np.pi * np.e * pow(self.std_, 2))
    def support(self) -> ContinuousInterval:
        return ContinuousInterval(-np.inf, np.inf)
    def sample(self, size: int = 1, random_state: Optional[int] = None) -> np.ndarray:
        # Inverse-transform sampling through the quantile function.
        p = uniform(0, 1).rvs(size, random_state=random_state)
        return self.ppf(p)
    def pdf(self, x: np.ndarray) -> np.ndarray:
        const = 1 / (self.std_ * pow(2 * np.pi, 0.5))
        func = np.exp(-0.5 * np.square((x - self.mean_) / self.std_))
        return const * func
    def cdf(self, x: np.ndarray) -> np.ndarray:
        # Standard normal CDF expressed through the error function.
        return 0.5 * (1 + erf((x - self.mean_) / (self.std_ * pow(2, 0.5))))
    def sf(self, x: np.ndarray) -> np.ndarray:
        return 1 - self.cdf(x)
    def ppf(self, x: np.ndarray) -> np.ndarray:
        check_is_probability(x)
        A = self.std_ * pow(2, 0.5) * erfinv(2 * x - 1)
        return self.mean_ + A
    # BUG FIX: staticmethods take no ``self``; the original signature made
    # ``Normal.fit(data)`` bind the dataset to ``self`` and fail.
    @staticmethod
    def fit(dataset: np.ndarray) -> 'Normal':
        mu = dataset.mean()
        sigma = dataset.std(ddof=1)  # unbiased sample estimate
        return Normal(mean=mu, std=sigma)
class StudentT(ContinuousDistribution):
    """Student's t distribution with location and scale parameters."""
    def __init__(self, df, loc, scale) -> None:
        self.df = df
        self.loc = loc
        self.scale = scale
    def mean(self) -> float:
        # The mean is undefined for df <= 1.
        if self.df > 1:
            return self.loc
        return np.nan
    def var(self) -> float:
        if self.df > 2:
            return pow(self.scale, 2) * self.df / (self.df - 2)
        elif self.df > 1:
            # Variance is infinite for 1 < df <= 2.
            return np.inf
        return np.nan
    def std(self) -> float:
        return np.sqrt(self.var())
    def median(self) -> float:
        return self.loc
    def entropy(self) -> float:
        raise NotImplementedError()
    def support(self) -> ContinuousInterval:
        return ContinuousInterval(-np.inf, np.inf)
    def sample(self, size: int = 1, random_state: Optional[int] = None) -> np.ndarray:
        # Inverse-transform sampling; requires ppf() to be implemented.
        p = uniform(0, 1).rvs(size, random_state=random_state)
        return self.ppf(p)
    def pdf(self, x: np.ndarray) -> np.ndarray:
        pass
    def cdf(self, x: np.ndarray) -> np.ndarray:
        pass
    def sf(self, x: np.ndarray) -> np.ndarray:
        return 1 - self.cdf(x)
    def ppf(self, x: np.ndarray) -> np.ndarray:
        pass
    # BUG FIX: staticmethods take no ``self``; the original signature made
    # ``StudentT.fit(data)`` bind the dataset to ``self``.
    @staticmethod
    def fit(dataset: np.ndarray) -> 'StudentT':
        pass
class Laplace(ContinuousDistribution):
    """Laplace (double exponential) distribution with location mu, scale b."""
    def __init__(self, mu, b) -> None:
        self.mu = mu
        self.b = b
    def mean(self) -> float:
        return self.mu
    def var(self) -> float:
        return 2 * pow(self.b, 2)
    def std(self) -> float:
        return np.sqrt(self.var())
    def median(self) -> float:
        return self.mu
    def entropy(self) -> float:
        # Differential entropy in nats: ln(2*b*e).
        return np.log(2 * self.b * np.e)
    def support(self) -> ContinuousInterval:
        return ContinuousInterval(-np.inf, np.inf)
    def sample(self, size: int = 1, random_state: Optional[int] = None) -> np.ndarray:
        pass
    def pdf(self, x: np.ndarray) -> np.ndarray:
        pass
    def cdf(self, x: np.ndarray) -> np.ndarray:
        pass
    def sf(self, x: np.ndarray) -> np.ndarray:
        pass
    def ppf(self, x: np.ndarray) -> np.ndarray:
        pass
    # BUG FIX: staticmethods take no ``self``; dropped the spurious first
    # parameter from the original signature.
    @staticmethod
    def fit(dataset: np.ndarray) -> 'Distribution':
        pass
class Logistic(ContinuousDistribution):
    """Logistic distribution with location and scale parameters."""
    def __init__(self, loc, scale) -> None:
        self.loc = loc
        self.scale = scale
    def mean(self) -> float:
        return self.loc
    def var(self) -> float:
        # Variance: s^2 * pi^2 / 3.
        return pow(self.scale, 2) * pow(np.pi, 2) / 3
    def std(self) -> float:
        return np.sqrt(self.var())
    def median(self) -> float:
        return self.loc
    def entropy(self) -> float:
        # Differential entropy in nats: ln(s) + 2.
        return np.log(self.scale) + 2
    def support(self) -> ContinuousInterval:
        return ContinuousInterval(-np.inf, np.inf)
    def sample(self, size: int = 1, random_state: Optional[int] = None) -> np.ndarray:
        pass
    def pdf(self, x: np.ndarray) -> np.ndarray:
        pass
    def cdf(self, x: np.ndarray) -> np.ndarray:
        pass
    def sf(self, x: np.ndarray) -> np.ndarray:
        pass
    def ppf(self, x: np.ndarray) -> np.ndarray:
        pass
    # BUG FIX: staticmethods take no ``self``; dropped the spurious first
    # parameter from the original signature.
    @staticmethod
    def fit(dataset: np.ndarray) -> 'Distribution':
        pass
class Cauchy(ContinuousDistribution):
    """Cauchy distribution; mean and variance are undefined."""
    def __init__(self, loc, scale) -> None:
        self.loc = loc
        self.scale = scale
    def mean(self) -> float:
        # The Cauchy distribution has no defined mean.
        return np.nan
    def var(self) -> float:
        return np.nan
    def std(self) -> float:
        return np.nan
    def median(self) -> float:
        return self.loc
    def entropy(self) -> float:
        # Differential entropy in nats: ln(4*pi*gamma).
        return np.log(4 * np.pi * self.scale)
    def support(self) -> ContinuousInterval:
        return ContinuousInterval(-np.inf, np.inf)
    def sample(self, size: int = 1, random_state: Optional[int] = None) -> np.ndarray:
        pass
    def pdf(self, x: np.ndarray) -> np.ndarray:
        pass
    def cdf(self, x: np.ndarray) -> np.ndarray:
        pass
    def sf(self, x: np.ndarray) -> np.ndarray:
        pass
    def ppf(self, x: np.ndarray) -> np.ndarray:
        pass
    # BUG FIX: staticmethods take no ``self``; dropped the spurious first
    # parameter from the original signature.
    @staticmethod
    def fit(dataset: np.ndarray) -> 'Distribution':
        pass
class Exponential(ContinuousDistribution):
    """Exponential distribution with rate parameter lambda."""
    def __init__(self, rate) -> None:
        self.rate = rate
    def mean(self) -> float:
        return 1 / self.rate
    def var(self) -> float:
        return 1 / pow(self.rate, 2)
    def std(self) -> float:
        return np.sqrt(self.var())
    def median(self) -> float:
        return np.log(2) / self.rate
    def entropy(self) -> float:
        # Differential entropy in nats: 1 - ln(lambda).
        return 1 - np.log(self.rate)
    def support(self) -> ContinuousInterval:
        return ContinuousInterval(0, np.inf)
    def sample(self, size: int = 1, random_state: Optional[int] = None) -> np.ndarray:
        pass
    def pdf(self, x: np.ndarray) -> np.ndarray:
        pass
    def cdf(self, x: np.ndarray) -> np.ndarray:
        pass
    def sf(self, x: np.ndarray) -> np.ndarray:
        pass
    def ppf(self, x: np.ndarray) -> np.ndarray:
        pass
    # BUG FIX: staticmethods take no ``self``; dropped the spurious first
    # parameter from the original signature.
    @staticmethod
    def fit(dataset: np.ndarray) -> 'Distribution':
        pass
class Pareto(ContinuousDistribution):
    """Pareto (type I) distribution with minimum ``xmin`` and shape alpha."""
    def __init__(self, xmin, shape) -> None:
        self.xmin = xmin
        self.shape = shape
    def mean(self) -> float:
        # Mean diverges for shape <= 1.
        if self.shape <= 1:
            return np.inf
        return self.shape * self.xmin / (self.shape - 1)
    def var(self) -> float:
        # Variance diverges for shape <= 2.
        if self.shape <= 2:
            return np.inf
        num = pow(self.xmin, 2) * self.shape
        den = pow(self.shape - 1, 2) * (self.shape - 2)
        return num / den
    def std(self) -> float:
        return np.sqrt(self.var())
    def median(self) -> float:
        # BUG FIX: the Pareto median is xmin * 2**(1/alpha); the original
        # computed xmin * 2**alpha.
        return self.xmin * pow(2, 1 / self.shape)
    def entropy(self) -> float:
        # Differential entropy in nats: ln((xmin/alpha) * e**(1 + 1/alpha)).
        A = self.xmin / self.shape
        B = np.exp(1 + 1 / self.shape)
        return np.log(A * B)
    def support(self) -> ContinuousInterval:
        return ContinuousInterval(self.xmin, np.inf)
    def sample(self, size: int = 1, random_state: Optional[int] = None) -> np.ndarray:
        pass
    def pdf(self, x: np.ndarray) -> np.ndarray:
        pass
    def cdf(self, x: np.ndarray) -> np.ndarray:
        pass
    def sf(self, x: np.ndarray) -> np.ndarray:
        pass
    def ppf(self, x: np.ndarray) -> np.ndarray:
        pass
    # BUG FIX: staticmethods take no ``self``; dropped the spurious first
    # parameter from the original signature.
    @staticmethod
    def fit(dataset: np.ndarray) -> 'Distribution':
        pass
class Lomax(ContinuousDistribution):
    """Lomax (shifted Pareto) distribution with shape and scale parameters."""
    def __init__(self, shape, scale) -> None:
        self.shape = shape
        self.scale = scale
    def mean(self) -> float:
        # Mean exists only for shape > 1.
        if self.shape > 1:
            return self.scale / (self.shape - 1)
        return np.nan
    def var(self) -> float:
        if self.shape <= 1:
            return np.nan
        elif self.shape <= 2:
            # Variance is infinite for 1 < shape <= 2.
            return np.inf
        A = pow(self.scale, 2) * self.shape
        B = pow(self.shape - 1, 2) * (self.shape - 2)
        return A / B
    def std(self) -> float:
        return np.sqrt(self.var())
    def median(self) -> float:
        return self.scale * (pow(2, 1 / self.shape) - 1)
    def entropy(self) -> float:
        raise NotImplementedError()
    def support(self) -> ContinuousInterval:
        return ContinuousInterval(0, np.inf)
    def sample(self, size: int = 1, random_state: Optional[int] = None) -> np.ndarray:
        pass
    def pdf(self, x: np.ndarray) -> np.ndarray:
        pass
    def cdf(self, x: np.ndarray) -> np.ndarray:
        pass
    def sf(self, x: np.ndarray) -> np.ndarray:
        pass
    def ppf(self, x: np.ndarray) -> np.ndarray:
        pass
    # BUG FIX: staticmethods take no ``self``; dropped the spurious first
    # parameter from the original signature.
    @staticmethod
    def fit(dataset: np.ndarray) -> Distribution:
        pass
class LogNormal(ContinuousDistribution):
    """Log-normal distribution; parameters are of the underlying normal."""
    def __init__(self, mean, std) -> None:
        # Trailing underscores avoid clashing with the mean()/std() methods.
        self.mean_ = mean
        self.std_ = std
    def mean(self) -> float:
        return np.exp(self.mean_ + pow(self.std_, 2) / 2)
    def var(self) -> float:
        A = np.exp(pow(self.std_, 2)) - 1
        B = np.exp(2 * self.mean_ + pow(self.std_, 2))
        return A * B
    def std(self) -> float:
        return np.sqrt(self.var())
    def median(self) -> float:
        return np.exp(self.mean_)
    def entropy(self) -> float:
        # NOTE(review): this returns the entropy in bits (log base 2) while
        # the other distributions use nats -- confirm which is intended.
        A = self.std_ * pow(2 * np.pi, 0.5) * np.exp(self.mean_ + 0.5)
        return np.log(A) / np.log(2)
    def support(self) -> ContinuousInterval:
        return ContinuousInterval(0, np.inf)
    def sample(self, size: int = 1, random_state: Optional[int] = None) -> np.ndarray:
        pass
    def pdf(self, x: np.ndarray) -> np.ndarray:
        pass
    def cdf(self, x: np.ndarray) -> np.ndarray:
        pass
    def sf(self, x: np.ndarray) -> np.ndarray:
        pass
    def ppf(self, x: np.ndarray) -> np.ndarray:
        pass
    # BUG FIX: staticmethods take no ``self``; dropped the spurious first
    # parameter from the original signature.
    @staticmethod
    def fit(dataset: np.ndarray) -> 'Distribution':
        pass
class Weibull(ContinuousDistribution):
    """Weibull distribution with scale (lambda) and shape (k) parameters."""
    def __init__(self, scale, shape) -> None:
        self.scale = scale
        self.shape = shape
    def mean(self) -> float:
        return self.scale * special.gamma(1 + 1 / self.shape)
    def var(self) -> float:
        A = special.gamma(1 + 2 / self.shape)
        B = special.gamma(1 + 1 / self.shape)
        return pow(self.scale, 2) * (A - pow(B, 2))
    def std(self) -> float:
        return np.sqrt(self.var())
    def median(self) -> float:
        return self.scale * pow(np.log(2), 1 / self.shape)
    def entropy(self) -> float:
        # Differential entropy in nats: gamma*(1 - 1/k) + ln(lambda/k) + 1.
        A = np.euler_gamma * (1 - 1 / self.shape)
        B = np.log(self.scale / self.shape)
        return A + B + 1
    def support(self) -> ContinuousInterval:
        return ContinuousInterval(0, np.inf)
    def sample(self, size: int = 1, random_state: Optional[int] = None) -> np.ndarray:
        pass
    def pdf(self, x: np.ndarray) -> np.ndarray:
        pass
    def cdf(self, x: np.ndarray) -> np.ndarray:
        pass
    def sf(self, x: np.ndarray) -> np.ndarray:
        pass
    def ppf(self, x: np.ndarray) -> np.ndarray:
        pass
    # BUG FIX: staticmethods take no ``self``; dropped the spurious first
    # parameter from the original signature.
    @staticmethod
    def fit(dataset: np.ndarray) -> 'Distribution':
        pass
class Gamma(ContinuousDistribution):
    """Gamma distribution in the shape (alpha) / rate (beta) parameterization."""
    def __init__(self, alpha, beta) -> None:
        self.alpha = alpha
        self.beta = beta
    def mean(self) -> float:
        return self.alpha / self.beta
    def var(self) -> float:
        return self.alpha / pow(self.beta, 2)
    def std(self) -> float:
        return np.sqrt(self.var())
    def median(self) -> float:
        # No closed form for the gamma median.
        raise NotImplementedError()
    def entropy(self) -> float:
        # alpha - ln(beta) + ln(Gamma(alpha)) + (1 - alpha) * psi(alpha).
        A = self.alpha - np.log(self.beta) + special.gammaln(self.alpha)
        B = (1 - self.alpha) * special.digamma(self.alpha)
        return A + B
    def support(self) -> ContinuousInterval:
        return ContinuousInterval(0, np.inf)
    def sample(self, size: int = 1, random_state: Optional[int] = None) -> np.ndarray:
        pass
    def pdf(self, x: np.ndarray) -> np.ndarray:
        pass
    def cdf(self, x: np.ndarray) -> np.ndarray:
        pass
    def sf(self, x: np.ndarray) -> np.ndarray:
        pass
    def ppf(self, x: np.ndarray) -> np.ndarray:
        pass
    # BUG FIX: staticmethods take no ``self``; dropped the spurious first
    # parameter from the original signature.
    @staticmethod
    def fit(dataset: np.ndarray) -> 'Distribution':
        pass
class ChiSquare(ContinuousDistribution):
    """Chi-square distribution with k degrees of freedom."""
    def __init__(self, k) -> None:
        self.k = k
    def mean(self) -> float:
        return self.k
    def var(self) -> float:
        return 2 * self.k
    def std(self) -> float:
        return np.sqrt(self.var())
    def median(self) -> float:
        # No simple closed form for the chi-square median.
        raise NotImplementedError()
    def entropy(self) -> float:
        # k/2 + ln(2*Gamma(k/2)) + (1 - k/2) * psi(k/2).
        A = self.k / 2 + np.log(2 * special.gamma(self.k / 2))
        B = (1 - self.k / 2) * special.digamma(self.k / 2)
        return A + B
    def support(self) -> ContinuousInterval:
        return ContinuousInterval(0, np.inf)
    def sample(self, size: int = 1, random_state: Optional[int] = None) -> np.ndarray:
        pass
    def pdf(self, x: np.ndarray) -> np.ndarray:
        pass
    def cdf(self, x: np.ndarray) -> np.ndarray:
        pass
    def sf(self, x: np.ndarray) -> np.ndarray:
        pass
    def ppf(self, x: np.ndarray) -> np.ndarray:
        pass
    # BUG FIX: staticmethods take no ``self``; dropped the spurious first
    # parameter from the original signature.
    @staticmethod
    def fit(dataset: np.ndarray) -> Distribution:
        pass
class Beta(ContinuousDistribution):
    """Beta distribution with shape parameters alpha and beta on [0, 1]."""
    def __init__(self, alpha, beta) -> None:
        self.alpha = alpha
        self.beta = beta
    def mean(self) -> float:
        return self.alpha / (self.alpha + self.beta)
    def var(self) -> float:
        A = self.alpha * self.beta
        B = pow(self.alpha + self.beta, 2) * (self.alpha + self.beta + 1)
        return A / B
    def std(self) -> float:
        return np.sqrt(self.var())
    def median(self) -> float:
        # No general closed form for the beta median.
        raise NotImplementedError()
    def entropy(self) -> float:
        # ln B(a,b) - (a-1)psi(a) - (b-1)psi(b) + (a+b-2)psi(a+b).
        A = special.betaln(self.alpha, self.beta)
        B = (self.alpha - 1) * special.digamma(self.alpha)
        C = (self.beta - 1) * special.digamma(self.beta)
        D = (self.alpha + self.beta - 2) * \
            special.digamma(self.alpha + self.beta)
        return A - B - C + D
    def support(self) -> ContinuousInterval:
        return ContinuousInterval(0, 1)
    def sample(self, size: int = 1, random_state: Optional[int] = None) -> np.ndarray:
        pass
    def pdf(self, x: np.ndarray) -> np.ndarray:
        pass
    def cdf(self, x: np.ndarray) -> np.ndarray:
        pass
    def sf(self, x: np.ndarray) -> np.ndarray:
        pass
    def ppf(self, x: np.ndarray) -> np.ndarray:
        pass
    # BUG FIX: staticmethods take no ``self``; dropped the spurious first
    # parameter from the original signature.
    @staticmethod
    def fit(dataset: np.ndarray) -> Distribution:
        pass
class Bernoulli(DiscreteDistribution):
    """Bernoulli distribution with success probability p."""
    def __init__(self, p) -> None:
        self.p = p
    def mean(self) -> float:
        return self.p
    def var(self) -> float:
        return self.p * (1 - self.p)
    def std(self) -> float:
        return np.sqrt(self.var())
    def median(self) -> float:
        if self.p < 0.5:
            return 0
        elif self.p > 0.5:
            return 1
        # p == 0.5: any value in [0, 1] is a median; 0.5 is returned here.
        return self.p
    def entropy(self) -> float:
        # Binary entropy in nats; note this is NaN for p in {0, 1}.
        return -((1 - self.p) * np.log(1 - self.p) + self.p * np.log(self.p))
    def support(self) -> DiscreteInterval:
        return DiscreteInterval(0, 1)
    def sample(self, size: int = 1, random_state: Optional[int] = None) -> np.ndarray:
        pass
    def pmf(self, x: np.ndarray) -> np.ndarray:
        pass
    def cdf(self, x: np.ndarray) -> np.ndarray:
        pass
    def sf(self, x: np.ndarray) -> np.ndarray:
        pass
    def ppf(self, x: np.ndarray) -> np.ndarray:
        pass
    # BUG FIX: staticmethods take no ``self``; dropped the spurious first
    # parameter from the original signature.
    @staticmethod
    def fit(dataset: np.ndarray) -> Distribution:
        pass
class Binomial(DiscreteDistribution):
    """Binomial distribution over n trials with success probability p."""
    def __init__(self, p, n) -> None:
        self.p = p
        self.n = n
    def mean(self) -> float:
        return self.p * self.n
    def var(self) -> float:
        return self.p * (1 - self.p) * self.n
    def std(self) -> float:
        return np.sqrt(self.var())
    def median(self) -> float:
        # NOTE(review): n*p is only an approximation; the binomial median is
        # floor(n*p) or ceil(n*p) -- confirm the intended convention.
        return self.mean()
    def entropy(self) -> float:
        raise NotImplementedError()
    def support(self) -> DiscreteInterval:
        return DiscreteInterval(0, self.n)
    def sample(self, size: int = 1, random_state: Optional[int] = None) -> np.ndarray:
        pass
    def pmf(self, x: np.ndarray) -> np.ndarray:
        pass
    def cdf(self, x: np.ndarray) -> np.ndarray:
        pass
    def sf(self, x: np.ndarray) -> np.ndarray:
        pass
    def ppf(self, x: np.ndarray) -> np.ndarray:
        pass
    # BUG FIX: staticmethods take no ``self``; dropped the spurious first
    # parameter from the original signature.
    @staticmethod
    def fit(dataset: np.ndarray) -> Distribution:
        pass
class Hypergeometric(DiscreteDistribution):
    """Hypergeometric distribution: n draws from N items, K of them successes."""
    def __init__(self, n, N, K) -> None:
        self.n = n
        self.N = N
        self.K = K
    def mean(self) -> float:
        return self.n * self.K / self.N
    def var(self) -> float:
        # n*(K/N)*((N-K)/N) with the finite-population correction factor.
        A = self.n * self.K / self.N
        B = (self.N - self.K) / self.N
        C = (self.N - self.n) / (self.N - 1)
        return A * B * C
    def std(self) -> float:
        return np.sqrt(self.var())
    def median(self) -> float:
        raise NotImplementedError()
    def entropy(self) -> float:
        raise NotImplementedError()
    def support(self) -> DiscreteInterval:
        return DiscreteInterval(max(0, self.n + self.K - self.N), min(self.n, self.K))
    def sample(self, size: int = 1, random_state: Optional[int] = None) -> np.ndarray:
        pass
    def pmf(self, x: np.ndarray) -> np.ndarray:
        pass
    def cdf(self, x: np.ndarray) -> np.ndarray:
        pass
    def sf(self, x: np.ndarray) -> np.ndarray:
        pass
    def ppf(self, x: np.ndarray) -> np.ndarray:
        pass
    # BUG FIX: staticmethods take no ``self``; dropped the spurious first
    # parameter from the original signature.
    @staticmethod
    def fit(dataset: np.ndarray) -> Distribution:
        pass
class Geometric(DiscreteDistribution):
    """Geometric distribution counting failures before the first success."""
    def __init__(self, p) -> None:
        self.p = p
    def mean(self) -> float:
        return (1 - self.p) / self.p
    def var(self) -> float:
        return (1 - self.p) / pow(self.p, 2)
    def std(self) -> float:
        return np.sqrt(self.var())
    def median(self) -> float:
        # BUG FIX: the median is ceil(-1 / log2(1 - p)) - 1; the original
        # omitted the ceiling and returned a non-integer value.
        return np.ceil(-1 / np.log2(1 - self.p)) - 1
    def entropy(self) -> float:
        return -((1 - self.p) * np.log(1 - self.p) + self.p * np.log(self.p)) / self.p
    def support(self) -> DiscreteInterval:
        return DiscreteInterval(0, np.inf)
    def sample(self, size: int = 1, random_state: Optional[int] = None) -> np.ndarray:
        pass
    def pmf(self, x: np.ndarray) -> np.ndarray:
        pass
    def cdf(self, x: np.ndarray) -> np.ndarray:
        pass
    def sf(self, x: np.ndarray) -> np.ndarray:
        pass
    def ppf(self, x: np.ndarray) -> np.ndarray:
        pass
    # BUG FIX: staticmethods take no ``self``; dropped the spurious first
    # parameter from the original signature.
    @staticmethod
    def fit(dataset: np.ndarray) -> Distribution:
        pass
class Poisson(DiscreteDistribution):
    """Poisson distribution with rate (intensity) parameter ``rate``."""

    def __init__(self, rate) -> None:
        # Event rate λ; expected positive. Not validated.
        self.rate = rate

    def mean(self) -> float:
        """E[X] = λ."""
        return self.rate

    def var(self) -> float:
        """Var[X] = λ (equal to the mean for Poisson)."""
        return self.rate

    def std(self) -> float:
        """Standard deviation: sqrt of the variance."""
        return np.sqrt(self.var())

    def median(self) -> float:
        """Median has no simple closed form; not implemented."""
        raise NotImplementedError()

    def entropy(self) -> float:
        """Shannon entropy; not implemented."""
        raise NotImplementedError()

    def support(self) -> DiscreteInterval:
        """Support is the non-negative integers."""
        return DiscreteInterval(0, np.inf)

    def sample(self, size: int = 1, random_state: Optional[int] = None) -> np.ndarray:
        """Draw ``size`` random variates. Stub — currently returns None."""
        pass

    def pmf(self, x: np.ndarray) -> np.ndarray:
        """Probability mass function at ``x``. Stub."""
        pass

    def cdf(self, x: np.ndarray) -> np.ndarray:
        """Cumulative distribution function at ``x``. Stub."""
        pass

    def sf(self, x: np.ndarray) -> np.ndarray:
        """Survival function (1 - CDF) at ``x``. Stub."""
        pass

    def ppf(self, x: np.ndarray) -> np.ndarray:
        """Percent-point function (inverse CDF) at ``x``. Stub."""
        pass

    @staticmethod
    def fit(self, dataset: np.ndarray) -> Distribution:
        # NOTE(review): @staticmethod with a `self` parameter — confirm
        # the intended signature before implementing.
        pass
class ZeroInflatedPoisson(DiscreteDistribution):
    """Zero-inflated Poisson: with probability ``p`` emit 0, otherwise draw
    from a Poisson with parameter ``rate``.
    """

    def __init__(self, rate, p) -> None:
        # Poisson rate λ and zero-inflation probability π. Not validated.
        self.rate = rate
        self.p = p

    def mean(self) -> float:
        """E[X] = (1 - π) λ."""
        return (1 - self.p) * self.rate

    def var(self) -> float:
        """Var[X] = λ (1 - π) (1 + π λ)."""
        poisson_part = self.rate * (1 - self.p)
        inflation_factor = 1 + self.p * self.rate
        return poisson_part * inflation_factor

    def std(self) -> float:
        """Standard deviation: sqrt of the variance."""
        return np.sqrt(self.var())

    def median(self) -> float:
        """Median has no simple closed form; not implemented."""
        raise NotImplementedError()

    def entropy(self) -> float:
        """Shannon entropy; not implemented."""
        raise NotImplementedError()

    def support(self) -> DiscreteInterval:
        """Support is the non-negative integers."""
        return DiscreteInterval(0, np.inf)

    def sample(self, size: int = 1, random_state: Optional[int] = None) -> np.ndarray:
        """Draw ``size`` random variates. Stub — currently returns None."""
        pass

    def pmf(self, x: np.ndarray) -> np.ndarray:
        """Probability mass function at ``x``. Stub."""
        pass

    def cdf(self, x: np.ndarray) -> np.ndarray:
        """Cumulative distribution function at ``x``. Stub."""
        pass

    def sf(self, x: np.ndarray) -> np.ndarray:
        """Survival function (1 - CDF) at ``x``. Stub."""
        pass

    def ppf(self, x: np.ndarray) -> np.ndarray:
        """Percent-point function (inverse CDF) at ``x``. Stub."""
        pass

    @staticmethod
    def fit(self, dataset: np.ndarray) -> Distribution:
        # NOTE(review): @staticmethod with a `self` parameter — confirm
        # the intended signature before implementing.
        pass
class NegativeBinomial(DiscreteDistribution):
    """Negative binomial distribution.

    Parameterized so that E[X] = p r / (1 - p) — i.e. ``p`` is the
    probability of the counted outcome and ``r`` the number of terminating
    outcomes awaited.
    """

    def __init__(self, p, r) -> None:
        # Parameters are stored as given; no validation is performed.
        self.p = p
        self.r = r

    def mean(self) -> float:
        """E[X] = p r / (1 - p)."""
        return self.p * self.r / (1 - self.p)

    def var(self) -> float:
        """Var[X] = p r / (1 - p)**2."""
        return self.p * self.r / pow(1 - self.p, 2)

    def std(self) -> float:
        """Standard deviation: sqrt of the variance."""
        return np.sqrt(self.var())

    def median(self) -> float:
        """Median has no simple closed form; not implemented."""
        raise NotImplementedError()

    def entropy(self) -> float:
        """Shannon entropy; not implemented."""
        raise NotImplementedError()

    def support(self) -> DiscreteInterval:
        """Support is the non-negative integers."""
        return DiscreteInterval(0, np.inf)

    def sample(self, size: int = 1, random_state: Optional[int] = None) -> np.ndarray:
        """Draw ``size`` random variates. Stub — currently returns None."""
        pass

    def pmf(self, x: np.ndarray) -> np.ndarray:
        """Probability mass function at ``x``. Stub."""
        pass

    def cdf(self, x: np.ndarray) -> np.ndarray:
        """Cumulative distribution function at ``x``. Stub."""
        pass

    def sf(self, x: np.ndarray) -> np.ndarray:
        """Survival function (1 - CDF) at ``x``. Stub."""
        pass

    def ppf(self, x: np.ndarray) -> np.ndarray:
        """Percent-point function (inverse CDF) at ``x``. Stub."""
        pass

    @staticmethod
    def fit(self, dataset: np.ndarray) -> Distribution:
        # NOTE(review): @staticmethod with a `self` parameter — confirm
        # the intended signature before implementing.
        pass
class NegativeHypergeometric(DiscreteDistribution):
    """Negative hypergeometric distribution: successes drawn (without
    replacement) from a population of ``N`` containing ``K`` successes,
    before ``r`` failures have been seen.
    """

    def __init__(self, r, N, K) -> None:
        # Parameters are stored as given; no validation is performed.
        self.r = r
        self.N = N
        self.K = K

    def mean(self) -> float:
        """E[X] = r K / (N - K + 1)."""
        return self.r * self.K / (self.N - self.K + 1)

    def var(self) -> float:
        """Var[X] = [r (N+1) K / ((N-K+1)(N-K+2))] * [1 - r / (N-K+1)]."""
        numerator = self.r * (self.N + 1) * self.K
        denominator = (self.N - self.K + 1) * (self.N - self.K + 2)
        scale = numerator / denominator
        correction = 1 - self.r / (self.N - self.K + 1)
        return scale * correction

    def std(self) -> float:
        """Standard deviation: sqrt of the variance."""
        return np.sqrt(self.var())

    def median(self) -> float:
        """Median has no simple closed form; not implemented."""
        raise NotImplementedError()

    def entropy(self) -> float:
        """Shannon entropy; not implemented."""
        raise NotImplementedError()

    def support(self) -> DiscreteInterval:
        """Support is the integer interval [0, K]."""
        return DiscreteInterval(0, self.K)

    def sample(self, size: int = 1, random_state: Optional[int] = None) -> np.ndarray:
        """Draw ``size`` random variates. Stub — currently returns None."""
        pass

    def pmf(self, x: np.ndarray) -> np.ndarray:
        """Probability mass function at ``x``. Stub."""
        pass

    def cdf(self, x: np.ndarray) -> np.ndarray:
        """Cumulative distribution function at ``x``. Stub."""
        pass

    def sf(self, x: np.ndarray) -> np.ndarray:
        """Survival function (1 - CDF) at ``x``. Stub."""
        pass

    def ppf(self, x: np.ndarray) -> np.ndarray:
        """Percent-point function (inverse CDF) at ``x``. Stub."""
        pass

    @staticmethod
    def fit(self, dataset: np.ndarray) -> Distribution:
        # NOTE(review): @staticmethod with a `self` parameter — confirm
        # the intended signature before implementing.
        pass
class Zeta(DiscreteDistribution):
    """Zeta (Zipf) distribution with shape ``s``: P(X=k) = k**-s / ζ(s)
    for integers k >= 1.
    """

    def __init__(self, shape) -> None:
        # Shape parameter s; expected > 1 for a proper distribution.
        self.shape = shape

    def mean(self) -> float:
        """E[X] = ζ(s - 1) / ζ(s), defined for s > 2.

        Bug fix: the denominator previously called ``special.zeros``,
        which is not the Riemann zeta function; it must be
        ``special.zeta(self.shape)``.
        """
        if self.shape > 2:
            return special.zeta(self.shape - 1) / special.zeta(self.shape)
        raise NotImplementedError()

    def var(self) -> float:
        """Var[X] = [ζ(s) ζ(s-2) - ζ(s-1)**2] / ζ(s)**2, defined for s > 3."""
        if self.shape > 3:
            z_s = special.zeta(self.shape)
            z_s2 = special.zeta(self.shape - 2)
            z_s1_sq = pow(special.zeta(self.shape - 1), 2)
            return (z_s * z_s2 - z_s1_sq) / pow(z_s, 2)
        raise NotImplementedError()

    def std(self) -> float:
        """Standard deviation: sqrt of the variance (requires s > 3)."""
        return np.sqrt(self.var())

    def median(self) -> float:
        """Median has no simple closed form; not implemented."""
        raise NotImplementedError()

    def entropy(self) -> float:
        """Shannon entropy; not implemented."""
        raise NotImplementedError()

    def support(self) -> DiscreteInterval:
        """Support is the positive integers {1, 2, ...}.

        Bug fix: previously started at 0, but the zeta pmf k**-s / ζ(s)
        is undefined at k = 0.
        """
        return DiscreteInterval(1, np.inf)

    def sample(self, size: int = 1, random_state: Optional[int] = None) -> np.ndarray:
        """Draw ``size`` random variates. Stub — currently returns None."""
        pass

    def pmf(self, x: np.ndarray) -> np.ndarray:
        """Probability mass function at ``x``. Stub."""
        pass

    def cdf(self, x: np.ndarray) -> np.ndarray:
        """Cumulative distribution function at ``x``. Stub."""
        pass

    def sf(self, x: np.ndarray) -> np.ndarray:
        """Survival function (1 - CDF) at ``x``. Stub."""
        pass

    def ppf(self, x: np.ndarray) -> np.ndarray:
        """Percent-point function (inverse CDF) at ``x``. Stub."""
        pass

    @staticmethod
    def fit(self, dataset: np.ndarray) -> Distribution:
        # NOTE(review): @staticmethod with a `self` parameter — confirm
        # the intended signature before implementing.
        pass
| 24.05431
| 86
| 0.571121
| 3,670
| 27,903
| 4.300545
| 0.037057
| 0.137997
| 0.08978
| 0.086042
| 0.833048
| 0.785846
| 0.753912
| 0.714376
| 0.692771
| 0.654628
| 0
| 0.010267
| 0.294879
| 27,903
| 1,159
| 87
| 24.075065
| 0.791919
| 0
| 0
| 0.763522
| 0
| 0
| 0.004623
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.393711
| false
| 0.173585
| 0.007547
| 0.130818
| 0.623899
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
|
0
| 10
|
d5d4c2ee447f4809db57d4656072506b94652b8e
| 154
|
py
|
Python
|
SeleniumTest/test/Module_1/__init__.py
|
NayakwadiS/Selenium_Python_UnitTest_HTML
|
dceb17ccfa2a7da4659a9820333330145d648772
|
[
"MIT"
] | 2
|
2022-01-06T04:58:22.000Z
|
2022-02-09T07:21:17.000Z
|
SeleniumTest/test/Module_1/__init__.py
|
NayakwadiS/Selenium_Python_UnitTest_HTML
|
dceb17ccfa2a7da4659a9820333330145d648772
|
[
"MIT"
] | null | null | null |
SeleniumTest/test/Module_1/__init__.py
|
NayakwadiS/Selenium_Python_UnitTest_HTML
|
dceb17ccfa2a7da4659a9820333330145d648772
|
[
"MIT"
] | 4
|
2020-08-20T05:33:54.000Z
|
2022-01-14T14:13:27.000Z
|
from test.Module_1.Scenario1 import *
from test.Module_1.Scenario2 import *
from test.Module_1.Scenario3 import *
from test.Module_1.Scenario4 import *
| 38.5
| 38
| 0.805195
| 24
| 154
| 5
| 0.375
| 0.266667
| 0.466667
| 0.5
| 0.525
| 0
| 0
| 0
| 0
| 0
| 0
| 0.058824
| 0.116883
| 154
| 4
| 39
| 38.5
| 0.823529
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
9106bfae542faeb3fc4e912241b4d1479ed491c0
| 23,163
|
py
|
Python
|
hs_access_control/tests/test_community_units.py
|
tommac7/hydroshare
|
87c4543a55f98103d2614bf4c47f7904c3f9c029
|
[
"BSD-3-Clause"
] | 178
|
2015-01-08T23:03:36.000Z
|
2022-03-03T13:56:45.000Z
|
hs_access_control/tests/test_community_units.py
|
tommac7/hydroshare
|
87c4543a55f98103d2614bf4c47f7904c3f9c029
|
[
"BSD-3-Clause"
] | 4,125
|
2015-01-01T14:26:15.000Z
|
2022-03-31T16:38:55.000Z
|
hs_access_control/tests/test_community_units.py
|
tommac7/hydroshare
|
87c4543a55f98103d2614bf4c47f7904c3f9c029
|
[
"BSD-3-Clause"
] | 53
|
2015-03-15T17:56:51.000Z
|
2022-03-17T00:32:16.000Z
|
from django.test import TestCase
from django.contrib.auth.models import Group
from django.core.exceptions import PermissionDenied
from hs_access_control.models import \
UserCommunityProvenance, UserCommunityPrivilege, \
GroupCommunityProvenance, GroupCommunityPrivilege, \
PrivilegeCodes
from hs_core import hydroshare
from hs_core.testing import MockIRODSTestCaseMixin
from hs_access_control.tests.utilities import global_reset, is_equal_to_as_set
__author__ = 'Alva'
class UnitTests(MockIRODSTestCaseMixin, TestCase):
""" test basic behavior of each routine """
def setUp(self):
super(UnitTests, self).setUp()
global_reset()
self.group, _ = Group.objects.get_or_create(name='Hydroshare Author')
self.alva = hydroshare.create_account(
'alva@gmail.com',
username='alva',
first_name='alva',
last_name='couch',
superuser=False,
groups=[]
)
self.george = hydroshare.create_account(
'george@gmail.com',
username='george',
first_name='george',
last_name='miller',
superuser=False,
groups=[]
)
self.john = hydroshare.create_account(
'john@gmail.com',
username='john',
first_name='john',
last_name='miller',
superuser=False,
groups=[]
)
self.admin = hydroshare.create_account(
'admin@gmail.com',
username='admin',
first_name='first_name_admin',
last_name='last_name_admin',
superuser=True,
groups=[]
)
# george creates a entity 'bikes'
self.bikes = hydroshare.create_resource(
resource_type='GenericResource',
owner=self.george,
title='Bikes',
metadata=[],
)
# george creates a entity 'bikers'
self.bikers = self.george.uaccess.create_group('Bikers', 'Of the human powered kind')
# george creates a community 'rebels'
self.rebels = self.george.uaccess.create_community('Rebels', 'Random rebels')
def test_usercommunityprivilege_get_current_record(self):
george = self.george
rebels = self.rebels
alva = self.alva
UserCommunityProvenance.update(
community=rebels,
user=alva,
privilege=PrivilegeCodes.CHANGE,
grantor=george)
record = UserCommunityProvenance.get_current_record(
community=rebels, user=alva)
self.assertEqual(record.grantor, george)
self.assertEqual(record.community, rebels)
self.assertEqual(record.user, alva)
def test_usercommunityprivilege_get_undo_users(self):
george = self.george
rebels = self.rebels
alva = self.alva
UserCommunityProvenance.update(
community=rebels,
user=alva,
privilege=PrivilegeCodes.CHANGE,
grantor=george)
self.assertTrue(
is_equal_to_as_set(
UserCommunityProvenance.get_undo_users(
community=rebels,
grantor=george),
[alva, george]))
def test_usercommunityprivilege_get_privilege(self):
george = self.george
rebels = self.rebels
alva = self.alva
self.assertEqual(
UserCommunityProvenance.get_privilege(
community=rebels,
user=alva),
PrivilegeCodes.NONE)
UserCommunityProvenance.update(
community=rebels,
user=alva,
privilege=PrivilegeCodes.CHANGE,
grantor=george)
self.assertEqual(
UserCommunityProvenance.get_privilege(
community=rebels,
user=alva),
PrivilegeCodes.CHANGE)
def test_usercommunityprivilege_update(self):
george = self.george
rebels = self.rebels
alva = self.alva
self.assertEqual(
UserCommunityProvenance.get_privilege(
community=rebels,
user=alva),
PrivilegeCodes.NONE)
UserCommunityProvenance.update(
community=rebels,
user=alva,
privilege=PrivilegeCodes.CHANGE,
grantor=george)
self.assertEqual(
UserCommunityProvenance.get_privilege(
community=rebels,
user=alva),
PrivilegeCodes.CHANGE)
def test_usercommunityprivilege_undo_share(self):
george = self.george
rebels = self.rebels
alva = self.alva
self.assertEqual(
UserCommunityProvenance.get_privilege(
community=rebels,
user=alva),
PrivilegeCodes.NONE)
UserCommunityProvenance.update(
community=rebels,
user=alva,
privilege=PrivilegeCodes.CHANGE,
grantor=george)
self.assertEqual(
UserCommunityProvenance.get_privilege(
community=rebels,
user=alva),
PrivilegeCodes.CHANGE)
UserCommunityProvenance.update(
community=rebels,
user=alva,
privilege=PrivilegeCodes.NONE,
grantor=george)
self.assertEqual(
UserCommunityProvenance.get_privilege(
community=rebels,
user=alva),
PrivilegeCodes.NONE)
UserCommunityProvenance.update(
community=rebels,
user=alva,
privilege=PrivilegeCodes.VIEW,
grantor=george)
self.assertEqual(
UserCommunityProvenance.get_privilege(
community=rebels,
user=alva),
PrivilegeCodes.VIEW)
UserCommunityProvenance.undo_share(community=rebels, user=alva, grantor=george)
self.assertEqual(
UserCommunityProvenance.get_privilege(
community=rebels,
user=alva),
PrivilegeCodes.NONE)
# no further undo is possible.
with self.assertRaises(PermissionDenied):
UserCommunityProvenance.undo_share(community=rebels, user=alva, grantor=george)
with self.assertRaises(PermissionDenied):
UserCommunityProvenance.undo_share(community=rebels, user=alva, grantor=george)
UserCommunityProvenance.update(
community=rebels,
user=alva,
privilege=PrivilegeCodes.VIEW,
grantor=george)
self.assertEqual(
UserCommunityProvenance.get_privilege(
community=rebels,
user=alva),
PrivilegeCodes.VIEW)
UserCommunityProvenance.update(
community=rebels,
user=alva,
privilege=PrivilegeCodes.CHANGE,
grantor=george)
self.assertEqual(
UserCommunityProvenance.get_privilege(
community=rebels,
user=alva),
PrivilegeCodes.CHANGE)
UserCommunityProvenance.undo_share(community=rebels, user=alva, grantor=george)
self.assertEqual(
UserCommunityProvenance.get_privilege(
community=rebels,
user=alva),
PrivilegeCodes.VIEW)
UserCommunityProvenance.update(
community=rebels,
user=alva,
privilege=PrivilegeCodes.NONE,
grantor=george)
self.assertEqual(
UserCommunityProvenance.get_privilege(
community=rebels,
user=alva),
PrivilegeCodes.NONE)
UserCommunityProvenance.update(
community=rebels,
user=alva,
privilege=PrivilegeCodes.CHANGE,
grantor=george)
self.assertEqual(
UserCommunityProvenance.get_privilege(
community=rebels,
user=alva),
PrivilegeCodes.CHANGE)
def test_usercommunityresult_get_privilege(self):
george = self.george
rebels = self.rebels
alva = self.alva
self.assertEqual(
UserCommunityPrivilege.get_privilege(
community=rebels,
user=alva),
PrivilegeCodes.NONE)
UserCommunityPrivilege.update(
community=rebels,
user=alva,
privilege=PrivilegeCodes.CHANGE,
grantor=george)
self.assertEqual(
UserCommunityPrivilege.get_privilege(
community=rebels,
user=alva),
PrivilegeCodes.CHANGE)
def test_usercommunityresult_update(self):
george = self.george
rebels = self.rebels
alva = self.alva
self.assertEqual(
UserCommunityPrivilege.get_privilege(
community=rebels,
user=alva),
PrivilegeCodes.NONE)
UserCommunityPrivilege.update(
community=rebels,
user=alva,
privilege=PrivilegeCodes.CHANGE,
grantor=george)
self.assertEqual(
UserCommunityPrivilege.get_privilege(
community=rebels,
user=alva),
PrivilegeCodes.CHANGE)
def test_can_undo_share_community_with_user(self):
george = self.george
rebels = self.rebels
alva = self.alva
self.assertFalse(george.uaccess.can_undo_share_community_with_user(rebels, alva))
self.assertFalse(george.uaccess.can_undo_share_community_with_user(rebels, george))
self.assertFalse(alva.uaccess.can_undo_share_community_with_user(rebels, george))
self.assertEqual(
UserCommunityPrivilege.get_privilege(community=rebels, user=alva),
PrivilegeCodes.NONE)
george.uaccess.share_community_with_user(rebels, alva, PrivilegeCodes.CHANGE)
self.assertEqual(
UserCommunityPrivilege.get_privilege(community=rebels, user=alva),
PrivilegeCodes.CHANGE)
self.assertTrue(george.uaccess.can_undo_share_community_with_user(rebels, alva))
self.assertFalse(george.uaccess.can_undo_share_community_with_user(rebels, george))
self.assertFalse(alva.uaccess.can_undo_share_community_with_user(rebels, george))
george.uaccess.undo_share_community_with_user(rebels, alva)
self.assertEqual(
UserCommunityPrivilege.get_privilege(community=rebels, user=alva),
PrivilegeCodes.NONE)
self.assertFalse(george.uaccess.can_undo_share_community_with_user(rebels, alva))
self.assertFalse(george.uaccess.can_undo_share_community_with_user(rebels, george))
self.assertFalse(alva.uaccess.can_undo_share_community_with_user(rebels, george))
george.uaccess.share_community_with_user(rebels, alva, PrivilegeCodes.VIEW)
self.assertEqual(
UserCommunityPrivilege.get_privilege(community=rebels, user=alva),
PrivilegeCodes.VIEW)
self.assertTrue(george.uaccess.can_undo_share_community_with_user(rebels, alva))
self.assertFalse(george.uaccess.can_undo_share_community_with_user(rebels, george))
self.assertFalse(alva.uaccess.can_undo_share_community_with_user(rebels, george))
george.uaccess.undo_share_community_with_user(rebels, alva)
self.assertEqual(
UserCommunityPrivilege.get_privilege(community=rebels, user=alva),
PrivilegeCodes.NONE)
self.assertFalse(george.uaccess.can_undo_share_community_with_user(rebels, alva))
self.assertFalse(george.uaccess.can_undo_share_community_with_user(rebels, george))
self.assertFalse(alva.uaccess.can_undo_share_community_with_user(rebels, george))
def test_undo_share_community_with_user(self):
george = self.george
rebels = self.rebels
alva = self.alva
self.assertEqual(
UserCommunityPrivilege.get_privilege(community=rebels, user=alva),
PrivilegeCodes.NONE)
george.uaccess.share_community_with_user(rebels, alva, PrivilegeCodes.CHANGE)
self.assertEqual(
UserCommunityPrivilege.get_privilege(community=rebels, user=alva),
PrivilegeCodes.CHANGE)
george.uaccess.undo_share_community_with_user(rebels, alva)
self.assertEqual(
UserCommunityPrivilege.get_privilege(community=rebels, user=alva),
PrivilegeCodes.NONE)
george.uaccess.share_community_with_user(rebels, alva, PrivilegeCodes.VIEW)
self.assertEqual(
UserCommunityPrivilege.get_privilege(community=rebels, user=alva),
PrivilegeCodes.VIEW)
george.uaccess.undo_share_community_with_user(rebels, alva)
self.assertEqual(
UserCommunityPrivilege.get_privilege(community=rebels, user=alva),
PrivilegeCodes.NONE)
def test_groupcommunityprivilege_get_current_record(self):
george = self.george
rebels = self.rebels
bikers = self.bikers
GroupCommunityProvenance.update(
community=rebels,
group=bikers,
privilege=PrivilegeCodes.VIEW,
grantor=george)
record = GroupCommunityProvenance.get_current_record(
community=rebels, group=bikers)
self.assertEqual(record.grantor, george)
self.assertEqual(record.community, rebels)
self.assertEqual(record.group, bikers)
def test_groupcommunityprivilege_get_undo_groups(self):
george = self.george
rebels = self.rebels
bikers = self.bikers
GroupCommunityProvenance.update(
community=rebels,
group=bikers,
privilege=PrivilegeCodes.VIEW,
grantor=george)
self.assertTrue(
is_equal_to_as_set(
GroupCommunityProvenance.get_undo_groups(
community=rebels,
grantor=george),
[bikers]))
def test_groupcommunityprivilege_get_privilege(self):
george = self.george
rebels = self.rebels
bikers = self.bikers
self.assertEqual(
GroupCommunityProvenance.get_privilege(
community=rebels,
group=bikers),
PrivilegeCodes.NONE)
GroupCommunityProvenance.update(
community=rebels,
group=bikers,
privilege=PrivilegeCodes.VIEW,
grantor=george)
self.assertEqual(
GroupCommunityProvenance.get_privilege(
community=rebels,
group=bikers),
PrivilegeCodes.VIEW)
def test_groupcommunityprivilege_update(self):
george = self.george
rebels = self.rebels
bikers = self.bikers
self.assertEqual(
GroupCommunityProvenance.get_privilege(
community=rebels,
group=bikers),
PrivilegeCodes.NONE)
GroupCommunityProvenance.update(
community=rebels,
group=bikers,
privilege=PrivilegeCodes.VIEW,
grantor=george)
self.assertEqual(
GroupCommunityProvenance.get_privilege(
community=rebels,
group=bikers),
PrivilegeCodes.VIEW)
def test_groupcommunityprivilege_undo_share(self):
george = self.george
rebels = self.rebels
bikers = self.bikers
self.assertEqual(
GroupCommunityProvenance.get_privilege(
community=rebels,
group=bikers),
PrivilegeCodes.NONE)
GroupCommunityProvenance.update(
community=rebels,
group=bikers,
privilege=PrivilegeCodes.VIEW,
grantor=george)
self.assertEqual(
GroupCommunityProvenance.get_privilege(
community=rebels,
group=bikers),
PrivilegeCodes.VIEW)
GroupCommunityProvenance.update(
community=rebels,
group=bikers,
privilege=PrivilegeCodes.NONE,
grantor=george)
self.assertEqual(
GroupCommunityProvenance.get_privilege(
community=rebels,
group=bikers),
PrivilegeCodes.NONE)
GroupCommunityProvenance.update(
community=rebels,
group=bikers,
privilege=PrivilegeCodes.VIEW,
grantor=george)
self.assertEqual(
GroupCommunityProvenance.get_privilege(
community=rebels,
group=bikers),
PrivilegeCodes.VIEW)
GroupCommunityProvenance.undo_share(community=rebels, group=bikers, grantor=george)
self.assertEqual(
GroupCommunityProvenance.get_privilege(
community=rebels,
group=bikers),
PrivilegeCodes.NONE)
# no further undo is possible.
with self.assertRaises(PermissionDenied):
GroupCommunityProvenance.undo_share(community=rebels, group=bikers, grantor=george)
with self.assertRaises(PermissionDenied):
GroupCommunityProvenance.undo_share(community=rebels, group=bikers, grantor=george)
GroupCommunityProvenance.update(
community=rebels,
group=bikers,
privilege=PrivilegeCodes.VIEW,
grantor=george)
self.assertEqual(
GroupCommunityProvenance.get_privilege(
community=rebels,
group=bikers),
PrivilegeCodes.VIEW)
GroupCommunityProvenance.update(
community=rebels,
group=bikers,
privilege=PrivilegeCodes.VIEW,
grantor=george)
self.assertEqual(
GroupCommunityProvenance.get_privilege(
community=rebels,
group=bikers),
PrivilegeCodes.VIEW)
GroupCommunityProvenance.undo_share(community=rebels, group=bikers, grantor=george)
self.assertEqual(
GroupCommunityProvenance.get_privilege(
community=rebels,
group=bikers),
PrivilegeCodes.VIEW)
GroupCommunityProvenance.update(
community=rebels,
group=bikers,
privilege=PrivilegeCodes.NONE,
grantor=george)
self.assertEqual(
GroupCommunityProvenance.get_privilege(
community=rebels,
group=bikers),
PrivilegeCodes.NONE)
GroupCommunityProvenance.update(
community=rebels,
group=bikers,
privilege=PrivilegeCodes.VIEW,
grantor=george)
self.assertEqual(
GroupCommunityProvenance.get_privilege(
community=rebels,
group=bikers),
PrivilegeCodes.VIEW)
def test_groupcommunityresult_get_privilege(self):
george = self.george
rebels = self.rebels
bikers = self.bikers
self.assertEqual(
GroupCommunityPrivilege.get_privilege(
community=rebels,
group=bikers),
PrivilegeCodes.NONE)
GroupCommunityPrivilege.update(
community=rebels,
group=bikers,
privilege=PrivilegeCodes.VIEW,
grantor=george)
self.assertEqual(
GroupCommunityPrivilege.get_privilege(
community=rebels,
group=bikers),
PrivilegeCodes.VIEW)
def test_groupcommunityresult_update(self):
george = self.george
rebels = self.rebels
bikers = self.bikers
self.assertEqual(
GroupCommunityPrivilege.get_privilege(
community=rebels,
group=bikers),
PrivilegeCodes.NONE)
GroupCommunityPrivilege.update(
community=rebels,
group=bikers,
privilege=PrivilegeCodes.VIEW,
grantor=george)
self.assertEqual(
GroupCommunityPrivilege.get_privilege(
community=rebels,
group=bikers),
PrivilegeCodes.VIEW)
def test_can_undo_share_community_with_group(self):
george = self.george
rebels = self.rebels
bikers = self.bikers
self.assertFalse(george.uaccess.can_undo_share_community_with_group(rebels, bikers))
self.assertEqual(
GroupCommunityPrivilege.get_privilege(community=rebels, group=bikers),
PrivilegeCodes.NONE)
george.uaccess.share_community_with_group(rebels, bikers, PrivilegeCodes.VIEW)
self.assertEqual(
GroupCommunityPrivilege.get_privilege(community=rebels, group=bikers),
PrivilegeCodes.VIEW)
self.assertTrue(george.uaccess.can_undo_share_community_with_group(rebels, bikers))
george.uaccess.undo_share_community_with_group(rebels, bikers)
self.assertEqual(
GroupCommunityPrivilege.get_privilege(community=rebels, group=bikers),
PrivilegeCodes.NONE)
self.assertFalse(george.uaccess.can_undo_share_community_with_group(rebels, bikers))
george.uaccess.share_community_with_group(rebels, bikers, PrivilegeCodes.VIEW)
self.assertEqual(
GroupCommunityPrivilege.get_privilege(community=rebels, group=bikers),
PrivilegeCodes.VIEW)
self.assertTrue(george.uaccess.can_undo_share_community_with_group(rebels, bikers))
george.uaccess.undo_share_community_with_group(rebels, bikers)
self.assertEqual(
GroupCommunityPrivilege.get_privilege(community=rebels, group=bikers),
PrivilegeCodes.NONE)
self.assertFalse(george.uaccess.can_undo_share_community_with_group(rebels, bikers))
def test_undo_share_community_with_group(self):
george = self.george
rebels = self.rebels
bikers = self.bikers
self.assertEqual(
GroupCommunityPrivilege.get_privilege(community=rebels, group=bikers),
PrivilegeCodes.NONE)
george.uaccess.share_community_with_group(rebels, bikers, PrivilegeCodes.VIEW)
self.assertEqual(
GroupCommunityPrivilege.get_privilege(community=rebels, group=bikers),
PrivilegeCodes.VIEW)
george.uaccess.undo_share_community_with_group(rebels, bikers)
self.assertEqual(
GroupCommunityPrivilege.get_privilege(community=rebels, group=bikers),
PrivilegeCodes.NONE)
george.uaccess.share_community_with_group(rebels, bikers, PrivilegeCodes.VIEW)
self.assertEqual(
GroupCommunityPrivilege.get_privilege(community=rebels, group=bikers),
PrivilegeCodes.VIEW)
george.uaccess.undo_share_community_with_group(rebels, bikers)
self.assertEqual(
GroupCommunityPrivilege.get_privilege(community=rebels, group=bikers),
PrivilegeCodes.NONE)
| 37.786297
| 95
| 0.624142
| 1,970
| 23,163
| 7.166497
| 0.054315
| 0.104122
| 0.083298
| 0.107097
| 0.888369
| 0.877674
| 0.876682
| 0.870874
| 0.870874
| 0.866199
| 0
| 0
| 0.302897
| 23,163
| 612
| 96
| 37.848039
| 0.874342
| 0.008462
| 0
| 0.867958
| 0
| 0
| 0.010063
| 0
| 0
| 0
| 0
| 0
| 0.15493
| 1
| 0.033451
| false
| 0
| 0.012324
| 0
| 0.047535
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
910f5671292300e7ed8558e5f380701d6d158812
| 9,215
|
py
|
Python
|
tests/test_EC2ConnectCLI.py
|
colwynmyself/aws-ec2-instance-connect-cli
|
c675258e0bf01a490cab555b71956eb8b2d3f89d
|
[
"Apache-2.0"
] | null | null | null |
tests/test_EC2ConnectCLI.py
|
colwynmyself/aws-ec2-instance-connect-cli
|
c675258e0bf01a490cab555b71956eb8b2d3f89d
|
[
"Apache-2.0"
] | null | null | null |
tests/test_EC2ConnectCLI.py
|
colwynmyself/aws-ec2-instance-connect-cli
|
c675258e0bf01a490cab555b71956eb8b2d3f89d
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from ec2instanceconnectcli.EC2InstanceConnectCLI import EC2InstanceConnectCLI
from ec2instanceconnectcli.EC2InstanceConnectCommand import EC2InstanceConnectCommand
from ec2instanceconnectcli.EC2InstanceConnectLogger import EC2InstanceConnectLogger
from testloader.test_base import TestBase
from unittest import mock
class TestEC2InstanceConnectCLI(TestBase):
@mock.patch('ec2instanceconnectcli.EC2InstanceConnectCLI.EC2InstanceConnectCLI.run_command')
@mock.patch('ec2instanceconnectcli.key_publisher.push_public_key')
@mock.patch('ec2instanceconnectcli.ec2_util.get_instance_data')
def test_mssh_no_target(self,
mock_instance_data,
mock_push_key,
mock_run):
mock_file = 'identity'
flag = '-f flag'
command = 'command arg'
logger = EC2InstanceConnectLogger()
instance_bundles = [{'username': self.default_user, 'instance_id': self.instance_id,
'target': None, 'zone': self.availability_zone, 'region': self.region,
'profile': self.profile}]
mock_instance_data.return_value = self.instance_info
mock_push_key.return_value = None
cli_command = EC2InstanceConnectCommand("ssh", instance_bundles, mock_file, flag, command, logger.get_logger())
cli = EC2InstanceConnectCLI(instance_bundles, "", cli_command, logger.get_logger())
cli.invoke_command()
expected_command = "ssh -i {0} {1} {2}@{3} {4}".format(mock_file, flag, self.default_user,
self.public_ip, command)
# Check that we successfully get to the run
self.assertTrue(mock_instance_data.called)
self.assertTrue(mock_push_key.called)
# Also check that we get the correct command generated
mock_run.assert_called_with(expected_command)
@mock.patch('ec2instanceconnectcli.EC2InstanceConnectCLI.EC2InstanceConnectCLI.run_command')
@mock.patch('ec2instanceconnectcli.key_publisher.push_public_key')
@mock.patch('ec2instanceconnectcli.ec2_util.get_instance_data')
def test_mssh_no_target_no_public_ip(self,
mock_instance_data,
mock_push_key,
mock_run):
mock_file = "identity"
flag = '-f flag'
command = 'command arg'
logger = EC2InstanceConnectLogger()
instance_bundles = [{'username': self.default_user, 'instance_id': self.instance_id,
'target': None, 'zone': self.availability_zone, 'region': self.region,
'profile': self.profile}]
mock_instance_data.return_value = self.private_instance_info
mock_push_key.return_value = None
cli_command = EC2InstanceConnectCommand("ssh", instance_bundles, mock_file, flag, command, logger.get_logger())
cli = EC2InstanceConnectCLI(instance_bundles, "", cli_command, logger.get_logger())
cli.invoke_command()
expected_command = "ssh -i {0} {1} {2}@{3} {4}".format(mock_file, flag, self.default_user,
self.private_ip, command)
# Check that we successfully get to the run
self.assertTrue(mock_instance_data.called)
self.assertTrue(mock_push_key.called)
mock_run.assert_called_with(expected_command)
@mock.patch('ec2instanceconnectcli.EC2InstanceConnectCLI.EC2InstanceConnectCLI.run_command')
@mock.patch('ec2instanceconnectcli.key_publisher.push_public_key')
@mock.patch('ec2instanceconnectcli.ec2_util.get_instance_data')
def test_mssh_with_target(self,
mock_instance_data,
mock_push_key,
mock_run):
mock_file = 'identity'
flag = '-f flag'
command = 'command arg'
host = '0.0.0.0'
logger = EC2InstanceConnectLogger()
instance_bundles = [{'username': self.default_user, 'instance_id': self.instance_id,
'target': host, 'zone': self.availability_zone, 'region': self.region,
'profile': self.profile}]
mock_instance_data.return_value = self.instance_info
mock_push_key.return_value = None
cli_command = EC2InstanceConnectCommand("ssh", instance_bundles, mock_file, flag, command, logger.get_logger())
cli = EC2InstanceConnectCLI(instance_bundles, "", cli_command, logger.get_logger())
cli.invoke_command()
expected_command = "ssh -i {0} {1} {2}@{3} {4}".format(mock_file, flag, self.default_user,
host, command)
# Check that we successfully get to the run
# Since both target and availability_zone are provided, mock_instance_data should not be called
self.assertFalse(mock_instance_data.called)
self.assertTrue(mock_push_key.called)
mock_run.assert_called_with(expected_command)
@mock.patch('ec2instanceconnectcli.EC2InstanceConnectCLI.EC2InstanceConnectCLI.run_command')
@mock.patch('ec2instanceconnectcli.key_publisher.push_public_key')
@mock.patch('ec2instanceconnectcli.ec2_util.get_instance_data')
def test_msftp(self,
mock_instance_data,
mock_push_key,
mock_run):
mock_file = 'identity'
flag = '-f flag'
command = 'file2 file3'
logger = EC2InstanceConnectLogger()
instance_bundles = [{'username': self.default_user, 'instance_id': self.instance_id,
'target': None, 'zone': self.availability_zone, 'region': self.region,
'profile': self.profile, 'file': 'file1'}]
mock_instance_data.return_value = self.instance_info
mock_push_key.return_value = None
expected_command = "sftp -i {0} {1} {2}@{3}:{4} {5}".format(mock_file, flag, self.default_user,
self.public_ip, 'file1', command)
cli_command = EC2InstanceConnectCommand("sftp", instance_bundles, mock_file, flag, command, logger.get_logger())
cli = EC2InstanceConnectCLI(instance_bundles, "", cli_command, logger.get_logger())
cli.invoke_command()
# Check that we successfully get to the run
self.assertTrue(mock_instance_data.called)
self.assertTrue(mock_push_key.called)
mock_run.assert_called_with(expected_command)
@mock.patch('ec2instanceconnectcli.EC2InstanceConnectCLI.EC2InstanceConnectCLI.run_command')
@mock.patch('ec2instanceconnectcli.key_publisher.push_public_key')
@mock.patch('ec2instanceconnectcli.ec2_util.get_instance_data')
def test_mscp(self,
mock_instance_data,
mock_push_key,
mock_run):
mock_file = 'identity'
flag = '-f flag'
command = 'file2 file3'
logger = EC2InstanceConnectLogger()
instance_bundles = [{'username': self.default_user, 'instance_id': self.instance_id,
'target': None, 'zone': self.availability_zone, 'region': self.region,
'profile': self.profile, 'file': 'file1'},
{'username': self.default_user, 'instance_id': self.instance_id,
'target': None, 'zone': self.availability_zone, 'region': self.region,
'profile': self.profile, 'file': 'file4'}]
mock_instance_data.return_value = self.instance_info
mock_push_key.return_value = None
expected_command = "scp -i {0} {1} {2}@{3}:{4} {5} {6}@{7}:{8}".format(mock_file, flag, self.default_user,
self.public_ip, 'file1', command,
self.default_user,
self.public_ip, 'file4')
cli_command = EC2InstanceConnectCommand("scp", instance_bundles, mock_file, flag, command, logger.get_logger())
cli = EC2InstanceConnectCLI(instance_bundles, "", cli_command, logger.get_logger())
cli.invoke_command()
# Check that we successfully get to the run
self.assertTrue(mock_instance_data.called)
self.assertTrue(mock_push_key.called)
mock_run.assert_called_with(expected_command)
| 51.769663
| 120
| 0.631253
| 970
| 9,215
| 5.740206
| 0.154639
| 0.045259
| 0.045977
| 0.039511
| 0.813039
| 0.813039
| 0.80819
| 0.805675
| 0.805675
| 0.798312
| 0
| 0.01603
| 0.275638
| 9,215
| 177
| 121
| 52.062147
| 0.818127
| 0.097341
| 0
| 0.765152
| 0
| 0.007576
| 0.178115
| 0.10605
| 0
| 0
| 0
| 0
| 0.113636
| 1
| 0.037879
| false
| 0
| 0.037879
| 0
| 0.083333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
91124c172322c70479c9d55d5b29e7d5b8a059e4
| 6,545
|
py
|
Python
|
loldib/getratings/models/NA/na_ezreal/na_ezreal_mid.py
|
koliupy/loldib
|
c9ab94deb07213cdc42b5a7c26467cdafaf81b7f
|
[
"Apache-2.0"
] | null | null | null |
loldib/getratings/models/NA/na_ezreal/na_ezreal_mid.py
|
koliupy/loldib
|
c9ab94deb07213cdc42b5a7c26467cdafaf81b7f
|
[
"Apache-2.0"
] | null | null | null |
loldib/getratings/models/NA/na_ezreal/na_ezreal_mid.py
|
koliupy/loldib
|
c9ab94deb07213cdc42b5a7c26467cdafaf81b7f
|
[
"Apache-2.0"
] | null | null | null |
from getratings.models.ratings import Ratings
# Auto-generated placeholder models: one empty Ratings subclass per opposing
# champion for Ezreal played mid on NA. The subclasses add no behavior of
# their own; the class NAME (region_champion_role_opponent) appears to be the
# lookup key used by the surrounding getratings framework — NOTE(review):
# inferred from the naming scheme and file path, confirm against the
# framework's model-resolution code.
class NA_Ezreal_Mid_Aatrox(Ratings):
    pass
class NA_Ezreal_Mid_Ahri(Ratings):
    pass
class NA_Ezreal_Mid_Akali(Ratings):
    pass
class NA_Ezreal_Mid_Alistar(Ratings):
    pass
class NA_Ezreal_Mid_Amumu(Ratings):
    pass
class NA_Ezreal_Mid_Anivia(Ratings):
    pass
class NA_Ezreal_Mid_Annie(Ratings):
    pass
class NA_Ezreal_Mid_Ashe(Ratings):
    pass
class NA_Ezreal_Mid_AurelionSol(Ratings):
    pass
class NA_Ezreal_Mid_Azir(Ratings):
    pass
class NA_Ezreal_Mid_Bard(Ratings):
    pass
class NA_Ezreal_Mid_Blitzcrank(Ratings):
    pass
class NA_Ezreal_Mid_Brand(Ratings):
    pass
class NA_Ezreal_Mid_Braum(Ratings):
    pass
class NA_Ezreal_Mid_Caitlyn(Ratings):
    pass
class NA_Ezreal_Mid_Camille(Ratings):
    pass
class NA_Ezreal_Mid_Cassiopeia(Ratings):
    pass
class NA_Ezreal_Mid_Chogath(Ratings):
    pass
class NA_Ezreal_Mid_Corki(Ratings):
    pass
class NA_Ezreal_Mid_Darius(Ratings):
    pass
class NA_Ezreal_Mid_Diana(Ratings):
    pass
class NA_Ezreal_Mid_Draven(Ratings):
    pass
class NA_Ezreal_Mid_DrMundo(Ratings):
    pass
class NA_Ezreal_Mid_Ekko(Ratings):
    pass
class NA_Ezreal_Mid_Elise(Ratings):
    pass
class NA_Ezreal_Mid_Evelynn(Ratings):
    pass
class NA_Ezreal_Mid_Ezreal(Ratings):
    pass
class NA_Ezreal_Mid_Fiddlesticks(Ratings):
    pass
class NA_Ezreal_Mid_Fiora(Ratings):
    pass
class NA_Ezreal_Mid_Fizz(Ratings):
    pass
class NA_Ezreal_Mid_Galio(Ratings):
    pass
class NA_Ezreal_Mid_Gangplank(Ratings):
    pass
class NA_Ezreal_Mid_Garen(Ratings):
    pass
class NA_Ezreal_Mid_Gnar(Ratings):
    pass
class NA_Ezreal_Mid_Gragas(Ratings):
    pass
class NA_Ezreal_Mid_Graves(Ratings):
    pass
class NA_Ezreal_Mid_Hecarim(Ratings):
    pass
class NA_Ezreal_Mid_Heimerdinger(Ratings):
    pass
class NA_Ezreal_Mid_Illaoi(Ratings):
    pass
class NA_Ezreal_Mid_Irelia(Ratings):
    pass
class NA_Ezreal_Mid_Ivern(Ratings):
    pass
class NA_Ezreal_Mid_Janna(Ratings):
    pass
class NA_Ezreal_Mid_JarvanIV(Ratings):
    pass
class NA_Ezreal_Mid_Jax(Ratings):
    pass
class NA_Ezreal_Mid_Jayce(Ratings):
    pass
class NA_Ezreal_Mid_Jhin(Ratings):
    pass
class NA_Ezreal_Mid_Jinx(Ratings):
    pass
class NA_Ezreal_Mid_Kalista(Ratings):
    pass
class NA_Ezreal_Mid_Karma(Ratings):
    pass
class NA_Ezreal_Mid_Karthus(Ratings):
    pass
class NA_Ezreal_Mid_Kassadin(Ratings):
    pass
class NA_Ezreal_Mid_Katarina(Ratings):
    pass
class NA_Ezreal_Mid_Kayle(Ratings):
    pass
class NA_Ezreal_Mid_Kayn(Ratings):
    pass
class NA_Ezreal_Mid_Kennen(Ratings):
    pass
class NA_Ezreal_Mid_Khazix(Ratings):
    pass
class NA_Ezreal_Mid_Kindred(Ratings):
    pass
class NA_Ezreal_Mid_Kled(Ratings):
    pass
class NA_Ezreal_Mid_KogMaw(Ratings):
    pass
class NA_Ezreal_Mid_Leblanc(Ratings):
    pass
class NA_Ezreal_Mid_LeeSin(Ratings):
    pass
class NA_Ezreal_Mid_Leona(Ratings):
    pass
class NA_Ezreal_Mid_Lissandra(Ratings):
    pass
class NA_Ezreal_Mid_Lucian(Ratings):
    pass
class NA_Ezreal_Mid_Lulu(Ratings):
    pass
class NA_Ezreal_Mid_Lux(Ratings):
    pass
class NA_Ezreal_Mid_Malphite(Ratings):
    pass
class NA_Ezreal_Mid_Malzahar(Ratings):
    pass
class NA_Ezreal_Mid_Maokai(Ratings):
    pass
class NA_Ezreal_Mid_MasterYi(Ratings):
    pass
class NA_Ezreal_Mid_MissFortune(Ratings):
    pass
class NA_Ezreal_Mid_MonkeyKing(Ratings):
    pass
class NA_Ezreal_Mid_Mordekaiser(Ratings):
    pass
class NA_Ezreal_Mid_Morgana(Ratings):
    pass
class NA_Ezreal_Mid_Nami(Ratings):
    pass
class NA_Ezreal_Mid_Nasus(Ratings):
    pass
class NA_Ezreal_Mid_Nautilus(Ratings):
    pass
class NA_Ezreal_Mid_Nidalee(Ratings):
    pass
class NA_Ezreal_Mid_Nocturne(Ratings):
    pass
class NA_Ezreal_Mid_Nunu(Ratings):
    pass
class NA_Ezreal_Mid_Olaf(Ratings):
    pass
class NA_Ezreal_Mid_Orianna(Ratings):
    pass
class NA_Ezreal_Mid_Ornn(Ratings):
    pass
class NA_Ezreal_Mid_Pantheon(Ratings):
    pass
class NA_Ezreal_Mid_Poppy(Ratings):
    pass
class NA_Ezreal_Mid_Quinn(Ratings):
    pass
class NA_Ezreal_Mid_Rakan(Ratings):
    pass
class NA_Ezreal_Mid_Rammus(Ratings):
    pass
class NA_Ezreal_Mid_RekSai(Ratings):
    pass
class NA_Ezreal_Mid_Renekton(Ratings):
    pass
class NA_Ezreal_Mid_Rengar(Ratings):
    pass
class NA_Ezreal_Mid_Riven(Ratings):
    pass
class NA_Ezreal_Mid_Rumble(Ratings):
    pass
class NA_Ezreal_Mid_Ryze(Ratings):
    pass
class NA_Ezreal_Mid_Sejuani(Ratings):
    pass
class NA_Ezreal_Mid_Shaco(Ratings):
    pass
class NA_Ezreal_Mid_Shen(Ratings):
    pass
class NA_Ezreal_Mid_Shyvana(Ratings):
    pass
class NA_Ezreal_Mid_Singed(Ratings):
    pass
class NA_Ezreal_Mid_Sion(Ratings):
    pass
class NA_Ezreal_Mid_Sivir(Ratings):
    pass
class NA_Ezreal_Mid_Skarner(Ratings):
    pass
class NA_Ezreal_Mid_Sona(Ratings):
    pass
class NA_Ezreal_Mid_Soraka(Ratings):
    pass
class NA_Ezreal_Mid_Swain(Ratings):
    pass
class NA_Ezreal_Mid_Syndra(Ratings):
    pass
class NA_Ezreal_Mid_TahmKench(Ratings):
    pass
class NA_Ezreal_Mid_Taliyah(Ratings):
    pass
class NA_Ezreal_Mid_Talon(Ratings):
    pass
class NA_Ezreal_Mid_Taric(Ratings):
    pass
class NA_Ezreal_Mid_Teemo(Ratings):
    pass
class NA_Ezreal_Mid_Thresh(Ratings):
    pass
class NA_Ezreal_Mid_Tristana(Ratings):
    pass
class NA_Ezreal_Mid_Trundle(Ratings):
    pass
class NA_Ezreal_Mid_Tryndamere(Ratings):
    pass
class NA_Ezreal_Mid_TwistedFate(Ratings):
    pass
class NA_Ezreal_Mid_Twitch(Ratings):
    pass
class NA_Ezreal_Mid_Udyr(Ratings):
    pass
class NA_Ezreal_Mid_Urgot(Ratings):
    pass
class NA_Ezreal_Mid_Varus(Ratings):
    pass
class NA_Ezreal_Mid_Vayne(Ratings):
    pass
class NA_Ezreal_Mid_Veigar(Ratings):
    pass
class NA_Ezreal_Mid_Velkoz(Ratings):
    pass
class NA_Ezreal_Mid_Vi(Ratings):
    pass
class NA_Ezreal_Mid_Viktor(Ratings):
    pass
class NA_Ezreal_Mid_Vladimir(Ratings):
    pass
class NA_Ezreal_Mid_Volibear(Ratings):
    pass
class NA_Ezreal_Mid_Warwick(Ratings):
    pass
class NA_Ezreal_Mid_Xayah(Ratings):
    pass
class NA_Ezreal_Mid_Xerath(Ratings):
    pass
class NA_Ezreal_Mid_XinZhao(Ratings):
    pass
class NA_Ezreal_Mid_Yasuo(Ratings):
    pass
class NA_Ezreal_Mid_Yorick(Ratings):
    pass
class NA_Ezreal_Mid_Zac(Ratings):
    pass
class NA_Ezreal_Mid_Zed(Ratings):
    pass
class NA_Ezreal_Mid_Ziggs(Ratings):
    pass
class NA_Ezreal_Mid_Zilean(Ratings):
    pass
class NA_Ezreal_Mid_Zyra(Ratings):
    pass
| 15.695444
| 46
| 0.766692
| 972
| 6,545
| 4.736626
| 0.151235
| 0.209818
| 0.389661
| 0.479583
| 0.803432
| 0.803432
| 0
| 0
| 0
| 0
| 0
| 0
| 0.169748
| 6,545
| 416
| 47
| 15.733173
| 0.847258
| 0
| 0
| 0.498195
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.498195
| 0.00361
| 0
| 0.501805
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
e67103751a623815f0850d038a34b16b5cda64dd
| 27,372
|
py
|
Python
|
code/post_process.py
|
andyrevell/pokemon_kanji
|
548d5f59c0778ebcaac7ad755cd13eeb4a5f75ca
|
[
"Apache-2.0"
] | null | null | null |
code/post_process.py
|
andyrevell/pokemon_kanji
|
548d5f59c0778ebcaac7ad755cd13eeb4a5f75ca
|
[
"Apache-2.0"
] | null | null | null |
code/post_process.py
|
andyrevell/pokemon_kanji
|
548d5f59c0778ebcaac7ad755cd13eeb4a5f75ca
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 25 07:54:32 2022
@author: arevell
"""
from pathlib import Path
import pprint
import json
from typing import List
import copy
import time
import pandas as pd
import os
from os.path import join
import unicodedata # to detect if kanji or kana
import re
import numpy as np
import collections
import math
# %%
# Input locations: Pokémon Legends Arceus game-text dumps (UTF-16), one file
# per script variant (hiragana/katakana Japanese, English) for both the
# "common" and "story" text groups.
text_path = "text"
path_pla_common_hir = join(text_path, "PLA_text", "common", "ja-hiragana.txt")
path_pla_common_kat = join(text_path, "PLA_text", "common", "ja-katakana.txt")
path_pla_story_hir = join(text_path, "PLA_text", "story", "ja-hiragana.txt")
path_pla_story_kat = join(text_path, "PLA_text", "story", "ja-katakana.txt")
path_pla_common_en = join(text_path, "PLA_text", "common", "en.txt")
path_pla_story_en = join(text_path, "PLA_text", "story", "en.txt")
# Fail fast with a clear error if the dumps are missing. (The original bare
# os.path.exists(...) calls discarded their results and checked nothing.)
for _required_path in (path_pla_common_en, path_pla_story_en):
    if not os.path.exists(_required_path):
        raise FileNotFoundError(_required_path)
with open(path_pla_common_hir, encoding='utf16') as f:
    pla_common_hir = f.readlines()
with open(path_pla_common_kat, encoding='utf16') as f:
    pla_common_kat = f.readlines()
with open(path_pla_story_hir, encoding='utf16') as f:
    pla_story_hir = f.readlines()
with open(path_pla_story_kat, encoding='utf16') as f:
    pla_story_kat = f.readlines()
with open(path_pla_common_en, encoding='utf16') as f:
    pla_common_en = f.readlines()
with open(path_pla_story_en, encoding='utf16') as f:
    pla_story_en = f.readlines()
# Control characters / private-use glyphs stripped from every raw line.
remove_characters = ["\n", "\t", "\ue30a", "\ue30b", "\ue30c", "\ue30d", "\ue30e", "\ue30f", "\ue301", "\ue302", "\ue303", "\ue304", "\ue305", "\ue306", "\ue307",
                     "\ue308", "\ue309", "\ue310", "\ue31a", "\ue31b", "\ue31c", "\ue31d", "\ue31e", "\ue311", "\ue312", "\ue313", "\ue314", "\ue315", "\ue316", "\ue317", "\ue319", "\u3000"]
# Column names of the generated dialogue tables. (Fixed: the original
# `columns_dialogue = columns=[...]` leaked a stray `columns` global.)
columns_dialogue = ["dialogue", "furigana", "kana", "english", "vocab",
                    "kanji", "kanji_style", "furigana_style", "kana_style", "vocab_style"]
# %% dialogue 1
# =============================================================================
# Tag each story dialogue row with the game text file it came from.
# =============================================================================
dialogue_tags = pd.read_csv("text/output/PLA_dialogue1.txt", sep="\t", header=None)
dialogue_tags.columns = ["index"] + columns_dialogue
dialogue_tags = dialogue_tags.drop("index", axis = 1)
dialogue_tags["tags"] = ""
# Walk the raw hiragana story dump. "Text File :" header lines mark which
# in-game script file the following lines belong to; that file name becomes a
# hierarchical tag (#PLA::dialogue::story::...) appended to every matching
# dialogue row.
for t in range(len(pla_story_hir)):
    print(f"\r{t+1}/{len(pla_story_hir)}; {np.round((t+1)/len(pla_story_hir)*100, 1)}", end = "\r")
    line = pla_story_hir[t]
    if "Text File :" in line:
        # New section header: compute the tag used until the next header.
        tag = pla_story_hir[t]
        for character in remove_characters:
            tag = tag.replace(character, "")
        tag = tag.replace("Text File : ", "")
        # Group related script files under an extra hierarchy level.
        if "sub_" in tag:
            add = "sub"
        elif "chap_" in tag:
            add = "chapter"
        elif "z_area" in tag:
            add = "z_area"
        else:
            add = ""
        if len(add)==0:
            tag_h = f"#PLA::dialogue::story::{tag}"
        else:
            tag_h = f"#PLA::dialogue::story::{add}::{tag}"
    for character in remove_characters:
        line = line.replace(character, "")
    # Only lines containing Japanese script (kanji/hiragana/katakana) count
    # as dialogue worth tagging.
    contains_japanse = []
    for l in range(len(line)):
        letter = line[l]
        unic = unicodedata.name(letter)
        # print(unic)
        if "CJK" in unic or "HIRAGANA" in unic or "KATAKANA" in unic:
            contains_japanse.append(True)
        else:
            contains_japanse.append(False)
    if any(contains_japanse):
        # Replace in-game [VAR ...] substitution markers with the " ___ "
        # blank used in the dialogue table, then look the line up there.
        line_jp = re.sub("[\[]VAR.*?[\]]", " ___ ", line)
        #print(tag_h)
        #print(line_jp)
        if len( np.where(line_jp == dialogue_tags["dialogue"])[0]) > 0:
            ind = np.where(line_jp == dialogue_tags["dialogue"])[0][0]
            tags_dia = dialogue_tags.loc[ind, "tags"]
            if tag_h in tags_dia:
                continue
            else:
                # Append each tag at most once per dialogue row.
                dialogue_tags.loc[ind, "tags"] = f"{tags_dia} {tag_h}"
# %% remove weird blanks
# Scrub the " ___ " placeholders (left by [VAR ...] substitution) out of every
# text column, patch a few known-bad English renderings, and correct two
# recurring dictionary mistranslations in the vocab columns
# (シンオウ -> Sinnoh, ヒスイ -> Hisui).
dialogue_blanks = copy.deepcopy(dialogue_tags)
for d in range(len(dialogue_blanks)):
    print(f"\r{d+1}/{len(dialogue_blanks)}; {np.round((d+1)/len(dialogue_blanks)*100,1)}% ",end = "\r")
    jp = dialogue_blanks.loc[d, "dialogue"]
    fu = dialogue_blanks.loc[d, "furigana"]
    kana = dialogue_blanks.loc[d, "kana"]
    kanji_st = dialogue_blanks.loc[d, "kanji_style"]
    fu_st = dialogue_blanks.loc[d, "furigana_style"]
    kana_st = dialogue_blanks.loc[d, "kana_style"]
    en = dialogue_blanks.loc[d, "english"]
    # Hand-fix two garbled English renderings of the "wisp(s)" line.
    if " ___ isare ___ wisp ___ s " in en:
        en = en.replace(" ___ isare ___ wisp ___ s ", "is/are ___ wisp(s)")
    if "wisp ___ s" in en:
        en = en.replace("wisp ___ s", "wisp(s)")
    jp = jp.replace(" ___", "")
    fu = fu.replace("___", "")
    kana = kana.replace("___", "")
    # BUG FIX: this previously read `fu_st.replace(...)`, clobbering the
    # kanji_style column with furigana_style text.
    kanji_st = kanji_st.replace("___", "")
    fu_st = fu_st.replace("___", "")
    kana_st = kana_st.replace("___", "")
    en = en.replace(" ___ ", "")
    dialogue_blanks.loc[d, "dialogue"] = jp
    dialogue_blanks.loc[d, "furigana"] = fu
    dialogue_blanks.loc[d, "kana"] = kana
    dialogue_blanks.loc[d, "kanji_style"] = kanji_st
    dialogue_blanks.loc[d, "furigana_style"] = fu_st
    dialogue_blanks.loc[d, "kana_style"] = kana_st
    dialogue_blanks.loc[d, "english"] = en
    # Switch Sinnoh: the dictionary glossed シンオウ as the verb する.
    shinnoh = 'シンオウ: to do, to carry out, to perform'
    shinnoh_style = 'シンオウ</font>: to do, to carry out, to perform'
    shinnoh_replace = "シンオウ: Sinnoh"
    shinnoh_style_replace = "シンオウ</font>: Sinnoh"
    if isinstance( dialogue_blanks.loc[d, "vocab"], str):
        if shinnoh in dialogue_blanks.loc[d, "vocab"]:
            print("changing Sinnoh")
            dialogue_blanks.loc[d, "vocab"] = dialogue_blanks.loc[d, "vocab"].replace(shinnoh, shinnoh_replace)
        if shinnoh_style in dialogue_blanks.loc[d, "vocab_style"]:
            dialogue_blanks.loc[d, "vocab_style"] = dialogue_blanks.loc[d, "vocab_style"].replace(shinnoh_style, shinnoh_style_replace)
    # Switch Hisui: ヒスイ was glossed literally as "Jade".
    shinnoh = 'ヒスイ: Jade'
    shinnoh_style = 'ヒスイ</font>: Jade'
    shinnoh_replace = "ヒスイ: Hisui"
    shinnoh_style_replace = "ヒスイ</font>: Hisui"
    if isinstance( dialogue_blanks.loc[d, "vocab"], str):
        if shinnoh in dialogue_blanks.loc[d, "vocab"]:
            print("changing Hisui")
            dialogue_blanks.loc[d, "vocab"] = dialogue_blanks.loc[d, "vocab"].replace(shinnoh, shinnoh_replace)
        if shinnoh_style in dialogue_blanks.loc[d, "vocab_style"]:
            dialogue_blanks.loc[d, "vocab_style"] = dialogue_blanks.loc[d, "vocab_style"].replace(shinnoh_style, shinnoh_style_replace)
dialogue_blanks_index = copy.deepcopy(dialogue_blanks)
dialogue_blanks_index = dialogue_blanks_index.reset_index()
# %%
dialogue_blanks_index.to_csv("text/output/PLA_dialogue1_blanks_removed.txt", sep="\t", header=None, index=None)
# %%
dia1 = pd.read_csv("text/output/PLA_dialogue1_blanks_removed.txt", sep="\t", header=None)
dia1.columns = ["index", "dialogue", "furigana", "kana", "english", "vocab", "kanji", "kanji_style", "furigana_style", "kana_style", "vocab_style", "tags"]
# =============================================================================
# %% Build the vocabulary table: one row per vocab word per dialogue line.
_VOCAB1_COLUMNS = ["word", "word_furigana", "word_english", "dialogue", "furigana",
                   "kana", "english", "vocab", "kanji", "kanji_style",
                   "furigana_style", "kana_style", "vocab_style", "tags",
                   "dialogue_length"]

def _vocab1_record(word, word_furigana, word_english, i):
    """One vocab row: the word triplet plus every field of dia1 row *i*."""
    return dict(word = word,
                word_furigana = word_furigana,
                word_english = word_english,
                dialogue = dia1.loc[i, "dialogue"],
                furigana = dia1.loc[i, "furigana"],
                kana = dia1.loc[i, "kana"],
                english = dia1.loc[i, "english"],
                vocab = dia1.loc[i, "vocab"],
                kanji = dia1.loc[i, "kanji"],
                kanji_style = dia1.loc[i, "kanji_style"],
                furigana_style = dia1.loc[i, "furigana_style"],
                kana_style = dia1.loc[i, "kana_style"],
                vocab_style = dia1.loc[i, "vocab_style"],
                tags = dia1.loc[i, "tags"],
                dialogue_length = len(dia1.loc[i, "dialogue"]))

# Collect rows in a plain list and build the frame once at the end:
# DataFrame.append was deprecated in pandas 1.4 and removed in 2.0, and
# appending row-by-row is quadratic.
_records1 = []
for i in range(len(dia1)):
    print(f"\r{i+1}/{len(dia1)}; {np.round((i+1)/len(dia1)*100,1)} ", end = "\r")
    vocabs = dia1.loc[i, "vocab"]
    if isinstance(vocabs, str):
        # Vocab cell holds "<br>"-separated "word: gloss" entries.
        for vo in vocabs.split("<br>"):
            word_furigana, word_english = vo.split(":", 1)
            word_furigana = word_furigana.strip()
            word_english = word_english.strip()
            # Strip bracketed furigana readings and spaces to get the bare word.
            word = re.sub("[\[].*?[\]]", "", word_furigana).replace(" ", "")
            _records1.append(_vocab1_record(word, word_furigana, word_english, i))
    elif math.isnan(vocabs):
        # No vocab listed (NaN cell): treat the whole line as the "word".
        word_furigana = dia1.loc[i, "furigana"].strip()
        word_english = dia1.loc[i, "english"].strip()
        word = dia1.loc[i, "dialogue"].replace(" ", "")
        _records1.append(_vocab1_record(word, word_furigana, word_english, i))
df_vocab1 = pd.DataFrame(_records1, columns=_VOCAB1_COLUMNS)
#%%
df_vocab1_index = df_vocab1.reset_index()
# Sort shortest dialogue first so groupby(...).agg("first") below keeps the
# shortest example sentence for each word.
df_vocab1_index_sort = df_vocab1_index.sort_values("dialogue_length")
# %% drop duplicates and combine tags
g = df_vocab1_index_sort.groupby("word")
combine_tags = g.agg("first")
# Overwrite "tags" with the tags of ALL occurrences of the word, joined.
combine_tags.update(g.agg({"tags": " ".join}))
combine_tags = combine_tags.reset_index()
combine_tags_sort = combine_tags.sort_values("index")
combine_tags_sort = combine_tags_sort.rename(columns = {'index':'index_by_dialogue'})
combine_tags_sort = combine_tags_sort.reset_index(drop=True)
# Manual gloss corrections (bad dictionary hits).
combine_tags_sort.loc[ combine_tags_sort["word_english"] == "Jade", "word_english"] = "Hisui"
combine_tags_sort.loc[ combine_tags_sort["word_english"] == "American", "word_english"] = "Candy"
combine_tags_sort = combine_tags_sort.reset_index(drop=True)
# Word frequency counted over all (pre-dedup) occurrences.
counter_vocab = collections.Counter(df_vocab1_index["word"])
counter_dict = dict(counter_vocab)
combine_tags_sort["frequency"] = np.nan
for i in range(len(combine_tags_sort)):
    word = combine_tags_sort.loc[i, "word"]
    combine_tags_sort.loc[i, "frequency"] = counter_dict[word]
combine_tags_sort["frequency"] = pd.to_numeric(combine_tags_sort["frequency"], downcast='integer')
combine_tags_sort_freq = combine_tags_sort.sort_values("frequency", ascending = False)
#%%
df_vocab1_save = combine_tags_sort_freq.reset_index(drop=True)
df_vocab1_save = df_vocab1_save.reset_index()
df_vocab1_save = df_vocab1_save.drop("dialogue_length", axis = 1)
# %%
# remove duplicate tags (and retarget them from ::dialogue:: to ::vocab::)
for i in range(len(df_vocab1_save)):
    tags = df_vocab1_save.loc[i,"tags"]
    tags = tags.replace("dialogue", "vocab")
    words = tags.split()
    # de-duplicate while preserving first-seen order
    df_vocab1_save.loc[i,"tags"] = " ".join(sorted(set(words), key=words.index))
# %%
#Changing the first entry for "no"
# Swap the example sentence for the particle の to dialogue row 2363
# (hand-picked as a better example).
x = 2363
ind = np.where(df_vocab1_save["word"] == "の")[0][0]
df_vocab1_save.loc[ind,"dialogue"] = dia1.loc[x, "dialogue"]
df_vocab1_save.loc[ind,"furigana"] = dia1.loc[x, "furigana"]
df_vocab1_save.loc[ind,"kana"] = dia1.loc[x, "kana"]
df_vocab1_save.loc[ind,"english"] = dia1.loc[x, "english"]
df_vocab1_save.loc[ind,"vocab"] = dia1.loc[x, "vocab"]
df_vocab1_save.loc[ind,"kanji"] = dia1.loc[x, "kanji"]
df_vocab1_save.loc[ind,"kanji_style"] = dia1.loc[x, "kanji_style"]
df_vocab1_save.loc[ind,"furigana_style"] = dia1.loc[x, "furigana_style"]
df_vocab1_save.loc[ind,"kana_style"] = dia1.loc[x, "kana_style"]
df_vocab1_save.loc[ind,"vocab_style"] =dia1.loc[x, "vocab_style"]
# %%
# Drop unhelpful particle/onomatopoeia entries (first occurrence of each).
ind = np.where(df_vocab1_save["word"] == "で")[0][0]
df_vocab1_save = df_vocab1_save.drop(df_vocab1_save.index[ind])
ind = np.where(df_vocab1_save["word"] == "ん")[0][0]
df_vocab1_save = df_vocab1_save.drop(df_vocab1_save.index[ind])
ind = np.where(df_vocab1_save["word"] == "そう")[0][0]
df_vocab1_save = df_vocab1_save.drop(df_vocab1_save.index[ind])
ind = np.where(df_vocab1_save["word"] == "ギリ")[0][0]
df_vocab1_save = df_vocab1_save.drop(df_vocab1_save.index[ind])
ind = np.where(df_vocab1_save["word"] == "バサ")[0][0]
df_vocab1_save = df_vocab1_save.drop(df_vocab1_save.index[ind])
ind = np.where(df_vocab1_save["word"] == "どう")[0][0]
df_vocab1_save = df_vocab1_save.drop(df_vocab1_save.index[ind])
# %%
df_vocab1_save = df_vocab1_save.reset_index(drop=True)
df_vocab1_save = df_vocab1_save.drop("index", axis = 1)
df_vocab1_save = df_vocab1_save.reset_index()
# %%
def swap_columns(df, c1, c2):
    """Return a copy of *df* with columns *c1* and *c2* trading positions.

    Each column keeps its own name and values; only the column ORDER changes.
    Equivalent to the previous temp-column/triple-rename dance, but without
    deep-copying the whole frame or mutating the input.
    """
    order = list(df.columns)
    i1, i2 = order.index(c1), order.index(c2)
    order[i1], order[i2] = order[i2], order[i1]
    return df[order].copy()
# Reorder columns for export: the three pairwise swaps move `word` and
# `frequency` to the front and push `tags`/`index_by_dialogue` back.
df_vocab1_save = swap_columns(df = df_vocab1_save, c1 = 'index_by_dialogue', c2 = 'frequency')
df_vocab1_save = swap_columns(df = df_vocab1_save, c1 = 'word', c2 = 'frequency')
df_vocab1_save = swap_columns(df = df_vocab1_save, c1 = 'index_by_dialogue', c2 = 'tags')
# Tab-separated, no header/index — presumably the flash-card importer
# (tags use the Anki-style :: hierarchy) expects bare rows; verify.
df_vocab1_save.to_csv("text/output/PLA_vocab1.txt", sep="\t", header=None, index=None)
#%%
# =============================================================================
#
# =============================================================================
# =============================================================================
#
#
# =============================================================================
# =============================================================================
#
# =============================================================================
# =============================================================================
#
# =============================================================================
# =============================================================================
#
# =============================================================================
# =============================================================================
#
# =============================================================================
# =============================================================================
#
# =============================================================================
# =============================================================================
#
# =============================================================================
# =============================================================================
#
# =============================================================================
# =============================================================================
#
# =============================================================================
# =============================================================================
#
# =============================================================================
# =============================================================================
#
# =============================================================================
# =============================================================================
#
# =============================================================================
# =============================================================================
#
# =============================================================================
# =============================================================================
#
# =============================================================================
# %% Dialogue 2
# Same tagging pass as dialogue 1, but over the "common" text dump and with
# #PLA::dialogue::common::... tags.
dialogue_tags = pd.read_csv("text/output/PLA_dialogue2.txt", sep="\t", header=None)
dialogue_tags.columns = ["index"] + columns_dialogue
dialogue_tags = dialogue_tags.drop("index", axis = 1)
dialogue_tags["tags"] = ""
for t in range(len(pla_common_hir)):
    print(f"\r{t+1}/{len(pla_common_hir)}; {np.round((t+1)/len(pla_common_hir)*100, 1)}", end = "\r")
    line = pla_common_hir[t]
    if "Text File :" in line:
        # New section header: compute the tag used until the next header.
        tag = pla_common_hir[t]
        for character in remove_characters:
            tag = tag.replace(character, "")
        tag = tag.replace("Text File : ", "")
        # Group related script files under an extra hierarchy level.
        if "sub_" in tag:
            add = "sub"
        elif "chap_" in tag:
            add = "chapter"
        elif "z_area" in tag:
            add = "z_area"
        else:
            add = ""
        if len(add)==0:
            tag_h = f"#PLA::dialogue::common::{tag}"
        else:
            tag_h = f"#PLA::dialogue::common::{add}::{tag}"
    for character in remove_characters:
        line = line.replace(character, "")
    # Only lines containing Japanese script count as dialogue worth tagging.
    contains_japanse = []
    for l in range(len(line)):
        letter = line[l]
        unic = unicodedata.name(letter)
        # print(unic)
        if "CJK" in unic or "HIRAGANA" in unic or "KATAKANA" in unic:
            contains_japanse.append(True)
        else:
            contains_japanse.append(False)
    if any(contains_japanse):
        # Normalize [VAR ...] markers to " ___ " blanks, then look the line up.
        line_jp = re.sub("[\[]VAR.*?[\]]", " ___ ", line)
        #print(tag_h)
        #print(line_jp)
        if len( np.where(line_jp == dialogue_tags["dialogue"])[0]) > 0:
            ind = np.where(line_jp == dialogue_tags["dialogue"])[0][0]
            tags_dia = dialogue_tags.loc[ind, "tags"]
            if tag_h in tags_dia:
                continue
            else:
                # Append each tag at most once per dialogue row.
                dialogue_tags.loc[ind, "tags"] = f"{tags_dia} {tag_h}"
# %%
# Dialogue 2 pass: apply the Sinnoh/Hisui vocab corrections to the common text.
dialogue_blanks = copy.deepcopy(dialogue_tags)
for d in range(len(dialogue_blanks)):
    print(f"\r{d+1}/{len(dialogue_blanks)}; {np.round((d+1)/len(dialogue_blanks)*100,1)}% ",end = "\r")
    jp = dialogue_blanks.loc[d, "dialogue"]
    fu = dialogue_blanks.loc[d, "furigana"]
    kana = dialogue_blanks.loc[d, "kana"]
    kanji_st = dialogue_blanks.loc[d, "kanji_style"]
    fu_st = dialogue_blanks.loc[d, "furigana_style"]
    kana_st = dialogue_blanks.loc[d, "kana_style"]
    en= dialogue_blanks.loc[d, "english"]
    # NOTE(review): unlike the dialogue-1 pass, the per-column values read
    # above are never modified or written back — the "___" stripping step is
    # absent here. Confirm whether that is intentional for the common text.
    # Switch Sinnoh: the dictionary glossed シンオウ as the verb する.
    shinnoh = 'シンオウ: to do, to carry out, to perform'
    shinnoh_style = 'シンオウ</font>: to do, to carry out, to perform'
    shinnoh_replace = "シンオウ: Sinnoh"
    shinnoh_style_replace = "シンオウ</font>: Sinnoh"
    if isinstance( dialogue_blanks.loc[d, "vocab"], str):
        if shinnoh in dialogue_blanks.loc[d, "vocab"]:
            print("changing Sinnoh ")
            dialogue_blanks.loc[d, "vocab"] = dialogue_blanks.loc[d, "vocab"].replace(shinnoh, shinnoh_replace)
        if shinnoh_style in dialogue_blanks.loc[d, "vocab_style"]:
            dialogue_blanks.loc[d, "vocab_style"] = dialogue_blanks.loc[d, "vocab_style"].replace(shinnoh_style, shinnoh_style_replace)
    # Switch Hisui: ヒスイ was glossed literally as "Jade".
    shinnoh = 'ヒスイ: Jade'
    shinnoh_style = 'ヒスイ</font>: Jade'
    shinnoh_replace = "ヒスイ: Hisui"
    shinnoh_style_replace = "ヒスイ</font>: Hisui"
    if isinstance( dialogue_blanks.loc[d, "vocab"], str):
        if shinnoh in dialogue_blanks.loc[d, "vocab"]:
            print("changing Hisui")
            dialogue_blanks.loc[d, "vocab"] = dialogue_blanks.loc[d, "vocab"].replace(shinnoh, shinnoh_replace)
        if shinnoh_style in dialogue_blanks.loc[d, "vocab_style"]:
            dialogue_blanks.loc[d, "vocab_style"] = dialogue_blanks.loc[d, "vocab_style"].replace(shinnoh_style, shinnoh_style_replace)
dialogue_blanks_index = copy.deepcopy(dialogue_blanks)
dialogue_blanks_index = dialogue_blanks_index.reset_index()
# %%
dialogue_blanks_index.to_csv("text/output/PLA_dialogue2_blanks_removed.txt", sep="\t", header=None, index=None)
# =============================================================================
#
# =============================================================================
#%% dialogue 2 vocab
dia2 = pd.read_csv("text/output/PLA_dialogue2_blanks_removed.txt", sep="\t", header=None)
dia2.columns = ["index", "dialogue", "furigana", "kana", "english", "vocab", "kanji", "kanji_style", "furigana_style", "kana_style", "vocab_style", "tags"]
# =============================================================================
# %% Build the common-text vocabulary table: one row per vocab word per line.
_VOCAB2_COLUMNS = ["word", "word_furigana", "word_english", "dialogue", "furigana",
                   "kana", "english", "vocab", "kanji", "kanji_style",
                   "furigana_style", "kana_style", "vocab_style", "tags",
                   "dialogue_length"]

def _vocab2_record(word, word_furigana, word_english, i):
    """One vocab row: the word triplet plus every field of dia2 row *i*."""
    return dict(word = word,
                word_furigana = word_furigana,
                word_english = word_english,
                dialogue = dia2.loc[i, "dialogue"],
                furigana = dia2.loc[i, "furigana"],
                kana = dia2.loc[i, "kana"],
                english = dia2.loc[i, "english"],
                vocab = dia2.loc[i, "vocab"],
                kanji = dia2.loc[i, "kanji"],
                kanji_style = dia2.loc[i, "kanji_style"],
                furigana_style = dia2.loc[i, "furigana_style"],
                kana_style = dia2.loc[i, "kana_style"],
                vocab_style = dia2.loc[i, "vocab_style"],
                tags = dia2.loc[i, "tags"],
                dialogue_length = len(dia2.loc[i, "dialogue"]))

# Collect rows in a plain list and build the frame once at the end:
# DataFrame.append was deprecated in pandas 1.4 and removed in 2.0, and
# appending row-by-row is quadratic.
_records2 = []
for i in range(len(dia2)):
    print(f"\r{i+1}/{len(dia2)}; {np.round((i+1)/len(dia2)*100,1)} ", end = "\r")
    vocabs = dia2.loc[i, "vocab"]
    if isinstance(vocabs, str):
        # Vocab cell holds "<br>"-separated "word: gloss" entries.
        for vo in vocabs.split("<br>"):
            word_furigana, word_english = vo.split(":", 1)
            word_furigana = word_furigana.strip()
            word_english = word_english.strip()
            # Strip bracketed furigana readings and spaces to get the bare word.
            word = re.sub("[\[].*?[\]]", "", word_furigana).replace(" ", "")
            _records2.append(_vocab2_record(word, word_furigana, word_english, i))
    elif math.isnan(vocabs):
        # No vocab listed (NaN cell): treat the whole line as the "word".
        word_furigana = dia2.loc[i, "furigana"].strip()
        word_english = dia2.loc[i, "english"].strip()
        word = dia2.loc[i, "dialogue"].replace(" ", "")
        _records2.append(_vocab2_record(word, word_furigana, word_english, i))
# Note: the original reuses the df_vocab1 name here; kept for downstream code.
df_vocab1 = pd.DataFrame(_records2, columns=_VOCAB2_COLUMNS)
#%%
df_vocab1_index = df_vocab1.reset_index()
# Sort shortest dialogue first so groupby(...).agg("first") below keeps the
# shortest example sentence for each word.
df_vocab1_index_sort = df_vocab1_index.sort_values("dialogue_length")
# %% drop duplicates and combine tags
# Inspection leftovers (no effect on the frame):
df_vocab1_index_sort["tags"]
df_vocab1_index_sort['tags'].isnull().values.any()
# Common text can have missing tags; blank them so " ".join below works.
df_vocab1_index_sort.fillna('', inplace=True)
g = df_vocab1_index_sort.groupby("word")
combine_tags = g.agg("first")
# Overwrite "tags" with the tags of ALL occurrences of the word, joined.
combine_tags.update(g.agg({"tags": " ".join}))
combine_tags = combine_tags.reset_index()
combine_tags_sort = combine_tags.sort_values("index")
combine_tags_sort = combine_tags_sort.rename(columns = {'index':'index_by_dialogue'})
combine_tags_sort = combine_tags_sort.reset_index(drop=True)
# Manual gloss corrections (bad dictionary hits).
combine_tags_sort.loc[ combine_tags_sort["word_english"] == "Jade", "word_english"] = "Hisui"
combine_tags_sort.loc[ combine_tags_sort["word_english"] == "American", "word_english"] = "Candy"
combine_tags_sort = combine_tags_sort.reset_index(drop=True)
# Word frequency counted over all (pre-dedup) occurrences.
counter_vocab = collections.Counter(df_vocab1_index["word"])
counter_dict = dict(counter_vocab)
combine_tags_sort["frequency"] = np.nan
for i in range(len(combine_tags_sort)):
    word = combine_tags_sort.loc[i, "word"]
    combine_tags_sort.loc[i, "frequency"] = counter_dict[word]
combine_tags_sort["frequency"] = pd.to_numeric(combine_tags_sort["frequency"], downcast='integer')
combine_tags_sort_freq = combine_tags_sort.sort_values("frequency", ascending = False)
#%%
df_vocab1_save = combine_tags_sort_freq.reset_index(drop=True)
df_vocab1_save = df_vocab1_save.reset_index()
df_vocab1_save = df_vocab1_save.drop("dialogue_length", axis = 1)
# %%
# remove duplicate tags (and retarget them from ::dialogue:: to ::vocab::)
for i in range(len(df_vocab1_save)):
    tags = df_vocab1_save.loc[i,"tags"]
    tags = tags.replace("dialogue", "vocab")
    words = tags.split()
    # de-duplicate while preserving first-seen order
    df_vocab1_save.loc[i,"tags"] = " ".join(sorted(set(words), key=words.index))
# %%
# Drop unhelpful particle/onomatopoeia entries (first occurrence of each).
ind = np.where(df_vocab1_save["word"] == "で")[0][0]
df_vocab1_save = df_vocab1_save.drop(df_vocab1_save.index[ind])
ind = np.where(df_vocab1_save["word"] == "ん")[0][0]
df_vocab1_save = df_vocab1_save.drop(df_vocab1_save.index[ind])
ind = np.where(df_vocab1_save["word"] == "そう")[0][0]
df_vocab1_save = df_vocab1_save.drop(df_vocab1_save.index[ind])
ind = np.where(df_vocab1_save["word"] == "ギリ")[0][0]
df_vocab1_save = df_vocab1_save.drop(df_vocab1_save.index[ind])
ind = np.where(df_vocab1_save["word"] == "バサ")[0][0]
df_vocab1_save = df_vocab1_save.drop(df_vocab1_save.index[ind])
ind = np.where(df_vocab1_save["word"] == "どう")[0][0]
df_vocab1_save = df_vocab1_save.drop(df_vocab1_save.index[ind])
# %%
df_vocab1_save = df_vocab1_save.reset_index(drop=True)
df_vocab1_save = df_vocab1_save.drop("index", axis = 1)
df_vocab1_save = df_vocab1_save.reset_index()
# %%
def swap_columns(df, c1, c2):
    """Return a copy of *df* with columns *c1* and *c2* exchanging positions.

    Each column keeps its own name and its own data; only the two columns'
    positions in the frame are swapped.  The input frame is not modified.
    (Replaces the original temp-column copy + triple-rename sequence, which
    also contained a no-op bare ``df.columns`` expression.)

    Args:
        df: source DataFrame; left untouched.
        c1: label of the first column to move.
        c2: label of the second column to move.

    Returns:
        A new DataFrame with the two columns' positions exchanged.

    Raises:
        KeyError: if *c1* or *c2* is not a column of *df*.
    """
    cols = list(df.columns)
    # Index.get_loc raises KeyError for a missing label, matching the
    # original code's failure mode when indexing df[c1].
    i = df.columns.get_loc(c1)
    j = df.columns.get_loc(c2)
    cols[i], cols[j] = cols[j], cols[i]
    # Column re-indexing already yields a new frame; .copy() guarantees the
    # result is fully independent of the input, like the old deepcopy.
    return df[cols].copy()
# Reorder columns with three pairwise swaps before saving.
# NOTE(review): the final ordering depends on the column order produced
# upstream — verify against the written file.
df_vocab1_save = swap_columns(df = df_vocab1_save, c1 = 'index_by_dialogue', c2 = 'frequency')
df_vocab1_save = swap_columns(df = df_vocab1_save, c1 = 'word', c2 = 'frequency')
df_vocab1_save = swap_columns(df = df_vocab1_save, c1 = 'index_by_dialogue', c2 = 'tags')
# Write tab-separated, without header or index row.
df_vocab1_save.to_csv("text/output/PLA_vocab2.txt", sep="\t", header=None, index=None)
| 36.989189
| 226
| 0.56145
| 3,360
| 27,372
| 4.302679
| 0.07619
| 0.069724
| 0.083835
| 0.061009
| 0.921007
| 0.899564
| 0.85986
| 0.818496
| 0.811302
| 0.803417
| 0
| 0.019447
| 0.182815
| 27,372
| 739
| 227
| 37.039242
| 0.626878
| 0.134079
| 0
| 0.766816
| 0
| 0.013453
| 0.185383
| 0.032642
| 0
| 0
| 0
| 0
| 0
| 1
| 0.004484
| false
| 0
| 0.03139
| 0
| 0.040359
| 0.024664
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
e67746523e0f2d67961b300d869e8498abcc6eb9
| 7,076
|
py
|
Python
|
mnist/dataset.py
|
ragavvenkatesan/Incremental-GAN
|
66db2760d43defe36feec7e049f74a659e810fed
|
[
"MIT"
] | 15
|
2017-05-03T21:22:03.000Z
|
2020-03-11T05:36:43.000Z
|
mnist/dataset.py
|
ragavvenkatesan/Incremental-GAN
|
66db2760d43defe36feec7e049f74a659e810fed
|
[
"MIT"
] | null | null | null |
mnist/dataset.py
|
ragavvenkatesan/Incremental-GAN
|
66db2760d43defe36feec7e049f74a659e810fed
|
[
"MIT"
] | 4
|
2017-05-04T15:55:22.000Z
|
2018-10-15T06:38:43.000Z
|
from yann.special.datasets import split_all, split_only_train
def cook_mnist_complete(verbose = 1, **kwargs):
    """
    Wrapper to cook the mnist dataset that creates the whole thing.

    Args:
        save_directory: which directory to save the cooked dataset onto.
        data_params: default is the dictionary below. Refer to :mod:`setup_dataset`
        preprocess_params: default is the dictionary below. Refer to :mod:`setup_dataset`
        splits: split specification; by default all ten classes go to ``base``
            and nothing is held out.

    Notes:
        This will also have the split parameter.
        NOTE(review): ``verbose`` is accepted but ignored — the dataset is
        always built with ``verbose = 3``.  Preserved as-is for behavioural
        compatibility.
    """
    # kwargs.get() expresses the original "caller's value if supplied,
    # otherwise this default" if/else blocks in one statement each.
    data_params = kwargs.get('data_params', {
        "source"                  : 'skdata',
        "name"                    : 'mnist',
        "location"                : '',
        "mini_batch_size"         : 500,
        "mini_batches_per_batch"  : (100, 20, 20),
        "batches2train"           : 1,
        "batches2test"            : 1,
        "batches2validate"        : 1,
        "height"                  : 28,
        "width"                   : 28,
        "channels"                : 1})

    # parameters relating to preprocessing.
    preprocess_params = kwargs.get('preprocess_params', {
        "normalize" : True,
        "ZCA"       : False,
        "grayscale" : False,
        "zero_mean" : True,
    })

    save_directory = kwargs.get('save_directory', '_datasets')

    # Default split: every digit class in the base set, no held-out shots.
    splits = kwargs.get('splits', {
        "base" : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
        "shot" : [],
        "p"    : 0,
    })

    dataset = split_only_train(dataset_init_args = data_params,
                               save_directory = save_directory,
                               preprocess_init_args = preprocess_params,
                               split_args = splits,
                               verbose = 3)
    return dataset
def cook_split_base(verbose = 1, **kwargs):
    """
    Wrapper to cook the mnist dataset that also creates a split dataset.

    Args:
        save_directory: which directory to save the cooked dataset onto.
        data_params: default is the dictionary below. Refer to :mod:`setup_dataset`
        preprocess_params: default is the dictionary below. Refer to :mod:`setup_dataset`
        splits: split specification; by default base classes are 0-5 and the
            held-out (shot) classes are 6-9.

    Notes:
        The default base of this dataset is classes 0,1,2,3,4,5 and the split
        is classes 6,7,8,9.  (The original docstring omitted class 3.)
        NOTE(review): ``verbose`` is accepted but ignored — the dataset is
        always built with ``verbose = 3``.  Preserved as-is.
    """
    # kwargs.get() replaces the original repeated if/else default blocks.
    data_params = kwargs.get('data_params', {
        "source"                  : 'skdata',
        "name"                    : 'mnist',
        "location"                : '',
        "mini_batch_size"         : 500,
        "mini_batches_per_batch"  : (100, 20, 20),
        "batches2train"           : 1,
        "batches2test"            : 1,
        "batches2validate"        : 1,
        "height"                  : 28,
        "width"                   : 28,
        "channels"                : 1})

    # parameters relating to preprocessing.
    preprocess_params = kwargs.get('preprocess_params', {
        "normalize" : True,
        "ZCA"       : False,
        "grayscale" : False,
        "zero_mean" : True,
    })

    save_directory = kwargs.get('save_directory', '_datasets')

    # Default split: classes 0-5 in base, classes 6-9 held out as shots.
    splits = kwargs.get('splits', {
        "base" : [0, 1, 2, 3, 4, 5],
        "shot" : [6, 7, 8, 9],
        "p"    : 0,
    })

    dataset = split_all(dataset_init_args = data_params,
                        save_directory = save_directory,
                        preprocess_init_args = preprocess_params,
                        split_args = splits,
                        verbose = 3)
    return dataset
def cook_split_inc(verbose = 1, **kwargs):
    """
    Wrapper to cook the mnist dataset that also creates the rest of the dataset.

    Args:
        save_directory: which directory to save the cooked dataset onto.
        data_params: default is the dictionary below. Refer to :mod:`setup_dataset`
        preprocess_params: default is the dictionary below. Refer to :mod:`setup_dataset`
        splits: split specification; by default base classes are 6-9 and the
            held-out (shot) classes are 0-5.

    Notes:
        NOTE(review): the original docstring claimed base classes 0,1,2,4,5,7,9
        and split 3,6,8, which contradicts the default split below
        (base 6,7,8,9 / shot 0,1,2,3,4,5) — confirm intent with the author.
        ``verbose`` is accepted but ignored; the dataset is always built with
        ``verbose = 3``.  Preserved as-is.
    """
    # kwargs.get() replaces the original repeated if/else default blocks.
    data_params = kwargs.get('data_params', {
        "source"                  : 'skdata',
        "name"                    : 'mnist',
        "location"                : '',
        "mini_batch_size"         : 500,
        "mini_batches_per_batch"  : (100, 20, 20),
        "batches2train"           : 1,
        "batches2test"            : 1,
        "batches2validate"        : 1,
        "height"                  : 28,
        "width"                   : 28,
        "channels"                : 1})

    # parameters relating to preprocessing.
    preprocess_params = kwargs.get('preprocess_params', {
        "normalize" : True,
        "ZCA"       : False,
        "grayscale" : False,
        "zero_mean" : True,
    })

    save_directory = kwargs.get('save_directory', '_datasets')

    # Default split: classes 6-9 in base, classes 0-5 held out as shots.
    splits = kwargs.get('splits', {
        "base" : [6, 7, 8, 9],
        "shot" : [0, 1, 2, 3, 4, 5],
        "p"    : 0,
    })

    dataset = split_only_train(dataset_init_args = data_params,
                               save_directory = save_directory,
                               preprocess_init_args = preprocess_params,
                               split_args = splits,
                               verbose = 3)
    return dataset
if __name__ == '__main__':
    # Module is import-only; no command-line behaviour.
    pass
| 35.20398
| 96
| 0.453646
| 647
| 7,076
| 4.774343
| 0.165379
| 0.088378
| 0.046617
| 0.042732
| 0.940758
| 0.935578
| 0.918096
| 0.918096
| 0.906442
| 0.892522
| 0
| 0.031609
| 0.459016
| 7,076
| 201
| 97
| 35.20398
| 0.77534
| 0.190644
| 0
| 0.856061
| 0
| 0
| 0.14628
| 0.011803
| 0
| 0
| 0
| 0
| 0
| 1
| 0.022727
| false
| 0.007576
| 0.007576
| 0
| 0.05303
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
e6cb5ab52f374f5daea43f9188eacf6f92b37e23
| 15,993
|
py
|
Python
|
_lambda/ask_sdk_model/request.py
|
desarroyo/alexa-skill-mi-abecedario
|
71fb9dc5a9ce2aeb7e336474d5162053e3af0369
|
[
"MIT"
] | null | null | null |
_lambda/ask_sdk_model/request.py
|
desarroyo/alexa-skill-mi-abecedario
|
71fb9dc5a9ce2aeb7e336474d5162053e3af0369
|
[
"MIT"
] | null | null | null |
_lambda/ask_sdk_model/request.py
|
desarroyo/alexa-skill-mi-abecedario
|
71fb9dc5a9ce2aeb7e336474d5162053e3af0369
|
[
"MIT"
] | null | null | null |
# coding: utf-8
#
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file
# except in compliance with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for
# the specific language governing permissions and limitations under the License.
#
import pprint
import re # noqa: F401
import six
import typing
from enum import Enum
from abc import ABCMeta, abstractmethod
if typing.TYPE_CHECKING:
from typing import Dict, List, Optional
from datetime import datetime
# NOTE(review): this looks like generated Alexa Skills Kit model code — the
# exact contents of the mapping dicts below are load-bearing; edit with care.
class Request(object):
    """
    A request object that provides the details of the user’s request. The request body contains the parameters necessary for the service to perform its logic and generate a response.

    :param object_type: Describes the type of the request.
    :type object_type: (optional) str
    :param request_id: Represents the unique identifier for the specific request.
    :type request_id: (optional) str
    :param timestamp: Provides the date and time when Alexa sent the request as an ISO 8601 formatted string. Used to verify the request when hosting your skill as a web service.
    :type timestamp: (optional) datetime
    :param locale: A string indicating the user’s locale. For example: en-US. This value is only provided with certain request types.
    :type locale: (optional) str

    .. note::

        This is an abstract class. Use the following mapping, to figure out
        the model class to be instantiated, that sets ``type`` variable.

        | AudioPlayer.PlaybackFinished: :py:class:`ask_sdk_model.interfaces.audioplayer.playback_finished_request.PlaybackFinishedRequest`,
        |
        | AlexaSkillEvent.SkillEnabled: :py:class:`ask_sdk_model.events.skillevents.skill_enabled_request.SkillEnabledRequest`,
        |
        | AlexaHouseholdListEvent.ListUpdated: :py:class:`ask_sdk_model.services.list_management.list_updated_event_request.ListUpdatedEventRequest`,
        |
        | AlexaSkillEvent.ProactiveSubscriptionChanged: :py:class:`ask_sdk_model.events.skillevents.proactive_subscription_changed_request.ProactiveSubscriptionChangedRequest`,
        |
        | Alexa.Presentation.APL.UserEvent: :py:class:`ask_sdk_model.interfaces.alexa.presentation.apl.user_event.UserEvent`,
        |
        | AlexaSkillEvent.SkillDisabled: :py:class:`ask_sdk_model.events.skillevents.skill_disabled_request.SkillDisabledRequest`,
        |
        | Display.ElementSelected: :py:class:`ask_sdk_model.interfaces.display.element_selected_request.ElementSelectedRequest`,
        |
        | AlexaSkillEvent.SkillPermissionChanged: :py:class:`ask_sdk_model.events.skillevents.permission_changed_request.PermissionChangedRequest`,
        |
        | AlexaHouseholdListEvent.ItemsCreated: :py:class:`ask_sdk_model.services.list_management.list_items_created_event_request.ListItemsCreatedEventRequest`,
        |
        | Reminders.ReminderUpdated: :py:class:`ask_sdk_model.services.reminder_management.reminder_updated_event_request.ReminderUpdatedEventRequest`,
        |
        | SessionEndedRequest: :py:class:`ask_sdk_model.session_ended_request.SessionEndedRequest`,
        |
        | IntentRequest: :py:class:`ask_sdk_model.intent_request.IntentRequest`,
        |
        | AudioPlayer.PlaybackFailed: :py:class:`ask_sdk_model.interfaces.audioplayer.playback_failed_request.PlaybackFailedRequest`,
        |
        | CanFulfillIntentRequest: :py:class:`ask_sdk_model.canfulfill.can_fulfill_intent_request.CanFulfillIntentRequest`,
        |
        | Reminders.ReminderStarted: :py:class:`ask_sdk_model.services.reminder_management.reminder_started_event_request.ReminderStartedEventRequest`,
        |
        | LaunchRequest: :py:class:`ask_sdk_model.launch_request.LaunchRequest`,
        |
        | Reminders.ReminderCreated: :py:class:`ask_sdk_model.services.reminder_management.reminder_created_event_request.ReminderCreatedEventRequest`,
        |
        | AudioPlayer.PlaybackStopped: :py:class:`ask_sdk_model.interfaces.audioplayer.playback_stopped_request.PlaybackStoppedRequest`,
        |
        | PlaybackController.PreviousCommandIssued: :py:class:`ask_sdk_model.interfaces.playbackcontroller.previous_command_issued_request.PreviousCommandIssuedRequest`,
        |
        | AlexaHouseholdListEvent.ItemsUpdated: :py:class:`ask_sdk_model.services.list_management.list_items_updated_event_request.ListItemsUpdatedEventRequest`,
        |
        | AlexaSkillEvent.SkillAccountLinked: :py:class:`ask_sdk_model.events.skillevents.account_linked_request.AccountLinkedRequest`,
        |
        | AlexaHouseholdListEvent.ListCreated: :py:class:`ask_sdk_model.services.list_management.list_created_event_request.ListCreatedEventRequest`,
        |
        | AudioPlayer.PlaybackStarted: :py:class:`ask_sdk_model.interfaces.audioplayer.playback_started_request.PlaybackStartedRequest`,
        |
        | AudioPlayer.PlaybackNearlyFinished: :py:class:`ask_sdk_model.interfaces.audioplayer.playback_nearly_finished_request.PlaybackNearlyFinishedRequest`,
        |
        | Reminders.ReminderStatusChanged: :py:class:`ask_sdk_model.services.reminder_management.reminder_status_changed_event_request.ReminderStatusChangedEventRequest`,
        |
        | AlexaHouseholdListEvent.ItemsDeleted: :py:class:`ask_sdk_model.services.list_management.list_items_deleted_event_request.ListItemsDeletedEventRequest`,
        |
        | Reminders.ReminderDeleted: :py:class:`ask_sdk_model.services.reminder_management.reminder_deleted_event_request.ReminderDeletedEventRequest`,
        |
        | Connections.Response: :py:class:`ask_sdk_model.interfaces.connections.connections_response.ConnectionsResponse`,
        |
        | Messaging.MessageReceived: :py:class:`ask_sdk_model.interfaces.messaging.message_received_request.MessageReceivedRequest`,
        |
        | Connections.Request: :py:class:`ask_sdk_model.interfaces.connections.connections_request.ConnectionsRequest`,
        |
        | System.ExceptionEncountered: :py:class:`ask_sdk_model.interfaces.system.exception_encountered_request.ExceptionEncounteredRequest`,
        |
        | AlexaSkillEvent.SkillPermissionAccepted: :py:class:`ask_sdk_model.events.skillevents.permission_accepted_request.PermissionAcceptedRequest`,
        |
        | AlexaHouseholdListEvent.ListDeleted: :py:class:`ask_sdk_model.services.list_management.list_deleted_event_request.ListDeletedEventRequest`,
        |
        | GameEngine.InputHandlerEvent: :py:class:`ask_sdk_model.interfaces.game_engine.input_handler_event_request.InputHandlerEventRequest`,
        |
        | PlaybackController.NextCommandIssued: :py:class:`ask_sdk_model.interfaces.playbackcontroller.next_command_issued_request.NextCommandIssuedRequest`,
        |
        | PlaybackController.PauseCommandIssued: :py:class:`ask_sdk_model.interfaces.playbackcontroller.pause_command_issued_request.PauseCommandIssuedRequest`,
        |
        | PlaybackController.PlayCommandIssued: :py:class:`ask_sdk_model.interfaces.playbackcontroller.play_command_issued_request.PlayCommandIssuedRequest`
    """
    # Attribute name -> declared type; consumed by to_dict() below (and
    # presumably by the SDK's deserializer — confirm against the serializer).
    deserialized_types = {
        'object_type': 'str',
        'request_id': 'str',
        'timestamp': 'datetime',
        'locale': 'str'
    }

    # Python attribute name -> JSON key on the wire.
    attribute_map = {
        'object_type': 'type',
        'request_id': 'requestId',
        'timestamp': 'timestamp',
        'locale': 'locale'
    }

    # Discriminator: JSON "type" value -> fully qualified name of the concrete
    # subclass to instantiate (see get_real_child_model).
    discriminator_value_class_map = {
        'AudioPlayer.PlaybackFinished': 'ask_sdk_model.interfaces.audioplayer.playback_finished_request.PlaybackFinishedRequest',
        'AlexaSkillEvent.SkillEnabled': 'ask_sdk_model.events.skillevents.skill_enabled_request.SkillEnabledRequest',
        'AlexaHouseholdListEvent.ListUpdated': 'ask_sdk_model.services.list_management.list_updated_event_request.ListUpdatedEventRequest',
        'AlexaSkillEvent.ProactiveSubscriptionChanged': 'ask_sdk_model.events.skillevents.proactive_subscription_changed_request.ProactiveSubscriptionChangedRequest',
        'Alexa.Presentation.APL.UserEvent': 'ask_sdk_model.interfaces.alexa.presentation.apl.user_event.UserEvent',
        'AlexaSkillEvent.SkillDisabled': 'ask_sdk_model.events.skillevents.skill_disabled_request.SkillDisabledRequest',
        'Display.ElementSelected': 'ask_sdk_model.interfaces.display.element_selected_request.ElementSelectedRequest',
        'AlexaSkillEvent.SkillPermissionChanged': 'ask_sdk_model.events.skillevents.permission_changed_request.PermissionChangedRequest',
        'AlexaHouseholdListEvent.ItemsCreated': 'ask_sdk_model.services.list_management.list_items_created_event_request.ListItemsCreatedEventRequest',
        'Reminders.ReminderUpdated': 'ask_sdk_model.services.reminder_management.reminder_updated_event_request.ReminderUpdatedEventRequest',
        'SessionEndedRequest': 'ask_sdk_model.session_ended_request.SessionEndedRequest',
        'IntentRequest': 'ask_sdk_model.intent_request.IntentRequest',
        'AudioPlayer.PlaybackFailed': 'ask_sdk_model.interfaces.audioplayer.playback_failed_request.PlaybackFailedRequest',
        'CanFulfillIntentRequest': 'ask_sdk_model.canfulfill.can_fulfill_intent_request.CanFulfillIntentRequest',
        'Reminders.ReminderStarted': 'ask_sdk_model.services.reminder_management.reminder_started_event_request.ReminderStartedEventRequest',
        'LaunchRequest': 'ask_sdk_model.launch_request.LaunchRequest',
        'Reminders.ReminderCreated': 'ask_sdk_model.services.reminder_management.reminder_created_event_request.ReminderCreatedEventRequest',
        'AudioPlayer.PlaybackStopped': 'ask_sdk_model.interfaces.audioplayer.playback_stopped_request.PlaybackStoppedRequest',
        'PlaybackController.PreviousCommandIssued': 'ask_sdk_model.interfaces.playbackcontroller.previous_command_issued_request.PreviousCommandIssuedRequest',
        'AlexaHouseholdListEvent.ItemsUpdated': 'ask_sdk_model.services.list_management.list_items_updated_event_request.ListItemsUpdatedEventRequest',
        'AlexaSkillEvent.SkillAccountLinked': 'ask_sdk_model.events.skillevents.account_linked_request.AccountLinkedRequest',
        'AlexaHouseholdListEvent.ListCreated': 'ask_sdk_model.services.list_management.list_created_event_request.ListCreatedEventRequest',
        'AudioPlayer.PlaybackStarted': 'ask_sdk_model.interfaces.audioplayer.playback_started_request.PlaybackStartedRequest',
        'AudioPlayer.PlaybackNearlyFinished': 'ask_sdk_model.interfaces.audioplayer.playback_nearly_finished_request.PlaybackNearlyFinishedRequest',
        'Reminders.ReminderStatusChanged': 'ask_sdk_model.services.reminder_management.reminder_status_changed_event_request.ReminderStatusChangedEventRequest',
        'AlexaHouseholdListEvent.ItemsDeleted': 'ask_sdk_model.services.list_management.list_items_deleted_event_request.ListItemsDeletedEventRequest',
        'Reminders.ReminderDeleted': 'ask_sdk_model.services.reminder_management.reminder_deleted_event_request.ReminderDeletedEventRequest',
        'Connections.Response': 'ask_sdk_model.interfaces.connections.connections_response.ConnectionsResponse',
        'Messaging.MessageReceived': 'ask_sdk_model.interfaces.messaging.message_received_request.MessageReceivedRequest',
        'Connections.Request': 'ask_sdk_model.interfaces.connections.connections_request.ConnectionsRequest',
        'System.ExceptionEncountered': 'ask_sdk_model.interfaces.system.exception_encountered_request.ExceptionEncounteredRequest',
        'AlexaSkillEvent.SkillPermissionAccepted': 'ask_sdk_model.events.skillevents.permission_accepted_request.PermissionAcceptedRequest',
        'AlexaHouseholdListEvent.ListDeleted': 'ask_sdk_model.services.list_management.list_deleted_event_request.ListDeletedEventRequest',
        'GameEngine.InputHandlerEvent': 'ask_sdk_model.interfaces.game_engine.input_handler_event_request.InputHandlerEventRequest',
        'PlaybackController.NextCommandIssued': 'ask_sdk_model.interfaces.playbackcontroller.next_command_issued_request.NextCommandIssuedRequest',
        'PlaybackController.PauseCommandIssued': 'ask_sdk_model.interfaces.playbackcontroller.pause_command_issued_request.PauseCommandIssuedRequest',
        'PlaybackController.PlayCommandIssued': 'ask_sdk_model.interfaces.playbackcontroller.play_command_issued_request.PlayCommandIssuedRequest'
    }

    # Name of the JSON field carrying the discriminator value above.
    json_discriminator_key = "type"

    # Python 2-style abstract-class marker (has no effect on Python 3; the
    # @abstractmethod on __init__ below is what blocks direct instantiation
    # under a real ABCMeta metaclass).
    __metaclass__ = ABCMeta

    @abstractmethod
    def __init__(self, object_type=None, request_id=None, timestamp=None, locale=None):
        # type: (Optional[str], Optional[str], Optional[datetime], Optional[str]) -> None
        """A request object that provides the details of the user’s request. The request body contains the parameters necessary for the service to perform its logic and generate a response.

        :param object_type: Describes the type of the request.
        :type object_type: (optional) str
        :param request_id: Represents the unique identifier for the specific request.
        :type request_id: (optional) str
        :param timestamp: Provides the date and time when Alexa sent the request as an ISO 8601 formatted string. Used to verify the request when hosting your skill as a web service.
        :type timestamp: (optional) datetime
        :param locale: A string indicating the user’s locale. For example: en-US. This value is only provided with certain request types.
        :type locale: (optional) str
        """
        # Private (name-mangled) slot; presumably set by concrete subclasses.
        self.__discriminator_value = None

        self.object_type = object_type
        self.request_id = request_id
        self.timestamp = timestamp
        self.locale = locale

    @classmethod
    def get_real_child_model(cls, data):
        # type: (Dict[str, str]) -> str
        """Returns the real base class specified by the discriminator"""
        # Raises KeyError if the "type" field is absent; returns None for an
        # unknown discriminator value (dict.get).
        discriminator_value = data[cls.json_discriminator_key]
        return cls.discriminator_value_class_map.get(discriminator_value)

    def to_dict(self):
        # type: () -> Dict[str, object]
        """Returns the model properties as a dict"""
        result = {}

        for attr, _ in six.iteritems(self.deserialized_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                # Recurse into list elements; Enums collapse to their .value.
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else
                    x.value if isinstance(x, Enum) else x,
                    value
                ))
            elif isinstance(value, Enum):
                result[attr] = value.value
            elif hasattr(value, "to_dict"):
                # Nested model object.
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                # Recurse into dict values the same way.
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else
                    (item[0], item[1].value)
                    if isinstance(item[1], Enum) else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        # type: () -> str
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        # type: () -> str
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        # type: (object) -> bool
        """Returns true if both objects are equal"""
        if not isinstance(other, Request):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        # type: (object) -> bool
        """Returns true if both objects are not equal"""
        # Delegates to __eq__ via `==` (needed on Python 2, harmless on 3).
        return not self == other
| 61.275862
| 189
| 0.748015
| 1,637
| 15,993
| 7.04215
| 0.199145
| 0.038515
| 0.070611
| 0.041725
| 0.810461
| 0.808033
| 0.804997
| 0.801353
| 0.762578
| 0.75772
| 0
| 0.001946
| 0.164572
| 15,993
| 260
| 190
| 61.511538
| 0.860864
| 0.500094
| 0
| 0.018349
| 0
| 0
| 0.588501
| 0.558424
| 0
| 0
| 0
| 0
| 0
| 1
| 0.06422
| false
| 0
| 0.073395
| 0
| 0.256881
| 0.018349
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
fc2628c9f86b1111de48ec4fb420d47ebc4737ab
| 20,110
|
py
|
Python
|
bigtempo/tests/core_tests.py
|
rhlobo/bigtempo3
|
848eda5f07f7e61f7659bac335726c567b41083e
|
[
"MIT"
] | null | null | null |
bigtempo/tests/core_tests.py
|
rhlobo/bigtempo3
|
848eda5f07f7e61f7659bac335726c567b41083e
|
[
"MIT"
] | null | null | null |
bigtempo/tests/core_tests.py
|
rhlobo/bigtempo3
|
848eda5f07f7e61f7659bac335726c567b41083e
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from bigtempo.tagselection import TagSelector
import unittest
from mockito import mock, when, any as anyx, verify
import bigtempo.utils as utils
import bigtempo.core as core
class TestDatasourceEngine_for_datasources_without_dependencies(unittest.TestCase):
    """Behaviour of DatasourceEngine for datasources registered with no
    dependencies: builder caching and per-call task-factory usage."""

    def setUp(self):
        # The engine is handed plain functions that delegate to mockito mocks,
        # so tests can verify how often each collaborator is invoked.
        def builder(cls):
            return self.builder_mock.build(cls)
        self.builder_mock = mock()

        def processing_task_factory(instance, deps, lookback):
            # deps/lookback are intentionally ignored by the stub.
            return self.processing_task_factory_mock.create(instance)
        self.processing_task_factory_mock = mock()

        self.engine = core.DatasourceEngine(builder, processing_task_factory)

        class _Task(object):
            def __init__(self, instance):
                self.instance = instance

        # Register three sample datasources: REGISTERED_KEY_0..2, and wire
        # the mocks to return a matching instance/task for each.
        self.instances = []
        self.classes = []
        for i in range(3):
            @self.engine.datasource('REGISTERED_KEY_%i' % i)
            class _SampleDatasource(object):
                pass
            instance = _SampleDatasource()
            self.classes.append(_SampleDatasource)
            self.instances.append(instance)
            when(self.builder_mock).build(_SampleDatasource).thenReturn(instance)
            when(self.processing_task_factory_mock).create(instance).thenReturn(_Task(instance))

    def test_get_should_raise_error_when_reference_was_not_registered(self):
        self.assertRaises(KeyError, self.engine.get, 'NOT_REGISTERED_KEY')

    def test_get_should_not_raise_error_when_reference_was_registered(self):
        self.engine.get('REGISTERED_KEY_1')

    def test_get_should_not_use_builder_when_reference_was_not_registered(self):
        self.assertRaises(KeyError, self.engine.get, 'NOT_REGISTERED_KEY_1')
        verify(self.builder_mock, times=0).build(anyx())

    def test_get_should_use_builder_when_reference_was_registered(self):
        self.engine.get('REGISTERED_KEY_1')
        verify(self.builder_mock, times=1).build(anyx())

    def test_get_should_only_use_builder_once_for_a_registered_reference(self):
        # Repeated gets must hit the builder only once (instance is cached).
        for i in range(5):
            self.engine.get('REGISTERED_KEY_1')
        verify(self.builder_mock, times=1).build(anyx())

    def test_get_should_only_use_builder_once_for_each_registered_reference(self):
        for i in range(2):
            self.engine.get('REGISTERED_KEY_1')
        for i in range(2):
            self.engine.get('REGISTERED_KEY_2')
        self.engine.get('REGISTERED_KEY_1')
        verify(self.builder_mock, times=2).build(anyx())

    def test_get_should_use_processing_task_factory_in_each_call_for_registered_references(self):
        # Unlike the builder, the task factory runs on every get().
        repetition = 3
        for i in range(repetition):
            self.engine.get('REGISTERED_KEY_1')
            self.engine.get('REGISTERED_KEY_2')
        verify(self.processing_task_factory_mock, times=repetition).create(self.instances[1])
        verify(self.processing_task_factory_mock, times=repetition).create(self.instances[2])
class TestDatasourceEngine_for_datasources_with_dependencies(unittest.TestCase):
    """Behaviour of DatasourceEngine when each registered datasource depends
    on all previously registered ones (a chain: KEY_2 -> KEY_1 -> KEY_0)."""

    def setUp(self):
        def builder(cls):
            return self.builder_mock.build(cls)
        self.builder_mock = mock()

        def processing_task_factory(instance, deps, lookback):
            return self.processing_task_factory_mock.create(instance)
        self.processing_task_factory_mock = mock()

        self.engine = core.DatasourceEngine(builder, processing_task_factory)

        class _Task(object):
            def __init__(self, instance):
                self.instance = instance

        # Each new datasource declares every earlier key as a dependency;
        # list(registered_keys) snapshots the list at registration time.
        self.classes = []
        self.instances = []
        registered_keys = []
        for i in range(3):
            @self.engine.datasource('REGISTERED_KEY_%i' % i, dependencies=list(registered_keys))
            class _SampleDatasource(object):
                pass
            instance = _SampleDatasource()
            self.classes.append(_SampleDatasource)
            self.instances.append(instance)
            registered_keys.append('REGISTERED_KEY_%i' % i)
            when(self.builder_mock).build(_SampleDatasource).thenReturn(instance)
            when(self.processing_task_factory_mock).create(instance).thenReturn(_Task(instance))

    def test_get_should_use_builder_for_required_reference_and_for_its_dependency(self):
        self.engine.get('REGISTERED_KEY_1')
        verify(self.builder_mock, times=1).build(self.classes[1])
        verify(self.builder_mock, times=1).build(self.classes[0])

    def test_get_should_use_builder_for_required_reference_and_for_each_dependency(self):
        self.engine.get('REGISTERED_KEY_2')
        verify(self.builder_mock, times=1).build(self.classes[2])
        verify(self.builder_mock, times=1).build(self.classes[1])
        verify(self.builder_mock, times=1).build(self.classes[0])

    def test_get_should_only_use_builder_once_for_each_reference_including_dependencies(self):
        for i in range(5):
            self.engine.get('REGISTERED_KEY_1')
        verify(self.builder_mock, times=1).build(self.classes[1])
        verify(self.builder_mock, times=1).build(self.classes[0])

    def test_get_should_use_processing_task_factory_in_each_call_for_registered_references_including_dependencies(self):
        # KEY_0 is evaluated for itself plus as a dependency of both gets;
        # KEY_1 for itself plus as a dependency of KEY_2; KEY_2 once.
        self.engine.get('REGISTERED_KEY_1')
        self.engine.get('REGISTERED_KEY_2')
        verify(self.processing_task_factory_mock, times=3).create(self.instances[0])
        verify(self.processing_task_factory_mock, times=2).create(self.instances[1])
        verify(self.processing_task_factory_mock, times=1).create(self.instances[2])
class TestDatasourceEngine_tag_related_behaviours_not_considering_tag_inference(unittest.TestCase):
    """Tag registration behaviour with tag inference stubbed out
    (infere_tags always returns an empty set)."""

    def setUp(self):
        # Monkey-patch the tagselection module globals with callable mocks.
        # tearDown must restore these, and the patch order matters: the mocks
        # have to be in place before DatasourceEngine() is constructed.
        self.TagSelector = core.tagselection.TagSelector
        self.tagSelectorMock = mock(core.tagselection.TagSelector)
        core.tagselection.TagSelector = utils.CallableMock(self.tagSelectorMock)
        when(self.tagSelectorMock).__call__(anyx()).thenReturn(self.tagSelectorMock)
        when(self.tagSelectorMock).register(...).thenReturn(None)

        self.TagManager = core.tagselection.TagManager
        self.tagManagerMock = mock(core.tagselection.TagManager)
        core.tagselection.TagManager = utils.CallableMock(self.tagManagerMock)
        when(self.tagManagerMock).__call__(anyx()).thenReturn(self.tagManagerMock)
        # Disable inference so only explicitly declared tags are registered.
        when(self.tagManagerMock).infere_tags(anyx()).thenReturn(set())
        when(self.tagManagerMock).evaluate_new_candidate(...).thenReturn(None)

        self.engine = core.DatasourceEngine()

    def tearDown(self):
        # Restore the real classes so other test modules are unaffected.
        core.tagselection.TagManager = self.TagManager
        core.tagselection.TagSelector = self.TagSelector

    def test_register_datasource_should_instantiate_tag_selector_on_initialization(self):
        verify(self.tagSelectorMock, times=1).__call__(anyx())

    def test_register_datasource_should_trigger_tag_registration_on_tag_selector_passing_empty_set_when_no_tags_where_given(self):
        reference = 'REFERENCE'

        @self.engine.datasource(reference)
        class DatasourceWithTags(object):
            pass

        verify(self.tagSelectorMock, times=1).register(reference, set())

    def test_register_datasource_should_trigger_tag_registration_on_tag_selector_passing_given_list_as_set(self):
        reference = 'REFERENCE'
        expected_tags = ['tag1', 'tag2']

        @self.engine.datasource(reference, tags=expected_tags)
        class DatasourceWithTags(object):
            pass

        verify(self.tagSelectorMock, times=1).register(reference, set(expected_tags))

    def test_register_datasource_should_trigger_tag_registration_on_tag_selector_passing_given_set(self):
        reference = 'REFERENCE'
        expected_tags = set(['tag1', 'tag2'])

        @self.engine.datasource(reference, tags=expected_tags)
        class DatasourceWithTags(object):
            pass

        verify(self.tagSelectorMock, times=1).register(reference, expected_tags)
class TestDatasourceEngine_delegators(unittest.TestCase):
    """Engine methods that simply delegate to TagSelector / TagManager."""

    def setUp(self):
        # Same monkey-patching scheme as the other tag tests; restored in
        # tearDown.  Mocks must be installed before DatasourceEngine().
        self.TagSelector = core.tagselection.TagSelector
        self.tagSelectorMock = mock(core.tagselection.TagSelector)
        core.tagselection.TagSelector = utils.CallableMock(self.tagSelectorMock)
        when(self.tagSelectorMock).__call__(anyx()).thenReturn(self.tagSelectorMock)
        when(self.tagSelectorMock).register(...).thenReturn(None)

        self.TagManager = core.tagselection.TagManager
        self.tagManagerMock = mock(core.tagselection.TagManager)
        core.tagselection.TagManager = utils.CallableMock(self.tagManagerMock)
        when(self.tagManagerMock).__call__(anyx()).thenReturn(self.tagManagerMock)
        when(self.tagManagerMock).infere_tags(anyx()).thenReturn(set())
        when(self.tagManagerMock).register(...).thenReturn(None)
        when(self.tagManagerMock).register_synched(...).thenReturn(None)

        self.engine = core.DatasourceEngine()

    def tearDown(self):
        core.tagselection.TagManager = self.TagManager
        core.tagselection.TagSelector = self.TagSelector

    def test_select_should_delegate_to_tag_selector(self):
        args = ['a', 'b', 'c']
        expected = object()
        when(self.tagSelectorMock).get(*args).thenReturn(expected)

        result = self.engine.select(*args)

        verify(self.tagSelectorMock, times=1).get(*args)
        # Identity check: the engine must pass the selector's result through.
        assert expected is result

    def test_tags_should_delegate_to_tag_selector(self):
        args = ['a', 'b', 'c']
        expected = object()
        when(self.tagSelectorMock).tags(*args).thenReturn(expected)

        result = self.engine.tags(*args)

        verify(self.tagSelectorMock, times=1).tags(*args)
        assert expected is result

    def test_for_each_should_delegate_to_tagManager_register_method(self):
        selection = object()

        def function():
            pass

        self.engine.for_each(selection)(function)
        verify(self.tagManagerMock, times=1).register(function, selection)

    def test_for_synched_should_delegate_to_tagManager_register_synched_method(self):
        selection = object()

        def function():
            pass

        # Note: only the function argument is matched exactly here; the
        # selection passed onward is matched with anyx().
        self.engine.for_synched(selection)(function)
        verify(self.tagManagerMock, times=1).register_synched(function, anyx())
class TestDatasourceEngine_tag_related_behaviours_considering_tag_inference(unittest.TestCase):
    """Tag registration performed by DatasourceEngine.datasource().

    Each registered datasource must be announced to the TagSelector with a tag
    set containing: its own reference, any tags declared at registration time,
    and '{...}'-wrapped tags inferred from its direct and transitive
    dependencies (the dependency references and the dependencies' own tags).

    Fix: removed leftover debug print() calls that polluted test output.
    """

    def setUp(self):
        # Replace core.tagselection.TagSelector with a callable mock so the
        # tag registrations performed by the engine can be verified.
        self.TagSelector = core.tagselection.TagSelector
        self.tagSelectorMock = mock(core.tagselection.TagSelector)
        when(self.tagSelectorMock).__call__(anyx()).thenReturn(self.tagSelectorMock)
        when(self.tagSelectorMock).register(...).thenReturn(None)
        core.tagselection.TagSelector = utils.CallableMock(self.tagSelectorMock)
        self.engine = core.DatasourceEngine()

    def tearDown(self):
        # Restore the patched class so other tests see the real TagSelector.
        core.tagselection.TagSelector = self.TagSelector

    def test_register_datasource_should_trigger_tag_registration_with_reference_itself_as_a_tag(self):
        reference = 'REFERENCE'

        @self.engine.datasource(reference)
        class DatasourceWithTags(object):
            pass

        verify(self.tagSelectorMock, times=1).register(reference, set([reference]))

    def test_register_datasource_should_trigger_tag_registration_with_reference_itself_as_a_tag_plus_declared_tags(self):
        reference = 'REFERENCE'
        declared_tags = ['tag1', 'tag2']
        expected_tags = ['tag1', 'tag2', 'REFERENCE']

        @self.engine.datasource(reference, tags=declared_tags)
        class DatasourceWithTags(object):
            pass

        verify(self.tagSelectorMock, times=1).register(reference, set(expected_tags))

    def test_register_datasource_should_trigger_tag_registration_with_reference_itself_as_a_tag_plus_declared_tags_using_set(self):
        # Same as above, but tags declared as a set instead of a list.
        reference = 'REFERENCE'
        declared_tags = set(['tag1', 'tag2'])
        expected_tags = set(['tag1', 'tag2', 'REFERENCE'])

        @self.engine.datasource(reference, tags=declared_tags)
        class DatasourceWithTags(object):
            pass

        verify(self.tagSelectorMock, times=1).register(reference, expected_tags)

    def test_register_datasource_should_trigger_tag_registration_with_dependency_as_tag_when_datasources_has_one_dependency(self):
        reference = 'REFERENCE'

        @self.engine.datasource('REFERENCE_DEPENDENCY_A')
        class DatasourceDependencyA(object):
            pass

        @self.engine.datasource(reference,
                                dependencies=['REFERENCE_DEPENDENCY_A'],
                                tags=['tag1', 'tag2'])
        class Datasource(object):
            pass

        verify(self.tagSelectorMock, times=1).register(reference, set(['tag1', 'tag2', 'REFERENCE', '{REFERENCE_DEPENDENCY_A}']))

    def test_register_datasource_should_trigger_tag_registration_with_dependencies_as_tags_when_datasources_has_multiple_dependencies(self):
        reference = 'REFERENCE'
        expected_tags = set(['tag1', 'tag2', 'REFERENCE',
                             '{REFERENCE_DEPENDENCY_A}',
                             '{REFERENCE_DEPENDENCY_B}',
                             '{REFERENCE_DEPENDENCY_C}'])

        @self.engine.datasource('REFERENCE_DEPENDENCY_A')
        class DatasourceDependencyA(object):
            pass

        @self.engine.datasource('REFERENCE_DEPENDENCY_B')
        class DatasourceDependencyB(object):
            pass

        @self.engine.datasource('REFERENCE_DEPENDENCY_C')
        class DatasourceDependencyC(object):
            pass

        @self.engine.datasource(reference,
                                dependencies=['REFERENCE_DEPENDENCY_A', 'REFERENCE_DEPENDENCY_B', 'REFERENCE_DEPENDENCY_C'],
                                tags=['tag1', 'tag2'])
        class Datasource(object):
            pass

        verify(self.tagSelectorMock, times=1).register(reference, expected_tags)

    def test_register_datasource_should_trigger_tag_registration_with_dependencies_and_subdependencies_as_tags(self):
        # Transitive dependencies (A <- B <- C <- REFERENCE) must also be
        # inferred as tags of the registered datasource.
        reference = 'REFERENCE'
        expected_tags = set(['tag1', 'tag2', 'REFERENCE',
                             '{REFERENCE_DEPENDENCY_A}',
                             '{REFERENCE_DEPENDENCY_B}',
                             '{REFERENCE_DEPENDENCY_C}'])

        @self.engine.datasource('REFERENCE_DEPENDENCY_A')
        class DatasourceDependencyA(object):
            pass

        @self.engine.datasource('REFERENCE_DEPENDENCY_B',
                                dependencies=['REFERENCE_DEPENDENCY_A'])
        class DatasourceDependencyB(object):
            pass

        @self.engine.datasource('REFERENCE_DEPENDENCY_C',
                                dependencies=['REFERENCE_DEPENDENCY_B'])
        class DatasourceDependencyC(object):
            pass

        @self.engine.datasource(reference,
                                dependencies=['REFERENCE_DEPENDENCY_C'],
                                tags=['tag1', 'tag2'])
        class Datasource(object):
            pass

        verify(self.tagSelectorMock, times=1).register(reference, expected_tags)

    def test_register_datasource_should_trigger_tag_registration_with_dependencies_and_its_tags_as_tags_when_datasources_has_dependencies_with_tags(self):
        reference = 'REFERENCE'
        expected_tags = set(['tag1', 'tag2', 'REFERENCE',
                             '{tag1A}', '{tag2A}', '{REFERENCE_DEPENDENCY_A}'])

        @self.engine.datasource('REFERENCE_DEPENDENCY_A',
                                tags=['tag1A', 'tag2A'])
        class DatasourceDependencyA(object):
            pass

        @self.engine.datasource(reference,
                                dependencies=['REFERENCE_DEPENDENCY_A'],
                                tags=['tag1', 'tag2'])
        class Datasource(object):
            pass

        verify(self.tagSelectorMock, times=1).register(reference, expected_tags)

    def test_register_datasource_should_trigger_tag_registration_with_multiple_dependencies_and_its_tags_as_tags_when(self):
        reference = 'REFERENCE'
        expected_tags = set(['tag1', 'tag2', 'REFERENCE',
                             '{tag1A}', '{tag2A}', '{REFERENCE_DEPENDENCY_A}',
                             '{tag1B}', '{tag2B}', '{REFERENCE_DEPENDENCY_B}',
                             '{tag1C}', '{tag2C}', '{REFERENCE_DEPENDENCY_C}'])

        @self.engine.datasource('REFERENCE_DEPENDENCY_A',
                                tags=['tag1A', 'tag2A'])
        class DatasourceDependencyA(object):
            pass

        @self.engine.datasource('REFERENCE_DEPENDENCY_B',
                                tags=['tag1B', 'tag2B'])
        class DatasourceDependencyB(object):
            pass

        @self.engine.datasource('REFERENCE_DEPENDENCY_C',
                                tags=['tag1C', 'tag2C'])
        class DatasourceDependencyC(object):
            pass

        @self.engine.datasource(reference,
                                dependencies=['REFERENCE_DEPENDENCY_A', 'REFERENCE_DEPENDENCY_B', 'REFERENCE_DEPENDENCY_C'],
                                tags=['tag1', 'tag2'])
        class Datasource(object):
            pass

        verify(self.tagSelectorMock, times=1).register(reference, expected_tags)

    def test_register_datasource_should_trigger_tag_registration_with_multiple_nested_dependencies_and_its_tags_as_tags(self):
        reference = 'REFERENCE'
        expected_tags = set(['tag1', 'tag2', 'REFERENCE',
                             '{tag1A}', '{tag2A}', '{REFERENCE_DEPENDENCY_A}',
                             '{tag1B}', '{tag2B}', '{REFERENCE_DEPENDENCY_B}',
                             '{tag1C}', '{tag2C}', '{REFERENCE_DEPENDENCY_C}'])

        @self.engine.datasource('REFERENCE_DEPENDENCY_A',
                                tags=['tag1A', 'tag2A'])
        class DatasourceDependencyA(object):
            pass

        @self.engine.datasource('REFERENCE_DEPENDENCY_B',
                                dependencies=['REFERENCE_DEPENDENCY_A'],
                                tags=['tag1B', 'tag2B'])
        class DatasourceDependencyB(object):
            pass

        @self.engine.datasource('REFERENCE_DEPENDENCY_C',
                                dependencies=['REFERENCE_DEPENDENCY_B'],
                                tags=['tag1C', 'tag2C'])
        class DatasourceDependencyC(object):
            pass

        @self.engine.datasource(reference,
                                dependencies=['REFERENCE_DEPENDENCY_C'],
                                tags=['tag1', 'tag2'])
        class Datasource(object):
            pass

        verify(self.tagSelectorMock, times=1).register(reference, expected_tags)
class TestDatasourceEngine_tag_inference_and_declaration(unittest.TestCase):
    """Checks that a datasource's registered tags combine the tags declared at
    registration time with those inferred by the TagManager."""

    def setUp(self):
        # Monkey-patch core.tagselection.TagManager with a callable mock so the
        # inference calls made by the engine can be stubbed.
        self.TagManager = core.tagselection.TagManager
        self.tagManagerMock = mock(core.tagselection.TagManager)
        when(self.tagManagerMock).__call__(anyx(dict)).thenReturn(self.tagManagerMock)
        core.tagselection.TagManager = utils.CallableMock(self.tagManagerMock)
        self.engine = core.DatasourceEngine()

    def tearDown(self):
        # Restore the patched class so other tests see the real TagManager.
        core.tagselection.TagManager = self.TagManager

    def test_register_datasource_should_register_tags_based_on_declared_and_infered(self):
        """The stored tag set is the union of inferred and declared tags."""
        reference = 'REFERENCE'
        inferred = set(['infered1', 'infered2'])
        declared = set(['declared1', 'declared2'])
        when(self.tagManagerMock).infere_tags(reference).thenReturn(inferred)
        when(self.tagManagerMock).evaluate_new_candidate(...).thenReturn(None)

        @self.engine.datasource(reference,
                                tags=declared)
        class Datasource(object):
            pass

        assert self.engine._registrations[reference]['tags'] == (inferred | declared)
| 41.293634
| 154
| 0.666683
| 2,062
| 20,110
| 6.190592
| 0.078565
| 0.04622
| 0.045437
| 0.06134
| 0.911007
| 0.874422
| 0.848649
| 0.813631
| 0.79483
| 0.791304
| 0
| 0.0086
| 0.23093
| 20,110
| 486
| 155
| 41.378601
| 0.816759
| 0.001044
| 0
| 0.749304
| 0
| 0
| 0.090108
| 0.047394
| 0
| 0
| 0
| 0
| 0.013928
| 1
| 0.130919
| false
| 0.094708
| 0.013928
| 0.011142
| 0.259053
| 0.011142
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 8
|
fc2b958babef0531e288e929e94acd3ccef4a15c
| 79,418
|
py
|
Python
|
venv/lib/python3.8/site-packages/ansible_collections/community/dns/tests/unit/plugins/modules/test_hosttech_dns_record_set.py
|
saeedya/docker-ansible
|
6fb0cfc6bc4a5925b21380952a5a4502ec02119a
|
[
"Apache-2.0"
] | null | null | null |
venv/lib/python3.8/site-packages/ansible_collections/community/dns/tests/unit/plugins/modules/test_hosttech_dns_record_set.py
|
saeedya/docker-ansible
|
6fb0cfc6bc4a5925b21380952a5a4502ec02119a
|
[
"Apache-2.0"
] | null | null | null |
venv/lib/python3.8/site-packages/ansible_collections/community/dns/tests/unit/plugins/modules/test_hosttech_dns_record_set.py
|
saeedya/docker-ansible
|
6fb0cfc6bc4a5925b21380952a5a4502ec02119a
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# (c) 2021 Felix Fontein <felix@fontein.de>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import pytest
from ansible_collections.community.internal_test_tools.tests.unit.utils.fetch_url_module_framework import (
BaseTestModule,
FetchUrlCall,
)
from ansible_collections.community.dns.plugins.modules import hosttech_dns_record_set
# These imports are needed so patching below works
import ansible_collections.community.dns.plugins.module_utils.http # noqa
from .hosttech import (
expect_wsdl_authentication,
expect_wsdl_value,
validate_wsdl_call,
validate_wsdl_add_request,
validate_wsdl_update_request,
validate_wsdl_del_request,
create_wsdl_add_result,
create_wsdl_update_result,
create_wsdl_del_result,
HOSTTECH_WSDL_DEFAULT_ENTRIES,
HOSTTECH_WSDL_DEFAULT_ZONE_RESULT,
HOSTTECH_WSDL_ZONE_NOT_FOUND,
HOSTTECH_JSON_DEFAULT_ENTRIES,
HOSTTECH_JSON_ZONE_GET_RESULT,
HOSTTECH_JSON_ZONE_LIST_RESULT,
HOSTTECH_JSON_ZONE_RECORDS_GET_RESULT,
)
try:
import lxml.etree
HAS_LXML_ETREE = True
except ImportError:
HAS_LXML_ETREE = False
@pytest.mark.skipif(not HAS_LXML_ETREE, reason="Need lxml.etree for WSDL tests")
class TestHosttechDNSRecordWSDL(BaseTestModule):
    """Tests for hosttech_dns_record_set against the legacy WSDL (SOAP) API.

    Each test declares the exact sequence of HTTP calls the module is expected
    to make (FetchUrlCall expectations with canned WSDL responses) and asserts
    on the module result returned by the fetch_url test framework.
    """

    # Patch targets used by the framework: the module's AnsibleModule class and
    # the fetch_url function it performs HTTP through.
    MOCK_ANSIBLE_MODULEUTILS_BASIC_ANSIBLEMODULE = 'ansible_collections.community.dns.plugins.modules.hosttech_dns_record_set.AnsibleModule'
    MOCK_ANSIBLE_MODULEUTILS_URLS_FETCH_URL = 'ansible_collections.community.dns.plugins.module_utils.http.fetch_url'

    def test_unknown_zone(self, mocker):
        """Lookup by zone_name that the API does not know must fail with 'Zone not found'."""
        result = self.run_module_failed(mocker, hosttech_dns_record_set, {
            'hosttech_username': 'foo',
            'hosttech_password': 'bar',
            'state': 'present',
            'zone_name': 'example.org',
            'record': 'example.org',
            'type': 'MX',
            'ttl': 3600,
            'value': [
                '10 example.com',
            ],
            '_ansible_remote_tmp': '/tmp/tmp',
            '_ansible_keep_remote_files': True,
        }, [
            FetchUrlCall('POST', 200)
            .expect_content_predicate(validate_wsdl_call([
                expect_wsdl_authentication('foo', 'bar'),
                expect_wsdl_value(
                    [lxml.etree.QName('https://ns1.hosttech.eu/public/api', 'getZone').text, 'sZoneName'],
                    'example.org',
                    ('http://www.w3.org/2001/XMLSchema', 'string')
                ),
            ]))
            .result_str(HOSTTECH_WSDL_ZONE_NOT_FOUND),
        ])
        assert result['msg'] == 'Zone not found'

    def test_unknown_zone_id(self, mocker):
        """Unknown numeric zone_id: the id is sent as the getZone name and the module fails."""
        result = self.run_module_failed(mocker, hosttech_dns_record_set, {
            'hosttech_username': 'foo',
            'hosttech_password': 'bar',
            'state': 'present',
            'zone_id': 23,
            'record': 'example.org',
            'type': 'MX',
            'ttl': 3600,
            'value': [
                '10 example.com',
            ],
            '_ansible_remote_tmp': '/tmp/tmp',
            '_ansible_keep_remote_files': True,
        }, [
            FetchUrlCall('POST', 200)
            .expect_content_predicate(validate_wsdl_call([
                expect_wsdl_authentication('foo', 'bar'),
                expect_wsdl_value(
                    [lxml.etree.QName('https://ns1.hosttech.eu/public/api', 'getZone').text, 'sZoneName'],
                    '23',
                    ('http://www.w3.org/2001/XMLSchema', 'string')
                ),
            ]))
            .result_str(HOSTTECH_WSDL_ZONE_NOT_FOUND),
        ])
        assert result['msg'] == 'Zone not found'

    def test_unknown_zone_id_prefix(self, mocker):
        """Unknown zone_id with prefix-style record addressing also yields 'Zone not found'."""
        result = self.run_module_failed(mocker, hosttech_dns_record_set, {
            'hosttech_username': 'foo',
            'hosttech_password': 'bar',
            'state': 'present',
            'zone_id': 23,
            'prefix': '',
            'type': 'MX',
            'ttl': 3600,
            'value': [
                '10 example.com',
            ],
            '_ansible_remote_tmp': '/tmp/tmp',
            '_ansible_keep_remote_files': True,
        }, [
            FetchUrlCall('POST', 200)
            .expect_content_predicate(validate_wsdl_call([
                expect_wsdl_authentication('foo', 'bar'),
                expect_wsdl_value(
                    [lxml.etree.QName('https://ns1.hosttech.eu/public/api', 'getZone').text, 'sZoneName'],
                    '23',
                    ('http://www.w3.org/2001/XMLSchema', 'string')
                ),
            ]))
            .result_str(HOSTTECH_WSDL_ZONE_NOT_FOUND),
        ])
        assert result['msg'] == 'Zone not found'

    def test_idempotency_present(self, mocker):
        """state=present with an already-existing identical record set: no change."""
        result = self.run_module_success(mocker, hosttech_dns_record_set, {
            'hosttech_username': 'foo',
            'hosttech_password': 'bar',
            'state': 'present',
            'zone_name': 'example.com',
            'record': 'example.com',
            'type': 'MX',
            'ttl': 3600,
            'value': [
                '10 example.com',
            ],
            '_ansible_remote_tmp': '/tmp/tmp',
            '_ansible_keep_remote_files': True,
        }, [
            FetchUrlCall('POST', 200)
            .expect_content_predicate(validate_wsdl_call([
                expect_wsdl_authentication('foo', 'bar'),
                expect_wsdl_value(
                    [lxml.etree.QName('https://ns1.hosttech.eu/public/api', 'getZone').text, 'sZoneName'],
                    'example.com',
                    ('http://www.w3.org/2001/XMLSchema', 'string')
                ),
            ]))
            .result_str(HOSTTECH_WSDL_DEFAULT_ZONE_RESULT),
        ])
        assert result['changed'] is False
        assert result['zone_id'] == 42

    def test_idempotency_absent_value(self, mocker):
        """state=absent with a non-matching value and on_existing=keep: nothing deleted."""
        result = self.run_module_success(mocker, hosttech_dns_record_set, {
            'hosttech_username': 'foo',
            'hosttech_password': 'bar',
            'state': 'absent',
            'zone_name': 'example.com',
            'record': '*.example.com',
            'type': 'A',
            'ttl': 3600,
            'value': [
                '1.2.3.6',
            ],
            'on_existing': 'keep',
            '_ansible_remote_tmp': '/tmp/tmp',
            '_ansible_keep_remote_files': True,
        }, [
            FetchUrlCall('POST', 200)
            .expect_content_predicate(validate_wsdl_call([
                expect_wsdl_authentication('foo', 'bar'),
                expect_wsdl_value(
                    [lxml.etree.QName('https://ns1.hosttech.eu/public/api', 'getZone').text, 'sZoneName'],
                    'example.com',
                    ('http://www.w3.org/2001/XMLSchema', 'string')
                ),
            ]))
            .result_str(HOSTTECH_WSDL_DEFAULT_ZONE_RESULT),
        ])
        assert result['changed'] is False
        assert result['zone_id'] == 42

    def test_idempotency_absent_ttl(self, mocker):
        """state=absent with a non-matching TTL and on_existing=keep: nothing deleted."""
        result = self.run_module_success(mocker, hosttech_dns_record_set, {
            'hosttech_username': 'foo',
            'hosttech_password': 'bar',
            'state': 'absent',
            'zone_name': 'example.com',
            'record': '*.example.com',
            'type': 'A',
            'ttl': 1800,
            'value': [
                '1.2.3.5',
            ],
            'on_existing': 'keep',
            '_ansible_remote_tmp': '/tmp/tmp',
            '_ansible_keep_remote_files': True,
        }, [
            FetchUrlCall('POST', 200)
            .expect_content_predicate(validate_wsdl_call([
                expect_wsdl_authentication('foo', 'bar'),
                expect_wsdl_value(
                    [lxml.etree.QName('https://ns1.hosttech.eu/public/api', 'getZone').text, 'sZoneName'],
                    'example.com',
                    ('http://www.w3.org/2001/XMLSchema', 'string')
                ),
            ]))
            .result_str(HOSTTECH_WSDL_DEFAULT_ZONE_RESULT),
        ])
        assert result['changed'] is False
        assert result['zone_id'] == 42

    def test_idempotency_absent_type(self, mocker):
        """state=absent for a record type that does not exist in the zone: no change."""
        result = self.run_module_success(mocker, hosttech_dns_record_set, {
            'hosttech_username': 'foo',
            'hosttech_password': 'bar',
            'state': 'absent',
            'zone_id': 42,
            'record': 'example.com',
            'type': 'CAA',
            'ttl': 3600,
            'value': [
                '0 issue "letsencrypt.org"',
            ],
            'on_existing': 'keep',
            '_ansible_remote_tmp': '/tmp/tmp',
            '_ansible_keep_remote_files': True,
        }, [
            FetchUrlCall('POST', 200)
            .expect_content_predicate(validate_wsdl_call([
                expect_wsdl_authentication('foo', 'bar'),
                expect_wsdl_value(
                    [lxml.etree.QName('https://ns1.hosttech.eu/public/api', 'getZone').text, 'sZoneName'],
                    '42',
                    ('http://www.w3.org/2001/XMLSchema', 'string')
                ),
            ]))
            .result_str(HOSTTECH_WSDL_DEFAULT_ZONE_RESULT),
        ])
        assert result['changed'] is False
        assert result['zone_id'] == 42

    def test_idempotency_absent_record(self, mocker):
        """state=absent for a record name that does not exist (trailing dots accepted): no change."""
        result = self.run_module_success(mocker, hosttech_dns_record_set, {
            'hosttech_username': 'foo',
            'hosttech_password': 'bar',
            'state': 'absent',
            'zone_name': 'example.com.',
            'record': 'somewhere.example.com.',
            'type': 'A',
            'ttl': 3600,
            'value': [
                '1.2.3.6',
            ],
            'on_existing': 'keep',
            '_ansible_remote_tmp': '/tmp/tmp',
            '_ansible_keep_remote_files': True,
        }, [
            FetchUrlCall('POST', 200)
            .expect_content_predicate(validate_wsdl_call([
                expect_wsdl_authentication('foo', 'bar'),
                expect_wsdl_value(
                    [lxml.etree.QName('https://ns1.hosttech.eu/public/api', 'getZone').text, 'sZoneName'],
                    'example.com',
                    ('http://www.w3.org/2001/XMLSchema', 'string')
                ),
            ]))
            .result_str(HOSTTECH_WSDL_DEFAULT_ZONE_RESULT),
        ])
        assert result['changed'] is False
        assert result['zone_id'] == 42

    def test_absent(self, mocker):
        """Deleting an existing record issues a WSDL delete request and reports changed."""
        # Use the first canned zone entry as the record to delete; the tuple
        # layout (indices 2..5 = type, prefix, value, ttl) mirrors the fixture.
        record = HOSTTECH_WSDL_DEFAULT_ENTRIES[0]
        result = self.run_module_success(mocker, hosttech_dns_record_set, {
            'hosttech_username': 'foo',
            'hosttech_password': 'bar',
            'state': 'absent',
            'zone_name': 'example.com',
            'record': record[3] + 'example.com',
            'type': record[2],
            'ttl': record[5],
            'value': [
                record[4],
            ],
            '_ansible_remote_tmp': '/tmp/tmp',
            '_ansible_keep_remote_files': True,
        }, [
            FetchUrlCall('POST', 200)
            .expect_content_predicate(validate_wsdl_call([
                expect_wsdl_authentication('foo', 'bar'),
                expect_wsdl_value(
                    [lxml.etree.QName('https://ns1.hosttech.eu/public/api', 'getZone').text, 'sZoneName'],
                    'example.com',
                    ('http://www.w3.org/2001/XMLSchema', 'string')
                ),
            ]))
            .result_str(HOSTTECH_WSDL_DEFAULT_ZONE_RESULT),
            FetchUrlCall('POST', 200)
            .expect_content_predicate(validate_wsdl_call([
                expect_wsdl_authentication('foo', 'bar'),
                validate_wsdl_del_request(record),
            ]))
            .result_str(create_wsdl_del_result(True)),
        ])
        assert result['changed'] is True
        assert result['zone_id'] == 42

    def test_change_add_one_check_mode(self, mocker):
        """Check mode: a record addition is reported as changed without issuing the add call."""
        result = self.run_module_success(mocker, hosttech_dns_record_set, {
            'hosttech_username': 'foo',
            'hosttech_password': 'bar',
            'state': 'present',
            'zone_name': 'example.com',
            'record': 'example.com',
            'type': 'CAA',
            'ttl': 3600,
            'value': [
                '0 issue "letsencrypt.org"',
            ],
            '_ansible_check_mode': True,
            '_ansible_remote_tmp': '/tmp/tmp',
            '_ansible_keep_remote_files': True,
        }, [
            FetchUrlCall('POST', 200)
            .expect_content_predicate(validate_wsdl_call([
                expect_wsdl_authentication('foo', 'bar'),
                expect_wsdl_value(
                    [lxml.etree.QName('https://ns1.hosttech.eu/public/api', 'getZone').text, 'sZoneName'],
                    'example.com',
                    ('http://www.w3.org/2001/XMLSchema', 'string')
                ),
            ]))
            .result_str(HOSTTECH_WSDL_DEFAULT_ZONE_RESULT),
        ])
        assert result['changed'] is True
        assert result['zone_id'] == 42

    def test_change_add_one(self, mocker):
        """Adding a new record issues a WSDL addRecord request and reports changed."""
        new_entry = (131, 42, 'CAA', 'foo', '0 issue "letsencrypt.org"', 3600, None, None)
        result = self.run_module_success(mocker, hosttech_dns_record_set, {
            'hosttech_username': 'foo',
            'hosttech_password': 'bar',
            'state': 'present',
            'zone_name': 'example.com',
            'record': 'foo.example.com',
            'type': 'CAA',
            'ttl': 3600,
            'value': [
                '0 issue "letsencrypt.org"',
            ],
            '_ansible_remote_tmp': '/tmp/tmp',
            '_ansible_keep_remote_files': True,
        }, [
            FetchUrlCall('POST', 200)
            .expect_content_predicate(validate_wsdl_call([
                expect_wsdl_authentication('foo', 'bar'),
                expect_wsdl_value(
                    [lxml.etree.QName('https://ns1.hosttech.eu/public/api', 'getZone').text, 'sZoneName'],
                    'example.com',
                    ('http://www.w3.org/2001/XMLSchema', 'string')
                ),
            ]))
            .result_str(HOSTTECH_WSDL_DEFAULT_ZONE_RESULT),
            FetchUrlCall('POST', 200)
            .expect_content_predicate(validate_wsdl_call([
                expect_wsdl_authentication('foo', 'bar'),
                validate_wsdl_add_request('42', new_entry),
            ]))
            .result_str(create_wsdl_add_result(new_entry)),
        ])
        assert result['changed'] is True
        assert result['zone_id'] == 42

    def test_change_modify_list_fail(self, mocker):
        """on_existing=keep_and_fail: modifying an existing record set must fail."""
        result = self.run_module_failed(mocker, hosttech_dns_record_set, {
            'hosttech_username': 'foo',
            'hosttech_password': 'bar',
            'state': 'present',
            'zone_name': 'example.com',
            'record': 'example.com',
            'type': 'NS',
            'ttl': 10800,
            'value': [
                'ns1.hostserv.eu',
                'ns4.hostserv.eu',
            ],
            'on_existing': 'keep_and_fail',
            '_ansible_remote_tmp': '/tmp/tmp',
            '_ansible_keep_remote_files': True,
        }, [
            FetchUrlCall('POST', 200)
            .expect_content_predicate(validate_wsdl_call([
                expect_wsdl_authentication('foo', 'bar'),
                expect_wsdl_value(
                    [lxml.etree.QName('https://ns1.hosttech.eu/public/api', 'getZone').text, 'sZoneName'],
                    'example.com',
                    ('http://www.w3.org/2001/XMLSchema', 'string')
                ),
            ]))
            .result_str(HOSTTECH_WSDL_DEFAULT_ZONE_RESULT),
        ])
        assert result['msg'] == "Record already exists with different value. Set on_existing=replace to replace it"

    def test_change_modify_list(self, mocker):
        """Replacing an NS record set deletes one entry, updates another, and returns a diff."""
        del_entry = (130, 42, 'NS', '', 'ns3.hostserv.eu', 10800, None, None)
        update_entry = (131, 42, 'NS', '', 'ns4.hostserv.eu', 10800, None, None)
        result = self.run_module_success(mocker, hosttech_dns_record_set, {
            'hosttech_username': 'foo',
            'hosttech_password': 'bar',
            'state': 'present',
            'zone_name': 'example.com',
            'record': 'example.com',
            'type': 'NS',
            'ttl': 10800,
            'value': [
                'ns1.hostserv.eu',
                'ns4.hostserv.eu',
            ],
            '_ansible_diff': True,
            '_ansible_remote_tmp': '/tmp/tmp',
            '_ansible_keep_remote_files': True,
        }, [
            FetchUrlCall('POST', 200)
            .expect_content_predicate(validate_wsdl_call([
                expect_wsdl_authentication('foo', 'bar'),
                expect_wsdl_value(
                    [lxml.etree.QName('https://ns1.hosttech.eu/public/api', 'getZone').text, 'sZoneName'],
                    'example.com',
                    ('http://www.w3.org/2001/XMLSchema', 'string')
                ),
            ]))
            .result_str(HOSTTECH_WSDL_DEFAULT_ZONE_RESULT),
            FetchUrlCall('POST', 200)
            .expect_content_predicate(validate_wsdl_call([
                expect_wsdl_authentication('foo', 'bar'),
                validate_wsdl_del_request(del_entry),
            ]))
            .result_str(create_wsdl_del_result(True)),
            FetchUrlCall('POST', 200)
            .expect_content_predicate(validate_wsdl_call([
                expect_wsdl_authentication('foo', 'bar'),
                validate_wsdl_update_request(update_entry),
            ]))
            .result_str(create_wsdl_update_result(update_entry)),
        ])
        assert result['changed'] is True
        assert result['zone_id'] == 42
        assert 'diff' in result
        assert 'before' in result['diff']
        assert 'after' in result['diff']
        assert result['diff']['before'] == {
            'record': 'example.com',
            'prefix': '',
            'type': 'NS',
            'ttl': 10800,
            'value': ['ns1.hostserv.eu', 'ns2.hostserv.eu', 'ns3.hostserv.eu'],
        }
        assert result['diff']['after'] == {
            'record': 'example.com',
            'prefix': '',
            'type': 'NS',
            'ttl': 10800,
            'value': ['ns1.hostserv.eu', 'ns4.hostserv.eu'],
        }
class TestHosttechDNSRecordJSON(BaseTestModule):
    """Tests for hosttech_dns_record_set against the JSON (REST) API."""

    # Patch targets used by the framework: the module's AnsibleModule class and
    # the fetch_url function it performs HTTP through.
    MOCK_ANSIBLE_MODULEUTILS_BASIC_ANSIBLEMODULE = 'ansible_collections.community.dns.plugins.modules.hosttech_dns_record_set.AnsibleModule'
    MOCK_ANSIBLE_MODULEUTILS_URLS_FETCH_URL = 'ansible_collections.community.dns.plugins.module_utils.http.fetch_url'
def test_unknown_zone(self, mocker):
    """Lookup by zone_name that the zone list does not contain must fail with 'Zone not found'."""
    result = self.run_module_failed(mocker, hosttech_dns_record_set, {
        'hosttech_token': 'foo',
        'state': 'present',
        'zone_name': 'example.org',
        'record': 'example.org',
        'type': 'MX',
        'ttl': 3600,
        'value': [
            '10 example.com',
        ],
        '_ansible_remote_tmp': '/tmp/tmp',
        '_ansible_keep_remote_files': True,
    }, [
        FetchUrlCall('GET', 200)
        .expect_header('accept', 'application/json')
        .expect_header('authorization', 'Bearer foo')
        .expect_url('https://api.ns1.hosttech.eu/api/user/v1/zones', without_query=True)
        .expect_query_values('query', 'example.org')
        .return_header('Content-Type', 'application/json')
        .result_json(HOSTTECH_JSON_ZONE_LIST_RESULT),
    ])
    assert result['msg'] == 'Zone not found'
def test_unknown_zone_id(self, mocker):
    """A 404 when fetching the zone by id must fail with 'Zone not found'."""
    result = self.run_module_failed(mocker, hosttech_dns_record_set, {
        'hosttech_token': 'foo',
        'state': 'present',
        'zone_id': 23,
        'record': 'example.org',
        'type': 'MX',
        'ttl': 3600,
        'value': [
            '10 example.com',
        ],
        '_ansible_remote_tmp': '/tmp/tmp',
        '_ansible_keep_remote_files': True,
    }, [
        FetchUrlCall('GET', 404)
        .expect_header('accept', 'application/json')
        .expect_header('authorization', 'Bearer foo')
        .expect_url('https://api.ns1.hosttech.eu/api/user/v1/zones/23')
        .return_header('Content-Type', 'application/json')
        .result_json(dict(message="")),
    ])
    assert result['msg'] == 'Zone not found'
def test_unknown_zone_id_prefix(self, mocker):
    """A 404 on the records endpoint (prefix mode) must fail with 'Zone not found'."""
    result = self.run_module_failed(mocker, hosttech_dns_record_set, {
        'hosttech_token': 'foo',
        'state': 'present',
        'zone_id': 23,
        'prefix': '',
        'type': 'MX',
        'ttl': 3600,
        'value': [
            '10 example.com',
        ],
        '_ansible_remote_tmp': '/tmp/tmp',
        '_ansible_keep_remote_files': True,
    }, [
        FetchUrlCall('GET', 404)
        .expect_header('accept', 'application/json')
        .expect_header('authorization', 'Bearer foo')
        .expect_url('https://api.ns1.hosttech.eu/api/user/v1/zones/23/records', without_query=True)
        .expect_query_values('type', 'MX')
        .return_header('Content-Type', 'application/json')
        .result_json(dict(message="")),
    ])
    assert result['msg'] == 'Zone not found'
def test_auth_error(self, mocker):
    """HTTP 401 from the API must surface as an authentication failure."""
    result = self.run_module_failed(mocker, hosttech_dns_record_set, {
        'hosttech_token': 'foo',
        'state': 'present',
        'zone_name': 'example.org',
        'record': 'example.org',
        'type': 'MX',
        'ttl': 3600,
        'value': [
            '10 example.com',
        ],
        '_ansible_remote_tmp': '/tmp/tmp',
        '_ansible_keep_remote_files': True,
    }, [
        FetchUrlCall('GET', 401)
        .expect_header('accept', 'application/json')
        .expect_header('authorization', 'Bearer foo')
        .expect_url('https://api.ns1.hosttech.eu/api/user/v1/zones', without_query=True)
        .expect_query_values('query', 'example.org')
        .result_str(''),
    ])
    assert result['msg'] == 'Cannot authenticate: Unauthorized: the authentication parameters are incorrect (HTTP status 401)'
def test_auth_error_forbidden(self, mocker):
    """HTTP 403 from the API must surface as a 'Forbidden' authentication failure."""
    result = self.run_module_failed(mocker, hosttech_dns_record_set, {
        'hosttech_token': 'foo',
        'state': 'present',
        'zone_id': 23,
        'record': 'example.org',
        'type': 'MX',
        'ttl': 3600,
        'value': [
            '10 example.com',
        ],
        '_ansible_remote_tmp': '/tmp/tmp',
        '_ansible_keep_remote_files': True,
    }, [
        FetchUrlCall('GET', 403)
        .expect_header('accept', 'application/json')
        .expect_header('authorization', 'Bearer foo')
        .expect_url('https://api.ns1.hosttech.eu/api/user/v1/zones/23')
        .result_json(dict(message="")),
    ])
    assert result['msg'] == 'Cannot authenticate: Forbidden: you do not have access to this resource (HTTP status 403)'
def test_other_error(self, mocker):
    """An unexpected HTTP 500 with a non-JSON body must produce a descriptive error."""
    result = self.run_module_failed(mocker, hosttech_dns_record_set, {
        'hosttech_token': 'foo',
        'state': 'present',
        'zone_name': 'example.org',
        'record': 'example.org',
        'type': 'MX',
        'ttl': 3600,
        'value': [
            '10 example.com',
        ],
        '_ansible_remote_tmp': '/tmp/tmp',
        '_ansible_keep_remote_files': True,
    }, [
        FetchUrlCall('GET', 500)
        .expect_header('accept', 'application/json')
        .expect_header('authorization', 'Bearer foo')
        .expect_url('https://api.ns1.hosttech.eu/api/user/v1/zones', without_query=True)
        .expect_query_values('query', 'example.org')
        .result_str(''),
    ])
    # Only prefix/substring checks: the exact URL query order is not fixed.
    assert result['msg'].startswith('Error: GET https://api.ns1.hosttech.eu/api/user/v1/zones?')
    assert 'did not yield JSON data, but HTTP status code 500 with Content-Type' in result['msg']
def test_idempotency_present(self, mocker):
    """state=present with an already-existing identical record set: no change, identical diff."""
    result = self.run_module_success(mocker, hosttech_dns_record_set, {
        'hosttech_token': 'foo',
        'state': 'present',
        'zone_name': 'example.com',
        'record': 'example.com',
        'type': 'MX',
        'ttl': 3600,
        'value': [
            '10 example.com',
        ],
        '_ansible_diff': True,
        '_ansible_remote_tmp': '/tmp/tmp',
        '_ansible_keep_remote_files': True,
    }, [
        FetchUrlCall('GET', 200)
        .expect_header('accept', 'application/json')
        .expect_header('authorization', 'Bearer foo')
        .expect_url('https://api.ns1.hosttech.eu/api/user/v1/zones', without_query=True)
        .expect_query_values('query', 'example.com')
        .return_header('Content-Type', 'application/json')
        .result_json(HOSTTECH_JSON_ZONE_LIST_RESULT),
        FetchUrlCall('GET', 200)
        .expect_header('accept', 'application/json')
        .expect_header('authorization', 'Bearer foo')
        .expect_url('https://api.ns1.hosttech.eu/api/user/v1/zones/42')
        .return_header('Content-Type', 'application/json')
        .result_json(HOSTTECH_JSON_ZONE_GET_RESULT),
    ])
    assert result['changed'] is False
    assert result['zone_id'] == 42
    assert result['diff']['before'] == {
        'record': 'example.com',
        'prefix': '',
        'type': 'MX',
        'ttl': 3600,
        'value': ['10 example.com'],
    }
    assert result['diff']['before'] == result['diff']['after']
def test_idempotency_absent_value(self, mocker):
    """state=absent with a non-matching value and on_existing=keep: no change; diff shows existing value."""
    result = self.run_module_success(mocker, hosttech_dns_record_set, {
        'hosttech_token': 'foo',
        'state': 'absent',
        'zone_name': 'example.com',
        'record': '*.example.com',
        'type': 'A',
        'ttl': 3600,
        'value': [
            '1.2.3.6',
        ],
        'on_existing': 'keep',
        '_ansible_diff': True,
        '_ansible_remote_tmp': '/tmp/tmp',
        '_ansible_keep_remote_files': True,
    }, [
        FetchUrlCall('GET', 200)
        .expect_header('accept', 'application/json')
        .expect_header('authorization', 'Bearer foo')
        .expect_url('https://api.ns1.hosttech.eu/api/user/v1/zones', without_query=True)
        .expect_query_values('query', 'example.com')
        .return_header('Content-Type', 'application/json')
        .result_json(HOSTTECH_JSON_ZONE_LIST_RESULT),
        FetchUrlCall('GET', 200)
        .expect_header('accept', 'application/json')
        .expect_header('authorization', 'Bearer foo')
        .expect_url('https://api.ns1.hosttech.eu/api/user/v1/zones/42')
        .return_header('Content-Type', 'application/json')
        .result_json(HOSTTECH_JSON_ZONE_GET_RESULT),
    ])
    assert result['changed'] is False
    assert result['zone_id'] == 42
    assert result['diff']['before'] == {
        'record': '*.example.com',
        'prefix': '*',
        'type': 'A',
        'ttl': 3600,
        'value': ['1.2.3.5'],
    }
    assert result['diff']['before'] == result['diff']['after']
def test_idempotency_absent_value_prefix(self, mocker):
    """Same as test_idempotency_absent_value, but addressing the record via prefix='*'."""
    result = self.run_module_success(mocker, hosttech_dns_record_set, {
        'hosttech_token': 'foo',
        'state': 'absent',
        'zone_name': 'example.com',
        'prefix': '*',
        'type': 'A',
        'ttl': 3600,
        'value': [
            '1.2.3.6',
        ],
        'on_existing': 'keep',
        '_ansible_remote_tmp': '/tmp/tmp',
        '_ansible_keep_remote_files': True,
    }, [
        FetchUrlCall('GET', 200)
        .expect_header('accept', 'application/json')
        .expect_header('authorization', 'Bearer foo')
        .expect_url('https://api.ns1.hosttech.eu/api/user/v1/zones', without_query=True)
        .expect_query_values('query', 'example.com')
        .return_header('Content-Type', 'application/json')
        .result_json(HOSTTECH_JSON_ZONE_LIST_RESULT),
        FetchUrlCall('GET', 200)
        .expect_header('accept', 'application/json')
        .expect_header('authorization', 'Bearer foo')
        .expect_url('https://api.ns1.hosttech.eu/api/user/v1/zones/42')
        .return_header('Content-Type', 'application/json')
        .result_json(HOSTTECH_JSON_ZONE_GET_RESULT),
    ])
    assert result['changed'] is False
    assert result['zone_id'] == 42
def test_idempotency_absent_ttl(self, mocker):
    """state=absent with a non-matching TTL and on_existing=keep: no change."""
    result = self.run_module_success(mocker, hosttech_dns_record_set, {
        'hosttech_token': 'foo',
        'state': 'absent',
        'zone_name': 'example.com',
        'record': '*.example.com',
        'type': 'A',
        'ttl': 1800,
        'value': [
            '1.2.3.5',
        ],
        'on_existing': 'keep',
        '_ansible_remote_tmp': '/tmp/tmp',
        '_ansible_keep_remote_files': True,
    }, [
        FetchUrlCall('GET', 200)
        .expect_header('accept', 'application/json')
        .expect_header('authorization', 'Bearer foo')
        .expect_url('https://api.ns1.hosttech.eu/api/user/v1/zones', without_query=True)
        .expect_query_values('query', 'example.com')
        .return_header('Content-Type', 'application/json')
        .result_json(HOSTTECH_JSON_ZONE_LIST_RESULT),
        FetchUrlCall('GET', 200)
        .expect_header('accept', 'application/json')
        .expect_header('authorization', 'Bearer foo')
        .expect_url('https://api.ns1.hosttech.eu/api/user/v1/zones/42')
        .return_header('Content-Type', 'application/json')
        .result_json(HOSTTECH_JSON_ZONE_GET_RESULT),
    ])
    assert result['changed'] is False
    assert result['zone_id'] == 42
def test_idempotency_absent_type(self, mocker):
    """state=absent for a record type that does not exist in the zone: no change."""
    result = self.run_module_success(mocker, hosttech_dns_record_set, {
        'hosttech_token': 'foo',
        'state': 'absent',
        'zone_name': 'example.com',
        'record': 'example.com',
        'type': 'CAA',
        'ttl': 3600,
        'value': [
            '0 issue "letsencrypt.org"',
        ],
        'on_existing': 'keep',
        '_ansible_remote_tmp': '/tmp/tmp',
        '_ansible_keep_remote_files': True,
    }, [
        FetchUrlCall('GET', 200)
        .expect_header('accept', 'application/json')
        .expect_header('authorization', 'Bearer foo')
        .expect_url('https://api.ns1.hosttech.eu/api/user/v1/zones', without_query=True)
        .expect_query_values('query', 'example.com')
        .return_header('Content-Type', 'application/json')
        .result_json(HOSTTECH_JSON_ZONE_LIST_RESULT),
        FetchUrlCall('GET', 200)
        .expect_header('accept', 'application/json')
        .expect_header('authorization', 'Bearer foo')
        .expect_url('https://api.ns1.hosttech.eu/api/user/v1/zones/42')
        .return_header('Content-Type', 'application/json')
        .result_json(HOSTTECH_JSON_ZONE_GET_RESULT),
    ])
    assert result['changed'] is False
    assert result['zone_id'] == 42
def test_idempotency_absent_record(self, mocker):
    """state=absent for a nonexistent record name with on_existing=keep: no change, no warnings."""
    result = self.run_module_success(mocker, hosttech_dns_record_set, {
        'hosttech_token': 'foo',
        'state': 'absent',
        'zone_name': 'example.com.',
        'record': 'somewhere.example.com.',
        'type': 'A',
        'ttl': 3600,
        'value': [
            '1.2.3.6',
        ],
        'on_existing': 'keep',
        '_ansible_remote_tmp': '/tmp/tmp',
        '_ansible_keep_remote_files': True,
    }, [
        FetchUrlCall('GET', 200)
        .expect_header('accept', 'application/json')
        .expect_header('authorization', 'Bearer foo')
        .expect_url('https://api.ns1.hosttech.eu/api/user/v1/zones', without_query=True)
        .expect_query_values('query', 'example.com')
        .return_header('Content-Type', 'application/json')
        .result_json(HOSTTECH_JSON_ZONE_LIST_RESULT),
        FetchUrlCall('GET', 200)
        .expect_header('accept', 'application/json')
        .expect_header('authorization', 'Bearer foo')
        .expect_url('https://api.ns1.hosttech.eu/api/user/v1/zones/42')
        .return_header('Content-Type', 'application/json')
        .result_json(HOSTTECH_JSON_ZONE_GET_RESULT),
    ])
    assert result['changed'] is False
    assert result['zone_id'] == 42
    assert 'warnings' not in result
    def test_idempotency_absent_record_warn(self, mocker):
        """Like test_idempotency_absent_record, but with on_existing=keep_and_warn:
        still no change, but a warning about the differing existing value is emitted."""
        result = self.run_module_success(mocker, hosttech_dns_record_set, {
            'hosttech_token': 'foo',
            'state': 'absent',
            'zone_name': 'example.com.',
            'record': 'somewhere.example.com.',
            'type': 'A',
            'ttl': 3600,
            'value': [
                '1.2.3.6',
            ],
            'on_existing': 'keep_and_warn',
            '_ansible_remote_tmp': '/tmp/tmp',
            '_ansible_keep_remote_files': True,
        }, [
            # Zone lookup by name, then zone contents; read-only interaction.
            FetchUrlCall('GET', 200)
            .expect_header('accept', 'application/json')
            .expect_header('authorization', 'Bearer foo')
            .expect_url('https://api.ns1.hosttech.eu/api/user/v1/zones', without_query=True)
            .expect_query_values('query', 'example.com')
            .return_header('Content-Type', 'application/json')
            .result_json(HOSTTECH_JSON_ZONE_LIST_RESULT),
            FetchUrlCall('GET', 200)
            .expect_header('accept', 'application/json')
            .expect_header('authorization', 'Bearer foo')
            .expect_url('https://api.ns1.hosttech.eu/api/user/v1/zones/42')
            .return_header('Content-Type', 'application/json')
            .result_json(HOSTTECH_JSON_ZONE_GET_RESULT),
        ])
        assert result['changed'] is False
        assert result['zone_id'] == 42
        assert list(result['warnings']) == ["Record already exists with different value. Set on_existing=replace to remove it"]
    def test_idempotency_absent_record_fail(self, mocker):
        """Like test_idempotency_absent_record, but with on_existing=keep_and_fail:
        the module must fail with a message about the differing existing value."""
        result = self.run_module_failed(mocker, hosttech_dns_record_set, {
            'hosttech_token': 'foo',
            'state': 'absent',
            'zone_name': 'example.com.',
            'record': 'somewhere.example.com.',
            'type': 'A',
            'ttl': 3600,
            'value': [
                '1.2.3.6',
            ],
            'on_existing': 'keep_and_fail',
            '_ansible_remote_tmp': '/tmp/tmp',
            '_ansible_keep_remote_files': True,
        }, [
            # Zone lookup by name, then zone contents; the failure happens after reading.
            FetchUrlCall('GET', 200)
            .expect_header('accept', 'application/json')
            .expect_header('authorization', 'Bearer foo')
            .expect_url('https://api.ns1.hosttech.eu/api/user/v1/zones', without_query=True)
            .expect_query_values('query', 'example.com')
            .return_header('Content-Type', 'application/json')
            .result_json(HOSTTECH_JSON_ZONE_LIST_RESULT),
            FetchUrlCall('GET', 200)
            .expect_header('accept', 'application/json')
            .expect_header('authorization', 'Bearer foo')
            .expect_url('https://api.ns1.hosttech.eu/api/user/v1/zones/42')
            .return_header('Content-Type', 'application/json')
            .result_json(HOSTTECH_JSON_ZONE_GET_RESULT),
        ])
        assert result['msg'] == "Record already exists with different value. Set on_existing=replace to remove it"
    def test_absent(self, mocker):
        """state=absent for an existing record (built from the default fixture entry):
        after looking up the zone, the matching record is deleted via DELETE."""
        record = HOSTTECH_JSON_DEFAULT_ENTRIES[0]
        result = self.run_module_success(mocker, hosttech_dns_record_set, {
            'hosttech_token': 'foo',
            'state': 'absent',
            'zone_name': 'example.com',
            'record': record['name'] + 'example.com',
            'type': record['type'],
            'ttl': record['ttl'],
            'value': [
                record['ipv4'],
            ],
            '_ansible_remote_tmp': '/tmp/tmp',
            '_ansible_keep_remote_files': True,
        }, [
            FetchUrlCall('GET', 200)
            .expect_header('accept', 'application/json')
            .expect_header('authorization', 'Bearer foo')
            .expect_url('https://api.ns1.hosttech.eu/api/user/v1/zones', without_query=True)
            .expect_query_values('query', 'example.com')
            .return_header('Content-Type', 'application/json')
            .result_json(HOSTTECH_JSON_ZONE_LIST_RESULT),
            FetchUrlCall('GET', 200)
            .expect_header('accept', 'application/json')
            .expect_header('authorization', 'Bearer foo')
            .expect_url('https://api.ns1.hosttech.eu/api/user/v1/zones/42')
            .return_header('Content-Type', 'application/json')
            .result_json(HOSTTECH_JSON_ZONE_GET_RESULT),
            # Deletion targets exactly the fixture record's ID.
            FetchUrlCall('DELETE', 204)
            .expect_header('accept', 'application/json')
            .expect_header('authorization', 'Bearer foo')
            .expect_url('https://api.ns1.hosttech.eu/api/user/v1/zones/42/records/{0}'.format(record['id']))
            .result_str(''),
        ])
        assert result['changed'] is True
        assert result['zone_id'] == 42
    def test_absent_bulk(self, mocker):
        """state=present with an empty value list removes every existing record of the
        set; a 404 on one of the DELETEs (record vanished concurrently) is tolerated."""
        result = self.run_module_success(mocker, hosttech_dns_record_set, {
            'hosttech_token': 'foo',
            'state': 'present',
            'zone_name': 'example.com',
            'record': 'example.com',
            'type': 'NS',
            'value': [],
            '_ansible_remote_tmp': '/tmp/tmp',
            '_ansible_keep_remote_files': True,
        }, [
            FetchUrlCall('GET', 200)
            .expect_header('accept', 'application/json')
            .expect_header('authorization', 'Bearer foo')
            .expect_url('https://api.ns1.hosttech.eu/api/user/v1/zones', without_query=True)
            .expect_query_values('query', 'example.com')
            .return_header('Content-Type', 'application/json')
            .result_json(HOSTTECH_JSON_ZONE_LIST_RESULT),
            FetchUrlCall('GET', 200)
            .expect_header('accept', 'application/json')
            .expect_header('authorization', 'Bearer foo')
            .expect_url('https://api.ns1.hosttech.eu/api/user/v1/zones/42')
            .return_header('Content-Type', 'application/json')
            .result_json(HOSTTECH_JSON_ZONE_GET_RESULT),
            # All three NS records of the set are deleted one by one.
            FetchUrlCall('DELETE', 204)
            .expect_header('accept', 'application/json')
            .expect_header('authorization', 'Bearer foo')
            .expect_url('https://api.ns1.hosttech.eu/api/user/v1/zones/42/records/130')
            .result_str(''),
            FetchUrlCall('DELETE', 204)
            .expect_header('accept', 'application/json')
            .expect_header('authorization', 'Bearer foo')
            .expect_url('https://api.ns1.hosttech.eu/api/user/v1/zones/42/records/131')
            .result_str(''),
            # Record 132 has been deleted between querying and we trying to delete it
            FetchUrlCall('DELETE', 404)
            .expect_header('accept', 'application/json')
            .expect_header('authorization', 'Bearer foo')
            .expect_url('https://api.ns1.hosttech.eu/api/user/v1/zones/42/records/132')
            .return_header('Content-Type', 'application/json')
            .result_json({'message': 'record does not exist'}),
        ])
        assert result['changed'] is True
        assert result['zone_id'] == 42
    def test_absent_bulk_error(self, mocker):
        """Bulk removal like test_absent_bulk, but the second DELETE returns HTTP 500:
        the module must fail and report the unexpected status for that URL."""
        result = self.run_module_failed(mocker, hosttech_dns_record_set, {
            'hosttech_token': 'foo',
            'state': 'present',
            'zone_name': 'example.com',
            'record': 'example.com',
            'type': 'NS',
            'value': [],
            '_ansible_remote_tmp': '/tmp/tmp',
            '_ansible_keep_remote_files': True,
        }, [
            FetchUrlCall('GET', 200)
            .expect_header('accept', 'application/json')
            .expect_header('authorization', 'Bearer foo')
            .expect_url('https://api.ns1.hosttech.eu/api/user/v1/zones', without_query=True)
            .expect_query_values('query', 'example.com')
            .return_header('Content-Type', 'application/json')
            .result_json(HOSTTECH_JSON_ZONE_LIST_RESULT),
            FetchUrlCall('GET', 200)
            .expect_header('accept', 'application/json')
            .expect_header('authorization', 'Bearer foo')
            .expect_url('https://api.ns1.hosttech.eu/api/user/v1/zones/42')
            .return_header('Content-Type', 'application/json')
            .result_json(HOSTTECH_JSON_ZONE_GET_RESULT),
            # First deletion succeeds ...
            FetchUrlCall('DELETE', 204)
            .expect_header('accept', 'application/json')
            .expect_header('authorization', 'Bearer foo')
            .expect_url('https://api.ns1.hosttech.eu/api/user/v1/zones/42/records/130')
            .result_str(''),
            # ... the second fails with a server error, which aborts the run.
            FetchUrlCall('DELETE', 500)
            .expect_header('accept', 'application/json')
            .expect_header('authorization', 'Bearer foo')
            .expect_url('https://api.ns1.hosttech.eu/api/user/v1/zones/42/records/131')
            .return_header('Content-Type', 'application/json')
            .result_json({'message': 'Internal Server Error'}),
        ])
        assert result['msg'] == (
            'Error: Expected HTTP status 204, 404 for DELETE https://api.ns1.hosttech.eu/api/user/v1/zones/42/records/131,'
            ' but got HTTP status 500 (Internal Server Error) with message "Internal Server Error"'
        )
    def test_absent_other_value(self, mocker):
        """state=absent without specifying 'value': the existing record of that
        name/type is deleted regardless of its current value."""
        record = HOSTTECH_JSON_DEFAULT_ENTRIES[0]
        result = self.run_module_success(mocker, hosttech_dns_record_set, {
            'hosttech_token': 'foo',
            'state': 'absent',
            'zone_name': 'example.com',
            'record': record['name'] + 'example.com',
            'type': record['type'],
            '_ansible_remote_tmp': '/tmp/tmp',
            '_ansible_keep_remote_files': True,
        }, [
            FetchUrlCall('GET', 200)
            .expect_header('accept', 'application/json')
            .expect_header('authorization', 'Bearer foo')
            .expect_url('https://api.ns1.hosttech.eu/api/user/v1/zones', without_query=True)
            .expect_query_values('query', 'example.com')
            .return_header('Content-Type', 'application/json')
            .result_json(HOSTTECH_JSON_ZONE_LIST_RESULT),
            FetchUrlCall('GET', 200)
            .expect_header('accept', 'application/json')
            .expect_header('authorization', 'Bearer foo')
            .expect_url('https://api.ns1.hosttech.eu/api/user/v1/zones/42')
            .return_header('Content-Type', 'application/json')
            .result_json(HOSTTECH_JSON_ZONE_GET_RESULT),
            FetchUrlCall('DELETE', 204)
            .expect_header('accept', 'application/json')
            .expect_header('authorization', 'Bearer foo')
            .expect_url('https://api.ns1.hosttech.eu/api/user/v1/zones/42/records/{0}'.format(record['id']))
            .result_str(''),
        ])
        assert result['changed'] is True
        assert result['zone_id'] == 42
def test_change_add_one_check_mode(self, mocker):
result = self.run_module_success(mocker, hosttech_dns_record_set, {
'hosttech_token': 'foo',
'state': 'present',
'zone_id': 42,
'record': 'example.com',
'type': 'CAA',
'ttl': 3600,
'value': [
'0 issue "letsencrypt.org"',
],
'_ansible_check_mode': True,
'_ansible_remote_tmp': '/tmp/tmp',
'_ansible_keep_remote_files': True,
}, [
FetchUrlCall('GET', 200)
.expect_header('accept', 'application/json')
.expect_header('authorization', 'Bearer foo')
.expect_url('https://api.ns1.hosttech.eu/api/user/v1/zones/42')
.return_header('Content-Type', 'application/json')
.result_json(HOSTTECH_JSON_ZONE_GET_RESULT),
])
assert result['changed'] is True
assert result['zone_id'] == 42
    def test_change_add_one_check_mode_prefix(self, mocker):
        """Check mode with diff mode, addressing the record set by empty 'prefix':
        only the type-filtered records endpoint is queried, and the diff shows an
        empty 'before' and the would-be-created set as 'after'."""
        result = self.run_module_success(mocker, hosttech_dns_record_set, {
            'hosttech_token': 'foo',
            'state': 'present',
            'zone_id': 42,
            'prefix': '',
            'type': 'CAA',
            'ttl': 3600,
            'value': [
                '0 issue "letsencrypt.org"',
            ],
            '_ansible_diff': True,
            '_ansible_check_mode': True,
            '_ansible_remote_tmp': '/tmp/tmp',
            '_ansible_keep_remote_files': True,
        }, [
            # With a prefix given, only records of the requested type are fetched.
            FetchUrlCall('GET', 200)
            .expect_header('accept', 'application/json')
            .expect_header('authorization', 'Bearer foo')
            .expect_url('https://api.ns1.hosttech.eu/api/user/v1/zones/42/records', without_query=True)
            .expect_query_values('type', 'CAA')
            .return_header('Content-Type', 'application/json')
            .result_json(HOSTTECH_JSON_ZONE_RECORDS_GET_RESULT),
        ])
        assert result['changed'] is True
        assert result['zone_id'] == 42
        assert 'diff' in result
        assert 'before' in result['diff']
        assert 'after' in result['diff']
        assert result['diff']['before'] == {}
        assert result['diff']['after'] == {
            'prefix': '',
            'type': 'CAA',
            'ttl': 3600,
            'value': ['0 issue "letsencrypt.org"'],
        }
    def test_change_add_one(self, mocker):
        """Creating a new CAA record: the value string is split into the API's
        flag/tag/value fields and POSTed without an 'id'."""
        result = self.run_module_success(mocker, hosttech_dns_record_set, {
            'hosttech_token': 'foo',
            'state': 'present',
            'zone_name': 'example.com',
            'record': 'example.com',
            'type': 'CAA',
            'ttl': 3600,
            'value': [
                '128 issue "letsencrypt.org xxx"',
            ],
            '_ansible_remote_tmp': '/tmp/tmp',
            '_ansible_keep_remote_files': True,
        }, [
            FetchUrlCall('GET', 200)
            .expect_header('accept', 'application/json')
            .expect_header('authorization', 'Bearer foo')
            .expect_url('https://api.ns1.hosttech.eu/api/user/v1/zones', without_query=True)
            .expect_query_values('query', 'example.com')
            .return_header('Content-Type', 'application/json')
            .result_json(HOSTTECH_JSON_ZONE_LIST_RESULT),
            FetchUrlCall('GET', 200)
            .expect_header('accept', 'application/json')
            .expect_header('authorization', 'Bearer foo')
            .expect_url('https://api.ns1.hosttech.eu/api/user/v1/zones/42')
            .return_header('Content-Type', 'application/json')
            .result_json(HOSTTECH_JSON_ZONE_GET_RESULT),
            # New record is created via POST; '128 issue "..."' maps to flag/tag/value.
            FetchUrlCall('POST', 201)
            .expect_header('accept', 'application/json')
            .expect_header('authorization', 'Bearer foo')
            .expect_url('https://api.ns1.hosttech.eu/api/user/v1/zones/42/records')
            .expect_json_value_absent(['id'])
            .expect_json_value(['type'], 'CAA')
            .expect_json_value(['ttl'], 3600)
            .expect_json_value(['comment'], '')
            .expect_json_value(['name'], '')
            .expect_json_value(['flag'], '128')
            .expect_json_value(['tag'], 'issue')
            .expect_json_value(['value'], 'letsencrypt.org xxx')
            .return_header('Content-Type', 'application/json')
            .result_json({
                'data': {
                    'id': 133,
                    'type': 'CAA',
                    'name': '',
                    'flag': '128',
                    'tag': 'issue',
                    'value': 'letsencrypt.org xxx',
                    'ttl': 3600,
                    'comment': '',
                },
            }),
        ])
        assert result['changed'] is True
        assert result['zone_id'] == 42
    def test_change_add_one_prefix(self, mocker):
        """Creating a new CAA record addressed via an empty 'prefix' (apex record)
        instead of 'record'; same POST shape as test_change_add_one."""
        result = self.run_module_success(mocker, hosttech_dns_record_set, {
            'hosttech_token': 'foo',
            'state': 'present',
            'zone_name': 'example.com',
            'prefix': '',
            'type': 'CAA',
            'ttl': 3600,
            'value': [
                '128 issue "letsencrypt.org"',
            ],
            '_ansible_remote_tmp': '/tmp/tmp',
            '_ansible_keep_remote_files': True,
        }, [
            FetchUrlCall('GET', 200)
            .expect_header('accept', 'application/json')
            .expect_header('authorization', 'Bearer foo')
            .expect_url('https://api.ns1.hosttech.eu/api/user/v1/zones', without_query=True)
            .expect_query_values('query', 'example.com')
            .return_header('Content-Type', 'application/json')
            .result_json(HOSTTECH_JSON_ZONE_LIST_RESULT),
            FetchUrlCall('GET', 200)
            .expect_header('accept', 'application/json')
            .expect_header('authorization', 'Bearer foo')
            .expect_url('https://api.ns1.hosttech.eu/api/user/v1/zones/42')
            .return_header('Content-Type', 'application/json')
            .result_json(HOSTTECH_JSON_ZONE_GET_RESULT),
            # Empty prefix maps to an empty 'name' in the created record.
            FetchUrlCall('POST', 201)
            .expect_header('accept', 'application/json')
            .expect_header('authorization', 'Bearer foo')
            .expect_url('https://api.ns1.hosttech.eu/api/user/v1/zones/42/records')
            .expect_json_value_absent(['id'])
            .expect_json_value(['type'], 'CAA')
            .expect_json_value(['ttl'], 3600)
            .expect_json_value(['comment'], '')
            .expect_json_value(['name'], '')
            .expect_json_value(['flag'], '128')
            .expect_json_value(['tag'], 'issue')
            .expect_json_value(['value'], 'letsencrypt.org')
            .return_header('Content-Type', 'application/json')
            .result_json({
                'data': {
                    'id': 133,
                    'type': 'CAA',
                    'name': '',
                    'flag': '128',
                    'tag': 'issue',
                    'value': 'letsencrypt.org',
                    'ttl': 3600,
                    'comment': '',
                },
            }),
        ])
        assert result['changed'] is True
        assert result['zone_id'] == 42
    def test_change_add_one_idn_prefix(self, mocker):
        """Creating a record with an internationalized prefix: the Unicode prefix
        '☺' must be sent to the API in its punycode form 'xn--74h'."""
        result = self.run_module_success(mocker, hosttech_dns_record_set, {
            'hosttech_token': 'foo',
            'state': 'present',
            'zone_name': 'example.com',
            'prefix': '☺',
            'type': 'CAA',
            'ttl': 3600,
            'value': [
                '128 issue "letsencrypt.org"',
            ],
            '_ansible_remote_tmp': '/tmp/tmp',
            '_ansible_keep_remote_files': True,
        }, [
            FetchUrlCall('GET', 200)
            .expect_header('accept', 'application/json')
            .expect_header('authorization', 'Bearer foo')
            .expect_url('https://api.ns1.hosttech.eu/api/user/v1/zones', without_query=True)
            .expect_query_values('query', 'example.com')
            .return_header('Content-Type', 'application/json')
            .result_json(HOSTTECH_JSON_ZONE_LIST_RESULT),
            FetchUrlCall('GET', 200)
            .expect_header('accept', 'application/json')
            .expect_header('authorization', 'Bearer foo')
            .expect_url('https://api.ns1.hosttech.eu/api/user/v1/zones/42')
            .return_header('Content-Type', 'application/json')
            .result_json(HOSTTECH_JSON_ZONE_GET_RESULT),
            # 'name' must be the IDNA/punycode encoding of the Unicode prefix.
            FetchUrlCall('POST', 201)
            .expect_header('accept', 'application/json')
            .expect_header('authorization', 'Bearer foo')
            .expect_url('https://api.ns1.hosttech.eu/api/user/v1/zones/42/records')
            .expect_json_value_absent(['id'])
            .expect_json_value(['type'], 'CAA')
            .expect_json_value(['ttl'], 3600)
            .expect_json_value(['comment'], '')
            .expect_json_value(['name'], 'xn--74h')
            .expect_json_value(['flag'], '128')
            .expect_json_value(['tag'], 'issue')
            .expect_json_value(['value'], 'letsencrypt.org')
            .return_header('Content-Type', 'application/json')
            .result_json({
                'data': {
                    'id': 133,
                    'type': 'CAA',
                    'name': 'xn--74h',
                    'flag': '128',
                    'tag': 'issue',
                    'value': 'letsencrypt.org',
                    'ttl': 3600,
                    'comment': '',
                },
            }),
        ])
        assert result['changed'] is True
        assert result['zone_id'] == 42
    def test_change_add_one_fail(self, mocker):
        """Record creation where the POST returns HTTP 500: the module must fail
        and include the expected/actual HTTP status plus the server message."""
        result = self.run_module_failed(mocker, hosttech_dns_record_set, {
            'hosttech_token': 'foo',
            'state': 'present',
            'zone_name': 'example.com',
            'prefix': '☺',
            'type': 'CAA',
            'ttl': 3600,
            'value': [
                '128 issue "letsencrypt.org"',
            ],
            '_ansible_remote_tmp': '/tmp/tmp',
            '_ansible_keep_remote_files': True,
        }, [
            FetchUrlCall('GET', 200)
            .expect_header('accept', 'application/json')
            .expect_header('authorization', 'Bearer foo')
            .expect_url('https://api.ns1.hosttech.eu/api/user/v1/zones', without_query=True)
            .expect_query_values('query', 'example.com')
            .return_header('Content-Type', 'application/json')
            .result_json(HOSTTECH_JSON_ZONE_LIST_RESULT),
            FetchUrlCall('GET', 200)
            .expect_header('accept', 'application/json')
            .expect_header('authorization', 'Bearer foo')
            .expect_url('https://api.ns1.hosttech.eu/api/user/v1/zones/42')
            .return_header('Content-Type', 'application/json')
            .result_json(HOSTTECH_JSON_ZONE_GET_RESULT),
            # The creation request itself fails with a server error.
            FetchUrlCall('POST', 500)
            .expect_header('accept', 'application/json')
            .expect_header('authorization', 'Bearer foo')
            .expect_url('https://api.ns1.hosttech.eu/api/user/v1/zones/42/records')
            .expect_json_value_absent(['id'])
            .expect_json_value(['type'], 'CAA')
            .expect_json_value(['ttl'], 3600)
            .expect_json_value(['comment'], '')
            .expect_json_value(['name'], 'xn--74h')
            .expect_json_value(['flag'], '128')
            .expect_json_value(['tag'], 'issue')
            .expect_json_value(['value'], 'letsencrypt.org')
            .return_header('Content-Type', 'application/json')
            .result_json({'message': 'Internal Server Error'}),
        ])
        assert result['msg'] == (
            'Error: Expected HTTP status 201 for POST https://api.ns1.hosttech.eu/api/user/v1/zones/42/records,'
            ' but got HTTP status 500 (Internal Server Error) with message "Internal Server Error"'
        )
    def test_change_modify_list_fail(self, mocker):
        """state=present with a value differing from the existing NS set and
        on_existing=keep_and_fail: the module fails without issuing write calls."""
        result = self.run_module_failed(mocker, hosttech_dns_record_set, {
            'hosttech_token': 'foo',
            'state': 'present',
            'zone_name': 'example.com',
            'record': 'example.com',
            'type': 'NS',
            'ttl': 10800,
            'value': [
                'ns1.hostserv.eu',
                'ns4.hostserv.eu',
            ],
            'on_existing': 'keep_and_fail',
            '_ansible_remote_tmp': '/tmp/tmp',
            '_ansible_keep_remote_files': True,
        }, [
            # Read-only interaction: zone lookup and zone contents.
            FetchUrlCall('GET', 200)
            .expect_header('accept', 'application/json')
            .expect_header('authorization', 'Bearer foo')
            .expect_url('https://api.ns1.hosttech.eu/api/user/v1/zones', without_query=True)
            .expect_query_values('query', 'example.com')
            .return_header('Content-Type', 'application/json')
            .result_json(HOSTTECH_JSON_ZONE_LIST_RESULT),
            FetchUrlCall('GET', 200)
            .expect_header('accept', 'application/json')
            .expect_header('authorization', 'Bearer foo')
            .expect_url('https://api.ns1.hosttech.eu/api/user/v1/zones/42')
            .return_header('Content-Type', 'application/json')
            .result_json(HOSTTECH_JSON_ZONE_GET_RESULT),
        ])
        assert result['msg'] == "Record already exists with different value. Set on_existing=replace to replace it"
    def test_change_modify_list_warn(self, mocker):
        """Like test_change_modify_list_fail but with on_existing=keep_and_warn:
        no change is made, a warning is emitted, and the diff shows the existing
        set unchanged (before == after)."""
        result = self.run_module_success(mocker, hosttech_dns_record_set, {
            'hosttech_token': 'foo',
            'state': 'present',
            'zone_name': 'example.com',
            'record': 'example.com',
            'type': 'NS',
            'ttl': 10800,
            'value': [
                'ns1.hostserv.eu',
                'ns4.hostserv.eu',
            ],
            'on_existing': 'keep_and_warn',
            '_ansible_diff': True,
            '_ansible_remote_tmp': '/tmp/tmp',
            '_ansible_keep_remote_files': True,
        }, [
            FetchUrlCall('GET', 200)
            .expect_header('accept', 'application/json')
            .expect_header('authorization', 'Bearer foo')
            .expect_url('https://api.ns1.hosttech.eu/api/user/v1/zones', without_query=True)
            .expect_query_values('query', 'example.com')
            .return_header('Content-Type', 'application/json')
            .result_json(HOSTTECH_JSON_ZONE_LIST_RESULT),
            FetchUrlCall('GET', 200)
            .expect_header('accept', 'application/json')
            .expect_header('authorization', 'Bearer foo')
            .expect_url('https://api.ns1.hosttech.eu/api/user/v1/zones/42')
            .return_header('Content-Type', 'application/json')
            .result_json(HOSTTECH_JSON_ZONE_GET_RESULT),
        ])
        assert result['changed'] is False
        assert result['zone_id'] == 42
        assert 'diff' in result
        assert 'before' in result['diff']
        assert 'after' in result['diff']
        assert result['diff']['before'] == {
            'record': 'example.com',
            'prefix': '',
            'type': 'NS',
            'ttl': 10800,
            'value': ['ns1.hostserv.eu', 'ns2.hostserv.eu', 'ns3.hostserv.eu'],
        }
        assert result['diff']['after'] == result['diff']['before']
        assert list(result['warnings']) == ["Record already exists with different value. Set on_existing=replace to replace it"]
    def test_change_modify_list_keep(self, mocker):
        """Like test_change_modify_list_warn but with on_existing=keep: same
        no-op result and identical diff, but with no warnings at all."""
        result = self.run_module_success(mocker, hosttech_dns_record_set, {
            'hosttech_token': 'foo',
            'state': 'present',
            'zone_name': 'example.com',
            'record': 'example.com',
            'type': 'NS',
            'ttl': 10800,
            'value': [
                'ns1.hostserv.eu',
                'ns4.hostserv.eu',
            ],
            'on_existing': 'keep',
            '_ansible_diff': True,
            '_ansible_remote_tmp': '/tmp/tmp',
            '_ansible_keep_remote_files': True,
        }, [
            FetchUrlCall('GET', 200)
            .expect_header('accept', 'application/json')
            .expect_header('authorization', 'Bearer foo')
            .expect_url('https://api.ns1.hosttech.eu/api/user/v1/zones', without_query=True)
            .expect_query_values('query', 'example.com')
            .return_header('Content-Type', 'application/json')
            .result_json(HOSTTECH_JSON_ZONE_LIST_RESULT),
            FetchUrlCall('GET', 200)
            .expect_header('accept', 'application/json')
            .expect_header('authorization', 'Bearer foo')
            .expect_url('https://api.ns1.hosttech.eu/api/user/v1/zones/42')
            .return_header('Content-Type', 'application/json')
            .result_json(HOSTTECH_JSON_ZONE_GET_RESULT),
        ])
        assert 'warnings' not in result
        assert result['changed'] is False
        assert result['zone_id'] == 42
        assert 'diff' in result
        assert 'before' in result['diff']
        assert 'after' in result['diff']
        assert result['diff']['before'] == {
            'record': 'example.com',
            'prefix': '',
            'type': 'NS',
            'ttl': 10800,
            'value': ['ns1.hostserv.eu', 'ns2.hostserv.eu', 'ns3.hostserv.eu'],
        }
        assert result['diff']['after'] == result['diff']['before']
    def test_change_modify_list(self, mocker):
        """Replacing an existing 3-entry NS set with a 2-entry one (default
        on_existing): one surplus record is DELETEd and one is reused via PUT;
        the diff reflects the before/after value lists."""
        result = self.run_module_success(mocker, hosttech_dns_record_set, {
            'hosttech_token': 'foo',
            'state': 'present',
            'zone_name': 'example.com',
            'record': 'example.com',
            'type': 'NS',
            'ttl': 10800,
            'value': [
                'ns1.hostserv.eu',
                'ns4.hostserv.eu',
            ],
            '_ansible_diff': True,
            '_ansible_remote_tmp': '/tmp/tmp',
            '_ansible_keep_remote_files': True,
        }, [
            FetchUrlCall('GET', 200)
            .expect_header('accept', 'application/json')
            .expect_header('authorization', 'Bearer foo')
            .expect_url('https://api.ns1.hosttech.eu/api/user/v1/zones', without_query=True)
            .expect_query_values('query', 'example.com')
            .return_header('Content-Type', 'application/json')
            .result_json(HOSTTECH_JSON_ZONE_LIST_RESULT),
            FetchUrlCall('GET', 200)
            .expect_header('accept', 'application/json')
            .expect_header('authorization', 'Bearer foo')
            .expect_url('https://api.ns1.hosttech.eu/api/user/v1/zones/42')
            .return_header('Content-Type', 'application/json')
            .result_json(HOSTTECH_JSON_ZONE_GET_RESULT),
            # One record of the old set is removed ...
            FetchUrlCall('DELETE', 204)
            .expect_header('accept', 'application/json')
            .expect_header('authorization', 'Bearer foo')
            .expect_url('https://api.ns1.hosttech.eu/api/user/v1/zones/42/records/130')
            .result_str(''),
            # ... and another is rewritten in place to the new target name.
            FetchUrlCall('PUT', 200)
            .expect_header('accept', 'application/json')
            .expect_header('authorization', 'Bearer foo')
            .expect_url('https://api.ns1.hosttech.eu/api/user/v1/zones/42/records/131')
            .expect_json_value_absent(['id'])
            .expect_json_value_absent(['type'])
            .expect_json_value(['ttl'], 10800)
            .expect_json_value(['comment'], '')
            .expect_json_value(['ownername'], '')
            .expect_json_value(['targetname'], 'ns4.hostserv.eu')
            .return_header('Content-Type', 'application/json')
            .result_json({
                'data': {
                    'id': 131,
                    'type': 'NS',
                    'ownername': '',
                    'targetname': 'ns4.hostserv.eu',
                    'ttl': 10800,
                    'comment': '',
                },
            }),
        ])
        assert result['changed'] is True
        assert result['zone_id'] == 42
        assert 'diff' in result
        assert 'before' in result['diff']
        assert 'after' in result['diff']
        assert result['diff']['before'] == {
            'record': 'example.com',
            'prefix': '',
            'type': 'NS',
            'ttl': 10800,
            'value': ['ns1.hostserv.eu', 'ns2.hostserv.eu', 'ns3.hostserv.eu'],
        }
        assert result['diff']['after'] == {
            'record': 'example.com',
            'prefix': '',
            'type': 'NS',
            'ttl': 10800,
            'value': ['ns1.hostserv.eu', 'ns4.hostserv.eu'],
        }
    def test_change_modify_bulk(self, mocker):
        """Growing an NS set from 3 to 6 entries: the three existing records
        (IDs 132, 131, 130) are rewritten via PUT to a1..a3, and three new
        records are created via POST for a4..a6. Without diff mode, no 'diff'
        key appears in the result."""
        result = self.run_module_success(mocker, hosttech_dns_record_set, {
            'hosttech_token': 'foo',
            'state': 'present',
            'zone_name': 'example.com',
            'record': 'example.com',
            'type': 'NS',
            'ttl': 10800,
            'value': [
                'a1',
                'a2',
                'a3',
                'a4',
                'a5',
                'a6',
            ],
            '_ansible_remote_tmp': '/tmp/tmp',
            '_ansible_keep_remote_files': True,
        }, [
            FetchUrlCall('GET', 200)
            .expect_header('accept', 'application/json')
            .expect_header('authorization', 'Bearer foo')
            .expect_url('https://api.ns1.hosttech.eu/api/user/v1/zones', without_query=True)
            .expect_query_values('query', 'example.com')
            .return_header('Content-Type', 'application/json')
            .result_json(HOSTTECH_JSON_ZONE_LIST_RESULT),
            FetchUrlCall('GET', 200)
            .expect_header('accept', 'application/json')
            .expect_header('authorization', 'Bearer foo')
            .expect_url('https://api.ns1.hosttech.eu/api/user/v1/zones/42')
            .return_header('Content-Type', 'application/json')
            .result_json(HOSTTECH_JSON_ZONE_GET_RESULT),
            # Existing records are reused (PUT); 'id' and 'type' must not be in the payload.
            FetchUrlCall('PUT', 200)
            .expect_header('accept', 'application/json')
            .expect_header('authorization', 'Bearer foo')
            .expect_url('https://api.ns1.hosttech.eu/api/user/v1/zones/42/records/132')
            .expect_json_value_absent(['id'])
            .expect_json_value_absent(['type'])
            .expect_json_value(['ttl'], 10800)
            .expect_json_value(['comment'], '')
            .expect_json_value(['ownername'], '')
            .expect_json_value(['targetname'], 'a1')
            .return_header('Content-Type', 'application/json')
            .result_json({
                'data': {
                    'id': 132,
                    'type': 'NS',
                    'ownername': '',
                    'targetname': 'a1',
                    'ttl': 10800,
                    'comment': '',
                },
            }),
            FetchUrlCall('PUT', 200)
            .expect_header('accept', 'application/json')
            .expect_header('authorization', 'Bearer foo')
            .expect_url('https://api.ns1.hosttech.eu/api/user/v1/zones/42/records/131')
            .expect_json_value_absent(['id'])
            .expect_json_value_absent(['type'])
            .expect_json_value(['ttl'], 10800)
            .expect_json_value(['comment'], '')
            .expect_json_value(['ownername'], '')
            .expect_json_value(['targetname'], 'a2')
            .return_header('Content-Type', 'application/json')
            .result_json({
                'data': {
                    'id': 131,
                    'type': 'NS',
                    'ownername': '',
                    'targetname': 'a2',
                    'ttl': 10800,
                    'comment': '',
                },
            }),
            FetchUrlCall('PUT', 200)
            .expect_header('accept', 'application/json')
            .expect_header('authorization', 'Bearer foo')
            .expect_url('https://api.ns1.hosttech.eu/api/user/v1/zones/42/records/130')
            .expect_json_value_absent(['id'])
            .expect_json_value_absent(['type'])
            .expect_json_value(['ttl'], 10800)
            .expect_json_value(['comment'], '')
            .expect_json_value(['ownername'], '')
            .expect_json_value(['targetname'], 'a3')
            .return_header('Content-Type', 'application/json')
            .result_json({
                'data': {
                    'id': 130,
                    'type': 'NS',
                    'ownername': '',
                    'targetname': 'a3',
                    'ttl': 10800,
                    'comment': '',
                },
            }),
            # Remaining values need new records (POST); here 'type' must be included.
            FetchUrlCall('POST', 201)
            .expect_header('accept', 'application/json')
            .expect_header('authorization', 'Bearer foo')
            .expect_url('https://api.ns1.hosttech.eu/api/user/v1/zones/42/records')
            .expect_json_value_absent(['id'])
            .expect_json_value(['type'], 'NS')
            .expect_json_value(['ttl'], 10800)
            .expect_json_value(['comment'], '')
            .expect_json_value(['ownername'], '')
            .expect_json_value(['targetname'], 'a4')
            .return_header('Content-Type', 'application/json')
            .result_json({
                'data': {
                    'id': 300,
                    'type': 'NS',
                    'ownername': '',
                    'targetname': 'a4',
                    'ttl': 10800,
                    'comment': '',
                },
            }),
            FetchUrlCall('POST', 201)
            .expect_header('accept', 'application/json')
            .expect_header('authorization', 'Bearer foo')
            .expect_url('https://api.ns1.hosttech.eu/api/user/v1/zones/42/records')
            .expect_json_value_absent(['id'])
            .expect_json_value(['type'], 'NS')
            .expect_json_value(['ttl'], 10800)
            .expect_json_value(['comment'], '')
            .expect_json_value(['ownername'], '')
            .expect_json_value(['targetname'], 'a5')
            .return_header('Content-Type', 'application/json')
            .result_json({
                'data': {
                    'id': 301,
                    'type': 'NS',
                    'ownername': '',
                    'targetname': 'a5',
                    'ttl': 10800,
                    'comment': '',
                },
            }),
            FetchUrlCall('POST', 201)
            .expect_header('accept', 'application/json')
            .expect_header('authorization', 'Bearer foo')
            .expect_url('https://api.ns1.hosttech.eu/api/user/v1/zones/42/records')
            .expect_json_value_absent(['id'])
            .expect_json_value(['type'], 'NS')
            .expect_json_value(['ttl'], 10800)
            .expect_json_value(['comment'], '')
            .expect_json_value(['ownername'], '')
            .expect_json_value(['targetname'], 'a6')
            .return_header('Content-Type', 'application/json')
            .result_json({
                'data': {
                    'id': 302,
                    'type': 'NS',
                    'ownername': '',
                    'targetname': 'a6',
                    'ttl': 10800,
                    'comment': '',
                },
            }),
        ])
        assert result['changed'] is True
        assert result['zone_id'] == 42
        assert 'diff' not in result
    def test_change_modify_bulk_errors_update(self, mocker):
        """Bulk modification like test_change_modify_bulk, but the very first PUT
        fails with HTTP 500: the module must fail and report the bad status."""
        result = self.run_module_failed(mocker, hosttech_dns_record_set, {
            'hosttech_token': 'foo',
            'state': 'present',
            'zone_name': 'example.com',
            'record': 'example.com',
            'type': 'NS',
            'ttl': 10800,
            'value': [
                'a1',
                'a2',
                'a3',
                'a4',
                'a5',
                'a6',
            ],
            '_ansible_remote_tmp': '/tmp/tmp',
            '_ansible_keep_remote_files': True,
        }, [
            FetchUrlCall('GET', 200)
            .expect_header('accept', 'application/json')
            .expect_header('authorization', 'Bearer foo')
            .expect_url('https://api.ns1.hosttech.eu/api/user/v1/zones', without_query=True)
            .expect_query_values('query', 'example.com')
            .return_header('Content-Type', 'application/json')
            .result_json(HOSTTECH_JSON_ZONE_LIST_RESULT),
            FetchUrlCall('GET', 200)
            .expect_header('accept', 'application/json')
            .expect_header('authorization', 'Bearer foo')
            .expect_url('https://api.ns1.hosttech.eu/api/user/v1/zones/42')
            .return_header('Content-Type', 'application/json')
            .result_json(HOSTTECH_JSON_ZONE_GET_RESULT),
            # The first record update fails; no further calls are expected.
            FetchUrlCall('PUT', 500)
            .expect_header('accept', 'application/json')
            .expect_header('authorization', 'Bearer foo')
            .expect_url('https://api.ns1.hosttech.eu/api/user/v1/zones/42/records/132')
            .expect_json_value_absent(['id'])
            .expect_json_value_absent(['type'])
            .expect_json_value(['ttl'], 10800)
            .expect_json_value(['comment'], '')
            .expect_json_value(['ownername'], '')
            .expect_json_value(['targetname'], 'a1')
            .return_header('Content-Type', 'application/json')
            .result_json({'message': 'Internal Server Error'}),
        ])
        assert result['msg'] == (
            'Error: Expected HTTP status 200 for PUT https://api.ns1.hosttech.eu/api/user/v1/zones/42/records/132,'
            ' but got HTTP status 500 (Internal Server Error) with message "Internal Server Error"'
        )
def test_change_modify_bulk_errors_create(self, mocker):
    # Bulk update of an NS record set where the three existing records are
    # updated successfully (PUT), but the first *creation* of a new record
    # (POST) fails with HTTP 500: the module must abort and report the error.
    result = self.run_module_failed(mocker, hosttech_dns_record_set, {
        'hosttech_token': 'foo',
        'state': 'present',
        'zone_name': 'example.com',
        'record': 'example.com',
        'type': 'NS',
        'ttl': 10800,
        'value': [
            'a1',
            'a2',
            'a3',
            'a4',
            'a5',
            'a6',
        ],
        '_ansible_remote_tmp': '/tmp/tmp',
        '_ansible_keep_remote_files': True,
    }, [
        # 1) Zone lookup by name.
        FetchUrlCall('GET', 200)
        .expect_header('accept', 'application/json')
        .expect_header('authorization', 'Bearer foo')
        .expect_url('https://api.ns1.hosttech.eu/api/user/v1/zones', without_query=True)
        .expect_query_values('query', 'example.com')
        .return_header('Content-Type', 'application/json')
        .result_json(HOSTTECH_JSON_ZONE_LIST_RESULT),
        # 2) Fetch the zone's current records (zone id 42).
        FetchUrlCall('GET', 200)
        .expect_header('accept', 'application/json')
        .expect_header('authorization', 'Bearer foo')
        .expect_url('https://api.ns1.hosttech.eu/api/user/v1/zones/42')
        .return_header('Content-Type', 'application/json')
        .result_json(HOSTTECH_JSON_ZONE_GET_RESULT),
        # 3) Update existing record 132 -> targetname 'a1' (succeeds).
        FetchUrlCall('PUT', 200)
        .expect_header('accept', 'application/json')
        .expect_header('authorization', 'Bearer foo')
        .expect_url('https://api.ns1.hosttech.eu/api/user/v1/zones/42/records/132')
        .expect_json_value_absent(['id'])
        .expect_json_value_absent(['type'])
        .expect_json_value(['ttl'], 10800)
        .expect_json_value(['comment'], '')
        .expect_json_value(['ownername'], '')
        .expect_json_value(['targetname'], 'a1')
        .return_header('Content-Type', 'application/json')
        .result_json({
            'data': {
                'id': 132,
                'type': 'NS',
                'ownername': '',
                'targetname': 'a1',
                'ttl': 10800,
                'comment': '',
            },
        }),
        # 4) Update existing record 131 -> targetname 'a2' (succeeds).
        FetchUrlCall('PUT', 200)
        .expect_header('accept', 'application/json')
        .expect_header('authorization', 'Bearer foo')
        .expect_url('https://api.ns1.hosttech.eu/api/user/v1/zones/42/records/131')
        .expect_json_value_absent(['id'])
        .expect_json_value_absent(['type'])
        .expect_json_value(['ttl'], 10800)
        .expect_json_value(['comment'], '')
        .expect_json_value(['ownername'], '')
        .expect_json_value(['targetname'], 'a2')
        .return_header('Content-Type', 'application/json')
        .result_json({
            'data': {
                'id': 131,
                'type': 'NS',
                'ownername': '',
                'targetname': 'a2',
                'ttl': 10800,
                'comment': '',
            },
        }),
        # 5) Update existing record 130 -> targetname 'a3' (succeeds).
        FetchUrlCall('PUT', 200)
        .expect_header('accept', 'application/json')
        .expect_header('authorization', 'Bearer foo')
        .expect_url('https://api.ns1.hosttech.eu/api/user/v1/zones/42/records/130')
        .expect_json_value_absent(['id'])
        .expect_json_value_absent(['type'])
        .expect_json_value(['ttl'], 10800)
        .expect_json_value(['comment'], '')
        .expect_json_value(['ownername'], '')
        .expect_json_value(['targetname'], 'a3')
        .return_header('Content-Type', 'application/json')
        .result_json({
            'data': {
                'id': 130,
                'type': 'NS',
                'ownername': '',
                'targetname': 'a3',
                'ttl': 10800,
                'comment': '',
            },
        }),
        # 6) Create new record 'a4' (POST, includes 'type') fails server-side;
        #    no further API calls are expected after this error.
        FetchUrlCall('POST', 500)
        .expect_header('accept', 'application/json')
        .expect_header('authorization', 'Bearer foo')
        .expect_url('https://api.ns1.hosttech.eu/api/user/v1/zones/42/records')
        .expect_json_value_absent(['id'])
        .expect_json_value(['type'], 'NS')
        .expect_json_value(['ttl'], 10800)
        .expect_json_value(['comment'], '')
        .expect_json_value(['ownername'], '')
        .expect_json_value(['targetname'], 'a4')
        .return_header('Content-Type', 'application/json')
        .result_json({'message': 'Internal Server Error'}),
    ])
    # Creation expects HTTP 201; the failure message names the POST request
    # and echoes the API's error payload.
    assert result['msg'] == (
        'Error: Expected HTTP status 201 for POST https://api.ns1.hosttech.eu/api/user/v1/zones/42/records,'
        ' but got HTTP status 500 (Internal Server Error) with message "Internal Server Error"'
    )
| 41.820958
| 140
| 0.532323
| 7,863
| 79,418
| 5.110009
| 0.036246
| 0.04659
| 0.038825
| 0.039248
| 0.958935
| 0.950299
| 0.947063
| 0.942384
| 0.938776
| 0.935341
| 0
| 0.02629
| 0.319411
| 79,418
| 1,898
| 141
| 41.842993
| 0.717045
| 0.003526
| 0
| 0.923077
| 0
| 0.01728
| 0.296508
| 0.019512
| 0
| 0
| 0
| 0
| 0.059643
| 1
| 0.024526
| false
| 0.007246
| 0.004459
| 0
| 0.03233
| 0.000557
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
5dc7cfeab4fb02a850c14aeb0eae1ccca696df5b
| 16,726
|
py
|
Python
|
tests/plot/test_imshow_grid.py
|
hjgray10/landlab
|
fb3238d46a7ce8897f148fe315a492e0f8028046
|
[
"MIT"
] | null | null | null |
tests/plot/test_imshow_grid.py
|
hjgray10/landlab
|
fb3238d46a7ce8897f148fe315a492e0f8028046
|
[
"MIT"
] | 1
|
2021-11-11T21:23:46.000Z
|
2021-11-11T21:23:46.000Z
|
tests/plot/test_imshow_grid.py
|
hjgray10/landlab
|
fb3238d46a7ce8897f148fe315a492e0f8028046
|
[
"MIT"
] | null | null | null |
import matplotlib.pyplot as plt
import numpy as np
import pytest
from matplotlib.backends.backend_pdf import PdfPages
import landlab
@pytest.mark.slow
def test_imshow_grid():
    """Smoke-test ``imshow_grid`` at nodes and cells, saving figures to a PDF.

    Fix: the original opened ``PdfPages("test.pdf")`` and only closed it at
    the end, leaking the file handle if any plotting call raised.  Using the
    PdfPages context manager guarantees the PDF is finalized either way.
    """
    rmg = landlab.RasterModelGrid((4, 5))
    with PdfPages("test.pdf") as pp:
        values = np.arange(rmg.number_of_nodes)
        landlab.plot.imshow_grid(rmg, values, values_at="node", limits=(0, 20))
        pp.savefig()
        plt.clf()
        # Close one node to exercise the closed-node masking path for cells.
        rmg.status_at_node[7] = rmg.BC_NODE_IS_CLOSED
        values = np.arange(rmg.number_of_cells)
        landlab.plot.imshow_grid(rmg, values, values_at="cell", symmetric_cbar=True)
        pp.savefig()
def test_imshow_grid_input():
    """A value array one element short of node count must raise ValueError."""
    grid = landlab.RasterModelGrid((4, 5))
    too_short = np.arange(grid.number_of_nodes - 1)
    with pytest.raises(ValueError):
        landlab.plot.imshow_grid(grid, too_short, values_at="node", limits=(0, 20))
def test_imshowhs_grid_input():
    """imshowhs_grid rejects a value array shorter than the node count."""
    grid = landlab.RasterModelGrid((4, 5))
    too_short = np.arange(grid.number_of_nodes - 1)
    with pytest.raises(ValueError):
        landlab.plot.imshowhs_grid(grid, too_short, values_at="node", limits=(0, 20))
def test_imshowhs_grid_input_Layer1():
    """A Drape1 array shorter than the node count must raise ValueError."""
    grid = landlab.RasterModelGrid((4, 5))
    grid.add_zeros("topographic__elevation", at="node")
    bad_drape = np.arange(grid.number_of_nodes - 1)
    with pytest.raises(ValueError):
        landlab.plot.imshowhs_grid(
            grid,
            "topographic__elevation",
            drape1=bad_drape,
            plot_type="Drape1",
            var_name="Soil",
            var_units=r"m",
            grid_units=("m", "m"),
            cmap="terrain",
            ticks_km=False,
            limits=(0, 2),
        )
def test_imshowhs_grid_input_Layer2():
    """A Drape2 array shorter than the node count must raise ValueError."""
    grid = landlab.RasterModelGrid((4, 5))
    grid.add_zeros("topographic__elevation", at="node")
    ok_drape = np.arange(grid.number_of_nodes)
    bad_drape = np.arange(grid.number_of_nodes - 1)
    with pytest.raises(ValueError):
        landlab.plot.imshowhs_grid(
            grid,
            "topographic__elevation",
            drape1=ok_drape,
            drape2=bad_drape,
            plot_type="Drape2",
            var_name="Soil",
            var_units=r"m",
            grid_units=("m", "m"),
            cmap="terrain",
            ticks_km=False,
            limits=(0, 2),
        )
def test_imshowhs_grid_1():
    """Smoke-test: DEM draped over the shaded topographic relief."""
    grid = landlab.RasterModelGrid((4, 5))
    grid.add_zeros("topographic__elevation", at="node")
    options = dict(
        var_name="Topo",
        var_units=r"m",
        grid_units=("m", "m"),
        cmap="terrain",
        ticks_km=False,
        symmetric_cbar=True,
        limits=(0, 10),
    )
    landlab.plot.imshowhs_grid(grid, "topographic__elevation", **options)
# %%
def test_imshowhs_grid_2():
    """Smoke-test: DEM over shaded relief with vertical exaggeration."""
    grid = landlab.RasterModelGrid((4, 5))
    grid.add_zeros("topographic__elevation", at="node")
    options = dict(
        var_name="Topo",
        var_units=r"m",
        grid_units=("m", "m"),
        vertical_exa=2,
        ticks_km=True,
        symmetric_cbar=True,
        vmin=0,
        vmax=10,
    )
    landlab.plot.imshowhs_grid(grid, "topographic__elevation", **options)
def test_imshowhs_grid_3():
    """Smoke-test: pure hillshade plot with contours."""
    grid = landlab.RasterModelGrid((4, 5))
    grid.add_zeros("topographic__elevation", at="node")
    options = dict(
        plot_type="Hillshade",
        var_name="Topo",
        var_units=r"m",
        grid_units=("m", "m"),
        cmap="terrain",
        ticks_km=False,
        plt_contour=True,
        vmax=10,
        vmin=0,
    )
    landlab.plot.imshowhs_grid(grid, "topographic__elevation", **options)
def test_imshowhs_grid_4a():
    """Smoke-test: Drape1 (soil depth) over the shaded relief, with limits."""
    grid = landlab.RasterModelGrid((4, 5))
    grid.add_zeros("topographic__elevation", at="node")
    soil = grid.add_zeros("soil__depth", at="node")
    landlab.plot.imshowhs_grid(
        grid,
        "topographic__elevation",
        drape1=soil,
        plot_type="Drape1",
        var_name="Soil",
        var_units=r"m",
        grid_units=("m", "m"),
        cmap="terrain",
        ticks_km=False,
        limits=(0, 2),
    )
def test_imshowhs_grid_4b():
    """Smoke-test: Drape1 with vmin/vmax and contour overlay."""
    grid = landlab.RasterModelGrid((4, 5))
    grid.add_zeros("topographic__elevation", at="node")
    soil = grid.add_zeros("soil__depth", at="node")
    landlab.plot.imshowhs_grid(
        grid,
        "topographic__elevation",
        drape1=soil,
        plot_type="Drape1",
        var_name="Soil",
        var_units=r"m",
        grid_units=("m", "m"),
        cmap="terrain",
        ticks_km=False,
        vmin=0,
        vmax=2,
        plt_contour=True,
    )
def test_imshowhs_grid_4c():
    """Smoke-test: Drape1 with a symmetric colorbar."""
    grid = landlab.RasterModelGrid((4, 5))
    grid.add_zeros("topographic__elevation", at="node")
    soil = grid.add_zeros("soil__depth", at="node")
    landlab.plot.imshowhs_grid(
        grid,
        "topographic__elevation",
        drape1=soil,
        plot_type="Drape1",
        var_name="Soil",
        var_units=r"m",
        grid_units=("m", "m"),
        cmap="terrain",
        ticks_km=False,
        symmetric_cbar=True,
    )
# %%
def test_imshowhs_grid_5():
    """Smoke-test: Drape1 with label bbox, threshold, and shifted colorbar label."""
    grid = landlab.RasterModelGrid((4, 5))
    grid.add_zeros("topographic__elevation", at="node")
    grid.add_zeros("soil__depth", at="node")
    grid.add_zeros("Layer_1", at="node")
    options = dict(
        drape1=grid.at_node["Layer_1"],
        plot_type="Drape1",
        var_name="Layer 1",
        var_units=r"m",
        grid_units=("m", "m"),
        cmap="terrain",
        ticks_km=False,
        limits=(0, 2),
        colorbar_label_y=-55,
        add_label_bbox=True,
        thres_drape1=0.001,
    )
    landlab.plot.imshowhs_grid(grid, "topographic__elevation", **options)
def test_imshowhs_grid_6a():
    """Smoke-test: Drape2 (two layers) over the shaded relief, with limits."""
    grid = landlab.RasterModelGrid((4, 5))
    grid.add_zeros("topographic__elevation", at="node")
    grid.add_zeros("soil__depth", at="node")
    layer1 = grid.add_zeros("Layer_1", at="node")
    layer2 = grid.add_zeros("Layer_2", at="node")
    layer1 += 10
    layer2 += 100
    options = dict(
        drape1=grid.at_node["Layer_1"],
        drape2=grid.at_node["Layer_2"],
        plot_type="Drape2",
        var_name="Layer 1",
        var_units=r"m",
        grid_units=("m", "m"),
        cmap="terrain",
        ticks_km=False,
        limits=(0, 200),
        colorbar_label_y=-55,
        add_label_bbox=True,
        thres_drape1=0.001,
        color_for_closed="red",
    )
    landlab.plot.imshowhs_grid(grid, "topographic__elevation", **options)
def test_imshowhs_grid_6b():
    """Smoke-test: Drape2 with vmax below 10 (single-digit tick formatting)."""
    grid = landlab.RasterModelGrid((4, 5))
    grid.add_zeros("topographic__elevation", at="node")
    grid.add_zeros("soil__depth", at="node")
    layer1 = grid.add_zeros("Layer_1", at="node")
    layer2 = grid.add_zeros("Layer_2", at="node")
    layer1 += 10
    layer2 += 100
    options = dict(
        drape1=grid.at_node["Layer_1"],
        drape2=grid.at_node["Layer_2"],
        plot_type="Drape2",
        var_name="Layer 1",
        var_units=r"m",
        grid_units=("m", "m"),
        cmap="terrain",
        ticks_km=False,
        colorbar_label_y=-55,
        add_label_bbox=True,
        thres_drape1=0.001,
        color_for_closed="red",
        vmin=0,
        vmax=9,
    )
    landlab.plot.imshowhs_grid(grid, "topographic__elevation", **options)
def test_imshowhs_grid_6c():
    """Smoke-test: Drape2 with vmax below 100 (two-digit tick formatting)."""
    grid = landlab.RasterModelGrid((4, 5))
    grid.add_zeros("topographic__elevation", at="node")
    grid.add_zeros("soil__depth", at="node")
    layer1 = grid.add_zeros("Layer_1", at="node")
    layer2 = grid.add_zeros("Layer_2", at="node")
    layer1 += 10
    layer2 += 100
    options = dict(
        drape1=grid.at_node["Layer_1"],
        drape2=grid.at_node["Layer_2"],
        plot_type="Drape2",
        var_name="Layer 1",
        var_units=r"m",
        grid_units=("m", "m"),
        cmap="terrain",
        ticks_km=False,
        colorbar_label_y=-55,
        add_label_bbox=True,
        thres_drape1=0.001,
        color_for_closed="red",
        vmin=0,
        vmax=99,
    )
    landlab.plot.imshowhs_grid(grid, "topographic__elevation", **options)
def test_imshowhs_grid_6d():
    """Smoke-test: Drape2 with vmax below 1000 (three-digit tick formatting)."""
    grid = landlab.RasterModelGrid((4, 5))
    grid.add_zeros("topographic__elevation", at="node")
    grid.add_zeros("soil__depth", at="node")
    layer1 = grid.add_zeros("Layer_1", at="node")
    layer2 = grid.add_zeros("Layer_2", at="node")
    layer1 += 10
    layer2 += 100
    options = dict(
        drape1=grid.at_node["Layer_1"],
        drape2=grid.at_node["Layer_2"],
        plot_type="Drape2",
        var_name="Layer 1",
        var_units=r"m",
        grid_units=("m", "m"),
        cmap="terrain",
        ticks_km=False,
        colorbar_label_y=-55,
        add_label_bbox=True,
        thres_drape1=0.001,
        color_for_closed="red",
        vmin=0,
        vmax=999,
    )
    landlab.plot.imshowhs_grid(grid, "topographic__elevation", **options)
# %%
def test_imshowhs_grid_6e():
    """Smoke-test: Drape2 with a double colorbar and a very large vmax."""
    grid = landlab.RasterModelGrid((4, 5))
    grid.add_zeros("topographic__elevation", at="node")
    grid.add_zeros("soil__depth", at="node")
    layer1 = grid.add_zeros("Layer_1", at="node")
    layer2 = grid.add_zeros("Layer_2", at="node")
    layer1 += 10
    layer2 += 100
    options = dict(
        drape1=grid.at_node["Layer_1"],
        drape2=grid.at_node["Layer_2"],
        plot_type="Drape2",
        var_name="Layer 1",
        var_units=r"m",
        grid_units=("m", "m"),
        cmap="terrain",
        ticks_km=False,
        colorbar_label_y=-55,
        add_label_bbox=True,
        thres_drape1=0.001,
        color_for_closed="red",
        add_double_colorbar=True,
        vmin=0,
        vmax=99999,
    )
    landlab.plot.imshowhs_grid(grid, "topographic__elevation", **options)
# %%
def test_imshowhs_grid_7():
    """Smoke-test: Drape2 given as field names, with drape2 threshold and cmap2=None."""
    grid = landlab.RasterModelGrid((4, 5))
    grid.add_zeros("topographic__elevation", at="node")
    grid.add_zeros("soil__depth", at="node")
    grid.add_zeros("Layer_1", at="node")
    grid.add_zeros("Layer_2", at="node")
    options = dict(
        drape1="topographic__elevation",
        drape2="soil__depth",
        plot_type="Drape2",
        var_name="Layer 1",
        var_units=r"m",
        grid_units=("m", "m"),
        cmap="terrain",
        ticks_km=False,
        limits=(0, 2),
        colorbar_label_y=-55,
        add_label_bbox=True,
        thres_drape1=0.001,
        color_for_closed="red",
        thres_drape2=1,
        cmap2=None,
        add_double_colorbar=True,
    )
    landlab.plot.imshowhs_grid(grid, "topographic__elevation", **options)
# %%
def test_imshowhs_grid_8():
    """Smoke-test: Drape2 by field name with vmax in the 10–100 range."""
    grid = landlab.RasterModelGrid((4, 5))
    grid.add_zeros("topographic__elevation", at="node")
    grid.add_zeros("soil__depth", at="node")
    layer1 = grid.add_zeros("Layer_1", at="node")
    layer2 = grid.add_zeros("Layer_2", at="node")
    layer1 += 10
    layer2 += 100
    options = dict(
        drape1="topographic__elevation",
        drape2="soil__depth",
        plot_type="Drape2",
        var_name="Layer 1",
        var_units=r"m",
        grid_units=("m", "m"),
        cmap="terrain",
        ticks_km=False,
        limits=(0, 2),
        colorbar_label_y=-55,
        add_label_bbox=True,
        thres_drape1=0.001,
        color_for_closed="red",
        thres_drape2=1,
        cmap2=None,
        add_double_colorbar=True,
        vmin=0,
        vmax=99,
    )
    landlab.plot.imshowhs_grid(grid, "topographic__elevation", **options)
# %%
def test_imshowhs_grid_9():
    """Smoke-test: Drape2 with very large vmax; an unknown plot_type raises."""
    grid = landlab.RasterModelGrid((4, 5))
    grid.add_zeros("topographic__elevation", at="node")
    grid.add_zeros("soil__depth", at="node")
    grid.add_zeros("Layer_1", at="node")
    grid.add_zeros("Layer_2", at="node")
    options = dict(
        drape1="topographic__elevation",
        drape2="soil__depth",
        plot_type="Drape2",
        var_name="Layer 1",
        var_units=r"m",
        grid_units=("m", "m"),
        cmap="terrain",
        ticks_km=False,
        limits=(0, 2),
        colorbar_label_y=-55,
        add_label_bbox=True,
        thres_drape1=0.001,
        color_for_closed="red",
        thres_drape2=1,
        cmap2=None,
        add_double_colorbar=True,
        vmin=0,
        vmax=99999,
    )
    landlab.plot.imshowhs_grid(grid, "topographic__elevation", **options)
    # An unrecognized plot_type must be rejected.
    with pytest.raises(ValueError):
        landlab.plot.imshowhs_grid(
            grid,
            "topographic__elevation",
            plot_type="Oops",
        )
def test_imshowhs_grid_10():
    """plot_type='Drape2' without a second drape must raise ValueError."""
    grid = landlab.RasterModelGrid((4, 5))
    grid.add_zeros("topographic__elevation", at="node")
    grid.add_zeros("soil__depth", at="node")
    grid.add_zeros("Layer_1", at="node")
    grid.add_zeros("Layer_2", at="node")
    with pytest.raises(ValueError):
        landlab.plot.imshowhs_grid(
            grid,
            "topographic__elevation",
            drape1=grid.at_node["Layer_1"],
            plot_type="Drape2",
            var_name="Layer 1",
            var_units=r"m",
            grid_units=("m", "m"),
            cmap="terrain",
            ticks_km=False,
            limits=(0, 2),
            colorbar_label_y=-55,
            add_label_bbox=True,
            thres_drape1=0.001,
        )
def test_imshowhs_grid_11():
    """plot_type='Drape1' without a drape1 argument must raise ValueError."""
    grid = landlab.RasterModelGrid((4, 5))
    grid.add_zeros("topographic__elevation", at="node")
    grid.add_zeros("soil__depth", at="node")
    grid.add_zeros("Layer_1", at="node")
    grid.add_zeros("Layer_2", at="node")
    with pytest.raises(ValueError):
        landlab.plot.imshowhs_grid(
            grid,
            "topographic__elevation",
            plot_type="Drape1",
            var_name="Layer 1",
            var_units=r"m",
            grid_units=("m", "m"),
            cmap="terrain",
            ticks_km=False,
            limits=(0, 2),
            colorbar_label_y=-55,
            add_label_bbox=True,
            thres_drape1=0.001,
        )
def test_imshowhs_grid_12():
    """Smoke-test: imshowhs_grid with default options only (no units given)."""
    grid = landlab.RasterModelGrid((4, 5))
    grid.add_zeros("topographic__elevation", at="node")
    landlab.plot.imshowhs_grid(grid, "topographic__elevation")
def test_hex_mfd():
    """Hex grids are not supported by imshowhs_grid and must raise."""
    grid = landlab.HexModelGrid((5, 3))
    grid.add_field("topographic__elevation", grid.node_x + grid.node_y, at="node")
    with pytest.raises(NotImplementedError):
        landlab.plot.imshowhs_grid(grid, "topographic__elevation")
# %%
def test_at_cell():
    """Plotting at='cell' is not supported by imshowhs_grid and must raise."""
    grid = landlab.HexModelGrid((5, 3))
    grid.add_field("topographic__elevation", np.zeros((7,)), at="cell")
    with pytest.raises(NotImplementedError):
        landlab.plot.imshowhs_grid(grid, "topographic__elevation", at="cell")
# %%
def test_at_other():
    """Any grid element other than node or cell must raise TypeError."""
    # %%
    grid = landlab.HexModelGrid((5, 3))
    # A (5, 3) hex grid has 24 corners, so supply one zero per corner.
    grid.add_field("topographic__elevation", np.zeros((24,)), at="corner")
    with pytest.raises(TypeError):
        landlab.plot.imshowhs_grid(grid, "topographic__elevation", at="corner")
| 27.021002
| 85
| 0.574973
| 2,065
| 16,726
| 4.351574
| 0.072639
| 0.050746
| 0.061206
| 0.063988
| 0.912642
| 0.895393
| 0.892611
| 0.889272
| 0.867794
| 0.865346
| 0
| 0.035162
| 0.28076
| 16,726
| 618
| 86
| 27.064725
| 0.711804
| 0.089023
| 0
| 0.824295
| 0
| 0
| 0.150705
| 0.073875
| 0
| 0
| 0
| 0
| 0
| 1
| 0.056399
| false
| 0
| 0.010846
| 0
| 0.067245
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
5de11632cd4fc501816db819807e1c9e6b8b2356
| 61,433
|
py
|
Python
|
mysite/patterns/44.py
|
BioinfoNet/prepub
|
e19c48cabf8bd22736dcef9308a5e196cfd8119a
|
[
"MIT"
] | 19
|
2016-06-17T23:36:27.000Z
|
2020-01-13T16:41:55.000Z
|
mysite/patterns/44.py
|
BioinfoNet/prepub
|
e19c48cabf8bd22736dcef9308a5e196cfd8119a
|
[
"MIT"
] | 13
|
2016-06-06T12:57:05.000Z
|
2019-02-05T02:21:00.000Z
|
patterns/44.py
|
OmnesRes/GRIMMER
|
173c99ebdb6a9edb1242d24a791d0c5d778ff643
|
[
"MIT"
] | 7
|
2017-03-28T18:12:22.000Z
|
2021-06-16T09:32:59.000Z
|
pattern_zero=[0.0, 0.022210743802, 0.043388429752, 0.045454545455, 0.063533057851, 0.067665289256, 0.082644628099, 0.088842975207, 0.090909090909, 0.100723140496, 0.108987603306, 0.113119834711, 0.117768595041, 0.128099173554, 0.133780991736, 0.134297520661, 0.136363636364, 0.14617768595, 0.148760330579, 0.15444214876, 0.158574380165, 0.16270661157, 0.163223140496, 0.173553719008, 0.175619834711, 0.17923553719, 0.179752066116, 0.181818181818, 0.1875, 0.191632231405, 0.194214876033, 0.198347107438, 0.199896694215, 0.20402892562, 0.208161157025, 0.20867768595, 0.21694214876, 0.219008264463, 0.221074380165, 0.224690082645, 0.22520661157, 0.227272727273, 0.231404958678, 0.232954545455, 0.23708677686, 0.239669421488, 0.24173553719, 0.243801652893, 0.245351239669, 0.247933884298, 0.249483471074, 0.25, 0.253615702479, 0.254132231405, 0.262396694215, 0.264462809917, 0.26652892562, 0.270144628099, 0.270661157025, 0.272727272727, 0.276859504132, 0.278409090909, 0.282541322314, 0.285123966942, 0.287190082645, 0.289256198347, 0.290805785124, 0.293388429752, 0.294938016529, 0.295454545455, 0.299070247934, 0.29958677686, 0.307851239669, 0.309917355372, 0.311983471074, 0.315599173554, 0.316115702479, 0.318181818182, 0.322314049587, 0.323863636364, 0.327995867769, 0.330578512397, 0.332644628099, 0.334710743802, 0.336260330579, 0.338842975207, 0.340392561983, 0.340909090909, 0.344524793388, 0.345041322314, 0.353305785124, 0.355371900826, 0.357438016529, 0.361053719008, 0.361570247934, 0.363636363636, 0.367768595041, 0.369318181818, 0.373450413223, 0.376033057851, 0.378099173554, 0.380165289256, 0.381714876033, 0.384297520661, 0.385847107438, 0.386363636364, 0.389979338843, 0.390495867769, 0.398760330579, 0.400826446281, 0.402892561983, 0.406508264463, 0.407024793388, 0.409090909091, 0.413223140496, 0.414772727273, 0.418904958678, 0.421487603306, 0.423553719008, 0.425619834711, 0.427169421488, 0.429752066116, 0.431301652893, 0.431818181818, 0.435433884298, 0.435950413223, 
0.444214876033, 0.446280991736, 0.448347107438, 0.451962809917, 0.452479338843, 0.454545454545, 0.45867768595, 0.460227272727, 0.464359504132, 0.46694214876, 0.469008264463, 0.471074380165, 0.472623966942, 0.47520661157, 0.476756198347, 0.477272727273, 0.480888429752, 0.481404958678, 0.489669421488, 0.49173553719, 0.493801652893, 0.497417355372, 0.497933884298, 0.5, 0.504132231405, 0.505681818182, 0.509814049587, 0.512396694215, 0.514462809917, 0.51652892562, 0.518078512397, 0.520661157025, 0.522210743802, 0.522727272727, 0.526342975207, 0.526859504132, 0.535123966942, 0.537190082645, 0.539256198347, 0.542871900826, 0.543388429752, 0.545454545455, 0.54958677686, 0.551136363636, 0.555268595041, 0.557851239669, 0.559917355372, 0.561983471074, 0.563533057851, 0.566115702479, 0.567665289256, 0.568181818182, 0.571797520661, 0.572314049587, 0.580578512397, 0.582644628099, 0.584710743802, 0.588326446281, 0.588842975207, 0.590909090909, 0.595041322314, 0.596590909091, 0.600723140496, 0.603305785124, 0.605371900826, 0.607438016529, 0.608987603306, 0.611570247934, 0.613119834711, 0.613636363636, 0.617252066116, 0.617768595041, 0.626033057851, 0.628099173554, 0.630165289256, 0.633780991736, 0.634297520661, 0.636363636364, 0.640495867769, 0.642045454545, 0.64617768595, 0.648760330579, 0.650826446281, 0.652892561983, 0.65444214876, 0.657024793388, 0.658574380165, 0.659090909091, 0.66270661157, 0.663223140496, 0.671487603306, 0.673553719008, 0.675619834711, 0.67923553719, 0.679752066116, 0.681818181818, 0.685950413223, 0.6875, 0.691632231405, 0.694214876033, 0.696280991736, 0.698347107438, 0.699896694215, 0.702479338843, 0.70402892562, 0.704545454545, 0.708161157025, 0.70867768595, 0.71694214876, 0.719008264463, 0.721074380165, 0.724690082645, 0.72520661157, 0.727272727273, 0.731404958678, 0.732954545455, 0.73708677686, 0.739669421488, 0.74173553719, 0.743801652893, 0.745351239669, 0.747933884298, 0.749483471074, 0.75, 0.753615702479, 0.754132231405, 0.762396694215, 
0.764462809917, 0.76652892562, 0.770144628099, 0.770661157025, 0.772727272727, 0.776859504132, 0.778409090909, 0.782541322314, 0.785123966942, 0.787190082645, 0.789256198347, 0.790805785124, 0.793388429752, 0.794938016529, 0.795454545455, 0.799070247934, 0.79958677686, 0.807851239669, 0.809917355372, 0.811983471074, 0.815599173554, 0.816115702479, 0.818181818182, 0.822314049587, 0.823863636364, 0.827995867769, 0.830578512397, 0.832644628099, 0.834710743802, 0.836260330579, 0.838842975207, 0.840392561983, 0.840909090909, 0.844524793388, 0.845041322314, 0.853305785124, 0.855371900826, 0.857438016529, 0.861053719008, 0.861570247934, 0.863636363636, 0.867768595041, 0.869318181818, 0.873450413223, 0.876033057851, 0.878099173554, 0.880165289256, 0.881714876033, 0.884297520661, 0.885847107438, 0.886363636364, 0.889979338843, 0.890495867769, 0.898760330579, 0.900826446281, 0.902892561983, 0.906508264463, 0.907024793388, 0.909090909091, 0.913223140496, 0.914772727273, 0.918904958678, 0.921487603306, 0.923553719008, 0.925619834711, 0.927169421488, 0.929752066116, 0.931301652893, 0.931818181818, 0.935433884298, 0.935950413223, 0.944214876033, 0.946280991736, 0.948347107438, 0.951962809917, 0.952479338843, 0.954545454545, 0.95867768595, 0.960227272727, 0.964359504132, 0.96694214876, 0.969008264463, 0.971074380165, 0.972623966942, 0.97520661157, 0.976756198347, 0.977272727273, 0.980888429752, 0.981404958678, 0.989669421488, 0.99173553719, 0.993801652893, 0.997417355372, 0.997933884298]
pattern_odd=[0.0, 0.00413223141, 0.00568181818, 0.00981404959, 0.01239669422, 0.01446280992, 0.01652892562, 0.0180785124, 0.02066115703, 0.0222107438, 0.02272727273, 0.02634297521, 0.02685950413, 0.03512396694, 0.03719008265, 0.03925619835, 0.04287190083, 0.04338842975, 0.04545454546, 0.04958677686, 0.05113636364, 0.05526859504, 0.05785123967, 0.05991735537, 0.06198347107, 0.06353305785, 0.06611570248, 0.06766528926, 0.06818181818, 0.07179752066, 0.07231404959, 0.0805785124, 0.0826446281, 0.0847107438, 0.08832644628, 0.08884297521, 0.09090909091, 0.09504132231, 0.09659090909, 0.1007231405, 0.10330578512, 0.10537190083, 0.10743801653, 0.10898760331, 0.11157024793, 0.11311983471, 0.11363636364, 0.11725206612, 0.11776859504, 0.12603305785, 0.12809917355, 0.13016528926, 0.13378099174, 0.13429752066, 0.13636363636, 0.14049586777, 0.14204545455, 0.14617768595, 0.14876033058, 0.15082644628, 0.15289256198, 0.15444214876, 0.15702479339, 0.15857438017, 0.15909090909, 0.16270661157, 0.1632231405, 0.17148760331, 0.17355371901, 0.17561983471, 0.17923553719, 0.17975206612, 0.18181818182, 0.18595041322, 0.1875, 0.19163223141, 0.19421487603, 0.19628099174, 0.19834710744, 0.19989669422, 0.20247933884, 0.20402892562, 0.20454545455, 0.20816115703, 0.20867768595, 0.21694214876, 0.21900826446, 0.22107438017, 0.22469008265, 0.22520661157, 0.22727272727, 0.23140495868, 0.23295454546, 0.23708677686, 0.23966942149, 0.24173553719, 0.24380165289, 0.24535123967, 0.2479338843, 0.24948347107, 0.25, 0.25361570248, 0.25413223141, 0.26239669422, 0.26446280992, 0.26652892562, 0.2701446281, 0.27066115703, 0.27272727273, 0.27685950413, 0.27840909091, 0.28254132231, 0.28512396694, 0.28719008265, 0.28925619835, 0.29080578512, 0.29338842975, 0.29493801653, 0.29545454546, 0.29907024793, 0.29958677686, 0.30785123967, 0.30991735537, 0.31198347107, 0.31559917355, 0.31611570248, 0.31818181818, 0.32231404959, 0.32386363636, 0.32799586777, 0.3305785124, 0.3326446281, 0.3347107438, 0.33626033058, 0.33884297521, 
0.34039256198, 0.34090909091, 0.34452479339, 0.34504132231, 0.35330578512, 0.35537190083, 0.35743801653, 0.36105371901, 0.36157024793, 0.36363636364, 0.36776859504, 0.36931818182, 0.37345041322, 0.37603305785, 0.37809917355, 0.38016528926, 0.38171487603, 0.38429752066, 0.38584710744, 0.38636363636, 0.38997933884, 0.39049586777, 0.39876033058, 0.40082644628, 0.40289256198, 0.40650826446, 0.40702479339, 0.40909090909, 0.4132231405, 0.41477272727, 0.41890495868, 0.42148760331, 0.42355371901, 0.42561983471, 0.42716942149, 0.42975206612, 0.43130165289, 0.43181818182, 0.4354338843, 0.43595041322, 0.44421487603, 0.44628099174, 0.44834710744, 0.45196280992, 0.45247933884, 0.45454545455, 0.45867768595, 0.46022727273, 0.46435950413, 0.46694214876, 0.46900826446, 0.47107438017, 0.47262396694, 0.47520661157, 0.47675619835, 0.47727272727, 0.48088842975, 0.48140495868, 0.48966942149, 0.49173553719, 0.49380165289, 0.49741735537, 0.4979338843, 0.5, 0.50413223141, 0.50568181818, 0.50981404959, 0.51239669422, 0.51446280992, 0.51652892562, 0.5180785124, 0.52066115703, 0.5222107438, 0.52272727273, 0.52634297521, 0.52685950413, 0.53512396694, 0.53719008265, 0.53925619835, 0.54287190083, 0.54338842975, 0.54545454546, 0.54958677686, 0.55113636364, 0.55526859504, 0.55785123967, 0.55991735537, 0.56198347107, 0.56353305785, 0.56611570248, 0.56766528926, 0.56818181818, 0.57179752066, 0.57231404959, 0.5805785124, 0.5826446281, 0.5847107438, 0.58832644628, 0.58884297521, 0.59090909091, 0.59504132231, 0.59659090909, 0.6007231405, 0.60330578512, 0.60537190083, 0.60743801653, 0.60898760331, 0.61157024793, 0.61311983471, 0.61363636364, 0.61725206612, 0.61776859504, 0.62603305785, 0.62809917355, 0.63016528926, 0.63378099174, 0.63429752066, 0.63636363636, 0.64049586777, 0.64204545455, 0.64617768595, 0.64876033058, 0.65082644628, 0.65289256198, 0.65444214876, 0.65702479339, 0.65857438017, 0.65909090909, 0.66270661157, 0.6632231405, 0.67148760331, 0.67355371901, 0.67561983471, 0.67923553719, 
0.67975206612, 0.68181818182, 0.68595041322, 0.6875, 0.69163223141, 0.69421487603, 0.69628099174, 0.69834710744, 0.69989669422, 0.70247933884, 0.70402892562, 0.70454545455, 0.70816115703, 0.70867768595, 0.71694214876, 0.71900826446, 0.72107438017, 0.72469008265, 0.72520661157, 0.72727272727, 0.73140495868, 0.73295454546, 0.73708677686, 0.73966942149, 0.74173553719, 0.74380165289, 0.74535123967, 0.7479338843, 0.74948347107, 0.75, 0.75361570248, 0.75413223141, 0.76239669422, 0.76446280992, 0.76652892562, 0.7701446281, 0.77066115703, 0.77272727273, 0.77685950413, 0.77840909091, 0.78254132231, 0.78512396694, 0.78719008265, 0.78925619835, 0.79080578512, 0.79338842975, 0.79493801653, 0.79545454546, 0.79907024793, 0.79958677686, 0.80785123967, 0.80991735537, 0.81198347107, 0.81559917355, 0.81611570248, 0.81818181818, 0.82231404959, 0.82386363636, 0.82799586777, 0.8305785124, 0.8326446281, 0.8347107438, 0.83626033058, 0.83884297521, 0.84039256198, 0.84090909091, 0.84452479339, 0.84504132231, 0.85330578512, 0.85537190083, 0.85743801653, 0.86105371901, 0.86157024793, 0.86363636364, 0.86776859504, 0.86931818182, 0.87345041322, 0.87603305785, 0.87809917355, 0.88016528926, 0.88171487603, 0.88429752066, 0.88584710744, 0.88636363636, 0.88997933884, 0.89049586777, 0.89876033058, 0.90082644628, 0.90289256198, 0.90650826446, 0.90702479339, 0.90909090909, 0.9132231405, 0.91477272727, 0.91890495868, 0.92148760331, 0.92355371901, 0.92561983471, 0.92716942149, 0.92975206612, 0.93130165289, 0.93181818182, 0.9354338843, 0.93595041322, 0.94421487603, 0.94628099174, 0.94834710744, 0.95196280992, 0.95247933884, 0.95454545455, 0.95867768595, 0.96022727273, 0.96435950413, 0.96694214876, 0.96900826446, 0.97107438017, 0.97262396694, 0.97520661157, 0.97675619835, 0.97727272727, 0.98088842975, 0.98140495868, 0.98966942149, 0.99173553719, 0.99380165289, 0.99741735537, 0.9979338843]
pattern_even=[0.0, 0.00413223141, 0.00568181818, 0.00981404959, 0.01239669422, 0.01446280992, 0.01652892562, 0.0180785124, 0.02066115703, 0.0222107438, 0.02272727273, 0.02634297521, 0.02685950413, 0.03512396694, 0.03719008265, 0.03925619835, 0.04287190083, 0.04338842975, 0.04545454546, 0.04958677686, 0.05113636364, 0.05526859504, 0.05785123967, 0.05991735537, 0.06198347107, 0.06353305785, 0.06611570248, 0.06766528926, 0.06818181818, 0.07179752066, 0.07231404959, 0.0805785124, 0.0826446281, 0.0847107438, 0.08832644628, 0.08884297521, 0.09090909091, 0.09504132231, 0.09659090909, 0.1007231405, 0.10330578512, 0.10537190083, 0.10743801653, 0.10898760331, 0.11157024793, 0.11311983471, 0.11363636364, 0.11725206612, 0.11776859504, 0.12603305785, 0.12809917355, 0.13016528926, 0.13378099174, 0.13429752066, 0.13636363636, 0.14049586777, 0.14204545455, 0.14617768595, 0.14876033058, 0.15082644628, 0.15289256198, 0.15444214876, 0.15702479339, 0.15857438017, 0.15909090909, 0.16270661157, 0.1632231405, 0.17148760331, 0.17355371901, 0.17561983471, 0.17923553719, 0.17975206612, 0.18181818182, 0.18595041322, 0.1875, 0.19163223141, 0.19421487603, 0.19628099174, 0.19834710744, 0.19989669422, 0.20247933884, 0.20402892562, 0.20454545455, 0.20816115703, 0.20867768595, 0.21694214876, 0.21900826446, 0.22107438017, 0.22469008265, 0.22520661157, 0.22727272727, 0.23140495868, 0.23295454546, 0.23708677686, 0.23966942149, 0.24173553719, 0.24380165289, 0.24535123967, 0.2479338843, 0.24948347107, 0.25, 0.25361570248, 0.25413223141, 0.26239669422, 0.26446280992, 0.26652892562, 0.2701446281, 0.27066115703, 0.27272727273, 0.27685950413, 0.27840909091, 0.28254132231, 0.28512396694, 0.28719008265, 0.28925619835, 0.29080578512, 0.29338842975, 0.29493801653, 0.29545454546, 0.29907024793, 0.29958677686, 0.30785123967, 0.30991735537, 0.31198347107, 0.31559917355, 0.31611570248, 0.31818181818, 0.32231404959, 0.32386363636, 0.32799586777, 0.3305785124, 0.3326446281, 0.3347107438, 0.33626033058, 
0.33884297521, 0.34039256198, 0.34090909091, 0.34452479339, 0.34504132231, 0.35330578512, 0.35537190083, 0.35743801653, 0.36105371901, 0.36157024793, 0.36363636364, 0.36776859504, 0.36931818182, 0.37345041322, 0.37603305785, 0.37809917355, 0.38016528926, 0.38171487603, 0.38429752066, 0.38584710744, 0.38636363636, 0.38997933884, 0.39049586777, 0.39876033058, 0.40082644628, 0.40289256198, 0.40650826446, 0.40702479339, 0.40909090909, 0.4132231405, 0.41477272727, 0.41890495868, 0.42148760331, 0.42355371901, 0.42561983471, 0.42716942149, 0.42975206612, 0.43130165289, 0.43181818182, 0.4354338843, 0.43595041322, 0.44421487603, 0.44628099174, 0.44834710744, 0.45196280992, 0.45247933884, 0.45454545455, 0.45867768595, 0.46022727273, 0.46435950413, 0.46694214876, 0.46900826446, 0.47107438017, 0.47262396694, 0.47520661157, 0.47675619835, 0.47727272727, 0.48088842975, 0.48140495868, 0.48966942149, 0.49173553719, 0.49380165289, 0.49741735537, 0.4979338843, 0.5, 0.50413223141, 0.50568181818, 0.50981404959, 0.51239669422, 0.51446280992, 0.51652892562, 0.5180785124, 0.52066115703, 0.5222107438, 0.52272727273, 0.52634297521, 0.52685950413, 0.53512396694, 0.53719008265, 0.53925619835, 0.54287190083, 0.54338842975, 0.54545454546, 0.54958677686, 0.55113636364, 0.55526859504, 0.55785123967, 0.55991735537, 0.56198347107, 0.56353305785, 0.56611570248, 0.56766528926, 0.56818181818, 0.57179752066, 0.57231404959, 0.5805785124, 0.5826446281, 0.5847107438, 0.58832644628, 0.58884297521, 0.59090909091, 0.59504132231, 0.59659090909, 0.6007231405, 0.60330578512, 0.60537190083, 0.60743801653, 0.60898760331, 0.61157024793, 0.61311983471, 0.61363636364, 0.61725206612, 0.61776859504, 0.62603305785, 0.62809917355, 0.63016528926, 0.63378099174, 0.63429752066, 0.63636363636, 0.64049586777, 0.64204545455, 0.64617768595, 0.64876033058, 0.65082644628, 0.65289256198, 0.65444214876, 0.65702479339, 0.65857438017, 0.65909090909, 0.66270661157, 0.6632231405, 0.67148760331, 0.67355371901, 0.67561983471, 
0.67923553719, 0.67975206612, 0.68181818182, 0.68595041322, 0.6875, 0.69163223141, 0.69421487603, 0.69628099174, 0.69834710744, 0.69989669422, 0.70247933884, 0.70402892562, 0.70454545455, 0.70816115703, 0.70867768595, 0.71694214876, 0.71900826446, 0.72107438017, 0.72469008265, 0.72520661157, 0.72727272727, 0.73140495868, 0.73295454546, 0.73708677686, 0.73966942149, 0.74173553719, 0.74380165289, 0.74535123967, 0.7479338843, 0.74948347107, 0.75, 0.75361570248, 0.75413223141, 0.76239669422, 0.76446280992, 0.76652892562, 0.7701446281, 0.77066115703, 0.77272727273, 0.77685950413, 0.77840909091, 0.78254132231, 0.78512396694, 0.78719008265, 0.78925619835, 0.79080578512, 0.79338842975, 0.79493801653, 0.79545454546, 0.79907024793, 0.79958677686, 0.80785123967, 0.80991735537, 0.81198347107, 0.81559917355, 0.81611570248, 0.81818181818, 0.82231404959, 0.82386363636, 0.82799586777, 0.8305785124, 0.8326446281, 0.8347107438, 0.83626033058, 0.83884297521, 0.84039256198, 0.84090909091, 0.84452479339, 0.84504132231, 0.85330578512, 0.85537190083, 0.85743801653, 0.86105371901, 0.86157024793, 0.86363636364, 0.86776859504, 0.86931818182, 0.87345041322, 0.87603305785, 0.87809917355, 0.88016528926, 0.88171487603, 0.88429752066, 0.88584710744, 0.88636363636, 0.88997933884, 0.89049586777, 0.89876033058, 0.90082644628, 0.90289256198, 0.90650826446, 0.90702479339, 0.90909090909, 0.9132231405, 0.91477272727, 0.91890495868, 0.92148760331, 0.92355371901, 0.92561983471, 0.92716942149, 0.92975206612, 0.93130165289, 0.93181818182, 0.9354338843, 0.93595041322, 0.94421487603, 0.94628099174, 0.94834710744, 0.95196280992, 0.95247933884, 0.95454545455, 0.95867768595, 0.96022727273, 0.96435950413, 0.96694214876, 0.96900826446, 0.97107438017, 0.97262396694, 0.97520661157, 0.97675619835, 0.97727272727, 0.98088842975, 0.98140495868, 0.98966942149, 0.99173553719, 0.99380165289, 0.99741735537, 0.9979338843]
averages_even={0.0: [0.0], 0.1875: [0.25, 0.75], 0.06353305785: [0.4318181818182, 0.5681818181818, 0.9318181818182, 0.0681818181818], 0.5: [0.0], 0.29958677686: [0.8636363636364, 0.1363636363636], 0.92975206612: [0.4545454545455, 0.5454545454545], 0.82231404959: [0.3636363636364, 0.6363636363636], 0.6875: [0.75, 0.25], 0.06766528926: [0.9772727272727, 0.4772727272727, 0.5227272727273, 0.0227272727273], 0.52272727273: [0.5], 0.79958677686: [0.8636363636364, 0.1363636363636], 0.10743801653: [0.7272727272727, 0.2727272727273], 0.55785123967: [0.1818181818182, 0.8181818181818], 0.79338842975: [0.4545454545455, 0.5454545454545], 0.10898760331: [0.4318181818182, 0.5681818181818, 0.9318181818182, 0.0681818181818], 0.65857438017: [0.9772727272727, 0.4772727272727, 0.5227272727273, 0.0227272727273], 0.5826446281: [0.0909090909091, 0.9090909090909], 0.36931818182: [0.75, 0.25], 0.29545454546: [0.5], 0.97727272727: [0.5], 0.37345041322: [0.6136363636364, 0.1136363636364, 0.3863636363636, 0.8863636363636], 0.0805785124: [0.6818181818182, 0.3181818181818], 0.90909090909: [0.0], 0.74380165289: [0.7272727272727, 0.2727272727273], 0.25361570248: [0.2954545454545, 0.2045454545455, 0.7045454545455, 0.7954545454545], 0.65909090909: [0.5], 0.79545454546: [0.5], 0.82799586777: [0.6136363636364, 0.1136363636364, 0.3863636363636, 0.8863636363636], 0.10537190083: [0.5909090909091, 0.4090909090909], 0.02634297521: [0.7045454545455, 0.2045454545455, 0.7954545454545, 0.2954545454545], 0.16270661157: [0.2954545454545, 0.2045454545455, 0.7045454545455, 0.7954545454545], 0.64876033058: [0.1818181818182, 0.8181818181818], 0.99741735537: [0.8409090909091, 0.3409090909091, 0.6590909090909, 0.1590909090909], 0.91890495868: [0.6136363636364, 0.1136363636364, 0.3863636363636, 0.8863636363636], 0.75: [0.5], 0.51652892562: [0.7272727272727, 0.2727272727273], 0.27840909091: [0.25, 0.75], 0.30785123967: [0.6818181818182, 0.3181818181818], 0.59504132231: [0.3636363636364, 0.6363636363636], 0.28254132231: 
[0.6136363636364, 0.1136363636364, 0.3863636363636, 0.8863636363636], 0.58884297521: [0.0454545454545, 0.9545454545455], 0.29493801653: [0.9772727272727, 0.4772727272727, 0.5227272727273, 0.0227272727273], 0.90082644628: [0.0909090909091, 0.9090909090909], 0.88997933884: [0.7045454545455, 0.2045454545455, 0.7954545454545, 0.2954545454545], 0.36157024793: [0.0454545454545, 0.9545454545455], 0.24535123967: [0.4318181818182, 0.5681818181818, 0.9318181818182, 0.0681818181818], 0.44421487603: [0.6818181818182, 0.3181818181818], 0.92716942149: [0.4318181818182, 0.5681818181818, 0.9318181818182, 0.0681818181818], 0.49380165289: [0.2272727272727, 0.7727272727273], 0.21694214876: [0.6818181818182, 0.3181818181818], 0.48088842975: [0.2954545454545, 0.2045454545455, 0.7045454545455, 0.7954545454545], 0.09659090909: [0.25, 0.75], 0.02066115703: [0.4545454545455, 0.5454545454545], 0.68595041322: [0.3636363636364, 0.6363636363636], 0.50568181818: [0.75, 0.25], 0.78925619835: [0.7272727272727, 0.2727272727273], 0.38016528926: [0.7272727272727, 0.2727272727273], 0.89049586777: [0.8636363636364, 0.1363636363636], 0.42975206612: [0.4545454545455, 0.5454545454545], 0.40702479339: [0.0454545454545, 0.9545454545455], 0.86776859504: [0.3636363636364, 0.6363636363636], 0.57231404959: [0.8636363636364, 0.1363636363636], 0.70867768595: [0.8636363636364, 0.1363636363636], 0.5805785124: [0.6818181818182, 0.3181818181818], 0.09090909091: [0.0], 0.81818181818: [0.0], 0.12603305785: [0.6818181818182, 0.3181818181818], 0.80785123967: [0.6818181818182, 0.3181818181818], 0.31611570248: [0.0454545454545, 0.9545454545455], 0.32799586777: [0.6136363636364, 0.1136363636364, 0.3863636363636, 0.8863636363636], 0.61363636364: [0.5], 0.79907024793: [0.7045454545455, 0.2045454545455, 0.7954545454545, 0.2954545454545], 0.43595041322: [0.8636363636364, 0.1363636363636], 0.73140495868: [0.3636363636364, 0.6363636363636], 0.24380165289: [0.7272727272727, 0.2727272727273], 0.00981404959: [0.6136363636364, 
0.1136363636364, 0.3863636363636, 0.8863636363636], 0.56766528926: [0.9772727272727, 0.4772727272727, 0.5227272727273, 0.0227272727273], 0.0222107438: [0.9772727272727, 0.4772727272727, 0.5227272727273, 0.0227272727273], 0.8305785124: [0.1818181818182, 0.8181818181818], 0.36363636364: [0.0], 0.67975206612: [0.0454545454545, 0.9545454545455], 0.6007231405: [0.6136363636364, 0.1136363636364, 0.3863636363636, 0.8863636363636], 0.50413223141: [0.3636363636364, 0.6363636363636], 0.9979338843: [0.0454545454545, 0.9545454545455], 0.61725206612: [0.2954545454545, 0.2045454545455, 0.7045454545455, 0.7954545454545], 0.60330578512: [0.1818181818182, 0.8181818181818], 0.72727272727: [0.0], 0.94834710744: [0.2272727272727, 0.7727272727273], 0.23295454546: [0.25, 0.75], 0.70247933884: [0.4545454545455, 0.5454545454545], 0.73708677686: [0.6136363636364, 0.1136363636364, 0.3863636363636, 0.8863636363636], 0.48140495868: [0.8636363636364, 0.1363636363636], 0.40082644628: [0.0909090909091, 0.9090909090909], 0.72107438017: [0.2272727272727, 0.7727272727273], 0.56353305785: [0.4318181818182, 0.5681818181818, 0.9318181818182, 0.0681818181818], 0.1632231405: [0.8636363636364, 0.1363636363636], 0.40289256198: [0.2272727272727, 0.7727272727273], 0.05991735537: [0.5909090909091, 0.4090909090909], 0.72520661157: [0.0454545454545, 0.9545454545455], 0.17355371901: [0.0909090909091, 0.9090909090909], 0.75413223141: [0.8636363636364, 0.1363636363636], 0.45454545455: [0.0], 0.07179752066: [0.7045454545455, 0.2045454545455, 0.7954545454545, 0.2954545454545], 0.12809917355: [0.0909090909091, 0.9090909090909], 0.13016528926: [0.2272727272727, 0.7727272727273], 0.90650826446: [0.8409090909091, 0.3409090909091, 0.6590909090909, 0.1590909090909], 0.67355371901: [0.0909090909091, 0.9090909090909], 0.05113636364: [0.25, 0.75], 0.24948347107: [0.9772727272727, 0.4772727272727, 0.5227272727273, 0.0227272727273], 0.19163223141: [0.6136363636364, 0.1136363636364, 0.3863636363636, 0.8863636363636], 
0.46435950413: [0.6136363636364, 0.1136363636364, 0.3863636363636, 0.8863636363636], 0.36105371901: [0.8409090909091, 0.3409090909091, 0.6590909090909, 0.1590909090909], 0.81198347107: [0.2272727272727, 0.7727272727273], 0.03719008265: [0.0909090909091, 0.9090909090909], 0.77840909091: [0.75, 0.25], 0.91477272727: [0.75, 0.25], 0.63378099174: [0.8409090909091, 0.3409090909091, 0.6590909090909, 0.1590909090909], 0.13636363636: [0.0], 0.78254132231: [0.6136363636364, 0.1136363636364, 0.3863636363636, 0.8863636363636], 0.63636363636: [0.0], 0.30991735537: [0.0909090909091, 0.9090909090909], 0.86157024793: [0.0454545454545, 0.9545454545455], 0.95867768595: [0.3636363636364, 0.6363636363636], 0.17975206612: [0.0454545454545, 0.9545454545455], 0.56818181818: [0.5], 0.32231404959: [0.3636363636364, 0.6363636363636], 0.84039256198: [0.9772727272727, 0.4772727272727, 0.5227272727273, 0.0227272727273], 0.45867768595: [0.3636363636364, 0.6363636363636], 0.3305785124: [0.1818181818182, 0.8181818181818], 0.06198347107: [0.7272727272727, 0.2727272727273], 0.01446280992: [0.5909090909091, 0.4090909090909], 0.59090909091: [0.0], 0.04287190083: [0.8409090909091, 0.3409090909091, 0.6590909090909, 0.1590909090909], 0.65444214876: [0.4318181818182, 0.5681818181818, 0.9318181818182, 0.0681818181818], 0.04338842975: [0.0454545454545, 0.9545454545455], 0.34090909091: [0.5], 0.34504132231: [0.8636363636364, 0.1363636363636], 0.17561983471: [0.2272727272727, 0.7727272727273], 0.53925619835: [0.2272727272727, 0.7727272727273], 0.88429752066: [0.4545454545455, 0.5454545454545], 0.55113636364: [0.75, 0.25], 0.43181818182: [0.5], 0.56611570248: [0.4545454545455, 0.5454545454545], 0.28925619835: [0.7272727272727, 0.2727272727273], 0.18595041322: [0.3636363636364, 0.6363636363636], 0.41890495868: [0.6136363636364, 0.1136363636364, 0.3863636363636, 0.8863636363636], 0.62809917355: [0.0909090909091, 0.9090909090909], 0.26446280992: [0.0909090909091, 0.9090909090909], 0.22727272727: [0.0], 
0.14049586777: [0.3636363636364, 0.6363636363636], 0.7701446281: [0.8409090909091, 0.3409090909091, 0.6590909090909, 0.1590909090909], 0.61776859504: [0.8636363636364, 0.1363636363636], 0.2479338843: [0.4545454545455, 0.5454545454545], 0.0826446281: [0.0909090909091, 0.9090909090909], 0.13378099174: [0.8409090909091, 0.3409090909091, 0.6590909090909, 0.1590909090909], 0.36776859504: [0.3636363636364, 0.6363636363636], 0.04958677686: [0.3636363636364, 0.6363636363636], 0.82386363636: [0.75, 0.25], 0.8347107438: [0.7272727272727, 0.2727272727273], 0.69989669422: [0.4318181818182, 0.5681818181818, 0.9318181818182, 0.0681818181818], 0.87603305785: [0.1818181818182, 0.8181818181818], 0.77066115703: [0.0454545454545, 0.9545454545455], 0.75361570248: [0.7045454545455, 0.2045454545455, 0.7954545454545, 0.2954545454545], 0.46694214876: [0.1818181818182, 0.8181818181818], 0.10330578512: [0.1818181818182, 0.8181818181818], 0.88584710744: [0.9772727272727, 0.4772727272727, 0.5227272727273, 0.0227272727273], 0.14617768595: [0.6136363636364, 0.1136363636364, 0.3863636363636, 0.8863636363636], 0.26652892562: [0.2272727272727, 0.7727272727273], 0.42148760331: [0.1818181818182, 0.8181818181818], 0.46022727273: [0.75, 0.25], 0.60743801653: [0.7272727272727, 0.2727272727273], 0.41477272727: [0.25, 0.75], 0.35330578512: [0.6818181818182, 0.3181818181818], 0.62603305785: [0.6818181818182, 0.3181818181818], 0.15857438017: [0.9772727272727, 0.4772727272727, 0.5227272727273, 0.0227272727273], 0.34039256198: [0.9772727272727, 0.4772727272727, 0.5227272727273, 0.0227272727273], 0.95454545455: [0.0], 0.44628099174: [0.0909090909091, 0.9090909090909], 0.96435950413: [0.6136363636364, 0.1136363636364, 0.3863636363636, 0.8863636363636], 0.11311983471: [0.9772727272727, 0.4772727272727, 0.5227272727273, 0.0227272727273], 0.03925619835: [0.2272727272727, 0.7727272727273], 0.42716942149: [0.4318181818182, 0.5681818181818, 0.9318181818182, 0.0681818181818], 0.74173553719: [0.5909090909091, 
0.4090909090909], 0.76239669422: [0.6818181818182, 0.3181818181818], 0.61311983471: [0.9772727272727, 0.4772727272727, 0.5227272727273, 0.0227272727273], 0.07231404959: [0.8636363636364, 0.1363636363636], 0.60898760331: [0.4318181818182, 0.5681818181818, 0.9318181818182, 0.0681818181818], 0.01652892562: [0.7272727272727, 0.2727272727273], 0.0847107438: [0.2272727272727, 0.7727272727273], 0.33884297521: [0.4545454545455, 0.5454545454545], 0.52685950413: [0.8636363636364, 0.1363636363636], 0.17923553719: [0.8409090909091, 0.3409090909091, 0.6590909090909, 0.1590909090909], 0.66270661157: [0.7045454545455, 0.2045454545455, 0.7954545454545, 0.2954545454545], 0.42561983471: [0.7272727272727, 0.2727272727273], 0.85743801653: [0.2272727272727, 0.7727272727273], 0.47520661157: [0.4545454545455, 0.5454545454545], 0.15289256198: [0.7272727272727, 0.2727272727273], 0.67148760331: [0.6818181818182, 0.3181818181818], 0.47727272727: [0.5], 0.24173553719: [0.5909090909091, 0.4090909090909], 0.25: [0.5], 0.19989669422: [0.4318181818182, 0.5681818181818, 0.9318181818182, 0.0681818181818], 0.70402892562: [0.9772727272727, 0.4772727272727, 0.5227272727273, 0.0227272727273], 0.22469008265: [0.8409090909091, 0.3409090909091, 0.6590909090909, 0.1590909090909], 0.78719008265: [0.5909090909091, 0.4090909090909], 0.20454545455: [0.5], 0.29907024793: [0.2954545454545, 0.2045454545455, 0.7045454545455, 0.7954545454545], 0.93595041322: [0.8636363636364, 0.1363636363636], 0.50981404959: [0.6136363636364, 0.1136363636364, 0.3863636363636, 0.8863636363636], 0.20402892562: [0.9772727272727, 0.4772727272727, 0.5227272727273, 0.0227272727273], 0.96694214876: [0.1818181818182, 0.8181818181818], 0.22520661157: [0.0454545454545, 0.9545454545455], 0.28719008265: [0.5909090909091, 0.4090909090909], 0.52066115703: [0.4545454545455, 0.5454545454545], 0.84504132231: [0.8636363636364, 0.1363636363636], 0.79493801653: [0.9772727272727, 0.4772727272727, 0.5227272727273, 0.0227272727273], 0.69421487603: 
[0.1818181818182, 0.8181818181818], 0.88171487603: [0.4318181818182, 0.5681818181818, 0.9318181818182, 0.0681818181818], 0.27272727273: [0.0], 0.92561983471: [0.7272727272727, 0.2727272727273], 0.97262396694: [0.4318181818182, 0.5681818181818, 0.9318181818182, 0.0681818181818], 0.54338842975: [0.0454545454545, 0.9545454545455], 0.01239669422: [0.1818181818182, 0.8181818181818], 0.76652892562: [0.2272727272727, 0.7727272727273], 0.11157024793: [0.4545454545455, 0.5454545454545], 0.05526859504: [0.6136363636364, 0.1136363636364, 0.3863636363636, 0.8863636363636], 0.65702479339: [0.4545454545455, 0.5454545454545], 0.71694214876: [0.6818181818182, 0.3181818181818], 0.38636363636: [0.5], 0.86105371901: [0.8409090909091, 0.3409090909091, 0.6590909090909, 0.1590909090909], 0.0180785124: [0.4318181818182, 0.5681818181818, 0.9318181818182, 0.0681818181818], 0.81611570248: [0.0454545454545, 0.9545454545455], 0.94628099174: [0.0909090909091, 0.9090909090909], 0.3326446281: [0.5909090909091, 0.4090909090909], 0.69834710744: [0.7272727272727, 0.2727272727273], 0.29080578512: [0.4318181818182, 0.5681818181818, 0.9318181818182, 0.0681818181818], 0.51446280992: [0.5909090909091, 0.4090909090909], 0.35743801653: [0.2272727272727, 0.7727272727273], 0.83884297521: [0.4545454545455, 0.5454545454545], 0.5180785124: [0.4318181818182, 0.5681818181818, 0.9318181818182, 0.0681818181818], 0.64204545455: [0.75, 0.25], 0.46900826446: [0.5909090909091, 0.4090909090909], 0.98966942149: [0.6818181818182, 0.3181818181818], 0.7479338843: [0.4545454545455, 0.5454545454545], 0.42355371901: [0.5909090909091, 0.4090909090909], 0.53719008265: [0.0909090909091, 0.9090909090909], 0.31559917355: [0.8409090909091, 0.3409090909091, 0.6590909090909, 0.1590909090909], 0.76446280992: [0.0909090909091, 0.9090909090909], 0.31818181818: [0.0], 0.79080578512: [0.4318181818182, 0.5681818181818, 0.9318181818182, 0.0681818181818], 0.35537190083: [0.0909090909091, 0.9090909090909], 0.08884297521: [0.0454545454545, 
0.9545454545455], 0.87809917355: [0.5909090909091, 0.4090909090909], 0.20867768595: [0.8636363636364, 0.1363636363636], 0.39876033058: [0.6818181818182, 0.3181818181818], 0.67561983471: [0.2272727272727, 0.7727272727273], 0.92148760331: [0.1818181818182, 0.8181818181818], 0.80991735537: [0.0909090909091, 0.9090909090909], 0.1007231405: [0.6136363636364, 0.1136363636364, 0.3863636363636, 0.8863636363636], 0.40909090909: [0.0], 0.54287190083: [0.8409090909091, 0.3409090909091, 0.6590909090909, 0.1590909090909], 0.9354338843: [0.7045454545455, 0.2045454545455, 0.7954545454545, 0.2954545454545], 0.59659090909: [0.75, 0.25], 0.25413223141: [0.8636363636364, 0.1363636363636], 0.52634297521: [0.7045454545455, 0.2045454545455, 0.7954545454545, 0.2954545454545], 0.96022727273: [0.75, 0.25], 0.8326446281: [0.5909090909091, 0.4090909090909], 0.71900826446: [0.0909090909091, 0.9090909090909], 0.60537190083: [0.5909090909091, 0.4090909090909], 0.5222107438: [0.9772727272727, 0.4772727272727, 0.5227272727273, 0.0227272727273], 0.68181818182: [0.0], 0.96900826446: [0.5909090909091, 0.4090909090909], 0.53512396694: [0.6818181818182, 0.3181818181818], 0.39049586777: [0.8636363636364, 0.1363636363636], 0.38171487603: [0.4318181818182, 0.5681818181818, 0.9318181818182, 0.0681818181818], 0.86363636364: [0.0], 0.44834710744: [0.2272727272727, 0.7727272727273], 0.83626033058: [0.4318181818182, 0.5681818181818, 0.9318181818182, 0.0681818181818], 0.63016528926: [0.2272727272727, 0.7727272727273], 0.38997933884: [0.7045454545455, 0.2045454545455, 0.7954545454545, 0.2954545454545], 0.98140495868: [0.8636363636364, 0.1363636363636], 0.70454545455: [0.5], 0.97107438017: [0.7272727272727, 0.2727272727273], 0.45247933884: [0.0454545454545, 0.9545454545455], 0.98088842975: [0.7045454545455, 0.2045454545455, 0.7954545454545, 0.2954545454545], 0.27685950413: [0.3636363636364, 0.6363636363636], 0.19834710744: [0.7272727272727, 0.2727272727273], 0.99380165289: [0.2272727272727, 0.7727272727273], 
0.11725206612: [0.2954545454545, 0.2045454545455, 0.7954545454545, 0.7045454545455], 0.21900826446: [0.0909090909091, 0.9090909090909], 0.65289256198: [0.7272727272727, 0.2727272727273], 0.69628099174: [0.5909090909091, 0.4090909090909], 0.5847107438: [0.2272727272727, 0.7727272727273], 0.32386363636: [0.75, 0.25], 0.37603305785: [0.1818181818182, 0.8181818181818], 0.95196280992: [0.8409090909091, 0.3409090909091, 0.6590909090909, 0.1590909090909], 0.90702479339: [0.0454545454545, 0.9545454545455], 0.4132231405: [0.3636363636364, 0.6363636363636], 0.48966942149: [0.6818181818182, 0.3181818181818], 0.43130165289: [0.9772727272727, 0.4772727272727, 0.5227272727273, 0.0227272727273], 0.23140495868: [0.3636363636364, 0.6363636363636], 0.4979338843: [0.0454545454545, 0.9545454545455], 0.26239669422: [0.6818181818182, 0.3181818181818], 0.20816115703: [0.2954545454545, 0.2045454545455, 0.7045454545455, 0.7954545454545], 0.19421487603: [0.1818181818182, 0.8181818181818], 0.31198347107: [0.2272727272727, 0.7727272727273], 0.45196280992: [0.8409090909091, 0.3409090909091, 0.6590909090909, 0.1590909090909], 0.2701446281: [0.8409090909091, 0.3409090909091, 0.6590909090909, 0.1590909090909], 0.23708677686: [0.6136363636364, 0.1136363636364, 0.3863636363636, 0.8863636363636], 0.13429752066: [0.0454545454545, 0.9545454545455], 0.77272727273: [0.0], 0.06818181818: [0.5], 0.08832644628: [0.8409090909091, 0.3409090909091, 0.6590909090909, 0.1590909090909], 0.33626033058: [0.4318181818182, 0.5681818181818, 0.9318181818182, 0.0681818181818], 0.14876033058: [0.1818181818182, 0.8181818181818], 0.38584710744: [0.9772727272727, 0.4772727272727, 0.5227272727273, 0.0227272727273], 0.88016528926: [0.7272727272727, 0.2727272727273], 0.28512396694: [0.1818181818182, 0.8181818181818], 0.93130165289: [0.9772727272727, 0.4772727272727, 0.5227272727273, 0.0227272727273], 0.47675619835: [0.9772727272727, 0.4772727272727, 0.5227272727273, 0.0227272727273], 0.4354338843: [0.2954545454545, 
0.2045454545455, 0.7045454545455, 0.7954545454545], 0.29338842975: [0.4545454545455, 0.5454545454545], 0.47262396694: [0.4318181818182, 0.5681818181818, 0.9318181818182, 0.0681818181818], 0.94421487603: [0.6818181818182, 0.3181818181818], 0.3347107438: [0.7272727272727, 0.2727272727273], 0.49741735537: [0.8409090909091, 0.3409090909091, 0.6590909090909, 0.1590909090909], 0.92355371901: [0.5909090909091, 0.4090909090909], 0.15702479339: [0.4545454545455, 0.5454545454545], 0.38429752066: [0.4545454545455, 0.5454545454545], 0.93181818182: [0.5], 0.54545454546: [0.0], 0.15909090909: [0.5], 0.15444214876: [0.4318181818182, 0.5681818181818, 0.9318181818182, 0.0681818181818], 0.95247933884: [0.0454545454545, 0.9545454545455], 0.11776859504: [0.8636363636364, 0.1363636363636], 0.88636363636: [0.5], 0.27066115703: [0.0454545454545, 0.9545454545455], 0.89876033058: [0.6818181818182, 0.3181818181818], 0.87345041322: [0.6136363636364, 0.1136363636364, 0.3863636363636, 0.8863636363636], 0.64049586777: [0.3636363636364, 0.6363636363636], 0.86931818182: [0.75, 0.25], 0.73966942149: [0.1818181818182, 0.8181818181818], 0.69163223141: [0.6136363636364, 0.1136363636364, 0.3863636363636, 0.8863636363636], 0.19628099174: [0.5909090909091, 0.4090909090909], 0.34452479339: [0.2954545454545, 0.2045454545455, 0.7045454545455, 0.7954545454545], 0.40650826446: [0.8409090909091, 0.3409090909091, 0.6590909090909, 0.1590909090909], 0.97675619835: [0.9772727272727, 0.4772727272727, 0.5227272727273, 0.0227272727273], 0.02272727273: [0.5], 0.90289256198: [0.2272727272727, 0.7727272727273], 0.6632231405: [0.8636363636364, 0.1363636363636], 0.97520661157: [0.4545454545455, 0.5454545454545], 0.51239669422: [0.1818181818182, 0.8181818181818], 0.09504132231: [0.3636363636364, 0.6363636363636], 0.03512396694: [0.6818181818182, 0.3181818181818], 0.15082644628: [0.5909090909091, 0.4090909090909], 0.61157024793: [0.4545454545455, 0.5454545454545], 0.77685950413: [0.3636363636364, 0.6363636363636], 
0.17148760331: [0.6818181818182, 0.3181818181818], 0.78512396694: [0.1818181818182, 0.8181818181818], 0.55991735537: [0.5909090909091, 0.4090909090909], 0.22107438017: [0.2272727272727, 0.7727272727273], 0.18181818182: [0.0], 0.14204545455: [0.25, 0.75], 0.20247933884: [0.4545454545455, 0.5454545454545], 0.85330578512: [0.6818181818182, 0.3181818181818], 0.49173553719: [0.0909090909091, 0.9090909090909], 0.72469008265: [0.8409090909091, 0.3409090909091, 0.6590909090909, 0.1590909090909], 0.84090909091: [0.5], 0.05785123967: [0.1818181818182, 0.8181818181818], 0.02685950413: [0.8636363636364, 0.1363636363636], 0.84452479339: [0.2954545454545, 0.2045454545455, 0.7045454545455, 0.7954545454545], 0.54958677686: [0.3636363636364, 0.6363636363636], 0.70816115703: [0.7045454545455, 0.2045454545455, 0.7954545454545, 0.2954545454545], 0.55526859504: [0.6136363636364, 0.1136363636364, 0.3863636363636, 0.8863636363636], 0.9132231405: [0.3636363636364, 0.6363636363636], 0.37809917355: [0.5909090909091, 0.4090909090909], 0.73295454546: [0.75, 0.25], 0.56198347107: [0.7272727272727, 0.2727272727273], 0.85537190083: [0.0909090909091, 0.9090909090909], 0.74948347107: [0.9772727272727, 0.4772727272727, 0.5227272727273, 0.0227272727273], 0.65082644628: [0.5909090909091, 0.4090909090909], 0.57179752066: [0.7045454545455, 0.2045454545455, 0.7954545454545, 0.2954545454545], 0.11363636364: [0.5], 0.74535123967: [0.4318181818182, 0.5681818181818, 0.9318181818182, 0.0681818181818], 0.06611570248: [0.4545454545455, 0.5454545454545], 0.00413223141: [0.3636363636364, 0.6363636363636], 0.81559917355: [0.8409090909091, 0.3409090909091, 0.6590909090909, 0.1590909090909], 0.47107438017: [0.7272727272727, 0.2727272727273], 0.00568181818: [0.25, 0.75], 0.04545454546: [0.0], 0.63429752066: [0.0454545454545, 0.9545454545455], 0.64617768595: [0.6136363636364, 0.1136363636364, 0.3863636363636, 0.8863636363636], 0.99173553719: [0.0909090909091, 0.9090909090909], 0.58832644628: [0.8409090909091, 
0.3409090909091, 0.6590909090909, 0.1590909090909], 0.23966942149: [0.1818181818182, 0.8181818181818], 0.67923553719: [0.8409090909091, 0.3409090909091, 0.6590909090909, 0.1590909090909]}
averages_odd={0.0: [0.0], 0.1875: [0.25, 0.75], 0.06353305785: [0.4318181818182, 0.5681818181818, 0.9318181818182, 0.0681818181818], 0.5: [0.0], 0.29958677686: [0.8636363636364, 0.1363636363636], 0.92975206612: [0.4545454545455, 0.5454545454545], 0.82231404959: [0.3636363636364, 0.6363636363636], 0.6875: [0.75, 0.25], 0.06766528926: [0.9772727272727, 0.4772727272727, 0.5227272727273, 0.0227272727273], 0.52272727273: [0.5], 0.79958677686: [0.8636363636364, 0.1363636363636], 0.10743801653: [0.7272727272727, 0.2727272727273], 0.55785123967: [0.1818181818182, 0.8181818181818], 0.79338842975: [0.4545454545455, 0.5454545454545], 0.10898760331: [0.4318181818182, 0.5681818181818, 0.9318181818182, 0.0681818181818], 0.65857438017: [0.9772727272727, 0.4772727272727, 0.5227272727273, 0.0227272727273], 0.5826446281: [0.0909090909091, 0.9090909090909], 0.36931818182: [0.75, 0.25], 0.29545454546: [0.5], 0.97727272727: [0.5], 0.37345041322: [0.6136363636364, 0.1136363636364, 0.3863636363636, 0.8863636363636], 0.0805785124: [0.6818181818182, 0.3181818181818], 0.90909090909: [0.0], 0.74380165289: [0.7272727272727, 0.2727272727273], 0.25361570248: [0.2954545454545, 0.2045454545455, 0.7045454545455, 0.7954545454545], 0.65909090909: [0.5], 0.79545454546: [0.5], 0.82799586777: [0.6136363636364, 0.1136363636364, 0.3863636363636, 0.8863636363636], 0.10537190083: [0.5909090909091, 0.4090909090909], 0.02634297521: [0.7045454545455, 0.2045454545455, 0.7954545454545, 0.2954545454545], 0.16270661157: [0.2954545454545, 0.2045454545455, 0.7045454545455, 0.7954545454545], 0.64876033058: [0.1818181818182, 0.8181818181818], 0.99741735537: [0.8409090909091, 0.3409090909091, 0.6590909090909, 0.1590909090909], 0.91890495868: [0.6136363636364, 0.1136363636364, 0.3863636363636, 0.8863636363636], 0.75: [0.5], 0.51652892562: [0.7272727272727, 0.2727272727273], 0.27840909091: [0.25, 0.75], 0.30785123967: [0.6818181818182, 0.3181818181818], 0.59504132231: [0.3636363636364, 0.6363636363636], 0.28254132231: 
[0.6136363636364, 0.1136363636364, 0.3863636363636, 0.8863636363636], 0.58884297521: [0.0454545454545, 0.9545454545455], 0.29493801653: [0.9772727272727, 0.4772727272727, 0.5227272727273, 0.0227272727273], 0.90082644628: [0.0909090909091, 0.9090909090909], 0.88997933884: [0.7045454545455, 0.2045454545455, 0.7954545454545, 0.2954545454545], 0.36157024793: [0.0454545454545, 0.9545454545455], 0.24535123967: [0.4318181818182, 0.5681818181818, 0.9318181818182, 0.0681818181818], 0.44421487603: [0.6818181818182, 0.3181818181818], 0.92716942149: [0.4318181818182, 0.5681818181818, 0.9318181818182, 0.0681818181818], 0.49380165289: [0.2272727272727, 0.7727272727273], 0.21694214876: [0.6818181818182, 0.3181818181818], 0.48088842975: [0.2954545454545, 0.2045454545455, 0.7045454545455, 0.7954545454545], 0.09659090909: [0.25, 0.75], 0.02066115703: [0.4545454545455, 0.5454545454545], 0.68595041322: [0.3636363636364, 0.6363636363636], 0.50568181818: [0.75, 0.25], 0.78925619835: [0.7272727272727, 0.2727272727273], 0.38016528926: [0.7272727272727, 0.2727272727273], 0.89049586777: [0.8636363636364, 0.1363636363636], 0.42975206612: [0.4545454545455, 0.5454545454545], 0.40702479339: [0.0454545454545, 0.9545454545455], 0.86776859504: [0.3636363636364, 0.6363636363636], 0.57231404959: [0.8636363636364, 0.1363636363636], 0.70867768595: [0.8636363636364, 0.1363636363636], 0.5805785124: [0.6818181818182, 0.3181818181818], 0.09090909091: [0.0], 0.81818181818: [0.0], 0.12603305785: [0.6818181818182, 0.3181818181818], 0.80785123967: [0.6818181818182, 0.3181818181818], 0.31611570248: [0.0454545454545, 0.9545454545455], 0.32799586777: [0.6136363636364, 0.1136363636364, 0.3863636363636, 0.8863636363636], 0.61363636364: [0.5], 0.79907024793: [0.7045454545455, 0.2045454545455, 0.7954545454545, 0.2954545454545], 0.43595041322: [0.8636363636364, 0.1363636363636], 0.73140495868: [0.3636363636364, 0.6363636363636], 0.24380165289: [0.7272727272727, 0.2727272727273], 0.00981404959: [0.6136363636364, 
0.1136363636364, 0.3863636363636, 0.8863636363636], 0.56766528926: [0.9772727272727, 0.4772727272727, 0.5227272727273, 0.0227272727273], 0.0222107438: [0.9772727272727, 0.4772727272727, 0.5227272727273, 0.0227272727273], 0.8305785124: [0.1818181818182, 0.8181818181818], 0.36363636364: [0.0], 0.67975206612: [0.0454545454545, 0.9545454545455], 0.6007231405: [0.6136363636364, 0.1136363636364, 0.3863636363636, 0.8863636363636], 0.50413223141: [0.3636363636364, 0.6363636363636], 0.9979338843: [0.0454545454545, 0.9545454545455], 0.61725206612: [0.2954545454545, 0.2045454545455, 0.7045454545455, 0.7954545454545], 0.60330578512: [0.1818181818182, 0.8181818181818], 0.72727272727: [0.0], 0.94834710744: [0.2272727272727, 0.7727272727273], 0.23295454546: [0.25, 0.75], 0.70247933884: [0.4545454545455, 0.5454545454545], 0.73708677686: [0.6136363636364, 0.1136363636364, 0.3863636363636, 0.8863636363636], 0.48140495868: [0.8636363636364, 0.1363636363636], 0.40082644628: [0.0909090909091, 0.9090909090909], 0.72107438017: [0.2272727272727, 0.7727272727273], 0.56353305785: [0.4318181818182, 0.5681818181818, 0.9318181818182, 0.0681818181818], 0.1632231405: [0.8636363636364, 0.1363636363636], 0.40289256198: [0.2272727272727, 0.7727272727273], 0.05991735537: [0.5909090909091, 0.4090909090909], 0.72520661157: [0.0454545454545, 0.9545454545455], 0.17355371901: [0.0909090909091, 0.9090909090909], 0.75413223141: [0.8636363636364, 0.1363636363636], 0.45454545455: [0.0], 0.07179752066: [0.7045454545455, 0.2045454545455, 0.7954545454545, 0.2954545454545], 0.12809917355: [0.0909090909091, 0.9090909090909], 0.13016528926: [0.2272727272727, 0.7727272727273], 0.90650826446: [0.8409090909091, 0.3409090909091, 0.6590909090909, 0.1590909090909], 0.67355371901: [0.0909090909091, 0.9090909090909], 0.05113636364: [0.25, 0.75], 0.24948347107: [0.9772727272727, 0.4772727272727, 0.5227272727273, 0.0227272727273], 0.19163223141: [0.6136363636364, 0.1136363636364, 0.3863636363636, 0.8863636363636], 
0.46435950413: [0.6136363636364, 0.1136363636364, 0.3863636363636, 0.8863636363636], 0.36105371901: [0.8409090909091, 0.3409090909091, 0.6590909090909, 0.1590909090909], 0.81198347107: [0.2272727272727, 0.7727272727273], 0.03719008265: [0.0909090909091, 0.9090909090909], 0.77840909091: [0.75, 0.25], 0.91477272727: [0.75, 0.25], 0.63378099174: [0.8409090909091, 0.3409090909091, 0.6590909090909, 0.1590909090909], 0.13636363636: [0.0], 0.78254132231: [0.6136363636364, 0.1136363636364, 0.3863636363636, 0.8863636363636], 0.63636363636: [0.0], 0.30991735537: [0.0909090909091, 0.9090909090909], 0.86157024793: [0.0454545454545, 0.9545454545455], 0.95867768595: [0.3636363636364, 0.6363636363636], 0.17975206612: [0.0454545454545, 0.9545454545455], 0.56818181818: [0.5], 0.32231404959: [0.3636363636364, 0.6363636363636], 0.84039256198: [0.9772727272727, 0.4772727272727, 0.5227272727273, 0.0227272727273], 0.45867768595: [0.3636363636364, 0.6363636363636], 0.3305785124: [0.1818181818182, 0.8181818181818], 0.06198347107: [0.7272727272727, 0.2727272727273], 0.01446280992: [0.5909090909091, 0.4090909090909], 0.59090909091: [0.0], 0.04287190083: [0.8409090909091, 0.3409090909091, 0.6590909090909, 0.1590909090909], 0.65444214876: [0.4318181818182, 0.5681818181818, 0.9318181818182, 0.0681818181818], 0.04338842975: [0.0454545454545, 0.9545454545455], 0.34090909091: [0.5], 0.34504132231: [0.8636363636364, 0.1363636363636], 0.17561983471: [0.2272727272727, 0.7727272727273], 0.53925619835: [0.2272727272727, 0.7727272727273], 0.88429752066: [0.4545454545455, 0.5454545454545], 0.55113636364: [0.75, 0.25], 0.43181818182: [0.5], 0.56611570248: [0.4545454545455, 0.5454545454545], 0.28925619835: [0.7272727272727, 0.2727272727273], 0.18595041322: [0.3636363636364, 0.6363636363636], 0.41890495868: [0.6136363636364, 0.1136363636364, 0.3863636363636, 0.8863636363636], 0.62809917355: [0.0909090909091, 0.9090909090909], 0.26446280992: [0.0909090909091, 0.9090909090909], 0.22727272727: [0.0], 
0.14049586777: [0.3636363636364, 0.6363636363636], 0.7701446281: [0.8409090909091, 0.3409090909091, 0.6590909090909, 0.1590909090909], 0.61776859504: [0.8636363636364, 0.1363636363636], 0.2479338843: [0.4545454545455, 0.5454545454545], 0.0826446281: [0.0909090909091, 0.9090909090909], 0.13378099174: [0.8409090909091, 0.3409090909091, 0.6590909090909, 0.1590909090909], 0.36776859504: [0.3636363636364, 0.6363636363636], 0.04958677686: [0.3636363636364, 0.6363636363636], 0.82386363636: [0.75, 0.25], 0.8347107438: [0.7272727272727, 0.2727272727273], 0.69989669422: [0.4318181818182, 0.5681818181818, 0.9318181818182, 0.0681818181818], 0.87603305785: [0.1818181818182, 0.8181818181818], 0.77066115703: [0.0454545454545, 0.9545454545455], 0.75361570248: [0.7045454545455, 0.2045454545455, 0.7954545454545, 0.2954545454545], 0.46694214876: [0.1818181818182, 0.8181818181818], 0.10330578512: [0.1818181818182, 0.8181818181818], 0.88584710744: [0.9772727272727, 0.4772727272727, 0.5227272727273, 0.0227272727273], 0.14617768595: [0.6136363636364, 0.1136363636364, 0.3863636363636, 0.8863636363636], 0.26652892562: [0.2272727272727, 0.7727272727273], 0.42148760331: [0.1818181818182, 0.8181818181818], 0.46022727273: [0.75, 0.25], 0.60743801653: [0.7272727272727, 0.2727272727273], 0.41477272727: [0.25, 0.75], 0.35330578512: [0.6818181818182, 0.3181818181818], 0.62603305785: [0.6818181818182, 0.3181818181818], 0.15857438017: [0.9772727272727, 0.4772727272727, 0.5227272727273, 0.0227272727273], 0.34039256198: [0.9772727272727, 0.4772727272727, 0.5227272727273, 0.0227272727273], 0.95454545455: [0.0], 0.44628099174: [0.0909090909091, 0.9090909090909], 0.96435950413: [0.6136363636364, 0.1136363636364, 0.3863636363636, 0.8863636363636], 0.11311983471: [0.9772727272727, 0.4772727272727, 0.5227272727273, 0.0227272727273], 0.03925619835: [0.2272727272727, 0.7727272727273], 0.42716942149: [0.4318181818182, 0.5681818181818, 0.9318181818182, 0.0681818181818], 0.74173553719: [0.5909090909091, 
0.4090909090909], 0.76239669422: [0.6818181818182, 0.3181818181818], 0.61311983471: [0.9772727272727, 0.4772727272727, 0.5227272727273, 0.0227272727273], 0.07231404959: [0.8636363636364, 0.1363636363636], 0.60898760331: [0.4318181818182, 0.5681818181818, 0.9318181818182, 0.0681818181818], 0.01652892562: [0.7272727272727, 0.2727272727273], 0.0847107438: [0.2272727272727, 0.7727272727273], 0.33884297521: [0.4545454545455, 0.5454545454545], 0.52685950413: [0.8636363636364, 0.1363636363636], 0.17923553719: [0.8409090909091, 0.3409090909091, 0.6590909090909, 0.1590909090909], 0.66270661157: [0.7045454545455, 0.2045454545455, 0.7954545454545, 0.2954545454545], 0.42561983471: [0.7272727272727, 0.2727272727273], 0.85743801653: [0.2272727272727, 0.7727272727273], 0.47520661157: [0.4545454545455, 0.5454545454545], 0.15289256198: [0.7272727272727, 0.2727272727273], 0.67148760331: [0.6818181818182, 0.3181818181818], 0.47727272727: [0.5], 0.24173553719: [0.5909090909091, 0.4090909090909], 0.25: [0.5], 0.19989669422: [0.4318181818182, 0.5681818181818, 0.9318181818182, 0.0681818181818], 0.70402892562: [0.9772727272727, 0.4772727272727, 0.5227272727273, 0.0227272727273], 0.22469008265: [0.8409090909091, 0.3409090909091, 0.6590909090909, 0.1590909090909], 0.78719008265: [0.5909090909091, 0.4090909090909], 0.20454545455: [0.5], 0.29907024793: [0.2954545454545, 0.2045454545455, 0.7045454545455, 0.7954545454545], 0.93595041322: [0.8636363636364, 0.1363636363636], 0.50981404959: [0.6136363636364, 0.1136363636364, 0.3863636363636, 0.8863636363636], 0.20402892562: [0.9772727272727, 0.4772727272727, 0.5227272727273, 0.0227272727273], 0.96694214876: [0.1818181818182, 0.8181818181818], 0.22520661157: [0.0454545454545, 0.9545454545455], 0.28719008265: [0.5909090909091, 0.4090909090909], 0.52066115703: [0.4545454545455, 0.5454545454545], 0.84504132231: [0.8636363636364, 0.1363636363636], 0.79493801653: [0.9772727272727, 0.4772727272727, 0.5227272727273, 0.0227272727273], 0.69421487603: 
[0.1818181818182, 0.8181818181818], 0.88171487603: [0.4318181818182, 0.5681818181818, 0.9318181818182, 0.0681818181818], 0.27272727273: [0.0], 0.92561983471: [0.7272727272727, 0.2727272727273], 0.97262396694: [0.4318181818182, 0.5681818181818, 0.9318181818182, 0.0681818181818], 0.54338842975: [0.0454545454545, 0.9545454545455], 0.01239669422: [0.1818181818182, 0.8181818181818], 0.76652892562: [0.2272727272727, 0.7727272727273], 0.11157024793: [0.4545454545455, 0.5454545454545], 0.05526859504: [0.6136363636364, 0.1136363636364, 0.3863636363636, 0.8863636363636], 0.65702479339: [0.4545454545455, 0.5454545454545], 0.71694214876: [0.6818181818182, 0.3181818181818], 0.38636363636: [0.5], 0.86105371901: [0.8409090909091, 0.3409090909091, 0.6590909090909, 0.1590909090909], 0.0180785124: [0.4318181818182, 0.5681818181818, 0.9318181818182, 0.0681818181818], 0.81611570248: [0.0454545454545, 0.9545454545455], 0.94628099174: [0.0909090909091, 0.9090909090909], 0.3326446281: [0.5909090909091, 0.4090909090909], 0.69834710744: [0.7272727272727, 0.2727272727273], 0.29080578512: [0.4318181818182, 0.5681818181818, 0.9318181818182, 0.0681818181818], 0.51446280992: [0.5909090909091, 0.4090909090909], 0.35743801653: [0.2272727272727, 0.7727272727273], 0.83884297521: [0.4545454545455, 0.5454545454545], 0.5180785124: [0.4318181818182, 0.5681818181818, 0.9318181818182, 0.0681818181818], 0.64204545455: [0.75, 0.25], 0.46900826446: [0.5909090909091, 0.4090909090909], 0.98966942149: [0.6818181818182, 0.3181818181818], 0.7479338843: [0.4545454545455, 0.5454545454545], 0.42355371901: [0.5909090909091, 0.4090909090909], 0.53719008265: [0.0909090909091, 0.9090909090909], 0.31559917355: [0.8409090909091, 0.3409090909091, 0.6590909090909, 0.1590909090909], 0.76446280992: [0.0909090909091, 0.9090909090909], 0.31818181818: [0.0], 0.79080578512: [0.4318181818182, 0.5681818181818, 0.9318181818182, 0.0681818181818], 0.35537190083: [0.0909090909091, 0.9090909090909], 0.08884297521: [0.0454545454545, 
0.9545454545455], 0.87809917355: [0.5909090909091, 0.4090909090909], 0.20867768595: [0.8636363636364, 0.1363636363636], 0.39876033058: [0.6818181818182, 0.3181818181818], 0.67561983471: [0.2272727272727, 0.7727272727273], 0.92148760331: [0.1818181818182, 0.8181818181818], 0.80991735537: [0.0909090909091, 0.9090909090909], 0.1007231405: [0.6136363636364, 0.1136363636364, 0.3863636363636, 0.8863636363636], 0.40909090909: [0.0], 0.54287190083: [0.8409090909091, 0.3409090909091, 0.6590909090909, 0.1590909090909], 0.9354338843: [0.7045454545455, 0.2045454545455, 0.7954545454545, 0.2954545454545], 0.59659090909: [0.75, 0.25], 0.25413223141: [0.8636363636364, 0.1363636363636], 0.52634297521: [0.7045454545455, 0.2045454545455, 0.7954545454545, 0.2954545454545], 0.96022727273: [0.75, 0.25], 0.8326446281: [0.5909090909091, 0.4090909090909], 0.71900826446: [0.0909090909091, 0.9090909090909], 0.60537190083: [0.5909090909091, 0.4090909090909], 0.5222107438: [0.9772727272727, 0.4772727272727, 0.5227272727273, 0.0227272727273], 0.68181818182: [0.0], 0.96900826446: [0.5909090909091, 0.4090909090909], 0.53512396694: [0.6818181818182, 0.3181818181818], 0.39049586777: [0.8636363636364, 0.1363636363636], 0.38171487603: [0.4318181818182, 0.5681818181818, 0.9318181818182, 0.0681818181818], 0.86363636364: [0.0], 0.44834710744: [0.2272727272727, 0.7727272727273], 0.83626033058: [0.4318181818182, 0.5681818181818, 0.9318181818182, 0.0681818181818], 0.63016528926: [0.2272727272727, 0.7727272727273], 0.38997933884: [0.7045454545455, 0.2045454545455, 0.7954545454545, 0.2954545454545], 0.98140495868: [0.8636363636364, 0.1363636363636], 0.70454545455: [0.5], 0.97107438017: [0.7272727272727, 0.2727272727273], 0.45247933884: [0.0454545454545, 0.9545454545455], 0.98088842975: [0.7045454545455, 0.2045454545455, 0.7954545454545, 0.2954545454545], 0.27685950413: [0.3636363636364, 0.6363636363636], 0.19834710744: [0.7272727272727, 0.2727272727273], 0.99380165289: [0.2272727272727, 0.7727272727273], 
0.11725206612: [0.2954545454545, 0.2045454545455, 0.7954545454545, 0.7045454545455], 0.21900826446: [0.0909090909091, 0.9090909090909], 0.65289256198: [0.7272727272727, 0.2727272727273], 0.69628099174: [0.5909090909091, 0.4090909090909], 0.5847107438: [0.2272727272727, 0.7727272727273], 0.32386363636: [0.75, 0.25], 0.37603305785: [0.1818181818182, 0.8181818181818], 0.95196280992: [0.8409090909091, 0.3409090909091, 0.6590909090909, 0.1590909090909], 0.90702479339: [0.0454545454545, 0.9545454545455], 0.4132231405: [0.3636363636364, 0.6363636363636], 0.48966942149: [0.6818181818182, 0.3181818181818], 0.43130165289: [0.9772727272727, 0.4772727272727, 0.5227272727273, 0.0227272727273], 0.23140495868: [0.3636363636364, 0.6363636363636], 0.4979338843: [0.0454545454545, 0.9545454545455], 0.26239669422: [0.6818181818182, 0.3181818181818], 0.20816115703: [0.2954545454545, 0.2045454545455, 0.7045454545455, 0.7954545454545], 0.19421487603: [0.1818181818182, 0.8181818181818], 0.31198347107: [0.2272727272727, 0.7727272727273], 0.45196280992: [0.8409090909091, 0.3409090909091, 0.6590909090909, 0.1590909090909], 0.2701446281: [0.8409090909091, 0.3409090909091, 0.6590909090909, 0.1590909090909], 0.23708677686: [0.6136363636364, 0.1136363636364, 0.3863636363636, 0.8863636363636], 0.13429752066: [0.0454545454545, 0.9545454545455], 0.77272727273: [0.0], 0.06818181818: [0.5], 0.08832644628: [0.8409090909091, 0.3409090909091, 0.6590909090909, 0.1590909090909], 0.33626033058: [0.4318181818182, 0.5681818181818, 0.9318181818182, 0.0681818181818], 0.14876033058: [0.1818181818182, 0.8181818181818], 0.38584710744: [0.9772727272727, 0.4772727272727, 0.5227272727273, 0.0227272727273], 0.88016528926: [0.7272727272727, 0.2727272727273], 0.28512396694: [0.1818181818182, 0.8181818181818], 0.93130165289: [0.9772727272727, 0.4772727272727, 0.5227272727273, 0.0227272727273], 0.47675619835: [0.9772727272727, 0.4772727272727, 0.5227272727273, 0.0227272727273], 0.4354338843: [0.2954545454545, 
0.2045454545455, 0.7045454545455, 0.7954545454545], 0.29338842975: [0.4545454545455, 0.5454545454545], 0.47262396694: [0.4318181818182, 0.5681818181818, 0.9318181818182, 0.0681818181818], 0.94421487603: [0.6818181818182, 0.3181818181818], 0.3347107438: [0.7272727272727, 0.2727272727273], 0.49741735537: [0.8409090909091, 0.3409090909091, 0.6590909090909, 0.1590909090909], 0.92355371901: [0.5909090909091, 0.4090909090909], 0.15702479339: [0.4545454545455, 0.5454545454545], 0.38429752066: [0.4545454545455, 0.5454545454545], 0.93181818182: [0.5], 0.54545454546: [0.0], 0.15909090909: [0.5], 0.15444214876: [0.4318181818182, 0.5681818181818, 0.9318181818182, 0.0681818181818], 0.95247933884: [0.0454545454545, 0.9545454545455], 0.11776859504: [0.8636363636364, 0.1363636363636], 0.88636363636: [0.5], 0.27066115703: [0.0454545454545, 0.9545454545455], 0.89876033058: [0.6818181818182, 0.3181818181818], 0.87345041322: [0.6136363636364, 0.1136363636364, 0.3863636363636, 0.8863636363636], 0.64049586777: [0.3636363636364, 0.6363636363636], 0.86931818182: [0.75, 0.25], 0.73966942149: [0.1818181818182, 0.8181818181818], 0.69163223141: [0.6136363636364, 0.1136363636364, 0.3863636363636, 0.8863636363636], 0.19628099174: [0.5909090909091, 0.4090909090909], 0.34452479339: [0.2954545454545, 0.2045454545455, 0.7045454545455, 0.7954545454545], 0.40650826446: [0.8409090909091, 0.3409090909091, 0.6590909090909, 0.1590909090909], 0.97675619835: [0.9772727272727, 0.4772727272727, 0.5227272727273, 0.0227272727273], 0.02272727273: [0.5], 0.90289256198: [0.2272727272727, 0.7727272727273], 0.6632231405: [0.8636363636364, 0.1363636363636], 0.97520661157: [0.4545454545455, 0.5454545454545], 0.51239669422: [0.1818181818182, 0.8181818181818], 0.09504132231: [0.3636363636364, 0.6363636363636], 0.03512396694: [0.6818181818182, 0.3181818181818], 0.15082644628: [0.5909090909091, 0.4090909090909], 0.61157024793: [0.4545454545455, 0.5454545454545], 0.77685950413: [0.3636363636364, 0.6363636363636], 
0.17148760331: [0.6818181818182, 0.3181818181818], 0.78512396694: [0.1818181818182, 0.8181818181818], 0.55991735537: [0.5909090909091, 0.4090909090909], 0.22107438017: [0.2272727272727, 0.7727272727273], 0.18181818182: [0.0], 0.14204545455: [0.25, 0.75], 0.20247933884: [0.4545454545455, 0.5454545454545], 0.85330578512: [0.6818181818182, 0.3181818181818], 0.49173553719: [0.0909090909091, 0.9090909090909], 0.72469008265: [0.8409090909091, 0.3409090909091, 0.6590909090909, 0.1590909090909], 0.84090909091: [0.5], 0.05785123967: [0.1818181818182, 0.8181818181818], 0.02685950413: [0.8636363636364, 0.1363636363636], 0.84452479339: [0.2954545454545, 0.2045454545455, 0.7045454545455, 0.7954545454545], 0.54958677686: [0.3636363636364, 0.6363636363636], 0.70816115703: [0.7045454545455, 0.2045454545455, 0.7954545454545, 0.2954545454545], 0.55526859504: [0.6136363636364, 0.1136363636364, 0.3863636363636, 0.8863636363636], 0.9132231405: [0.3636363636364, 0.6363636363636], 0.37809917355: [0.5909090909091, 0.4090909090909], 0.73295454546: [0.75, 0.25], 0.56198347107: [0.7272727272727, 0.2727272727273], 0.85537190083: [0.0909090909091, 0.9090909090909], 0.74948347107: [0.9772727272727, 0.4772727272727, 0.5227272727273, 0.0227272727273], 0.65082644628: [0.5909090909091, 0.4090909090909], 0.57179752066: [0.7045454545455, 0.2045454545455, 0.7954545454545, 0.2954545454545], 0.11363636364: [0.5], 0.74535123967: [0.4318181818182, 0.5681818181818, 0.9318181818182, 0.0681818181818], 0.06611570248: [0.4545454545455, 0.5454545454545], 0.00413223141: [0.3636363636364, 0.6363636363636], 0.81559917355: [0.8409090909091, 0.3409090909091, 0.6590909090909, 0.1590909090909], 0.47107438017: [0.7272727272727, 0.2727272727273], 0.00568181818: [0.25, 0.75], 0.04545454546: [0.0], 0.63429752066: [0.0454545454545, 0.9545454545455], 0.64617768595: [0.6136363636364, 0.1136363636364, 0.3863636363636, 0.8863636363636], 0.99173553719: [0.0909090909091, 0.9090909090909], 0.58832644628: [0.8409090909091, 
0.3409090909091, 0.6590909090909, 0.1590909090909], 0.23966942149: [0.1818181818182, 0.8181818181818], 0.67923553719: [0.8409090909091, 0.3409090909091, 0.6590909090909, 0.1590909090909]}
| 12,286.6
| 22,108
| 0.785229
| 7,744
| 61,433
| 6.228564
| 0.096591
| 0.004064
| 0.003172
| 0.025542
| 0.90828
| 0.907244
| 0.907244
| 0.907244
| 0.907244
| 0.907244
| 0
| 0.83692
| 0.06293
| 61,433
| 5
| 22,109
| 12,286.6
| 0.000955
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 11
|
b91030ddd4deab1c6a9f15b7e2be59bbef26c77c
| 147
|
py
|
Python
|
examples/docs_snippets_crag/docs_snippets_crag_tests/concepts_tests/io_management_tests/test_config_input_manager.py
|
dagster-io/dagster
|
5b834cee42670307157e9c6a4193a7fda1437841
|
[
"Apache-2.0"
] | 4,606
|
2018-06-21T17:45:20.000Z
|
2022-03-31T23:39:42.000Z
|
examples/docs_snippets_crag/docs_snippets_crag_tests/concepts_tests/io_management_tests/test_config_input_manager.py
|
dagster-io/dagster
|
5b834cee42670307157e9c6a4193a7fda1437841
|
[
"Apache-2.0"
] | 6,221
|
2018-06-12T04:36:01.000Z
|
2022-03-31T21:43:05.000Z
|
examples/docs_snippets_crag/docs_snippets_crag_tests/concepts_tests/io_management_tests/test_config_input_manager.py
|
dagster-io/dagster
|
5b834cee42670307157e9c6a4193a7fda1437841
|
[
"Apache-2.0"
] | 619
|
2018-08-22T22:43:09.000Z
|
2022-03-31T22:48:06.000Z
|
from docs_snippets_crag.concepts.io_management.config_input_manager import execute_with_config
def test_execute_job():
    """Smoke test: the documented config-driven job runs without raising."""
    execute_with_config()
| 24.5
| 94
| 0.857143
| 21
| 147
| 5.47619
| 0.761905
| 0.191304
| 0.295652
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.088435
| 147
| 5
| 95
| 29.4
| 0.858209
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
f8eddb78a8bacaa21cfb4f8e7efd0fa4e1f99373
| 9,568
|
py
|
Python
|
film/serializer.py
|
jihuncho7/Film_ex
|
625929d59a0726d15ea22b43f4ad6d8c95ba6e8f
|
[
"MIT"
] | null | null | null |
film/serializer.py
|
jihuncho7/Film_ex
|
625929d59a0726d15ea22b43f4ad6d8c95ba6e8f
|
[
"MIT"
] | null | null | null |
film/serializer.py
|
jihuncho7/Film_ex
|
625929d59a0726d15ea22b43f4ad6d8c95ba6e8f
|
[
"MIT"
] | 1
|
2021-07-14T14:30:23.000Z
|
2021-07-14T14:30:23.000Z
|
from .models import *
from rest_framework import serializers
from .serializer_comments import *
read_only_fields_global = (['author'])
class FilmSerializer(serializers.ModelSerializer):
    """Serializer for Film review posts, exposing all model fields.

    Supports the DRF "dynamic fields" pattern: pass ``fields=[...]`` to the
    constructor to restrict which fields are serialized.
    """

    rate_show = serializers.SerializerMethodField()
    author_username = serializers.ReadOnlyField(source='author.username')
    postfrom = serializers.SerializerMethodField()

    def get_postfrom(self, obj):
        # Korean label ("movie review") identifying the post's origin board.
        return '영화리뷰'

    def get_rate_show(self, instance):
        return instance.get_rate()

    class Meta:
        model = Film
        fields = '__all__'
        read_only_fields = read_only_fields_global

    # Custom hook so views can restrict which fields get serialized.
    def __init__(self, *args, **kwargs):
        # Don't pass the 'fields' arg up to the superclass.
        fields = kwargs.pop('fields', None)
        super().__init__(*args, **kwargs)
        if fields is not None:
            # Drop any fields that are not named in the `fields` argument.
            for field_name in set(self.fields) - set(fields):
                self.fields.pop(field_name)
class FreeBoardSerializer(serializers.ModelSerializer):
    """Full serializer for free-board posts, including comments and like state.

    Note: requires ``'request'`` in the serializer context (used by
    ``get_is_like_user``). Supports the DRF "dynamic fields" pattern via a
    ``fields=[...]`` constructor argument.
    """

    get_likes = serializers.SerializerMethodField()
    tag_set = serializers.SerializerMethodField()
    author_username = serializers.ReadOnlyField(source='author.username')
    is_like_user = serializers.SerializerMethodField()
    CommentFreeBoard = CommentFreeBoardSerializer(many=True, read_only=True)
    postfrom = serializers.SerializerMethodField()

    def get_postfrom(self, obj):
        # Korean label ("free board") identifying the post's origin board.
        return '자유게시판'

    # Double "get_" is intentional: DRF resolves the `get_likes` field
    # through a method named get_<field_name>.
    def get_get_likes(self, obj):
        return obj.get_likes()

    def get_tag_set(self, obj):
        return obj.extract_tag_list()

    def get_is_like_user(self, instance):
        # Whether the requesting user has liked this post.
        return instance.is_like_user(self.context['request'].user)

    class Meta:
        model = FreeBoard
        fields = ('id', 'hit', 'author_username', 'get_likes', 'created_at',
                  'updated_at', 'title', 'context', 'image', 'category',
                  'tag_set', 'is_like_user', 'like_user_set',
                  'CommentFreeBoard', 'postfrom',
                  )
        read_only_fields = read_only_fields_global

    # Custom hook so views can restrict which fields get serialized.
    def __init__(self, *args, **kwargs):
        # Don't pass the 'fields' arg up to the superclass.
        fields = kwargs.pop('fields', None)
        super().__init__(*args, **kwargs)
        if fields is not None:
            # Drop any fields that are not named in the `fields` argument.
            for field_name in set(self.fields) - set(fields):
                self.fields.pop(field_name)
class FreeBoard_SubSerializer(serializers.ModelSerializer):
    """Compact FreeBoard serializer (no comments/likes) for list views."""

    postfrom = serializers.SerializerMethodField()
    author_username = serializers.ReadOnlyField(source='author.username')

    def get_postfrom(self, obj):
        # Korean label ("free board") identifying the post's origin board.
        return '자유게시판'

    class Meta:
        model = FreeBoard
        fields = ('id', 'hit', 'created_at', 'author_username',
                  'updated_at', 'title', 'context', 'image', 'category',
                  'tag_set', 'postfrom',
                  )
        read_only_fields = read_only_fields_global
class HirePostStaffSerializer(serializers.ModelSerializer):
    """Serializer for staff recruitment posts, with per-user like/apply state.

    Note: requires ``'request'`` in the serializer context. Supports the DRF
    "dynamic fields" pattern via a ``fields=[...]`` constructor argument.
    """

    author_username = serializers.ReadOnlyField(source='author.username')
    tag_set = serializers.SerializerMethodField()
    postfrom = serializers.SerializerMethodField()
    is_like_user = serializers.SerializerMethodField()
    is_applied_user = serializers.SerializerMethodField()

    def get_is_applied_user(self, instance):
        # Whether the requesting user already applied to this posting.
        return instance.is_applied_user(self.context['request'].user)

    def get_is_like_user(self, instance):
        # Whether the requesting user has liked this posting.
        return instance.is_like_user(self.context['request'].user)

    def get_postfrom(self, obj):
        # Korean label ("staff recruitment") identifying the post's origin.
        return '스탭 구인'

    def get_tag_set(self, obj):
        return obj.extract_tag_list()

    class Meta:
        model = HirePostStaff
        fields = ('id', 'hit', 'author_username', 'thumbs', 'created_at',
                  'updated_at', 'title', 'context', 'image', 'category',
                  'tag_set', 'like_user_set', 'payment', 'requirement', 'advantage',
                  'job_loca', 'company', 'company_loca', 'company_desc', 'deadline',
                  'company_url', 'job_position', 'postfrom', 'is_like_user',
                  'is_applied_user',
                  )
        read_only_fields = read_only_fields_global

    # Custom hook so views can restrict which fields get serialized.
    def __init__(self, *args, **kwargs):
        # Don't pass the 'fields' arg up to the superclass.
        fields = kwargs.pop('fields', None)
        super().__init__(*args, **kwargs)
        if fields is not None:
            # Drop any fields that are not named in the `fields` argument.
            for field_name in set(self.fields) - set(fields):
                self.fields.pop(field_name)
class HirePostActorSerializer(serializers.ModelSerializer):
    """Serializer for actor recruitment posts, with per-user like state.

    Note: requires ``'request'`` in the serializer context. Supports the DRF
    "dynamic fields" pattern via a ``fields=[...]`` constructor argument.
    """

    author_username = serializers.ReadOnlyField(source='author.username')
    tag_set = serializers.SerializerMethodField()
    postfrom = serializers.SerializerMethodField()
    is_like_user = serializers.SerializerMethodField()

    def get_is_like_user(self, instance):
        # Whether the requesting user has liked this posting.
        return instance.is_like_user(self.context['request'].user)

    def get_postfrom(self, obj):
        # Korean label ("actor recruitment") identifying the post's origin.
        return '액터 구인'

    def get_tag_set(self, obj):
        return obj.extract_tag_list()

    class Meta:
        model = HirePostActor
        fields = ('id', 'hit', 'author_username', 'thumbs', 'created_at',
                  'updated_at', 'title', 'context', 'image', 'category',
                  'tag_set', 'like_user_set', 'payment', 'requirement', 'advantage',
                  'job_loca', 'company', 'company_loca', 'company_desc', 'deadline',
                  'company_url', 'job_position', 'postfrom', 'is_like_user',
                  )
        read_only_fields = read_only_fields_global

    # Custom hook so views can restrict which fields get serialized.
    def __init__(self, *args, **kwargs):
        # Don't pass the 'fields' arg up to the superclass.
        fields = kwargs.pop('fields', None)
        super().__init__(*args, **kwargs)
        if fields is not None:
            # Drop any fields that are not named in the `fields` argument.
            for field_name in set(self.fields) - set(fields):
                self.fields.pop(field_name)
class ResumeStaffSerializer(serializers.ModelSerializer):
    """Serializes ResumeStaff posts; exposes every model field."""

    author_username = serializers.ReadOnlyField(source='author.username')
    postfrom = serializers.SerializerMethodField()

    def get_postfrom(self, obj):
        # Korean label ("staff resume") identifying the post's origin.
        return '스탭 이력서'

    class Meta:
        model = ResumeStaff
        fields = '__all__'
        read_only_fields = read_only_fields_global
class ResumeActorSerializer(serializers.ModelSerializer):
    """Serializes ResumeActor posts; exposes every model field."""

    author_username = serializers.ReadOnlyField(source='author.username')
    postfrom = serializers.SerializerMethodField()

    def get_postfrom(self, obj):
        # Korean label ("actor resume") identifying the post's origin.
        return '액터 이력서'

    class Meta:
        model = ResumeActor
        fields = '__all__'
        read_only_fields = read_only_fields_global
class QnASerializer(serializers.ModelSerializer):
    """Serializes QnA posts; exposes every model field."""

    author_username = serializers.ReadOnlyField(source='author.username')

    class Meta:
        model = QnA
        fields = '__all__'
        read_only_fields = read_only_fields_global
class MyHirePostStaffSerializer(serializers.ModelSerializer):
    """HirePostStaff serializer variant without like/application state.

    Unlike HirePostStaffSerializer it needs no request context, so it is
    usable where no authenticated user is available.
    """

    author_username = serializers.ReadOnlyField(source='author.username')
    tag_set = serializers.SerializerMethodField()
    postfrom = serializers.SerializerMethodField()

    def get_postfrom(self, obj):
        # Korean label ("staff recruitment") identifying the post's origin.
        return '스탭 구인'

    def get_tag_set(self, obj):
        return obj.extract_tag_list()

    class Meta:
        model = HirePostStaff
        fields = ('id', 'hit', 'author_username', 'thumbs', 'created_at',
                  'updated_at', 'title', 'context', 'image', 'category',
                  'tag_set', 'like_user_set', 'payment', 'requirement', 'advantage',
                  'job_loca', 'company', 'company_loca', 'company_desc', 'deadline',
                  'company_url', 'job_position', 'postfrom',
                  )
        read_only_fields = read_only_fields_global
class MyHirePostActorSerializer(serializers.ModelSerializer):
    """HirePostActor serializer variant without like state.

    Unlike HirePostActorSerializer it needs no request context, so it is
    usable where no authenticated user is available.
    """

    author_username = serializers.ReadOnlyField(source='author.username')
    tag_set = serializers.SerializerMethodField()
    postfrom = serializers.SerializerMethodField()

    def get_postfrom(self, obj):
        # Korean label ("actor recruitment") identifying the post's origin.
        return '액터 구인'

    def get_tag_set(self, obj):
        return obj.extract_tag_list()

    class Meta:
        model = HirePostActor
        fields = ('id', 'hit', 'author_username', 'thumbs', 'created_at',
                  'updated_at', 'title', 'context', 'image', 'category',
                  'tag_set', 'like_user_set', 'payment', 'requirement', 'advantage',
                  'job_loca', 'company', 'company_loca', 'company_desc', 'deadline',
                  'company_url', 'job_position', 'postfrom',
                  )
        read_only_fields = read_only_fields_global
| 36.659004
| 92
| 0.651024
| 1,034
| 9,568
| 5.776596
| 0.126692
| 0.060941
| 0.049222
| 0.036832
| 0.84363
| 0.834254
| 0.82153
| 0.799598
| 0.791562
| 0.720576
| 0
| 0
| 0.243834
| 9,568
| 260
| 93
| 36.8
| 0.82557
| 0.083194
| 0
| 0.743169
| 0
| 0
| 0.150251
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.131148
| false
| 0
| 0.016393
| 0.10929
| 0.535519
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 8
|
5d11d086050e28b09b852e39fa4d6b11581a414f
| 65,692
|
py
|
Python
|
wavefront_api_client/api/alert_api.py
|
mdennehy/python-client
|
4d9cfa32075a6a65d88a38fe9e72b282e87b8808
|
[
"Apache-2.0"
] | null | null | null |
wavefront_api_client/api/alert_api.py
|
mdennehy/python-client
|
4d9cfa32075a6a65d88a38fe9e72b282e87b8808
|
[
"Apache-2.0"
] | null | null | null |
wavefront_api_client/api/alert_api.py
|
mdennehy/python-client
|
4d9cfa32075a6a65d88a38fe9e72b282e87b8808
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Wavefront REST API
<p>The Wavefront REST API enables you to interact with Wavefront servers using standard REST API tools. You can use the REST API to automate commonly executed operations such as automatically tagging sources.</p><p>When you make REST API calls outside the Wavefront REST API documentation you must add the header \"Authorization: Bearer <<API-TOKEN>>\" to your HTTP requests.</p> # noqa: E501
OpenAPI spec version: v2
Contact: support@wavefront.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from wavefront_api_client.api_client import ApiClient
class AlertApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
    # Use the injected client, or fall back to a default-configured ApiClient.
    if api_client is None:
        api_client = ApiClient()
    self.api_client = api_client
def add_alert_tag(self, id, tag_value, **kwargs):  # noqa: E501
    """Add a tag to a specific alert.

    Synchronous by default; pass ``async_req=True`` to get the request
    thread back instead of the result.

    :param async_req bool
    :param str id: (required)
    :param str tag_value: (required)
    :return: ResponseContainer
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        return self.add_alert_tag_with_http_info(id, tag_value, **kwargs)  # noqa: E501
    (data) = self.add_alert_tag_with_http_info(id, tag_value, **kwargs)  # noqa: E501
    return data
def add_alert_tag_with_http_info(self, id, tag_value, **kwargs):  # noqa: E501
    """Add a tag to a specific alert (PUT /api/v2/alert/{id}/tag/{tagValue}).

    Synchronous by default; pass ``async_req=True`` to get the request
    thread back instead of the result.

    :param async_req bool
    :param str id: (required)
    :param str tag_value: (required)
    :return: ResponseContainer
    """
    all_params = ['id', 'tag_value']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # Merge extra kwargs into `params`, rejecting unknown names.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method add_alert_tag" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'id' is set
    if ('id' not in params or
            params['id'] is None):
        raise ValueError("Missing the required parameter `id` when calling `add_alert_tag`")  # noqa: E501
    # verify the required parameter 'tag_value' is set
    if ('tag_value' not in params or
            params['tag_value'] is None):
        raise ValueError("Missing the required parameter `tag_value` when calling `add_alert_tag`")  # noqa: E501

    collection_formats = {}

    path_params = {}
    if 'id' in params:
        path_params['id'] = params['id']  # noqa: E501
    if 'tag_value' in params:
        # Note the camelCase key expected by the URL template.
        path_params['tagValue'] = params['tag_value']  # noqa: E501

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['api_key']  # noqa: E501

    return self.api_client.call_api(
        '/api/v2/alert/{id}/tag/{tagValue}', 'PUT',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='ResponseContainer',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def create_alert(self, **kwargs):  # noqa: E501
    """Create a specific alert.

    Synchronous by default; pass ``async_req=True`` to get the request
    thread back instead of the result.

    :param async_req bool
    :param Alert body: alert definition (classic or threshold form)
    :return: ResponseContainerAlert
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        return self.create_alert_with_http_info(**kwargs)  # noqa: E501
    (data) = self.create_alert_with_http_info(**kwargs)  # noqa: E501
    return data
def create_alert_with_http_info(self, **kwargs):  # noqa: E501
    """Create a specific alert (POST /api/v2/alert).

    Synchronous by default; pass ``async_req=True`` to get the request
    thread back instead of the result.

    :param async_req bool
    :param Alert body: alert definition — classic (condition/minutes/severity)
        or threshold (alertType THRESHOLD with per-severity conditions); see
        the Wavefront API documentation for example bodies.
    :return: ResponseContainerAlert
    """
    all_params = ['body']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # Merge extra kwargs into `params`, rejecting unknown names.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method create_alert" % key
            )
        params[key] = val
    del params['kwargs']

    collection_formats = {}

    path_params = {}

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    if 'body' in params:
        body_params = params['body']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['api_key']  # noqa: E501

    return self.api_client.call_api(
        '/api/v2/alert', 'POST',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='ResponseContainerAlert',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def delete_alert(self, id, **kwargs):  # noqa: E501
    """Delete a specific alert.

    Synchronous by default; pass ``async_req=True`` to get the request
    thread back instead of the result.

    :param async_req bool
    :param str id: (required)
    :return: ResponseContainerAlert
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        return self.delete_alert_with_http_info(id, **kwargs)  # noqa: E501
    (data) = self.delete_alert_with_http_info(id, **kwargs)  # noqa: E501
    return data
def delete_alert_with_http_info(self, id, **kwargs):  # noqa: E501
    """Delete a specific alert (DELETE /api/v2/alert/{id}).

    Synchronous by default; pass ``async_req=True`` to get the request
    thread back instead of the result.

    :param async_req bool
    :param str id: (required)
    :return: ResponseContainerAlert
    """
    all_params = ['id']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # Merge extra kwargs into `params`, rejecting unknown names.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method delete_alert" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'id' is set
    if ('id' not in params or
            params['id'] is None):
        raise ValueError("Missing the required parameter `id` when calling `delete_alert`")  # noqa: E501

    collection_formats = {}

    path_params = {}
    if 'id' in params:
        path_params['id'] = params['id']  # noqa: E501

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['api_key']  # noqa: E501

    return self.api_client.call_api(
        '/api/v2/alert/{id}', 'DELETE',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='ResponseContainerAlert',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def get_alert(self, id, **kwargs):  # noqa: E501
    """Get a specific alert.

    Synchronous by default; pass ``async_req=True`` to get the request
    thread back instead of the result.

    :param async_req bool
    :param str id: (required)
    :return: ResponseContainerAlert
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        return self.get_alert_with_http_info(id, **kwargs)  # noqa: E501
    (data) = self.get_alert_with_http_info(id, **kwargs)  # noqa: E501
    return data
def get_alert_with_http_info(self, id, **kwargs):  # noqa: E501
    """Get a specific alert (GET /api/v2/alert/{id}).

    Synchronous by default; pass ``async_req=True`` to get the request
    thread back instead of the result.

    :param async_req bool
    :param str id: (required)
    :return: ResponseContainerAlert
    """
    all_params = ['id']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # Merge extra kwargs into `params`, rejecting unknown names.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_alert" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'id' is set
    if ('id' not in params or
            params['id'] is None):
        raise ValueError("Missing the required parameter `id` when calling `get_alert`")  # noqa: E501

    collection_formats = {}

    path_params = {}
    if 'id' in params:
        path_params['id'] = params['id']  # noqa: E501

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['api_key']  # noqa: E501

    return self.api_client.call_api(
        '/api/v2/alert/{id}', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='ResponseContainerAlert',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def get_alert_history(self, id, **kwargs):  # noqa: E501
    """Get the version history of a specific alert.

    Synchronous by default; pass ``async_req=True`` to get the request
    thread back instead of the result.

    :param async_req bool
    :param str id: (required)
    :param int offset:
    :param int limit:
    :return: ResponseContainerHistoryResponse
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        return self.get_alert_history_with_http_info(id, **kwargs)  # noqa: E501
    (data) = self.get_alert_history_with_http_info(id, **kwargs)  # noqa: E501
    return data
def get_alert_history_with_http_info(self, id, **kwargs):  # noqa: E501
    """Get the version history of a specific alert (GET /api/v2/alert/{id}/history).

    Synchronous by default; pass ``async_req=True`` to get the request
    thread back instead of the result.

    :param async_req bool
    :param str id: (required)
    :param int offset: pagination offset
    :param int limit: pagination page size
    :return: ResponseContainerHistoryResponse
    """
    all_params = ['id', 'offset', 'limit']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # Merge extra kwargs into `params`, rejecting unknown names.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_alert_history" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'id' is set
    if ('id' not in params or
            params['id'] is None):
        raise ValueError("Missing the required parameter `id` when calling `get_alert_history`")  # noqa: E501

    collection_formats = {}

    path_params = {}
    if 'id' in params:
        path_params['id'] = params['id']  # noqa: E501

    # Optional pagination parameters go on the query string.
    query_params = []
    if 'offset' in params:
        query_params.append(('offset', params['offset']))  # noqa: E501
    if 'limit' in params:
        query_params.append(('limit', params['limit']))  # noqa: E501

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['api_key']  # noqa: E501

    return self.api_client.call_api(
        '/api/v2/alert/{id}/history', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='ResponseContainerHistoryResponse',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def get_alert_tags(self, id, **kwargs):  # noqa: E501
    """Get all tags associated with a specific alert.

    Synchronous by default; pass ``async_req=True`` to get the request
    thread back instead of the result.

    :param async_req bool
    :param str id: (required)
    :return: ResponseContainerTagsResponse
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        return self.get_alert_tags_with_http_info(id, **kwargs)  # noqa: E501
    (data) = self.get_alert_tags_with_http_info(id, **kwargs)  # noqa: E501
    return data
def get_alert_tags_with_http_info(self, id, **kwargs):  # noqa: E501
    """Get all tags associated with a specific alert (GET /api/v2/alert/{id}/tag).

    Synchronous by default; pass ``async_req=True`` to get the request
    thread back instead of the result.

    :param async_req bool
    :param str id: (required)
    :return: ResponseContainerTagsResponse
    """
    all_params = ['id']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # Merge extra kwargs into `params`, rejecting unknown names.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_alert_tags" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'id' is set
    if ('id' not in params or
            params['id'] is None):
        raise ValueError("Missing the required parameter `id` when calling `get_alert_tags`")  # noqa: E501

    collection_formats = {}

    path_params = {}
    if 'id' in params:
        path_params['id'] = params['id']  # noqa: E501

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['api_key']  # noqa: E501

    return self.api_client.call_api(
        '/api/v2/alert/{id}/tag', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='ResponseContainerTagsResponse',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def get_alert_version(self, id, version, **kwargs):  # noqa: E501
    """Get a specific historical version of a specific alert.

    Synchronous by default; pass ``async_req=True`` to get the request
    thread back instead of the result.

    :param async_req bool
    :param str id: (required)
    :param int version: (required)
    :return: ResponseContainerAlert
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        return self.get_alert_version_with_http_info(id, version, **kwargs)  # noqa: E501
    (data) = self.get_alert_version_with_http_info(id, version, **kwargs)  # noqa: E501
    return data
def get_alert_version_with_http_info(self, id, version, **kwargs):  # noqa: E501
    """Get a historical version of an alert (GET /api/v2/alert/{id}/history/{version}).

    Synchronous by default; pass ``async_req=True`` to get the request
    thread back instead of the result.

    :param async_req bool
    :param str id: (required)
    :param int version: (required)
    :return: ResponseContainerAlert
    """
    all_params = ['id', 'version']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # Merge extra kwargs into `params`, rejecting unknown names.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_alert_version" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'id' is set
    if ('id' not in params or
            params['id'] is None):
        raise ValueError("Missing the required parameter `id` when calling `get_alert_version`")  # noqa: E501
    # verify the required parameter 'version' is set
    if ('version' not in params or
            params['version'] is None):
        raise ValueError("Missing the required parameter `version` when calling `get_alert_version`")  # noqa: E501

    collection_formats = {}

    path_params = {}
    if 'id' in params:
        path_params['id'] = params['id']  # noqa: E501
    if 'version' in params:
        path_params['version'] = params['version']  # noqa: E501

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['api_key']  # noqa: E501

    return self.api_client.call_api(
        '/api/v2/alert/{id}/history/{version}', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='ResponseContainerAlert',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def get_alerts_summary(self, **kwargs):  # noqa: E501
    """Count alerts of various statuses for a customer  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.get_alerts_summary(async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :return: ResponseContainerMapStringInteger
             If the method is called asynchronously,
             returns the request thread.
    """
    # Always request the deserialized payload only; when async_req is set
    # the lower-level call returns the request thread either way, so a
    # single unconditional return covers both modes.
    kwargs['_return_http_data_only'] = True
    return self.get_alerts_summary_with_http_info(**kwargs)  # noqa: E501
def get_alerts_summary_with_http_info(self, **kwargs):  # noqa: E501
    """Count alerts of various statuses for a customer  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.get_alerts_summary_with_http_info(async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :return: ResponseContainerMapStringInteger
             If the method is called asynchronously,
             returns the request thread.
    """
    # Keywords this endpoint understands: only the generic request options.
    accepted = frozenset([
        'async_req', '_return_http_data_only',
        '_preload_content', '_request_timeout'])
    for key in kwargs:
        if key not in accepted:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_alerts_summary" % key)
    # HTTP header `Accept`
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json'])}  # noqa: E501
    return self.api_client.call_api(
        '/api/v2/alert/summary', 'GET',
        {},  # path params (none)
        [],  # query params (none)
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='ResponseContainerMapStringInteger',  # noqa: E501
        auth_settings=['api_key'],  # noqa: E501
        async_req=kwargs.get('async_req'),
        _return_http_data_only=kwargs.get('_return_http_data_only'),
        _preload_content=kwargs.get('_preload_content', True),
        _request_timeout=kwargs.get('_request_timeout'),
        collection_formats={})
def get_all_alert(self, **kwargs):  # noqa: E501
    """Get all alerts for a customer  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.get_all_alert(async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param int offset:
    :param int limit:
    :return: ResponseContainerPagedAlert
             If the method is called asynchronously,
             returns the request thread.
    """
    # Return just the deserialized payload; with async_req the result of
    # the delegated call is the request thread in either case.
    kwargs['_return_http_data_only'] = True
    return self.get_all_alert_with_http_info(**kwargs)  # noqa: E501
def get_all_alert_with_http_info(self, **kwargs):  # noqa: E501
    """Get all alerts for a customer  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.get_all_alert_with_http_info(async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param int offset:
    :param int limit:
    :return: ResponseContainerPagedAlert
             If the method is called asynchronously,
             returns the request thread.
    """
    # Endpoint keywords plus the generic request options.
    accepted = frozenset([
        'offset', 'limit', 'async_req', '_return_http_data_only',
        '_preload_content', '_request_timeout'])
    for key in kwargs:
        if key not in accepted:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_all_alert" % key)
    # Forward paging keywords as query parameters only when supplied.
    query_params = []
    if 'offset' in kwargs:
        query_params.append(('offset', kwargs['offset']))  # noqa: E501
    if 'limit' in kwargs:
        query_params.append(('limit', kwargs['limit']))  # noqa: E501
    # HTTP header `Accept`
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json'])}  # noqa: E501
    return self.api_client.call_api(
        '/api/v2/alert', 'GET',
        {},  # path params (none)
        query_params,
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='ResponseContainerPagedAlert',  # noqa: E501
        auth_settings=['api_key'],  # noqa: E501
        async_req=kwargs.get('async_req'),
        _return_http_data_only=kwargs.get('_return_http_data_only'),
        _preload_content=kwargs.get('_preload_content', True),
        _request_timeout=kwargs.get('_request_timeout'),
        collection_formats={})
def hide_alert(self, id, **kwargs):  # noqa: E501
    """Hide a specific integration alert  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.hide_alert(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param int id: (required)
    :return: ResponseContainerAlert
             If the method is called asynchronously,
             returns the request thread.
    """
    # Ask for the payload only; async and sync modes both just propagate
    # the delegated call's return value.
    kwargs['_return_http_data_only'] = True
    return self.hide_alert_with_http_info(id, **kwargs)  # noqa: E501
def hide_alert_with_http_info(self, id, **kwargs):  # noqa: E501
    """Hide a specific integration alert  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.hide_alert_with_http_info(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param int id: (required)
    :return: ResponseContainerAlert
             If the method is called asynchronously,
             returns the request thread.
    """
    # Endpoint keywords plus the generic request options.
    accepted = frozenset([
        'id', 'async_req', '_return_http_data_only',
        '_preload_content', '_request_timeout'])
    for key in kwargs:
        if key not in accepted:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method hide_alert" % key)
    # verify the required parameter 'id' is set
    if id is None:
        raise ValueError("Missing the required parameter `id` when calling `hide_alert`")  # noqa: E501
    # HTTP header `Accept`
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json'])}  # noqa: E501
    return self.api_client.call_api(
        '/api/v2/alert/{id}/uninstall', 'POST',
        {'id': id},  # path params
        [],  # query params (none)
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='ResponseContainerAlert',  # noqa: E501
        auth_settings=['api_key'],  # noqa: E501
        async_req=kwargs.get('async_req'),
        _return_http_data_only=kwargs.get('_return_http_data_only'),
        _preload_content=kwargs.get('_preload_content', True),
        _request_timeout=kwargs.get('_request_timeout'),
        collection_formats={})
def remove_alert_tag(self, id, tag_value, **kwargs):  # noqa: E501
    """Remove a tag from a specific alert  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.remove_alert_tag(id, tag_value, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str id: (required)
    :param str tag_value: (required)
    :return: ResponseContainer
             If the method is called asynchronously,
             returns the request thread.
    """
    # Return just the deserialized payload; the delegated call yields the
    # request thread itself when async_req is set.
    kwargs['_return_http_data_only'] = True
    return self.remove_alert_tag_with_http_info(id, tag_value, **kwargs)  # noqa: E501
def remove_alert_tag_with_http_info(self, id, tag_value, **kwargs):  # noqa: E501
    """Remove a tag from a specific alert  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.remove_alert_tag_with_http_info(id, tag_value, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str id: (required)
    :param str tag_value: (required)
    :return: ResponseContainer
             If the method is called asynchronously,
             returns the request thread.
    """
    # Endpoint keywords plus the generic request options.
    accepted = frozenset([
        'id', 'tag_value', 'async_req', '_return_http_data_only',
        '_preload_content', '_request_timeout'])
    for key in kwargs:
        if key not in accepted:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method remove_alert_tag" % key)
    # verify the required parameter 'id' is set
    if id is None:
        raise ValueError("Missing the required parameter `id` when calling `remove_alert_tag`")  # noqa: E501
    # verify the required parameter 'tag_value' is set
    if tag_value is None:
        raise ValueError("Missing the required parameter `tag_value` when calling `remove_alert_tag`")  # noqa: E501
    # HTTP header `Accept`
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json'])}  # noqa: E501
    return self.api_client.call_api(
        '/api/v2/alert/{id}/tag/{tagValue}', 'DELETE',
        {'id': id, 'tagValue': tag_value},  # path params
        [],  # query params (none)
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='ResponseContainer',  # noqa: E501
        auth_settings=['api_key'],  # noqa: E501
        async_req=kwargs.get('async_req'),
        _return_http_data_only=kwargs.get('_return_http_data_only'),
        _preload_content=kwargs.get('_preload_content', True),
        _request_timeout=kwargs.get('_request_timeout'),
        collection_formats={})
def set_alert_tags(self, id, **kwargs):  # noqa: E501
    """Set all tags associated with a specific alert  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.set_alert_tags(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str id: (required)
    :param list[str] body:
    :return: ResponseContainer
             If the method is called asynchronously,
             returns the request thread.
    """
    # Ask for the payload only; both sync and async modes simply return
    # whatever the lower-level call produces.
    kwargs['_return_http_data_only'] = True
    return self.set_alert_tags_with_http_info(id, **kwargs)  # noqa: E501
def set_alert_tags_with_http_info(self, id, **kwargs):  # noqa: E501
    """Set all tags associated with a specific alert  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.set_alert_tags_with_http_info(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str id: (required)
    :param list[str] body:
    :return: ResponseContainer
             If the method is called asynchronously,
             returns the request thread.
    """
    # Endpoint keywords plus the generic request options.
    accepted = frozenset([
        'id', 'body', 'async_req', '_return_http_data_only',
        '_preload_content', '_request_timeout'])
    for key in kwargs:
        if key not in accepted:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method set_alert_tags" % key)
    # verify the required parameter 'id' is set
    if id is None:
        raise ValueError("Missing the required parameter `id` when calling `set_alert_tags`")  # noqa: E501
    # HTTP headers `Accept` and `Content-Type`
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json']),  # noqa: E501
        'Content-Type': self.api_client.select_header_content_type(  # noqa: E501
            ['application/json'])}  # noqa: E501
    return self.api_client.call_api(
        '/api/v2/alert/{id}/tag', 'POST',
        {'id': id},  # path params
        [],  # query params (none)
        header_params,
        body=kwargs.get('body'),  # absent body -> None, as before
        post_params=[],
        files={},
        response_type='ResponseContainer',  # noqa: E501
        auth_settings=['api_key'],  # noqa: E501
        async_req=kwargs.get('async_req'),
        _return_http_data_only=kwargs.get('_return_http_data_only'),
        _preload_content=kwargs.get('_preload_content', True),
        _request_timeout=kwargs.get('_request_timeout'),
        collection_formats={})
def snooze_alert(self, id, **kwargs):  # noqa: E501
    """Snooze a specific alert for some number of seconds  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.snooze_alert(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str id: (required)
    :param int seconds:
    :return: ResponseContainerAlert
             If the method is called asynchronously,
             returns the request thread.
    """
    # Return the payload only; with async_req the delegated call already
    # returns the request thread.
    kwargs['_return_http_data_only'] = True
    return self.snooze_alert_with_http_info(id, **kwargs)  # noqa: E501
def snooze_alert_with_http_info(self, id, **kwargs):  # noqa: E501
    """Snooze a specific alert for some number of seconds  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.snooze_alert_with_http_info(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str id: (required)
    :param int seconds:
    :return: ResponseContainerAlert
             If the method is called asynchronously,
             returns the request thread.
    """
    # Endpoint keywords plus the generic request options.
    accepted = frozenset([
        'id', 'seconds', 'async_req', '_return_http_data_only',
        '_preload_content', '_request_timeout'])
    for key in kwargs:
        if key not in accepted:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method snooze_alert" % key)
    # verify the required parameter 'id' is set
    if id is None:
        raise ValueError("Missing the required parameter `id` when calling `snooze_alert`")  # noqa: E501
    # `seconds` is forwarded as a query parameter only when supplied
    # (even an explicit None is forwarded, matching prior behavior).
    query_params = []
    if 'seconds' in kwargs:
        query_params.append(('seconds', kwargs['seconds']))  # noqa: E501
    # HTTP header `Accept`
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json'])}  # noqa: E501
    return self.api_client.call_api(
        '/api/v2/alert/{id}/snooze', 'POST',
        {'id': id},  # path params
        query_params,
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='ResponseContainerAlert',  # noqa: E501
        auth_settings=['api_key'],  # noqa: E501
        async_req=kwargs.get('async_req'),
        _return_http_data_only=kwargs.get('_return_http_data_only'),
        _preload_content=kwargs.get('_preload_content', True),
        _request_timeout=kwargs.get('_request_timeout'),
        collection_formats={})
def undelete_alert(self, id, **kwargs):  # noqa: E501
    """Undelete a specific alert  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.undelete_alert(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str id: (required)
    :return: ResponseContainerAlert
             If the method is called asynchronously,
             returns the request thread.
    """
    # Payload-only response; async and sync both propagate the delegated
    # call's return value unchanged.
    kwargs['_return_http_data_only'] = True
    return self.undelete_alert_with_http_info(id, **kwargs)  # noqa: E501
def undelete_alert_with_http_info(self, id, **kwargs):  # noqa: E501
    """Undelete a specific alert  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.undelete_alert_with_http_info(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str id: (required)
    :return: ResponseContainerAlert
             If the method is called asynchronously,
             returns the request thread.
    """
    # Endpoint keywords plus the generic request options.
    accepted = frozenset([
        'id', 'async_req', '_return_http_data_only',
        '_preload_content', '_request_timeout'])
    for key in kwargs:
        if key not in accepted:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method undelete_alert" % key)
    # verify the required parameter 'id' is set
    if id is None:
        raise ValueError("Missing the required parameter `id` when calling `undelete_alert`")  # noqa: E501
    # HTTP header `Accept`
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json'])}  # noqa: E501
    return self.api_client.call_api(
        '/api/v2/alert/{id}/undelete', 'POST',
        {'id': id},  # path params
        [],  # query params (none)
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='ResponseContainerAlert',  # noqa: E501
        auth_settings=['api_key'],  # noqa: E501
        async_req=kwargs.get('async_req'),
        _return_http_data_only=kwargs.get('_return_http_data_only'),
        _preload_content=kwargs.get('_preload_content', True),
        _request_timeout=kwargs.get('_request_timeout'),
        collection_formats={})
def unhide_alert(self, id, **kwargs):  # noqa: E501
    """Unhide a specific integration alert  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.unhide_alert(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param int id: (required)
    :return: ResponseContainerAlert
             If the method is called asynchronously,
             returns the request thread.
    """
    # Payload-only response; the delegated call returns the request
    # thread itself when async_req is set.
    kwargs['_return_http_data_only'] = True
    return self.unhide_alert_with_http_info(id, **kwargs)  # noqa: E501
def unhide_alert_with_http_info(self, id, **kwargs):  # noqa: E501
    """Unhide a specific integration alert  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.unhide_alert_with_http_info(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param int id: (required)
    :return: ResponseContainerAlert
             If the method is called asynchronously,
             returns the request thread.
    """
    # Endpoint keywords plus the generic request options.
    accepted = frozenset([
        'id', 'async_req', '_return_http_data_only',
        '_preload_content', '_request_timeout'])
    for key in kwargs:
        if key not in accepted:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method unhide_alert" % key)
    # verify the required parameter 'id' is set
    if id is None:
        raise ValueError("Missing the required parameter `id` when calling `unhide_alert`")  # noqa: E501
    # HTTP header `Accept`
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json'])}  # noqa: E501
    return self.api_client.call_api(
        '/api/v2/alert/{id}/install', 'POST',
        {'id': id},  # path params
        [],  # query params (none)
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='ResponseContainerAlert',  # noqa: E501
        auth_settings=['api_key'],  # noqa: E501
        async_req=kwargs.get('async_req'),
        _return_http_data_only=kwargs.get('_return_http_data_only'),
        _preload_content=kwargs.get('_preload_content', True),
        _request_timeout=kwargs.get('_request_timeout'),
        collection_formats={})
def unsnooze_alert(self, id, **kwargs):  # noqa: E501
    """Unsnooze a specific alert  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.unsnooze_alert(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str id: (required)
    :return: ResponseContainerAlert
             If the method is called asynchronously,
             returns the request thread.
    """
    # Payload-only response; both execution modes just propagate the
    # delegated call's return value.
    kwargs['_return_http_data_only'] = True
    return self.unsnooze_alert_with_http_info(id, **kwargs)  # noqa: E501
def unsnooze_alert_with_http_info(self, id, **kwargs):  # noqa: E501
    """Unsnooze a specific alert  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.unsnooze_alert_with_http_info(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str id: (required)
    :return: ResponseContainerAlert
             If the method is called asynchronously,
             returns the request thread.
    """
    # Endpoint keywords plus the generic request options.
    accepted = frozenset([
        'id', 'async_req', '_return_http_data_only',
        '_preload_content', '_request_timeout'])
    for key in kwargs:
        if key not in accepted:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method unsnooze_alert" % key)
    # verify the required parameter 'id' is set
    if id is None:
        raise ValueError("Missing the required parameter `id` when calling `unsnooze_alert`")  # noqa: E501
    # HTTP header `Accept`
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json'])}  # noqa: E501
    return self.api_client.call_api(
        '/api/v2/alert/{id}/unsnooze', 'POST',
        {'id': id},  # path params
        [],  # query params (none)
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='ResponseContainerAlert',  # noqa: E501
        auth_settings=['api_key'],  # noqa: E501
        async_req=kwargs.get('async_req'),
        _return_http_data_only=kwargs.get('_return_http_data_only'),
        _preload_content=kwargs.get('_preload_content', True),
        _request_timeout=kwargs.get('_request_timeout'),
        collection_formats={})
def update_alert(self, id, **kwargs):  # noqa: E501
    """Update a specific alert  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.update_alert(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str id: (required)
    :param Alert body: Example Body: <pre>{ \"id\": \"1459375928549\", \"name\": \"Alert Name\", \"target\": \"success@simulator.amazonses.com\", \"condition\": \"ts(~sample.cpu.loadavg.1m) > 1\", \"displayExpression\": \"ts(~sample.cpu.loadavg.1m)\", \"minutes\": 5, \"resolveAfterMinutes\": 2, \"severity\": \"INFO\", \"additionalInformation\": \"Additional Info\", \"tags\": { \"customerTags\": [ \"alertTag1\" ] } }</pre>
    :return: ResponseContainerAlert
             If the method is called asynchronously,
             returns the request thread.
    """
    # Payload-only response; with async_req the delegated call already
    # returns the request thread, so a single return handles both modes.
    kwargs['_return_http_data_only'] = True
    return self.update_alert_with_http_info(id, **kwargs)  # noqa: E501
def update_alert_with_http_info(self, id, **kwargs):  # noqa: E501
    """Update a specific alert  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.update_alert_with_http_info(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str id: (required)
    :param Alert body: Example Body: <pre>{ \"id\": \"1459375928549\", \"name\": \"Alert Name\", \"target\": \"success@simulator.amazonses.com\", \"condition\": \"ts(~sample.cpu.loadavg.1m) > 1\", \"displayExpression\": \"ts(~sample.cpu.loadavg.1m)\", \"minutes\": 5, \"resolveAfterMinutes\": 2, \"severity\": \"INFO\", \"additionalInformation\": \"Additional Info\", \"tags\": { \"customerTags\": [ \"alertTag1\" ] } }</pre>
    :return: ResponseContainerAlert
             If the method is called asynchronously,
             returns the request thread.
    """
    # Endpoint keywords plus the generic request options.
    accepted = frozenset([
        'id', 'body', 'async_req', '_return_http_data_only',
        '_preload_content', '_request_timeout'])
    for key in kwargs:
        if key not in accepted:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method update_alert" % key)
    # verify the required parameter 'id' is set
    if id is None:
        raise ValueError("Missing the required parameter `id` when calling `update_alert`")  # noqa: E501
    # HTTP headers `Accept` and `Content-Type`
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json']),  # noqa: E501
        'Content-Type': self.api_client.select_header_content_type(  # noqa: E501
            ['application/json'])}  # noqa: E501
    return self.api_client.call_api(
        '/api/v2/alert/{id}', 'PUT',
        {'id': id},  # path params
        [],  # query params (none)
        header_params,
        body=kwargs.get('body'),  # absent body -> None, as before
        post_params=[],
        files={},
        response_type='ResponseContainerAlert',  # noqa: E501
        auth_settings=['api_key'],  # noqa: E501
        async_req=kwargs.get('async_req'),
        _return_http_data_only=kwargs.get('_return_http_data_only'),
        _preload_content=kwargs.get('_preload_content', True),
        _request_timeout=kwargs.get('_request_timeout'),
        collection_formats={})
| 38.687868
| 884
| 0.589265
| 7,429
| 65,692
| 4.976713
| 0.033517
| 0.054528
| 0.025749
| 0.033106
| 0.965785
| 0.962972
| 0.959862
| 0.955534
| 0.952207
| 0.947582
| 0
| 0.018584
| 0.310281
| 65,692
| 1,697
| 885
| 38.710666
| 0.797413
| 0.33686
| 0
| 0.824427
| 0
| 0
| 0.176222
| 0.045005
| 0
| 0
| 0
| 0
| 0
| 1
| 0.038168
| false
| 0
| 0.004362
| 0
| 0.099237
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
5d1b6fba6834f9a1097a7eac136ce838807d4d40
| 154
|
py
|
Python
|
tirelire-web-backend/app/adapters/session_manager/__init__.py
|
AgRenaud/tirelire
|
0ac42dbf735dea4ecb741057bd037c18657b95c7
|
[
"MIT"
] | null | null | null |
tirelire-web-backend/app/adapters/session_manager/__init__.py
|
AgRenaud/tirelire
|
0ac42dbf735dea4ecb741057bd037c18657b95c7
|
[
"MIT"
] | null | null | null |
tirelire-web-backend/app/adapters/session_manager/__init__.py
|
AgRenaud/tirelire
|
0ac42dbf735dea4ecb741057bd037c18657b95c7
|
[
"MIT"
] | null | null | null |
from app.adapters.session_manager.session_manager import SessionManager
from app.adapters.session_manager.redis_session_manager import RedisSessionManager
| 77
| 82
| 0.915584
| 19
| 154
| 7.157895
| 0.473684
| 0.411765
| 0.220588
| 0.323529
| 0.426471
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.045455
| 154
| 2
| 82
| 77
| 0.92517
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
5d5a293468a0c3484219ebe2e7660374a44adcfe
| 16,373
|
py
|
Python
|
octavia/tests/unit/controller/worker/flows/test_amphora_flows.py
|
zjchao/octavia
|
e07031fa78604568c6e2112cb4cb147661bc57d7
|
[
"Apache-2.0"
] | null | null | null |
octavia/tests/unit/controller/worker/flows/test_amphora_flows.py
|
zjchao/octavia
|
e07031fa78604568c6e2112cb4cb147661bc57d7
|
[
"Apache-2.0"
] | null | null | null |
octavia/tests/unit/controller/worker/flows/test_amphora_flows.py
|
zjchao/octavia
|
e07031fa78604568c6e2112cb4cb147661bc57d7
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import mock
from oslo_config import cfg
from oslo_config import fixture as oslo_fixture
from taskflow.patterns import linear_flow as flow
from octavia.common import constants
from octavia.common import data_models
from octavia.controller.worker.flows import amphora_flows
import octavia.tests.unit.base as base
# Auth API version string used by this test module
# (presumably identity/Keystone API v2 — TODO confirm where it is consumed).
AUTH_VERSION = '2'
# NOTE: We patch the get_network_driver for all the calls so we don't
# inadvertently make real calls.
@mock.patch('octavia.common.utils.get_network_driver')
class TestAmphoraFlows(base.TestCase):
    """Unit tests for ``amphora_flows.AmphoraFlows``.

    Each test builds a taskflow ``Flow`` via the AmphoraFlows factory and
    asserts on the flow's declared ``requires``/``provides`` symbol sets and
    their sizes, without executing the flow.  ``get_network_driver`` is
    patched at class level so no real network calls are made.
    """

    def setUp(self):
        """Select the REST amphora driver and seed test amphorae and an LB."""
        super(TestAmphoraFlows, self).setUp()
        self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
        self.conf.config(
            group="controller_worker",
            amphora_driver='amphora_haproxy_rest_driver')
        self.conf.config(group="nova", enable_anti_affinity=False)
        self.AmpFlow = amphora_flows.AmphoraFlows()
        self.amp1 = data_models.Amphora(id=1)
        self.amp2 = data_models.Amphora(id=2)
        # amp3 carries DELETED status, unlike amp1/amp2.
        self.amp3 = data_models.Amphora(id=3, status=constants.DELETED)
        self.lb = data_models.LoadBalancer(
            id=4, amphorae=[self.amp1, self.amp2, self.amp3])

    def test_get_create_amphora_flow(self, mock_get_net_driver):
        """Spare-amphora create flow exposes the expected symbols."""
        amp_flow = self.AmpFlow.get_create_amphora_flow()
        self.assertIsInstance(amp_flow, flow.Flow)
        self.assertIn(constants.AMPHORA, amp_flow.provides)
        self.assertIn(constants.AMPHORA_ID, amp_flow.provides)
        self.assertIn(constants.COMPUTE_ID, amp_flow.provides)
        self.assertIn(constants.SERVER_PEM, amp_flow.provides)
        self.assertEqual(5, len(amp_flow.provides))
        self.assertEqual(1, len(amp_flow.requires))

    def test_get_create_amphora_flow_cert(self, mock_get_net_driver):
        """Create flow built from a fresh AmphoraFlows instance."""
        self.AmpFlow = amphora_flows.AmphoraFlows()
        amp_flow = self.AmpFlow.get_create_amphora_flow()
        self.assertIsInstance(amp_flow, flow.Flow)
        self.assertIn(constants.AMPHORA, amp_flow.provides)
        self.assertIn(constants.AMPHORA_ID, amp_flow.provides)
        self.assertIn(constants.COMPUTE_ID, amp_flow.provides)
        self.assertEqual(5, len(amp_flow.provides))
        self.assertEqual(1, len(amp_flow.requires))

    def test_get_create_amphora_for_lb_flow(self, mock_get_net_driver):
        """Create-for-LB subflow, STANDALONE role."""
        amp_flow = self.AmpFlow._get_create_amp_for_lb_subflow(
            'SOMEPREFIX', constants.ROLE_STANDALONE)
        self.assertIsInstance(amp_flow, flow.Flow)
        self.assertIn(constants.LOADBALANCER_ID, amp_flow.requires)
        self.assertIn(constants.AMPHORA, amp_flow.provides)
        self.assertIn(constants.AMPHORA_ID, amp_flow.provides)
        self.assertIn(constants.COMPUTE_ID, amp_flow.provides)
        self.assertIn(constants.COMPUTE_OBJ, amp_flow.provides)
        self.assertIn(constants.SERVER_PEM, amp_flow.provides)
        self.assertEqual(5, len(amp_flow.provides))
        self.assertEqual(2, len(amp_flow.requires))

    def test_get_cert_create_amphora_for_lb_flow(self, mock_get_net_driver):
        """Create-for-LB subflow with a fresh instance, STANDALONE role."""
        self.AmpFlow = amphora_flows.AmphoraFlows()
        amp_flow = self.AmpFlow._get_create_amp_for_lb_subflow(
            'SOMEPREFIX', constants.ROLE_STANDALONE)
        self.assertIsInstance(amp_flow, flow.Flow)
        self.assertIn(constants.LOADBALANCER_ID, amp_flow.requires)
        self.assertIn(constants.AMPHORA, amp_flow.provides)
        self.assertIn(constants.AMPHORA_ID, amp_flow.provides)
        self.assertIn(constants.COMPUTE_ID, amp_flow.provides)
        self.assertIn(constants.COMPUTE_OBJ, amp_flow.provides)
        self.assertIn(constants.SERVER_PEM, amp_flow.provides)
        self.assertEqual(5, len(amp_flow.provides))
        self.assertEqual(2, len(amp_flow.requires))

    def test_get_cert_master_create_amphora_for_lb_flow(
            self, mock_get_net_driver):
        """Create-for-LB subflow, MASTER role."""
        self.AmpFlow = amphora_flows.AmphoraFlows()
        amp_flow = self.AmpFlow._get_create_amp_for_lb_subflow(
            'SOMEPREFIX', constants.ROLE_MASTER)
        self.assertIsInstance(amp_flow, flow.Flow)
        self.assertIn(constants.LOADBALANCER_ID, amp_flow.requires)
        self.assertIn(constants.AMPHORA, amp_flow.provides)
        self.assertIn(constants.AMPHORA_ID, amp_flow.provides)
        self.assertIn(constants.COMPUTE_ID, amp_flow.provides)
        self.assertIn(constants.COMPUTE_OBJ, amp_flow.provides)
        self.assertIn(constants.SERVER_PEM, amp_flow.provides)
        self.assertEqual(5, len(amp_flow.provides))
        self.assertEqual(2, len(amp_flow.requires))

    def test_get_cert_master_rest_anti_affinity_create_amphora_for_lb_flow(
            self, mock_get_net_driver):
        """MASTER role with nova anti-affinity: SERVER_GROUP_ID is required."""
        self.conf.config(group="nova", enable_anti_affinity=True)
        self.AmpFlow = amphora_flows.AmphoraFlows()
        amp_flow = self.AmpFlow._get_create_amp_for_lb_subflow(
            'SOMEPREFIX', constants.ROLE_MASTER)
        self.assertIsInstance(amp_flow, flow.Flow)
        self.assertIn(constants.AMPHORA_ID, amp_flow.provides)
        self.assertIn(constants.SERVER_GROUP_ID, amp_flow.requires)
        self.assertIn(constants.COMPUTE_ID, amp_flow.provides)
        self.assertIn(constants.COMPUTE_OBJ, amp_flow.provides)
        self.assertIn(constants.SERVER_PEM, amp_flow.provides)
        self.assertEqual(5, len(amp_flow.provides))
        self.assertEqual(3, len(amp_flow.requires))
        # Restore the default so later tests see anti-affinity disabled.
        self.conf.config(group="nova", enable_anti_affinity=False)

    def test_get_cert_backup_create_amphora_for_lb_flow(
            self, mock_get_net_driver):
        """Create-for-LB subflow, BACKUP role."""
        self.AmpFlow = amphora_flows.AmphoraFlows()
        amp_flow = self.AmpFlow._get_create_amp_for_lb_subflow(
            'SOMEPREFIX', constants.ROLE_BACKUP)
        self.assertIsInstance(amp_flow, flow.Flow)
        self.assertIn(constants.LOADBALANCER_ID, amp_flow.requires)
        self.assertIn(constants.AMPHORA, amp_flow.provides)
        self.assertIn(constants.AMPHORA_ID, amp_flow.provides)
        self.assertIn(constants.COMPUTE_ID, amp_flow.provides)
        self.assertIn(constants.COMPUTE_OBJ, amp_flow.provides)
        self.assertIn(constants.SERVER_PEM, amp_flow.provides)
        self.assertEqual(5, len(amp_flow.provides))
        self.assertEqual(2, len(amp_flow.requires))

    def test_get_cert_bogus_create_amphora_for_lb_flow(
            self, mock_get_net_driver):
        """An unknown role still yields a well-formed subflow."""
        self.AmpFlow = amphora_flows.AmphoraFlows()
        amp_flow = self.AmpFlow._get_create_amp_for_lb_subflow(
            'SOMEPREFIX', 'BOGUS_ROLE')
        self.assertIsInstance(amp_flow, flow.Flow)
        self.assertIn(constants.LOADBALANCER_ID, amp_flow.requires)
        self.assertIn(constants.AMPHORA, amp_flow.provides)
        self.assertIn(constants.AMPHORA_ID, amp_flow.provides)
        self.assertIn(constants.COMPUTE_ID, amp_flow.provides)
        self.assertIn(constants.COMPUTE_OBJ, amp_flow.provides)
        self.assertIn(constants.SERVER_PEM, amp_flow.provides)
        self.assertEqual(5, len(amp_flow.provides))
        self.assertEqual(2, len(amp_flow.requires))

    def test_get_cert_backup_rest_anti_affinity_create_amphora_for_lb_flow(
            self, mock_get_net_driver):
        """BACKUP role with nova anti-affinity: SERVER_GROUP_ID is required."""
        self.conf.config(group="nova", enable_anti_affinity=True)
        self.AmpFlow = amphora_flows.AmphoraFlows()
        amp_flow = self.AmpFlow._get_create_amp_for_lb_subflow(
            'SOMEPREFIX', constants.ROLE_BACKUP)
        self.assertIsInstance(amp_flow, flow.Flow)
        self.assertIn(constants.AMPHORA_ID, amp_flow.provides)
        self.assertIn(constants.SERVER_GROUP_ID, amp_flow.requires)
        self.assertIn(constants.COMPUTE_ID, amp_flow.provides)
        self.assertIn(constants.COMPUTE_OBJ, amp_flow.provides)
        self.assertIn(constants.SERVER_PEM, amp_flow.provides)
        self.assertEqual(5, len(amp_flow.provides))
        self.assertEqual(3, len(amp_flow.requires))
        # Restore the default so later tests see anti-affinity disabled.
        self.conf.config(group="nova", enable_anti_affinity=False)

    def test_get_delete_amphora_flow(self, mock_get_net_driver):
        """Delete flow requires only AMPHORA and provides nothing."""
        amp_flow = self.AmpFlow.get_delete_amphora_flow()
        self.assertIsInstance(amp_flow, flow.Flow)
        self.assertIn(constants.AMPHORA, amp_flow.requires)
        self.assertEqual(0, len(amp_flow.provides))
        self.assertEqual(1, len(amp_flow.requires))

    def test_allocate_amp_to_lb_decider(self, mock_get_net_driver):
        """Decider is True when history yields a value, False for None."""
        history = mock.MagicMock()
        # First call sees ['TEST'], second call sees [None].
        values = mock.MagicMock(side_effect=[['TEST'], [None]])
        history.values = values
        result = self.AmpFlow._allocate_amp_to_lb_decider(history)
        self.assertTrue(result)
        result = self.AmpFlow._allocate_amp_to_lb_decider(history)
        self.assertFalse(result)

    def test_create_new_amp_for_lb_decider(self, mock_get_net_driver):
        """Decider is True when history yields None, False for a value."""
        history = mock.MagicMock()
        # Inverse of the allocate decider: [None] first, ['TEST'] second.
        values = mock.MagicMock(side_effect=[[None], ['TEST']])
        history.values = values
        result = self.AmpFlow._create_new_amp_for_lb_decider(history)
        self.assertTrue(result)
        result = self.AmpFlow._create_new_amp_for_lb_decider(history)
        self.assertFalse(result)

    def test_get_failover_flow_allocated(self, mock_get_net_driver):
        """Failover flow symbols are identical across all roles."""
        # Default (no role).
        amp_flow = self.AmpFlow.get_failover_flow(
            load_balancer=self.lb)
        self.assertIsInstance(amp_flow, flow.Flow)
        self.assertIn(constants.FAILED_AMPHORA, amp_flow.requires)
        self.assertIn(constants.LOADBALANCER_ID, amp_flow.requires)
        self.assertIn(constants.AMP_DATA, amp_flow.provides)
        self.assertIn(constants.AMPHORA, amp_flow.provides)
        self.assertIn(constants.AMPHORA_ID, amp_flow.provides)
        self.assertIn(constants.AMPHORAE_NETWORK_CONFIG, amp_flow.provides)
        self.assertIn(constants.COMPUTE_ID, amp_flow.provides)
        self.assertIn(constants.COMPUTE_OBJ, amp_flow.provides)
        self.assertIn(constants.LISTENERS, amp_flow.provides)
        self.assertIn(constants.LOADBALANCER, amp_flow.provides)
        self.assertEqual(3, len(amp_flow.requires))
        self.assertEqual(12, len(amp_flow.provides))
        # MASTER role.
        amp_flow = self.AmpFlow.get_failover_flow(
            role=constants.ROLE_MASTER, load_balancer=self.lb)
        self.assertIsInstance(amp_flow, flow.Flow)
        self.assertIn(constants.FAILED_AMPHORA, amp_flow.requires)
        self.assertIn(constants.LOADBALANCER_ID, amp_flow.requires)
        self.assertIn(constants.AMP_DATA, amp_flow.provides)
        self.assertIn(constants.AMPHORA, amp_flow.provides)
        self.assertIn(constants.AMPHORA_ID, amp_flow.provides)
        self.assertIn(constants.AMPHORAE_NETWORK_CONFIG, amp_flow.provides)
        self.assertIn(constants.COMPUTE_ID, amp_flow.provides)
        self.assertIn(constants.COMPUTE_OBJ, amp_flow.provides)
        self.assertIn(constants.LISTENERS, amp_flow.provides)
        self.assertIn(constants.LOADBALANCER, amp_flow.provides)
        self.assertEqual(3, len(amp_flow.requires))
        self.assertEqual(12, len(amp_flow.provides))
        # BACKUP role.
        amp_flow = self.AmpFlow.get_failover_flow(
            role=constants.ROLE_BACKUP, load_balancer=self.lb)
        self.assertIsInstance(amp_flow, flow.Flow)
        self.assertIn(constants.FAILED_AMPHORA, amp_flow.requires)
        self.assertIn(constants.LOADBALANCER_ID, amp_flow.requires)
        self.assertIn(constants.AMP_DATA, amp_flow.provides)
        self.assertIn(constants.AMPHORA, amp_flow.provides)
        self.assertIn(constants.AMPHORA_ID, amp_flow.provides)
        self.assertIn(constants.AMPHORAE_NETWORK_CONFIG, amp_flow.provides)
        self.assertIn(constants.COMPUTE_ID, amp_flow.provides)
        self.assertIn(constants.COMPUTE_OBJ, amp_flow.provides)
        self.assertIn(constants.LISTENERS, amp_flow.provides)
        self.assertIn(constants.LOADBALANCER, amp_flow.provides)
        self.assertEqual(3, len(amp_flow.requires))
        self.assertEqual(12, len(amp_flow.provides))
        # Unknown role.
        amp_flow = self.AmpFlow.get_failover_flow(
            role='BOGUSROLE', load_balancer=self.lb)
        self.assertIsInstance(amp_flow, flow.Flow)
        self.assertIn(constants.FAILED_AMPHORA, amp_flow.requires)
        self.assertIn(constants.LOADBALANCER_ID, amp_flow.requires)
        self.assertIn(constants.AMP_DATA, amp_flow.provides)
        self.assertIn(constants.AMPHORA, amp_flow.provides)
        self.assertIn(constants.AMPHORA_ID, amp_flow.provides)
        self.assertIn(constants.AMPHORAE_NETWORK_CONFIG, amp_flow.provides)
        self.assertIn(constants.COMPUTE_ID, amp_flow.provides)
        self.assertIn(constants.COMPUTE_OBJ, amp_flow.provides)
        self.assertIn(constants.LISTENERS, amp_flow.provides)
        self.assertIn(constants.LOADBALANCER, amp_flow.provides)
        self.assertEqual(3, len(amp_flow.requires))
        self.assertEqual(12, len(amp_flow.provides))

    def test_get_failover_flow_spare(self, mock_get_net_driver):
        """Failover without a load balancer requires only FAILED_AMPHORA."""
        amp_flow = self.AmpFlow.get_failover_flow()
        self.assertIsInstance(amp_flow, flow.Flow)
        self.assertIn(constants.FAILED_AMPHORA, amp_flow.requires)
        self.assertEqual(1, len(amp_flow.requires))
        self.assertEqual(0, len(amp_flow.provides))

    def test_cert_rotate_amphora_flow(self, mock_get_net_driver):
        """Cert-rotation flow provides SERVER_PEM and requires AMPHORA."""
        self.AmpFlow = amphora_flows.AmphoraFlows()
        amp_rotate_flow = self.AmpFlow.cert_rotate_amphora_flow()
        self.assertIsInstance(amp_rotate_flow, flow.Flow)
        self.assertIn(constants.SERVER_PEM, amp_rotate_flow.provides)
        self.assertIn(constants.AMPHORA, amp_rotate_flow.requires)
        self.assertEqual(1, len(amp_rotate_flow.provides))
        self.assertEqual(2, len(amp_rotate_flow.requires))

    def test_get_vrrp_subflow(self, mock_get_net_driver):
        """VRRP subflow both requires and provides LOADBALANCER."""
        vrrp_subflow = self.AmpFlow.get_vrrp_subflow('123')
        self.assertIsInstance(vrrp_subflow, flow.Flow)
        self.assertIn(constants.LOADBALANCER, vrrp_subflow.provides)
        self.assertIn(constants.LOADBALANCER, vrrp_subflow.requires)
        self.assertEqual(1, len(vrrp_subflow.provides))
        self.assertEqual(1, len(vrrp_subflow.requires))

    def test_get_post_map_lb_subflow(self, mock_get_net_driver):
        """Post-map subflow has the same symbols for every role value."""
        self.AmpFlow = amphora_flows.AmphoraFlows()
        amp_flow = self.AmpFlow._get_post_map_lb_subflow(
            'SOMEPREFIX', constants.ROLE_MASTER)
        self.assertIsInstance(amp_flow, flow.Flow)
        self.assertIn(constants.AMPHORA_ID, amp_flow.requires)
        self.assertIn(constants.AMPHORA, amp_flow.provides)
        self.assertEqual(1, len(amp_flow.provides))
        self.assertEqual(1, len(amp_flow.requires))
        amp_flow = self.AmpFlow._get_post_map_lb_subflow(
            'SOMEPREFIX', constants.ROLE_BACKUP)
        self.assertIsInstance(amp_flow, flow.Flow)
        self.assertIn(constants.AMPHORA_ID, amp_flow.requires)
        self.assertIn(constants.AMPHORA, amp_flow.provides)
        self.assertEqual(1, len(amp_flow.provides))
        self.assertEqual(1, len(amp_flow.requires))
        amp_flow = self.AmpFlow._get_post_map_lb_subflow(
            'SOMEPREFIX', constants.ROLE_STANDALONE)
        self.assertIsInstance(amp_flow, flow.Flow)
        self.assertIn(constants.AMPHORA_ID, amp_flow.requires)
        self.assertIn(constants.AMPHORA, amp_flow.provides)
        self.assertEqual(1, len(amp_flow.provides))
        self.assertEqual(1, len(amp_flow.requires))
        amp_flow = self.AmpFlow._get_post_map_lb_subflow(
            'SOMEPREFIX', 'BOGUS_ROLE')
        self.assertIsInstance(amp_flow, flow.Flow)
        self.assertIn(constants.AMPHORA_ID, amp_flow.requires)
        self.assertIn(constants.AMPHORA, amp_flow.provides)
        self.assertEqual(1, len(amp_flow.provides))
        self.assertEqual(1, len(amp_flow.requires))
| 40.228501
| 76
| 0.72418
| 2,098
| 16,373
| 5.368923
| 0.089609
| 0.107511
| 0.188299
| 0.151811
| 0.875533
| 0.855114
| 0.826527
| 0.814719
| 0.810458
| 0.803001
| 0
| 0.005073
| 0.181274
| 16,373
| 406
| 77
| 40.327586
| 0.835211
| 0.041104
| 0
| 0.78022
| 0
| 0
| 0.016196
| 0.004208
| 0
| 0
| 0
| 0
| 0.615385
| 1
| 0.065934
| false
| 0
| 0.029304
| 0
| 0.098901
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
5d6effb309d67c24548bb265b65c8bc7b29ba6ff
| 104
|
py
|
Python
|
src/assemblyline/tests/__init__.py
|
eventbrite/django-assemblyline
|
3f4e0524b54ea5d840f6989abc89613abcded575
|
[
"MIT"
] | 1
|
2016-05-23T15:11:58.000Z
|
2016-05-23T15:11:58.000Z
|
src/assemblyline/tests/__init__.py
|
mscheibe/django-assemblyline
|
170db91f43ac915d4c671e2fc342a60df5cc3b35
|
[
"MIT"
] | null | null | null |
src/assemblyline/tests/__init__.py
|
mscheibe/django-assemblyline
|
170db91f43ac915d4c671e2fc342a60df5cc3b35
|
[
"MIT"
] | 2
|
2016-08-14T07:15:43.000Z
|
2021-09-08T11:57:38.000Z
|
from assemblyline.tests.test_factories import *
from assemblyline.tests.test_flat_page_factory import *
| 34.666667
| 55
| 0.865385
| 14
| 104
| 6.142857
| 0.642857
| 0.372093
| 0.488372
| 0.581395
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.076923
| 104
| 2
| 56
| 52
| 0.895833
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
53d7a12e57fb22871806141e2929ce61072342c6
| 22,223
|
py
|
Python
|
google/cloud/aiplatform_v1/services/job_service/pagers.py
|
geraint0923/python-aiplatform
|
f40f32289e1fbeb93b35e4b66f65d15528a6481c
|
[
"Apache-2.0"
] | null | null | null |
google/cloud/aiplatform_v1/services/job_service/pagers.py
|
geraint0923/python-aiplatform
|
f40f32289e1fbeb93b35e4b66f65d15528a6481c
|
[
"Apache-2.0"
] | null | null | null |
google/cloud/aiplatform_v1/services/job_service/pagers.py
|
geraint0923/python-aiplatform
|
f40f32289e1fbeb93b35e4b66f65d15528a6481c
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import (
Any,
AsyncIterable,
Awaitable,
Callable,
Iterable,
Sequence,
Tuple,
Optional,
)
from google.cloud.aiplatform_v1.types import batch_prediction_job
from google.cloud.aiplatform_v1.types import custom_job
from google.cloud.aiplatform_v1.types import data_labeling_job
from google.cloud.aiplatform_v1.types import hyperparameter_tuning_job
from google.cloud.aiplatform_v1.types import job_service
class ListCustomJobsPager:
    """Synchronous pager over ``list_custom_jobs`` results.

    Wraps an initial
    :class:`google.cloud.aiplatform_v1.types.ListCustomJobsResponse` and
    exposes ``__iter__`` to walk every ``custom_jobs`` entry, issuing
    follow-up ``ListCustomJobs`` requests whenever the current response
    carries a ``next_page_token``.

    Attribute access falls through to the most recently fetched response,
    so the usual response fields remain available on the pager itself.
    """

    def __init__(
        self,
        method: Callable[..., job_service.ListCustomJobsResponse],
        request: job_service.ListCustomJobsRequest,
        response: job_service.ListCustomJobsResponse,
        *,
        metadata: Sequence[Tuple[str, str]] = ()
    ):
        """Instantiate the pager.

        Args:
            method (Callable): The method that was originally called, and
                which instantiated this pager.
            request (google.cloud.aiplatform_v1.types.ListCustomJobsRequest):
                The initial request object.
            response (google.cloud.aiplatform_v1.types.ListCustomJobsResponse):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        """
        self._metadata = metadata
        self._response = response
        self._request = job_service.ListCustomJobsRequest(request)
        self._method = method

    def __getattr__(self, name: str) -> Any:
        # Delegate unknown attribute lookups to the latest response.
        return getattr(self._response, name)

    @property
    def pages(self) -> Iterable[job_service.ListCustomJobsResponse]:
        """Yield each response page, fetching the next one on demand."""
        current = self._response
        yield current
        while current.next_page_token:
            self._request.page_token = current.next_page_token
            current = self._method(self._request, metadata=self._metadata)
            self._response = current
            yield current

    def __iter__(self) -> Iterable[custom_job.CustomJob]:
        for page in self.pages:
            for job in page.custom_jobs:
                yield job

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}<{self._response!r}>"
class ListCustomJobsAsyncPager:
    """Asynchronous pager over ``list_custom_jobs`` results.

    Wraps an initial
    :class:`google.cloud.aiplatform_v1.types.ListCustomJobsResponse` and
    exposes ``__aiter__`` to walk every ``custom_jobs`` entry, awaiting
    follow-up ``ListCustomJobs`` requests whenever the current response
    carries a ``next_page_token``.

    Attribute access falls through to the most recently fetched response,
    so the usual response fields remain available on the pager itself.
    """

    def __init__(
        self,
        method: Callable[..., Awaitable[job_service.ListCustomJobsResponse]],
        request: job_service.ListCustomJobsRequest,
        response: job_service.ListCustomJobsResponse,
        *,
        metadata: Sequence[Tuple[str, str]] = ()
    ):
        """Instantiate the pager.

        Args:
            method (Callable): The method that was originally called, and
                which instantiated this pager.
            request (google.cloud.aiplatform_v1.types.ListCustomJobsRequest):
                The initial request object.
            response (google.cloud.aiplatform_v1.types.ListCustomJobsResponse):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        """
        self._metadata = metadata
        self._response = response
        self._request = job_service.ListCustomJobsRequest(request)
        self._method = method

    def __getattr__(self, name: str) -> Any:
        # Delegate unknown attribute lookups to the latest response.
        return getattr(self._response, name)

    @property
    async def pages(self) -> AsyncIterable[job_service.ListCustomJobsResponse]:
        """Yield each response page, awaiting the next one on demand."""
        current = self._response
        yield current
        while current.next_page_token:
            self._request.page_token = current.next_page_token
            current = await self._method(self._request, metadata=self._metadata)
            self._response = current
            yield current

    def __aiter__(self) -> AsyncIterable[custom_job.CustomJob]:
        async def _flatten():
            # Flatten pages into individual CustomJob items.
            async for page in self.pages:
                for item in page.custom_jobs:
                    yield item

        return _flatten()

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}<{self._response!r}>"
class ListDataLabelingJobsPager:
    """Synchronous pager over ``list_data_labeling_jobs`` results.

    Wraps an initial
    :class:`google.cloud.aiplatform_v1.types.ListDataLabelingJobsResponse`
    and exposes ``__iter__`` to walk every ``data_labeling_jobs`` entry,
    issuing follow-up ``ListDataLabelingJobs`` requests whenever the current
    response carries a ``next_page_token``.

    Attribute access falls through to the most recently fetched response,
    so the usual response fields remain available on the pager itself.
    """

    def __init__(
        self,
        method: Callable[..., job_service.ListDataLabelingJobsResponse],
        request: job_service.ListDataLabelingJobsRequest,
        response: job_service.ListDataLabelingJobsResponse,
        *,
        metadata: Sequence[Tuple[str, str]] = ()
    ):
        """Instantiate the pager.

        Args:
            method (Callable): The method that was originally called, and
                which instantiated this pager.
            request (google.cloud.aiplatform_v1.types.ListDataLabelingJobsRequest):
                The initial request object.
            response (google.cloud.aiplatform_v1.types.ListDataLabelingJobsResponse):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        """
        self._metadata = metadata
        self._response = response
        self._request = job_service.ListDataLabelingJobsRequest(request)
        self._method = method

    def __getattr__(self, name: str) -> Any:
        # Delegate unknown attribute lookups to the latest response.
        return getattr(self._response, name)

    @property
    def pages(self) -> Iterable[job_service.ListDataLabelingJobsResponse]:
        """Yield each response page, fetching the next one on demand."""
        current = self._response
        yield current
        while current.next_page_token:
            self._request.page_token = current.next_page_token
            current = self._method(self._request, metadata=self._metadata)
            self._response = current
            yield current

    def __iter__(self) -> Iterable[data_labeling_job.DataLabelingJob]:
        for page in self.pages:
            for job in page.data_labeling_jobs:
                yield job

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}<{self._response!r}>"
class ListDataLabelingJobsAsyncPager:
    """Asynchronous pager over ``list_data_labeling_jobs`` results.

    Wraps an initial
    :class:`google.cloud.aiplatform_v1.types.ListDataLabelingJobsResponse`
    and exposes ``__aiter__`` to walk every ``data_labeling_jobs`` entry,
    awaiting follow-up ``ListDataLabelingJobs`` requests whenever the
    current response carries a ``next_page_token``.

    Attribute access falls through to the most recently fetched response,
    so the usual response fields remain available on the pager itself.
    """

    def __init__(
        self,
        method: Callable[..., Awaitable[job_service.ListDataLabelingJobsResponse]],
        request: job_service.ListDataLabelingJobsRequest,
        response: job_service.ListDataLabelingJobsResponse,
        *,
        metadata: Sequence[Tuple[str, str]] = ()
    ):
        """Instantiate the pager.

        Args:
            method (Callable): The method that was originally called, and
                which instantiated this pager.
            request (google.cloud.aiplatform_v1.types.ListDataLabelingJobsRequest):
                The initial request object.
            response (google.cloud.aiplatform_v1.types.ListDataLabelingJobsResponse):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        """
        self._metadata = metadata
        self._response = response
        self._request = job_service.ListDataLabelingJobsRequest(request)
        self._method = method

    def __getattr__(self, name: str) -> Any:
        # Delegate unknown attribute lookups to the latest response.
        return getattr(self._response, name)

    @property
    async def pages(self) -> AsyncIterable[job_service.ListDataLabelingJobsResponse]:
        """Yield each response page, awaiting the next one on demand."""
        current = self._response
        yield current
        while current.next_page_token:
            self._request.page_token = current.next_page_token
            current = await self._method(self._request, metadata=self._metadata)
            self._response = current
            yield current

    def __aiter__(self) -> AsyncIterable[data_labeling_job.DataLabelingJob]:
        async def _flatten():
            # Flatten pages into individual DataLabelingJob items.
            async for page in self.pages:
                for item in page.data_labeling_jobs:
                    yield item

        return _flatten()

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}<{self._response!r}>"
class ListHyperparameterTuningJobsPager:
    """Synchronous pager over ``list_hyperparameter_tuning_jobs`` results.

    Wraps an initial
    :class:`google.cloud.aiplatform_v1.types.ListHyperparameterTuningJobsResponse`
    and exposes ``__iter__`` to walk every ``hyperparameter_tuning_jobs``
    entry, issuing follow-up ``ListHyperparameterTuningJobs`` requests
    whenever the current response carries a ``next_page_token``.

    Attribute access falls through to the most recently fetched response,
    so the usual response fields remain available on the pager itself.
    """

    def __init__(
        self,
        method: Callable[..., job_service.ListHyperparameterTuningJobsResponse],
        request: job_service.ListHyperparameterTuningJobsRequest,
        response: job_service.ListHyperparameterTuningJobsResponse,
        *,
        metadata: Sequence[Tuple[str, str]] = ()
    ):
        """Instantiate the pager.

        Args:
            method (Callable): The method that was originally called, and
                which instantiated this pager.
            request (google.cloud.aiplatform_v1.types.ListHyperparameterTuningJobsRequest):
                The initial request object.
            response (google.cloud.aiplatform_v1.types.ListHyperparameterTuningJobsResponse):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        """
        self._metadata = metadata
        self._response = response
        self._request = job_service.ListHyperparameterTuningJobsRequest(request)
        self._method = method

    def __getattr__(self, name: str) -> Any:
        # Delegate unknown attribute lookups to the latest response.
        return getattr(self._response, name)

    @property
    def pages(self) -> Iterable[job_service.ListHyperparameterTuningJobsResponse]:
        """Yield each response page, fetching the next one on demand."""
        current = self._response
        yield current
        while current.next_page_token:
            self._request.page_token = current.next_page_token
            current = self._method(self._request, metadata=self._metadata)
            self._response = current
            yield current

    def __iter__(self) -> Iterable[hyperparameter_tuning_job.HyperparameterTuningJob]:
        for page in self.pages:
            for job in page.hyperparameter_tuning_jobs:
                yield job

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}<{self._response!r}>"
class ListHyperparameterTuningJobsAsyncPager:
    """Asynchronous pager over ``list_hyperparameter_tuning_jobs`` results.

    Wraps an initial
    :class:`google.cloud.aiplatform_v1.types.ListHyperparameterTuningJobsResponse`
    and exposes ``__aiter__`` to walk every ``hyperparameter_tuning_jobs``
    entry, awaiting follow-up ``ListHyperparameterTuningJobs`` requests
    whenever the current response carries a ``next_page_token``.

    Attribute access falls through to the most recently fetched response,
    so the usual response fields remain available on the pager itself.
    """

    def __init__(
        self,
        method: Callable[
            ..., Awaitable[job_service.ListHyperparameterTuningJobsResponse]
        ],
        request: job_service.ListHyperparameterTuningJobsRequest,
        response: job_service.ListHyperparameterTuningJobsResponse,
        *,
        metadata: Sequence[Tuple[str, str]] = ()
    ):
        """Instantiate the pager.

        Args:
            method (Callable): The method that was originally called, and
                which instantiated this pager.
            request (google.cloud.aiplatform_v1.types.ListHyperparameterTuningJobsRequest):
                The initial request object.
            response (google.cloud.aiplatform_v1.types.ListHyperparameterTuningJobsResponse):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        """
        self._metadata = metadata
        self._response = response
        self._request = job_service.ListHyperparameterTuningJobsRequest(request)
        self._method = method

    def __getattr__(self, name: str) -> Any:
        # Delegate unknown attribute lookups to the latest response.
        return getattr(self._response, name)

    @property
    async def pages(
        self,
    ) -> AsyncIterable[job_service.ListHyperparameterTuningJobsResponse]:
        """Yield each response page, awaiting the next one on demand."""
        current = self._response
        yield current
        while current.next_page_token:
            self._request.page_token = current.next_page_token
            current = await self._method(self._request, metadata=self._metadata)
            self._response = current
            yield current

    def __aiter__(
        self,
    ) -> AsyncIterable[hyperparameter_tuning_job.HyperparameterTuningJob]:
        async def _flatten():
            # Flatten pages into individual HyperparameterTuningJob items.
            async for page in self.pages:
                for item in page.hyperparameter_tuning_jobs:
                    yield item

        return _flatten()

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}<{self._response!r}>"
class ListBatchPredictionJobsPager:
    """Synchronous pager over ``list_batch_prediction_jobs`` results.

    Wraps an initial
    :class:`google.cloud.aiplatform_v1.types.ListBatchPredictionJobsResponse`
    and exposes ``__iter__`` to walk every ``batch_prediction_jobs`` entry,
    issuing follow-up ``ListBatchPredictionJobs`` requests whenever the
    current response carries a ``next_page_token``.

    Attribute access falls through to the most recently fetched response,
    so the usual response fields remain available on the pager itself.
    """

    def __init__(
        self,
        method: Callable[..., job_service.ListBatchPredictionJobsResponse],
        request: job_service.ListBatchPredictionJobsRequest,
        response: job_service.ListBatchPredictionJobsResponse,
        *,
        metadata: Sequence[Tuple[str, str]] = ()
    ):
        """Instantiate the pager.

        Args:
            method (Callable): The method that was originally called, and
                which instantiated this pager.
            request (google.cloud.aiplatform_v1.types.ListBatchPredictionJobsRequest):
                The initial request object.
            response (google.cloud.aiplatform_v1.types.ListBatchPredictionJobsResponse):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        """
        self._metadata = metadata
        self._response = response
        self._request = job_service.ListBatchPredictionJobsRequest(request)
        self._method = method

    def __getattr__(self, name: str) -> Any:
        # Delegate unknown attribute lookups to the latest response.
        return getattr(self._response, name)

    @property
    def pages(self) -> Iterable[job_service.ListBatchPredictionJobsResponse]:
        """Yield each response page, fetching the next one on demand."""
        current = self._response
        yield current
        while current.next_page_token:
            self._request.page_token = current.next_page_token
            current = self._method(self._request, metadata=self._metadata)
            self._response = current
            yield current

    def __iter__(self) -> Iterable[batch_prediction_job.BatchPredictionJob]:
        for page in self.pages:
            for job in page.batch_prediction_jobs:
                yield job

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}<{self._response!r}>"
class ListBatchPredictionJobsAsyncPager:
    """Asynchronous pager over ``list_batch_prediction_jobs`` results.

    Wraps an initial
    :class:`google.cloud.aiplatform_v1.types.ListBatchPredictionJobsResponse`
    and exposes ``__aiter__`` to walk every ``batch_prediction_jobs`` entry,
    awaiting follow-up ``ListBatchPredictionJobs`` requests whenever the
    current response carries a ``next_page_token``.

    Attribute access falls through to the most recently fetched response,
    so the usual response fields remain available on the pager itself.
    """

    def __init__(
        self,
        method: Callable[..., Awaitable[job_service.ListBatchPredictionJobsResponse]],
        request: job_service.ListBatchPredictionJobsRequest,
        response: job_service.ListBatchPredictionJobsResponse,
        *,
        metadata: Sequence[Tuple[str, str]] = ()
    ):
        """Instantiate the pager.

        Args:
            method (Callable): The method that was originally called, and
                which instantiated this pager.
            request (google.cloud.aiplatform_v1.types.ListBatchPredictionJobsRequest):
                The initial request object.
            response (google.cloud.aiplatform_v1.types.ListBatchPredictionJobsResponse):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        """
        self._metadata = metadata
        self._response = response
        self._request = job_service.ListBatchPredictionJobsRequest(request)
        self._method = method

    def __getattr__(self, name: str) -> Any:
        # Delegate unknown attribute lookups to the latest response.
        return getattr(self._response, name)

    @property
    async def pages(self) -> AsyncIterable[job_service.ListBatchPredictionJobsResponse]:
        """Yield each response page, awaiting the next one on demand."""
        current = self._response
        yield current
        while current.next_page_token:
            self._request.page_token = current.next_page_token
            current = await self._method(self._request, metadata=self._metadata)
            self._response = current
            yield current

    def __aiter__(self) -> AsyncIterable[batch_prediction_job.BatchPredictionJob]:
        async def _flatten():
            # Flatten pages into individual BatchPredictionJob items.
            async for page in self.pages:
                for item in page.batch_prediction_jobs:
                    yield item

        return _flatten()

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}<{self._response!r}>"
| 40.405455
| 96
| 0.686856
| 2,382
| 22,223
| 6.161629
| 0.080605
| 0.052327
| 0.05294
| 0.057982
| 0.918921
| 0.918921
| 0.918921
| 0.916332
| 0.896709
| 0.896709
| 0
| 0.003643
| 0.234217
| 22,223
| 549
| 97
| 40.479053
| 0.858797
| 0.456284
| 0
| 0.763052
| 0
| 0
| 0.007307
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.144578
| false
| 0
| 0.024096
| 0.064257
| 0.281125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
53ffe132e4e8c4ab815d6afa27919ba238e543fa
| 16,878
|
py
|
Python
|
RecoJets/JetProducers/python/PileupJetIDCutParams_cfi.py
|
gputtley/cmssw
|
c1ef8454804e4ebea8b65f59c4a952a6c94fde3b
|
[
"Apache-2.0"
] | 2
|
2020-01-21T11:23:39.000Z
|
2020-01-21T11:23:42.000Z
|
RecoJets/JetProducers/python/PileupJetIDCutParams_cfi.py
|
gputtley/cmssw
|
c1ef8454804e4ebea8b65f59c4a952a6c94fde3b
|
[
"Apache-2.0"
] | 26
|
2018-10-30T12:47:58.000Z
|
2022-03-29T08:39:00.000Z
|
RecoJets/JetProducers/python/PileupJetIDCutParams_cfi.py
|
gputtley/cmssw
|
c1ef8454804e4ebea8b65f59c4a952a6c94fde3b
|
[
"Apache-2.0"
] | 3
|
2017-06-07T15:22:28.000Z
|
2019-02-28T20:48:30.000Z
|
import FWCore.ParameterSet.Config as cms
###########################################################
## Working points for the 81X training (completed in 80X with variable fixes)
###########################################################
# Each PSet below holds MVA (or cut-variable) working points.  Rows are pT
# bins (0-10, 10-20, 20-30, 30-50 GeV); each vdouble holds one value per
# eta category: 0-2.5, 2.5-2.75, 2.75-3.0, 3.0-5.0.
full_81x_chs_wp = cms.PSet(
    #4 Eta Categories 0-2.5 2.5-2.75 2.75-3.0 3.0-5.0
    #Tight Id
    Pt010_Tight    = cms.vdouble( 0.69, -0.35, -0.26, -0.21),
    Pt1020_Tight   = cms.vdouble( 0.69, -0.35, -0.26, -0.21),
    Pt2030_Tight   = cms.vdouble( 0.69, -0.35, -0.26, -0.21),
    Pt3050_Tight   = cms.vdouble( 0.86, -0.10, -0.05, -0.01),
    #Medium Id
    Pt010_Medium   = cms.vdouble( 0.18, -0.55, -0.42, -0.36),
    Pt1020_Medium  = cms.vdouble( 0.18, -0.55, -0.42, -0.36),
    Pt2030_Medium  = cms.vdouble( 0.18, -0.55, -0.42, -0.36),
    Pt3050_Medium  = cms.vdouble( 0.61, -0.35, -0.23, -0.17),
    #Loose Id
    Pt010_Loose    = cms.vdouble(-0.97, -0.68, -0.53, -0.47),
    Pt1020_Loose   = cms.vdouble(-0.97, -0.68, -0.53, -0.47),
    Pt2030_Loose   = cms.vdouble(-0.97, -0.68, -0.53, -0.47),
    Pt3050_Loose   = cms.vdouble(-0.89, -0.52, -0.38, -0.30)
)
###########################################################
## Working points for the 102X training
###########################################################
# clone() gives an independent copy of the 81X PSet (same cut values).
full_102x_chs_wp = full_81x_chs_wp.clone()
###########################################################
## Working points for the 94X training
###########################################################
full_94x_chs_wp = full_81x_chs_wp.clone()
###########################################################
## Working points for the 80X training
###########################################################
full_80x_chs_wp = cms.PSet(
    #4 Eta Categories 0-2.5 2.5-2.75 2.75-3.0 3.0-5.0
    #Tight Id
    Pt010_Tight    = cms.vdouble( 0.26, -0.34, -0.24, -0.26),
    Pt1020_Tight   = cms.vdouble( 0.26, -0.34, -0.24, -0.26),
    Pt2030_Tight   = cms.vdouble( 0.26, -0.34, -0.24, -0.26),
    Pt3050_Tight   = cms.vdouble( 0.62, -0.21, -0.07, -0.03),
    #Medium Id
    Pt010_Medium   = cms.vdouble(-0.49, -0.53, -0.44, -0.42),
    Pt1020_Medium  = cms.vdouble(-0.49, -0.53, -0.44, -0.42),
    Pt2030_Medium  = cms.vdouble(-0.49, -0.53, -0.44, -0.42),
    Pt3050_Medium  = cms.vdouble(-0.06, -0.42, -0.3 , -0.23),
    #Loose Id
    Pt010_Loose    = cms.vdouble(-0.96, -0.64, -0.56, -0.54),
    Pt1020_Loose   = cms.vdouble(-0.96, -0.64, -0.56, -0.54),
    Pt2030_Loose   = cms.vdouble(-0.96, -0.64, -0.56, -0.54),
    Pt3050_Loose   = cms.vdouble(-0.92, -0.56, -0.44, -0.39)
)
###########################################################
## Working points for the 76X training
###########################################################
full_76x_chs_wp = cms.PSet(
    #4 Eta Categories 0-2.5 2.5-2.75 2.75-3.0 3.0-5.0
    #Tight Id
    Pt010_Tight    = cms.vdouble(0.09,-0.37,-0.24,-0.21),
    Pt1020_Tight   = cms.vdouble(0.09,-0.37,-0.24,-0.21),
    Pt2030_Tight   = cms.vdouble(0.09,-0.37,-0.24,-0.21),
    Pt3050_Tight   = cms.vdouble(0.52,-0.19,-0.06,-0.03),
    #Medium Id
    Pt010_Medium   = cms.vdouble(-0.58,-0.52,-0.40,-0.36),
    Pt1020_Medium  = cms.vdouble(-0.58,-0.52,-0.40,-0.36),
    Pt2030_Medium  = cms.vdouble(-0.58,-0.52,-0.40,-0.36),
    Pt3050_Medium  = cms.vdouble(-0.20,-0.39,-0.24,-0.19),
    #Loose Id
    Pt010_Loose    = cms.vdouble(-0.96,-0.62,-0.53,-0.49),
    Pt1020_Loose   = cms.vdouble(-0.96,-0.62,-0.53,-0.49),
    Pt2030_Loose   = cms.vdouble(-0.96,-0.62,-0.53,-0.49),
    Pt3050_Loose   = cms.vdouble(-0.93,-0.52,-0.39,-0.31)
)
###########################################################
## Working points for the 74X training
###########################################################
full_74x_chs_wp = cms.PSet(
    #4 Eta Categories 0-2.5 2.5-2.75 2.75-3.0 3.0-5.0
    #Tight Id
    Pt010_Tight    = cms.vdouble(-0.1,-0.83,-0.83,-0.98),
    Pt1020_Tight   = cms.vdouble(-0.1,-0.83,-0.83,-0.98),
    Pt2030_Tight   = cms.vdouble(-0.1,-0.83,-0.83,-0.98),
    Pt3050_Tight   = cms.vdouble(-0.5,-0.77,-0.80,-0.98),
    #Medium Id
    Pt010_Medium   = cms.vdouble(-0.3,-0.87,-0.87,-0.99),
    Pt1020_Medium  = cms.vdouble(-0.3,-0.87,-0.87,-0.99),
    Pt2030_Medium  = cms.vdouble(-0.3,-0.87,-0.87,-0.99),
    Pt3050_Medium  = cms.vdouble(-0.6,-0.85,-0.85,-0.99),
    #Loose Id
    Pt010_Loose    = cms.vdouble(-0.8,-0.97,-0.97,-0.99),
    Pt1020_Loose   = cms.vdouble(-0.8,-0.97,-0.97,-0.99),
    Pt2030_Loose   = cms.vdouble(-0.8,-0.97,-0.97,-0.99),
    Pt3050_Loose   = cms.vdouble(-0.8,-0.95,-0.97,-0.99)
)
###########################################################
## Working points for the 53X training/New Met Dec 21, 2012
###########################################################
full_53x_wp = cms.PSet(
    #4 Eta Categories 0-2.5 2.5-2.75 2.75-3.0 3.0-5.0
    #Tight Id
    Pt010_Tight    = cms.vdouble(-0.83,-0.81,-0.74,-0.81),
    Pt1020_Tight   = cms.vdouble(-0.83,-0.81,-0.74,-0.81),
    Pt2030_Tight   = cms.vdouble( 0.73, 0.05,-0.26,-0.42),
    Pt3050_Tight   = cms.vdouble( 0.73, 0.05,-0.26,-0.42),
    #Medium Id
    Pt010_Medium   = cms.vdouble(-0.83,-0.92,-0.90,-0.92),
    Pt1020_Medium  = cms.vdouble(-0.83,-0.92,-0.90,-0.92),
    Pt2030_Medium  = cms.vdouble( 0.10,-0.36,-0.54,-0.54),
    Pt3050_Medium  = cms.vdouble( 0.10,-0.36,-0.54,-0.54),
    #Loose Id
    Pt010_Loose    = cms.vdouble(-0.95,-0.96,-0.94,-0.95),
    Pt1020_Loose   = cms.vdouble(-0.95,-0.96,-0.94,-0.95),
    Pt2030_Loose   = cms.vdouble(-0.63,-0.60,-0.55,-0.45),
    Pt3050_Loose   = cms.vdouble(-0.63,-0.60,-0.55,-0.45),
    #MET
    Pt010_MET      = cms.vdouble( 0. ,-0.6,-0.4,-0.4),
    Pt1020_MET     = cms.vdouble( 0.3 ,-0.2,-0.4,-0.4),
    Pt2030_MET     = cms.vdouble( 0. , 0. , 0. , 0. ),
    Pt3050_MET     = cms.vdouble( 0. , 0. ,-0.1,-0.2)
)
full_53x_chs_wp = cms.PSet(
    #4 Eta Categories 0-2.5 2.5-2.75 2.75-3.0 3.0-5.0
    #Tight Id
    Pt010_Tight    = cms.vdouble(-0.83,-0.81,-0.74,-0.81),
    Pt1020_Tight   = cms.vdouble(-0.83,-0.81,-0.74,-0.81),
    Pt2030_Tight   = cms.vdouble( 0.78, 0.50, 0.17, 0.17),
    Pt3050_Tight   = cms.vdouble( 0.78, 0.50, 0.17, 0.17),
    #Medium Id
    Pt010_Medium   = cms.vdouble(-0.83,-0.92,-0.90,-0.92),
    Pt1020_Medium  = cms.vdouble(-0.83,-0.92,-0.90,-0.92),
    Pt2030_Medium  = cms.vdouble(-0.07,-0.09, 0.00,-0.06),
    Pt3050_Medium  = cms.vdouble(-0.07,-0.09, 0.00,-0.06),
    #Loose Id
    Pt010_Loose    = cms.vdouble(-0.95,-0.96,-0.94,-0.95),
    Pt1020_Loose   = cms.vdouble(-0.95,-0.96,-0.94,-0.95),
    Pt2030_Loose   = cms.vdouble(-0.15,-0.26,-0.16,-0.16),
    Pt3050_Loose   = cms.vdouble(-0.15,-0.26,-0.16,-0.16),
)
# NOTE(review): the Id rows here use 5-element vdoubles of -2 (an
# always-pass sentinel, presumably), while the MET rows use 4 — confirm
# the consumer tolerates the length mismatch.
met_53x_wp = cms.PSet(
    #Tight Id
    Pt010_Tight    = cms.vdouble(-2, -2, -2, -2, -2),
    Pt1020_Tight   = cms.vdouble(-2, -2, -2, -2, -2),
    Pt2030_Tight   = cms.vdouble(-2, -2, -2, -2, -2),
    Pt3050_Tight   = cms.vdouble(-2, -2, -2, -2, -2),
    #Medium Id
    Pt010_Medium   = cms.vdouble(-2, -2, -2, -2, -2),
    Pt1020_Medium  = cms.vdouble(-2, -2, -2, -2, -2),
    Pt2030_Medium  = cms.vdouble(-2, -2, -2, -2, -2),
    Pt3050_Medium  = cms.vdouble(-2, -2, -2, -2, -2),
    #Loose Id
    Pt010_Loose    = cms.vdouble(-2, -2, -2, -2, -2),
    Pt1020_Loose   = cms.vdouble(-2, -2, -2, -2, -2),
    Pt2030_Loose   = cms.vdouble(-2, -2, -2, -2, -2),
    Pt3050_Loose   = cms.vdouble(-2, -2, -2, -2, -2),
    #4 Eta Categories 0-2.5 2.5-2.75 2.75-3.0 3.0-5.0
    #MET
    Pt010_MET      = cms.vdouble(-0.2 ,-0.3,-0.5,-0.5),
    Pt1020_MET     = cms.vdouble(-0.2 ,-0.2,-0.5,-0.3),
    Pt2030_MET     = cms.vdouble(-0.2 ,-0.2,-0.2, 0.1),
    Pt3050_MET     = cms.vdouble(-0.2 ,-0.2, 0. , 0.2)
)
metfull_53x_wp = cms.PSet(
    #MET
    Pt010_MET      = cms.vdouble(-0.2 ,-0.3,-0.5,-0.5),
    Pt1020_MET     = cms.vdouble(-0.2 ,-0.2,-0.5,-0.3),
    Pt2030_MET     = cms.vdouble( 0. , 0. , 0. , 0. ),
    Pt3050_MET     = cms.vdouble( 0. , 0. ,-0.1,-0.2)
)
###########################################################
## Working points for the 5X training
###########################################################
full_5x_wp = cms.PSet(
    #4 Eta Categories 0-2.5 2.5-2.75 2.75-3.0 3.0-5.0
    #Tight Id
    Pt010_Tight    = cms.vdouble(-0.47,-0.92,-0.92,-0.94),
    Pt1020_Tight   = cms.vdouble(-0.47,-0.92,-0.92,-0.94),
    Pt2030_Tight   = cms.vdouble(+0.32,-0.49,-0.61,-0.74),
    Pt3050_Tight   = cms.vdouble(+0.32,-0.49,-0.61,-0.74),
    #Medium Id
    Pt010_Medium   = cms.vdouble(-0.83,-0.96,-0.95,-0.96),
    Pt1020_Medium  = cms.vdouble(-0.83,-0.96,-0.95,-0.96),
    Pt2030_Medium  = cms.vdouble(-0.40,-0.74,-0.76,-0.81),
    Pt3050_Medium  = cms.vdouble(-0.40,-0.74,-0.76,-0.81),
    #Loose Id
    Pt010_Loose    = cms.vdouble(-0.95,-0.97,-0.97,-0.97),
    Pt1020_Loose   = cms.vdouble(-0.95,-0.97,-0.97,-0.97),
    Pt2030_Loose   = cms.vdouble(-0.80,-0.85,-0.84,-0.85),
    Pt3050_Loose   = cms.vdouble(-0.80,-0.85,-0.84,-0.85)
)
simple_5x_wp = cms.PSet(
    #4 Eta Categories 0-2.5 2.5-2.75 2.75-3.0 3.0-5.0
    #Tight Id
    Pt010_Tight    = cms.vdouble(-0.54,-0.93,-0.93,-0.94),
    Pt1020_Tight   = cms.vdouble(-0.54,-0.93,-0.93,-0.94),
    Pt2030_Tight   = cms.vdouble(+0.26,-0.54,-0.63,-0.74),
    Pt3050_Tight   = cms.vdouble(+0.26,-0.54,-0.63,-0.74),
    #Medium Id
    Pt010_Medium   = cms.vdouble(-0.85,-0.96,-0.95,-0.96),
    Pt1020_Medium  = cms.vdouble(-0.85,-0.96,-0.95,-0.96),
    Pt2030_Medium  = cms.vdouble(-0.40,-0.73,-0.74,-0.80),
    Pt3050_Medium  = cms.vdouble(-0.40,-0.73,-0.74,-0.80),
    #Loose Id
    Pt010_Loose    = cms.vdouble(-0.95,-0.97,-0.96,-0.97),
    Pt1020_Loose   = cms.vdouble(-0.95,-0.97,-0.96,-0.97),
    Pt2030_Loose   = cms.vdouble(-0.80,-0.86,-0.80,-0.84),
    Pt3050_Loose   = cms.vdouble(-0.80,-0.86,-0.80,-0.84)
)
###########################################################
## Working points for the 5X_CHS training
###########################################################
full_5x_chs_wp = cms.PSet(
    #4 Eta Categories 0-2.5 2.5-2.75 2.75-3.0 3.0-5.0
    #Tight Id
    Pt010_Tight    = cms.vdouble(-0.59,-0.75,-0.78,-0.80),
    Pt1020_Tight   = cms.vdouble(-0.59,-0.75,-0.78,-0.80),
    Pt2030_Tight   = cms.vdouble(+0.41,-0.10,-0.20,-0.45),
    Pt3050_Tight   = cms.vdouble(+0.41,-0.10,-0.20,-0.45),
    #Medium Id
    Pt010_Medium   = cms.vdouble(-0.94,-0.91,-0.91,-0.92),
    Pt1020_Medium  = cms.vdouble(-0.94,-0.91,-0.91,-0.92),
    Pt2030_Medium  = cms.vdouble(-0.58,-0.65,-0.57,-0.67),
    Pt3050_Medium  = cms.vdouble(-0.58,-0.65,-0.57,-0.67),
    #Loose Id
    Pt010_Loose    = cms.vdouble(-0.98,-0.95,-0.94,-0.94),
    Pt1020_Loose   = cms.vdouble(-0.98,-0.95,-0.94,-0.94),
    Pt2030_Loose   = cms.vdouble(-0.89,-0.77,-0.69,-0.75),
    Pt3050_Loose   = cms.vdouble(-0.89,-0.77,-0.69,-0.57)
)
simple_5x_chs_wp = cms.PSet(
    #4 Eta Categories 0-2.5 2.5-2.75 2.75-3.0 3.0-5.0
    #Tight Id
    Pt010_Tight    = cms.vdouble(-0.60,-0.74,-0.78,-0.81),
    Pt1020_Tight   = cms.vdouble(-0.60,-0.74,-0.78,-0.81),
    Pt2030_Tight   = cms.vdouble(-0.47,-0.06,-0.23,-0.47),
    Pt3050_Tight   = cms.vdouble(-0.47,-0.06,-0.23,-0.47),
    #Medium Id
    Pt010_Medium   = cms.vdouble(-0.95,-0.94,-0.92,-0.91),
    Pt1020_Medium  = cms.vdouble(-0.95,-0.94,-0.92,-0.91),
    Pt2030_Medium  = cms.vdouble(-0.59,-0.65,-0.56,-0.68),
    Pt3050_Medium  = cms.vdouble(-0.59,-0.65,-0.56,-0.68),
    #Loose Id
    Pt010_Loose    = cms.vdouble(-0.98,-0.96,-0.94,-0.94),
    Pt1020_Loose   = cms.vdouble(-0.98,-0.96,-0.94,-0.94),
    Pt2030_Loose   = cms.vdouble(-0.89,-0.75,-0.72,-0.75),
    Pt3050_Loose   = cms.vdouble(-0.89,-0.75,-0.72,-0.75)
)
###########################################################
## Working points for the 4X training
###########################################################
PuJetIdOptMVA_wp = cms.PSet(
    #4 Eta Categories 0-2.5 2.5-2.75 2.75-3.0 3.0-5.0
    #Tight Id
    Pt010_Tight    = cms.vdouble(-0.5,-0.2,-0.83,-0.7),
    Pt1020_Tight   = cms.vdouble(-0.5,-0.2,-0.83,-0.7),
    Pt2030_Tight   = cms.vdouble(-0.2, 0., 0., 0.),
    Pt3050_Tight   = cms.vdouble(-0.2, 0., 0., 0.),
    #Medium Id
    Pt010_Medium   = cms.vdouble(-0.73,-0.89,-0.89,-0.83),
    Pt1020_Medium  = cms.vdouble(-0.73,-0.89,-0.89,-0.83),
    Pt2030_Medium  = cms.vdouble(0.1, -0.4, -0.4, -0.45),
    Pt3050_Medium  = cms.vdouble(0.1, -0.4, -0.4, -0.45),
    #Loose Id
    Pt010_Loose    = cms.vdouble(-0.9,-0.9, -0.9,-0.9),
    Pt1020_Loose   = cms.vdouble(-0.9,-0.9, -0.9,-0.9),
    Pt2030_Loose   = cms.vdouble(-0.4,-0.85,-0.7,-0.6),
    Pt3050_Loose   = cms.vdouble(-0.4,-0.85,-0.7,-0.6)
)
PuJetIdMinMVA_wp = cms.PSet(
    #4 Eta Categories 0-2.5 2.5-2.75 2.75-3.0 3.0-5.0
    #Tight Id
    Pt010_Tight    = cms.vdouble(-0.5,-0.2,-0.83,-0.7),
    Pt1020_Tight   = cms.vdouble(-0.5,-0.2,-0.83,-0.7),
    Pt2030_Tight   = cms.vdouble(-0.2, 0., 0., 0.),
    Pt3050_Tight   = cms.vdouble(-0.2, 0., 0., 0.),
    #Medium Id
    Pt010_Medium   = cms.vdouble(-0.73,-0.89,-0.89,-0.83),
    Pt1020_Medium  = cms.vdouble(-0.73,-0.89,-0.89,-0.83),
    Pt2030_Medium  = cms.vdouble(0.1, -0.4, -0.5, -0.45),
    Pt3050_Medium  = cms.vdouble(0.1, -0.4, -0.5, -0.45),
    #Loose Id
    Pt010_Loose    = cms.vdouble(-0.9,-0.9, -0.94,-0.9),
    Pt1020_Loose   = cms.vdouble(-0.9,-0.9, -0.94,-0.9),
    Pt2030_Loose   = cms.vdouble(-0.4,-0.85,-0.7,-0.6),
    Pt3050_Loose   = cms.vdouble(-0.4,-0.85,-0.7,-0.6)
)
# All cuts at -999.: placeholder set that effectively disables the Id.
EmptyJetIdParams = cms.PSet(
    #4 Eta Categories 0-2.5 2.5-2.75 2.75-3.0 3.0-5.0
    #Tight Id
    Pt010_Tight    = cms.vdouble(-999.,-999.,-999.,-999.),
    Pt1020_Tight   = cms.vdouble(-999.,-999.,-999.,-999.),
    Pt2030_Tight   = cms.vdouble(-999.,-999.,-999.,-999.),
    Pt3050_Tight   = cms.vdouble(-999.,-999.,-999.,-999.),
    #Medium Id
    Pt010_Medium   = cms.vdouble(-999.,-999.,-999.,-999.),
    Pt1020_Medium  = cms.vdouble(-999.,-999.,-999.,-999.),
    Pt2030_Medium  = cms.vdouble(-999.,-999.,-999.,-999.),
    Pt3050_Medium  = cms.vdouble(-999.,-999.,-999.,-999.),
    #Loose Id
    Pt010_Loose    = cms.vdouble(-999.,-999.,-999.,-999.),
    Pt1020_Loose   = cms.vdouble(-999.,-999.,-999.,-999.),
    Pt2030_Loose   = cms.vdouble(-999.,-999.,-999.,-999.),
    Pt3050_Loose   = cms.vdouble(-999.,-999.,-999.,-999.)
)
PuJetIdCutBased_wp = cms.PSet(
    #4 Eta Categories 0-2.5 2.5-2.75 2.75-3.0 3.0-5.0
    #betaStarClassic/log(nvtx-0.64) Values
    #Tight Id
    Pt010_BetaStarTight    = cms.vdouble( 0.15, 0.15, 999., 999.),
    Pt1020_BetaStarTight   = cms.vdouble( 0.15, 0.15, 999., 999.),
    Pt2030_BetaStarTight   = cms.vdouble( 0.15, 0.15, 999., 999.),
    Pt3050_BetaStarTight   = cms.vdouble( 0.15, 0.15, 999., 999.),
    #Medium Id => Daniele
    Pt010_BetaStarMedium   = cms.vdouble( 0.2, 0.3, 999., 999.),
    Pt1020_BetaStarMedium  = cms.vdouble( 0.2, 0.3, 999., 999.),
    Pt2030_BetaStarMedium  = cms.vdouble( 0.2, 0.3, 999., 999.),
    Pt3050_BetaStarMedium  = cms.vdouble( 0.2, 0.3, 999., 999.),
    #Loose Id
    Pt010_BetaStarLoose    = cms.vdouble( 0.2, 0.3, 999., 999.),
    Pt1020_BetaStarLoose   = cms.vdouble( 0.2, 0.3, 999., 999.),
    Pt2030_BetaStarLoose   = cms.vdouble( 0.2, 0.3, 999., 999.),
    Pt3050_BetaStarLoose   = cms.vdouble( 0.2, 0.3, 999., 999.),
    #RMS variable
    #Tight Id
    Pt010_RMSTight         = cms.vdouble( 0.06, 0.07, 0.04, 0.05),
    Pt1020_RMSTight        = cms.vdouble( 0.06, 0.07, 0.04, 0.05),
    Pt2030_RMSTight        = cms.vdouble( 0.05, 0.07, 0.03, 0.045),
    Pt3050_RMSTight        = cms.vdouble( 0.05, 0.06, 0.03, 0.04),
    #Medium Id => Daniele
    Pt010_RMSMedium        = cms.vdouble( 0.06, 0.03, 0.03, 0.04),
    Pt1020_RMSMedium       = cms.vdouble( 0.06, 0.03, 0.03, 0.04),
    Pt2030_RMSMedium       = cms.vdouble( 0.06, 0.03, 0.03, 0.04),
    Pt3050_RMSMedium       = cms.vdouble( 0.06, 0.03, 0.03, 0.04),
    #Loose Id
    Pt010_RMSLoose         = cms.vdouble( 0.06, 0.05, 0.05, 0.07),
    Pt1020_RMSLoose        = cms.vdouble( 0.06, 0.05, 0.05, 0.07),
    Pt2030_RMSLoose        = cms.vdouble( 0.06, 0.05, 0.05, 0.055),
    Pt3050_RMSLoose        = cms.vdouble( 0.06, 0.05, 0.05, 0.055)
)
JetIdParams = cms.PSet(
    #4 Eta Categories 0-2.5 2.5-2.75 2.75-3.0 3.0-5.0
    #Tight Id
    Pt010_Tight    = cms.vdouble( 0.5,0.6,0.6,0.9),
    Pt1020_Tight   = cms.vdouble(-0.2,0.2,0.2,0.6),
    Pt2030_Tight   = cms.vdouble( 0.3,0.4,0.7,0.8),
    Pt3050_Tight   = cms.vdouble( 0.5,0.4,0.8,0.9),
    #Medium Id
    Pt010_Medium   = cms.vdouble( 0.2,0.4,0.2,0.6),
    Pt1020_Medium  = cms.vdouble(-0.3,0. ,0. ,0.5),
    Pt2030_Medium  = cms.vdouble( 0.2,0.2,0.5,0.7),
    Pt3050_Medium  = cms.vdouble( 0.3,0.2,0.7,0.8),
    #Loose Id
    Pt010_Loose    = cms.vdouble( 0. , 0. , 0. ,0.2),
    Pt1020_Loose   = cms.vdouble(-0.4,-0.4,-0.4,0.4),
    Pt2030_Loose   = cms.vdouble( 0. , 0. , 0.2,0.6),
    Pt3050_Loose   = cms.vdouble( 0. , 0. , 0.6,0.2)
)
| 38.889401
| 77
| 0.527669
| 3,033
| 16,878
| 2.849654
| 0.051105
| 0.249913
| 0.24436
| 0.096263
| 0.9042
| 0.85711
| 0.832697
| 0.727294
| 0.690732
| 0.619461
| 0
| 0.251959
| 0.19084
| 16,878
| 433
| 78
| 38.979215
| 0.380904
| 0.101315
| 0
| 0.142292
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.003953
| 0
| 0.003953
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
0725a7d94dc62a6935f2976549b4fff79860e1fd
| 169
|
py
|
Python
|
teste.py
|
dom8891/Projeto_LMS_DevOps
|
d7a881811c76bbac197ecca5a6da17f041c12646
|
[
"Apache-2.0"
] | null | null | null |
teste.py
|
dom8891/Projeto_LMS_DevOps
|
d7a881811c76bbac197ecca5a6da17f041c12646
|
[
"Apache-2.0"
] | null | null | null |
teste.py
|
dom8891/Projeto_LMS_DevOps
|
d7a881811c76bbac197ecca5a6da17f041c12646
|
[
"Apache-2.0"
] | null | null | null |
import pytest
from principal import somar
from principal import subtrair
def teste_somar():
    """somar(2, 4) must give 6."""
    resultado = somar(2, 4)
    assert resultado == 6
def teste_subtrair():
    """subtrair(9, 5) must give 4."""
    resultado = subtrair(9, 5)
    assert resultado == 4
| 18.777778
| 30
| 0.739645
| 26
| 169
| 4.730769
| 0.538462
| 0.211382
| 0.308943
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.042553
| 0.16568
| 169
| 9
| 31
| 18.777778
| 0.829787
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.285714
| 1
| 0.285714
| true
| 0
| 0.428571
| 0
| 0.714286
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
074013e3298771c50234a6783fe3c3913a47e310
| 106
|
py
|
Python
|
modules/dynamodb_simple_client_api/installer/module_dynamodb_lambdas/__init__.py
|
groboclown/whimbrel
|
1968cccf4888ef893686a812ed729205a31d2a12
|
[
"Apache-2.0"
] | null | null | null |
modules/dynamodb_simple_client_api/installer/module_dynamodb_lambdas/__init__.py
|
groboclown/whimbrel
|
1968cccf4888ef893686a812ed729205a31d2a12
|
[
"Apache-2.0"
] | null | null | null |
modules/dynamodb_simple_client_api/installer/module_dynamodb_lambdas/__init__.py
|
groboclown/whimbrel
|
1968cccf4888ef893686a812ed729205a31d2a12
|
[
"Apache-2.0"
] | null | null | null |
from .schema import DYNAMODB_LAMBDAS_DB_TABLES
def get_schema():
    """Return the DynamoDB table definitions exported by this module."""
    schema = DYNAMODB_LAMBDAS_DB_TABLES
    return schema
| 15.142857
| 46
| 0.820755
| 15
| 106
| 5.333333
| 0.666667
| 0.375
| 0.425
| 0.575
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.141509
| 106
| 6
| 47
| 17.666667
| 0.879121
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 1
| 0
| 0
|
0
| 9
|
4adaacd3888dd3f9133bf8909db3d92df5b14bda
| 2,802
|
py
|
Python
|
tests/meta_hooks/check_useless_excludes_test.py
|
ashanbrown/pre-commit
|
6bc7b91dd10d0b3ffda14736b65f46f3c578bb2f
|
[
"MIT"
] | null | null | null |
tests/meta_hooks/check_useless_excludes_test.py
|
ashanbrown/pre-commit
|
6bc7b91dd10d0b3ffda14736b65f46f3c578bb2f
|
[
"MIT"
] | null | null | null |
tests/meta_hooks/check_useless_excludes_test.py
|
ashanbrown/pre-commit
|
6bc7b91dd10d0b3ffda14736b65f46f3c578bb2f
|
[
"MIT"
] | null | null | null |
from pre_commit.meta_hooks import check_useless_excludes
from testing.fixtures import add_config_to_repo
def test_useless_exclude_global(capsys, in_git_dir):
    """A top-level exclude that matches no file should be reported."""
    cfg = {
        'exclude': 'foo',
        'repos': [
            {'repo': 'meta', 'hooks': [{'id': 'check-useless-excludes'}]},
        ],
    }
    add_config_to_repo(in_git_dir.strpath, cfg)
    assert check_useless_excludes.main(()) == 1
    captured, _ = capsys.readouterr()
    expected = "The global exclude pattern 'foo' does not match any files"
    assert captured.strip() == expected
def test_useless_exclude_for_hook(capsys, in_git_dir):
    """A per-hook exclude that matches no file should be reported."""
    hook = {'id': 'check-useless-excludes', 'exclude': 'foo'}
    cfg = {'repos': [{'repo': 'meta', 'hooks': [hook]}]}
    add_config_to_repo(in_git_dir.strpath, cfg)
    assert check_useless_excludes.main(()) == 1
    captured, _ = capsys.readouterr()
    expected = (
        "The exclude pattern 'foo' for check-useless-excludes "
        "does not match any files"
    )
    assert captured.strip() == expected
def test_useless_exclude_with_types_filter(capsys, in_git_dir):
    """An exclude made irrelevant by a `types` filter should be reported."""
    hook = {
        'id': 'check-useless-excludes',
        'exclude': '.pre-commit-config.yaml',
        'types': ['python'],
    }
    cfg = {'repos': [{'repo': 'meta', 'hooks': [hook]}]}
    add_config_to_repo(in_git_dir.strpath, cfg)
    assert check_useless_excludes.main(()) == 1
    captured, _ = capsys.readouterr()
    expected = (
        "The exclude pattern '.pre-commit-config.yaml' for "
        "check-useless-excludes does not match any files"
    )
    assert captured.strip() == expected
def test_no_excludes(capsys, in_git_dir):
    """With no excludes configured, the hook passes and prints nothing."""
    cfg = {
        'repos': [
            {'repo': 'meta', 'hooks': [{'id': 'check-useless-excludes'}]},
        ],
    }
    add_config_to_repo(in_git_dir.strpath, cfg)
    assert check_useless_excludes.main(()) == 0
    captured, _ = capsys.readouterr()
    assert captured == ''
def test_valid_exclude(capsys, in_git_dir):
    """An exclude that matches an existing file passes silently."""
    hook = {
        'id': 'check-useless-excludes',
        'exclude': '.pre-commit-config.yaml',
    }
    cfg = {'repos': [{'repo': 'meta', 'hooks': [hook]}]}
    add_config_to_repo(in_git_dir.strpath, cfg)
    assert check_useless_excludes.main(()) == 0
    captured, _ = capsys.readouterr()
    assert captured == ''
| 24.155172
| 78
| 0.499286
| 276
| 2,802
| 4.804348
| 0.184783
| 0.117647
| 0.196078
| 0.067873
| 0.815988
| 0.757164
| 0.757164
| 0.757164
| 0.757164
| 0.757164
| 0
| 0.002831
| 0.369736
| 2,802
| 115
| 79
| 24.365217
| 0.748018
| 0
| 0
| 0.6
| 0
| 0
| 0.189864
| 0.0803
| 0
| 0
| 0
| 0
| 0.111111
| 1
| 0.055556
| false
| 0
| 0.022222
| 0
| 0.077778
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
4ae2acf477c0cd0769e7994b6cae82dc51e2fa34
| 178
|
py
|
Python
|
tests/basics/builtin_bin.py
|
geowor01/micropython
|
7fb13eeef4a85f21cae36f1d502bcc53880e1815
|
[
"MIT"
] | 7
|
2019-10-18T13:41:39.000Z
|
2022-03-15T17:27:57.000Z
|
tests/basics/builtin_bin.py
|
geowor01/micropython
|
7fb13eeef4a85f21cae36f1d502bcc53880e1815
|
[
"MIT"
] | null | null | null |
tests/basics/builtin_bin.py
|
geowor01/micropython
|
7fb13eeef4a85f21cae36f1d502bcc53880e1815
|
[
"MIT"
] | 2
|
2020-06-23T09:10:15.000Z
|
2020-12-22T06:42:14.000Z
|
# test builtin bin function
for value in (1, -1, 15, -15, 12345, 0b10101, 0b10101010101010101010):
    print(bin(value))
print("PASS")
| 13.692308
| 34
| 0.713483
| 27
| 178
| 4.703704
| 0.407407
| 0.440945
| 0.141732
| 0.220472
| 0.362205
| 0
| 0
| 0
| 0
| 0
| 0
| 0.234568
| 0.089888
| 178
| 13
| 35
| 13.692308
| 0.549383
| 0.140449
| 0
| 0
| 0
| 0
| 0.026316
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.125
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| null | 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 1
|
0
| 7
|
ab3f745240b67c866572f33e892db18cf8e16ff6
| 146
|
py
|
Python
|
__init__.py
|
Fumipo-Theta/matpos
|
7a9aac0214867ca5f8ad308e39229368a9faee45
|
[
"BSD-2-Clause"
] | null | null | null |
__init__.py
|
Fumipo-Theta/matpos
|
7a9aac0214867ca5f8ad308e39229368a9faee45
|
[
"BSD-2-Clause"
] | null | null | null |
__init__.py
|
Fumipo-Theta/matpos
|
7a9aac0214867ca5f8ad308e39229368a9faee45
|
[
"BSD-2-Clause"
] | null | null | null |
"""
MatPos
FigureSizing
"""
from .matpos.matpos import Matpos
from .matpos.figure_sizing import FigureSizing
from .matpos.subgrid import Subgrid
| 16.222222
| 46
| 0.80137
| 18
| 146
| 6.444444
| 0.388889
| 0.258621
| 0.37931
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.116438
| 146
| 8
| 47
| 18.25
| 0.899225
| 0.130137
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
db842d2242e764f0ffba37dfc4825ccb4fb93576
| 214
|
py
|
Python
|
muse_origin/__init__.py
|
musevlt/origin
|
91da9beaac96e9372865471dd87281a18f17749c
|
[
"MIT"
] | 1
|
2019-10-07T13:08:44.000Z
|
2019-10-07T13:08:44.000Z
|
muse_origin/__init__.py
|
musevlt/origin
|
91da9beaac96e9372865471dd87281a18f17749c
|
[
"MIT"
] | 6
|
2019-07-16T16:57:25.000Z
|
2021-02-05T00:28:18.000Z
|
muse_origin/__init__.py
|
musevlt/origin
|
91da9beaac96e9372865471dd87281a18f17749c
|
[
"MIT"
] | null | null | null |
from .lib_origin import * # noqa
from .origin import ORIGIN # noqa
from .source_creation import * # noqa
from .source_masks import * # noqa
from .steps import * # noqa
from .version import __version__ # noqa
| 30.571429
| 40
| 0.728972
| 29
| 214
| 5.137931
| 0.344828
| 0.268456
| 0.375839
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.196262
| 214
| 6
| 41
| 35.666667
| 0.866279
| 0.135514
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
db88081f1208a8e8434c25655f4650bbc81a7fe9
| 20,127
|
py
|
Python
|
ILI/clustering_datasets.py
|
arodriguezca/DeepOutbreak
|
ee3de8aa4cab1abd5c3be2e85ed69bddc635cb6e
|
[
"MIT"
] | 1
|
2021-08-07T15:32:25.000Z
|
2021-08-07T15:32:25.000Z
|
ILI/clustering_datasets.py
|
arodriguezca/DeepOutbreak
|
ee3de8aa4cab1abd5c3be2e85ed69bddc635cb6e
|
[
"MIT"
] | null | null | null |
ILI/clustering_datasets.py
|
arodriguezca/DeepOutbreak
|
ee3de8aa4cab1abd5c3be2e85ed69bddc635cb6e
|
[
"MIT"
] | null | null | null |
'''
Note: These are not used for COVID
'''
import numpy as np
import math
import os
# useful for kdd epidepp
def load_mydata(length, first_year, data_region, path = './data'):
    """Load per-season weekly ILI curves for one region from ILINet.csv.

    Args:
        length: number of weekly values kept from the start of each season.
        first_year: earliest season (start year) to include.
        data_region: region label; 'X' means the national series.  Other
            labels are rewritten by inserting a space after the first 'n'
            so they match the region strings in the CSV.
        path: directory containing ``ILINet.csv``.

    Returns:
        np.ndarray of shape (n_seasons, length) with one row per season.
    """
    if data_region != 'X': # if not national region
        str_arr = data_region.split('n')
        data_region = str_arr[0]+'n '+str_arr[1]
    input_file = os.path.join(path, 'ILINet.csv')
    # region -> year -> list of weekly wILI values
    all_data = {}
    # 'with' guarantees the handle is closed (the original leaked it)
    with open(input_file) as in_f:
        in_f.readline()  # skip the two CSV header lines
        in_f.readline()
        for line in in_f:
            raw = line.strip().split(',')
            region = raw[1].strip()
            year = int(raw[2].strip())
            week = int(raw[3].strip())
            ## upto 20th week belongs to last years cycle
            if week <= 20:
                year -= 1
            try:
                inf = float(raw[4].strip())
            except ValueError:  # non-numeric entries count as 0
                inf = 0
            all_data.setdefault(region, {}).setdefault(year, []).append(inf)
    x = []
    raw = all_data[data_region]
    for year in sorted(raw):
        # it was ==52, but some seasons have 53 weeks, hence >=
        if year >= first_year and len(raw[year]) >= 52:
            x.append(raw[year][0:length])
    return np.array(x)
def load_RNNdata(length, first_year, data_region, path = './data'):
    """Load ILI input sequences plus per-season targets for one region.

    Reads ``<path>/ILINet.csv`` for the weekly curves and ``<path>/baseline``
    (lines of ``<year> <baseline>``) for the CDC onset baselines.

    Args:
        length: number of weekly values used as the input sequence.
        first_year: earliest season (start year) to include.
        data_region: region label; 'X' means the national series.  Other
            labels are rewritten by inserting a space after the first 'n'.
        path: directory holding the data files.

    Returns:
        Tuple of numpy arrays:
            x              -- (n_seasons, length, 1) input sequences
            y              -- value at week ``length`` (next-step target)
            peak           -- per-season peak intensity
            peak_time      -- one-hot (len 52) peak-week class, capped at 13
            onset_time     -- one-hot (len 53) onset-week class; no onset
                              (-1) marks the final slot
            peak_time_vals -- integer peak-week classes
    """
    if data_region != 'X': # if not national region
        str_arr = data_region.split('n')
        data_region = str_arr[0]+'n '+str_arr[1]
    input_file = os.path.join(path, 'ILINet.csv')
    # CDC onset baselines keyed by season start year.
    cdc_baselines = {}
    with open(os.path.join(path, 'baseline')) as baseline_file:
        for line in baseline_file:
            arr = line.strip().split()
            cdc_baselines[int(arr[0])] = float(arr[1])
    # region -> year -> list of weekly wILI values
    all_data = {}
    # 'with' guarantees the handles are closed (the original leaked both)
    with open(input_file) as in_f:
        in_f.readline()  # skip the two CSV header lines
        in_f.readline()
        for line in in_f:
            raw = line.strip().split(',')
            region = raw[1].strip()
            year = int(raw[2].strip())
            week = int(raw[3].strip())
            ## upto 20th week belongs to last years cycle
            if week <= 20:
                year -= 1
            try:
                inf = float(raw[4].strip())
            except ValueError:  # non-numeric entries count as 0
                inf = 0
            all_data.setdefault(region, {}).setdefault(year, []).append(inf)
    x = []
    y = []
    peak = []
    peak_time = []
    onset_time = []
    peak_time_vals = []
    raw = all_data[data_region]
    for year in sorted(raw):
        # NOTE: same >= 52 relaxation as in load_mydata()
        if year >= first_year and len(raw[year]) >= 52:
            x.append(raw[year][0:length])
            y.append(raw[year][length])
            peak.append(max(raw[year]))
            # Peak week counted from the 21st calendar week (index 0);
            # classes above 13 are clamped into the last class.
            peak_time_val = raw[year].index(max(raw[year]))
            if float(peak_time_val) > 13:
                peak_time_val = 13
            peak_time_vec = [0]*52
            peak_time_vec[peak_time_val] = 1
            peak_time_vals.append(peak_time_val)
            peak_time.append(peak_time_vec)
            # Onset: first week opening a run of 3 weeks at/above the CDC
            # baseline; -1 (no onset) ends up in the final slot of the vector.
            onset = -1
            baseline_val = cdc_baselines[year]
            for i in range(len(raw[year])-3):
                if all(raw[year][j] >= baseline_val for j in range(i, i+3)):
                    onset = i
                    break
            onset_vec = [0]*53
            onset_vec[onset] = 1
            onset_time.append(onset_vec)
    x = np.array(x)
    x = x[:, :, np.newaxis]
    return (x, np.array(y), np.array(peak), np.array(peak_time),
            np.array(onset_time), np.array(peak_time_vals))
def is_number(s):
    """Return True when *s* can be parsed as a float, False otherwise."""
    try:
        float(s)
    except ValueError:
        return False
    return True
def load_myRegionaldata(length, first_year, path = './data'):
    """Load per-region season curves labelled with season-cluster ids.

    Reads ``<path>/ILINetProcessed.csv`` (via ``read_ILINetProccessed``) and
    ``<path>/SeasonClustersFinal`` (lines of ``<year> <cluster>``).

    Args:
        length: number of weekly values kept from each season.
        first_year: earliest season (start year) to include.
        path: directory holding the data files.

    Returns:
        Dict mapping region -> [x, y] where x is an (n_seasons, length)
        array of curves and y holds the matching cluster labels.
    """
    # (the redundant in-function `import os` was dropped: os is imported
    # at module level)
    input_file = os.path.join(path, 'ILINetProcessed.csv')
    # Season start year -> cluster id; 'with' closes the file (the
    # original leaked the handle).
    seasonDic = {}
    with open(os.path.join(path, 'SeasonClustersFinal')) as clusters_file:
        for line in clusters_file:
            arr = line.strip().split()
            seasonDic[int(arr[0])] = int(arr[1])
    all_data = read_ILINetProccessed(input_file)
    data = {}
    for region, raw in all_data.items():
        x = []
        y = []
        for year in sorted(raw):
            # Only complete 52-week seasons are used here.
            if year >= first_year and len(raw[year]) == 52:
                x.append(raw[year][0:length])
                y.append(seasonDic[year])
        data[region] = [np.array(x), np.array(y)]
    return data
def _wili_one_hot(val, n_bins=131):
    """One-hot encode a wILI value into n_bins bins of width 0.1.

    The value is multiplied by 10 and floored to get the bin index; any
    value whose scaled form is >= n_bins - 1 falls into the last
    (overflow) bin. Returns (one_hot_vector, scaled_value).
    """
    vec = [0] * n_bins
    scaled = val * 10
    if scaled < n_bins - 1:
        vec[int(math.floor(scaled))] = 1
    else:
        vec[-1] = 1
    return vec, scaled
def load_myRegionalRNNdata( length, first_year, path = './data'):
    """
    This one returns labels in classification format.

    For each region and each complete 52-week season (>= first_year) builds:
      x           -- first `length` weekly wILI values, shape (n, length, 1)
      y_1 .. y_6  -- one-hot (131 bins) encodings of the next 6 weekly values
      peak        -- one-hot encoding of the season's peak wILI value
      peak_time   -- one-hot peak-week vector, sliced from index 19
      onset_time  -- one-hot onset-week vector (first of 3 consecutive weeks
                     at or above the CDC baseline; the last slot means no
                     onset)

    Returns a dict: region -> [x, y_1, ..., y_6, peak, peak_time, onset_time].
    """
    import os
    input_file = os.path.join(path, 'ILINetProcessed.csv')
    # Season -> cluster id (read for parity with the other loaders; not used
    # in the returned labels).
    # BUGFIX: both auxiliary files were previously opened and never closed.
    seasonDic = {}
    allSeasons = {}
    with open(os.path.join(path, 'SeasonClustersFinal')) as clusters_file:
        for line in clusters_file:
            arr = line.strip().split()
            seasonDic[int(arr[0])] = int(arr[1])
            allSeasons[int(arr[1])] = True
    # CDC baselines: one header line, then one row per region whose columns
    # are consecutive yearly baselines starting at year 2000.
    cdc_baselines = {}
    with open(os.path.join(path, 'wILI_Baseline.csv')) as baseline_file:
        baseline_file.readline()
        for line in baseline_file:
            arr = line.strip().split(',')
            cdc_baselines[arr[0]] = {
                2000 + i - 1: float(arr[i]) for i in range(1, len(arr))
            }
    all_data = read_ILINetProccessed(input_file)
    indexDic = {}
    data = {}
    for region, raw in all_data.items():
        # Note: raw is a dictionary (year is key) that contains yearly sequence
        x = []
        ys = [[] for _ in range(6)]          # y_1 .. y_6 one-hot targets
        y_val_arrs = [[] for _ in range(4)]  # scaled raw targets (unused downstream)
        peak = []
        peak_time = []
        onset_time = []
        for year in sorted(raw.keys()):
            if year >= first_year and len(raw[year]) == 52:
                indexDic[len(x)] = year
                x.append(raw[year][0:length])
                # Next six weekly values, one-hot encoded (raises ValueError
                # if fewer than six future weeks remain, as before).
                w1, w2, w3, w4, w5, w6 = raw[year][length:length + 6]
                for k, val in enumerate((w1, w2, w3, w4, w5, w6)):
                    vec, scaled = _wili_one_hot(val)
                    ys[k].append(vec)
                    if k < 4:
                        y_val_arrs[k].append([scaled])
                # Peak value of the season, one-hot encoded.
                peak_vec, _ = _wili_one_hot(max(raw[year]))
                peak.append(peak_vec)
                peak_time_val = raw[year].index(max(raw[year]))
                peak_time_vec = [0] * 52
                peak_time_vec[peak_time_val] = 1
                # Careful: the peak time is counted from the 21st week of the
                # cycle, so 37 means week 21+37-52 = 6 of the next year.
                peak_time.append(peak_time_vec[19:])
                # Onset: first week starting 3 consecutive weeks at/above the
                # CDC baseline; stays -1 (last slot) when there is no onset.
                onset = -1
                baseline_val = cdc_baselines[region][year]
                for i in range(len(raw[year]) - 3):
                    if all(raw[year][j] >= baseline_val for j in range(i, i + 3)):
                        onset = i
                        break
                onset_vec = [0] * 53
                onset_vec[onset] = 1
                onset_time.append(onset_vec[19:])
        x = np.array(x)
        x = x[:, :, np.newaxis]
        data[region] = [x, np.array(ys[0]), np.array(ys[1]), np.array(ys[2]),
                        np.array(ys[3]), np.array(ys[4]), np.array(ys[5]),
                        np.array(peak), np.array(peak_time), np.array(onset_time)]
    return data
def load_myRegionalRNNdata_NumericwILI(length, first_year, path = './data'):
    """
    This one returns labels in regression format
    Based on load_RNNdata

    For each region and each season with >= 52 weeks (>= first_year) builds:
      x            -- first `length` weekly wILI values, shape (n, length, 1)
      y_1 .. y_6   -- raw wILI values of the next 6 weeks (regression targets)
      peak         -- raw peak wILI value of the season
      peak_time    -- one-hot peak-week vector, sliced from index 19
      onset_time   -- one-hot onset week (3 consecutive weeks >= baseline);
                      the last slot marks "no onset"
      offset_time  -- one-hot offset week (3 consecutive weeks <= baseline
                      after week 34); the last slot marks "no offset"

    Returns a dict: region -> [x, y_1, ..., y_6, peak, peak_time,
    onset_time, offset_time].
    """
    import os
    input_file = os.path.join(path, 'ILINetProcessed.csv')
    # Season -> cluster id (read for parity; not used in the labels).
    # BUGFIX: both auxiliary files were previously opened and never closed.
    seasonDic = {}
    allSeasons = {}
    with open(os.path.join(path, 'SeasonClustersFinal')) as clusters_file:
        for line in clusters_file:
            arr = line.strip().split()
            seasonDic[int(arr[0])] = int(arr[1])
            allSeasons[int(arr[1])] = True
    # CDC baselines: one header line, then one row per region whose columns
    # are consecutive yearly baselines starting at year 2000.
    cdc_baselines = {}
    with open(os.path.join(path, 'wILI_Baseline.csv')) as baseline_file:
        baseline_file.readline()
        for line in baseline_file:
            arr = line.strip().split(',')
            cdc_baselines[arr[0]] = {
                2000 + i - 1: float(arr[i]) for i in range(1, len(arr))
            }
    all_data = read_ILINetProccessed(input_file)
    indexDic = {}
    data = {}
    for region, raw in all_data.items():
        # Note: raw is a dictionary (year is key) that contains yearly sequence
        x = []
        y_weeks = [[] for _ in range(6)]
        peak = []
        peak_time = []
        onset_time = []
        offset_time = []
        peak_time_vals = []  # kept for parity with the classification loader
        for year in sorted(raw.keys()):
            # NOTE: same modification as in load_mydata(): seasons longer
            # than 52 weeks are accepted too.
            if year >= first_year and len(raw[year]) >= 52:
                indexDic[len(x)] = year
                x.append(raw[year][0:length])
                for k in range(6):
                    y_weeks[k].append(raw[year][length + k])
                peak.append(max(raw[year]))
                peak_time_val = raw[year].index(max(raw[year]))
                peak_time_vec = [0] * 52
                peak_time_vec[peak_time_val] = 1
                peak_time_vals.append(peak_time_val)
                # Careful: week indices count from the 21st week of the cycle,
                # so 37 means week 21+37-52 = 6 of the next year.
                peak_time.append(peak_time_vec[19:])
                # Onset: first of 3 consecutive weeks at/above baseline;
                # -1 (last slot) means no onset.
                onset = -1
                baseline_val = cdc_baselines[region][year]
                for i in range(len(raw[year]) - 3):
                    if all(raw[year][j] >= baseline_val for j in range(i, i + 3)):
                        onset = i
                        break
                onset_vec = [0] * 53
                onset_vec[onset] = 1
                onset_time.append(onset_vec[19:])
                # Offset (added for COVID): first of 3 consecutive weeks
                # at/below baseline after week 34 (by then onset has passed).
                # BUGFIX: `offset` was previously never initialized/reset, so
                # a season with no offset raised NameError on the first season
                # or silently reused the previous season's offset.
                offset = -1
                for i in range(34, len(raw[year]) - 3):
                    if all(raw[year][j] <= baseline_val for j in range(i, i + 3)):
                        offset = i
                        break
                offset_vec = [0] * 53
                offset_vec[offset] = 1
                offset_time.append(offset_vec[19:])
        x = np.array(x)
        x = x[:, :, np.newaxis]
        data[region] = [x] + [np.array(v) for v in y_weeks] + [
            np.array(peak), np.array(peak_time),
            np.array(onset_time), np.array(offset_time)]
    return data
def load_myRegionalRNNdata_Prediction( length, first_year, path = './data'):
    """Build prediction-time RNN inputs (season 2019 only) per region.

    Returns a dict: region -> [x] where x has shape (n, length, 1) holding
    the first `length` weekly wILI values of the 2019 season.
    NOTE(review): `first_year` is accepted for interface parity but unused;
    only year == 2019 is selected.
    """
    import os
    input_file = os.path.join(path, 'ILINetProcessed.csv')
    # The cluster and baseline files are read for parity with the training
    # loaders (and to fail early if they are missing), but their contents
    # are not used here.
    # BUGFIX: both files were previously opened and never closed.
    seasonDic = {}
    allSeasons = {}
    with open(os.path.join(path, 'SeasonClustersFinal')) as clusters_file:
        for line in clusters_file:
            arr = line.strip().split()
            seasonDic[int(arr[0])] = int(arr[1])
            allSeasons[int(arr[1])] = True
    cdc_baselines = {}
    with open(os.path.join(path, 'baseline')) as baseline_file:
        for line in baseline_file:
            arr = line.strip().split()
            cdc_baselines[int(arr[0])] = float(arr[1])
    all_data = read_ILINetProccessed(input_file)
    indexDic = {}
    data = {}
    for region, raw in all_data.items():
        x = []
        for year in sorted(raw.keys()):
            if year == 2019:
                indexDic[len(x)] = year
                x.append(raw[year][0:length])
        x = np.array(x)
        x = x[:, :, np.newaxis]
        data[region] = [x]
    return data
def load_myRegionaldata_Prediction(length, first_year, path = './data'):
    """Build prediction-time flat inputs (season 2019 only) per region.

    Returns a dict: region -> [x] where x has shape (n, length) holding the
    first `length` weekly wILI values of the 2019 season.
    NOTE(review): `first_year` is accepted for interface parity but unused;
    only year == 2019 is selected.
    """
    import os
    input_file = os.path.join(path, 'ILINetProcessed.csv')
    # Cluster file is read for parity with the training loader; contents
    # are not used here.
    # BUGFIX: the clusters file was previously opened and never closed.
    seasonDic = {}
    allSeasons = {}
    with open(os.path.join(path, 'SeasonClustersFinal')) as clusters_file:
        for line in clusters_file:
            arr = line.strip().split()
            seasonDic[int(arr[0])] = int(arr[1])
            allSeasons[int(arr[1])] = True
    all_data = read_ILINetProccessed(input_file)
    indexDic = {}
    data = {}
    region_order = []
    for region, raw in all_data.items():
        region_order.append(region)
        x = []
        for year in sorted(raw.keys()):
            if year == 2019:
                indexDic[len(x)] = year
                x.append(raw[year][0:length])
        data[region] = [np.array(x)]
    return data
def read_ILINetProccessed(input_file):
    """Parse the processed ILINet CSV into {region: {year: [wILI, ...]}}.

    The file has two header lines, then comma-separated rows whose columns
    1-4 are region, year, week, infection (wILI). Weeks 1-20 are attributed
    to the previous year's cycle; non-numeric infection values become 0.
    """
    all_data = {}  # indexed by region
    # BUGFIX: the input file was previously opened and never closed.
    with open(input_file) as in_f:
        in_f.readline()  # skip the two header lines
        in_f.readline()
        for line in in_f:
            raw = line.strip().split(',')
            region = raw[1].strip()
            year = int(raw[2].strip())
            week = int(raw[3].strip())
            # Up to the 20th week belongs to last year's cycle.
            if week <= 20:
                year -= 1
            infection = raw[4].strip()
            inf = float(infection) if is_number(infection) else 0
            all_data.setdefault(region, {}).setdefault(year, []).append(inf)
    return all_data
if __name__ == "__main__":
    # Smoke-test the regional numeric loader on a single region and print
    # every returned array except the peak-value and peak-time labels.
    length = 21
    first_year = 2004
    RegionName = 'Region 1'
    (rnn_data, rnn_label_wILI_1, rnn_label_wILI_2, rnn_label_wILI_3,
     rnn_label_wILI_4, rnn_label_wILI_5, rnn_label_wILI_6, rnn_label_peak,
     rnn_label_peak_time, rnn_label_onset_time,
     rnn_label_offset_time) = load_myRegionalRNNdata_NumericwILI(length, first_year)[RegionName]
    for label_arr in (rnn_data, rnn_label_wILI_1, rnn_label_wILI_2,
                      rnn_label_wILI_3, rnn_label_wILI_4, rnn_label_wILI_5,
                      rnn_label_wILI_6, rnn_label_onset_time,
                      rnn_label_offset_time):
        print(label_arr)
    print(load_ILI_as_time_series(RegionName))
| 32.151757
| 286
| 0.50549
| 2,544
| 20,127
| 3.806211
| 0.077044
| 0.042136
| 0.014871
| 0.023133
| 0.805226
| 0.782402
| 0.765775
| 0.765775
| 0.765672
| 0.762574
| 0
| 0.041256
| 0.379788
| 20,127
| 625
| 287
| 32.2032
| 0.734439
| 0.137775
| 0
| 0.744344
| 0
| 0
| 0.019215
| 0
| 0
| 0
| 0
| 0.0016
| 0
| 1
| 0.020362
| false
| 0
| 0.0181
| 0
| 0.061086
| 0.022624
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
dbaf1bec50764fdd6366a6e4b27415ffe8b2b7c7
| 18,843
|
py
|
Python
|
cdlib/test/test_community_discovery_models.py
|
rparrapy/cdlib
|
743526abe4bde338fe8a67daf435ed554dedd604
|
[
"BSD-2-Clause"
] | null | null | null |
cdlib/test/test_community_discovery_models.py
|
rparrapy/cdlib
|
743526abe4bde338fe8a67daf435ed554dedd604
|
[
"BSD-2-Clause"
] | null | null | null |
cdlib/test/test_community_discovery_models.py
|
rparrapy/cdlib
|
743526abe4bde338fe8a67daf435ed554dedd604
|
[
"BSD-2-Clause"
] | null | null | null |
import unittest
from cdlib import algorithms
import networkx as nx
import os
try:
import igraph as ig
except ModuleNotFoundError:
ig = None
try:
import leidenalg
except ModuleNotFoundError:
leidenalg = None
try:
import infomap
except ModuleNotFoundError:
infomap = None
try:
import graph_tool.all as gt
except ModuleNotFoundError:
gt = None
def get_string_graph():
    """Return the karate club graph with every node relabelled to '$<n>$'."""
    graph = nx.karate_club_graph()
    mapping = {node: "$%s$" % node for node in graph.nodes()}
    nx.relabel_nodes(graph, mapping, False)  # in-place relabel (copy=False)
    return graph
class CommunityDiscoveryTests(unittest.TestCase):
    """Smoke-tests for cdlib community-discovery algorithms.

    Each test runs one algorithm on a small graph and checks that the
    result exposes `communities` as a list of lists whose members have the
    expected node type. Algorithms backed by optional dependencies
    (igraph, leidenalg, infomap, graph_tool) are silently skipped when the
    corresponding import failed at module load time.
    """

    def _check(self, clustering, member_type, inner_guard=False):
        """Assert `clustering.communities` is a list of lists of member_type.

        With inner_guard=True the member-type assertion is skipped when the
        first community is empty (needed for algorithms that may emit empty
        communities, e.g. markov_clustering).
        """
        self.assertEqual(type(clustering.communities), list)
        if len(clustering.communities) > 0:
            self.assertEqual(type(clustering.communities[0]), list)
            if not inner_guard or len(clustering.communities[0]) > 0:
                self.assertEqual(type(clustering.communities[0][0]), member_type)

    def test_ego(self):
        g = get_string_graph()
        coms = algorithms.ego_networks(g)
        self.assertEqual(len(coms.communities), g.number_of_nodes())
        self._check(coms, str)

    def test_demon(self):
        self._check(algorithms.demon(get_string_graph(), epsilon=0.25), str)

    def test_node_perception(self):
        g = get_string_graph()
        self._check(algorithms.node_perception(g, threshold=0.25, overlap_threshold=0.25), str)
        g = nx.karate_club_graph()
        self._check(algorithms.node_perception(g, threshold=0.25, overlap_threshold=0.25), int)

    def test_angel(self):
        if ig is not None:
            self._check(algorithms.angel(get_string_graph(), threshold=0.25), str)

    def test_louvain(self):
        self._check(algorithms.louvain(get_string_graph()), str)

    def test_leiden(self):
        if leidenalg is not None:
            self._check(algorithms.leiden(get_string_graph()), str)

    def test_significance(self):
        if leidenalg is not None:
            self._check(algorithms.significance_communities(get_string_graph()), str)

    def test_surprise(self):
        if leidenalg is not None:
            self._check(algorithms.surprise_communities(get_string_graph()), str)

    def test_cpm(self):
        if leidenalg is not None:
            self._check(algorithms.cpm(get_string_graph()), str)

    def test_rbpots(self):
        if leidenalg is not None:
            self._check(algorithms.rb_pots(get_string_graph()), str)

    def test_rberpots(self):
        if leidenalg is not None:
            self._check(algorithms.rber_pots(get_string_graph()), str)

    def test_greedy_modularity(self):
        if leidenalg is not None:
            self._check(algorithms.greedy_modularity(get_string_graph()), str)

    def test_infomap(self):
        if infomap is not None:
            self._check(algorithms.infomap(get_string_graph()), str)
            # infomap leaves a ".tree" artefact behind; clean it up.
            if os.path.exists(".tree"):
                os.remove(".tree")

    def test_lp(self):
        self._check(algorithms.label_propagation(get_string_graph()), str)

    def test_slpa(self):
        self._check(algorithms.slpa(get_string_graph()), str)

    def test_fluid(self):
        if ig is not None:
            self._check(algorithms.async_fluid(get_string_graph(), 3), str)

    def test_kclique(self):
        self._check(algorithms.kclique(get_string_graph(), 3), str)

    def test_gn(self):
        self._check(algorithms.girvan_newman(get_string_graph(), 3), str)

    def test_multicom(self):
        self._check(algorithms.multicom(get_string_graph(), seed_node=0), str)
        self._check(algorithms.multicom(nx.karate_club_graph(), seed_node=0), int)

    def test_em(self):
        self._check(algorithms.em(get_string_graph(), k=3), str)
        self._check(algorithms.em(nx.karate_club_graph(), k=3), int)

    def test_LFM(self):
        self._check(algorithms.lfm(get_string_graph(), alpha=0.8), str)

    def test_SCAN(self):
        self._check(algorithms.scan(get_string_graph(), 0.7, 3), str)

    def test_HLC(self):
        # Link communities are made of edges, hence tuple members.
        self._check(algorithms.hierarchical_link_community(get_string_graph()), tuple)

    def test_DER(self):
        self._check(algorithms.der(get_string_graph()), str)

    def test_osse(self):
        g = get_string_graph()
        seeds = ["$0$", "$2$", "$5$"]
        self._check(algorithms.overlapping_seed_set_expansion(g, seeds), str)

    def test_markov_clustering(self):
        self._check(algorithms.markov_clustering(get_string_graph()), str, inner_guard=True)
        self._check(algorithms.markov_clustering(nx.karate_club_graph()), int, inner_guard=True)

    def test_bigClam(self):
        self._check(algorithms.big_clam(nx.karate_club_graph()), int)

    def test_lemon(self):
        g = get_string_graph()
        self._check(algorithms.lemon(g, ["$0$", "$2$", "$3$"], min_com_size=10, max_com_size=50), str)
        g = nx.karate_club_graph()
        self._check(algorithms.lemon(g, [0, 2, 3], min_com_size=10, max_com_size=50), int)

    def test_lais2(self):
        self._check(algorithms.lais2(get_string_graph()), str)

    def test_gdmp2(self):
        self._check(algorithms.gdmp2(get_string_graph(), min_threshold=.75), str)

    def test_spinglass(self):
        if ig is not None:
            self._check(algorithms.spinglass(get_string_graph()), str)

    def test_walktrap(self):
        if ig is not None:
            self._check(algorithms.walktrap(get_string_graph()), str)

    def test_eigenvector(self):
        if ig is not None:
            self._check(algorithms.eigenvector(get_string_graph()), str)

    def test_Congo(self):
        self._check(algorithms.congo(get_string_graph(), number_communities=3, height=2), str)

    def test_Conga(self):
        self._check(algorithms.conga(get_string_graph(), number_communities=3), str)

    def test_agdl(self):
        self._check(algorithms.agdl(get_string_graph(), 3, 2, 2, 0.5), str)

    def test_frc_fgsn(self):
        g = get_string_graph()
        coms = algorithms.frc_fgsn(g, 1, 0.5, 3)
        self._check(coms, tuple)
        # Fuzzy clustering also exposes a per-node allocation matrix.
        self.assertIsInstance(coms.allocation_matrix, dict)
        self.assertEqual(len(coms.allocation_matrix), g.number_of_nodes())

    def test_sbm_dl(self):
        if gt is not None:
            self._check(algorithms.sbm_dl(get_string_graph()), str)

    def test_sbm_nested_dl(self):
        if gt is not None:
            self._check(algorithms.sbm_dl_nested(get_string_graph()), str)

    def test_danmf(self):
        self._check(algorithms.danmf(get_string_graph()), int)

    def test_egonet_splitter(self):
        self._check(algorithms.egonet_splitter(get_string_graph()), str)

    def test_nnsed(self):
        self._check(algorithms.nnsed(nx.karate_club_graph()), int)

    def test_nmnf(self):
        self._check(algorithms.nmnf(nx.karate_club_graph()), int)

    def test_edmot(self):
        self._check(algorithms.edmot(nx.karate_club_graph()), int)

    def test_bimlpa(self):
        g = nx.algorithms.bipartite.random_graph(50, 50, 0.25)
        self._check(algorithms.bimlpa(g), int)

    def test_aslpaw(self):
        self._check(algorithms.aslpaw(nx.karate_club_graph()), int)

    def test_percomvc(self):
        self._check(algorithms.percomvc(nx.karate_club_graph()), int)
| 39.25625
| 84
| 0.608873
| 2,295
| 18,843
| 4.906754
| 0.06841
| 0.225113
| 0.263209
| 0.257348
| 0.874434
| 0.871592
| 0.869195
| 0.809964
| 0.783145
| 0.777196
| 0
| 0.020468
| 0.263652
| 18,843
| 479
| 85
| 39.338205
| 0.791135
| 0
| 0
| 0.708738
| 0
| 0
| 0.001698
| 0
| 0
| 0
| 0
| 0
| 0.385922
| 1
| 0.116505
| false
| 0
| 0.019417
| 0
| 0.140777
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
dbcfcfe36fc87dbf637b1da2ccc252c4d9ea58ab
| 4,605
|
py
|
Python
|
newcomer/Protein/script.py
|
yutake27/newcomer-exercise
|
e4466edd03e5f1803bbc0c1b0be39a475297392a
|
[
"CC0-1.0"
] | null | null | null |
newcomer/Protein/script.py
|
yutake27/newcomer-exercise
|
e4466edd03e5f1803bbc0c1b0be39a475297392a
|
[
"CC0-1.0"
] | null | null | null |
newcomer/Protein/script.py
|
yutake27/newcomer-exercise
|
e4466edd03e5f1803bbc0c1b0be39a475297392a
|
[
"CC0-1.0"
] | null | null | null |
# PyMOL batch script: load structure 1buw, split its atoms (by id) into an
# "inter" and an "exter" selection, colour them red/blue as cartoons, and
# render a PNG.
# NOTE(review): paths are machine-specific; assumes `pymol` is in scope
# (i.e. the script runs inside a PyMOL session) -- TODO confirm.
# BUGFIX: the two long selection strings had been wrapped mid-number by a
# line-length limit ("...,1013,1 / 014,..." and "...,933,9 / 55,..."),
# which is a Python syntax error; they are rejoined onto single lines here.
pymol.cmd.load('/Users/takei/Desktop/newcomer/Protein/1buw.pdb')
pymol.cmd.do('hide everything')
pymol.cmd.do('sel inter,(id 14,15,67,68,69,70,71,72,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,119,120,121,123,124,125,126,129,137,138,145,146,147,148,149,150,151,152,153,154,155,156,157,158,163,164,165,166,167,168,169,170,171,172,173,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,215,216,217,218,219,220,221,222,226,227,228,229,230,231,232,233,234,235,238,239,240,241,242,243,244,264,266,268,269,285,287,288,289,290,311,312,313,324,325,326,327,328,329,415,416,417,418,419,423,424,432,433,434,435,436,437,438,439,440,441,442,443,444,445,446,447,448,449,450,451,452,453,457,458,459,460,461,462,463,464,466,467,468,469,470,471,472,473,474,475,476,477,478,479,480,481,482,483,484,485,486,487,488,489,490,491,492,493,494,495,496,497,498,499,500,501,502,503,504,505,506,507,508,509,510,511,512,513,514,515,516,517,518,519,520,521,522,523,524,525,526,542,545,547,565,566,567,568,569,570,571,589,590,592,593,594,595,596,597,598,599,611,612,613,614,615,616,617,618,619,620,621,623,635,637,638,640,641,642,643,644,645,646,647,648,649,650,651,681,695,696,697,698,699,700,703,704,709,710,711,712,713,716,717,718,719,723,724,725,726,727,728,729,730,731,732,733,734,735,736,737,738,739,740,741,742,743,744,745,746,747,748,749,751,752,753,754,755,756,757,758,759,760,761,762,763,764,765,766,767,768,769,770,771,772,773,774,775,776,777,778,779,780,781,782,783,784,785,786,787,788,789,790,791,792,793,794,795,796,797,798,799,800,801,802,803,804,805,806,807,808,809,810,811,812,813,814,815,816,817,818,819,820,821,822,823,824,825,826,827,843,853,882,884,886,909,916,920,926,929,930,931,934,935,936,937,938,939,940,941,942,943,944,945,946,947,948,949,950,951,952,953,954,959,960,961,962,963,964,965,966,967,968,969,970,971,972,973,974,975,976,977,978,979,980,981,982,983,984,985,986,987,988,989,990,991,992,993,994,995,996,997,998,999,1000,1001,1002,1003,1004,1005,1006,1007,1008,1009,1010,1011,1012,1013,1014,1015,1016,1017,1018,1019,1020,1021,1022,1023,1024,1025,1026,1028,1029,1030)')
pymol.cmd.do('sel exter,(id 1,2,3,4,5,6,7,8,9,10,11,12,13,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,106,107,108,109,110,111,112,113,114,115,116,117,118,122,127,128,130,131,132,133,134,135,136,139,140,141,142,143,144,159,160,161,162,174,211,212,213,214,223,224,225,236,237,245,246,247,248,249,250,251,252,253,254,255,256,257,258,259,260,261,262,263,265,267,270,271,272,273,274,275,276,277,278,279,280,281,282,283,284,286,291,292,293,294,295,296,297,298,299,300,301,302,303,304,305,306,307,308,309,310,314,315,316,317,318,319,320,321,322,323,330,331,332,333,334,335,336,337,338,339,340,341,342,343,344,345,346,347,348,349,350,351,352,353,354,355,356,357,358,359,360,361,362,363,364,365,366,367,368,369,370,371,372,373,374,375,376,377,378,379,380,381,382,383,384,385,386,387,388,389,390,391,392,393,394,395,396,397,398,399,400,401,402,403,404,405,406,407,408,409,410,411,412,413,414,420,421,422,425,426,427,428,429,430,431,454,455,456,465,527,528,529,530,531,532,533,534,535,536,537,538,539,540,541,543,544,546,548,549,550,551,552,553,554,555,556,557,558,559,560,561,562,563,564,572,573,574,575,576,577,578,579,580,581,582,583,584,585,586,587,588,591,600,601,602,603,604,605,606,607,608,609,610,622,624,625,626,627,628,629,630,631,632,633,634,636,639,652,653,654,655,656,657,658,659,660,661,662,663,664,665,666,667,668,669,670,671,672,673,674,675,676,677,678,679,680,682,683,684,685,686,687,688,689,690,691,692,693,694,701,702,705,706,707,708,714,715,720,721,722,750,828,829,830,831,832,833,834,835,836,837,838,839,840,841,842,844,845,846,847,848,849,850,851,852,854,855,856,857,858,859,860,861,862,863,864,865,866,867,868,869,870,871,872,873,874,875,876,877,878,879,880,881,883,885,887,888,889,890,891,892,893,894,895,896,897,898,899,900,901,902,903,904,905,906,907,908,910,911,912,913,914,915,917,918,919,921,922,923,924,925,927,928,932,933,955,956,957,958,1027,1031,1032,1033,1034,1035,1036,1037,1038,1039,1040,1041,1042,1043,1044,1045,1046,1047,1048,1049,1050,1051,1052,1053,1054,1055,1056,1057,1058,1059,1060,1061,1062,1063,1064,1065,1066,1067,1068,1069)')
# Show both selections as cartoons and colour the interface red, the
# exterior blue.
pymol.cmd.do('show cartoon, inter')
pymol.cmd.do('show cartoon, exter')
pymol.cmd.do('color red, inter')
pymol.cmd.do('color blue, exter')
pymol.cmd.do('png /Users/takei/Desktop/newcomer/Protein/3chainA.png')
| 511.666667
| 2,217
| 0.74658
| 1,131
| 4,605
| 3.039788
| 0.966401
| 0.020942
| 0.023269
| 0.014543
| 0.030832
| 0
| 0
| 0
| 0
| 0
| 0
| 0.691905
| 0.004777
| 4,605
| 9
| 2,218
| 511.666667
| 0.058259
| 0
| 0
| 0
| 0
| 0.222222
| 0.966348
| 0.940729
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
917b8a75c7b589cb7c68e0c1f2591c6b5af130fe
| 8,124
|
py
|
Python
|
sdk/formrecognizer/azure-ai-formrecognizer/tests/test_dac_analyze_general_document_async.py
|
xolve/azure-sdk-for-python
|
9f5baa19c392f77f811d936ee43450e4ea524002
|
[
"MIT"
] | 1
|
2021-09-07T18:39:05.000Z
|
2021-09-07T18:39:05.000Z
|
sdk/formrecognizer/azure-ai-formrecognizer/tests/test_dac_analyze_general_document_async.py
|
xolve/azure-sdk-for-python
|
9f5baa19c392f77f811d936ee43450e4ea524002
|
[
"MIT"
] | null | null | null |
sdk/formrecognizer/azure-ai-formrecognizer/tests/test_dac_analyze_general_document_async.py
|
xolve/azure-sdk-for-python
|
9f5baa19c392f77f811d936ee43450e4ea524002
|
[
"MIT"
] | null | null | null |
# coding=utf-8
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
import pytest
import functools
from devtools_testutils.aio import recorded_by_proxy_async
from azure.ai.formrecognizer._generated.models import AnalyzeResultOperation
from azure.ai.formrecognizer.aio import DocumentAnalysisClient
from azure.ai.formrecognizer import AnalyzeResult
from preparers import FormRecognizerPreparer
from asynctestcase import AsyncFormRecognizerTest
from preparers import GlobalClientPreparer as _GlobalClientPreparer
# Preparer decorator that provisions a DocumentAnalysisClient and passes it to
# each decorated test method as the `client` argument.
DocumentAnalysisClientPreparer = functools.partial(_GlobalClientPreparer, DocumentAnalysisClient)
class TestDACAnalyzeDocumentAsync(AsyncFormRecognizerTest):
    """Async integration tests for DocumentAnalysisClient with the
    "prebuilt-document" model (general document analysis)."""

    async def _assert_document_transform(self, client, document):
        """Analyze *document* (bytes) with "prebuilt-document" and verify that
        the public AnalyzeResult model agrees with the raw generated service
        model, field by field.

        Factored out of the pdf/jpg/multipage tests below, which previously
        repeated this logic verbatim.
        """
        responses = []

        def callback(raw_response, _, headers):
            # Capture both the raw generated model and the public model built
            # from it so the two can be compared after the poll completes.
            analyze_result = client._deserialize(AnalyzeResultOperation, raw_response)
            extracted_document = AnalyzeResult._from_generated(analyze_result.analyze_result)
            responses.append(analyze_result)
            responses.append(extracted_document)

        async with client:
            poller = await client.begin_analyze_document("prebuilt-document", document, cls=callback)
            await poller.result()

        raw_analyze_result = responses[0].analyze_result
        returned_model = responses[1]

        # Check AnalyzeResult top-level fields
        assert returned_model.model_id == raw_analyze_result.model_id
        assert returned_model.api_version == raw_analyze_result.api_version
        assert returned_model.content == raw_analyze_result.content
        self.assertDocumentPagesTransformCorrect(returned_model.pages, raw_analyze_result.pages)
        self.assertDocumentTransformCorrect(returned_model.documents, raw_analyze_result.documents)
        self.assertDocumentTablesTransformCorrect(returned_model.tables, raw_analyze_result.tables)
        self.assertDocumentKeyValuePairsTransformCorrect(returned_model.key_value_pairs, raw_analyze_result.key_value_pairs)
        self.assertDocumentEntitiesTransformCorrect(returned_model.entities, raw_analyze_result.entities)
        self.assertDocumentStylesTransformCorrect(returned_model.styles, raw_analyze_result.styles)

        # check page range
        assert len(raw_analyze_result.pages) == len(returned_model.pages)

    @FormRecognizerPreparer()
    @DocumentAnalysisClientPreparer()
    @recorded_by_proxy_async
    async def test_document_stream_transform_pdf(self, client):
        with open(self.invoice_pdf, "rb") as fd:
            document = fd.read()
        await self._assert_document_transform(client, document)

    @FormRecognizerPreparer()
    @DocumentAnalysisClientPreparer()
    @recorded_by_proxy_async
    async def test_document_stream_transform_jpg(self, client):
        with open(self.form_jpg, "rb") as fd:
            document = fd.read()
        await self._assert_document_transform(client, document)

    @FormRecognizerPreparer()
    @DocumentAnalysisClientPreparer()
    @recorded_by_proxy_async
    async def test_document_multipage_transform(self, client):
        with open(self.multipage_invoice_pdf, "rb") as fd:
            document = fd.read()
        await self._assert_document_transform(client, document)

    @pytest.mark.live_test_only
    @FormRecognizerPreparer()
    @DocumentAnalysisClientPreparer()
    @recorded_by_proxy_async
    async def test_document_multipage_table_span_pdf(self, client):
        with open(self.multipage_table_pdf, "rb") as fd:
            myfile = fd.read()
        async with client:
            poller = await client.begin_analyze_document("prebuilt-document", myfile)
            document = await poller.result()
        # Expected table layout of the multipage fixture document.
        assert len(document.tables) == 3
        assert document.tables[0].row_count == 29
        assert document.tables[0].column_count == 5
        assert document.tables[1].row_count == 6
        assert document.tables[1].column_count == 4
        assert document.tables[2].row_count == 23
        assert document.tables[2].column_count == 5

    @FormRecognizerPreparer()
    @DocumentAnalysisClientPreparer()
    @recorded_by_proxy_async
    async def test_document_specify_pages(self, client):
        with open(self.multipage_invoice_pdf, "rb") as fd:
            document = fd.read()
        async with client:
            # Exercise the `pages` filter in its supported spellings:
            # single page, comma list, range, and range plus single page.
            poller = await client.begin_analyze_document("prebuilt-document", document, pages="1")
            result = await poller.result()
            assert len(result.pages) == 1

            poller = await client.begin_analyze_document("prebuilt-document", document, pages="1, 3")
            result = await poller.result()
            assert len(result.pages) == 2

            poller = await client.begin_analyze_document("prebuilt-document", document, pages="1-2")
            result = await poller.result()
            assert len(result.pages) == 2

            poller = await client.begin_analyze_document("prebuilt-document", document, pages="1-2, 3")
            result = await poller.result()
            assert len(result.pages) == 3
| 47.232558
| 124
| 0.727105
| 835
| 8,124
| 6.792814
| 0.14012
| 0.110014
| 0.093089
| 0.03103
| 0.840268
| 0.831805
| 0.821403
| 0.821403
| 0.813822
| 0.805889
| 0
| 0.00517
| 0.190547
| 8,124
| 171
| 125
| 47.508772
| 0.85736
| 0.032742
| 0
| 0.71875
| 0
| 0
| 0.020393
| 0
| 0
| 0
| 0
| 0
| 0.320313
| 1
| 0.023438
| false
| 0
| 0.070313
| 0
| 0.101563
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
37e631dbb30f410f136872532def68abd5a03a5a
| 29,092
|
py
|
Python
|
DQM/L1TMonitorClient/python/L1TStage2EventInfoClient_cfi.py
|
pasmuss/cmssw
|
566f40c323beef46134485a45ea53349f59ae534
|
[
"Apache-2.0"
] | null | null | null |
DQM/L1TMonitorClient/python/L1TStage2EventInfoClient_cfi.py
|
pasmuss/cmssw
|
566f40c323beef46134485a45ea53349f59ae534
|
[
"Apache-2.0"
] | null | null | null |
DQM/L1TMonitorClient/python/L1TStage2EventInfoClient_cfi.py
|
pasmuss/cmssw
|
566f40c323beef46134485a45ea53349f59ae534
|
[
"Apache-2.0"
] | null | null | null |
# L1 Trigger Event Info client cfi
#
# The cfi can be used, with appropriate settings, for both L1T and L1TEMU.
# Default version in cfi: L1T event client
#
# authors previous versions - see CVS
#
# V.M. Ghete 2010-10-22 revised version of L1T DQM and L1TEMU DQM
import FWCore.ParameterSet.Config as cms
from DQMServices.Core.DQMEDHarvester import DQMEDHarvester
def _quality_test(name, histogram, summary_enabled):
    # One quality test entry:
    #  - name: name of the quality test
    #  - histogram: monitored histogram (full DQM path)
    #  - summary_enabled: 1 if the test is shown in the summary plot, 0 if not
    return cms.PSet(
        QualityTestName = cms.string(name),
        QualityTestHist = cms.string(histogram),
        QualityTestSummaryEnabled = cms.uint32(summary_enabled)
    )

def _no_quality_tests():
    # Placeholder for systems/objects without any quality test defined.
    # A fresh PSet is created on every call so no entry is shared between VPSets.
    return cms.VPSet(
        _quality_test("", "", 0)
    )

def _l1_system(label, hw_label, disable, quality_tests):
    # One L1 system entry:
    #  - label: system label
    #  - hw_label: system label as used in the hardware validation package
    #    (the package producing the ErrorFlag histogram)
    #  - disable: if 1, all quality tests for the system are disabled in the
    #    summary plot
    # The position in the L1Systems VPSet gives, in reverse order, the position
    # in the reportSummaryMap emulator column (left column).
    return cms.PSet(
        SystemLabel = cms.string(label),
        HwValLabel = cms.string(hw_label),
        SystemDisable = cms.uint32(disable),
        QualityTests = quality_tests
    )

def _l1_object(label, disable):
    # One L1 trigger object entry:
    #  - label: object label as used in enum L1GtObject
    #  - disable: emulator mask; if 1, the object is masked in the summary plot
    # The position in the L1Objects VPSet gives, in reverse order, the position
    # in the reportSummaryMap trigger object column (right column).
    # No quality tests are attached to any object in this configuration.
    return cms.PSet(
        ObjectLabel = cms.string(label),
        ObjectDisable = cms.uint32(disable),
        QualityTests = _no_quality_tests()
    )

l1tStage2EventInfoClient = DQMEDHarvester("L1TEventInfoClient",
    monitorDir = cms.untracked.string("L1T"),

    # decide when to run and update the results of the quality tests;
    # retrieval of quality test results must be consistent with the
    # event / LS / Run execution
    runInEventLoop = cms.untracked.bool(False),
    runInEndLumi = cms.untracked.bool(True),
    runInEndRun = cms.untracked.bool(True),
    runInEndJob = cms.untracked.bool(False),

    L1Systems = cms.VPSet(
        _l1_system("ECAL_TPG", "ETP", 0, cms.VPSet(
            _quality_test("Layer1LinkErrorThreshold", "L1T/L1TStage2CaloLayer1/MismatchDetail/maxEvtLinkErrorsByLumiECAL", 1),
            _quality_test("Layer1MismatchThreshold", "L1T/L1TStage2CaloLayer1/MismatchDetail/maxEvtMismatchByLumiECAL", 1),
        )),
        _l1_system("HCAL_TPG", "HTP", 0, cms.VPSet(
            _quality_test("Layer1LinkErrorThreshold", "L1T/L1TStage2CaloLayer1/MismatchDetail/maxEvtLinkErrorsByLumiHCAL", 1),
            _quality_test("Layer1MismatchThreshold", "L1T/L1TStage2CaloLayer1/MismatchDetail/maxEvtMismatchByLumiHCAL", 1),
        )),
        _l1_system("Calo Layer1", "Stage2CaloLayer1", 0, cms.VPSet(
            _quality_test("Layer1LinkErrorThreshold", "L1T/L1TStage2CaloLayer1/maxEvtLinkErrorsByLumi", 1),
            _quality_test("Layer1MismatchThreshold", "L1T/L1TStage2CaloLayer1/maxEvtMismatchByLumi", 1),
        )),
        _l1_system("Calo Layer2", "Stage2CaloLayer2", 0, _no_quality_tests()),
        _l1_system("BMTF", "Stage2BMTF", 0, cms.VPSet(
            _quality_test("BMTF_hwPtRange", "L1T/L1TStage2BMTF/bmtf_hwPt", 1),
            _quality_test("BMTF_hwPtSpectrum", "L1T/L1TStage2BMTF/bmtf_hwPt", 0),
            _quality_test("BMTF_WedgeBXNoisyWedge", "L1T/L1TStage2BMTF/bmtf_wedge_bx", 1),
            _quality_test("BMTF_WedgeBXSpectrum", "L1T/L1TStage2BMTF/bmtf_wedge_bx", 0),
        )),
        _l1_system("OMTF", "Stage2OMTF", 0, _no_quality_tests()),
        _l1_system("EMTF", "Stage2EMTF", 0, cms.VPSet(
            # NOTE(review): "DeadChambe" (sic) kept verbatim -- it must match
            # the quality test name declared in the QTest configuration.
            _quality_test("EMTF_LCTOccupancyDeadChambe", "L1T/L1TStage2EMTF/cscLCTOccupancy", 1),
            _quality_test("EMTF_LCTOccupancyNoisyChamber", "L1T/L1TStage2EMTF/cscLCTOccupancy", 1),
            _quality_test("EMTF_TrackBXNoisyTrack", "L1T/L1TStage2EMTF/emtfTrackBX", 1),
            _quality_test("EMTF_TrackBXSpectrum", "L1T/L1TStage2EMTF/emtfTrackBX", 0),
        )),
        _l1_system("uGMT", "Stage2uGMT", 0, cms.VPSet(
            _quality_test("uGMT_MuonBXPeakAtBX0", "L1T/L1TStage2uGMT/ugmtMuonBX", 1),
            _quality_test("uGMT_MuonBXMeanAtBX0", "L1T/L1TStage2uGMT/ugmtMuonBX", 1),
            _quality_test("uGMT_MuonEtaMeanAt0", "L1T/L1TStage2uGMT/ugmtMuonEta", 1),
            _quality_test("uGMT_MuonPtRange", "L1T/L1TStage2uGMT/ugmtMuonPt", 1),
            _quality_test("uGMT_MuonPtSpectrum", "L1T/L1TStage2uGMT/ugmtMuonPt", 0),
            _quality_test("uGMT_MuonPhivsEtaSpectrum", "L1T/L1TStage2uGMT/ugmtMuonPhivsEta", 0),
            _quality_test("uGMT_BMTFBXPeakAtBX0", "L1T/L1TStage2uGMT/BMTFInput/ugmtBMTFBX", 1),
            _quality_test("uGMT_BMTFBXMeanAtBX0", "L1T/L1TStage2uGMT/BMTFInput/ugmtBMTFBX", 1),
            _quality_test("uGMT_BMTFhwPhiSpectrum", "L1T/L1TStage2uGMT/BMTFInput/ugmtBMTFglbhwPhi", 0),
            _quality_test("uGMT_BMTFhwEtaMeanAt0", "L1T/L1TStage2uGMT/BMTFInput/ugmtBMTFhwEta", 1),
            _quality_test("uGMT_BMTFhwSignUniform", "L1T/L1TStage2uGMT/BMTFInput/ugmtBMTFhwSign", 1),
            _quality_test("uGMT_OMTFBXPeakAtBX0", "L1T/L1TStage2uGMT/OMTFInput/ugmtOMTFBX", 1),
            _quality_test("uGMT_OMTFBXMeanAtBX0", "L1T/L1TStage2uGMT/OMTFInput/ugmtOMTFBX", 1),
            _quality_test("uGMT_OMTFhwPhiPosSpectrum", "L1T/L1TStage2uGMT/OMTFInput/ugmtOMTFglbhwPhiPos", 0),
            _quality_test("uGMT_OMTFhwEtaMeanAt0", "L1T/L1TStage2uGMT/OMTFInput/ugmtOMTFhwEta", 1),
            _quality_test("uGMT_OMTFhwPtRange", "L1T/L1TStage2uGMT/OMTFInput/ugmtOMTFhwPt", 1),
            _quality_test("uGMT_OMTFhwPtSpectrum", "L1T/L1TStage2uGMT/OMTFInput/ugmtOMTFhwPt", 0),
            _quality_test("uGMT_OMTFhwSignUniform", "L1T/L1TStage2uGMT/OMTFInput/ugmtOMTFhwSign", 1),
            _quality_test("uGMT_EMTFBXPeakAtBX0", "L1T/L1TStage2uGMT/EMTFInput/ugmtEMTFBX", 1),
            _quality_test("uGMT_EMTFBXMeanAtBX0", "L1T/L1TStage2uGMT/EMTFInput/ugmtEMTFBX", 1),
            _quality_test("uGMT_EMTFMuonPhiMeanAt0", "L1T/L1TStage2uGMT/ugmtMuonPhiEmtf", 1),
            _quality_test("uGMTvsuGT_MismatchRatioMax0", "L1T/L1TStage2uGMT/uGMToutput_vs_uGTinput/mismatchRatio", 1),
            _quality_test("BMTFvsuGMT_MismatchRatioMax0", "L1T/L1TStage2uGMT/BMTFoutput_vs_uGMTinput/mismatchRatio", 1),
            _quality_test("EMTFvsuGMT_MismatchRatioMax0", "L1T/L1TStage2uGMT/EMTFoutput_vs_uGMTinput/mismatchRatio", 1),
            # NOTE(review): copy 1 lives under "GMTMuonCopy1" (no "u" prefix)
            # while copies 2-5 use "uGMTMuonCopyN" -- kept verbatim; verify
            # against the histogram booking code.
            _quality_test("uGMTCopies_MismatchRatioMax0", "L1T/L1TStage2uGMT/uGMTMuonCopies/GMTMuonCopy1/mismatchRatio", 1),
            _quality_test("uGMTCopies_MismatchRatioMax0", "L1T/L1TStage2uGMT/uGMTMuonCopies/uGMTMuonCopy2/mismatchRatio", 1),
            _quality_test("uGMTCopies_MismatchRatioMax0", "L1T/L1TStage2uGMT/uGMTMuonCopies/uGMTMuonCopy3/mismatchRatio", 1),
            _quality_test("uGMTCopies_MismatchRatioMax0", "L1T/L1TStage2uGMT/uGMTMuonCopies/uGMTMuonCopy4/mismatchRatio", 1),
            _quality_test("uGMTCopies_MismatchRatioMax0", "L1T/L1TStage2uGMT/uGMTMuonCopies/uGMTMuonCopy5/mismatchRatio", 1),
            _quality_test("zeroSupp_MismatchRatioMax0p05", "L1T/L1TStage2uGMT/zeroSuppression/AllEvts/mismatchRatio", 1),
            _quality_test("zeroSupp_MismatchRatioMax0p05", "L1T/L1TStage2uGMT/zeroSuppression/FatEvts/mismatchRatio", 1),
        )),
        _l1_system("uGT", "Stage2uGT", 0, _no_quality_tests()),
    ),

    L1Objects = cms.VPSet(
        _l1_object("TechTrig", 0),
        _l1_object("GtExternal", 0),
        _l1_object("HfRingEtSums", 0),
        _l1_object("HfBitCounts", 0),
        _l1_object("HTM", 0),
        _l1_object("HTT", 0),
        _l1_object("ETM", 0),
        _l1_object("ETT", 0),
        _l1_object("Tau", 0),
        _l1_object("ForJet", 0),
        _l1_object("CenJet", 0),
        _l1_object("IsoEG", 0),
        _l1_object("NoIsoEG", 0),
        _l1_object("Mu", 0),
    ),

    # fast over-mask a system: if the name of the system is in the list, the
    # system will be masked (the default mask value is given in L1Systems)
    DisableL1Systems = cms.vstring(),

    # fast over-mask an object: if the name of the object is in the list, the
    # object will be masked (the default mask value is given in L1Objects)
    DisableL1Objects = cms.vstring()
)
| 55.838772
| 130
| 0.414788
| 1,663
| 29,092
| 7.224293
| 0.15454
| 0.116864
| 0.113534
| 0.129016
| 0.784668
| 0.765524
| 0.725487
| 0.724405
| 0.724405
| 0.681621
| 0
| 0.033392
| 0.511034
| 29,092
| 520
| 131
| 55.946154
| 0.811178
| 0.057988
| 0
| 0.70339
| 0
| 0
| 0.117161
| 0.097062
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.004237
| 0
| 0.004237
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
37ed28ec0709f535a02797f4adee3112fe9cff3e
| 4,323
|
py
|
Python
|
utils/embd_load.py
|
ys10/Tacotron-cn
|
0420aedb3c04359327de58b978198846e7c1a887
|
[
"MIT"
] | 2
|
2019-03-07T12:15:16.000Z
|
2020-12-14T06:15:31.000Z
|
utils/embd_load.py
|
ys10/Tacotron-cn
|
0420aedb3c04359327de58b978198846e7c1a887
|
[
"MIT"
] | null | null | null |
utils/embd_load.py
|
ys10/Tacotron-cn
|
0420aedb3c04359327de58b978198846e7c1a887
|
[
"MIT"
] | null | null | null |
# coding=utf-8
import numpy as np
def list2array(_list):
    """Parse every item of *_list* as a float and return a numpy array."""
    return np.array([float(item) for item in _list])
def array2string(array):
    """Render a numpy *array* as a space-separated string of its values."""
    parts = [str(value) for value in array.tolist()]
    return ' '.join(parts)
class EmbdMapper:
    """Loads a word2vec-style character-embedding file and maps characters /
    texts to float vectors (numpy arrays).

    Expected file format: a meta line ``"<count> <dim>"``, then one line per
    token: ``"<char> <v1> ... <vdim>"``.  Only single-character tokens are
    kept; the configured unknown character maps to a zero vector.

    NOTE(review): OrigEmbdMapper below duplicates this class with plain-list
    values; consider unifying them.
    """

    def __init__(self, config):
        # config must provide: embd_path (embedding file) and unk_char
        # (fallback character for out-of-vocabulary lookups).
        self.data_path = config.embd_path
        # special char
        self.unk_char = config.unk_char
        # load embedding
        self.num, self.dim, self.embd_dict = self._load_embd()

    def _load_embd(self):
        """Read the embedding file; return (vocab size, dim, char->vector dict)."""
        embd_dict = {}
        with open(self.data_path, "r") as f:
            # read meta data: "<count> <dim>"
            _, dim = f.readline().split()
            dim = int(dim)
            while True:
                # read in batches of roughly 1000 bytes' worth of lines
                lines = f.readlines(1000)
                if not lines:
                    break
                for line in lines:
                    chars, *scalars = line[:-1].split()
                    if len(chars) > 1:
                        # keep single-character tokens only
                        continue
                    assert len(scalars) == dim
                    embd_dict[chars] = np.array([float(s) for s in scalars])
        # add special chars: unknown char maps to the zero vector
        # (float32, matching the original; loaded vectors are float64)
        embd_dict[self.unk_char] = np.zeros(shape=(dim,), dtype=np.float32)
        num = len(embd_dict)
        print('number of vector:{}, dimension:{}'.format(num, dim))
        return num, dim, embd_dict

    def char2embd(self, char):
        """Return the vector for *char*; raise KeyError if it is unknown."""
        if char not in self.embd_dict:
            raise KeyError(char)
        return self.embd_dict[char]

    def text2embd(self, text):
        """Return the list of vectors for every character of *text*."""
        return [self.char2embd(char) for char in text]

    def get_char2idx(self):
        """Return a char -> index dict in dictionary insertion order."""
        return {char: idx for idx, char in enumerate(self.embd_dict.keys())}

    def get_vocab(self):
        """Return the known characters in insertion order."""
        return list(self.embd_dict.keys())

    def get_lookup_table(self):
        """Return all vectors stacked into one 2-D numpy array."""
        return np.array(list(self.embd_dict.values()))

    def save(self, path):
        """Write the embeddings back to *path* in the input file format,
        token lines sorted lexicographically."""
        embd_list = []
        for char, vector in self.embd_dict.items():
            values = ' '.join(map(str, vector.tolist()))
            embd_list.append('{} {}\n'.format(char, values))
        # BUG FIX: the original called sorted(embd_list) and discarded the
        # result, so the output was never actually sorted.
        embd_list.sort()
        with open(path, 'w', newline='\n') as f:
            f.writelines(['{} {}\n'.format(self.num, self.dim)])
            f.writelines(embd_list)
class OrigEmbdMapper:
    """Variant of EmbdMapper that keeps embedding values as the raw string
    scalars read from the file (plain lists, not numpy arrays).

    Same file format as EmbdMapper: a meta line ``"<count> <dim>"``, then one
    ``"<char> <v1> ... <vdim>"`` line per token; only single-character tokens
    are kept, and the unknown character maps to a zero vector.
    """

    def __init__(self, config):
        # config must provide: embd_path (embedding file) and unk_char
        # (fallback character for out-of-vocabulary lookups).
        self.data_path = config.embd_path
        # special char
        self.unk_char = config.unk_char
        # load embedding
        self.num, self.dim, self.embd_dict = self._load_embd()

    def _load_embd(self):
        """Read the embedding file; return (vocab size, dim, char->list dict)."""
        embd_dict = {}
        with open(self.data_path, "r") as f:
            # read meta data: "<count> <dim>"
            _, dim = f.readline().split()
            dim = int(dim)
            while True:
                # read in batches of roughly 1000 bytes' worth of lines
                lines = f.readlines(1000)
                if not lines:
                    break
                for line in lines:
                    chars, *scalars = line[:-1].split()
                    if len(chars) > 1:
                        # keep single-character tokens only
                        continue
                    assert len(scalars) == dim
                    # values are kept as strings here (unlike EmbdMapper)
                    embd_dict[chars] = scalars
        # add special chars: unknown char maps to a zero vector (floats)
        embd_dict[self.unk_char] = [0.0] * dim
        num = len(embd_dict)
        print('number of vector:{}, dimension:{}'.format(num, dim))
        return num, dim, embd_dict

    def char2embd(self, char):
        """Return the value list for *char*; raise KeyError if unknown."""
        if char not in self.embd_dict:
            raise KeyError(char)
        return self.embd_dict[char]

    def text2embd(self, text):
        """Return the list of value lists for every character of *text*."""
        return [self.char2embd(char) for char in text]

    def get_char2idx(self):
        """Return a char -> index dict in dictionary insertion order."""
        return {char: idx for idx, char in enumerate(self.embd_dict.keys())}

    def get_vocab(self):
        """Return the known characters in insertion order."""
        return list(self.embd_dict.keys())

    def get_lookup_table(self):
        """Return all value lists in insertion order."""
        return list(self.embd_dict.values())

    def save(self, path):
        """Write the embeddings back to *path* in the input file format,
        token lines sorted lexicographically."""
        embd_list = []
        for char, vector in self.embd_dict.items():
            # BUG FIX: the original called array2string(vector), but entries
            # here are plain lists (strings, or floats for the unk char) with
            # no .tolist() method, so save() raised AttributeError.
            embd_list.append('{} {}\n'.format(char, ' '.join(map(str, vector))))
        # BUG FIX: the original called sorted(embd_list) and discarded the
        # result, so the output was never actually sorted.
        embd_list.sort()
        with open(path, 'w', newline='\n') as f:
            f.writelines(['{} {}\n'.format(self.num, self.dim)])
            f.writelines(embd_list)
| 31.100719
| 82
| 0.54291
| 539
| 4,323
| 4.204082
| 0.174397
| 0.102383
| 0.111209
| 0.063548
| 0.90203
| 0.90203
| 0.889673
| 0.879965
| 0.879965
| 0.840247
| 0
| 0.012574
| 0.337728
| 4,323
| 138
| 83
| 31.326087
| 0.778903
| 0.052972
| 0
| 0.871287
| 0
| 0
| 0.025233
| 0
| 0
| 0
| 0
| 0
| 0.019802
| 1
| 0.178218
| false
| 0
| 0.009901
| 0.079208
| 0.346535
| 0.019802
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
37f49d6856d0642f29cacece0cc9ad7cf1f9bb79
| 144
|
py
|
Python
|
GenProcTrees/__init__.py
|
KolijnWolfaardt/TreeGen
|
587ae9d140a8eefcddc149a754e238aee8007a00
|
[
"MIT"
] | 2
|
2018-07-10T22:36:22.000Z
|
2021-04-08T08:17:32.000Z
|
GenProcTrees/__init__.py
|
KolijnWolfaardt/TreeGen
|
587ae9d140a8eefcddc149a754e238aee8007a00
|
[
"MIT"
] | null | null | null |
GenProcTrees/__init__.py
|
KolijnWolfaardt/TreeGen
|
587ae9d140a8eefcddc149a754e238aee8007a00
|
[
"MIT"
] | 1
|
2021-05-03T02:11:15.000Z
|
2021-05-03T02:11:15.000Z
|
from .gen_proc_trees import generate_tree
from .image_writer import write_image_from_tree
from .geometry_writer import write_geometry_from_tree
| 36
| 53
| 0.895833
| 23
| 144
| 5.130435
| 0.478261
| 0.135593
| 0.288136
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.083333
| 144
| 3
| 54
| 48
| 0.893939
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
534329991e47e416c586c1ba2c822d90cd7ebdcb
| 955
|
py
|
Python
|
code/settings.py
|
oublalkhalid/MoroccoAI-Data-Challenge
|
f9a7e9f62b4b79314bf65a1495d536feec9df17e
|
[
"Apache-2.0"
] | 4
|
2021-12-23T13:34:59.000Z
|
2022-01-18T10:13:44.000Z
|
code/settings.py
|
oublalkhalid/MoroccoAI-Data-Challenge
|
f9a7e9f62b4b79314bf65a1495d536feec9df17e
|
[
"Apache-2.0"
] | null | null | null |
code/settings.py
|
oublalkhalid/MoroccoAI-Data-Challenge
|
f9a7e9f62b4b79314bf65a1495d536feec9df17e
|
[
"Apache-2.0"
] | null | null | null |
# Runtime settings for the plate-detection pipeline.
DEBUG = True

# Root of the MoroccoAI data-challenge dataset on the local machine.
_DATA_ROOT = '/home/koublal/Downloads/moroccoai-data-challenge-edition-001'

# Yolo full-plate detection: images, XML annotations, txt labels, config output
images_input = _DATA_ROOT + '/train'
annotations_input_xml = _DATA_ROOT + '/train_annotation_xml'
labels_output = _DATA_ROOT + '/train_annotation_txt'
config_output = _DATA_ROOT + '/'

# Yolo character detection (a, b, h, d, ...)
# NOTE(review): "charcter" (sic) mirrors the on-disk directory names -- do not "fix".
_CHAR_ROOT = _DATA_ROOT + '/train_charcter_detection'
images_input_character = _CHAR_ROOT + '/image'
annotations_input_xml_character = _CHAR_ROOT + '/label_xml'
labels_output_character = _CHAR_ROOT + '/label_txt'
config_output_character = _CHAR_ROOT + '/'
| 59.6875
| 130
| 0.845026
| 124
| 955
| 6.282258
| 0.258065
| 0.112965
| 0.205392
| 0.297818
| 0.781772
| 0.781772
| 0.781772
| 0.781772
| 0.781772
| 0.626444
| 0
| 0.026115
| 0.037696
| 955
| 16
| 131
| 59.6875
| 0.821545
| 0.079581
| 0
| 0
| 0
| 0
| 0.748005
| 0.748005
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
72b0006a6cd16e8b21d5629194d054718f912255
| 564
|
py
|
Python
|
test/integration/test_pch.py
|
thomasrockhu/bfg9000
|
1cd1226eab9bed2fc2ec6acccf7864fdcf2ed31a
|
[
"BSD-3-Clause"
] | 72
|
2015-06-23T02:35:13.000Z
|
2021-12-08T01:47:40.000Z
|
test/integration/test_pch.py
|
thomasrockhu/bfg9000
|
1cd1226eab9bed2fc2ec6acccf7864fdcf2ed31a
|
[
"BSD-3-Clause"
] | 139
|
2015-03-01T18:48:17.000Z
|
2021-06-18T15:45:14.000Z
|
test/integration/test_pch.py
|
thomasrockhu/bfg9000
|
1cd1226eab9bed2fc2ec6acccf7864fdcf2ed31a
|
[
"BSD-3-Clause"
] | 19
|
2015-12-23T21:24:33.000Z
|
2022-01-06T04:04:41.000Z
|
from . import *
class TestPch(IntegrationTest):
    """Build the 'pch' example and verify the program it produces."""

    def __init__(self, *args, **kwargs):
        super().__init__('pch', *args, **kwargs)

    def test_build(self):
        program = executable('program')
        self.build(program)
        self.assertOutput([program], 'hello from pch!\n')
class TestPchNoSource(IntegrationTest):
    """Build-and-run check for the 'pch_no_source' example (precompiled
    header without an accompanying source file)."""

    def __init__(self, *args, **kwargs):
        super().__init__('pch_no_source', *args, **kwargs)

    def test_build(self):
        prog = executable('program')
        self.build(prog)
        self.assertOutput([prog], 'hello from pch!\n')
| 28.2
| 71
| 0.641844
| 62
| 564
| 5.516129
| 0.354839
| 0.116959
| 0.128655
| 0.152047
| 0.853801
| 0.853801
| 0.853801
| 0.853801
| 0.853801
| 0.573099
| 0
| 0
| 0.187943
| 564
| 19
| 72
| 29.684211
| 0.746725
| 0
| 0
| 0.615385
| 0
| 0
| 0.138298
| 0
| 0
| 0
| 0
| 0
| 0.153846
| 1
| 0.307692
| false
| 0
| 0.076923
| 0
| 0.538462
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 7
|
f408168e6c3f8fec2316ff2f37d1d9ea68084332
| 18,866
|
py
|
Python
|
pmpt/mobility.py
|
pengyuan/markov2tensor
|
4bcdcba6273dc7b671d81953da934188135dbca3
|
[
"MIT"
] | 1
|
2018-03-20T08:28:25.000Z
|
2018-03-20T08:28:25.000Z
|
pmpt/mobility.py
|
pengyuan/markov2tensor
|
4bcdcba6273dc7b671d81953da934188135dbca3
|
[
"MIT"
] | null | null | null |
pmpt/mobility.py
|
pengyuan/markov2tensor
|
4bcdcba6273dc7b671d81953da934188135dbca3
|
[
"MIT"
] | 2
|
2015-12-16T07:21:15.000Z
|
2018-03-20T08:28:27.000Z
|
#!/usr/bin/env python
# coding: UTF-8
from __future__ import division
from pmpt.util import *
from preprocess import settings
__author__ = 'Peng Yuan <pengyuan.org@gmail.com>'
__copyright__ = 'Copyright (c) 2014 Peng Yuan'
__license__ = 'Public domain'
# Each user must visit at least 10 POIs and each POI must be visited by at
# least 10 users; usually #POIs >> #users, so the latter constraint matters more.
def init_data(time_slice, train, region, cluster_radius, filter_count):
conn = MySQLdb.connect(host=settings.HOST, user=settings.USER, passwd=settings.PASSWORD, db=settings.DB)
cursor = conn.cursor()
result = 0
try:
sql = "select user_id from staypoint where mean_coordinate_latitude between "+str(region[0])+" and "+str(region[1])+" and mean_coordinate_longtitude between "+str(region[2])+" and "+str(region[3])+" group by user_id having count(*) > "+str(filter_count)
result = cursor.execute(sql)
result = cursor.fetchall()
conn.commit()
except Exception, e:
print e
conn.rollback()
user_available = []
for item in result:
user_available.append(int(item[0]))
if len(user_available) == 0:
raise "没有足够数据"
try:
sql = "select user_id, poi_name,arrival_timestamp, mean_coordinate_latitude, mean_coordinate_longtitude, poi_distance from staypoint where user_id in "+tuple(user_available).__str__()+" and mean_coordinate_latitude between "+str(region[0])+" and "+str(region[1])+" and mean_coordinate_longtitude between "+str(region[2])+" and "+str(region[3])+" order by id"
result = cursor.execute(sql)
result = cursor.fetchall()
conn.commit()
except Exception, e:
print e
conn.rollback()
temp_data = []
for item in result:
temp_data.append((item[0], item[1], item[2], item[3], item[4], item[5]))
cursor.close()
conn.close()
return temp_data, time_slice, train, cluster_radius
def preprocess(temp_data_a, time_slice, train, cluster_radius, order, filter_poi=False, return_poi_num=False):
    """Prepare staypoint records for tensor-based POI recommendation.

    Steps: optionally drop unpopular POIs, map users/POIs to dense indices,
    bucket timestamps into ``time_slice`` slots of the day, split into
    train/test by the ``train`` ratio, build a POI adjacency list from
    geographic distance (<= cluster_radius), and collect per-(user, slot)
    recommendation targets: (past, now, future) triples when ``order`` == 3,
    otherwise (now, future) pairs.

    Returns (axis_pois, axis_users, train_structure_data, poi_adjacent_list,
    recommends, unknow_poi_set[, poi_num]).
    """
    temp_data = temp_data_a
    if filter_poi:
        # Keep only POIs visited by more than 20% of the users.
        temp_data = []
        filter_data = {}
        poi_set = set()
        poi_filter_set = set()
        for item in temp_data_a:
            # NOTE(review): a user's first record never enters their POI set
            # (the else branch only creates the empty set) -- confirm intent.
            if filter_data.has_key(item[0]):
                filter_data[item[0]].add(item[1])
            else:
                filter_data[item[0]] = set()
            poi_set.add(item[1])
        # print "poi_set: ", poi_set
        for poi in poi_set:
            count = 0
            for key in filter_data.keys():
                if poi in filter_data[key]:
                    count += 1
            # print "count: ", count
            if count > len(filter_data.keys()) * 0.2:
                poi_filter_set.add(poi)
        for t_data in temp_data_a:
            if t_data[1] in poi_filter_set:
                temp_data.append((t_data[0], t_data[1], t_data[2], t_data[3], t_data[4], t_data[5]))
    length = int(len(temp_data) * train)
    recommends = {}
    time_slot = range(0, time_slice)
    poi_set = set()
    user_set = set()
    for item in temp_data:
        poi_set.add(item[1])
        user_set.add(item[0])
    poi_num = len(poi_set)
    user_num = len(user_set)
    # Python-2 prints; strings mean "number of POIs" / "number of users".
    print "poi数目:", poi_num
    print "用户数目:", user_num
    # Bidirectional POI <-> dense-index maps.
    pois_axis = {}
    axis_pois = {}
    index = 0
    for item in poi_set:
        pois_axis[item] = index
        axis_pois[index] = item
        index += 1
    # Bidirectional user <-> dense-index maps.
    users_axis = {}
    axis_users = {}
    index = 0
    for item in user_set:
        users_axis[item] = index
        axis_users[index] = item
        index += 1
    full_data = []
    for item in temp_data:
        # Bucket the (presumably unix) timestamp into a slot of the day.
        time = int(item[2] % 86400 // (3600 * (24 // time_slice)))
        full_data.append((users_axis[item[0]], pois_axis[item[1]], time, item[3], item[4], item[5]))
    train_data = full_data[:length]
    test_data = full_data[length:]
    poi_lat_lon = {}
    for item in full_data:
        # Keep, per POI, the fix with the smallest poi_distance.
        if item[1] in poi_lat_lon:
            if poi_lat_lon[item[1]][2] > item[5]:
                poi_lat_lon[item[1]] = (item[3], item[4], item[5]) # (latitude, longtitude, poi_distance)
        else:
            poi_lat_lon[item[1]] = (item[3], item[4], item[5])
    poi_adjacent_list = {}
    for poi in range(poi_num):
        poi_adjacent_list[poi] = set()
    # Two POIs are mutually adjacent when within cluster_radius of each other.
    for key_1 in poi_lat_lon.keys():
        for key_2 in poi_lat_lon.keys():
            poi_1 = poi_lat_lon[key_1]
            poi_2 = poi_lat_lon[key_2]
            dis = calculate_distance(poi_1[0], poi_1[1], poi_2[0], poi_2[1])
            if dis <= cluster_radius:
                poi_adjacent_list[key_1].add(key_2)
                poi_adjacent_list[key_2].add(key_1)
    train_structure_data = {}
    test_structure_data = {}
    know_poi_set = {}
    unknow_poi_set = {}
    for user in range(user_num):
        train_structure_data[user] = {}
        test_structure_data[user] = {}
        know_poi_set[user] = {}
        unknow_poi_set[user] = {}
        recommends[user] = {}
        for time in time_slot:
            train_structure_data[user][time] = []
            test_structure_data[user][time] = []
            know_poi_set[user][time] = set()
            unknow_poi_set[user][time] = set()
            recommends[user][time] = []
    for item in train_data:
        train_structure_data[item[0]][item[2]].append(item[1])
        know_poi_set[item[0]][item[2]].add(item[1])
    for item in test_data:
        test_structure_data[item[0]][item[2]].append(item[1])
        # POIs seen only in the test split are the prediction targets.
        if item[1] not in know_poi_set[item[0]][item[2]]:
            unknow_poi_set[item[0]][item[2]].add(item[1])
    for user in range(user_num):
        for time in time_slot:
            data = test_structure_data[user][time]
            data_length = len(data)
            if data_length == 0:
                recommends[user][time] = None
            else:
                for index in range(data_length):
                    if data[index] not in unknow_poi_set[user][time]:
                        continue
                    else:
                        if order == 3:
                            # Third-order chains need two predecessors; borrow
                            # them from the tail of the training sequence when
                            # the test prefix is too short.
                            if index == 0:
                                if len(train_structure_data[user][time]) < 2:
                                    continue
                                past = train_structure_data[user][time][-2]
                                now = train_structure_data[user][time][-1]
                                future = data[0]
                            elif index == 1:
                                if len(train_structure_data[user][time]) < 1:
                                    continue
                                past = train_structure_data[user][time][-1]
                                now = data[0]
                                future = data[1]
                            else:
                                past = data[index-2]
                                now = data[index-1]
                                future = data[index]
                            recommends[user][time].append((past, now, future))
                        else:
                            if index == 0:
                                if len(train_structure_data[user][time]) < 1:
                                    continue
                                now = train_structure_data[user][time][-1]
                                future = data[0]
                            else:
                                now = data[index-1]
                                future = data[index]
                            recommends[user][time].append((now, future))
    if return_poi_num:
        return axis_pois, axis_users, train_structure_data, poi_adjacent_list, recommends, unknow_poi_set, poi_num
    return axis_pois, axis_users, train_structure_data, poi_adjacent_list, recommends, unknow_poi_set
def preprocess2(temp_data_a, time_slice, train, cluster_radius, order, filter_poi=False, return_poi_num=False):
temp_data = temp_data_a
if filter_poi:
temp_data = []
filter_data = {}
poi_set = set()
poi_filter_set = set()
for item in temp_data_a:
if filter_data.has_key(item[0]):
filter_data[item[0]].add(item[1])
else:
filter_data[item[0]] = set()
poi_set.add(item[1])
# print "poi_set: ", poi_set
for poi in poi_set:
count = 0
for key in filter_data.keys():
if poi in filter_data[key]:
count += 1
# print "count: ", count
if count > len(filter_data.keys()) * 0.2:
poi_filter_set.add(poi)
for t_data in temp_data_a:
if t_data[1] in poi_filter_set:
temp_data.append((t_data[0], t_data[1], t_data[2], t_data[3], t_data[4], t_data[5]))
length = int(len(temp_data) * train)
recommends = {}
for step in range(1, 6):
recommends[step] = {}
time_slot = range(0, time_slice)
poi_set = set()
user_set = set()
for item in temp_data:
poi_set.add(item[1])
user_set.add(item[0])
poi_num = len(poi_set)
user_num = len(user_set)
print "poi数目:", poi_num
print "用户数目:", user_num
pois_axis = {}
axis_pois = {}
index = 0
for item in poi_set:
pois_axis[item] = index
axis_pois[index] = item
index += 1
users_axis = {}
axis_users = {}
index = 0
for item in user_set:
users_axis[item] = index
axis_users[index] = item
index += 1
full_data = []
for item in temp_data:
time = int(item[2] % 86400 // (3600 * (24 // time_slice)))
full_data.append((users_axis[item[0]], pois_axis[item[1]], time, item[3], item[4], item[5]))
train_data = full_data[:length]
test_data = full_data[length:]
poi_lat_lon = {}
for item in full_data:
if item[1] in poi_lat_lon:
if poi_lat_lon[item[1]][2] > item[5]:
poi_lat_lon[item[1]] = (item[3], item[4], item[5]) # (latitude, longtitude, poi_distance)
else:
poi_lat_lon[item[1]] = (item[3], item[4], item[5])
poi_adjacent_list = {}
for poi in range(poi_num):
poi_adjacent_list[poi] = set()
for key_1 in poi_lat_lon.keys():
for key_2 in poi_lat_lon.keys():
poi_1 = poi_lat_lon[key_1]
poi_2 = poi_lat_lon[key_2]
dis = calculate_distance(poi_1[0], poi_1[1], poi_2[0], poi_2[1])
if dis <= cluster_radius:
poi_adjacent_list[key_1].add(key_2)
poi_adjacent_list[key_2].add(key_1)
train_structure_data = {}
test_structure_data = {}
know_poi_set = {}
unknow_poi_set = {}
for user in range(user_num):
train_structure_data[user] = {}
test_structure_data[user] = {}
know_poi_set[user] = {}
unknow_poi_set[user] = {}
for step in range(1, 6):
recommends[step][user] = {}
for time in time_slot:
train_structure_data[user][time] = []
test_structure_data[user][time] = []
know_poi_set[user][time] = set()
unknow_poi_set[user][time] = set()
for step in range(1, 6):
recommends[step][user][time] = []
for item in train_data:
train_structure_data[item[0]][item[2]].append(item[1])
know_poi_set[item[0]][item[2]].add(item[1])
for item in test_data:
test_structure_data[item[0]][item[2]].append(item[1])
if item[1] not in know_poi_set[item[0]][item[2]]:
unknow_poi_set[item[0]][item[2]].add(item[1])
for user in range(user_num):
for time in time_slot:
data = test_structure_data[user][time]
data_length = len(data)
if data_length == 0:
for step in range(1, 6):
recommends[step][user][time] = None
else:
for index in range(data_length-4):
if data[index] not in unknow_poi_set[user][time]:
continue
else:
if index == 0:
if len(train_structure_data[user][time]) < 1:
continue
now = train_structure_data[user][time][-1]
future = data[0]
else:
now = data[index-1]
future = data[index]
recommends[1][user][time].append((now, future))
if data[index+1] not in unknow_poi_set[user][time]:
continue
else:
if index+1 == 1:
if len(train_structure_data[user][time]) < 1:
continue
now = train_structure_data[user][time][-1]
future = data[1]
else:
now = data[index-1]
future = data[index+1]
recommends[2][user][time].append((now, future))
if data[index+2] not in unknow_poi_set[user][time]:
continue
else:
if index+2 == 2:
if len(train_structure_data[user][time]) < 1:
continue
now = train_structure_data[user][time][-1]
future = data[2]
else:
now = data[index-1]
future = data[index+2]
recommends[3][user][time].append((now, future))
if data[index+3] not in unknow_poi_set[user][time]:
continue
else:
if index+3 == 3:
if len(train_structure_data[user][time]) < 1:
continue
now = train_structure_data[user][time][-1]
future = data[3]
else:
now = data[index-1]
future = data[index+3]
recommends[4][user][time].append((now, future))
if data[index+1] not in unknow_poi_set[user][time]:
continue
else:
if index+4 == 4:
if len(train_structure_data[user][time]) < 1:
continue
now = train_structure_data[user][time][-1]
future = data[4]
else:
now = data[index-1]
future = data[index+4]
recommends[5][user][time].append((now, future))
if return_poi_num:
return axis_pois, axis_users, train_structure_data, poi_adjacent_list, recommends, unknow_poi_set, poi_num
return axis_pois, axis_users, train_structure_data, poi_adjacent_list, recommends, unknow_poi_set
def trans(train_structure_data, poi_adjacent_list, order, poi_num, user_num, time_slice):
    """Build per-(user, time-slot) transition tensors from training visits.

    order == 2: tensor[user][time][i][j]    = P(next in adj(j) | current in adj(i))
    order == 3: tensor[user][time][i][j][k] = P(next in adj(k) | two-step history
                                               in adj(i), adj(j))
    Probabilities are count ratios over the consecutive visit windows of the
    training sequence; true division is in effect (``from __future__ import
    division`` at module top). Empty conditions yield 0.
    """
    time_slot = range(0, time_slice)
    if order == 2:
        tensor = [[[[0 for i in range(poi_num)] for j in range(poi_num)] for k in range(time_slice)] for l in range(user_num)]
    else:
        tensor = [[[[[0 for i in range(poi_num)] for j in range(poi_num)] for k in range(poi_num)] for l in range(time_slice)] for m in range(user_num)]
    for key in train_structure_data.keys():
        for time in time_slot:
            data = train_structure_data[key][time]
            if order == 3:
                # Need at least one (past, now, future) window.
                if len(data) < 3:
                    continue
                else:
                    for i in range(poi_num):
                        for j in range(poi_num):
                            for k in range(poi_num):
                                count_3 = 0
                                count_2 = 0
                                # Slide over consecutive visit triples.
                                for index in range(len(data)-2):
                                    past = data[index]
                                    now = data[index+1]
                                    future = data[index+2]
                                    if i in poi_adjacent_list[past] and j in poi_adjacent_list[now] and k in poi_adjacent_list[future]:
                                        count_3 += 1
                                    if i in poi_adjacent_list[past] and j in poi_adjacent_list[now]:
                                        count_2 += 1
                                if count_2 > 0:
                                    tensor[key][time][i][j][k] = count_3 / count_2
                                else:
                                    tensor[key][time][i][j][k] = 0
            if order == 2:
                # Need at least one (now, future) window.
                if len(data) < 2:
                    continue
                else:
                    for i in range(poi_num):
                        for j in range(poi_num):
                            count_2 = 0
                            count_1 = 0
                            # Slide over consecutive visit pairs.
                            for index in range(len(data)-1):
                                now = data[index]
                                future = data[index+1]
                                if i in poi_adjacent_list[now] and j in poi_adjacent_list[future]:
                                    count_2 += 1
                                if i in poi_adjacent_list[now]:
                                    count_1 += 1
                            if count_1 > 0:
                                tensor[key][time][i][j] = count_2 / count_1
                                # print tensor[key][time][i][j]
                            else:
                                tensor[key][time][i][j] = 0
                        # Disabled uniform-row fallback for all-zero rows:
                        # flag = True
                        # for index in range(poi_num):
                        #     if tensor[key][time][i][index] != 0:
                        #         flag = False
                        #         break
                        #
                        # if flag:
                        #     for index in range(poi_num):
                        #         tensor[key][time][i][index] = 1 / poi_num
            # print tensor[key][time]
            # print is_stochastic(numpy.array(tensor[key][time]))
    return tensor
def is_stochastic(matrix):
    """Return True when every row of the 2-D ``matrix`` sums to 1
    (i.e. it is right-stochastic).

    The original compared each row sum with ``!= 1`` exactly; row sums
    accumulated from float divisions rarely hit 1.0 bit-exactly, so a small
    absolute tolerance is used instead. Also avoids shadowing the ``sum``
    builtin and the pointless ndarray -> list -> ndarray round-trip.
    """
    arr = numpy.asarray(matrix)
    # Row sums within 1e-9 of 1.0 count as stochastic.
    return bool(numpy.all(numpy.abs(arr.sum(axis=1) - 1.0) <= 1e-9))
# Manual smoke entry point (Python 2 print statement).
if __name__ == '__main__':
    print "here"
| 38.73922
| 366
| 0.488498
| 2,293
| 18,866
| 3.781945
| 0.07283
| 0.03321
| 0.066421
| 0.055696
| 0.828067
| 0.807657
| 0.775138
| 0.761877
| 0.726822
| 0.723363
| 0
| 0.027872
| 0.404749
| 18,866
| 487
| 367
| 38.73922
| 0.744346
| 0.031803
| 0
| 0.767726
| 0
| 0
| 0.028117
| 0.011126
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0.002445
| 0.007335
| null | null | 0.017115
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
f45a1620c83cdce11e45b3f08f960a4bbf800d10
| 55,268
|
py
|
Python
|
tests/s3tests/functional/test_headers.py
|
baotiao/zeppelin-gateway
|
bff8d8e160422322e306dfc1dc1768b29001a8c0
|
[
"Apache-2.0"
] | 20
|
2017-05-04T00:49:55.000Z
|
2022-03-27T10:06:02.000Z
|
tests/s3tests/functional/test_headers.py
|
baotiao/zeppelin-gateway
|
bff8d8e160422322e306dfc1dc1768b29001a8c0
|
[
"Apache-2.0"
] | null | null | null |
tests/s3tests/functional/test_headers.py
|
baotiao/zeppelin-gateway
|
bff8d8e160422322e306dfc1dc1768b29001a8c0
|
[
"Apache-2.0"
] | 16
|
2017-04-11T08:10:04.000Z
|
2020-06-16T02:49:48.000Z
|
from cStringIO import StringIO
import boto.connection
import boto.exception
import boto.s3.connection
import boto.s3.acl
import boto.utils
import bunch
import nose
import operator
import random
import string
import socket
import ssl
import os
import re
from urlparse import urlparse
from boto.s3.connection import S3Connection
from nose.tools import eq_ as eq
from nose.plugins.attrib import attr
from nose.plugins.skip import SkipTest
from .utils import assert_raises
import AnonymousAuth
from email.header import decode_header
from . import (
_make_raw_request,
nuke_prefixed_buckets,
get_new_bucket,
s3,
config,
get_prefix,
TargetConnection,
targets,
)
# Originals saved by setup() so teardown() can undo the monkey-patching.
_orig_conn = {}
_orig_authorize = None
# Header overrides applied to every outgoing request; see _update_headers().
_custom_headers = {}
_remove_headers = []
# Patching strategy chosen by setup(): 'S3Connection' or 'HTTPRequest'.
boto_type = None

# HeaderS3Connection and _our_authorize are necessary to be able to arbitrarily
# overwrite headers. Depending on the version of boto, one or the other is
# necessary. We later determine in setup what needs to be used.
def _update_headers(headers):
    """Apply the registered customizations to *headers* in place: merge in
    ``_custom_headers``, then drop every name listed in ``_remove_headers``.
    """
    global _custom_headers, _remove_headers
    headers.update(_custom_headers)
    for name in _remove_headers:
        # Removal is best-effort; absent keys are simply skipped.
        headers.pop(name, None)
# Note: We need to update the headers twice. The first time so the
# authentication signing is done correctly. The second time to overwrite any
# headers modified or created in the authentication step.
class HeaderS3Connection(S3Connection):
    """ establish an authenticated connection w/customized headers
    """
    def fill_in_auth(self, http_request, **kwargs):
        # Apply overrides before signing (so the signature covers them) and
        # again after, in case the auth step rewrote any of them.
        _update_headers(http_request.headers)
        S3Connection.fill_in_auth(self, http_request, **kwargs)
        _update_headers(http_request.headers)
        return http_request
def _our_authorize(self, connection, **kwargs):
    """ perform an authentication w/customized headers

    Drop-in replacement for boto's HTTPRequest.authorize (installed by
    setup()); applies overrides both before and after the original call so
    the signature covers them and the auth step cannot clobber them.
    """
    _update_headers(self.headers)
    _orig_authorize(self, connection, **kwargs)
    _update_headers(self.headers)
def setup():
    """Monkey-patch boto so tests can arbitrarily override request headers.

    Saves originals in module globals so teardown() can restore them.
    """
    global boto_type
    # we determine what we need to replace by the existence of particular
    # attributes. boto 2.0rc1 as fill_in_auth for S3Connection, while boto 2.0
    # has authorize for HTTPRequest.
    if hasattr(S3Connection, 'fill_in_auth'):
        global _orig_conn
        boto_type = 'S3Connection'
        # Replace each configured connection with a header-injecting clone
        # built from the same credentials/endpoint settings.
        for conn in s3:
            _orig_conn[conn] = s3[conn]
            header_conn = HeaderS3Connection(
                aws_access_key_id=s3[conn].aws_access_key_id,
                aws_secret_access_key=s3[conn].aws_secret_access_key,
                is_secure=s3[conn].is_secure,
                port=s3[conn].port,
                host=s3[conn].host,
                calling_format=s3[conn].calling_format
                )
            s3[conn] = header_conn
    elif hasattr(boto.connection.HTTPRequest, 'authorize'):
        global _orig_authorize
        boto_type = 'HTTPRequest'
        _orig_authorize = boto.connection.HTTPRequest.authorize
        boto.connection.HTTPRequest.authorize = _our_authorize
    else:
        # Unknown boto version -- neither patch point exists.
        raise RuntimeError
def teardown():
global boto_type
# replace original functionality depending on the boto version
if boto_type is 'S3Connection':
global _orig_conn
for conn in s3:
s3[conn] = _orig_conn[conn]
_orig_conn = {}
elif boto_type is 'HTTPRequest':
global _orig_authorize
boto.connection.HTTPRequest.authorize = _orig_authorize
_orig_authorize = None
else:
raise RuntimeError
def _clear_custom_headers():
""" Eliminate any header customizations
"""
global _custom_headers, _remove_headers
_custom_headers = {}
_remove_headers = []
def _add_custom_headers(headers=None, remove=None):
    """ Define header customizations (additions, replacements, removals)

    *headers* is merged into the override dict; *remove* names are queued
    for deletion from every outgoing request.
    """
    global _custom_headers, _remove_headers
    # Guard against a cleared/None override dict.
    _custom_headers = _custom_headers or {}
    if headers is not None:
        _custom_headers.update(headers)
    if remove is not None:
        _remove_headers.extend(remove)
def _setup_bad_object(headers=None, remove=None):
    """ Create a new bucket, add an object w/header customizations

    The bucket is created BEFORE the customizations are registered, so the
    bucket-create request itself is not affected by them.
    """
    new_bucket = get_new_bucket()
    _add_custom_headers(headers=headers, remove=remove)
    return new_bucket.new_key('foo')
def tag(*tags):
    """Decorator factory: set each named tag as a True attribute on the
    decorated function (used by the test-attribute selection machinery)."""
    def decorate(func):
        for name in tags:
            setattr(func, name, True)
        return func
    return decorate
#
# common tests
#
# PUT object with a Content-MD5 that is valid base64 but not a valid MD5
# digest ('abracadabra' encoded) must be rejected with 400 InvalidDigest.
@tag('auth_common')
@attr(resource='object')
@attr(method='put')
@attr(operation='create w/invalid MD5')
@attr(assertion='fails 400')
@nose.with_setup(teardown=_clear_custom_headers)
def test_object_create_bad_md5_invalid_short():
    key = _setup_bad_object({'Content-MD5':'YWJyYWNhZGFicmE='})
    e = assert_raises(boto.exception.S3ResponseError, key.set_contents_from_string, 'bar')
    eq(e.status, 400)
    eq(e.reason.lower(), 'bad request') # some proxies vary the case
    eq(e.error_code, 'InvalidDigest')
@tag('auth_common')
@attr(resource='object')
@attr(method='put')
@attr(operation='create w/mismatched MD5')
@attr(assertion='fails 400')
@nose.with_setup(teardown=_clear_custom_headers)
def test_object_create_bad_md5_bad():
key = _setup_bad_object({'Content-MD5':'rL0Y20zC+Fzt72VPzMSk2A=='})
e = assert_raises(boto.exception.S3ResponseError, key.set_contents_from_string, 'bar')
eq(e.status, 400)
eq(e.reason.lower(), 'bad request') # some proxies vary the case
eq(e.error_code, 'BadDigest')
@tag('auth_common')
@attr(resource='object')
@attr(method='put')
@attr(operation='create w/empty MD5')
@attr(assertion='fails 400')
@nose.with_setup(teardown=_clear_custom_headers)
def test_object_create_bad_md5_empty():
key = _setup_bad_object({'Content-MD5': ''})
e = assert_raises(boto.exception.S3ResponseError, key.set_contents_from_string, 'bar')
eq(e.status, 400)
eq(e.reason.lower(), 'bad request') # some proxies vary the case
eq(e.error_code, 'InvalidDigest')
@tag('auth_common')
@attr(resource='object')
@attr(method='put')
@attr(operation='create w/non-graphics in MD5')
@attr(assertion='fails 403')
@attr('fails_strict_rfc2616')
@nose.with_setup(teardown=_clear_custom_headers)
def test_object_create_bad_md5_unreadable():
key = _setup_bad_object({'Content-MD5': '\x07'})
e = assert_raises(boto.exception.S3ResponseError, key.set_contents_from_string, 'bar')
eq(e.status, 403)
eq(e.reason, 'Forbidden')
assert e.error_code in ('AccessDenied', 'SignatureDoesNotMatch')
@tag('auth_common')
@attr(resource='object')
@attr(method='put')
@attr(operation='create w/no MD5 header')
@attr(assertion='succeeds')
@nose.with_setup(teardown=_clear_custom_headers)
def test_object_create_bad_md5_none():
key = _setup_bad_object(remove=('Content-MD5',))
key.set_contents_from_string('bar')
# strangely, amazon doesn't report an error with a non-expect 100 also, our
# error comes back as html, and not xml as I normally expect
@tag('auth_common')
@attr(resource='object')
@attr(method='put')
@attr(operation='create w/Expect 200')
@attr(assertion='garbage, but S3 succeeds!')
@nose.with_setup(teardown=_clear_custom_headers)
@attr('fails_on_rgw')
def test_object_create_bad_expect_mismatch():
key = _setup_bad_object({'Expect': 200})
key.set_contents_from_string('bar')
# this is a really long test, and I don't know if it's valid...
# again, accepts this with no troubles
@tag('auth_common')
@attr(resource='object')
@attr(method='put')
@attr(operation='create w/empty expect')
@attr(assertion='succeeds ... should it?')
@nose.with_setup(teardown=_clear_custom_headers)
def test_object_create_bad_expect_empty():
key = _setup_bad_object({'Expect': ''})
key.set_contents_from_string('bar')
@tag('auth_common')
@attr(resource='object')
@attr(method='put')
@attr(operation='create w/no expect')
@attr(assertion='succeeds')
@nose.with_setup(teardown=_clear_custom_headers)
def test_object_create_bad_expect_none():
key = _setup_bad_object(remove=('Expect',))
key.set_contents_from_string('bar')
# this is a really long test..
@tag('auth_common')
@attr(resource='object')
@attr(method='put')
@attr(operation='create w/non-graphic expect')
@attr(assertion='garbage, but S3 succeeds!')
@nose.with_setup(teardown=_clear_custom_headers)
@attr('fails_on_rgw')
@attr('fails_strict_rfc2616')
def test_object_create_bad_expect_unreadable():
key = _setup_bad_object({'Expect': '\x07'})
key.set_contents_from_string('bar')
@tag('auth_common')
@attr(resource='object')
@attr(method='put')
@attr(operation='create w/empty content length')
@attr(assertion='fails 400')
@nose.with_setup(teardown=_clear_custom_headers)
@attr('fails_on_rgw')
def test_object_create_bad_contentlength_empty():
key = _setup_bad_object({'Content-Length': ''})
e = assert_raises(boto.exception.S3ResponseError, key.set_contents_from_string, 'bar')
eq(e.status, 400)
eq(e.reason.lower(), 'bad request') # some proxies vary the case
eq(e.error_code, None)
@tag('auth_common')
@attr(resource='object')
@attr(method='put')
@attr(operation='create w/negative content length')
@attr(assertion='fails 400')
@attr('fails_on_mod_proxy_fcgi')
@nose.with_setup(teardown=_clear_custom_headers)
def test_object_create_bad_contentlength_negative():
key = _setup_bad_object({'Content-Length': -1})
e = assert_raises(boto.exception.S3ResponseError, key.set_contents_from_string, 'bar')
eq(e.status, 400)
eq(e.reason.lower(), 'bad request') # some proxies vary the case
@tag('auth_common')
@attr(resource='object')
@attr(method='put')
@attr(operation='create w/no content length')
@attr(assertion='fails 411')
@nose.with_setup(teardown=_clear_custom_headers)
def test_object_create_bad_contentlength_none():
key = _setup_bad_object(remove=('Content-Length',))
e = assert_raises(boto.exception.S3ResponseError, key.set_contents_from_string, 'bar')
eq(e.status, 411)
eq(e.reason, 'Length Required')
eq(e.error_code,'MissingContentLength')
@tag('auth_common')
@attr(resource='object')
@attr(method='put')
@attr(operation='create w/non-graphic content length')
@attr(assertion='fails 400')
@attr('fails_on_mod_proxy_fcgi')
@attr('fails_strict_rfc2616')
@nose.with_setup(teardown=_clear_custom_headers)
def test_object_create_bad_contentlength_unreadable():
key = _setup_bad_object({'Content-Length': '\x07'})
e = assert_raises(boto.exception.S3ResponseError, key.set_contents_from_string, 'bar')
eq(e.status, 400)
eq(e.reason.lower(), 'bad request') # some proxies vary the case
eq(e.error_code, None)
# PUT object whose declared Content-Length exceeds the body by one byte:
# the server waits for the missing byte and fails with 400 RequestTimeout.
@tag('auth_common')
@attr(resource='object')
@attr(method='put')
@attr(operation='create w/content length too long')
@attr(assertion='fails 400')
@nose.with_setup(teardown=_clear_custom_headers)
@attr('fails_on_rgw')
def test_object_create_bad_contentlength_mismatch_above():
    content = 'bar'
    length = len(content) + 1
    key = _setup_bad_object({'Content-Length': length})
    # Disable retries since key.should_retry will discard the response with
    # PleaseRetryException.
    def no_retry(response, chunked_transfer): return False
    key.should_retry = no_retry
    e = assert_raises(boto.exception.S3ResponseError, key.set_contents_from_string, content)
    eq(e.status, 400)
    eq(e.reason.lower(), 'bad request') # some proxies vary the case
    eq(e.error_code, 'RequestTimeout')
@tag('auth_common')
@attr(resource='object')
@attr(method='put')
@attr(operation='create w/content type text/plain')
@attr(assertion='succeeds')
@nose.with_setup(teardown=_clear_custom_headers)
def test_object_create_bad_contenttype_invalid():
key = _setup_bad_object({'Content-Type': 'text/plain'})
key.set_contents_from_string('bar')
@tag('auth_common')
@attr(resource='object')
@attr(method='put')
@attr(operation='create w/empty content type')
@attr(assertion='succeeds')
@nose.with_setup(teardown=_clear_custom_headers)
def test_object_create_bad_contenttype_empty():
key = _setup_bad_object({'Content-Type': ''})
key.set_contents_from_string('bar')
@tag('auth_common')
@attr(resource='object')
@attr(method='put')
@attr(operation='create w/no content type')
@attr(assertion='succeeds')
@nose.with_setup(teardown=_clear_custom_headers)
def test_object_create_bad_contenttype_none():
key = _setup_bad_object(remove=('Content-Type',))
key.set_contents_from_string('bar')
@tag('auth_common')
@attr(resource='object')
@attr(method='put')
@attr(operation='create w/non-graphic content type')
@attr(assertion='fails 403')
@nose.with_setup(teardown=_clear_custom_headers)
@attr('fails_on_rgw')
@attr('fails_strict_rfc2616')
def test_object_create_bad_contenttype_unreadable():
key = _setup_bad_object({'Content-Type': '\x08'})
e = assert_raises(boto.exception.S3ResponseError, key.set_contents_from_string, 'bar')
eq(e.status, 403)
eq(e.reason, 'Forbidden')
assert e.error_code in ('AccessDenied', 'SignatureDoesNotMatch')
# the teardown is really messed up here. check it out
@tag('auth_common')
@attr(resource='object')
@attr(method='put')
@attr(operation='create w/non-graphic authorization')
@attr(assertion='fails 403')
@nose.with_setup(teardown=_clear_custom_headers)
@attr('fails_on_rgw')
@attr('fails_strict_rfc2616')
def test_object_create_bad_authorization_unreadable():
key = _setup_bad_object({'Authorization': '\x07'})
e = assert_raises(boto.exception.S3ResponseError, key.set_contents_from_string, 'bar')
eq(e.status, 403)
eq(e.reason, 'Forbidden')
eq(e.error_code, 'AccessDenied')
@tag('auth_common')
@attr(resource='object')
@attr(method='put')
@attr(operation='create w/empty authorization')
@attr(assertion='fails 403')
@nose.with_setup(teardown=_clear_custom_headers)
def test_object_create_bad_authorization_empty():
key = _setup_bad_object({'Authorization': ''})
e = assert_raises(boto.exception.S3ResponseError, key.set_contents_from_string, 'bar')
eq(e.status, 403)
eq(e.reason, 'Forbidden')
eq(e.error_code, 'AccessDenied')
# the teardown is really messed up here. check it out
@tag('auth_common')
@attr(resource='object')
@attr(method='put')
@attr(operation='create w/no authorization')
@attr(assertion='fails 403')
@nose.with_setup(teardown=_clear_custom_headers)
def test_object_create_bad_authorization_none():
key = _setup_bad_object(remove=('Authorization',))
e = assert_raises(boto.exception.S3ResponseError, key.set_contents_from_string, 'bar')
eq(e.status, 403)
eq(e.reason, 'Forbidden')
eq(e.error_code, 'AccessDenied')
@tag('auth_common')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/no content length')
@attr(assertion='succeeds')
@nose.with_setup(teardown=_clear_custom_headers)
def test_bucket_create_contentlength_none():
_add_custom_headers(remove=('Content-Length',))
get_new_bucket()
@tag('auth_common')
@attr(resource='bucket')
@attr(method='acls')
@attr(operation='set w/no content length')
@attr(assertion='succeeds')
@nose.with_setup(teardown=_clear_custom_headers)
def test_object_acl_create_contentlength_none():
bucket = get_new_bucket()
key = bucket.new_key('foo')
key.set_contents_from_string('blah')
_add_custom_headers(remove=('Content-Length',))
key.set_acl('public-read')
@tag('auth_common')
@attr(resource='bucket')
@attr(method='acls')
@attr(operation='set w/invalid permission')
@attr(assertion='fails 400')
@nose.with_setup(teardown=_clear_custom_headers)
def test_bucket_put_bad_canned_acl():
bucket = get_new_bucket()
_add_custom_headers({'x-amz-acl': 'public-ready'})
e = assert_raises(boto.exception.S3ResponseError, bucket.set_acl, 'public-read')
eq(e.status, 400)
# strangely, amazon doesn't report an error with a non-expect 100 also, our
# error comes back as html, and not xml as I normally expect
@tag('auth_common')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/expect 200')
@attr(assertion='garbage, but S3 succeeds!')
@nose.with_setup(teardown=_clear_custom_headers)
@attr('fails_on_rgw')
def test_bucket_create_bad_expect_mismatch():
_add_custom_headers({'Expect':200})
bucket = get_new_bucket()
# this is a really long test, and I don't know if it's valid...
# again, accepts this with no troubles
@tag('auth_common')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/expect empty')
@attr(assertion='garbage, but S3 succeeds!')
@nose.with_setup(teardown=_clear_custom_headers)
def test_bucket_create_bad_expect_empty():
_add_custom_headers({'Expect': ''})
bucket = get_new_bucket()
@tag('auth_common')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/expect nongraphic')
@attr(assertion='garbage, but S3 succeeds!')
# this is a really long test..
@nose.with_setup(teardown=_clear_custom_headers)
@attr('fails_on_rgw')
@attr('fails_strict_rfc2616')
def test_bucket_create_bad_expect_unreadable():
_add_custom_headers({'Expect': '\x07'})
bucket = get_new_bucket()
def _create_new_connection():
    """Build a fresh header-injecting connection from s3.main's settings,
    wrapped in a TargetConnection, for tests that must not disturb the
    shared s3.main connection."""
    # We're going to need to manually build a connection using bad authorization info.
    # But to save the day, lets just hijack the settings from s3.main. :)
    main = s3.main
    conn = HeaderS3Connection(
        aws_access_key_id=main.aws_access_key_id,
        aws_secret_access_key=main.aws_secret_access_key,
        is_secure=main.is_secure,
        port=main.port,
        host=main.host,
        calling_format=main.calling_format,
        )
    return TargetConnection(targets.main.default.conf, conn)
@nose.with_setup(teardown=_clear_custom_headers)
@attr('fails_on_rgw')
@tag('auth_common')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/empty content length')
@attr(assertion='fails 400')
def test_bucket_create_bad_contentlength_empty():
    """An empty Content-Length header must be rejected with 400."""
    conn = _create_new_connection()
    _add_custom_headers({'Content-Length': ''})
    err = assert_raises(boto.exception.S3ResponseError, get_new_bucket, conn)
    eq(err.status, 400)
    eq(err.reason.lower(), 'bad request')  # some proxies vary the case
@nose.with_setup(teardown=_clear_custom_headers)
@attr('fails_on_mod_proxy_fcgi')
@tag('auth_common')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/negative content length')
@attr(assertion='fails 400')
def test_bucket_create_bad_contentlength_negative():
    """A negative Content-Length must be rejected with 400."""
    _add_custom_headers({'Content-Length': -1})
    err = assert_raises(boto.exception.S3ResponseError, get_new_bucket)
    eq(err.status, 400)
    eq(err.reason.lower(), 'bad request')  # some proxies vary the case
@nose.with_setup(teardown=_clear_custom_headers)
@tag('auth_common')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/no content length')
@attr(assertion='succeeds')
def test_bucket_create_bad_contentlength_none():
    """Omitting Content-Length entirely still lets the bucket PUT succeed."""
    _add_custom_headers(remove=('Content-Length',))
    get_new_bucket()
@nose.with_setup(teardown=_clear_custom_headers)
@attr('fails_on_mod_proxy_fcgi')
@attr('fails_strict_rfc2616')
@tag('auth_common')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/non-graphic content length')
@attr(assertion='fails 400')
def test_bucket_create_bad_contentlength_unreadable():
    """A non-graphic Content-Length must fail 400 with no S3 error code."""
    _add_custom_headers({'Content-Length': '\x07'})
    err = assert_raises(boto.exception.S3ResponseError, get_new_bucket)
    eq(err.status, 400)
    eq(err.reason.lower(), 'bad request')  # some proxies vary the case
    eq(err.error_code, None)
# the teardown is really messed up here. check it out
@nose.with_setup(teardown=_clear_custom_headers)
@attr('fails_on_rgw')
@attr('fails_strict_rfc2616')
@tag('auth_common')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/non-graphic authorization')
@attr(assertion='fails 403')
def test_bucket_create_bad_authorization_unreadable():
    """A non-graphic Authorization header must fail 403 AccessDenied."""
    _add_custom_headers({'Authorization': '\x07'})
    err = assert_raises(boto.exception.S3ResponseError, get_new_bucket)
    eq(err.status, 403)
    eq(err.reason, 'Forbidden')
    eq(err.error_code, 'AccessDenied')
@nose.with_setup(teardown=_clear_custom_headers)
@tag('auth_common')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/empty authorization')
@attr(assertion='fails 403')
def test_bucket_create_bad_authorization_empty():
    """An empty Authorization header must fail 403 AccessDenied."""
    _add_custom_headers({'Authorization': ''})
    err = assert_raises(boto.exception.S3ResponseError, get_new_bucket)
    eq(err.status, 403)
    eq(err.reason, 'Forbidden')
    eq(err.error_code, 'AccessDenied')
# the teardown is really messed up here. check it out
@nose.with_setup(teardown=_clear_custom_headers)
@tag('auth_common')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/no authorization')
@attr(assertion='fails 403')
def test_bucket_create_bad_authorization_none():
    """A request with no Authorization header must fail 403 AccessDenied."""
    _add_custom_headers(remove=('Authorization',))
    err = assert_raises(boto.exception.S3ResponseError, get_new_bucket)
    eq(err.status, 403)
    eq(err.reason, 'Forbidden')
    eq(err.error_code, 'AccessDenied')
#
# AWS2 specific tests
#
@nose.with_setup(teardown=_clear_custom_headers)
@tag('auth_aws2')
@attr(resource='object')
@attr(method='put')
@attr(operation='create w/invalid MD5')
@attr(assertion='fails 400')
def test_object_create_bad_md5_invalid_garbage_aws2():
    """Garbage Content-MD5 under AWS2 auth must fail 400 InvalidDigest."""
    check_aws2_support()
    k = _setup_bad_object({'Content-MD5': 'AWS HAHAHA'})
    err = assert_raises(boto.exception.S3ResponseError, k.set_contents_from_string, 'bar')
    eq(err.status, 400)
    eq(err.reason.lower(), 'bad request')  # some proxies vary the case
    eq(err.error_code, 'InvalidDigest')
@nose.with_setup(teardown=_clear_custom_headers)
@tag('auth_aws2')
@attr(resource='object')
@attr(method='put')
@attr(operation='create w/content length too short')
@attr(assertion='fails 400')
def test_object_create_bad_contentlength_mismatch_below_aws2():
    """A Content-Length one byte short of the body must fail 400 BadDigest."""
    check_aws2_support()
    payload = 'bar'
    short_len = len(payload) - 1
    k = _setup_bad_object({'Content-Length': short_len})
    err = assert_raises(boto.exception.S3ResponseError, k.set_contents_from_string, payload)
    eq(err.status, 400)
    eq(err.reason.lower(), 'bad request')  # some proxies vary the case
    eq(err.error_code, 'BadDigest')
@nose.with_setup(teardown=_clear_custom_headers)
@tag('auth_aws2')
@attr(resource='object')
@attr(method='put')
@attr(operation='create w/incorrect authorization')
@attr(assertion='fails 403')
def test_object_create_bad_authorization_incorrect_aws2():
    """A well-formed but wrong AWS2 signature must be refused with 403."""
    check_aws2_support()
    k = _setup_bad_object({'Authorization': 'AWS AKIAIGR7ZNNBHC5BKSUB:FWeDfwojDSdS2Ztmpfeubhd9isU='})
    err = assert_raises(boto.exception.S3ResponseError, k.set_contents_from_string, 'bar')
    eq(err.status, 403)
    eq(err.reason, 'Forbidden')
    assert err.error_code in ('AccessDenied', 'SignatureDoesNotMatch', 'InvalidAccessKeyId')
@nose.with_setup(teardown=_clear_custom_headers)
@tag('auth_aws2')
@attr(resource='object')
@attr(method='put')
@attr(operation='create w/invalid authorization')
@attr(assertion='fails 400')
def test_object_create_bad_authorization_invalid_aws2():
    """A malformed AWS2 Authorization value must fail 400 InvalidArgument."""
    check_aws2_support()
    k = _setup_bad_object({'Authorization': 'AWS HAHAHA'})
    err = assert_raises(boto.exception.S3ResponseError, k.set_contents_from_string, 'bar')
    eq(err.status, 400)
    eq(err.reason.lower(), 'bad request')  # some proxies vary the case
    eq(err.error_code, 'InvalidArgument')
@nose.with_setup(teardown=_clear_custom_headers)
@tag('auth_aws2')
@attr(resource='object')
@attr(method='put')
@attr(operation='create w/empty user agent')
@attr(assertion='succeeds')
def test_object_create_bad_ua_empty_aws2():
    """An empty User-Agent does not affect AWS2-signed object PUTs."""
    check_aws2_support()
    k = _setup_bad_object({'User-Agent': ''})
    k.set_contents_from_string('bar')
@nose.with_setup(teardown=_clear_custom_headers)
@attr('fails_strict_rfc2616')
@tag('auth_aws2')
@attr(resource='object')
@attr(method='put')
@attr(operation='create w/non-graphic user agent')
@attr(assertion='succeeds')
def test_object_create_bad_ua_unreadable_aws2():
    """A non-graphic User-Agent does not affect AWS2-signed object PUTs."""
    check_aws2_support()
    k = _setup_bad_object({'User-Agent': '\x07'})
    k.set_contents_from_string('bar')
@nose.with_setup(teardown=_clear_custom_headers)
@tag('auth_aws2')
@attr(resource='object')
@attr(method='put')
@attr(operation='create w/no user agent')
@attr(assertion='succeeds')
def test_object_create_bad_ua_none_aws2():
    """Dropping the User-Agent header does not affect AWS2 object PUTs."""
    check_aws2_support()
    k = _setup_bad_object(remove=('User-Agent',))
    k.set_contents_from_string('bar')
@nose.with_setup(teardown=_clear_custom_headers)
@tag('auth_aws2')
@attr(resource='object')
@attr(method='put')
@attr(operation='create w/invalid date')
@attr(assertion='fails 403')
def test_object_create_bad_date_invalid_aws2():
    """An unparseable Date under AWS2 auth must fail 403 AccessDenied."""
    check_aws2_support()
    k = _setup_bad_object({'Date': 'Bad Date'})
    err = assert_raises(boto.exception.S3ResponseError, k.set_contents_from_string, 'bar')
    eq(err.status, 403)
    eq(err.reason, 'Forbidden')
    eq(err.error_code, 'AccessDenied')
@nose.with_setup(teardown=_clear_custom_headers)
@tag('auth_aws2')
@attr(resource='object')
@attr(method='put')
@attr(operation='create w/empty date')
@attr(assertion='fails 403')
def test_object_create_bad_date_empty_aws2():
    """An empty Date header under AWS2 auth must fail 403 AccessDenied."""
    check_aws2_support()
    k = _setup_bad_object({'Date': ''})
    err = assert_raises(boto.exception.S3ResponseError, k.set_contents_from_string, 'bar')
    eq(err.status, 403)
    eq(err.reason, 'Forbidden')
    eq(err.error_code, 'AccessDenied')
@nose.with_setup(teardown=_clear_custom_headers)
@attr('fails_strict_rfc2616')
@tag('auth_aws2')
@attr(resource='object')
@attr(method='put')
@attr(operation='create w/non-graphic date')
@attr(assertion='fails 403')
def test_object_create_bad_date_unreadable_aws2():
    """A non-graphic Date under AWS2 auth must fail 403 AccessDenied."""
    check_aws2_support()
    k = _setup_bad_object({'Date': '\x07'})
    err = assert_raises(boto.exception.S3ResponseError, k.set_contents_from_string, 'bar')
    eq(err.status, 403)
    eq(err.reason, 'Forbidden')
    eq(err.error_code, 'AccessDenied')
@nose.with_setup(teardown=_clear_custom_headers)
@tag('auth_aws2')
@attr(resource='object')
@attr(method='put')
@attr(operation='create w/no date')
@attr(assertion='fails 403')
def test_object_create_bad_date_none_aws2():
    """A missing Date header under AWS2 auth must fail 403 AccessDenied."""
    check_aws2_support()
    k = _setup_bad_object(remove=('Date',))
    err = assert_raises(boto.exception.S3ResponseError, k.set_contents_from_string, 'bar')
    eq(err.status, 403)
    eq(err.reason, 'Forbidden')
    eq(err.error_code, 'AccessDenied')
@nose.with_setup(teardown=_clear_custom_headers)
@tag('auth_aws2')
@attr(resource='object')
@attr(method='put')
@attr(operation='create w/date in past')
@attr(assertion='fails 403')
def test_object_create_bad_date_before_today_aws2():
    """A stale (2010) Date under AWS2 auth must fail RequestTimeTooSkewed."""
    check_aws2_support()
    k = _setup_bad_object({'Date': 'Tue, 07 Jul 2010 21:53:04 GMT'})
    err = assert_raises(boto.exception.S3ResponseError, k.set_contents_from_string, 'bar')
    eq(err.status, 403)
    eq(err.reason, 'Forbidden')
    eq(err.error_code, 'RequestTimeTooSkewed')
@nose.with_setup(teardown=_clear_custom_headers)
@tag('auth_aws2')
@attr(resource='object')
@attr(method='put')
@attr(operation='create w/date in future')
@attr(assertion='fails 403')
def test_object_create_bad_date_after_today_aws2():
    """A future (2030) Date under AWS2 auth must fail RequestTimeTooSkewed."""
    check_aws2_support()
    k = _setup_bad_object({'Date': 'Tue, 07 Jul 2030 21:53:04 GMT'})
    err = assert_raises(boto.exception.S3ResponseError, k.set_contents_from_string, 'bar')
    eq(err.status, 403)
    eq(err.reason, 'Forbidden')
    eq(err.error_code, 'RequestTimeTooSkewed')
@nose.with_setup(teardown=_clear_custom_headers)
@tag('auth_aws2')
@attr(resource='object')
@attr(method='put')
@attr(operation='create w/date before epoch')
@attr(assertion='fails 403')
def test_object_create_bad_date_before_epoch_aws2():
    """A pre-epoch (1950) Date under AWS2 auth must fail 403 AccessDenied."""
    check_aws2_support()
    k = _setup_bad_object({'Date': 'Tue, 07 Jul 1950 21:53:04 GMT'})
    err = assert_raises(boto.exception.S3ResponseError, k.set_contents_from_string, 'bar')
    eq(err.status, 403)
    eq(err.reason, 'Forbidden')
    eq(err.error_code, 'AccessDenied')
@nose.with_setup(teardown=_clear_custom_headers)
@tag('auth_aws2')
@attr(resource='object')
@attr(method='put')
@attr(operation='create w/date after 9999')
@attr(assertion='fails 403')
def test_object_create_bad_date_after_end_aws2():
    """A year-9999 Date under AWS2 auth must fail RequestTimeTooSkewed."""
    check_aws2_support()
    k = _setup_bad_object({'Date': 'Tue, 07 Jul 9999 21:53:04 GMT'})
    err = assert_raises(boto.exception.S3ResponseError, k.set_contents_from_string, 'bar')
    eq(err.status, 403)
    eq(err.reason, 'Forbidden')
    eq(err.error_code, 'RequestTimeTooSkewed')
@nose.with_setup(teardown=_clear_custom_headers)
@tag('auth_aws2')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/invalid authorization')
@attr(assertion='fails 400')
def test_bucket_create_bad_authorization_invalid_aws2():
    """A malformed AWS2 Authorization on a bucket PUT must fail 400."""
    check_aws2_support()
    _add_custom_headers({'Authorization': 'AWS HAHAHA'})
    err = assert_raises(boto.exception.S3ResponseError, get_new_bucket)
    eq(err.status, 400)
    eq(err.reason.lower(), 'bad request')  # some proxies vary the case
    eq(err.error_code, 'InvalidArgument')
@nose.with_setup(teardown=_clear_custom_headers)
@tag('auth_aws2')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/empty user agent')
@attr(assertion='succeeds')
def test_bucket_create_bad_ua_empty_aws2():
    """An empty User-Agent does not affect AWS2-signed bucket PUTs."""
    check_aws2_support()
    _add_custom_headers({'User-Agent': ''})
    get_new_bucket()
@nose.with_setup(teardown=_clear_custom_headers)
@attr('fails_strict_rfc2616')
@tag('auth_aws2')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/non-graphic user agent')
@attr(assertion='succeeds')
def test_bucket_create_bad_ua_unreadable_aws2():
    """A non-graphic User-Agent does not affect AWS2-signed bucket PUTs."""
    check_aws2_support()
    _add_custom_headers({'User-Agent': '\x07'})
    get_new_bucket()
@nose.with_setup(teardown=_clear_custom_headers)
@tag('auth_aws2')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/no user agent')
@attr(assertion='succeeds')
def test_bucket_create_bad_ua_none_aws2():
    """Dropping the User-Agent header does not affect AWS2 bucket PUTs."""
    check_aws2_support()
    _add_custom_headers(remove=('User-Agent',))
    get_new_bucket()
@nose.with_setup(teardown=_clear_custom_headers)
@tag('auth_aws2')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/invalid date')
@attr(assertion='fails 403')
def test_bucket_create_bad_date_invalid_aws2():
    """An unparseable Date on an AWS2 bucket PUT must fail AccessDenied."""
    check_aws2_support()
    _add_custom_headers({'Date': 'Bad Date'})
    err = assert_raises(boto.exception.S3ResponseError, get_new_bucket)
    eq(err.status, 403)
    eq(err.reason, 'Forbidden')
    eq(err.error_code, 'AccessDenied')
@nose.with_setup(teardown=_clear_custom_headers)
@tag('auth_aws2')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/empty date')
@attr(assertion='fails 403')
def test_bucket_create_bad_date_empty_aws2():
    """An empty Date on an AWS2 bucket PUT must fail 403 AccessDenied."""
    check_aws2_support()
    _add_custom_headers({'Date': ''})
    err = assert_raises(boto.exception.S3ResponseError, get_new_bucket)
    eq(err.status, 403)
    eq(err.reason, 'Forbidden')
    eq(err.error_code, 'AccessDenied')
@nose.with_setup(teardown=_clear_custom_headers)
@attr('fails_strict_rfc2616')
@tag('auth_aws2')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/non-graphic date')
@attr(assertion='fails 403')
def test_bucket_create_bad_date_unreadable_aws2():
    """A non-graphic Date on an AWS2 bucket PUT must fail AccessDenied."""
    check_aws2_support()
    _add_custom_headers({'Date': '\x07'})
    err = assert_raises(boto.exception.S3ResponseError, get_new_bucket)
    eq(err.status, 403)
    eq(err.reason, 'Forbidden')
    eq(err.error_code, 'AccessDenied')
@nose.with_setup(teardown=_clear_custom_headers)
@tag('auth_aws2')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/no date')
@attr(assertion='fails 403')
def test_bucket_create_bad_date_none_aws2():
    """A missing Date on an AWS2 bucket PUT must fail 403 AccessDenied."""
    check_aws2_support()
    _add_custom_headers(remove=('Date',))
    err = assert_raises(boto.exception.S3ResponseError, get_new_bucket)
    eq(err.status, 403)
    eq(err.reason, 'Forbidden')
    eq(err.error_code, 'AccessDenied')
@nose.with_setup(teardown=_clear_custom_headers)
@tag('auth_aws2')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/date in past')
@attr(assertion='fails 403')
def test_bucket_create_bad_date_before_today_aws2():
    """A stale (2010) Date on an AWS2 bucket PUT fails RequestTimeTooSkewed."""
    check_aws2_support()
    _add_custom_headers({'Date': 'Tue, 07 Jul 2010 21:53:04 GMT'})
    err = assert_raises(boto.exception.S3ResponseError, get_new_bucket)
    eq(err.status, 403)
    eq(err.reason, 'Forbidden')
    eq(err.error_code, 'RequestTimeTooSkewed')
@nose.with_setup(teardown=_clear_custom_headers)
@tag('auth_aws2')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/date in future')
@attr(assertion='fails 403')
def test_bucket_create_bad_date_after_today_aws2():
    """A future (2030) Date on an AWS2 bucket PUT fails RequestTimeTooSkewed."""
    check_aws2_support()
    _add_custom_headers({'Date': 'Tue, 07 Jul 2030 21:53:04 GMT'})
    err = assert_raises(boto.exception.S3ResponseError, get_new_bucket)
    eq(err.status, 403)
    eq(err.reason, 'Forbidden')
    eq(err.error_code, 'RequestTimeTooSkewed')
@nose.with_setup(teardown=_clear_custom_headers)
@tag('auth_aws2')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/date before epoch')
@attr(assertion='fails 403')
def test_bucket_create_bad_date_before_epoch_aws2():
    """A pre-epoch (1950) Date on an AWS2 bucket PUT fails AccessDenied."""
    check_aws2_support()
    _add_custom_headers({'Date': 'Tue, 07 Jul 1950 21:53:04 GMT'})
    err = assert_raises(boto.exception.S3ResponseError, get_new_bucket)
    eq(err.status, 403)
    eq(err.reason, 'Forbidden')
    eq(err.error_code, 'AccessDenied')
#
# AWS4 specific tests
#
def check_aws4_support():
    """Skip the current test unless SigV4 is enabled via the S3_USE_SIGV4 env var."""
    if os.environ.get('S3_USE_SIGV4') is None:
        raise SkipTest
def check_aws2_support():
    """Skip the current test when SigV4 is enabled (AWS2-only test)."""
    if os.environ.get('S3_USE_SIGV4') is not None:
        raise SkipTest
@nose.with_setup(teardown=_clear_custom_headers)
@tag('auth_aws4')
@attr(resource='object')
@attr(method='put')
@attr(operation='create w/invalid MD5')
@attr(assertion='fails 400')
def test_object_create_bad_md5_invalid_garbage_aws4():
    """Garbage Content-MD5 under SigV4 must fail 400 InvalidDigest."""
    check_aws4_support()
    k = _setup_bad_object({'Content-MD5': 'AWS4 HAHAHA'})
    err = assert_raises(boto.exception.S3ResponseError, k.set_contents_from_string, 'bar')
    eq(err.status, 400)
    eq(err.reason.lower(), 'bad request')  # some proxies vary the case
    eq(err.error_code, 'InvalidDigest')
@nose.with_setup(teardown=_clear_custom_headers)
@tag('auth_aws4')
@attr(resource='object')
@attr(method='put')
@attr(operation='create w/content length too short')
@attr(assertion='fails 400')
def test_object_create_bad_contentlength_mismatch_below_aws4():
    """Short Content-Length under SigV4 fails XAmzContentSHA256Mismatch."""
    check_aws4_support()
    payload = 'bar'
    short_len = len(payload) - 1
    k = _setup_bad_object({'Content-Length': short_len})
    err = assert_raises(boto.exception.S3ResponseError, k.set_contents_from_string, payload)
    eq(err.status, 400)
    eq(err.reason.lower(), 'bad request')  # some proxies vary the case
    eq(err.error_code, 'XAmzContentSHA256Mismatch')
@nose.with_setup(teardown=_clear_custom_headers)
@tag('auth_aws4')
@attr(resource='object')
@attr(method='put')
@attr(operation='create w/incorrect authorization')
@attr(assertion='fails 403')
def test_object_create_bad_authorization_incorrect_aws4():
    """A well-formed but wrong SigV4 signature must be refused with 403."""
    check_aws4_support()
    k = _setup_bad_object({'Authorization': 'AWS4-HMAC-SHA256 Credential=AKIAIGR7ZNNBHC5BKSUB/20150930/us-east-1/s3/aws4_request,SignedHeaders=host;user-agent,Signature=FWeDfwojDSdS2Ztmpfeubhd9isU='})
    err = assert_raises(boto.exception.S3ResponseError, k.set_contents_from_string, 'bar')
    eq(err.status, 403)
    eq(err.reason, 'Forbidden')
    assert err.error_code in ('AccessDenied', 'SignatureDoesNotMatch', 'InvalidAccessKeyId')
@nose.with_setup(teardown=_clear_custom_headers)
@tag('auth_aws4')
@attr(resource='object')
@attr(method='put')
@attr(operation='create w/invalid authorization')
@attr(assertion='fails 400')
def test_object_create_bad_authorization_invalid_aws4():
    """A malformed SigV4 Authorization value must be refused with 400."""
    check_aws4_support()
    k = _setup_bad_object({'Authorization': 'AWS4-HMAC-SHA256 Credential=HAHAHA'})
    err = assert_raises(boto.exception.S3ResponseError, k.set_contents_from_string, 'bar')
    eq(err.status, 400)
    eq(err.reason.lower(), 'bad request')  # some proxies vary the case
    assert err.error_code in ('AuthorizationHeaderMalformed', 'InvalidArgument')
@nose.with_setup(teardown=_clear_custom_headers)
@tag('auth_aws4')
@attr(resource='object')
@attr(method='put')
@attr(operation='create w/empty user agent')
@attr(assertion='fails 403')
def test_object_create_bad_ua_empty_aws4():
    """Blanking the signed User-Agent breaks the SigV4 signature (403)."""
    check_aws4_support()
    k = _setup_bad_object({'User-Agent': ''})
    err = assert_raises(boto.exception.S3ResponseError, k.set_contents_from_string, 'bar')
    eq(err.status, 403)
    eq(err.reason, 'Forbidden')
    eq(err.error_code, 'SignatureDoesNotMatch')
@nose.with_setup(teardown=_clear_custom_headers)
@attr('fails_strict_rfc2616')
@tag('auth_aws4')
@attr(resource='object')
@attr(method='put')
@attr(operation='create w/non-graphic user agent')
@attr(assertion='fails 403')
def test_object_create_bad_ua_unreadable_aws4():
    """Corrupting the signed User-Agent breaks the SigV4 signature (403)."""
    check_aws4_support()
    k = _setup_bad_object({'User-Agent': '\x07'})
    err = assert_raises(boto.exception.S3ResponseError, k.set_contents_from_string, 'bar')
    eq(err.status, 403)
    eq(err.reason, 'Forbidden')
    eq(err.error_code, 'SignatureDoesNotMatch')
@nose.with_setup(teardown=_clear_custom_headers)
@tag('auth_aws4')
@attr(resource='object')
@attr(method='put')
@attr(operation='create w/no user agent')
@attr(assertion='fails 403')
def test_object_create_bad_ua_none_aws4():
    """Dropping the signed User-Agent breaks the SigV4 signature (403)."""
    check_aws4_support()
    k = _setup_bad_object(remove=('User-Agent',))
    err = assert_raises(boto.exception.S3ResponseError, k.set_contents_from_string, 'bar')
    eq(err.status, 403)
    eq(err.reason, 'Forbidden')
    eq(err.error_code, 'SignatureDoesNotMatch')
@nose.with_setup(teardown=_clear_custom_headers)
@tag('auth_aws4')
@attr(resource='object')
@attr(method='put')
@attr(operation='create w/invalid date')
@attr(assertion='succeeds')
def test_object_create_bad_date_invalid_aws4():
    """Under SigV4 the Date header is not checked; a bad value still succeeds."""
    check_aws4_support()
    k = _setup_bad_object({'Date': 'Bad Date'})
    k.set_contents_from_string('bar')
@nose.with_setup(teardown=_clear_custom_headers)
@tag('auth_aws4')
@attr(resource='object')
@attr(method='put')
@attr(operation='create w/invalid x-amz-date')
@attr(assertion='fails 403')
def test_object_create_bad_amz_date_invalid_aws4():
    """An unparseable X-Amz-Date under SigV4 must be refused with 403."""
    check_aws4_support()
    k = _setup_bad_object({'X-Amz-Date': 'Bad Date'})
    err = assert_raises(boto.exception.S3ResponseError, k.set_contents_from_string, 'bar')
    eq(err.status, 403)
    eq(err.reason, 'Forbidden')
    assert err.error_code in ('AccessDenied', 'SignatureDoesNotMatch')
@nose.with_setup(teardown=_clear_custom_headers)
@tag('auth_aws4')
@attr(resource='object')
@attr(method='put')
@attr(operation='create w/empty date')
@attr(assertion='succeeds')
def test_object_create_bad_date_empty_aws4():
    """An empty Date header is ignored under SigV4; the PUT succeeds."""
    check_aws4_support()
    k = _setup_bad_object({'Date': ''})
    k.set_contents_from_string('bar')
@nose.with_setup(teardown=_clear_custom_headers)
@tag('auth_aws4')
@attr(resource='object')
@attr(method='put')
@attr(operation='create w/empty x-amz-date')
@attr(assertion='fails 403')
def test_object_create_bad_amz_date_empty_aws4():
    """An empty X-Amz-Date under SigV4 must be refused with 403."""
    check_aws4_support()
    k = _setup_bad_object({'X-Amz-Date': ''})
    err = assert_raises(boto.exception.S3ResponseError, k.set_contents_from_string, 'bar')
    eq(err.status, 403)
    eq(err.reason, 'Forbidden')
    assert err.error_code in ('AccessDenied', 'SignatureDoesNotMatch')
@nose.with_setup(teardown=_clear_custom_headers)
@attr('fails_strict_rfc2616')
@tag('auth_aws4')
@attr(resource='object')
@attr(method='put')
@attr(operation='create w/non-graphic date')
@attr(assertion='fails 403')
def test_object_create_bad_date_unreadable_aws4():
    """A non-graphic Date breaks the signed headers under SigV4 (403)."""
    check_aws4_support()
    k = _setup_bad_object({'Date': '\x07'})
    err = assert_raises(boto.exception.S3ResponseError, k.set_contents_from_string, 'bar')
    eq(err.status, 403)
    eq(err.reason, 'Forbidden')
    eq(err.error_code, 'SignatureDoesNotMatch')
@nose.with_setup(teardown=_clear_custom_headers)
@attr('fails_strict_rfc2616')
@tag('auth_aws4')
@attr(resource='object')
@attr(method='put')
@attr(operation='create w/non-graphic x-amz-date')
@attr(assertion='fails 403')
def test_object_create_bad_amz_date_unreadable_aws4():
    """A non-graphic X-Amz-Date under SigV4 must be refused with 403."""
    check_aws4_support()
    k = _setup_bad_object({'X-Amz-Date': '\x07'})
    err = assert_raises(boto.exception.S3ResponseError, k.set_contents_from_string, 'bar')
    eq(err.status, 403)
    eq(err.reason, 'Forbidden')
    assert err.error_code in ('AccessDenied', 'SignatureDoesNotMatch')
@nose.with_setup(teardown=_clear_custom_headers)
@tag('auth_aws4')
@attr(resource='object')
@attr(method='put')
@attr(operation='create w/no date')
@attr(assertion='succeeds')
def test_object_create_bad_date_none_aws4():
    """A missing Date header is ignored under SigV4; the PUT succeeds."""
    check_aws4_support()
    k = _setup_bad_object(remove=('Date',))
    k.set_contents_from_string('bar')
@nose.with_setup(teardown=_clear_custom_headers)
@tag('auth_aws4')
@attr(resource='object')
@attr(method='put')
@attr(operation='create w/no x-amz-date')
@attr(assertion='fails 403')
def test_object_create_bad_amz_date_none_aws4():
    """A missing X-Amz-Date under SigV4 must be refused with 403."""
    check_aws4_support()
    k = _setup_bad_object(remove=('X-Amz-Date',))
    err = assert_raises(boto.exception.S3ResponseError, k.set_contents_from_string, 'bar')
    eq(err.status, 403)
    eq(err.reason, 'Forbidden')
    assert err.error_code in ('AccessDenied', 'SignatureDoesNotMatch')
@nose.with_setup(teardown=_clear_custom_headers)
@tag('auth_aws4')
@attr(resource='object')
@attr(method='put')
@attr(operation='create w/date in past')
@attr(assertion='succeeds')
def test_object_create_bad_date_before_today_aws4():
    """A stale Date header is ignored under SigV4; the PUT succeeds."""
    check_aws4_support()
    k = _setup_bad_object({'Date': 'Tue, 07 Jul 2010 21:53:04 GMT'})
    k.set_contents_from_string('bar')
@nose.with_setup(teardown=_clear_custom_headers)
@tag('auth_aws4')
@attr(resource='object')
@attr(method='put')
@attr(operation='create w/x-amz-date in past')
@attr(assertion='fails 403')
def test_object_create_bad_amz_date_before_today_aws4():
    """A stale (2010) X-Amz-Date under SigV4 must be refused with 403."""
    check_aws4_support()
    k = _setup_bad_object({'X-Amz-Date': '20100707T215304Z'})
    err = assert_raises(boto.exception.S3ResponseError, k.set_contents_from_string, 'bar')
    eq(err.status, 403)
    eq(err.reason, 'Forbidden')
    assert err.error_code in ('RequestTimeTooSkewed', 'SignatureDoesNotMatch')
@nose.with_setup(teardown=_clear_custom_headers)
@tag('auth_aws4')
@attr(resource='object')
@attr(method='put')
@attr(operation='create w/date in future')
@attr(assertion='succeeds')
def test_object_create_bad_date_after_today_aws4():
    """A future Date header is ignored under SigV4; the PUT succeeds."""
    check_aws4_support()
    k = _setup_bad_object({'Date': 'Tue, 07 Jul 2030 21:53:04 GMT'})
    k.set_contents_from_string('bar')
@nose.with_setup(teardown=_clear_custom_headers)
@tag('auth_aws4')
@attr(resource='object')
@attr(method='put')
@attr(operation='create w/x-amz-date in future')
@attr(assertion='fails 403')
def test_object_create_bad_amz_date_after_today_aws4():
    """A future (2030) X-Amz-Date under SigV4 must be refused with 403."""
    check_aws4_support()
    k = _setup_bad_object({'X-Amz-Date': '20300707T215304Z'})
    err = assert_raises(boto.exception.S3ResponseError, k.set_contents_from_string, 'bar')
    eq(err.status, 403)
    eq(err.reason, 'Forbidden')
    assert err.error_code in ('RequestTimeTooSkewed', 'SignatureDoesNotMatch')
@nose.with_setup(teardown=_clear_custom_headers)
@tag('auth_aws4')
@attr(resource='object')
@attr(method='put')
@attr(operation='create w/date before epoch')
@attr(assertion='succeeds')
def test_object_create_bad_date_before_epoch_aws4():
    """A pre-epoch Date header is ignored under SigV4; the PUT succeeds."""
    check_aws4_support()
    k = _setup_bad_object({'Date': 'Tue, 07 Jul 1950 21:53:04 GMT'})
    k.set_contents_from_string('bar')
@nose.with_setup(teardown=_clear_custom_headers)
@tag('auth_aws4')
@attr(resource='object')
@attr(method='put')
@attr(operation='create w/x-amz-date before epoch')
@attr(assertion='fails 403')
def test_object_create_bad_amz_date_before_epoch_aws4():
    """A pre-epoch (1950) X-Amz-Date under SigV4 must be refused with 403."""
    check_aws4_support()
    k = _setup_bad_object({'X-Amz-Date': '19500707T215304Z'})
    err = assert_raises(boto.exception.S3ResponseError, k.set_contents_from_string, 'bar')
    eq(err.status, 403)
    eq(err.reason, 'Forbidden')
    assert err.error_code in ('AccessDenied', 'SignatureDoesNotMatch')
@tag('auth_aws4')
@attr(resource='object')
@attr(method='put')
@attr(operation='create w/date after 9999')
@attr(assertion='succeeds')
@nose.with_setup(teardown=_clear_custom_headers)
def test_object_create_bad_date_after_end_aws4():
    """A year-9999 Date header is ignored under SigV4; the PUT succeeds.

    Fix: the @attr(assertion=...) previously said 'fails 403', which
    contradicted the body (a plain successful PUT) and the other aws4
    Date-header tests, all of which are tagged 'succeeds'.
    """
    check_aws4_support()
    key = _setup_bad_object({'Date': 'Tue, 07 Jul 9999 21:53:04 GMT'})
    key.set_contents_from_string('bar')
@nose.with_setup(teardown=_clear_custom_headers)
@tag('auth_aws4')
@attr(resource='object')
@attr(method='put')
@attr(operation='create w/x-amz-date after 9999')
@attr(assertion='fails 403')
def test_object_create_bad_amz_date_after_end_aws4():
    """A year-9999 X-Amz-Date under SigV4 must be refused with 403."""
    check_aws4_support()
    k = _setup_bad_object({'X-Amz-Date': '99990707T215304Z'})
    err = assert_raises(boto.exception.S3ResponseError, k.set_contents_from_string, 'bar')
    eq(err.status, 403)
    eq(err.reason, 'Forbidden')
    assert err.error_code in ('RequestTimeTooSkewed', 'SignatureDoesNotMatch')
@nose.with_setup(teardown=_clear_custom_headers)
@tag('auth_aws4')
@attr(resource='object')
@attr(method='put')
@attr(operation='create with missing signed custom header')
@attr(assertion='fails 403')
def test_object_create_missing_signed_custom_header_aws4():
    """Presign a PUT URL with 'x-amz-foo: bar' signed in, then send the
    request without that header; the signature no longer matches (403)."""
    check_aws4_support()
    method = 'PUT'
    expires_in = '100000'
    bucket = get_new_bucket()
    key = bucket.new_key('foo')
    body = 'zoo'
    # sign with 'x-amz-foo: bar' included...
    request_headers = {'x-amz-foo': 'bar'}
    url = key.generate_url(expires_in, method=method, headers=request_headers)
    parsed = urlparse(url)
    path = parsed.path + '?' + parsed.query
    # ...but do not actually send that header on the wire
    request_headers.pop('x-amz-foo')
    res = _make_raw_request(host=s3.main.host, port=s3.main.port, method=method, path=path,
                            body=body, request_headers=request_headers, secure=s3.main.is_secure)
    eq(res.status, 403)
    eq(res.reason, 'Forbidden')
@tag('auth_aws4')
@attr(resource='object')
@attr(method='put')
@attr(operation='create with missing signed header')
@attr(assertion='fails 403')
@nose.with_setup(teardown=_clear_custom_headers)
def test_object_create_missing_signed_header_aws4():
    """Strip the signed X-Amz-Expires query parameter from a presigned
    SigV4 PUT URL; the signature no longer matches, so the request must
    be rejected with 403 Forbidden.

    Fix: the attribute keyword was misspelled 'opearation', so this test
    never carried the operation metadata the attrib plugin filters on.
    """
    check_aws4_support()
    method = 'PUT'
    expires_in = '100000'
    bucket = get_new_bucket()
    key = bucket.new_key('foo')
    body = 'zoo'
    # compute a valid presigned URL first...
    request_headers = {}
    url = key.generate_url(expires_in, method=method, headers=request_headers)
    o = urlparse(url)
    path = o.path + '?' + o.query
    # ...then drop the signed 'X-Amz-Expires' query parameter
    target = r'&X-Amz-Expires=' + expires_in
    path = re.sub(target, '', path)
    res = _make_raw_request(host=s3.main.host, port=s3.main.port, method=method, path=path,
                            body=body, request_headers=request_headers, secure=s3.main.is_secure)
    eq(res.status, 403)
    eq(res.reason, 'Forbidden')
@nose.with_setup(teardown=_clear_custom_headers)
@tag('auth_aws4')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/invalid authorization')
@attr(assertion='fails 400')
def test_bucket_create_bad_authorization_invalid_aws4():
    """A malformed SigV4 Authorization on a bucket PUT must fail 400."""
    check_aws4_support()
    _add_custom_headers({'Authorization': 'AWS4 HAHAHA'})
    err = assert_raises(boto.exception.S3ResponseError, get_new_bucket)
    eq(err.status, 400)
    eq(err.reason.lower(), 'bad request')  # some proxies vary the case
    eq(err.error_code, 'InvalidArgument')
@nose.with_setup(teardown=_clear_custom_headers)
@tag('auth_aws4')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/empty user agent')
@attr(assertion='fails 403')
def test_bucket_create_bad_ua_empty_aws4():
    """Blanking the signed User-Agent breaks the SigV4 signature (403)."""
    check_aws4_support()
    _add_custom_headers({'User-Agent': ''})
    err = assert_raises(boto.exception.S3ResponseError, get_new_bucket)
    eq(err.status, 403)
    eq(err.reason, 'Forbidden')
    eq(err.error_code, 'SignatureDoesNotMatch')
@nose.with_setup(teardown=_clear_custom_headers)
@attr('fails_strict_rfc2616')
@tag('auth_aws4')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/non-graphic user agent')
@attr(assertion='fails 403')
def test_bucket_create_bad_ua_unreadable_aws4():
    """Corrupting the signed User-Agent breaks the SigV4 signature (403)."""
    check_aws4_support()
    _add_custom_headers({'User-Agent': '\x07'})
    err = assert_raises(boto.exception.S3ResponseError, get_new_bucket)
    eq(err.status, 403)
    eq(err.reason, 'Forbidden')
    eq(err.error_code, 'SignatureDoesNotMatch')
@nose.with_setup(teardown=_clear_custom_headers)
@tag('auth_aws4')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/no user agent')
@attr(assertion='fails 403')
def test_bucket_create_bad_ua_none_aws4():
    """Dropping the signed User-Agent breaks the SigV4 signature (403)."""
    check_aws4_support()
    _add_custom_headers(remove=('User-Agent',))
    err = assert_raises(boto.exception.S3ResponseError, get_new_bucket)
    eq(err.status, 403)
    eq(err.reason, 'Forbidden')
    eq(err.error_code, 'SignatureDoesNotMatch')
@nose.with_setup(teardown=_clear_custom_headers)
@tag('auth_aws4')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/invalid date')
@attr(assertion='succeeds')
def test_bucket_create_bad_date_invalid_aws4():
    """A bad Date header is ignored under SigV4; the bucket PUT succeeds."""
    check_aws4_support()
    _add_custom_headers({'Date': 'Bad Date'})
    get_new_bucket()
@nose.with_setup(teardown=_clear_custom_headers)
@tag('auth_aws4')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/invalid x-amz-date')
@attr(assertion='fails 403')
def test_bucket_create_bad_amz_date_invalid_aws4():
    """An unparseable X-Amz-Date on a SigV4 bucket PUT must fail 403."""
    check_aws4_support()
    _add_custom_headers({'X-Amz-Date': 'Bad Date'})
    err = assert_raises(boto.exception.S3ResponseError, get_new_bucket)
    eq(err.status, 403)
    eq(err.reason, 'Forbidden')
    assert err.error_code in ('AccessDenied', 'SignatureDoesNotMatch')
@nose.with_setup(teardown=_clear_custom_headers)
@tag('auth_aws4')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/empty date')
@attr(assertion='succeeds')
def test_bucket_create_bad_date_empty_aws4():
    """An empty Date header is ignored under SigV4; the bucket PUT succeeds."""
    check_aws4_support()
    _add_custom_headers({'Date': ''})
    get_new_bucket()
@nose.with_setup(teardown=_clear_custom_headers)
@tag('auth_aws4')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/empty x-amz-date')
@attr(assertion='fails 403')
def test_bucket_create_bad_amz_date_empty_aws4():
    """An empty X-Amz-Date on a SigV4 bucket PUT must fail 403."""
    check_aws4_support()
    _add_custom_headers({'X-Amz-Date': ''})
    err = assert_raises(boto.exception.S3ResponseError, get_new_bucket)
    eq(err.status, 403)
    eq(err.reason, 'Forbidden')
    assert err.error_code in ('AccessDenied', 'SignatureDoesNotMatch')
@nose.with_setup(teardown=_clear_custom_headers)
@attr('fails_strict_rfc2616')
@tag('auth_aws4')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/non-graphic date')
@attr(assertion='fails 403')
def test_bucket_create_bad_date_unreadable_aws4():
    """A non-graphic Date breaks the signed headers under SigV4 (403)."""
    check_aws4_support()
    _add_custom_headers({'Date': '\x07'})
    err = assert_raises(boto.exception.S3ResponseError, get_new_bucket)
    eq(err.status, 403)
    eq(err.reason, 'Forbidden')
    eq(err.error_code, 'SignatureDoesNotMatch')
@nose.with_setup(teardown=_clear_custom_headers)
@attr('fails_strict_rfc2616')
@tag('auth_aws4')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/non-graphic x-amz-date')
@attr(assertion='fails 403')
def test_bucket_create_bad_amz_date_unreadable_aws4():
    """A non-graphic X-Amz-Date on a SigV4 bucket PUT must fail 403."""
    check_aws4_support()
    _add_custom_headers({'X-Amz-Date': '\x07'})
    err = assert_raises(boto.exception.S3ResponseError, get_new_bucket)
    eq(err.status, 403)
    eq(err.reason, 'Forbidden')
    assert err.error_code in ('AccessDenied', 'SignatureDoesNotMatch')
@tag('auth_aws4')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/no date')
@attr(assertion='succeeds')
@nose.with_setup(teardown=_clear_custom_headers)
def test_bucket_create_bad_date_none_aws4():
    """AWS4 auth: removing the Date header entirely still succeeds."""
    check_aws4_support()
    _add_custom_headers(remove=('Date',))
    get_new_bucket()
@tag('auth_aws4')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/no x-amz-date')
@attr(assertion='fails 403')
@nose.with_setup(teardown=_clear_custom_headers)
def test_bucket_create_bad_amz_date_none_aws4():
    """AWS4 auth: removing X-Amz-Date must be rejected with 403."""
    check_aws4_support()
    _add_custom_headers(remove=('X-Amz-Date',))
    e = assert_raises(boto.exception.S3ResponseError, get_new_bucket)
    eq(e.status, 403)
    eq(e.reason, 'Forbidden')
    # Different server versions report either error code here.
    assert e.error_code in ('AccessDenied', 'SignatureDoesNotMatch')
@tag('auth_aws4')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/date in past')
@attr(assertion='succeeds')
@nose.with_setup(teardown=_clear_custom_headers)
def test_bucket_create_bad_date_before_today_aws4():
    """AWS4 auth: a stale Date header (not used for signing) still succeeds."""
    check_aws4_support()
    _add_custom_headers({'Date': 'Tue, 07 Jul 2010 21:53:04 GMT'})
    get_new_bucket()
@tag('auth_aws4')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/x-amz-date in past')
@attr(assertion='fails 403')
@nose.with_setup(teardown=_clear_custom_headers)
def test_bucket_create_bad_amz_date_before_today_aws4():
    """AWS4 auth: an X-Amz-Date far in the past must be rejected with 403."""
    check_aws4_support()
    _add_custom_headers({'X-Amz-Date': '20100707T215304Z'})
    e = assert_raises(boto.exception.S3ResponseError, get_new_bucket)
    eq(e.status, 403)
    eq(e.reason, 'Forbidden')
    # Skew detection vs. signature mismatch varies between server versions.
    assert e.error_code in ('RequestTimeTooSkewed', 'SignatureDoesNotMatch')
@tag('auth_aws4')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/date in future')
@attr(assertion='succeeds')
@nose.with_setup(teardown=_clear_custom_headers)
def test_bucket_create_bad_date_after_today_aws4():
    """AWS4 auth: a future Date header (not used for signing) still succeeds."""
    check_aws4_support()
    _add_custom_headers({'Date': 'Tue, 07 Jul 2030 21:53:04 GMT'})
    get_new_bucket()
@tag('auth_aws4')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/x-amz-date in future')
@attr(assertion='fails 403')
@nose.with_setup(teardown=_clear_custom_headers)
def test_bucket_create_bad_amz_date_after_today_aws4():
    """AWS4 auth: an X-Amz-Date far in the future must be rejected with 403."""
    check_aws4_support()
    _add_custom_headers({'X-Amz-Date': '20300707T215304Z'})
    e = assert_raises(boto.exception.S3ResponseError, get_new_bucket)
    eq(e.status, 403)
    eq(e.reason, 'Forbidden')
    # Skew detection vs. signature mismatch varies between server versions.
    assert e.error_code in ('RequestTimeTooSkewed', 'SignatureDoesNotMatch')
@tag('auth_aws4')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/date before epoch')
@attr(assertion='succeeds')
@nose.with_setup(teardown=_clear_custom_headers)
def test_bucket_create_bad_date_before_epoch_aws4():
    """AWS4 auth: a pre-epoch Date header (not used for signing) still succeeds."""
    check_aws4_support()
    _add_custom_headers({'Date': 'Tue, 07 Jul 1950 21:53:04 GMT'})
    get_new_bucket()
@tag('auth_aws4')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/x-amz-date before epoch')
@attr(assertion='fails 403')
@nose.with_setup(teardown=_clear_custom_headers)
def test_bucket_create_bad_amz_date_before_epoch_aws4():
    """AWS4 auth: a pre-epoch X-Amz-Date must be rejected with 403."""
    check_aws4_support()
    _add_custom_headers({'X-Amz-Date': '19500707T215304Z'})
    e = assert_raises(boto.exception.S3ResponseError, get_new_bucket)
    eq(e.status, 403)
    eq(e.reason, 'Forbidden')
    # Different server versions report either error code here.
    assert e.error_code in ('AccessDenied', 'SignatureDoesNotMatch')
| 31.509692
| 202
| 0.737859
| 7,708
| 55,268
| 4.997795
| 0.049429
| 0.01394
| 0.048594
| 0.056148
| 0.895076
| 0.883862
| 0.861148
| 0.855904
| 0.844171
| 0.825403
| 0
| 0.02526
| 0.118966
| 55,268
| 1,753
| 203
| 31.527667
| 0.76588
| 0.048021
| 0
| 0.786533
| 0
| 0.000716
| 0.198804
| 0.017517
| 0
| 0
| 0
| 0
| 0.137536
| 1
| 0.083811
| false
| 0.000716
| 0.017192
| 0.000716
| 0.105301
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
f45c8c7ec55723aea0353c0fc362af14b7f93ca9
| 3,167
|
py
|
Python
|
tests/test_cov.py
|
gferraro2019/python-meegkit
|
aed858dc3603a3b71e620df3f29da6ae1a8f68da
|
[
"BSD-3-Clause"
] | 80
|
2018-02-13T13:51:09.000Z
|
2022-03-31T19:35:09.000Z
|
tests/test_cov.py
|
gferraro2019/python-meegkit
|
aed858dc3603a3b71e620df3f29da6ae1a8f68da
|
[
"BSD-3-Clause"
] | 56
|
2019-03-13T14:55:42.000Z
|
2022-01-10T15:40:41.000Z
|
tests/test_cov.py
|
gferraro2019/python-meegkit
|
aed858dc3603a3b71e620df3f29da6ae1a8f68da
|
[
"BSD-3-Clause"
] | 23
|
2018-06-29T07:24:19.000Z
|
2022-03-21T09:25:51.000Z
|
import numpy as np
from numpy.testing import assert_almost_equal
from meegkit.utils import tscov, tsxcov, convmtx
def test_tscov():
    """Test time-shift covariance."""
    x = 2 * np.eye(3) + 0.1 * np.random.rand(3)
    x = x - np.mean(x, 0)

    # Compare 0-lag case with numpy.cov()
    c1, n1 = tscov(x, [0])
    c2 = np.cov(x, bias=True)
    assert_almost_equal(c1 / n1, c2)

    # Two-lag case ([0, -1]): check against the hand-computed block matrix.
    # (The original comment wrongly said "0-lag case" again.)
    x = 2 * np.eye(3)
    c1, n1 = tscov(x, [0, -1])
    assert_almost_equal(c1, np.array([[4, 0, 0, 4, 0, 0],
                                      [0, 0, 0, 0, 0, 0],
                                      [0, 0, 4, 0, 0, 4],
                                      [4, 0, 0, 4, 0, 0],
                                      [0, 0, 0, 0, 4, 0],
                                      [0, 0, 4, 0, 0, 4]]))
    c2, n2 = tsxcov(x, x, [0, -1])
    # Matlab reference output kept for cross-checking (not asserted here):
    # C3 = nt_tsxcov(x, x, 1:2)
    # C4 = nt_cov_lags(x, x, 1:2)
    # C3 =
    #      0     0     4     0     0     4
    #      0     0     0     0     0     0
    #      0     0     0     0     0     0
    # C4(:,:,1) =
    #      0     0     0     0     0     0
    #      0     4     0     4     0     0
    #      0     0     4     0     4     0
    #      0     4     0     4     0     0
    #      0     0     4     0     4     0
    #      0     0     0     0     0     0
    # C4(:,:,2) =
    #      0     0     0     0     0     0
    #      0     0     0     0     0     0
    #      0     0     4     4     0     0
    #      0     0     4     4     0     0
    #      0     0     0     0     0     0
    #      0     0     0     0     0     0
def test_convmtx():
    """Convmtx comparison with matlab."""
    h = [1, 2, 3, 2, 1]

    # 1-D input: expect the full (n x (n + len(h) - 1)) convolution matrix.
    X = convmtx(h, 7)
    print(X)
    np.testing.assert_array_equal(
        X,
        np.array([[1., 2., 3., 2., 1., 0., 0., 0., 0., 0., 0.],
                  [0., 1., 2., 3., 2., 1., 0., 0., 0., 0., 0.],
                  [0., 0., 1., 2., 3., 2., 1., 0., 0., 0., 0.],
                  [0., 0., 0., 1., 2., 3., 2., 1., 0., 0., 0.],
                  [0., 0., 0., 0., 1., 2., 3., 2., 1., 0., 0.],
                  [0., 0., 0., 0., 0., 1., 2., 3., 2., 1., 0.],
                  [0., 0., 0., 0., 0., 0., 1., 2., 3., 2., 1.],
                  ])
    )

    print()

    # 2-D row-vector input: expect the transposed/truncated layout instead.
    X = convmtx(np.array(h)[None, :], 7)
    print(X)
    np.testing.assert_equal(
        X,
        np.array([[1., 0., 0., 0., 0., 0., 0.],
                  [2., 1., 0., 0., 0., 0., 0.],
                  [3., 2., 1., 0., 0., 0., 0.],
                  [2., 3., 2., 1., 0., 0., 0.],
                  [1., 2., 3., 2., 1., 0., 0.],
                  [0., 1., 2., 3., 2., 1., 0.],
                  [0., 0., 1., 2., 3., 2., 1.],
                  [0., 0., 0., 1., 2., 3., 2.],
                  [0., 0., 0., 0., 1., 2., 3.],
                  [0., 0., 0., 0., 0., 1., 2.],
                  [0., 0., 0., 0., 0., 0., 1.],
                  ])
    )
if __name__ == '__main__':
    # Run a single test directly for quick manual checking.
    # import pytest
    # pytest.main([__file__])
    test_convmtx()
| 33.691489
| 73
| 0.275655
| 451
| 3,167
| 1.875831
| 0.124169
| 0.352246
| 0.407801
| 0.416076
| 0.535461
| 0.453901
| 0.399527
| 0.277778
| 0.271868
| 0.258865
| 0
| 0.219661
| 0.53426
| 3,167
| 93
| 74
| 34.053763
| 0.353898
| 0.255131
| 0
| 0.113208
| 0
| 0
| 0.003441
| 0
| 0
| 0
| 0
| 0
| 0.09434
| 1
| 0.037736
| false
| 0
| 0.056604
| 0
| 0.09434
| 0.056604
| 0
| 0
| 1
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
f479e51364c17b3bc4906d382831d0c01208e226
| 17,410
|
py
|
Python
|
fhir/resources/tests/test_explanationofbenefit.py
|
mmabey/fhir.resources
|
cc73718e9762c04726cd7de240c8f2dd5313cbe1
|
[
"BSD-3-Clause"
] | null | null | null |
fhir/resources/tests/test_explanationofbenefit.py
|
mmabey/fhir.resources
|
cc73718e9762c04726cd7de240c8f2dd5313cbe1
|
[
"BSD-3-Clause"
] | null | null | null |
fhir/resources/tests/test_explanationofbenefit.py
|
mmabey/fhir.resources
|
cc73718e9762c04726cd7de240c8f2dd5313cbe1
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Profile: http://hl7.org/fhir/StructureDefinition/ExplanationOfBenefit
Release: R4
Version: 4.0.1
Build ID: 9346c8cc45
Last updated: 2019-11-01T09:29:23.356+11:00
"""
import io
import json
import os
import unittest
import pytest
from .. import explanationofbenefit
from ..fhirdate import FHIRDate
from .fixtures import force_bytes
@pytest.mark.usefixtures("base_settings")
class ExplanationOfBenefitTests(unittest.TestCase):
    """Round-trip tests for the ExplanationOfBenefit resource examples."""

    def instantiate_from(self, filename):
        """Load *filename* from FHIR_UNITTEST_DATADIR (or cwd) and build the resource."""
        datadir = os.environ.get("FHIR_UNITTEST_DATADIR") or ""
        with io.open(os.path.join(datadir, filename), "r", encoding="utf-8") as handle:
            js = json.load(handle)
            self.assertEqual("ExplanationOfBenefit", js["resourceType"])
        return explanationofbenefit.ExplanationOfBenefit(js)

    def testExplanationOfBenefit1(self):
        # Parse -> check -> serialize -> re-parse -> check again (round trip).
        inst = self.instantiate_from("explanationofbenefit-example.json")
        self.assertIsNotNone(
            inst, "Must have instantiated a ExplanationOfBenefit instance"
        )
        self.implExplanationOfBenefit1(inst)

        js = inst.as_json()
        self.assertEqual("ExplanationOfBenefit", js["resourceType"])
        inst2 = explanationofbenefit.ExplanationOfBenefit(js)
        self.implExplanationOfBenefit1(inst2)

    def implExplanationOfBenefit1(self, inst):
        """Field-by-field assertions for explanationofbenefit-example.json."""
        self.assertEqual(inst.careTeam[0].sequence, 1)
        self.assertEqual(inst.created.date, FHIRDate("2014-08-16").date)
        self.assertEqual(inst.created.as_json(), "2014-08-16")
        self.assertEqual(
            force_bytes(inst.disposition), force_bytes("Claim settled as per contract.")
        )
        self.assertEqual(force_bytes(inst.id), force_bytes("EB3500"))
        self.assertEqual(
            force_bytes(inst.identifier[0].system),
            force_bytes("http://www.BenefitsInc.com/fhir/explanationofbenefit"),
        )
        self.assertEqual(
            force_bytes(inst.identifier[0].value), force_bytes("987654321")
        )
        self.assertTrue(inst.insurance[0].focal)
        self.assertEqual(
            force_bytes(inst.item[0].adjudication[0].amount.currency),
            force_bytes("USD"),
        )
        self.assertEqual(inst.item[0].adjudication[0].amount.value, 120.0)
        self.assertEqual(
            force_bytes(inst.item[0].adjudication[0].category.coding[0].code),
            force_bytes("eligible"),
        )
        self.assertEqual(
            force_bytes(inst.item[0].adjudication[1].category.coding[0].code),
            force_bytes("eligpercent"),
        )
        self.assertEqual(inst.item[0].adjudication[1].value, 0.8)
        self.assertEqual(
            force_bytes(inst.item[0].adjudication[2].amount.currency),
            force_bytes("USD"),
        )
        self.assertEqual(inst.item[0].adjudication[2].amount.value, 96.0)
        self.assertEqual(
            force_bytes(inst.item[0].adjudication[2].category.coding[0].code),
            force_bytes("benefit"),
        )
        self.assertEqual(inst.item[0].careTeamSequence[0], 1)
        self.assertEqual(force_bytes(inst.item[0].net.currency), force_bytes("USD"))
        self.assertEqual(inst.item[0].net.value, 135.57)
        self.assertEqual(
            force_bytes(inst.item[0].productOrService.coding[0].code),
            force_bytes("1205"),
        )
        self.assertEqual(
            force_bytes(inst.item[0].productOrService.coding[0].system),
            force_bytes("http://terminology.hl7.org/CodeSystem/ex-USCLS"),
        )
        self.assertEqual(inst.item[0].sequence, 1)
        self.assertEqual(inst.item[0].servicedDate.date, FHIRDate("2014-08-16").date)
        self.assertEqual(inst.item[0].servicedDate.as_json(), "2014-08-16")
        self.assertEqual(
            force_bytes(inst.item[0].unitPrice.currency), force_bytes("USD")
        )
        self.assertEqual(inst.item[0].unitPrice.value, 135.57)
        self.assertEqual(
            force_bytes(inst.item[1].adjudication[0].amount.currency),
            force_bytes("USD"),
        )
        self.assertEqual(inst.item[1].adjudication[0].amount.value, 180.0)
        self.assertEqual(
            force_bytes(inst.item[1].adjudication[0].category.coding[0].code),
            force_bytes("benefit"),
        )
        self.assertEqual(inst.item[1].careTeamSequence[0], 1)
        self.assertEqual(
            force_bytes(inst.item[1].detail[0].adjudication[0].amount.currency),
            force_bytes("USD"),
        )
        self.assertEqual(inst.item[1].detail[0].adjudication[0].amount.value, 180.0)
        self.assertEqual(
            force_bytes(inst.item[1].detail[0].adjudication[0].category.coding[0].code),
            force_bytes("benefit"),
        )
        self.assertEqual(
            force_bytes(inst.item[1].detail[0].net.currency), force_bytes("USD")
        )
        self.assertEqual(inst.item[1].detail[0].net.value, 200.0)
        self.assertEqual(
            force_bytes(inst.item[1].detail[0].productOrService.coding[0].code),
            force_bytes("group"),
        )
        self.assertEqual(inst.item[1].detail[0].sequence, 1)
        self.assertEqual(
            force_bytes(
                inst.item[1].detail[0].subDetail[0].adjudication[0].amount.currency
            ),
            force_bytes("USD"),
        )
        self.assertEqual(
            inst.item[1].detail[0].subDetail[0].adjudication[0].amount.value, 200.0
        )
        self.assertEqual(
            force_bytes(
                inst.item[1]
                .detail[0]
                .subDetail[0]
                .adjudication[0]
                .category.coding[0]
                .code
            ),
            force_bytes("eligible"),
        )
        self.assertEqual(
            force_bytes(
                inst.item[1]
                .detail[0]
                .subDetail[0]
                .adjudication[1]
                .category.coding[0]
                .code
            ),
            force_bytes("eligpercent"),
        )
        self.assertEqual(inst.item[1].detail[0].subDetail[0].adjudication[1].value, 0.9)
        self.assertEqual(
            force_bytes(
                inst.item[1].detail[0].subDetail[0].adjudication[2].amount.currency
            ),
            force_bytes("USD"),
        )
        self.assertEqual(
            inst.item[1].detail[0].subDetail[0].adjudication[2].amount.value, 180.0
        )
        self.assertEqual(
            force_bytes(
                inst.item[1]
                .detail[0]
                .subDetail[0]
                .adjudication[2]
                .category.coding[0]
                .code
            ),
            force_bytes("benefit"),
        )
        self.assertEqual(
            force_bytes(inst.item[1].detail[0].subDetail[0].net.currency),
            force_bytes("USD"),
        )
        self.assertEqual(inst.item[1].detail[0].subDetail[0].net.value, 200.0)
        self.assertEqual(
            force_bytes(
                inst.item[1].detail[0].subDetail[0].productOrService.coding[0].code
            ),
            force_bytes("1205"),
        )
        self.assertEqual(
            force_bytes(
                inst.item[1].detail[0].subDetail[0].productOrService.coding[0].system
            ),
            force_bytes("http://terminology.hl7.org/CodeSystem/ex-USCLS"),
        )
        self.assertEqual(inst.item[1].detail[0].subDetail[0].sequence, 1)
        self.assertEqual(
            force_bytes(inst.item[1].detail[0].subDetail[0].unitPrice.currency),
            force_bytes("USD"),
        )
        self.assertEqual(inst.item[1].detail[0].subDetail[0].unitPrice.value, 200.0)
        self.assertEqual(force_bytes(inst.item[1].net.currency), force_bytes("USD"))
        self.assertEqual(inst.item[1].net.value, 200.0)
        self.assertEqual(
            force_bytes(inst.item[1].productOrService.coding[0].code),
            force_bytes("group"),
        )
        self.assertEqual(inst.item[1].sequence, 2)
        self.assertEqual(inst.item[1].servicedDate.date, FHIRDate("2014-08-16").date)
        self.assertEqual(inst.item[1].servicedDate.as_json(), "2014-08-16")
        self.assertEqual(force_bytes(inst.meta.tag[0].code), force_bytes("HTEST"))
        self.assertEqual(
            force_bytes(inst.meta.tag[0].display), force_bytes("test health data")
        )
        self.assertEqual(
            force_bytes(inst.meta.tag[0].system),
            force_bytes("http://terminology.hl7.org/CodeSystem/v3-ActReason"),
        )
        self.assertEqual(force_bytes(inst.outcome), force_bytes("complete"))
        self.assertEqual(
            force_bytes(inst.payee.type.coding[0].code), force_bytes("provider")
        )
        self.assertEqual(
            force_bytes(inst.payee.type.coding[0].system),
            force_bytes("http://terminology.hl7.org/CodeSystem/payeetype"),
        )
        self.assertEqual(force_bytes(inst.status), force_bytes("active"))
        self.assertEqual(
            force_bytes(inst.text.div),
            force_bytes(
                '<div xmlns="http://www.w3.org/1999/xhtml">A human-readable rendering of the ExplanationOfBenefit</div>'
            ),
        )
        self.assertEqual(force_bytes(inst.text.status), force_bytes("generated"))
        self.assertEqual(force_bytes(inst.total[0].amount.currency), force_bytes("USD"))
        self.assertEqual(inst.total[0].amount.value, 135.57)
        self.assertEqual(
            force_bytes(inst.total[0].category.coding[0].code), force_bytes("submitted")
        )
        self.assertEqual(force_bytes(inst.total[1].amount.currency), force_bytes("USD"))
        self.assertEqual(inst.total[1].amount.value, 96.0)
        self.assertEqual(
            force_bytes(inst.total[1].category.coding[0].code), force_bytes("benefit")
        )
        self.assertEqual(force_bytes(inst.type.coding[0].code), force_bytes("oral"))
        self.assertEqual(
            force_bytes(inst.type.coding[0].system),
            force_bytes("http://terminology.hl7.org/CodeSystem/claim-type"),
        )
        self.assertEqual(force_bytes(inst.use), force_bytes("claim"))

    def testExplanationOfBenefit2(self):
        # Parse -> check -> serialize -> re-parse -> check again (round trip).
        inst = self.instantiate_from("explanationofbenefit-example-2.json")
        self.assertIsNotNone(
            inst, "Must have instantiated a ExplanationOfBenefit instance"
        )
        self.implExplanationOfBenefit2(inst)

        js = inst.as_json()
        self.assertEqual("ExplanationOfBenefit", js["resourceType"])
        inst2 = explanationofbenefit.ExplanationOfBenefit(js)
        self.implExplanationOfBenefit2(inst2)

    def implExplanationOfBenefit2(self, inst):
        """Field-by-field assertions for explanationofbenefit-example-2.json."""
        self.assertEqual(inst.accident.date.date, FHIRDate("2014-02-14").date)
        self.assertEqual(inst.accident.date.as_json(), "2014-02-14")
        self.assertEqual(
            force_bytes(inst.accident.type.coding[0].code), force_bytes("SPT")
        )
        self.assertEqual(
            force_bytes(inst.accident.type.coding[0].system),
            force_bytes("http://terminology.hl7.org/CodeSystem/v3-ActCode"),
        )
        self.assertEqual(inst.billablePeriod.end.date, FHIRDate("2014-03-01").date)
        self.assertEqual(inst.billablePeriod.end.as_json(), "2014-03-01")
        self.assertEqual(inst.billablePeriod.start.date, FHIRDate("2014-02-01").date)
        self.assertEqual(inst.billablePeriod.start.as_json(), "2014-02-01")
        self.assertEqual(inst.created.date, FHIRDate("2014-08-16").date)
        self.assertEqual(inst.created.as_json(), "2014-08-16")
        self.assertEqual(
            force_bytes(inst.disposition), force_bytes("Could not process.")
        )
        self.assertEqual(force_bytes(inst.formCode.coding[0].code), force_bytes("2"))
        self.assertEqual(
            force_bytes(inst.formCode.coding[0].system),
            force_bytes("http://terminology.hl7.org/CodeSystem/forms-codes"),
        )
        self.assertEqual(force_bytes(inst.id), force_bytes("EB3501"))
        self.assertEqual(
            force_bytes(inst.identifier[0].system),
            force_bytes("http://www.BenefitsInc.com/fhir/explanationofbenefit"),
        )
        self.assertEqual(force_bytes(inst.identifier[0].value), force_bytes("error-1"))
        self.assertTrue(inst.insurance[0].focal)
        self.assertEqual(force_bytes(inst.meta.tag[0].code), force_bytes("HTEST"))
        self.assertEqual(
            force_bytes(inst.meta.tag[0].display), force_bytes("test health data")
        )
        self.assertEqual(
            force_bytes(inst.meta.tag[0].system),
            force_bytes("http://terminology.hl7.org/CodeSystem/v3-ActReason"),
        )
        self.assertEqual(force_bytes(inst.outcome), force_bytes("error"))
        self.assertEqual(inst.precedence, 2)
        self.assertEqual(inst.procedure[0].date.date, FHIRDate("2014-02-14").date)
        self.assertEqual(inst.procedure[0].date.as_json(), "2014-02-14")
        self.assertEqual(
            force_bytes(inst.procedure[0].procedureCodeableConcept.coding[0].code),
            force_bytes("123001"),
        )
        self.assertEqual(
            force_bytes(inst.procedure[0].procedureCodeableConcept.coding[0].system),
            force_bytes("http://hl7.org/fhir/sid/ex-icd-10-procedures"),
        )
        self.assertEqual(inst.procedure[0].sequence, 1)
        self.assertEqual(
            force_bytes(inst.processNote[0].language.coding[0].code),
            force_bytes("en-CA"),
        )
        self.assertEqual(
            force_bytes(inst.processNote[0].language.coding[0].system),
            force_bytes("urn:ietf:bcp:47"),
        )
        self.assertEqual(inst.processNote[0].number, 1)
        self.assertEqual(
            force_bytes(inst.processNote[0].text), force_bytes("Invalid claim")
        )
        self.assertEqual(force_bytes(inst.processNote[0].type), force_bytes("display"))
        self.assertEqual(
            force_bytes(inst.related[0].reference.system),
            force_bytes("http://www.BenefitsInc.com/case-number"),
        )
        self.assertEqual(
            force_bytes(inst.related[0].reference.value),
            force_bytes("23-56Tu-XX-47-20150M14"),
        )
        self.assertEqual(force_bytes(inst.status), force_bytes("active"))
        self.assertEqual(
            force_bytes(inst.subType.coding[0].code), force_bytes("emergency")
        )
        self.assertEqual(
            force_bytes(inst.subType.coding[0].system),
            force_bytes("http://terminology.hl7.org/CodeSystem/ex-claimsubtype"),
        )
        self.assertEqual(
            force_bytes(inst.supportingInfo[0].category.coding[0].code),
            force_bytes("employmentimpacted"),
        )
        self.assertEqual(
            force_bytes(inst.supportingInfo[0].category.coding[0].system),
            force_bytes(
                "http://terminology.hl7.org/CodeSystem/claiminformationcategory"
            ),
        )
        self.assertEqual(inst.supportingInfo[0].sequence, 1)
        self.assertEqual(
            inst.supportingInfo[0].timingPeriod.end.date, FHIRDate("2014-02-28").date
        )
        self.assertEqual(
            inst.supportingInfo[0].timingPeriod.end.as_json(), "2014-02-28"
        )
        self.assertEqual(
            inst.supportingInfo[0].timingPeriod.start.date, FHIRDate("2014-02-14").date
        )
        self.assertEqual(
            inst.supportingInfo[0].timingPeriod.start.as_json(), "2014-02-14"
        )
        self.assertEqual(
            force_bytes(inst.supportingInfo[1].category.coding[0].code),
            force_bytes("hospitalized"),
        )
        self.assertEqual(
            force_bytes(inst.supportingInfo[1].category.coding[0].system),
            force_bytes(
                "http://terminology.hl7.org/CodeSystem/claiminformationcategory"
            ),
        )
        self.assertEqual(inst.supportingInfo[1].sequence, 2)
        self.assertEqual(
            inst.supportingInfo[1].timingPeriod.end.date, FHIRDate("2014-02-16").date
        )
        self.assertEqual(
            inst.supportingInfo[1].timingPeriod.end.as_json(), "2014-02-16"
        )
        self.assertEqual(
            inst.supportingInfo[1].timingPeriod.start.date, FHIRDate("2014-02-14").date
        )
        self.assertEqual(
            inst.supportingInfo[1].timingPeriod.start.as_json(), "2014-02-14"
        )
        self.assertEqual(force_bytes(inst.text.status), force_bytes("generated"))
        self.assertEqual(force_bytes(inst.total[0].amount.currency), force_bytes("USD"))
        self.assertEqual(inst.total[0].amount.value, 2478.57)
        self.assertEqual(
            force_bytes(inst.total[0].category.coding[0].code), force_bytes("submitted")
        )
        self.assertEqual(force_bytes(inst.total[1].amount.currency), force_bytes("USD"))
        self.assertEqual(inst.total[1].amount.value, 0.0)
        self.assertEqual(
            force_bytes(inst.total[1].category.coding[0].code), force_bytes("benefit")
        )
        self.assertEqual(force_bytes(inst.type.coding[0].code), force_bytes("oral"))
        self.assertEqual(
            force_bytes(inst.type.coding[0].system),
            force_bytes("http://terminology.hl7.org/CodeSystem/claim-type"),
        )
        self.assertEqual(force_bytes(inst.use), force_bytes("claim"))
| 42.567237
| 120
| 0.612234
| 1,942
| 17,410
| 5.394439
| 0.11277
| 0.155594
| 0.154639
| 0.193299
| 0.875143
| 0.841447
| 0.808133
| 0.760596
| 0.725372
| 0.687476
| 0
| 0.045167
| 0.247157
| 17,410
| 408
| 121
| 42.671569
| 0.754101
| 0.010569
| 0
| 0.452442
| 0
| 0.002571
| 0.110989
| 0.007957
| 0
| 0
| 0
| 0
| 0.365039
| 1
| 0.012853
| false
| 0
| 0.020566
| 0
| 0.03856
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
be44996d6847133a05a19cc956d8d5c441fbfb55
| 17,467
|
py
|
Python
|
ml_insights/calibration.py
|
JustinKurland/introspective
|
3626c5a176c70fb6d09071307949032b5ff4f0e5
|
[
"MIT"
] | 126
|
2016-11-11T22:41:16.000Z
|
2022-02-14T07:42:48.000Z
|
ml_insights/calibration.py
|
JustinKurland/introspective
|
3626c5a176c70fb6d09071307949032b5ff4f0e5
|
[
"MIT"
] | 28
|
2016-10-28T21:43:24.000Z
|
2021-07-27T14:46:04.000Z
|
ml_insights/calibration.py
|
JustinKurland/introspective
|
3626c5a176c70fb6d09071307949032b5ff4f0e5
|
[
"MIT"
] | 66
|
2016-11-12T23:25:22.000Z
|
2021-12-13T19:22:48.000Z
|
"""Calibration of predicted probabilities."""
import numpy as np
import sklearn
import warnings
from sklearn.base import BaseEstimator, ClassifierMixin, clone
try:
from sklearn.model_selection import StratifiedKFold
except:
from sklearn.cross_validation import StratifiedKFold
from .calibration_utils import prob_calibration_function, compact_logit
class SplineCalibratedClassifierCV(BaseEstimator, ClassifierMixin):
    """Probability calibration using cubic splines.

    With this class, the base_estimator is fit on each of the cross-validation
    training set folds in order to generate scores on the (cross-validated)
    test set folds. The test set scores are accumulated into a final vector
    (the size of the full set) which is used to calibrate the answers.
    The model is then fit on the full data set. The predict, and predict_proba
    methods are then updated to use the combination of the predictions from the
    full model and the calibration function computed as above.

    Parameters
    ----------
    base_estimator : instance BaseEstimator
        The classifier whose output decision function needs to be calibrated
        to offer more accurate predict_proba outputs. If cv='prefit', the
        classifier must have been fit already on data.

    method : 'logistic' or 'ridge'
        The default is 'logistic', which is best if you plan to use log-loss
        as your performance metric. This method is relatively robust and will
        typically do well on brier score as well. The 'ridge' method calibrates
        using an L2 loss, and therefore should do better for brier score, but
        may do considerably worse on log-loss.

    cv : integer, cross-validation generator, iterable or "prefit", optional
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:

        - None, to use the default 5-fold cross-validation,
        - integer, to specify the number of folds.
        - 'prefit', if you wish to use the data only for calibration

        For integer/None inputs, if ``y`` is binary or multiclass,
        :class:`sklearn.model_selection.StratifiedKFold` is used. If ``y`` is
        neither binary nor multiclass, :class:`sklearn.model_selection.KFold`
        is used.

        Refer :ref:`User Guide <cross_validation>` for the various
        cross-validation strategies that can be used here.

        If "prefit" is passed, it is assumed that base_estimator has been
        fitted already and all data is used for calibration.

    Attributes
    ----------
    uncalibrated_classifier : the uncalibrated version of the classifier,
        fit on the entire data set.

    calib_func : the calibration function learned from the cross-validation.
        Applying this function to the results of the uncalibrated classifier
        (via model.predict_proba(X_test)[:,1]) gives the fully calibrated
        classifier.

    References
    ----------
    """

    def __init__(self, base_estimator=None, method='logistic', cv=5, transform_type='none', cl_eps = .000001, **calib_kwargs):
        warn_msg = ('\nThis class is deprecated and will eventually be removed.' +
                    '\nPlease use the SplineCalib class for calibration.')
        warnings.warn(warn_msg, FutureWarning)
        self.base_estimator = base_estimator
        self.uncalibrated_classifier = None
        self.calib_func = None
        self.method = method
        self.cv = cv
        self.cl_eps = cl_eps
        self.calib_kwargs = calib_kwargs
        self.fit_on_multiclass = False
        self.transform_type = transform_type
        # Pre-transform applied to raw scores before calibration:
        # identity by default, compact-logit for 'cl', or any user callable.
        self.pre_transform = lambda x: x
        if type(self.transform_type) == str:
            if self.transform_type == 'cl':
                self.pre_transform = lambda x: compact_logit(x, eps = self.cl_eps)
        if callable(self.transform_type):
            self.pre_transform = self.transform_type

    def fit(self, X, y, verbose=False):
        """Fit the calibrated model

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data.

        y : array-like, shape (n_samples,)
            Target values.

        Returns
        -------
        self : object
            Returns an instance of self.
        """
        # More than two classes -> delegate to the multiclass routine.
        if len(np.unique(y)) > 2:
            self.fit_on_multiclass = True
            return self._fit_multiclass(X, y, verbose=verbose)
        self.fit_on_multiclass=False
        if ((type(self.cv)==str) and (self.cv=='prefit')):
            # Classifier is already fit: use all data for calibration only.
            self.uncalibrated_classifier = self.base_estimator
            y_pred = self.uncalibrated_classifier.predict_proba(X)[:,1]
        else:
            # Accumulate out-of-fold predictions over the CV splits.
            y_pred = np.zeros(len(y))
            if sklearn.__version__ < '0.18':
                # Old sklearn API: StratifiedKFold took y and n_folds.
                if type(self.cv)==int:
                    skf = StratifiedKFold(y, n_folds=self.cv,shuffle=True)
                else:
                    skf = self.cv
            else:
                if type(self.cv)==int:
                    skf = StratifiedKFold(n_splits=self.cv, shuffle=True).split(X, y)
                else:
                    skf = self.cv.split(X,y)
            for idx, (train_idx, test_idx) in enumerate(skf):
                if verbose:
                    print("training fold {} of {}".format(idx+1, self.cv))
                X_train = np.array(X)[train_idx,:]
                X_test = np.array(X)[test_idx,:]
                y_train = np.array(y)[train_idx]
                # We could also copy the model first and then fit it
                this_estimator = clone(self.base_estimator)
                this_estimator.fit(X_train,y_train)
                y_pred[test_idx] = this_estimator.predict_proba(X_test)[:,1]

            # Training full model
            if verbose:
                print("Training Full Model")
            self.uncalibrated_classifier = clone(self.base_estimator)
            self.uncalibrated_classifier.fit(X, y)

        # Determine the calibrating function from out-of-fold scores.
        if verbose:
            print("Determining Calibration Function")
        if self.method=='logistic':
            self.calib_func = prob_calibration_function(y, self.pre_transform(y_pred), verbose=verbose, **self.calib_kwargs)
        if self.method=='ridge':
            self.calib_func = prob_calibration_function(y, self.pre_transform(y_pred), method='ridge', verbose=verbose, **self.calib_kwargs)
        return self

    def _fit_multiclass(self, X, y, verbose=False):
        """Fit the calibrated model in multiclass setting

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data.

        y : array-like, shape (n_samples,)
            Target values.

        Returns
        -------
        self : object
            Returns an instance of self.
        """
        # Remap arbitrary class labels to 0..num_classes-1.
        class_list = np.unique(y)
        num_classes = len(class_list)
        y_mod = np.zeros(len(y))
        for i in range(num_classes):
            y_mod[y==class_list[i]]=i
        y_mod = y_mod.astype(int)
        if ((type(self.cv)==str) and (self.cv=='prefit')):
            self.uncalibrated_classifier = self.base_estimator
            y_pred = self.uncalibrated_classifier.predict_proba(X)
        else:
            y_pred = np.zeros((len(y_mod),num_classes))
            if sklearn.__version__ < '0.18':
                skf = StratifiedKFold(y_mod, n_folds=self.cv,shuffle=True)
            else:
                skf = StratifiedKFold(n_splits=self.cv, shuffle=True).split(X, y)
            for idx, (train_idx, test_idx) in enumerate(skf):
                if verbose:
                    print("training fold {} of {}".format(idx+1, self.cv))
                X_train = np.array(X)[train_idx,:]
                X_test = np.array(X)[test_idx,:]
                y_train = np.array(y_mod)[train_idx]
                # We could also copy the model first and then fit it
                this_estimator = clone(self.base_estimator)
                this_estimator.fit(X_train,y_train)
                y_pred[test_idx,:] = this_estimator.predict_proba(X_test)

            # Training full model
            if verbose:
                print("Training Full Model")
            self.uncalibrated_classifier = clone(self.base_estimator)
            self.uncalibrated_classifier.fit(X, y_mod)

        # Determine the calibrating function.
        # NOTE(review): prob_calibration_function_multiclass is not imported by
        # this module's visible import block (only prob_calibration_function and
        # compact_logit are) — this path looks like it would raise NameError;
        # confirm and add the import to .calibration_utils if so.
        if verbose:
            print("Determining Calibration Function")
        if self.method=='logistic':
            self.calib_func, self.cf_list = prob_calibration_function_multiclass(y_mod, self.pre_transform(y_pred), verbose=verbose, **self.calib_kwargs)
        if self.method=='ridge':
            self.calib_func, self.cf_list = prob_calibration_function_multiclass(y_mod, self.pre_transform(y_pred), verbose=verbose, method='ridge', **self.calib_kwargs)
        return self

    def predict_proba(self, X):
        """Posterior probabilities of classification

        This function returns posterior probabilities of classification
        according to each class on an array of test vectors X.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            The samples.

        Returns
        -------
        C : array, shape (n_samples, n_classes)
            The predicted probas.
        """
        # check_is_fitted(self, ["classes_", "calibrated_classifier"])
        if self.fit_on_multiclass:
            return self.calib_func(self.pre_transform(self.uncalibrated_classifier.predict_proba(X)))

        # Binary case: calibrate the positive-class column, derive the other.
        col_1 = self.calib_func(self.pre_transform(self.uncalibrated_classifier.predict_proba(X)[:,1]))
        col_0 = 1-col_1
        return np.vstack((col_0,col_1)).T

    def predict(self, X):
        """Predict the target of new samples. Can be different from the
        prediction of the uncalibrated classifier.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            The samples.

        Returns
        -------
        C : array, shape (n_samples,)
            The predicted class.
        """
        # check_is_fitted(self, ["classes_", "calibrated_classifier"])
        return self.uncalibrated_classifier.classes_[np.argmax(self.predict_proba(X), axis=1)]

    def classes_(self):
        # NOTE(review): defined as a plain method, not a @property — callers
        # using `model.classes_` get the bound method; confirm intent.
        return self.uncalibrated_classifier.classes_
"""Calibration of predicted probabilities."""
import numpy as np
import sklearn
from sklearn.base import BaseEstimator, ClassifierMixin, clone
try:
from sklearn.model_selection import StratifiedKFold
except:
from sklearn.cross_validation import StratifiedKFold
from .calibration_utils import prob_calibration_function_multiclass
class SplineCalibratedClassifierMulticlassCV(BaseEstimator, ClassifierMixin):
    """Probability calibration using cubic splines (multiclass, CV-based).

    The base_estimator is fit on each of the cross-validation training set
    folds in order to generate scores on the (cross-validated) test set
    folds. The test set scores are accumulated into a final matrix (the
    size of the full set) which is used to calibrate the answers. The model
    is then fit on the full data set. The predict and predict_proba methods
    use the combination of the predictions from the full model and the
    calibration function computed as above.

    Parameters
    ----------
    base_estimator : instance BaseEstimator
        The classifier whose output decision function needs to be calibrated
        to offer more accurate predict_proba outputs. If cv='prefit', the
        classifier must have been fit already on data.
    method : 'logistic' or 'ridge'
        The default is 'logistic', which is best if you plan to use log-loss
        as your performance metric. This method is relatively robust and will
        typically do well on brier score as well. The 'ridge' method
        calibrates using an L2 loss, and therefore should do better for brier
        score, but may do considerably worse on log-loss.
    cv : integer, cross-validation generator, iterable or "prefit", optional
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:
        - None, to use the default 5-fold cross-validation,
        - integer, to specify the number of folds.
        - 'prefit', if you wish to use the data only for calibration
        If "prefit" is passed, it is assumed that base_estimator has been
        fitted already and all data is used for calibration.

    Attributes
    ----------
    uncalibrated_classifier : the uncalibrated version of the classifier,
        fit on the entire data set.
    calib_func : the calibration function learned from the cross-validation.
        Applying it to the output of uncalibrated_classifier.predict_proba
        gives the fully calibrated probabilities.
    """

    def __init__(self, base_estimator=None, method='logistic', cv=5, **calib_kwargs):
        warn_msg = ('\nThis class is deprecated and will eventually be removed.' +
                    '\nPlease use the SplineCalib class for calibration.')
        warnings.warn(warn_msg, FutureWarning)
        self.base_estimator = base_estimator
        self.uncalibrated_classifier = None
        self.calib_func = None
        self.method = method
        self.cv = cv
        self.calib_kwargs = calib_kwargs

    def fit(self, X, y, verbose=False):
        """Fit the calibrated model.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data.
        y : array-like, shape (n_samples,)
            Target values.
        verbose : bool, optional (default=False)
            If True, print progress messages for each fold.

        Returns
        -------
        self : object
            Returns an instance of self.
        """
        # Remap arbitrary class labels onto integer codes 0..num_classes-1.
        class_list = np.unique(y)
        num_classes = len(class_list)
        y_mod = np.zeros(len(y))
        for i in range(num_classes):
            y_mod[np.where(y == class_list[i])] = i
        y_mod = y_mod.astype(int)
        if ((type(self.cv) == str) and (self.cv == 'prefit')):
            self.uncalibrated_classifier = self.base_estimator
            # BUG FIX: use the full (n_samples, n_classes) probability
            # matrix. The previous code took only column 1
            # (predict_proba(X)[:, 1]), which is the binary-case slice and
            # breaks the multiclass calibration performed below (the CV
            # branch correctly accumulates the full matrix).
            y_pred = self.uncalibrated_classifier.predict_proba(X)
        else:
            y_pred = np.zeros((len(y_mod), num_classes))
            # NOTE: string comparison of versions is fragile in general but
            # distinguishes the pre-0.18 cross_validation API correctly here.
            if sklearn.__version__ < '0.18':
                skf = StratifiedKFold(y_mod, n_folds=self.cv, shuffle=True)
            else:
                # Stratifying on y is equivalent to y_mod (same partition).
                skf = StratifiedKFold(n_splits=self.cv, shuffle=True).split(X, y)
            for idx, (train_idx, test_idx) in enumerate(skf):
                if verbose:
                    print("training fold {} of {}".format(idx + 1, self.cv))
                X_train = np.array(X)[train_idx, :]
                X_test = np.array(X)[test_idx, :]
                y_train = np.array(y_mod)[train_idx]
                # Clone so each fold trains a fresh, independent estimator.
                this_estimator = clone(self.base_estimator)
                this_estimator.fit(X_train, y_train)
                y_pred[test_idx, :] = this_estimator.predict_proba(X_test)
            # Train the full model on all of the data.
            if verbose:
                print("Training Full Model")
            self.uncalibrated_classifier = clone(self.base_estimator)
            self.uncalibrated_classifier.fit(X, y_mod)
        # Learn the calibration function from the out-of-fold predictions.
        if verbose:
            print("Determining Calibration Function")
        if self.method == 'logistic':
            self.calib_func = prob_calibration_function_multiclass(
                y_mod, y_pred, verbose=verbose, **self.calib_kwargs)
        if self.method == 'ridge':
            self.calib_func = prob_calibration_function_multiclass(
                y_mod, y_pred, verbose=verbose, method='ridge', **self.calib_kwargs)
        return self

    def predict_proba(self, X):
        """Posterior probabilities of classification.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            The samples.

        Returns
        -------
        C : array, shape (n_samples, n_classes)
            The calibrated probabilities.
        """
        # check_is_fitted(self, ["classes_", "calibrated_classifier"])
        return self.calib_func(self.uncalibrated_classifier.predict_proba(X))

    def predict(self, X):
        """Predict the target of new samples. Can be different from the
        prediction of the uncalibrated classifier.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            The samples.

        Returns
        -------
        C : array, shape (n_samples,)
            The predicted class.
        """
        # check_is_fitted(self, ["classes_", "calibrated_classifier"])
        return self.uncalibrated_classifier.classes_[np.argmax(self.predict_proba(X), axis=1)]

    def classes_(self):
        """Return the class labels known to the underlying classifier.

        NOTE(review): plain method, not a @property, so callers invoke
        ``model.classes_()``; kept as-is for backward compatibility.
        """
        return self.uncalibrated_classifier.classes_
| 39.251685
| 169
| 0.630331
| 2,171
| 17,467
| 4.918931
| 0.122524
| 0.055623
| 0.051128
| 0.014046
| 0.955146
| 0.933327
| 0.929675
| 0.925087
| 0.922277
| 0.913475
| 0
| 0.003105
| 0.28093
| 17,467
| 444
| 170
| 39.34009
| 0.847134
| 0.413007
| 0
| 0.767045
| 0
| 0
| 0.058958
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.0625
| false
| 0
| 0.073864
| 0.011364
| 0.210227
| 0.051136
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
be5a4aa2cd5e25eda2db6ec1ba8674d348ebb095
| 8,502
|
py
|
Python
|
utils/scripts/OOOlevelGen/src/levels/level_8_3.py
|
fullscreennl/bullettime
|
8967449cdf926aaed6bb7ec217d92e0689fb0c3c
|
[
"MIT"
] | null | null | null |
utils/scripts/OOOlevelGen/src/levels/level_8_3.py
|
fullscreennl/bullettime
|
8967449cdf926aaed6bb7ec217d92e0689fb0c3c
|
[
"MIT"
] | null | null | null |
utils/scripts/OOOlevelGen/src/levels/level_8_3.py
|
fullscreennl/bullettime
|
8967449cdf926aaed6bb7ec217d92e0689fb0c3c
|
[
"MIT"
] | null | null | null |
import LevelBuilder
from sprites import *
from sprite_templates import *
def render(name, bg):
    """Build the sprite layout for this level and write it out as a plist."""
    builder = LevelBuilder.LevelBuilder(name + ".plist", background=bg)
    builder.addObject(Hero.HeroSprite(x=51, y=260, width=32, height=32))
    builder.addObject(Bullet.BulletSprite(x=0, y=0, width=10, height=10, angle='0', restitution=0.5, static='false', friction=0.5, density=3, spawnEvent='onShoot'))
    builder.addObject(Enemy.EnemySprite(x=871, y=107, width=208, height=208, angle='0', restitution=0.2, static='false', friction=0.5, density=20, classname='BlobSprite', firstframe='monsterblob.png').setName('Enemy'))
    builder.addObject(Friend.FriendSprite(x=532, y=44, width=89, height=89, angle='0', restitution=0.2, static='false', friction=0.5, density=5, firstframe='boulder.png'))
    # First beam cage (around x=1465) with an enemy trapped inside.
    for bx, by, bang in [(1505, 63, '90'), (1425, 63, '90'), (1465, 133, '0'),
                         (1505, 204, '90'), (1425, 203, '90'), (1257, 63, '90')]:
        builder.addObject(Beam.BeamSprite(x=bx, y=by, width=127, height=14, angle=bang, restitution=0.2, static='false', friction=0.5, density=5))
    builder.addObject(Enemy.EnemySprite(x=1463, y=174, width=32, height=32, angle='0', restitution=0.2, static='false', friction=0.5, density=5))
    # Second beam cage (around x=1862), with one long horizontal bar.
    for bx, by in [(1963, 63), (1753, 63)]:
        builder.addObject(Beam.BeamSprite(x=bx, y=by, width=127, height=14, angle='90', restitution=0.2, static='false', friction=0.5, density=5))
    builder.addObject(Beam.BeamSprite(x=1852, y=133, width=250, height=14, angle='0', restitution=0.2, static='false', friction=0.5, density=5, firstframe='bar_long.png'))
    for bx, by in [(1903, 204), (1823, 203), (1655, 63)]:
        builder.addObject(Beam.BeamSprite(x=bx, y=by, width=127, height=14, angle='90', restitution=0.2, static='false', friction=0.5, density=5))
    builder.addObject(Enemy.EnemySprite(x=1861, y=174, width=32, height=32, angle='0', restitution=0.2, static='false', friction=0.5, density=5))
    # Two vertical columns of small enemies.
    for ey in (57, 25, 90, 125, 160, 196, 231, 275):
        builder.addObject(Enemy.EnemySprite(x=2334, y=ey, width=32, height=32, angle='0', restitution=0.2, static='false', friction=0.5, density=5))
    for ey in (58, 26, 91, 126, 160, 196, 232, 276):
        builder.addObject(Enemy.EnemySprite(x=1091, y=ey, width=32, height=32, angle='0', restitution=0.2, static='false', friction=0.5, density=5))
    # Static sloped terrain beams.
    for bx, by in [(185, 7), (237, 0), (268, 72), (425, -7), (659, 313), (2419, 329)]:
        builder.addObject(Beam.BeamSprite(x=bx, y=by, width=80, height=60, angle='30', restitution=0.2, static='true', friction=0.5, density=20).setName('Beam'))
    PumpkinBomber.create(builder, 0)
    builder.addObject(Enemy.EnemySprite(x=408, y=61, width=56, height=56, angle='0', restitution=0.2, static='false', friction=0.5, density=5, classname='BlobSprite', firstframe='monsterblob.png'))
    # Column of pickups near the start.
    for py in (39, 69, 99, 129):
        builder.addObject(Pickup.PickupSprite(x=286, y=py, width=32, height=32, static='false', angle=0))
    # Crate stacks, three high, at four x positions.
    for cx in (1464, 1788, 1824, 1862):
        for cy in (21, 55, 88):
            builder.addObject(Crate.CrateSprite(x=cx, y=cy, width=32, height=32, static='false', angle=0))
    builder.addObject(Pickup.PickupSprite(x=1907, y=22, width=32, height=32, static='false', angle=0))
    builder.addObject(Pickup.PickupSprite(x=2523, y=24, width=32, height=32, static='false', angle=0))
    # Zoom in while on the watchtower; restore normal zoom on either side.
    builder.addObject(ZoomTrigger.ZoomTriggerSprite(x=63 - 115 - 50, y=250, width=100, height=500, zoom_fact=1.0))
    builder.addObject(ZoomTrigger.ZoomTriggerSprite(x=63, y=320 - 60, width=128, height=100, zoom_fact=0.1666))
    builder.addObject(ZoomTrigger.ZoomTriggerSprite(x=63 + 115 + 50, y=250, width=100, height=500, zoom_fact=1.0))
    builder.addObject(WatchtowerVisual.WatchtowerVisualSprite(x=63, y=92, width=128, height=235 - 50, angle='0', restitution=0.2, static='true', friction=0.5, density=20, firstframe='watchtower.png'))
    builder.addObject(Enemy.EnemySprite(x=1614, y=11, width=32, height=32, angle='0', restitution=0.2, static='false', friction=0.5, density=5, classname='BlobSprite', firstframe='monsterblob.png'))
    builder.addObject(BulletTimePickup.BulletTimePickupSprite(x=1043, y=21, width=32, height=32, static='false', angle=0))
    builder.addObject(Teleporter.TeleporterSprite(level_id='leveldata/menu'))
    builder.render()
| 8,502
| 8,502
| 0.733004
| 1,453
| 8,502
| 4.284928
| 0.112182
| 0.116608
| 0.067459
| 0.11468
| 0.877128
| 0.855124
| 0.844523
| 0.830389
| 0.830389
| 0.830389
| 0
| 0.123608
| 0.059868
| 8,502
| 1
| 8,502
| 8,502
| 0.655323
| 0
| 0
| 0
| 0
| 0
| 0.061625
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.013699
| false
| 0
| 0.041096
| 0
| 0.054795
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
be652c53497bf42854807177ce0d80bc1b7cec3f
| 40,912
|
py
|
Python
|
tests/data/formatters/HtmlFormatter.py
|
mr-mixas/nested.py
|
23857f248a6411b15961a0e2a169c2f14421ccb7
|
[
"Apache-2.0"
] | null | null | null |
tests/data/formatters/HtmlFormatter.py
|
mr-mixas/nested.py
|
23857f248a6411b15961a0e2a169c2f14421ccb7
|
[
"Apache-2.0"
] | null | null | null |
tests/data/formatters/HtmlFormatter.py
|
mr-mixas/nested.py
|
23857f248a6411b15961a0e2a169c2f14421ccb7
|
[
"Apache-2.0"
] | null | null | null |
"""
Autogenerated, do not edit manually!
"""
import sys
RESULTS = {
'0_vs_0': {
'result': '<div class="dif-body"><div> <span class="dif-vU">0</span></div></div>',
},
'0_vs_0_noU': {
'result': '<div class="dif-body"></div>',
},
'0_vs_1': {
'result': '<div class="dif-body"><div>- <span class="dif-vO">0</span></div><div>+ <span class="dif-vN">1</span></div></div>',
},
'0_vs_empty_string': {
'result': '<div class="dif-body"><div>- <span class="dif-vO">0</span></div><div>+ <span class="dif-vN">''</span></div></div>',
},
'0_vs_undef': {
'result': '<div class="dif-body"><div>- <span class="dif-vO">0</span></div><div>+ <span class="dif-vN">None</span></div></div>',
},
'1.0_vs_1.0_as_string': {
'result': '<div class="dif-body"><div>- <span class="dif-vO">1</span></div><div>+ <span class="dif-vN">'1.0'</span></div></div>',
},
'1_vs_-1': {
'result': '<div class="dif-body"><div>- <span class="dif-vO">1</span></div><div>+ <span class="dif-vN">-1</span></div></div>',
},
'1_vs_1.0': {
'result': '<div class="dif-body"><div> <span class="dif-vU">1</span></div></div>',
},
'1_vs_1_as_string': {
'result': '<div class="dif-body"><div>- <span class="dif-vO">1</span></div><div>+ <span class="dif-vN">'1'</span></div></div>',
},
'a_vs_a': {
'result': '<div class="dif-body"><div> <span class="dif-vU">'a'</span></div></div>',
},
'a_vs_b': {
'result': '<div class="dif-body"><div>- <span class="dif-vO">'a'</span></div><div>+ <span class="dif-vN">'b'</span></div></div>',
},
'absent_yielder': {
'raises': NotImplementedError,
},
'brackets': {
'result': '<div class="dif-body"><div> <span class="dif-kO">{'('}</span></div><div>- <span class="dif-vO">')'</span></div><div>+ <span class="dif-vN">'('</span></div><div> <span class="dif-kO">{'<'}</span></div><div>- <span class="dif-vO">'>'</span></div><div>+ <span class="dif-vN">'<'</span></div><div> <span class="dif-kO">{'['}</span></div><div>- <span class="dif-vO">']'</span></div><div>+ <span class="dif-vN">'['</span></div><div> <span class="dif-kO">{'{'}</span></div><div>- <span class="dif-vO">'}'</span></div><div>+ <span class="dif-vN">'{'</span></div></div>',
},
'comment_is_empty_string': {
'result': '<div class="dif-body"><div># <span class="dif-vC"></span></div><div>- <span class="dif-vO">'old'</span></div><div>+ <span class="dif-vN">'new'</span></div></div>',
},
'comment_vs_type_hint': {
'result': '<div class="dif-body"><div># <span class="dif-vE"><str></span></div><div> <span class="dif-kX0-0">@@ -1,2 +1,2 @@</span></div><div>- <span class="dif-vR">two</span></div><div>+ <span class="dif-vA">2</span></div><div> <span class="dif-vU">lines</span></div></div>',
},
'comment_with_HTML_tags': {
'result': '<div class="dif-body"><div># <span class="dif-vC"><h1>comment</h1></span></div><div> <span class="dif-vU">'same'</span></div></div>',
},
'comments': {
'result': '<div class="dif-body"><div># <span class="dif-vC">C-D</span></div><div> <span class="dif-kO">{'k'}</span></div><div># <span class="dif-vC">C-NO</span></div><div>- <span class="dif-vO">'v'</span></div><div>+ <span class="dif-vN">'V'</span></div></div>',
},
'deeply_nested_hash_vs_empty_hash': {
'result': '<div class="dif-body"><div>- <span class="dif-kR">{'one'}</span></div><div>- <span class="dif-vR">{'two': {'three': 3}}</span></div></div>',
},
'deeply_nested_hash_vs_empty_hash_trimR': {
'result': '<div class="dif-body"><div>- <span class="dif-kR">{'one'}</span></div><div>- <span class="dif-vR">None</span></div></div>',
},
'deeply_nested_list_vs_empty_list': {
'result': '<div class="dif-body"><div>- <span class="dif-kR">[0]</span></div><div>- <span class="dif-vR">[[0, 1]]</span></div></div>',
},
'deeply_nested_list_vs_empty_list_trimR': {
'result': '<div class="dif-body"><div>- <span class="dif-kR">[0]</span></div><div>- <span class="dif-vR">None</span></div></div>',
},
'deeply_nested_subhash_removed_from_hash': {
'result': '<div class="dif-body"><div> <span class="dif-kU">{'four'}</span></div><div> <span class="dif-vU">4</span></div><div>- <span class="dif-kR">{'one'}</span></div><div>- <span class="dif-vR">{'two': {'three': 3}}</span></div></div>',
},
'deeply_nested_subhash_removed_from_hash_trimR': {
'result': '<div class="dif-body"><div> <span class="dif-kU">{'four'}</span></div><div> <span class="dif-vU">4</span></div><div>- <span class="dif-kR">{'one'}</span></div><div>- <span class="dif-vR">None</span></div></div>',
},
'deeply_nested_sublist_removed_from_list': {
'result': '<div class="dif-body"><div> <span class="dif-kU">[0]</span></div><div> <span class="dif-vU">0</span></div><div>- <span class="dif-kR">[1]</span></div><div>- <span class="dif-vR">[[0, 1]]</span></div></div>',
},
'deeply_nested_sublist_removed_from_list_trimR': {
'result': '<div class="dif-body"><div> <span class="dif-kU">[0]</span></div><div> <span class="dif-vU">0</span></div><div>- <span class="dif-kR">[1]</span></div><div>- <span class="dif-vR">None</span></div></div>',
},
'empty_hash_vs_empty_hash': {
'result': '<div class="dif-body"><div> <span class="dif-vU">{}</span></div></div>',
},
'empty_hash_vs_empty_hash_noU': {
'result': '<div class="dif-body"></div>',
},
'empty_hash_vs_empty_list': {
'result': '<div class="dif-body"><div>- <span class="dif-vO">{}</span></div><div>+ <span class="dif-vN">[]</span></div></div>',
},
'empty_hash_vs_hash_with_one_key': {
'result': '<div class="dif-body"><div>+ <span class="dif-kA">{'one'}</span></div><div>+ <span class="dif-vA">1</span></div></div>',
},
'empty_hash_vs_hash_with_one_key_noA': {
'result': '<div class="dif-body"></div>',
},
'empty_list_vs_deeply_nested_list': {
'result': '<div class="dif-body"><div>+ <span class="dif-kA">[0]</span></div><div>+ <span class="dif-vA">[[0, 1]]</span></div></div>',
},
'empty_list_vs_empty_hash': {
'result': '<div class="dif-body"><div>- <span class="dif-vO">[]</span></div><div>+ <span class="dif-vN">{}</span></div></div>',
},
'empty_list_vs_empty_list': {
'result': '<div class="dif-body"><div> <span class="dif-vU">[]</span></div></div>',
},
'empty_list_vs_empty_list_noU': {
'result': '<div class="dif-body"></div>',
},
'empty_list_vs_list_with_one_item': {
'result': '<div class="dif-body"><div>+ <span class="dif-kA">[0]</span></div><div>+ <span class="dif-vA">0</span></div></div>',
},
'empty_list_vs_list_with_one_item_noA': {
'result': '<div class="dif-body"></div>',
},
'empty_string_vs_0': {
'result': '<div class="dif-body"><div>- <span class="dif-vO">''</span></div><div>+ <span class="dif-vN">0</span></div></div>',
},
'empty_string_vs_text': {
'result': '<div class="dif-body"><div># <span class="dif-vE"><str></span></div><div> <span class="dif-kX0-0">@@ -1 +1,2 @@</span></div><div>- <span class="dif-vR"></span></div><div>+ <span class="dif-vA">A</span></div><div>+ <span class="dif-vA">B</span></div></div>',
},
'empty_string_vs_undef': {
'result': '<div class="dif-body"><div>- <span class="dif-vO">''</span></div><div>+ <span class="dif-vN">None</span></div></div>',
},
'escaped_symbols': {
'result': '<div class="dif-body"><div> <span class="dif-kO">{'\\n'}</span></div><div>- <span class="dif-vO">'\\r\\n'</span></div><div>+ <span class="dif-vN">'\\n'</span></div></div>',
},
'frozenset_extended': {
'result': '<div class="dif-body"><div># <span class="dif-vE"><frozenset></span></div><div> <span class="dif-vU">1</span></div><div>+ <span class="dif-vA">2</span></div></div>',
},
'frozensets_lcs': {
'result': '<div class="dif-body"><div># <span class="dif-vE"><frozenset></span></div><div>- <span class="dif-vR">1</span></div><div> <span class="dif-vU">2</span></div><div>+ <span class="dif-vA">3</span></div></div>',
},
'hash_with_one_key_vs_empty_hash': {
'result': '<div class="dif-body"><div>- <span class="dif-kR">{'one'}</span></div><div>- <span class="dif-vR">1</span></div></div>',
},
'hash_with_one_key_vs_empty_hash_noR': {
'result': '<div class="dif-body"></div>',
},
'hashes_with_one_different_value_noN': {
'result': '<div class="dif-body"><div> <span class="dif-kO">{'one'}</span></div><div>- <span class="dif-vO">1</span></div></div>',
},
'hashes_with_one_different_value_noO': {
'result': '<div class="dif-body"><div> <span class="dif-kN">{'one'}</span></div><div>+ <span class="dif-vN">2</span></div></div>',
},
'line_added_to_empty_string': {
'result': '<div class="dif-body"><div># <span class="dif-vE"><str></span></div><div> <span class="dif-kX0-0">@@ -1 +1,2 @@</span></div><div> <span class="dif-vU"></span></div><div>+ <span class="dif-vA"></span></div></div>',
},
'list_with_one_item_vs_empty_list': {
'result': '<div class="dif-body"><div>- <span class="dif-kR">[0]</span></div><div>- <span class="dif-vR">0</span></div></div>',
},
'list_with_one_item_vs_empty_list_noR': {
'result': '<div class="dif-body"></div>',
},
'lists_LCS_added_items': {
'result': '<div class="dif-body"><div>+ <span class="dif-kA">[0]</span></div><div>+ <span class="dif-vA">0</span></div><div>+ <span class="dif-kA">[1]</span></div><div>+ <span class="dif-vA">1</span></div><div> <span class="dif-kU">[2]</span></div><div> <span class="dif-vU">2</span></div><div> <span class="dif-kU">[3]</span></div><div> <span class="dif-vU">3</span></div><div>+ <span class="dif-kA">[4]</span></div><div>+ <span class="dif-vA">4</span></div><div> <span class="dif-kU">[5]</span></div><div> <span class="dif-vU">5</span></div><div>+ <span class="dif-kA">[6]</span></div><div>+ <span class="dif-vA">6</span></div><div>+ <span class="dif-kA">[7]</span></div><div>+ <span class="dif-vA">7</span></div></div>',
},
'lists_LCS_added_items_noU': {
'result': '<div class="dif-body"><div>+ <span class="dif-kA">[0]</span></div><div>+ <span class="dif-vA">0</span></div><div>+ <span class="dif-kA">[1]</span></div><div>+ <span class="dif-vA">1</span></div><div>+ <span class="dif-kA">[2]</span></div><div>+ <span class="dif-vA">4</span></div><div>+ <span class="dif-kA">[3]</span></div><div>+ <span class="dif-vA">6</span></div><div>+ <span class="dif-kA">[4]</span></div><div>+ <span class="dif-vA">7</span></div></div>',
},
'lists_LCS_changed_items': {
'result': '<div class="dif-body"><div> <span class="dif-kU">[0]</span></div><div> <span class="dif-vU">0</span></div><div> <span class="dif-kU">[1]</span></div><div> <span class="dif-vU">1</span></div><div> <span class="dif-kO">[2]</span></div><div>- <span class="dif-vO">2</span></div><div>+ <span class="dif-vN">9</span></div><div> <span class="dif-kO">[3]</span></div><div>- <span class="dif-vO">3</span></div><div>+ <span class="dif-vN">9</span></div><div> <span class="dif-kU">[4]</span></div><div> <span class="dif-vU">4</span></div><div> <span class="dif-kO">[5]</span></div><div>- <span class="dif-vO">5</span></div><div>+ <span class="dif-vN">9</span></div><div> <span class="dif-kU">[6]</span></div><div> <span class="dif-vU">6</span></div><div> <span class="dif-kU">[7]</span></div><div> <span class="dif-vU">7</span></div></div>',
},
'lists_LCS_changed_items_noOU': {
'result': '<div class="dif-body"><div> <span class="dif-kN">[2]</span></div><div>+ <span class="dif-vN">9</span></div><div> <span class="dif-kN">[3]</span></div><div>+ <span class="dif-vN">9</span></div><div> <span class="dif-kN">[5]</span></div><div>+ <span class="dif-vN">9</span></div></div>',
},
'lists_LCS_changed_items_noU': {
'result': '<div class="dif-body"><div> <span class="dif-kO">[2]</span></div><div>- <span class="dif-vO">2</span></div><div>+ <span class="dif-vN">9</span></div><div> <span class="dif-kO">[3]</span></div><div>- <span class="dif-vO">3</span></div><div>+ <span class="dif-vN">9</span></div><div> <span class="dif-kO">[5]</span></div><div>- <span class="dif-vO">5</span></div><div>+ <span class="dif-vN">9</span></div></div>',
},
'lists_LCS_complex': {
'result': '<div class="dif-body"><div>- <span class="dif-kR">[0]</span></div><div>- <span class="dif-vR">'a'</span></div><div> <span class="dif-kU">[1]</span></div><div> <span class="dif-vU">'b'</span></div><div> <span class="dif-kU">[2]</span></div><div> <span class="dif-vU">'c'</span></div><div>+ <span class="dif-kA">[3]</span></div><div>+ <span class="dif-vA">'d'</span></div><div> <span class="dif-kU">[4]</span></div><div> <span class="dif-vU">'e'</span></div><div> <span class="dif-kO">[5]</span></div><div>- <span class="dif-vO">'h'</span></div><div>+ <span class="dif-vN">'f'</span></div><div> <span class="dif-kU">[6]</span></div><div> <span class="dif-vU">'j'</span></div><div>+ <span class="dif-kA">[7]</span></div><div>+ <span class="dif-vA">'k'</span></div><div> <span class="dif-kU">[8]</span></div><div> <span class="dif-vU">'l'</span></div><div> <span class="dif-kU">[9]</span></div><div> <span class="dif-vU">'m'</span></div><div> <span class="dif-kO">[10]</span></div><div>- <span class="dif-vO">'n'</span></div><div>+ <span class="dif-vN">'r'</span></div><div> <span class="dif-kO">[11]</span></div><div>- <span class="dif-vO">'p'</span></div><div>+ <span class="dif-vN">'s'</span></div><div>+ <span class="dif-kA">[12]</span></div><div>+ <span class="dif-vA">'t'</span></div></div>',
},
'lists_LCS_complex_noAU': {
'result': '<div class="dif-body"><div>- <span class="dif-kR">[0]</span></div><div>- <span class="dif-vR">'a'</span></div><div> <span class="dif-kO">[4]</span></div><div>- <span class="dif-vO">'h'</span></div><div>+ <span class="dif-vN">'f'</span></div><div> <span class="dif-kO">[8]</span></div><div>- <span class="dif-vO">'n'</span></div><div>+ <span class="dif-vN">'r'</span></div><div> <span class="dif-kO">[9]</span></div><div>- <span class="dif-vO">'p'</span></div><div>+ <span class="dif-vN">'s'</span></div></div>',
},
'lists_LCS_complex_noRU': {
'result': '<div class="dif-body"><div>+ <span class="dif-kA">[3]</span></div><div>+ <span class="dif-vA">'d'</span></div><div> <span class="dif-kO">[4]</span></div><div>- <span class="dif-vO">'h'</span></div><div>+ <span class="dif-vN">'f'</span></div><div>+ <span class="dif-kA">[6]</span></div><div>+ <span class="dif-vA">'k'</span></div><div> <span class="dif-kO">[8]</span></div><div>- <span class="dif-vO">'n'</span></div><div>+ <span class="dif-vN">'r'</span></div><div> <span class="dif-kO">[9]</span></div><div>- <span class="dif-vO">'p'</span></div><div>+ <span class="dif-vN">'s'</span></div><div>+ <span class="dif-kA">[10]</span></div><div>+ <span class="dif-vA">'t'</span></div></div>',
},
'lists_LCS_complex_noU': {
'result': '<div class="dif-body"><div>- <span class="dif-kR">[0]</span></div><div>- <span class="dif-vR">'a'</span></div><div>+ <span class="dif-kA">[3]</span></div><div>+ <span class="dif-vA">'d'</span></div><div> <span class="dif-kO">[4]</span></div><div>- <span class="dif-vO">'h'</span></div><div>+ <span class="dif-vN">'f'</span></div><div>+ <span class="dif-kA">[6]</span></div><div>+ <span class="dif-vA">'k'</span></div><div> <span class="dif-kO">[8]</span></div><div>- <span class="dif-vO">'n'</span></div><div>+ <span class="dif-vN">'r'</span></div><div> <span class="dif-kO">[9]</span></div><div>- <span class="dif-vO">'p'</span></div><div>+ <span class="dif-vN">'s'</span></div><div>+ <span class="dif-kA">[10]</span></div><div>+ <span class="dif-vA">'t'</span></div></div>',
},
'lists_LCS_complex_onlyU': {
'result': '<div class="dif-body"><div> <span class="dif-kU">[1]</span></div><div> <span class="dif-vU">'b'</span></div><div> <span class="dif-kU">[2]</span></div><div> <span class="dif-vU">'c'</span></div><div> <span class="dif-kU">[3]</span></div><div> <span class="dif-vU">'e'</span></div><div> <span class="dif-kU">[5]</span></div><div> <span class="dif-vU">'j'</span></div><div> <span class="dif-kU">[6]</span></div><div> <span class="dif-vU">'l'</span></div><div> <span class="dif-kU">[7]</span></div><div> <span class="dif-vU">'m'</span></div></div>',
},
'lists_LCS_removed_items': {
'result': '<div class="dif-body"><div>- <span class="dif-kR">[0]</span></div><div>- <span class="dif-vR">0</span></div><div>- <span class="dif-kR">[1]</span></div><div>- <span class="dif-vR">1</span></div><div> <span class="dif-kU">[2]</span></div><div> <span class="dif-vU">2</span></div><div> <span class="dif-kU">[3]</span></div><div> <span class="dif-vU">3</span></div><div>- <span class="dif-kR">[4]</span></div><div>- <span class="dif-vR">4</span></div><div> <span class="dif-kU">[5]</span></div><div> <span class="dif-vU">5</span></div><div>- <span class="dif-kR">[6]</span></div><div>- <span class="dif-vR">6</span></div><div>- <span class="dif-kR">[7]</span></div><div>- <span class="dif-vR">7</span></div></div>',
},
'lists_LCS_removed_items_noU': {
'result': '<div class="dif-body"><div>- <span class="dif-kR">[0]</span></div><div>- <span class="dif-vR">0</span></div><div>- <span class="dif-kR">[1]</span></div><div>- <span class="dif-vR">1</span></div><div>- <span class="dif-kR">[4]</span></div><div>- <span class="dif-vR">4</span></div><div>- <span class="dif-kR">[6]</span></div><div>- <span class="dif-vR">6</span></div><div>- <span class="dif-kR">[7]</span></div><div>- <span class="dif-vR">7</span></div></div>',
},
'lists_with_one_different_item': {
'result': '<div class="dif-body"><div> <span class="dif-kO">[0]</span></div><div>- <span class="dif-vO">0</span></div><div>+ <span class="dif-vN">1</span></div></div>',
},
'lists_with_one_different_item_noN': {
'result': '<div class="dif-body"><div> <span class="dif-kO">[0]</span></div><div>- <span class="dif-vO">0</span></div></div>',
},
'lists_with_one_different_item_noO': {
'result': '<div class="dif-body"><div> <span class="dif-kN">[0]</span></div><div>+ <span class="dif-vN">1</span></div></div>',
},
'mixed_specific_structures': {
'result': '<div class="dif-body"><div> <span class="dif-kO">(0)</span></div><div>- <span class="dif-vO">()</span></div><div>+ <span class="dif-vN">frozenset()</span></div><div> <span class="dif-kD">(1)</span></div><div># <span class="dif-vE"><set></span></div><div>+ <span class="dif-vA">True</span></div></div>',
},
'nested_hashes': {
'result': '<div class="dif-body"><div>+ <span class="dif-kA">{'four'}</span></div><div>+ <span class="dif-vA">4</span></div><div> <span class="dif-kU">{'one'}</span></div><div> <span class="dif-vU">1</span></div><div>- <span class="dif-kR">{'three'}</span></div><div>- <span class="dif-vR">3</span></div><div> <span class="dif-kD">{'two'}</span></div><div> <span class="dif-kO">{'nine'}</span></div><div>- <span class="dif-vO">9</span></div><div>+ <span class="dif-vN">8</span></div><div> <span class="dif-kU">{'ten'}</span></div><div> <span class="dif-vU">10</span></div></div>',
},
'nested_hashes_noU': {
'result': '<div class="dif-body"><div>+ <span class="dif-kA">{'four'}</span></div><div>+ <span class="dif-vA">4</span></div><div>- <span class="dif-kR">{'three'}</span></div><div>- <span class="dif-vR">3</span></div><div> <span class="dif-kD">{'two'}</span></div><div> <span class="dif-kO">{'nine'}</span></div><div>- <span class="dif-vO">9</span></div><div>+ <span class="dif-vN">8</span></div></div>',
},
'nested_hashes_with_one_different_value': {
'result': '<div class="dif-body"><div> <span class="dif-kD">{'one'}</span></div><div> <span class="dif-kD">{'two'}</span></div><div> <span class="dif-kO">{'three'}</span></div><div>- <span class="dif-vO">3</span></div><div>+ <span class="dif-vN">4</span></div></div>',
},
'nested_hashes_with_one_equal_value': {
'result': '<div class="dif-body"><div> <span class="dif-vU">{'one': {'two': {'three': 3}}}</span></div></div>',
},
'nested_hashes_with_one_equal_value_noU': {
'result': '<div class="dif-body"></div>',
},
'nested_lists': {
'result': '<div class="dif-body"><div> <span class="dif-kU">[0]</span></div><div> <span class="dif-vU">0</span></div><div> <span class="dif-kU">[1]</span></div><div> <span class="dif-vU">[[100]]</span></div><div> <span class="dif-kD">[2]</span></div><div> <span class="dif-kU">[0]</span></div><div> <span class="dif-vU">20</span></div><div> <span class="dif-kO">[1]</span></div><div>- <span class="dif-vO">'30'</span></div><div>+ <span class="dif-vN">'31'</span></div><div> <span class="dif-kO">[3]</span></div><div>- <span class="dif-vO">4</span></div><div>+ <span class="dif-vN">5</span></div></div>',
},
'nested_lists_noU': {
'result': '<div class="dif-body"><div> <span class="dif-kD">[2]</span></div><div> <span class="dif-kO">[1]</span></div><div>- <span class="dif-vO">'30'</span></div><div>+ <span class="dif-vN">'31'</span></div><div> <span class="dif-kO">[3]</span></div><div>- <span class="dif-vO">4</span></div><div>+ <span class="dif-vN">5</span></div></div>',
},
'nested_lists_with_one_different_item': {
'result': '<div class="dif-body"><div> <span class="dif-kD">[0]</span></div><div> <span class="dif-kO">[0]</span></div><div>- <span class="dif-vO">0</span></div><div>+ <span class="dif-vN">1</span></div></div>',
},
'nested_lists_with_one_equal_item': {
'result': '<div class="dif-body"><div> <span class="dif-vU">[[0]]</span></div></div>',
},
'nested_lists_with_one_equal_item_noU': {
'result': '<div class="dif-body"></div>',
},
'nested_mixed_structures': {
'result': '<div class="dif-body"><div> <span class="dif-kD">{'one'}</span></div><div> <span class="dif-kD">[0]</span></div><div> <span class="dif-kD">{'two'}</span></div><div> <span class="dif-kD">{'three'}</span></div><div> <span class="dif-kU">[0]</span></div><div> <span class="dif-vU">7</span></div><div> <span class="dif-kO">[1]</span></div><div>- <span class="dif-vO">4</span></div><div>+ <span class="dif-vN">3</span></div><div> <span class="dif-kU">[1]</span></div><div> <span class="dif-vU">8</span></div></div>',
},
'nested_mixed_structures_noOU': {
'result': '<div class="dif-body"><div> <span class="dif-kD">{'one'}</span></div><div> <span class="dif-kD">[0]</span></div><div> <span class="dif-kD">{'two'}</span></div><div> <span class="dif-kD">{'three'}</span></div><div> <span class="dif-kN">[1]</span></div><div>+ <span class="dif-vN">3</span></div></div>',
},
'one_item_changed_in_the_middle_of_list': {
'result': '<div class="dif-body"><div> <span class="dif-kU">[0]</span></div><div> <span class="dif-vU">0</span></div><div> <span class="dif-kO">[1]</span></div><div>- <span class="dif-vO">1</span></div><div>+ <span class="dif-vN">9</span></div><div> <span class="dif-kU">[2]</span></div><div> <span class="dif-vU">2</span></div></div>',
},
'one_item_changed_in_the_middle_of_list_noN': {
'result': '<div class="dif-body"><div> <span class="dif-kU">[0]</span></div><div> <span class="dif-vU">0</span></div><div> <span class="dif-kO">[1]</span></div><div>- <span class="dif-vO">1</span></div><div> <span class="dif-kU">[2]</span></div><div> <span class="dif-vU">2</span></div></div>',
},
'one_item_changed_in_the_middle_of_list_noNO': {
'result': '<div class="dif-body"><div> <span class="dif-kU">[0]</span></div><div> <span class="dif-vU">0</span></div><div> <span class="dif-kU">[2]</span></div><div> <span class="dif-vU">2</span></div></div>',
},
'one_item_changed_in_the_middle_of_list_noO': {
'result': '<div class="dif-body"><div> <span class="dif-kU">[0]</span></div><div> <span class="dif-vU">0</span></div><div> <span class="dif-kN">[1]</span></div><div>+ <span class="dif-vN">9</span></div><div> <span class="dif-kU">[2]</span></div><div> <span class="dif-vU">2</span></div></div>',
},
'one_item_changed_in_the_middle_of_list_noU': {
'result': '<div class="dif-body"><div> <span class="dif-kO">[1]</span></div><div>- <span class="dif-vO">1</span></div><div>+ <span class="dif-vN">9</span></div></div>',
},
'one_item_inserted_in_the_middle_of_list': {
'result': '<div class="dif-body"><div> <span class="dif-kU">[0]</span></div><div> <span class="dif-vU">0</span></div><div>+ <span class="dif-kA">[1]</span></div><div>+ <span class="dif-vA">1</span></div><div> <span class="dif-kU">[2]</span></div><div> <span class="dif-vU">2</span></div></div>',
},
'one_item_inserted_in_the_middle_of_list_noA': {
'result': '<div class="dif-body"><div> <span class="dif-kU">[0]</span></div><div> <span class="dif-vU">0</span></div><div> <span class="dif-kU">[1]</span></div><div> <span class="dif-vU">2</span></div></div>',
},
'one_item_inserted_in_the_middle_of_list_noU': {
'result': '<div class="dif-body"><div>+ <span class="dif-kA">[1]</span></div><div>+ <span class="dif-vA">1</span></div></div>',
},
'one_item_popped_from_list': {
'result': '<div class="dif-body"><div> <span class="dif-kU">[0]</span></div><div> <span class="dif-vU">0</span></div><div>- <span class="dif-kR">[1]</span></div><div>- <span class="dif-vR">1</span></div></div>',
},
'one_item_popped_from_list_noU': {
'result': '<div class="dif-body"><div>- <span class="dif-kR">[1]</span></div><div>- <span class="dif-vR">1</span></div></div>',
},
'one_item_pushed_to_list': {
'result': '<div class="dif-body"><div> <span class="dif-kU">[0]</span></div><div> <span class="dif-vU">0</span></div><div>+ <span class="dif-kA">[1]</span></div><div>+ <span class="dif-vA">1</span></div></div>',
},
'one_item_pushed_to_list_noU': {
'result': '<div class="dif-body"><div>+ <span class="dif-kA">[1]</span></div><div>+ <span class="dif-vA">1</span></div></div>',
},
'one_item_removed_from_the_middle_of_list': {
'result': '<div class="dif-body"><div> <span class="dif-kU">[0]</span></div><div> <span class="dif-vU">0</span></div><div>- <span class="dif-kR">[1]</span></div><div>- <span class="dif-vR">1</span></div><div> <span class="dif-kU">[2]</span></div><div> <span class="dif-vU">2</span></div></div>',
},
'one_item_removed_from_the_middle_of_list_noR': {
'result': '<div class="dif-body"><div> <span class="dif-kU">[0]</span></div><div> <span class="dif-vU">0</span></div><div> <span class="dif-kU">[2]</span></div><div> <span class="dif-vU">2</span></div></div>',
},
'one_item_removed_from_the_middle_of_list_noU': {
'result': '<div class="dif-body"><div>- <span class="dif-kR">[1]</span></div><div>- <span class="dif-vR">1</span></div></div>',
},
'one_item_shifted_from_list': {
'result': '<div class="dif-body"><div>- <span class="dif-kR">[0]</span></div><div>- <span class="dif-vR">0</span></div><div> <span class="dif-kU">[1]</span></div><div> <span class="dif-vU">1</span></div></div>',
},
'one_item_shifted_from_list_noU': {
'result': '<div class="dif-body"><div>- <span class="dif-kR">[0]</span></div><div>- <span class="dif-vR">0</span></div></div>',
},
'one_item_unshifted_to_list': {
'result': '<div class="dif-body"><div>+ <span class="dif-kA">[0]</span></div><div>+ <span class="dif-vA">0</span></div><div> <span class="dif-kU">[1]</span></div><div> <span class="dif-vU">1</span></div></div>',
},
'one_item_unshifted_to_list_noU': {
'result': '<div class="dif-body"><div>+ <span class="dif-kA">[0]</span></div><div>+ <span class="dif-vA">0</span></div></div>',
},
'one_key_added_to_subhash': {
'result': '<div class="dif-body"><div> <span class="dif-kD">{'one'}</span></div><div>+ <span class="dif-kA">{'three'}</span></div><div>+ <span class="dif-vA">3</span></div><div> <span class="dif-kU">{'two'}</span></div><div> <span class="dif-vU">2</span></div></div>',
},
'one_key_added_to_subhash_noU': {
'result': '<div class="dif-body"><div> <span class="dif-kD">{'one'}</span></div><div>+ <span class="dif-kA">{'three'}</span></div><div>+ <span class="dif-vA">3</span></div></div>',
},
'one_key_removed_from_subhash': {
'result': '<div class="dif-body"><div> <span class="dif-kD">{'one'}</span></div><div>- <span class="dif-kR">{'three'}</span></div><div>- <span class="dif-vR">3</span></div><div> <span class="dif-kU">{'two'}</span></div><div> <span class="dif-vU">2</span></div></div>',
},
'one_key_removed_from_subhash_noU': {
'result': '<div class="dif-body"><div> <span class="dif-kD">{'one'}</span></div><div>- <span class="dif-kR">{'three'}</span></div><div>- <span class="dif-vR">3</span></div></div>',
},
'quote_symbols': {
'result': '<div class="dif-body"><div> <span class="dif-kO">{'"double"'}</span></div><div>- <span class="dif-vO">'""'</span></div><div>+ <span class="dif-vN">'"'</span></div><div> <span class="dif-kO">{"'single'"}</span></div><div>- <span class="dif-vO">"''"</span></div><div>+ <span class="dif-vN">"'"</span></div><div> <span class="dif-kO">{'`backticks`'}</span></div><div>- <span class="dif-vO">'``'</span></div><div>+ <span class="dif-vN">'`'</span></div></div>',
},
'redefined_depth': {
'result': '<div class="dif-body"><div>- <span class="dif-vO">0</span></div><div>+ <span class="dif-vN">1</span></div></div>',
},
'set_extended': {
'result': '<div class="dif-body"><div># <span class="dif-vE"><set></span></div><div> <span class="dif-vU">1</span></div><div>+ <span class="dif-vA">2</span></div></div>',
},
'sets_empty_diff': {
'result': '<div class="dif-body"></div>',
},
'sets_lcs': {
'result': '<div class="dif-body"><div># <span class="dif-vE"><set></span></div><div>- <span class="dif-vR">1</span></div><div> <span class="dif-vU">2</span></div><div>+ <span class="dif-vA">3</span></div></div>',
},
'sets_lcs_noAR': {
'result': '<div class="dif-body"><div># <span class="dif-vE"><set></span></div><div> <span class="dif-vU">2</span></div></div>',
},
'sets_lcs_noU': {
'result': '<div class="dif-body"><div># <span class="dif-vE"><set></span></div><div>- <span class="dif-vR">1</span></div><div>+ <span class="dif-vA">3</span></div></div>',
},
'sets_lcs_trimR': {
'result': '<div class="dif-body"><div># <span class="dif-vE"><set></span></div><div>- <span class="dif-vR">1</span></div><div> <span class="dif-vU">2</span></div><div>+ <span class="dif-vA">3</span></div></div>',
},
'simple_strings_in_text_mode': {
'result': '<div class="dif-body"><div>- <span class="dif-vO">'bar'</span></div><div>+ <span class="dif-vN">'baz'</span></div></div>',
},
'str_vs_bytes': {
'result': '<div class="dif-body"><div>- <span class="dif-vO">'a'</span></div><div>+ <span class="dif-vN">b'a'</span></div></div>',
},
'subhash_emptied': {
'result': '<div class="dif-body"><div> <span class="dif-kD">{'one'}</span></div><div>- <span class="dif-kR">{'two'}</span></div><div>- <span class="dif-vR">2</span></div></div>',
},
'subhash_emptied_noR': {
'result': '<div class="dif-body"></div>',
},
'subhash_filled': {
'result': '<div class="dif-body"><div> <span class="dif-kD">{'one'}</span></div><div>+ <span class="dif-kA">{'two'}</span></div><div>+ <span class="dif-vA">2</span></div></div>',
},
'subhash_filled_noA': {
'result': '<div class="dif-body"></div>',
},
'sublist_emptied': {
'result': '<div class="dif-body"><div> <span class="dif-kD">[0]</span></div><div>- <span class="dif-kR">[0]</span></div><div>- <span class="dif-vR">0</span></div></div>',
},
'sublist_emptied_noR': {
'result': '<div class="dif-body"></div>',
},
'sublist_filled': {
'result': '<div class="dif-body"><div> <span class="dif-kD">[0]</span></div><div>+ <span class="dif-kA">[0]</span></div><div>+ <span class="dif-vA">0</span></div></div>',
},
'sublist_filled_noA': {
'result': '<div class="dif-body"></div>',
},
'text_diff_disabled': {
'result': '<div class="dif-body"><div>- <span class="dif-vO">'A\\nB'</span></div><div>+ <span class="dif-vN">'B\\nC'</span></div></div>',
},
'text_equal': {
'result': '<div class="dif-body"></div>',
},
'text_lcs': {
'result': '<div class="dif-body"><div># <span class="dif-vE"><str></span></div><div> <span class="dif-kX0-0">@@ -1,3 +1,2 @@</span></div><div> <span class="dif-vU">A</span></div><div>- <span class="dif-vR">B</span></div><div> <span class="dif-vU">C</span></div></div>',
},
'text_line_added': {
'result': '<div class="dif-body"><div># <span class="dif-vE"><str></span></div><div> <span class="dif-kX0-0">@@ -1,2 +1,3 @@</span></div><div>+ <span class="dif-vA">A</span></div><div> <span class="dif-vU">B</span></div><div> <span class="dif-vU">C</span></div></div>',
},
'text_line_changed': {
'result': '<div class="dif-body"><div># <span class="dif-vE"><str></span></div><div> <span class="dif-kX0-0">@@ -1,3 +1,3 @@</span></div><div> <span class="dif-vU">A</span></div><div>- <span class="dif-vR">B</span></div><div>+ <span class="dif-vA">b</span></div><div> <span class="dif-vU">C</span></div></div>',
},
'text_line_changed_ctx_0': {
'result': '<div class="dif-body"><div># <span class="dif-vE"><str></span></div><div> <span class="dif-kX0-0">@@ -2 +2 @@</span></div><div>- <span class="dif-vR">B</span></div><div>+ <span class="dif-vA">b</span></div></div>',
},
'text_line_removed': {
'result': '<div class="dif-body"><div># <span class="dif-vE"><str></span></div><div> <span class="dif-kX0-0">@@ -1,3 +1,2 @@</span></div><div> <span class="dif-vU">A</span></div><div>- <span class="dif-vR">B</span></div><div> <span class="dif-vU">C</span></div></div>',
},
'text_multiple_hunks': {
'result': '<div class="dif-body"><div># <span class="dif-vE"><str></span></div><div> <span class="dif-kX0-0">@@ -1 +1 @@</span></div><div>+ <span class="dif-vA">A</span></div><div> <span class="dif-kX0-0">@@ -3 +4 @@</span></div><div>- <span class="dif-vR">C</span></div></div>',
},
'text_trailing_newlines': {
'result': '<div class="dif-body"><div># <span class="dif-vE"><str></span></div><div> <span class="dif-kX0-0">@@ -1,3 +1,3 @@</span></div><div> <span class="dif-vU">A</span></div><div>- <span class="dif-vR">B</span></div><div>+ <span class="dif-vA">b</span></div><div> <span class="dif-vU"></span></div></div>',
},
'text_vs_empty_string': {
'result': '<div class="dif-body"><div># <span class="dif-vE"><str></span></div><div> <span class="dif-kX0-0">@@ -1,2 +1 @@</span></div><div>- <span class="dif-vR">A</span></div><div>- <span class="dif-vR">B</span></div><div>+ <span class="dif-vA"></span></div></div>',
},
'tuple_extended': {
'result': '<div class="dif-body"><div> <span class="dif-kU">(0)</span></div><div> <span class="dif-vU">1</span></div><div>+ <span class="dif-kA">(1)</span></div><div>+ <span class="dif-vA">2</span></div></div>',
},
'tuples_lcs': {
'result': '<div class="dif-body"><div>+ <span class="dif-kA">(0)</span></div><div>+ <span class="dif-vA">0</span></div><div> <span class="dif-kU">(1)</span></div><div> <span class="dif-vU">1</span></div><div> <span class="dif-kU">(2)</span></div><div> <span class="dif-vU">2</span></div><div> <span class="dif-kO">(3)</span></div><div>- <span class="dif-vO">4</span></div><div>+ <span class="dif-vN">3</span></div><div>- <span class="dif-kR">(4)</span></div><div>- <span class="dif-vR">5</span></div></div>',
},
'tuples_lcs_noOU': {
'result': '<div class="dif-body"><div>+ <span class="dif-kA">(0)</span></div><div>+ <span class="dif-vA">0</span></div><div> <span class="dif-kN">(2)</span></div><div>+ <span class="dif-vN">3</span></div><div>- <span class="dif-kR">(3)</span></div><div>- <span class="dif-vR">5</span></div></div>',
},
'type_hints_disabled': {
'result': '<div class="dif-body"><div> <span class="dif-kX0-0">@@ -1,2 +1,2 @@</span></div><div>- <span class="dif-vR">two</span></div><div>+ <span class="dif-vA">2</span></div><div> <span class="dif-vU">lines</span></div></div>',
},
'undef_vs_0': {
'result': '<div class="dif-body"><div>- <span class="dif-vO">None</span></div><div>+ <span class="dif-vN">0</span></div></div>',
},
'undef_vs_empty_hash': {
'result': '<div class="dif-body"><div>- <span class="dif-vO">None</span></div><div>+ <span class="dif-vN">{}</span></div></div>',
},
'undef_vs_empty_hash_noNO': {
'result': '<div class="dif-body"></div>',
},
'undef_vs_empty_list': {
'result': '<div class="dif-body"><div>- <span class="dif-vO">None</span></div><div>+ <span class="dif-vN">[]</span></div></div>',
},
'undef_vs_empty_string': {
'result': '<div class="dif-body"><div>- <span class="dif-vO">None</span></div><div>+ <span class="dif-vN">''</span></div></div>',
},
'undef_vs_negative_number': {
'result': '<div class="dif-body"><div>- <span class="dif-vO">None</span></div><div>+ <span class="dif-vN">-1</span></div></div>',
},
'undef_vs_undef': {
'result': '<div class="dif-body"><div> <span class="dif-vU">None</span></div></div>',
},
'wrapping_text': {
'result': 'Header\n<div class="dif-body"><div>- <span class="dif-vO">0</span></div><div>+ <span class="dif-vN">1</span></div></div>Footer\n',
},
}
if __name__ == '__main__':
    # With CLI args, dump only the named results; with none, dump every
    # stored result in sorted name order.
    names = sys.argv[1:] if len(sys.argv) > 1 else sorted(RESULTS.keys())
    # Print a banner per entry only when more than one result is shown.
    headers = len(names) > 1
    for name in names:
        if headers:
            print('========== ' + name + ' ==========')
        # Results already carry their own trailing newlines, hence end=''.
        print(RESULTS[name].get('result', None), end='')
| 92.981818
| 1,520
| 0.553065
| 6,514
| 40,912
| 3.394688
| 0.030396
| 0.258671
| 0.312576
| 0.39072
| 0.96432
| 0.953602
| 0.944286
| 0.931172
| 0.891286
| 0.858997
| 0
| 0.029418
| 0.128422
| 40,912
| 439
| 1,521
| 93.193622
| 0.590723
| 0.00088
| 0
| 0.074419
| 1
| 0.286047
| 0.879658
| 0.685321
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.002326
| 0
| 0.002326
| 0.004651
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 14
|
be7fbaaf77f74cf4104f93c1f363817f35bc033d
| 96
|
py
|
Python
|
mhz19.py
|
watanabemasahiro128/IoTManager
|
4d5e665f9d5f06c073e0b4c12b3909dba9f62fed
|
[
"MIT"
] | null | null | null |
mhz19.py
|
watanabemasahiro128/IoTManager
|
4d5e665f9d5f06c073e0b4c12b3909dba9f62fed
|
[
"MIT"
] | null | null | null |
mhz19.py
|
watanabemasahiro128/IoTManager
|
4d5e665f9d5f06c073e0b4c12b3909dba9f62fed
|
[
"MIT"
] | null | null | null |
import mh_z19
def measure_co2():
    """Return the current CO2 concentration read from the MH-Z19 sensor.

    Delegates to mh_z19.read() and extracts the 'co2' field (ppm).
    serial_console_untouched=True leaves the Raspberry Pi serial console
    configuration alone while reading the UART.

    Raises:
        TypeError/KeyError: if the sensor read fails (mh_z19.read may
        return None or a dict without 'co2' on failure — verify against
        the mh-z19 package docs).
    """
    return mh_z19.read(serial_console_untouched=True)["co2"]
| 16
| 60
| 0.760417
| 15
| 96
| 4.533333
| 0.8
| 0.147059
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.071429
| 0.125
| 96
| 5
| 61
| 19.2
| 0.738095
| 0
| 0
| 0
| 0
| 0
| 0.03125
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 1
| 1
| 0
|
0
| 7
|
fe97c42030f52c8470c45589816580a15f49c4db
| 19,402
|
py
|
Python
|
metrixpp/tests/general/test_basic.py
|
Reisz/metrixplusplus
|
73380a36c4c44d83973c8411672c56a9c951e507
|
[
"MIT"
] | 52
|
2019-04-03T00:12:26.000Z
|
2022-02-24T13:23:28.000Z
|
metrixpp/tests/general/test_basic.py
|
Reisz/metrixplusplus
|
73380a36c4c44d83973c8411672c56a9c951e507
|
[
"MIT"
] | 44
|
2019-04-03T04:32:13.000Z
|
2022-03-06T06:47:37.000Z
|
metrixpp/tests/general/test_basic.py
|
dxworks/metrixplusplus
|
b2b4043446b680a1b9f68b05bf47171e99792b42
|
[
"MIT"
] | 25
|
2019-04-03T06:45:34.000Z
|
2022-02-24T10:50:04.000Z
|
#
# Metrix++, Copyright 2009-2019, Metrix++ Project
# Link: https://github.com/metrixplusplus/metrixplusplus
#
# This file is a part of Metrix++ Tool.
#
import unittest
import tests.common
class Test(tests.common.TestCase):
    """Integration tests for the Metrix++ command-line tools.

    Each test drives the real CLI via tests.common.ToolRunner and compares
    captured output against stored golden files (prefix selects the golden
    set; save_prev/use_prev share the collected database between runs).
    """

    def test_workflow(self):
        """Full pipeline: collect a baseline, re-collect changed sources,
        then exercise view/limit/info/export in every scope/warn mode."""
        # first collection (baseline, database saved for later runs)
        runner = tests.common.ToolRunner('collect',
                                         ['--std.code.complexity.cyclomatic',
                                          '--std.code.lines.total',
                                          '--std.code.lines.code',
                                          '--std.code.lines.preprocessor',
                                          '--std.code.lines.comments',
                                          '--std.suppress',
                                          '--log-level=INFO'],
                                         check_stderr=[(0, -1)],
                                         save_prev=True)
        self.assertExec(runner.run())
        runner = tests.common.ToolRunner('view',
                                         ['--log-level=INFO', '--format=xml'],
                                         check_stderr=[(0, -1)])
        self.assertExec(runner.run())
        # limit: every region exceeds cyclomatic limit 0 -> exit code 8
        runner = tests.common.ToolRunner('limit',
                                         ['--log-level=INFO',
                                          '--max-limit=std.code.complexity:cyclomatic:0'],
                                         check_stderr=[(0, -1)],
                                         exit_code=8)
        self.assertExec(runner.run())
        runner = tests.common.ToolRunner('info',
                                         ['--log-level=INFO'],
                                         check_stderr=[(0, -1)],
                                         exit_code=0)
        self.assertExec(runner.run())
        runner = tests.common.ToolRunner('export',
                                         ['--log-level=INFO'],
                                         check_stderr=[(0, -1)])
        self.assertExec(runner.run())
        # second collection (modified sources, compared against baseline)
        runner = tests.common.ToolRunner('collect',
                                         ['--std.code.complexity.cyclomatic',
                                          '--std.code.lines.total',
                                          '--std.code.lines.code',
                                          '--std.code.lines.preprocessor',
                                          '--std.code.lines.comments',
                                          '--std.suppress',
                                          '--log-level=INFO'],
                                         check_stderr=[(0, -1)],
                                         prefix='second',
                                         cwd="sources_changed",
                                         use_prev=True)
        self.assertExec(runner.run())
        runner = tests.common.ToolRunner('view',
                                         ['--log-level=INFO', '--format=xml'],
                                         check_stderr=[(0, -1)],
                                         prefix='second',
                                         use_prev=True)
        self.assertExec(runner.run())
        runner = tests.common.ToolRunner('view',
                                         ['--log-level=INFO', '--format=xml'],
                                         check_stderr=[(0, -1)],
                                         prefix='second_per_file',
                                         dirs_list=['./simple.cpp'],
                                         use_prev=True)
        self.assertExec(runner.run())
        # txt views across all three --scope-mode values, whole-project
        # and per-file variants
        runner = tests.common.ToolRunner('view',
                                         ['--log-level=INFO', '--scope-mode=all'],
                                         check_stderr=[(0, -1)],
                                         prefix='second_txt_all',
                                         use_prev=True)
        self.assertExec(runner.run())
        runner = tests.common.ToolRunner('view',
                                         ['--log-level=INFO', '--scope-mode=all'],
                                         check_stderr=[(0, -1)],
                                         prefix='second_per_file_txt_all',
                                         dirs_list=['./simple.cpp'],
                                         use_prev=True)
        self.assertExec(runner.run())
        runner = tests.common.ToolRunner('view',
                                         ['--log-level=INFO', '--scope-mode=touched'],
                                         check_stderr=[(0, -1)],
                                         prefix='second_txt_touched',
                                         use_prev=True)
        self.assertExec(runner.run())
        runner = tests.common.ToolRunner('view',
                                         ['--log-level=INFO', '--scope-mode=touched'],
                                         check_stderr=[(0, -1)],
                                         prefix='second_per_file_txt_touched',
                                         dirs_list=['./simple.cpp'],
                                         use_prev=True)
        self.assertExec(runner.run())
        runner = tests.common.ToolRunner('view',
                                         ['--log-level=INFO', '--scope-mode=new'],
                                         check_stderr=[(0, -1)],
                                         prefix='second_txt_new',
                                         use_prev=True)
        self.assertExec(runner.run())
        runner = tests.common.ToolRunner('view',
                                         ['--log-level=INFO', '--scope-mode=new'],
                                         check_stderr=[(0, -1)],
                                         prefix='second_per_file_txt_new',
                                         dirs_list=['./simple.cpp'],
                                         use_prev=True)
        self.assertExec(runner.run())
        # limit in each --warn-mode; expected exit codes shrink as the
        # warning scope narrows (default 6, all 6, touched 4, trend 3, new 2)
        runner = tests.common.ToolRunner('limit',
                                         ['--log-level=INFO',
                                          '--max-limit=std.code.complexity:cyclomatic:0'],
                                         check_stderr=[(0, -1)],
                                         exit_code=6,
                                         prefix='second',
                                         use_prev=True)
        self.assertExec(runner.run())
        runner = tests.common.ToolRunner('limit',
                                         ['--log-level=INFO',
                                          '--max-limit=std.code.complexity:cyclomatic:0',
                                          '--warn-mode=all'],
                                         check_stderr=[(0, -1)],
                                         exit_code=6,
                                         prefix='second_warn_all',
                                         use_prev=True)
        self.assertExec(runner.run())
        runner = tests.common.ToolRunner('limit',
                                         ['--log-level=INFO',
                                          '--max-limit=std.code.complexity:cyclomatic:0',
                                          '--warn-mode=touched'],
                                         check_stderr=[(0, -1)],
                                         exit_code=4,
                                         prefix='second_warn_touched',
                                         use_prev=True)
        self.assertExec(runner.run())
        runner = tests.common.ToolRunner('limit',
                                         ['--log-level=INFO',
                                          '--max-limit=std.code.complexity:cyclomatic:0',
                                          '--warn-mode=trend'],
                                         check_stderr=[(0, -1)],
                                         exit_code=3,
                                         prefix='second_warn_trend',
                                         use_prev=True)
        self.assertExec(runner.run())
        runner = tests.common.ToolRunner('limit',
                                         ['--log-level=INFO',
                                          '--max-limit=std.code.complexity:cyclomatic:0',
                                          '--warn-mode=new'],
                                         check_stderr=[(0, -1)],
                                         exit_code=2,
                                         prefix='second_warn_new',
                                         use_prev=True)
        self.assertExec(runner.run())
        runner = tests.common.ToolRunner('info',
                                         ['--log-level=INFO'],
                                         check_stderr=[(0, -1)],
                                         prefix='second',
                                         use_prev=True)
        self.assertExec(runner.run())
        runner = tests.common.ToolRunner('export',
                                         ['--log-level=INFO'],
                                         check_stderr=[(0, -1)],
                                         prefix='second',
                                         use_prev=True)
        self.assertExec(runner.run())

    def test_help(self):
        """--help works for every tool; an unknown tool exits with code 2."""
        runner = tests.common.ToolRunner('--help')
        self.assertExec(runner.run())
        runner = tests.common.ToolRunner('unknown', exit_code=2)
        self.assertExec(runner.run())
        runner = tests.common.ToolRunner('collect', ['--help'])
        self.assertExec(runner.run())
        runner = tests.common.ToolRunner('info', ['--help'])
        self.assertExec(runner.run())
        runner = tests.common.ToolRunner('view', ['--help'])
        self.assertExec(runner.run())
        runner = tests.common.ToolRunner('limit', ['--help'])
        self.assertExec(runner.run())
        runner = tests.common.ToolRunner('export', ['--help'])
        self.assertExec(runner.run())

    def test_view_format(self):
        """Each output --format of 'view', plus --nest-regions output."""
        # note: --scope-mode is tested in workflow test above
        runner = tests.common.ToolRunner('collect', ['--std.code.complexity.cyclomatic'], save_prev=True)
        self.assertExec(runner.run())
        runner = tests.common.ToolRunner('view', ['--format=txt'], prefix='txt')
        self.assertExec(runner.run())
        runner = tests.common.ToolRunner('view', ['--format=python'], prefix='python')
        self.assertExec(runner.run())
        runner = tests.common.ToolRunner('view', ['--format=xml'], prefix='xml')
        self.assertExec(runner.run())
        runner = tests.common.ToolRunner('view', ['--format=prometheus', '--log-level=ERROR'], prefix='prometheus')
        self.assertExec(runner.run())
        runner = tests.common.ToolRunner('view',
                                         ['--format=prometheus', '--log-level=ERROR'],
                                         prefix='prometheus_simple.cpp',
                                         dirs_list=['./simple.cpp'])
        self.assertExec(runner.run())
        runner = tests.common.ToolRunner('collect',
                                         ['--std.code.complexity.cyclomatic'],
                                         prefix='nest',
                                         cwd="sources_changed",
                                         use_prev=True)
        self.assertExec(runner.run())
        runner = tests.common.ToolRunner('view',
                                         ['--nest-regions', '--format=xml'],
                                         prefix='nest',
                                         use_prev=True)
        self.assertExec(runner.run())
        runner = tests.common.ToolRunner('view',
                                         ['--nest-regions', '--format=xml'],
                                         prefix='nest_per_file',
                                         dirs_list=['./simple.cpp'],
                                         use_prev=True)
        self.assertExec(runner.run())

    def test_std_general_metrics(self):
        """std.general.* metrics: size, processing errors, processing time."""
        runner = tests.common.ToolRunner('collect',
                                         ['--std.general.size',
                                          '--std.general.procerrors',
                                          '--std.general.proctime'])
        self.assertExec(runner.run())
        runner = tests.common.ToolRunner('view', ['--format=txt'], prefix='txt')
        self.assertExec(runner.run())
        runner = tests.common.ToolRunner('view',
                                         ['--nest-regions', '--format=txt'],
                                         prefix='nest_per_file',
                                         dirs_list=['./simple.cpp'])
        self.assertExec(runner.run())

    def test_std_lines_metrics(self):
        """std.code.lines.* per-region line-count metrics."""
        runner = tests.common.ToolRunner('collect',
                                         ['--std.code.lines.code',
                                          '--std.code.lines.preprocessor',
                                          '--std.code.lines.comments',
                                          '--std.code.lines.total'])
        self.assertExec(runner.run())
        runner = tests.common.ToolRunner('view',
                                         ['--nest-regions', '--format=txt'],
                                         prefix='nest_per_file',
                                         dirs_list=['./simple.cpp'])
        self.assertExec(runner.run())
        runner = tests.common.ToolRunner('view', ['--format=txt'], prefix='txt')
        self.assertExec(runner.run())

    def test_std_filelines_metrics(self):
        """std.code.filelines.* per-file line-count metrics."""
        runner = tests.common.ToolRunner('collect',
                                         ['--std.code.filelines.code',
                                          '--std.code.filelines.preprocessor',
                                          '--std.code.filelines.comments',
                                          '--std.code.filelines.total'])
        self.assertExec(runner.run())
        runner = tests.common.ToolRunner('view',
                                         ['--nest-regions', '--format=txt'],
                                         prefix='nest_per_file',
                                         dirs_list=['./simple.cpp'])
        self.assertExec(runner.run())
        runner = tests.common.ToolRunner('view', ['--format=txt'], prefix='txt')
        self.assertExec(runner.run())

    def test_std_longlines_metrics(self):
        """std.code.longlines metric with a custom 50-column limit."""
        runner = tests.common.ToolRunner('collect',
                                         ['--std.code.longlines',
                                          '--std.code.longlines.limit=50'])
        self.assertExec(runner.run())
        runner = tests.common.ToolRunner('view',
                                         ['--nest-regions', '--format=txt'],
                                         prefix='nest_per_file',
                                         dirs_list=['./simple.cpp'])
        self.assertExec(runner.run())
        runner = tests.common.ToolRunner('view', ['--format=txt'], prefix='txt')
        self.assertExec(runner.run())

    def test_std_complexity_maxindent(self):
        """std.code.complexity.maxindent metric."""
        runner = tests.common.ToolRunner('collect',
                                         ['--std.code.complexity.maxindent'])
        self.assertExec(runner.run())
        runner = tests.common.ToolRunner('view',
                                         ['--nest-regions'],
                                         prefix='nest_per_file',
                                         dirs_list=['./simple.cpp'])
        self.assertExec(runner.run())
        runner = tests.common.ToolRunner('view')
        self.assertExec(runner.run())

    def test_std_code_magic(self):
        """std.code.magic.numbers metric, with and without the 'simplier' filter."""
        runner = tests.common.ToolRunner('collect',
                                         ['--std.code.magic.numbers'])
        self.assertExec(runner.run())
        runner = tests.common.ToolRunner('view',
                                         ['--nest-regions'],
                                         prefix='nest_per_file',
                                         dirs_list=['./simple.cpp'])
        self.assertExec(runner.run())
        runner = tests.common.ToolRunner('view')
        self.assertExec(runner.run())
        runner = tests.common.ToolRunner('collect',
                                         ['--std.code.magic.numbers', '--std.code.magic.numbers.simplier'],
                                         prefix='nozeros',)
        self.assertExec(runner.run())
        runner = tests.common.ToolRunner('view',
                                         ['--nest-regions'],
                                         prefix='nozeros_nest_per_file',
                                         dirs_list=['./simple.cpp'])
        self.assertExec(runner.run())
        runner = tests.common.ToolRunner('view', prefix='nozeros')
        self.assertExec(runner.run())

    def test_std_member_metrics(self):
        """std.code.member.* declaration-count metrics."""
        runner = tests.common.ToolRunner('collect',
                                         ['--std.code.member.fields',
                                          '--std.code.member.globals',
                                          '--std.code.member.classes',
                                          '--std.code.member.structs',
                                          '--std.code.member.interfaces',
                                          '--std.code.member.types',
                                          '--std.code.member.methods',
                                          '--std.code.member.namespaces'])
        self.assertExec(runner.run())
        runner = tests.common.ToolRunner('view',
                                         ['--nest-regions', '--format=txt'],
                                         prefix='nest_per_file',
                                         dirs_list=['./simple.cpp'])
        self.assertExec(runner.run())
        runner = tests.common.ToolRunner('view', ['--format=txt'], prefix='txt')
        self.assertExec(runner.run())

    def test_std_maintindex(self):
        """std.code.maintindex.simple (depends on cyclomatic + lines.code)."""
        runner = tests.common.ToolRunner('collect',
                                         ['--std.code.complexity.cyclomatic',
                                          '--std.code.lines.code',
                                          '--std.code.maintindex.simple',
                                          '--log-level=INFO'],
                                         check_stderr=[(0, -1)],
                                         save_prev=True)
        self.assertExec(runner.run())
        runner = tests.common.ToolRunner('view', ['--format=txt'], prefix='txt')
        self.assertExec(runner.run())
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| 47.553922
| 116
| 0.385991
| 1,417
| 19,402
| 5.178546
| 0.085392
| 0.097438
| 0.145953
| 0.231807
| 0.879667
| 0.874898
| 0.85432
| 0.845462
| 0.835241
| 0.72036
| 0
| 0.006883
| 0.490774
| 19,402
| 407
| 117
| 47.670762
| 0.73583
| 0.012421
| 0
| 0.7625
| 0
| 0
| 0.175231
| 0.073742
| 0
| 0
| 0
| 0
| 0.196875
| 1
| 0.034375
| false
| 0
| 0.00625
| 0
| 0.04375
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
fe15e93349023fe40d1f577b210ca18ba9ea584c
| 48,600
|
py
|
Python
|
A-star/L_sprit.py
|
SP2LC/procon25-main
|
7f17dc882c2e33455651e672fca3c486c2f56bde
|
[
"Apache-2.0"
] | 1
|
2015-04-19T03:56:57.000Z
|
2015-04-19T03:56:57.000Z
|
A-star/L_sprit.py
|
SP2LC/procon25-main
|
7f17dc882c2e33455651e672fca3c486c2f56bde
|
[
"Apache-2.0"
] | null | null | null |
A-star/L_sprit.py
|
SP2LC/procon25-main
|
7f17dc882c2e33455651e672fca3c486c2f56bde
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
def make_problem(w, h):
    """Build a solved w-by-h board where cell [i][j] holds its own coordinate.

    Returns a list of w rows; each row is a list of h (i, j) tuples, so the
    board is its own solution key.
    """
    arr = []
    for i in range(w):
        column = []
        for j in range(h):
            column.append((i, j))
        arr.append(column)
    return arr
def transpose(arr2d):
    """Return the transpose of a rectangular 2-D list.

    (Original comment: 転置した2次元配列を返す — "returns the transposed
    2-D array".)  Assumes arr2d is non-empty and rectangular.
    """
    result = []
    for i in range(len(arr2d[0])):
        arr = []
        for j in range(len(arr2d)):
            arr.append(arr2d[j][i])
        result.append(arr)
    return result
def L_exchange(board, selection_positon, exchange_positon):
    """Swap the cells at selection_positon and exchange_positon.

    Mutates `board` in place and also returns it (callers rebind the
    returned value).  Positions are (row, col) tuples.
    """
    si, sj = selection_positon
    ei, ej = exchange_positon
    # Tuple assignment replaces the original temp-variable three-step swap.
    board[si][sj], board[ei][ej] = board[ei][ej], board[si][sj]
    return board
def check_matrix(matrix_A,matrix_B,selection_positon):
ok_count = 0
no_count = 0
for i in range(len(matrix_A)):
print ""
for j in range(len(matrix_A[0])):
if matrix_A[i][j] == matrix_B[i][j]:
print "OK ",
ok_count += 1
else:
if selection_positon == (i,j):
print "SL ",
else:
print "FF ",
no_count += 1
print ""
print " 一致マス数",ok_count
print "不一致マス数",no_count
def position_up(board,selection_positon,answer_text):
print "want to up",
i,j = selection_positon
new_board = L_exchange(board,(i,j),(i-1,j))
new_answer_text = answer_text + "U"
new_selection_position = (i-1,j)
print "selection_positon U ",selection_positon," -> ",new_selection_position
return new_board,new_selection_position,new_answer_text
def position_down(board,selection_positon,answer_text):
print "want to down",
i,j = selection_positon
new_board = L_exchange(board,(i,j),(i+1,j))
new_answer_text = answer_text + "D"
new_selection_position = (i+1,j)
print "selection_positon D ",selection_positon," -> ",new_selection_position
return new_board,new_selection_position,new_answer_text
def position_right(board,selection_positon,answer_text):
print "want to right",
i,j = selection_positon
new_board = L_exchange(board,(i,j),(i,j+1))
new_answer_text = answer_text + "R"
new_selection_position = (i,j+1)
print "selection_positon R ",selection_positon," -> ",new_selection_position
return new_board,new_selection_position,new_answer_text
def position_left(board,selection_positon,answer_text):
print "want to left",
i,j = selection_positon
new_board = L_exchange(board,(i,j),(i,j-1))
new_answer_text = answer_text + "L"
new_selection_position = (i,j-1)
print "selection_positon L ",selection_positon," -> ",new_selection_position
return new_board,new_selection_position,new_answer_text
def search(board, selection):
    """Return the (row, col) position of `selection` on the board.

    Falls through and implicitly returns None when the value is absent.
    """
    for i in range(len(board)):
        for j in range(len(board[0])):
            if board[i][j] == selection:
                return (i, j)
def purpose_position_up(board, selection_positon, answer_text):
    """Rotate the selection around the target piece so the target moves up.

    Original comment: purposeの下からスタート — "starts from below the
    target piece".  Five primitive moves: R, U, U, L, D.
    """
    board, selection_positon, answer_text = position_right(board, selection_positon, answer_text)
    board, selection_positon, answer_text = position_up(board, selection_positon, answer_text)
    board, selection_positon, answer_text = position_up(board, selection_positon, answer_text)
    board, selection_positon, answer_text = position_left(board, selection_positon, answer_text)
    board, selection_positon, answer_text = position_down(board, selection_positon, answer_text)
    return board, selection_positon, answer_text

def purpose_position_right(board, selection_positon, answer_text):
    """Rotate the selection around the target so the target moves right
    (D, R, R, U, L)."""
    board, selection_positon, answer_text = position_down(board, selection_positon, answer_text)
    board, selection_positon, answer_text = position_right(board, selection_positon, answer_text)
    board, selection_positon, answer_text = position_right(board, selection_positon, answer_text)
    board, selection_positon, answer_text = position_up(board, selection_positon, answer_text)
    board, selection_positon, answer_text = position_left(board, selection_positon, answer_text)
    return board, selection_positon, answer_text

def purpose_position_left(board, selection_positon, answer_text):
    """Rotate the selection around the target so the target moves left
    (D, L, L, U, R)."""
    board, selection_positon, answer_text = position_down(board, selection_positon, answer_text)
    board, selection_positon, answer_text = position_left(board, selection_positon, answer_text)
    board, selection_positon, answer_text = position_left(board, selection_positon, answer_text)
    board, selection_positon, answer_text = position_up(board, selection_positon, answer_text)
    board, selection_positon, answer_text = position_right(board, selection_positon, answer_text)
    return board, selection_positon, answer_text
def encode_perfect_answer(LRUD_text):
    """One compression pass over a move string: drop the FIRST character of
    each adjacent canceling pair (LR, RL, UD, DU).

    The last character of the input is always appended, so the second half
    of a canceling pair survives a single pass; loop_encode_text() applies
    this repeatedly until a fixed point.  Requires len(LRUD_text) >= 2.
    """
    ans_LRUD = ""
    i = 0
    while (1):
        text = LRUD_text[i] + LRUD_text[i + 1]
        if text == "LR" or text == "RL" or text == "UD" or text == "DU":
            # Canceling pair: skip this character (its partner is handled
            # on the next pass).
            i += 1
        else:
            ans_LRUD = ans_LRUD + LRUD_text[i]
            i += 1
        if i > len(LRUD_text) - 2:
            break
    ans_LRUD = ans_LRUD + LRUD_text[len(LRUD_text) - 1]
    return ans_LRUD
def loop_encode_text(LRUD_text):
    """Repeatedly apply encode_perfect_answer until the text stops changing.

    NOTE(review): encode_perfect_answer indexes positions 0 and 1, so an
    intermediate result shorter than 2 characters would raise IndexError —
    presumably the move generator never produces fully-canceling sequences.
    """
    while (1):
        old_text = LRUD_text
        LRUD_text = encode_perfect_answer(LRUD_text)
        if old_text == LRUD_text:
            return LRUD_text
def transpose_operations(LRUD_text):
    """Map a move string onto the transposed board: R<->D and L<->U.

    Used together with transpose() so a solver written for one orientation
    can emit answers for the other.
    """
    answer_text = ""
    for i in range(len(LRUD_text)):
        if LRUD_text[i] == "R":
            answer_text += "D"
        if LRUD_text[i] == "L":
            answer_text += "U"
        if LRUD_text[i] == "U":
            answer_text += "L"
        if LRUD_text[i] == "D":
            answer_text += "R"
    return answer_text
def rotation(matrix):
    """Return the matrix rotated 180 degrees (rows and columns both reversed).

    Assumes a non-empty rectangular matrix.
    """
    ans_matrix = []
    # range() instead of the Python-2-only xrange(): identical behavior
    # under reversed() in Python 2 and keeps the function Python-3 ready.
    for i in reversed(range(len(matrix))):
        temp = []
        for j in reversed(range(len(matrix[0]))):
            temp.append(matrix[i][j])
        ans_matrix.append(temp)
    return ans_matrix
def rotation_operations(LRUD_text):
    """Map a move string onto the 180-degree rotated board: R<->L and U<->D.

    Counterpart of rotation(); every move flips to its opposite direction.
    """
    answer_text = ""
    for i in range(len(LRUD_text)):
        if LRUD_text[i] == "R":
            answer_text += "L"
        if LRUD_text[i] == "L":
            answer_text += "R"
        if LRUD_text[i] == "U":
            answer_text += "D"
        if LRUD_text[i] == "D":
            answer_text += "U"
    return answer_text
def move(pi,pj,i,j,problem,selection_positon,answer_text,answer):
purpose = answer[i][j]
purpose_positon = search(problem,purpose)
p_to_pp_dis = (pi - purpose_positon[0],pj - purpose_positon[1])
s_to_p_dis = (purpose_positon[0] - selection_positon[0],purpose_positon[1] - selection_positon[1])
#print "目的ピース",purpose,"目的地",(pi,pj),"目的ピースポジション",purpose_positon,"目的ピースから目的地までの距離",p_to_pp_dis
#print "s_to_p","選択ピース位置",selection_positon,"選択ピースから目的ピースまでの距離",s_to_p_dis
height = len(problem)-1
width = len(problem[0])-1
flg = False#目的ピースの位置判定で排他的になる用
exception = False
# すでに目的地に目的ピースがいる場合
if p_to_pp_dis[0] == 0 and p_to_pp_dis[1] == 0:
#print "すでに目的地にいる"
return (problem,selection_positon,answer_text)
#目的ピースの位置判定
if flg == False and purpose_positon[1] == 0 and purpose_positon[0] != height :#目的ピースが左端にあって左下角ではない
flg = True
if purpose_positon[0] == 0:#目的ピースが左上角にあったとき(このif文に入ることはない)
print "入った!すごい!プログラムミスだ!"
else :#目的ピースが左端にあったとき
if p_to_pp_dis[1] == 0:#真上に行きたい(=目的ピースの下に回りこんで上に上げる)
if s_to_p_dis[1] == 0:#目的ピースの真上(真下)に選択ピースがあったとき
problem,selection_positon,answer_text = position_right(problem,selection_positon,answer_text)
if s_to_p_dis[0] == 0:#選択ピースが目的ピースと同じ高さにあるときにあるとき
problem,selection_positon,answer_text = position_down(problem,selection_positon,answer_text)
if s_to_p_dis[0] > 0:#選択ピースが目的ピースの上側にある
for n in range(abs(s_to_p_dis[0]) + 1):
problem,selection_positon,answer_text = position_down(problem,selection_positon,answer_text)
if s_to_p_dis[0] < 0:#選択ピースが目的ピースの側下にある
for n in range(abs(s_to_p_dis[0]) - 1):
problem,selection_positon,answer_text = position_up(problem,selection_positon,answer_text)
if s_to_p_dis[1] == 0:#目的ピースの真上に選択ピースがあった時は右に一つ動かしたので左に一つ動かす
problem,selection_positon,answer_text = position_left(problem,selection_positon,answer_text)
else:
for n in range(abs(s_to_p_dis[1])):
problem,selection_positon,answer_text = position_left(problem,selection_positon,answer_text)
for n in range(abs(p_to_pp_dis[0])):#目的ピースを上に動かす
problem,selection_positon,answer_text = purpose_position_up(problem,selection_positon,answer_text)
else:#真上以外に行きたいとき
if s_to_p_dis[1] == 0:#目的ピースの真上(真下)に選択ピースがあったとき
problem,selection_positon,answer_text = position_right(problem,selection_positon,answer_text)
if s_to_p_dis[0] > 0:#選択ピースが目的ピースの上側にある
for n in range(abs(s_to_p_dis[0])):
problem,selection_positon,answer_text = position_down(problem,selection_positon,answer_text)
if s_to_p_dis[0] < 0:#選択ピースが目的ピースの側下にある
for n in range(abs(s_to_p_dis[0])):
problem,selection_positon,answer_text = position_up(problem,selection_positon,answer_text)
if s_to_p_dis[1] == 0:
problem,selection_positon,answer_text = position_left(problem,selection_positon,answer_text)
else:
for n in range(abs(s_to_p_dis[1])):
problem,selection_positon,answer_text = position_left(problem,selection_positon,answer_text)
for n in range(abs(p_to_pp_dis[1])-1):
problem,selection_positon,answer_text = purpose_position_right(problem,selection_positon,answer_text)
problem,selection_positon,answer_text = position_down(problem,selection_positon,answer_text)
problem,selection_positon,answer_text = position_right(problem,selection_positon,answer_text)
for n in range(abs(p_to_pp_dis[0])):
problem,selection_positon,answer_text = purpose_position_up(problem,selection_positon,answer_text)
if flg == False and purpose_positon[1] == width and purpose_positon[0] != height :#目的ピースが右端にあって左角ではない
flg = True
if purpose_positon[0] == 0:#目的ピースが右上角にあったとき(このif文は入ると思う)
if s_to_p_dis[1] == 0:#目的ピースの真下に選択ピースがあったとき
problem,selection_positon,answer_text = position_left(problem,selection_positon,answer_text)
else:
for n in range(abs(s_to_p_dis[1]) - 1 ):
problem,selection_positon,answer_text = position_right(problem,selection_positon,answer_text)
for n in range(abs(s_to_p_dis[0])):
problem,selection_positon,answer_text = position_up(problem,selection_positon,answer_text)
#ここまでで、目的ピースの左隣にくる
problem,selection_positon,answer_text = position_right(problem,selection_positon,answer_text)
for n in range(abs(p_to_pp_dis[1])-1):
problem,selection_positon,answer_text = purpose_position_left(problem,selection_positon,answer_text)
else:#目的ピースが右端にあったとき
loop = abs(s_to_p_dis[1])
if s_to_p_dis[1] == 0:#選択ピースと目的ピースが同じ高さにある
problem,selection_positon,answer_text = position_left(problem,selection_positon,answer_text)
loop += 1
else:#揃ったピースを考慮するため
for n in range(abs(s_to_p_dis[1])-1):
problem,selection_positon,answer_text = position_right(problem,selection_positon,answer_text)
if s_to_p_dis[0] < 0:
for n in range(abs(s_to_p_dis[0])):
problem,selection_positon,answer_text = position_up(problem,selection_positon,answer_text)
if s_to_p_dis[0] > 0:
for n in range(abs(s_to_p_dis[0])):
problem,selection_positon,answer_text = position_down(problem,selection_positon,answer_text)
problem,selection_positon,answer_text = position_right(problem,selection_positon,answer_text)
for n in range(abs(p_to_pp_dis[1])-1):
problem,selection_positon,answer_text = purpose_position_left(problem,selection_positon,answer_text)
problem,selection_positon,answer_text = position_down(problem,selection_positon,answer_text)
problem,selection_positon,answer_text = position_left(problem,selection_positon,answer_text)
for n in range(abs(p_to_pp_dis[0])):
problem,selection_positon,answer_text = purpose_position_up(problem,selection_positon,answer_text)
if flg == False and purpose_positon[0] == height : #目的ピースが下端にある
if p_to_pp_dis[1] == 0:#真上に行きたい(=目的ピースの下に回りこんで上に上げる)
flg = True
if s_to_p_dis[0] == 0:#目的ピースと選択ピースが同じ高さにあるとき
problem,selection_positon,answer_text = position_up(problem,selection_positon,answer_text)
if s_to_p_dis[1] > 0:#目的ピースが選択ピースの右側にあるとき
for n in range(abs(s_to_p_dis[1])):
problem,selection_positon,answer_text = position_right(problem,selection_positon,answer_text)
if s_to_p_dis[1] < 0:#目的ピースが選択ピースの左側にあるとき
for n in range(abs(s_to_p_dis[1])):
problem,selection_positon,answer_text = position_left(problem,selection_positon,answer_text)
if s_to_p_dis[0] == 0:
problem,selection_positon,answer_text = position_down(problem,selection_positon,answer_text)
else:
for n in range(abs(s_to_p_dis[0])):
problem,selection_positon,answer_text = position_down(problem,selection_positon,answer_text)
for n in range(abs(p_to_pp_dis[0]) - 1):
problem,selection_positon,answer_text = purpose_position_up(problem,selection_positon,answer_text)
else:
if purpose_positon[1] == 0:#目的ピースが左下角
flg = True
if s_to_p_dis[1] == 0:#目的ピースの上に選択ピースがある
problem,selection_positon,answer_text = position_right(problem,selection_positon,answer_text)
for n in range(abs(s_to_p_dis[0])):
problem,selection_positon,answer_text = position_down(problem,selection_positon,answer_text)
if s_to_p_dis[1] == 0:
problem,selection_positon,answer_text = position_left(problem,selection_positon,answer_text)
else:
for n in range(abs(s_to_p_dis[1])):
problem,selection_positon,answer_text = position_left(problem,selection_positon,answer_text)
problem,selection_positon,answer_text = position_up(problem,selection_positon,answer_text)
problem,selection_positon,answer_text = position_right(problem,selection_positon,answer_text)
problem,selection_positon,answer_text = position_down(problem,selection_positon,answer_text)
if abs(p_to_pp_dis[1] ) - 1 != 0:#目的地が上じゃないとき
problem,selection_positon,answer_text = position_left(problem,selection_positon,answer_text)
problem,selection_positon,answer_text = position_up(problem,selection_positon,answer_text)
for n in range(abs(p_to_pp_dis[1]) - 1):
problem,selection_positon,answer_text = purpose_position_right(problem,selection_positon,answer_text)
problem,selection_positon,answer_text = position_down(problem,selection_positon,answer_text)
problem,selection_positon,answer_text = position_right(problem,selection_positon,answer_text)
for n in range(abs(p_to_pp_dis[0]) - 1):
problem,selection_positon,answer_text = purpose_position_up(problem,selection_positon,answer_text)
if purpose_positon[1] == width:#目的ピースが右下角
flg = True
if s_to_p_dis[1] == 0:#目的ピースの上に選択ピースがある
problem,selection_positon,answer_text = position_left(problem,selection_positon,answer_text)
for n in range(abs(s_to_p_dis[0])):
problem,selection_positon,answer_text = position_down(problem,selection_positon,answer_text)
if s_to_p_dis[1] == 0:
problem,selection_positon,answer_text = position_right(problem,selection_positon,answer_text)
else:
for n in range(abs(s_to_p_dis[1])):
problem,selection_positon,answer_text = position_right(problem,selection_positon,answer_text)
problem,selection_positon,answer_text = position_up(problem,selection_positon,answer_text)
problem,selection_positon,answer_text = position_left(problem,selection_positon,answer_text)
problem,selection_positon,answer_text = position_down(problem,selection_positon,answer_text)
if abs(p_to_pp_dis[1] ) - 1 != 0:#目的地が上じゃないとき
problem,selection_positon,answer_text = position_right(problem,selection_positon,answer_text)
problem,selection_positon,answer_text = position_up(problem,selection_positon,answer_text)
for n in range(abs(p_to_pp_dis[1]) - 1):
problem,selection_positon,answer_text = purpose_position_left(problem,selection_positon,answer_text)
problem,selection_positon,answer_text = position_down(problem,selection_positon,answer_text)
problem,selection_positon,answer_text = position_left(problem,selection_positon,answer_text)
for n in range(abs(p_to_pp_dis[0]) - 1):
problem,selection_positon,answer_text = purpose_position_up(problem,selection_positon,answer_text)
if flg == False:#普通に下端だったとき
flg = True
if s_to_p_dis[1] == 0:#目的ピースの真上に選択ピースがあったとき
for n in range(abs(s_to_p_dis[0])):
problem,selection_positon,answer_text = position_down(problem,selection_positon,answer_text)
if p_to_pp_dis[1] > 0:
problem,selection_positon,answer_text = position_left(problem,selection_positon,answer_text)
problem,selection_positon,answer_text = position_up(problem,selection_positon,answer_text)
for n in range(abs(p_to_pp_dis[1])):
problem,selection_positon,answer_text = purpose_position_right(problem,selection_positon,answer_text)
problem,selection_positon,answer_text = position_down(problem,selection_positon,answer_text)
problem,selection_positon,answer_text = position_right(problem,selection_positon,answer_text)
if p_to_pp_dis[1] < 0:
problem,selection_positon,answer_text = position_right(problem,selection_positon,answer_text)
problem,selection_positon,answer_text = position_up(problem,selection_positon,answer_text)
for n in range(abs(p_to_pp_dis[1])):
problem,selection_positon,answer_text = purpose_position_left(problem,selection_positon,answer_text)
problem,selection_positon,answer_text = position_down(problem,selection_positon,answer_text)
problem,selection_positon,answer_text = position_left(problem,selection_positon,answer_text)
for n in range(abs(p_to_pp_dis[0]) - 1):
problem,selection_positon,answer_text = purpose_position_up(problem,selection_positon,answer_text)
else:
loop = abs(s_to_p_dis[0])
if selection_positon[0] == pi and pi < height-1:
problem,selection_positon,answer_text = position_down(problem,selection_positon,answer_text)
loop -= 1
if s_to_p_dis[0] == 0:#目的ピースと選択ピースが同じ高さにあるとき
problem,selection_positon,answer_text = position_up(problem,selection_positon,answer_text)
if s_to_p_dis[1] < 0:#目的ピースが選択ピースの右側にあるとき
for n in range(abs(s_to_p_dis[1])):
problem,selection_positon,answer_text = position_left(problem,selection_positon,answer_text)
if s_to_p_dis[1] > 0:#目的ピースが選択ピースの左側にあるとき
for n in range(abs(s_to_p_dis[1])):
problem,selection_positon,answer_text = position_right(problem,selection_positon,answer_text)
if s_to_p_dis[0] == 0:
problem,selection_positon,answer_text = position_down(problem,selection_positon,answer_text)
else:
for n in range(loop):
problem,selection_positon,answer_text = position_down(problem,selection_positon,answer_text)
if p_to_pp_dis[1] < 0:#目的ピースが左側に行きたいとき
problem,selection_positon,answer_text = position_right(problem,selection_positon,answer_text)
problem,selection_positon,answer_text = position_up(problem,selection_positon,answer_text)
for n in range(abs(p_to_pp_dis[1])):
problem,selection_positon,answer_text = purpose_position_left(problem,selection_positon,answer_text)
problem,selection_positon,answer_text = position_down(problem,selection_positon,answer_text)
problem,selection_positon,answer_text = position_left(problem,selection_positon,answer_text)
if p_to_pp_dis[1] > 0:#目的ピースが右側に行きたいとき
problem,selection_positon,answer_text = position_left(problem,selection_positon,answer_text)
problem,selection_positon,answer_text = position_up(problem,selection_positon,answer_text)
for n in range(abs(p_to_pp_dis[1])):
problem,selection_positon,answer_text = purpose_position_right(problem,selection_positon,answer_text)
problem,selection_positon,answer_text = position_down(problem,selection_positon,answer_text)
problem,selection_positon,answer_text = position_right(problem,selection_positon,answer_text)
for n in range(abs(p_to_pp_dis[0]) - 1):#目的ピースを上に上げる
problem,selection_positon,answer_text = purpose_position_up(problem,selection_positon,answer_text)
#例外処理
if flg == False and s_to_p_dis[0] == 0 and ((s_to_p_dis[1] < 0 and p_to_pp_dis[1] > 0) or (s_to_p_dis[1] > 0 and p_to_pp_dis[1] < 0)):#選択ピースと目的ピースの行きたい方向がぶつかったとき
flg = True
if s_to_p_dis[1] < 0 and p_to_pp_dis[1] > 0:#選択ピースが右にある かつ 目的ピースが右に行きたい
for n in range(abs(s_to_p_dis[1])):
problem,selection_positon,answer_text = position_left(problem,selection_positon,answer_text)
for n in range(abs(p_to_pp_dis[1]) - 1):
problem,selection_positon,answer_text = purpose_position_right(problem,selection_positon,answer_text)
problem,selection_positon,answer_text = position_down(problem,selection_positon,answer_text)
problem,selection_positon,answer_text = position_right(problem,selection_positon,answer_text)
if s_to_p_dis[1] > 0 and p_to_pp_dis[1] < 0:#選択ピースが左にある かつ 目的ピースが左に行きたい
for n in range(abs(s_to_p_dis[1])):
problem,selection_positon,answer_text = position_right(problem,selection_positon,answer_text)
for n in range(abs(p_to_pp_dis[1]) - 1):
problem,selection_positon,answer_text = purpose_position_left(problem,selection_positon,answer_text)
problem,selection_positon,answer_text = position_down(problem,selection_positon,answer_text)
problem,selection_positon,answer_text = position_left(problem,selection_positon,answer_text)
for n in range(abs(p_to_pp_dis[0])):
problem,selection_positon,answer_text = purpose_position_up(problem,selection_positon,answer_text)
if flg == False and s_to_p_dis[1] == 0 and s_to_p_dis[0] > 0 and p_to_pp_dis[0] < 0: #縦にぶつかったとき
flg = True
if p_to_pp_dis[1] == 0:
problem,selection_positon,answer_text = position_right(problem,selection_positon,answer_text)
for n in range(abs(s_to_p_dis[0])+1):
problem,selection_positon,answer_text = position_down(problem,selection_positon,answer_text)
problem,selection_positon,answer_text = position_left(problem,selection_positon,answer_text)
if p_to_pp_dis[1] > 0:#目的ピースは右に行きたい
#print
problem,selection_positon,answer_text = position_right(problem,selection_positon,answer_text)
for n in range(abs(s_to_p_dis[0])):
problem,selection_positon,answer_text = position_down(problem,selection_positon,answer_text)
problem,selection_positon,answer_text = position_left(problem,selection_positon,answer_text)
for n in range(abs(p_to_pp_dis[1])-1):
problem,selection_positon,answer_text = purpose_position_right(problem,selection_positon,answer_text)
problem,selection_positon,answer_text = position_down(problem,selection_positon,answer_text)
problem,selection_positon,answer_text = position_right(problem,selection_positon,answer_text)
if p_to_pp_dis[1] < 0:#目的ピースは左に行きたい
problem,selection_positon,answer_text = position_left(problem,selection_positon,answer_text)
for n in range(abs(s_to_p_dis[0])):
problem,selection_positon,answer_text = position_down(problem,selection_positon,answer_text)
problem,selection_positon,answer_text = position_right(problem,selection_positon,answer_text)
for n in range(abs(p_to_pp_dis[1])-1):
problem,selection_positon,answer_text = purpose_position_left(problem,selection_positon,answer_text)
problem,selection_positon,answer_text = position_down(problem,selection_positon,answer_text)
problem,selection_positon,answer_text = position_left(problem,selection_positon,answer_text)
for n in range(abs(p_to_pp_dis[0])):
problem,selection_positon,answer_text = purpose_position_up(problem,selection_positon,answer_text)
if flg == False:#基本的にこれ 左・右・下端でなく、特殊条件でもない
right = False
left = False
loop = 0
if selection_positon[0] == pi and s_to_p_dis[0] != 0:
problem,selection_positon,answer_text = position_down(problem,selection_positon,answer_text)
exception = True
if abs(s_to_p_dis[0]) - 1 == 0:
problem,selection_positon,answer_text = position_down(problem,selection_positon,answer_text)
s_to_p_dis = (purpose_positon[0] - selection_positon[0],purpose_positon[1] - selection_positon[1])
if s_to_p_dis[1] < 0:#選択ピースは目的ピースの右側にある
right = True
if p_to_pp_dis[1] > 0:#目的ピースは右に行きたい
loop = abs(s_to_p_dis[1]) + 1
if p_to_pp_dis[1] < 0:#目的ピースは左に行きたい
loop = abs(s_to_p_dis[1]) - 1
for n in range(loop):
problem,selection_positon,answer_text = position_left(problem,selection_positon,answer_text)
if s_to_p_dis[1] > 0:#選択ピースは目的ピースの左側にある
left = True
if p_to_pp_dis[1] > 0:#目的ピースは右に行きたい
loop = abs(s_to_p_dis[1]) - 1
if p_to_pp_dis[1] < 0:#目的ピースは左に行きたい
loop = abs(s_to_p_dis[1]) + 1
for n in range(loop):
problem,selection_positon,answer_text = position_right(problem,selection_positon,answer_text)
if s_to_p_dis[0] == 0 :#同じ高さに選択ピースと目的ピースがある
if p_to_pp_dis[1] == 0:#目的地が真上だったとき
problem,selection_positon,answer_text = position_down(problem,selection_positon,answer_text)
if right:
for n in range(abs(s_to_p_dis[1])):
problem,selection_positon,answer_text = position_left(problem,selection_positon,answer_text)
if left:
for n in range(abs(s_to_p_dis[1])):
problem,selection_positon,answer_text = position_right(problem,selection_positon,answer_text)
for n in range(abs(p_to_pp_dis[0])):
problem,selection_positon,answer_text = purpose_position_up(problem,selection_positon,answer_text)
else:#目的地は真上ではない
if right:#選択ピースは目的ピースの右側にある
for n in range(abs(p_to_pp_dis[1])):
problem,selection_positon,answer_text = purpose_position_left(problem,selection_positon,answer_text)
problem,selection_positon,answer_text = position_down(problem,selection_positon,answer_text)
problem,selection_positon,answer_text = position_left(problem,selection_positon,answer_text)
if left:#選択ピースは目的ピースの左側にある
for n in range(abs(p_to_pp_dis[1])):
problem,selection_positon,answer_text = purpose_position_right(problem,selection_positon,answer_text)
problem,selection_positon,answer_text = position_down(problem,selection_positon,answer_text)
problem,selection_positon,answer_text = position_right(problem,selection_positon,answer_text)
for n in range(abs(p_to_pp_dis[0])):
problem,selection_positon,answer_text = purpose_position_up(problem,selection_positon,answer_text)
if s_to_p_dis[0] > 0:#選択ピースは目的ピースの上側にある
if exception == True:
for n in range(abs(s_to_p_dis[0])-1):
problem,selection_positon,answer_text = position_down(problem,selection_positon,answer_text)
else:
for n in range(abs(s_to_p_dis[0])):
problem,selection_positon,answer_text = position_down(problem,selection_positon,answer_text)
if p_to_pp_dis[1] > 0:
for n in range(abs(p_to_pp_dis[1])):
problem,selection_positon,answer_text = purpose_position_right(problem,selection_positon,answer_text)
problem,selection_positon,answer_text = position_down(problem,selection_positon,answer_text)
problem,selection_positon,answer_text = position_right(problem,selection_positon,answer_text)
if p_to_pp_dis[1] < 0:
for n in range(abs(p_to_pp_dis[1])):
problem,selection_positon,answer_text = purpose_position_left(problem,selection_positon,answer_text)
problem,selection_positon,answer_text = position_down(problem,selection_positon,answer_text)
problem,selection_positon,answer_text = position_left(problem,selection_positon,answer_text)
if p_to_pp_dis[1] == 0:#真上に行きたい
problem,selection_positon,answer_text = position_down(problem,selection_positon,answer_text)
if left:
for n in range(abs(s_to_p_dis[1])):
problem,selection_positon,answer_text = position_right(problem,selection_positon,answer_text)
if right:
for n in range(abs(s_to_p_dis[1])):
problem,selection_positon,answer_text = position_left(problem,selection_positon,answer_text)
for n in range(abs(p_to_pp_dis[0])):
problem,selection_positon,answer_text = purpose_position_up(problem,selection_positon,answer_text)
if s_to_p_dis[0] < 0:#選択ピースは目的ピースの下側にある
if s_to_p_dis[1] == 0:#同じ幅に選択ピースと目的ピースがある
if p_to_pp_dis[1] < 0:
problem,selection_positon,answer_text = position_right(problem,selection_positon,answer_text)
if p_to_pp_dis[1] > 0:
problem,selection_positon,answer_text = position_left(problem,selection_positon,answer_text)
for n in range(abs(s_to_p_dis[0])):
problem,selection_positon,answer_text = position_up(problem,selection_positon,answer_text)
if p_to_pp_dis[1] < 0:#目的ピースは左に行きたい
for n in range(abs(p_to_pp_dis[1])):
problem,selection_positon,answer_text = purpose_position_left(problem,selection_positon,answer_text)
problem,selection_positon,answer_text = position_down(problem,selection_positon,answer_text)
problem,selection_positon,answer_text = position_left(problem,selection_positon,answer_text)
if p_to_pp_dis[1] > 0:#目的ピースは右に行きたい
for n in range(abs(p_to_pp_dis[1])):
problem,selection_positon,answer_text = purpose_position_right(problem,selection_positon,answer_text)
problem,selection_positon,answer_text = position_down(problem,selection_positon,answer_text)
problem,selection_positon,answer_text = position_right(problem,selection_positon,answer_text)
if p_to_pp_dis[1] == 0:#真上に行きたい
problem,selection_positon,answer_text = position_down(problem,selection_positon,answer_text)
if left:
for n in range(abs(s_to_p_dis[1])):
problem,selection_positon,answer_text = position_right(problem,selection_positon,answer_text)
if right:
for n in range(abs(s_to_p_dis[1])):
problem,selection_positon,answer_text = position_left(problem,selection_positon,answer_text)
#目的ピースは上に行きたい
for n in range(abs(p_to_pp_dis[0])):
problem,selection_positon,answer_text = purpose_position_up(problem,selection_positon,answer_text)
return (problem,selection_positon,answer_text)
def small_problem(i_max, j_max, problem, selection_positon, answer_text, answer):
    """Sort the first ``i_max`` rows of ``problem`` to match ``answer``.

    For each row, pieces in columns 0..j_max-1 are placed directly with
    move(); the last two pieces of the row are then rotated into place with
    a "garage-in" maneuver, which needs several special cases depending on
    where the selection piece ends up after the staging move.

    All puzzle state is threaded through the move()/position_*() helpers as
    the tuple (problem, selection_positon, answer_text), which is also what
    this function returns.  (Fix: the stray active debug statement
    ``print selection_positon,i`` — the only uncommented debug print in the
    file — has been removed.)
    """
    for i in range(i_max):
        # Place every piece of the row except the last two.
        for j in range(j_max):
            if answer[i][j] != problem[i][j]:
                problem, selection_positon, answer_text = move(i, j, i, j, problem, selection_positon, answer_text, answer)
        # "Garage-in" handling for the final two pieces of row i.
        if selection_positon[0] == i:
            # The selection piece sits on the row being finished; push it
            # down one cell so it does not block the row.
            problem, selection_positon, answer_text = position_down(problem, selection_positon, answer_text)
        if answer[i] != problem[i]:
            # Temporarily park the row's second-to-last piece in the last column.
            problem, selection_positon, answer_text = move(i, len(problem[0]) - 2, i, len(problem[0]) - 1, problem, selection_positon, answer_text, answer)
            # Exceptional "late insert" case: the piece now in the last
            # column is the one that belongs in the second-to-last slot.
            if problem[i][len(problem[0]) - 1] == answer[i][len(problem[0]) - 2]:
                # Bring the selection piece to column w-2 of row i ...
                if selection_positon[1] != len(problem[0]) - 2:
                    if selection_positon[1] == len(problem[0]) - 1:
                        problem, selection_positon, answer_text = position_left(problem, selection_positon, answer_text)
                    else:
                        for n in range(len(problem[0]) - 2 - selection_positon[1]):
                            problem, selection_positon, answer_text = position_right(problem, selection_positon, answer_text)
                if selection_positon[0] != i:
                    for n in range(i - selection_positon[0]):
                        problem, selection_positon, answer_text = position_up(problem, selection_positon, answer_text)
                # ... then run the fixed 18-move rotation (the "troublesome
                # pattern") that swaps the two end pieces into place.
                problem, selection_positon, answer_text = position_right(problem, selection_positon, answer_text)
                problem, selection_positon, answer_text = position_up(problem, selection_positon, answer_text)
                problem, selection_positon, answer_text = position_left(problem, selection_positon, answer_text)
                problem, selection_positon, answer_text = position_down(problem, selection_positon, answer_text)
                problem, selection_positon, answer_text = position_down(problem, selection_positon, answer_text)
                problem, selection_positon, answer_text = position_right(problem, selection_positon, answer_text)
                problem, selection_positon, answer_text = position_up(problem, selection_positon, answer_text)
                problem, selection_positon, answer_text = position_left(problem, selection_positon, answer_text)
                problem, selection_positon, answer_text = position_up(problem, selection_positon, answer_text)
                problem, selection_positon, answer_text = position_right(problem, selection_positon, answer_text)
                problem, selection_positon, answer_text = position_down(problem, selection_positon, answer_text)
                problem, selection_positon, answer_text = position_down(problem, selection_positon, answer_text)
                problem, selection_positon, answer_text = position_left(problem, selection_positon, answer_text)
                problem, selection_positon, answer_text = position_up(problem, selection_positon, answer_text)
                problem, selection_positon, answer_text = position_right(problem, selection_positon, answer_text)
                problem, selection_positon, answer_text = position_up(problem, selection_positon, answer_text)
                problem, selection_positon, answer_text = position_left(problem, selection_positon, answer_text)
                problem, selection_positon, answer_text = position_down(problem, selection_positon, answer_text)
            else:
                if selection_positon[0] == i:
                    # Selection drifted back onto the target row; push it down.
                    problem, selection_positon, answer_text = position_down(problem, selection_positon, answer_text)
                # Stage the row's last piece one row below its final slot.
                problem, selection_positon, answer_text = move(i + 1, len(problem[0]) - 2, i, len(problem[0]) - 2, problem, selection_positon, answer_text, answer)
                if (selection_positon[0] == i + 1 and selection_positon[1] == len(problem[0]) - 3) or (selection_positon[0] == i + 1 and selection_positon[1] == len(problem[0]) - 1) or (selection_positon[0] == i + 2 and selection_positon[1] == len(problem[0]) - 2):
                    if selection_positon[0] == i + 1 and selection_positon[1] == len(problem[0]) - 3:
                        # Pattern 1: selection just left of the staged piece.
                        problem, selection_positon, answer_text = position_down(problem, selection_positon, answer_text)
                        problem, selection_positon, answer_text = position_right(problem, selection_positon, answer_text)
                        problem, selection_positon, answer_text = position_right(problem, selection_positon, answer_text)
                        problem, selection_positon, answer_text = position_up(problem, selection_positon, answer_text)
                        problem, selection_positon, answer_text = position_up(problem, selection_positon, answer_text)
                        problem, selection_positon, answer_text = position_left(problem, selection_positon, answer_text)
                        problem, selection_positon, answer_text = position_down(problem, selection_positon, answer_text)
                    if selection_positon[0] == i + 1 and selection_positon[1] == len(problem[0]) - 1:
                        # Pattern 2: selection just right of the staged piece.
                        problem, selection_positon, answer_text = position_up(problem, selection_positon, answer_text)
                        problem, selection_positon, answer_text = position_left(problem, selection_positon, answer_text)
                        problem, selection_positon, answer_text = position_down(problem, selection_positon, answer_text)
                    if selection_positon[0] == i + 2 and selection_positon[1] == len(problem[0]) - 2:
                        # Pattern 3: selection directly below the staged piece.
                        problem, selection_positon, answer_text = position_right(problem, selection_positon, answer_text)
                        problem, selection_positon, answer_text = position_up(problem, selection_positon, answer_text)
                        problem, selection_positon, answer_text = position_up(problem, selection_positon, answer_text)
                        problem, selection_positon, answer_text = position_left(problem, selection_positon, answer_text)
                        problem, selection_positon, answer_text = position_down(problem, selection_positon, answer_text)
                else:
                    # Selection is elsewhere; walk it below the staged piece
                    # and run the same rotation as pattern 3.
                    if i + 1 == selection_positon[0]:
                        problem, selection_positon, answer_text = position_down(problem, selection_positon, answer_text)
                    if selection_positon[1] < len(problem[0]) - 2:
                        for n in range(abs(selection_positon[1] - (len(problem[0]) - 2))):
                            problem, selection_positon, answer_text = position_right(problem, selection_positon, answer_text)
                    if selection_positon[1] > len(problem[0]) - 2:
                        problem, selection_positon, answer_text = position_left(problem, selection_positon, answer_text)
                    problem, selection_positon, answer_text = position_right(problem, selection_positon, answer_text)
                    problem, selection_positon, answer_text = position_up(problem, selection_positon, answer_text)
                    problem, selection_positon, answer_text = position_up(problem, selection_positon, answer_text)
                    problem, selection_positon, answer_text = position_left(problem, selection_positon, answer_text)
                    problem, selection_positon, answer_text = position_down(problem, selection_positon, answer_text)
    return problem, selection_positon, answer_text
def L_sprit(target_columns,target_rows,solve_problem,solve_answer,corner_text):
    """Solve the board in two L-shaped passes anchored at one corner.

    ``corner_text`` must be a two-character code: the first char ("U"/"D")
    picks which vertical half is sorted in the first pass, the second char
    ("L"/"R") picks which horizontal strip is sorted in the second pass.
    The board is transposed (and, for "D", rotated) so small_problem() can
    always run in its canonical orientation; the recorded move strings are
    mapped back with rotation_operations()/transpose_operations().

    Returns (problem, answer_text), where answer_text is
    "<hex start col><hex start row>\\r\\n<encoded-length>\\r\\n<moves>".
    Returns 0 (not a tuple) when corner_text is not exactly two characters.
    NOTE(review): if corner_text[0] is neither "U" nor "D", `problem` is
    never bound and a NameError follows — confirm callers only pass the
    four documented codes.
    """
    if len(corner_text) != 2:
        # Runtime message (Japanese): "choose the corner for the L shape
        # and pass it as the argument (UL,DL,UR,DR)".
        print "L字にする四つ角を選択して、引数としてください(UL,DL,UR,DR)"
        return 0
    h = target_rows      # row count of the strip left for the second pass
    w = target_columns   # column count used by the second pass
    answer_text = ""
    if corner_text[0] == "U":#sort the upper side first
        # Work on the transposed board: transpose rows == original columns.
        problem = transpose(solve_problem)
        answer = transpose(solve_answer)
        if corner_text[1] == "R":
            # Selection piece: the one that belongs at the bottom-left of
            # the transposed answer.
            selection = answer[len(answer)-1][0]
            selection_positon = search(problem,selection)
            first_selection_position = selection_positon
        if corner_text[1] == "L":
            selection = answer[len(answer)-1][len(answer[0])-1]
            selection_positon = search(problem,selection)
            first_selection_position = selection_positon
        ip_max = len(problem)-h
        jp_max = len(problem[0])-2
        problem,selection_positon,LRUD_text1 = small_problem(ip_max,jp_max,problem,selection_positon,answer_text,answer)
        # Collect the unsolved bottom strip (last h rows) for pass two.
        matrixB = []
        matrixB_answer = []
        for i in range(len(problem)):
            if i >= len(problem)-h:
                matrixB.append(problem[i])
                matrixB_answer.append(answer[i])
    if corner_text[0] == "D":#sort the lower side first
        # Rotate so the lower half lands in the canonical orientation.
        problem = rotation(transpose(solve_problem))
        answer = rotation(transpose(solve_answer))
        if corner_text[1] == "R":
            selection = answer[len(answer)-1][len(answer[0])-1]
            selection_positon = search(problem,selection)
            # The reported start position must be in the un-rotated frame.
            selection = transpose(solve_answer)[0][0]
            first_selection_position = search(transpose(solve_problem),selection)
        if corner_text[1] == "L":
            selection = answer[len(answer)-1][0]
            selection_positon = search(problem,selection)
            selection = transpose(solve_answer)[0][len(transpose(solve_answer)[0])-1]
            first_selection_position = search(transpose(solve_problem),selection)
        ip_max = len(problem)-h
        jp_max = len(problem[0])-2
        problem,selection_positon,LRUD_text1 = small_problem(ip_max,jp_max,problem,selection_positon,answer_text,answer)
        # Undo the rotation on the board and on the recorded moves.
        problem = rotation(problem)
        answer = rotation(answer)
        LRUD_text1 = rotation_operations(LRUD_text1)
        # Collect the unsolved top strip (first h rows) for pass two.
        matrixB = []
        matrixB_answer = []
        for i in range(len(problem)):
            if i < h:
                matrixB.append(problem[i])
                matrixB_answer.append(answer[i])
    #print "半分終わった---------------------------------------------------------------------------------------------------------------------------"
    answer_text = ""
    if corner_text[1] == "R":#sort the right-side strip
        matrixB = transpose(rotation(matrixB))
        matrixB_answer = transpose(rotation(matrixB_answer))
        selection_positon = search(matrixB,selection)
        ib_max = len(matrixB)-w
        jb_max = len(matrixB[0])-2
        matrixB,selection_positon,LRUD_text2 = small_problem(ib_max,jb_max,matrixB,selection_positon,answer_text,matrixB_answer)
        # Map pass-two moves back into the pass-one orientation.
        LRUD_text2 = rotation_operations(transpose_operations(LRUD_text2))
        matrixB = rotation(transpose(matrixB))
        # Write the solved strip back into the full board.
        count = 0
        if corner_text[0] == "U":
            for i in range(len(problem)):
                if i >= len(problem)-h:
                    problem[i] = matrixB[count]
                    count += 1
        if corner_text[0] == "D":
            for i in range(len(problem)):
                if i < h:
                    problem[i] = matrixB[count]
                    count += 1
    if corner_text[1] == "L":#sort the left-side strip
        matrixB = transpose(matrixB)
        matrixB_answer = transpose(matrixB_answer)
        selection_positon = search(matrixB,selection)
        #print selection_positon
        ib_max = len(matrixB)-w
        jb_max = len(matrixB[0])-2
        matrixB,selection_positon,LRUD_text2 = small_problem(ib_max,jb_max,matrixB,selection_positon,answer_text,matrixB_answer)
        LRUD_text2 = transpose_operations(LRUD_text2)
        matrixB = transpose(matrixB)
        # Write the solved strip back into the full board.
        count = 0
        if corner_text[0] == "U":
            for i in range(len(problem)):
                if i >= len(problem)-h:
                    problem[i] = matrixB[count]
                    count += 1
        if corner_text[0] == "D":
            for i in range(len(problem)):
                if i < h:
                    problem[i] = matrixB[count]
                    count += 1
    # Concatenate both passes' moves, encode them (loop_encode_text
    # presumably compresses repeated moves — confirm), then prepend the
    # header: hex start column, hex start row, CRLF, encoded length, CRLF.
    LRUD_text = LRUD_text1 + LRUD_text2
    LRUD_text = loop_encode_text(LRUD_text)
    answer_text = "%X%X"%(first_selection_position[1],first_selection_position[0]) +"\r\n"+ str(len(LRUD_text)) +"\r\n"+ LRUD_text
    #check_matrix(transpose(solve_answer),problem,selection_positon)
    # The whole solve ran on the transposed board; transpose back.
    problem = transpose(problem)
    return problem,answer_text
def corner_L_sprit(target_columns, target_rows, solve_problem, solve_answer):
    """Try the L-split solve from all four corners and keep the shortest.

    Runs L_sprit() once per corner code, in the original order
    ("UL", "UR", "DL", "DR"), and returns the (problem, answer_text) pair
    whose answer text is shortest.  Ties keep the earlier corner, matching
    the original strict ``>`` comparison.
    """
    # Collapses the original copy-pasted compare-and-swap blocks into a loop.
    best_problem, best_text = L_sprit(target_columns, target_rows, solve_problem, solve_answer, "UL")
    for corner in ("UR", "DL", "DR"):
        cand_problem, cand_text = L_sprit(target_columns, target_rows, solve_problem, solve_answer, corner)
        if len(best_text) > len(cand_text):
            best_problem = cand_problem
            best_text = cand_text
    return best_problem, best_text
| 57.651246
| 257
| 0.64714
| 5,988
| 48,600
| 4.914997
| 0.035738
| 0.283239
| 0.343108
| 0.405491
| 0.891237
| 0.872719
| 0.869016
| 0.842581
| 0.82549
| 0.811627
| 0
| 0.012165
| 0.257449
| 48,600
| 842
| 258
| 57.719715
| 0.803314
| 0.04784
| 0
| 0.740688
| 0
| 0
| 0.006022
| 0.00078
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0.025788
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 11
|
a3a9ccd07dbf7aca31931f4262252e12d91e5733
| 55,181
|
py
|
Python
|
tccli/services/tat/tat_client.py
|
tencentcloudapi-test/tencentcloud-cli
|
da9733765df2b405b83b7acff48256f31e053ab1
|
[
"Apache-2.0"
] | null | null | null |
tccli/services/tat/tat_client.py
|
tencentcloudapi-test/tencentcloud-cli
|
da9733765df2b405b83b7acff48256f31e053ab1
|
[
"Apache-2.0"
] | null | null | null |
tccli/services/tat/tat_client.py
|
tencentcloudapi-test/tencentcloud-cli
|
da9733765df2b405b83b7acff48256f31e053ab1
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
import os
import sys
import six
import json
import tccli.options_define as OptionsDefine
import tccli.format_output as FormatOutput
from tccli import __version__
from tccli.utils import Utils
from tccli.exceptions import ConfigurationError, ClientError, ParamError
from tencentcloud.common import credential
from tencentcloud.common.profile.http_profile import HttpProfile
from tencentcloud.common.profile.client_profile import ClientProfile
from tencentcloud.tat.v20201028 import tat_client as tat_client_v20201028
from tencentcloud.tat.v20201028 import models as models_v20201028
from jmespath import search
import time
def doEnableInvoker(args, parsed_globals):
    """Run the TAT EnableInvoker action for the CLI.

    Builds credentials from the parsed global options (CVM role, assumed
    STS role, or static secret id/key), sends an EnableInvokerRequest built
    from ``args``, optionally polls until the configured waiter expression
    reaches its target value, and prints the formatted response.
    """
    g_param = parse_global_arg(parsed_globals)

    # Credential precedence: CVM role > assumed STS role > static keys.
    if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
        cred = credential.CVMRoleCredential()
    elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
        cred = credential.STSAssumeRoleCredential(
            g_param[OptionsDefine.SecretId],
            g_param[OptionsDefine.SecretKey],
            g_param[OptionsDefine.RoleArn.replace('-', '_')],
            g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
        )
    else:
        cred = credential.Credential(
            g_param[OptionsDefine.SecretId],
            g_param[OptionsDefine.SecretKey],
            g_param[OptionsDefine.Token]
        )

    timeout_opt = g_param[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout_opt is None else int(timeout_opt),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint],
        proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
    )
    client_profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")

    version = g_param[OptionsDefine.Version]
    client = CLIENT_MAP[version].TatClient(cred, g_param[OptionsDefine.Region], client_profile)
    client._sdkVersion += ("_CLI_" + __version__)

    request = MODELS_MAP[version].EnableInvokerRequest()
    request.from_json_string(json.dumps(args))

    started_at = time.time()
    while True:
        response = client.EnableInvoker(request)
        raw = response.to_json_string()
        try:
            json_obj = json.loads(raw)
        except TypeError as e:
            json_obj = json.loads(raw.decode('utf-8'))  # python3.3
        # NOTE(review): the literal key 'OptionsDefine.WaiterInfo' (the
        # string, not the attribute) follows the generated-CLI convention —
        # confirm parse_global_arg stores waiter config under this key.
        if not g_param[OptionsDefine.Waiter] or search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj) == g_param['OptionsDefine.WaiterInfo']['to']:
            break
        # Waiter configured and target value not reached: report and retry
        # until the configured timeout elapses.
        waiter_info = g_param['OptionsDefine.WaiterInfo']
        if time.time() - started_at >= waiter_info['timeout']:
            raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
                              (waiter_info['expr'], waiter_info['to'],
                               search(waiter_info['expr'], json_obj)))
        print('Inquiry result is %s.' % search(waiter_info['expr'], json_obj))
        time.sleep(waiter_info['interval'])
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDeleteInvoker(args, parsed_globals):
    """Run the TAT DeleteInvoker action for the CLI.

    Builds credentials from the parsed global options (CVM role, assumed
    STS role, or static secret id/key), sends a DeleteInvokerRequest built
    from ``args``, optionally polls until the configured waiter expression
    reaches its target value, and prints the formatted response.
    """
    g_param = parse_global_arg(parsed_globals)

    # Credential precedence: CVM role > assumed STS role > static keys.
    if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
        cred = credential.CVMRoleCredential()
    elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
        cred = credential.STSAssumeRoleCredential(
            g_param[OptionsDefine.SecretId],
            g_param[OptionsDefine.SecretKey],
            g_param[OptionsDefine.RoleArn.replace('-', '_')],
            g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
        )
    else:
        cred = credential.Credential(
            g_param[OptionsDefine.SecretId],
            g_param[OptionsDefine.SecretKey],
            g_param[OptionsDefine.Token]
        )

    timeout_opt = g_param[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout_opt is None else int(timeout_opt),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint],
        proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
    )
    client_profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")

    version = g_param[OptionsDefine.Version]
    client = CLIENT_MAP[version].TatClient(cred, g_param[OptionsDefine.Region], client_profile)
    client._sdkVersion += ("_CLI_" + __version__)

    request = MODELS_MAP[version].DeleteInvokerRequest()
    request.from_json_string(json.dumps(args))

    started_at = time.time()
    while True:
        response = client.DeleteInvoker(request)
        raw = response.to_json_string()
        try:
            json_obj = json.loads(raw)
        except TypeError as e:
            json_obj = json.loads(raw.decode('utf-8'))  # python3.3
        # NOTE(review): the literal key 'OptionsDefine.WaiterInfo' (the
        # string, not the attribute) follows the generated-CLI convention —
        # confirm parse_global_arg stores waiter config under this key.
        if not g_param[OptionsDefine.Waiter] or search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj) == g_param['OptionsDefine.WaiterInfo']['to']:
            break
        # Waiter configured and target value not reached: report and retry
        # until the configured timeout elapses.
        waiter_info = g_param['OptionsDefine.WaiterInfo']
        if time.time() - started_at >= waiter_info['timeout']:
            raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
                              (waiter_info['expr'], waiter_info['to'],
                               search(waiter_info['expr'], json_obj)))
        print('Inquiry result is %s.' % search(waiter_info['expr'], json_obj))
        time.sleep(waiter_info['interval'])
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeCommands(args, parsed_globals):
    """Run the TAT DescribeCommands action for the CLI.

    Builds credentials from the parsed global options (CVM role, assumed
    STS role, or static secret id/key), sends a DescribeCommandsRequest
    built from ``args``, optionally polls until the configured waiter
    expression reaches its target value, and prints the formatted response.
    """
    g_param = parse_global_arg(parsed_globals)

    # Credential precedence: CVM role > assumed STS role > static keys.
    if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
        cred = credential.CVMRoleCredential()
    elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
        cred = credential.STSAssumeRoleCredential(
            g_param[OptionsDefine.SecretId],
            g_param[OptionsDefine.SecretKey],
            g_param[OptionsDefine.RoleArn.replace('-', '_')],
            g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
        )
    else:
        cred = credential.Credential(
            g_param[OptionsDefine.SecretId],
            g_param[OptionsDefine.SecretKey],
            g_param[OptionsDefine.Token]
        )

    timeout_opt = g_param[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout_opt is None else int(timeout_opt),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint],
        proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
    )
    client_profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")

    version = g_param[OptionsDefine.Version]
    client = CLIENT_MAP[version].TatClient(cred, g_param[OptionsDefine.Region], client_profile)
    client._sdkVersion += ("_CLI_" + __version__)

    request = MODELS_MAP[version].DescribeCommandsRequest()
    request.from_json_string(json.dumps(args))

    started_at = time.time()
    while True:
        response = client.DescribeCommands(request)
        raw = response.to_json_string()
        try:
            json_obj = json.loads(raw)
        except TypeError as e:
            json_obj = json.loads(raw.decode('utf-8'))  # python3.3
        # NOTE(review): the literal key 'OptionsDefine.WaiterInfo' (the
        # string, not the attribute) follows the generated-CLI convention —
        # confirm parse_global_arg stores waiter config under this key.
        if not g_param[OptionsDefine.Waiter] or search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj) == g_param['OptionsDefine.WaiterInfo']['to']:
            break
        # Waiter configured and target value not reached: report and retry
        # until the configured timeout elapses.
        waiter_info = g_param['OptionsDefine.WaiterInfo']
        if time.time() - started_at >= waiter_info['timeout']:
            raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
                              (waiter_info['expr'], waiter_info['to'],
                               search(waiter_info['expr'], json_obj)))
        print('Inquiry result is %s.' % search(waiter_info['expr'], json_obj))
        time.sleep(waiter_info['interval'])
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeInvocations(args, parsed_globals):
    """Run the TAT DescribeInvocations action for the CLI.

    Builds credentials from the parsed global options (CVM role, assumed
    STS role, or static secret id/key), sends a DescribeInvocationsRequest
    built from ``args``, optionally polls until the configured waiter
    expression reaches its target value, and prints the formatted response.
    """
    g_param = parse_global_arg(parsed_globals)

    # Credential precedence: CVM role > assumed STS role > static keys.
    if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
        cred = credential.CVMRoleCredential()
    elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
        cred = credential.STSAssumeRoleCredential(
            g_param[OptionsDefine.SecretId],
            g_param[OptionsDefine.SecretKey],
            g_param[OptionsDefine.RoleArn.replace('-', '_')],
            g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
        )
    else:
        cred = credential.Credential(
            g_param[OptionsDefine.SecretId],
            g_param[OptionsDefine.SecretKey],
            g_param[OptionsDefine.Token]
        )

    timeout_opt = g_param[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout_opt is None else int(timeout_opt),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint],
        proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
    )
    client_profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")

    version = g_param[OptionsDefine.Version]
    client = CLIENT_MAP[version].TatClient(cred, g_param[OptionsDefine.Region], client_profile)
    client._sdkVersion += ("_CLI_" + __version__)

    request = MODELS_MAP[version].DescribeInvocationsRequest()
    request.from_json_string(json.dumps(args))

    started_at = time.time()
    while True:
        response = client.DescribeInvocations(request)
        raw = response.to_json_string()
        try:
            json_obj = json.loads(raw)
        except TypeError as e:
            json_obj = json.loads(raw.decode('utf-8'))  # python3.3
        # NOTE(review): the literal key 'OptionsDefine.WaiterInfo' (the
        # string, not the attribute) follows the generated-CLI convention —
        # confirm parse_global_arg stores waiter config under this key.
        if not g_param[OptionsDefine.Waiter] or search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj) == g_param['OptionsDefine.WaiterInfo']['to']:
            break
        # Waiter configured and target value not reached: report and retry
        # until the configured timeout elapses.
        waiter_info = g_param['OptionsDefine.WaiterInfo']
        if time.time() - started_at >= waiter_info['timeout']:
            raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
                              (waiter_info['expr'], waiter_info['to'],
                               search(waiter_info['expr'], json_obj)))
        print('Inquiry result is %s.' % search(waiter_info['expr'], json_obj))
        time.sleep(waiter_info['interval'])
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doCancelInvocation(args, parsed_globals):
    """Invoke the TAT CancelInvocation API and print the formatted response.

    Builds credentials from the parsed global CLI options (CVM role,
    STS assume-role, or plain secret id/key + token), sends the request,
    and — when a waiter is configured — polls until the watched JMESPath
    expression reaches the expected value or the waiter times out.

    :param args: action arguments, JSON-serializable dict used to fill the request model
    :param parsed_globals: parsed global CLI options
    :raises ClientError: when the waiter's timeout elapses before the condition holds
    """
    g_param = parse_global_arg(parsed_globals)
    # Credential precedence: CVM instance role > STS assume-role > static keys.
    if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
        cred = credential.CVMRoleCredential()
    elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
        cred = credential.STSAssumeRoleCredential(
            g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.RoleArn.replace('-', '_')],
            g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
        )
    else:
        cred = credential.Credential(
            g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
        )
    http_profile = HttpProfile(
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint],
        proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.TatClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.CancelInvocationRequest()
    model.from_json_string(json.dumps(args))
    start_time = time.time()
    while True:
        rsp = client.CancelInvocation(model)
        result = rsp.to_json_string()
        try:
            json_obj = json.loads(result)
        except TypeError:
            json_obj = json.loads(result.decode('utf-8'))  # python3.3
        # BUGFIX: waiter config must be looked up via the OptionsDefine constant,
        # not the literal string 'OptionsDefine.WaiterInfo' (which is never a key
        # of g_param and raised KeyError whenever a waiter was configured).
        if not g_param[OptionsDefine.Waiter] or search(g_param[OptionsDefine.WaiterInfo]['expr'], json_obj) == g_param[OptionsDefine.WaiterInfo]['to']:
            break
        cur_time = time.time()
        if cur_time - start_time >= g_param[OptionsDefine.WaiterInfo]['timeout']:
            raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
            (g_param[OptionsDefine.WaiterInfo]['expr'], g_param[OptionsDefine.WaiterInfo]['to'],
            search(g_param[OptionsDefine.WaiterInfo]['expr'], json_obj)))
        else:
            print('Inquiry result is %s.' % search(g_param[OptionsDefine.WaiterInfo]['expr'], json_obj))
            time.sleep(g_param[OptionsDefine.WaiterInfo]['interval'])
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeInvocationTasks(args, parsed_globals):
    """Invoke the TAT DescribeInvocationTasks API and print the formatted response.

    Builds credentials from the parsed global CLI options (CVM role,
    STS assume-role, or plain secret id/key + token), sends the request,
    and — when a waiter is configured — polls until the watched JMESPath
    expression reaches the expected value or the waiter times out.

    :param args: action arguments, JSON-serializable dict used to fill the request model
    :param parsed_globals: parsed global CLI options
    :raises ClientError: when the waiter's timeout elapses before the condition holds
    """
    g_param = parse_global_arg(parsed_globals)
    # Credential precedence: CVM instance role > STS assume-role > static keys.
    if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
        cred = credential.CVMRoleCredential()
    elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
        cred = credential.STSAssumeRoleCredential(
            g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.RoleArn.replace('-', '_')],
            g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
        )
    else:
        cred = credential.Credential(
            g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
        )
    http_profile = HttpProfile(
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint],
        proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.TatClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.DescribeInvocationTasksRequest()
    model.from_json_string(json.dumps(args))
    start_time = time.time()
    while True:
        rsp = client.DescribeInvocationTasks(model)
        result = rsp.to_json_string()
        try:
            json_obj = json.loads(result)
        except TypeError:
            json_obj = json.loads(result.decode('utf-8'))  # python3.3
        # BUGFIX: waiter config must be looked up via the OptionsDefine constant,
        # not the literal string 'OptionsDefine.WaiterInfo' (which is never a key
        # of g_param and raised KeyError whenever a waiter was configured).
        if not g_param[OptionsDefine.Waiter] or search(g_param[OptionsDefine.WaiterInfo]['expr'], json_obj) == g_param[OptionsDefine.WaiterInfo]['to']:
            break
        cur_time = time.time()
        if cur_time - start_time >= g_param[OptionsDefine.WaiterInfo]['timeout']:
            raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
            (g_param[OptionsDefine.WaiterInfo]['expr'], g_param[OptionsDefine.WaiterInfo]['to'],
            search(g_param[OptionsDefine.WaiterInfo]['expr'], json_obj)))
        else:
            print('Inquiry result is %s.' % search(g_param[OptionsDefine.WaiterInfo]['expr'], json_obj))
            time.sleep(g_param[OptionsDefine.WaiterInfo]['interval'])
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doCreateInvoker(args, parsed_globals):
    """Invoke the TAT CreateInvoker API and print the formatted response.

    Builds credentials from the parsed global CLI options (CVM role,
    STS assume-role, or plain secret id/key + token), sends the request,
    and — when a waiter is configured — polls until the watched JMESPath
    expression reaches the expected value or the waiter times out.

    :param args: action arguments, JSON-serializable dict used to fill the request model
    :param parsed_globals: parsed global CLI options
    :raises ClientError: when the waiter's timeout elapses before the condition holds
    """
    g_param = parse_global_arg(parsed_globals)
    # Credential precedence: CVM instance role > STS assume-role > static keys.
    if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
        cred = credential.CVMRoleCredential()
    elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
        cred = credential.STSAssumeRoleCredential(
            g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.RoleArn.replace('-', '_')],
            g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
        )
    else:
        cred = credential.Credential(
            g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
        )
    http_profile = HttpProfile(
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint],
        proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.TatClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.CreateInvokerRequest()
    model.from_json_string(json.dumps(args))
    start_time = time.time()
    while True:
        rsp = client.CreateInvoker(model)
        result = rsp.to_json_string()
        try:
            json_obj = json.loads(result)
        except TypeError:
            json_obj = json.loads(result.decode('utf-8'))  # python3.3
        # BUGFIX: waiter config must be looked up via the OptionsDefine constant,
        # not the literal string 'OptionsDefine.WaiterInfo' (which is never a key
        # of g_param and raised KeyError whenever a waiter was configured).
        if not g_param[OptionsDefine.Waiter] or search(g_param[OptionsDefine.WaiterInfo]['expr'], json_obj) == g_param[OptionsDefine.WaiterInfo]['to']:
            break
        cur_time = time.time()
        if cur_time - start_time >= g_param[OptionsDefine.WaiterInfo]['timeout']:
            raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
            (g_param[OptionsDefine.WaiterInfo]['expr'], g_param[OptionsDefine.WaiterInfo]['to'],
            search(g_param[OptionsDefine.WaiterInfo]['expr'], json_obj)))
        else:
            print('Inquiry result is %s.' % search(g_param[OptionsDefine.WaiterInfo]['expr'], json_obj))
            time.sleep(g_param[OptionsDefine.WaiterInfo]['interval'])
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeInvokerRecords(args, parsed_globals):
    """Invoke the TAT DescribeInvokerRecords API and print the formatted response.

    Builds credentials from the parsed global CLI options (CVM role,
    STS assume-role, or plain secret id/key + token), sends the request,
    and — when a waiter is configured — polls until the watched JMESPath
    expression reaches the expected value or the waiter times out.

    :param args: action arguments, JSON-serializable dict used to fill the request model
    :param parsed_globals: parsed global CLI options
    :raises ClientError: when the waiter's timeout elapses before the condition holds
    """
    g_param = parse_global_arg(parsed_globals)
    # Credential precedence: CVM instance role > STS assume-role > static keys.
    if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
        cred = credential.CVMRoleCredential()
    elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
        cred = credential.STSAssumeRoleCredential(
            g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.RoleArn.replace('-', '_')],
            g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
        )
    else:
        cred = credential.Credential(
            g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
        )
    http_profile = HttpProfile(
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint],
        proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.TatClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.DescribeInvokerRecordsRequest()
    model.from_json_string(json.dumps(args))
    start_time = time.time()
    while True:
        rsp = client.DescribeInvokerRecords(model)
        result = rsp.to_json_string()
        try:
            json_obj = json.loads(result)
        except TypeError:
            json_obj = json.loads(result.decode('utf-8'))  # python3.3
        # BUGFIX: waiter config must be looked up via the OptionsDefine constant,
        # not the literal string 'OptionsDefine.WaiterInfo' (which is never a key
        # of g_param and raised KeyError whenever a waiter was configured).
        if not g_param[OptionsDefine.Waiter] or search(g_param[OptionsDefine.WaiterInfo]['expr'], json_obj) == g_param[OptionsDefine.WaiterInfo]['to']:
            break
        cur_time = time.time()
        if cur_time - start_time >= g_param[OptionsDefine.WaiterInfo]['timeout']:
            raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
            (g_param[OptionsDefine.WaiterInfo]['expr'], g_param[OptionsDefine.WaiterInfo]['to'],
            search(g_param[OptionsDefine.WaiterInfo]['expr'], json_obj)))
        else:
            print('Inquiry result is %s.' % search(g_param[OptionsDefine.WaiterInfo]['expr'], json_obj))
            time.sleep(g_param[OptionsDefine.WaiterInfo]['interval'])
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeRegions(args, parsed_globals):
    """Invoke the TAT DescribeRegions API and print the formatted response.

    Builds credentials from the parsed global CLI options (CVM role,
    STS assume-role, or plain secret id/key + token), sends the request,
    and — when a waiter is configured — polls until the watched JMESPath
    expression reaches the expected value or the waiter times out.

    :param args: action arguments, JSON-serializable dict used to fill the request model
    :param parsed_globals: parsed global CLI options
    :raises ClientError: when the waiter's timeout elapses before the condition holds
    """
    g_param = parse_global_arg(parsed_globals)
    # Credential precedence: CVM instance role > STS assume-role > static keys.
    if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
        cred = credential.CVMRoleCredential()
    elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
        cred = credential.STSAssumeRoleCredential(
            g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.RoleArn.replace('-', '_')],
            g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
        )
    else:
        cred = credential.Credential(
            g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
        )
    http_profile = HttpProfile(
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint],
        proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.TatClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.DescribeRegionsRequest()
    model.from_json_string(json.dumps(args))
    start_time = time.time()
    while True:
        rsp = client.DescribeRegions(model)
        result = rsp.to_json_string()
        try:
            json_obj = json.loads(result)
        except TypeError:
            json_obj = json.loads(result.decode('utf-8'))  # python3.3
        # BUGFIX: waiter config must be looked up via the OptionsDefine constant,
        # not the literal string 'OptionsDefine.WaiterInfo' (which is never a key
        # of g_param and raised KeyError whenever a waiter was configured).
        if not g_param[OptionsDefine.Waiter] or search(g_param[OptionsDefine.WaiterInfo]['expr'], json_obj) == g_param[OptionsDefine.WaiterInfo]['to']:
            break
        cur_time = time.time()
        if cur_time - start_time >= g_param[OptionsDefine.WaiterInfo]['timeout']:
            raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
            (g_param[OptionsDefine.WaiterInfo]['expr'], g_param[OptionsDefine.WaiterInfo]['to'],
            search(g_param[OptionsDefine.WaiterInfo]['expr'], json_obj)))
        else:
            print('Inquiry result is %s.' % search(g_param[OptionsDefine.WaiterInfo]['expr'], json_obj))
            time.sleep(g_param[OptionsDefine.WaiterInfo]['interval'])
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDisableInvoker(args, parsed_globals):
    """Invoke the TAT DisableInvoker API and print the formatted response.

    Builds credentials from the parsed global CLI options (CVM role,
    STS assume-role, or plain secret id/key + token), sends the request,
    and — when a waiter is configured — polls until the watched JMESPath
    expression reaches the expected value or the waiter times out.

    :param args: action arguments, JSON-serializable dict used to fill the request model
    :param parsed_globals: parsed global CLI options
    :raises ClientError: when the waiter's timeout elapses before the condition holds
    """
    g_param = parse_global_arg(parsed_globals)
    # Credential precedence: CVM instance role > STS assume-role > static keys.
    if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
        cred = credential.CVMRoleCredential()
    elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
        cred = credential.STSAssumeRoleCredential(
            g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.RoleArn.replace('-', '_')],
            g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
        )
    else:
        cred = credential.Credential(
            g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
        )
    http_profile = HttpProfile(
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint],
        proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.TatClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.DisableInvokerRequest()
    model.from_json_string(json.dumps(args))
    start_time = time.time()
    while True:
        rsp = client.DisableInvoker(model)
        result = rsp.to_json_string()
        try:
            json_obj = json.loads(result)
        except TypeError:
            json_obj = json.loads(result.decode('utf-8'))  # python3.3
        # BUGFIX: waiter config must be looked up via the OptionsDefine constant,
        # not the literal string 'OptionsDefine.WaiterInfo' (which is never a key
        # of g_param and raised KeyError whenever a waiter was configured).
        if not g_param[OptionsDefine.Waiter] or search(g_param[OptionsDefine.WaiterInfo]['expr'], json_obj) == g_param[OptionsDefine.WaiterInfo]['to']:
            break
        cur_time = time.time()
        if cur_time - start_time >= g_param[OptionsDefine.WaiterInfo]['timeout']:
            raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
            (g_param[OptionsDefine.WaiterInfo]['expr'], g_param[OptionsDefine.WaiterInfo]['to'],
            search(g_param[OptionsDefine.WaiterInfo]['expr'], json_obj)))
        else:
            print('Inquiry result is %s.' % search(g_param[OptionsDefine.WaiterInfo]['expr'], json_obj))
            time.sleep(g_param[OptionsDefine.WaiterInfo]['interval'])
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doModifyInvoker(args, parsed_globals):
    """Invoke the TAT ModifyInvoker API and print the formatted response.

    Builds credentials from the parsed global CLI options (CVM role,
    STS assume-role, or plain secret id/key + token), sends the request,
    and — when a waiter is configured — polls until the watched JMESPath
    expression reaches the expected value or the waiter times out.

    :param args: action arguments, JSON-serializable dict used to fill the request model
    :param parsed_globals: parsed global CLI options
    :raises ClientError: when the waiter's timeout elapses before the condition holds
    """
    g_param = parse_global_arg(parsed_globals)
    # Credential precedence: CVM instance role > STS assume-role > static keys.
    if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
        cred = credential.CVMRoleCredential()
    elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
        cred = credential.STSAssumeRoleCredential(
            g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.RoleArn.replace('-', '_')],
            g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
        )
    else:
        cred = credential.Credential(
            g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
        )
    http_profile = HttpProfile(
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint],
        proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.TatClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.ModifyInvokerRequest()
    model.from_json_string(json.dumps(args))
    start_time = time.time()
    while True:
        rsp = client.ModifyInvoker(model)
        result = rsp.to_json_string()
        try:
            json_obj = json.loads(result)
        except TypeError:
            json_obj = json.loads(result.decode('utf-8'))  # python3.3
        # BUGFIX: waiter config must be looked up via the OptionsDefine constant,
        # not the literal string 'OptionsDefine.WaiterInfo' (which is never a key
        # of g_param and raised KeyError whenever a waiter was configured).
        if not g_param[OptionsDefine.Waiter] or search(g_param[OptionsDefine.WaiterInfo]['expr'], json_obj) == g_param[OptionsDefine.WaiterInfo]['to']:
            break
        cur_time = time.time()
        if cur_time - start_time >= g_param[OptionsDefine.WaiterInfo]['timeout']:
            raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
            (g_param[OptionsDefine.WaiterInfo]['expr'], g_param[OptionsDefine.WaiterInfo]['to'],
            search(g_param[OptionsDefine.WaiterInfo]['expr'], json_obj)))
        else:
            print('Inquiry result is %s.' % search(g_param[OptionsDefine.WaiterInfo]['expr'], json_obj))
            time.sleep(g_param[OptionsDefine.WaiterInfo]['interval'])
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doCreateCommand(args, parsed_globals):
    """Invoke the TAT CreateCommand API and print the formatted response.

    Builds credentials from the parsed global CLI options (CVM role,
    STS assume-role, or plain secret id/key + token), sends the request,
    and — when a waiter is configured — polls until the watched JMESPath
    expression reaches the expected value or the waiter times out.

    :param args: action arguments, JSON-serializable dict used to fill the request model
    :param parsed_globals: parsed global CLI options
    :raises ClientError: when the waiter's timeout elapses before the condition holds
    """
    g_param = parse_global_arg(parsed_globals)
    # Credential precedence: CVM instance role > STS assume-role > static keys.
    if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
        cred = credential.CVMRoleCredential()
    elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
        cred = credential.STSAssumeRoleCredential(
            g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.RoleArn.replace('-', '_')],
            g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
        )
    else:
        cred = credential.Credential(
            g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
        )
    http_profile = HttpProfile(
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint],
        proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.TatClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.CreateCommandRequest()
    model.from_json_string(json.dumps(args))
    start_time = time.time()
    while True:
        rsp = client.CreateCommand(model)
        result = rsp.to_json_string()
        try:
            json_obj = json.loads(result)
        except TypeError:
            json_obj = json.loads(result.decode('utf-8'))  # python3.3
        # BUGFIX: waiter config must be looked up via the OptionsDefine constant,
        # not the literal string 'OptionsDefine.WaiterInfo' (which is never a key
        # of g_param and raised KeyError whenever a waiter was configured).
        if not g_param[OptionsDefine.Waiter] or search(g_param[OptionsDefine.WaiterInfo]['expr'], json_obj) == g_param[OptionsDefine.WaiterInfo]['to']:
            break
        cur_time = time.time()
        if cur_time - start_time >= g_param[OptionsDefine.WaiterInfo]['timeout']:
            raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
            (g_param[OptionsDefine.WaiterInfo]['expr'], g_param[OptionsDefine.WaiterInfo]['to'],
            search(g_param[OptionsDefine.WaiterInfo]['expr'], json_obj)))
        else:
            print('Inquiry result is %s.' % search(g_param[OptionsDefine.WaiterInfo]['expr'], json_obj))
            time.sleep(g_param[OptionsDefine.WaiterInfo]['interval'])
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDeleteCommand(args, parsed_globals):
    """Invoke the TAT DeleteCommand API and print the formatted response.

    Builds credentials from the parsed global CLI options (CVM role,
    STS assume-role, or plain secret id/key + token), sends the request,
    and — when a waiter is configured — polls until the watched JMESPath
    expression reaches the expected value or the waiter times out.

    :param args: action arguments, JSON-serializable dict used to fill the request model
    :param parsed_globals: parsed global CLI options
    :raises ClientError: when the waiter's timeout elapses before the condition holds
    """
    g_param = parse_global_arg(parsed_globals)
    # Credential precedence: CVM instance role > STS assume-role > static keys.
    if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
        cred = credential.CVMRoleCredential()
    elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
        cred = credential.STSAssumeRoleCredential(
            g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.RoleArn.replace('-', '_')],
            g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
        )
    else:
        cred = credential.Credential(
            g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
        )
    http_profile = HttpProfile(
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint],
        proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.TatClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.DeleteCommandRequest()
    model.from_json_string(json.dumps(args))
    start_time = time.time()
    while True:
        rsp = client.DeleteCommand(model)
        result = rsp.to_json_string()
        try:
            json_obj = json.loads(result)
        except TypeError:
            json_obj = json.loads(result.decode('utf-8'))  # python3.3
        # BUGFIX: waiter config must be looked up via the OptionsDefine constant,
        # not the literal string 'OptionsDefine.WaiterInfo' (which is never a key
        # of g_param and raised KeyError whenever a waiter was configured).
        if not g_param[OptionsDefine.Waiter] or search(g_param[OptionsDefine.WaiterInfo]['expr'], json_obj) == g_param[OptionsDefine.WaiterInfo]['to']:
            break
        cur_time = time.time()
        if cur_time - start_time >= g_param[OptionsDefine.WaiterInfo]['timeout']:
            raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
            (g_param[OptionsDefine.WaiterInfo]['expr'], g_param[OptionsDefine.WaiterInfo]['to'],
            search(g_param[OptionsDefine.WaiterInfo]['expr'], json_obj)))
        else:
            print('Inquiry result is %s.' % search(g_param[OptionsDefine.WaiterInfo]['expr'], json_obj))
            time.sleep(g_param[OptionsDefine.WaiterInfo]['interval'])
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doModifyCommand(args, parsed_globals):
    """Invoke the TAT ModifyCommand API and print the formatted response.

    Builds credentials from the parsed global CLI options (CVM role,
    STS assume-role, or plain secret id/key + token), sends the request,
    and — when a waiter is configured — polls until the watched JMESPath
    expression reaches the expected value or the waiter times out.

    :param args: action arguments, JSON-serializable dict used to fill the request model
    :param parsed_globals: parsed global CLI options
    :raises ClientError: when the waiter's timeout elapses before the condition holds
    """
    g_param = parse_global_arg(parsed_globals)
    # Credential precedence: CVM instance role > STS assume-role > static keys.
    if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
        cred = credential.CVMRoleCredential()
    elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
        cred = credential.STSAssumeRoleCredential(
            g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.RoleArn.replace('-', '_')],
            g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
        )
    else:
        cred = credential.Credential(
            g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
        )
    http_profile = HttpProfile(
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint],
        proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.TatClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.ModifyCommandRequest()
    model.from_json_string(json.dumps(args))
    start_time = time.time()
    while True:
        rsp = client.ModifyCommand(model)
        result = rsp.to_json_string()
        try:
            json_obj = json.loads(result)
        except TypeError:
            json_obj = json.loads(result.decode('utf-8'))  # python3.3
        # BUGFIX: waiter config must be looked up via the OptionsDefine constant,
        # not the literal string 'OptionsDefine.WaiterInfo' (which is never a key
        # of g_param and raised KeyError whenever a waiter was configured).
        if not g_param[OptionsDefine.Waiter] or search(g_param[OptionsDefine.WaiterInfo]['expr'], json_obj) == g_param[OptionsDefine.WaiterInfo]['to']:
            break
        cur_time = time.time()
        if cur_time - start_time >= g_param[OptionsDefine.WaiterInfo]['timeout']:
            raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
            (g_param[OptionsDefine.WaiterInfo]['expr'], g_param[OptionsDefine.WaiterInfo]['to'],
            search(g_param[OptionsDefine.WaiterInfo]['expr'], json_obj)))
        else:
            print('Inquiry result is %s.' % search(g_param[OptionsDefine.WaiterInfo]['expr'], json_obj))
            time.sleep(g_param[OptionsDefine.WaiterInfo]['interval'])
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeAutomationAgentStatus(args, parsed_globals):
    """Invoke the TAT DescribeAutomationAgentStatus API and print the formatted response.

    Builds credentials from the parsed global CLI options (CVM role,
    STS assume-role, or plain secret id/key + token), sends the request,
    and — when a waiter is configured — polls until the watched JMESPath
    expression reaches the expected value or the waiter times out.

    :param args: action arguments, JSON-serializable dict used to fill the request model
    :param parsed_globals: parsed global CLI options
    :raises ClientError: when the waiter's timeout elapses before the condition holds
    """
    g_param = parse_global_arg(parsed_globals)
    # Credential precedence: CVM instance role > STS assume-role > static keys.
    if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
        cred = credential.CVMRoleCredential()
    elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
        cred = credential.STSAssumeRoleCredential(
            g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.RoleArn.replace('-', '_')],
            g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
        )
    else:
        cred = credential.Credential(
            g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
        )
    http_profile = HttpProfile(
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint],
        proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.TatClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.DescribeAutomationAgentStatusRequest()
    model.from_json_string(json.dumps(args))
    start_time = time.time()
    while True:
        rsp = client.DescribeAutomationAgentStatus(model)
        result = rsp.to_json_string()
        try:
            json_obj = json.loads(result)
        except TypeError:
            json_obj = json.loads(result.decode('utf-8'))  # python3.3
        # BUGFIX: waiter config must be looked up via the OptionsDefine constant,
        # not the literal string 'OptionsDefine.WaiterInfo' (which is never a key
        # of g_param and raised KeyError whenever a waiter was configured).
        if not g_param[OptionsDefine.Waiter] or search(g_param[OptionsDefine.WaiterInfo]['expr'], json_obj) == g_param[OptionsDefine.WaiterInfo]['to']:
            break
        cur_time = time.time()
        if cur_time - start_time >= g_param[OptionsDefine.WaiterInfo]['timeout']:
            raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
            (g_param[OptionsDefine.WaiterInfo]['expr'], g_param[OptionsDefine.WaiterInfo]['to'],
            search(g_param[OptionsDefine.WaiterInfo]['expr'], json_obj)))
        else:
            print('Inquiry result is %s.' % search(g_param[OptionsDefine.WaiterInfo]['expr'], json_obj))
            time.sleep(g_param[OptionsDefine.WaiterInfo]['interval'])
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doPreviewReplacedCommandContent(args, parsed_globals):
    """Invoke the TAT PreviewReplacedCommandContent API and print the formatted response.

    Builds credentials from the parsed global CLI options (CVM role,
    STS assume-role, or plain secret id/key + token), sends the request,
    and — when a waiter is configured — polls until the watched JMESPath
    expression reaches the expected value or the waiter times out.

    :param args: action arguments, JSON-serializable dict used to fill the request model
    :param parsed_globals: parsed global CLI options
    :raises ClientError: when the waiter's timeout elapses before the condition holds
    """
    g_param = parse_global_arg(parsed_globals)
    # Credential precedence: CVM instance role > STS assume-role > static keys.
    if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
        cred = credential.CVMRoleCredential()
    elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
        cred = credential.STSAssumeRoleCredential(
            g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.RoleArn.replace('-', '_')],
            g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
        )
    else:
        cred = credential.Credential(
            g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
        )
    http_profile = HttpProfile(
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint],
        proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.TatClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.PreviewReplacedCommandContentRequest()
    model.from_json_string(json.dumps(args))
    start_time = time.time()
    while True:
        rsp = client.PreviewReplacedCommandContent(model)
        result = rsp.to_json_string()
        try:
            json_obj = json.loads(result)
        except TypeError:
            json_obj = json.loads(result.decode('utf-8'))  # python3.3
        # BUGFIX: waiter config must be looked up via the OptionsDefine constant,
        # not the literal string 'OptionsDefine.WaiterInfo' (which is never a key
        # of g_param and raised KeyError whenever a waiter was configured).
        if not g_param[OptionsDefine.Waiter] or search(g_param[OptionsDefine.WaiterInfo]['expr'], json_obj) == g_param[OptionsDefine.WaiterInfo]['to']:
            break
        cur_time = time.time()
        if cur_time - start_time >= g_param[OptionsDefine.WaiterInfo]['timeout']:
            raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
            (g_param[OptionsDefine.WaiterInfo]['expr'], g_param[OptionsDefine.WaiterInfo]['to'],
            search(g_param[OptionsDefine.WaiterInfo]['expr'], json_obj)))
        else:
            print('Inquiry result is %s.' % search(g_param[OptionsDefine.WaiterInfo]['expr'], json_obj))
            time.sleep(g_param[OptionsDefine.WaiterInfo]['interval'])
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doRunCommand(args, parsed_globals):
    """Invoke the TAT RunCommand API and print the formatted response.

    Builds credentials from the parsed global CLI options (CVM role,
    STS assume-role, or plain secret id/key + token), sends the request,
    and — when a waiter is configured — polls until the watched JMESPath
    expression reaches the expected value or the waiter times out.

    :param args: action arguments, JSON-serializable dict used to fill the request model
    :param parsed_globals: parsed global CLI options
    :raises ClientError: when the waiter's timeout elapses before the condition holds
    """
    g_param = parse_global_arg(parsed_globals)
    # Credential precedence: CVM instance role > STS assume-role > static keys.
    if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
        cred = credential.CVMRoleCredential()
    elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
        cred = credential.STSAssumeRoleCredential(
            g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.RoleArn.replace('-', '_')],
            g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
        )
    else:
        cred = credential.Credential(
            g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
        )
    http_profile = HttpProfile(
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint],
        proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.TatClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.RunCommandRequest()
    model.from_json_string(json.dumps(args))
    start_time = time.time()
    while True:
        rsp = client.RunCommand(model)
        result = rsp.to_json_string()
        try:
            json_obj = json.loads(result)
        except TypeError:
            json_obj = json.loads(result.decode('utf-8'))  # python3.3
        # BUGFIX: waiter config must be looked up via the OptionsDefine constant,
        # not the literal string 'OptionsDefine.WaiterInfo' (which is never a key
        # of g_param and raised KeyError whenever a waiter was configured).
        if not g_param[OptionsDefine.Waiter] or search(g_param[OptionsDefine.WaiterInfo]['expr'], json_obj) == g_param[OptionsDefine.WaiterInfo]['to']:
            break
        cur_time = time.time()
        if cur_time - start_time >= g_param[OptionsDefine.WaiterInfo]['timeout']:
            raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
            (g_param[OptionsDefine.WaiterInfo]['expr'], g_param[OptionsDefine.WaiterInfo]['to'],
            search(g_param[OptionsDefine.WaiterInfo]['expr'], json_obj)))
        else:
            print('Inquiry result is %s.' % search(g_param[OptionsDefine.WaiterInfo]['expr'], json_obj))
            time.sleep(g_param[OptionsDefine.WaiterInfo]['interval'])
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeInvokers(args, parsed_globals):
    """Invoke the TAT DescribeInvokers API and print the formatted response.

    Builds credentials from the parsed global CLI options (CVM role,
    STS assume-role, or plain secret id/key + token), sends the request,
    and — when a waiter is configured — polls until the watched JMESPath
    expression reaches the expected value or the waiter times out.

    :param args: action arguments, JSON-serializable dict used to fill the request model
    :param parsed_globals: parsed global CLI options
    :raises ClientError: when the waiter's timeout elapses before the condition holds
    """
    g_param = parse_global_arg(parsed_globals)
    # Credential precedence: CVM instance role > STS assume-role > static keys.
    if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
        cred = credential.CVMRoleCredential()
    elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
        cred = credential.STSAssumeRoleCredential(
            g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.RoleArn.replace('-', '_')],
            g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
        )
    else:
        cred = credential.Credential(
            g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
        )
    http_profile = HttpProfile(
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint],
        proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.TatClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.DescribeInvokersRequest()
    model.from_json_string(json.dumps(args))
    start_time = time.time()
    while True:
        rsp = client.DescribeInvokers(model)
        result = rsp.to_json_string()
        try:
            json_obj = json.loads(result)
        except TypeError:
            json_obj = json.loads(result.decode('utf-8'))  # python3.3
        # BUGFIX: waiter config must be looked up via the OptionsDefine constant,
        # not the literal string 'OptionsDefine.WaiterInfo' (which is never a key
        # of g_param and raised KeyError whenever a waiter was configured).
        if not g_param[OptionsDefine.Waiter] or search(g_param[OptionsDefine.WaiterInfo]['expr'], json_obj) == g_param[OptionsDefine.WaiterInfo]['to']:
            break
        cur_time = time.time()
        if cur_time - start_time >= g_param[OptionsDefine.WaiterInfo]['timeout']:
            raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
            (g_param[OptionsDefine.WaiterInfo]['expr'], g_param[OptionsDefine.WaiterInfo]['to'],
            search(g_param[OptionsDefine.WaiterInfo]['expr'], json_obj)))
        else:
            print('Inquiry result is %s.' % search(g_param[OptionsDefine.WaiterInfo]['expr'], json_obj))
            time.sleep(g_param[OptionsDefine.WaiterInfo]['interval'])
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doInvokeCommand(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
cred = credential.CVMRoleCredential()
elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
cred = credential.STSAssumeRoleCredential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.RoleArn.replace('-', '_')],
g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
)
else:
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint],
proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.TatClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.InvokeCommandRequest()
model.from_json_string(json.dumps(args))
start_time = time.time()
while True:
rsp = client.InvokeCommand(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
if not g_param[OptionsDefine.Waiter] or search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj) == g_param['OptionsDefine.WaiterInfo']['to']:
break
cur_time = time.time()
if cur_time - start_time >= g_param['OptionsDefine.WaiterInfo']['timeout']:
raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
(g_param['OptionsDefine.WaiterInfo']['expr'], g_param['OptionsDefine.WaiterInfo']['to'],
search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj)))
else:
print('Inquiry result is %s.' % search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj))
time.sleep(g_param['OptionsDefine.WaiterInfo']['interval'])
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
CLIENT_MAP = {
"v20201028": tat_client_v20201028,
}
MODELS_MAP = {
"v20201028": models_v20201028,
}
ACTION_MAP = {
"EnableInvoker": doEnableInvoker,
"DeleteInvoker": doDeleteInvoker,
"DescribeCommands": doDescribeCommands,
"DescribeInvocations": doDescribeInvocations,
"CancelInvocation": doCancelInvocation,
"DescribeInvocationTasks": doDescribeInvocationTasks,
"CreateInvoker": doCreateInvoker,
"DescribeInvokerRecords": doDescribeInvokerRecords,
"DescribeRegions": doDescribeRegions,
"DisableInvoker": doDisableInvoker,
"ModifyInvoker": doModifyInvoker,
"CreateCommand": doCreateCommand,
"DeleteCommand": doDeleteCommand,
"ModifyCommand": doModifyCommand,
"DescribeAutomationAgentStatus": doDescribeAutomationAgentStatus,
"PreviewReplacedCommandContent": doPreviewReplacedCommandContent,
"RunCommand": doRunCommand,
"DescribeInvokers": doDescribeInvokers,
"InvokeCommand": doInvokeCommand,
}
AVAILABLE_VERSION_LIST = [
"v20201028",
]
def action_caller():
return ACTION_MAP
def parse_global_arg(parsed_globals):
g_param = parsed_globals
is_exist_profile = True
if not parsed_globals["profile"]:
is_exist_profile = False
g_param["profile"] = "default"
configure_path = os.path.join(os.path.expanduser("~"), ".tccli")
is_conf_exist, conf_path = Utils.file_existed(configure_path, g_param["profile"] + ".configure")
is_cred_exist, cred_path = Utils.file_existed(configure_path, g_param["profile"] + ".credential")
conf = {}
cred = {}
if is_conf_exist:
conf = Utils.load_json_msg(conf_path)
if is_cred_exist:
cred = Utils.load_json_msg(cred_path)
if not (isinstance(conf, dict) and isinstance(cred, dict)):
raise ConfigurationError(
"file: %s or %s is not json format"
% (g_param["profile"] + ".configure", g_param["profile"] + ".credential"))
if OptionsDefine.Token not in cred:
cred[OptionsDefine.Token] = None
if not is_exist_profile:
if os.environ.get(OptionsDefine.ENV_SECRET_ID) and os.environ.get(OptionsDefine.ENV_SECRET_KEY):
cred[OptionsDefine.SecretId] = os.environ.get(OptionsDefine.ENV_SECRET_ID)
cred[OptionsDefine.SecretKey] = os.environ.get(OptionsDefine.ENV_SECRET_KEY)
cred[OptionsDefine.Token] = os.environ.get(OptionsDefine.ENV_TOKEN)
if os.environ.get(OptionsDefine.ENV_REGION):
conf[OptionsDefine.Region] = os.environ.get(OptionsDefine.ENV_REGION)
if os.environ.get(OptionsDefine.ENV_ROLE_ARN) and os.environ.get(OptionsDefine.ENV_ROLE_SESSION_NAME):
cred[OptionsDefine.RoleArn] = os.environ.get(OptionsDefine.ENV_ROLE_ARN)
cred[OptionsDefine.RoleSessionName] = os.environ.get(OptionsDefine.ENV_ROLE_SESSION_NAME)
for param in g_param.keys():
if g_param[param] is None:
if param in [OptionsDefine.SecretKey, OptionsDefine.SecretId, OptionsDefine.Token]:
if param in cred:
g_param[param] = cred[param]
elif not g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
raise ConfigurationError("%s is invalid" % param)
elif param in [OptionsDefine.Region, OptionsDefine.Output]:
if param in conf:
g_param[param] = conf[param]
else:
raise ConfigurationError("%s is invalid" % param)
elif param.replace('_', '-') in [OptionsDefine.RoleArn, OptionsDefine.RoleSessionName]:
if param.replace('_', '-') in cred:
g_param[param] = cred[param.replace('_', '-')]
try:
if g_param[OptionsDefine.ServiceVersion]:
g_param[OptionsDefine.Version] = "v" + g_param[OptionsDefine.ServiceVersion].replace('-', '')
else:
version = conf["tat"][OptionsDefine.Version]
g_param[OptionsDefine.Version] = "v" + version.replace('-', '')
if g_param[OptionsDefine.Endpoint] is None:
g_param[OptionsDefine.Endpoint] = conf["tat"][OptionsDefine.Endpoint]
except Exception as err:
raise ConfigurationError("config file:%s error, %s" % (conf_path, str(err)))
if g_param[OptionsDefine.Version] not in AVAILABLE_VERSION_LIST:
raise Exception("available versions: %s" % " ".join(AVAILABLE_VERSION_LIST))
if g_param[OptionsDefine.Waiter]:
param = eval(g_param[OptionsDefine.Waiter])
if 'expr' not in param:
raise Exception('`expr` in `--waiter` must be defined')
if 'to' not in param:
raise Exception('`to` in `--waiter` must be defined')
if 'timeout' not in param:
if 'waiter' in conf and 'timeout' in conf['waiter']:
param['timeout'] = conf['waiter']['timeout']
else:
param['timeout'] = 180
if 'interval' not in param:
if 'waiter' in conf and 'interval' in conf['waiter']:
param['interval'] = conf['waiter']['interval']
else:
param['timeout'] = 5
param['interval'] = min(param['interval'], param['timeout'])
g_param['OptionsDefine.WaiterInfo'] = param
# 如果在配置文件中读取字段的值,python2中的json.load函数会读取unicode类型的值,因此这里要转化类型
if six.PY2:
for key, value in g_param.items():
if isinstance(value, six.text_type):
g_param[key] = value.encode('utf-8')
return g_param
| 51.426841
| 155
| 0.676628
| 5,984
| 55,181
| 6.017881
| 0.038269
| 0.095971
| 0.286496
| 0.123212
| 0.884952
| 0.877704
| 0.874205
| 0.868818
| 0.86232
| 0.856766
| 0
| 0.005189
| 0.189703
| 55,181
| 1,072
| 156
| 51.474813
| 0.800192
| 0.004911
| 0
| 0.760163
| 0
| 0
| 0.136426
| 0.068778
| 0
| 0
| 0
| 0
| 0
| 1
| 0.021341
| false
| 0
| 0.01626
| 0.001016
| 0.039634
| 0.019309
| 0
| 0
| 0
| null | 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
a3b2068b58f6d41bb78c7bce620725ebc03c5e1b
| 817
|
py
|
Python
|
swig-2.0.4/Examples/test-suite/python/varargs_overload_runme.py
|
vidkidz/crossbridge
|
ba0bf94aee0ce6cf7eb5be882382e52bc57ba396
|
[
"MIT"
] | 1
|
2016-04-09T02:58:13.000Z
|
2016-04-09T02:58:13.000Z
|
swig-2.0.4/Examples/test-suite/python/varargs_overload_runme.py
|
vidkidz/crossbridge
|
ba0bf94aee0ce6cf7eb5be882382e52bc57ba396
|
[
"MIT"
] | null | null | null |
swig-2.0.4/Examples/test-suite/python/varargs_overload_runme.py
|
vidkidz/crossbridge
|
ba0bf94aee0ce6cf7eb5be882382e52bc57ba396
|
[
"MIT"
] | null | null | null |
import varargs_overload
if varargs_overload.vararg_over1("Hello") != "Hello":
raise RuntimeError, "Failed"
if varargs_overload.vararg_over1(2) != "2":
raise RuntimeError, "Failed"
if varargs_overload.vararg_over2("Hello") != "Hello":
raise RuntimeError, "Failed"
if varargs_overload.vararg_over2(2, 2.2) != "2 2.2":
raise RuntimeError, "Failed"
if varargs_overload.vararg_over3("Hello") != "Hello":
raise RuntimeError, "Failed"
if varargs_overload.vararg_over3(2, 2.2, "hey") != "2 2.2 hey":
raise RuntimeError, "Failed"
if varargs_overload.vararg_over4("Hello") != "Hello":
raise RuntimeError, "Failed"
if varargs_overload.vararg_over4(123) != "123":
raise RuntimeError, "Failed"
if varargs_overload.vararg_over4("Hello", 123) != "Hello":
raise RuntimeError, "Failed"
| 25.53125
| 63
| 0.707466
| 104
| 817
| 5.375
| 0.153846
| 0.268336
| 0.273703
| 0.370304
| 0.874776
| 0.808587
| 0.808587
| 0.808587
| 0.763864
| 0
| 0
| 0.045911
| 0.146879
| 817
| 31
| 64
| 26.354839
| 0.756098
| 0
| 0
| 0.473684
| 0
| 0
| 0.153186
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.052632
| null | null | 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
4330bc37b58c8ebfd30d03c7429dd5e4cc38ce8f
| 37,149
|
py
|
Python
|
st/clitests/negative_spec.py
|
soniyamoholkar/cortx-s3server
|
90a07d20af4d6d30298b8f6308ff59fc3346ec38
|
[
"Apache-2.0"
] | null | null | null |
st/clitests/negative_spec.py
|
soniyamoholkar/cortx-s3server
|
90a07d20af4d6d30298b8f6308ff59fc3346ec38
|
[
"Apache-2.0"
] | null | null | null |
st/clitests/negative_spec.py
|
soniyamoholkar/cortx-s3server
|
90a07d20af4d6d30298b8f6308ff59fc3346ec38
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright (c) 2020 Seagate Technology LLC and/or its Affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# For any questions about this software or licensing,
# please email opensource@seagate.com or cortx-questions@seagate.com.
#
#!/usr/bin/python3.6
from framework import Config
from framework import S3PyCliTest
from s3cmd import S3cmdTest
from s3fi import S3fiTest
from jclient import JClientTest
from s3client_config import S3ClientConfig
from s3kvstool import S3kvTest
import s3kvs
import yaml
# Helps debugging
# Config.log_enabled = True
# Config.dummy_run = True
# Config.client_execution_timeout = 300 * 1000
# Config.request_timeout = 300 * 1000
# Config.socket_timeout = 300 * 1000
# Enable retry flag to limit retries on failure
Config.s3cmd_max_retries = 2
# Set time_readable_format to False if you want to display the time in milli seconds.
# Config.time_readable_format = False
# TODO
# DNS-compliant bucket names should not contains underscore or other special characters.
# The allowed characters are [a-zA-Z0-9.-]*
#
# Add validations to S3 server and write system tests for the same.
# ***MAIN ENTRY POINT
# Run before all to setup the test environment.
print("Configuring LDAP")
S3PyCliTest('Before_all').before_all()
# Set pathstyle =false to run jclient for partial multipart upload
S3ClientConfig.pathstyle = False
S3ClientConfig.access_key_id = 'AKIAJPINPFRBTPAYOGNA'
S3ClientConfig.secret_key = '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'
S3fiTest('Disable bucket metadata cache').\
enable_fi("enable", "", "disable_bucket_metadata_cache").\
execute_test().command_is_successful()
config_types = ["pathstyle.s3cfg", "virtualhoststyle.s3cfg"]
for i, type in enumerate(config_types):
Config.config_file = type
# Create bucket list index failure when creating bucket
S3fiTest('s3cmd enable FI create index fail').enable_fi("enable", "always", "motr_idx_create_fail").execute_test().command_is_successful()
S3cmdTest('s3cmd cannot create bucket').create_bucket("seagatebucket").execute_test(negative_case=True).command_should_fail().command_error_should_have("InternalError")
S3fiTest('s3cmd disable Fault injection').disable_fi("motr_idx_create_fail").execute_test().command_is_successful()
# Create object list index failure when creating bucket
S3fiTest('s3cmd enable FI create index fail').enable_fi_offnonm("enable", "motr_idx_create_fail", "1", "1").execute_test().command_is_successful()
S3cmdTest('s3cmd cannot create bucket').create_bucket("seagatebucket").execute_test(negative_case=True).command_should_fail().command_error_should_have("InternalError")
S3fiTest('s3cmd disable Fault injection').disable_fi("motr_idx_create_fail").execute_test().command_is_successful()
# Create multipart list index failure when creating bucket
S3fiTest('s3cmd enable FI create index fail').enable_fi_offnonm("enable", "motr_idx_create_fail", "1", "99").execute_test().command_is_successful()
S3cmdTest('s3cmd cannot create bucket').create_bucket("seagatebucket").execute_test(negative_case=True).command_should_fail().command_error_should_have("InternalError")
S3fiTest('s3cmd disable Fault injection').disable_fi("motr_idx_create_fail").execute_test().command_is_successful()
# Create extended metadata index failure when creating bucket
S3fiTest('s3cmd enable FI create extended metadata index fail').enable_fi("enable", "always", "motr_idx_create_fail").execute_test().command_is_successful()
S3cmdTest('s3cmd cannot create bucket').create_bucket("seagatebucket").execute_test(negative_case=True).command_should_fail().command_error_should_have("InternalError")
S3fiTest('s3cmd disable Fault injection').disable_fi("motr_idx_create_fail").execute_test().command_is_successful()
# ************ Create bucket ************
S3cmdTest('s3cmd can create bucket').create_bucket("seagatebucket").\
execute_test().command_is_successful()
# ************ List buckets ************
S3cmdTest('s3cmd can list buckets').list_buckets().execute_test().\
command_is_successful().command_response_should_have('s3://seagatebucket')
# ************ BUCKET METADATA CORRUPTION TEST ***********
# Bucket listing shouldn't list corrupted bucket
S3fiTest('s3cmd enable FI bucket_metadata_corrupted').\
enable_fi("enable", "always", "bucket_metadata_corrupted").\
execute_test().command_is_successful()
S3cmdTest('s3cmd can not list corrupted bucket metadata').list_buckets().\
execute_test().command_is_successful().command_response_should_have('')
S3fiTest('s3cmd can disable FI bucket_metadata_corrupted').\
disable_fi("bucket_metadata_corrupted").\
execute_test().command_is_successful()
# ************ BUCKET METADATA CORRUPTION TEST ***********
# Bucket listing shouldn't list corrupted bucket
S3cmdTest('s3cmd can create bucket').create_bucket("seagatebucket123").\
execute_test().command_is_successful()
S3fiTest('s3cmd enable FI bucket_metadata_corrupted').\
enable_fi_enablen("enable", "bucket_metadata_corrupted", "2").\
execute_test().command_is_successful()
S3cmdTest('s3cmd does not list corrupted bucket').list_buckets().\
execute_test().command_is_successful().\
command_response_should_not_have('s3://seagatebucket123').\
command_response_should_have('s3://seagatebucket')
S3fiTest('s3cmd can disable FI bucket_metadata_corrupted').\
disable_fi("bucket_metadata_corrupted").\
execute_test().command_is_successful()
S3cmdTest('s3cmd can delete bucket').delete_bucket("seagatebucket123").\
execute_test().command_is_successful()
# If bucket metadata is corrupted then object listing within bucket shall
# return an error
S3fiTest('s3cmd enable FI bucket_metadata_corrupted').\
enable_fi("enable", "always", "bucket_metadata_corrupted").\
execute_test().command_is_successful()
S3cmdTest('s3cmd can not list objects within bucket').list_objects('seagatebucket').\
execute_test(negative_case=True).command_should_fail().\
command_error_should_have("InternalError")
S3fiTest('s3cmd can disable FI bucket_metadata_corrupted').\
disable_fi("bucket_metadata_corrupted").\
execute_test().command_is_successful()
# ************ OBJECT METADATA CORRUPTION TEST ***********
# Object listing shouldn't list corrupted objects
S3cmdTest('s3cmd can upload 3K file').\
upload_test("seagatebucket", "3Kfile", 3000).\
execute_test().command_is_successful()
S3cmdTest('s3cmd can upload 9K file').\
upload_test("seagatebucket", "9Kfile", 9000).\
execute_test().command_is_successful()
S3fiTest('s3cmd can enable FI object_metadata_corrupted').\
enable_fi_enablen("enable", "object_metadata_corrupted", "2").\
execute_test().command_is_successful()
S3cmdTest('s3cmd does not list corrupted objects').list_objects('seagatebucket').\
execute_test().command_is_successful().\
command_response_should_not_have('9Kfile').\
command_response_should_have('3Kfile')
S3fiTest('s3cmd can disable FI object_metadata_corrupted').\
disable_fi("object_metadata_corrupted").\
execute_test().command_is_successful()
S3cmdTest('s3cmd can delete 3K file').\
delete_test("seagatebucket", "3Kfile").\
execute_test().command_is_successful()
S3cmdTest('s3cmd can delete 9K file').\
delete_test("seagatebucket", "9Kfile").\
execute_test().command_is_successful()
# `Get Object` for corrupted object shall return an error
S3cmdTest('s3cmd can upload 3K file').\
upload_test("seagatebucket", "3Kfile", 3000).\
execute_test().command_is_successful()
S3fiTest('s3cmd can enable FI object_metadata_corrupted').\
enable_fi("enable", "always", "object_metadata_corrupted").\
execute_test().command_is_successful()
S3cmdTest('s3cmd can not download corrupted object').\
download_test("seagatebucket", "3Kfile").\
execute_test(negative_case=True).command_should_fail()
S3fiTest('s3cmd can disable FI object_metadata_corrupted').\
disable_fi("object_metadata_corrupted").\
execute_test().command_is_successful()
S3cmdTest('s3cmd can delete 3K file').\
delete_test("seagatebucket", "3Kfile").\
execute_test().command_is_successful()
S3fiTest('s3cmd can enable FI motr_enity_open').\
enable_fi("enable", "always", "motr_entity_open_fail").\
execute_test().command_is_successful()
# test for delete bucket
S3cmdTest('s3cmd cannot delete bucket').delete_bucket("seagatebucket").\
execute_test(negative_case=True).command_should_fail().\
command_error_should_have("ServiceUnavailable")
S3fiTest('s3cmd can disable FI motr_enity_open_fail').\
disable_fi("motr_entity_open_fail").\
execute_test().command_is_successful()
#motr_enity_open failure and chunk upload
S3fiTest('s3cmd can enable FI motr_enity_open').\
enable_fi("enable", "always", "motr_entity_open_fail").\
execute_test().command_is_successful()
JClientTest('Jclient can upload 3k file in chunked mode').\
put_object("seagatebucket", "3Kfile", 3000, chunked=True).\
execute_test().command_is_successful()
S3fiTest('s3cmd can disable FI motr_enity_open_fail').\
disable_fi("motr_entity_open_fail").\
execute_test().command_is_successful()
S3cmdTest('s3cmd can delete 3k file').\
delete_test("seagatebucket", "3Kfile").\
execute_test().command_is_successful()
# motr_open_entity fails read failure
S3cmdTest('s3cmd can upload 3K file').\
upload_test("seagatebucket", "3Kfile", 3000).\
execute_test().command_is_successful()
S3fiTest('s3cmd can enable FI motr_enity_open').\
enable_fi("enable", "always", "motr_entity_open_fail").\
execute_test().command_is_successful()
S3cmdTest('s3cmd cannot download 3k file').\
download_test("seagatebucket", "3kfile").\
execute_test(negative_case=True).command_should_fail()
S3fiTest('s3cmd can disable FI motr_enity_open_fail').\
disable_fi("motr_entity_open_fail").\
execute_test().command_is_successful()
S3cmdTest('s3cmd can delete 3k file').\
delete_test("seagatebucket", "3Kfile").\
execute_test().command_is_successful()
# motr_idx_op failure
S3cmdTest('s3cmd can upload 3K file').\
upload_test("seagatebucket", "3Kfile", 3000).\
execute_test().command_is_successful()
S3fiTest('s3cmd can enable FI motr_idx_op_fail').\
enable_fi("enable", "always", "motr_idx_op_fail").\
execute_test().command_is_successful()
S3cmdTest('s3cmd cannot download 3K file').\
download_test("seagatebucket", "3Kfile").\
execute_test(negative_case=True).command_should_fail()
S3fiTest('s3cmd can disable FI motr_idx_op_fail').\
disable_fi("motr_idx_op_fail").\
execute_test().command_is_successful()
S3cmdTest('s3cmd can delete 3k file').\
delete_test("seagatebucket", "3Kfile").\
execute_test().command_is_successful()
save_max_retry = Config.s3cmd_max_retries
Config.s3cmd_max_retries = 1
S3fiTest('s3cmd can enable FI motr_idx_op_fail').\
enable_fi("enable", "always", "motr_idx_op_fail").\
execute_test().command_is_successful()
S3cmdTest('s3cmd can not create bucket').create_bucket("seagatebucket123").\
execute_test(negative_case=True).command_should_fail().\
command_error_should_have("ServiceUnavailable")
S3cmdTest('s3cmd cannot upload 3K file').\
upload_test("seagatebucket", "3Kfile", 3000).\
execute_test(negative_case=True).command_should_fail().\
command_error_should_have("ServiceUnavailable")
S3cmdTest('s3cmd can not set acl on bucket').\
setacl_bucket("seagatebucket","read:C12345").\
execute_test(negative_case=True).command_should_fail().\
command_error_should_have("ServiceUnavailable")
S3cmdTest('s3cmd can not upload 18MBfile file').\
upload_test("seagatebucket", "18MBfile", 18000000).\
execute_test(negative_case=True).command_should_fail().\
command_error_should_have("ServiceUnavailable")
S3cmdTest('s3cmd can not list buckets').list_buckets().\
execute_test(negative_case=True).command_should_fail().\
command_error_should_have("ServiceUnavailable")
JClientTest('Jclient can not upload 3k file in chunked mode').\
put_object("seagatebucket", "3Kfile", 3000, chunked=True).\
execute_test(negative_case=True).command_should_fail().\
command_error_should_have("ServiceUnavailable")
JClientTest('Jclient can verify object does not exist').\
head_object("seagatebucket", "3kfile").\
execute_test(negative_case=True).command_should_fail().\
command_error_should_have("Service Unavailable")
S3fiTest('s3cmd can disable FI motr_idx_op_fail').\
disable_fi("motr_idx_op_fail").\
execute_test().command_is_successful()
# Don't trigger FI first time, then trigger FI next 99 times, then
# repeat the cycle
S3fiTest('s3cmd can enable FI motr_idx_op_fail').\
enable_fi_offnonm("enable", "motr_idx_op_fail", "1", "99").\
execute_test().command_is_successful()
S3cmdTest('s3cmd cannot upload 3K file').\
upload_test("seagatebucket", "3Kfile", 3000).\
execute_test(negative_case=True).command_should_fail().\
command_error_should_have("ServiceUnavailable")
S3cmdTest('s3cmd can not upload 18MBfile file').\
upload_test("seagatebucket", "18MBfile", 18000000).\
execute_test(negative_case=True).command_should_fail().\
command_error_should_have("ServiceUnavailable")
JClientTest('Jclient can not upload 3k file in chunked mode').\
put_object("seagatebucket", "3Kfile", 3000, chunked=True).\
execute_test(negative_case=True).command_should_fail().\
command_error_should_have("ServiceUnavailable")
S3fiTest('s3cmd can disable FI motr_idx_op_fail').\
disable_fi("motr_idx_op_fail").\
execute_test().command_is_successful()
# Don't trigger FI first two times, then trigger FI next 99 times, then
# repeat the cycle
S3fiTest('s3cmd can enable FI motr_idx_op_fail').\
enable_fi_offnonm("enable", "motr_idx_op_fail", "2", "99").\
execute_test().command_is_successful()
S3cmdTest('s3cmd can not upload 18MBfile file').\
upload_test("seagatebucket", "18MBfile", 18000000).\
execute_test(negative_case=True).command_should_fail().\
command_error_should_have("ServiceUnavailable")
S3fiTest('s3cmd can disable FI motr_idx_op_fail').\
disable_fi("motr_idx_op_fail").\
execute_test().command_is_successful()
# Don't trigger FI first three times, then trigger FI next 99 times, then
# repeat the cycle
S3fiTest('s3cmd can enable FI motr_idx_op_fail').\
enable_fi_offnonm("enable", "motr_idx_op_fail", "3", "99").\
execute_test().command_is_successful()
S3cmdTest('s3cmd can not upload 18MBfile file').\
upload_test("seagatebucket", "18MBfile", 18000000).\
execute_test(negative_case=True).command_should_fail().\
command_error_should_have("ServiceUnavailable")
S3fiTest('s3cmd can disable FI motr_idx_op_fail').\
disable_fi("motr_idx_op_fail").\
execute_test().command_is_successful()
S3fiTest('s3cmd can enable FI motr_idx_op_fail').\
enable_fi_offnonm("enable", "motr_idx_op_fail", "1", "99").\
execute_test().command_is_successful()
S3cmdTest('s3cmd can not upload 18MBfile file').\
upload_test("seagatebucket", "18MBfile", 18000000).\
execute_test(negative_case=True).command_should_fail().\
command_error_should_have("ServiceUnavailable")
S3fiTest('s3cmd can disable FI motr_idx_op_fail').\
disable_fi("motr_idx_op_fail").\
execute_test().command_is_successful()
fi_off="5"
S3fiTest('s3cmd can enable FI motr_idx_op_fail').\
enable_fi_offnonm("enable", "motr_idx_op_fail", fi_off, "99").\
execute_test().command_is_successful()
S3cmdTest('s3cmd can not upload 18MBfile file').\
upload_test("seagatebucket", "18MBfile", 18000000).\
execute_test(negative_case=True).command_should_fail()
S3fiTest('s3cmd can disable FI motr_idx_op_fail').\
disable_fi("motr_idx_op_fail").\
execute_test().command_is_successful()
result = JClientTest('Jclient can list all multipart uploads.').\
list_multipart("seagatebucket").execute_test()
result.command_response_should_have('18MBfile')
upload_id = result.status.stdout.split("id - ")[1]
JClientTest('Jclient can abort multipart upload').\
abort_multipart("seagatebucket", "18MBfile", upload_id).\
execute_test().command_is_successful()
fi_off="2"
S3fiTest('s3cmd can enable FI motr_idx_op_fail').\
enable_fi_offnonm("enable", "motr_idx_op_fail", fi_off, "99").\
execute_test().command_is_successful()
# S3PutMultiObjectAction::fetch_multipart_metadata
S3cmdTest('s3cmd can not upload 18MBfile file').\
upload_test("seagatebucket", "18MBfile", 18000000).\
execute_test(negative_case=True).command_should_fail().\
command_error_should_have("ServiceUnavailable")
S3fiTest('s3cmd can disable FI motr_idx_op_fail').\
disable_fi("motr_idx_op_fail").\
execute_test().command_is_successful()
fi_off="3"
S3fiTest('s3cmd can enable FI motr_idx_op_fail').\
enable_fi_offnonm("enable", "motr_idx_op_fail", fi_off, "99").\
execute_test().command_is_successful()
S3cmdTest('s3cmd can not upload 18MBfile file').\
upload_test("seagatebucket", "18MBfile", 18000000).\
execute_test(negative_case=True).command_should_fail()
S3fiTest('s3cmd can disable FI motr_idx_op_fail').\
disable_fi("motr_idx_op_fail").\
execute_test().command_is_successful()
S3fiTest('s3cmd can enable FI motr_idx_op_fail').\
enable_fi_offnonm("enable", "motr_idx_op_fail", "21", "99").\
execute_test().command_is_successful()
#Post complete operation -- fetch_multipart_info
S3cmdTest('s3cmd can not upload 18MBfile file').\
upload_test("seagatebucket", "18MBfile", 18000000).\
execute_test(negative_case=True).command_should_fail()
S3fiTest('s3cmd can disable FI motr_idx_op_fail').\
disable_fi("motr_idx_op_fail").\
execute_test().command_is_successful()
result = JClientTest('Jclient can list all multipart uploads.').\
list_multipart("seagatebucket").execute_test()
result.command_response_should_have('18MBfile')
upload_id = result.status.stdout.split("id - ")[1]
JClientTest('Jclient can abort multipart upload').\
abort_multipart("seagatebucket", "18MBfile", upload_id).\
execute_test().command_is_successful()
S3fiTest('s3cmd can enable FI motr_idx_op_fail').\
enable_fi_offnonm("enable", "motr_idx_op_fail", "36", "99").\
execute_test().command_is_successful()
#Post complete operation -- fetch_multipart_info
S3cmdTest('s3cmd can not upload 18MBfile file').\
upload_test("seagatebucket", "18MBfile", 18000000).\
execute_test(negative_case=True).command_should_fail().\
command_error_should_have("ServiceUnavailable")
S3fiTest('s3cmd can disable FI motr_idx_op_fail').\
disable_fi("motr_idx_op_fail").\
execute_test().command_is_successful()
result = JClientTest('Jclient can list all multipart uploads.').\
list_multipart("seagatebucket").execute_test()
result.command_response_should_have('18MBfile')
upload_id = result.status.stdout.split("id - ")[1]
JClientTest('Jclient can abort multipart upload').\
abort_multipart("seagatebucket", "18MBfile", upload_id).\
execute_test().command_is_successful()
Config.s3cmd_max_retries = save_max_retry
# motr_enity_create fails for object upload
S3fiTest('s3cmd can enable FI motr_enity_create').\
enable_fi("enable", "always", "motr_entity_create_fail").\
execute_test().command_is_successful()
S3cmdTest('s3cmd can not upload 3K file').\
upload_test("seagatebucket", "3Kfile", 3000).\
execute_test(negative_case=True).command_should_fail()
S3fiTest('s3cmd can disable FI motr_enity_create').\
disable_fi("motr_entity_create_fail").\
execute_test().command_is_successful()
S3cmdTest('s3cmd can upload 3K file').\
upload_test("seagatebucket", "3Kfile", 3000).\
execute_test().command_is_successful()
# Fault-injection (FI) cases: motr_entity_create / motr_entity_delete failures
# across plain, chunked and multipart uploads, and bucket-metadata creation.
# (Fixed "motr_enity"/"eneble" typos in the test descriptions.)
S3cmdTest('s3cmd can delete 3k file').\
        delete_test("seagatebucket", "3Kfile").\
        execute_test().command_is_successful()
# motr_entity_create failure and chunk upload
S3fiTest('s3cmd can enable FI motr_entity_create').\
        enable_fi("enable", "always", "motr_entity_create_fail").\
        execute_test().command_is_successful()
JClientTest('Jclient can not upload 3k file in chunked mode').\
        put_object("seagatebucket", "3Kfile", 3000, chunked=True).\
        execute_test(negative_case=True).command_should_fail()
S3fiTest('s3cmd can disable FI motr_entity_create').\
        disable_fi("motr_entity_create_fail").\
        execute_test().command_is_successful()
JClientTest('Jclient can upload 3k file in chunked mode').\
        put_object("seagatebucket", "3Kfile", 3000, chunked=True).\
        execute_test().command_is_successful()
S3cmdTest('s3cmd can delete 3k file').\
        delete_test("seagatebucket", "3Kfile").\
        execute_test().command_is_successful()
# motr_entity_create failure with multipart object
S3fiTest('s3cmd can enable FI motr_entity_create').\
        enable_fi("enable", "always", "motr_entity_create_fail").\
        execute_test().command_is_successful()
S3cmdTest('s3cmd can not upload 18MBfile file').\
        upload_test("seagatebucket", "18MBfile", 18000000).\
        execute_test(negative_case=True).command_should_fail()
S3fiTest('s3cmd can disable FI motr_entity_create').\
        disable_fi("motr_entity_create_fail").\
        execute_test().command_is_successful()
# motr_entity_delete failure on object delete
S3cmdTest('s3cmd can upload 3K file').\
        upload_test("seagatebucket", "3Kfile", 3000).\
        execute_test().command_is_successful()
S3fiTest('s3cmd can enable FI motr_entity_delete').\
        enable_fi("enable", "always", "motr_entity_delete_fail").\
        execute_test().command_is_successful()
# delete still succeeds for the client; entity cleanup is deferred
S3cmdTest('s3cmd can delete 3k file').\
        delete_test("seagatebucket", "3Kfile").\
        execute_test().command_is_successful()
S3fiTest('s3cmd can disable FI motr_entity_delete').\
        disable_fi("motr_entity_delete_fail").\
        execute_test().command_is_successful()
# motr_entity_delete failure and chunk upload
JClientTest('Jclient can upload 3k file in chunked mode').\
        put_object("seagatebucket", "3Kfile", 3000, chunked=True).\
        execute_test().command_is_successful()
S3fiTest('s3cmd can enable FI motr_entity_delete').\
        enable_fi("enable", "always", "motr_entity_delete_fail").\
        execute_test().command_is_successful()
S3cmdTest('s3cmd can delete 3k file').\
        delete_test("seagatebucket", "3Kfile").\
        execute_test().command_is_successful()
S3fiTest('s3cmd can disable FI motr_entity_delete').\
        disable_fi("motr_entity_delete_fail").\
        execute_test().command_is_successful()
# motr_entity_delete failure with multipart object
S3cmdTest('s3cmd can upload 18MBfile file').\
        upload_test("seagatebucket", "18MBfile", 18000000).\
        execute_test().command_is_successful()
S3fiTest('s3cmd can enable FI motr_entity_delete').\
        enable_fi("enable", "always", "motr_entity_delete_fail").\
        execute_test().command_is_successful()
S3cmdTest('s3cmd can delete 18MB file').\
        delete_test("seagatebucket", "18MBfile").\
        execute_test().command_is_successful()
S3fiTest('s3cmd can disable FI motr_entity_delete').\
        disable_fi("motr_entity_delete_fail").\
        execute_test().command_is_successful()
# motr_entity_create failure for bucket metadata
S3fiTest('s3cmd can enable FI motr_entity_create').\
        enable_fi("enable", "always", "motr_entity_create_fail").\
        execute_test().command_is_successful()
S3cmdTest('s3cmd can not create bucket').create_bucket("seagatebucket123").\
        execute_test(negative_case=True).command_should_fail()
S3fiTest('s3cmd can disable FI motr_entity_create').\
        disable_fi("motr_entity_create_fail").\
        execute_test().command_is_successful()
# the failed create must not leave a half-created bucket visible
S3cmdTest('s3cmd does not list corrupted bucket').list_buckets().\
        execute_test().command_is_successful().\
        command_response_should_not_have('s3://seagatebucket123').\
        command_response_should_have('s3://seagatebucket')
# negative tests cases for put_keyval
# set and delete policy negative testing
# NOTE(review): enable_fi_offnonm("enable", <tag>, off, num) appears to arm the
# fault after a number of index ops given by `off` -- confirm against S3fiTest.
S3fiTest('s3cmd enable FI motr idx op fail').\
        enable_fi_offnonm("enable", "motr_idx_op_fail", "3", "99").\
        execute_test().command_is_successful()
S3cmdTest('s3cmd cannot set acl on bucket').\
        setacl_bucket("seagatebucket","read:C12345").\
        execute_test(negative_case=True).command_should_fail().\
        command_error_should_have("ServiceUnavailable")
S3fiTest('s3cmd disable Fault injection').\
        disable_fi("motr_idx_op_fail").\
        execute_test().command_is_successful()
S3fiTest('s3cmd enable FI motr idx op fail').\
        enable_fi_offnonm("enable", "motr_idx_op_fail", "2", "99").\
        execute_test().command_is_successful()
S3cmdTest('s3cmd cannot set policy on bucket').\
        setpolicy_bucket("seagatebucket","policy.txt").\
        execute_test(negative_case=True).command_should_fail().\
        command_error_should_have("ServiceUnavailable")
S3fiTest('s3cmd disable Fault injection').\
        disable_fi("motr_idx_op_fail").\
        execute_test().command_is_successful()
S3cmdTest('s3cmd can set policy on bucket').\
        setpolicy_bucket("seagatebucket","policy.txt").\
        execute_test().command_is_successful()
S3fiTest('s3cmd enable FI motr idx op fail').\
        enable_fi_offnonm("enable", "motr_idx_op_fail", "2", "99").\
        execute_test().command_is_successful()
S3cmdTest('s3cmd cannot delete policy on bucket').\
        delpolicy_bucket("seagatebucket").\
        execute_test(negative_case=True).command_should_fail().\
        command_error_should_have("ServiceUnavailable")
S3fiTest('s3cmd disable Fault injection').\
        disable_fi("motr_idx_op_fail").\
        execute_test().command_is_successful()
fi_off="29"
S3fiTest('s3cmd enable FI motr idx op fail').\
        enable_fi_offnonm("enable", "motr_idx_op_fail", fi_off, "99").\
        execute_test().command_is_successful()
S3cmdTest('s3cmd can not upload 18MBfile file').\
        upload_test("seagatebucket", "18MBfile", 18000000).\
        execute_test(negative_case=True).command_should_fail()
S3fiTest('s3cmd disable Fault injection').\
        disable_fi("motr_idx_op_fail").\
        execute_test().command_is_successful()
# the failed multipart upload above leaves an in-progress upload; find and abort it
result = S3cmdTest('s3cmd can list multipart uploads in progress').\
        list_multipart_uploads("seagatebucket").execute_test()
result.command_response_should_have('18MBfile')
# upload id is the third tab-separated field on the third line of s3cmd output
upload_id = result.status.stdout.split('\n')[2].split('\t')[2]
S3cmdTest('S3cmd can abort multipart upload').\
        abort_multipart("seagatebucket", "18MBfile", upload_id).\
        execute_test().command_is_successful()
# S3cmdTest('s3cmd can delete policy on bucket').\
#    delpolicy_bucket("seagatebucket").\
#    execute_test().command_is_successful()
# object metadata save negative testing
S3fiTest('s3cmd enable FI motr idx op fail').\
        enable_fi_offnonm("enable", "motr_idx_op_fail", "3", "99").\
        execute_test().command_is_successful()
S3cmdTest('s3cmd can not upload 3K file').\
        upload_test("seagatebucket", "3Kfile", 3000).\
        execute_test(negative_case=True).command_should_fail().\
        command_error_should_have("ServiceUnavailable")
S3fiTest('s3cmd disable Fault injection').\
        disable_fi("motr_idx_op_fail").\
        execute_test().command_is_successful()
# bucket metadata save negative testing
S3fiTest('s3cmd enable FI motr idx op fail').\
        enable_fi_offnonm("enable", "motr_idx_op_fail", "2", "99").\
        execute_test().command_is_successful()
S3cmdTest('s3cmd can not create bucket').create_bucket("seagatebucket123").\
        execute_test(negative_case=True).command_should_fail().\
        command_error_should_have("ServiceUnavailable")
S3fiTest('s3cmd disable Fault injection').\
        disable_fi("motr_idx_op_fail").\
        execute_test().command_is_successful()
# multipart object metadata negative test
S3fiTest('s3cmd enable FI motr idx op fail').\
        enable_fi_offnonm("enable", "motr_idx_op_fail", "2", "99").\
        execute_test().command_is_successful()
S3cmdTest('s3cmd can not upload 18MBfile file').\
        upload_test("seagatebucket", "18MBfile", 18000000).\
        execute_test(negative_case=True).command_should_fail()
S3fiTest('s3cmd disable Fault injection').\
        disable_fi("motr_idx_op_fail").\
        execute_test().command_is_successful()
# Multipart listing shall return an error for corrupted object
JClientTest('Jclient can upload partial parts').\
        partial_multipart_upload("seagatebucket", "18MBfile", 18000000, 1, 2).\
        execute_test().command_is_successful()
result = JClientTest('Jclient can list all multipart uploads.').\
        list_multipart("seagatebucket").execute_test()
result.command_response_should_have('18MBfile')
# upload id follows the "id - " marker in jclient output
upload_id = result.status.stdout.split("id - ")[1]
print(upload_id)
S3fiTest('s3cmd can enable FI object_metadata_corrupted').\
        enable_fi("enable", "always", "object_metadata_corrupted").\
        execute_test().command_is_successful()
JClientTest('Jclient can not list multipart uploads of corrupted object').\
        list_parts("seagatebucket", "18MBfile", upload_id).\
        execute_test(negative_case=True).command_should_fail().\
        command_error_should_have("InternalError")
S3fiTest('s3cmd can disable FI object_metadata_corrupted').\
        disable_fi("object_metadata_corrupted").\
        execute_test().command_is_successful()
JClientTest('Jclient can abort multipart upload').\
        abort_multipart("seagatebucket", "18MBfile", upload_id).\
        execute_test().command_is_successful()
# negative tests cases for next_keyval
# bucket deletion negative test
S3cmdTest('s3cmd can create bucket').create_bucket("seagatebucket123").\
        execute_test().command_is_successful()
S3fiTest('s3cmd enable FI motr idx op fail').\
        enable_fi_offnonm("enable", "motr_idx_op_fail", "2", "99").\
        execute_test().command_is_successful()
# fetch_first_object_metadata_failed motr idx op fail
S3cmdTest('s3cmd can not delete bucket').delete_bucket("seagatebucket123").\
        execute_test(negative_case=True).command_should_fail().\
        command_error_should_have("ServiceUnavailable")
S3fiTest('s3cmd disable Fault injection').\
        disable_fi("motr_idx_op_fail").\
        execute_test().command_is_successful()
S3fiTest('s3cmd enable FI motr idx op fail').\
        enable_fi_offnonm("enable", "motr_idx_op_fail", "3", "99").\
        execute_test().command_is_successful()
# fetch_first_multipart_object_metadata_failed motr idx op fail
S3cmdTest('s3cmd can not delete bucket').delete_bucket("seagatebucket123").\
        execute_test(negative_case=True).command_should_fail().\
        command_error_should_have("ServiceUnavailable")
S3fiTest('s3cmd disable Fault injection').\
        disable_fi("motr_idx_op_fail").\
        execute_test().command_is_successful()
S3cmdTest('s3cmd can delete bucket').delete_bucket("seagatebucket123").\
        execute_test().command_is_successful()
# object list negative test
S3fiTest('s3cmd enable FI motr idx op fail').\
        enable_fi_offnonm("enable", "motr_idx_op_fail", "2", "99").\
        execute_test().command_is_successful()
S3cmdTest('s3cmd can not list objects').list_objects('seagatebucket').\
        execute_test(negative_case=True).command_should_fail().\
        command_error_should_have("ServiceUnavailable")
S3fiTest('s3cmd disable Fault injection').\
        disable_fi("motr_idx_op_fail").\
        execute_test().command_is_successful()
# list bucket negative test
S3fiTest('s3cmd enable FI motr idx op fail').\
        enable_fi("enable", "always", "motr_idx_op_fail").\
        execute_test().command_is_successful()
S3cmdTest('s3cmd can not list buckets').list_buckets().\
        execute_test(negative_case=True).command_should_fail().\
        command_error_should_have("ServiceUnavailable")
S3fiTest('s3cmd disable Fault injection').\
        disable_fi("motr_idx_op_fail").\
        execute_test().command_is_successful()
# multipart object metadata negative test
# Multipart listing shall return an error on motr_idx_op
JClientTest('Jclient can upload partial parts').\
        partial_multipart_upload("seagatebucket", "18MBfile", 18000000, 1, 2).\
        execute_test().command_is_successful()
result = JClientTest('Jclient can list all multipart uploads.').\
        list_multipart("seagatebucket").execute_test()
result.command_response_should_have('18MBfile')
upload_id = result.status.stdout.split("id - ")[1]
S3fiTest('s3cmd enable FI motr idx op fail').\
        enable_fi_offnonm("enable", "motr_idx_op_fail", "2", "99").\
        execute_test().command_is_successful()
JClientTest('Jclient can not list multipart uploads of corrupted object').\
        list_parts("seagatebucket", "18MBfile", upload_id).\
        execute_test(negative_case=True).command_should_fail().\
        command_error_should_have("ServiceUnavailable")
S3fiTest('s3cmd disable Fault injection').\
        disable_fi("motr_idx_op_fail").\
        execute_test().command_is_successful()
JClientTest('Jclient can abort multipart upload').\
        abort_multipart("seagatebucket", "18MBfile", upload_id).\
        execute_test().command_is_successful()
# ************ PART METADATA CORRUPTION TEST ***********
# Multipart listing shouldn't list corrupted parts
JClientTest('Jclient can upload partial parts').\
        partial_multipart_upload("seagatebucket", "18MBfile", 18000000, 1, 2).\
        execute_test().command_is_successful()
result = JClientTest('Jclient can list all multipart uploads.').\
        list_multipart("seagatebucket").execute_test()
result.command_response_should_have('18MBfile')
# upload id follows the "id - " marker in jclient output
upload_id = result.status.stdout.split("id - ")[1]
print(upload_id)
# corrupt only part number 2; part 1 must still be listed
S3fiTest('s3cmd can enable FI part_metadata_corrupted').\
        enable_fi_enablen("enable", "part_metadata_corrupted", "2").\
        execute_test().command_is_successful()
result = JClientTest('Jclient does not list corrupted part').\
        list_parts("seagatebucket", "18MBfile", upload_id).\
        execute_test()
result.command_response_should_have("part number - 1").\
        command_response_should_not_have("part number - 2")
S3fiTest('s3cmd can disable FI part_metadata_corrupted').\
        disable_fi("part_metadata_corrupted").\
        execute_test().command_is_successful()
JClientTest('Jclient can abort multipart upload').\
        abort_multipart("seagatebucket", "18MBfile", upload_id).\
        execute_test().command_is_successful()
# ************ Delete bucket ************
S3cmdTest('s3cmd can delete bucket').delete_bucket("seagatebucket").\
        execute_test().command_is_successful()
# disabling the "disable_bucket_metadata_cache" FI re-enables the cache
S3fiTest('Enable bucket metadata cache').\
        disable_fi("disable_bucket_metadata_cache").\
        execute_test().command_is_successful()
| 50.201351
| 172
| 0.708444
| 4,464
| 37,149
| 5.571685
| 0.06698
| 0.082703
| 0.0977
| 0.108556
| 0.88272
| 0.875523
| 0.866919
| 0.857832
| 0.850716
| 0.836242
| 0
| 0.02889
| 0.169803
| 37,149
| 739
| 173
| 50.269283
| 0.777569
| 0.103717
| 0
| 0.859348
| 0
| 0
| 0.331858
| 0.039212
| 0
| 0
| 0
| 0.001353
| 0
| 1
| 0
| false
| 0
| 0.015437
| 0
| 0.015437
| 0.005146
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
4a49a72848dfa5f254ce70ca46e6cd45be3819fe
| 47,845
|
py
|
Python
|
data/AnomalDataLoader.py
|
qgking/DASC_COVID19
|
3300516b1d0e9896e2fb2ffda8527e0e1a1fcf2c
|
[
"MIT"
] | 4
|
2021-04-21T05:09:49.000Z
|
2022-01-17T13:02:45.000Z
|
data/AnomalDataLoader.py
|
qgking/DASC_COVID19
|
3300516b1d0e9896e2fb2ffda8527e0e1a1fcf2c
|
[
"MIT"
] | null | null | null |
data/AnomalDataLoader.py
|
qgking/DASC_COVID19
|
3300516b1d0e9896e2fb2ffda8527e0e1a1fcf2c
|
[
"MIT"
] | 1
|
2021-07-08T02:20:43.000Z
|
2021-07-08T02:20:43.000Z
|
# -*- coding: utf-8 -*-
# @Time : 20/5/1 16:58
# @Author : qgking
# @Email : qgking@tju.edu.cn
# @Software: PyCharm
# @Desc : LungSegDataLoader.py
from skimage.transform import resize
from torch.utils.data import Dataset
from common.base_utls import *
from common.data_utils import *
import torch
from torchvision import transforms
from torchvision.utils import make_grid
from data.data_augmentation import *
# --------------5fold start-------------
class CovidInf5foldDatasetBase(Dataset):
    """Base dataset for 5-fold COVID infection segmentation.

    For each .npy volume in ``img_list`` this reads the matching
    ``<name>_inf.txt`` infection-coordinate file and records:
    the padded min/max bounding indices (3 voxels, clamped to the volume),
    the raw coordinate lines, and their count. Subclasses use these to
    sample patches in ``__getitem__``.
    """

    def __init__(self, root_dir, img_list, input_size, generate_each, mean, std, pos):
        self.input_x = input_size[0]
        self.input_y = input_size[1]
        self.input_z = input_size[2]
        self.root_dir = root_dir
        self.pos = pos
        # number of samples generated per volume per epoch
        self.generate_each = generate_each
        self.img_list = []
        self.minindex_list = []
        self.maxindex_list = []
        self.infidx = []
        self.inflines = []
        self.mean = mean
        self.std = std
        print('mean %.8f std %.8f' % (self.mean, self.std))
        for idx in range(len(img_list)):
            file_name = basename(img_list[idx])[:-4]
            print(img_list[idx])
            self.img_list.append(img_list[idx])
            scans = np.load(img_list[idx])
            txt_path = join(root_dir, file_name + '_inf.txt')
            if not exists(txt_path):
                # fall back to the name without its first character
                # (handles a leading-prefix naming mismatch on disk)
                txt_path = join(root_dir, file_name[1:] + '_inf.txt')
            values = np.loadtxt(txt_path, delimiter=' ')
            minindex = np.array(np.min(values, axis=0), dtype='int')
            maxindex = np.array(np.max(values, axis=0), dtype='int')
            # pad the infection bounding box by 3 voxels, clamped to the volume
            minindex[0] = max(minindex[0] - 3, 0)
            minindex[1] = max(minindex[1] - 3, 0)
            minindex[2] = max(minindex[2] - 3, 0)
            maxindex[0] = min(scans[0].shape[0], maxindex[0] + 3)
            maxindex[1] = min(scans[0].shape[1], maxindex[1] + 3)
            maxindex[2] = min(scans[0].shape[2], maxindex[2] + 3)
            self.minindex_list.append(minindex)
            self.maxindex_list.append(maxindex)
            # keep the raw coordinate lines; subclasses sample centers from them
            # (context manager guarantees the file is closed on error)
            with open(txt_path, 'r') as f2:
                liverline = f2.readlines()
            self.inflines.append(liverline)
            self.infidx.append(len(liverline))
            del scans

    def __len__(self):
        # generate_each samples per loaded volume
        return int(self.generate_each * len(self.img_list))

    def __getitem__(self, index):
        # abstract placeholder: subclasses implement the actual patch sampling
        return None
# 2d-augmented 5 fold
class CovidInf5fold2dAugSegDataset(CovidInf5foldDatasetBase):
    """5-fold dataset: crops a full-VOI, cols-deep slab whose z-center is an
    infection voxel (or a random lung voxel) and returns the image/infection
    pair after 2D augmentation via agumentation_img_inf_2d."""
    def __init__(self, root_dir, img_list, input_size, generate_each, mean, std, pos):
        super(CovidInf5fold2dAugSegDataset, self).__init__(root_dir, img_list, input_size,
                                                           generate_each, mean, std, pos)
    def __getitem__(self, index):
        # while True:
        # map the sample index back to its source volume
        count = index // self.generate_each
        scans = np.load(self.img_list[count])
        img = scans[0].copy()
        # NOTE(review): lung/infection channel order is swapped between the two
        # datasets -- confirm against the preprocessing that wrote the .npy files
        if 'MosMedData' in self.root_dir:
            lung = scans[2]
            infection = scans[1]
        elif 'COVID-19-CT' in self.root_dir:
            lung = scans[1]
            infection = scans[2]
        # lung bounding box, padded by 3 voxels on each side
        minx, maxx, miny, maxy, minz, maxz = min_max_voi(lung, superior=3, inferior=3)
        # tmp = img[minx: maxx, miny: maxy, minz: maxz].copy()
        # tmp = (tmp - self.mean) / self.std
        # img[minx: maxx, miny: maxy, minz: maxz] = tmp
        pos = np.random.random()
        if pos > self.pos:
            # only inf region selected
            # print('only inf region selected')
            minindex = self.minindex_list[count]
            maxindex = self.maxindex_list[count]
            lines = self.inflines[count]
            numid = self.infidx[count]
            scale = np.random.uniform(0.8, 1.2)  # unused here (advances the RNG)
            cols = int(self.input_z)
            # sample one infection-coordinate line as the slab center
            sed = np.random.randint(1, numid)
            cen = lines[sed - 1]
            cen = np.fromstring(cen, dtype=int, sep=' ')
            # clamp the z-center into the padded box, then into the volume
            c = int(min(max(minindex[2] + cols / 2, cen[2]), maxindex[2] - cols / 2 - 1))
            c = c if c - cols // 2 >= 0 else cols // 2
            c = c if c + cols // 2 < img.shape[-1] else img.shape[-1] - cols // 2 - 1
        else:
            # inf region and none inf region selected
            # print('inf region and none inf region selected')
            scale = np.random.uniform(0.8, 1.2)  # unused here (advances the RNG)
            cols = int(self.input_z)
            # random center anywhere inside the lung VOI
            x = np.random.randint(minx, maxx)
            y = np.random.randint(miny, maxy)
            z = np.random.randint(minz, maxz)
            cen = [x, y, z]
            c = int(min(max(minz + cols / 2, cen[2]), maxz - cols / 2 - 1))
            c = c if c - cols // 2 >= 0 else cols // 2
            c = c if c + cols // 2 < img.shape[-1] else img.shape[-1] - cols // 2 - 1
        flo = int(np.floor(cols / 2))
        # full in-plane VOI, cols slices thick around z-center c
        cropp_img = img[minx: maxx, miny: maxy, c - flo: c + cols - flo].copy()
        cropp_infection = infection[minx: maxx, miny: maxy, c - flo: c + cols - flo].copy()
        return agumentation_img_inf_2d(cropp_img, cropp_infection, self.input_x, self.input_y, self.mean, self.std)
# resize 5 fold
class CovidInf5fold2dResizeSegDataset(CovidInf5foldDatasetBase):
    """5-fold dataset: same slab sampling as the Aug variant, but returns the
    pair through the 3D resize path (agumentation_img_inf_3d)."""
    def __init__(self, root_dir, img_list, input_size, generate_each, mean, std, pos):
        super(CovidInf5fold2dResizeSegDataset, self).__init__(root_dir, img_list, input_size,
                                                              generate_each, mean, std, pos)
    def __getitem__(self, index):
        # while True:
        # map the sample index back to its source volume
        count = index // self.generate_each
        scans = np.load(self.img_list[count])
        img = scans[0].copy()
        # NOTE(review): lung/infection channel order is swapped between the two
        # datasets -- confirm against the preprocessing that wrote the .npy files
        if 'MosMedData' in self.root_dir:
            lung = scans[2]
            infection = scans[1]
        elif 'COVID-19-CT' in self.root_dir:
            lung = scans[1]
            infection = scans[2]
        # lung bounding box, padded by 3 voxels on each side
        minx, maxx, miny, maxy, minz, maxz = min_max_voi(lung, superior=3, inferior=3)
        # tmp = img[minx: maxx, miny: maxy, minz: maxz].copy()
        # tmp = (tmp - self.mean) / self.std
        # img[minx: maxx, miny: maxy, minz: maxz] = tmp
        pos = np.random.random()
        if pos > self.pos:
            # only inf region selected
            # print('only inf region selected')
            minindex = self.minindex_list[count]
            maxindex = self.maxindex_list[count]
            lines = self.inflines[count]
            numid = self.infidx[count]
            scale = np.random.uniform(0.8, 1.2)  # unused here (advances the RNG)
            cols = int(self.input_z)
            # sample one infection-coordinate line as the slab center
            sed = np.random.randint(1, numid)
            cen = lines[sed - 1]
            cen = np.fromstring(cen, dtype=int, sep=' ')
            # clamp the z-center into the padded box, then into the volume
            c = int(min(max(minindex[2] + cols / 2, cen[2]), maxindex[2] - cols / 2 - 1))
            c = c if c - cols // 2 >= 0 else cols // 2
            c = c if c + cols // 2 < img.shape[-1] else img.shape[-1] - cols // 2 - 1
        else:
            # inf region and none inf region selected
            # print('inf region and none inf region selected')
            scale = np.random.uniform(0.8, 1.2)  # unused here (advances the RNG)
            cols = int(self.input_z)
            # random center anywhere inside the lung VOI
            x = np.random.randint(minx, maxx)
            y = np.random.randint(miny, maxy)
            z = np.random.randint(minz, maxz)
            cen = [x, y, z]
            c = int(min(max(minz + cols / 2, cen[2]), maxz - cols / 2 - 1))
            c = c if c - cols // 2 >= 0 else cols // 2
            c = c if c + cols // 2 < img.shape[-1] else img.shape[-1] - cols // 2 - 1
        flo = int(np.floor(cols / 2))
        # full in-plane VOI, cols slices thick around z-center c
        cropp_img = img[minx: maxx, miny: maxy, c - flo: c + cols - flo].copy()
        cropp_infection = infection[minx: maxx, miny: maxy, c - flo: c + cols - flo].copy()
        return agumentation_img_inf_3d(cropp_img, cropp_infection, self.input_x, self.input_y, self.input_z)
# crop 5 fold
class CovidInf5fold2dSegDataset(CovidInf5foldDatasetBase):
    """5-fold dataset: crops a scale-jittered deps x rows x cols sub-volume
    centered on a random infection voxel and returns the 3D-augmented pair."""
    def __init__(self, root_dir, img_list, input_size, generate_each, mean, std, pos):
        super(CovidInf5fold2dSegDataset, self).__init__(root_dir, img_list, input_size, generate_each, mean, std, pos)
    def __getitem__(self, index):
        # while True:
        # map the sample index back to its source volume
        count = index // self.generate_each
        scans = np.load(self.img_list[count])
        img = scans[0].copy()
        # NOTE(review): lung/infection channel order is swapped between the two
        # datasets -- confirm against the preprocessing that wrote the .npy files
        if 'MosMedData' in self.root_dir:
            lung = scans[2]
            infection = scans[1]
        elif 'COVID-19-CT' in self.root_dir:
            lung = scans[1]
            infection = scans[2]
        minx, maxx, miny, maxy, minz, maxz = min_max_voi(lung, superior=3, inferior=3)
        # tmp = img[minx: maxx, miny: maxy, minz: maxz].copy()
        # tmp = (tmp - self.mean) / self.std
        # img[minx: maxx, miny: maxy, minz: maxz] = tmp
        minindex = self.minindex_list[count]
        maxindex = self.maxindex_list[count]
        lines = self.inflines[count]
        numid = self.infidx[count]
        # random in-plane scale jitter of the crop size
        scale = np.random.uniform(0.8, 1.2)
        deps = int(self.input_x * scale)
        rows = int(self.input_y * scale)
        cols = int(self.input_z)
        # sample one infection-coordinate line as the crop center
        sed = np.random.randint(1, numid)
        cen = lines[sed - 1]
        cen = np.fromstring(cen, dtype=int, sep=' ')
        # clamp each center axis so the crop stays inside the padded box
        a = int(min(max(minindex[0] + deps / 2, cen[0]), maxindex[0] - deps / 2 - 1))
        b = int(min(max(minindex[1] + rows / 2, cen[1]), maxindex[1] - rows / 2 - 1))
        c = int(min(max(minindex[2] + cols / 2, cen[2]), maxindex[2] - cols / 2 - 1))
        c = c if c - cols // 2 >= 0 else cols // 2
        c = c if c + cols // 2 < img.shape[-1] else img.shape[-1] - cols // 2 - 1
        flo = int(np.floor(cols / 2))
        cropp_img = img[a - deps // 2:a + deps // 2, b - rows // 2:b + rows // 2,
                    c - flo: c + cols - flo].copy()
        cropp_infection = infection[a - deps // 2:a + deps // 2, b - rows // 2:b + rows // 2,
                          c - flo: c + cols - flo].copy()
        return agumentation_img_inf_3d(cropp_img, cropp_infection, self.input_x, self.input_y, self.input_z)
# --------------5fold end-------------
# --------------UnsuData start-------------
class CovidInfUnsuDatasetBase(Dataset):
    """Base dataset for the unsupervised / domain-adaptation setting.

    Discovers the .npy volumes under ``root_dir`` itself, carves out a
    seeded, reproducible validation subset per dataset (20% for MosMedData;
    2 + 2 fixed volumes for COVID-19-CT), keeps the subset selected by
    ``split`` ('train', 'valid', or None for the full list), then records
    each volume's padded infection bounding box and raw coordinate lines
    exactly as the 5-fold base class does.
    """

    def __init__(self, root_dir, split, input_size, generate_each, mean, std, pos):
        self.input_x = input_size[0]
        self.input_y = input_size[1]
        self.input_z = input_size[2]
        self.root_dir = root_dir
        # number of samples generated per volume per epoch
        self.generate_each = generate_each
        self.pos = pos
        self.img_list = []
        self.minindex_list = []
        self.maxindex_list = []
        self.infidx = []
        self.inflines = []
        self.mean = mean
        self.std = std
        print('mean %.8f std %.8f' % (self.mean, self.std))
        if 'MosMedData' in root_dir:
            img_list = sorted(glob(join(root_dir, 'm*.npy')), reverse=True)
            idx = []
            # fixed seed so the train/valid split is reproducible across runs
            np.random.seed(666)
            indx = np.random.choice(range(len(img_list)), size=int(len(img_list) * 0.2), replace=False)
            idx.extend(indx)
            if split == 'train':
                img_list = [img_list[ii] for ii in range(len(img_list)) if ii not in idx]
            elif split == 'valid':
                img_list = [img_list[ii] for ii in range(len(img_list)) if ii in idx]
            elif split is None:
                pass  # keep the full list
        elif 'COVID-19-CT' in root_dir:
            img_list = sorted(glob(join(root_dir, '*.npy')), reverse=True)
            idx = []
            # two validation volumes from each half of the 20-volume set
            np.random.seed(666)
            indx = np.random.choice(range(10), size=2, replace=False)
            idx.extend(indx)
            np.random.seed(666)
            indx = np.random.choice(range(10, 20), size=2, replace=False)
            idx.extend(indx)
            if split == 'train':
                img_list = [img_list[ii] for ii in range(len(img_list)) if ii not in idx]
            elif split == 'valid':
                img_list = [img_list[ii] for ii in range(len(img_list)) if ii in idx]
            elif split is None:
                pass  # keep the full list
        for idx in range(len(img_list)):
            file_name = basename(img_list[idx])[:-4]
            print(img_list[idx])
            self.img_list.append(img_list[idx])
            scans = np.load(img_list[idx])
            txt_path = join(root_dir, file_name + '_inf.txt')
            if not exists(txt_path):
                # fall back to the name without its first character
                txt_path = join(root_dir, file_name[1:] + '_inf.txt')
            values = np.loadtxt(txt_path, delimiter=' ')
            minindex = np.array(np.min(values, axis=0), dtype='int')
            maxindex = np.array(np.max(values, axis=0), dtype='int')
            # pad the infection bounding box by 3 voxels, clamped to the volume
            minindex[0] = max(minindex[0] - 3, 0)
            minindex[1] = max(minindex[1] - 3, 0)
            minindex[2] = max(minindex[2] - 3, 0)
            maxindex[0] = min(scans[0].shape[0], maxindex[0] + 3)
            maxindex[1] = min(scans[0].shape[1], maxindex[1] + 3)
            maxindex[2] = min(scans[0].shape[2], maxindex[2] + 3)
            self.minindex_list.append(minindex)
            self.maxindex_list.append(maxindex)
            # keep the raw coordinate lines; subclasses sample centers from them
            # (context manager guarantees the file is closed on error)
            with open(txt_path, 'r') as f2:
                liverline = f2.readlines()
            self.inflines.append(liverline)
            self.infidx.append(len(liverline))
            del scans

    def __len__(self):
        # generate_each samples per loaded volume
        return int(self.generate_each * len(self.img_list))

    def __getitem__(self, index):
        # abstract placeholder: subclasses implement the actual patch sampling
        return None
# resize unsupervised
class CovidInfUnsu2dResizeSegDataset(CovidInfUnsuDatasetBase):
    """Unsupervised-setting dataset: crops a full-VOI, cols-deep slab around
    an infection (or random lung) z-center and returns the pair through the
    3D resize path (agumentation_img_inf_3d)."""
    def __init__(self, root_dir, split, input_size, generate_each, mean, std, pos):
        super(CovidInfUnsu2dResizeSegDataset, self).__init__(root_dir, split, input_size,
                                                             generate_each, mean, std, pos)
    def __getitem__(self, index):
        # while True:
        # map the sample index back to its source volume
        count = index // self.generate_each
        scans = np.load(self.img_list[count])
        img = scans[0].copy()
        # NOTE(review): lung/infection channel order is swapped between the two
        # datasets -- confirm against the preprocessing that wrote the .npy files
        if 'MosMedData' in self.root_dir:
            lung = scans[2]
            infection = scans[1]
        elif 'COVID-19-CT' in self.root_dir:
            lung = scans[1]
            infection = scans[2]
        # lung bounding box, padded by 3 voxels on each side
        minx, maxx, miny, maxy, minz, maxz = min_max_voi(lung, superior=3, inferior=3)
        # tmp = img[minx: maxx, miny: maxy, minz: maxz].copy()
        # tmp = (tmp - self.mean) / self.std
        # img[minx: maxx, miny: maxy, minz: maxz] = tmp
        # save_dir = '/home/qgking/COVID3DSeg/log/3DCOVIDCT/deeplab2d/inf_da_0_run_dapt_from_50_to_20_reszie_eeetest/tmp'
        #
        # minx, maxx, miny, maxy, minz, maxz = min_max_voi(lung, superior=3, inferior=3)
        # tmp = img[minx: maxx, miny: maxy, minz: maxz].copy()
        # cropp_pppp = torch.from_numpy(tmp)
        # cropp_pppp = cropp_pppp.unsqueeze(0).unsqueeze(0)
        # visual_batch(cropp_pppp, save_dir, "test_img_tmp", channel=1, nrow=8)
        #
        # tmp = (tmp - self.mean) / self.std
        # img[minx: maxx, miny: maxy, minz: maxz] = tmp
        #
        # cropp_pppp = torch.from_numpy(tmp)
        # cropp_pppp = cropp_pppp.unsqueeze(0).unsqueeze(0)
        # visual_batch(cropp_pppp, save_dir, "test_img", channel=1, nrow=8)
        #
        # cropp_pppp = torch.from_numpy((img * self.std + self.mean)[minx: maxx, miny: maxy, minz: maxz])
        # cropp_pppp = cropp_pppp.unsqueeze(0).unsqueeze(0)
        # visual_batch(cropp_pppp, save_dir, "test_img_process_back_crop", channel=1, nrow=8)
        pos = np.random.random()
        if pos > self.pos:
            # only inf region selected
            # print('only inf region selected')
            minindex = self.minindex_list[count]
            maxindex = self.maxindex_list[count]
            lines = self.inflines[count]
            numid = self.infidx[count]
            scale = np.random.uniform(0.8, 1.2)  # unused here (advances the RNG)
            cols = int(self.input_z)
            # sample one infection-coordinate line as the slab center
            sed = np.random.randint(1, numid)
            cen = lines[sed - 1]
            cen = np.fromstring(cen, dtype=int, sep=' ')
            # clamp the z-center into the padded box, then into the volume
            c = int(min(max(minindex[2] + cols / 2, cen[2]), maxindex[2] - cols / 2 - 1))
            c = c if c - cols // 2 >= 0 else cols // 2
            c = c if c + cols // 2 < img.shape[-1] else img.shape[-1] - cols // 2 - 1
        else:
            # inf region and none inf region selected
            # print('inf region and none inf region selected')
            scale = np.random.uniform(0.8, 1.2)  # unused here (advances the RNG)
            cols = int(self.input_z)
            # random center anywhere inside the lung VOI
            x = np.random.randint(minx, maxx)
            y = np.random.randint(miny, maxy)
            z = np.random.randint(minz, maxz)
            cen = [x, y, z]
            c = int(min(max(minz + cols / 2, cen[2]), maxz - cols / 2 - 1))
            c = c if c - cols // 2 >= 0 else cols // 2
            c = c if c + cols // 2 < img.shape[-1] else img.shape[-1] - cols // 2 - 1
        flo = int(np.floor(cols / 2))
        # full in-plane VOI, cols slices thick around z-center c
        cropp_img = img[minx: maxx, miny: maxy, c - flo: c + cols - flo].copy()
        cropp_infection = infection[minx: maxx, miny: maxy, c - flo: c + cols - flo].copy()
        # nbb = agumentation_img_inf_3d(cropp_img, cropp_infection, self.input_x, self.input_y, self.input_z)
        # cropp_pppp = np.expand_dims(np.transpose(nbb['image_patch'], (2, 0, 1)), axis=0)
        # visual_batch(torch.from_numpy(cropp_pppp), save_dir, "test_img_process_back_crop_cc", channel=1, nrow=8)
        return agumentation_img_inf_3d(cropp_img, cropp_infection, self.input_x, self.input_y, self.input_z)
# resize unsupervised slice
class CovidInfUnsu2dAugSegDataset(CovidInfUnsuDatasetBase):
    """Unsupervised-setting dataset: same slab sampling as the Resize variant,
    but returns the pair after 2D augmentation/normalisation via
    agumentation_img_inf_2d."""
    def __init__(self, root_dir, split, input_size, generate_each, mean, std, pos):
        super(CovidInfUnsu2dAugSegDataset, self).__init__(root_dir, split, input_size,
                                                          generate_each, mean, std, pos)
    def __getitem__(self, index):
        # while True:
        # map the sample index back to its source volume
        count = index // self.generate_each
        scans = np.load(self.img_list[count])
        img = scans[0].copy()
        # NOTE(review): lung/infection channel order is swapped between the two
        # datasets -- confirm against the preprocessing that wrote the .npy files
        if 'MosMedData' in self.root_dir:
            lung = scans[2]
            infection = scans[1]
        elif 'COVID-19-CT' in self.root_dir:
            lung = scans[1]
            infection = scans[2]
        # lung bounding box, padded by 3 voxels on each side
        minx, maxx, miny, maxy, minz, maxz = min_max_voi(lung, superior=3, inferior=3)
        # tmp = img[minx: maxx, miny: maxy, minz: maxz].copy()
        # tmp = (tmp - self.mean) / self.std
        # img[minx: maxx, miny: maxy, minz: maxz] = tmp
        # save_dir = '/home/qgking/COVID3DSeg/log/3DCOVIDCT/deeplab2d/inf_da_0_run_dapt_from_50_to_20_reszie_eeetest/tmp'
        #
        # minx, maxx, miny, maxy, minz, maxz = min_max_voi(lung, superior=3, inferior=3)
        # tmp = img[minx: maxx, miny: maxy, minz: maxz].copy()
        # cropp_pppp = torch.from_numpy(tmp)
        # cropp_pppp = cropp_pppp.unsqueeze(0).unsqueeze(0)
        # visual_batch(cropp_pppp, save_dir, "test_img_tmp", channel=1, nrow=8)
        #
        # tmp = (tmp - self.mean) / self.std
        # img[minx: maxx, miny: maxy, minz: maxz] = tmp
        #
        # cropp_pppp = torch.from_numpy(tmp)
        # cropp_pppp = cropp_pppp.unsqueeze(0).unsqueeze(0)
        # visual_batch(cropp_pppp, save_dir, "test_img", channel=1, nrow=8)
        #
        # cropp_pppp = torch.from_numpy((img * self.std + self.mean)[minx: maxx, miny: maxy, minz: maxz])
        # cropp_pppp = cropp_pppp.unsqueeze(0).unsqueeze(0)
        # visual_batch(cropp_pppp, save_dir, "test_img_process_back_crop", channel=1, nrow=8)
        pos = np.random.random()
        if pos > self.pos:
            # only inf region selected
            # print('only inf region selected')
            minindex = self.minindex_list[count]
            maxindex = self.maxindex_list[count]
            lines = self.inflines[count]
            numid = self.infidx[count]
            scale = np.random.uniform(0.8, 1.2)  # unused here (advances the RNG)
            cols = int(self.input_z)
            # sample one infection-coordinate line as the slab center
            sed = np.random.randint(1, numid)
            cen = lines[sed - 1]
            cen = np.fromstring(cen, dtype=int, sep=' ')
            # clamp the z-center into the padded box, then into the volume
            c = int(min(max(minindex[2] + cols / 2, cen[2]), maxindex[2] - cols / 2 - 1))
            c = c if c - cols // 2 >= 0 else cols // 2
            c = c if c + cols // 2 < img.shape[-1] else img.shape[-1] - cols // 2 - 1
        else:
            # inf region and none inf region selected
            # print('inf region and none inf region selected')
            scale = np.random.uniform(0.8, 1.2)  # unused here (advances the RNG)
            cols = int(self.input_z)
            # random center anywhere inside the lung VOI
            x = np.random.randint(minx, maxx)
            y = np.random.randint(miny, maxy)
            z = np.random.randint(minz, maxz)
            cen = [x, y, z]
            c = int(min(max(minz + cols / 2, cen[2]), maxz - cols / 2 - 1))
            c = c if c - cols // 2 >= 0 else cols // 2
            c = c if c + cols // 2 < img.shape[-1] else img.shape[-1] - cols // 2 - 1
        flo = int(np.floor(cols / 2))
        # full in-plane VOI, cols slices thick around z-center c
        cropp_img = img[minx: maxx, miny: maxy, c - flo: c + cols - flo].copy()
        cropp_infection = infection[minx: maxx, miny: maxy, c - flo: c + cols - flo].copy()
        # nbb = agumentation_img_inf_3d(cropp_img, cropp_infection, self.input_x, self.input_y, self.input_z)
        # cropp_pppp = np.expand_dims(np.transpose(nbb['image_patch'], (2, 0, 1)), axis=0)
        # visual_batch(torch.from_numpy(cropp_pppp), save_dir, "test_img_process_back_crop_cc", channel=1, nrow=8)
        return agumentation_img_inf_2d(cropp_img, cropp_infection, self.input_x, self.input_y, self.mean, self.std)
# resize unsupervised slice
class CovidInfValidUnsu2dAugSegDataset(CovidInfUnsuDatasetBase):
    """Validation dataset for the unsupervised setting.

    Yields exactly one sample per volume: the full padded lung VOI of the
    image and its infection mask, passed through agumentation_img_inf_3d
    with the VOI's own depth as the target z-extent.
    """
    def __init__(self, root_dir, split, input_size, generate_each, mean, std, pos):
        super(CovidInfValidUnsu2dAugSegDataset, self).__init__(root_dir, split, input_size,
                                                               generate_each, mean, std, pos)
    def __len__(self):
        # one whole-VOI sample per volume (no per-volume patch generation)
        return int(len(self.img_list))
    def __getitem__(self, index):
        volume = np.load(self.img_list[index])
        image = volume[0].copy()
        # channel order differs between the two source datasets
        if 'MosMedData' in self.root_dir:
            lung, infection = volume[2], volume[1]
        elif 'COVID-19-CT' in self.root_dir:
            lung, infection = volume[1], volume[2]
        # padded lung bounding box (3 voxels on each side)
        x0, x1, y0, y1, z0, z1 = min_max_voi(lung, superior=3, inferior=3)
        voi_img = image[x0: x1, y0: y1, z0: z1].copy()
        voi_inf = infection[x0: x1, y0: y1, z0: z1].copy()
        return agumentation_img_inf_3d(voi_img, voi_inf, self.input_x, self.input_y, (z1 - z0))
# crop unsupervised
class CovidInfUnsu2dSegDataset(CovidInfUnsuDatasetBase):
    """Training dataset of random 3-D crops for unsupervised segmentation.

    With probability ``1 - pos`` the crop is centred on a recorded infection
    voxel; otherwise the centre is drawn uniformly inside the lung bounding
    box.  In-plane size is jittered by a random scale in [0.8, 1.2]; crop
    depth is fixed to ``input_z``.
    """

    def __init__(self, root_dir, split, input_size, generate_each, mean, std, pos):
        super(CovidInfUnsu2dSegDataset, self).__init__(root_dir, split, input_size, generate_each, mean, std, pos)

    def __getitem__(self, index):
        count = index // self.generate_each
        scans = np.load(self.img_list[count])
        img = scans[0].copy()
        # Channel layout differs between the two supported datasets.
        if 'MosMedData' in self.root_dir:
            lung = scans[2]
            infection = scans[1]
        elif 'COVID-19-CT' in self.root_dir:
            lung = scans[1]
            infection = scans[2]
        else:
            raise ValueError('unrecognized dataset root: %s' % self.root_dir)
        pos = np.random.random()
        if pos > self.pos:
            # Centre the crop on a randomly selected infection voxel.
            minindex = self.minindex_list[count]
            maxindex = self.maxindex_list[count]
            lines = self.inflines[count]
            numid = self.infidx[count]
            scale = np.random.uniform(0.8, 1.2)
            deps = int(self.input_x * scale)
            rows = int(self.input_y * scale)
            cols = int(self.input_z)
            # NOTE(review): randint(1, numid) never selects the last line and
            # raises ValueError when numid == 1 -- confirm this is intended.
            sed = np.random.randint(1, numid)
            cen = lines[sed - 1]
            cen = np.fromstring(cen, dtype=int, sep=' ')
            # Clamp the centre inside the infection bounding box / volume.
            a = int(min(max(minindex[0] + deps / 2, cen[0]), maxindex[0] - deps / 2 - 1))
            b = int(min(max(minindex[1] + rows / 2, cen[1]), maxindex[1] - rows / 2 - 1))
            c = int(min(max(minindex[2] + cols / 2, cen[2]), maxindex[2] - cols / 2 - 1))
            c = c if c - cols // 2 >= 0 else cols // 2
            c = c if c + cols // 2 < img.shape[-1] else img.shape[-1] - cols // 2 - 1
        else:
            # Centre the crop uniformly inside the lung bounding box.
            minx, maxx, miny, maxy, minz, maxz = min_max_voi(lung, superior=3, inferior=3)
            scale = np.random.uniform(0.8, 1.2)
            deps = int(self.input_x * scale)
            rows = int(self.input_y * scale)
            cols = int(self.input_z)
            x = np.random.randint(minx, maxx)
            y = np.random.randint(miny, maxy)
            z = np.random.randint(minz, maxz)
            cen = [x, y, z]
            a = int(min(max(minx + deps / 2, cen[0]), maxx - deps / 2 - 1))
            b = int(min(max(miny + rows / 2, cen[1]), maxy - rows / 2 - 1))
            c = int(min(max(minz + cols / 2, cen[2]), maxz - cols / 2 - 1))
            c = c if c - cols // 2 >= 0 else cols // 2
            c = c if c + cols // 2 < img.shape[-1] else img.shape[-1] - cols // 2 - 1
        flo = int(np.floor(cols / 2))
        cropp_img = img[a - deps // 2:a + deps // 2, b - rows // 2:b + rows // 2,
                    c - flo: c + cols - flo].copy()
        cropp_infection = infection[a - deps // 2:a + deps // 2, b - rows // 2:b + rows // 2,
                          c - flo: c + cols - flo].copy()
        return agumentation_img_inf_3d(cropp_img, cropp_infection, self.input_x, self.input_y, self.input_z)
# --------------UnsuData end-------------
# --------------2D slice start-------------
class CovidInfUnsu2dDatasetBase(Dataset):
    """Base class for 2-D slice datasets built from 3-D CT volumes.

    Loads every ``.npy`` volume under ``root_dir`` and keeps only the axial
    slices whose infection mask sums to more than one positive voxel.  The
    kept slices are stored individually in ``img_list`` / ``lung_list`` /
    ``inf_list``; subclasses implement ``__getitem__``.
    """

    def __init__(self, root_dir, split, input_size, generate_each, mean, std, pos):
        self.input_x = input_size[0]
        self.input_y = input_size[1]
        self.input_z = input_size[2]
        self.root_dir = root_dir
        self.generate_each = generate_each
        self.pos = pos
        self.img_list = []
        self.lung_list = []
        self.inf_list = []
        self.total_slices = 0
        self.mean = mean
        self.std = std
        print('mean %.8f std %.8f' % (self.mean, self.std))
        # Pick the volume files for the supported datasets; fail fast on an
        # unknown root (previously this left `img_list` undefined -> NameError).
        if 'MosMedData' in root_dir:
            img_list = sorted(glob(join(root_dir, 'm*.npy')), reverse=True)
        elif 'COVID-19-CT' in root_dir:
            img_list = sorted(glob(join(root_dir, '*.npy')), reverse=True)
        elif 'Italy' in root_dir:
            # TODO need to be modified
            img_list = sorted(glob(join(root_dir, '*.npy')), reverse=True)
        else:
            raise ValueError('unrecognized dataset root: %s' % root_dir)
        for idx in range(len(img_list)):
            print(img_list[idx])
            scans = np.load(img_list[idx])
            img = scans[0].copy()
            # Channel order and infection post-processing differ per dataset.
            if 'MosMedData' in img_list[idx]:
                lung = scans[2].copy()
                infection = scans[1].copy()
                sums = np.sum(infection, axis=(0, 1))
                inf_sli = np.where(sums > 1)[0]
            elif 'COVID-19-CT' in img_list[idx]:
                lung = scans[1].copy()
                infection = scans[2].copy()
                sums = np.sum(infection, axis=(0, 1))
                inf_sli = np.where(sums > 1)[0]
            elif 'Italy' in img_list[idx]:
                lung = scans[1].copy()
                infection = scans[2].copy()
                # Keep GGO and consolidation only, then binarise.
                infection[np.where(infection == 3)] = 0
                infection[np.where(infection > 0)] = 1
                sums = np.sum(infection, axis=(0, 1))
                inf_sli = np.where(sums > 1)[0]
            else:
                raise ValueError('unrecognized dataset file: %s' % img_list[idx])
            # Retain only slices that actually contain infection.
            s_img = img[:, :, inf_sli]
            s_lung = lung[:, :, inf_sli]
            s_infection = infection[:, :, inf_sli]
            for ii in range(s_img.shape[-1]):
                self.img_list.append(s_img[:, :, ii])
                self.lung_list.append(s_lung[:, :, ii])
                self.inf_list.append(s_infection[:, :, ii])
            del scans

    def __len__(self):
        return len(self.img_list)

    def __getitem__(self, index):
        # Placeholder: subclasses provide the real implementation.
        return None
# resize unsupervised slice
class CovidInfValidUnsu2dDatasetBase(CovidInfUnsu2dDatasetBase):
    """Validation variant: each lung-cropped 2-D slice tiled to ``input_z`` channels."""

    def __init__(self, root_dir, split, input_size, generate_each, mean, std, pos):
        super(CovidInfValidUnsu2dDatasetBase, self).__init__(root_dir, split, input_size,
                                                             generate_each, mean, std, pos)

    def __len__(self):
        return int(len(self.img_list))

    def __getitem__(self, index):
        image_slice = self.img_list[index]
        lung_slice = self.lung_list[index]
        inf_slice = self.inf_list[index]
        # Crop to the lung bounding box with a 5-pixel margin.
        minx, maxx, miny, maxy = min_max_voi_2d(lung_slice, superior=5, inferior=5)
        patch = image_slice[minx: maxx, miny: maxy].copy()
        mask = inf_slice[minx: maxx, miny: maxy].copy()
        # Replicate the slice along a new last axis so 2-D data fits the pipeline.
        patch = np.tile(np.expand_dims(patch, axis=-1), self.input_z)
        mask = np.tile(np.expand_dims(mask, axis=-1), self.input_z)
        return agumentation_img_inf_2d(patch, mask, self.input_x, self.input_y, self.mean, self.std,
                                       num=4)
# simple 2D slices
class CovidInfUnsu2dSliceSegDataset(CovidInfUnsu2dDatasetBase):
    """Training dataset over single 2-D slices, tiled to ``input_z`` channels."""

    def __init__(self, root_dir, split, input_size, generate_each, mean, std, pos):
        super(CovidInfUnsu2dSliceSegDataset, self).__init__(root_dir, split, input_size,
                                                            generate_each, mean, std, pos)

    def __getitem__(self, index):
        image_slice = self.img_list[index]
        lung_slice = self.lung_list[index]
        inf_slice = self.inf_list[index]
        # Crop to the lung bounding box with a 5-pixel margin.
        minx, maxx, miny, maxy = min_max_voi_2d(lung_slice, superior=5, inferior=5)
        patch = image_slice[minx: maxx, miny: maxy].copy()
        mask = inf_slice[minx: maxx, miny: maxy].copy()
        # Replicate the slice along a new last axis before 2-D augmentation.
        patch = np.tile(np.expand_dims(patch, axis=-1), self.input_z)
        mask = np.tile(np.expand_dims(mask, axis=-1), self.input_z)
        return agumentation_img_inf_2d(patch, mask, self.input_x, self.input_y, self.mean, self.std,
                                       num=4)
# --------------2D slice end-------------
class CovidInf20SegDataset(Dataset):
    """20-case infection segmentation dataset with a fixed 4-case validation split.

    Expects ``<case>.npy`` volumes plus ``<case>_inf.txt`` files listing one
    infection-voxel coordinate ('x y z') per line.  Four cases (two chosen
    with a fixed seed from each half of the sorted file list) form the
    validation split.
    """

    def __init__(self, root_dir, split='train', input_size=(256, 256, 64), generate_each=6):
        self.input_x = input_size[0]
        self.input_y = input_size[1]
        self.input_z = input_size[2]
        self.root_dir = root_dir
        self.generate_each = generate_each
        self.img_list = []
        self.minindex_list = []
        self.maxindex_list = []
        self.infidx = []
        self.inflines = []
        img_list = sorted(glob(join(root_dir, '*.npy')), reverse=True)
        # Deterministic validation indices: two from [0, 10), two from [10, 20).
        valid_idx = []
        np.random.seed(666)
        valid_idx.extend(np.random.choice(range(10), size=2, replace=False))
        np.random.seed(666)
        valid_idx.extend(np.random.choice(range(10, 20), size=2, replace=False))
        if split == 'train':
            img_list = [img_list[ii] for ii in range(len(img_list)) if ii not in valid_idx]
        elif split == 'valid':
            img_list = [img_list[ii] for ii in range(len(img_list)) if ii in valid_idx]
        for file_idx in range(len(img_list)):
            file_name = basename(img_list[file_idx])[:-4]
            print(img_list[file_idx])
            self.img_list.append(img_list[file_idx])
            scans = np.load(img_list[file_idx])
            inf_path = join(root_dir, file_name + '_inf.txt')
            values = np.loadtxt(inf_path, delimiter=' ')
            # Infection bounding box with a 3-voxel margin, clamped to the volume.
            minindex = np.array(np.min(values, axis=0), dtype='int')
            maxindex = np.array(np.max(values, axis=0), dtype='int')
            minindex[0] = max(minindex[0] - 3, 0)
            minindex[1] = max(minindex[1] - 3, 0)
            minindex[2] = max(minindex[2] - 3, 0)
            maxindex[0] = min(scans[0].shape[0], maxindex[0] + 3)
            maxindex[1] = min(scans[0].shape[1], maxindex[1] + 3)
            maxindex[2] = min(scans[0].shape[2], maxindex[2] + 3)
            self.minindex_list.append(minindex)
            self.maxindex_list.append(maxindex)
            # Keep the raw coordinate lines for per-sample centre selection.
            with open(inf_path, 'r') as f2:
                liverline = f2.readlines()
            self.inflines.append(liverline)
            self.infidx.append(len(liverline))
            del scans

    def __len__(self):
        return int(self.generate_each * len(self.img_list))

    def __getitem__(self, index):
        count = index // self.generate_each
        scans = np.load(self.img_list[count])
        img = scans[0]
        infection = scans[2]
        minindex = self.minindex_list[count]
        maxindex = self.maxindex_list[count]
        lines = self.inflines[count]
        numid = self.infidx[count]
        # Randomly scale the in-plane crop size; depth stays fixed.
        scale = np.random.uniform(0.8, 1.2)
        deps = int(self.input_x * scale)
        rows = int(self.input_y * scale)
        cols = int(self.input_z)
        sed = np.random.randint(1, numid)
        cen = lines[sed - 1]
        cen = np.fromstring(cen, dtype=int, sep=' ')
        # Clamp the crop centre inside the bounding box and the volume.
        a = int(min(max(minindex[0] + deps / 2, cen[0]), maxindex[0] - deps / 2 - 1))
        b = int(min(max(minindex[1] + rows / 2, cen[1]), maxindex[1] - rows / 2 - 1))
        c = int(min(max(minindex[2] + cols / 2, cen[2]), maxindex[2] - cols / 2 - 1))
        c = c if c - cols // 2 >= 0 else cols // 2
        c = c if c + cols // 2 < img.shape[-1] else img.shape[-1] - cols // 2 - 1
        cropp_img = img[a - deps // 2:a + deps // 2, b - rows // 2:b + rows // 2,
                    c - cols // 2: c + cols // 2].copy()
        cropp_infection = infection[a - deps // 2:a + deps // 2, b - rows // 2:b + rows // 2,
                          c - cols // 2:c + cols // 2].copy()
        return agumentation_img_inf_3d(cropp_img, cropp_infection, self.input_x, self.input_y, self.input_z)
class CovidInfDegDataset(Dataset):
    """Severity ("degree") classification dataset over coarse-segmented volumes.

    The raw severity label is the integer after the last underscore in the
    file name.  Severe cases (label >= 3) are oversampled five-fold; labels
    are remapped to three classes: 1 -> 0, 2 -> 1, >= 3 -> 2.
    """

    @staticmethod
    def _path_label(img_path):
        # Raw integer label parsed from '<name>_<label>.<ext>'.
        st_index = img_path.rfind('_')
        end_index = img_path.rfind('.')
        return int(img_path[st_index + 1:end_index])

    def __init__(self, img_list, split='train', input_size=(256, 256, 64)):
        self.input_x = input_size[0]
        self.input_y = input_size[1]
        self.input_z = input_size[2]
        self.img_list = []
        self.minindex_list = []
        self.maxindex_list = []
        self.infidx = []
        self.inflines = []
        self.split = split
        for img_path in img_list:
            if self._path_label(img_path) >= 3:
                # Oversample severe cases 5x to balance the classes.
                self.img_list.extend([img_path] * 5)
            else:
                self.img_list.append(img_path)

    def __len__(self):
        return len(self.img_list)

    def __getitem__(self, index):
        img_path = self.img_list[index]
        scans = np.load(img_path)
        img = scans[0]
        coarse_seg = scans[1]
        # Crop to the coarse-segmentation bounding box before augmentation.
        minx, maxx, miny, maxy, minz, maxz = min_max_voi(coarse_seg, superior=3, inferior=3)
        patch = img[minx: maxx, miny: maxy, minz: maxz]
        bagging_imgs = agumentation_img_3d(patch, self.input_x, self.input_y, self.input_z)
        bagging_imgs = torch.from_numpy(np.expand_dims(bagging_imgs, 0))
        image_label = self._path_label(img_path)
        # Collapse severity to three classes: 1 -> 0, 2 -> 1, >= 3 -> 2.
        if image_label >= 3:
            l = 2
        else:
            l = image_label - 1
        return {
            "image_patch": bagging_imgs,
            'image_label': l,
        }
class CovidInfDegDatasetMIL(Dataset):
    """Multiple-instance-learning dataset: one bag of augmented patches per volume."""

    def __init__(self, img_list, input_size=(256, 256, 64), generate_bag=6, seg_bagging_aug=None):
        self.input_x = input_size[0]
        self.input_y = input_size[1]
        self.input_z = input_size[2]
        self.generate_bag = generate_bag
        self.minindex_list = []
        self.maxindex_list = []
        self.infidx = []
        self.inflines = []
        self.img_list = img_list
        self.seg_bagging_aug = seg_bagging_aug

    def __len__(self):
        return len(self.img_list)

    def __getitem__(self, index):
        img_path = self.img_list[index]
        scans = np.load(img_path)
        volume = scans[0]
        coarse_seg = scans[1]
        # Build a bag of `generate_bag` augmented patches from the volume.
        bagging_imgs = self.seg_bagging_aug(volume.copy(), coarse_seg, self.generate_bag, self.input_x, self.input_y,
                                            self.input_z)
        # Severity label parsed from '<name>_<label>.<ext>'.
        tail = img_path.rfind('_')
        dot = img_path.rfind('.')
        image_label = int(img_path[tail + 1:dot])
        return {
            "image_patch": bagging_imgs,
            'image_label': image_label,
        }
class CovidInf50CoarseSegDataset(Dataset):
    """Coarse segmentation dataset: random crops around recorded infection voxels.

    Expects ``<case>.npy`` volumes plus ``<case>_inf.txt`` files listing one
    infection-voxel coordinate ('x y z') per line.  Unlike the 20-case
    variant there is no train/valid split and the target mask is ``scans[1]``.
    """

    def __init__(self, root_dir, input_size=(256, 256, 64), generate_each=6):
        self.input_x = input_size[0]
        self.input_y = input_size[1]
        self.input_z = input_size[2]
        self.root_dir = root_dir
        self.generate_each = generate_each
        self.img_list = []
        self.minindex_list = []
        self.maxindex_list = []
        self.infidx = []
        self.inflines = []
        img_list = sorted(glob(join(root_dir, '*.npy')), reverse=True)
        for file_idx in range(len(img_list)):
            file_name = basename(img_list[file_idx])[:-4]
            print(img_list[file_idx])
            self.img_list.append(img_list[file_idx])
            scans = np.load(img_list[file_idx])
            inf_path = join(root_dir, file_name + '_inf.txt')
            values = np.loadtxt(inf_path, delimiter=' ')
            # Infection bounding box with a 3-voxel margin, clamped to the volume.
            minindex = np.array(np.min(values, axis=0), dtype='int')
            maxindex = np.array(np.max(values, axis=0), dtype='int')
            minindex[0] = max(minindex[0] - 3, 0)
            minindex[1] = max(minindex[1] - 3, 0)
            minindex[2] = max(minindex[2] - 3, 0)
            maxindex[0] = min(scans[0].shape[0], maxindex[0] + 3)
            maxindex[1] = min(scans[0].shape[1], maxindex[1] + 3)
            maxindex[2] = min(scans[0].shape[2], maxindex[2] + 3)
            self.minindex_list.append(minindex)
            self.maxindex_list.append(maxindex)
            # Keep the raw coordinate lines for per-sample centre selection.
            with open(inf_path, 'r') as f2:
                liverline = f2.readlines()
            self.inflines.append(liverline)
            self.infidx.append(len(liverline))
            del scans

    def __len__(self):
        return int(self.generate_each * len(self.img_list))

    def __getitem__(self, index):
        count = index // self.generate_each
        scans = np.load(self.img_list[count])
        img = scans[0]
        infection = scans[1]
        minindex = self.minindex_list[count]
        maxindex = self.maxindex_list[count]
        lines = self.inflines[count]
        numid = self.infidx[count]
        # Randomly scale the in-plane crop size; depth stays fixed.
        scale = np.random.uniform(0.8, 1.2)
        deps = int(self.input_x * scale)
        rows = int(self.input_y * scale)
        cols = int(self.input_z)
        sed = np.random.randint(1, numid)
        cen = lines[sed - 1]
        cen = np.fromstring(cen, dtype=int, sep=' ')
        # Clamp the crop centre inside the bounding box and the volume.
        a = int(min(max(minindex[0] + deps / 2, cen[0]), maxindex[0] - deps / 2 - 1))
        b = int(min(max(minindex[1] + rows / 2, cen[1]), maxindex[1] - rows / 2 - 1))
        c = int(min(max(minindex[2] + cols / 2, cen[2]), maxindex[2] - cols / 2 - 1))
        c = c if c - cols // 2 >= 0 else cols // 2
        c = c if c + cols // 2 < img.shape[-1] else img.shape[-1] - cols // 2 - 1
        cropp_img = img[a - deps // 2:a + deps // 2, b - rows // 2:b + rows // 2,
                    c - cols // 2: c + cols // 2].copy()
        cropp_infection = infection[a - deps // 2:a + deps // 2, b - rows // 2:b + rows // 2,
                          c - cols // 2:c + cols // 2].copy()
        return agumentation_img_inf_3d(cropp_img, cropp_infection, self.input_x, self.input_y, self.input_z)
def CovidInfDegData(root_dir, npy_prefix='mstudy*'):
    """Collect volume paths and 3-class severity labels from ``root_dir``.

    The raw label is the integer after the last underscore in the file name.
    Label-0 volumes are skipped; the remaining labels map 1 -> 0, 2 -> 1,
    >= 3 -> 2.  Returns ``(imgs, labels)`` as parallel lists.
    """
    paths = sorted(glob(join(root_dir, npy_prefix + '.npy')), reverse=False)
    labels = []
    imgs = []
    for path in paths:
        raw = int(path[path.rfind('_') + 1:path.rfind('.')])
        if raw == 0:
            # Unlabeled volumes are excluded from the classification set.
            continue
        labels.append(2 if raw >= 3 else raw - 1)
        imgs.append(path)
    print('total imgs %d' % (len(imgs)))
    return imgs, labels
| 44.673203
| 121
| 0.549943
| 6,524
| 47,845
| 3.869099
| 0.043072
| 0.036606
| 0.02789
| 0.031693
| 0.898899
| 0.888202
| 0.878932
| 0.865462
| 0.858728
| 0.852389
| 0
| 0.027461
| 0.312718
| 47,845
| 1,070
| 122
| 44.714953
| 0.74017
| 0.20581
| 0
| 0.874656
| 0
| 0
| 0.014148
| 0
| 0
| 0
| 0
| 0.000935
| 0
| 1
| 0.057851
| false
| 0
| 0.011019
| 0.016529
| 0.126722
| 0.012397
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
4ab23d3390a17a43367f3ead872f1f044511765f
| 106
|
py
|
Python
|
FBEM/__init__.py
|
icemtel/stokes
|
022de2417919a18ed5b0262111e430384053137d
|
[
"MIT"
] | null | null | null |
FBEM/__init__.py
|
icemtel/stokes
|
022de2417919a18ed5b0262111e430384053137d
|
[
"MIT"
] | null | null | null |
FBEM/__init__.py
|
icemtel/stokes
|
022de2417919a18ed5b0262111e430384053137d
|
[
"MIT"
] | null | null | null |
from FBEM.run import *
from FBEM.postproc import *
from FBEM.write_input import *
import FBEM.logs as logs
| 26.5
| 30
| 0.792453
| 18
| 106
| 4.611111
| 0.5
| 0.289157
| 0.337349
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.141509
| 106
| 4
| 31
| 26.5
| 0.912088
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
435ab3b15fc5436835809d4169c57f4ab23b143a
| 30,526
|
py
|
Python
|
tests/test_args_and_configs.py
|
MUSC-TBIC/etude-engine
|
943608ae3458bfcecc5e1c0b24fb3aa5c8bc0cad
|
[
"Apache-2.0"
] | 9
|
2018-11-03T20:49:41.000Z
|
2021-10-30T23:11:28.000Z
|
tests/test_args_and_configs.py
|
MUSC-TBIC/etude-engine
|
943608ae3458bfcecc5e1c0b24fb3aa5c8bc0cad
|
[
"Apache-2.0"
] | 1
|
2019-06-04T17:17:41.000Z
|
2019-06-04T17:17:41.000Z
|
tests/test_args_and_configs.py
|
MUSC-TBIC/etude-engine
|
943608ae3458bfcecc5e1c0b24fb3aa5c8bc0cad
|
[
"Apache-2.0"
] | null | null | null |
import pytest
import json
import args_and_configs
#############################################
## Test passing command line arguments
#############################################
def test_default_ignore_whitespace_flag():
    """Whitespace is ignored by default when no whitespace flag is given."""
    cli = [ '--reference-input' , 'tests/data/i2b2_2016_track-1_reference' ,
            '--test-input' , 'tests/data/i2b2_2016_track-1_test' ]
    parsed = args_and_configs.get_arguments( cli )
    assert parsed.ignore_whitespace == True
def test_ignore_whitespace_flag_usage():
    """--heed-whitespace turns whitespace sensitivity on (ignore flag off)."""
    # NOTE(review): this name and the one below look swapped relative to the
    # flag each test passes -- confirm against the argparse definitions.
    cli = [ '--reference-input' , 'tests/data/i2b2_2016_track-1_reference' ,
            '--test-input' , 'tests/data/i2b2_2016_track-1_test' ,
            '--heed-whitespace' ]
    parsed = args_and_configs.get_arguments( cli )
    assert parsed.ignore_whitespace == False
def test_heed_whitespace_flag_usage():
    """--ignore-whitespace explicitly selects the (default) ignoring behaviour."""
    cli = [ '--reference-input' , 'tests/data/i2b2_2016_track-1_reference' ,
            '--test-input' , 'tests/data/i2b2_2016_track-1_test' ,
            '--ignore-whitespace' ]
    parsed = args_and_configs.get_arguments( cli )
    assert parsed.ignore_whitespace == True
def test_skip_missing_test_files_usage():
    """--skip-missing-files is both accepted explicitly and the default."""
    cli = [ '--reference-input' , 'tests/data/i2b2_2016_track-1_reference' ,
            '--test-input' , 'tests/data/i2b2_2016_track-1_test' ,
            '--skip-missing-files' ]
    parsed = args_and_configs.get_arguments( cli )
    assert parsed.skip_missing_files == True
    # Omitting the flag must behave identically to passing it.
    cli = [ '--reference-input' , 'tests/data/i2b2_2016_track-1_reference' ,
            '--test-input' , 'tests/data/i2b2_2016_track-1_test' ]
    parsed = args_and_configs.get_arguments( cli )
    assert parsed.skip_missing_files == True
def test_score_missing_test_files_usage():
    """--score-missing-files disables the skip-missing default."""
    cli = [ '--reference-input' , 'tests/data/i2b2_2016_track-1_reference' ,
            '--test-input' , 'tests/data/i2b2_2016_track-1_test' ,
            '--score-missing-files' ]
    parsed = args_and_configs.get_arguments( cli )
    assert parsed.skip_missing_files == False
def test_required_input_flags_ref_only():
    """A reference corpus alone is rejected; argparse exits."""
    cli = [ '--reference-input' , 'tests/data/i2b2_2016_track-1_reference' ]
    with pytest.raises( SystemExit ):
        args_and_configs.get_arguments( cli )
def test_required_input_flags_test_only():
    """A test corpus alone is rejected; argparse exits."""
    cli = [ '--test-input' , 'tests/data/i2b2_2016_track-1_test' ]
    with pytest.raises( SystemExit ):
        args_and_configs.get_arguments( cli )
def test_print_counts_neither_ref_nor_test():
    """--print-counts with no corpus at all is rejected; argparse exits."""
    cli = [ '--print-counts' , '--no-metrics' , '--no-confusion-matrix' ]
    with pytest.raises( SystemExit ):
        args_and_configs.get_arguments( cli )
def test_print_counts_ref_only():
    """--print-counts with only a reference corpus is a valid combination."""
    cli = [ '--print-counts' , '--no-metrics' , '--no-confusion-matrix' ,
            '--reference-input' , 'tests/data/i2b2_2016_track-1_reference' ]
    parsed = args_and_configs.get_arguments( cli )
    assert parsed.print_counts
    assert not parsed.print_metrics
    assert not parsed.print_confusion_matrix
    assert parsed.reference_input is not None
    assert parsed.test_input is None
def test_print_counts_test_only():
    """--print-counts with only a test corpus is a valid combination."""
    cli = [ '--print-counts' , '--no-metrics' , '--no-confusion-matrix' ,
            '--test-input' , 'tests/data/i2b2_2016_track-1_test' ]
    parsed = args_and_configs.get_arguments( cli )
    assert parsed.print_counts
    assert not parsed.print_metrics
    assert not parsed.print_confusion_matrix
    assert parsed.reference_input is None
    assert parsed.test_input is not None
def test_print_counts_ref_and_test():
    """--print-counts with both corpora keeps both inputs set."""
    cli = [ '--print-counts' , '--no-metrics' , '--no-confusion-matrix' ,
            '--reference-input' , 'tests/data/i2b2_2016_track-1_reference' ,
            '--test-input' , 'tests/data/i2b2_2016_track-1_test' ]
    parsed = args_and_configs.get_arguments( cli )
    assert parsed.print_counts
    assert not parsed.print_metrics
    assert not parsed.print_confusion_matrix
    assert parsed.reference_input is not None
    assert parsed.test_input is not None
def test_print_counts_and_metrics_for_ref_and_test():
    """Counts plus metrics is allowed when both corpora are supplied."""
    cli = [ '--print-counts' , '--print-metrics' , '--no-confusion-matrix' ,
            '--reference-input' , 'tests/data/i2b2_2016_track-1_reference' ,
            '--test-input' , 'tests/data/i2b2_2016_track-1_test' ]
    parsed = args_and_configs.get_arguments( cli )
    assert parsed.print_counts
    assert parsed.print_metrics
    assert not parsed.print_confusion_matrix
    assert parsed.reference_input is not None
    assert parsed.test_input is not None
def test_print_counts_and_metrics_for_ref():
    """Metrics require both corpora; reference alone exits."""
    cli = [ '--print-counts' , '--print-metrics' , '--no-confusion-matrix' ,
            '--reference-input' , 'tests/data/i2b2_2016_track-1_reference' ]
    with pytest.raises( SystemExit ):
        args_and_configs.get_arguments( cli )
def test_print_counts_and_metrics_for_test():
    """Metrics require both corpora; test input alone exits."""
    cli = [ '--print-counts' , '--print-metrics' , '--no-confusion-matrix' ,
            '--test-input' , 'tests/data/i2b2_2016_track-1_test' ]
    with pytest.raises( SystemExit ):
        args_and_configs.get_arguments( cli )
def test_print_counts_and_confusion_for_ref_test():
    """Counts plus confusion matrix is allowed when both corpora are supplied."""
    cli = [ '--print-counts' , '--no-metrics' , '--print-confusion-matrix' ,
            '--reference-input' , 'tests/data/i2b2_2016_track-1_reference' ,
            '--test-input' , 'tests/data/i2b2_2016_track-1_test' ]
    parsed = args_and_configs.get_arguments( cli )
    assert parsed.print_counts
    assert not parsed.print_metrics
    assert parsed.print_confusion_matrix
    assert parsed.reference_input is not None
    assert parsed.test_input is not None
def test_print_counts_and_confusion_for_ref():
    """A confusion matrix requires both corpora; reference alone exits."""
    cli = [ '--print-counts' , '--no-metrics' , '--print-confusion-matrix' ,
            '--reference-input' , 'tests/data/i2b2_2016_track-1_reference' ]
    with pytest.raises( SystemExit ):
        args_and_configs.get_arguments( cli )
def test_print_counts_and_confusion_for_test():
    """A confusion matrix requires both corpora; test input alone exits."""
    cli = [ '--print-counts' , '--no-metrics' , '--print-confusion-matrix' ,
            '--test-input' , 'tests/data/i2b2_2016_track-1_test' ]
    with pytest.raises( SystemExit ):
        args_and_configs.get_arguments( cli )
def test_print_counts_and_metrics_and_confusion_for_ref_and_test():
    """All three outputs together are allowed with both corpora."""
    cli = [ '--print-counts' , '--print-metrics' , '--print-confusion-matrix' ,
            '--reference-input' , 'tests/data/i2b2_2016_track-1_reference' ,
            '--test-input' , 'tests/data/i2b2_2016_track-1_test' ]
    parsed = args_and_configs.get_arguments( cli )
    assert parsed.print_counts
    assert parsed.print_metrics
    assert parsed.print_confusion_matrix
    assert parsed.reference_input is not None
    assert parsed.test_input is not None
def test_print_counts_and_metrics_and_confusion_for_ref():
    """All three outputs with only a reference corpus exits."""
    cli = [ '--print-counts' , '--print-metrics' , '--print-confusion-matrix' ,
            '--reference-input' , 'tests/data/i2b2_2016_track-1_reference' ]
    with pytest.raises( SystemExit ):
        args_and_configs.get_arguments( cli )
def test_print_counts_and_metrics_and_confusion_for_test():
    """All three outputs with only a test corpus exits."""
    cli = [ '--print-counts' , '--print-metrics' , '--print-confusion-matrix' ,
            '--test-input' , 'tests/data/i2b2_2016_track-1_test' ]
    with pytest.raises( SystemExit ):
        args_and_configs.get_arguments( cli )
#############################################
## Test loading and reading of config files
#############################################
## Namespaces
def test_i2b2_2016_track_1_has_empty_namespace():
    """The i2b2 config declares no XML namespaces."""
    conf_path = 'config/i2b2_2016_track-1.conf'
    namespaces , document_data , patterns = \
        args_and_configs.process_config( config_file = conf_path ,
                                         score_key = 'Short Name' ,
                                         score_values = [ '.*' ] )
    # An empty dict is falsy.
    assert not bool( namespaces )
def test_sentences_has_defined_namespaces():
    """The UIMA sentence config declares exactly three namespaces."""
    conf_path = 'config/uima_sentences.conf'
    namespaces , document_data , patterns = \
        args_and_configs.process_config( config_file = conf_path ,
                                         score_key = 'Short Name' ,
                                         score_values = [ '.*' ] )
    expected_namespaces = \
        { 'cas' : 'http:///uima/cas.ecore' ,
          'type': 'http:///com/clinacuity/deid/nlp/uima/type.ecore',
          'type4': 'http:///de/tudarmstadt/ukp/dkpro/core/api/segmentation/type.ecore'
        }
    assert namespaces == expected_namespaces
def test_webanno_custom_namespaces():
    """The WebAnno config declares exactly the 'custom' namespace."""
    config_file = 'config/webanno_uima_xmi.conf'
    namespaces , document_data , patterns = \
        args_and_configs.process_config( config_file = config_file ,
                                         score_key = 'Short Name' ,
                                         score_values = [ '.*' ] )
    # Removed leftover debug dump to /tmp/stdout.log: it was scaffolding and
    # fails on systems without a writable /tmp (e.g. Windows CI).
    expected_namespaces = { 'custom': 'http:///webanno/custom.ecore' }
    assert namespaces == expected_namespaces
## Patterns
def test_set_score_key_Sentences():
    """Every pattern in the sentence config resolves to type 'Sentence'."""
    conf_path = 'config/uima_sentences.conf'
    namespaces , document_data , patterns = \
        args_and_configs.process_config( config_file = conf_path ,
                                         score_key = 'Short Name' ,
                                         score_values = [ '.*' ] )
    for pattern in patterns:
        assert pattern[ 'type' ] == "Sentence"
def test_set_score_key_DateTime_Tutorial():
    """Each score_key column selects a different annotation type string."""
    conf_path = 'config/CAS_XMI.conf'
    score_values = [ '.*' ]
    expectations = [ ( 'Short Name' , "DateTime" ) ,
                     ( 'Parent' , "Time" ) ,
                     ( 'Long Name' , "Date and Time Information" ) ]
    for key , type_name in expectations:
        namespaces , document_data , patterns = \
            args_and_configs.process_config( config_file = conf_path ,
                                             score_key = key ,
                                             score_values = score_values )
        for pattern in patterns:
            assert pattern[ 'type' ] == type_name
def test_skip_missing_XPath():
    """Config entries lacking an XPath are dropped from the pattern list."""
    # NOTE(review): an identical function with this name is defined again
    # later in this file and shadows this one at import time.
    conf_path = 'config/i2b2_2016_track-1.conf'
    namespaces , document_data , patterns = \
        args_and_configs.process_config( config_file = conf_path ,
                                         score_key = 'Short Name' ,
                                         score_values = [ '.*' ] )
    for pattern in patterns:
        assert pattern[ 'long_name' ] != "Other Person Name"
def test_set_score_key_match_Time_Tutorial():
    """An exact value ('Time') still selects the right type per score_key."""
    conf_path = 'config/CAS_XMI.conf'
    score_values = [ 'Time' ]
    expectations = [ ( 'Short Name' , "DateTime" ) ,
                     ( 'Parent' , "Time" ) ,
                     ( 'Long Name' , "Date and Time Information" ) ]
    for key , type_name in expectations:
        namespaces , document_data , patterns = \
            args_and_configs.process_config( config_file = conf_path ,
                                             score_key = key ,
                                             score_values = score_values )
        for pattern in patterns:
            assert pattern[ 'type' ] == type_name
def test_set_score_key_match_strict_start_and_end_char_Tutorial():
    """An anchored regex score_value still yields the expected 'type' for
    each score_key in the tutorial config."""
    config_path = 'config/CAS_XMI.conf'
    values = [ '^[DT].*[en]$' ]
    for key , expected_type in [ ( 'Short Name' , "DateTime" ) ,
                                 ( 'Parent' , "Time" ) ,
                                 ( 'Long Name' , "Date and Time Information" ) ]:
        namespaces , document_data , patterns = \
            args_and_configs.process_config( config_file = config_path ,
                                             score_key = key ,
                                             score_values = values )
        for pattern in patterns:
            assert pattern[ 'type' ] == expected_type
def test_set_score_key_match_over_multiple_values_Tutorial():
    """Multiple regex score_values behave the same as a single pattern:
    every returned pattern carries the expected 'type' for each score_key."""
    config_path = 'config/CAS_XMI.conf'
    values = [ '^D.*e$' , '^D.*n$' , '^T.*e$' ]
    for key , expected_type in [ ( 'Short Name' , "DateTime" ) ,
                                 ( 'Parent' , "Time" ) ,
                                 ( 'Long Name' , "Date and Time Information" ) ]:
        namespaces , document_data , patterns = \
            args_and_configs.process_config( config_file = config_path ,
                                             score_key = key ,
                                             score_values = values )
        for pattern in patterns:
            assert pattern[ 'type' ] == expected_type
# NOTE(review): exact duplicate of the test_skip_missing_XPath defined earlier
# in this file; under pytest collection this second definition silently shadows
# the first. Consider deleting one of the two copies.
def test_skip_missing_XPath():
    """Patterns without an XPath in the config must be skipped entirely."""
    filename = 'config/i2b2_2016_track-1.conf'
    score_values = [ '.*' ]
    namespaces , document_data , patterns = \
        args_and_configs.process_config( config_file = filename ,
                                         score_key = 'Short Name' ,
                                         score_values = score_values )
    # "Other Person Name" has no XPath entry, so it must never be returned.
    for pattern in patterns:
        assert pattern[ 'long_name' ] != "Other Person Name"
def test_union_patterns_exact_match():
    """When reference and test configs select the identical pattern set,
    align_patterns() leaves both sides fully matched by 'type'."""
    filename = 'config/i2b2_2016_track-1.conf'
    namespaces , document_data , ref_patterns = \
        args_and_configs.process_config( config_file = filename ,
                                         score_key = 'Short Name' ,
                                         score_values = [ '(Patient|Provider)' ] )
    namespaces , document_data , test_patterns = \
        args_and_configs.process_config( config_file = filename ,
                                         score_key = 'Short Name' ,
                                         score_values = [ '(Patient|Provider)' ] )
    ref_patterns , test_patterns = \
        args_and_configs.align_patterns( ref_patterns , test_patterns ,
                                         collapse_all_patterns = False )
    # Every type on one side must appear on the other side; pattern types are
    # strings, so an unmatched entry is an automatic failure.
    ref_types = [ pattern[ 'type' ] for pattern in ref_patterns ]
    test_types = [ pattern[ 'type' ] for pattern in test_patterns ]
    for ref_type in ref_types:
        assert ref_type in test_types
    for test_type in test_types:
        assert test_type in ref_types
def test_union_patterns_more_in_ref():
    """After align_patterns(), every reference pattern type must also appear
    among the test pattern types, and vice versa, even when the reference
    config selected more patterns.

    Bug fix: the original inner-loop line
    `test_pattern[ 'type' ] == ref_pattern[ 'type' ]` was a bare comparison
    with no `assert` — a no-op expression statement. The matching logic is
    now expressed as explicit membership assertions.
    """
    filename = 'config/i2b2_2016_track-1.conf'
    namespaces , document_data , ref_patterns = \
        args_and_configs.process_config( config_file = filename ,
                                         score_key = 'Short Name' ,
                                         score_values = [ '(Patient|Provider)' ] )
    namespaces , document_data , test_patterns = \
        args_and_configs.process_config( config_file = filename ,
                                         score_key = 'Short Name' ,
                                         score_values = [ '(Patient)' ] )
    ref_patterns , test_patterns = \
        args_and_configs.align_patterns( ref_patterns , test_patterns ,
                                         collapse_all_patterns = False )
    # Pattern types are strings, so an unmatched entry fails the membership test.
    ref_types = [ pattern[ 'type' ] for pattern in ref_patterns ]
    test_types = [ pattern[ 'type' ] for pattern in test_patterns ]
    for ref_type in ref_types:
        assert ref_type in test_types
    for test_type in test_types:
        assert test_type in ref_types
def test_union_patterns_more_in_test():
    """After align_patterns(), both sides must cover each other's pattern
    types, even when the test config selected more patterns.

    Bug fix: the original inner-loop comparison was missing its `assert`
    (a no-op expression statement); replaced with membership assertions.
    """
    filename = 'config/i2b2_2016_track-1.conf'
    namespaces , document_data , ref_patterns = \
        args_and_configs.process_config( config_file = filename ,
                                         score_key = 'Short Name' ,
                                         score_values = [ '(Patient)' ] )
    namespaces , document_data , test_patterns = \
        args_and_configs.process_config( config_file = filename ,
                                         score_key = 'Short Name' ,
                                         score_values = [ '(Patient|Provider)' ] )
    ref_patterns , test_patterns = \
        args_and_configs.align_patterns( ref_patterns , test_patterns ,
                                         collapse_all_patterns = False )
    ref_types = [ pattern[ 'type' ] for pattern in ref_patterns ]
    test_types = [ pattern[ 'type' ] for pattern in test_patterns ]
    for ref_type in ref_types:
        assert ref_type in test_types
    for test_type in test_types:
        assert test_type in ref_types
def test_union_patterns_venn_diagram():
    """Partially-overlapping reference and test selections must still be
    fully aligned (the union of both sides) after align_patterns().

    Bug fix: the original inner-loop comparison was missing its `assert`
    (a no-op expression statement); replaced with membership assertions.
    """
    filename = 'config/i2b2_2016_track-1.conf'
    namespaces , document_data , ref_patterns = \
        args_and_configs.process_config( config_file = filename ,
                                         score_key = 'Short Name' ,
                                         score_values = [ '(Patient|Provider|StreetCity)' ] )
    namespaces , document_data , test_patterns = \
        args_and_configs.process_config( config_file = filename ,
                                         score_key = 'Short Name' ,
                                         score_values = [ '(Patient|Provider|StateCountry)' ] )
    ref_patterns , test_patterns = \
        args_and_configs.align_patterns( ref_patterns , test_patterns ,
                                         collapse_all_patterns = False )
    ref_types = [ pattern[ 'type' ] for pattern in ref_patterns ]
    test_types = [ pattern[ 'type' ] for pattern in test_patterns ]
    for ref_type in ref_types:
        assert ref_type in test_types
    for test_type in test_types:
        assert test_type in ref_types
def test_union_patterns_empty_ref():
    """An empty reference selection must be filled in from the test side so
    both pattern lists align after align_patterns().

    Bug fix: the original inner-loop comparison was missing its `assert`
    (a no-op expression statement); replaced with membership assertions.
    """
    filename = 'config/i2b2_2016_track-1.conf'
    namespaces , document_data , ref_patterns = \
        args_and_configs.process_config( config_file = filename ,
                                         score_key = 'Short Name' ,
                                         score_values = [ 'I.Do.Not.Exist' ] )
    namespaces , document_data , test_patterns = \
        args_and_configs.process_config( config_file = filename ,
                                         score_key = 'Short Name' ,
                                         score_values = [ '(Patient|Provider)' ] )
    ref_patterns , test_patterns = \
        args_and_configs.align_patterns( ref_patterns , test_patterns ,
                                         collapse_all_patterns = False )
    ref_types = [ pattern[ 'type' ] for pattern in ref_patterns ]
    test_types = [ pattern[ 'type' ] for pattern in test_patterns ]
    for ref_type in ref_types:
        assert ref_type in test_types
    for test_type in test_types:
        assert test_type in ref_types
def test_union_patterns_empty_test():
    """An empty test selection must be filled in from the reference side so
    both pattern lists align after align_patterns().

    Bug fix: the original inner-loop comparison was missing its `assert`
    (a no-op expression statement); replaced with membership assertions.
    """
    filename = 'config/i2b2_2016_track-1.conf'
    namespaces , document_data , ref_patterns = \
        args_and_configs.process_config( config_file = filename ,
                                         score_key = 'Short Name' ,
                                         score_values = [ '(Patient|Provider)' ] )
    namespaces , document_data , test_patterns = \
        args_and_configs.process_config( config_file = filename ,
                                         score_key = 'Short Name' ,
                                         score_values = [ 'I.Do.No.Exist' ] )
    ref_patterns , test_patterns = \
        args_and_configs.align_patterns( ref_patterns , test_patterns ,
                                         collapse_all_patterns = False )
    ref_types = [ pattern[ 'type' ] for pattern in ref_patterns ]
    test_types = [ pattern[ 'type' ] for pattern in test_patterns ]
    for ref_type in ref_types:
        assert ref_type in test_types
    for test_type in test_types:
        assert test_type in ref_types
## Document Data
def test_default_document_format():
    """A config without an explicit document format reports 'Unknown'."""
    namespaces , document_data , patterns = \
        args_and_configs.process_config( config_file = 'config/i2b2_2016_track-1.conf' ,
                                         score_key = 'Short Name' ,
                                         score_values = [ '.*' ] )
    assert document_data[ 'format' ] == 'Unknown'
def test_plaintext_document_format():
    """The plaintext sentences config declares the 'txt' document format."""
    namespaces , document_data , patterns = \
        args_and_configs.process_config( config_file = 'config/plaintext_sentences.conf' ,
                                         score_key = 'Short Name' ,
                                         score_values = [ '.*' ] )
    assert document_data[ 'format' ] == 'txt'
def test_brat_standoff_format():
    """Brat standoff configs yield only Problem/Allergen patterns, each with
    the 'T' type prefix and the full list of optional attributes."""
    namespaces , document_data , patterns = \
        args_and_configs.process_config( config_file = 'config/brat_problems_allergies_standoff.conf' ,
                                         score_key = 'Short Name' ,
                                         score_values = [ '.*' ] )
    expected_attributes = [ 'Conditional' ,
                            'Generic' ,
                            'Historical' ,
                            'Negated' ,
                            'NotPatient' ,
                            'Uncertain' ]
    for pattern in patterns:
        assert pattern[ 'short_name' ] in ( 'Problem' , 'Allergen' )
        assert pattern[ 'type_prefix' ] == 'T'
        assert pattern[ 'optional_attributes' ] == expected_attributes
## Raw Content
def test_raw_content_extraction_from_cdata():
    """The i2b2 config extracts raw text via a CDATA XPath only — no tag
    XPath and no content attribute."""
    namespaces , document_data , patterns = \
        args_and_configs.process_config( config_file = 'config/i2b2_2016_track-1.conf' ,
                                         score_key = 'Short Name' ,
                                         score_values = [ '.*' ] )
    assert document_data[ 'cdata_xpath' ] == './TEXT'
    for absent_key in ( 'tag_xpath' , 'content_attribute' ):
        assert absent_key not in document_data
def test_raw_content_extraction_from_attribute():
    """The WebAnno XMI config extracts raw text from a tag attribute — no
    CDATA XPath."""
    namespaces , document_data , patterns = \
        args_and_configs.process_config( config_file = 'config/webanno_phi_xmi.conf' ,
                                         score_key = 'Short Name' ,
                                         score_values = [ '.*' ] )
    assert 'cdata_xpath' not in document_data
    assert document_data[ 'tag_xpath' ] == './cas:Sofa'
    assert document_data[ 'content_attribute' ] == 'sofaString'
def test_raw_content_extraction_from_plaintext():
    """Plaintext configs define none of the XML content-extraction keys."""
    namespaces , document_data , patterns = \
        args_and_configs.process_config( config_file = 'config/plaintext_sentences.conf' ,
                                         score_key = 'Short Name' ,
                                         score_values = [ '.*' ] )
    for absent_key in ( 'cdata_xpath' , 'tag_xpath' , 'content_attribute' ):
        assert absent_key not in document_data
## Required and Optional Attribute Extraction
def test_optional_attributes():
    """The WebAnno problems/allergies config exposes all six optional
    attributes on its first pattern."""
    namespaces , document_data , patterns = \
        args_and_configs.process_config( config_file = 'config/webanno_problems_allergies_xmi.conf' ,
                                         score_key = 'Short Name' ,
                                         score_values = [ '.*' ] )
    present = patterns[ 0 ][ 'optional_attributes' ]
    for attribute in ( 'conditional' ,
                       'generic' ,
                       'historical' ,
                       'negated' ,
                       'not_patient' ,
                       'uncertain' ):
        assert attribute in present
#############################################
## Helper functions to help in setting up tests
#############################################
def convert_configs_to_json():
    """Helper (not a test): dump the compiled pattern list of several sample
    configs to JSON fixture files under tests/data/."""
    for fileroot in ( 'CAS_XMI' ,
                      'i2b2_2016_track-1' ,
                      'uima_sentences' ,
                      'webanno_uima_xmi' ):
        namespaces , document_data , patterns = \
            args_and_configs.process_config( config_file = 'config/' + fileroot + '.conf' ,
                                             score_key = 'Short Name' ,
                                             score_values = [ '.*' ] )
        with open( 'tests/data/' + fileroot + '.json' , 'w' ) as fp:
            json.dump( patterns , fp , indent = 4 )
| 46.533537
| 93
| 0.581504
| 3,228
| 30,526
| 5.145911
| 0.061648
| 0.063572
| 0.055626
| 0.058275
| 0.896394
| 0.88634
| 0.868039
| 0.868039
| 0.858226
| 0.854915
| 0
| 0.014799
| 0.315993
| 30,526
| 655
| 94
| 46.60458
| 0.780747
| 0.011924
| 0
| 0.807829
| 0
| 0
| 0.159713
| 0.068406
| 0
| 0
| 0
| 0
| 0.156584
| 1
| 0.078292
| false
| 0
| 0.005338
| 0
| 0.08363
| 0.078292
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
43b4de3434ec467dfec7c2e5252fce930b0d7fb4
| 17,336
|
py
|
Python
|
src_graph/edge_formation_and_deletion_REL_ST.py
|
sanja7s/SR_Twitter
|
2eb499c9aa25ba6e9860cd77eac6832890d2c126
|
[
"MIT"
] | null | null | null |
src_graph/edge_formation_and_deletion_REL_ST.py
|
sanja7s/SR_Twitter
|
2eb499c9aa25ba6e9860cd77eac6832890d2c126
|
[
"MIT"
] | null | null | null |
src_graph/edge_formation_and_deletion_REL_ST.py
|
sanja7s/SR_Twitter
|
2eb499c9aa25ba6e9860cd77eac6832890d2c126
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
from the month of edge formation, find the SR before, at the time and after
"""
from collections import defaultdict
import codecs
import os
import json
import numpy as np
from igraph import *
IN_DIR = "../../../DATA/General/"
os.chdir(IN_DIR)
F_IN = "mention/edge_formation_deletion_MOs.dat"
F_OUT = "mention/edge_formation_and_deletion_SR_stats_STRICT777.dat"
MONTHS = ["5", "6", "7", "8", "9", "10", "11"]
#########################
# read from a file that is an edge list with weights
#########################
def read_in_MO_graph(MO):
    """Load the directed, weighted mention graph for month string MO.

    Reads an Ncol edge list ('src dst weight' per line); self-loops are kept.
    Prints the igraph summary and returns the Graph. (Python 2 module.)
    """
    G = Graph.Read_Ncol('mention/' + MO + '_MENT_weight_dir_self_loops', directed=True, weights=True)
    print G.summary()
    return G
def read_in_MO_graph_MUTUAL_UNW(MO):
    """Load the month-MO mention graph and keep only mutual (reciprocated)
    edges, collapsed to an undirected graph.

    combine_edges='ignore' drops edge attributes (weights), so the result is
    effectively unweighted. Prints the summary and returns the Graph.
    """
    G = Graph.Read_Ncol('mention/' + MO + '_MENT_weight_dir_self_loops', directed=True, weights=True)
    G.to_undirected(mode="mutual", combine_edges='ignore')
    print G.summary()
    return G
def extract_edge_formation_and_deletion_REL_ST_with_STDEV_POP():
    """Relative status by popularity (weighted in-strength).

    For each edge with a recorded formation and deletion month, compute the
    absolute in-strength difference |pop(u1) - pop(u2)| in the month before
    formation, at formation, in the months in between, at deletion and the
    month after, then print mean and stdev per phase.
    """
    # One mention graph per month, keyed by month string.
    MO_MENT = defaultdict(int)
    for MO in MONTHS:
        MO_MENT[MO] = read_in_MO_graph(MO).copy()
    edges_MOs = defaultdict(int)  # NOTE(review): never used
    output_file = open(F_OUT, 'w')  # NOTE(review): opened but never written to or closed
    cnt = 0
    TOT_SR_BEFORE = 0  # NOTE(review): the TOT_SR_* accumulators are never used
    TOT_SR_FORMATION = 0
    TOT_SR_MID = 0
    TOT_SR_DELETION = 0
    TOT_SR_AFTER = 0
    TOT_BEFORE = []
    TOT_DELETION = []
    TOT_AFTER = []
    TOT_FORMATION = []
    TOT_MID = []
    with codecs.open(F_IN,'r', encoding='utf8') as input_file:
        for line in input_file:
            (userA, userB, MO_formation, MO_deletion) = line.split()
            MO_formation = int(MO_formation)
            # Skip edges whose formation/deletion month leaves no valid
            # "before" (formation-1) or "after" (deletion+1) month in MONTHS.
            if MO_formation == 4 or MO_formation >= 10:
                continue
            MO_deletion = int(MO_deletion)
            if MO_deletion <= 6 or MO_deletion >= 10:
                continue
            cnt += 1
            userA = int(userA)
            userB = int(userB)
            # Canonical (smaller-id, larger-id) ordering of the pair.
            if userA < userB:
                u1 = userA
                u2 = userB
            else:
                u1 = userB
                u2 = userA
            SR_before = 0  # NOTE(review): the SR_* locals are never read
            SR_formation = 0
            SR_mid = 0
            SR_deletion = 0
            SR_after = 0
            # --- month before formation (MONTHS is 0-indexed from month 5) ---
            MO_prior = MONTHS[int(MO_formation)-1-5]
            MO_prior = str(MO_prior)
            G = MO_MENT[MO_prior]
            nA = G.vs.select(name = str(u1))
            nB = G.vs.select(name = str(u2))
            # IndexError => the user has no vertex in that month's graph.
            try:
                popA = G.strength(nA[0].index, mode=IN, weights='weight')
            except IndexError:
                popA = 0
            try:
                popB = G.strength(nB[0].index, mode=IN, weights='weight')
            except IndexError:
                popB = 0
            prior = abs(popA - popB)
            # --- formation month ---
            MO_formation = str(MO_formation)
            G = MO_MENT[MO_formation]
            nA = G.vs.select(name = str(u1))
            nB = G.vs.select(name = str(u2))
            try:
                popA = G.strength(nA[0].index, mode=IN, weights='weight')
            except IndexError:
                popA = 0
                print u1, u2, MO_formation
            try:
                popB = G.strength(nB[0].index, mode=IN, weights='weight')
            except IndexError:
                popB = 0
                print u2, u1, MO_formation
            formation = abs(popA - popB)
            # --- months strictly between formation and deletion ---
            i = int(MO_formation)- 5 + 1
            #N = 7
            #MO = MONTHS[i]
            while i < MO_deletion-5+1:
                MO = MONTHS[i]
                G = MO_MENT[MO]
                nA = G.vs.select(name = str(u1))
                nB = G.vs.select(name = str(u2))
                try:
                    popA = G.strength(nA[0].index, mode=IN, weights='weight')
                except IndexError:
                    popA = 0
                try:
                    popB = G.strength(nB[0].index, mode=IN, weights='weight')
                except IndexError:
                    popB = 0
                diff = abs(popA - popB)
                TOT_MID.append(diff)
                i += 1
            # --- deletion month (NOTE(review): no IndexError guard here) ---
            MO_deletion = str(MO_deletion)
            G = MO_MENT[MO_deletion]
            nA = G.vs.select(name = str(u1))
            nB = G.vs.select(name = str(u2))
            popA = G.strength(nA[0].index, mode=IN, weights='weight')
            popB = G.strength(nB[0].index, mode=IN, weights='weight')
            deletion = abs(popA - popB)
            """
if MO_formation == MO_deletion:
assert i - 1 == MO_deletion - 5
SR_mid += SR_formation
assert SR_formation == SR_deletion
"""
            # --- month after deletion ---
            MO_after = MONTHS[int(MO_deletion)+1-5]
            MO_after = str(MO_after)
            G = MO_MENT[MO_after]
            nA = G.vs.select(name = str(u1))
            nB = G.vs.select(name = str(u2))
            try:
                popA = G.strength(nA[0].index, mode=IN, weights='weight')
            except IndexError:
                popA = 0
            try:
                popB = G.strength(nB[0].index, mode=IN, weights='weight')
            except IndexError:
                popB = 0
            after = abs(popA - popB)
            TOT_AFTER.append(after)
            TOT_FORMATION.append(formation)
            TOT_BEFORE.append(prior)
            TOT_DELETION.append(deletion)
    # Per-phase mean and standard deviation over all qualifying edges.
    avg_bef = np.mean(TOT_BEFORE)
    stdev_bef = np.std(TOT_BEFORE, dtype=np.float64)
    #print TOT_BEFORE
    avg_form = np.mean(TOT_FORMATION)
    stdev_form = np.std(TOT_FORMATION, dtype=np.float64)
    #print TOT_FORMATION
    avg_mid = np.mean(TOT_MID)
    stdev_mid = np.std(TOT_MID, dtype=np.float64)
    #print TOT_MID
    avg_del = np.mean(TOT_DELETION)
    stdev_del = np.std(TOT_DELETION, dtype=np.float64)
    #print TOT_DELETION
    avg_aft = np.mean(TOT_AFTER)
    stdev_aft = np.std(TOT_AFTER, dtype=np.float64)
    #print TOT_AFTER
    print "processed %d edges " % cnt
    cnt = float(cnt)
    print "Average REL ST POP, stdev before %f, %f, at the time %f, %f of formation, in the middle %f, %f, at deletion %f, %f and after %f, %f edges formation " % \
        (avg_bef, stdev_bef, avg_form, stdev_form, avg_mid, stdev_mid, avg_del, stdev_del, avg_aft, stdev_aft)
    print
    print avg_bef, avg_form, avg_mid, avg_del, avg_aft
    print stdev_bef, stdev_form, stdev_mid, stdev_del, stdev_aft
def extract_edge_formation_and_deletion_REL_ST_with_STDEV_ACT():
    """Relative status by activity (weighted out-strength).

    Identical to the *_POP variant except strength is measured with
    mode=OUT (mentions made rather than received). Prints per-phase mean
    and stdev of |act(u1) - act(u2)|.
    """
    MO_MENT = defaultdict(int)
    for MO in MONTHS:
        MO_MENT[MO] = read_in_MO_graph(MO).copy()
    edges_MOs = defaultdict(int)  # NOTE(review): never used
    output_file = open(F_OUT, 'w')  # NOTE(review): opened but never written to or closed
    cnt = 0
    TOT_SR_BEFORE = 0  # NOTE(review): the TOT_SR_* accumulators are never used
    TOT_SR_FORMATION = 0
    TOT_SR_MID = 0
    TOT_SR_DELETION = 0
    TOT_SR_AFTER = 0
    TOT_BEFORE = []
    TOT_DELETION = []
    TOT_AFTER = []
    TOT_FORMATION = []
    TOT_MID = []
    with codecs.open(F_IN,'r', encoding='utf8') as input_file:
        for line in input_file:
            (userA, userB, MO_formation, MO_deletion) = line.split()
            MO_formation = int(MO_formation)
            # Skip edges without a valid before/after month in MONTHS.
            if MO_formation == 4 or MO_formation >= 10:
                continue
            MO_deletion = int(MO_deletion)
            if MO_deletion <= 6 or MO_deletion >= 10:
                continue
            cnt += 1
            userA = int(userA)
            userB = int(userB)
            if userA < userB:
                u1 = userA
                u2 = userB
            else:
                u1 = userB
                u2 = userA
            SR_before = 0  # NOTE(review): the SR_* locals are never read
            SR_formation = 0
            SR_mid = 0
            SR_deletion = 0
            SR_after = 0
            # --- month before formation ---
            MO_prior = MONTHS[int(MO_formation)-1-5]
            MO_prior = str(MO_prior)
            G = MO_MENT[MO_prior]
            nA = G.vs.select(name = str(u1))
            nB = G.vs.select(name = str(u2))
            # IndexError => the user has no vertex in that month's graph.
            try:
                popA = G.strength(nA[0].index, mode=OUT, weights='weight')
            except IndexError:
                popA = 0
            try:
                popB = G.strength(nB[0].index, mode=OUT, weights='weight')
            except IndexError:
                popB = 0
            prior = abs(popA - popB)
            # --- formation month ---
            MO_formation = str(MO_formation)
            G = MO_MENT[MO_formation]
            nA = G.vs.select(name = str(u1))
            nB = G.vs.select(name = str(u2))
            try:
                popA = G.strength(nA[0].index, mode=OUT, weights='weight')
            except IndexError:
                popA = 0
                print u1, u2, MO_formation
            try:
                popB = G.strength(nB[0].index, mode=OUT, weights='weight')
            except IndexError:
                popB = 0
                print u2, u1, MO_formation
            formation = abs(popA - popB)
            # --- months strictly between formation and deletion ---
            i = int(MO_formation)- 5 + 1
            #N = 7
            #MO = MONTHS[i]
            while i < MO_deletion-5+1:
                MO = MONTHS[i]
                G = MO_MENT[MO]
                nA = G.vs.select(name = str(u1))
                nB = G.vs.select(name = str(u2))
                try:
                    popA = G.strength(nA[0].index, mode=OUT, weights='weight')
                except IndexError:
                    popA = 0
                try:
                    popB = G.strength(nB[0].index, mode=OUT, weights='weight')
                except IndexError:
                    popB = 0
                diff = abs(popA - popB)
                TOT_MID.append(diff)
                i += 1
            # --- deletion month (NOTE(review): no IndexError guard here) ---
            MO_deletion = str(MO_deletion)
            G = MO_MENT[MO_deletion]
            nA = G.vs.select(name = str(u1))
            nB = G.vs.select(name = str(u2))
            popA = G.strength(nA[0].index, mode=OUT, weights='weight')
            popB = G.strength(nB[0].index, mode=OUT, weights='weight')
            deletion = abs(popA - popB)
            """
if MO_formation == MO_deletion:
assert i - 1 == MO_deletion - 5
SR_mid += SR_formation
assert SR_formation == SR_deletion
"""
            # --- month after deletion ---
            MO_after = MONTHS[int(MO_deletion)+1-5]
            MO_after = str(MO_after)
            G = MO_MENT[MO_after]
            nA = G.vs.select(name = str(u1))
            nB = G.vs.select(name = str(u2))
            try:
                popA = G.strength(nA[0].index, mode=OUT, weights='weight')
            except IndexError:
                popA = 0
            try:
                popB = G.strength(nB[0].index, mode=OUT, weights='weight')
            except IndexError:
                popB = 0
            after = abs(popA - popB)
            TOT_AFTER.append(after)
            TOT_FORMATION.append(formation)
            TOT_BEFORE.append(prior)
            TOT_DELETION.append(deletion)
    # Per-phase mean and standard deviation over all qualifying edges.
    avg_bef = np.mean(TOT_BEFORE)
    stdev_bef = np.std(TOT_BEFORE, dtype=np.float64)
    #print TOT_BEFORE
    avg_form = np.mean(TOT_FORMATION)
    stdev_form = np.std(TOT_FORMATION, dtype=np.float64)
    #print TOT_FORMATION
    avg_mid = np.mean(TOT_MID)
    stdev_mid = np.std(TOT_MID, dtype=np.float64)
    #print TOT_MID
    avg_del = np.mean(TOT_DELETION)
    stdev_del = np.std(TOT_DELETION, dtype=np.float64)
    #print TOT_DELETION
    avg_aft = np.mean(TOT_AFTER)
    stdev_aft = np.std(TOT_AFTER, dtype=np.float64)
    #print TOT_AFTER
    print "processed %d edges " % cnt
    cnt = float(cnt)
    print "Average REL ST ACT, stdev before %f, %f, at the time %f, %f of formation, in the middle %f, %f, at deletion %f, %f and after %f, %f edges formation " % \
        (avg_bef, stdev_bef, avg_form, stdev_form, avg_mid, stdev_mid, avg_del, stdev_del, avg_aft, stdev_aft)
    print
    print avg_bef, avg_form, avg_mid, avg_del, avg_aft
    print stdev_bef, stdev_form, stdev_mid, stdev_del, stdev_aft
def extract_edge_formation_and_deletion_REL_ST_with_STDEV_MUTUAL_UNW():
    """Relative status by strong-contact count (unweighted degree on the
    mutual-mention graph).

    Same per-phase scheme as the *_POP variant, but uses
    read_in_MO_graph_MUTUAL_UNW() and vertex degree instead of strength.
    """
    MO_MENT = defaultdict(int)
    for MO in MONTHS:
        MO_MENT[MO] = read_in_MO_graph_MUTUAL_UNW(MO).copy()
    edges_MOs = defaultdict(int)  # NOTE(review): never used
    output_file = open(F_OUT, 'w')  # NOTE(review): opened but never written to or closed
    cnt = 0
    TOT_BEFORE = []
    TOT_DELETION = []
    TOT_AFTER = []
    TOT_FORMATION = []
    TOT_MID = []
    with codecs.open(F_IN,'r', encoding='utf8') as input_file:
        for line in input_file:
            (userA, userB, MO_formation, MO_deletion) = line.split()
            MO_formation = int(MO_formation)
            # Skip edges without a valid before/after month in MONTHS.
            if MO_formation == 4 or MO_formation >= 10:
                continue
            MO_deletion = int(MO_deletion)
            if MO_deletion <= 6 or MO_deletion >= 10:
                continue
            cnt += 1
            userA = int(userA)
            userB = int(userB)
            if userA < userB:
                u1 = userA
                u2 = userB
            else:
                u1 = userB
                u2 = userA
            # --- month before formation ---
            MO_prior = MONTHS[int(MO_formation)-1-5]
            MO_prior = str(MO_prior)
            G = MO_MENT[MO_prior]
            nA = G.vs.select(name = str(u1))
            nB = G.vs.select(name = str(u2))
            # IndexError => the user has no vertex in that month's graph.
            try:
                popA = G.degree(nA[0].index,)
            except IndexError:
                popA = 0
            try:
                popB = G.degree(nB[0].index)
            except IndexError:
                popB = 0
            prior = abs(popA - popB)
            # --- formation month ---
            MO_formation = str(MO_formation)
            G = MO_MENT[MO_formation]
            nA = G.vs.select(name = str(u1))
            nB = G.vs.select(name = str(u2))
            try:
                popA = G.degree(nA[0].index)
            except IndexError:
                popA = 0
                print u1, u2, MO_formation
            try:
                popB = G.degree(nB[0].index)
            except IndexError:
                popB = 0
                print u2, u1, MO_formation
            formation = abs(popA - popB)
            # --- months strictly between formation and deletion ---
            i = int(MO_formation)- 5 + 1
            #N = 7
            #MO = MONTHS[i]
            while i < MO_deletion-5+1:
                MO = MONTHS[i]
                G = MO_MENT[MO]
                nA = G.vs.select(name = str(u1))
                nB = G.vs.select(name = str(u2))
                try:
                    popA = G.degree(nA[0].index)
                except IndexError:
                    popA = 0
                try:
                    popB = G.degree(nB[0].index)
                except IndexError:
                    popB = 0
                diff = abs(popA - popB)
                TOT_MID.append(diff)
                i += 1
            # --- deletion month (NOTE(review): no IndexError guard here) ---
            MO_deletion = str(MO_deletion)
            G = MO_MENT[MO_deletion]
            nA = G.vs.select(name = str(u1))
            nB = G.vs.select(name = str(u2))
            popA = G.degree(nA[0].index)
            popB = G.degree(nB[0].index)
            deletion = abs(popA - popB)
            """
if MO_formation == MO_deletion:
assert i - 1 == MO_deletion - 5
SR_mid += SR_formation
assert SR_formation == SR_deletion
"""
            # --- month after deletion ---
            MO_after = MONTHS[int(MO_deletion)+1-5]
            MO_after = str(MO_after)
            G = MO_MENT[MO_after]
            nA = G.vs.select(name = str(u1))
            nB = G.vs.select(name = str(u2))
            try:
                popA = G.degree(nA[0].index)
            except IndexError:
                popA = 0
            try:
                popB = G.degree(nB[0].index)
            except IndexError:
                popB = 0
            after = abs(popA - popB)
            TOT_AFTER.append(after)
            TOT_FORMATION.append(formation)
            TOT_BEFORE.append(prior)
            TOT_DELETION.append(deletion)
    # Per-phase mean and standard deviation over all qualifying edges.
    avg_bef = np.mean(TOT_BEFORE)
    stdev_bef = np.std(TOT_BEFORE, dtype=np.float64)
    #print TOT_BEFORE
    avg_form = np.mean(TOT_FORMATION)
    stdev_form = np.std(TOT_FORMATION, dtype=np.float64)
    #print TOT_FORMATION
    avg_mid = np.mean(TOT_MID)
    stdev_mid = np.std(TOT_MID, dtype=np.float64)
    #print TOT_MID
    avg_del = np.mean(TOT_DELETION)
    stdev_del = np.std(TOT_DELETION, dtype=np.float64)
    #print TOT_DELETION
    avg_aft = np.mean(TOT_AFTER)
    stdev_aft = np.std(TOT_AFTER, dtype=np.float64)
    #print TOT_AFTER
    print "processed %d edges " % cnt
    cnt = float(cnt)
    print "Average REL ST MUTUAL CONT, stdev before %f, %f, at the time %f, %f of formation, in the middle %f, %f, at deletion %f, %f and after %f, %f edges formation " % \
        (avg_bef, stdev_bef, avg_form, stdev_form, avg_mid, stdev_mid, avg_del, stdev_del, avg_aft, stdev_aft)
    print
    print avg_bef, avg_form, avg_mid, avg_del, avg_aft
    print stdev_bef, stdev_form, stdev_mid, stdev_del, stdev_aft
def extract_edge_formation_and_deletion_REL_ST_with_STDEV_TOTAL_UNW():
    """Relative status by total contact count (unweighted degree on the full
    directed mention graph, i.e. including weak one-way contacts).

    Same per-phase scheme as the *_MUTUAL_UNW variant but loads the graphs
    with read_in_MO_graph() instead of the mutual-only loader.
    """
    MO_MENT = defaultdict(int)
    for MO in MONTHS:
        MO_MENT[MO] = read_in_MO_graph(MO).copy()
    edges_MOs = defaultdict(int)  # NOTE(review): never used
    output_file = open(F_OUT, 'w')  # NOTE(review): opened but never written to or closed
    cnt = 0
    TOT_BEFORE = []
    TOT_DELETION = []
    TOT_AFTER = []
    TOT_FORMATION = []
    TOT_MID = []
    with codecs.open(F_IN,'r', encoding='utf8') as input_file:
        for line in input_file:
            (userA, userB, MO_formation, MO_deletion) = line.split()
            MO_formation = int(MO_formation)
            # Skip edges without a valid before/after month in MONTHS.
            if MO_formation == 4 or MO_formation >= 10:
                continue
            MO_deletion = int(MO_deletion)
            if MO_deletion <= 6 or MO_deletion >= 10:
                continue
            cnt += 1
            userA = int(userA)
            userB = int(userB)
            if userA < userB:
                u1 = userA
                u2 = userB
            else:
                u1 = userB
                u2 = userA
            # --- month before formation ---
            MO_prior = MONTHS[int(MO_formation)-1-5]
            MO_prior = str(MO_prior)
            G = MO_MENT[MO_prior]
            nA = G.vs.select(name = str(u1))
            nB = G.vs.select(name = str(u2))
            # IndexError => the user has no vertex in that month's graph.
            try:
                popA = G.degree(nA[0].index,)
            except IndexError:
                popA = 0
            try:
                popB = G.degree(nB[0].index)
            except IndexError:
                popB = 0
            prior = abs(popA - popB)
            # --- formation month ---
            MO_formation = str(MO_formation)
            G = MO_MENT[MO_formation]
            nA = G.vs.select(name = str(u1))
            nB = G.vs.select(name = str(u2))
            try:
                popA = G.degree(nA[0].index)
            except IndexError:
                popA = 0
                print u1, u2, MO_formation
            try:
                popB = G.degree(nB[0].index)
            except IndexError:
                popB = 0
                print u2, u1, MO_formation
            formation = abs(popA - popB)
            # --- months strictly between formation and deletion ---
            i = int(MO_formation)- 5 + 1
            #N = 7
            #MO = MONTHS[i]
            while i < MO_deletion-5+1:
                MO = MONTHS[i]
                G = MO_MENT[MO]
                nA = G.vs.select(name = str(u1))
                nB = G.vs.select(name = str(u2))
                try:
                    popA = G.degree(nA[0].index)
                except IndexError:
                    popA = 0
                try:
                    popB = G.degree(nB[0].index)
                except IndexError:
                    popB = 0
                diff = abs(popA - popB)
                TOT_MID.append(diff)
                i += 1
            # --- deletion month (NOTE(review): no IndexError guard here) ---
            MO_deletion = str(MO_deletion)
            G = MO_MENT[MO_deletion]
            nA = G.vs.select(name = str(u1))
            nB = G.vs.select(name = str(u2))
            popA = G.degree(nA[0].index)
            popB = G.degree(nB[0].index)
            deletion = abs(popA - popB)
            """
if MO_formation == MO_deletion:
assert i - 1 == MO_deletion - 5
SR_mid += SR_formation
assert SR_formation == SR_deletion
"""
            # --- month after deletion ---
            MO_after = MONTHS[int(MO_deletion)+1-5]
            MO_after = str(MO_after)
            G = MO_MENT[MO_after]
            nA = G.vs.select(name = str(u1))
            nB = G.vs.select(name = str(u2))
            try:
                popA = G.degree(nA[0].index)
            except IndexError:
                popA = 0
            try:
                popB = G.degree(nB[0].index)
            except IndexError:
                popB = 0
            after = abs(popA - popB)
            TOT_AFTER.append(after)
            TOT_FORMATION.append(formation)
            TOT_BEFORE.append(prior)
            TOT_DELETION.append(deletion)
    # Per-phase mean and standard deviation over all qualifying edges.
    avg_bef = np.mean(TOT_BEFORE)
    stdev_bef = np.std(TOT_BEFORE, dtype=np.float64)
    #print TOT_BEFORE
    avg_form = np.mean(TOT_FORMATION)
    stdev_form = np.std(TOT_FORMATION, dtype=np.float64)
    #print TOT_FORMATION
    avg_mid = np.mean(TOT_MID)
    stdev_mid = np.std(TOT_MID, dtype=np.float64)
    #print TOT_MID
    avg_del = np.mean(TOT_DELETION)
    stdev_del = np.std(TOT_DELETION, dtype=np.float64)
    #print TOT_DELETION
    avg_aft = np.mean(TOT_AFTER)
    stdev_aft = np.std(TOT_AFTER, dtype=np.float64)
    #print TOT_AFTER
    print "processed %d edges " % cnt
    cnt = float(cnt)
    print "Average REL ST TOTAL CONT, stdev before %f, %f, at the time %f, %f of formation, in the middle %f, %f, at deletion %f, %f and after %f, %f edges formation " % \
        (avg_bef, stdev_bef, avg_form, stdev_form, avg_mid, stdev_mid, avg_del, stdev_del, avg_aft, stdev_aft)
    print
    print avg_bef, avg_form, avg_mid, avg_del, avg_aft
    print stdev_bef, stdev_form, stdev_mid, stdev_del, stdev_aft
# Script entry point: run only the two unweighted-degree variants
# (the weighted POP/ACT variants are defined above but not invoked).
print 'STRONG contacts'
extract_edge_formation_and_deletion_REL_ST_with_STDEV_MUTUAL_UNW()
print 'TOTAL, including weak contacts'
extract_edge_formation_and_deletion_REL_ST_with_STDEV_TOTAL_UNW()
| 24.044383
| 169
| 0.6509
| 2,797
| 17,336
| 3.835181
| 0.055417
| 0.053323
| 0.03356
| 0.048476
| 0.958143
| 0.956651
| 0.952643
| 0.949753
| 0.949753
| 0.949753
| 0
| 0.022167
| 0.216717
| 17,336
| 720
| 170
| 24.077778
| 0.767803
| 0.028438
| 0
| 0.947674
| 0
| 0.007752
| 0.067284
| 0.010758
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.011628
| null | null | 0.062016
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
43b9079daad6158e20a761d229d4930f14cdb35e
| 101
|
py
|
Python
|
mysql/__init__.py
|
IBM/backwork-backup-mysql
|
013f32ccc1ce005d90e6f0b29fdb44b5b7f8bf79
|
[
"Apache-2.0"
] | null | null | null |
mysql/__init__.py
|
IBM/backwork-backup-mysql
|
013f32ccc1ce005d90e6f0b29fdb44b5b7f8bf79
|
[
"Apache-2.0"
] | null | null | null |
mysql/__init__.py
|
IBM/backwork-backup-mysql
|
013f32ccc1ce005d90e6f0b29fdb44b5b7f8bf79
|
[
"Apache-2.0"
] | 2
|
2019-11-02T15:06:29.000Z
|
2020-06-29T14:49:19.000Z
|
"""Add support for MySQL backups
"""
from .mysql import MySQLBackup
from .mysql import MySQLRestore
| 16.833333
| 32
| 0.772277
| 13
| 101
| 6
| 0.692308
| 0.230769
| 0.384615
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.148515
| 101
| 5
| 33
| 20.2
| 0.906977
| 0.287129
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
601e4d23e2cb01b0b4c3c40f78e6f6e333c630d9
| 41,131
|
py
|
Python
|
battles.py
|
fleeb24/tnt
|
fd9d432d39809f5fb21fbaa527ac490d180d3329
|
[
"MIT"
] | null | null | null |
battles.py
|
fleeb24/tnt
|
fd9d432d39809f5fb21fbaa527ac490d180d3329
|
[
"MIT"
] | null | null | null |
battles.py
|
fleeb24/tnt
|
fd9d432d39809f5fb21fbaa527ac490d180d3329
|
[
"MIT"
] | 2
|
2019-03-13T03:50:17.000Z
|
2019-04-29T16:09:00.000Z
|
from util import adict, xset, tdict, tlist, tset, idict, PhaseComplete
from tnt_cards import discard_cards
from tnt_units import add_unit, move_unit, remove_unit
from tnt_util import travel_options, ANS_rebase_options, fill_movement
from command import make_undisputed, switch_ownership, eval_movement
import random
#******************************
# helpers *
#******************************
def apply_damage(G, b, unit_hit):
    """Apply a single hit to the unit referenced by `unit_hit` in battle `b`.

    A unit at 1 CV — or a Convoy at 2 CV, since Convoys lose 2 CV per hit —
    is destroyed: removed from play, appended to `b.dead`, and dropped from
    the battle's fire order. Otherwise its CV is reduced in place.

    Side effects: mutates `b` (fire_order, idx, dead) and `G` (unit table,
    updated set, log). Returns None.

    Fixes vs. original: no longer shadows the builtin `id`; removed the
    useless `res =` alias on the fire_order rebuild.
    """
    unit_id = unit_hit.id
    unit = G.objects.table[unit_id]
    # Parentheses make the original precedence explicit:
    # destroyed when cv == 1, or when a Convoy sits at cv == 2.
    if unit.cv == 1 or (unit.type == 'Convoy' and unit.cv == 2):
        G.logger.write('unit {} removed'.format(unit_id))
        remove_unit(G, unit)
        # Track the casualty on the battle object.
        if 'dead' not in b:
            b.dead = []
        b.dead.append(unit_hit)
        # Drop the dead unit from the fire order and re-anchor the index of
        # the currently firing entry.
        b.fire_order = [entry for entry in b.fire_order if entry.unit._id != unit_id]
        b.idx = b.fire_order.index(b.fire)
    else:
        # Convoys lose 2 CV per hit, all other unit types lose 1.
        diff = 1 if unit.type != 'Convoy' else 2
        unit.cv -= diff
        G.logger.write('{} lost {} cv: {}'.format(unit_id, diff, unit.cv))
        G.objects.updated[unit_id] = unit
def apply_damage_sea(G, b, unit_hit):
    """Sea-battle variant of :func:`apply_damage`.

    Same elimination/CV-reduction rules; additionally rebuilds the
    per-battle-group fire orders (``b.fire_orders``) after an elimination
    and re-anchors ``b.idx`` within the current battle group.
    Fix: removed the dead ``res`` alias of the rebuilt fire order.
    """
    uid = unit_hit.id
    unit = G.objects.table[uid]
    # 'and' binds tighter than 'or': eliminate at cv==1, or a Convoy at cv==2
    if unit.cv == 1 or unit.type == 'Convoy' and unit.cv == 2:
        # units reduced to 0 CV are eliminated and removed from play
        G.logger.write('unit {} removed'.format(uid))
        remove_unit(G, unit)
        # record the casualty on the battle
        if not 'dead' in b:
            b.dead = []
        b.dead.append(unit_hit)
        # drop the unit from the overall fire order
        b.fire_order = [i for i in b.fire_order if i.unit._id != uid]
        # re-compute b.fire_orders per battle_group
        b.fire_orders = adict()
        for bg in b.battle_groups:
            b.fire_orders[bg] = [u for u in b.fire_order if (u.owner != b.attacker and u.type != 'Convoy' or u.battle_group == bg)]
        b.idx = b.fire_orders[b.battle_group].index(b.fire)
    else:
        diff = 1 if unit.type != 'Convoy' else 2
        unit.cv -= diff
        G.logger.write('{} lost {} cv: {}'.format(uid, diff, unit.cv))
        G.objects.updated[uid] = unit
def attacker_moved_from(G, b, player, tilenames):
    """Return the tiles in *tilenames* that some unit in the fire order
    moved out of this turn (per ``G.temp.has_moved``).

    One entry is appended per matching unit, so a tile can appear more
    than once in the result.
    """
    matches = []
    for name in tilenames:
        for entry in b.fire_order:
            moved = G.temp.has_moved
            uid = entry.unit._id
            if uid in moved and moved[uid] == name:
                matches.append(name)
    return matches
def calc_target_classes(b, units, opponent):
    """Record on *b* the distinct unit types and groups the *opponent*
    fields among *units*."""
    enemy = [u for u in units if u.owner == opponent]
    b.opp_types = list({u.type for u in enemy})
    # NOTE: target selection actually keys off the group, not the type
    b.opp_groups = list({u.group for u in enemy})
def calc_retreat_options_for_unit(G, player, b, c, u):
    """Collect ``(unit_id, tile)`` retreat/rebase candidates for one unit *u*.

    Fortresses never retreat.  An attacker that moved this turn may only
    fall back to the tile it came from (ground units also need that tile
    adjacent).  Other ground units may retreat to adjacent friendly tiles
    (defenders not toward the attackers' approach tiles); air/naval units
    use their rebase options.
    """
    options = []
    if u.type == 'Fortress':
        return options
    if player in G.players:
        tile = b.tile
        unit = u.unit
        uid = u.id
        has_moved = G.temp.has_moved
        if player == b.attacker and uid in has_moved:
            # attacker: ONLY back to the tile it moved from this turn;
            # G units additionally require that tile to be adjacent
            if u.group != 'G' or has_moved[uid] in b.tile.borders.keys():
                options.append((uid, has_moved[uid]))
        elif u.group == 'G':
            neighbors = tile.borders.keys()
            # defender may not retreat toward tiles the attackers came from
            forbid = attacker_moved_from(G, b, player, neighbors) if player == b.defender else []
            options.extend(
                (uid, nei)
                for nei in neighbors
                if is_friendly_to_unit(G, uid, u.group, nei, player) and nei not in forbid
            )
        else:
            # ANS unit: undisputed friendly tiles within movement range
            options.extend((uid, loc) for loc in ANS_rebase_options(G, unit))
    return options
def calc_retreat_options_for_fire_unit(G, player, b, c):
    """Retreat/rebase options for the currently firing unit (``b.fire``).

    The body was a verbatim copy of :func:`calc_retreat_options_for_unit`
    with ``u = b.fire`` — it now delegates instead, keeping behaviour in
    one place.  The original's own fortress guard checked
    ``b.fire.unit.type`` (not the wrapper's ``type``), so that check is
    preserved here before delegating.
    """
    if b.fire.unit.type == 'Fortress':
        return []
    return calc_retreat_options_for_unit(G, player, b, c, b.fire)
def calc_all_retreat_options(G, player, b, c):
    """Populate ``b.retreat_options`` (list of ``(unit_id, tile)`` pairs)
    and ``b.must_retreat`` (ids of ANS units lacking friendly ground
    support) for every unit *player* has in the battle.

    NOTE(review): indentation reconstructed from a whitespace-mangled
    source.  As reconstructed, the ``elif u.group == 'G'`` arm is the
    exact complement of the ``if``, which makes the final ``else`` (ANS
    rebase options) unreachable — confirm the intended structure against
    project history.
    """
    b.retreat_options = []
    b.must_retreat = []  # ANS without friendly ground support
    # Border limits must be kept track of once a retreat option is selected.
    # Retreats are (unit_id, tile) pairs; as the user selects a retreat for
    # a unit, the set of other possible retreats must be reduced accordingly.
    # Once a retreat has been selected, only more retreats are possible —
    # the land battle then ends even if units are left.
    if player in G.players:
        # tile neighbors
        tile = b.tile
        units = [u for u in b.fire_order if u.owner == player]
        for u in units:
            # TODO: add rebase options! retreat for AirForce:
            id = u.id
            if u.group != 'G':
                b.must_retreat.append(id)
                # an ANS unit that moved in this turn retreats whence it came
                if id in G.temp.has_moved:
                    b.retreat_options.append((id, G.temp.has_moved[id]))
                    continue
            elif u.group == 'G':
                # unit can retreat into adjacent friendly territory
                neighbors = tile.borders.keys()
                for nei in neighbors:
                    if is_friendly(G, nei, player):
                        # ground units cannot retreat onto sea tiles
                        if u.group == 'G' and G.tiles[nei].type == 'sea':
                            continue
                        b.retreat_options.append((id, nei))
            else:  # ANS unit rebase options (unreachable — see NOTE above)
                locs = ANS_rebase_options(G, u.unit)
                for loc in locs:
                    b.retreat_options.append((id, loc))
def calc_mandatory_rebase_options(G, b, c):
    """Handle mandatory rebasing of air/naval (ANS) units that have no
    friendly ground support in battle *b*.

    Side effects: ANS units that entered the tile this turn are moved
    straight back (``move_unit`` plus removal from ``b.fire_order``);
    remaining rebase destinations are collected into
    ``b.mandatory_rebase_options``.  Returns an ``adict``
    ``{unit_owner: xset of (unit_id, locations)}`` when a choice must be
    offered, otherwise ``None``.

    NOTE(review): indentation reconstructed from a whitespace-mangled
    source; the two halves (non-owner units, then the tile owner's units)
    are near duplicates — candidates for the rewrite the TODO asks for.
    """
    # TODO code rewrite
    # mandatory rebase for non-owner troops when they have no ground support;
    # the rebase applies to the player who does NOT own the tile
    player = b.attacker if b.attacker != b.owner else b.defender
    non_owner_units = [u for u in b.fire_order if u.owner != b.owner]
    n_o_G = [u for u in non_owner_units if u.group == 'G']
    n_o_ANS = [u for u in non_owner_units if u.group != 'G']
    n_o_ground_support = len(n_o_G) > 0
    if len(n_o_ANS) and not n_o_ground_support:
        options = xset()
        # find out who owns the options
        # TODO: what if there is more than one opponent? Can that even
        # happen? Would need to send 2 separate option sets!
        unit_owner = n_o_ANS[0].owner
        b.mandatory_rebase_options = []
        for ans in n_o_ANS:
            unit = ans.unit
            # if this unit just moved in, it must go back to where it came
            # from — no choice to offer, so move it immediately instead of
            # adding it to the options
            if player == b.attacker and unit._id in G.temp.has_moved:
                id = unit._id
                destination = G.temp.has_moved[id]
                move_unit(G, unit, destination)
                b.fire_order = [u for u in b.fire_order if u.id != id]
                # revert visibility to just the owner
                unit.visible.clear()
                unit.visible.add(player)
                # TODO: mind border limits!
                G.logger.write('{} unit {} mandatory rebase to {}'.format(player, id, destination))
            else:
                locs = ANS_rebase_options(G, unit)
                if len(locs):
                    for loc in locs:
                        b.mandatory_rebase_options.append((unit._id, loc))
                    options.add((unit._id, locs))
        if len(options):
            code = adict()
            code[unit_owner] = options
            G.logger.write('{} select rebase option for ANS units'.format(player))
            return code
    # same check for the tile owner's own ANS units: they must rebase when
    # they lack ground support while the enemy still has ground units here
    player = b.owner
    owner_units = [u for u in b.fire_order if u.owner == b.owner]
    o_G = [u for u in owner_units if u.group == 'G']
    o_ANS = [u for u in owner_units if u.group != 'G']
    o_ground_support = len(o_G) > 0
    if len(o_ANS) and not o_ground_support and n_o_ground_support:
        options = xset()
        unit_owner = o_ANS[0].owner
        b.mandatory_rebase_options = []
        for ans in o_ANS:
            unit = ans.unit
            # if this unit just moved in, send it straight back (see above)
            if player == b.attacker and unit._id in G.temp.has_moved:
                id = unit._id
                destination = G.temp.has_moved[id]
                move_unit(G, unit, destination)
                b.fire_order = [u for u in b.fire_order if u.id != id]
                # revert visibility to just the owner
                unit.visible.clear()
                unit.visible.add(player)
                # TODO: mind border limits!
                G.logger.write('{} unit {} mandatory rebase to {}'.format(player, id, destination))
            else:
                locs = ANS_rebase_options(G, unit)
                if len(locs):
                    for loc in locs:
                        b.mandatory_rebase_options.append((unit._id, loc))
                    options.add((unit._id, locs))
        if not len(options):
            return None
        else:
            code = adict()
            code[unit_owner] = options
            G.logger.write('{} select rebase option for ANS units'.format(player))
            return code
def calc_target_units_with_max_cv(b):
    """Return the entries of ``b.target_units`` whose unit has the highest CV.

    The scan floor is 0, matching the original two-pass version: with an
    empty list (or only negative CVs) the result is empty.
    """
    top = max([0] + [entry.unit.cv for entry in b.target_units])
    return [entry for entry in b.target_units if entry.unit.cv == top]
def encode_list(G, player, lst):  # lst is a list of option tuples
    """Encode *lst* as ``{player: xset-of-option-tuples}``."""
    options = xset()
    for entry in lst:
        options.add(entry)
    code = adict()
    code[player] = options
    return code
def encode_accept(G, player, opponent=None):
    """Build a single ``('accept',)`` option set.

    Keyed by *player* when that player is present in ``G.players``,
    otherwise by *opponent* (e.g. when *player* is an AI/minor power).
    """
    options = xset()
    options.add(('accept',))
    recipient = player if player in G.players else opponent
    code = adict()
    code[recipient] = options
    return code
def encode_cmd_options(G, player):
    """Encode the current battle's target groups and retreat options as a
    single option set for *player*."""
    battle = G.temp.combat.battle
    options = xset()
    for group in battle.opp_groups:
        options.add((group,))
    for retreat in battle.retreat_options:
        options.add((retreat,))
    code = adict()
    code[player] = options
    return code
def encode_who_takes_hit_options(G, player):
    """Offer *player* the unit types (all at maximal CV) that may absorb
    the pending hit."""
    options = xset()
    for unit_type in G.temp.combat.battle.types_max_cv:
        options.add((unit_type,))
    code = adict()
    code[player] = options
    return code
def find_unit_owner(G, unit):
    """Map a unit's nationality to its controlling player via the nation
    designations table."""
    designations = G.nations.designations
    return designations[unit.nationality]
def find_tile_owner(G, tile):
    """Resolve the player owning *tile*.

    An explicit ``owner`` wins; otherwise the tile's ``alligence`` (sic —
    spelling matches the data key) is mapped through the nation
    designations.  Returns ``None`` when neither applies.
    """
    if 'owner' in tile:
        return tile.owner
    if 'alligence' in tile:
        nation = tile.alligence
        designations = G.nations.designations
        if nation in designations:
            return designations[nation]
    return None
def is_friendly_to_unit(G, uid, ugroup, tilename, player):
    """Decide whether *tilename* counts as friendly for a unit of group
    *ugroup* belonging to *player*.

    Disputed tiles are never friendly; a tile owned by *player* always is.
    On 'Sea'/'Ocean' tiles: friendly to a ground ('G') unit only if some
    unit there belongs to *player*; friendly to an air/naval unit only if
    no unit there belongs to anyone else (vacuously true when empty).
    """
    tile = G.tiles[tilename]
    if 'disputed' in tile:
        return False
    if find_tile_owner(G, tile) == player:
        return True
    if tile.type in ('Sea', 'Ocean'):
        occupant_owners = [
            find_unit_owner(G, G.objects.table[occupant]) for occupant in tile.units
        ]
        if ugroup == 'G':
            return any(owner == player for owner in occupant_owners)
        return all(owner == player for owner in occupant_owners)
    return False
def is_friendly(G, tilename, player):
    """True when the named tile has an explicit owner and it is *player*."""
    tile = G.tiles[tilename]
    return 'owner' in tile and tile.owner == player
def no_units_left(G, c, b, player):
    """True when *player* has no units remaining in the battle's fire order."""
    return not any(u.owner == player for u in b.fire_order)
def no_units_in_battle_group_left(G, c, b, player):
    """True when *player* has no units left in the currently selected
    battle group (``b.fire_orders[b.battle_group]``)."""
    current_group = b.fire_orders[b.battle_group]
    return not any(u.owner == player for u in current_group)
def roll_dice(G, b, player, opponent):
    """Return the number of hits scored by the firing unit (``b.fire``).

    One die per CV point; a die hits when its value is <= the to-hit
    limit looked up in ``G.units.rules`` for this firer type / target
    class.  NOTE(review): the "rolls" come from a fixed literal sequence
    alternating on ``b.idx`` parity — fully deterministic, presumably a
    test stub (``import random`` at module top is unused here); confirm
    before shipping.
    """
    # should return number of successful hits for a unit of cv=x
    ndice = b.fire.unit.cv
    # boundary for a successful hit
    limit = G.units.rules[b.fire.type][b.target_class]
    # technologies that could alter the limit or dice count:
    # air-defense radar presumably doubles dice over friendly territory — TODO confirm rule
    if b.fire.type == 'AirForce' and b.fire.air_def_radar and is_friendly(G, b.tilename, b.fire.owner):
        ndice *= 2
    # fleets firing at submarines ('S') hit on 3 or less
    if b.fire.type == 'Fleet' and b.target_class == 'S':
        limit = 3
    dice_rolls = [5, 1, 2, 2, 3, 3, 3, 4, 4, 5, 6][:ndice] if b.idx % 2 else [1, 2, 2, 3, 3, 3, 4, 4, 5, 6, 5][:ndice]
    outcome = sum(i <= limit for i in dice_rolls)
    #print('rolling', ndice, 'dice yields', outcome, 'hits')
    return outcome
def target_units_left(b, units, opponent):
    """Return an ``adict`` of ``id -> entry`` for the *opponent* units in
    *units* that match the current target class."""
    remaining = adict()
    matching = (u for u in units if u.owner == opponent and u.group == b.target_class)
    for entry in matching:
        remaining[entry.id] = entry
    return remaining
#return list({u.unit for u in units if u.owner == opponent and u.group == b.target_class})
def add_unique_in_order(lst, prop):
    """Collect the truthy ``el[prop]`` values from *lst*, preserving
    first-seen order and skipping duplicates and missing/falsy values."""
    seen = []
    for el in lst:
        value = el[prop] if prop in el else None
        if value and value not in seen:
            seen.append(value)
    return seen
#******************************
# old code *
#******************************
#******************************
# main *
#******************************
def land_battle_phase(G, player, action):
    """Advance the land-battle state machine by one step.

    Called repeatedly with the acting *player* and their *action* (a tuple
    from the client, or None on entry).  Either returns an encoded option
    set (an adict keyed by the player who must respond next) or raises
    PhaseComplete when the battle is fully resolved.  All battle state
    lives on ``b = G.temp.combat.battle``; ``b.stage`` names the current
    state and ``c.stages`` records the transition history.

    BUGFIX: the 'Minor' branch of select_combat_action scanned
    ``(x for x in lst)`` where ``lst`` is undefined in this function
    (NameError at runtime); the sequence to scan is the current fire
    order, i.e. ``units``.
    """
    c = G.temp.combat
    b = c.battle
    if b.stage == 'battle_start':  # starting a battle
        assert action == None, 'there is an action in have_cmd!!!!!'
        b.winner = None
        b.idx = 0
        b.fire = b.fire_order[b.idx]
        b.stage = 'battle_start_ack'
        c.stages.append(b.stage)
        G.logger.write('land battle starting in {}'.format(b.tilename))
        player = b.attacker if b.attacker in G.players else b.defender
        return encode_accept(G, player)
    playerParam = player  # remember the caller; `player` is rebound to the firing unit's owner
    player = b.fire.owner
    is_defender = player == b.defender
    opponent = b.attacker if is_defender else b.defender  # TODO: correct! (for simplicity assuming just 1 opponent!)
    units = b.fire_order
    while (True):
        if b.stage == 'battle_start_ack':  # player accepted battle start
            assert action != None, '{}: no action!!!!!'.format(b.stage)
            action = None  # accept consumed; proceed
            b.stage = 'select_combat_action'
            c.stages.append(b.stage)
        if b.stage == 'select_combat_action':
            # arriving here, b.fire should be in place and player should be b.fire.owner!
            assert action == None, '{}: action!!!!!'.format(b.stage)
            assert b.fire and player == b.fire.owner, '{} ERROR!!! b={}'.format(b.stage, b)
            b.stage = 'select_combat_action_ack'
            c.stages.append(b.stage)
            if 'combat_action' in b:
                del b.combat_action
            opponent = b.attacker if player == b.defender else b.defender
            # calc possible combat actions: target classes and retreat options
            units = b.fire_order
            b.opp_types = list({u.type for u in units if u.owner == opponent})
            b.opp_groups = list({u.group for u in units if u.owner == opponent})
            if player == 'Minor':
                # per default 'G' is selected: check whether the opponent
                # has any ground ('G') unit in the fire order
                # (was `(x for x in lst)` — see BUGFIX in the docstring)
                unitOppG = next(filter(lambda i: i.owner == opponent and i.group == 'G', (x for x in units)), False)
                if unitOppG:
                    b.opp_types = list({u.type for u in units if u.owner == opponent and u.group == 'G'})
                    b.opp_groups = list({u.group for u in units if u.owner == opponent and u.group == 'G'})
                code = encode_list(G, opponent, b.opp_groups)
            else:
                # retreat options should be the same as usual
                b.retreat_options = calc_retreat_options_for_fire_unit(G, player, b, c)
                # encode all possible target_class or retreat_tile options
                code = encode_cmd_options(G, player)
            # target class not determined yet
            b.target_class = None
            b.target_units = None
            return code
        if b.stage == 'select_combat_action_ack':
            assert action != None, '{}: no action!!!!!'.format(b.stage)
            head, *tail = action
            player = b.fire.owner
            opponent = b.attacker if player == b.defender else b.defender
            # a (unit, tile) pair means retreat; a single entry is a target class
            if len(action) > 1:
                b.stage = 'retreat'
            else:
                b.stage = 'hit'
            c.stages.append(b.stage)
            action = None
        if b.stage == 'hit':
            assert action == None, '{}: action!!!!!'.format(b.stage)
            b.combat_action = 'hit'
            c.stages.append(b.stage)  # records 'hit' (appended before the stage advances)
            b.target_class = head
            b.stage = 'hit_ack'
            b.target_units = [u for u in b.fire_order if u.owner == opponent and u.group == b.target_class]
            G.logger.write('{}:{} {} targeting {} {}'.format(b.idx, player, b.fire.id, b.target_class, opponent))
            return encode_accept(G, player, opponent)
        if b.stage == 'hit_ack':
            assert action != None, '{}: no action!!!!!'.format(b.stage)
            head, *tail = action
            action = None
            player = b.fire.owner
            opponent = b.attacker if player == b.defender else b.defender
            if not 'hits' in b:  # roll the dice once per combat action
                G.logger.write('ROLLING DICE..............')
                b.hits = roll_dice(G, b, player, opponent)
                b.outcome = b.hits
                G.logger.write('{} hits rolled!'.format(b.hits))
            if b.hits > 0:
                b.stage = 'have_hits'
            else:
                b.stage = 'no_hits'
            c.stages.append(b.stage)
        if b.stage == 'no_hits':
            assert action == None, '{}: action!!!!!'.format(b.stage)
            b.units_hit = None
            b.stage = 'no_hits_ack'
            c.stages.append(b.stage)
            return encode_accept(G, player, opponent)
        if b.stage == 'no_hits_ack':
            assert action != None, '{}: no action!!!!!'.format(b.stage)
            head, *tail = action
            action = None
            b.stage = 'combat_action_ends'
            c.stages.append(b.stage)
        if b.stage == 'have_hits':
            assert action == None, '{}: action!!!!!'.format(b.stage)
            b.units_max_cv = calc_target_units_with_max_cv(b)
            b.types_max_cv = list({u.type for u in b.units_max_cv})
            b.stage = 'have_hits_ack'  # if b.hits == b.outcome else 'more_hits_ack'
            b.units_hit = None
            if len(b.units_max_cv) <= b.hits:
                # every max-CV unit is hit — nothing to choose
                b.units_hit = b.units_max_cv
                return encode_accept(G, player, opponent)
            elif opponent in G.players and len(b.types_max_cv) > 1:
                # the owner chooses which of the equal-CV unit types takes the hit
                b.units_hit = None
                return encode_who_takes_hit_options(G, opponent)
            else:
                b.units_hit = b.units_max_cv[:b.hits]
                return encode_accept(G, player, opponent)
        if b.stage == 'have_hits_ack':  # or b.stage == 'more_hits_ack':
            assert action != None, '{}: no action!!!!!'.format(b.stage)
            head, *tail = action
            action = None
            player = b.fire.owner
            opponent = b.attacker if player == b.defender else b.defender
            b.stage = 'damage_ack'
            c.stages.append(b.stage)
            if head == 'accept':
                assert b.units_hit, '{} ERROR!!!'.format(b.stage)
            else:
                # head is the unit type that should be hit first (from units_max_cv)
                correctTypeUnits = [u for u in b.units_max_cv if u.type == head]
                if len(correctTypeUnits) >= b.hits:
                    b.units_hit = correctTypeUnits[:b.hits]
                else:
                    b.units_hit = correctTypeUnits
            # apply the damage; the damaged player (= opponent) must accept
            b.hits -= len(b.units_hit)
            for unit_hit in b.units_hit:
                apply_damage(G, b, unit_hit)
            return encode_accept(G, opponent, player)
        if b.stage == 'damage_ack':
            assert action != None, '{}: no action!!!!!'.format(b.stage)
            head, *tail = action
            action = None
            if no_units_left(G, c, b, opponent):
                b.winner = player
                b.stage = 'combat_action_ends'
            elif b.hits == 0:
                b.stage = 'combat_action_ends'
            else:
                # hits remain and the opponent is still alive: recompute
                # the target units (they must still match b.target_class)
                b.target_units = [u for u in b.fire_order if u.owner == opponent and u.group == b.target_class]
                if not len(b.target_units):
                    # hits left but no units of that class remain
                    b.stage = 'combat_action_ends'
                else:
                    G.logger.write('{}:{} {} targeting {} {}'.format(b.idx, player, b.fire.id, b.target_class, opponent))
                    b.stage = 'have_hits'
        if b.stage == 'combat_action_ends':
            assert action == None, '{}: action!!!!!'.format(b.stage)
            if b.winner:
                b.stage = 'battle_ends'
            else:
                if 'hits' in b:
                    del b.hits
                    del b.outcome
                b.idx += 1  # not when it was a retreat! check if correct!!!
                if no_units_left(G, c, b, opponent):  # don't think this can happen!
                    b.winner = player
                    b.stage = 'should_NOT_be_here'
                    # NOTE(review): b.opponent below may not exist on b — confirm
                    G.logger.write('{} has no more units! Please accept battle end!'.format(b.opponent))
                elif no_units_left(G, c, b, player):  # after retreating the last of his units
                    b.winner = opponent
                    b.stage = 'should_NOT_be_here'
                    G.logger.write('{} retreated last unit, Land battle ends'.format(player))
                elif b.idx >= len(b.fire_order):
                    b.stage = 'mandatory_rebase'
                    G.logger.write('all units have acted, Land battle ends')
                else:
                    b.fire = b.fire_order[b.idx]
                    player = b.fire.owner
                    b.stage = 'select_combat_action'
                    G.logger.write('{} {} fires next'.format(b.fire.owner, b.fire.id))
            c.stages.append(b.stage)
        if b.stage == 'should_NOT_be_here':
            print('IMPOSSIBLE STAGE!!!!!')
            return encode_accept(G, player, opponent)
        if b.stage == 'retreat':
            b.combat_action = 'retreat'
            b.selectedRetreatUnit = head
            b.selectedRetreatTile = tail[0]
            player = b.fire.owner
            G.logger.write('{}:{} {} RETREATING TO {}'.format(b.idx, player, b.fire.id, b.selectedRetreatTile))
            id = b.selectedRetreatUnit
            unit = G.players[player].units[id]
            tilename = b.selectedRetreatTile
            move_unit(G, unit, tilename)
            # the retreated unit leaves the fire order
            b.fire_order = [u for u in b.fire_order if u.id != id]
            # revert visibility to just the owner
            unit.visible.clear()
            unit.visible.add(player)
            b.stage = 'retreat_ack'
            c.stages.append(b.stage)
            return encode_accept(G, player)
        # TODO: continue implementation later
        if b.stage == 'retreat_ack':
            assert action != None, '{}: no action!!!!!'.format(b.stage)
            head, *tail = action
            action = None
            b.stage = 'combat_action_ends'
            c.stages.append(b.stage)
        if b.stage == 'mandatory_rebase':
            assert action == None, '{}: action!!!!!'.format(b.stage)
            # ANS units must retreat/rebase when they have no friendly ground
            # support; for the tile owner: when there is no ground support
            # AND the enemy has ground units on the tile (cannot both apply)
            code = calc_mandatory_rebase_options(G, b, c)
            if code:
                b.stage = 'mandatory_rebase_ack'
                c.stages.append(b.stage)
                return code
            # NOTE(review): reconstructed from mangled indentation — the
            # battle must end here in every no-options case, otherwise the
            # state machine would loop forever in 'mandatory_rebase'.
            if no_units_left(G, c, b, playerParam):
                b.winner = b.attacker if playerParam == b.defender else b.defender
            b.stage = 'battle_ends'
            c.stages.append(b.stage)
        if b.stage == 'mandatory_rebase_ack':
            assert action != None, '{}: no action!!!!!'.format(b.stage)
            # can also be the tile owner when they have no ground support
            head, *tail = action
            action = None
            # rebase (unit, tile)
            b.selectedRetreatUnit = head
            b.selectedRetreatTile = tail[0]
            id = b.selectedRetreatUnit
            unit = G.players[playerParam].units[id]
            tilename = b.selectedRetreatTile
            move_unit(G, unit, tilename)
            # the rebased unit leaves the fire order
            b.fire_order = [u for u in b.fire_order if u.id != id]
            # revert visibility to just the owner
            unit.visible.clear()
            unit.visible.add(playerParam)
            # further mandatory retreats may remain: recalculate
            b.stage = 'mandatory_rebase'
            c.stages.append(b.stage)
        if b.stage == 'battle_ends':
            # mandatory_rebase has already taken place when we get here —
            # either the battle was decided or all players have acted
            if b.winner:
                make_undisputed(G, G.tiles[b.tilename])
                if (b.owner != b.winner):
                    switch_ownership(G, G.tiles[b.tilename], b.winner)
                    b.owner = b.winner
                if b.owner in G.players:
                    # only the winning owner keeps sight of the surviving units
                    ownerUnits = [u for u in b.fire_order if u.owner == b.owner]
                    for u in ownerUnits:
                        unit = u.unit
                        unit.visible.clear()
                        unit.visible.add(b.owner)
                        G.objects.updated[unit._id] = unit
            b.stage = 'battle_ends_ack'
            c.stages.append(b.stage)
            return encode_accept(G, player, opponent)
        if b.stage == 'battle_ends_ack':
            c.stage = 'battle_ended'
            c.stages.append(b.stage)
            # raise PhaseComplete
            break
    raise PhaseComplete
def sea_battle_phase(G, player, action):
c = G.temp.combat
b = c.battle
if b.stage == 'battle_start': #starting a sea battle
assert action == None, 'there is an action in have_cmd!!!!!'
b.winner = None
b.battle_groups = add_unique_in_order(b.fire_order, 'battle_group')
b.battle_group = None
b.fire_orders = adict()
for bg in b.battle_groups:
b.fire_orders[bg] = [u for u in b.fire_order if (u.owner != b.attacker and u.type != 'Convoy' or u.battle_group == bg)]
b.stage = 'battle_start_ack'
c.stages.append(b.stage)
G.logger.write('sea battle starting in {}'.format(b.tilename))
return encode_accept(G, b.attacker, b.defender)
while (True):
if b.stage == 'battle_start_ack': #player accepted battle start
assert action != None, '{}: no action!!!!!'.format(b.stage)
assert player == b.attacker, '{}: wrong player in {}!!!!!'.format(player, b.stage)
action = None #if got accept action, just delete it and proceed
b.stage = 'battle_round_start'
c.stages.append(b.stage)
if b.stage == 'battle_round_start':
b.roundWinner = None
b.idx = 0
#attacker must select battle group, fire_orders should be upToDate
lst = [(s,) for s in b.fire_orders]
b.stage = 'battle_round_start_ack'
c.stages.append(b.stage)
return encode_list(G, b.attacker, lst)
if b.stage == 'battle_round_start_ack':
#when getting here, should have a battle group: head
assert action != None, '{}: no action!!!!!'.format(b.stage)
assert player == b.attacker, '{}: wrong player in {}!!!!!'.format(player, b.stage)
head, *tail = action
action = None
b.battle_group = head
fire_order = b.fire_orders[head]
b.fire = fire_order[b.idx]
player = b.fire.owner
b.stage = 'select_combat_action'
c.stages.append(b.stage)
if b.stage == 'select_combat_action':
#arriving here, b.fire should be in place and player should be b.fire.owner!
assert action == None, '{}: action!!!!!'.format(b.stage)
assert b.fire and player == b.fire.owner, '{} ERROR!!! b={}'.format(b.stage, b)
b.stage = 'select_combat_action_ack'
c.stages.append(b.stage)
if 'combat_action' in b:
del b.combat_action
opponent = b.attacker if player == b.defender else b.defender
#who can be targeted?
#if player==attacker all opponent units in b.fire_order can be targeted
units = b.fire_orders[b.battle_group]
if player == b.attacker:
b.opp_types = list({u.type for u in b.fire_order if u.owner == opponent})
b.opp_groups = list({u.group for u in b.fire_order if u.owner == opponent})
else:
#otherwise can only target selected battle group or convoys
#convoys have u.battle_group == None since they do not fight at sea!
b.opp_types = []
for u in b.fire_order:
if not u.type in b.opp_types and u.owner == opponent and \
(not u.battle_group or u.battle_group == b.battle_group):
b.opp_types.append(u.type)
b.opp_groups = []
for u in b.fire_order:
if not u.group in b.opp_groups and u.owner == opponent and \
(not u.battle_group or u.battle_group == b.battle_group):
b.opp_groups.append(u.group)
#print('done')
# b.opp_types = list({
# u.type
# for u in units
# if u.owner == opponent and (not u.battle_group or u.battle_group == b.battle_group)
# })
# b.opp_groups = list({
# u.group
# for u in units
# if u.owner == opponent and (not u.battle_group or u.battle_group == b.battle_group)
# })
#retreat options should be same as usual
b.retreat_options = calc_retreat_options_for_fire_unit(G, player, b, c)
#encode all possible target_class or retreat_tile options in code
code = encode_cmd_options(G, player)
b.target_class = None
b.target_units = None
return code
if b.stage == 'select_combat_action_ack':
assert action != None, '{}: no action!!!!!'.format(b.stage)
head, *tail = action
player = b.fire.owner
opponent = b.attacker if player == b.defender else b.defender
if len(action) > 1:
b.stage = 'retreat'
else:
b.stage = 'hit'
c.stages.append(b.stage)
action = None
if b.stage == 'hit':
assert action == None, '{}: action error!!!!!'.format(b.stage)
assert head and len(head) == 1, '{}: head error!!!!!'.format(b.stage)
b.combat_action = 'hit'
b.target_class = head
b.stage = 'hit_ack'
c.stages.append(b.stage)
#calc target_units according to selected battle group
b.target_units = []
for u in b.fire_order:
if u.owner == opponent and u.group == b.target_class:
if opponent == b.attacker and u.battle_group and u.battle_group != b.battle_group:
continue
b.target_units.append(u)
G.logger.write('{}:{} {} targeting {} {}'.format(b.idx, player, b.fire.id, b.target_class, opponent))
return encode_accept(G, player, opponent)
if b.stage == 'hit_ack':
assert action != None, '{}: no action!!!!!'.format(b.stage)
head, *tail = action
action = None
player = b.fire.owner
opponent = b.attacker if player == b.defender else b.defender
if not 'hits' in b: #ROLL DICE!!!!!!!!
G.logger.write('ROLLING DICE..............')
b.hits = roll_dice(G, b, player, opponent)
b.outcome = b.hits
G.logger.write('{} hits rolled!'.format(b.hits))
if b.hits > 0:
b.stage = 'have_hits'
else:
b.stage = 'no_hits'
c.stages.append(b.stage)
if b.stage == 'no_hits':
assert action == None, '{}: action!!!!!'.format(b.stage)
b.units_hit = None
b.stage = 'no_hits_ack'
c.stages.append(b.stage)
return encode_accept(G, player, opponent)
if b.stage == 'no_hits_ack':
assert action != None, '{}: no action!!!!!'.format(b.stage)
head, *tail = action
action = None
b.stage = 'combat_action_ends'
c.stages.append(b.stage)
if b.stage == 'have_hits':
assert action == None, '{}: action!!!!!'.format(b.stage)
b.units_max_cv = calc_target_units_with_max_cv(b)
b.types_max_cv = list({u.type for u in b.units_max_cv})
b.stage = 'have_hits_ack' # if b.hits == b.outcome else 'more_hits_ack'
b.units_hit = None
if len(b.units_max_cv) <= b.hits:
b.units_hit = b.units_max_cv
return encode_accept(G, player, opponent)
elif opponent in G.players and len(b.types_max_cv) > 1:
# The owner can choose which of equal-CV unit takes hit
b.units_hit = None
return encode_who_takes_hit_options(G, opponent)
else:
b.units_hit = b.units_max_cv[:b.hits]
return encode_accept(G, player, opponent)
if b.stage == 'have_hits_ack': # or b.stage == 'more_hits_ack':
assert action != None, '{}: no action!!!!!'.format(b.stage)
head, *tail = action
action = None
player = b.fire.owner
opponent = b.attacker if player == b.defender else b.defender
b.stage = 'damage_ack'
c.stages.append(b.stage)
if head == 'accept':
assert b.units_hit, '{} ERROR!!!'.format(b.stage)
else:
#head is type of units that should be hit first
#use units_max_cv
correctTypeUnits = [u for u in b.units_max_cv if u.type == head]
if len(correctTypeUnits) >= b.hits:
b.units_hit = correctTypeUnits[:b.hits]
else:
b.units_hit = correctTypeUnits
#apply damage to units_hit for damaged player (=opponent) to accept
b.hits -= len(b.units_hit)
for unit_hit in b.units_hit:
apply_damage_sea(G, b, unit_hit)
return encode_accept(G, opponent, player)
if b.stage == 'damage_ack':
assert action != None, '{}: no action!!!!!'.format(b.stage)
player = b.fire.owner
opponent = b.attacker if player ==b.defender else b.defender
head, *tail = action
action = None
#look if there are target units left
#if not, goto no_target_units_left
if no_units_left(G, c, b, opponent):
b.winner = player
b.stage = 'combat_action_ends'
elif opponent == b.attacker and no_units_in_battle_group_left(G,c,b,opponent):
#eliminate this battle_group from fire_orders
del b.fire_orders[b.battle_group]
b.battle_groups.remove(b.battle_group)
b.battle_group = None
#end combat round
b.roundWinner = player
b.stage = 'combat_action_ends'
elif b.hits == 0:
b.stage = 'combat_action_ends'
else: #there are still hits left and still opp is alive
#recompute target units (units have to be b.target_class)
b.target_units = []
for u in b.fire_order:
if u.owner == opponent and u.group == b.target_class:
if opponent == b.attacker and u.battle_group and u.battle_group != b.battle_group:
continue
b.target_units.append(u)
if not len(b.target_units):
#hits left but no units of that class
b.stage = 'combat_action_ends'
else:
b.stage = 'have_hits'
if b.stage == 'combat_action_ends':
assert action == None, '{}: action!!!!!'.format(b.stage)
player = b.fire.owner
opponent = b.attacker if player ==b.defender else b.defender
if b.winner:
b.stage = 'battle_ends'
elif b.roundWinner: #kommt nur vor wenn battle group tot aber noch andere battle group exists
if 'hits' in b:
del b.hits
del b.outcome
b.stage = 'combat_round_ends'
else:
if 'hits' in b:
del b.hits
del b.outcome
b.idx += 1 #not when it was a retreat! check if correct!!!
if no_units_left(G, c, b, opponent): #dont think this can happen!
b.winner = player
b.stage = 'should_NOT_be_here'
G.logger.write('{} has no more units! Please accept battle end!'.format(b.opponent))
elif no_units_left(G, c, b, player): #after retreating last of his units
b.winner = opponent
b.stage = 'should_NOT_be_here'
G.logger.write('{} retreated last unit, Sea battle ends'.format(player))
elif player == b.attacker and no_units_in_battle_group_left(G,c,b,player):
#player retreated his last unit from this battleGroup
#eliminate this battle_group from fire_orders
del b.fire_orders[b.battle_group]
#end combat round
b.roundWinner = opponent
b.stage = 'combat_action_ends'
elif b.idx >= len(b.fire_orders[b.battle_group]):
b.stage = 'combat_round_ends'
G.logger.write('all units have acted, Land battle ends')
else:
b.fire = b.fire_orders[b.battle_group][b.idx]
player = b.fire.owner
b.stage = 'select_combat_action'
G.logger.write('{} {} fires next'.format(b.fire.owner, b.fire.id))
c.stages.append(b.stage)
if b.stage == 'should_NOT_be_here':
print('IMPOSSIBLE STAGE!!!!!')
pass
if b.stage == 'retreat':
b.combat_action = 'retreat'
b.selectedRetreatUnit = head
b.selectedRetreatTile = tail[0]
player = b.fire.owner
G.logger.write('{}:{} {} RETREATING TO {}'.format(b.idx, player, b.fire.id, b.selectedRetreatTile))
id = b.selectedRetreatUnit
unit = G.players[player].units[id]
tilename = b.selectedRetreatTile
move_unit(G, unit, tilename)
#er entfernt hier die rebased unit!!!
b.fire_order = [u for u in b.fire_order if u.id != id]
#re-compute b.fire_orders per battle_group!
b.fire_orders = adict()
for bg in b.battle_groups:
b.fire_orders[bg] = [u for u in b.fire_order if (u.owner != b.attacker and u.type != 'Convoy' or u.battle_group == bg)]
#b.idx stays the same
#revert visibility to just owner!
unit.visible.clear()
unit.visible.add(player)
b.stage = 'retreat_ack'
c.stages.append(b.stage)
return encode_accept(G,player)
if b.stage == 'retreat_ack':
assert action != None, '{}: no action!!!!!'.format(b.stage)
head, *tail = action
action = None
b.stage = 'combat_action_ends'
c.stages.append(b.stage)
if b.stage == 'combat_round_ends':
#muss air rebase machen
#all airForce units in fire_orders[b.battle_group] (both players!) have to rebase!
#wie kann ich beide players handlen?
#zuerst fuer attacker
b.stage = 'combat_round_ends_attacker'
c.stages.append(b.stage)
if b.stage == 'combat_round_ends_attacker':
player = b.attacker
lst = []
#achtung! b.fire_orders[b.battle_group] could have been deleted!
if b.battle_group and b.battle_group in b.fire_orders:
for u in b.fire_orders[b.battle_group]:
if u.type == 'AirForce' and u.owner == player:
retreat_options = calc_retreat_options_for_unit(G, player, b, c, u)
lst.extend(retreat_options)
if len(lst):
code = encode_list(G,player,lst)
b.stage = 'air_rebase_attacker_ack'
c.stages.append(b.stage)
return code
b.stage = 'combat_round_ends_defender'
c.stages.append(b.stage)
if b.stage == 'air_rebase_attacker_ack':
#player selected air rebase option
#remove option from list and recalc list
assert action != None, '{}: no action!!!!!'.format(b.stage)
head, *tail = action
action = None
player = b.attacker
b.selectedRetreatUnit = head
b.selectedRetreatTile = tail[0]
G.logger.write('{}:{} {} RETREATING TO {}'.format(b.idx, player, b.fire.id, b.retreats[head]))
id = b.selectedRetreatUnit
unit = G.players[player].units[id]
tilename = b.selectedRetreatTile
move_unit(G, unit, tilename)
#er entfernt hier die rebased unit!!!
b.fire_orders[b.battle_group] = [u for u in b.fire_order if u.id != id]
#revert visibility to just owner!
unit.visible.clear()
unit.visible.add(player)
b.stage = 'combat_round_ends_attacker'
c.stages.append(b.stage)
if b.stage == 'combat_round_ends_defender':
player = b.defender
lst = []
#battle_group could have been eliminated therefore use b.fire_order!
for u in b.fire_order:
if u.type == 'AirForce' and u.owner == player:
retreat_options = calc_retreat_options_for_unit(G, player, b, c, u)
lst.extend(retreat_options)
if len(lst):
code = encode_list(G,player,lst)
b.stage = 'air_rebase_defender_ack'
return code
else:
b.stage = 'round_end_after_air_rebase'
if b.stage == 'air_rebase_defender_ack':
#player selected air rebase option
#remove option from list and recalc list
assert action != None, '{}: no action!!!!!'.format(b.stage)
head, *tail = action
action = None
player = b.defender
b.selectedRetreatUnit = head
b.selectedRetreatTile = tail[0]
G.logger.write('{}:{} {} RETREATING TO {}'.format(b.idx, player, b.fire.id, b.retreats[head]))
id = b.selectedRetreatUnit
unit = G.players[player].units[id]
tilename = b.selectedRetreatTile
move_unit(G, unit, tilename)
#er entfernt hier die rebased unit!!!
b.fire_orders[b.battle_group] = [u for u in b.fire_order if u.id != id]
#revert visibility to just owner!
unit.visible.clear()
unit.visible.add(player)
b.stage = 'combat_round_ends_defender'
c.stages.append(b.stage)
if b.stage == 'round_end_after_air_rebase':
G.logger.write('combat round ends after air rebase!!!')
if len(b.fire_orders):
b.stage = 'battle_round_start'
else:
b.stage = 'battle_ends'
c.stages.append(b.stage)
if b.stage == 'battle_ends':
#mandatory_rebase has already taken place when here!!!
#either because it is decided or because all players have acted
if b.winner:
b.owner = b.winner
if b.owner in G.players:
ownerUnits = [u for u in b.fire_order if u.owner == b.owner]
for u in ownerUnits:
unit = u.unit
unit.visible.clear()
unit.visible.add(b.owner)
G.objects.updated[unit._id] = unit
b.stage = 'battle_ends_ack'
c.stages.append(b.stage)
return encode_accept(G, player, opponent)
if b.stage == 'battle_ends_ack':
c.stage = 'battle_ended'
c.stages.append(b.stage)
# raise PhaseComplete
break
raise PhaseComplete
| 35.39673
| 124
| 0.647152
| 6,416
| 41,131
| 4.023691
| 0.064526
| 0.043229
| 0.011853
| 0.02115
| 0.810234
| 0.783855
| 0.754222
| 0.730129
| 0.719515
| 0.712271
| 0
| 0.00197
| 0.222606
| 41,131
| 1,161
| 125
| 35.427218
| 0.80541
| 0.187158
| 0
| 0.789177
| 0
| 0
| 0.108849
| 0.013715
| 0
| 0
| 0
| 0.000861
| 0.042841
| 1
| 0.027058
| false
| 0.001127
| 0.006764
| 0.001127
| 0.096956
| 0.002255
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
6097f67ab37dab3cab65a1ff9701cadea1d9c727
| 8,462
|
py
|
Python
|
rest_api/tests/test_doctor_crud.py
|
MyLifeUa/rest-api
|
acf7c6fefdc3cfd90a9ba5a1a1be3d5dae3c90ba
|
[
"MIT"
] | null | null | null |
rest_api/tests/test_doctor_crud.py
|
MyLifeUa/rest-api
|
acf7c6fefdc3cfd90a9ba5a1a1be3d5dae3c90ba
|
[
"MIT"
] | 2
|
2021-03-26T09:16:51.000Z
|
2021-03-26T09:17:07.000Z
|
rest_api/tests/test_doctor_crud.py
|
my-life-ua/rest-api
|
acf7c6fefdc3cfd90a9ba5a1a1be3d5dae3c90ba
|
[
"MIT"
] | null | null | null |
from django.contrib.auth.models import User
from rest_framework.status import (
HTTP_200_OK,
HTTP_201_CREATED,
HTTP_400_BAD_REQUEST,
HTTP_401_UNAUTHORIZED,
HTTP_403_FORBIDDEN
)
from rest_framework.test import APITestCase
from .utils import login, create_user_and_login
from ..models import Client, Doctor
class DoctorRegistrationTest(APITestCase):
    """Tests for the doctor-registration endpoint (POST /doctors)."""

    def test_new_doctor_missing_authentication(self):
        """An anonymous request must be rejected with 401."""
        payload = {"email": "vr@ua.pt", "password": "pwd", "first_name": "Vasco",
                   "last_name": "Ramos"}
        resp = self.client.post("/doctors", payload)
        self.assertEqual(resp.status_code, HTTP_401_UNAUTHORIZED)

    def test_new_doctor_missing_authorization(self):
        """A client-role user may not create doctor accounts (403)."""
        create_user_and_login(self.client, "client", "vasco", "vr@ua.pt", "pwd")
        payload = {"email": "vr@ua.pt", "password": "pwd", "first_name": "Vasco",
                   "last_name": "Ramos"}
        resp = self.client.post("/doctors", payload)
        self.assertEqual(resp.status_code, HTTP_403_FORBIDDEN)

    def test_new_doctor_missing_parameters(self):
        """Each incomplete payload variant is rejected with 400."""
        create_user_and_login(self.client, "custom_admin", "vasco", "vr@ua.pt", "pwd")
        incomplete_payloads = [
            {"email": "vr@ua.pt"},
            {"email": "vr@ua.pt", "password": "pwd"},
            {"email": "vr@ua.pt", "password": "pwd", "first_name": "Vasco"},
        ]
        for payload in incomplete_payloads:
            resp = self.client.post("/doctors", payload)
            self.assertEqual(resp.status_code, HTTP_400_BAD_REQUEST)

    def test_new_doctor_right_parameters(self):
        """A complete payload posted by an admin creates the doctor (201)."""
        create_user_and_login(self.client, "custom_admin", "vasco", "vr@ua.pt", "pwd")
        resp = self.client.post("/doctors",
                                {"email": "j.vasconcelos99@ua.pt", "password": "pwd",
                                 "first_name": "Vasco", "last_name": "Ramos",
                                 "birth_date": "2020-03-04"})
        self.assertEqual(resp.status_code, HTTP_201_CREATED)
class DoctorUpdateTest(APITestCase):
    """Tests for updating a doctor's profile (PUT /doctors/<email>)."""

    def setUp(self):
        """Create a doctor through an admin account and log in as that doctor."""
        create_user_and_login(self.client, "custom_admin", "vasco", "vr@ua.pt", "pwd")
        resp = self.client.post("/doctors",
                                {"email": "vr@ua.pt", "password": "pwd", "first_name": "Vasco",
                                 "last_name": "Ramos", "birth_date": "2020-03-04"})
        self.assertEqual(resp.status_code, HTTP_201_CREATED)
        login(self.client, "vr@ua.pt", "pwd")

    def test_update_nothing(self):
        """An empty update payload is accepted (200)."""
        resp = self.client.put("/doctors/vr@ua.pt", {})
        self.assertEqual(resp.status_code, HTTP_200_OK)

    def test_update_wrong_parameters(self):
        """Unknown fields are silently ignored rather than rejected (200)."""
        resp = self.client.put("/doctors/vr@ua.pt", {"aaaa": "aaa"})
        self.assertEqual(resp.status_code, HTTP_200_OK)

    def test_update_wrong_parameters_type(self):
        """A badly-typed field value is rejected (400)."""
        resp = self.client.put("/doctors/vr@ua.pt", {"birth_date": 2})
        self.assertEqual(resp.status_code, HTTP_400_BAD_REQUEST)

    def test_correct_update(self):
        """A valid single-field update succeeds (200)."""
        resp = self.client.put("/doctors/vr@ua.pt", {"last_name": "joao"})
        self.assertEqual(resp.status_code, HTTP_200_OK)
class DoctorDeleteTest(APITestCase):
    """Tests for removing doctor accounts (DELETE /doctors/<email>)."""

    def setUp(self):
        """Create the doctor v@ua.pt via an admin account, then log in as it."""
        create_user_and_login(self.client, "custom_admin", "vasco", "vr@ua.pt", "pwd")
        resp = self.client.post("/doctors",
                                {"email": "v@ua.pt", "password": "pwd", "first_name": "Vasco",
                                 "last_name": "Ramos", "birth_date": "2020-03-04"})
        self.assertEqual(resp.status_code, HTTP_201_CREATED)
        login(self.client, "v@ua.pt", "pwd")

    def test_delete_non_existent_user(self):
        """Deleting an unknown email yields 400."""
        resp = self.client.delete("/doctors/vr99@ua.pt")
        self.assertEqual(resp.status_code, HTTP_400_BAD_REQUEST)

    def test_delete_non_doctor_account(self):
        """An account that is not a doctor cannot be removed here (403)."""
        User.objects.create_superuser("admin", "admin@ua.pt", "pwd")
        resp = self.client.delete("/doctors/admin")
        self.assertEqual(resp.status_code, HTTP_403_FORBIDDEN)

    def test_delete_other_doctor_account(self):
        """A doctor may not delete a colleague's account (403)."""
        create_user_and_login(self.client, "custom_admin", "vasco99", "vr@ua.pt", "pwd")
        resp = self.client.post("/doctors",
                                {"email": "ze@ua.pt", "password": "pwd", "first_name": "Ze",
                                 "last_name": "Costa", "birth_date": "2020-03-04"})
        self.assertEqual(resp.status_code, HTTP_201_CREATED)
        login(self.client, "v@ua.pt", "pwd")
        resp = self.client.delete("/doctors/ze@ua.pt")
        self.assertEqual(resp.status_code, HTTP_403_FORBIDDEN)

    def test_client_delete_doctor_account(self):
        """A client-role user may not delete a doctor (403)."""
        resp = self.client.post("/clients", {"email": "joana@ua.pt", "password": "pwd",
                                             "first_name": "Vasco", "last_name": "Ramos",
                                             "height": 1.60, "weight_goal": 65,
                                             "current_weight": 90, "sex": "Male",
                                             "birth_date": "2020-03-04"})
        self.assertEqual(resp.status_code, HTTP_201_CREATED)
        login(self.client, "joana@ua.pt", "pwd")
        resp = self.client.delete("/doctors/v@ua.pt")
        self.assertEqual(resp.status_code, HTTP_403_FORBIDDEN)

    def test_admin_delete_doctor_account(self):
        """Deleting v@ua.pt succeeds (200).

        NOTE(review): setUp leaves the doctor itself logged in, so despite the
        name this request is issued by the doctor account — confirm intent.
        """
        resp = self.client.delete("/doctors/v@ua.pt")
        self.assertEqual(resp.status_code, HTTP_200_OK)

    def test_delete_self(self):
        """A doctor can delete their own account (200)."""
        resp = self.client.delete("/doctors/v@ua.pt")
        self.assertEqual(resp.status_code, HTTP_200_OK)
class GetDoctorTest(APITestCase):
    """Tests for reading a doctor's profile (GET /doctors/<email>)."""

    def setUp(self):
        """Create a doctor, one client without a doctor, and one assigned to it."""
        # Client without a doctor
        response = self.client.post("/clients", {"email": "tos@ua.pt", "password": "pwd", "first_name": "Tomas",
                                                 "last_name": "Ramos", "height": 1.60, "weight_goal": 65,
                                                 "current_weight": 90, "sex": "Male",
                                                 "birth_date": "2020-03-04"})
        self.assertEqual(response.status_code, HTTP_201_CREATED)
        create_user_and_login(self.client, "custom_admin", "vasco", "vr@ua.pt", "pwd")
        response = self.client.post("/doctors", {"email": "ana@ua.pt", "password": "pwd", "first_name": "Vasco",
                                                 "last_name": "Ramos", "birth_date": "2020-03-04"})
        self.assertEqual(response.status_code, HTTP_201_CREATED)
        self.doctor = Doctor.objects.get(user__auth_user__username="ana@ua.pt")
        # Client with doctor
        # BUG FIX: this response was previously discarded, so the assertion
        # below re-checked the stale doctor-creation response instead of
        # verifying that this client was actually created.
        response = self.client.post("/clients",
                                    {"email": "ana99@ua.pt", "password": "pwd", "first_name": "Tomas", "last_name": "Ramos",
                                     "sex": "Male",
                                     "height": 1.60, "weight_goal": 65, "current_weight": 90, "birth_date": "2020-03-04"})
        self.assertEqual(response.status_code, HTTP_201_CREATED)
        Client.objects.filter(user__auth_user__username="ana99@ua.pt").update(doctor=self.doctor)

    def test_get_doctor_info_other_client(self):
        """A client not assigned to the doctor may not read the profile (403)."""
        login(self.client, "tos@ua.pt", "pwd")
        response = self.client.get("/doctors/ana@ua.pt")
        self.assertEqual(response.status_code, HTTP_403_FORBIDDEN)

    def test_get_doctor_self_info(self):
        """A doctor can read their own profile (200)."""
        login(self.client, "ana@ua.pt", "pwd")
        response = self.client.get("/doctors/ana@ua.pt")
        self.assertEqual(response.status_code, HTTP_200_OK)

    def test_get_doctor_info_client_doctor(self):
        """A client assigned to the doctor can read the profile (200)."""
        login(self.client, "ana99@ua.pt", "pwd")
        response = self.client.get("/doctors/ana@ua.pt")
        self.assertEqual(response.status_code, HTTP_200_OK)

    def test_get_doctor_info_admin(self):
        """The admin session left over from setUp can read the profile (200)."""
        response = self.client.get("/doctors/ana@ua.pt")
        self.assertEqual(response.status_code, HTTP_200_OK)

    def test_get_doctor_info_other_hospital_admin(self):
        """An admin of a different hospital may not read the profile (403)."""
        create_user_and_login(self.client, "custom_admin", "ant@ua.pt", "ant@ua.pt", "pwd", "Other Hospital")
        response = self.client.get("/doctors/ana@ua.pt")
        self.assertEqual(response.status_code, HTTP_403_FORBIDDEN)
| 48.354286
| 120
| 0.61333
| 1,028
| 8,462
| 4.798638
| 0.11284
| 0.0373
| 0.130549
| 0.164606
| 0.797486
| 0.774579
| 0.746402
| 0.727752
| 0.712345
| 0.647071
| 0
| 0.030666
| 0.240841
| 8,462
| 174
| 121
| 48.632184
| 0.737235
| 0.004963
| 0
| 0.492424
| 0
| 0
| 0.195794
| 0.002495
| 0
| 0
| 0
| 0
| 0.212121
| 1
| 0.166667
| false
| 0.090909
| 0.037879
| 0
| 0.234848
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
60cc9ad3ed14a7370639391d34cf308154f13c2b
| 11,070
|
py
|
Python
|
re-id-data.py
|
jakobGTO/deep-person-reid
|
ede995982c2df39072c3bcf392c805062e8193b6
|
[
"MIT"
] | null | null | null |
re-id-data.py
|
jakobGTO/deep-person-reid
|
ede995982c2df39072c3bcf392c805062e8193b6
|
[
"MIT"
] | null | null | null |
re-id-data.py
|
jakobGTO/deep-person-reid
|
ede995982c2df39072c3bcf392c805062e8193b6
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import sys
import os
import os.path as osp
import random
import copy
import numpy as np
from torchreid.data import ImageDataset
import torchreid
class RGBDataset(ImageDataset):
    """RGB modality of SYSU-MM01 as a torchreid ``ImageDataset``.

    Builds the train/query/gallery lists of ``(img_path, pid, camid)``
    tuples expected by torchreid:
      - img_path (str): absolute path to an image file.
      - pid (int): 0-based person label, re-numbered per split.
      - camid (int): 0-based camera id.
    Query and gallery share one pid scope; train has its own.
    Fix: removed the unused ``cams``/``test_idx``/``val_idx``/``all_idx``
    locals (and their needless disk reads) and hoisted the repeated
    hard-coded data path into class-level constants.
    """

    dataset_dir = 'new_dataset'

    # Location of the SYSU-MM01 split files and RGB images on disk.
    _data_root = "D:/thesis-data/SYSU-MM01/"
    _image_root = _data_root + "RGB/"

    def __init__(self, root='', **kwargs):
        self.root = osp.abspath(osp.expanduser(root))
        self.dataset_dir = osp.join(self.root, self.dataset_dir)

        train = []
        query = []
        gallery = []

        # Person ids belonging to the training split; every other id is test.
        train_idx = np.loadtxt(self._data_root + "train_id.txt", delimiter=",", dtype=int)

        # RGB cameras of SYSU-MM01 mapped to 0-based camera ids.
        cam_table = {'cam1': 0, 'cam2': 1, 'cam4': 2, 'cam5': 3}

        # First pass: collect the person ids of each split so labels can be
        # re-numbered contiguously from 0.
        pid_container_train = set()
        pid_container_test = set()
        for cam in cam_table:
            for person in os.listdir(self._image_root + cam):
                pid = int(person)
                if pid in train_idx:
                    pid_container_train.add(pid)
                else:
                    pid_container_test.add(pid)
        pid2label_train = {pid: label for label, pid in enumerate(pid_container_train)}
        pid2label_test = {pid: label for label, pid in enumerate(pid_container_test)}

        # Second pass: route each image to train, query (cam1) or gallery
        # (cam2); test images from other cameras are intentionally dropped,
        # matching the original behaviour.
        for cam, camid in cam_table.items():
            for person in os.listdir(self._image_root + cam):
                pid = int(person)
                person_dir = self._image_root + cam + "/" + person
                for image in os.listdir(person_dir):
                    img_path = person_dir + "/" + image
                    if pid in train_idx:
                        train.append((img_path, pid2label_train[pid], camid))
                    elif camid == 0:
                        query.append((img_path, pid2label_test[pid], camid))
                    elif camid == 1:
                        gallery.append((img_path, pid2label_test[pid], camid))

        super(RGBDataset, self).__init__(train, query, gallery, **kwargs)
class TIRDataset(ImageDataset):
    """Thermal-infrared (TIR) modality of SYSU-MM01 as a torchreid ``ImageDataset``.

    Builds the train/query/gallery lists of ``(img_path, pid, camid)``
    tuples expected by torchreid:
      - img_path (str): absolute path to an image file.
      - pid (int): 0-based person label, re-numbered per split.
      - camid (int): 0-based camera id.
    Query and gallery share one pid scope; train has its own.
    Fix: removed the unused ``cams``/``test_idx``/``val_idx``/``all_idx``
    locals (and their needless disk reads) and hoisted the repeated
    hard-coded data path into class-level constants.
    """

    dataset_dir = 'new_dataset'

    # Location of the SYSU-MM01 split files and TIR images on disk.
    _data_root = "D:/thesis-data/SYSU-MM01/"
    _image_root = _data_root + "TIR/"

    def __init__(self, root='', **kwargs):
        self.root = osp.abspath(osp.expanduser(root))
        self.dataset_dir = osp.join(self.root, self.dataset_dir)

        train = []
        query = []
        gallery = []

        # Person ids belonging to the training split; every other id is test.
        train_idx = np.loadtxt(self._data_root + "train_id.txt", delimiter=",", dtype=int)

        # TIR cameras of SYSU-MM01 mapped to 0-based camera ids.
        cam_table = {'cam3': 0, 'cam6': 1}

        # First pass: collect the person ids of each split so labels can be
        # re-numbered contiguously from 0.
        pid_container_train = set()
        pid_container_test = set()
        for cam in cam_table:
            for person in os.listdir(self._image_root + cam):
                pid = int(person)
                if pid in train_idx:
                    pid_container_train.add(pid)
                else:
                    pid_container_test.add(pid)
        pid2label_train = {pid: label for label, pid in enumerate(pid_container_train)}
        pid2label_test = {pid: label for label, pid in enumerate(pid_container_test)}

        # Second pass: route each image to train, query (cam6, camid 1) or
        # gallery (cam3, camid 0) — note the split is inverted relative to
        # the RGB dataset, matching the original behaviour.
        for cam, camid in cam_table.items():
            for person in os.listdir(self._image_root + cam):
                pid = int(person)
                person_dir = self._image_root + cam + "/" + person
                for image in os.listdir(person_dir):
                    img_path = person_dir + "/" + image
                    if pid in train_idx:
                        train.append((img_path, pid2label_train[pid], camid))
                    elif camid == 1:
                        query.append((img_path, pid2label_test[pid], camid))
                    else:
                        gallery.append((img_path, pid2label_test[pid], camid))

        super(TIRDataset, self).__init__(train, query, gallery, **kwargs)
def train_rgb_net():
    """Train a softmax ResNet-50 re-id model on the RGB modality of SYSU-MM01.

    Registers :class:`RGBDataset` under the name ``'SYSU-MM01'`` and saves
    checkpoints under ``Trained-RGB-model``.
    """
    torchreid.data.register_image_dataset('SYSU-MM01', RGBDataset)

    datamanager = torchreid.data.ImageDataManager(
        root='reid-data', sources='SYSU-MM01', targets='SYSU-MM01',
        height=256, width=128,
        batch_size_train=32, batch_size_test=100,
        transforms=['random_flip', 'random_crop'],
        workers=1, combineall=False,
    )
    model = torchreid.models.build_model(
        name='resnet50', num_classes=datamanager.num_train_pids,
        loss='softmax', pretrained=True,
    ).cuda()
    optimizer = torchreid.optim.build_optimizer(model, optim='adam', lr=0.0003)
    scheduler = torchreid.optim.build_lr_scheduler(
        optimizer, lr_scheduler='single_step', stepsize=20,
    )
    engine = torchreid.engine.ImageSoftmaxEngine(
        datamanager, model,
        optimizer=optimizer, scheduler=scheduler, label_smooth=True,
    )
    engine.run(
        save_dir='Trained-RGB-model',
        max_epoch=100, eval_freq=10, print_freq=10, test_only=False,
    )
def train_tir_net():
    """Train a softmax ResNet-50 re-id model on the TIR modality of SYSU-MM01.

    Registers :class:`TIRDataset` under the name ``'SYSU-MM01'`` and saves
    checkpoints under ``Trained-TIR-model``.
    """
    torchreid.data.register_image_dataset('SYSU-MM01', TIRDataset)

    datamanager = torchreid.data.ImageDataManager(
        root='reid-data', sources='SYSU-MM01', targets='SYSU-MM01',
        height=256, width=128,
        batch_size_train=32, batch_size_test=100,
        transforms=['random_flip', 'random_crop'],
        workers=1, combineall=False,
    )
    model = torchreid.models.build_model(
        name='resnet50', num_classes=datamanager.num_train_pids,
        loss='softmax', pretrained=True,
    ).cuda()
    optimizer = torchreid.optim.build_optimizer(model, optim='adam', lr=0.0003)
    scheduler = torchreid.optim.build_lr_scheduler(
        optimizer, lr_scheduler='single_step', stepsize=20,
    )
    engine = torchreid.engine.ImageSoftmaxEngine(
        datamanager, model,
        optimizer=optimizer, scheduler=scheduler, label_smooth=True,
    )
    engine.run(
        save_dir='Trained-TIR-model',
        max_epoch=100, eval_freq=10, print_freq=10, test_only=False,
    )
if __name__ == '__main__':
    # Evaluation entry point: training is done by the two commented calls
    # below; here a previously trained RGB model is loaded and only tested
    # (engine.run(..., test_only=True)).
    #train_rgb_net()
    #train_tir_net()
    torchreid.data.register_image_dataset('SYSU-MM01', RGBDataset)
    datamanager = torchreid.data.ImageDataManager(
        root='reid-data',
        sources='SYSU-MM01',
        targets='SYSU-MM01',
        height=256,
        width=128,
        batch_size_train=32,
        batch_size_test=100,
        transforms=['random_flip', 'random_crop'],
        workers=1,
        combineall = False
    )
    model = torchreid.models.build_model(
        name='resnet50',
        num_classes=datamanager.num_train_pids,
        loss='softmax',
        pretrained=True
    )
    model = model.cuda()
    # Optimizer/scheduler are built because ImageSoftmaxEngine requires
    # them, even though no training step runs in test-only mode.
    optimizer = torchreid.optim.build_optimizer(
        model,
        optim='adam',
        lr=0.0003
    )
    scheduler = torchreid.optim.build_lr_scheduler(
        optimizer,
        lr_scheduler='single_step',
        stepsize=20
    )
    # Load the checkpoint written by train_rgb_net() after 100 epochs.
    torchreid.utils.load_pretrained_weights(model, 'Trained-RGB-model/model/model.pth.tar-100')
    engine = torchreid.engine.ImageSoftmaxEngine(
        datamanager,
        model,
        optimizer=optimizer,
        scheduler=scheduler,
        label_smooth=True
    )
    engine.run(
        max_epoch=100,
        eval_freq=10,
        print_freq=10,
        test_only=True
    )
| 32.654867
| 101
| 0.526197
| 1,224
| 11,070
| 4.603758
| 0.150327
| 0.044011
| 0.042946
| 0.058563
| 0.923869
| 0.923869
| 0.912866
| 0.908607
| 0.905768
| 0.904703
| 0
| 0.027265
| 0.363866
| 11,070
| 339
| 102
| 32.654867
| 0.772934
| 0.106865
| 0
| 0.801587
| 0
| 0
| 0.114132
| 0.075581
| 0
| 0
| 0
| 0
| 0
| 1
| 0.015873
| false
| 0
| 0.043651
| 0
| 0.075397
| 0.015873
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
60e3ae86cce6553ec13c9a1948ed35f5cee4d207
| 2,665
|
py
|
Python
|
tests/feeds/test_bct_usd_feed.py
|
tellor-io/telliot-feed-examples
|
3f825c90ad372f42c89eee0f5b54250f22ec0728
|
[
"MIT"
] | 7
|
2021-11-10T21:14:57.000Z
|
2022-03-26T07:27:23.000Z
|
tests/feeds/test_bct_usd_feed.py
|
tellor-io/telliot-feed-examples
|
3f825c90ad372f42c89eee0f5b54250f22ec0728
|
[
"MIT"
] | 86
|
2021-11-09T13:12:58.000Z
|
2022-03-31T17:28:56.000Z
|
tests/feeds/test_bct_usd_feed.py
|
tellor-io/telliot-feed-examples
|
3f825c90ad372f42c89eee0f5b54250f22ec0728
|
[
"MIT"
] | 2
|
2021-11-27T12:51:22.000Z
|
2022-03-12T16:38:00.000Z
|
import pytest
from telliot_feed_examples.feeds.bct_usd_feed import bct_usd_median_feed
@pytest.mark.asyncio
async def test_fetch_price():
    """The BCT/USD median feed returns a positive spot price."""
    value, _timestamp = await bct_usd_median_feed.source.fetch_new_datapoint()
    assert value > 0
    print(value)
def test_query_info():
    """The BCT/USD feed's query carries the expected SpotPrice id and data.

    The expected ABI blob is kept once, as hex, and the raw bytes are
    derived from it — the original kept two equivalent literals.
    """
    q = bct_usd_median_feed.query
    exp_id = "35e083af947a4cf3bc053440c3b4f753433c76acab6c8b1911ee808104b72e85"
    exp_data_hex = "00000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000953706f745072696365000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c0000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000003626374000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000037573640000000000000000000000000000000000000000000000000000000000"  # noqa: E501
    exp_data = bytes.fromhex(exp_data_hex)
    assert q.query_data == exp_data
    assert q.query_id.hex() == exp_id
    assert q.query_data.hex() == exp_data_hex
| 115.869565
| 1,387
| 0.812758
| 422
| 2,665
| 5.056872
| 0.101896
| 0.905342
| 1.328491
| 1.731959
| 0.465323
| 0.465323
| 0.465323
| 0.465323
| 0.465323
| 0.465323
| 0
| 0.554123
| 0.039775
| 2,665
| 22
| 1,388
| 121.136364
| 0.279797
| 0.015385
| 0
| 0
| 0
| 0.066667
| 0.810305
| 0.810305
| 0
| 1
| 0
| 0
| 0.266667
| 1
| 0.066667
| false
| 0
| 0.133333
| 0
| 0.2
| 0.066667
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
71618ee9f3cb61efd17d471dee46d2abf8a25eb7
| 27,308
|
py
|
Python
|
tests/test_expand.py
|
HobnobMancer/cazy_webscraper
|
3f74492f46db2093f7e6cd91fffcb8347694e54e
|
[
"MIT"
] | 3
|
2020-10-22T08:31:29.000Z
|
2021-05-19T13:13:12.000Z
|
tests/test_expand.py
|
HobnobMancer/cazy_webscraper
|
3f74492f46db2093f7e6cd91fffcb8347694e54e
|
[
"MIT"
] | 62
|
2020-11-30T11:29:20.000Z
|
2022-03-28T13:50:30.000Z
|
tests/test_expand.py
|
HobnobMancer/cazy_webscraper
|
3f74492f46db2093f7e6cd91fffcb8347694e54e
|
[
"MIT"
] | 1
|
2021-03-10T16:30:11.000Z
|
2021-03-10T16:30:11.000Z
|
##################################################################
# These unit tests are in the process of being updated
# This includes updating the paths to meet the new scraper structure
# This also includes factorising out the tests to make the script size
# easier to handle
#####################################################################
# #!/usr/bin/env python
# # -*- coding: utf-8 -*-
# # Author:
# # Emma E. M. Hobbs
# # Contact
# # eemh1@st-andrews.ac.uk
# # Emma E. M. Hobbs,
# # Biomolecular Sciences Building,
# # University of St Andrews,
# # North Haugh Campus,
# # St Andrews,
# # KY16 9ST
# # Scotland,
# # UK
# # The MIT License
# """Tests the module sql which builds and interacts with an SQL database.
# These tests are intended to be run from the root of the repository using:
# pytest -v
# """
# import pytest
# from argparse import Namespace, ArgumentParser
# from pathlib import Path
# from scraper import expand
# from scraper.expand import get_pdb_structures, get_genbank_sequences
# from scraper.sql.sql_orm import Cazyme, Cazymes_Genbanks, Genbank, Taxonomy
# from scraper.utilities import file_io, parse_configuration
# @pytest.fixture
# def db_path():
# path_ = Path("tests")
# path_ = path_ / "test_inputs" / "test_inputs_expand" / "unit_test_2021-03-11--13-06-42.db"
# return path_
# @pytest.fixture
# def output_dir(test_dir):
# path_ = test_dir / "test_outputs"
# return path_
# @pytest.fixture
# def tax_filter():
# return set(["Nonlabens"])
# @pytest.fixture
# def genbank_query(db_session):
# query = db_session.query(Genbank, Cazymes_Genbanks, Taxonomy).\
# join(Genbank, (Genbank.genbank_id == Cazymes_Genbanks.genbank_id)).\
# filter(Cazymes_Genbanks.primary == True).all()
# return query
# # tests for get_pdb_structures
# def test_main_no_db(monkeypatch):
# """Test main() when an the database file cannot be found."""
# def mock_building_parser(*args, **kwargs):
# parser_args = ArgumentParser(
# prog="cazy_webscraper.py",
# usage=None,
# description="Scrape the CAZy database",
# conflict_handler="error",
# add_help=True,
# )
# return parser_args
# def mock_parser(*args, **kwargs):
# parser = Namespace(
# database=Path("--"),
# verbose=False,
# log=None,
# force=False,
# nodelete=False,
# outdir=None,
# )
# return parser
# def mock_no_return(*args, **kwargs):
# return
# def mock_config(*args, **kwargs):
# return None, set()
# monkeypatch.setattr(utilities, "build_pdb_structures_parser", mock_building_parser)
# monkeypatch.setattr(ArgumentParser, "parse_args", mock_parser)
# monkeypatch.setattr(parse_configuration, "get_configuration", mock_config)
# monkeypatch.setattr(file_io, "make_output_directory", mock_no_return)
# with pytest.raises(SystemExit) as pytest_wrapped_e:
# get_pdb_structures.main()
# assert pytest_wrapped_e.type == SystemExit
# def test_main_outdir_is_none(db_path, monkeypatch):
# """Test main() when outdir=None."""
# def mock_building_parser(*args, **kwargs):
# parser_args = ArgumentParser(
# prog="cazy_webscraper.py",
# usage=None,
# description="Scrape the CAZy database",
# conflict_handler="error",
# add_help=True,
# )
# return parser_args
# def mock_parser(*args, **kwargs):
# parser = Namespace(
# database=db_path,
# outdir=None,
# verbose=False,
# log=None,
# force=False,
# nodelete=False,
# )
# return parser
# def mock_no_return(*args, **kwargs):
# return
# def mock_config(*args, **kwargs):
# return None, set()
# monkeypatch.setattr(utilities, "build_pdb_structures_parser", mock_building_parser)
# monkeypatch.setattr(ArgumentParser, "parse_args", mock_parser)
# monkeypatch.setattr(utilities, "config_logger", mock_no_return)
# monkeypatch.setattr(parse_configuration, "get_configuration", mock_config)
# monkeypatch.setattr(file_io, "make_output_directory", mock_no_return)
# monkeypatch.setattr(get_pdb_structures, "get_every_cazymes_structures", mock_no_return)
# get_pdb_structures.main()
# def test_main(db_path, output_dir, monkeypatch):
# """Test main()."""
# def mock_building_parser(*args, **kwargs):
# parser_args = ArgumentParser(
# prog="cazy_webscraper.py",
# usage=None,
# description="Scrape the CAZy database",
# conflict_handler="error",
# add_help=True,
# )
# return parser_args
# def mock_parser(*args, **kwargs):
# parser = Namespace(
# database=db_path,
# outdir=output_dir,
# verbose=False,
# log=None,
# force=True,
# nodelete=True,
# )
# return parser
# def mock_no_return(*args, **kwargs):
# return
# def mock_config(*args, **kwargs):
# return None, set()
# monkeypatch.setattr(utilities, "build_pdb_structures_parser", mock_building_parser)
# monkeypatch.setattr(ArgumentParser, "parse_args", mock_parser)
# monkeypatch.setattr(utilities, "config_logger", mock_no_return)
# monkeypatch.setattr(parse_configuration, "get_configuration", mock_config)
# monkeypatch.setattr(file_io, "make_output_directory", mock_no_return)
# monkeypatch.setattr(get_pdb_structures, "get_every_cazymes_structures", mock_no_return)
# get_pdb_structures.main()
# def test_main_argv(db_path, output_dir, monkeypatch):
# """Test main()."""
# def mock_building_parser(*args, **kwargs):
# parser_args = ArgumentParser(
# prog="cazy_webscraper.py",
# usage=None,
# description="Scrape the CAZy database",
# conflict_handler="error",
# add_help=True,
# )
# return parser_args
# def mock_parser(*args, **kwargs):
# parser = Namespace(
# database=db_path,
# outdir=output_dir,
# verbose=False,
# log=None,
# force=True,
# nodelete=True,
# )
# return parser
# def mock_no_return(*args, **kwargs):
# return
# def mock_config(*args, **kwargs):
# return {}, set()
# monkeypatch.setattr(utilities, "build_pdb_structures_parser", mock_building_parser)
# monkeypatch.setattr(ArgumentParser, "parse_args", mock_parser)
# monkeypatch.setattr(utilities, "config_logger", mock_no_return)
# monkeypatch.setattr(parse_configuration, "get_configuration", mock_config)
# monkeypatch.setattr(file_io, "make_output_directory", mock_no_return)
# monkeypatch.setattr(get_pdb_structures, "get_structures_for_specific_cazymes", mock_no_return)
# get_pdb_structures.main()
# def test_get_every_cazymes_structures_primary(db_session, output_dir, monkeypatch):
# """Test get_every_cazymes_structures() when primary is True and taxonomy filter is given."""
# def mock_no_return(*args, **kwargs):
# return
# monkeypatch.setattr(get_pdb_structures, "download_pdb_structures", mock_no_return)
# args = {"args": Namespace(primary=True)}
# tax_filter = set(["Nonlabens"])
# get_pdb_structures.get_every_cazymes_structures(
# output_dir,
# tax_filter,
# db_session,
# args["args"],
# )
# def test_get_every_cazymes_structures_all(db_session, output_dir, monkeypatch):
# """Test get_every_cazymes_structures() when primary is False and no taxonomy filter is given."""
# def mock_no_return(*args, **kwargs):
# return
# monkeypatch.setattr(get_pdb_structures, "download_pdb_structures", mock_no_return)
# args = {"args": Namespace(primary=False)}
# tax_filter = None
# get_pdb_structures.get_every_cazymes_structures(
# output_dir,
# tax_filter,
# db_session,
# args["args"],
# )
# def test_get_structures_for_specific_cazymes_primary(db_session, output_dir, monkeypatch):
# """Test get_structures_for_specific_cazymes when primary is true and tax filter is given."""
# def mock_no_return(*args, **kwargs):
# return
# monkeypatch.setattr(get_pdb_structures, "download_pdb_structures", mock_no_return)
# args = {"args": Namespace(primary=True)}
# tax_filter = set(["Nonlabens"])
# config_dict = {"classes": ["PL"], "Polysaccharide Lyases (PLs)": ["PL28"]}
# get_pdb_structures.get_structures_for_specific_cazymes(
# output_dir,
# config_dict,
# tax_filter,
# db_session,
# args["args"],
# )
# def test_get_structures_for_specific_cazymes(db_session, output_dir, monkeypatch):
# """Test get_structures_for_specific_cazymes when primary is False and tax filter not given."""
# def mock_no_return(*args, **kwargs):
# return
# monkeypatch.setattr(get_pdb_structures, "download_pdb_structures", mock_no_return)
# args = {"args": Namespace(primary=False)}
# tax_filter = None
# config_dict = {
# "classes": ["PL"],
# "Polysaccharide Lyases (PLs)": ["PL28","GH3_1"],
# "CAZyclass": None,
# }
# get_pdb_structures.get_structures_for_specific_cazymes(
# output_dir,
# config_dict,
# tax_filter,
# db_session,
# args["args"],
# )
# # test for get_genbank_sequences
# def test_main_no_db_genbank(monkeypatch):
# """Test main() when an the database file cannot be found."""
# def mock_building_parser(*args, **kwargs):
# parser_args = ArgumentParser(
# prog="cazy_webscraper.py",
# usage=None,
# description="Scrape the CAZy database",
# conflict_handler="error",
# add_help=True,
# )
# return parser_args
# def mock_parser(*args, **kwargs):
# parser = Namespace(
# database=Path("--"),
# email="dummy_email",
# verbose=False,
# log=None,
# force=False,
# nodelete=False,
# outdir=None,
# )
# return parser
# def mock_no_return(*args, **kwargs):
# return
# def mock_config(*args, **kwargs):
# return None, set()
# monkeypatch.setattr(utilities, "build_genbank_sequences_parser", mock_building_parser)
# monkeypatch.setattr(ArgumentParser, "parse_args", mock_parser)
# monkeypatch.setattr(parse_configuration, "get_configuration", mock_config)
# monkeypatch.setattr(file_io, "make_output_directory", mock_no_return)
# with pytest.raises(SystemExit) as pytest_wrapped_e:
# get_genbank_sequences.main()
# assert pytest_wrapped_e.type == SystemExit
# def test_main_no_config_no_update(db_path, monkeypatch):
# """Test main() when outdir=None."""
# def mock_building_parser(*args, **kwargs):
# parser_args = ArgumentParser(
# prog="cazy_webscraper.py",
# usage=None,
# description="Scrape the CAZy database",
# conflict_handler="error",
# add_help=True,
# )
# return parser_args
# def mock_parser(*args, **kwargs):
# parser = Namespace(
# database=db_path,
# email="dummy_email",
# outdir=None,
# verbose=False,
# log=None,
# force=False,
# nodelete=False,
# update=False,
# )
# return parser
# def mock_no_return(*args, **kwargs):
# return
# def mock_config(*args, **kwargs):
# return None, set()
# monkeypatch.setattr(utilities, "build_genbank_sequences_parser", mock_building_parser)
# monkeypatch.setattr(ArgumentParser, "parse_args", mock_parser)
# monkeypatch.setattr(utilities, "config_logger", mock_no_return)
# monkeypatch.setattr(parse_configuration, "get_configuration", mock_config)
# monkeypatch.setattr(file_io, "make_output_directory", mock_no_return)
# monkeypatch.setattr(
# get_genbank_sequences,
# "get_missing_sequences_for_everything",
# mock_no_return,
# )
# get_genbank_sequences.main()
# def test_main_no_config_yes_update(db_path, monkeypatch):
# """Test main() when outdir=None."""
# def mock_building_parser(*args, **kwargs):
# parser_args = ArgumentParser(
# prog="cazy_webscraper.py",
# usage=None,
# description="Scrape the CAZy database",
# conflict_handler="error",
# add_help=True,
# )
# return parser_args
# def mock_parser(*args, **kwargs):
# parser = Namespace(
# database=db_path,
# email="dummy_email",
# outdir=None,
# verbose=False,
# log=None,
# force=False,
# nodelete=False,
# update=True,
# )
# return parser
# def mock_no_return(*args, **kwargs):
# return
# def mock_config(*args, **kwargs):
# return None, set()
# monkeypatch.setattr(utilities, "build_genbank_sequences_parser", mock_building_parser)
# monkeypatch.setattr(ArgumentParser, "parse_args", mock_parser)
# monkeypatch.setattr(utilities, "config_logger", mock_no_return)
# monkeypatch.setattr(parse_configuration, "get_configuration", mock_config)
# monkeypatch.setattr(file_io, "make_output_directory", mock_no_return)
# monkeypatch.setattr(
# get_genbank_sequences,
# "add_and_update_all_sequences",
# mock_no_return,
# )
# get_genbank_sequences.main()
# def test_main_yes_config_no_update(db_path, monkeypatch):
# """Test main() when outdir=None."""
# def mock_building_parser(*args, **kwargs):
# parser_args = ArgumentParser(
# prog="cazy_webscraper.py",
# usage=None,
# description="Scrape the CAZy database",
# conflict_handler="error",
# add_help=True,
# )
# return parser_args
# def mock_parser(*args, **kwargs):
# parser = Namespace(
# database=db_path,
# email="dummy_email",
# outdir=None,
# verbose=False,
# log=None,
# force=False,
# nodelete=False,
# update=False,
# )
# return parser
# def mock_no_return(*args, **kwargs):
# return
# def mock_config(*args, **kwargs):
# return {}, set()
# monkeypatch.setattr(utilities, "build_genbank_sequences_parser", mock_building_parser)
# monkeypatch.setattr(ArgumentParser, "parse_args", mock_parser)
# monkeypatch.setattr(utilities, "config_logger", mock_no_return)
# monkeypatch.setattr(parse_configuration, "get_configuration", mock_config)
# monkeypatch.setattr(file_io, "make_output_directory", mock_no_return)
# monkeypatch.setattr(
# get_genbank_sequences,
# "get_missing_sequences_for_specific_records",
# mock_no_return,
# )
# get_genbank_sequences.main()
# def test_main_yes_config_yes_update(db_path, monkeypatch):
# """Test main() when outdir=None."""
# def mock_building_parser(*args, **kwargs):
# parser_args = ArgumentParser(
# prog="cazy_webscraper.py",
# usage=None,
# description="Scrape the CAZy database",
# conflict_handler="error",
# add_help=True,
# )
# return parser_args
# def mock_parser(*args, **kwargs):
# parser = Namespace(
# database=db_path,
# email="dummy_email",
# outdir=None,
# verbose=False,
# log=None,
# force=False,
# nodelete=False,
# update=True,
# )
# return parser
# def mock_no_return(*args, **kwargs):
# return
# def mock_config(*args, **kwargs):
# return {}, set()
# monkeypatch.setattr(utilities, "build_genbank_sequences_parser", mock_building_parser)
# monkeypatch.setattr(ArgumentParser, "parse_args", mock_parser)
# monkeypatch.setattr(utilities, "config_logger", mock_no_return)
# monkeypatch.setattr(parse_configuration, "get_configuration", mock_config)
# monkeypatch.setattr(file_io, "make_output_directory", mock_no_return)
# monkeypatch.setattr(
# get_genbank_sequences,
# "update_sequences_for_specific_records",
# mock_no_return,
# )
# get_genbank_sequences.main()
# def test_get_missing_sequences_for_everything_primary(db_session, monkeypatch):
# """Tests get_missing_sequences_for_everything() when primary is True and tax filter is given."""
# def mock_accession(*args, **kwargs):
# return []
# def mock_no_return(*args, **kwargs):
# return
# monkeypatch.setattr(get_genbank_sequences, "extract_accessions", mock_accession)
# monkeypatch.setattr(get_genbank_sequences, "get_sequences_add_to_db", mock_no_return)
# args = {"args": Namespace(primary=True)}
# tax_filter = set(["Nonlabens"])
# get_genbank_sequences.get_missing_sequences_for_everything(
# "date",
# tax_filter,
# db_session,
# args["args"],
# )
# def test_get_missing_sequences_for_everything(db_session, monkeypatch):
# """Tests get_missing_sequences_for_everything() when primary is False and tax filter is None."""
# def mock_accession(*args, **kwargs):
# return ["acc1", "acc2"]
# def mock_no_return(*args, **kwargs):
# return
# monkeypatch.setattr(get_genbank_sequences, "extract_accessions", mock_accession)
# monkeypatch.setattr(get_genbank_sequences, "get_accession_chunks", mock_accession)
# monkeypatch.setattr(get_genbank_sequences, "get_sequences_add_to_db", mock_no_return)
# args = {"args": Namespace(primary=True, epost=200)}
# tax_filter = None
# get_genbank_sequences.get_missing_sequences_for_everything(
# "date",
# tax_filter,
# db_session,
# args["args"],
# )
# def test_add_and_update_all_sequences_primary(db_session, monkeypatch):
# """Tests add_and_update_all_sequences() when primary is True and tax filter is given."""
# def mock_accession(*args, **kwargs):
# return {}
# monkeypatch.setattr(get_genbank_sequences, "extract_accessions_and_dates", mock_accession)
# args = {"args": Namespace(primary=True, epost=200)}
# tax_filter = set(["Nonlabens"])
# get_genbank_sequences.add_and_update_all_sequences(
# "date",
# tax_filter,
# db_session,
# args["args"],
# )
# def test_add_and_update_all_sequences_no_updates(db_session, monkeypatch):
# """Tests get_missing_sequences_for_everything() when there are no seq to update."""
# def mock_accession(*args, **kwargs):
# return {"acc1":0, "acc2":1}
# def mock_no_acc(*args, **kwargs):
# return []
# monkeypatch.setattr(get_genbank_sequences, "extract_accessions_and_dates", mock_accession)
# monkeypatch.setattr(get_genbank_sequences, "get_accessions_for_new_sequences", mock_no_acc)
# args = {"args": Namespace(primary=True, epost=200)}
# tax_filter = None
# get_genbank_sequences.add_and_update_all_sequences(
# "date",
# tax_filter,
# db_session,
# args["args"],
# )
# def test_add_and_update_all_sequences(db_session, monkeypatch):
# """Tests get_missing_sequences_for_everything() when primary is False and tax filter is None."""
# def mock_accession(*args, **kwargs):
# return {"acc1":1, "acc2":2}
# def mock_acc(*args, **kwargs):
# return ["acc", "acc1"]
# def mock_no_return(*args, **kwargs):
# return
# monkeypatch.setattr(get_genbank_sequences, "extract_accessions_and_dates", mock_accession)
# monkeypatch.setattr(get_genbank_sequences, "get_accessions_for_new_sequences", mock_acc)
# monkeypatch.setattr(get_genbank_sequences, "get_accession_chunks", mock_acc)
# monkeypatch.setattr(get_genbank_sequences, "get_sequences_add_to_db", mock_no_return)
# args = {"args": Namespace(primary=True, epost=200)}
# tax_filter = None
# get_genbank_sequences.add_and_update_all_sequences(
# "date",
# tax_filter,
# db_session,
# args["args"],
# )
# def test_get_missing_sequences_for_specific_records_primary(db_session, monkeypatch):
# """Tests get_missing_sequences_for_specific_records, primary is True, tax filter isn't None."""
# def mock_accession(*args, **kwargs):
# return []
# def mock_no_return(*args, **kwargs):
# return
# monkeypatch.setattr(get_genbank_sequences, "extract_accessions", mock_accession)
# monkeypatch.setattr(get_genbank_sequences, "get_sequences_add_to_db", mock_no_return)
# args = {"args": Namespace(primary=True, epost=150)}
# tax_filter = None
# config = {"classes":["PL"], "PL": ["PL28", "PL29_1"], "GH": None}
# get_genbank_sequences.get_missing_sequences_for_specific_records(
# "date",
# config,
# tax_filter,
# db_session,
# args["args"],
# )
# def test_get_missing_sequences_for_specific_records(db_session, monkeypatch):
# """Tests get_missing_sequences_for_specific_records, primary is False, tax filter isn't None."""
# def mock_accession(*args, **kwargs):
# return ["acc", "acc1"]
# def mock_no_return(*args, **kwargs):
# return
# monkeypatch.setattr(get_genbank_sequences, "extract_accessions", mock_accession)
# monkeypatch.setattr(get_genbank_sequences, "get_sequences_add_to_db", mock_no_return)
# args = {"args": Namespace(primary=False, epost=150)}
# tax_filter = set(["Nonlabens"])
# config = {"classes":["PL"], "PL": ["PL28", "PL29_1"], "GH": None}
# get_genbank_sequences.get_missing_sequences_for_specific_records(
# "date",
# config,
# tax_filter,
# db_session,
# args["args"],
# )
# def test_update_sequences_for_specific_records_primary_no_acc1(db_session, monkeypatch):
# """Test update_sequences_for_specific_records, primary is True, tax filter not given."""
# def mock_accession(*args, **kwargs):
# return {}
# def mock_acc(*args, **kwargs):
# return []
# def mock_no_return(*args, **kwargs):
# return
# monkeypatch.setattr(get_genbank_sequences, "extract_accessions_and_dates", mock_accession)
# monkeypatch.setattr(get_genbank_sequences, "get_accessions_for_new_sequences", mock_acc)
# monkeypatch.setattr(get_genbank_sequences, "get_accession_chunks", mock_acc)
# monkeypatch.setattr(get_genbank_sequences, "get_sequences_add_to_db", mock_no_return)
# args = {"args": Namespace(primary=True, epost=200)}
# tax_filter = None
# config = {"classes": ["PL"], "PL": ["PL28", "PL29_1"], "GH": None}
# get_genbank_sequences.update_sequences_for_specific_records(
# "date",
# config,
# tax_filter,
# db_session,
# args["args"],
# )
# def test_update_sequences_for_specific_records_primary_no_acc2(db_session, monkeypatch):
# """Test update_sequences_for_specific_records, primary is True, tax filter not given."""
# def mock_accession(*args, **kwargs):
# return {"acc1": 1, "acc2": 2}
# def mock_acc(*args, **kwargs):
# return []
# def mock_no_return(*args, **kwargs):
# return
# monkeypatch.setattr(get_genbank_sequences, "extract_accessions_and_dates", mock_accession)
# monkeypatch.setattr(get_genbank_sequences, "get_accessions_for_new_sequences", mock_acc)
# monkeypatch.setattr(get_genbank_sequences, "get_accession_chunks", mock_acc)
# monkeypatch.setattr(get_genbank_sequences, "get_sequences_add_to_db", mock_no_return)
# args = {"args": Namespace(primary=True, epost=200)}
# tax_filter = None
# config = {"classes": ["PL"], "PL": ["PL28", "PL29_1"], "GH": None}
# get_genbank_sequences.update_sequences_for_specific_records(
# "date",
# config,
# tax_filter,
# db_session,
# args["args"],
# )
# def test_update_sequences_for_specific_records_primary_false(db_session, monkeypatch):
# """Test update_sequences_for_specific_records, primary is False, tax filter given."""
# def mock_accession(*args, **kwargs):
# return {"acc1": 1, "acc2": 2}
# def mock_acc(*args, **kwargs):
# return ["acc", "acc1"]
# def mock_no_return(*args, **kwargs):
# return
# monkeypatch.setattr(get_genbank_sequences, "extract_accessions_and_dates", mock_accession)
# monkeypatch.setattr(get_genbank_sequences, "get_accessions_for_new_sequences", mock_acc)
# monkeypatch.setattr(get_genbank_sequences, "get_accession_chunks", mock_acc)
# monkeypatch.setattr(get_genbank_sequences, "get_sequences_add_to_db", mock_no_return)
# args = {"args": Namespace(primary=False, epost=200)}
# tax_filter = set(["Nonlabens"])
# config = {"classes": ["PL"], "PL": ["PL28", "PL29_1"], "GH": None}
# get_genbank_sequences.update_sequences_for_specific_records(
# "date",
# config,
# tax_filter,
# db_session,
# args["args"],
# )
# def test_extract_accessions_no_tax(genbank_query):
# """Test extract_accessions() when no tax filter is given."""
# get_genbank_sequences.extract_accessions(genbank_query, None)
# def test_extract_accessions_tax_given(genbank_query, tax_filter):
# """Test extract_accessions() when no tax filter is given."""
# get_genbank_sequences.extract_accessions(genbank_query, tax_filter)
# def test_extract_accessions_and_dates_no_tax(genbank_query):
# """Test extract_accessions() when no tax filter is given."""
# get_genbank_sequences.extract_accessions_and_dates(genbank_query, None)
# def test_extract_accessions_and_dates_tax_given(genbank_query, tax_filter):
# """Test extract_accessions() when no tax filter is given."""
# get_genbank_sequences.extract_accessions_and_dates(genbank_query, tax_filter)
# def test_get_accession_chunks():
# """Test get_accession_chunks()"""
# lst = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20]
# get_genbank_sequences.get_accession_chunks(lst, 2)
# def test_entry_retry():
# """Test entrez_retry."""
# def mock_record(*args, **kwargs):
# return "test_record"
# assert "test_record" == get_genbank_sequences.entrez_retry(mock_record)
# def test_entrez_retry_none():
# """Test entrez_retry when nothing is returned."""
# def mock_record(*args, **kwargs):
# return
# assert get_genbank_sequences.entrez_retry(mock_record) is None
| 32.27896
| 102
| 0.642412
| 3,059
| 27,308
| 5.391304
| 0.075515
| 0.089498
| 0.064516
| 0.032016
| 0.908137
| 0.898375
| 0.88546
| 0.870725
| 0.860175
| 0.851928
| 0
| 0.006291
| 0.231654
| 27,308
| 845
| 103
| 32.31716
| 0.779716
| 0.940237
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
71bf72c585eec295567e06bceea054579b2ceab7
| 516
|
py
|
Python
|
calculator.py
|
Vitali-Lupusor/CalculatorLibrary
|
aeb8646eb950f525ba43be6c204cddb6546070ce
|
[
"MIT"
] | null | null | null |
calculator.py
|
Vitali-Lupusor/CalculatorLibrary
|
aeb8646eb950f525ba43be6c204cddb6546070ce
|
[
"MIT"
] | null | null | null |
calculator.py
|
Vitali-Lupusor/CalculatorLibrary
|
aeb8646eb950f525ba43be6c204cddb6546070ce
|
[
"MIT"
] | null | null | null |
"""Calculator library containing basic math operations."""
def add(first_term: float, second_term: float) -> float:
    """Return the sum of *first_term* and *second_term*."""
    return first_term + second_term
def subtract(first_term: float, second_term: float) -> float:
    """Return *first_term* minus *second_term*."""
    return first_term - second_term
def multiply(first_term: float, second_term: float) -> float:
    """Return the product of *first_term* and *second_term*."""
    return first_term * second_term
def divide(first_term: float, second_term: float) -> float:
    """Return *first_term* divided by *second_term*.

    Raises:
        ZeroDivisionError: if *second_term* is 0.
    """
    return first_term / second_term
| 23.454545
| 61
| 0.680233
| 66
| 516
| 5.075758
| 0.257576
| 0.214925
| 0.167164
| 0.238806
| 0.779104
| 0.779104
| 0.779104
| 0.779104
| 0.779104
| 0.779104
| 0
| 0
| 0.182171
| 516
| 21
| 62
| 24.571429
| 0.793839
| 0.147287
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.047619
| 0
| 1
| 0.5
| false
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 10
|
e07b9b549df38d95c49758f66e8fd6a0043e5b21
| 15,769
|
py
|
Python
|
syncopy/tests/test_preproc.py
|
kajal5888/syncopy
|
f7d49808a09ff65eec64cda1cfb4c87a012e0c2b
|
[
"BSD-3-Clause"
] | null | null | null |
syncopy/tests/test_preproc.py
|
kajal5888/syncopy
|
f7d49808a09ff65eec64cda1cfb4c87a012e0c2b
|
[
"BSD-3-Clause"
] | null | null | null |
syncopy/tests/test_preproc.py
|
kajal5888/syncopy
|
f7d49808a09ff65eec64cda1cfb4c87a012e0c2b
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Test preprocessing
#
# 3rd party imports
import psutil
import pytest
import inspect
import numpy as np
import matplotlib.pyplot as ppl
# Local imports
from syncopy import __acme__
if __acme__:
import dask.distributed as dd
from syncopy import preprocessing as ppfunc
from syncopy import AnalogData, freqanalysis
import syncopy.preproc as preproc # submodule
import syncopy.tests.helpers as helpers
from syncopy.shared.errors import SPYValueError
from syncopy.shared.tools import get_defaults, best_match
# Decorator to decide whether or not to run dask-related tests
skip_without_acme = pytest.mark.skipif(not __acme__, reason="acme not available")
# Decorator to decide whether or not to run memory-intensive tests
availMem = psutil.virtual_memory().total  # total system RAM in bytes
minRAM = 5  # minimum RAM (in GB) needed for the memory-intensive tests
skip_low_mem = pytest.mark.skipif(availMem < minRAM * 1024**3, reason=f"less than {minRAM}GB RAM available")
# availableFilterTypes = ('lp', 'hp', 'bp', 'bs')
# availableDirections = ('twopass', 'onepass', 'onepass-minphase')
# availableWindows = ("hamming", "hann", "blackman")
class TestButterworth:
    """Tests Butterworth ('but') filtering through syncopy's preprocessing frontend.

    Uses flat white noise as test data so the expected power in any
    frequency band is proportional to the band's width.
    """

    nSamples = 1000
    nChannels = 4
    nTrials = 100
    fs = 200  # sampling rate in Hz
    fNy = fs / 2  # Nyquist frequency
    # -- use flat white noise as test data --
    trls = []
    for _ in range(nTrials):
        trl = np.random.randn(nSamples, nChannels)
        trls.append(trl)
    data = AnalogData(trls, samplerate=fs)
    # for toi tests, -1s offset
    time_span = [-.8, 4.2]
    flow, fhigh = 0.3 * fNy, 0.4 * fNy
    # cut-off frequencies per filter type (lp/hp scalar, bp/bs band)
    freq_kw = {'lp': fhigh, 'hp': flow,
               'bp': [flow, fhigh], 'bs': [flow, fhigh]}

    def test_but_filter(self, **kwargs):
        """
        We test for remaining power after filtering
        for all available filter types.
        Minimum order is 4 to safely pass..
        """
        # check if we run the default test
        def_test = not len(kwargs)
        # write default parameters dict
        if def_test:
            kwargs = {'direction': 'twopass',
                      'order': 4}
        # the unfiltered data
        spec = freqanalysis(self.data, tapsmofrq=1, keeptrials=False)
        # total power in arbitrary units (for now)
        pow_tot = spec.show(channel=0).sum()
        nFreq = spec.freq.size
        if def_test:
            fig, ax = mk_spec_ax()
        for ftype in preproc.availableFilterTypes:
            filtered = ppfunc(self.data,
                              filter_class='but',
                              filter_type=ftype,
                              freq=self.freq_kw[ftype],
                              **kwargs)
            # check in frequency space
            spec_f = freqanalysis(filtered, tapsmofrq=1, keeptrials=False)
            # get relevant frequency ranges
            # for integrated powers
            if ftype == 'lp':
                foilim = [0, self.freq_kw[ftype]]
            elif ftype == 'hp':
                # toilim selections can screw up the
                # frequency axis of freqanalysis/np.fft.rfftfreq :/
                foilim = [self.freq_kw[ftype], spec_f.freq[-1]]
            else:
                foilim = self.freq_kw[ftype]
            # remaining power after filtering
            pow_fil = spec_f.show(channel=0, foilim=foilim).sum()
            _, idx = best_match(spec_f.freq, foilim, span=True)
            # ratio of pass-band to total freqency band
            ratio = len(idx) / nFreq
            # at least 80% of the ideal filter power
            # should be still around
            if ftype in ('lp', 'hp'):
                assert 0.8 * ratio < pow_fil / pow_tot
            # here we have two roll-offs, one at each side
            elif ftype == 'bp':
                assert 0.7 * ratio < pow_fil / pow_tot
            # as well as here
            elif ftype == 'bs':
                assert 0.7 * ratio < (pow_tot - pow_fil) / pow_tot
            if def_test:
                plot_spec(ax, spec_f, label=ftype)
        # plotting
        if def_test:
            plot_spec(ax, spec, c='0.3', label='unfiltered')
            annotate_foilims(ax, *self.freq_kw['bp'])
            ax.set_title(f"Twopass Butterworth, order = {kwargs['order']}")

    def test_but_kwargs(self):
        """
        Test order and direction parameter
        """
        for direction in preproc.availableDirections:
            kwargs = {'direction': direction,
                      'order': 4}
            # only for firws
            if 'minphase' in direction:
                with pytest.raises(SPYValueError) as err:
                    self.test_but_filter(**kwargs)
                assert "expected 'onepass'" in str(err)
            else:
                self.test_but_filter(**kwargs)
        # -2 is invalid (too small), 5.6 is invalid (not an int), 10 is valid
        for order in [-2, 10, 5.6]:
            kwargs = {'direction': 'twopass',
                      'order': order}
            if order < 1 and isinstance(order, int):
                with pytest.raises(SPYValueError) as err:
                    self.test_but_filter(**kwargs)
                assert "value to be greater" in str(err)
            elif not isinstance(order, int):
                with pytest.raises(SPYValueError) as err:
                    self.test_but_filter(**kwargs)
                assert "expected int_like" in str(err)
            # valid order
            else:
                self.test_but_filter(**kwargs)

    def test_but_selections(self):
        """Re-run the filter test on several random in-place data selections."""
        sel_dicts = helpers.mk_selection_dicts(nTrials=20,
                                               nChannels=2,
                                               toi_min=self.time_span[0],
                                               toi_max=self.time_span[1],
                                               min_len=3.5)
        for sd in sel_dicts:
            self.test_but_filter(select=sd)

    def test_but_polyremoval(self):
        """Check polynomial-trend removal parameter handling via the shared helper."""
        helpers.run_polyremoval_test(self.test_but_filter)

    def test_but_cfg(self):
        """Drive the preprocessing frontend through a `cfg` configuration object."""
        cfg = get_defaults(ppfunc)
        cfg.filter_class = 'but'
        cfg.order = 6
        cfg.direction = 'twopass'
        cfg.freq = 30
        cfg.filter_type = 'hp'
        result = ppfunc(self.data, cfg)
        # check here just for finiteness
        assert np.all(np.isfinite(result.data))

    @skip_without_acme
    def test_but_parallel(self, testcluster=None):
        """Repeat every sequential test of this class with a dask client attached."""
        ppl.ioff()  # suppress interactive plotting during the parallel run
        client = dd.Client(testcluster)
        all_tests = [attr for attr in self.__dir__()
                     if (inspect.ismethod(getattr(self, attr)) and 'parallel' not in attr)]
        for test_name in all_tests:
            test_method = getattr(self, test_name)
            if 'but_filter' in test_name:
                # test parallelisation along channels
                test_method(chan_per_worker=2)
            else:
                test_method()
        client.close()
        ppl.ion()

    def test_but_hilbert_rect(self):
        """Check rectification and Hilbert-transform post-processing options."""
        call = lambda **kwargs: ppfunc(self.data,
                                       freq=20,
                                       filter_class='but',
                                       filter_type='lp',
                                       order=5,
                                       direction='onepass',
                                       **kwargs)
        # test rectification
        filtered = call(rectify=False)
        assert not np.all(filtered.trials[0] > 0)
        rectified = call(rectify=True)
        assert np.all(rectified.trials[0] > 0)
        # test simultaneous call to hilbert and rectification
        with pytest.raises(SPYValueError) as err:
            call(rectify=True, hilbert='abs')
        assert "either rectifi" in str(err)
        assert "or hilbert" in str(err)
        # test hilbert outputs
        for output in preproc.hilbert_outputs:
            htrafo = call(hilbert=output)
            if output == 'complex':
                assert np.all(np.imag(htrafo.trials[0]) != 0)
            else:
                assert np.all(np.imag(htrafo.trials[0]) == 0)
        # test wrong hilbert parameter
        with pytest.raises(SPYValueError) as err:
            call(hilbert='absnot')
        assert "one of {'" in str(err)
class TestFIRWS:
    """Tests windowed-sinc FIR ('firws') filtering through syncopy's preprocessing frontend.

    Mirrors TestButterworth but the `order` parameter here is the FIR
    filter length; 200 taps are safe for the assertions to pass.
    """

    nSamples = 1000
    nChannels = 4
    nTrials = 50
    fs = 200  # sampling rate in Hz
    fNy = fs / 2  # Nyquist frequency
    # -- use flat white noise as test data --
    trls = []
    for _ in range(nTrials):
        trl = np.random.randn(nSamples, nChannels)
        trls.append(trl)
    data = AnalogData(trls, samplerate=fs)
    # for toi tests, -1s offset
    time_span = [-.8, 4.2]
    flow, fhigh = 0.3 * fNy, 0.4 * fNy
    # cut-off frequencies per filter type (lp/hp scalar, bp/bs band)
    freq_kw = {'lp': fhigh, 'hp': flow,
               'bp': [flow, fhigh], 'bs': [flow, fhigh]}

    def test_firws_filter(self, **kwargs):
        """
        We test for remaining power after filtering
        for all available filter types.
        Order parameter here means length of the filter,
        200 is safe to pass!
        """
        # check if we run the default test
        def_test = not len(kwargs)
        # write default parameters dict
        if def_test:
            kwargs = {'direction': 'twopass',
                      'order': 200}
        # the unfiltered data
        spec = freqanalysis(self.data, tapsmofrq=1, keeptrials=False)
        # total power in arbitrary units (for now)
        pow_tot = spec.show(channel=0).sum()
        nFreq = spec.freq.size
        if def_test:
            fig, ax = mk_spec_ax()
        for ftype in preproc.availableFilterTypes:
            filtered = ppfunc(self.data,
                              filter_class='firws',
                              filter_type=ftype,
                              freq=self.freq_kw[ftype],
                              **kwargs)
            # check in frequency space
            spec_f = freqanalysis(filtered, tapsmofrq=1, keeptrials=False)
            # get relevant frequency ranges
            # for integrated powers
            if ftype == 'lp':
                foilim = [0, self.freq_kw[ftype]]
            elif ftype == 'hp':
                # toilim selections can screw up the
                # frequency axis of freqanalysis/np.fft.rfftfreq :/
                foilim = [self.freq_kw[ftype], spec_f.freq[-1]]
            else:
                foilim = self.freq_kw[ftype]
            # remaining power after filtering
            pow_fil = spec_f.show(channel=0, foilim=foilim).sum()
            _, idx = best_match(spec_f.freq, foilim, span=True)
            # ratio of pass-band to total freqency band
            ratio = len(idx) / nFreq
            # at least 80% of the ideal filter power
            # should be still around
            if ftype in ('lp', 'hp'):
                assert 0.8 * ratio < pow_fil / pow_tot
            # here we have two roll-offs, one at each side
            elif ftype == 'bp':
                assert 0.7 * ratio < pow_fil / pow_tot
            # as well as here
            elif ftype == 'bs':
                assert 0.7 * ratio < (pow_tot - pow_fil) / pow_tot
            if def_test:
                plot_spec(ax, spec_f, label=ftype)
        # plotting
        if def_test:
            plot_spec(ax, spec, c='0.3', label='unfiltered')
            annotate_foilims(ax, *self.freq_kw['bp'])
            ax.set_title(f"Twopass FIRWS, order = {kwargs['order']}")

    def test_firws_kwargs(self):
        """
        Test order and direction parameter
        """
        # firws supports every direction, including 'onepass-minphase'
        for direction in preproc.availableDirections:
            kwargs = {'direction': direction,
                      'order': 200}
            self.test_firws_filter(**kwargs)
        # -2 is invalid (too small), 5.6 is invalid (not an int), 220 is valid
        for order in [-2, 220, 5.6]:
            kwargs = {'direction': 'twopass',
                      'order': order}
            if order < 1 and isinstance(order, int):
                with pytest.raises(SPYValueError) as err:
                    self.test_firws_filter(**kwargs)
                assert "value to be greater" in str(err)
            elif not isinstance(order, int):
                with pytest.raises(SPYValueError) as err:
                    self.test_firws_filter(**kwargs)
                assert "expected int_like" in str(err)
            # valid order
            else:
                self.test_firws_filter(**kwargs)

    def test_firws_selections(self):
        """Re-run the filter test on several random in-place data selections."""
        sel_dicts = helpers.mk_selection_dicts(nTrials=20,
                                               nChannels=2,
                                               toi_min=self.time_span[0],
                                               toi_max=self.time_span[1],
                                               min_len=3.5)
        for sd in sel_dicts:
            print(sd)
            self.test_firws_filter(select=sd, order=200)

    def test_firws_polyremoval(self):
        """Check polynomial-trend removal parameter handling via the shared helper."""
        helpers.run_polyremoval_test(self.test_firws_filter)

    def test_firws_cfg(self):
        """Drive the preprocessing frontend through a `cfg` configuration object."""
        cfg = get_defaults(ppfunc)
        cfg.filter_class = 'firws'
        cfg.order = 200
        cfg.direction = 'twopass'
        cfg.freq = 30
        cfg.filter_type = 'hp'
        result = ppfunc(self.data, cfg)
        # check here just for finiteness
        assert np.all(np.isfinite(result.data))

    @skip_without_acme
    def test_firws_parallel(self, testcluster=None):
        """Repeat every sequential test of this class with a dask client attached."""
        ppl.ioff()  # suppress interactive plotting during the parallel run
        client = dd.Client(testcluster)
        all_tests = [attr for attr in self.__dir__()
                     if (inspect.ismethod(getattr(self, attr)) and 'parallel' not in attr)]
        for test_name in all_tests:
            test_method = getattr(self, test_name)
            if 'firws_filter' in test_name:
                # test parallelisation along channels
                test_method(chan_per_worker=2)
            else:
                test_method()
        client.close()
        ppl.ion()

    def test_firws_hilbert_rect(self):
        """Check rectification and Hilbert-transform post-processing options."""
        call = lambda **kwargs: ppfunc(self.data,
                                       freq=20,
                                       filter_class='firws',
                                       filter_type='lp',
                                       order=200,
                                       direction='onepass',
                                       **kwargs)
        # test rectification
        filtered = call(rectify=False)
        assert not np.all(filtered.trials[0] > 0)
        rectified = call(rectify=True)
        assert np.all(rectified.trials[0] > 0)
        # test simultaneous call to hilbert and rectification
        with pytest.raises(SPYValueError) as err:
            call(rectify=True, hilbert='abs')
        assert "either rectifi" in str(err)
        assert "or hilbert" in str(err)
        # test hilbert outputs
        for output in preproc.hilbert_outputs:
            htrafo = call(hilbert=output)
            if output == 'complex':
                assert np.all(np.imag(htrafo.trials[0]) != 0)
            else:
                assert np.all(np.imag(htrafo.trials[0]) == 0)
        # test wrong hilbert parameter
        with pytest.raises(SPYValueError) as err:
            call(hilbert='absnot')
        assert "one of {'" in str(err)
def mk_spec_ax():
    """Create a figure/axis pair pre-labelled for plotting power spectra."""
    figure, axis = ppl.subplots()
    axis.set_xlabel('frequency (Hz)')
    axis.set_ylabel('power (dB)')
    return figure, axis
def plot_spec(ax, spec, **pkwargs):
    """Plot channel 1 of *spec* onto *ax* and refresh the legend."""
    freqs = spec.freq
    power = spec.show(channel=1)
    ax.plot(freqs, power, alpha=0.8, **pkwargs)
    ax.legend()
def annotate_foilims(ax, flow, fhigh):
    """Mark the band edges *flow* and *fhigh* with dashed vertical lines.

    The y-limits are saved and restored so the marker lines (drawn from
    0 to 1 in data coordinates) do not rescale the axis.
    """
    saved_ylim = ax.get_ylim()
    for cut in (flow, fhigh):
        ax.plot([cut, cut], [0, 1], 'k--')
    ax.set_ylim(saved_ylim)
if __name__ == '__main__':
    # Instantiate the test classes for interactive/debugging use;
    # note: no test methods are invoked here automatically.
    T1 = TestButterworth()
    T2 = TestFIRWS()
| 33.0587
| 108
| 0.529393
| 1,794
| 15,769
| 4.522297
| 0.172798
| 0.020708
| 0.010847
| 0.032171
| 0.819795
| 0.798841
| 0.789474
| 0.789474
| 0.769259
| 0.759152
| 0
| 0.01664
| 0.374976
| 15,769
| 476
| 109
| 33.128151
| 0.806514
| 0.141544
| 0
| 0.761745
| 0
| 0
| 0.048872
| 0
| 0
| 0
| 0
| 0
| 0.090604
| 1
| 0.057047
| false
| 0.036913
| 0.043624
| 0
| 0.171141
| 0.003356
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
e09c2cc36065a201d38853b4bbcbd0c0ed538bd4
| 24,386
|
py
|
Python
|
models/modelsTF.py
|
frandorr/PROBA-V
|
89c1aa4dfc58d66e7747293f6738fdd4e2ba6e6f
|
[
"Apache-2.0"
] | 14
|
2020-03-06T14:19:06.000Z
|
2022-01-31T05:19:02.000Z
|
models/modelsTF.py
|
frandorr/PROBA-V
|
89c1aa4dfc58d66e7747293f6738fdd4e2ba6e6f
|
[
"Apache-2.0"
] | 1
|
2021-01-18T09:13:19.000Z
|
2022-03-16T07:00:53.000Z
|
models/modelsTF.py
|
frandorr/PROBA-V
|
89c1aa4dfc58d66e7747293f6738fdd4e2ba6e6f
|
[
"Apache-2.0"
] | 7
|
2020-04-05T17:38:10.000Z
|
2021-09-22T13:33:41.000Z
|
import tensorflow as tf
from tensorflow_addons.layers import WeightNormalization, InstanceNormalization
from tensorflow.keras import Input, Model
from tensorflow.keras.layers import Conv3D, Conv2D, Lambda, Add, Reshape
class WDSRConv3D:
    """Builder for a WDSR-style multi-frame super-resolution network.

    Fuses a stack of registered low-resolution frames with 3D convolutions
    and upsamples via pixel shuffle (depth-to-space). The network has two
    paths: a high-frequency residual path over the full frame stack and a
    low-frequency path over the temporal-mean image.
    """

    def __init__(self, name, band, mean, std, maxShift):
        # name/band only label the generated Keras model.
        self.name = name
        self.band = band
        # Dataset statistics consumed by normalize()/denormalize().
        self.mean = mean
        self.std = std
        # Extra patch border reserved for registration shifts; added to the
        # spatial input size in build().
        self.maxShift = maxShift

    def build(self, scale: int, numFilters: int, kernelSize: tuple,
              numResBlocks: int, expRate: int, decayRate: float,
              numImgLR: int, patchSizeLR: int, isGrayScale: bool) -> Model:
        """Assemble and return the Keras model.

        Input shape is (patchSizeLR + maxShift, patchSizeLR + maxShift,
        numImgLR, C) with C = 1 for grayscale, 3 otherwise; output is a
        single super-resolved 2D image.
        """
        # Define inputs
        imgLRIn = Input(shape=(patchSizeLR + self.maxShift, patchSizeLR + self.maxShift, numImgLR, 1)) if isGrayScale \
            else Input(shape=(patchSizeLR + self.maxShift, patchSizeLR + self.maxShift, numImgLR, 3))
        # Get mean of instance mean patch and over all mean pixel value
        # (axis=3 is the temporal/frame axis).
        meanImgLR = Lambda(lambda x: tf.reduce_mean(x, axis=3, name='meanLR'), name='getMeanLR')(imgLRIn)
        # Normalize Instance
        imgLR = Lambda(self.normalize, name='normImgLR')(imgLRIn)
        meanImgLR = Lambda(self.normalize, name='normMeanImgLR')(meanImgLR)
        # ImgResBlocks | High Frequency Residuals Path
        main = self.WDSRNetHRResidualPath(imgLR, numFilters, kernelSize,
                                          numResBlocks, patchSizeLR, numImgLR,
                                          scale, expRate, decayRate)
        # MeanResBlocks | Low Frequency Residuals Path
        # (2D path, so the temporal component of kernelSize is dropped).
        residual = self.WDSRNetLRResidualPath(meanImgLR, kernelSize[:-1], scale)
        # Fuse Main and Residual Patch
        out = Add(name='mainPlusResid')([main, residual])
        # Denormalize Instance
        out = Lambda(self.denormalize, name='denorm')(out)
        return Model(imgLRIn, out, name=f'WDSRConv3D_{self.band}_{self.name}')

    def WDSRNetLRResidualPath(self, x: tf.Tensor, kernelSize: tuple, scale: int):
        """Low-frequency path: 2D convs on the mean image down to
        scale*scale channels, then pixel-shuffle to the HR grid."""
        # TODO: Check correctness for different scales
        for i in range(scale):
            act = 'relu' if i == 0 else None
            x = self.weightNormedConv2D(outChannels=scale*scale, kernelSize=kernelSize,
                                        padding='valid', activation=act, name=f'residConv{i+1}')(x)
        # See https://arxiv.org/abs/1609.05158
        x = Lambda(lambda x: tf.nn.depth_to_space(x, scale), name='dtsResid')(x)  # Pixel Shuffle!
        return x

    def WDSRNetHRResidualPath(self, imgLR: tf.Tensor, numFilters: int, kernelSize: tuple,
                              numResBlocks: int, patchSizeLR: int, numImgLR: int,
                              scale: int, expRate: int, decayRate: int):
        """High-frequency path: residual 3D conv blocks followed by a
        frame-count-specific reduce/upscale stage and a pixel shuffle.

        NOTE(review): numImgLR values other than 7/9/13/19 skip the
        reduction stage entirely, which would break the Reshape below —
        confirm callers only ever use these frame counts.
        """
        x = self.weightNormedConv3D(numFilters, kernelSize, 'same', activation='relu', name='mainConv1')(imgLR)
        for i in range(numResBlocks):
            x = self.ResConv3D(x, numFilters, expRate, decayRate, kernelSize, i)
        if numImgLR == 7:
            x = self.ConvReduceAndUpscalev2(x, numImgLR, scale, numFilters, kernelSize)
        elif numImgLR == 9:
            x = self.ConvReduceAndUpscale(x, numImgLR, scale, numFilters, kernelSize)
        elif numImgLR == 13:
            x = self.ConvReduceAndUpscalev3(x, numImgLR, scale, numFilters, kernelSize)
        elif numImgLR == 19:
            x = self.ConvReduceAndUpscaleEx(x, numImgLR, scale, numFilters, kernelSize)
        x = Reshape((patchSizeLR, patchSizeLR, scale*scale), name='reshapeMain')(x)
        # See https://arxiv.org/abs/1609.05158
        x = Lambda(lambda x: tf.nn.depth_to_space(x, scale), name='dtsMain')(x)  # Pixel Shuffle!
        return x

    def ConvReduceAndUpscaleEx(self, x: tf.Tensor, numImgLR: int, scale: int, numFilters: int, kernelSize: tuple):
        '''EXPERIMENTAL: reduce/upscale stage for the numImgLR == 19 config.
        Alternates reflect-padding (spatial and/or temporal) with valid
        convolutions so the temporal axis shrinks to length 1.'''
        x = Lambda(lambda x: tf.pad(x, [[0, 0], [2, 2], [2, 2], [2, 2], [0, 0]],
                                    mode='reflect'), name=f'convReducePad_{1}')(x)
        x = self.weightNormedConv3D(numFilters, (5, 5, 5), padding='valid',
                                    activation='relu', name=f'convReducer_{1}')(x)
        x = Lambda(lambda x: tf.pad(x, [[0, 0], [2, 2], [2, 2], [1, 1], [0, 0]],
                                    mode='reflect'), name=f'convReducePad_{2}')(x)
        x = self.weightNormedConv3D(numFilters, kernelSize, padding='valid',
                                    activation='relu', name=f'convReducer_{2}')(x)
        x = Lambda(lambda x: tf.pad(x, [[0, 0], [2, 2], [2, 2], [0, 0], [0, 0]],
                                    mode='reflect'), name=f'convReducePad_{3}')(x)
        x = self.weightNormedConv3D(numFilters, kernelSize, padding='valid',
                                    activation='relu', name=f'convReducer_{3}')(x)
        x = Lambda(lambda x: tf.pad(x, [[0, 0], [2, 2], [2, 2], [0, 0], [0, 0]],
                                    mode='reflect'), name=f'convReducePad_{4}')(x)
        x = self.weightNormedConv3D(numFilters, kernelSize, padding='valid',
                                    activation='relu', name=f'convReducer_{4}')(x)
        x = Lambda(lambda x: tf.pad(x, [[0, 0], [1, 1], [1, 1], [0, 0], [0, 0]],
                                    mode='reflect'), name=f'convReducePad_{5}')(x)
        x = self.weightNormedConv3D(numFilters, kernelSize, padding='valid',
                                    activation='relu', name=f'convReducer_{5}')(x)
        # Unpadded valid convs keep shrinking the temporal axis.
        x = self.weightNormedConv3D(numFilters, kernelSize, padding='valid',
                                    activation='relu', name=f'convReducer_{6}')(x)
        x = self.weightNormedConv3D(numFilters, kernelSize, padding='valid',
                                    activation='relu', name=f'convReducer_{7}')(x)
        x = self.weightNormedConv3D(numFilters, kernelSize, padding='valid',
                                    activation='relu', name=f'convReducer_{8}')(x)
        x = self.weightNormedConv3D(numFilters, kernelSize, padding='valid',
                                    activation='relu', name=f'convReducer_{9}')(x)
        x = self.weightNormedConv3D(numFilters, kernelSize, padding='valid',
                                    activation='relu', name=f'convReducer_{10}')(x)
        # Upscale block
        x = self.weightNormedConv3D(outChannels=scale*scale, kernelSize=kernelSize,
                                    padding='valid', name='upscaleConv1')(x)
        return x

    def ConvReduceAndUpscalev3(self, x: tf.Tensor, numImgLR: int, scale: int, numFilters: int, kernelSize: tuple):
        '''used numLRImg 13 config'''
        # Conv Reducer: three spatially-padded valid convs, then two
        # unpadded ones, collapsing the temporal axis.
        x = Lambda(lambda x: tf.pad(x, [[0, 0], [1, 1], [1, 1], [0, 0], [0, 0]],
                                    mode='reflect'), name=f'convReducePad_{1}')(x)
        x = self.weightNormedConv3D(numFilters, kernelSize, padding='valid',
                                    activation='relu', name=f'convReducer_{1}')(x)
        x = Lambda(lambda x: tf.pad(x, [[0, 0], [1, 1], [1, 1], [0, 0], [0, 0]],
                                    mode='reflect'), name=f'convReducePad_{2}')(x)
        x = self.weightNormedConv3D(numFilters, kernelSize, padding='valid',
                                    activation='relu', name=f'convReducer_{2}')(x)
        x = Lambda(lambda x: tf.pad(x, [[0, 0], [1, 1], [1, 1], [0, 0], [0, 0]],
                                    mode='reflect'), name=f'convReducePad_{3}')(x)
        x = self.weightNormedConv3D(numFilters, kernelSize, padding='valid',
                                    activation='relu', name=f'convReducer_{3}')(x)
        x = self.weightNormedConv3D(numFilters, kernelSize, padding='valid',
                                    activation='relu', name=f'convReducer_{4}')(x)
        x = self.weightNormedConv3D(numFilters, kernelSize, padding='valid',
                                    activation='relu', name=f'convReducer_{5}')(x)
        # Upscale block
        x = self.weightNormedConv3D(outChannels=scale*scale, kernelSize=kernelSize,
                                    padding='valid', name='upscaleConv1')(x)
        return x

    def ConvReduceAndUpscale(self, x: tf.Tensor, numImgLR: int, scale: int, numFilters: int, kernelSize: tuple):
        '''used in patch 38 numLRImg 9 config'''
        # Conv Reducer: only the first iteration reflect-pads spatially.
        for i in range(numImgLR//scale):
            if i == 0:
                x = Lambda(lambda x: tf.pad(x, [[0, 0], [1, 1], [1, 1], [0, 0], [0, 0]],
                                            mode='reflect'), name=f'convReducePad_{i+1}')(x)
            x = self.weightNormedConv3D(numFilters, kernelSize, padding='valid',
                                        activation='relu', name=f'convReducer_{i+1}')(x)
        # Upscale block
        x = self.weightNormedConv3D(outChannels=scale*scale, kernelSize=kernelSize,
                                    padding='valid', name='upscaleConv1')(x)
        return x

    def ConvReduceAndUpscalev2(self, x: tf.Tensor, numImgLR: int, scale: int, numFilters: int, kernelSize: tuple):
        '''used in patch 38 numLRImg 7 config'''
        # Conv Reducer (no padding at all in this variant).
        for i in range(numImgLR//scale):
            x = self.weightNormedConv3D(numFilters, kernelSize, padding='valid',
                                        activation='relu', name=f'convReducer_{i+1}')(x)
        # Upscale block
        x = self.weightNormedConv3D(outChannels=scale*scale, kernelSize=kernelSize,
                                    padding='valid', name='upscaleConv1')(x)
        return x

    def ResConv3D(self, xIn: tf.Tensor, numFilters: int, expRate: int, decayRate: float, kernelSize: int, blockNum: int):
        """WDSR residual block: wide 1x1x1 expansion, 1x1x1 contraction,
        then a same-padded conv, with an identity skip connection."""
        # Expansion Conv3d | Same padding
        x = self.weightNormedConv3D(outChannels=numFilters*expRate, kernelSize=1, padding='same',
                                    activation='relu', name=f'expConv_{blockNum}')(xIn)
        # Decay Conv3d | Same padding
        x = self.weightNormedConv3D(outChannels=int(numFilters*decayRate), kernelSize=1,
                                    padding='same', name=f'decConv_{blockNum}')(x)
        # Norm Conv3D | Same padding
        x = self.weightNormedConv3D(outChannels=numFilters, kernelSize=kernelSize,
                                    padding='same', name=f'normConv_{blockNum}')(x)
        # Add input and result
        out = Add(name=f'AddResConv_{blockNum}')([x, xIn])
        return out

    def weightNormedConv3D(self, outChannels: int, kernelSize: int, padding: str, activation=None, name=''):
        """Return a weight-normalized Conv3D layer (data_init disabled)."""
        return WeightNormalization(Conv3D(outChannels, kernelSize, padding=padding, activation=activation),
                                   data_init=False, name=name)

    def weightNormedConv2D(self, outChannels: int, kernelSize: int, padding: str, activation=None, name=''):
        """Return a weight-normalized Conv2D layer (data_init disabled)."""
        return WeightNormalization(Conv2D(outChannels, kernelSize, padding=padding, activation=activation),
                                   data_init=False, name=name)

    def normalize(self, x):
        """Standardize pixel values with the dataset mean/std."""
        return (x-self.mean)/self.std

    def denormalize(self, x):
        """Invert normalize(): map standardized values back to pixel range."""
        return x * self.std + self.mean
class iWDSRConv3D:
    """Instance-normalized variant of :class:`WDSRConv3D`.

    Same two-path layout, but every conv goes through conv3DIns/conv2DIns
    (weight normalization + InstanceNormalization) and uses the Mish
    activation instead of ReLU.
    """

    def __init__(self, name, band, mean, std, maxShift):
        # name/band only label the generated Keras model.
        self.name = name
        self.band = band
        # Dataset statistics consumed by normalize()/denormalize().
        self.mean = mean
        self.std = std
        # Extra patch border reserved for registration shifts.
        self.maxShift = maxShift

    def build(self, scale: int, numFilters: int, kernelSize: tuple,
              numResBlocks: int, expRate: int, decayRate: float,
              numImgLR: int, patchSizeLR: int, isGrayScale: bool) -> Model:
        """Assemble and return the Keras model (same I/O contract as
        WDSRConv3D.build)."""
        # Define inputs
        imgLRIn = Input(shape=(patchSizeLR + self.maxShift, patchSizeLR + self.maxShift, numImgLR, 1)) if isGrayScale \
            else Input(shape=(patchSizeLR + self.maxShift, patchSizeLR + self.maxShift, numImgLR, 3))
        # Get mean of instance mean patch and over all mean pixel value
        meanImgLR = Lambda(lambda x: tf.reduce_mean(x, axis=3, name='meanLR'), name='getMeanLR')(imgLRIn)
        # Normalize Instance
        imgLR = Lambda(self.normalize, name='normImgLR')(imgLRIn)
        meanImgLR = Lambda(self.normalize, name='normMeanImgLR')(meanImgLR)
        # ImgResBlocks | Main Path
        main = self.iWDSRNetMainPath(imgLR, numFilters, kernelSize,
                                     numResBlocks, patchSizeLR, numImgLR,
                                     scale, expRate, decayRate)
        # MeanResBlocks | Residual Path
        residual = self.iWDSRNetResidualPath(meanImgLR, kernelSize[:-1], scale)
        # Fuse Main and Residual Patch
        out = Add(name='mainPlusResid')([main, residual])
        # Denormalize Instance
        out = Lambda(self.denormalize, name='denorm')(out)
        return Model(imgLRIn, out, name=f'WDSRConv3D_{self.band}_{self.name}')

    def iWDSRNetResidualPath(self, x: tf.Tensor, kernelSize: tuple, scale: int):
        """Residual path over the mean image: 2D convs down to scale*scale
        channels, then pixel shuffle.

        NOTE(review): the loop below reuses names residConv1..residConv{scale}
        already taken by the three explicit convs above — Keras rejects
        duplicate layer names, so this path likely fails for scale >= 1;
        confirm whether this variant is still in use.
        """
        x = self.conv2DIns(x, outChannels=scale*scale, kernelSize=kernelSize,
                           padding='valid', activation='mish', name='residConv1')
        x = self.conv2DIns(x, outChannels=scale*scale, kernelSize=kernelSize,
                           padding='valid', name='residConv2')
        x = self.conv2DIns(x, outChannels=scale*scale, kernelSize=kernelSize,
                           padding='valid', name='residConv3')
        for i in range(scale):
            act = 'mish' if i == 0 else None
            x = self.conv2DIns(x, outChannels=scale*scale, kernelSize=kernelSize,
                               padding='valid', activation=act, name=f'residConv{i+1}')
        # See https://arxiv.org/abs/1609.05158
        x = Lambda(lambda x: tf.nn.depth_to_space(x, scale), name='dtsResid')(x)  # Pixel Shuffle!
        return x

    def iWDSRNetMainPath(self, imgLR: tf.Tensor, numFilters: int, kernelSize: tuple,
                         numResBlocks: int, patchSizeLR: int, numImgLR: int,
                         scale: int, expRate: int, decayRate: int):
        """Main path: residual 3D blocks, then the single reduce/upscale
        stage (no per-frame-count dispatch in this variant)."""
        x = self.conv3DIns(imgLR, numFilters, kernelSize, 'same', activation='mish', name='mainConv1')
        for i in range(numResBlocks):
            x = self.ResConv3D(x, numFilters, expRate, decayRate, kernelSize, i)
        x = self.ConvReduceAndUpscale(x, numImgLR, scale, numFilters, kernelSize)
        x = Reshape((patchSizeLR, patchSizeLR, scale*scale), name='reshapeMain')(x)
        # See https://arxiv.org/abs/1609.05158
        x = Lambda(lambda x: tf.nn.depth_to_space(x, scale), name='dtsMain')(x)  # Pixel Shuffle!
        return x

    def ConvReduceAndUpscale(self, x: tf.Tensor, numImgLR: int, scale: int, numFilters: int, kernelSize: tuple):
        '''used in patch 38 numLRImg 9 config'''
        # Conv Reducer: only the first iteration reflect-pads spatially.
        # Layer names here are 0-based, unlike WDSRConv3D's 1-based names.
        for i in range(numImgLR//scale):
            if i == 0:
                x = Lambda(lambda x: tf.pad(x, [[0, 0], [1, 1], [1, 1], [0, 0], [0, 0]],
                                            mode='reflect'), name=f'convReducePad_{i}')(x)
            x = self.conv3DIns(x, numFilters, kernelSize, padding='valid',
                               activation='mish', name=f'convReducer_{i}')
        # Upscale block
        x = self.conv3DIns(x, outChannels=scale*scale, kernelSize=kernelSize,
                           padding='valid', name='upscaleConv1')
        return x

    def ConvReduceAndUpscalev2(self, x: tf.Tensor, numImgLR: int, scale: int, numFilters: int, kernelSize: tuple):
        '''used in patch 38 numLRImg 7 config'''
        # Conv Reducer (no padding in this variant).
        for i in range(numImgLR//scale):
            x = self.conv3DIns(x, numFilters, kernelSize, padding='valid',
                               activation='mish', name=f'convReducer_{i}')
        # Upscale block
        x = self.conv3DIns(x, outChannels=scale*scale, kernelSize=kernelSize,
                           padding='valid', name='upscaleConv1')
        return x

    def ResConv3D(self, xIn: tf.Tensor, numFilters: int, expRate: int, decayRate: float, kernelSize: int, blockNum: int):
        """Residual block (Mish variant): 1x1x1 expansion, 1x1x1 contraction,
        same-padded conv, identity skip."""
        # Expansion Conv3d | Same padding
        x = self.conv3DIns(xIn, outChannels=numFilters*expRate, kernelSize=1, padding='same',
                           activation='mish', name=f'expConv_{blockNum}')
        # Decay Conv3d | Same padding
        x = self.conv3DIns(x, outChannels=int(numFilters*decayRate), kernelSize=1,
                           padding='same', name=f'decConv_{blockNum}')
        # Norm Conv3D | Same padding
        x = self.conv3DIns(x, outChannels=numFilters, kernelSize=kernelSize,
                           padding='same', name=f'normConv_{blockNum}')
        # Add input and result
        out = Add(name=f'AddResConv_{blockNum}')([x, xIn])
        return out

    def weightNormedConv3D(self, outChannels: int, kernelSize: int, padding: str, activation=None, name=''):
        """Return a weight-normalized Conv3D layer (data_init disabled)."""
        return WeightNormalization(Conv3D(outChannels, kernelSize, padding=padding, activation=activation),
                                   data_init=False, name=name)

    def weightNormedConv2D(self, outChannels: int, kernelSize: int, padding: str, activation=None, name=''):
        """Return a weight-normalized Conv2D layer (data_init disabled)."""
        return WeightNormalization(Conv2D(outChannels, kernelSize, padding=padding, activation=activation),
                                   data_init=False, name=name)

    def conv3DIns(self, xIn, outChannels, kernelSize, padding, activation=None, name=''):
        """Apply weight-normalized Conv3D + InstanceNormalization to xIn,
        followed by the requested activation (None / 'leakyrelu' / 'mish').

        NOTE(review): any other activation string falls through all branches
        and returns None implicitly — callers only pass the three handled
        values, but an explicit error would be safer.
        """
        if activation is None:
            x = WeightNormalization(Conv3D(outChannels, kernelSize, padding=padding, activation=None),
                                    data_init=False, name=name)(xIn)
            x = InstanceNormalization(axis=4,
                                      center=True,
                                      scale=True,
                                      beta_initializer="random_uniform",
                                      gamma_initializer="random_uniform")(x)
            return x
        if activation == 'leakyrelu':
            x = WeightNormalization(Conv3D(outChannels, kernelSize, padding=padding, activation=None),
                                    data_init=False, name=name)(xIn)
            x = InstanceNormalization(axis=4,
                                      center=True,
                                      scale=True,
                                      beta_initializer="random_uniform",
                                      gamma_initializer="random_uniform")(x)
            x = tf.keras.layers.LeakyReLU(alpha=0.3)(x)
            return x
        if activation == 'mish':
            x = WeightNormalization(Conv3D(outChannels, kernelSize, padding=padding, activation=None),
                                    data_init=False, name=name)(xIn)
            x = InstanceNormalization(axis=4,
                                      center=True,
                                      scale=True,
                                      beta_initializer="random_uniform",
                                      gamma_initializer="random_uniform")(x)
            x = self.mish(x)
            return x

    def conv2DIns(self, xIn, outChannels, kernelSize, padding, activation=None, name=''):
        """2D counterpart of conv3DIns (InstanceNormalization on axis=3).

        NOTE(review): same implicit-None fallthrough for unhandled
        activation strings as conv3DIns.
        """
        if activation is None:
            x = WeightNormalization(Conv2D(outChannels, kernelSize, padding=padding, activation=None),
                                    data_init=False, name=name)(xIn)
            x = InstanceNormalization(axis=3,
                                      center=True,
                                      scale=True,
                                      beta_initializer="random_uniform",
                                      gamma_initializer="random_uniform")(x)
            return x
        if activation == 'leakyrelu':
            x = WeightNormalization(Conv2D(outChannels, kernelSize, padding=padding, activation=None),
                                    data_init=False, name=name)(xIn)
            x = InstanceNormalization(axis=3,
                                      center=True,
                                      scale=True,
                                      beta_initializer="random_uniform",
                                      gamma_initializer="random_uniform")(x)
            x = tf.keras.layers.LeakyReLU(alpha=0.3)(x)
            return x
        if activation == 'mish':
            x = WeightNormalization(Conv2D(outChannels, kernelSize, padding=padding, activation=None),
                                    data_init=False, name=name)(xIn)
            x = InstanceNormalization(axis=3,
                                      center=True,
                                      scale=True,
                                      beta_initializer="random_uniform",
                                      gamma_initializer="random_uniform")(x)
            x = self.mish(x)
            return x

    def mish(self, x):
        """Mish activation: x * tanh(softplus(x)) (https://arxiv.org/abs/1908.08681)."""
        return x * tf.math.tanh(tf.keras.activations.softplus(x))

    def normalize(self, x):
        """Standardize pixel values with the dataset mean/std."""
        return (x-self.mean)/self.std

    def denormalize(self, x):
        """Invert normalize(): map standardized values back to pixel range."""
        return x * self.std + self.mean
class FuseNetConv2D:
    """Small residual 2D fusion network over a fixed 384x384 single-channel
    input. build() uses FuseNetv3 only; FuseNet and FuseNetv2 are kept as
    unused alternative heads."""

    def __init__(self, name, band):
        # name/band only label the generated Keras model.
        self.name = name
        self.band = band

    def build(self) -> Model:
        """Return the Keras model: input + FuseNetv3(input) (residual add)."""
        # Define inputs
        imgLRIn = Input(shape=(384, 384, 1))
        # Fusing patch
        main = self.FuseNetv3(imgLRIn)
        # Fuse Main and Residual Patch
        out = Add(name='mainPlusInput')([imgLRIn, main])
        return Model(imgLRIn, out, name=f'FuseNet_{self.band}_{self.name}')

    def FuseNet(self, xIn):
        """Four Conv2D/InstanceNorm/LeakyReLU stages then depth_to_space(3).

        NOTE(review): unused by build(); the third positional Conv2D arg is
        strides, so the first conv is stride 3 — the final pixel shuffle by 3
        restores the spatial size.
        """
        x = Conv2D(128, 3, 3, padding='same')(xIn)
        x = InstanceNormalization(axis=3,
                                  center=True,
                                  scale=True,
                                  beta_initializer="random_uniform",
                                  gamma_initializer="random_uniform")(x)
        x = tf.keras.layers.LeakyReLU(alpha=0.3)(x)
        x = Conv2D(64, 3, 1, padding='same')(x)
        x = InstanceNormalization(axis=3,
                                  center=True,
                                  scale=True,
                                  beta_initializer="random_uniform",
                                  gamma_initializer="random_uniform")(x)
        x = tf.keras.layers.LeakyReLU(alpha=0.3)(x)
        x = Conv2D(32, 3, 1, padding='same')(x)
        x = InstanceNormalization(axis=3,
                                  center=True,
                                  scale=True,
                                  beta_initializer="random_uniform",
                                  gamma_initializer="random_uniform")(x)
        x = tf.keras.layers.LeakyReLU(alpha=0.3)(x)
        x = Conv2D(9, 3, 1, padding='same')(x)
        x = InstanceNormalization(axis=3,
                                  center=True,
                                  scale=True,
                                  beta_initializer="random_uniform",
                                  gamma_initializer="random_uniform")(x)
        x = tf.keras.layers.LeakyReLU(alpha=0.3)(x)
        x = Lambda(lambda x: tf.nn.depth_to_space(x, 3), name='dtsMain2')(x)
        return x

    def FuseNetv2(self, xIn):
        """Stride-8 downsample + conv then depth_to_space(8).

        NOTE(review): unused by build().
        """
        x = Conv2D(64, 8, 8, padding='same')(xIn)
        x = InstanceNormalization(axis=3,
                                  center=True,
                                  scale=True,
                                  beta_initializer="random_uniform",
                                  gamma_initializer="random_uniform")(x)
        x = tf.keras.layers.LeakyReLU(alpha=0.3)(x)
        x = Conv2D(64, 3, 1, padding='same')(x)
        x = InstanceNormalization(axis=3,
                                  center=True,
                                  scale=True,
                                  beta_initializer="random_uniform",
                                  gamma_initializer="random_uniform")(x)
        x = tf.keras.layers.LeakyReLU(alpha=0.3)(x)
        x = Lambda(lambda x: tf.nn.depth_to_space(x, 8), name='dtsMain2')(x)
        return x

    def FuseNetv3(self, xIn):
        """Single wide (48x48 kernel, stride 1) conv stage, then a channel
        mean so the output is single-channel again for the residual add."""
        x = Conv2D(64, 48, 1, padding='same')(xIn)
        x = InstanceNormalization(axis=3,
                                  center=True,
                                  scale=True,
                                  beta_initializer="random_uniform",
                                  gamma_initializer="random_uniform")(x)
        x = tf.keras.layers.LeakyReLU(alpha=0.3)(x)
        x = Lambda(lambda x: tf.reduce_mean(x, axis=3, keepdims=True), name='mean')(x)
        return x
| 51.338947
| 121
| 0.547773
| 2,494
| 24,386
| 5.302727
| 0.081796
| 0.019282
| 0.048242
| 0.050813
| 0.911304
| 0.903138
| 0.887637
| 0.874556
| 0.84189
| 0.840681
| 0
| 0.022972
| 0.334167
| 24,386
| 474
| 122
| 51.447257
| 0.791526
| 0.052899
| 0
| 0.774286
| 0
| 0
| 0.07958
| 0.006125
| 0
| 0
| 0
| 0.00211
| 0
| 1
| 0.091429
| false
| 0
| 0.011429
| 0.025714
| 0.205714
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
e0d62fe13d8e2d48474d08625c825c7fb819b52f
| 5,314
|
py
|
Python
|
address/test/test_address.py
|
sjmelia/pyaddress
|
e7e3f8c9f331c4da36f5eacfe122757fc4c34d93
|
[
"BSD-3-Clause"
] | null | null | null |
address/test/test_address.py
|
sjmelia/pyaddress
|
e7e3f8c9f331c4da36f5eacfe122757fc4c34d93
|
[
"BSD-3-Clause"
] | null | null | null |
address/test/test_address.py
|
sjmelia/pyaddress
|
e7e3f8c9f331c4da36f5eacfe122757fc4c34d93
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import absolute_import
import unittest
from ..address import Address, AddressParser
class AddressTest(unittest.TestCase):
    """Tests for :class:`Address` parsing of free-form US street addresses.

    Uses assertEqual (which reports both sides on failure) instead of the
    original assertTrue(x == y) style, and a shared _check helper to remove
    the eight-assert duplication from every test.
    """

    parser = None

    def setUp(self):
        # A fresh parser per test keeps the cases independent.
        self.parser = AddressParser()

    def _check(self, addr, house_number=None, street_prefix=None, street=None,
               street_suffix=None, city=None, state=None, zip_code=None,
               apartment=None):
        """Assert every parsed component of *addr*; unspecified ones must be None."""
        self.assertEqual(addr.house_number, house_number)
        self.assertEqual(addr.street_prefix, street_prefix)
        self.assertEqual(addr.street, street)
        self.assertEqual(addr.street_suffix, street_suffix)
        self.assertEqual(addr.city, city)
        self.assertEqual(addr.state, state)
        self.assertEqual(addr.zip, zip_code)
        self.assertEqual(addr.apartment, apartment)

    def test_basic_full_address(self):
        addr = Address("2 N. Park Street, Madison, WI 53703", self.parser)
        self._check(addr, house_number="2", street_prefix="N.", street="Park",
                    street_suffix="St.", city="Madison", state="WI",
                    zip_code="53703")

    def test_multi_address(self):
        # A "416/418" house range keeps only the first number.
        addr = Address("416/418 N. Carroll St.", self.parser)
        self._check(addr, house_number="416", street_prefix="N.",
                    street="Carroll", street_suffix="St.")

    def test_no_suffix(self):
        addr = Address("230 Lakelawn", self.parser)
        self._check(addr, house_number="230", street="Lakelawn")

    # NOTE(review): a disabled test for building-name parsing
    # ("Roundhouse Apartments 626 Langdon") was here; restore it once
    # addr.building is supported.

    def test_streets_named_after_states(self):
        # "Washington" must parse as a street name, not a state.
        addr = Address("504 W. Washington Ave.", self.parser)
        self._check(addr, house_number="504", street_prefix="W.",
                    street="Washington", street_suffix="Ave.")

    def test_hash_apartment(self):
        addr = Address("407 West Doty St. #2", self.parser)
        self._check(addr, house_number="407", street_prefix="W.",
                    street="Doty", street_suffix="St.", apartment="#2")

    def test_stray_dash_apartment(self):
        # A stray " - " before the apartment marker must be tolerated.
        addr = Address("407 West Doty St. - #2", self.parser)
        self._check(addr, house_number="407", street_prefix="W.",
                    street="Doty", street_suffix="St.", apartment="#2")

    def test_suffixless_street_with_city(self):
        addr = Address("431 West Johnson, Madison, WI", self.parser)
        self._check(addr, house_number="431", street_prefix="W.",
                    street="Johnson", city="Madison", state="WI")
class AddressParserTest(unittest.TestCase):
    """Tests that AddressParser loads its reference data (suffixes, cities,
    states). assertEqual/assertIn replace assertTrue(x == y) for clearer
    failure output."""

    ap = None

    def setUp(self):
        self.ap = AddressParser()

    def test_load_suffixes(self):
        self.assertEqual(self.ap.suffixes["ALLEY"], "ALY")

    def test_load_cities(self):
        self.assertIn("wisconsin rapids", self.ap.cities)

    def test_load_states(self):
        self.assertEqual(self.ap.states["Wisconsin"], "WI")

    # Not using preloaded streets any more.
    # def test_load_streets(self):
    #     self.assertIn("mifflin", self.ap.streets)
if __name__ == '__main__':
    # Allow running this test module directly: python test_address.py
    unittest.main()
| 38.507246
| 74
| 0.628528
| 613
| 5,314
| 5.337684
| 0.141925
| 0.325183
| 0.396088
| 0.201711
| 0.736553
| 0.700489
| 0.700489
| 0.700489
| 0.700489
| 0.612469
| 0
| 0.014991
| 0.234287
| 5,314
| 137
| 75
| 38.788321
| 0.789137
| 0.205871
| 0
| 0.505618
| 0
| 0
| 0.077254
| 0
| 0
| 0
| 0
| 0
| 0.662921
| 1
| 0.134831
| false
| 0
| 0.033708
| 0
| 0.213483
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
4604e96bf1e6e470a7fca2752369c5ff04c64d4a
| 144
|
py
|
Python
|
custom/_legacy/pact/tests/__init__.py
|
dslowikowski/commcare-hq
|
ad8885cf8dab69dc85cb64f37aeaf06106124797
|
[
"BSD-3-Clause"
] | 1
|
2015-02-10T23:26:39.000Z
|
2015-02-10T23:26:39.000Z
|
custom/_legacy/pact/tests/__init__.py
|
SEL-Columbia/commcare-hq
|
992ee34a679c37f063f86200e6df5a197d5e3ff6
|
[
"BSD-3-Clause"
] | 1
|
2022-03-12T01:03:25.000Z
|
2022-03-12T01:03:25.000Z
|
custom/_legacy/pact/tests/__init__.py
|
johan--/commcare-hq
|
86ee99c54f55ee94e4c8f2f6f30fc44e10e69ebd
|
[
"BSD-3-Clause"
] | null | null | null |
# Aggregate all pact test modules so the test runner picks them up from
# this package.
# NOTE(review): these are Python-2-style implicit relative star-imports;
# under Python 3 they would need to become `from .schedules import *` etc.
from schedules import *
from dots_algorithm import *
from dot_submission import *
from dot_ordering import *
from regimen_properties import *
| 18
| 32
| 0.8125
| 19
| 144
| 5.947368
| 0.526316
| 0.353982
| 0.230089
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.152778
| 144
| 7
| 33
| 20.571429
| 0.92623
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
1cdfa3dcf398312f2f6c7914a6569eeb559fe0ce
| 15,228
|
py
|
Python
|
tests/test_message.py
|
adbpy/wire-protocol
|
d892b5ff51d7c9340dea5c671efb9311b5e4d957
|
[
"Apache-2.0"
] | 7
|
2017-07-25T04:30:48.000Z
|
2021-09-22T17:27:50.000Z
|
tests/test_message.py
|
adbpy/wire-protocol
|
d892b5ff51d7c9340dea5c671efb9311b5e4d957
|
[
"Apache-2.0"
] | 382
|
2017-10-18T04:34:25.000Z
|
2021-08-02T05:35:37.000Z
|
tests/test_message.py
|
adbpy/wire-protocol
|
d892b5ff51d7c9340dea5c671efb9311b5e4d957
|
[
"Apache-2.0"
] | 1
|
2018-01-15T20:10:33.000Z
|
2018-01-15T20:10:33.000Z
|
"""
test_message
~~~~~~~~~~~~
Contains tests for the :mod:`~adbwp.message` module.
"""
import pytest
from adbwp import consts, enums, header, message, payload
def test_new_computes_header_data_length_based_on_data_payload(command_type, valid_payload_bytes):
    """message.new must set header.data_length to the payload's byte length."""
    msg = message.new(command_type, data=valid_payload_bytes)
    assert msg.header.data_length == len(valid_payload_bytes)


def test_new_computes_header_data_checksum_based_on_data_payload(command_type, valid_payload_bytes):
    """message.new must set header.data_checksum from the payload bytes."""
    msg = message.new(command_type, data=valid_payload_bytes)
    assert msg.header.data_checksum == payload.checksum(valid_payload_bytes)


def test_new_computes_header_magic_based_on_data_payload(command_type):
    """message.new must derive header.magic from the command value."""
    msg = message.new(command_type)
    assert msg.header.magic == header.magic(command_type)


def test_new_supports_default_values(command_type):
    """Calling message.new with only a command zeroes the args and data."""
    msg = message.new(command_type)
    assert msg.data == b''
    assert msg.header.command == command_type
    assert msg.header.arg0 == 0
    assert msg.header.arg1 == 0


def test_new_assigns_field_values(command_type, random_arg0, random_arg1, valid_payload_bytes):
    """Every explicit argument to message.new lands in the header/data fields."""
    msg = message.new(command_type, random_arg0, random_arg1, valid_payload_bytes)
    assert msg.data == valid_payload_bytes
    assert msg.header.command == command_type
    assert msg.header.arg0 == random_arg0
    assert msg.header.arg1 == random_arg1
    assert msg.header.data_length == len(valid_payload_bytes)
    assert msg.header.data_checksum == payload.checksum(valid_payload_bytes)
    assert msg.header.magic == header.magic(command_type)


def test_new_raises_on_incorrect_payload_type(command_type, invalid_payload_type):
    """A payload of an unsupported type makes message.new raise ValueError."""
    with pytest.raises(ValueError):
        message.new(command_type, data=invalid_payload_type)


def test_new_raises_on_data_payload_too_large(command_type, bytes_larger_than_maxdata):
    """A payload beyond consts.MAXDATA makes message.new raise ValueError."""
    with pytest.raises(ValueError):
        message.new(command_type, data=bytes_larger_than_maxdata)
def test_from_header_assigns_header(command_type, random_arg0, random_arg1):
    """message.from_header adopts the given header's fields verbatim."""
    msg = message.from_header(header.new(command_type, random_arg0, random_arg1))
    assert msg.header.command == command_type
    assert msg.header.arg0 == random_arg0
    assert msg.header.arg1 == random_arg1


def test_from_header_raises_on_header_with_incorrect_payload_type(command_type, invalid_payload_type):
    """An unsupported payload type makes message.from_header raise ValueError."""
    with pytest.raises(ValueError):
        message.from_header(header.new(command_type), data=invalid_payload_type)


def test_from_header_raises_on_data_payload_too_large(command_type, bytes_larger_than_maxdata):
    """A payload beyond consts.MAXDATA makes message.from_header raise ValueError."""
    with pytest.raises(ValueError):
        message.from_header(header.new(command_type), data=bytes_larger_than_maxdata)
def test_connect_assigns_correct_header_field_values():
    """message.connect builds a CNXN header carrying version and maxdata."""
    msg = message.connect('', '')
    assert msg.header.command == enums.Command.CNXN
    assert msg.header.arg0 == consts.VERSION
    assert msg.header.arg1 == consts.CONNECT_AUTH_MAXDATA


def test_connect_sets_system_identity_string_data_payload(random_serial, random_banner, system_type):
    """message.connect carries the system identity string as its payload."""
    identity = payload.system_identity_string(system_type, random_serial, random_banner)
    msg = message.connect(random_serial, random_banner, system_type)
    assert msg.data == identity
    assert msg.header.data_length == len(identity)
    assert msg.header.data_checksum == payload.checksum(identity)
    assert msg.header.magic == header.magic(enums.Command.CNXN)


def test_connect_raises_on_system_identity_too_large(random_serial, system_type,
                                                     str_larger_than_connect_auth_max_data):
    """An identity beyond consts.CONNECT_AUTH_MAXDATA makes connect raise ValueError."""
    with pytest.raises(ValueError):
        message.connect(random_serial, str_larger_than_connect_auth_max_data, system_type)
def test_auth_signature_assigns_correct_header_field_values():
    """message.auth_signature builds an AUTH/SIGNATURE header with arg1 == 0."""
    msg = message.auth_signature(b'')
    assert msg.header.command == enums.Command.AUTH
    assert msg.header.arg0 == enums.AuthType.SIGNATURE
    assert msg.header.arg1 == 0


def test_auth_signature_sets_signature_data_payload(random_signature):
    """message.auth_signature carries the signature bytes as its payload."""
    signature_bytes = payload.as_bytes(random_signature)
    msg = message.auth_signature(random_signature)
    assert msg.data == signature_bytes
    assert msg.header.data_length == len(signature_bytes)
    assert msg.header.data_checksum == payload.checksum(signature_bytes)
    assert msg.header.magic == header.magic(enums.Command.AUTH)


def test_auth_signature_raises_on_signature_too_large(bytes_larger_than_connect_auth_max_data):
    """A signature beyond consts.CONNECT_AUTH_MAXDATA raises ValueError."""
    with pytest.raises(ValueError):
        message.auth_signature(bytes_larger_than_connect_auth_max_data)
def test_auth_rsa_public_key_assigns_correct_header_field_values():
    """
    Assert that :func:`~adbwp.message.auth_rsa_public_key` builds a
    :class:`~adbwp.message.Message` whose header carries the expected
    AUTH/RSAPUBLICKEY field values.
    """
    msg = message.auth_rsa_public_key(b'')
    assert msg.header.command == enums.Command.AUTH
    assert msg.header.arg0 == enums.AuthType.RSAPUBLICKEY
    assert msg.header.arg1 == 0


def test_auth_rsa_public_key_sets_public_key_data_payload(random_rsa_public_key):
    """
    Assert that :func:`~adbwp.message.auth_rsa_public_key` builds a
    :class:`~adbwp.message.Message` whose data payload is the null-terminated
    RSA public key bytes.
    """
    key_bytes = payload.null_terminate(random_rsa_public_key)
    msg = message.auth_rsa_public_key(random_rsa_public_key)
    assert msg.data == key_bytes
    assert msg.header.data_length == len(key_bytes)
    assert msg.header.data_checksum == payload.checksum(key_bytes)
    assert msg.header.magic == header.magic(enums.Command.AUTH)


def test_auth_rsa_public_key_raises_on_public_key_too_large(bytes_larger_than_connect_auth_max_data):
    """
    Assert that :func:`~adbwp.message.auth_rsa_public_key` raises :class:`ValueError`
    when the public key exceeds :attr:`~adbwp.consts.CONNECT_AUTH_MAXDATA`.
    """
    with pytest.raises(ValueError):
        message.auth_rsa_public_key(bytes_larger_than_connect_auth_max_data)
def test_open_assigns_correct_header_field_values(random_local_id):
    """
    Assert that :func:`~adbwp.message.open` creates a :class:`~adbwp.message.Message` that
    contains a header with the expected field values.
    """
    instance = message.open(random_local_id, '')
    assert instance.header.command == enums.Command.OPEN
    assert instance.header.arg0 == random_local_id
    assert instance.header.arg1 == 0


def test_open_sets_destination_data_payload(random_local_id, random_destination):
    """
    Assert that :func:`~adbwp.message.open` creates a :class:`~adbwp.message.Message` that
    sets the data payload to the given stream destination.
    """
    expected = payload.null_terminate(random_destination)
    instance = message.open(random_local_id, random_destination)
    assert instance.header.data_length == len(expected)
    assert instance.header.data_checksum == payload.checksum(expected)
    assert instance.header.magic == header.magic(enums.Command.OPEN)
    assert instance.data == expected


def test_open_raises_on_zero_local_id(random_destination):
    """
    Assert that :func:`~adbwp.message.open` raises a :class:`ValueError` when given
    a local id value that is zero.
    """
    with pytest.raises(ValueError):
        message.open(0, random_destination)


def test_open_raises_on_destination_too_large(random_local_id, bytes_larger_than_maxdata):
    """
    Assert that :func:`~adbwp.message.open` raises a :class:`ValueError` when given a
    destination that is larger than :attr:`~adbwp.consts.MAXDATA`.
    """
    with pytest.raises(ValueError):
        message.open(random_local_id, bytes_larger_than_maxdata)
def test_ready_assigns_correct_header_field_values(random_local_id, random_remote_id):
    """
    Assert that :func:`~adbwp.message.ready` creates a :class:`~adbwp.message.Message` that
    contains a header with the expected field values.
    """
    instance = message.ready(random_local_id, random_remote_id)
    assert instance.header.command == enums.Command.OKAY
    assert instance.header.arg0 == random_local_id
    assert instance.header.arg1 == random_remote_id


def test_ready_assigns_empty_data_payload(random_local_id, random_remote_id):
    """
    Assert that :func:`~adbwp.message.ready` creates a :class:`~adbwp.message.Message` that
    does not have a data payload.
    """
    expected = b''
    instance = message.ready(random_local_id, random_remote_id)
    assert instance.header.data_length == len(expected)
    assert instance.header.data_checksum == payload.checksum(expected)
    assert instance.header.magic == header.magic(enums.Command.OKAY)
    assert instance.data == expected


def test_ready_raises_on_zero_local_id(random_remote_id):
    """
    Assert that :func:`~adbwp.message.ready` raises a :class:`ValueError` when given
    a local id value that is zero.
    """
    with pytest.raises(ValueError):
        message.ready(0, random_remote_id)


def test_ready_raises_on_zero_remote_id(random_local_id):
    """
    Assert that :func:`~adbwp.message.ready` raises a :class:`ValueError` when given
    a remote id value that is zero.
    """
    with pytest.raises(ValueError):
        message.ready(random_local_id, 0)
def test_write_assigns_correct_header_field_values(random_local_id, random_remote_id, valid_payload):
    """
    Assert that :func:`~adbwp.message.write` builds a :class:`~adbwp.message.Message`
    whose header carries the expected WRTE field values.
    """
    msg = message.write(random_local_id, random_remote_id, valid_payload)
    assert msg.header.command == enums.Command.WRTE
    assert msg.header.arg0 == random_local_id
    assert msg.header.arg1 == random_remote_id


def test_write_assigns_given_data_payload(random_local_id, random_remote_id, valid_payload, valid_payload_bytes):
    """
    Assert that :func:`~adbwp.message.write` builds a :class:`~adbwp.message.Message`
    whose data payload matches the given payload bytes.
    """
    msg = message.write(random_local_id, random_remote_id, valid_payload)
    assert msg.data == valid_payload_bytes
    assert msg.header.data_length == len(valid_payload_bytes)
    assert msg.header.data_checksum == payload.checksum(valid_payload_bytes)
    assert msg.header.magic == header.magic(enums.Command.WRTE)


def test_write_raises_on_empty_data_payload(random_local_id, random_remote_id):
    """
    Assert that :func:`~adbwp.message.write` raises :class:`ValueError` when given
    an empty data payload.
    """
    with pytest.raises(ValueError):
        message.write(random_local_id, random_remote_id, b'')


def test_write_raises_on_data_payload_too_large(random_local_id, random_remote_id, bytes_larger_than_maxdata):
    """
    Assert that :func:`~adbwp.message.write` raises :class:`ValueError` when the data
    payload exceeds :attr:`~adbwp.consts.MAXDATA`.
    """
    with pytest.raises(ValueError):
        message.write(random_local_id, random_remote_id, bytes_larger_than_maxdata)
def test_close_assigns_correct_header_field_values(random_local_id, random_remote_id):
    """
    Assert that :func:`~adbwp.message.close` creates a :class:`~adbwp.message.Message` that
    contains a header with the expected field values.
    """
    instance = message.close(random_local_id, random_remote_id)
    assert instance.header.command == enums.Command.CLSE
    assert instance.header.arg0 == random_local_id
    assert instance.header.arg1 == random_remote_id


def test_close_assigns_no_data_payload(random_local_id, random_remote_id):
    """
    Assert that :func:`~adbwp.message.close` creates a :class:`~adbwp.message.Message` that
    has no data payload.
    """
    expected = b''
    instance = message.close(random_local_id, random_remote_id)
    assert instance.header.data_length == len(expected)
    assert instance.header.data_checksum == payload.checksum(expected)
    assert instance.header.magic == header.magic(enums.Command.CLSE)
    assert instance.data == expected


def test_close_raises_on_zero_remote_id(random_local_id):
    """
    Assert that :func:`~adbwp.message.close` raises a :class:`ValueError` when given
    a remote id value that is zero.
    """
    with pytest.raises(ValueError):
        message.close(random_local_id, 0)
| 41.268293
| 113
| 0.746979
| 2,056
| 15,228
| 5.261187
| 0.053988
| 0.085421
| 0.10539
| 0.059721
| 0.88703
| 0.854303
| 0.800314
| 0.75936
| 0.716372
| 0.698068
| 0
| 0.003178
| 0.152811
| 15,228
| 368
| 114
| 41.380435
| 0.835284
| 0.309364
| 0
| 0.484076
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.420382
| 1
| 0.216561
| false
| 0
| 0.012739
| 0
| 0.229299
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
1cf5b1615b2afad999e741ad9427f7d5021ef904
| 15,141
|
py
|
Python
|
tests/unit/pypyr/steps/assert_test.py
|
FooBarQuaxx/pypyr
|
ebe56b2200a53e2f38c78bbb42d466bb1556c37c
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/pypyr/steps/assert_test.py
|
FooBarQuaxx/pypyr
|
ebe56b2200a53e2f38c78bbb42d466bb1556c37c
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/pypyr/steps/assert_test.py
|
FooBarQuaxx/pypyr
|
ebe56b2200a53e2f38c78bbb42d466bb1556c37c
|
[
"Apache-2.0"
] | null | null | null |
"""assert.py unit tests."""
import importlib
import pytest
from pypyr.context import Context
from pypyr.errors import KeyNotInContextError, KeyInContextHasNoValueError
# loading assert dynamically because it clashes with built-in assert
assert_step = importlib.import_module('pypyr.steps.assert')
def test_assert_raises_on_empty_context():
    """Context must exist."""
    with pytest.raises(AssertionError):
        # An entirely empty Context fails the step's own precondition check.
        assert_step.run_step(Context())


def test_assert_raises_on_missing_assert():
    """Assert this must exist."""
    context = Context({'k1': 'v1'})
    with pytest.raises(KeyNotInContextError):
        assert_step.run_step(context)


def test_assert_raises_on_empty_assert():
    """Assert can't be empty."""
    context = Context({'assert': None})
    with pytest.raises(KeyInContextHasNoValueError):
        assert_step.run_step(context)


def test_assert_raises_on_empty_assertthis():
    """Assert this must not be empty."""
    context = Context({'assert': {'this': None}})
    with pytest.raises(AssertionError) as err_info:
        assert_step.run_step(context)
    # None is falsy, so the step reports it as evaluating to False.
    assert str(err_info.value) == "assert None evaluated to False."


def test_assert_raises_on_assertthis_false():
    """Assert this boolean False raises."""
    context = Context({'assert': {'this': False}})
    with pytest.raises(AssertionError) as err_info:
        assert_step.run_step(context)
    assert str(err_info.value) == "assert False evaluated to False."


def test_assert_passes_on_assertthis_true():
    """Assert this boolean True passes."""
    context = Context({'assert': {'this': True}})
    # No exception raised means the assert step passed.
    assert_step.run_step(context)
def test_assert_passes_on_assertthis_int():
    """Assert this int 1 is True."""
    context = Context({'assert': {'this': 1}})
    assert_step.run_step(context)


def test_assert_passes_on_assertthis_arb_int():
    """Assert this non-0 int is True."""
    context = Context({'assert': {'this': 55}})
    assert_step.run_step(context)


def test_assert_passes_on_assertthis_arb_negative_int():
    """Assert this non-0 int is True."""
    # Negative ints are truthy too.
    context = Context({'assert': {'this': -55}})
    assert_step.run_step(context)


def test_assert_passes_on_assertthis_float():
    """Assert this non 0 float is True."""
    context = Context({'assert': {'this': 3.5}})
    assert_step.run_step(context)


def test_assert_raises_on_assertthis_false_string():
    """Assert this arbitrary string isn't True raises."""
    # Contrast with test_assert_passes_on_assertthis_true_string below:
    # only the string 'True' passes, an arbitrary string raises.
    context = Context({'assert': {'this': 'arb string'}})
    with pytest.raises(AssertionError) as err_info:
        assert_step.run_step(context)
    assert str(err_info.value) == "assert arb string evaluated to False."


def test_assert_raises_on_assertthis_false_int():
    """Assert this int 0 is False."""
    context = Context({'assert': {'this': 0}})
    with pytest.raises(AssertionError) as err_info:
        assert_step.run_step(context)
    assert str(err_info.value) == "assert 0 evaluated to False."


def test_assert_passes_on_assertthis_true_string():
    """Assert this boolean string to True passes."""
    context = Context({'assert': {'this': 'True'}})
    assert_step.run_step(context)
def test_assert_raises_on_assertthis_not_equals():
    """Assert this does not equal assertEquals."""
    # String comparison is case sensitive.
    context = Context({'assert': {
        'this': 'boom',
        'equals': 'BOOM'}})
    with pytest.raises(AssertionError) as err_info:
        assert_step.run_step(context)
    assert str(err_info.value) == (
        "assert assert['this'] is of type "
        "str and does not equal assert['equals'] of type str.")


def test_assert_passes_on_assertthis_equals():
    """Assert this equals assertEquals."""
    context = Context({'assert': {'this': 'boom',
                                  'equals': 'boom'}})
    assert_step.run_step(context)


def test_assert_passes_on_assertthis_equals_bools():
    """Assert this equals assertEquals true bools."""
    context = Context({'assert': {'this': True,
                                  'equals': True}})
    assert_step.run_step(context)


def test_assert_passes_on_assertthis_equals_bools_false():
    """Assert this equals assertEquals false bools."""
    # False == False passes even though False alone would fail a bare 'this'.
    context = Context({'assert':
                       {'this': False,
                        'equals': False}})
    assert_step.run_step(context)


def test_assert_raises_on_assertthis_not_equals_bools():
    """Assert this does not equal assertEquals bools."""
    context = Context({'assert': {'this': True,
                                  'equals': False}})
    with pytest.raises(AssertionError) as err_info:
        assert_step.run_step(context)
    assert str(err_info.value) == (
        "assert assert['this'] is of type bool and does "
        "not equal assert['equals'] of type bool.")


def test_assert_passes_on_assertthis_equals_ints():
    """Assert this equals assertEquals true ints."""
    context = Context({'assert': {'this': 33,
                                  'equals': 33}})
    assert_step.run_step(context)


def test_assert_raises_on_assertthis_not_equals_ints():
    """Assert this does not equal assertEquals ints."""
    context = Context({'assert': {'this': 0,
                                  'equals': 23}})
    with pytest.raises(AssertionError) as err_info:
        assert_step.run_step(context)
    assert str(err_info.value) == (
        "assert assert['this'] is of type int and does "
        "not equal assert['equals'] of type int.")
def test_assert_passes_on_assertthis_equals_floats():
    """Assert this equals assertEquals true floats."""
    context = Context({'assert': {'this': 123.45,
                                  'equals': 123.45}})
    assert_step.run_step(context)


def test_assert_raises_on_assertthis_not_equals_floats():
    """Assert this does not equal assertEquals floats."""
    context = Context({'assert': {'this': 123.45,
                                  'equals': 5.432}})
    with pytest.raises(AssertionError) as err_info:
        assert_step.run_step(context)
    assert str(err_info.value) == (
        "assert assert['this'] is of type float and "
        "does not equal assert['equals'] of type float.")
def test_assert_raises_on_assertthis_not_equals_string_to_int():
    """Assert this does not equal assertEquals string to int conversion."""
    # No implicit type coercion: '23' (str) != 23 (int).
    context = Context({'assert': {'this': '23',
                                  'equals': 23}})
    with pytest.raises(AssertionError) as err_info:
        assert_step.run_step(context)
    assert str(err_info.value) == (
        "assert assert['this'] is of type str and does "
        "not equal assert['equals'] of type int.")


def test_assert_raises_on_assertthis_not_equals_string_to_bool():
    """Assert this string does not equal assertEquals bool."""
    context = Context({'assert': {'this': True,
                                  'equals': 'True'}})
    with pytest.raises(AssertionError) as err_info:
        assert_step.run_step(context)
    assert str(err_info.value) == (
        "assert assert['this'] is of type bool and does "
        "not equal assert['equals'] of type str.")


def test_assert_passes_on_assertthis_equals_lists():
    """Assert this equals assertEquals true list."""
    context = Context({'assert': {'this': [1, 2, 3, 4.5],
                                  'equals': [1, 2, 3, 4.5]}})
    assert_step.run_step(context)


def test_assert_raises_on_assertthis_not_equals_lists():
    """Assert this list does not equal assertEquals list."""
    # Single differing element (8 vs 3) is enough to fail.
    context = Context({'assert': {'this': [1, 2, 8, 4.5],
                                  'equals': [1, 2, 3, 4.5]}})
    with pytest.raises(AssertionError) as err_info:
        assert_step.run_step(context)
    assert str(err_info.value) == (
        "assert assert['this'] is of type list and does "
        "not equal assert['equals'] of type list.")


def test_assert_passes_on_assertthis_equals_dicts():
    """Assert this equals assertEquals true dict."""
    context = Context({'assert': {
        'this': {'k1': 1, 'k2': [2, 3], 'k3': False},
        'equals': {'k1': 1, 'k2': [2, 3], 'k3': False}}})
    assert_step.run_step(context)


def test_assert_raises_on_assertthis_not_equals_dict_to_list():
    """Assert this dict does not equal assertEquals list."""
    context = Context({'assert': {'this': {'k1': 1, 'k2': [2, 3], 'k3': False},
                                  'equals': [1, 2, 3, 4.5]}})
    with pytest.raises(AssertionError) as err_info:
        assert_step.run_step(context)
    assert str(err_info.value) == (
        "assert assert['this'] is of type dict and does "
        "not equal assert['equals'] of type list.")


def test_assert_raises_on_assertthis_not_equals_dict_to_dict():
    """Assert this dict does not equal assertEquals dict."""
    # Dicts differ only in a nested list element (3 vs 55).
    context = Context({'assert': {
        'this': {'k1': 1, 'k2': [2, 3], 'k3': False},
        'equals': {'k1': 1, 'k2': [2, 55], 'k3': False}}})
    with pytest.raises(AssertionError) as err_info:
        assert_step.run_step(context)
    assert str(err_info.value) == (
        "assert assert['this'] is of type dict and does "
        "not equal assert['equals'] of type dict.")
# ---------------------- substitutions ----------------------------------------


def test_assert_passes_on_assertthis_equals_ints_substitutions():
    """Assert this equals assertEquals true ints with substitutions."""
    # '{k1}' / '{k2}' format expressions resolve to the context values 33/33.
    context = Context({'k1': 33,
                       'k2': 33,
                       'assert': {'this': '{k1}',
                                  'equals': '{k2}'}})
    assert_step.run_step(context)


def test_assert_raises_on_assertthis_not_equals_ints_substitutions():
    """Assert substituted this does not equal substituted assertEquals int."""
    context = Context({'k1': 33,
                       'k2': 34,
                       'assert': {'this': '{k1}',
                                  'equals': '{k2}'}})
    with pytest.raises(AssertionError) as err_info:
        assert_step.run_step(context)
    # Substitution preserves the source type: both sides report as int.
    assert str(err_info.value) == (
        "assert assert['this'] is of type int and does "
        "not equal assert['equals'] of type int.")


# NOTE(review): despite "passes" in the name, this test expects a raise —
# the name looks like a copy-paste slip; the docstring describes the intent.
def test_assert_passes_on_assertthis_not_equals_bools_substitutions():
    """Format expressions don't equivocate string True and bool True."""
    context = Context({'k1': True,
                       'k2': 'True',
                       'assert': {'this': '{k1}',
                                  'equals': '{k2}'}})
    with pytest.raises(AssertionError) as err_info:
        assert_step.run_step(context)
    assert str(err_info.value) == (
        "assert assert['this'] is of type bool and does "
        "not equal assert['equals'] of type str.")


def test_assert_passes_on_assertthis_not_equals_none_substitutions():
    """None equals None."""
    context = Context({'k1': None,
                       'k2': None,
                       'assert': {'this': '{k1}',
                                  'equals': '{k2}'}})
    assert_step.run_step(context)


def test_assert_passes_on_assertthis_true_substitutions():
    """Substituted bool True 'this' passes with no 'equals' comparison."""
    # 'k2' is present but unused here; only the truthiness of '{k1}' matters.
    context = Context({'k1': True,
                       'k2': 'True',
                       'assert': {'this': '{k1}'}})
    assert_step.run_step(context)
def test_assert_raises_on_assertthis_not_equals_none_substitutions():
    """Assert substituted None does not equal assertEquals int."""
    context = Context({'k1': None,
                       'k2': 34,
                       'assert': {'this': '{k1}',
                                  'equals': '{k2}'}})
    with pytest.raises(AssertionError):
        assert_step.run_step(context)


def test_assert_raises_on_assertthis_bool_substitutions():
    """Assert this string substituted bool evaluates False."""
    # 'k2' is present but unused; only '{k1}' is evaluated.
    context = Context({'k1': False,
                       'k2': 34,
                       'assert': {'this': '{k1}'}})
    with pytest.raises(AssertionError) as err_info:
        assert_step.run_step(context)
    # The error message reports the unformatted expression, not its value.
    assert str(err_info.value) == "assert {k1} evaluated to False."
def test_assert_raises_on_assertthis_substitutions_int():
    """Format expressions don't equivocate int 0 and bool True."""
    # 'k2' is present but unused; only the truthiness of '{k1}' matters.
    context = Context({'k1': 0,
                       'k2': 'True',
                       'assert': {'this': '{k1}'}})
    with pytest.raises(AssertionError) as err_info:
        assert_step.run_step(context)
    assert str(err_info.value) == "assert {k1} evaluated to False."
def test_assert_assertthis_int_1_is_true():
    """Format expressions equivocates int 1 and bool True."""
    # 'k2' is present but unused; substituted int 1 is truthy.
    context = Context({'k1': 1,
                       'k2': 'True',
                       'assert': {'this': '{k1}'}})
    assert_step.run_step(context)


def test_assert_raises_on_assertthis_none_substitutions():
    """Assert this string substituted None evaluates False."""
    context = Context({'k1': None,
                       'k2': 34,
                       'assert': {'this': '{k1}'}})
    with pytest.raises(AssertionError) as err_info:
        assert_step.run_step(context)
    assert str(err_info.value) == "assert {k1} evaluated to False."


def test_assert_passes_on_assertthis_equals_dicts_substitutions():
    """Assert this equals assertEquals true dict with nested substitutions."""
    # k1 and k2 resolve to the same value, so the nested lists compare equal.
    context = Context({'k1': 'v1',
                       'k2': 'v1',
                       'assert': {'this': {'k1': 1,
                                           'k2': [2, '{k1}'],
                                           'k3': False},
                                  'equals': {'k1': 1,
                                             'k2': [2, '{k2}'],
                                             'k3': False}}})
    assert_step.run_step(context)


def test_assert_passes_on_assertthis_equals_dict_substitutions():
    """Assert this equals assertEquals where both sides are whole-dict refs."""
    # '{dict1}' / '{dict2}' substitute entire dicts from the context.
    context = Context({'k1': 'v1',
                       'k2': 'v1',
                       'dict1': {'k1': 1,
                                 'k2': [2, '{k1}'],
                                 'k3': False},
                       'dict2': {'k1': 1,
                                 'k2': [2, '{k1}'],
                                 'k3': False},
                       'assert': {'this': '{dict1}',
                                  'equals': '{dict2}'}})
    assert_step.run_step(context)


def test_assert_raises_on_assertthis_not_equals_dict_to_dict_substitutions():
    """Assert this dict does not equal assertEquals dict after substitution."""
    # k1 != k2, so the nested lists differ after substitution.
    context = Context({'k1': 'v1',
                       'k2': 'v2',
                       'assert': {'this': {'k1': 1,
                                           'k2': [2, '{k1}'],
                                           'k3': False},
                                  'equals': {'k1': 1,
                                             'k2': [2, '{k2}'],
                                             'k3': False}}})
    with pytest.raises(AssertionError) as err_info:
        assert_step.run_step(context)
    assert str(err_info.value) == (
        "assert assert['this'] is of type dict and does "
        "not equal assert['equals'] of type dict.")
| 36.136038
| 79
| 0.586091
| 1,763
| 15,141
| 4.80658
| 0.061259
| 0.101487
| 0.064432
| 0.084258
| 0.863701
| 0.813901
| 0.774251
| 0.738612
| 0.684329
| 0.67536
| 0
| 0.019219
| 0.274883
| 15,141
| 418
| 80
| 36.222488
| 0.752619
| 0.131894
| 0
| 0.613636
| 0
| 0
| 0.163147
| 0
| 0
| 0
| 0
| 0
| 0.715909
| 1
| 0.159091
| false
| 0.07197
| 0.018939
| 0
| 0.17803
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
1c01df6e0bef83aeebdd2e0384a0e0b56116cdfb
| 6,777
|
py
|
Python
|
cluster/kmeans/k_means_cardio_stats.py
|
travisMichael/unsupervisedLearning
|
f01bd4e36833de4917811e51042e3937510e2701
|
[
"MIT"
] | null | null | null |
cluster/kmeans/k_means_cardio_stats.py
|
travisMichael/unsupervisedLearning
|
f01bd4e36833de4917811e51042e3937510e2701
|
[
"MIT"
] | null | null | null |
cluster/kmeans/k_means_cardio_stats.py
|
travisMichael/unsupervisedLearning
|
f01bd4e36833de4917811e51042e3937510e2701
|
[
"MIT"
] | null | null | null |
from time import time
from sklearn import metrics
from sklearn.cluster import KMeans
from utils import load_data
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics.cluster import homogeneity_score
from sklearn.decomposition import PCA, FastICA
from sklearn.random_projection import GaussianRandomProjection
# https://scikit-learn.org/stable/auto_examples/cluster/plot_kmeans_digits.html
def run_k_means_on_cardiovascular_data(path):
    """Benchmark k-means on the raw cardiovascular training data for k = 1..15.

    One stats row per cluster count is written to cardiovascular_stats.txt.

    :param path: Path prefix to the repository root containing the data/ dir.
    """
    data_set = 'cardio'
    x_train, y_train = load_data(path + 'data/' + data_set + '/train/')
    # The loop replaces 15 copy-pasted bench_k_means calls; 'with' guarantees
    # the stats file is closed even if a benchmark raises.
    with open("cardiovascular_stats.txt", "w+") as f:
        for k in range(1, 16):
            bench_k_means(str(k), x_train, y_train, k, f, 1)
def run_k_means_on_pca_cardiovascular_data(path):
    """Benchmark k-means on PCA-reduced (5 components) cardiovascular data for k = 1..15.

    One stats row per cluster count is written to cardiovascular_pca_stats.txt.

    :param path: Path prefix to the repository root containing the data/ dir.
    """
    data_set = 'cardio'
    x_train, y_train = load_data(path + 'data/' + data_set + '/train/')
    pca = PCA(n_components=5)
    pca_x_train = pca.fit_transform(x_train)
    # The loop replaces 15 copy-pasted bench_k_means calls; 'with' guarantees
    # the stats file is closed even if a benchmark raises.
    with open("cardiovascular_pca_stats.txt", "w+") as f:
        for k in range(1, 16):
            bench_k_means(str(k), pca_x_train, y_train, k, f, 1)
def run_k_means_on_random_projections_cardiovascular_data(path):
    """Benchmark k-means on randomly-projected (5 components) cardiovascular data for k = 1..15.

    One stats row per cluster count is written to
    cardiovascular_random_projections_stats.txt.

    :param path: Path prefix to the repository root containing the data/ dir.
    """
    data_set = 'cardio'
    x_train, y_train = load_data(path + 'data/' + data_set + '/train/')
    # Renamed from the copy-pasted 'pca' — this is a random projection, not PCA.
    projector = GaussianRandomProjection(n_components=5)
    projected_x_train = projector.fit_transform(x_train)
    # The loop replaces 15 copy-pasted bench_k_means calls; 'with' guarantees
    # the stats file is closed even if a benchmark raises.
    with open("cardiovascular_random_projections_stats.txt", "w+") as f:
        for k in range(1, 16):
            bench_k_means(str(k), projected_x_train, y_train, k, f, 1)
def run_k_means_on_ica_cardiovascular_data(path):
    """Benchmark k-means on ICA-reduced (5 components) cardiovascular data for k = 1..15.

    One stats row per cluster count is written to cardiovascular_ica_stats.txt.

    :param path: Path prefix to the repository root containing the data/ dir.
    """
    data_set = 'cardio'
    x_train, y_train = load_data(path + 'data/' + data_set + '/train/')
    # Renamed from the copy-pasted 'pca' — this is ICA, not PCA.
    ica = FastICA(n_components=5)
    ica_x_train = ica.fit_transform(x_train)
    # The loop replaces 15 copy-pasted bench_k_means calls; 'with' guarantees
    # the stats file is closed even if a benchmark raises.
    with open("cardiovascular_ica_stats.txt", "w+") as f:
        for k in range(1, 16):
            bench_k_means(str(k), ica_x_train, y_train, k, f, 1)
def bench_k_means(name, data, labels, k, f, iterations):
    """Fit KMeans(n_clusters=k) `iterations` times and report clustering stats.

    Writes one tab-separated row to the open file handle `f` and echoes a
    similar row to stdout.

    NOTE(review): only time, inertia and homogeneity are averaged across
    iterations; completeness/v-measure/ARI/AMI are computed from the
    estimator of the *last* iteration only.

    :param name: Row label written in the first column.
    :param data: Feature matrix passed to KMeans.fit.
    :param labels: Ground-truth labels used by the supervised cluster metrics.
    :param k: Number of clusters.
    :param f: Open writable file handle for the stats row.
    :param iterations: Number of independent fits to average over.
    """
    time_list = []
    inertia_list = []
    homogeneity_list = []
    for i in range(iterations):
        t0 = time()
        estimator = KMeans(n_clusters=k, random_state=0)
        estimator.fit(data)
        inertia_list.append(estimator.inertia_)
        homogeneity_list.append(metrics.homogeneity_score(labels, estimator.labels_))
        time_list.append(time() - t0)
    # The trailing 0.0 is a placeholder column — presumably reserved for the
    # silhouette score; TODO confirm against the downstream consumer.
    f.write('%-9s\t%.3f\t%i\t%.3f\t%.3f\t%.3f\t%.3f\t%.3f\t%.3f\n'
            % (name,
               np.sum(time_list) / iterations,
               np.sum(inertia_list) / iterations,
               np.sum(homogeneity_list) / iterations,
               metrics.completeness_score(labels, estimator.labels_),
               metrics.v_measure_score(labels, estimator.labels_),
               metrics.adjusted_rand_score(labels, estimator.labels_),
               metrics.adjusted_mutual_info_score(labels, estimator.labels_),
               0.0))
    # Console row differs from the file row: 'time() - t0' here also includes
    # the metric computations above, and inertia is last-iteration only.
    print('%-9s\t%.3f\t%i\t%.3f\t%.3f\t%.3f\t%.3f\t%.3f\t%.3f'
          % (name, (time() - t0), estimator.inertia_,
             metrics.homogeneity_score(labels, estimator.labels_),
             metrics.completeness_score(labels, estimator.labels_),
             metrics.v_measure_score(labels, estimator.labels_),
             metrics.adjusted_rand_score(labels, estimator.labels_),
             metrics.adjusted_mutual_info_score(labels, estimator.labels_),
             0.0))
if __name__ == "__main__":
# train_neural_net('../', False)
# run_k_means_on_pca_cardiovascular_data('../../')
run_k_means_on_random_projections_cardiovascular_data('../../')
# run_k_means_on_ica_cardiovascular_data('../../')
# run_k_means_on_cardiovascular_data('../../')
| 41.833333
| 85
| 0.655305
| 1,179
| 6,777
| 3.391009
| 0.090755
| 0.105053
| 0.112056
| 0.192096
| 0.8009
| 0.80015
| 0.768134
| 0.745373
| 0.729615
| 0.729615
| 0
| 0.046626
| 0.193006
| 6,777
| 161
| 86
| 42.093168
| 0.684403
| 0.07009
| 0
| 0.555556
| 0
| 0.015873
| 0.06405
| 0.03576
| 0
| 0
| 0
| 0
| 0
| 1
| 0.039683
| false
| 0
| 0.071429
| 0
| 0.111111
| 0.007937
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
1c208a6c20989a1c2908ebddaefde4384cf6e38f
| 3,988
|
py
|
Python
|
test/unit/test_depfixer.py
|
thomasrockhu/bfg9000
|
1cd1226eab9bed2fc2ec6acccf7864fdcf2ed31a
|
[
"BSD-3-Clause"
] | 72
|
2015-06-23T02:35:13.000Z
|
2021-12-08T01:47:40.000Z
|
test/unit/test_depfixer.py
|
thomasrockhu/bfg9000
|
1cd1226eab9bed2fc2ec6acccf7864fdcf2ed31a
|
[
"BSD-3-Clause"
] | 139
|
2015-03-01T18:48:17.000Z
|
2021-06-18T15:45:14.000Z
|
test/unit/test_depfixer.py
|
thomasrockhu/bfg9000
|
1cd1226eab9bed2fc2ec6acccf7864fdcf2ed31a
|
[
"BSD-3-Clause"
] | 19
|
2015-12-23T21:24:33.000Z
|
2022-01-06T04:04:41.000Z
|
from io import StringIO
from . import *
from bfg9000 import depfixer
class TestEmitDeps(TestCase):
def test_empty_deps(self):
instream = StringIO('foo:\n')
outstream = StringIO()
depfixer.emit_deps(instream, outstream)
self.assertEqual(outstream.getvalue(), '')
def test_single_dep(self):
instream = StringIO('foo: bar\n')
outstream = StringIO()
depfixer.emit_deps(instream, outstream)
self.assertEqual(outstream.getvalue(), 'bar:\n')
def test_multiple_deps(self):
instream = StringIO('foo: bar baz\n')
outstream = StringIO()
depfixer.emit_deps(instream, outstream)
self.assertEqual(outstream.getvalue(), 'bar:\nbaz:\n')
def test_multiline_deps(self):
instream = StringIO('foo: bar \\\nbaz\n')
outstream = StringIO()
depfixer.emit_deps(instream, outstream)
self.assertEqual(outstream.getvalue(), 'bar:\nbaz:\n')
def test_multiple_targets(self):
instream = StringIO('foo bar: baz quux\n')
outstream = StringIO()
depfixer.emit_deps(instream, outstream)
self.assertEqual(outstream.getvalue(), 'baz:\nquux:\n')
def test_multiple_rules(self):
instream = StringIO('foo: bar\nbaz: quux\n')
outstream = StringIO()
depfixer.emit_deps(instream, outstream)
self.assertEqual(outstream.getvalue(), 'bar:\nquux:\n')
def test_windows_paths(self):
instream = StringIO('c:\\foo c:\\bar: c:\\baz c:\\quux\n')
outstream = StringIO()
depfixer.emit_deps(instream, outstream)
self.assertEqual(outstream.getvalue(), 'c:\\baz:\nc:\\quux:\n')
def test_leading_spaces(self):
instream = StringIO(' foo: bar\n')
outstream = StringIO()
depfixer.emit_deps(instream, outstream)
self.assertEqual(outstream.getvalue(), 'bar:\n')
def test_trailing_spaces(self):
instream = StringIO('foo : bar \n')
outstream = StringIO()
depfixer.emit_deps(instream, outstream)
self.assertEqual(outstream.getvalue(), 'bar:\n')
def test_many_spaces(self):
instream = StringIO(' foo bar : baz \n')
outstream = StringIO()
depfixer.emit_deps(instream, outstream)
self.assertEqual(outstream.getvalue(), 'baz:\n')
def test_unexpected_newline(self):
instream = StringIO('foo\n')
outstream = StringIO()
self.assertRaises(depfixer.UnexpectedTokenError, depfixer.emit_deps,
instream, outstream)
instream = StringIO('foo \n')
outstream = StringIO()
self.assertRaises(depfixer.UnexpectedTokenError, depfixer.emit_deps,
instream, outstream)
def test_unexpected_colon(self):
instream = StringIO('foo: :\n')
outstream = StringIO()
self.assertRaises(depfixer.UnexpectedTokenError, depfixer.emit_deps,
instream, outstream)
instream = StringIO('foo: bar :\n')
outstream = StringIO()
self.assertRaises(depfixer.UnexpectedTokenError, depfixer.emit_deps,
instream, outstream)
instream = StringIO('foo: bar:\n')
outstream = StringIO()
self.assertRaises(depfixer.UnexpectedTokenError, depfixer.emit_deps,
instream, outstream)
def test_unexpected_eof(self):
    """Input that ends without a terminating newline is a parse error."""
    for text in ('foo: bar', 'foo:', 'foo: c:\\foo\\'):
        with self.assertRaises(depfixer.ParseError):
            depfixer.emit_deps(StringIO(text), StringIO())
| 35.927928
| 76
| 0.618857
| 397
| 3,988
| 6.105793
| 0.118388
| 0.118812
| 0.118812
| 0.178218
| 0.8783
| 0.865924
| 0.823432
| 0.813944
| 0.813944
| 0.813944
| 0
| 0.001361
| 0.262788
| 3,988
| 110
| 77
| 36.254545
| 0.823129
| 0
| 0
| 0.629213
| 0
| 0
| 0.082999
| 0.005266
| 0
| 0
| 0
| 0
| 0.202247
| 1
| 0.146067
| false
| 0
| 0.033708
| 0
| 0.191011
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
1c2463eb3a21ae31490937beaec288216aa3f8ea
| 536
|
py
|
Python
|
nitro-python/nssrc/com/citrix/netscaler/nitro/resource/config/responder/__init__.py
|
culbertm/NSttyPython
|
ff9f6aedae3fb8495342cd0fc4247c819cf47397
|
[
"Apache-2.0"
] | 2
|
2020-08-24T18:04:22.000Z
|
2020-08-24T18:04:47.000Z
|
nitro/resource/config/responder/__init__.py
|
HanseMerkur/nitro-python
|
d03eb11f492a35a2a8b2a140322fbce22d25a8f7
|
[
"Apache-2.0"
] | null | null | null |
nitro/resource/config/responder/__init__.py
|
HanseMerkur/nitro-python
|
d03eb11f492a35a2a8b2a140322fbce22d25a8f7
|
[
"Apache-2.0"
] | null | null | null |
__all__ = ['responderaction', 'responderglobal_binding', 'responderglobal_responderpolicy_binding', 'responderhtmlpage', 'responderparam', 'responderpolicy', 'responderpolicy_binding', 'responderpolicy_crvserver_binding', 'responderpolicy_csvserver_binding', 'responderpolicy_lbvserver_binding', 'responderpolicy_responderglobal_binding', 'responderpolicy_responderpolicylabel_binding', 'responderpolicylabel', 'responderpolicylabel_binding', 'responderpolicylabel_policybinding_binding', 'responderpolicylabel_responderpolicy_binding']
| 536
| 536
| 0.875
| 36
| 536
| 12.388889
| 0.333333
| 0.246637
| 0.210762
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.031716
| 536
| 1
| 536
| 536
| 0.859345
| 0
| 0
| 0
| 0
| 0
| 0.860335
| 0.709497
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
1c2bc749f187c7eed4b2e52f2d167111094537a8
| 32,080
|
py
|
Python
|
test/nn/test_multiple_module_flipsrotations.py
|
QUVA-Lab/escnn
|
59ed6b96f61f8616f87b3f25aa2f8abdb6f1a882
|
[
"BSD-3-Clause"
] | 4
|
2022-03-16T22:51:39.000Z
|
2022-03-18T18:45:49.000Z
|
test/nn/test_multiple_module_flipsrotations.py
|
QUVA-Lab/escnn
|
59ed6b96f61f8616f87b3f25aa2f8abdb6f1a882
|
[
"BSD-3-Clause"
] | null | null | null |
test/nn/test_multiple_module_flipsrotations.py
|
QUVA-Lab/escnn
|
59ed6b96f61f8616f87b3f25aa2f8abdb6f1a882
|
[
"BSD-3-Clause"
] | null | null | null |
import unittest
from unittest import TestCase
from escnn.nn import *
from escnn.gspaces import *
import torch
import random
# Batch-norm families under test: each entry pairs the (name, supported-type)
# blocks with the equivariant module class that consumes them.
batchnormalizations = [
    ([('regular_bnorm', 'pointwise')], InnerBatchNorm),
    ([('g_bnorm', 'norm')], GNormBatchNorm),
    ([('norm_bnorm', 'norm')], NormBatchNorm),
    ([('indnorm_bnorm', 'induced_norm')], InducedNormBatchNorm),
]
# Flat list of every (name, type) pair across all batch-norm families.
allbatchnormalizations = [entry for block, _ in batchnormalizations for entry in block]
# Pooling families under test, in the same (blocks, module) shape as above.
poolings = [
    ([('regular_mpool', 'pointwise')], PointwiseMaxPool),
    ([('norm_mpool', 'norm')], NormMaxPool),
]
# Flat list of every (name, type) pair across all pooling families.
allpoolings = [entry for block, _ in poolings for entry in block]
# Non-linearity families under test.  GatedNonLinearity2 is the only family
# with two blocks: a 'gate' field for every 'gated' field.
nonlinearities = [
    ([('p_relu', 'pointwise')], PointwiseNonLinearity),
    ([('p_sigmoid', 'pointwise')], PointwiseNonLinearity),
    ([('p_tanh', 'pointwise')], PointwiseNonLinearity),
    ([('c_relu', 'concatenated')], ConcatenatedNonLinearity),
    ([('c_sigmoid', 'concatenated')], ConcatenatedNonLinearity),
    ([('c_tanh', 'concatenated')], ConcatenatedNonLinearity),
    ([('n_relu', 'norm')], NormNonLinearity),
    ([('n_sigmoid', 'norm')], NormNonLinearity),
    ([('vectorfield', 'vectorfield')], VectorFieldNonLinearity),
    ([('gate', 'gate'), ('gated', 'gated')], GatedNonLinearity2),
]
# Flat list of every (name, type) pair across all non-linearity families.
allnonlinearities = [entry for block, _ in nonlinearities for entry in block]
# Convolution families (not exercised by the tests below, kept for parity).
convolutions = [
    ([('conv2d', 'any')], R2Conv),
]
# Flat list of every (name, type) pair across all convolution families.
allconvolutions = [entry for block, _ in convolutions for entry in block]
# Every (name, type) pair known to this module, across all families.
allfunctions = allbatchnormalizations + allpoolings + allnonlinearities + allconvolutions
class TestNonLinearitiesFlipRotations(TestCase):
    """Equivariance tests for ``MultipleModule`` over dihedral (D8) and O(2) gspaces.

    Each test builds a ``FieldType`` whose fields support one family of
    equivariant modules (non-linearities, poolings or batch-norms),
    instantiates one module per label group through ``MultipleModule`` and
    checks equivariance.  Variants cover fields listed in sorted order,
    randomly shuffled order, and shuffled order with ``reshuffle=True``.

    The original file repeated the gather/build/check logic verbatim in all
    18 tests; it is factored into private helpers below.  Public test-method
    names and behavior are unchanged.
    """

    # ------------------------------------------------------------------
    # field gathering
    # ------------------------------------------------------------------

    def _gather_nonlinearity_fields(self, g, repeat=1):
        """Collect (representation, label) pairs for every non-linearity.

        'gate' labels are not gathered directly: one trivial 'gate' field is
        prepended for every 'gated' field found, since the gated
        non-linearity consumes one gate per gated field.  ``repeat`` repeats
        the gathering (used by the reshuffle tests, which triple the fields).
        """
        reprs, labels = [], []
        gated = 0
        for blocks, _ in nonlinearities:
            for _ in range(repeat):
                for name, kind in blocks:
                    if name != 'gate':
                        for r in g.representations.values():
                            if kind in r.supported_nonlinearities:
                                reprs.append(r)
                                labels.append(name)
                                if name == 'gated':
                                    gated += 1
        return [g.trivial_repr] * gated + reprs, ['gate'] * gated + labels

    def _gather_pooling_fields(self, g):
        """Collect (representation, label) pairs for every pooling family."""
        reprs, labels = [], []
        for blocks, _ in poolings:
            for name, kind in blocks:
                for r in g.representations.values():
                    if kind in r.supported_nonlinearities:
                        reprs.append(r)
                        labels.append(name)
        return reprs, labels

    def _gather_batchnorm_fields(self, g):
        """Collect (representation, label) pairs for every batch-norm family.

        Norm-based batch-norms only accept representations without trivial
        irreps, so those families are gathered in a second pass restricted to
        such representations.
        """
        reprs, labels = [], []
        for blocks, module in batchnormalizations:
            if module not in [NormBatchNorm, InducedNormBatchNorm]:
                for name, kind in blocks:
                    for r in g.representations.values():
                        if kind in r.supported_nonlinearities:
                            reprs.append(r)
                            labels.append(name)
        for r in g.representations.values():
            if not r.contains_trivial():
                for blocks, module in batchnormalizations:
                    if module == NormBatchNorm:
                        for name, kind in blocks:
                            if kind in r.supported_nonlinearities:
                                reprs.append(r)
                                labels.append(name)
                    elif module == InducedNormBatchNorm:
                        for name, kind in blocks:
                            if any(snl.startswith(kind) for snl in r.supported_nonlinearities):
                                reprs.append(r)
                                labels.append(name)
        return reprs, labels

    @staticmethod
    def _shuffled(reprs, labels):
        """Return (reprs, labels) reordered by one common random permutation."""
        paired = list(zip(reprs, labels))
        random.shuffle(paired)
        reprs, labels = zip(*paired)
        return reprs, labels

    # ------------------------------------------------------------------
    # module construction and checking
    # ------------------------------------------------------------------

    def _build(self, g, reprs, labels, families, make):
        """Group fields by label and build one module per applicable family.

        ``make(module_cls, blocks, repr_)`` instantiates the module for a
        family.  Returns the full FieldType and the ``[(module, labels)]``
        list expected by ``MultipleModule``.
        """
        r = FieldType(g, reprs)
        reprs_dict = r.group_by_labels(labels)
        modules = []
        for blocks, module in families:
            if all(l in reprs_dict for l, _ in blocks):
                repr_ = tuple(reprs_dict[l] for l, _ in blocks)
                if len(repr_) == 1:
                    repr_ = repr_[0]
                lbs = [l for l, _ in blocks]
                if len(lbs) == 1:
                    lbs = lbs[0]
                modules.append((make(module, blocks, repr_), lbs))
        return r, modules

    def _check_nonlinearities(self, g, reprs, labels, reshuffle):
        """Build every applicable non-linearity and check equivariance."""
        r, modules = self._build(
            g, reprs, labels, nonlinearities,
            lambda module, blocks, repr_: module(repr_, function=blocks[0][0]))
        nnl = MultipleModule(r, labels, modules, reshuffle=reshuffle)
        nnl.check_equivariance(full_space_action=False)

    def _check_poolings(self, g, reprs, labels, reshuffle):
        """Build every applicable pooling (3x3 kernel) and check equivariance."""
        kernel = (3, 3)
        r, modules = self._build(
            g, reprs, labels, poolings,
            lambda module, blocks, repr_: module(repr_, kernel_size=kernel))
        nnl = MultipleModule(r, labels, modules, reshuffle=reshuffle)
        nnl.check_equivariance(full_space_action=False)

    def _check_batchnorms(self, g, reprs, labels, reshuffle):
        """Build every applicable batch-norm, accumulate running statistics
        on random batches, then check equivariance in eval mode."""
        r, modules = self._build(
            g, reprs, labels, batchnormalizations,
            lambda module, blocks, repr_: module(repr_))
        nnl = MultipleModule(r, labels, modules, reshuffle=reshuffle)
        nnl.train()
        b, c, h, w = 4, r.size, 30, 30
        for _ in range(20):
            nnl(GeometricTensor(torch.randn(b, c, h, w), r))
        nnl.eval()
        nnl.check_equivariance(full_space_action=False)

    @staticmethod
    def _add_induced_dihedral(g, N):
        """Register the induced representations needed by InducedNormBatchNorm
        on the dihedral gspace."""
        M = N // 2
        for m in range(M // 2 + 1):
            g.induced_repr((0, M), g.fibergroup.subgroup((0, M))[0].irrep(1, m))

    @staticmethod
    def _add_induced_o2(g):
        """Register the induced representations needed by InducedNormBatchNorm
        on the O(2) gspace."""
        for m in range(5):
            g.induced_repr((None, -1), g.fibergroup.subgroup((None, -1))[0].irrep(m))

    # ------------------------------------------------------------------
    # dihedral (D8) tests
    # ------------------------------------------------------------------

    def test_dihedral_multiples_nonlinearities_sorted(self):
        g = flipRot2dOnR2(8)
        reprs, labels = self._gather_nonlinearity_fields(g)
        self._check_nonlinearities(g, reprs, labels, reshuffle=False)

    def test_dihedral_multiples_poolings_sorted(self):
        g = flipRot2dOnR2(8)
        reprs, labels = self._gather_pooling_fields(g)
        self._check_poolings(g, reprs, labels, reshuffle=False)

    def test_dihedral_multiples_batchnorm_sorted(self):
        g = flipRot2dOnR2(8)
        self._add_induced_dihedral(g, 8)
        reprs, labels = self._gather_batchnorm_fields(g)
        self._check_batchnorms(g, reprs, labels, reshuffle=False)

    def test_dihedral_multiples_nonlinearities_shuffled(self):
        g = flipRot2dOnR2(8)
        reprs, labels = self._shuffled(*self._gather_nonlinearity_fields(g))
        self._check_nonlinearities(g, reprs, labels, reshuffle=False)

    def test_dihedral_multiples_poolings_shuffled(self):
        g = flipRot2dOnR2(8)
        reprs, labels = self._shuffled(*self._gather_pooling_fields(g))
        self._check_poolings(g, reprs, labels, reshuffle=False)

    def test_dihedral_multiples_batchnorm_shuffled(self):
        g = flipRot2dOnR2(8)
        self._add_induced_dihedral(g, 8)
        reprs, labels = self._shuffled(*self._gather_batchnorm_fields(g))
        self._check_batchnorms(g, reprs, labels, reshuffle=False)

    def test_dihedral_multiples_nonlinearities_sort(self):
        g = flipRot2dOnR2(8)
        # triple the fields so reshuffling has real work to do
        reprs, labels = self._shuffled(*self._gather_nonlinearity_fields(g, repeat=3))
        self._check_nonlinearities(g, reprs, labels, reshuffle=True)

    def test_dihedral_multiples_poolings_sort(self):
        g = flipRot2dOnR2(8)
        reprs, labels = self._shuffled(*self._gather_pooling_fields(g))
        self._check_poolings(g, reprs, labels, reshuffle=True)

    def test_dihedral_multiples_batchnorm_sort(self):
        g = flipRot2dOnR2(8)
        self._add_induced_dihedral(g, 8)
        reprs, labels = self._shuffled(*self._gather_batchnorm_fields(g))
        self._check_batchnorms(g, reprs, labels, reshuffle=True)

    # ------------------------------------------------------------------
    # O(2) tests
    # ------------------------------------------------------------------

    def test_o2_multiples_nonlinearities_sorted(self):
        g = flipRot2dOnR2(-1, 8)
        reprs, labels = self._gather_nonlinearity_fields(g)
        self._check_nonlinearities(g, reprs, labels, reshuffle=False)

    def test_o2_multiples_poolings_sorted(self):
        g = flipRot2dOnR2(-1, 8)
        reprs, labels = self._gather_pooling_fields(g)
        self._check_poolings(g, reprs, labels, reshuffle=False)

    def test_o2_multiples_batchnorm_sorted(self):
        g = flipRot2dOnR2(-1, 8)
        self._add_induced_o2(g)
        reprs, labels = self._gather_batchnorm_fields(g)
        self._check_batchnorms(g, reprs, labels, reshuffle=False)

    def test_o2_multiples_nonlinearities_shuffled(self):
        g = flipRot2dOnR2(-1, 8)
        reprs, labels = self._shuffled(*self._gather_nonlinearity_fields(g))
        self._check_nonlinearities(g, reprs, labels, reshuffle=False)

    def test_o2_multiples_poolings_shuffled(self):
        g = flipRot2dOnR2(-1, 8)
        reprs, labels = self._shuffled(*self._gather_pooling_fields(g))
        self._check_poolings(g, reprs, labels, reshuffle=False)

    def test_o2_multiples_batchnorm_shuffled(self):
        g = flipRot2dOnR2(-1, 8)
        self._add_induced_o2(g)
        reprs, labels = self._shuffled(*self._gather_batchnorm_fields(g))
        self._check_batchnorms(g, reprs, labels, reshuffle=False)

    def test_o2_multiples_nonlinearities_sort(self):
        g = flipRot2dOnR2(-1, 8)
        reprs, labels = self._shuffled(*self._gather_nonlinearity_fields(g, repeat=3))
        self._check_nonlinearities(g, reprs, labels, reshuffle=True)

    def test_o2_multiples_poolings_sort(self):
        g = flipRot2dOnR2(-1, 8)
        reprs, labels = self._shuffled(*self._gather_pooling_fields(g))
        self._check_poolings(g, reprs, labels, reshuffle=True)

    def test_o2_multiples_batchnorm_sort(self):
        g = flipRot2dOnR2(-1, 8)
        self._add_induced_o2(g)
        reprs, labels = self._shuffled(*self._gather_batchnorm_fields(g))
        self._check_batchnorms(g, reprs, labels, reshuffle=True)
# Allow running this test module directly (outside a test runner).
if __name__ == '__main__':
    unittest.main()
| 32.970195
| 95
| 0.470854
| 3,306
| 32,080
| 4.462795
| 0.04265
| 0.045547
| 0.0366
| 0.04392
| 0.92619
| 0.925173
| 0.925173
| 0.925173
| 0.921174
| 0.921174
| 0
| 0.014354
| 0.435349
| 32,080
| 972
| 96
| 33.004115
| 0.800155
| 0.005206
| 0
| 0.908708
| 0
| 0
| 0.01163
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.025281
| false
| 0
| 0.008427
| 0
| 0.035112
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
1c51268539037e05eb20f49d408790376f3fdd60
| 139
|
py
|
Python
|
exapi/rest/hitbtc/market_data/__init__.py
|
astsu-dev/exapi
|
1ef39ccdd77e9ddb60ec6eaa16a2cc26e1ac3e12
|
[
"MIT"
] | null | null | null |
exapi/rest/hitbtc/market_data/__init__.py
|
astsu-dev/exapi
|
1ef39ccdd77e9ddb60ec6eaa16a2cc26e1ac3e12
|
[
"MIT"
] | null | null | null |
exapi/rest/hitbtc/market_data/__init__.py
|
astsu-dev/exapi
|
1ef39ccdd77e9ddb60ec6eaa16a2cc26e1ac3e12
|
[
"MIT"
] | null | null | null |
from exapi.rest.hitbtc.market_data.api import HitbtcMarketDataAPI
from exapi.rest.hitbtc.market_data.interface import IHitbtcMarketDataAPI
| 46.333333
| 72
| 0.884892
| 18
| 139
| 6.722222
| 0.611111
| 0.14876
| 0.214876
| 0.31405
| 0.479339
| 0.479339
| 0
| 0
| 0
| 0
| 0
| 0
| 0.057554
| 139
| 2
| 73
| 69.5
| 0.923664
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
1c5a7b1a894ecba9075a1d8dea6195c95a40e0ba
| 57
|
py
|
Python
|
sem_seg/test.py
|
ChutianShen/pointnet_kitti
|
6ebd2c7c203c4fcc8172f306c85e55ea06429ba5
|
[
"MIT"
] | null | null | null |
sem_seg/test.py
|
ChutianShen/pointnet_kitti
|
6ebd2c7c203c4fcc8172f306c85e55ea06429ba5
|
[
"MIT"
] | null | null | null |
sem_seg/test.py
|
ChutianShen/pointnet_kitti
|
6ebd2c7c203c4fcc8172f306c85e55ea06429ba5
|
[
"MIT"
] | null | null | null |
# Thin driver: run the segmentation-to-bounding-box conversion.
# NOTE(review): presumably operates on the test split (name suggests it) —
# confirm against from_seg_to_bbox.predict_test.
import from_seg_to_bbox
from_seg_to_bbox.predict_test()
| 14.25
| 31
| 0.877193
| 11
| 57
| 3.909091
| 0.636364
| 0.325581
| 0.418605
| 0.604651
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.070175
| 57
| 4
| 31
| 14.25
| 0.811321
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
98faa302a663e21d585fd37ffc09694b89724c39
| 101
|
py
|
Python
|
swin_transformer_pytorch/__init__.py
|
DUTxutengfei/swin-transformer-pytorch
|
8f0fe680e8d972ebf8df40eea04b669627533c87
|
[
"MIT"
] | 555
|
2021-03-27T18:38:44.000Z
|
2022-03-30T15:24:38.000Z
|
swin_transformer_pytorch/__init__.py
|
DUTxutengfei/swin-transformer-pytorch
|
8f0fe680e8d972ebf8df40eea04b669627533c87
|
[
"MIT"
] | 22
|
2021-03-29T07:28:26.000Z
|
2022-03-28T08:25:40.000Z
|
swin_transformer_pytorch/__init__.py
|
DUTxutengfei/swin-transformer-pytorch
|
8f0fe680e8d972ebf8df40eea04b669627533c87
|
[
"MIT"
] | 88
|
2021-03-28T02:44:43.000Z
|
2022-03-24T07:59:13.000Z
|
from swin_transformer_pytorch.swin_transformer import SwinTransformer, swin_t, swin_s, swin_b, swin_l
| 101
| 101
| 0.881188
| 16
| 101
| 5.125
| 0.625
| 0.365854
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.069307
| 101
| 1
| 101
| 101
| 0.87234
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
c704d3f017a17d6847696445a993a9483682daca
| 5,548
|
py
|
Python
|
services/tests/test_commands.py
|
City-of-Helsinki/opencity-profile
|
a430b562b9937f443d391475fabdc27068b95c49
|
[
"MIT"
] | 5
|
2020-03-17T15:56:17.000Z
|
2022-01-31T13:43:31.000Z
|
services/tests/test_commands.py
|
City-of-Helsinki/opencity-profile
|
a430b562b9937f443d391475fabdc27068b95c49
|
[
"MIT"
] | 337
|
2018-05-21T08:35:05.000Z
|
2022-03-14T07:38:15.000Z
|
services/tests/test_commands.py
|
City-of-Helsinki/opencity-profile
|
a430b562b9937f443d391475fabdc27068b95c49
|
[
"MIT"
] | 10
|
2019-08-05T08:16:06.000Z
|
2021-08-06T15:08:44.000Z
|
from io import StringIO
import pytest
from django.core.management import call_command, CommandError
from guardian.shortcuts import assign_perm
from services.models import Service
from utils.utils import SERVICES
def test_command_generate_services_adds_all_services():
    """From an empty table, generate_services creates every known service."""
    initial = Service.objects.count()
    assert initial == 0
    call_command("generate_services")
    assert Service.objects.count() == len(SERVICES)
@pytest.mark.parametrize("service__name", ["berth"])
def test_command_generate_services_adds_only_missing_services(service):
    """With one service pre-existing, the command fills in only the rest."""
    initial = Service.objects.count()
    assert initial == 1
    call_command("generate_services")
    assert Service.objects.count() == len(SERVICES)
def test_command_add_object_permissions_with_correct_arguments_output(
    user, service, group
):
    """Both profile permissions can be granted via add_object_permission."""
    user.groups.add(group)
    perms = ("can_view_profiles", "can_manage_profiles")
    for perm in perms:
        assert not user.has_perm(perm, service)
    out = StringIO()
    for perm in perms:
        call_command(
            "add_object_permission", service.name, group.name, perm, stdout=out
        )
    output = out.getvalue()
    for perm in perms:
        assert (
            f"Permission {perm} added for {group.name} on service {service.name}"
            in output
        )
    for perm in perms:
        assert user.has_perm(perm, service)
def test_command_add_object_permissions_errors_out_when_invalid_permission_given(
    user, service, group
):
    """An unknown permission name aborts without granting anything."""
    user.groups.add(group)
    with pytest.raises(CommandError, match="Invalid permission given"):
        call_command(
            "add_object_permission",
            service.name,
            group.name,
            "can_manage_profiles_invalid",
        )
    assert not user.has_perm("can_manage_profiles_invalid", service)
def test_command_add_object_permissions_errors_out_when_invalid_group_name_given(
    user, service, group
):
    """An unknown group name aborts without granting anything."""
    user.groups.add(group)
    with pytest.raises(CommandError, match="Invalid group_name given"):
        call_command(
            "add_object_permission",
            service.name,
            "InvalidGroup",
            "can_manage_profiles",
        )
    assert not user.has_perm("can_manage_profiles", service)
def test_command_add_object_permissions_errors_out_when_invalid_service_given(
    user, service, group
):
    """An unknown service name aborts without granting anything."""
    user.groups.add(group)
    with pytest.raises(CommandError, match="Invalid service given"):
        call_command(
            "add_object_permission",
            "INVALID",
            group.name,
            "can_manage_profiles",
        )
    assert not user.has_perm("can_manage_profiles", service)
def test_command_remove_object_permissions_with_correct_arguments_output(
    user, service, group
):
    """Both profile permissions can be revoked via remove_object_permission."""
    user.groups.add(group)
    perms = ("can_view_profiles", "can_manage_profiles")
    for perm in perms:
        assign_perm(perm, group, service)
    for perm in perms:
        assert user.has_perm(perm, service)
    out = StringIO()
    for perm in perms:
        call_command(
            "remove_object_permission", service.name, group.name, perm, stdout=out
        )
    output = out.getvalue()
    for perm in perms:
        assert (
            f"Permission {perm} removed for {group.name} on service {service.name}"
            in output
        )
    for perm in perms:
        assert not user.has_perm(perm, service)
def test_command_remove_object_permissions_errors_out_when_invalid_permission_given(
    user, service, group
):
    """An unknown permission name aborts and leaves the grant untouched."""
    user.groups.add(group)
    assign_perm("can_manage_profiles", group, service)
    assert user.has_perm("can_manage_profiles", service)
    with pytest.raises(CommandError, match="Invalid permission given"):
        call_command(
            "remove_object_permission",
            service.name,
            group.name,
            "can_manage_profiles_invalid",
        )
    assert user.has_perm("can_manage_profiles", service)
def test_command_remove_object_permissions_errors_out_when_invalid_group_name_given(
    user, service, group
):
    """An unknown group name aborts the command and removes nothing."""
    user.groups.add(group)
    assign_perm("can_manage_profiles", group, service)
    assert user.has_perm("can_manage_profiles", service)
    with pytest.raises(CommandError, match="Invalid group_name given"):
        call_command(
            "remove_object_permission",
            service.name,
            "InvalidGroup",
            "can_manage_profiles",
        )
    # The existing permission must survive the failed removal attempt.
    assert user.has_perm("can_manage_profiles", service)
def test_command_remove_object_permissions_errors_out_when_invalid_service_given(
    user, service, group
):
    """An unknown service name aborts the command and removes nothing."""
    user.groups.add(group)
    assign_perm("can_manage_profiles", group, service)
    assert user.has_perm("can_manage_profiles", service)
    with pytest.raises(CommandError, match="Invalid service given"):
        call_command(
            "remove_object_permission",
            "INVALID",
            group.name,
            "can_manage_profiles",
        )
    # The existing permission must survive the failed removal attempt.
    assert user.has_perm("can_manage_profiles", service)
| 31.522727
| 92
| 0.709805
| 681
| 5,548
| 5.447871
| 0.104258
| 0.065499
| 0.12372
| 0.064151
| 0.91779
| 0.908895
| 0.885984
| 0.885984
| 0.885984
| 0.881132
| 0
| 0.000446
| 0.192502
| 5,548
| 175
| 93
| 31.702857
| 0.827679
| 0
| 0
| 0.745098
| 0
| 0
| 0.25
| 0.055155
| 0
| 0
| 0
| 0
| 0.163399
| 1
| 0.065359
| false
| 0
| 0.039216
| 0
| 0.104575
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
c7833a9302ffd0bcc42dbc0dec450e248766e241
| 76,020
|
py
|
Python
|
data/transcoder_evaluation_gfg/python/SUM_MIDDLE_ROW_COLUMN_MATRIX.py
|
mxl1n/CodeGen
|
e5101dd5c5e9c3720c70c80f78b18f13e118335a
|
[
"MIT"
] | 241
|
2021-07-20T08:35:20.000Z
|
2022-03-31T02:39:08.000Z
|
data/transcoder_evaluation_gfg/python/SUM_MIDDLE_ROW_COLUMN_MATRIX.py
|
mxl1n/CodeGen
|
e5101dd5c5e9c3720c70c80f78b18f13e118335a
|
[
"MIT"
] | 49
|
2021-07-22T23:18:42.000Z
|
2022-03-24T09:15:26.000Z
|
data/transcoder_evaluation_gfg/python/SUM_MIDDLE_ROW_COLUMN_MATRIX.py
|
mxl1n/CodeGen
|
e5101dd5c5e9c3720c70c80f78b18f13e118335a
|
[
"MIT"
] | 71
|
2021-07-21T05:17:52.000Z
|
2022-03-29T23:49:28.000Z
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
def f_gold(mat, n):
    """Print the sums of the middle row and the middle column of *mat*.

    Only the first ``n`` rows/columns are considered, and the "middle"
    index is ``n // 2`` (the upper-middle for even ``n``). Output goes to
    stdout; nothing is returned.
    """
    mid = n // 2
    middle_row_total = sum(mat[mid][j] for j in range(n))
    print("Sum of middle row = ", middle_row_total)
    middle_col_total = sum(mat[i][mid] for i in range(n))
    print("Sum of middle column = ", middle_col_total)
#TOFILL
if __name__ == '__main__':
param = [
([[7, 32, 33, 35, 51, 61, 62, 68, 71, 73], [3, 10, 18, 32, 44, 56, 62, 80, 86, 91], [13, 21, 26, 31, 43, 53, 54, 59, 61, 73], [3, 9, 14, 14, 43, 46, 67, 71, 87, 99], [20, 53, 53, 72, 79, 80, 82, 84, 95, 99], [15, 21, 39, 44, 46, 48, 59, 64, 65, 70], [28, 35, 39, 41, 45, 50, 52, 61, 72, 73], [3, 15, 21, 22, 49, 49, 54, 73, 88, 98], [7, 9, 14, 16, 18, 26, 42, 45, 59, 86], [14, 21, 25, 31, 34, 45, 53, 54, 66, 82]],8,),
([[22, 92, 36, -94, -4, 6, -36, 78, -18, 12, 14, 54, 80, 4, -34, 4, -2, 24, 60, -14, 68, 88, -46, 82, -70, -2, 38, 76, -72, 70, -12, 24, -62, 58, 64, -92, 60, 96, -20, 0], [96, 42, -92, 70, 82, -74, -28, -64, -64, -50, -56, 92, -52, 84, 68, 2, -80, 60, -70, 6, 42, -16, 50, 86, -2, 56, 36, -90, 82, -38, 42, -66, -32, -88, 2, 48, 24, 56, 78, 90], [-86, 4, 8, 22, 92, -62, 88, -54, 50, 0, -32, -24, 38, 64, -22, -4, 30, -26, 82, 10, 4, 78, 78, 48, -42, 94, -14, -54, 24, 14, 36, 46, -16, -14, -72, -98, 30, 2, -28, -10], [-70, 44, 54, 6, 2, 66, -24, 6, 94, 16, 92, -78, -26, -36, 66, 56, -30, -50, -94, -64, 94, 82, -70, 74, 70, 88, -34, -24, -4, -62, 10, 18, -96, -22, -34, -52, 40, -50, -80, 22], [78, -70, -52, 58, 78, -6, -26, -16, -34, -42, 66, 12, -2, 30, -36, -28, 94, 64, 84, -86, -78, -62, -92, 16, 50, -50, 16, 64, -46, -92, -46, -48, -18, -86, -18, -84, 28, 22, 10, -58], [34, -86, 68, -10, -82, -28, -78, -18, -86, 22, -80, -14, 34, -80, -30, -50, 32, 84, -70, -32, 40, 62, -92, -76, 98, 24, -70, 24, 64, -92, 40, -28, -10, 38, -6, -6, -44, 50, -24, 98], [96, 62, 46, 90, 38, -36, -82, 70, -82, 2, -78, -84, -42, 92, 32, 54, 44, -50, -90, 94, 6, 38, 40, -6, -76, 98, -64, -90, 80, -2, -20, 28, 94, -52, 38, -38, 12, -78, -32, -64], [-28, -32, 66, 44, 28, 60, 58, 70, -56, 8, -82, 78, -94, -74, 60, 36, 64, 48, 60, -60, 82, 44, 52, -38, 26, -36, -90, -94, 44, 74, 84, 28, 76, 46, 4, 64, 16, 44, 72, 48], [28, 92, -64, 80, -84, 18, -82, 8, -28, -60, -50, 66, 76, 96, -54, 54, -4, -80, 72, 2, 74, -64, -48, 34, 6, -56, 6, 86, -26, -68, -30, -18, 70, 14, -70, -78, 68, 86, 40, -86], [58, 78, 76, -4, -68, 76, -10, -68, -78, -48, -82, -46, -80, -40, 42, 36, 96, 32, -10, -90, 6, -22, 22, -52, 32, 16, -58, -52, -78, -4, -54, -86, -16, 78, -66, -16, 68, 6, 66, -84], [-58, 30, 62, 70, -38, -22, -68, 98, -62, -54, 80, -38, -90, 38, -8, -36, -52, 48, -2, 82, -78, -72, -6, 96, 44, -34, 90, -2, 30, 92, 40, -18, -76, 46, -60, 36, 90, -54, 56, -24], [84, 34, -20, 4, 0, 80, 70, -82, -74, -12, -24, 
72, 30, 16, 62, -44, 50, -64, 98, 58, 74, -64, -34, 82, -24, 20, 22, -34, 74, 4, 52, -8, 26, -8, 74, -26, 34, 60, 40, -24], [-46, -54, 22, 20, 70, -8, 32, 98, 94, 34, -94, -40, 24, 98, -56, 12, -28, 58, 84, -86, 98, 80, -40, -54, -30, 16, 6, 74, 72, -98, 78, -98, -62, 70, 40, -90, 82, 68, -36, -12], [26, -54, 66, 50, -78, -66, -18, 78, -78, -24, 22, 14, -42, -10, 34, -82, 36, 94, -98, 60, 52, 46, -60, -52, -42, -64, 94, -18, 66, -2, -20, -92, -70, 32, 14, 72, 58, 54, -62, 22], [-16, -14, -80, 20, -90, -10, 92, -54, -8, -32, -44, 6, -26, 66, -56, -38, -56, 86, 52, -38, 12, 12, 20, 24, 14, -30, -10, -70, 36, 64, -82, -46, 24, 26, -58, 96, 58, 96, -70, 58], [16, -90, -18, -40, 86, -98, -14, -92, -86, 24, -98, -84, 54, 64, -84, -50, 76, -34, 62, 26, 58, 42, 10, -72, 32, 92, 46, 50, 58, 66, -98, 26, -56, 56, -66, 26, -82, 0, -6, 34], [4, -2, -6, 8, -70, 30, -36, 2, -46, -86, 76, 4, -46, -20, -24, -60, -10, -20, 44, -8, -32, -4, -54, -68, 36, 84, 4, 86, -42, 0, -6, 76, 52, -10, 46, -76, -2, 72, 16, 34], [24, -80, -58, 26, 42, -42, 8, -70, 22, -86, -38, -12, -80, 46, 32, 84, 96, -76, -36, -26, -6, 46, 10, 84, -42, 52, -94, -76, -66, -44, -46, 64, -62, 50, -26, 96, -4, 20, -86, 12], [-42, 78, -32, -98, -86, 2, 54, -30, 68, 24, -40, 66, -92, -66, -48, -30, -98, -96, 88, -92, -40, -24, 52, 70, -54, 66, 18, 96, 22, 26, 46, 6, 76, -54, -74, 0, -82, -56, -60, 0], [-6, -70, 20, -88, 44, 42, 20, 34, -70, 36, 22, 24, 30, -82, 26, 62, -72, -96, 56, -64, 88, -42, 22, 64, 66, -40, 46, 20, -40, -86, 50, 16, 34, -84, -12, -30, -84, 96, -82, -40], [-62, 10, 36, -62, -62, -72, 14, -92, 10, 4, 14, 22, -94, -26, 88, -34, -16, 80, -28, 26, 42, 78, 92, -44, -32, 64, 18, 4, -34, -22, -54, 10, 58, 88, -90, 64, -90, -88, -30, -86], [18, -62, 22, -78, 16, -70, 26, 66, -2, -48, -74, 48, -44, -88, 12, 86, -50, 30, 14, 36, -28, 82, 64, -4, 10, 84, -88, 44, -98, -86, -22, 64, -22, 92, -80, -94, -42, 64, 66, -30], [94, -24, 96, 34, 36, -76, -58, 88, -54, -66, 22, 56, -4, 30, -70, -36, -52, 96, 14, 
96, -56, 54, -64, -78, 82, 58, 16, -86, 62, -68, 20, -4, -92, 78, -76, 96, 14, -48, 88, -28], [40, 14, 6, -84, -76, -78, -54, 48, -56, -38, 4, -30, 6, 34, -54, -38, -82, 28, 74, 66, -66, 26, 92, -78, 78, -60, 66, -36, 18, 16, -36, 72, 76, -18, -24, 20, -4, -44, -36, -16], [98, -52, 12, 48, -28, 68, -94, 10, 20, -52, -32, 38, -76, -58, -16, -60, 32, 52, 70, -46, 48, -22, -26, 82, 48, -54, 66, 56, -46, -32, -20, 52, 82, -4, -80, -30, -22, -36, 8, 4], [82, -52, 66, 94, -4, -8, 2, -34, 32, -62, 90, -48, 60, -22, 14, -84, -24, -10, 36, 0, 88, -90, -66, -6, 60, -10, -12, -42, -96, 56, 28, -48, -80, 48, 22, -98, 98, 32, -10, 48], [-54, 2, -68, -46, -38, -46, -80, -62, 50, 12, -80, 0, -64, 4, -92, -64, -52, 64, 24, -46, 4, -98, -92, -90, -68, 88, -98, -54, -74, 50, 28, -30, -4, -48, -88, -44, -86, -10, 66, 64], [-72, 50, -8, 26, 66, -40, 72, -32, -72, 36, 18, 72, 12, 48, 70, -60, 68, 6, 94, -44, -10, -52, 2, -28, 86, 78, 76, 64, 2, -42, -22, 14, -94, 98, -46, -12, 34, -50, 76, 56], [-38, -6, 44, 46, -26, -62, -40, -80, 74, 48, 96, 8, -34, 56, 52, -46, -80, 68, 40, -34, 56, -58, 40, -54, -66, 68, 60, -72, -44, 12, -88, 6, -86, 70, 10, 62, -76, -20, 98, -54], [-86, -88, -24, 0, -96, -82, -34, 2, -84, -40, -2, -30, 92, 16, -42, 74, 40, 30, -34, -98, -34, -6, -46, 40, -78, 72, 74, -56, -82, 18, 60, -68, 60, -16, 88, 16, -28, -2, 84, -88], [66, 96, 92, 18, -58, 16, 18, 4, 18, 22, 42, 48, 14, -6, -60, -76, 62, 54, 40, -22, 76, -96, 6, 44, 24, -80, -26, -70, -90, -88, -62, -68, 22, 16, -32, -70, 22, -8, -70, 44], [-4, 16, -38, 36, 24, 58, 58, 10, -38, -12, -26, -10, 46, -16, -90, -36, -60, -36, 86, -92, 14, 38, 96, -98, -8, 76, -96, 48, -46, 32, -56, -62, -54, 86, -42, -28, 78, 12, 48, 76], [42, 80, 54, -62, 12, -64, 4, -98, -10, -48, -22, 64, 26, -2, -46, -50, 10, 70, 36, -66, 28, -50, 6, -24, 52, 74, 50, -4, -34, 58, 30, -48, 36, 40, 46, -18, 68, 76, 34, -56], [-70, 38, 8, -20, -70, -86, 96, 50, 10, -98, -56, 86, -6, 10, -30, 78, 24, 32, -98, 10, -88, 42, -52, 86, -56, 18, -26, 
-36, 10, 78, -96, -68, -38, -58, -8, -94, -74, 50, 50, -32], [-2, 6, -30, -4, 2, 42, -98, -66, -92, 52, 68, 96, 80, -68, -4, -96, 90, -56, -50, -30, 2, -40, -48, 44, 20, -22, -8, 36, 66, 30, -26, 0, 6, 80, 78, 2, 60, -72, 4, 94], [28, 52, -16, 80, 72, -54, -76, 0, 62, 32, -40, 32, -40, -72, 52, 24, -4, -80, -94, -46, 54, -54, -32, -76, -62, 78, -60, 72, -58, -86, -24, 46, 20, 90, -54, 38, 36, 64, 26, 60], [-18, -72, 82, -6, 66, 60, 14, 64, 6, 6, -58, -68, 22, 98, -28, 94, -58, -70, -10, 12, 84, 26, -38, 34, -42, -50, -38, 80, -42, 42, 74, -64, 56, -78, 42, -76, -10, -16, 54, 66], [-92, 82, -88, -70, -94, 82, 20, 78, 96, -2, -28, -18, -34, 32, -14, -86, -46, -58, 92, -80, 40, 48, 28, 30, 36, -92, 8, -18, -6, -90, 76, 88, -2, -12, -78, 90, 78, 12, -2, -6], [-52, -68, 72, 58, 52, 16, -68, 6, 50, -44, 96, -8, 66, -8, 68, -90, -24, -50, -42, -44, 60, -90, -46, -86, -52, 90, 96, -82, 66, 14, -4, 34, 8, 66, 6, 50, -52, 62, 60, 50], [-56, -58, -92, -6, 38, -54, 64, 32, 48, -68, 36, -34, 34, -50, 24, -80, -18, -44, -60, -64, -22, 72, 20, -30, -92, 46, 90, 92, -84, 88, -26, -42, -98, -98, 28, -92, 30, -30, -86, 10]],31,),
([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]],22,),
([[47, 81], [14, 25]],1,),
([[-38, 30], [-80, 94]],1,),
([[1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1], [1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0], [1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1], [1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1], [1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0], [1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0], [1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1], [0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1], [0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1], [0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0], [1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0], [0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1], [1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0], [1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 1], [0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1], [0, 1, 0, 1, 1, 0, 0, 
1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1], [1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1], [0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0], [1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0], [1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0], [1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1], [0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1], [1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1], [1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0], [1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0], [1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1], [1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1], [0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1], [0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0], [0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0]],31,),
([[1, 6, 6, 8, 8, 15, 19, 21, 22, 26, 29, 30, 31, 32, 35, 37, 37, 40, 41, 41, 44, 46, 48, 52, 54, 54, 55, 60, 61, 61, 67, 68, 76, 77, 78, 80, 80, 81, 81, 81, 82, 83, 85, 87, 89, 91, 97, 97], [4, 5, 6, 8, 9, 13, 14, 19, 22, 23, 29, 29, 30, 35, 36, 36, 39, 40, 41, 43, 43, 44, 45, 46, 46, 51, 51, 53, 55, 57, 58, 59, 60, 60, 61, 64, 65, 68, 69, 70, 70, 75, 76, 78, 81, 82, 88, 92], [4, 5, 5, 8, 17, 18, 19, 19, 20, 20, 21, 21, 22, 23, 29, 29, 31, 32, 33, 33, 33, 34, 38, 43, 44, 45, 47, 58, 61, 66, 72, 72, 72, 74, 75, 76, 78, 78, 80, 83, 85, 86, 88, 92, 92, 96, 97, 99], [1, 3, 4, 6, 8, 9, 14, 14, 15, 15, 16, 18, 18, 20, 21, 21, 23, 23, 24, 27, 32, 33, 35, 35, 36, 43, 44, 44, 45, 47, 48, 50, 51, 51, 55, 55, 55, 55, 66, 67, 67, 70, 86, 88, 92, 93, 94, 99], [1, 2, 4, 7, 10, 10, 11, 13, 13, 15, 16, 17, 22, 31, 32, 35, 36, 37, 37, 41, 41, 43, 45, 46, 47, 50, 51, 51, 54, 55, 58, 64, 67, 68, 70, 72, 73, 76, 77, 82, 83, 84, 84, 85, 85, 89, 93, 94], [3, 4, 5, 6, 6, 7, 8, 8, 9, 10, 10, 15, 16, 17, 17, 21, 21, 23, 25, 26, 27, 29, 30, 32, 36, 40, 41, 43, 43, 49, 49, 57, 57, 61, 62, 68, 71, 73, 75, 81, 84, 89, 91, 92, 94, 95, 97, 97], [1, 1, 4, 16, 16, 16, 19, 24, 26, 26, 28, 31, 33, 34, 34, 35, 36, 37, 40, 52, 54, 56, 57, 62, 64, 64, 66, 70, 71, 72, 72, 73, 73, 74, 78, 81, 81, 83, 83, 85, 88, 90, 92, 93, 93, 94, 98, 99], [2, 4, 6, 8, 8, 9, 11, 14, 15, 17, 17, 20, 21, 22, 22, 28, 29, 30, 31, 31, 32, 36, 44, 47, 50, 50, 55, 59, 62, 62, 63, 66, 67, 70, 76, 76, 76, 78, 80, 80, 81, 83, 84, 86, 88, 91, 95, 97], [4, 6, 8, 10, 11, 13, 17, 17, 21, 22, 33, 33, 37, 41, 43, 45, 47, 48, 51, 52, 53, 58, 58, 58, 58, 58, 63, 65, 66, 67, 67, 68, 69, 71, 73, 75, 80, 81, 82, 82, 83, 89, 89, 94, 95, 97, 98, 99], [3, 5, 10, 11, 11, 12, 13, 17, 17, 18, 20, 23, 23, 24, 27, 31, 31, 34, 39, 39, 39, 43, 43, 44, 45, 46, 50, 51, 53, 55, 60, 61, 64, 68, 75, 75, 76, 78, 81, 82, 83, 86, 88, 93, 93, 96, 96, 98], [2, 2, 3, 6, 7, 13, 16, 21, 23, 23, 23, 24, 29, 30, 32, 35, 36, 36, 38, 39, 39, 39, 41, 42, 42, 44, 46, 
51, 51, 52, 53, 64, 71, 73, 74, 80, 81, 84, 86, 86, 93, 94, 96, 96, 96, 96, 97, 99], [2, 4, 5, 12, 14, 16, 20, 22, 25, 26, 33, 34, 35, 35, 36, 40, 44, 49, 50, 50, 51, 51, 51, 52, 55, 58, 58, 59, 60, 61, 62, 64, 66, 66, 66, 72, 75, 76, 81, 82, 82, 84, 86, 89, 92, 93, 93, 96], [1, 2, 2, 3, 4, 5, 6, 7, 11, 13, 13, 15, 19, 20, 23, 26, 27, 29, 30, 30, 38, 39, 40, 40, 41, 43, 53, 57, 65, 70, 71, 78, 78, 79, 80, 81, 82, 82, 83, 87, 87, 93, 93, 96, 96, 97, 97, 98], [4, 11, 12, 18, 18, 21, 21, 27, 27, 28, 29, 33, 34, 37, 40, 41, 41, 45, 55, 56, 56, 57, 58, 58, 63, 63, 65, 65, 66, 68, 68, 69, 69, 73, 74, 78, 80, 82, 83, 83, 85, 87, 89, 92, 95, 95, 96, 97], [1, 4, 7, 7, 14, 15, 22, 24, 24, 27, 30, 32, 33, 34, 39, 39, 40, 41, 44, 48, 56, 56, 58, 59, 61, 61, 62, 63, 64, 65, 68, 69, 70, 72, 78, 78, 80, 80, 82, 83, 83, 84, 86, 87, 92, 93, 94, 94], [1, 1, 4, 5, 6, 6, 7, 9, 10, 10, 14, 16, 17, 19, 21, 24, 26, 30, 31, 32, 37, 37, 38, 40, 45, 49, 52, 52, 54, 54, 61, 61, 65, 67, 70, 72, 78, 79, 80, 82, 84, 85, 87, 88, 88, 92, 94, 97], [3, 6, 10, 10, 11, 12, 12, 13, 14, 15, 16, 18, 21, 23, 25, 27, 27, 27, 27, 30, 33, 35, 40, 41, 44, 48, 50, 50, 51, 52, 54, 54, 55, 58, 58, 58, 59, 62, 65, 69, 72, 72, 74, 74, 76, 79, 80, 98], [1, 2, 4, 4, 4, 5, 6, 7, 9, 9, 10, 12, 22, 23, 24, 26, 26, 28, 33, 35, 35, 38, 42, 44, 48, 48, 52, 54, 56, 60, 63, 68, 68, 68, 72, 75, 77, 79, 79, 82, 85, 88, 89, 91, 91, 91, 92, 93], [1, 8, 11, 13, 22, 23, 23, 26, 30, 31, 33, 34, 35, 35, 37, 39, 40, 44, 46, 46, 46, 47, 47, 47, 54, 59, 60, 60, 61, 62, 64, 66, 69, 74, 75, 77, 78, 79, 79, 82, 83, 86, 87, 92, 94, 96, 99, 99], [1, 1, 3, 8, 11, 14, 19, 20, 20, 20, 21, 24, 25, 25, 28, 34, 37, 38, 38, 39, 40, 47, 53, 54, 56, 57, 58, 62, 65, 69, 70, 70, 71, 71, 73, 76, 78, 78, 81, 84, 87, 92, 94, 94, 94, 96, 98, 99], [3, 4, 4, 15, 19, 21, 23, 26, 30, 31, 32, 34, 35, 37, 38, 41, 46, 46, 46, 51, 52, 53, 58, 63, 65, 68, 68, 68, 69, 70, 70, 70, 71, 72, 73, 74, 75, 75, 77, 80, 81, 84, 84, 86, 96, 96, 96, 98], [3, 4, 8, 9, 9, 11, 
16, 19, 19, 20, 20, 23, 27, 27, 28, 30, 31, 34, 36, 40, 41, 43, 45, 46, 53, 53, 55, 58, 58, 59, 62, 63, 64, 65, 68, 68, 71, 72, 75, 78, 80, 87, 87, 88, 89, 94, 97, 99], [1, 3, 3, 10, 12, 12, 12, 12, 13, 15, 17, 18, 22, 24, 24, 28, 29, 31, 33, 33, 34, 34, 40, 43, 44, 48, 48, 49, 51, 55, 60, 63, 67, 68, 70, 72, 73, 75, 75, 77, 82, 85, 88, 91, 93, 94, 95, 98], [6, 6, 7, 8, 9, 14, 15, 18, 19, 26, 28, 28, 28, 30, 31, 33, 33, 36, 38, 39, 43, 44, 46, 48, 56, 57, 57, 60, 60, 61, 67, 69, 70, 71, 73, 74, 79, 80, 82, 84, 86, 86, 90, 92, 94, 95, 96, 98], [2, 2, 3, 9, 10, 14, 15, 15, 16, 19, 25, 26, 28, 31, 32, 33, 33, 34, 35, 41, 41, 42, 42, 43, 48, 48, 58, 59, 61, 66, 66, 69, 72, 73, 77, 78, 79, 79, 83, 86, 88, 92, 92, 92, 92, 95, 96, 97], [1, 6, 7, 8, 11, 14, 15, 16, 16, 18, 23, 23, 24, 25, 28, 29, 31, 32, 36, 38, 38, 41, 42, 43, 44, 46, 55, 55, 56, 59, 62, 64, 67, 69, 69, 70, 71, 72, 76, 81, 84, 86, 86, 87, 87, 89, 94, 95], [3, 3, 6, 10, 11, 15, 16, 18, 18, 27, 28, 28, 30, 30, 33, 34, 35, 35, 39, 43, 45, 48, 50, 51, 52, 53, 55, 62, 62, 62, 67, 68, 69, 70, 71, 72, 74, 74, 80, 81, 84, 85, 85, 86, 88, 88, 88, 96], [1, 2, 4, 5, 5, 5, 6, 12, 14, 14, 16, 16, 19, 28, 28, 29, 30, 32, 35, 36, 38, 39, 41, 47, 52, 57, 58, 58, 62, 64, 66, 71, 75, 76, 80, 81, 82, 83, 84, 85, 86, 87, 90, 91, 93, 96, 97, 98], [4, 7, 8, 10, 11, 12, 14, 17, 19, 19, 20, 24, 24, 28, 29, 29, 31, 31, 32, 33, 35, 36, 40, 42, 43, 47, 49, 53, 53, 53, 54, 54, 58, 58, 61, 64, 67, 72, 74, 79, 80, 80, 84, 86, 91, 91, 96, 97], [2, 4, 6, 6, 11, 12, 17, 19, 20, 21, 25, 26, 29, 30, 30, 31, 32, 39, 42, 42, 47, 48, 48, 49, 49, 49, 51, 55, 56, 59, 62, 65, 67, 67, 68, 68, 69, 73, 73, 76, 79, 82, 86, 87, 87, 88, 98, 98], [2, 3, 5, 7, 8, 16, 17, 18, 29, 29, 30, 31, 32, 33, 36, 38, 38, 40, 43, 45, 47, 56, 58, 59, 61, 63, 65, 65, 67, 68, 68, 69, 73, 74, 78, 80, 81, 82, 82, 89, 91, 92, 92, 94, 96, 97, 97, 98], [4, 8, 11, 12, 14, 15, 24, 27, 29, 32, 33, 36, 37, 38, 42, 46, 46, 47, 47, 49, 50, 53, 58, 58, 61, 64, 64, 65, 68, 69, 73, 
74, 76, 79, 79, 82, 83, 84, 85, 89, 89, 90, 95, 95, 95, 97, 99, 99], [3, 3, 3, 5, 6, 7, 10, 13, 14, 16, 18, 23, 25, 26, 27, 31, 31, 35, 38, 41, 44, 46, 52, 57, 58, 62, 63, 63, 63, 64, 68, 69, 71, 72, 72, 76, 76, 78, 80, 83, 83, 88, 89, 90, 92, 94, 95, 98], [3, 8, 11, 15, 15, 26, 27, 29, 30, 32, 32, 37, 39, 42, 47, 49, 52, 52, 52, 53, 53, 54, 54, 59, 60, 61, 61, 62, 64, 65, 66, 66, 67, 67, 68, 69, 73, 74, 77, 79, 90, 90, 91, 91, 95, 98, 99, 99], [2, 4, 6, 8, 9, 10, 11, 15, 15, 16, 20, 21, 22, 23, 25, 26, 27, 27, 36, 38, 42, 45, 47, 47, 51, 53, 53, 55, 57, 59, 59, 62, 65, 66, 72, 73, 76, 82, 82, 83, 88, 90, 90, 91, 95, 96, 99, 99], [1, 2, 3, 6, 6, 7, 11, 16, 17, 19, 20, 23, 24, 24, 26, 28, 31, 33, 36, 37, 38, 39, 40, 40, 44, 46, 46, 51, 51, 53, 62, 62, 63, 64, 68, 69, 70, 73, 78, 78, 85, 87, 90, 91, 93, 93, 95, 98], [3, 9, 9, 11, 14, 16, 17, 18, 18, 22, 22, 25, 29, 30, 34, 35, 37, 37, 41, 42, 43, 45, 45, 52, 54, 55, 55, 57, 63, 64, 65, 68, 69, 70, 70, 71, 74, 75, 75, 77, 86, 86, 87, 93, 94, 95, 95, 99], [1, 3, 3, 10, 13, 14, 15, 18, 19, 20, 22, 23, 24, 25, 26, 32, 34, 40, 41, 41, 41, 44, 44, 46, 53, 57, 57, 59, 60, 61, 62, 63, 64, 70, 72, 72, 77, 78, 86, 88, 90, 92, 92, 93, 93, 94, 95, 98], [2, 4, 5, 7, 17, 20, 20, 21, 24, 24, 25, 25, 27, 28, 29, 29, 33, 35, 35, 35, 37, 38, 43, 43, 45, 48, 49, 52, 53, 59, 62, 64, 65, 70, 71, 72, 72, 75, 75, 86, 88, 89, 89, 91, 91, 93, 96, 97], [5, 6, 6, 9, 13, 16, 17, 18, 20, 21, 25, 26, 26, 31, 34, 43, 44, 45, 45, 47, 48, 48, 51, 51, 54, 56, 56, 57, 61, 61, 66, 67, 69, 69, 70, 72, 76, 76, 81, 83, 85, 90, 96, 96, 97, 98, 98, 99], [3, 4, 5, 6, 12, 13, 14, 14, 18, 20, 22, 24, 32, 35, 38, 38, 39, 41, 44, 48, 51, 52, 54, 55, 55, 57, 58, 59, 60, 60, 62, 64, 66, 69, 69, 74, 74, 76, 78, 79, 81, 82, 82, 82, 85, 86, 91, 99], [2, 6, 7, 8, 10, 14, 15, 15, 16, 16, 18, 21, 24, 30, 31, 32, 37, 38, 39, 41, 42, 42, 44, 45, 50, 51, 52, 53, 59, 60, 61, 61, 67, 67, 72, 73, 74, 75, 77, 79, 81, 88, 90, 91, 95, 95, 97, 98], [2, 3, 4, 7, 7, 7, 9, 15, 17, 
18, 19, 20, 22, 24, 26, 26, 28, 29, 33, 36, 39, 40, 42, 43, 45, 49, 58, 61, 68, 68, 71, 75, 75, 75, 75, 76, 77, 78, 79, 80, 83, 86, 91, 94, 95, 98, 99, 99], [5, 6, 7, 10, 10, 11, 12, 14, 17, 19, 20, 24, 29, 31, 32, 35, 41, 44, 47, 47, 49, 50, 54, 57, 60, 61, 64, 66, 69, 70, 71, 72, 75, 75, 75, 77, 80, 81, 82, 88, 88, 90, 94, 97, 97, 97, 98, 99], [1, 1, 4, 6, 6, 7, 8, 11, 11, 14, 17, 18, 20, 21, 25, 29, 31, 31, 32, 38, 40, 41, 42, 44, 44, 45, 46, 51, 52, 58, 61, 62, 66, 67, 73, 74, 76, 79, 82, 84, 85, 86, 87, 90, 91, 92, 94, 97], [1, 1, 3, 4, 7, 7, 10, 11, 12, 13, 16, 24, 24, 27, 28, 29, 34, 36, 38, 39, 39, 42, 45, 48, 55, 57, 60, 62, 62, 63, 63, 69, 72, 76, 77, 78, 81, 81, 82, 83, 90, 93, 94, 94, 96, 98, 99, 99], [1, 1, 1, 1, 2, 2, 3, 7, 8, 14, 14, 19, 19, 23, 23, 25, 26, 27, 31, 43, 48, 49, 49, 50, 51, 51, 52, 55, 56, 57, 57, 57, 59, 62, 63, 63, 67, 71, 74, 74, 74, 76, 81, 84, 85, 87, 98, 98], [1, 1, 5, 9, 10, 12, 16, 18, 19, 20, 23, 26, 28, 35, 35, 36, 37, 40, 41, 41, 44, 44, 54, 57, 59, 60, 60, 60, 61, 63, 67, 74, 76, 79, 79, 84, 85, 86, 89, 89, 90, 91, 92, 92, 92, 95, 96, 98]],35,),
([[-18, -22, 0, 40, 84, 14, -90, 8, -52, 70, 24, 92, -22, 92, -38, -78, 76, 70, -6, -34, 68, -92, -58, -58, -58, -90, -76, 62, -46, -22, 6], [-78, 0, -42, -10, 94, -78, 26, 28, 30, 34, -68, -68, 52, 70, 86, -54, 42, 60, -34, 14, 36, 30, -64, -48, -76, -36, -78, 66, 18, 96, 2], [62, -88, 90, -32, -40, 56, 18, 96, 72, -50, 20, 72, 64, -82, 30, 66, -32, 16, 64, 96, -82, 72, -94, -48, 14, 60, 6, -78, 44, -80, 22], [-42, -86, -16, -62, 4, -30, 46, 10, 94, -12, 14, 96, -62, 68, 72, 68, -58, 2, 26, -12, 2, -16, 32, 26, 92, 64, -62, -80, -70, 76, -14], [96, 78, -4, -34, -88, 34, 50, 0, 46, 94, 14, 26, 58, -14, 82, 24, 86, 74, -8, 50, 54, -66, 46, -80, 20, 74, 2, -68, 92, -96, -2], [74, -70, -36, 76, 90, 50, 74, 78, 12, 40, 0, -8, -18, -34, -66, 86, 48, 44, 18, 96, -66, 48, 0, -36, 72, -40, 50, -32, -2, -50, 78], [18, -80, 70, -16, 34, -54, -94, -40, 60, -4, -50, -44, -56, -68, 22, -12, 54, 10, 90, -76, -28, 76, 72, -2, -78, 34, -24, 14, -80, -86, 68], [16, -88, 82, -48, -90, 36, 56, -80, -44, 40, 18, -84, -30, 40, -48, 52, 74, 18, 84, 92, 76, -26, -8, -4, 32, -92, 10, -88, -74, -58, -56], [22, 98, 12, 44, 30, 70, -60, 62, -78, -60, 80, -96, 46, 8, 26, 54, 20, -58, 80, -36, 44, -20, 18, 36, -22, 50, 90, 64, -56, 4, -28], [-6, -18, -92, -68, 20, -22, -60, -50, -72, 64, -50, 76, -36, 40, -30, 64, 96, 2, -82, 52, -50, 20, 34, 52, -24, -14, 96, 76, -48, -6, -98], [-60, 48, -82, -38, -26, 98, 56, 98, 78, -82, -92, -70, 56, -80, -46, -96, -10, -70, -88, 92, -54, 16, 88, -26, -74, 34, -56, 54, -52, 2, 72], [16, 82, -70, 42, -40, 38, 48, -86, -28, 46, -40, -30, -54, 58, 94, -54, -88, 46, 42, 84, 58, -74, 94, -2, 72, -50, 72, 36, 26, 50, -80], [-80, -34, 16, 20, -72, 86, 22, 82, -64, -38, -24, -82, -30, 2, 32, 18, -88, 82, 0, 90, -36, -92, 50, -30, -72, -20, 74, -14, -42, 52, 66], [40, 54, 42, -34, -20, 18, 88, -32, -52, -40, -8, 8, 60, 0, 22, 94, -96, -72, -76, -18, 60, -52, -98, -92, 30, 66, 76, -38, -38, 24, 70], [-82, -60, 86, 98, -42, -12, -92, -78, 92, -90, 54, 0, 8, 98, 50, 
80, -24, 20, -86, 56, -86, 38, 6, -44, -24, -2, 16, -50, 36, 10, 98], [-34, 92, -52, -72, -54, 64, -48, -46, 88, -28, -56, 92, -8, -18, -70, -48, -2, -42, -76, -62, -34, 8, -22, -4, -12, -14, -26, -46, 40, 12, -84], [50, 70, -52, -86, 50, 36, -18, -82, -12, -74, -90, 14, 18, -10, 80, 24, -22, -10, -30, 92, 70, 60, 16, -18, 10, 2, 2, 18, 44, -72, -72], [54, -66, 22, 76, -34, 68, -36, -50, -32, -20, -70, 44, 56, 88, -12, -32, 42, -30, 90, -88, 30, -10, -28, -16, 40, -58, 12, -70, 12, -24, 74], [48, -36, -52, -36, 8, -20, -60, 64, 50, 94, -64, -74, -70, 40, -80, 46, 22, 94, -52, -58, -76, -36, -76, 92, -76, -92, -64, -78, -2, -20, 62], [-30, 34, 74, -48, -56, -18, -8, 88, 18, 80, -72, -52, -52, 82, -20, 58, 58, -50, 68, 26, 18, 34, -86, -8, 40, 42, 12, 92, -14, -4, -78], [-18, -80, 66, 66, -14, 16, 26, -24, 32, 24, 58, 0, 36, -76, -48, 36, 88, -18, 42, -4, 2, 48, -90, -84, 2, 92, 78, 92, -62, 4, 72], [90, -56, -48, -68, 70, -2, -94, -52, -12, 2, 64, 12, -70, 18, 28, -98, -80, 48, 34, -58, 24, 6, -60, -54, -70, 96, 88, 38, 42, -40, 18], [-2, -48, 32, 62, -42, 70, -10, -42, 20, 88, 44, -12, -46, -10, -96, 18, 44, -46, 90, -6, 74, 88, 8, -42, 26, -10, 84, -28, -12, -88, -98], [56, -64, -4, 32, 98, 12, 82, -46, 80, 16, -32, 54, 54, -28, -56, 54, 88, -46, 68, -74, 24, 4, 96, -84, 86, 14, -66, 12, -64, -86, 10], [26, -50, 72, -2, -50, -88, 96, -24, 48, 96, 26, 24, 46, 80, -70, -84, -30, 64, 44, -86, 24, -20, 12, 96, -26, 42, 88, -44, -54, -84, -66], [-28, 90, -66, 46, 16, -84, 22, -62, 20, -26, 22, 86, 40, -2, -36, 60, 90, 14, -24, 32, 66, 32, 12, 92, 22, -82, -96, 20, -64, 16, -22], [26, -80, 12, -42, -80, 72, -10, 42, 26, -32, 56, 96, -34, -14, -28, 62, -58, -36, -24, -22, -86, -48, -28, 48, -26, 26, 38, 10, -42, -8, -26], [-76, 22, 60, 88, 38, 44, -62, -68, -96, -64, 12, 42, 94, 10, 90, 68, -44, 74, -28, -86, -20, -22, -60, -78, -20, 68, -32, -40, 12, -64, 82], [60, -66, -14, -90, 40, 26, 52, -70, 92, -64, 68, 6, -84, -32, -90, -30, 18, -68, -50, 68, 54, 24, -68, -92, 
-32, -40, -30, 78, 60, -94, -48], [-14, -2, 72, 70, 2, 24, -54, 14, 98, -2, 70, 24, -60, -28, -72, -36, -50, -12, 60, -98, -80, -46, -88, 28, -74, -94, -28, 92, 30, -38, -8], [-78, 26, -94, -24, 14, 80, 60, -80, -28, 86, 4, 54, 88, -34, 4, -44, 18, -96, 18, -28, 90, 88, 42, 8, 66, 24, 0, -70, -78, -64, -20]],29,),
([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]],38,),
([[91, 17, 91, 54, 63, 43, 59, 7, 5, 73, 55, 46, 78, 60, 96, 32, 22, 66, 40, 34, 2, 48, 97, 26, 34, 17, 56, 88, 69, 30, 52, 87, 98], [84, 89, 34, 38, 49, 47, 99, 97, 48, 75, 43, 13, 7, 21, 76, 88, 18, 29, 86, 94, 89, 1, 40, 87, 94, 33, 12, 87, 38, 46, 54, 56, 79], [24, 21, 46, 88, 21, 31, 78, 91, 69, 62, 88, 88, 49, 37, 21, 30, 71, 57, 48, 1, 63, 46, 78, 80, 10, 57, 52, 31, 90, 13, 16, 12, 67], [48, 3, 74, 98, 23, 56, 27, 66, 4, 38, 14, 29, 20, 9, 84, 72, 25, 18, 98, 21, 37, 9, 34, 16, 42, 11, 14, 73, 4, 79, 22, 63, 37], [73, 26, 87, 85, 18, 14, 96, 87, 71, 41, 67, 71, 69, 61, 19, 8, 31, 64, 28, 6, 20, 1, 50, 9, 13, 42, 41, 99, 43, 75, 24, 34, 67], [40, 92, 49, 22, 85, 79, 3, 12, 66, 91, 64, 88, 85, 56, 1, 58, 2, 49, 46, 3, 69, 47, 39, 64, 97, 72, 36, 6, 97, 67, 47, 81, 50], [10, 22, 88, 26, 66, 41, 29, 55, 34, 86, 35, 31, 13, 31, 26, 5, 72, 45, 93, 86, 99, 99, 87, 91, 80, 40, 89, 44, 20, 33, 55, 42, 19], [88, 43, 80, 48, 35, 35, 80, 57, 89, 64, 10, 33, 55, 6, 76, 64, 59, 65, 62, 23, 32, 78, 45, 87, 41, 96, 54, 44, 82, 63, 14, 76, 34], [40, 32, 33, 4, 36, 81, 35, 1, 35, 22, 98, 37, 69, 69, 8, 4, 33, 61, 80, 37, 73, 45, 18, 17, 7, 38, 90, 59, 98, 20, 79, 21, 67], [15, 71, 7, 16, 55, 43, 65, 61, 11, 69, 87, 34, 62, 4, 30, 6, 10, 27, 22, 28, 18, 3, 28, 52, 58, 87, 70, 74, 66, 25, 68, 46, 73], [34, 89, 5, 16, 91, 93, 86, 19, 95, 4, 3, 71, 34, 25, 96, 86, 60, 86, 90, 72, 88, 2, 29, 91, 66, 92, 60, 34, 81, 22, 56, 90, 31], [83, 43, 58, 84, 38, 98, 3, 17, 5, 48, 50, 9, 84, 85, 1, 16, 23, 57, 30, 59, 47, 1, 59, 33, 33, 86, 82, 29, 2, 3, 2, 53, 57], [62, 77, 77, 80, 62, 72, 4, 41, 10, 97, 32, 85, 35, 70, 10, 18, 33, 93, 97, 96, 14, 54, 86, 31, 65, 45, 31, 3, 56, 85, 20, 35, 10], [54, 24, 10, 51, 45, 90, 47, 83, 6, 32, 60, 58, 74, 7, 15, 62, 47, 94, 99, 48, 12, 80, 13, 66, 52, 19, 62, 13, 7, 79, 20, 34, 44], [25, 76, 25, 5, 39, 26, 50, 69, 39, 35, 90, 80, 33, 78, 80, 62, 62, 35, 96, 67, 57, 44, 22, 52, 80, 6, 78, 24, 84, 64, 67, 3, 90], [10, 10, 92, 4, 17, 49, 6, 65, 56, 2, 
46, 57, 4, 37, 37, 65, 18, 65, 92, 24, 36, 98, 86, 6, 63, 64, 9, 77, 40, 64, 32, 14, 67], [36, 12, 98, 90, 96, 94, 17, 26, 83, 26, 16, 89, 29, 98, 2, 59, 78, 14, 51, 40, 84, 1, 83, 50, 97, 65, 68, 20, 20, 48, 80, 15, 87], [26, 1, 56, 67, 76, 38, 19, 29, 90, 58, 62, 77, 12, 92, 22, 49, 44, 83, 84, 51, 25, 9, 61, 69, 1, 2, 83, 20, 34, 38, 70, 2, 32], [54, 28, 21, 94, 62, 51, 60, 43, 76, 13, 1, 45, 5, 84, 52, 21, 38, 39, 89, 9, 67, 56, 93, 45, 38, 79, 95, 42, 70, 68, 15, 52, 44], [46, 34, 89, 97, 46, 41, 55, 63, 5, 91, 95, 40, 3, 31, 65, 53, 35, 42, 8, 75, 24, 31, 59, 19, 84, 79, 60, 91, 63, 99, 83, 75, 23], [52, 96, 12, 22, 5, 84, 10, 69, 56, 10, 74, 27, 85, 92, 96, 77, 75, 89, 26, 81, 18, 73, 83, 37, 43, 4, 74, 39, 29, 75, 98, 91, 34], [74, 23, 95, 17, 90, 40, 71, 6, 98, 80, 53, 52, 48, 19, 40, 38, 14, 13, 24, 90, 25, 96, 51, 10, 38, 89, 16, 85, 51, 46, 84, 94, 50], [72, 34, 29, 54, 13, 1, 91, 39, 55, 7, 69, 60, 72, 10, 88, 35, 37, 62, 73, 5, 2, 15, 76, 4, 99, 5, 31, 19, 65, 29, 62, 82, 14], [70, 95, 44, 52, 30, 12, 29, 54, 6, 6, 61, 32, 5, 16, 53, 2, 16, 2, 85, 81, 63, 50, 2, 23, 41, 32, 61, 61, 64, 53, 22, 63, 92], [95, 62, 20, 58, 14, 38, 81, 30, 11, 59, 93, 72, 69, 73, 17, 15, 41, 81, 58, 84, 59, 73, 89, 15, 62, 81, 79, 76, 72, 82, 12, 42, 4], [46, 61, 24, 78, 8, 36, 91, 60, 87, 15, 35, 77, 14, 30, 64, 25, 16, 3, 57, 95, 14, 89, 30, 87, 47, 39, 90, 25, 82, 27, 85, 65, 81], [23, 53, 6, 29, 53, 66, 38, 15, 78, 59, 47, 91, 13, 12, 96, 8, 93, 65, 9, 85, 12, 55, 11, 89, 91, 6, 24, 56, 55, 98, 23, 78, 76], [78, 15, 32, 58, 70, 69, 8, 51, 64, 42, 79, 24, 73, 8, 38, 21, 18, 31, 89, 60, 60, 17, 87, 62, 56, 94, 59, 83, 39, 63, 72, 45, 41], [16, 71, 94, 55, 37, 40, 84, 88, 62, 15, 26, 52, 36, 31, 20, 70, 89, 1, 52, 15, 77, 12, 79, 26, 2, 75, 10, 53, 27, 63, 55, 76, 50], [42, 65, 39, 23, 69, 31, 84, 47, 68, 53, 28, 7, 10, 54, 62, 37, 61, 82, 24, 29, 69, 44, 44, 34, 95, 44, 31, 7, 21, 9, 64, 51, 20], [33, 74, 71, 30, 98, 92, 74, 50, 90, 23, 8, 90, 81, 38, 5, 12, 65, 22, 99, 
44, 30, 1, 81, 82, 33, 13, 47, 52, 17, 88, 40, 91, 89], [69, 97, 51, 49, 71, 2, 43, 7, 51, 86, 25, 74, 91, 55, 42, 23, 83, 55, 73, 53, 55, 75, 93, 75, 69, 81, 6, 75, 2, 66, 51, 37, 19], [65, 39, 98, 7, 42, 20, 34, 4, 22, 20, 26, 80, 56, 70, 7, 95, 87, 49, 19, 17, 58, 65, 29, 22, 26, 15, 28, 93, 9, 16, 75, 76, 78]],20,)
]
filled_function_param = [
([[7, 32, 33, 35, 51, 61, 62, 68, 71, 73], [3, 10, 18, 32, 44, 56, 62, 80, 86, 91], [13, 21, 26, 31, 43, 53, 54, 59, 61, 73], [3, 9, 14, 14, 43, 46, 67, 71, 87, 99], [20, 53, 53, 72, 79, 80, 82, 84, 95, 99], [15, 21, 39, 44, 46, 48, 59, 64, 65, 70], [28, 35, 39, 41, 45, 50, 52, 61, 72, 73], [3, 15, 21, 22, 49, 49, 54, 73, 88, 98], [7, 9, 14, 16, 18, 26, 42, 45, 59, 86], [14, 21, 25, 31, 34, 45, 53, 54, 66, 82]],8,),
([[22, 92, 36, -94, -4, 6, -36, 78, -18, 12, 14, 54, 80, 4, -34, 4, -2, 24, 60, -14, 68, 88, -46, 82, -70, -2, 38, 76, -72, 70, -12, 24, -62, 58, 64, -92, 60, 96, -20, 0], [96, 42, -92, 70, 82, -74, -28, -64, -64, -50, -56, 92, -52, 84, 68, 2, -80, 60, -70, 6, 42, -16, 50, 86, -2, 56, 36, -90, 82, -38, 42, -66, -32, -88, 2, 48, 24, 56, 78, 90], [-86, 4, 8, 22, 92, -62, 88, -54, 50, 0, -32, -24, 38, 64, -22, -4, 30, -26, 82, 10, 4, 78, 78, 48, -42, 94, -14, -54, 24, 14, 36, 46, -16, -14, -72, -98, 30, 2, -28, -10], [-70, 44, 54, 6, 2, 66, -24, 6, 94, 16, 92, -78, -26, -36, 66, 56, -30, -50, -94, -64, 94, 82, -70, 74, 70, 88, -34, -24, -4, -62, 10, 18, -96, -22, -34, -52, 40, -50, -80, 22], [78, -70, -52, 58, 78, -6, -26, -16, -34, -42, 66, 12, -2, 30, -36, -28, 94, 64, 84, -86, -78, -62, -92, 16, 50, -50, 16, 64, -46, -92, -46, -48, -18, -86, -18, -84, 28, 22, 10, -58], [34, -86, 68, -10, -82, -28, -78, -18, -86, 22, -80, -14, 34, -80, -30, -50, 32, 84, -70, -32, 40, 62, -92, -76, 98, 24, -70, 24, 64, -92, 40, -28, -10, 38, -6, -6, -44, 50, -24, 98], [96, 62, 46, 90, 38, -36, -82, 70, -82, 2, -78, -84, -42, 92, 32, 54, 44, -50, -90, 94, 6, 38, 40, -6, -76, 98, -64, -90, 80, -2, -20, 28, 94, -52, 38, -38, 12, -78, -32, -64], [-28, -32, 66, 44, 28, 60, 58, 70, -56, 8, -82, 78, -94, -74, 60, 36, 64, 48, 60, -60, 82, 44, 52, -38, 26, -36, -90, -94, 44, 74, 84, 28, 76, 46, 4, 64, 16, 44, 72, 48], [28, 92, -64, 80, -84, 18, -82, 8, -28, -60, -50, 66, 76, 96, -54, 54, -4, -80, 72, 2, 74, -64, -48, 34, 6, -56, 6, 86, -26, -68, -30, -18, 70, 14, -70, -78, 68, 86, 40, -86], [58, 78, 76, -4, -68, 76, -10, -68, -78, -48, -82, -46, -80, -40, 42, 36, 96, 32, -10, -90, 6, -22, 22, -52, 32, 16, -58, -52, -78, -4, -54, -86, -16, 78, -66, -16, 68, 6, 66, -84], [-58, 30, 62, 70, -38, -22, -68, 98, -62, -54, 80, -38, -90, 38, -8, -36, -52, 48, -2, 82, -78, -72, -6, 96, 44, -34, 90, -2, 30, 92, 40, -18, -76, 46, -60, 36, 90, -54, 56, -24], [84, 34, -20, 4, 0, 80, 70, -82, -74, -12, -24, 
72, 30, 16, 62, -44, 50, -64, 98, 58, 74, -64, -34, 82, -24, 20, 22, -34, 74, 4, 52, -8, 26, -8, 74, -26, 34, 60, 40, -24], [-46, -54, 22, 20, 70, -8, 32, 98, 94, 34, -94, -40, 24, 98, -56, 12, -28, 58, 84, -86, 98, 80, -40, -54, -30, 16, 6, 74, 72, -98, 78, -98, -62, 70, 40, -90, 82, 68, -36, -12], [26, -54, 66, 50, -78, -66, -18, 78, -78, -24, 22, 14, -42, -10, 34, -82, 36, 94, -98, 60, 52, 46, -60, -52, -42, -64, 94, -18, 66, -2, -20, -92, -70, 32, 14, 72, 58, 54, -62, 22], [-16, -14, -80, 20, -90, -10, 92, -54, -8, -32, -44, 6, -26, 66, -56, -38, -56, 86, 52, -38, 12, 12, 20, 24, 14, -30, -10, -70, 36, 64, -82, -46, 24, 26, -58, 96, 58, 96, -70, 58], [16, -90, -18, -40, 86, -98, -14, -92, -86, 24, -98, -84, 54, 64, -84, -50, 76, -34, 62, 26, 58, 42, 10, -72, 32, 92, 46, 50, 58, 66, -98, 26, -56, 56, -66, 26, -82, 0, -6, 34], [4, -2, -6, 8, -70, 30, -36, 2, -46, -86, 76, 4, -46, -20, -24, -60, -10, -20, 44, -8, -32, -4, -54, -68, 36, 84, 4, 86, -42, 0, -6, 76, 52, -10, 46, -76, -2, 72, 16, 34], [24, -80, -58, 26, 42, -42, 8, -70, 22, -86, -38, -12, -80, 46, 32, 84, 96, -76, -36, -26, -6, 46, 10, 84, -42, 52, -94, -76, -66, -44, -46, 64, -62, 50, -26, 96, -4, 20, -86, 12], [-42, 78, -32, -98, -86, 2, 54, -30, 68, 24, -40, 66, -92, -66, -48, -30, -98, -96, 88, -92, -40, -24, 52, 70, -54, 66, 18, 96, 22, 26, 46, 6, 76, -54, -74, 0, -82, -56, -60, 0], [-6, -70, 20, -88, 44, 42, 20, 34, -70, 36, 22, 24, 30, -82, 26, 62, -72, -96, 56, -64, 88, -42, 22, 64, 66, -40, 46, 20, -40, -86, 50, 16, 34, -84, -12, -30, -84, 96, -82, -40], [-62, 10, 36, -62, -62, -72, 14, -92, 10, 4, 14, 22, -94, -26, 88, -34, -16, 80, -28, 26, 42, 78, 92, -44, -32, 64, 18, 4, -34, -22, -54, 10, 58, 88, -90, 64, -90, -88, -30, -86], [18, -62, 22, -78, 16, -70, 26, 66, -2, -48, -74, 48, -44, -88, 12, 86, -50, 30, 14, 36, -28, 82, 64, -4, 10, 84, -88, 44, -98, -86, -22, 64, -22, 92, -80, -94, -42, 64, 66, -30], [94, -24, 96, 34, 36, -76, -58, 88, -54, -66, 22, 56, -4, 30, -70, -36, -52, 96, 14, 
96, -56, 54, -64, -78, 82, 58, 16, -86, 62, -68, 20, -4, -92, 78, -76, 96, 14, -48, 88, -28], [40, 14, 6, -84, -76, -78, -54, 48, -56, -38, 4, -30, 6, 34, -54, -38, -82, 28, 74, 66, -66, 26, 92, -78, 78, -60, 66, -36, 18, 16, -36, 72, 76, -18, -24, 20, -4, -44, -36, -16], [98, -52, 12, 48, -28, 68, -94, 10, 20, -52, -32, 38, -76, -58, -16, -60, 32, 52, 70, -46, 48, -22, -26, 82, 48, -54, 66, 56, -46, -32, -20, 52, 82, -4, -80, -30, -22, -36, 8, 4], [82, -52, 66, 94, -4, -8, 2, -34, 32, -62, 90, -48, 60, -22, 14, -84, -24, -10, 36, 0, 88, -90, -66, -6, 60, -10, -12, -42, -96, 56, 28, -48, -80, 48, 22, -98, 98, 32, -10, 48], [-54, 2, -68, -46, -38, -46, -80, -62, 50, 12, -80, 0, -64, 4, -92, -64, -52, 64, 24, -46, 4, -98, -92, -90, -68, 88, -98, -54, -74, 50, 28, -30, -4, -48, -88, -44, -86, -10, 66, 64], [-72, 50, -8, 26, 66, -40, 72, -32, -72, 36, 18, 72, 12, 48, 70, -60, 68, 6, 94, -44, -10, -52, 2, -28, 86, 78, 76, 64, 2, -42, -22, 14, -94, 98, -46, -12, 34, -50, 76, 56], [-38, -6, 44, 46, -26, -62, -40, -80, 74, 48, 96, 8, -34, 56, 52, -46, -80, 68, 40, -34, 56, -58, 40, -54, -66, 68, 60, -72, -44, 12, -88, 6, -86, 70, 10, 62, -76, -20, 98, -54], [-86, -88, -24, 0, -96, -82, -34, 2, -84, -40, -2, -30, 92, 16, -42, 74, 40, 30, -34, -98, -34, -6, -46, 40, -78, 72, 74, -56, -82, 18, 60, -68, 60, -16, 88, 16, -28, -2, 84, -88], [66, 96, 92, 18, -58, 16, 18, 4, 18, 22, 42, 48, 14, -6, -60, -76, 62, 54, 40, -22, 76, -96, 6, 44, 24, -80, -26, -70, -90, -88, -62, -68, 22, 16, -32, -70, 22, -8, -70, 44], [-4, 16, -38, 36, 24, 58, 58, 10, -38, -12, -26, -10, 46, -16, -90, -36, -60, -36, 86, -92, 14, 38, 96, -98, -8, 76, -96, 48, -46, 32, -56, -62, -54, 86, -42, -28, 78, 12, 48, 76], [42, 80, 54, -62, 12, -64, 4, -98, -10, -48, -22, 64, 26, -2, -46, -50, 10, 70, 36, -66, 28, -50, 6, -24, 52, 74, 50, -4, -34, 58, 30, -48, 36, 40, 46, -18, 68, 76, 34, -56], [-70, 38, 8, -20, -70, -86, 96, 50, 10, -98, -56, 86, -6, 10, -30, 78, 24, 32, -98, 10, -88, 42, -52, 86, -56, 18, -26, 
-36, 10, 78, -96, -68, -38, -58, -8, -94, -74, 50, 50, -32], [-2, 6, -30, -4, 2, 42, -98, -66, -92, 52, 68, 96, 80, -68, -4, -96, 90, -56, -50, -30, 2, -40, -48, 44, 20, -22, -8, 36, 66, 30, -26, 0, 6, 80, 78, 2, 60, -72, 4, 94], [28, 52, -16, 80, 72, -54, -76, 0, 62, 32, -40, 32, -40, -72, 52, 24, -4, -80, -94, -46, 54, -54, -32, -76, -62, 78, -60, 72, -58, -86, -24, 46, 20, 90, -54, 38, 36, 64, 26, 60], [-18, -72, 82, -6, 66, 60, 14, 64, 6, 6, -58, -68, 22, 98, -28, 94, -58, -70, -10, 12, 84, 26, -38, 34, -42, -50, -38, 80, -42, 42, 74, -64, 56, -78, 42, -76, -10, -16, 54, 66], [-92, 82, -88, -70, -94, 82, 20, 78, 96, -2, -28, -18, -34, 32, -14, -86, -46, -58, 92, -80, 40, 48, 28, 30, 36, -92, 8, -18, -6, -90, 76, 88, -2, -12, -78, 90, 78, 12, -2, -6], [-52, -68, 72, 58, 52, 16, -68, 6, 50, -44, 96, -8, 66, -8, 68, -90, -24, -50, -42, -44, 60, -90, -46, -86, -52, 90, 96, -82, 66, 14, -4, 34, 8, 66, 6, 50, -52, 62, 60, 50], [-56, -58, -92, -6, 38, -54, 64, 32, 48, -68, 36, -34, 34, -50, 24, -80, -18, -44, -60, -64, -22, 72, 20, -30, -92, 46, 90, 92, -84, 88, -26, -42, -98, -98, 28, -92, 30, -30, -86, 10]],31,),
([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]],22,),
([[47, 81], [14, 25]],1,),
([[-38, 30], [-80, 94]],1,),
([[1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1], [1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0], [1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1], [1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1], [1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0], [1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0], [1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1], [0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1], [0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1], [0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0], [1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0], [0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1], [1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0], [1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 1], [0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1], [0, 1, 0, 1, 1, 0, 0, 
1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1], [1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1], [0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0], [1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0], [1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0], [1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1], [0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1], [1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1], [1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0], [1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0], [1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1], [1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1], [0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1], [0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0], [0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0]],31,),
([[1, 6, 6, 8, 8, 15, 19, 21, 22, 26, 29, 30, 31, 32, 35, 37, 37, 40, 41, 41, 44, 46, 48, 52, 54, 54, 55, 60, 61, 61, 67, 68, 76, 77, 78, 80, 80, 81, 81, 81, 82, 83, 85, 87, 89, 91, 97, 97], [4, 5, 6, 8, 9, 13, 14, 19, 22, 23, 29, 29, 30, 35, 36, 36, 39, 40, 41, 43, 43, 44, 45, 46, 46, 51, 51, 53, 55, 57, 58, 59, 60, 60, 61, 64, 65, 68, 69, 70, 70, 75, 76, 78, 81, 82, 88, 92], [4, 5, 5, 8, 17, 18, 19, 19, 20, 20, 21, 21, 22, 23, 29, 29, 31, 32, 33, 33, 33, 34, 38, 43, 44, 45, 47, 58, 61, 66, 72, 72, 72, 74, 75, 76, 78, 78, 80, 83, 85, 86, 88, 92, 92, 96, 97, 99], [1, 3, 4, 6, 8, 9, 14, 14, 15, 15, 16, 18, 18, 20, 21, 21, 23, 23, 24, 27, 32, 33, 35, 35, 36, 43, 44, 44, 45, 47, 48, 50, 51, 51, 55, 55, 55, 55, 66, 67, 67, 70, 86, 88, 92, 93, 94, 99], [1, 2, 4, 7, 10, 10, 11, 13, 13, 15, 16, 17, 22, 31, 32, 35, 36, 37, 37, 41, 41, 43, 45, 46, 47, 50, 51, 51, 54, 55, 58, 64, 67, 68, 70, 72, 73, 76, 77, 82, 83, 84, 84, 85, 85, 89, 93, 94], [3, 4, 5, 6, 6, 7, 8, 8, 9, 10, 10, 15, 16, 17, 17, 21, 21, 23, 25, 26, 27, 29, 30, 32, 36, 40, 41, 43, 43, 49, 49, 57, 57, 61, 62, 68, 71, 73, 75, 81, 84, 89, 91, 92, 94, 95, 97, 97], [1, 1, 4, 16, 16, 16, 19, 24, 26, 26, 28, 31, 33, 34, 34, 35, 36, 37, 40, 52, 54, 56, 57, 62, 64, 64, 66, 70, 71, 72, 72, 73, 73, 74, 78, 81, 81, 83, 83, 85, 88, 90, 92, 93, 93, 94, 98, 99], [2, 4, 6, 8, 8, 9, 11, 14, 15, 17, 17, 20, 21, 22, 22, 28, 29, 30, 31, 31, 32, 36, 44, 47, 50, 50, 55, 59, 62, 62, 63, 66, 67, 70, 76, 76, 76, 78, 80, 80, 81, 83, 84, 86, 88, 91, 95, 97], [4, 6, 8, 10, 11, 13, 17, 17, 21, 22, 33, 33, 37, 41, 43, 45, 47, 48, 51, 52, 53, 58, 58, 58, 58, 58, 63, 65, 66, 67, 67, 68, 69, 71, 73, 75, 80, 81, 82, 82, 83, 89, 89, 94, 95, 97, 98, 99], [3, 5, 10, 11, 11, 12, 13, 17, 17, 18, 20, 23, 23, 24, 27, 31, 31, 34, 39, 39, 39, 43, 43, 44, 45, 46, 50, 51, 53, 55, 60, 61, 64, 68, 75, 75, 76, 78, 81, 82, 83, 86, 88, 93, 93, 96, 96, 98], [2, 2, 3, 6, 7, 13, 16, 21, 23, 23, 23, 24, 29, 30, 32, 35, 36, 36, 38, 39, 39, 39, 41, 42, 42, 44, 46, 
51, 51, 52, 53, 64, 71, 73, 74, 80, 81, 84, 86, 86, 93, 94, 96, 96, 96, 96, 97, 99], [2, 4, 5, 12, 14, 16, 20, 22, 25, 26, 33, 34, 35, 35, 36, 40, 44, 49, 50, 50, 51, 51, 51, 52, 55, 58, 58, 59, 60, 61, 62, 64, 66, 66, 66, 72, 75, 76, 81, 82, 82, 84, 86, 89, 92, 93, 93, 96], [1, 2, 2, 3, 4, 5, 6, 7, 11, 13, 13, 15, 19, 20, 23, 26, 27, 29, 30, 30, 38, 39, 40, 40, 41, 43, 53, 57, 65, 70, 71, 78, 78, 79, 80, 81, 82, 82, 83, 87, 87, 93, 93, 96, 96, 97, 97, 98], [4, 11, 12, 18, 18, 21, 21, 27, 27, 28, 29, 33, 34, 37, 40, 41, 41, 45, 55, 56, 56, 57, 58, 58, 63, 63, 65, 65, 66, 68, 68, 69, 69, 73, 74, 78, 80, 82, 83, 83, 85, 87, 89, 92, 95, 95, 96, 97], [1, 4, 7, 7, 14, 15, 22, 24, 24, 27, 30, 32, 33, 34, 39, 39, 40, 41, 44, 48, 56, 56, 58, 59, 61, 61, 62, 63, 64, 65, 68, 69, 70, 72, 78, 78, 80, 80, 82, 83, 83, 84, 86, 87, 92, 93, 94, 94], [1, 1, 4, 5, 6, 6, 7, 9, 10, 10, 14, 16, 17, 19, 21, 24, 26, 30, 31, 32, 37, 37, 38, 40, 45, 49, 52, 52, 54, 54, 61, 61, 65, 67, 70, 72, 78, 79, 80, 82, 84, 85, 87, 88, 88, 92, 94, 97], [3, 6, 10, 10, 11, 12, 12, 13, 14, 15, 16, 18, 21, 23, 25, 27, 27, 27, 27, 30, 33, 35, 40, 41, 44, 48, 50, 50, 51, 52, 54, 54, 55, 58, 58, 58, 59, 62, 65, 69, 72, 72, 74, 74, 76, 79, 80, 98], [1, 2, 4, 4, 4, 5, 6, 7, 9, 9, 10, 12, 22, 23, 24, 26, 26, 28, 33, 35, 35, 38, 42, 44, 48, 48, 52, 54, 56, 60, 63, 68, 68, 68, 72, 75, 77, 79, 79, 82, 85, 88, 89, 91, 91, 91, 92, 93], [1, 8, 11, 13, 22, 23, 23, 26, 30, 31, 33, 34, 35, 35, 37, 39, 40, 44, 46, 46, 46, 47, 47, 47, 54, 59, 60, 60, 61, 62, 64, 66, 69, 74, 75, 77, 78, 79, 79, 82, 83, 86, 87, 92, 94, 96, 99, 99], [1, 1, 3, 8, 11, 14, 19, 20, 20, 20, 21, 24, 25, 25, 28, 34, 37, 38, 38, 39, 40, 47, 53, 54, 56, 57, 58, 62, 65, 69, 70, 70, 71, 71, 73, 76, 78, 78, 81, 84, 87, 92, 94, 94, 94, 96, 98, 99], [3, 4, 4, 15, 19, 21, 23, 26, 30, 31, 32, 34, 35, 37, 38, 41, 46, 46, 46, 51, 52, 53, 58, 63, 65, 68, 68, 68, 69, 70, 70, 70, 71, 72, 73, 74, 75, 75, 77, 80, 81, 84, 84, 86, 96, 96, 96, 98], [3, 4, 8, 9, 9, 11, 
16, 19, 19, 20, 20, 23, 27, 27, 28, 30, 31, 34, 36, 40, 41, 43, 45, 46, 53, 53, 55, 58, 58, 59, 62, 63, 64, 65, 68, 68, 71, 72, 75, 78, 80, 87, 87, 88, 89, 94, 97, 99], [1, 3, 3, 10, 12, 12, 12, 12, 13, 15, 17, 18, 22, 24, 24, 28, 29, 31, 33, 33, 34, 34, 40, 43, 44, 48, 48, 49, 51, 55, 60, 63, 67, 68, 70, 72, 73, 75, 75, 77, 82, 85, 88, 91, 93, 94, 95, 98], [6, 6, 7, 8, 9, 14, 15, 18, 19, 26, 28, 28, 28, 30, 31, 33, 33, 36, 38, 39, 43, 44, 46, 48, 56, 57, 57, 60, 60, 61, 67, 69, 70, 71, 73, 74, 79, 80, 82, 84, 86, 86, 90, 92, 94, 95, 96, 98], [2, 2, 3, 9, 10, 14, 15, 15, 16, 19, 25, 26, 28, 31, 32, 33, 33, 34, 35, 41, 41, 42, 42, 43, 48, 48, 58, 59, 61, 66, 66, 69, 72, 73, 77, 78, 79, 79, 83, 86, 88, 92, 92, 92, 92, 95, 96, 97], [1, 6, 7, 8, 11, 14, 15, 16, 16, 18, 23, 23, 24, 25, 28, 29, 31, 32, 36, 38, 38, 41, 42, 43, 44, 46, 55, 55, 56, 59, 62, 64, 67, 69, 69, 70, 71, 72, 76, 81, 84, 86, 86, 87, 87, 89, 94, 95], [3, 3, 6, 10, 11, 15, 16, 18, 18, 27, 28, 28, 30, 30, 33, 34, 35, 35, 39, 43, 45, 48, 50, 51, 52, 53, 55, 62, 62, 62, 67, 68, 69, 70, 71, 72, 74, 74, 80, 81, 84, 85, 85, 86, 88, 88, 88, 96], [1, 2, 4, 5, 5, 5, 6, 12, 14, 14, 16, 16, 19, 28, 28, 29, 30, 32, 35, 36, 38, 39, 41, 47, 52, 57, 58, 58, 62, 64, 66, 71, 75, 76, 80, 81, 82, 83, 84, 85, 86, 87, 90, 91, 93, 96, 97, 98], [4, 7, 8, 10, 11, 12, 14, 17, 19, 19, 20, 24, 24, 28, 29, 29, 31, 31, 32, 33, 35, 36, 40, 42, 43, 47, 49, 53, 53, 53, 54, 54, 58, 58, 61, 64, 67, 72, 74, 79, 80, 80, 84, 86, 91, 91, 96, 97], [2, 4, 6, 6, 11, 12, 17, 19, 20, 21, 25, 26, 29, 30, 30, 31, 32, 39, 42, 42, 47, 48, 48, 49, 49, 49, 51, 55, 56, 59, 62, 65, 67, 67, 68, 68, 69, 73, 73, 76, 79, 82, 86, 87, 87, 88, 98, 98], [2, 3, 5, 7, 8, 16, 17, 18, 29, 29, 30, 31, 32, 33, 36, 38, 38, 40, 43, 45, 47, 56, 58, 59, 61, 63, 65, 65, 67, 68, 68, 69, 73, 74, 78, 80, 81, 82, 82, 89, 91, 92, 92, 94, 96, 97, 97, 98], [4, 8, 11, 12, 14, 15, 24, 27, 29, 32, 33, 36, 37, 38, 42, 46, 46, 47, 47, 49, 50, 53, 58, 58, 61, 64, 64, 65, 68, 69, 73, 
74, 76, 79, 79, 82, 83, 84, 85, 89, 89, 90, 95, 95, 95, 97, 99, 99], [3, 3, 3, 5, 6, 7, 10, 13, 14, 16, 18, 23, 25, 26, 27, 31, 31, 35, 38, 41, 44, 46, 52, 57, 58, 62, 63, 63, 63, 64, 68, 69, 71, 72, 72, 76, 76, 78, 80, 83, 83, 88, 89, 90, 92, 94, 95, 98], [3, 8, 11, 15, 15, 26, 27, 29, 30, 32, 32, 37, 39, 42, 47, 49, 52, 52, 52, 53, 53, 54, 54, 59, 60, 61, 61, 62, 64, 65, 66, 66, 67, 67, 68, 69, 73, 74, 77, 79, 90, 90, 91, 91, 95, 98, 99, 99], [2, 4, 6, 8, 9, 10, 11, 15, 15, 16, 20, 21, 22, 23, 25, 26, 27, 27, 36, 38, 42, 45, 47, 47, 51, 53, 53, 55, 57, 59, 59, 62, 65, 66, 72, 73, 76, 82, 82, 83, 88, 90, 90, 91, 95, 96, 99, 99], [1, 2, 3, 6, 6, 7, 11, 16, 17, 19, 20, 23, 24, 24, 26, 28, 31, 33, 36, 37, 38, 39, 40, 40, 44, 46, 46, 51, 51, 53, 62, 62, 63, 64, 68, 69, 70, 73, 78, 78, 85, 87, 90, 91, 93, 93, 95, 98], [3, 9, 9, 11, 14, 16, 17, 18, 18, 22, 22, 25, 29, 30, 34, 35, 37, 37, 41, 42, 43, 45, 45, 52, 54, 55, 55, 57, 63, 64, 65, 68, 69, 70, 70, 71, 74, 75, 75, 77, 86, 86, 87, 93, 94, 95, 95, 99], [1, 3, 3, 10, 13, 14, 15, 18, 19, 20, 22, 23, 24, 25, 26, 32, 34, 40, 41, 41, 41, 44, 44, 46, 53, 57, 57, 59, 60, 61, 62, 63, 64, 70, 72, 72, 77, 78, 86, 88, 90, 92, 92, 93, 93, 94, 95, 98], [2, 4, 5, 7, 17, 20, 20, 21, 24, 24, 25, 25, 27, 28, 29, 29, 33, 35, 35, 35, 37, 38, 43, 43, 45, 48, 49, 52, 53, 59, 62, 64, 65, 70, 71, 72, 72, 75, 75, 86, 88, 89, 89, 91, 91, 93, 96, 97], [5, 6, 6, 9, 13, 16, 17, 18, 20, 21, 25, 26, 26, 31, 34, 43, 44, 45, 45, 47, 48, 48, 51, 51, 54, 56, 56, 57, 61, 61, 66, 67, 69, 69, 70, 72, 76, 76, 81, 83, 85, 90, 96, 96, 97, 98, 98, 99], [3, 4, 5, 6, 12, 13, 14, 14, 18, 20, 22, 24, 32, 35, 38, 38, 39, 41, 44, 48, 51, 52, 54, 55, 55, 57, 58, 59, 60, 60, 62, 64, 66, 69, 69, 74, 74, 76, 78, 79, 81, 82, 82, 82, 85, 86, 91, 99], [2, 6, 7, 8, 10, 14, 15, 15, 16, 16, 18, 21, 24, 30, 31, 32, 37, 38, 39, 41, 42, 42, 44, 45, 50, 51, 52, 53, 59, 60, 61, 61, 67, 67, 72, 73, 74, 75, 77, 79, 81, 88, 90, 91, 95, 95, 97, 98], [2, 3, 4, 7, 7, 7, 9, 15, 17, 
18, 19, 20, 22, 24, 26, 26, 28, 29, 33, 36, 39, 40, 42, 43, 45, 49, 58, 61, 68, 68, 71, 75, 75, 75, 75, 76, 77, 78, 79, 80, 83, 86, 91, 94, 95, 98, 99, 99], [5, 6, 7, 10, 10, 11, 12, 14, 17, 19, 20, 24, 29, 31, 32, 35, 41, 44, 47, 47, 49, 50, 54, 57, 60, 61, 64, 66, 69, 70, 71, 72, 75, 75, 75, 77, 80, 81, 82, 88, 88, 90, 94, 97, 97, 97, 98, 99], [1, 1, 4, 6, 6, 7, 8, 11, 11, 14, 17, 18, 20, 21, 25, 29, 31, 31, 32, 38, 40, 41, 42, 44, 44, 45, 46, 51, 52, 58, 61, 62, 66, 67, 73, 74, 76, 79, 82, 84, 85, 86, 87, 90, 91, 92, 94, 97], [1, 1, 3, 4, 7, 7, 10, 11, 12, 13, 16, 24, 24, 27, 28, 29, 34, 36, 38, 39, 39, 42, 45, 48, 55, 57, 60, 62, 62, 63, 63, 69, 72, 76, 77, 78, 81, 81, 82, 83, 90, 93, 94, 94, 96, 98, 99, 99], [1, 1, 1, 1, 2, 2, 3, 7, 8, 14, 14, 19, 19, 23, 23, 25, 26, 27, 31, 43, 48, 49, 49, 50, 51, 51, 52, 55, 56, 57, 57, 57, 59, 62, 63, 63, 67, 71, 74, 74, 74, 76, 81, 84, 85, 87, 98, 98], [1, 1, 5, 9, 10, 12, 16, 18, 19, 20, 23, 26, 28, 35, 35, 36, 37, 40, 41, 41, 44, 44, 54, 57, 59, 60, 60, 60, 61, 63, 67, 74, 76, 79, 79, 84, 85, 86, 89, 89, 90, 91, 92, 92, 92, 95, 96, 98]],35,),
([[-18, -22, 0, 40, 84, 14, -90, 8, -52, 70, 24, 92, -22, 92, -38, -78, 76, 70, -6, -34, 68, -92, -58, -58, -58, -90, -76, 62, -46, -22, 6], [-78, 0, -42, -10, 94, -78, 26, 28, 30, 34, -68, -68, 52, 70, 86, -54, 42, 60, -34, 14, 36, 30, -64, -48, -76, -36, -78, 66, 18, 96, 2], [62, -88, 90, -32, -40, 56, 18, 96, 72, -50, 20, 72, 64, -82, 30, 66, -32, 16, 64, 96, -82, 72, -94, -48, 14, 60, 6, -78, 44, -80, 22], [-42, -86, -16, -62, 4, -30, 46, 10, 94, -12, 14, 96, -62, 68, 72, 68, -58, 2, 26, -12, 2, -16, 32, 26, 92, 64, -62, -80, -70, 76, -14], [96, 78, -4, -34, -88, 34, 50, 0, 46, 94, 14, 26, 58, -14, 82, 24, 86, 74, -8, 50, 54, -66, 46, -80, 20, 74, 2, -68, 92, -96, -2], [74, -70, -36, 76, 90, 50, 74, 78, 12, 40, 0, -8, -18, -34, -66, 86, 48, 44, 18, 96, -66, 48, 0, -36, 72, -40, 50, -32, -2, -50, 78], [18, -80, 70, -16, 34, -54, -94, -40, 60, -4, -50, -44, -56, -68, 22, -12, 54, 10, 90, -76, -28, 76, 72, -2, -78, 34, -24, 14, -80, -86, 68], [16, -88, 82, -48, -90, 36, 56, -80, -44, 40, 18, -84, -30, 40, -48, 52, 74, 18, 84, 92, 76, -26, -8, -4, 32, -92, 10, -88, -74, -58, -56], [22, 98, 12, 44, 30, 70, -60, 62, -78, -60, 80, -96, 46, 8, 26, 54, 20, -58, 80, -36, 44, -20, 18, 36, -22, 50, 90, 64, -56, 4, -28], [-6, -18, -92, -68, 20, -22, -60, -50, -72, 64, -50, 76, -36, 40, -30, 64, 96, 2, -82, 52, -50, 20, 34, 52, -24, -14, 96, 76, -48, -6, -98], [-60, 48, -82, -38, -26, 98, 56, 98, 78, -82, -92, -70, 56, -80, -46, -96, -10, -70, -88, 92, -54, 16, 88, -26, -74, 34, -56, 54, -52, 2, 72], [16, 82, -70, 42, -40, 38, 48, -86, -28, 46, -40, -30, -54, 58, 94, -54, -88, 46, 42, 84, 58, -74, 94, -2, 72, -50, 72, 36, 26, 50, -80], [-80, -34, 16, 20, -72, 86, 22, 82, -64, -38, -24, -82, -30, 2, 32, 18, -88, 82, 0, 90, -36, -92, 50, -30, -72, -20, 74, -14, -42, 52, 66], [40, 54, 42, -34, -20, 18, 88, -32, -52, -40, -8, 8, 60, 0, 22, 94, -96, -72, -76, -18, 60, -52, -98, -92, 30, 66, 76, -38, -38, 24, 70], [-82, -60, 86, 98, -42, -12, -92, -78, 92, -90, 54, 0, 8, 98, 50, 
80, -24, 20, -86, 56, -86, 38, 6, -44, -24, -2, 16, -50, 36, 10, 98], [-34, 92, -52, -72, -54, 64, -48, -46, 88, -28, -56, 92, -8, -18, -70, -48, -2, -42, -76, -62, -34, 8, -22, -4, -12, -14, -26, -46, 40, 12, -84], [50, 70, -52, -86, 50, 36, -18, -82, -12, -74, -90, 14, 18, -10, 80, 24, -22, -10, -30, 92, 70, 60, 16, -18, 10, 2, 2, 18, 44, -72, -72], [54, -66, 22, 76, -34, 68, -36, -50, -32, -20, -70, 44, 56, 88, -12, -32, 42, -30, 90, -88, 30, -10, -28, -16, 40, -58, 12, -70, 12, -24, 74], [48, -36, -52, -36, 8, -20, -60, 64, 50, 94, -64, -74, -70, 40, -80, 46, 22, 94, -52, -58, -76, -36, -76, 92, -76, -92, -64, -78, -2, -20, 62], [-30, 34, 74, -48, -56, -18, -8, 88, 18, 80, -72, -52, -52, 82, -20, 58, 58, -50, 68, 26, 18, 34, -86, -8, 40, 42, 12, 92, -14, -4, -78], [-18, -80, 66, 66, -14, 16, 26, -24, 32, 24, 58, 0, 36, -76, -48, 36, 88, -18, 42, -4, 2, 48, -90, -84, 2, 92, 78, 92, -62, 4, 72], [90, -56, -48, -68, 70, -2, -94, -52, -12, 2, 64, 12, -70, 18, 28, -98, -80, 48, 34, -58, 24, 6, -60, -54, -70, 96, 88, 38, 42, -40, 18], [-2, -48, 32, 62, -42, 70, -10, -42, 20, 88, 44, -12, -46, -10, -96, 18, 44, -46, 90, -6, 74, 88, 8, -42, 26, -10, 84, -28, -12, -88, -98], [56, -64, -4, 32, 98, 12, 82, -46, 80, 16, -32, 54, 54, -28, -56, 54, 88, -46, 68, -74, 24, 4, 96, -84, 86, 14, -66, 12, -64, -86, 10], [26, -50, 72, -2, -50, -88, 96, -24, 48, 96, 26, 24, 46, 80, -70, -84, -30, 64, 44, -86, 24, -20, 12, 96, -26, 42, 88, -44, -54, -84, -66], [-28, 90, -66, 46, 16, -84, 22, -62, 20, -26, 22, 86, 40, -2, -36, 60, 90, 14, -24, 32, 66, 32, 12, 92, 22, -82, -96, 20, -64, 16, -22], [26, -80, 12, -42, -80, 72, -10, 42, 26, -32, 56, 96, -34, -14, -28, 62, -58, -36, -24, -22, -86, -48, -28, 48, -26, 26, 38, 10, -42, -8, -26], [-76, 22, 60, 88, 38, 44, -62, -68, -96, -64, 12, 42, 94, 10, 90, 68, -44, 74, -28, -86, -20, -22, -60, -78, -20, 68, -32, -40, 12, -64, 82], [60, -66, -14, -90, 40, 26, 52, -70, 92, -64, 68, 6, -84, -32, -90, -30, 18, -68, -50, 68, 54, 24, -68, -92, 
-32, -40, -30, 78, 60, -94, -48], [-14, -2, 72, 70, 2, 24, -54, 14, 98, -2, 70, 24, -60, -28, -72, -36, -50, -12, 60, -98, -80, -46, -88, 28, -74, -94, -28, 92, 30, -38, -8], [-78, 26, -94, -24, 14, 80, 60, -80, -28, 86, 4, 54, 88, -34, 4, -44, 18, -96, 18, -28, 90, 88, 42, 8, 66, 24, 0, -70, -78, -64, -20]],29,),
([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]],38,),
([[91, 17, 91, 54, 63, 43, 59, 7, 5, 73, 55, 46, 78, 60, 96, 32, 22, 66, 40, 34, 2, 48, 97, 26, 34, 17, 56, 88, 69, 30, 52, 87, 98], [84, 89, 34, 38, 49, 47, 99, 97, 48, 75, 43, 13, 7, 21, 76, 88, 18, 29, 86, 94, 89, 1, 40, 87, 94, 33, 12, 87, 38, 46, 54, 56, 79], [24, 21, 46, 88, 21, 31, 78, 91, 69, 62, 88, 88, 49, 37, 21, 30, 71, 57, 48, 1, 63, 46, 78, 80, 10, 57, 52, 31, 90, 13, 16, 12, 67], [48, 3, 74, 98, 23, 56, 27, 66, 4, 38, 14, 29, 20, 9, 84, 72, 25, 18, 98, 21, 37, 9, 34, 16, 42, 11, 14, 73, 4, 79, 22, 63, 37], [73, 26, 87, 85, 18, 14, 96, 87, 71, 41, 67, 71, 69, 61, 19, 8, 31, 64, 28, 6, 20, 1, 50, 9, 13, 42, 41, 99, 43, 75, 24, 34, 67], [40, 92, 49, 22, 85, 79, 3, 12, 66, 91, 64, 88, 85, 56, 1, 58, 2, 49, 46, 3, 69, 47, 39, 64, 97, 72, 36, 6, 97, 67, 47, 81, 50], [10, 22, 88, 26, 66, 41, 29, 55, 34, 86, 35, 31, 13, 31, 26, 5, 72, 45, 93, 86, 99, 99, 87, 91, 80, 40, 89, 44, 20, 33, 55, 42, 19], [88, 43, 80, 48, 35, 35, 80, 57, 89, 64, 10, 33, 55, 6, 76, 64, 59, 65, 62, 23, 32, 78, 45, 87, 41, 96, 54, 44, 82, 63, 14, 76, 34], [40, 32, 33, 4, 36, 81, 35, 1, 35, 22, 98, 37, 69, 69, 8, 4, 33, 61, 80, 37, 73, 45, 18, 17, 7, 38, 90, 59, 98, 20, 79, 21, 67], [15, 71, 7, 16, 55, 43, 65, 61, 11, 69, 87, 34, 62, 4, 30, 6, 10, 27, 22, 28, 18, 3, 28, 52, 58, 87, 70, 74, 66, 25, 68, 46, 73], [34, 89, 5, 16, 91, 93, 86, 19, 95, 4, 3, 71, 34, 25, 96, 86, 60, 86, 90, 72, 88, 2, 29, 91, 66, 92, 60, 34, 81, 22, 56, 90, 31], [83, 43, 58, 84, 38, 98, 3, 17, 5, 48, 50, 9, 84, 85, 1, 16, 23, 57, 30, 59, 47, 1, 59, 33, 33, 86, 82, 29, 2, 3, 2, 53, 57], [62, 77, 77, 80, 62, 72, 4, 41, 10, 97, 32, 85, 35, 70, 10, 18, 33, 93, 97, 96, 14, 54, 86, 31, 65, 45, 31, 3, 56, 85, 20, 35, 10], [54, 24, 10, 51, 45, 90, 47, 83, 6, 32, 60, 58, 74, 7, 15, 62, 47, 94, 99, 48, 12, 80, 13, 66, 52, 19, 62, 13, 7, 79, 20, 34, 44], [25, 76, 25, 5, 39, 26, 50, 69, 39, 35, 90, 80, 33, 78, 80, 62, 62, 35, 96, 67, 57, 44, 22, 52, 80, 6, 78, 24, 84, 64, 67, 3, 90], [10, 10, 92, 4, 17, 49, 6, 65, 56, 2, 
46, 57, 4, 37, 37, 65, 18, 65, 92, 24, 36, 98, 86, 6, 63, 64, 9, 77, 40, 64, 32, 14, 67], [36, 12, 98, 90, 96, 94, 17, 26, 83, 26, 16, 89, 29, 98, 2, 59, 78, 14, 51, 40, 84, 1, 83, 50, 97, 65, 68, 20, 20, 48, 80, 15, 87], [26, 1, 56, 67, 76, 38, 19, 29, 90, 58, 62, 77, 12, 92, 22, 49, 44, 83, 84, 51, 25, 9, 61, 69, 1, 2, 83, 20, 34, 38, 70, 2, 32], [54, 28, 21, 94, 62, 51, 60, 43, 76, 13, 1, 45, 5, 84, 52, 21, 38, 39, 89, 9, 67, 56, 93, 45, 38, 79, 95, 42, 70, 68, 15, 52, 44], [46, 34, 89, 97, 46, 41, 55, 63, 5, 91, 95, 40, 3, 31, 65, 53, 35, 42, 8, 75, 24, 31, 59, 19, 84, 79, 60, 91, 63, 99, 83, 75, 23], [52, 96, 12, 22, 5, 84, 10, 69, 56, 10, 74, 27, 85, 92, 96, 77, 75, 89, 26, 81, 18, 73, 83, 37, 43, 4, 74, 39, 29, 75, 98, 91, 34], [74, 23, 95, 17, 90, 40, 71, 6, 98, 80, 53, 52, 48, 19, 40, 38, 14, 13, 24, 90, 25, 96, 51, 10, 38, 89, 16, 85, 51, 46, 84, 94, 50], [72, 34, 29, 54, 13, 1, 91, 39, 55, 7, 69, 60, 72, 10, 88, 35, 37, 62, 73, 5, 2, 15, 76, 4, 99, 5, 31, 19, 65, 29, 62, 82, 14], [70, 95, 44, 52, 30, 12, 29, 54, 6, 6, 61, 32, 5, 16, 53, 2, 16, 2, 85, 81, 63, 50, 2, 23, 41, 32, 61, 61, 64, 53, 22, 63, 92], [95, 62, 20, 58, 14, 38, 81, 30, 11, 59, 93, 72, 69, 73, 17, 15, 41, 81, 58, 84, 59, 73, 89, 15, 62, 81, 79, 76, 72, 82, 12, 42, 4], [46, 61, 24, 78, 8, 36, 91, 60, 87, 15, 35, 77, 14, 30, 64, 25, 16, 3, 57, 95, 14, 89, 30, 87, 47, 39, 90, 25, 82, 27, 85, 65, 81], [23, 53, 6, 29, 53, 66, 38, 15, 78, 59, 47, 91, 13, 12, 96, 8, 93, 65, 9, 85, 12, 55, 11, 89, 91, 6, 24, 56, 55, 98, 23, 78, 76], [78, 15, 32, 58, 70, 69, 8, 51, 64, 42, 79, 24, 73, 8, 38, 21, 18, 31, 89, 60, 60, 17, 87, 62, 56, 94, 59, 83, 39, 63, 72, 45, 41], [16, 71, 94, 55, 37, 40, 84, 88, 62, 15, 26, 52, 36, 31, 20, 70, 89, 1, 52, 15, 77, 12, 79, 26, 2, 75, 10, 53, 27, 63, 55, 76, 50], [42, 65, 39, 23, 69, 31, 84, 47, 68, 53, 28, 7, 10, 54, 62, 37, 61, 82, 24, 29, 69, 44, 44, 34, 95, 44, 31, 7, 21, 9, 64, 51, 20], [33, 74, 71, 30, 98, 92, 74, 50, 90, 23, 8, 90, 81, 38, 5, 12, 65, 22, 99, 
44, 30, 1, 81, 82, 33, 13, 47, 52, 17, 88, 40, 91, 89], [69, 97, 51, 49, 71, 2, 43, 7, 51, 86, 25, 74, 91, 55, 42, 23, 83, 55, 73, 53, 55, 75, 93, 75, 69, 81, 6, 75, 2, 66, 51, 37, 19], [65, 39, 98, 7, 42, 20, 34, 4, 22, 20, 26, 80, 56, 70, 7, 95, 87, 49, 19, 17, 58, 65, 29, 22, 26, 15, 28, 93, 9, 16, 75, 76, 78]],20,)
]
# Test harness: run the candidate implementation (f_filled) and the reference
# (f_gold) over paired argument sets and count how often they agree.
# NOTE(review): both functions appear to mutate their argument lists in place;
# agreement is judged by comparing the mutated arguments afterwards — confirm
# that f_filled/f_gold are in-place algorithms before relying on this count.
n_success = 0
for i, parameters_set in enumerate(param):
    f_filled(*(filled_function_param[i]))
    f_gold(*parameters_set)
    if parameters_set == filled_function_param[i]:
        n_success+=1
print("#Results: %i, %i" % (n_success, len(param)))
| 1,490.588235
| 9,104
| 0.416127
| 20,247
| 76,020
| 1.560972
| 0.007458
| 0.208575
| 0.270337
| 0.324252
| 0.986173
| 0.985414
| 0.985414
| 0.985414
| 0.985414
| 0.985414
| 0
| 0.560356
| 0.26889
| 76,020
| 51
| 9,105
| 1,490.588235
| 0.008295
| 0.002434
| 0
| 0.536585
| 0
| 0
| 0.000884
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.02439
| false
| 0
| 0
| 0
| 0.02439
| 0.073171
| 0
| 0
| 1
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 14
|
c7c76bdaf39bda5362e3c9fd86a3d8249a97be09
| 192
|
py
|
Python
|
server/__init__.py
|
tetelevm/OrdeRPG
|
5bea9fbaf3fdd84ab14f7e3033e18eead2cf30ab
|
[
"MIT"
] | null | null | null |
server/__init__.py
|
tetelevm/OrdeRPG
|
5bea9fbaf3fdd84ab14f7e3033e18eead2cf30ab
|
[
"MIT"
] | null | null | null |
server/__init__.py
|
tetelevm/OrdeRPG
|
5bea9fbaf3fdd84ab14f7e3033e18eead2cf30ab
|
[
"MIT"
] | null | null | null |
"""
The script responsible for assembling all of the server-side parts of
the project.
"""
from server.settings import *
from server.db import *
from server.framework.start_script import *
| 19.2
| 70
| 0.776042
| 27
| 192
| 5.481481
| 0.592593
| 0.202703
| 0.216216
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.151042
| 192
| 9
| 71
| 21.333333
| 0.907975
| 0.432292
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
1be4aac76ba1754db6a6b6efbf037b9b90b7fd22
| 4,571
|
py
|
Python
|
python/lib/routines/locate_star_settings.py
|
timtyree/bgmc
|
891e003a9594be9e40c53822879421c2b8c44eed
|
[
"MIT"
] | null | null | null |
python/lib/routines/locate_star_settings.py
|
timtyree/bgmc
|
891e003a9594be9e40c53822879421c2b8c44eed
|
[
"MIT"
] | null | null | null |
python/lib/routines/locate_star_settings.py
|
timtyree/bgmc
|
891e003a9594be9e40c53822879421c2b8c44eed
|
[
"MIT"
] | null | null | null |
from .. import *
from ..lib_care.measure.level_sets import compute_self_consistent_astar_rstar
def _comp_level_set(df, wjr, model_name, output_col, query, X, navg):
    """Return (level, smoothed level-set curve) for one output column.

    The target level is the power-law fit value stored in ``wjr`` for this
    model; the curve is ``None`` when no contour exists at that level
    (comp_longest_level_set_and_smooth asserts num_contours>0).
    """
    level = float(wjr[model_name][output_col])
    y = df.loc[query, output_col].values
    try:
        curve = comp_longest_level_set_and_smooth(X, y, level=level, navg=navg)
    except AssertionError:  # assert(num_contours>0)
        curve = None
    return level, curve


def _locate_star_for_model(df, wjr, model_name, D, kappa,
                           x1_col, x2_col, navg, printing):
    """Compute the self-consistent (rstar, astar) intersection for one
    (model, D, kappa) combination and return the result as a dict.

    Falls back to NaN star values when either level-set curve is missing or
    the intersection computation fails (assert x1star_values.shape[0]>0).
    """
    query = (df['D'] == D)
    query &= (df['kappa'] == kappa)
    query &= query_template
    X = df.loc[query, [x1_col, x2_col]].values
    if printing:
        print(f"for the {model_name} model, when kappa={kappa:.0f} Hz and D={D:.2f} cm^2/s...")
    # level-set curves set to powerlaw fits from the full models
    m, contour_m_values = _comp_level_set(df, wjr, model_name, 'm', query, X, navg)
    M, contour_M_values = _comp_level_set(df, wjr, model_name, 'M', query, X, navg)
    if (contour_m_values is not None) and (contour_M_values is not None):
        try:
            # compute the self-consistent intersection points
            rstar, astar = compute_self_consistent_astar_rstar(
                contour_m_values, contour_M_values)
        except AssertionError:  # assert (x1star_values.shape[0]>0)
            rstar = np.nan
            astar = np.nan
    else:
        rstar = np.nan
        astar = np.nan
    return {
        'model_name': model_name,
        'rstar': rstar,
        'astar': astar,
        'kappa': kappa,
        'D': D,
        'm': m,
        'M': M,
    }


def routine_locate_star_settings(df, wjr):
    """Compute the contour values for the fk_pbc and lr_pbc models and record
    the self-consistent intersections in a DataFrame.

    Parameters
    ----------
    df : pandas.DataFrame
        Measurements with columns 'D', 'kappa', 'r', 'varkappa', 'm', 'M'
        (and whatever ``query_template`` references).
    wjr : mapping
        Per-model fit results; ``wjr[model_name]['m']``/``['M']`` give the
        target levels.

    Returns
    -------
    pandas.DataFrame
        One row per (model_name, D, kappa), with 'rstar'/'astar' set to NaN
        when no intersection could be located.  Row order matches the
        original implementation: all 'fk_pbc' rows first, then 'lr_pbc'.
    """
    x1_col = 'r'
    x2_col = 'varkappa'
    navg = 50
    printing = False
    # iterate D and kappa from largest to smallest
    D_lst = sorted(set(df.D.values))[::-1]
    print(f"D_lst={D_lst}")
    kappa_lst = sorted(set(df.kappa.values))[::-1]
    print(f"iterating over kappa_lst={kappa_lst} and the PBC full model results...")
    dict_out_lst = []
    # FK model first, then LR model — identical processing, different levels
    for model_name in ('fk_pbc', 'lr_pbc'):
        for D in D_lst:
            for kappa in kappa_lst:
                dict_out_lst.append(
                    _locate_star_for_model(df, wjr, model_name, D, kappa,
                                           x1_col, x2_col, navg, printing))
    df_star = pd.DataFrame(dict_out_lst)
    return df_star
| 35.992126
| 101
| 0.583898
| 631
| 4,571
| 3.980983
| 0.187005
| 0.076433
| 0.133758
| 0.054936
| 0.801354
| 0.789013
| 0.789013
| 0.789013
| 0.789013
| 0.789013
| 0
| 0.011458
| 0.312623
| 4,571
| 126
| 102
| 36.277778
| 0.788033
| 0.124699
| 0
| 0.807692
| 0
| 0.019231
| 0.08438
| 0.005274
| 0
| 0
| 0
| 0
| 0.057692
| 1
| 0.009615
| false
| 0
| 0.019231
| 0
| 0.038462
| 0.067308
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
4091dc19620db8bad04488e77b474e26125c59c2
| 16,298
|
py
|
Python
|
DETR/modules/ExplanationGenerator.py
|
Fostereee/Transformer-MM-Explainability
|
6dc4925b83a38e39069369da599b11d548128eb5
|
[
"MIT"
] | 322
|
2021-03-29T20:42:57.000Z
|
2022-03-28T12:26:47.000Z
|
DETR/modules/ExplanationGenerator.py
|
Fostereee/Transformer-MM-Explainability
|
6dc4925b83a38e39069369da599b11d548128eb5
|
[
"MIT"
] | 14
|
2021-04-23T23:45:58.000Z
|
2022-03-15T02:46:01.000Z
|
DETR/modules/ExplanationGenerator.py
|
Fostereee/Transformer-MM-Explainability
|
6dc4925b83a38e39069369da599b11d548128eb5
|
[
"MIT"
] | 51
|
2021-04-05T15:44:52.000Z
|
2022-03-25T02:28:49.000Z
|
import numpy as np
import torch
from torch.nn.functional import softmax
def compute_rollout_attention(all_layer_matrices, start_layer=0):
    """Attention rollout: multiply per-layer attention matrices together.

    Each layer matrix gets an identity added (residual connection) and is
    row-normalized before the layers from ``start_layer`` onward are
    accumulated by matrix multiplication.

    Parameters
    ----------
    all_layer_matrices : sequence of torch.Tensor
        Per-layer attention maps; the token dimension is ``shape[1]``.
    start_layer : int
        First layer included in the rollout product.

    Returns
    -------
    torch.Tensor
        The joint (rolled-out) attention matrix.
    """
    # adding residual consideration
    num_tokens = all_layer_matrices[0].shape[1]
    eye = torch.eye(num_tokens).to(all_layer_matrices[0].device)
    # add identity, then re-normalize so each row sums to one
    matrices_aug = [mat + eye for mat in all_layer_matrices]
    matrices_aug = [mat / mat.sum(dim=-1, keepdim=True) for mat in matrices_aug]
    joint_attention = matrices_aug[start_layer]
    for mat in matrices_aug[start_layer + 1:]:
        joint_attention = mat.matmul(joint_attention)
    return joint_attention
# rule 5 from paper
def avg_heads(cam, grad):
    """Rule 5 from the paper: gradient-weighted attention, averaged over heads.

    Negative contributions are clamped to zero before averaging.
    """
    flat_cam = cam.reshape(-1, cam.shape[-2], cam.shape[-1])
    flat_grad = grad.reshape(-1, grad.shape[-2], grad.shape[-1])
    weighted = (flat_grad * flat_cam).clamp(min=0)
    return weighted.mean(dim=0)
# rules 6 + 7 from paper
def apply_self_attention_rules(R_ss, R_sq, cam_ss):
    """Rules 6 + 7 from the paper.

    Propagates relevancy through a self-attention layer: the attention map
    ``cam_ss`` is matrix-multiplied into both the self-relevancy ``R_ss`` and
    the cross-relevancy ``R_sq``.  Returns (R_ss_addition, R_sq_addition).
    """
    return torch.matmul(cam_ss, R_ss), torch.matmul(cam_ss, R_sq)
# rule 10 from paper
def apply_mm_attention_rules(R_ss, R_qq, cam_sq, apply_normalization=True, apply_self_in_rule_10=True):
    """Rule 10 from the paper: multi-modal (cross) attention propagation.

    Optionally normalizes both self-relevancy matrices (handle_residual)
    before sandwiching the cross-attention map between them.  When
    ``apply_self_in_rule_10`` is False the raw cross-attention map is used
    instead (NaN entries are zeroed in place in that case).
    """
    if apply_normalization:
        R_ss_n = handle_residual(R_ss)
        R_qq_n = handle_residual(R_qq)
    else:
        R_ss_n, R_qq_n = R_ss, R_qq
    if apply_self_in_rule_10:
        R_sq_addition = torch.matmul(R_ss_n.t(), torch.matmul(cam_sq, R_qq_n))
    else:
        R_sq_addition = cam_sq
    R_sq_addition[torch.isnan(R_sq_addition)] = 0
    return R_sq_addition
# normalization- eq. 8+9
def handle_residual(orig_self_attention):
    """Normalization, eqs. 8 + 9: strip the residual identity, row-normalize,
    then restore the identity.  Works on a clone; the input is not mutated.

    Requires every diagonal entry of the input to be >= 1 (the assert fires
    otherwise, after the identity subtraction).
    """
    attn = orig_self_attention.clone()
    n = attn.shape[-1]
    identity = torch.eye(n).to(attn.device)
    diag_idx = range(n)
    attn = attn - identity
    assert attn[diag_idx, diag_idx].min() >= 0
    attn = attn / attn.sum(dim=-1, keepdim=True)
    return attn + identity
class Generator:
    """Produces query-level relevancy maps that explain a DETR-style model's
    predictions, via several attribution methods (transformer attribution,
    partial LRP, raw attention, attention rollout, and Grad-CAM).

    The wrapped model is expected to expose ``transformer.decoder.layers`` /
    ``transformer.encoder.layers`` whose attention modules record attention
    maps, CAMs, and gradients (``get_attn`` / ``get_attn_cam`` /
    ``get_attn_gradients``), plus a ``relprop`` method for LRP.
    """

    def __init__(self, model):
        self.model = model
        self.model.eval()

    def forward(self, input_ids, attention_mask):
        """Plain forward pass through the wrapped model."""
        return self.model(input_ids, attention_mask)

    def generate_transformer_att(self, img, target_index, index=None):
        """Relevancy map from LRP plus the last decoder cross-attention layer.

        ``index`` defaults to the model's top non-background class for the
        query at ``target_index``.
        """
        outputs = self.model(img)
        kwargs = {"alpha": 1,
                  "target_index": target_index}
        if index is None:
            index = outputs['pred_logits'][0, target_index, :-1].max(1)[1]
        kwargs["target_class"] = index
        one_hot = torch.zeros_like(outputs['pred_logits']).to(outputs['pred_logits'].device)
        one_hot[0, target_index, index] = 1
        one_hot_vector = one_hot.clone().detach()
        one_hot.requires_grad_(True)
        # NOTE(review): .cuda() assumes a GPU is available — confirm for CPU runs
        one_hot = torch.sum(one_hot.cuda() * outputs['pred_logits'])
        self.model.zero_grad()
        one_hot.backward(retain_graph=True)
        self.model.relprop(one_hot_vector, **kwargs)
        decoder_blocks = self.model.transformer.decoder.layers
        encoder_blocks = self.model.transformer.encoder.layers
        # initialize relevancy matrices
        image_bboxes = encoder_blocks[0].self_attn.get_attn().shape[-1]
        queries_num = decoder_blocks[0].self_attn.get_attn().shape[-1]
        # image self attention matrix
        self.R_i_i = torch.eye(image_bboxes, image_bboxes).to(encoder_blocks[0].self_attn.get_attn().device)
        # queries self attention matrix
        self.R_q_q = torch.eye(queries_num, queries_num).to(encoder_blocks[0].self_attn.get_attn().device)
        # impact of image boxes on queries
        self.R_q_i = torch.zeros(queries_num, image_bboxes).to(encoder_blocks[0].self_attn.get_attn().device)
        # R_q_i generated from last layer
        decoder_last = decoder_blocks[-1]
        cam_q_i = decoder_last.multihead_attn.get_attn_cam().detach()
        grad_q_i = decoder_last.multihead_attn.get_attn_gradients().detach()
        cam_q_i = avg_heads(cam_q_i, grad_q_i)
        self.R_q_i = cam_q_i
        aggregated = self.R_q_i.unsqueeze_(0)
        aggregated = aggregated[:, target_index, :].unsqueeze_(0)
        return aggregated

    def handle_self_attention_image(self, blocks):
        """Accumulate image self-attention relevancy (R_i_i) over encoder blocks."""
        for blk in blocks:
            grad = blk.self_attn.get_attn_gradients().detach()
            if self.use_lrp:
                cam = blk.self_attn.get_attn_cam().detach()
            else:
                cam = blk.self_attn.get_attn().detach()
            cam = avg_heads(cam, grad)
            self.R_i_i += torch.matmul(cam, self.R_i_i)

    def handle_co_attn_self_query(self, block):
        """Accumulate decoder self-attention relevancy into R_q_q and R_q_i."""
        grad = block.self_attn.get_attn_gradients().detach()
        if self.use_lrp:
            cam = block.self_attn.get_attn_cam().detach()
        else:
            cam = block.self_attn.get_attn().detach()
        cam = avg_heads(cam, grad)
        R_q_q_add, R_q_i_add = apply_self_attention_rules(self.R_q_q, self.R_q_i, cam)
        self.R_q_q += R_q_q_add
        self.R_q_i += R_q_i_add

    def handle_co_attn_query(self, block):
        """Accumulate decoder cross-attention relevancy into R_q_i (rule 10)."""
        if self.use_lrp:
            cam_q_i = block.multihead_attn.get_attn_cam().detach()
        else:
            cam_q_i = block.multihead_attn.get_attn().detach()
        grad_q_i = block.multihead_attn.get_attn_gradients().detach()
        cam_q_i = avg_heads(cam_q_i, grad_q_i)
        self.R_q_i += apply_mm_attention_rules(self.R_q_q, self.R_i_i, cam_q_i,
                                               apply_normalization=self.normalize_self_attention,
                                               apply_self_in_rule_10=self.apply_self_in_rule_10)

    def generate_ours(self, img, target_index, index=None, use_lrp=True, normalize_self_attention=True, apply_self_in_rule_10=True):
        """Full relevancy-propagation method ("ours") over encoder and decoder.

        Backpropagates a one-hot target, optionally runs LRP, then walks all
        encoder/decoder blocks accumulating relevancy; returns the detached
        R_q_i row for ``target_index``.
        """
        self.use_lrp = use_lrp
        self.normalize_self_attention = normalize_self_attention
        self.apply_self_in_rule_10 = apply_self_in_rule_10
        outputs = self.model(img)
        outputs = outputs['pred_logits']
        kwargs = {"alpha": 1,
                  "target_index": target_index}
        if index is None:
            index = outputs[0, target_index, :-1].max(1)[1]
        kwargs["target_class"] = index
        one_hot = torch.zeros_like(outputs).to(outputs.device)
        one_hot[0, target_index, index] = 1
        one_hot_vector = one_hot
        one_hot.requires_grad_(True)
        one_hot = torch.sum(one_hot.cuda() * outputs)
        self.model.zero_grad()
        one_hot.backward(retain_graph=True)
        if use_lrp:
            self.model.relprop(one_hot_vector, **kwargs)
        decoder_blocks = self.model.transformer.decoder.layers
        encoder_blocks = self.model.transformer.encoder.layers
        # initialize relevancy matrices
        image_bboxes = encoder_blocks[0].self_attn.get_attn().shape[-1]
        queries_num = decoder_blocks[0].self_attn.get_attn().shape[-1]
        # image self attention matrix
        self.R_i_i = torch.eye(image_bboxes, image_bboxes).to(encoder_blocks[0].self_attn.get_attn().device)
        # queries self attention matrix
        self.R_q_q = torch.eye(queries_num, queries_num).to(encoder_blocks[0].self_attn.get_attn().device)
        # impact of image boxes on queries
        self.R_q_i = torch.zeros(queries_num, image_bboxes).to(encoder_blocks[0].self_attn.get_attn().device)
        # image self attention in the encoder
        self.handle_self_attention_image(encoder_blocks)
        # decoder self attention of queries followed by multi-modal attention
        for blk in decoder_blocks:
            # decoder self attention
            self.handle_co_attn_self_query(blk)
            # encoder decoder attention
            self.handle_co_attn_query(blk)
        aggregated = self.R_q_i.unsqueeze_(0)
        aggregated = aggregated[:, target_index, :].unsqueeze_(0).detach()
        return aggregated

    def generate_partial_lrp(self, img, target_index, index=None):
        """Baseline: partial LRP — CAM of the last decoder cross-attention,
        head-averaged and min-max normalized."""
        outputs = self.model(img)
        kwargs = {"alpha": 1,
                  "target_index": target_index}
        if index is None:
            index = outputs['pred_logits'][0, target_index, :-1].max(1)[1]
        kwargs["target_class"] = index
        one_hot = torch.zeros_like(outputs['pred_logits']).to(outputs['pred_logits'].device)
        one_hot[0, target_index, index] = 1
        one_hot_vector = one_hot.clone().detach()
        self.model.relprop(one_hot_vector, **kwargs)
        # get cross attn cam from last decoder layer
        cam_q_i = self.model.transformer.decoder.layers[-1].multihead_attn.get_attn_cam().detach()
        cam_q_i = cam_q_i.reshape(-1, cam_q_i.shape[-2], cam_q_i.shape[-1])
        cam_q_i = cam_q_i.mean(dim=0)
        self.R_q_i = cam_q_i
        # normalize to get non-negative cams
        self.R_q_i = (self.R_q_i - self.R_q_i.min()) / (self.R_q_i.max() - self.R_q_i.min())
        aggregated = self.R_q_i.unsqueeze_(0)
        aggregated = aggregated[:, target_index, :].unsqueeze_(0)
        return aggregated

    def generate_raw_attn(self, img, target_index):
        """Baseline: raw head-averaged cross-attention of the last decoder layer."""
        # forward pass; presumably populates the stored attention maps read below
        outputs = self.model(img)
        # get cross attn cam from last decoder layer
        cam_q_i = self.model.transformer.decoder.layers[-1].multihead_attn.get_attn().detach()
        cam_q_i = cam_q_i.reshape(-1, cam_q_i.shape[-2], cam_q_i.shape[-1])
        cam_q_i = cam_q_i.mean(dim=0)
        self.R_q_i = cam_q_i
        aggregated = self.R_q_i.unsqueeze_(0)
        aggregated = aggregated[:, target_index, :].unsqueeze_(0)
        return aggregated

    def generate_rollout(self, img, target_index):
        """Baseline: attention rollout over encoder and decoder self-attention,
        combined with the last decoder cross-attention."""
        outputs = self.model(img)
        decoder_blocks = self.model.transformer.decoder.layers
        encoder_blocks = self.model.transformer.encoder.layers
        cams_image = []
        cams_queries = []
        # image self attention in the encoder
        for blk in encoder_blocks:
            cam = blk.self_attn.get_attn().detach()
            cam = cam.mean(dim=0)
            cams_image.append(cam)
        # decoder self attention of queries
        for blk in decoder_blocks:
            # decoder self attention
            cam = blk.self_attn.get_attn().detach()
            cam = cam.mean(dim=0)
            cams_queries.append(cam)
        # rollout for self-attention values
        self.R_i_i = compute_rollout_attention(cams_image)
        self.R_q_q = compute_rollout_attention(cams_queries)
        decoder_last = decoder_blocks[-1]
        cam_q_i = decoder_last.multihead_attn.get_attn().detach()
        cam_q_i = cam_q_i.reshape(-1, cam_q_i.shape[-2], cam_q_i.shape[-1])
        cam_q_i = cam_q_i.mean(dim=0)
        self.R_q_i = torch.matmul(self.R_q_q.t(), torch.matmul(cam_q_i, self.R_i_i))
        aggregated = self.R_q_i.unsqueeze_(0)
        aggregated = aggregated[:, target_index, :].unsqueeze_(0)
        return aggregated

    def gradcam(self, cam, grad):
        """Grad-CAM-style weighting: mean gradient per head scales the CAM."""
        cam = cam.reshape(-1, cam.shape[-2], cam.shape[-1])
        grad = grad.reshape(-1, grad.shape[-2], grad.shape[-1])
        grad = grad.mean(dim=[1, 2], keepdim=True)
        cam = (cam * grad).mean(0).clamp(min=0)
        return cam

    def generate_attn_gradcam(self, img, target_index, index=None):
        """Baseline: Grad-CAM on the last decoder cross-attention layer."""
        outputs = self.model(img)
        if index is None:
            index = outputs['pred_logits'][0, target_index, :-1].max(1)[1]
        one_hot = torch.zeros_like(outputs['pred_logits']).to(outputs['pred_logits'].device)
        one_hot[0, target_index, index] = 1
        one_hot.requires_grad_(True)
        one_hot = torch.sum(one_hot.cuda() * outputs['pred_logits'])
        self.model.zero_grad()
        one_hot.backward(retain_graph=True)
        # get cross attn cam from last decoder layer
        cam_q_i = self.model.transformer.decoder.layers[-1].multihead_attn.get_attn().detach()
        grad_q_i = self.model.transformer.decoder.layers[-1].multihead_attn.get_attn_gradients().detach()
        cam_q_i = self.gradcam(cam_q_i, grad_q_i)
        self.R_q_i = cam_q_i
        aggregated = self.R_q_i.unsqueeze_(0)
        aggregated = aggregated[:, target_index, :].unsqueeze_(0)
        return aggregated
class GeneratorAlbationNoAgg:
    """Ablation of :class:`Generator`'s "ours" method with no aggregation:
    relevancy matrices are *replaced* at each block (``=``) rather than
    accumulated (``+=``).  Same model interface expectations as Generator.
    (Class name typo "Albation" kept for caller compatibility.)
    """

    def __init__(self, model):
        self.model = model
        self.model.eval()

    def forward(self, input_ids, attention_mask):
        """Plain forward pass through the wrapped model."""
        return self.model(input_ids, attention_mask)

    def handle_self_attention_image(self, blocks):
        """Replace (not accumulate) image self-attention relevancy per block."""
        for blk in blocks:
            grad = blk.self_attn.get_attn_gradients().detach()
            if self.use_lrp:
                cam = blk.self_attn.get_attn_cam().detach()
            else:
                cam = blk.self_attn.get_attn().detach()
            cam = avg_heads(cam, grad)
            self.R_i_i = torch.matmul(cam, self.R_i_i)

    def handle_co_attn_self_query(self, block):
        """Replace decoder self-attention relevancy for R_q_q and R_q_i."""
        grad = block.self_attn.get_attn_gradients().detach()
        if self.use_lrp:
            cam = block.self_attn.get_attn_cam().detach()
        else:
            cam = block.self_attn.get_attn().detach()
        cam = avg_heads(cam, grad)
        R_q_q_add, R_q_i_add = apply_self_attention_rules(self.R_q_q, self.R_q_i, cam)
        self.R_q_q = R_q_q_add
        self.R_q_i = R_q_i_add

    def handle_co_attn_query(self, block):
        """Replace decoder cross-attention relevancy for R_q_i (rule 10)."""
        if self.use_lrp:
            cam_q_i = block.multihead_attn.get_attn_cam().detach()
        else:
            cam_q_i = block.multihead_attn.get_attn().detach()
        grad_q_i = block.multihead_attn.get_attn_gradients().detach()
        cam_q_i = avg_heads(cam_q_i, grad_q_i)
        self.R_q_i = apply_mm_attention_rules(self.R_q_q, self.R_i_i, cam_q_i,
                                              apply_normalization=self.normalize_self_attention,
                                              apply_self_in_rule_10=self.apply_self_in_rule_10)

    def generate_ours_abl(self, img, target_index, index=None, use_lrp=False, normalize_self_attention=False, apply_self_in_rule_10=True):
        """Ablated relevancy-propagation method; mirrors Generator.generate_ours
        but with no-aggregation handlers and different defaults
        (use_lrp/normalize_self_attention default to False)."""
        self.use_lrp = use_lrp
        self.normalize_self_attention = normalize_self_attention
        self.apply_self_in_rule_10 = apply_self_in_rule_10
        outputs = self.model(img)
        outputs = outputs['pred_logits']
        kwargs = {"alpha": 1,
                  "target_index": target_index}
        if index is None:
            index = outputs[0, target_index, :-1].max(1)[1]
        kwargs["target_class"] = index
        one_hot = torch.zeros_like(outputs).to(outputs.device)
        one_hot[0, target_index, index] = 1
        one_hot_vector = one_hot
        one_hot.requires_grad_(True)
        # NOTE(review): .cuda() assumes a GPU is available — confirm for CPU runs
        one_hot = torch.sum(one_hot.cuda() * outputs)
        self.model.zero_grad()
        one_hot.backward(retain_graph=True)
        if use_lrp:
            self.model.relprop(one_hot_vector, **kwargs)
        decoder_blocks = self.model.transformer.decoder.layers
        encoder_blocks = self.model.transformer.encoder.layers
        # initialize relevancy matrices
        image_bboxes = encoder_blocks[0].self_attn.get_attn().shape[-1]
        queries_num = decoder_blocks[0].self_attn.get_attn().shape[-1]
        # image self attention matrix
        self.R_i_i = torch.eye(image_bboxes, image_bboxes).to(encoder_blocks[0].self_attn.get_attn().device)
        # queries self attention matrix
        self.R_q_q = torch.eye(queries_num, queries_num).to(encoder_blocks[0].self_attn.get_attn().device)
        # impact of image boxes on queries
        self.R_q_i = torch.zeros(queries_num, image_bboxes).to(encoder_blocks[0].self_attn.get_attn().device)
        # image self attention in the encoder
        self.handle_self_attention_image(encoder_blocks)
        # decoder self attention of queries followed by multi-modal attention
        for blk in decoder_blocks:
            # decoder self attention
            self.handle_co_attn_self_query(blk)
            # encoder decoder attention
            self.handle_co_attn_query(blk)
        aggregated = self.R_q_i.unsqueeze_(0)
        aggregated = aggregated[:, target_index, :].unsqueeze_(0).detach()
        return aggregated
| 40.341584
| 138
| 0.655541
| 2,365
| 16,298
| 4.17759
| 0.069767
| 0.016397
| 0.046761
| 0.044028
| 0.843927
| 0.824393
| 0.815081
| 0.802024
| 0.773785
| 0.771761
| 0
| 0.012061
| 0.2369
| 16,298
| 403
| 139
| 40.441687
| 0.782343
| 0.067616
| 0
| 0.737762
| 0
| 0
| 0.017083
| 0
| 0
| 0
| 0
| 0
| 0.003497
| 1
| 0.08042
| false
| 0
| 0.01049
| 0.006993
| 0.15035
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
409eedb9e947cab673b5a52a6de48288fafe0a7d
| 29,045
|
py
|
Python
|
zeping_point_jobs.py
|
cinjon/ml-capsules-inverted-attention-routing
|
978b0f58eba1007bcef0b6cb045f3d2040f76a31
|
[
"AML"
] | null | null | null |
zeping_point_jobs.py
|
cinjon/ml-capsules-inverted-attention-routing
|
978b0f58eba1007bcef0b6cb045f3d2040f76a31
|
[
"AML"
] | null | null | null |
zeping_point_jobs.py
|
cinjon/ml-capsules-inverted-attention-routing
|
978b0f58eba1007bcef0b6cb045f3d2040f76a31
|
[
"AML"
] | null | null | null |
"""Run the jobs in this file.
Example running jobs:
python zeping_jobs.py
When you want to add more jobs just put them below and MAKE SURE that all of the
do_jobs for the ones above are False.
"""
from zeping_run_on_cluster import do_jobarray
email = 'zz2332@nyu.edu'
code_directory = '/misc/kcgscratch1/ChoGroup/resnick/spaceofmotion/zeping/capsules/ml-capsules-inverted-attention-routing'
def run(find_counter=None):
    """Look up the cluster-job configuration registered under ``find_counter``.

    Each integer counter (0-54) names one historical experiment; the branches
    below reconstruct the exact hyperparameter dict that was launched for it.
    Per the module docstring, old counters must not change, so recorded quirks
    are preserved verbatim (see the NOTE(review) comments).

    find_counter : int or None
        Counter of the job to fetch. ``None`` (the default) matches nothing.

    Returns ``(find_counter, job)`` for a known counter, otherwise prints
    'Counter not found' and returns ``(None, None)``.
    """
    # Paths shared by many of the jobs below.
    results_shapenet = '/misc/kcgscratch1/ChoGroup/resnick/spaceofmotion/zeping/capsules/results/shapenet'
    data_vidcaps = '/misc/kcgscratch1/ChoGroup/resnick/vidcaps/shapenet'
    data_zeping = '/misc/kcgscratch1/ChoGroup/resnick/spaceofmotion/zeping/shapenet'

    # Defaults. Counters 0-30 update this dict in place (overriding
    # results_dir/data_root); counters 31+ rebuild `job` from scratch.
    job = {
        'results_dir': '/misc/kcgscratch1/ChoGroup/resnick/spaceofmotion/zeping/capsules/results',
        'data_root': '/misc/kcgscratch1/ChoGroup/resnick/spaceofmotion/zeping/capsules/data/MNIST',
        'affnist_data_root': '/misc/kcgscratch1/ChoGroup/resnick/spaceofmotion/zeping/capsules/data/affNIST'
    }

    # 0: resnet backbone, cross entropy.
    if find_counter == 0:
        num_gpus = 1  # NOTE(review): gpu/time hints kept from the original;
        hours = 8     # they are not part of the returned job.
        job.update({
            'name': '2020.06.08',
            'counter': find_counter,
            'config': 'resnet_backbone_points5',
            'criterion': 'backbone_xent',  # 'nceprobs_selective',
            'num_routing': 1,
            'dataset': 'shapenet5',
            'batch_size': 16,
            'results_dir': results_shapenet,
            'data_root': data_vidcaps,
            'do_tsne_test_every': 2,
            'do_tsne_test_after': 500,  # NOTE: so we aren't doing it rn.
            'lr': 3e-4,
            'optimizer': 'adam',
            # NOTE(review): every other branch spells this key 'epoch';
            # preserved verbatim so the recorded job is reproduced exactly.
            'epochs': 200
        })
        return find_counter, job

    # 1-2: pointcapsnet cap16 backbone xent; 3-4: the cap8 variant.
    # Swept variable: weight_decay (odd counters 0, even counters 5e-4).
    if find_counter in [1, 2, 3, 4]:
        num_gpus = 1
        hours = 8
        capsules = 'cap16' if find_counter <= 2 else 'cap8'
        job.update({
            'name': '2020.06.08',
            'counter': find_counter,
            'config': 'pointcapsnet_backbone_points5_' + capsules,
            'criterion': 'backbone_xent',  # 'nceprobs_selective',
            'num_routing': 1,
            'dataset': 'shapenet5',
            'batch_size': 16,
            'results_dir': results_shapenet,
            'data_root': data_vidcaps,
            'do_tsne_test_every': 2,
            'do_tsne_test_after': 500,  # NOTE: so we aren't doing it rn.
            'lr': 3e-4,
            'optimizer': 'adam',
            'epoch': 200,
            'weight_decay': 0 if find_counter % 2 else 5e-4
        })
        return find_counter, job

    # 5-20: pointcapsnet backbone NCE grids.
    #   5-8:   cap8             9-12:  cap16
    #   13-16: cap8 + diff obj  17-20: cap16 + diff obj
    # Within each group of four, (weight_decay, temperature) sweeps the grid
    # (0, 0.1), (0, 0.01), (5e-4, 0.1), (5e-4, 0.01).
    if find_counter in range(5, 21):
        num_gpus = 1
        hours = 8
        group = (find_counter - 5) // 4  # 0..3, see table above
        capsules = 'cap16' if group % 2 else 'cap8'
        job.update({
            'name': '2020.06.08',
            'counter': find_counter,
            'config': 'pointcapsnet_backbone_points5_' + capsules,
            'criterion': 'backbone_nceprobs_selective',
            'num_routing': 1,
            'dataset': 'shapenet5',
            'batch_size': 16,
            'results_dir': results_shapenet,
            'data_root': data_vidcaps,
            'do_tsne_test_every': 2,
            'do_tsne_test_after': -1,  # 500,
            'lr': 3e-4,
            'optimizer': 'adam',
            'presence_type': 'l2norm',
            'simclr_selection_strategy': 'anchor0_other12',
            'nce_presence_lambda': 1.0,
            'epoch': 200
        })
        if group >= 2:
            job['use_diff_object'] = True
        wd, temperature = [
            (0, 0.1), (0, 0.01), (5e-4, 0.1), (5e-4, 0.01)
        ][(find_counter - 5) % 4]
        job.update({
            'weight_decay': wd,
            'nce_presence_temperature': temperature
        })
        return find_counter, job

    # 21-26: cap16 NCE on the 16-class shapenet dataset (new data_root).
    # 23-24 add use_diff_object plus step/rotation settings whose keys were
    # recorded with a leading '--' (NOTE(review): likely unintended — compare
    # 25-26 — but preserved verbatim); 25-26 repeat the sweep with the plain
    # keys: different object, same origin, no rotation.
    # Swept variable: nce_presence_temperature (odd 0.1, even 0.01).
    if find_counter in range(21, 27):
        num_gpus = 1
        hours = 8
        job.update({
            'name': '2020.06.10',
            'counter': find_counter,
            'config': 'pointcapsnet_backbone_points5_cap16',
            'criterion': 'backbone_nceprobs_selective',
            'num_routing': 1,
            'dataset': 'shapenet16',
            'batch_size': 16,
            'results_dir': results_shapenet,
            'data_root': data_zeping,
            'do_tsne_test_every': 2,
            'do_tsne_test_after': -1,
            'lr': 3e-4,
            'optimizer': 'adam',
            'presence_type': 'l2norm',
            'simclr_selection_strategy': 'anchor0_other12',
            'nce_presence_lambda': 1.0,
            'epoch': 350,
            'weight_decay': 0,
            'num_output_classes': 16
        })
        if find_counter in (23, 24):
            job.update({
                '--shapenet_stepsize_range': '0,0',
                '--shapenet_rotation_train': '',
                '--shapenet_rotation_test': '',
                'use_diff_object': True
            })
        elif find_counter in (25, 26):
            job.update({
                'shapenet_stepsize_range': '0,0',
                'shapenet_rotation_train': '',
                'shapenet_rotation_test': '',
                'use_diff_object': True
            })
        job['nce_presence_temperature'] = 0.1 if find_counter % 2 else 0.01
        return find_counter, job

    # 27-28: pointcapsnet backbone xent on shapenet16; 29-30: resnet backbone.
    # Swept variable: weight_decay. Adds the modelnet transfer tests.
    if find_counter in range(27, 31):
        num_gpus = 1
        hours = 8
        backbone = ('pointcapsnet_backbone_points5_cap16'
                    if find_counter < 29 else 'resnet_backbone_points16')
        job.update({
            'name': '2020.06.15',
            'counter': find_counter,
            'config': backbone,
            'criterion': 'backbone_xent',
            'num_routing': 1,
            'dataset': 'shapenet16',
            'batch_size': 16,
            'results_dir': results_shapenet,
            'data_root': data_zeping,
            'do_tsne_test_every': 2,
            'do_tsne_test_after': -1,
            'lr': 3e-4,
            'optimizer': 'adam',
            'epoch': 350,
            'num_output_classes': 16,
            'do_modelnet_test_after': 30,
            'do_modelnet_test_every': 10,
            'modelnet_test_epoch': 30,
            'num_workers': 0,
            'weight_decay': 0 if find_counter % 2 else 5e-4
        })
        return find_counter, job

    # 31-34: smbone3 NCE on the full dataset, different object with rotation.
    # 31-32: routing 1 / batch 18; 33-34: routing 2 / batch 8.
    # Swept variable: nce_presence_temperature (odd 0.1, even 0.03).
    # These rebuild `job` from scratch (MNIST defaults dropped).
    if find_counter in [31, 32, 33, 34]:
        num_gpus = 2
        hours = 24
        first_pair = find_counter in (31, 32)
        job = {
            'name': '2020.06.19',
            'counter': find_counter,
            'config': 'resnet_backbone_points16_smbone3_gap',
            'criterion': 'nceprobs_selective',
            'num_output_classes': 55,
            'num_routing': 1 if first_pair else 2,
            'dataset': 'shapenetFull',
            'batch_size': 18 if first_pair else 8,
            'optimizer': 'adam',
            'results_dir': results_shapenet,
            'data_root': data_zeping,
            'do_tsne_test_every': 5,
            'do_tsne_test_after': -1,
            'weight_decay': 0,
            'presence_type': 'l2norm',
            'simclr_selection_strategy': 'anchor0_other12',
            'epoch': 350,
            'use_diff_object': True,
            'shapenet_stepsize_range': '0,0',
            'shapenet_rotation_train': '-90,90',
            'shapenet_rotation_test': '',
            'use_scheduler': True,
            'schedule_milestones': '10,30',
            'lr': 3e-4,
            'num_gpus': num_gpus
        }
        job['nce_presence_temperature'] = 0.1 if find_counter % 2 else 0.03
        return find_counter, job

    # 35-38: NewBackboneModel xent (batch 32); 39-42: same with dynamic
    # routing (batch 8, no base weight_decay). Within each group of four the
    # sweep is (train rotation, weight_decay):
    # ('-90,90', 0), ('', 0), ('-90,90', 5e-4), ('', 5e-4).
    if find_counter in range(35, 43):
        num_gpus = 1
        hours = 24
        dynamic = find_counter >= 39
        job = {
            'name': '2020.06.24',
            'counter': find_counter,
            'config': 'pointcapsnet_backbone_points5_cap16',
            'criterion': 'xent',
            'num_output_classes': 55,
            'num_routing': 1,
            'dataset': 'shapenetFull',
            'num_frames': 1,
            'batch_size': 8 if dynamic else 32,
            'optimizer': 'adam',
            'results_dir': results_shapenet,
            'data_root': data_zeping,
            'do_tsne_test_every': 5,
            'do_tsne_test_after': -1,
            'presence_type': 'l2norm',
            'epoch': 350,
            'use_diff_object': True,
            'shapenet_stepsize_range': '0,0',
            'shapenet_rotation_test': '',
            'use_scheduler': True,
            'schedule_milestones': '10,30',
            'lr': 3e-4,
            'num_gpus': num_gpus
        }
        if dynamic:
            job['dynamic_routing'] = True
        else:
            job['weight_decay'] = 0  # overridden just below; kept as recorded
        offset = (find_counter - 35) % 4
        job.update({
            'shapenet_rotation_train': '-90,90' if offset in (0, 2) else '',
            'weight_decay': 0 if offset < 2 else 5e-4
        })
        return find_counter, job

    # 43-48: 3D point capsules autoencoder. 43-46 sweep (train rotation,
    # weight_decay); 47-48 instead lower lr to 1e-4 and carry NO
    # weight_decay key at all (preserved behavior).
    if find_counter in range(43, 49):
        num_gpus = 1
        hours = 24
        job = {
            'name': '2020.06.28',
            'counter': find_counter,
            'config': 'pointcapsnet_backbone_points5_cap16',
            'criterion': 'autoencoder',
            'num_output_classes': 55,
            'dataset': 'shapenetFull',
            'num_frames': 1,
            'batch_size': 8,
            'optimizer': 'adam',
            'results_dir': results_shapenet,
            'data_root': data_zeping,
            'do_tsne_test_every': 5,
            'do_tsne_test_after': -1,
            'presence_type': 'l2norm',
            'epoch': 350,
            'shapenet_stepsize_range': '0,0',
            'shapenet_rotation_test': '',
            'use_scheduler': True,
            'schedule_milestones': '10,30',
            'lr': 3e-4,
            'num_gpus': num_gpus,
            'do_svm_shapenet_every': 1,
            'do_svm_shapenet_after': 5,
            'linear_batch_size': 16,
        }
        per_counter = {
            43: {'shapenet_rotation_train': '', 'weight_decay': 0},
            44: {'shapenet_rotation_train': '', 'weight_decay': 5e-4},
            45: {'shapenet_rotation_train': '-90,90', 'weight_decay': 0},
            46: {'shapenet_rotation_train': '-90,90', 'weight_decay': 5e-4},
            47: {'shapenet_rotation_train': '', 'lr': 1e-4},
            48: {'shapenet_rotation_train': '-90,90', 'lr': 1e-4},
        }
        job.update(per_counter[find_counter])
        return find_counter, job

    # 49-54: autoencoder over the FullMix / Original / FullComplete datasets.
    # Swept variables: lr and dataset; the SVM probe is pushed out to
    # epoch 300 for everything except the FullMix runs.
    if find_counter in range(49, 55):
        num_gpus = 1
        hours = 24
        job = {
            'name': '2020.06.30',
            'counter': find_counter,
            'config': 'pointcapsnet_backbone_points5_cap16',
            'criterion': 'autoencoder',
            'num_output_classes': 55,
            'num_frames': 1,
            'batch_size': 8,
            'optimizer': 'adam',
            'results_dir': results_shapenet,
            'data_root': data_vidcaps,
            'do_tsne_test_every': 5,
            'do_tsne_test_after': -1,
            'presence_type': 'l2norm',
            'epoch': 350,
            'shapenet_stepsize_range': '0,0',
            'shapenet_rotation_train': '',
            'shapenet_rotation_test': '',
            'use_scheduler': True,
            'schedule_milestones': '10,30',
            'lr': 3e-4,
            'weight_decay': 0,
            'num_gpus': num_gpus,
            'do_svm_shapenet_every': 1,
            'do_svm_shapenet_after': 5,
            'linear_batch_size': 16,
        }
        per_counter = {
            49: {'lr': 1e-4, 'dataset': 'shapenetFullMix'},
            50: {'lr': 3e-4, 'dataset': 'shapenetFullMix'},
            51: {'lr': 1e-4, 'dataset': 'shapenetOriginal',
                 'do_svm_shapenet_after': 300},
            52: {'lr': 3e-4, 'dataset': 'shapenetOriginal',
                 'do_svm_shapenet_after': 300},
            53: {'lr': 1e-4, 'dataset': 'shapenetFullComplete',
                 'do_svm_shapenet_after': 300},
            54: {'lr': 3e-4, 'dataset': 'shapenetFullComplete',
                 'do_svm_shapenet_after': 300},
        }
        job.update(per_counter[find_counter])
        return find_counter, job

    # Fallthrough: unknown (or omitted) counter.
    print('Counter not found')
    return None, None
# Script entry point. Called with no counter, run() matches no branch and
# simply prints 'Counter not found', returning (None, None).
if __name__ == '__main__':
    run()
| 33.891482
| 122
| 0.51551
| 2,894
| 29,045
| 4.909122
| 0.081202
| 0.084395
| 0.065883
| 0.0908
| 0.919476
| 0.911945
| 0.900401
| 0.891532
| 0.852115
| 0.837897
| 0
| 0.060123
| 0.362644
| 29,045
| 856
| 123
| 33.931075
| 0.707325
| 0.06724
| 0
| 0.857337
| 0
| 0.001359
| 0.385204
| 0.204772
| 0
| 0
| 0
| 0
| 0
| 1
| 0.001359
| false
| 0
| 0.001359
| 0
| 0.028533
| 0.001359
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
40a707f8c7f03c0b68a2317f03460f6363ebe070
| 26,122
|
py
|
Python
|
code/predicting_model/nfp/preprocessing/test.py
|
shreesowndarya/CASCADE
|
dcd00c18f73f43ef2ed5bf910898bc91112686b2
|
[
"MIT"
] | 18
|
2019-07-19T16:48:38.000Z
|
2021-08-05T11:45:06.000Z
|
code/predicting_model/nfp/preprocessing/test.py
|
danny305/CASCADE
|
e989512f34b13491a6fffc4cf3b84a609cea3fb4
|
[
"MIT"
] | 3
|
2021-09-03T22:47:55.000Z
|
2022-02-16T07:54:19.000Z
|
code/predicting_model/nfp/preprocessing/test.py
|
danny305/CASCADE
|
e989512f34b13491a6fffc4cf3b84a609cea3fb4
|
[
"MIT"
] | 3
|
2021-10-15T02:00:30.000Z
|
2022-01-19T06:29:05.000Z
|
import logging, sys
import numpy as np
from tqdm import tqdm
from scipy.linalg import eigh
from rdkit import Chem
from rdkit.Chem import MolFromSmiles, MolToSmiles, AddHs
from nfp.preprocessing import features
from nfp.preprocessing.features import Tokenizer
import time
class SmilesPreprocessor(object):
    """ Given a list of SMILES strings, encode these molecules as atom and
    connectivity feature matrices.

    Example:
    >>> preprocessor = SmilesPreprocessor(explicit_hs=False)
    >>> inputs = preprocessor.fit(data.smiles)
    """

    def __init__(self, explicit_hs=True, atom_features=None, bond_features=None):
        """
        explicit_hs : bool
            whether to tell RDkit to add H's to a molecule.
        atom_features : function
            A function applied to an rdkit.Atom that returns some
            representation (i.e., string, integer) for the Tokenizer class.
        bond_features : function
            A function applied to an rdkit Bond to return some description.
        """
        self.atom_tokenizer = Tokenizer()
        self.bond_tokenizer = Tokenizer()
        self.explicit_hs = explicit_hs

        # Fall back to the library's v1 featurizers when none are supplied.
        if atom_features is None:
            atom_features = features.atom_features_v1
        if bond_features is None:
            bond_features = features.bond_features_v1

        self.atom_features = atom_features
        self.bond_features = bond_features

    def fit(self, smiles_iterator):
        """ Fit an iterator of SMILES strings, creating new atom and bond
        tokens for unseen molecules. Returns a list of dicts with 'atom' and
        'connectivity' entries. """
        return list(self.preprocess(smiles_iterator, train=True))

    def predict(self, smiles_iterator):
        """ Uses previously determined atom and bond tokens to convert a SMILES
        iterator into 'atom' and 'connectivity' matrices. Ensures that atom and
        bond classes commute with previously determined results. """
        return list(self.preprocess(smiles_iterator, train=False))

    def preprocess(self, smiles_iterator, train=True):
        """ Lazily featurize each SMILES string; `train` toggles whether the
        tokenizers may mint new classes or must reuse existing ones. """
        self.atom_tokenizer.train = train
        self.bond_tokenizer.train = train

        for smiles in tqdm(smiles_iterator):
            yield self.construct_feature_matrices(smiles)

    @property
    def atom_classes(self):
        """ The number of atom types found (includes the 0 null-atom type) """
        return self.atom_tokenizer.num_classes + 1

    @property
    def bond_classes(self):
        """ The number of bond types found (includes the 0 null-bond type) """
        return self.bond_tokenizer.num_classes + 1

    def construct_feature_matrices(self, smiles):
        """ construct a molecule from the given smiles string and return atom
        and bond classes.

        Returns
        dict with entries
        'n_atom' : number of atoms in the molecule
        'n_bond' : number of bonds in the molecule
        'atom' : (n_atom,) length list of atom classes
        'bond' : (n_bond,) list of bond classes
        'connectivity' : (n_bond, 2) array of source atom, target atom pairs.
        """

        mol = MolFromSmiles(smiles)
        if self.explicit_hs:
            mol = AddHs(mol)

        n_atom = len(mol.GetAtoms())
        # Each chemical bond yields two directed edges.
        n_bond = 2 * len(mol.GetBonds())

        # If its an isolated atom, add a self-link
        if n_bond == 0:
            n_bond = 1

        atom_feature_matrix = np.zeros(n_atom, dtype='int')
        bond_feature_matrix = np.zeros(n_bond, dtype='int')
        connectivity = np.zeros((n_bond, 2), dtype='int')

        bond_index = 0

        atom_seq = mol.GetAtoms()
        atoms = [atom_seq[i] for i in range(n_atom)]

        for n, atom in enumerate(atoms):

            # Atom Classes
            atom_feature_matrix[n] = self.atom_tokenizer(
                self.atom_features(atom))

            start_index = atom.GetIdx()

            for bond in atom.GetBonds():
                # Is the bond pointing at the target atom
                rev = bond.GetBeginAtomIdx() != start_index

                # Bond Classes.
                # BUG FIX: store at the current edge slot `bond_index`, not at
                # the atom index `n`. The old code overwrote one entry per
                # atom, leaving most of bond_feature_matrix zero and the rest
                # misaligned with `connectivity` (compare MolPreprocessor,
                # which correctly indexes by bond_index).
                bond_feature_matrix[bond_index] = self.bond_tokenizer(
                    self.bond_features(bond, flipped=rev))

                # Connectivity
                if not rev:  # Original direction
                    connectivity[bond_index, 0] = bond.GetBeginAtomIdx()
                    connectivity[bond_index, 1] = bond.GetEndAtomIdx()
                else:  # Reversed
                    connectivity[bond_index, 0] = bond.GetEndAtomIdx()
                    connectivity[bond_index, 1] = bond.GetBeginAtomIdx()

                bond_index += 1

        return {
            'n_atom': n_atom,
            'n_bond': n_bond,
            'atom': atom_feature_matrix,
            'bond': bond_feature_matrix,
            'connectivity': connectivity,
        }
class ConnectivityAPreprocessor(SmilesPreprocessor):
    """ Given a list of SMILES strings, encode these molecules as atom and
    connectivity feature matrices.

    This class was a byte-for-byte duplicate of SmilesPreprocessor; it is now
    a trivial subclass so the two copies cannot drift apart. Everything —
    constructor arguments, fit/predict/preprocess, the atom_classes /
    bond_classes properties, and construct_feature_matrices — is inherited
    unchanged, so callers see the same name and the same behavior.

    Example:
    >>> preprocessor = ConnectivityAPreprocessor(explicit_hs=False)
    >>> inputs = preprocessor.fit(data.smiles)
    """
    pass
class MolPreprocessor(SmilesPreprocessor):
    """ I should refactor this into a base class and separate
    SmilesPreprocessor classes. But the idea is that we only need to redefine
    the `construct_feature_matrices` method to have a working preprocessor that
    handles 3D structures.

    We'll pass an iterator of mol objects instead of SMILES strings this time,
    though.
    """

    def __init__(self, n_neighbors, cutoff, **kwargs):
        """ A preprocessor class that also returns distances between
        neighboring atoms. Adds edges for non-bonded atoms to include a maximum
        of n_neighbors around each atom """
        # Maximum number of neighbor edges recorded per atom (int; the +1
        # offsets below account for the atom itself in the sorted distances).
        self.n_neighbors = n_neighbors
        # Distance threshold: atom pairs at or beyond this distance get no
        # edge. Same units as Chem.Get3DDistanceMatrix — presumably Angstroms
        # for standard RDKit conformers; TODO confirm.
        self.cutoff = cutoff
        super(MolPreprocessor, self).__init__(**kwargs)

    def construct_feature_matrices(self, mol):
        """ Given an rdkit mol, return atom feature matrices, bond feature
        matrices, and connectivity matrices.

        Returns
        dict with entries
        'n_atom' : number of atoms in the molecule
        'n_bond' : number of edges (likely n_atom * n_neighbors)
        'atom' : (n_atom,) length list of atom classes
        'bond' : (n_bond,) list of bond classes. 0 for no bond
        'distance' : (n_bond,) list of bond distances
        'connectivity' : (n_bond, 2) array of source atom, target atom pairs.
        """
        n_atom = len(mol.GetAtoms())

        # n_bond is actually the number of atom-atom pairs, so this is defined
        # by the number of neighbors for each atom.
        # if there is cutoff,
        distance_matrix = Chem.Get3DDistanceMatrix(mol)
        if self.n_neighbors <= (n_atom - 1):
            # Upper bound: n_neighbors edges per atom. The per-atom cutoff
            # below may leave trailing zero entries unused.
            n_bond = self.n_neighbors * n_atom
        else:
            # If there are fewer atoms than n_neighbors, all atoms will be
            # connected; count the off-diagonal pairs inside the cutoff.
            n_bond = distance_matrix[(distance_matrix < self.cutoff) & (distance_matrix != 0)].size
        # NOTE(review): a single-atom mol yields zero pairs; pad to one edge
        # so downstream consumers always see at least one entry.
        if n_bond == 0: n_bond = 1

        # Initialize the matrices to be filled in during the following loop.
        atom_feature_matrix = np.zeros(n_atom, dtype='int')
        bond_feature_matrix = np.zeros(n_bond, dtype='int')
        bond_distance_matrix = np.zeros(n_bond, dtype=np.float32)
        connectivity = np.zeros((n_bond, 2), dtype='int')

        # Hopefully we've filtered out all problem mols by now.
        if mol is None:
            raise RuntimeError("Issue in loading mol")

        # Get a list of the atoms in the molecule.
        atom_seq = mol.GetAtoms()
        atoms = [atom_seq[i] for i in range(n_atom)]

        # Here we loop over each atom, and the inner loop iterates over each
        # neighbor of the current atom.
        bond_index = 0  # keep track of our current bond.
        for n, atom in enumerate(atoms):

            # update atom feature matrix
            atom_feature_matrix[n] = self.atom_tokenizer(
                self.atom_features(atom))

            # if n_neighbors is greater than total atoms, then each atom is a
            # neighbor.
            if (self.n_neighbors + 1) > len(mol.GetAtoms()):
                neighbor_end_index = len(mol.GetAtoms())
            else:
                neighbor_end_index = (self.n_neighbors + 1)

            # Number of atoms within the cutoff of this atom (includes the
            # self-distance 0, mirroring the +1 above).
            distance_atom = distance_matrix[n, :]
            cutoff_end_index = distance_atom[distance_atom < self.cutoff].size
            end_index = min(neighbor_end_index, cutoff_end_index)

            # Loop over each of the nearest neighbors.
            # argsort puts the self-distance (0) first, so [1:end_index]
            # skips the atom itself — assumes no other exact-zero distances.
            neighbor_inds = distance_matrix[n, :].argsort()[1:end_index]
            # No neighbor within the cutoff: fall back to a self-link.
            if len(neighbor_inds)==0: neighbor_inds = [n]
            for neighbor in neighbor_inds:

                # update bond feature matrix
                bond = mol.GetBondBetweenAtoms(n, int(neighbor))

                # 0 is the null-bond class for pairs with no chemical bond.
                if bond is None:
                    bond_feature_matrix[bond_index] = 0
                else:
                    rev = False if bond.GetBeginAtomIdx() == n else True
                    bond_feature_matrix[bond_index] = self.bond_tokenizer(
                        self.bond_features(bond, flipped=rev))

                distance = distance_matrix[n, neighbor]
                bond_distance_matrix[bond_index] = distance

                # update connectivity matrix
                connectivity[bond_index, 0] = n
                connectivity[bond_index, 1] = neighbor

                bond_index += 1

        return {
            'n_atom': n_atom,
            'n_bond': n_bond,
            'atom': atom_feature_matrix,
            'bond': bond_feature_matrix,
            'distance': bond_distance_matrix,
            'connectivity': connectivity,
        }
class MolBPreprocessor(MolPreprocessor):
    """Subclass of MolPreprocessor for molecules with a bond-level target.

    In addition to the matrices returned by MolPreprocessor, it returns a
    ``bond_index`` matrix that maps each atom-atom pair back to its position
    in the per-entry bond target array, which is then used as a reference to
    reduce model output to per-bond properties.
    """

    def __init__(self, **kwargs):
        """Forward all keyword arguments to MolPreprocessor."""
        super(MolBPreprocessor, self).__init__(**kwargs)

    def construct_feature_matrices(self, entry):
        """Build feature matrices for one ``(mol, bond_index_array)`` entry.

        Parameters
        ----------
        entry : tuple
            ``(mol, bond_index_array)`` — an rdkit molecule with a 3D
            conformer, and an array of rdkit bond indices that carry the
            target property.

        Returns
        -------
        dict with entries
            'n_atom'       : number of atoms in the molecule
            'n_bond'       : number of atom-atom pairs considered
            'n_pro'        : number of target bonds
            'atom'         : (n_atom,) tokenized atom features
            'bond'         : (n_bond,) tokenized bond features (0 = no bond)
            'distance'     : (n_bond,) 3D distance for each pair
            'connectivity' : (n_bond, 2) source/target atom indices
            'bond_index'   : (n_bond,) position of each pair in
                             bond_index_array, or -1 for non-target pairs

        Raises
        ------
        RuntimeError
            If ``mol`` is None (upstream filtering failed).
        """
        mol, bond_index_array = entry
        # Fail early, before any attribute access on a None molecule.
        if mol is None:
            raise RuntimeError("Issue in loading mol")
        n_atom = len(mol.GetAtoms())
        n_pro = len(bond_index_array)
        distance_matrix = Chem.Get3DDistanceMatrix(mol)
        # n_bond is the number of atom-atom pairs, defined by the number of
        # neighbors per atom (or, for small molecules, by the distance cutoff).
        if self.n_neighbors <= (n_atom - 1):
            n_bond = self.n_neighbors * n_atom
        else:
            # Fewer atoms than n_neighbors: every nonzero-distance pair
            # inside the cutoff is connected.
            n_bond = distance_matrix[
                (distance_matrix < self.cutoff) & (distance_matrix != 0)].size
            if n_bond == 0:
                n_bond = 1
        # Initialize the matrices to be filled in during the following loop.
        atom_feature_matrix = np.zeros(n_atom, dtype='int')
        bond_feature_matrix = np.zeros(n_bond, dtype='int')
        bond_distance_matrix = np.zeros(n_bond, dtype=np.float32)
        bond_index_matrix = np.full(n_bond, -1, dtype='int')
        connectivity = np.zeros((n_bond, 2), dtype='int')
        # Map rdkit bond index -> position in bond_index_array once, instead
        # of calling list.index() inside the inner loop (O(1) vs O(n_pro)).
        bond_pos = {int(idx): i for i, idx in enumerate(bond_index_array)}
        atom_seq = mol.GetAtoms()
        atoms = [atom_seq[i] for i in range(n_atom)]
        bond_index = 0  # running index over atom-atom pairs
        for n, atom in enumerate(atoms):
            # Update atom feature matrix.
            atom_feature_matrix[n] = self.atom_tokenizer(
                self.atom_features(atom))
            # If n_neighbors exceeds the atom count, every atom is a neighbor.
            neighbor_end_index = min(self.n_neighbors + 1, n_atom)
            distance_atom = distance_matrix[n, :]
            cutoff_end_index = distance_atom[distance_atom < self.cutoff].size
            end_index = min(neighbor_end_index, cutoff_end_index)
            # Nearest neighbors by distance; index [0] is the atom itself.
            neighbor_inds = distance_atom.argsort()[1:end_index]
            if len(neighbor_inds) == 0:
                # Self-loop so an isolated atom still has one pair.
                neighbor_inds = [n]
            for neighbor in neighbor_inds:
                bond = mol.GetBondBetweenAtoms(n, int(neighbor))
                if bond is None:
                    bond_feature_matrix[bond_index] = 0
                else:
                    # Flip bond features when traversing end -> begin atom.
                    rev = bond.GetBeginAtomIdx() != n
                    bond_feature_matrix[bond_index] = self.bond_tokenizer(
                        self.bond_features(bond, flipped=rev))
                    # Leave -1 (from np.full) for bonds without a target.
                    if bond.GetIdx() in bond_pos:
                        bond_index_matrix[bond_index] = bond_pos[bond.GetIdx()]
                bond_distance_matrix[bond_index] = distance_matrix[n, neighbor]
                # Update connectivity matrix.
                connectivity[bond_index, 0] = n
                connectivity[bond_index, 1] = neighbor
                bond_index += 1
        return {
            'n_atom': n_atom,
            'n_bond': n_bond,
            'n_pro': n_pro,
            'atom': atom_feature_matrix,
            'bond': bond_feature_matrix,
            'distance': bond_distance_matrix,
            'connectivity': connectivity,
            'bond_index': bond_index_matrix,
        }
class MolAPreprocessor(MolPreprocessor):
    """Subclass of MolPreprocessor for molecules with an atom-level target.

    In addition to the matrices returned by MolPreprocessor, it returns an
    ``atom_index`` matrix that maps each atom back to its position in the
    per-entry atom target array, which is then used as a reference to reduce
    model output to per-atom properties.
    """

    def __init__(self, **kwargs):
        """Forward all keyword arguments to MolPreprocessor."""
        super(MolAPreprocessor, self).__init__(**kwargs)

    def construct_feature_matrices(self, entry):
        """Build feature matrices for one ``(mol, atom_index_array)`` entry.

        Parameters
        ----------
        entry : tuple
            ``(mol, atom_index_array)`` — an rdkit molecule with a 3D
            conformer, and an array of rdkit atom indices that carry the
            target property.

        Returns
        -------
        dict with entries
            'n_atom'       : number of atoms in the molecule
            'n_bond'       : number of atom-atom pairs considered
            'n_pro'        : number of target atoms
            'atom'         : (n_atom,) tokenized atom features
            'bond'         : (n_bond,) tokenized bond features (0 = no bond)
            'distance'     : (n_bond,) 3D distance for each pair
            'connectivity' : (n_bond, 2) source/target atom indices
            'atom_index'   : (n_atom,) position of each atom in
                             atom_index_array, or -1 for non-target atoms

        Raises
        ------
        RuntimeError
            If ``mol`` is None (upstream filtering failed).
        """
        mol, atom_index_array = entry
        # Fail early, before any attribute access on a None molecule.
        if mol is None:
            raise RuntimeError("Issue in loading mol")
        n_atom = len(mol.GetAtoms())
        n_pro = len(atom_index_array)
        distance_matrix = Chem.Get3DDistanceMatrix(mol)
        # n_bond is the number of atom-atom pairs, defined by the number of
        # neighbors per atom (or, for small molecules, by the distance cutoff).
        if self.n_neighbors <= (n_atom - 1):
            n_bond = self.n_neighbors * n_atom
        else:
            # Fewer atoms than n_neighbors: every nonzero-distance pair
            # inside the cutoff is connected.
            n_bond = distance_matrix[
                (distance_matrix < self.cutoff) & (distance_matrix != 0)].size
            if n_bond == 0:
                n_bond = 1
        # Initialize the matrices to be filled in during the following loop.
        atom_feature_matrix = np.zeros(n_atom, dtype='int')
        bond_feature_matrix = np.zeros(n_bond, dtype='int')
        bond_distance_matrix = np.zeros(n_bond, dtype=np.float32)
        atom_index_matrix = np.full(n_atom, -1, dtype='int')
        connectivity = np.zeros((n_bond, 2), dtype='int')
        # Map rdkit atom index -> position in atom_index_array once, instead
        # of calling list.index() inside the loop (O(1) vs O(n_pro)).
        atom_pos = {int(idx): i for i, idx in enumerate(atom_index_array)}
        atom_seq = mol.GetAtoms()
        atoms = [atom_seq[i] for i in range(n_atom)]
        bond_index = 0  # running index over atom-atom pairs
        for n, atom in enumerate(atoms):
            # Update atom feature matrix.
            atom_feature_matrix[n] = self.atom_tokenizer(
                self.atom_features(atom))
            # Leave -1 (from np.full) for atoms without a target.
            if atom.GetIdx() in atom_pos:
                atom_index_matrix[n] = atom_pos[atom.GetIdx()]
            # If n_neighbors exceeds the atom count, every atom is a neighbor.
            neighbor_end_index = min(self.n_neighbors + 1, n_atom)
            distance_atom = distance_matrix[n, :]
            cutoff_end_index = distance_atom[distance_atom < self.cutoff].size
            end_index = min(neighbor_end_index, cutoff_end_index)
            # Nearest neighbors by distance; index [0] is the atom itself.
            neighbor_inds = distance_atom.argsort()[1:end_index]
            if len(neighbor_inds) == 0:
                # Self-loop so an isolated atom still has one pair.
                neighbor_inds = [n]
            for neighbor in neighbor_inds:
                bond = mol.GetBondBetweenAtoms(n, int(neighbor))
                if bond is None:
                    bond_feature_matrix[bond_index] = 0
                else:
                    # Flip bond features when traversing end -> begin atom.
                    rev = bond.GetBeginAtomIdx() != n
                    bond_feature_matrix[bond_index] = self.bond_tokenizer(
                        self.bond_features(bond, flipped=rev))
                bond_distance_matrix[bond_index] = distance_matrix[n, neighbor]
                # Update connectivity matrix.
                connectivity[bond_index, 0] = n
                connectivity[bond_index, 1] = neighbor
                bond_index += 1
        return {
            'n_atom': n_atom,
            'n_bond': n_bond,
            'n_pro': n_pro,
            'atom': atom_feature_matrix,
            'bond': bond_feature_matrix,
            'distance': bond_distance_matrix,
            'connectivity': connectivity,
            'atom_index': atom_index_matrix,
        }
# TODO: rewrite this
# class LaplacianSmilesPreprocessor(SmilesPreprocessor):
# """ Extends the SmilesPreprocessor class to also return eigenvalues and
# eigenvectors of the graph laplacian matrix.
#
# Example:
# >>> preprocessor = SmilesPreprocessor(
# >>> max_atoms=55, max_bonds=62, max_degree=4, explicit_hs=False)
# >>> atom, connectivity, eigenvalues, eigenvectors = preprocessor.fit(
# data.smiles)
# """
#
# def preprocess(self, smiles_iterator, train=True):
#
# self.atom_tokenizer.train = train
# self.bond_tokenizer.train = train
#
# for smiles in tqdm(smiles_iterator):
# G = self._mol_to_nx(smiles)
# A = self._get_atom_feature_matrix(G)
# C = self._get_connectivity_matrix(G)
# W, V = self._get_laplacian_spectral_decomp(G)
# yield A, C, W, V
#
#
# def _get_laplacian_spectral_decomp(self, G):
# """ Return the eigenvalues and eigenvectors of the graph G, padded to
# `self.max_atoms`.
# """
#
# w0 = np.zeros((self.max_atoms, 1))
# v0 = np.zeros((self.max_atoms, self.max_atoms))
#
# w, v = eigh(nx.laplacian_matrix(G).todense())
#
# num_atoms = len(v)
#
# w0[:num_atoms, 0] = w
# v0[:num_atoms, :num_atoms] = v
#
# return w0, v0
#
#
# def fit(self, smiles_iterator):
# results = self._fit(smiles_iterator)
# return {'atom': results[0],
# 'connectivity': results[1],
# 'w': results[2],
# 'v': results[3]}
#
#
# def predict(self, smiles_iterator):
# results = self._predict(smiles_iterator)
# return {'atom': results[0],
# 'connectivity': results[1],
# 'w': results[2],
# 'v': results[3]}
def get_max_atom_bond_size(smiles_iterator, explicit_hs=True):
    """Convenience function to get max_atoms, max_bonds for a set of input
    SMILES.

    Parameters
    ----------
    smiles_iterator : iterable of str
        SMILES strings to scan.
    explicit_hs : bool, optional
        If True (default), add explicit hydrogens before counting.

    Returns
    -------
    dict
        ``{'max_atoms': ..., 'max_bonds': ...}`` where max_bonds is doubled
        because each bond is traversed in both directions downstream.
    """
    max_atoms = 0
    max_bonds = 0
    for smiles in tqdm(smiles_iterator):
        mol = MolFromSmiles(smiles)
        if explicit_hs:
            mol = AddHs(mol)
        # max() over two scalars directly; no throwaway list needed.
        max_atoms = max(max_atoms, len(mol.GetAtoms()))
        max_bonds = max(max_bonds, len(mol.GetBonds()))
    return dict(max_atoms=max_atoms, max_bonds=max_bonds * 2)
def canonicalize_smiles(smiles, isomeric=True, sanitize=True):
    """Return the RDKit-canonical form of *smiles*, or None if it fails.

    Parameters
    ----------
    smiles : str
        Input SMILES string.
    isomeric : bool, optional
        Include stereochemistry in the output (default True).
    sanitize : bool, optional
        Sanitize the molecule while parsing (default True).

    Returns
    -------
    str or None
        Canonical SMILES, or None when the input cannot be parsed or
        serialized (best-effort contract; callers must handle None).
    """
    try:
        mol = MolFromSmiles(smiles, sanitize=sanitize)
        return MolToSmiles(mol, isomericSmiles=isomeric)
    except Exception:
        # MolFromSmiles returns None for unparseable input, which makes
        # MolToSmiles raise; treat any failure as "not canonicalizable"
        # and return None explicitly rather than falling off the end.
        return None
| 35.540136
| 102
| 0.597772
| 3,120
| 26,122
| 4.819872
| 0.094231
| 0.018287
| 0.02374
| 0.012103
| 0.862349
| 0.848118
| 0.841402
| 0.834619
| 0.831627
| 0.831627
| 0
| 0.006422
| 0.32042
| 26,122
| 734
| 103
| 35.588556
| 0.840694
| 0.344346
| 0
| 0.856707
| 0
| 0
| 0.020191
| 0
| 0
| 0
| 0
| 0.001362
| 0
| 1
| 0.067073
| false
| 0.009146
| 0.027439
| 0
| 0.155488
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
907669c62b1a966fa69eaccb3a9bcb9f88c1bbb3
| 6,493
|
py
|
Python
|
extending_streamlit_usage/006_script_in_script/_MODEL_shuffle_launch_tests_all_bach_go_no_go_3_EACH_DEBUG_DELAY_6.py
|
bflaven/BlogArticlesExamples
|
5df2dfc26170ffbbade78ba136bf3172391e3b2a
|
[
"MIT"
] | 5
|
2018-05-03T08:16:02.000Z
|
2021-09-04T03:44:24.000Z
|
extending_streamlit_usage/006_script_in_script/_MODEL_shuffle_launch_tests_all_bach_go_no_go_3_EACH_DEBUG_DELAY_6.py
|
bflaven/BlogArticlesExamples
|
5df2dfc26170ffbbade78ba136bf3172391e3b2a
|
[
"MIT"
] | 1
|
2022-01-28T19:27:19.000Z
|
2022-01-28T19:27:19.000Z
|
extending_streamlit_usage/006_script_in_script/_MODEL_shuffle_launch_tests_all_bach_go_no_go_3_EACH_DEBUG_DELAY_6.py
|
bflaven/BlogArticlesExamples
|
5df2dfc26170ffbbade78ba136bf3172391e3b2a
|
[
"MIT"
] | 2
|
2020-09-10T13:33:27.000Z
|
2022-02-09T11:07:38.000Z
|
#!/usr/local/bin/python3
# -*- coding: utf-8 -*-
#
# NOTE: the docstring below must stay a raw string — the Windows path
# contains "\U", which in a normal string literal is an invalid 8-digit
# unicode escape and makes the whole file fail to parse with SyntaxError.
r"""
cd C:\Users\bflaven\Documents\node_test_codeceptjs\
python shuffle_launch_tests_all_bach_go_no_go_3_EACH_DEBUG_DELAY_6.py

Launch a shuffled batch of codeceptjs end-to-end tests, sleeping a random
number of seconds between runs.
"""
import os
import sys
import random
import time

# Set the correct values for your path and script.
# my_path = '/Users/brunoflaven/Documents/02_copy/_random_is_all_about/test_platform/e2e/'
my_path = 'C:/Users/bflaven/Documents/node_test_codeceptjs/'
my_path_cpjs = 'C:/Users/bflaven/Documents/node_test_codeceptjs/'

# Commands to run; the list is shuffled so tests execute in a random order.
full_command_file_names = [
    'npx codeceptjs run --config=codecept_F24_EN.conf.js --steps try_windows_63_request_processing_history_test.js',
    'npx codeceptjs run --config=codecept_RFI_ES.conf.js --steps try_windows_63_request_processing_history_test.js',
    'npx codeceptjs run --config=codecept_MCD_AR.conf.js --steps try_windows_63_request_processing_history_test.js',
    'npx codeceptjs run --config=codecept_OBS_EN.conf.js --steps try_windows_63_request_processing_history_test.js'
]
random.shuffle(full_command_file_names)

os.chdir(my_path)
print(os.getcwd())

for file_name in full_command_file_names:
    print("\n")
    print("=== DEBUG " + file_name + " ")
    # NOTE(review): os.system runs through the shell. The commands come from
    # the hard-coded list above, so this is acceptable here — never feed it
    # untrusted input.
    os.system(file_name)
    # Random pause between npx runs (seconds).
    timeDelay = random.randrange(0, 15)
    print("\n=== TIMEDELAY " + str(timeDelay) + " === ")
    time.sleep(timeDelay)
| 49.564885
| 130
| 0.804559
| 1,047
| 6,493
| 4.586437
| 0.168099
| 0.095793
| 0.139942
| 0.19242
| 0.80883
| 0.790504
| 0.776343
| 0.733861
| 0.733861
| 0.711787
| 0
| 0.019515
| 0.084553
| 6,493
| 131
| 131
| 49.564886
| 0.788358
| 0.142461
| 0
| 0
| 0
| 0
| 0.54845
| 0.406977
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.181818
| null | null | 0.181818
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
908efeb89dcb8517307e9c735af568c7cb4ced93
| 153
|
py
|
Python
|
staticpy/lang/common/string.py
|
SnowWalkerJ/StaticPy
|
818b7f009af7a6040313791993f543779781dddf
|
[
"BSD-3-Clause"
] | 13
|
2019-10-14T19:22:11.000Z
|
2021-08-23T08:39:06.000Z
|
staticpy/lang/common/string.py
|
SnowWalkerJ/StaticPy
|
818b7f009af7a6040313791993f543779781dddf
|
[
"BSD-3-Clause"
] | 5
|
2019-09-30T07:42:18.000Z
|
2020-01-01T15:07:00.000Z
|
staticpy/lang/common/string.py
|
SnowWalkerJ/StaticPy
|
818b7f009af7a6040313791993f543779781dddf
|
[
"BSD-3-Clause"
] | null | null | null |
def stringify_arguments(args):
    """Render *args* as a comma-separated string of cast expressions."""
    from ..expression import cast_value_to_expression
    return ", ".join(str(cast_value_to_expression(arg)) for arg in args)
| 38.25
| 67
| 0.764706
| 21
| 153
| 5.238095
| 0.666667
| 0.163636
| 0.2
| 0.381818
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.124183
| 153
| 3
| 68
| 51
| 0.820896
| 0
| 0
| 0
| 0
| 0
| 0.013072
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
90cb178b813099ce8311d84c32378045f36df510
| 28,907
|
py
|
Python
|
cicerotwebapp/migrations/0001_initial.py
|
ElitosGon/cicerotproject
|
e7ca27cbe2c12b97c6ffac44d4f81c1f6a7d2f4b
|
[
"Apache-2.0"
] | null | null | null |
cicerotwebapp/migrations/0001_initial.py
|
ElitosGon/cicerotproject
|
e7ca27cbe2c12b97c6ffac44d4f81c1f6a7d2f4b
|
[
"Apache-2.0"
] | 3
|
2020-06-05T17:32:33.000Z
|
2021-06-10T19:03:27.000Z
|
cicerotwebapp/migrations/0001_initial.py
|
ElitosGon/cicerotproject
|
e7ca27cbe2c12b97c6ffac44d4f81c1f6a7d2f4b
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2017-05-16 11:28
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Actividad',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nombre_actividad', models.CharField(blank=True, max_length=100, null=True, verbose_name='Nombre')),
('descripcion_actividad', models.TextField(blank=True, max_length=400, null=True, verbose_name='Descripción actividad')),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='Fecha creación')),
('updated_at', models.DateTimeField(auto_now=True, verbose_name='Fecha última modificación')),
('usuario', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='Usuario')),
],
),
migrations.CreateModel(
name='Comentario',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('texto_comentario', models.CharField(blank=True, max_length=255, null=True, verbose_name='Comentario')),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='Fecha creación')),
('updated_at', models.DateTimeField(auto_now=True, verbose_name='Fecha última modificación')),
],
),
migrations.CreateModel(
name='Comuna',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nombre_comuna', models.CharField(max_length=100, verbose_name='Nombre')),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='Fecha creación')),
('updated_at', models.DateTimeField(auto_now=True, verbose_name='Fecha última modificación')),
],
),
migrations.CreateModel(
name='EstadoTour',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nombre_estado_tour', models.CharField(blank=True, max_length=100, null=True, verbose_name='Nombre')),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='Fecha creación')),
('updated_at', models.DateTimeField(auto_now=True, verbose_name='Fecha última modificación')),
],
),
migrations.CreateModel(
name='Evaluacion',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('puntuacion_tiempo_evaluacion', models.IntegerField(blank=True, default=0, null=True, verbose_name='Puntuación tiempo')),
('puntuacion_calidad_evaluacion', models.IntegerField(blank=True, default=0, null=True, verbose_name='Puntuación calidad')),
('puntuacion_cumplimiento_evaluacion', models.IntegerField(blank=True, default=0, null=True, verbose_name='Puntuación cumplimiento')),
('comentario_evaluacion', models.TextField(blank=True, max_length=400, null=True, verbose_name='Comentario')),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='Fecha creación')),
('updated_at', models.DateTimeField(auto_now=True, verbose_name='Fecha última modificación')),
],
),
migrations.CreateModel(
name='Favorito',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('tipo_favorito', models.CharField(blank=True, max_length=255, null=True, verbose_name='Tipo')),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='Fecha creación')),
('updated_at', models.DateTimeField(auto_now=True, verbose_name='Fecha última modificación')),
],
),
migrations.CreateModel(
name='Guia',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('descripcion_guia', models.TextField(blank=True, max_length=400, null=True, verbose_name='Descripción Guia')),
('clasificacion_guia', models.CharField(blank=True, max_length=100, null=True, verbose_name='Clasificación')),
('rut_guia', models.CharField(blank=True, max_length=100, null=True, verbose_name='Rut')),
('telefono_guia', models.CharField(blank=True, max_length=100, null=True, verbose_name='Teléfono guia')),
('celular_guia', models.CharField(blank=True, max_length=100, null=True, verbose_name='Celular guia')),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='Fecha creación')),
('updated_at', models.DateTimeField(auto_now=True, verbose_name='Fecha última modificación')),
],
),
migrations.CreateModel(
name='Horario',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('inicio_horario', models.DateTimeField(blank=True, null=True, verbose_name='Fecha inicio')),
('fin_horario', models.DateTimeField(blank=True, null=True, verbose_name='Fecha fin')),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='Fecha creación')),
('updated_at', models.DateTimeField(auto_now=True, verbose_name='Fecha última modificación')),
],
),
migrations.CreateModel(
name='Inscripcion',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('cupo_inscripcion', models.IntegerField(blank=True, default=0, null=True, verbose_name='Cupo inscripción')),
('costo_inscripcion', models.IntegerField(blank=True, default=0, null=True, verbose_name='Costo')),
('terminos_servicio', models.CharField(blank=True, max_length=255, null=True, verbose_name='Terminos')),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='Fecha creación')),
('updated_at', models.DateTimeField(auto_now=True, verbose_name='Fecha última modificación')),
('evaluacion', models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='evaliación', to='cicerotwebapp.Evaluacion', verbose_name='Evaluación')),
],
),
migrations.CreateModel(
name='InstanciaTour',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('inicio_instancia_tour', models.DateTimeField(blank=True, null=True, verbose_name='Fecha inicio')),
('fin_instancia_tour', models.DateTimeField(blank=True, null=True, verbose_name='Fecha fin')),
('cupo_instancia_tour', models.IntegerField(blank=True, default=0, null=True, verbose_name='Cupo instancia tour')),
('costo_instacia_tour', models.IntegerField(blank=True, default=0, null=True, verbose_name='Costo')),
('estado_instacia_tour', models.CharField(blank=True, max_length=225, null=True, verbose_name='Estado instancia')),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='Fecha creación')),
('updated_at', models.DateTimeField(auto_now=True, verbose_name='Fecha última modificación')),
],
),
migrations.CreateModel(
name='Multimedia',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nombre_multimedia', models.CharField(blank=True, max_length=100, null=True, verbose_name='Nombre')),
('descripcion_multimedia', models.TextField(blank=True, max_length=400, null=True, verbose_name='Descripción')),
('formato_multimedia', models.CharField(blank=True, max_length=100, null=True, verbose_name='Formato')),
('archivo_multimedia', models.FileField(blank=True, null=True, upload_to='documentos/multimedia/%Y/%m/%d', verbose_name='Archivo multimedia')),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='Fecha creación')),
('updated_at', models.DateTimeField(auto_now=True, verbose_name='Fecha última modificación')),
('actividad', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='cicerotwebapp.Actividad', verbose_name='Actividad')),
],
),
migrations.CreateModel(
name='Pais',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nombre_pais', models.CharField(blank=True, max_length=255, null=True, verbose_name='Nombre')),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='Fecha creación')),
('updated_at', models.DateTimeField(auto_now=True, verbose_name='Fecha última modificación')),
],
),
migrations.CreateModel(
name='Provincia',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nombre_provincia', models.CharField(max_length=100, verbose_name='Nombre')),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='Fecha creación')),
('updated_at', models.DateTimeField(auto_now=True, verbose_name='Fecha última modificación')),
],
),
migrations.CreateModel(
name='Region',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nombre_region', models.CharField(max_length=100, verbose_name='Nombre')),
('sigla_region', models.CharField(max_length=100, verbose_name='Sigla')),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='Fecha creación')),
('updated_at', models.DateTimeField(auto_now=True, verbose_name='Fecha última modificación')),
],
),
migrations.CreateModel(
name='RegistroGuia',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('direccion_guia_registro', models.CharField(blank=True, max_length=255, null=True, verbose_name='Dirección Guia')),
('direccion_representante_legal_registro', models.CharField(blank=True, max_length=255, null=True, verbose_name='Dirección Representante legal')),
('razon_social_guia_registro', models.CharField(blank=True, max_length=255, null=True, verbose_name='Razón social')),
('representante_legal_guia_registro', models.CharField(blank=True, max_length=255, null=True, verbose_name='Representante legal')),
('nombre_fantasia_guia_registro', models.CharField(blank=True, max_length=255, null=True, verbose_name='Nombre')),
('es_sello_q_registro', models.BooleanField(default=False, verbose_name='¿Tiene sello Q?')),
('tipo_sello_q_registro', models.CharField(blank=True, max_length=255, null=True, verbose_name='Tipo sello')),
('inicio_sello_q_registro', models.DateTimeField(blank=True, null=True, verbose_name='Fecha inicio sello Q')),
('fin_sello_q_registro', models.DateTimeField(blank=True, null=True, verbose_name='Fecha fin sello Q')),
('tipo_personalidad_registro', models.CharField(blank=True, max_length=255, null=True, verbose_name='Personalidad Jurídica')),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='Fecha creación')),
('updated_at', models.DateTimeField(auto_now=True, verbose_name='Fecha última modificación')),
],
),
migrations.CreateModel(
name='Rol',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nombre_rol', models.CharField(blank=True, max_length=100, null=True, verbose_name='Nombre')),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='Fecha creación')),
('updated_at', models.DateTimeField(auto_now=True, verbose_name='Fecha última modificación')),
('usuario', models.ManyToManyField(blank=True, to=settings.AUTH_USER_MODEL, verbose_name='Roles')),
],
),
migrations.CreateModel(
name='ServicioTour',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('descripcion_servicio_tour', models.TextField(blank=True, max_length=400, null=True, verbose_name='Descripción')),
('es_pago_servicio_tour', models.BooleanField(default=False, verbose_name='¿Tiene costo el servicio?')),
('costo_servicio_tour', models.IntegerField(blank=True, default=0, null=True, verbose_name='Costo')),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='Fecha creación')),
('updated_at', models.DateTimeField(auto_now=True, verbose_name='Fecha última modificación')),
],
),
migrations.CreateModel(
name='Staff',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('descripcion_staff', models.TextField(blank=True, max_length=400, null=True, verbose_name='Descripción')),
('cargo_staff', models.CharField(blank=True, max_length=255, null=True, verbose_name='Cargo Staff')),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='Fecha creación')),
('updated_at', models.DateTimeField(auto_now=True, verbose_name='Fecha última modificación')),
('usuario', models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='usuario')),
],
),
migrations.CreateModel(
name='Suscripcion',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='Fecha creación')),
('updated_at', models.DateTimeField(auto_now=True, verbose_name='Fecha última modificación')),
('usuario_seguido', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='seguido', to=settings.AUTH_USER_MODEL, verbose_name='Seguido')),
('usuario_seguidor', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='seguidor', to=settings.AUTH_USER_MODEL, verbose_name='Seguidor')),
],
),
migrations.CreateModel(
name='Tag',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('texto_tag', models.CharField(blank=True, max_length=100, null=True, verbose_name='Tag')),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='Fecha creación')),
('updated_at', models.DateTimeField(auto_now=True, verbose_name='Fecha última modificación')),
],
),
migrations.CreateModel(
name='TextoSelect',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('categoria_texto_select', models.CharField(blank=True, max_length=100, null=True, verbose_name='Categoría')),
('texto_select', models.CharField(blank=True, max_length=100, null=True, verbose_name='Texto')),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='Fecha creación')),
('updated_at', models.DateTimeField(auto_now=True, verbose_name='Fecha última modificación')),
],
),
migrations.CreateModel(
name='TipoGuia',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nombre_tipo_guia', models.CharField(blank=True, max_length=100, null=True, verbose_name='Nombre')),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='Fecha creación')),
('updated_at', models.DateTimeField(auto_now=True, verbose_name='Fecha última modificación')),
],
),
migrations.CreateModel(
name='TipoMultimedia',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nombre_tipo_multimedia', models.CharField(blank=True, max_length=100, null=True, verbose_name='Nombre')),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='Fecha creación')),
('updated_at', models.DateTimeField(auto_now=True, verbose_name='Fecha última modificación')),
],
),
migrations.CreateModel(
name='TipoServicio',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nombre_tipo_servicio', models.CharField(blank=True, max_length=255, null=True, verbose_name='Nombre')),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='Fecha creación')),
('updated_at', models.DateTimeField(auto_now=True, verbose_name='Fecha última modificación')),
],
),
migrations.CreateModel(
name='TipoTag',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nombre_tipo_tag', models.CharField(blank=True, max_length=100, null=True, verbose_name='Nombre')),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='Fecha creación')),
('updated_at', models.DateTimeField(auto_now=True, verbose_name='Fecha última modificación')),
],
),
migrations.CreateModel(
name='TipoTour',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nombre_tipo_tour', models.CharField(blank=True, max_length=100, null=True, verbose_name='Nombre')),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='Fecha creación')),
('updated_at', models.DateTimeField(auto_now=True, verbose_name='Fecha última modificación')),
],
),
migrations.CreateModel(
name='Tour',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nombre_tour', models.CharField(blank=True, max_length=100, null=True, verbose_name='Nombre Tour')),
('descripcion_tour', models.TextField(blank=True, max_length=400, null=True, verbose_name='Descripción Tour')),
('capacidad_tour', models.IntegerField(blank=True, null=True, verbose_name='Capacidad Tour')),
('precio_tour', models.IntegerField(blank=True, null=True, verbose_name='Precio Tour')),
('es_oferta', models.BooleanField(default=False, verbose_name='¿Tour en oferta?')),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='Fecha creación')),
('updated_at', models.DateTimeField(auto_now=True, verbose_name='Fecha última modificación')),
('comunas', models.ManyToManyField(blank=True, related_name='tour', to='cicerotwebapp.Comuna', verbose_name='Comunas tour')),
('estado_tour', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='cicerotwebapp.EstadoTour', verbose_name='Estado Tour')),
('guia', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='tour', to='cicerotwebapp.Guia')),
('tags', models.ManyToManyField(blank=True, related_name='tour', to='cicerotwebapp.Tag', verbose_name='Tags tour')),
('tipo_tour', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='cicerotwebapp.TipoTour', verbose_name='Tipo tour')),
],
),
migrations.CreateModel(
name='Transaccion',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('monto_transaccion', models.IntegerField(blank=True, default=0, null=True, verbose_name='Monto transacción')),
('codigo_transaccion', models.CharField(blank=True, max_length=255, null=True, verbose_name='Código transacción')),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='Fecha creación')),
('updated_at', models.DateTimeField(auto_now=True, verbose_name='Fecha última modificación')),
],
),
migrations.CreateModel(
name='Turista',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('descripcion_turista', models.TextField(blank=True, max_length=400, null=True, verbose_name='Descripción Turista')),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='Fecha creación')),
('updated_at', models.DateTimeField(auto_now=True, verbose_name='Fecha última modificación')),
('pais', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='cicerotwebapp.Pais', verbose_name='Pais')),
('usuario', models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='usuario')),
],
),
migrations.AddField(
model_name='tag',
name='tipo_tag',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='cicerotwebapp.TipoTag', verbose_name='Tipo de Tag'),
),
migrations.AddField(
model_name='serviciotour',
name='tipo_servicio',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='cicerotwebapp.TipoServicio', verbose_name='Tipo servicio'),
),
migrations.AddField(
model_name='serviciotour',
name='tour',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='cicerotwebapp.Tour', verbose_name='Tour'),
),
migrations.AddField(
model_name='provincia',
name='region',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='cicerotwebapp.Region', verbose_name='Región'),
),
migrations.AddField(
model_name='multimedia',
name='tipo_multimedia',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='cicerotwebapp.TipoMultimedia', verbose_name='Tipo multimedia'),
),
migrations.AddField(
model_name='multimedia',
name='tour',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='cicerotwebapp.Tour', verbose_name='Tour'),
),
migrations.AddField(
model_name='multimedia',
name='usuario',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='Usuario'),
),
migrations.AddField(
model_name='instanciatour',
name='tour',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='cicerotwebapp.Tour', verbose_name='Tour'),
),
migrations.AddField(
model_name='inscripcion',
name='instancia_tour',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='cicerotwebapp.InstanciaTour', verbose_name='Instancia tour'),
),
migrations.AddField(
model_name='inscripcion',
name='transaccion',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='transacción', to='cicerotwebapp.Transaccion', verbose_name='Transacción'),
),
migrations.AddField(
model_name='inscripcion',
name='turista',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='cicerotwebapp.Turista', verbose_name='Turista'),
),
migrations.AddField(
model_name='horario',
name='tour',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='cicerotwebapp.Tour', verbose_name='Horario Tour'),
),
migrations.AddField(
model_name='guia',
name='registro',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='guia', to='cicerotwebapp.RegistroGuia', verbose_name='Registro guía'),
),
migrations.AddField(
model_name='guia',
name='tags',
field=models.ManyToManyField(blank=True, related_name='guia', to='cicerotwebapp.Tag', verbose_name='Tags guía'),
),
migrations.AddField(
model_name='guia',
name='tipos_guia',
field=models.ManyToManyField(blank=True, related_name='guia', to='cicerotwebapp.TipoGuia', verbose_name='Tipos guía'),
),
migrations.AddField(
model_name='guia',
name='usuario',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='Usuario'),
),
migrations.AddField(
model_name='favorito',
name='tour',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='cicerotwebapp.Tour', verbose_name='Tour'),
),
migrations.AddField(
model_name='favorito',
name='turista',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='cicerotwebapp.Turista', verbose_name='Turista'),
),
migrations.AddField(
model_name='comuna',
name='provincia',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='cicerotwebapp.Provincia', verbose_name='Provincia'),
),
migrations.AddField(
model_name='comentario',
name='tour',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='cicerotwebapp.Tour', verbose_name='Tour comentado'),
),
migrations.AddField(
model_name='comentario',
name='usuario',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='Usuario que comenta'),
),
]
| 66.300459
| 206
| 0.638254
| 3,096
| 28,907
| 5.756137
| 0.061693
| 0.114808
| 0.096796
| 0.071825
| 0.860558
| 0.858201
| 0.837495
| 0.81735
| 0.806801
| 0.794456
| 0
| 0.007056
| 0.225343
| 28,907
| 435
| 207
| 66.452874
| 0.788639
| 0.002352
| 0
| 0.655738
| 1
| 0
| 0.187509
| 0.029408
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.009368
| 0
| 0.018735
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
2921acde1414d0e442445698e3df7d0415c406b5
| 16,359
|
py
|
Python
|
ucsmsdk/mometa/config/ConfigImpact.py
|
Kego/ucsmsdk
|
244f283a5c295cf746110bb96686d079b19927ce
|
[
"Apache-2.0"
] | 78
|
2015-11-30T14:10:05.000Z
|
2022-02-13T00:29:08.000Z
|
ucsmsdk/mometa/config/ConfigImpact.py
|
Kego/ucsmsdk
|
244f283a5c295cf746110bb96686d079b19927ce
|
[
"Apache-2.0"
] | 113
|
2015-11-20T09:42:46.000Z
|
2022-03-16T16:53:29.000Z
|
ucsmsdk/mometa/config/ConfigImpact.py
|
Kego/ucsmsdk
|
244f283a5c295cf746110bb96686d079b19927ce
|
[
"Apache-2.0"
] | 86
|
2015-12-12T08:22:18.000Z
|
2022-01-23T03:56:34.000Z
|
"""This module contains the general information for ConfigImpact ManagedObject."""
from ...ucsmo import ManagedObject
from ...ucscoremeta import MoPropertyMeta, MoMeta
from ...ucsmeta import VersionMeta
class ConfigImpactConsts:
    """Named constants for the string values a ConfigImpact MO reports.

    Grouped by property: chassisConfigState / configState ("applied",
    "applying", ...), deploymentMode, and rebootRequired.
    """
    CHASSIS_CONFIG_STATE_APPLIED = "applied"
    CHASSIS_CONFIG_STATE_APPLYING = "applying"
    CHASSIS_CONFIG_STATE_FAILED_TO_APPLY = "failed-to-apply"
    CHASSIS_CONFIG_STATE_NOT_APPLIED = "not-applied"
    CONFIG_STATE_APPLIED = "applied"
    CONFIG_STATE_APPLYING = "applying"
    CONFIG_STATE_FAILED_TO_APPLY = "failed-to-apply"
    CONFIG_STATE_NOT_APPLIED = "not-applied"
    DEPLOYMENT_MODE_IMMEDIATE = "immediate"
    DEPLOYMENT_MODE_TIMER_AUTOMATIC = "timer-automatic"
    DEPLOYMENT_MODE_USER_ACK = "user-ack"
    REBOOT_REQUIRED_FALSE = "false"
    REBOOT_REQUIRED_NO = "no"
    REBOOT_REQUIRED_TRUE = "true"
    REBOOT_REQUIRED_YES = "yes"
class ConfigImpact(ManagedObject):
    """ConfigImpact managed object.

    Read-only MO (see mo_meta access "read-only") describing the impact of a
    pending configuration change: which chassis/server/object is affected,
    what changed, the config/deployment state, and whether a reboot is
    required. All property metadata below is generated from the UCS model.
    """

    consts = ConfigImpactConsts()
    # 'name' is the naming property; the rn is built from it ("impact-[name]").
    naming_props = set(['name'])

    mo_meta = MoMeta("ConfigImpact", "configImpact", "impact-[name]", VersionMeta.Version212a, "InputOutput", 0x3f, [], ["read-only"], ['configManagedEpImpactResponse'], [], [None])

    # Per-property metadata keyed by the python attribute name. The long raw
    # strings are validation regexes enumerating the allowed comma-separated
    # value lists for each property (generated, do not hand-edit).
    prop_meta = {
        "affected_chassis": MoPropertyMeta("affected_chassis", "affectedChassis", "string", VersionMeta.Version312b, MoPropertyMeta.READ_ONLY, None, 0, 256, None, [], []),
        "affected_obj": MoPropertyMeta("affected_obj", "affectedObj", "string", VersionMeta.Version212a, MoPropertyMeta.READ_ONLY, None, 0, 256, None, [], []),
        "affected_server": MoPropertyMeta("affected_server", "affectedServer", "string", VersionMeta.Version212a, MoPropertyMeta.READ_ONLY, None, 0, 256, None, [], []),
        "changes": MoPropertyMeta("changes", "changes", "string", VersionMeta.Version212a, MoPropertyMeta.READ_ONLY, None, None, None, r"""((defaultValue|boot-order|server-assignment|operational-policies|local-storage|server-identity|storage|networking|vnic-vhba-placement),){0,8}(defaultValue|boot-order|server-assignment|operational-policies|local-storage|server-identity|storage|networking|vnic-vhba-placement){0,1}""", [], []),
        "chassis_config_issues": MoPropertyMeta("chassis_config_issues", "chassisConfigIssues", "string", VersionMeta.Version312b, MoPropertyMeta.READ_ONLY, None, None, None, r"""((defaultValue|not-applicable|chassis-profile-not-supported|single-path-not-supported|invalid-cmc-version|migration|single-path-unsupported-cmc-version|single-path-operation-not-supported|firmware-version-mismatch|invalid-sas-exp-config-policy-reference|non-interrupt-fsm-running|insufficient-resources|compute-conn-invalid-hw-config|connection-management-unsupported-cmc-version|physical-requirement|single-path-expander-inoperable|connection-management-feature-not-supported|chassis-undiscovered|chassis-feature-capability-mismatch|resource-ownership-conflict|unsupported-sas-exp-config-settings|compute-conn-unsupported-cmc-version|connection-management-expander-inoperable|chassis-unavailable|invalid-chassis-pack|single-path-invalid-configuration|connection-management-operation-not-supported|missing-firmware-image|chassis-feature-capability-mismatch-non-fatal|single-path-feature-not-supported|compute-second-controller-unsupported-cmc-version|connection-management-not-supported|insufficient-power-budget),){0,32}(defaultValue|not-applicable|chassis-profile-not-supported|single-path-not-supported|invalid-cmc-version|migration|single-path-unsupported-cmc-version|single-path-operation-not-supported|firmware-version-mismatch|invalid-sas-exp-config-policy-reference|non-interrupt-fsm-running|insufficient-resources|compute-conn-invalid-hw-config|connection-management-unsupported-cmc-version|physical-requirement|single-path-expander-inoperable|connection-management-feature-not-supported|chassis-undiscovered|chassis-feature-capability-mismatch|resource-ownership-conflict|unsupported-sas-exp-config-settings|compute-conn-unsupported-cmc-version|connection-management-expander-inoperable|chassis-unavailable|invalid-chassis-pack|single-path-invalid-configuration|connection-management-operation-not-supported|missing-firmware-image|chassis-feature-capability-mismatch-non-fatal|single-path-feature-not-supported|compute-second-controller-unsupported-cmc-version|connection-management-not-supported|insufficient-power-budget){0,1}""", [], []),
        "chassis_config_qualifier": MoPropertyMeta("chassis_config_qualifier", "chassisConfigQualifier", "string", VersionMeta.Version312b, MoPropertyMeta.READ_ONLY, None, None, None, r"""((defaultValue|not-applicable|chassis-profile-not-supported|single-path-not-supported|invalid-cmc-version|migration|single-path-unsupported-cmc-version|single-path-operation-not-supported|firmware-version-mismatch|invalid-sas-exp-config-policy-reference|non-interrupt-fsm-running|insufficient-resources|compute-conn-invalid-hw-config|connection-management-unsupported-cmc-version|physical-requirement|single-path-expander-inoperable|connection-management-feature-not-supported|chassis-undiscovered|chassis-feature-capability-mismatch|resource-ownership-conflict|unsupported-sas-exp-config-settings|compute-conn-unsupported-cmc-version|connection-management-expander-inoperable|chassis-unavailable|invalid-chassis-pack|single-path-invalid-configuration|connection-management-operation-not-supported|missing-firmware-image|chassis-feature-capability-mismatch-non-fatal|single-path-feature-not-supported|compute-second-controller-unsupported-cmc-version|connection-management-not-supported|insufficient-power-budget),){0,32}(defaultValue|not-applicable|chassis-profile-not-supported|single-path-not-supported|invalid-cmc-version|migration|single-path-unsupported-cmc-version|single-path-operation-not-supported|firmware-version-mismatch|invalid-sas-exp-config-policy-reference|non-interrupt-fsm-running|insufficient-resources|compute-conn-invalid-hw-config|connection-management-unsupported-cmc-version|physical-requirement|single-path-expander-inoperable|connection-management-feature-not-supported|chassis-undiscovered|chassis-feature-capability-mismatch|resource-ownership-conflict|unsupported-sas-exp-config-settings|compute-conn-unsupported-cmc-version|connection-management-expander-inoperable|chassis-unavailable|invalid-chassis-pack|single-path-invalid-configuration|connection-management-operation-not-supported|missing-firmware-image|chassis-feature-capability-mismatch-non-fatal|single-path-feature-not-supported|compute-second-controller-unsupported-cmc-version|connection-management-not-supported|insufficient-power-budget){0,1}""", [], []),
        "chassis_config_state": MoPropertyMeta("chassis_config_state", "chassisConfigState", "string", VersionMeta.Version312b, MoPropertyMeta.READ_ONLY, None, None, None, None, ["applied", "applying", "failed-to-apply", "not-applied"], []),
        "child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version212a, MoPropertyMeta.INTERNAL, 0x2, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
        "config_issues": MoPropertyMeta("config_issues", "configIssues", "string", VersionMeta.Version212a, MoPropertyMeta.READ_ONLY, None, None, None, r"""((defaultValue|not-applicable|boot-order-pxe|wwnn-derivation-from-vhba|migration|incompat-bios-for-sriov-vnics|iscsi-initiator-ip-address|remote-policy|wwnn-assignment|processor-requirement|physical-requirement|hostimg-policy-invalid|vif-resources-overprovisioned|pinning-invalid|incompatible-number-of-local-disks|mac-derivation-virtualized-port|switch-virtual-if-capacity|invalid-wwn|missing-raid-key|board-controller-update-unsupported|insufficient-resources|compute-undiscovered|boot-configuration-invalid|incompatible-bios-image|iscsi-config|storage-path-configuration-error|resource-ownership-conflict|system-uuid-assignment|server-position-requirement|destructive-local-disk-config|imgsec-policy-invalid|pinning-vlan-mismatch|non-interrupt-fsm-running|vnic-capacity|adaptor-requirement|mac-address-assignment|qos-policy-invalid|insufficient-power-budget|boot-order-iscsi|vnic-vcon-provisioning-change|adaptor-protected-eth-capability|connection-placement|incompatible-disk-types|vnic-not-ha-ready|zone-capacity|adaptor-out-of-vifs|duplicate-address-conflict|vhba-capacity|boot-order-san-image-path|compute-unavailable|power-group-requirement|provsrv-policy-invalid|vnic-vlan-assignment-error|missing-firmware-image|wwpn-assignment|memory-requirement|vlan-port-capacity|bootip-policy-invalid|vfc-vnic-pvlan-conflict|named-vlan-inaccessible|adaptor-fcoe-capability|wwpn-derivation-virtualized-port|incompatible-raid-level|missing-primary-vlan|fcoe-capacity|dynamic-vf-vnic),){0,65}(defaultValue|not-applicable|boot-order-pxe|wwnn-derivation-from-vhba|migration|incompat-bios-for-sriov-vnics|iscsi-initiator-ip-address|remote-policy|wwnn-assignment|processor-requirement|physical-requirement|hostimg-policy-invalid|vif-resources-overprovisioned|pinning-invalid|incompatible-number-of-local-disks|mac-derivation-virtualized-port|switch-virtual-if-capacity|invalid-wwn|missing-raid-key|board-controller-update-unsupported|insufficient-resources|compute-undiscovered|boot-configuration-invalid|incompatible-bios-image|iscsi-config|storage-path-configuration-error|resource-ownership-conflict|system-uuid-assignment|server-position-requirement|destructive-local-disk-config|imgsec-policy-invalid|pinning-vlan-mismatch|non-interrupt-fsm-running|vnic-capacity|adaptor-requirement|mac-address-assignment|qos-policy-invalid|insufficient-power-budget|boot-order-iscsi|vnic-vcon-provisioning-change|adaptor-protected-eth-capability|connection-placement|incompatible-disk-types|vnic-not-ha-ready|zone-capacity|adaptor-out-of-vifs|duplicate-address-conflict|vhba-capacity|boot-order-san-image-path|compute-unavailable|power-group-requirement|provsrv-policy-invalid|vnic-vlan-assignment-error|missing-firmware-image|wwpn-assignment|memory-requirement|vlan-port-capacity|bootip-policy-invalid|vfc-vnic-pvlan-conflict|named-vlan-inaccessible|adaptor-fcoe-capability|wwpn-derivation-virtualized-port|incompatible-raid-level|missing-primary-vlan|fcoe-capacity|dynamic-vf-vnic){0,1}""", [], []),
        "config_qualifier": MoPropertyMeta("config_qualifier", "configQualifier", "string", VersionMeta.Version212a, MoPropertyMeta.READ_ONLY, None, None, None, r"""((defaultValue|not-applicable|boot-order-pxe|wwnn-derivation-from-vhba|migration|incompat-bios-for-sriov-vnics|iscsi-initiator-ip-address|remote-policy|wwnn-assignment|processor-requirement|physical-requirement|hostimg-policy-invalid|vif-resources-overprovisioned|pinning-invalid|incompatible-number-of-local-disks|mac-derivation-virtualized-port|switch-virtual-if-capacity|invalid-wwn|missing-raid-key|board-controller-update-unsupported|insufficient-resources|compute-undiscovered|boot-configuration-invalid|incompatible-bios-image|iscsi-config|storage-path-configuration-error|resource-ownership-conflict|system-uuid-assignment|server-position-requirement|destructive-local-disk-config|imgsec-policy-invalid|pinning-vlan-mismatch|non-interrupt-fsm-running|vnic-capacity|adaptor-requirement|mac-address-assignment|qos-policy-invalid|insufficient-power-budget|boot-order-iscsi|vnic-vcon-provisioning-change|adaptor-protected-eth-capability|connection-placement|incompatible-disk-types|vnic-not-ha-ready|zone-capacity|adaptor-out-of-vifs|duplicate-address-conflict|vhba-capacity|boot-order-san-image-path|compute-unavailable|power-group-requirement|provsrv-policy-invalid|vnic-vlan-assignment-error|missing-firmware-image|wwpn-assignment|memory-requirement|vlan-port-capacity|bootip-policy-invalid|vfc-vnic-pvlan-conflict|named-vlan-inaccessible|adaptor-fcoe-capability|wwpn-derivation-virtualized-port|incompatible-raid-level|missing-primary-vlan|fcoe-capacity|dynamic-vf-vnic),){0,65}(defaultValue|not-applicable|boot-order-pxe|wwnn-derivation-from-vhba|migration|incompat-bios-for-sriov-vnics|iscsi-initiator-ip-address|remote-policy|wwnn-assignment|processor-requirement|physical-requirement|hostimg-policy-invalid|vif-resources-overprovisioned|pinning-invalid|incompatible-number-of-local-disks|mac-derivation-virtualized-port|switch-virtual-if-capacity|invalid-wwn|missing-raid-key|board-controller-update-unsupported|insufficient-resources|compute-undiscovered|boot-configuration-invalid|incompatible-bios-image|iscsi-config|storage-path-configuration-error|resource-ownership-conflict|system-uuid-assignment|server-position-requirement|destructive-local-disk-config|imgsec-policy-invalid|pinning-vlan-mismatch|non-interrupt-fsm-running|vnic-capacity|adaptor-requirement|mac-address-assignment|qos-policy-invalid|insufficient-power-budget|boot-order-iscsi|vnic-vcon-provisioning-change|adaptor-protected-eth-capability|connection-placement|incompatible-disk-types|vnic-not-ha-ready|zone-capacity|adaptor-out-of-vifs|duplicate-address-conflict|vhba-capacity|boot-order-san-image-path|compute-unavailable|power-group-requirement|provsrv-policy-invalid|vnic-vlan-assignment-error|missing-firmware-image|wwpn-assignment|memory-requirement|vlan-port-capacity|bootip-policy-invalid|vfc-vnic-pvlan-conflict|named-vlan-inaccessible|adaptor-fcoe-capability|wwpn-derivation-virtualized-port|incompatible-raid-level|missing-primary-vlan|fcoe-capacity|dynamic-vf-vnic){0,1}""", [], []),
        "config_state": MoPropertyMeta("config_state", "configState", "string", VersionMeta.Version212a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["applied", "applying", "failed-to-apply", "not-applied"], []),
        "deployment_mode": MoPropertyMeta("deployment_mode", "deploymentMode", "string", VersionMeta.Version212a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["immediate", "timer-automatic", "user-ack"], []),
        "dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version212a, MoPropertyMeta.READ_ONLY, 0x4, 0, 256, None, [], []),
        "name": MoPropertyMeta("name", "name", "string", VersionMeta.Version212a, MoPropertyMeta.NAMING, 0x8, 1, 510, None, [], []),
        "reboot_required": MoPropertyMeta("reboot_required", "rebootRequired", "string", VersionMeta.Version212a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["false", "no", "true", "yes"], []),
        "rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version212a, MoPropertyMeta.READ_ONLY, 0x10, 0, 256, None, [], []),
        "sacl": MoPropertyMeta("sacl", "sacl", "string", VersionMeta.Version302c, MoPropertyMeta.READ_ONLY, None, None, None, r"""((none|del|mod|addchild|cascade),){0,4}(none|del|mod|addchild|cascade){0,1}""", [], []),
        "status": MoPropertyMeta("status", "status", "string", VersionMeta.Version212a, MoPropertyMeta.READ_WRITE, 0x20, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
    }

    # Maps the XML attribute name to the corresponding python attribute name.
    prop_map = {
        "affectedChassis": "affected_chassis",
        "affectedObj": "affected_obj",
        "affectedServer": "affected_server",
        "changes": "changes",
        "chassisConfigIssues": "chassis_config_issues",
        "chassisConfigQualifier": "chassis_config_qualifier",
        "chassisConfigState": "chassis_config_state",
        "childAction": "child_action",
        "configIssues": "config_issues",
        "configQualifier": "config_qualifier",
        "configState": "config_state",
        "deploymentMode": "deployment_mode",
        "dn": "dn",
        "name": "name",
        "rebootRequired": "reboot_required",
        "rn": "rn",
        "sacl": "sacl",
        "status": "status",
    }

    def __init__(self, parent_mo_or_dn, name, **kwargs):
        """Create a ConfigImpact MO under *parent_mo_or_dn*.

        *name* is the naming property; all other properties default to None
        and may be overridden via **kwargs (applied by ManagedObject).
        """
        self._dirty_mask = 0
        self.name = name
        self.affected_chassis = None
        self.affected_obj = None
        self.affected_server = None
        self.changes = None
        self.chassis_config_issues = None
        self.chassis_config_qualifier = None
        self.chassis_config_state = None
        self.child_action = None
        self.config_issues = None
        self.config_qualifier = None
        self.config_state = None
        self.deployment_mode = None
        self.reboot_required = None
        self.sacl = None
        self.status = None

        ManagedObject.__init__(self, "ConfigImpact", parent_mo_or_dn, **kwargs)
| 170.40625
| 3,146
| 0.797787
| 1,947
| 16,359
| 6.632255
| 0.130971
| 0.02602
| 0.02602
| 0.026175
| 0.793154
| 0.784945
| 0.768373
| 0.763494
| 0.753891
| 0.751723
| 0
| 0.008584
| 0.060028
| 16,359
| 95
| 3,147
| 172.2
| 0.831176
| 0.006357
| 0
| 0
| 0
| 0.097561
| 0.741566
| 0.661044
| 0
| 0
| 0.001293
| 0
| 0
| 1
| 0.012195
| false
| 0
| 0.036585
| 0
| 0.317073
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
31b691afdef9bb96b9066102d3b4411c5cf92c37
| 13,057
|
py
|
Python
|
src/biopy/models/ExprAutoEncoders.py
|
BioPyTeam/biopy
|
5c1444280d0a5098b61a99d96dc2825259c7ced5
|
[
"MIT"
] | null | null | null |
src/biopy/models/ExprAutoEncoders.py
|
BioPyTeam/biopy
|
5c1444280d0a5098b61a99d96dc2825259c7ced5
|
[
"MIT"
] | null | null | null |
src/biopy/models/ExprAutoEncoders.py
|
BioPyTeam/biopy
|
5c1444280d0a5098b61a99d96dc2825259c7ced5
|
[
"MIT"
] | 2
|
2021-07-23T09:30:58.000Z
|
2021-07-23T09:33:25.000Z
|
import torch
from torch import nn
from ..utils import ReverseLayerF
######################### Encoder and Decoder for VAE/AE/SAAE ##################
class VEncoder(nn.Module):
    """Probabilistic encoder for the VAE/AAE models.

    Maps an input vector to the mean and log-variance of a diagonal
    Gaussian over a ``hidden_size``-dimensional latent space.
    """

    def __init__(self, input_size=58276, hidden_size=16):
        super().__init__()
        # Shared trunk: three Linear -> ReLU -> BatchNorm stages.
        trunk = [
            nn.Linear(input_size, 1024),
            nn.ReLU(inplace=True),
            nn.BatchNorm1d(1024),
            nn.Linear(1024, 1024),
            nn.ReLU(inplace=True),
            nn.BatchNorm1d(1024),
            nn.Linear(1024, 512),
            nn.ReLU(inplace=True),
            nn.BatchNorm1d(512),
        ]
        self.enc = nn.Sequential(*trunk)
        # Two parallel heads producing the Gaussian parameters.
        self.enc_mean = nn.Linear(512, hidden_size)
        self.enc_logvar = nn.Linear(512, hidden_size)

    def forward(self, x):
        """Return ``(mean, log_var)`` of the latent Gaussian for batch *x*."""
        features = self.enc(x)
        return self.enc_mean(features), self.enc_logvar(features)
class Decoder(nn.Module):
    """Mirror of the encoder: expands a latent code back to data space."""

    def __init__(self, output_size=58276, hidden_size=16):
        super().__init__()
        layers = [
            nn.Linear(hidden_size, 512),
            nn.ReLU(inplace=True),
            nn.BatchNorm1d(512),
            nn.Linear(512, 1024),
            nn.ReLU(inplace=True),
            nn.BatchNorm1d(1024),
            nn.Linear(1024, 1024),
            nn.ReLU(inplace=True),
            nn.BatchNorm1d(1024),
            nn.Linear(1024, output_size),
        ]
        self.dec = nn.Sequential(*layers)

    def forward(self, x):
        """Decode a batch of latent codes *x* into reconstructed vectors."""
        return self.dec(x)
###############################################################
######################### VAE #####################
class VAE(nn.Module):
    """Variational autoencoder over expression data.

    ``forward`` returns ``(reconstruction, z, mean, log_var)`` so the caller
    can compute both the reconstruction loss and the KL term.
    """

    ae_type = 'VAE'

    def __init__(self, data_size=131, hidden_size=16, **kwargs):
        super().__init__()
        self.encoder = VEncoder(input_size=data_size, hidden_size=hidden_size)
        self.decoder = Decoder(output_size=data_size, hidden_size=hidden_size)

    def forward(self, x):
        mean, log_var = self.encoder(x)
        latent = self.sample(mean, log_var)
        reconstruction = self.decoder(latent)
        return reconstruction, latent, mean, log_var

    def sample(self, mean, log_var):
        """Reparameterization trick: z = mean + eps * std with eps ~ N(0, I)."""
        std = torch.exp(log_var * 0.5)
        noise = torch.randn_like(std)
        return noise.mul(std).add_(mean)

    def encode_and_sample(self, x):
        """Encode *x* and draw a single latent sample."""
        return self.sample(*self.encoder(x))
######################### AAE #####################
class DoubleDiscriminator(nn.Module):
    """Latent space discriminator with two hidden layers.

    When *alpha* is supplied, the input passes through a gradient-reversal
    layer (ReverseLayerF) before classification, for adversarial training.
    """

    def __init__(self, hidden_size, n_out=2, n_hidden=80, **kwargs):
        super().__init__()
        self.nz = hidden_size
        self.n_hidden = n_hidden
        self.n_out = n_out
        stack = [
            nn.Linear(hidden_size, n_hidden),
            nn.ReLU(inplace=True),
            nn.BatchNorm1d(n_hidden),
            nn.Linear(n_hidden, n_hidden // 2),
            nn.ReLU(inplace=True),
            nn.BatchNorm1d(n_hidden // 2),
            nn.Linear(n_hidden // 2, n_out),
        ]
        self.net = nn.Sequential(*stack)

    def forward(self, x, alpha=None):
        if alpha is not None:
            # Identity on the forward pass, gradient scaled by -alpha backward.
            x = ReverseLayerF.apply(x, alpha)
        return self.net(x)
class Discriminator(nn.Module):
    """Small latent-space discriminator with a single hidden layer."""

    def __init__(self, hidden_size, n_out=2, **kwargs):
        super().__init__()
        width = max(hidden_size // 3, 3)  # hidden layer never narrower than 3
        self.net = nn.Sequential(
            nn.Linear(hidden_size, width),
            nn.ReLU(inplace=True),
            nn.Linear(width, n_out),
        )

    def forward(self, x, alpha=None, **kwargs):
        if alpha is not None:
            # Gradient-reversal layer for adversarial training.
            x = ReverseLayerF.apply(x, alpha)
        return self.net(x)
class AAE(nn.Module):
    """Adversarial autoencoder: VAE-style encoder/decoder plus a small
    discriminator over the latent space."""

    ae_type = 'AAE'

    def __init__(self, data_size=58276, hidden_size=16, num_distrib=2, **kwargs):
        super().__init__()
        self.encoder = VEncoder(input_size=data_size, hidden_size=hidden_size)
        self.decoder = Decoder(output_size=data_size, hidden_size=hidden_size)
        self.discriminator = nn.Sequential(
            nn.Linear(hidden_size, 10),
            nn.ReLU(inplace=True),
            nn.Linear(10, num_distrib),
        )

    def forward(self, x, input_is_z=False, alpha=None):
        """Three modes:

        - ``input_is_z``: *x* is already a latent code -> discriminator logits.
        - ``alpha`` given: encode, reverse gradients, return discriminator logits.
        - otherwise: full autoencoder pass -> ``(recon, z, mean, log_var)``.
        """
        if input_is_z:
            return self.discriminator(x)
        mean, log_var = self.encoder(x)
        z = self.sample(mean, log_var)
        if alpha is not None:
            reversed_z = ReverseLayerF.apply(z, alpha)
            return self.discriminator(reversed_z)
        return self.decoder(z), z, mean, log_var

    def sample(self, mean, log_var):
        """Reparameterization trick: z = mean + eps * std with eps ~ N(0, I)."""
        std = torch.exp(log_var * 0.5)
        noise = torch.randn_like(std)
        return noise.mul(std).add_(mean)

    def encode_and_sample(self, x):
        """Encode *x* and draw a single latent sample."""
        return self.sample(*self.encoder(x))
######################### SAAE #######################
class ClassDiscriminatorBig(nn.Module):
    """Latent-space discriminator conditioned on (repeated) one-hot class labels."""

    def __init__(self, hidden_size, n_hidden=100, n_out=2, num_classes=5, repeat_labels_ntimes=20, **kwargs):
        super().__init__()
        self.nz = hidden_size
        self.repeat_labels_ntimes = repeat_labels_ntimes
        self.n_hidden = n_hidden
        self.n_out = n_out
        self.num_classes = num_classes
        # Labels are one-hot encoded and repeated to give them more weight
        # relative to the latent dimensions.
        in_features = hidden_size + num_classes * repeat_labels_ntimes
        self.net = nn.Sequential(
            nn.Linear(in_features, n_hidden),
            nn.ReLU(inplace=True),
            nn.BatchNorm1d(n_hidden),
            nn.Linear(n_hidden, n_hidden),
            nn.ReLU(inplace=True),
            nn.BatchNorm1d(n_hidden),
            nn.Linear(n_hidden, n_hidden // 2),
            nn.ReLU(inplace=True),
            nn.Linear(n_hidden // 2, n_out),
        )

    def forward(self, x, alpha=None, labels=None):
        """Concatenate repeated one-hot labels with x and classify; alpha enables gradient reversal."""
        if alpha is not None:
            x = ReverseLayerF.apply(x, alpha)
        onehot = nn.functional.one_hot(labels, num_classes=self.num_classes)
        onehot = onehot.repeat_interleave(self.repeat_labels_ntimes, 1)
        return self.net(torch.cat((onehot, x), 1))
class ClassDiscriminator(nn.Module):
    """Small label-conditioned discriminator over the latent space."""

    def __init__(self, hidden_size, n_out=2, num_classes=5, **kwargs):
        super().__init__()
        middle = max(hidden_size // 3, 3)
        self.num_classes = num_classes
        self.net = nn.Sequential(
            nn.Linear(hidden_size + num_classes, middle),
            nn.ReLU(inplace=True),
            nn.Linear(middle, n_out),
        )

    def forward(self, x, alpha=None, labels=None):
        """Concatenate one-hot labels with x and classify; alpha enables gradient reversal."""
        if alpha is not None:
            x = ReverseLayerF.apply(x, alpha)
        onehot = nn.functional.one_hot(labels, num_classes=self.num_classes)
        return self.net(torch.cat((onehot, x), 1))
class SupervisedAAE(nn.Module):
    """Supervised adversarial autoencoder: the discriminator sees one-hot labels."""
    ae_type = 'SAAE'

    def __init__(self, data_size=131, hidden_size=16, num_distrib=2, num_classes=5, discriminator=None, **kwargs):
        super().__init__()
        self.num_classes = num_classes
        self.encoder = VEncoder(input_size=data_size, hidden_size=hidden_size)
        self.decoder = Decoder(output_size=data_size, hidden_size=hidden_size)
        if discriminator is None:
            discriminator = nn.Sequential(
                nn.Linear(hidden_size + num_classes, 20),
                nn.ReLU(inplace=True),
                nn.Linear(20, num_distrib),
            )
        self.discriminator = discriminator

    def forward(self, x, input_is_z=False, alpha=None, labels=None):
        """Discriminate a supplied latent, adversarially discriminate an encoded
        latent (alpha set), or run a full reconstruction pass."""
        if input_is_z:
            # NOTE(review): this branch one-hot encodes `labels - 1` while the
            # encoder branch below uses `labels` directly — confirm the two
            # callers really use different label conventions (0- vs 1-based).
            onehot = nn.functional.one_hot(labels - 1, num_classes=self.num_classes)
            return self.discriminator(torch.cat((onehot, x), 1))
        mean, log_var = self.encoder(x)
        z = self.sample(mean, log_var)
        if alpha is not None:
            reversed_z = ReverseLayerF.apply(z, alpha)
            onehot = nn.functional.one_hot(labels, num_classes=self.num_classes)
            return self.discriminator(torch.cat((onehot, reversed_z), 1))
        return self.decoder(z), z, mean, log_var

    def sample(self, mean, log_var):
        """Reparameterization trick: z = mean + eps * exp(0.5 * log_var)."""
        sigma = torch.exp(0.5 * log_var)
        noise = torch.randn_like(sigma)
        return noise.mul(sigma).add_(mean)

    def encode_and_sample(self, x):
        """Encode x and return one latent sample."""
        mean, log_var = self.encoder(x)
        return self.sample(mean, log_var)
######################### Encoder for plain AE #####################
class Encoder(nn.Module):
    """Deterministic encoder MLP: input -> 1024 -> 512 -> hidden_size."""

    def __init__(self, input_size=58276, hidden_size=16):
        super().__init__()
        layers = [
            nn.Linear(input_size, 1024),
            nn.ReLU(inplace=True),
            nn.BatchNorm1d(1024),
            nn.Linear(1024, 512),
            nn.ReLU(inplace=True),
            nn.BatchNorm1d(512),
            nn.Linear(512, hidden_size),
        ]
        self.enc = nn.Sequential(*layers)

    def forward(self, x):
        """Map a batch of inputs to hidden_size-dimensional codes."""
        return self.enc(x)
######################### Plain AE #####################
class AE(nn.Module):
    """Plain (non-variational) autoencoder."""

    def __init__(self, data_size=131, hidden_size=16):
        super().__init__()
        self.encoder = Encoder(input_size=data_size, hidden_size=hidden_size)
        self.decoder = Decoder(output_size=data_size, hidden_size=hidden_size)

    def forward(self, x):
        """Return (reconstruction, code, 0, 0); trailing zeros keep the VAE return shape."""
        code = self.encoder(x)
        return self.decoder(code), code, 0, 0

    def encode_and_sample(self, x):
        """No sampling here — returns the deterministic code (name kept for API parity)."""
        return self.encoder(x)
######################## small Enc/Dec and AAE ###################
class VEncoder_small(nn.Module):
    """Small variational encoder: one hidden layer with separate mean/log-var heads."""

    def __init__(self, input_size=131, hidden_size=16):
        super().__init__()
        self.enc = nn.Sequential(
            nn.Linear(input_size, 50),
            nn.ReLU(),
            nn.BatchNorm1d(50),
        )
        self.enc_mean = nn.Linear(50, hidden_size)
        self.enc_logvar = nn.Linear(50, hidden_size)

    def forward(self, x):
        """Return (mean, log_var) of the approximate posterior for x."""
        hidden = self.enc(x)
        return self.enc_mean(hidden), self.enc_logvar(hidden)
class Decoder_small(nn.Module):
    """Small decoder: hidden_size -> 50 -> output_size (no output activation)."""

    def __init__(self, output_size=131, hidden_size=16):
        super().__init__()
        self.dec = nn.Sequential(
            nn.Linear(hidden_size, 50),
            nn.ReLU(),
            nn.Linear(50, output_size),
        )

    def forward(self, x):
        """Decode latent codes back to data space."""
        return self.dec(x)
class AAE_small(nn.Module):
    """Adversarial autoencoder built from the small encoder/decoder variants."""
    ae_type = 'AAE'

    def __init__(self, data_size=131, hidden_size=16, num_distrib=2):
        super().__init__()
        self.encoder = VEncoder_small(input_size=data_size, hidden_size=hidden_size)
        self.decoder = Decoder_small(output_size=data_size, hidden_size=hidden_size)
        self.discriminator = nn.Sequential(
            nn.Linear(hidden_size, 10),
            nn.ReLU(inplace=True),
            nn.Linear(10, num_distrib),
        )

    def forward(self, x, input_is_z=False, alpha=None):
        """Same three-mode dispatch as AAE.

        NOTE(review): the reconstruction branch returns (recon, mean, log_var)
        without z, unlike AAE which returns (recon, z, mean, log_var) —
        confirm callers expect the 3-tuple before unifying.
        """
        if input_is_z:
            return self.discriminator(x)
        mean, log_var = self.encoder(x)
        z = self.sample(mean, log_var)
        if alpha is not None:
            return self.discriminator(ReverseLayerF.apply(z, alpha))
        return self.decoder(z), mean, log_var

    def sample(self, mean, log_var):
        """Reparameterization trick: z = mean + eps * exp(0.5 * log_var)."""
        sigma = torch.exp(0.5 * log_var)
        noise = torch.randn_like(sigma)
        return noise.mul(sigma).add_(mean)

    def encode_and_sample(self, x):
        """Encode x and return one latent sample."""
        mean, log_var = self.encoder(x)
        return self.sample(mean, log_var)
| 33.479487
| 114
| 0.506318
| 1,530
| 13,057
| 4.080392
| 0.067974
| 0.083293
| 0.041647
| 0.049015
| 0.886593
| 0.870095
| 0.848951
| 0.81227
| 0.783277
| 0.761813
| 0
| 0.027821
| 0.364096
| 13,057
| 390
| 115
| 33.479487
| 0.724076
| 0.018151
| 0
| 0.749104
| 0
| 0
| 0.001046
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.132616
| false
| 0
| 0.010753
| 0.003584
| 0.311828
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
31ba3bcd9ae0c130ca8a34bc3eeb817c85e0fef8
| 8,622
|
py
|
Python
|
parser/fase2/team26/G26/optimizar.py
|
webdev188/tytus
|
847071edb17b218f51bb969d335a8ec093d13f94
|
[
"MIT"
] | 35
|
2020-12-07T03:11:43.000Z
|
2021-04-15T17:38:16.000Z
|
parser/fase2/team26/G26/optimizar.py
|
webdev188/tytus
|
847071edb17b218f51bb969d335a8ec093d13f94
|
[
"MIT"
] | 47
|
2020-12-09T01:29:09.000Z
|
2021-01-13T05:37:50.000Z
|
parser/fase2/team26/G26/optimizar.py
|
webdev188/tytus
|
847071edb17b218f51bb969d335a8ec093d13f94
|
[
"MIT"
] | 556
|
2020-12-07T03:13:31.000Z
|
2021-06-17T17:41:10.000Z
|
reporte = ""
def optimizar(texto):
    """Apply optimization rules 12-18 to the intermediate-code text, in order."""
    for paso in (optimizarr12, optimizarr13, optimizarr14, optimizarr15,
                 optimizarr16, optimizarr17, optimizarr18):
        texto = paso(texto)
    return texto
#Optimizaciones----------------------------
def optimizarr12(texto):
    """Apply rule 12 line-by-line; lines containing '(' or that the rule cannot
    parse are passed through unchanged."""
    lineas = []
    for linea in texto.split("\n"):
        if "(" not in linea:
            try:
                lineas.append(regla12(linea))
            except Exception:
                # Rule could not parse this line (e.g. no binary '+'); keep it.
                # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
                # are no longer swallowed.
                lineas.append(linea)
        else:
            lineas.append(linea)
    return "\n".join(lineas) + "\n"
def optimizarr13(texto):
    """Apply rule 13 line-by-line; lines containing '(' or that the rule cannot
    parse are passed through unchanged."""
    lineas = []
    for linea in texto.split("\n"):
        if "(" not in linea:
            try:
                lineas.append(regla13(linea))
            except Exception:
                # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
                # are no longer swallowed.
                lineas.append(linea)
        else:
            lineas.append(linea)
    return "\n".join(lineas) + "\n"
def optimizarr14(texto):
    """Apply rule 14 line-by-line; lines containing '(' or that the rule cannot
    parse are passed through unchanged."""
    lineas = []
    for linea in texto.split("\n"):
        if "(" not in linea:
            try:
                lineas.append(regla14(linea))
            except Exception:
                # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
                # are no longer swallowed.
                lineas.append(linea)
        else:
            lineas.append(linea)
    return "\n".join(lineas) + "\n"
def optimizarr15(texto):
    """Apply rule 15 line-by-line; lines containing '(' or that the rule cannot
    parse are passed through unchanged."""
    lineas = []
    for linea in texto.split("\n"):
        if "(" not in linea:
            try:
                lineas.append(regla15(linea))
            except Exception:
                # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
                # are no longer swallowed.
                lineas.append(linea)
        else:
            lineas.append(linea)
    return "\n".join(lineas) + "\n"
def optimizarr16(texto):
    """Apply rule 16 line-by-line; lines containing '(' or that the rule cannot
    parse are passed through unchanged."""
    lineas = []
    for linea in texto.split("\n"):
        if "(" not in linea:
            try:
                lineas.append(regla16(linea))
            except Exception:
                # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
                # are no longer swallowed.
                lineas.append(linea)
        else:
            lineas.append(linea)
    return "\n".join(lineas) + "\n"
def optimizarr17(texto):
    """Apply rule 17 line-by-line; lines containing '(' or that the rule cannot
    parse are passed through unchanged."""
    lineas = []
    for linea in texto.split("\n"):
        if "(" not in linea:
            try:
                lineas.append(regla17(linea))
            except Exception:
                # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
                # are no longer swallowed.
                lineas.append(linea)
        else:
            lineas.append(linea)
    return "\n".join(lineas) + "\n"
def optimizarr18(texto):
    """Apply rule 18 line-by-line; lines containing '(' or that the rule cannot
    parse are passed through unchanged."""
    lineas = []
    for linea in texto.split("\n"):
        if "(" not in linea:
            try:
                lineas.append(regla18(linea))
            except Exception:
                # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
                # are no longer swallowed.
                lineas.append(linea)
        else:
            lineas.append(linea)
    return "\n".join(lineas) + "\n"
#Metodos para optimizar-----------------------
def operandos(texto):
    """Return the right-hand side of an assignment with '=' and spaces removed.

    "t1 = a + b" -> "a+b"; returns "" when the line has no '='.
    """
    pos = texto.find("=")
    if pos == -1:
        return ""
    return "".join(c for c in texto[pos:] if c != "=" and c != " ")
def operandosr8(texto):
    """Return the left-hand side of an assignment with spaces removed.

    "t1 = a + b" -> "t1"; without '=', the whole line minus spaces is returned.
    """
    pos = texto.find("=")
    izquierda = texto if pos == -1 else texto[:pos]
    return izquierda.replace(" ", "")
def regla8(linea):
    """Rule 8: `x = x + 0` / `x = 0 + x` is redundant.

    Reports the elimination and returns False (drop the line); returns True to
    keep it. Raises IndexError when the line has no binary '+'.
    """
    original = linea
    izquierda = operandosr8(linea)
    op = operandos(linea).split("+")
    if (izquierda == op[0] and op[1] == "0") or (izquierda == op[1] and op[0] == "0"):
        reporteRegla8(original)
        return False
    return True
def reporteRegla8(l):
    """Log to the global report that rule 8 removed line `l`."""
    global reporte
    reporte += "Regla 8:\nSe elimino: " + l + "\n"


def reporteRegla9(l):
    """Log to the global report that rule 9 removed line `l`."""
    global reporte
    reporte += "Regla 9:\nSe elimino: " + l + "\n"


def reporteRegla10(l):
    """Log to the global report that rule 10 removed line `l`."""
    global reporte
    reporte += "Regla 10:\nSe elimino: " + l + "\n"


def reporteRegla11(l):
    """Log to the global report that rule 11 removed line `l`."""
    global reporte
    reporte += "Regla 11:\nSe elimino: " + l + "\n"
def regla9(linea):
    """Rule 9: `x = x - 0` is redundant -> report and return False; else True."""
    original = linea
    izquierda = operandosr8(linea)
    op = operandos(linea).split("-")
    if izquierda == op[0] and op[1] == "0":
        reporteRegla9(original)
        return False
    return True
def regla10(linea):
    """Rule 10: `x = x * 1` / `x = 1 * x` is redundant -> report and return False."""
    original = linea
    izquierda = operandosr8(linea)
    op = operandos(linea).split("*")
    if (izquierda == op[0] and op[1] == "1") or (izquierda == op[1] and op[0] == "1"):
        reporteRegla10(original)
        return False
    return True
def regla11(linea):
    """Rule 11: `x = x / 1` is redundant -> report and return False; else True."""
    original = linea
    izquierda = operandosr8(linea)
    op = operandos(linea).split("/")
    if izquierda == op[0] and op[1] == "1":
        reporteRegla11(original)
        return False
    return True
def regla12(linea):
    """Rule 12: x = 0 + y or x = y + 0 -> x = y.

    Returns the (possibly simplified) three-address line and logs the
    substitution. Raises IndexError when the line has no binary '+', which
    optimizarr12 catches to keep the line unchanged.
    """
    global reporte
    original = linea
    opIzq = operandosr8(linea)
    operacion = operandos(linea).split("+")
    if operacion[0] == "0":
        reporte += "Regla 12:\nSe sustituyo: " + original + " por -> " + opIzq + " = " + operacion[1] + "\n"
        return " " + opIzq + " = " + operacion[1]
    elif operacion[1] == "0":
        # Bug fix: the report previously printed operacion[1] ("0") instead of
        # the operand that is actually kept (operacion[0]).
        reporte += "Regla 12:\nSe sustituyo: " + original + " por -> " + opIzq + " = " + operacion[0] + "\n"
        return " " + opIzq + " = " + operacion[0]
    else:
        return " " + opIzq + " = " + operacion[0] + "+" + operacion[1]
def regla13(linea):
    """Rule 13: x = y - 0 -> x = y.

    Raises IndexError when the line has no binary '-' (caught by optimizarr13).
    """
    global reporte
    original = linea
    opIzq = operandosr8(linea)
    operacion = operandos(linea).split("-")
    if operacion[1] == "0":
        # Bug fix: the report now shows the kept operand (operacion[0]), not "0".
        reporte += "Regla 13:\nSe sustituyo: " + original + " por -> " + opIzq + " = " + operacion[0] + "\n"
        return " " + opIzq + " = " + operacion[0]
    else:
        return " " + opIzq + " = " + operacion[0] + " - " + operacion[1]
def regla14(linea):
    """Rule 14: x = 1 * y or x = y * 1 -> x = y.

    Raises IndexError when the line has no binary '*' (caught by optimizarr14).
    """
    global reporte
    original = linea
    opIzq = operandosr8(linea)
    operacion = operandos(linea).split("*")
    if operacion[0] == "1":
        reporte += "Regla 14:\nSe sustituyo: " + original + " por -> " + opIzq + " = " + operacion[1] + "\n"
        return " " + opIzq + " = " + operacion[1]
    elif operacion[1] == "1":
        # Bug fix: the report previously showed operacion[1] ("1") rather than
        # the operand actually kept (operacion[0]).
        reporte += "Regla 14:\nSe sustituyo: " + original + " por -> " + opIzq + " = " + operacion[0] + "\n"
        return " " + opIzq + " = " + operacion[0]
    else:
        return " " + opIzq + " = " + operacion[0] + "*" + operacion[1]
def regla15(linea):
    """Rule 15: x = y / 1 -> x = y.

    Raises IndexError when the line has no binary '/' (caught by optimizarr15).
    """
    global reporte
    original = linea
    opIzq = operandosr8(linea)
    operacion = operandos(linea).split("/")
    if operacion[1] == "1":
        # Bug fix: the report now shows the kept operand (operacion[0]), not "1".
        reporte += "Regla 15:\nSe sustituyo: " + original + " por -> " + opIzq + " = " + operacion[0] + "\n"
        return " " + opIzq + " = " + operacion[0]
    else:
        return " " + opIzq + " = " + operacion[0] + " / " + operacion[1]
def regla16(linea):
    """Rule 16 (strength reduction): x = 2 * y -> x = y + y.

    Raises IndexError when the line has no binary '*' (caught by optimizarr16).
    """
    global reporte
    original = linea
    opIzq = operandosr8(linea)
    operacion = operandos(linea).split("*")
    if operacion[0] == "2":
        nuevo = operacion[1] + " + " + operacion[1]
        # Bug fix: the report now shows the actual replacement (y + y).
        reporte += "Regla 16:\nSe sustituyo: " + original + " por -> " + opIzq + " = " + nuevo + "\n"
        return " " + opIzq + " = " + nuevo
    elif operacion[1] == "2":
        nuevo = operacion[0] + " + " + operacion[0]
        reporte += "Regla 16:\nSe sustituyo: " + original + " por -> " + opIzq + " = " + nuevo + "\n"
        return " " + opIzq + " = " + nuevo
    else:
        return " " + opIzq + " = " + operacion[0] + "*" + operacion[1]
def regla17(linea):
    """Rule 17: x = 0 * y or x = y * 0 -> x = 0.

    Raises IndexError when the line has no binary '*' (caught by optimizarr17).
    """
    global reporte
    original = linea
    opIzq = operandosr8(linea)
    operacion = operandos(linea).split("*")
    # Indexing [1] here preserves the original behaviour of raising IndexError
    # for lines without a binary '*', which the caller relies on as a fallback.
    izq, der = operacion[0], operacion[1]
    if izq == "0" or der == "0":
        # Bug fix: the report now shows the actual replacement (0), not operacion[1].
        reporte += "Regla 17:\nSe sustituyo: " + original + " por -> " + opIzq + " = 0\n"
        return " " + opIzq + " = 0"
    return " " + opIzq + " = " + izq + "*" + der
def regla18(linea):
    """Rule 18: x = 0 / y -> x = 0.

    Raises IndexError when the line has no binary '/' (caught by optimizarr18).
    """
    global reporte
    original = linea
    opIzq = operandosr8(linea)
    operacion = operandos(linea).split("/")
    # Indexing [1] preserves the original IndexError fallback for lines
    # without a binary '/'.
    izq, der = operacion[0], operacion[1]
    if izq == "0":
        # Bug fix: the report now shows the actual replacement (0), not operacion[1].
        reporte += "Regla 18:\nSe sustituyo: " + original + " por -> " + opIzq + " = 0\n"
        return " " + opIzq + " = 0"
    return " " + opIzq + " = " + izq + "/" + der
def getreporte():
    """Return the report text accumulated by the optimization rules."""
    global reporte
    return reporte
| 29.426621
| 107
| 0.503016
| 859
| 8,622
| 5.048894
| 0.087311
| 0.078395
| 0.055338
| 0.058105
| 0.845054
| 0.782799
| 0.768042
| 0.731151
| 0.731151
| 0.731151
| 0
| 0.034307
| 0.340756
| 8,622
| 292
| 108
| 29.527397
| 0.728712
| 0.01009
| 0
| 0.744275
| 0
| 0
| 0.097973
| 0
| 0
| 0
| 0
| 0.003425
| 0
| 1
| 0.099237
| false
| 0
| 0
| 0.003817
| 0.248092
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
9ee1ce8bfc1bc99d354d6a02c7af84841f5fad29
| 13,382
|
py
|
Python
|
utils/utils_model_eval.py
|
yyht/Max-Mahalanobis-Training
|
f97103e72050a2989435aa46a6a3a401a8d2cf9b
|
[
"Apache-2.0"
] | 107
|
2020-06-15T09:55:11.000Z
|
2020-12-20T11:27:11.000Z
|
pytorch_ares/third_party/Max-Mahalanobis-Training/utils/utils_model_eval.py
|
haichen-ber/ares
|
474d549aa402b4cdd5e3629d23d035c31b60a360
|
[
"MIT"
] | 7
|
2020-06-14T03:00:18.000Z
|
2020-12-07T07:10:10.000Z
|
pytorch_ares/third_party/Max-Mahalanobis-Training/utils/utils_model_eval.py
|
haichen-ber/ares
|
474d549aa402b4cdd5e3629d23d035c31b60a360
|
[
"MIT"
] | 19
|
2020-06-14T08:35:33.000Z
|
2020-12-19T13:43:41.000Z
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from distutils.version import LooseVersion
import logging
import math
import numpy as np
import tensorflow as tf
from cleverhans.utils import batch_indices, _ArgsWrapper, create_logger
# Module-level logger following the cleverhans naming convention.
_logger = create_logger("cleverhans.utils.tf")
_logger.setLevel(logging.INFO)
# Constant 0.0 used to build boolean masks in ensemble_diversity().
zero = tf.constant(0, dtype=tf.float32)
num_classes = 10  # assumes a 10-class dataset (e.g. CIFAR-10/MNIST) — TODO confirm
log_offset = 1e-20  # numerical-stability offset for logs (not used in this chunk)
det_offset = 1e-6  # added to the Gram matrix diagonal before logdet for invertibility
def ensemble_diversity(y_true, y_pred, num_model):
    """Per-example ensemble diversity: log-determinant of the Gram matrix of
    the members' normalized non-true-class prediction vectors.

    :param y_true: one-hot labels tiled across members — presumably shape
        (batch, num_classes * num_model); confirm against callers
    :param y_pred: member predictions with the same tiled layout
    :param num_model: number of ensemble members
    :return: 1-D tensor (batch,) of log-determinants
    """
    bool_R_y_true = tf.not_equal(tf.ones_like(y_true) - y_true, zero)  # batch_size X (num_class X num_models), 2-D
    mask_non_y_pred = tf.boolean_mask(y_pred, bool_R_y_true)  # batch_size X (num_class-1) X num_models, 1-D
    mask_non_y_pred = tf.reshape(mask_non_y_pred, [-1, num_model, num_classes-1])  # batch_size X num_model X (num_class-1), 3-D
    mask_non_y_pred = mask_non_y_pred / tf.norm(mask_non_y_pred, axis=2, keepdims=True)  # unit-normalize each member's vector
    matrix = tf.matmul(mask_non_y_pred, tf.transpose(mask_non_y_pred, perm=[0, 2, 1]))  # batch_size X num_model X num_model Gram matrix
    all_log_det = tf.linalg.logdet(matrix+det_offset*tf.expand_dims(tf.eye(num_model),0))  # diagonal offset keeps the matrix invertible
    return all_log_det
def model_eval_targetacc(sess, x, y, y_target, predictions, X_test=None, Y_test=None, Y_test_target=None,
                         feed=None, args=None):
    """
    Compute the accuracy of a TF model on some data, additionally feeding
    target labels (e.g. when the prediction graph depends on a target class).
    :param sess: TF session to use
    :param x: input placeholder
    :param y: output placeholder (for true labels)
    :param y_target: placeholder for target labels
    :param predictions: model output predictions
    :param X_test: numpy array with test inputs
    :param Y_test: numpy array with true test labels
    :param Y_test_target: numpy array with target labels
    :param feed: An optional dictionary that is appended to the feeding
             dictionary before the session runs. Can be used to feed
             the learning phase of a Keras model for instance.
    :param args: dict or argparse `Namespace` object.
                 Should contain `batch_size`
    :return: a float with the accuracy value
    """
    args = _ArgsWrapper(args or {})
    assert args.batch_size, "Batch size was not given in args dict"
    if X_test is None or Y_test_target is None or Y_test is None:
        raise ValueError("X_test argument and Y_test argument and Y_test_target argument"
                         "must be supplied.")
    # Define accuracy symbolically
    if LooseVersion(tf.__version__) >= LooseVersion('1.0.0'):
        correct_preds = tf.equal(tf.argmax(y, axis=-1),
                                 tf.argmax(predictions, axis=-1))
    else:
        correct_preds = tf.equal(tf.argmax(y, axis=tf.rank(y) - 1),
                                 tf.argmax(predictions,
                                           axis=tf.rank(predictions) - 1))
    # Init result var
    accuracy = 0.0
    with sess.as_default():
        # Compute number of batches
        nb_batches = int(math.ceil(float(len(X_test)) / args.batch_size))
        assert nb_batches * args.batch_size >= len(X_test)
        # Preallocate fixed-size batch buffers, reused across iterations; the
        # last batch is zero-padded up to batch_size.
        X_cur = np.zeros((args.batch_size,) + X_test.shape[1:],
                         dtype=X_test.dtype)
        Y_cur = np.zeros((args.batch_size,) + Y_test.shape[1:],
                         dtype=Y_test.dtype)
        Y_cur_target = np.zeros((args.batch_size,) + Y_test_target.shape[1:],
                                dtype=Y_test_target.dtype)
        for batch in range(nb_batches):
            if batch % 100 == 0 and batch > 0:
                _logger.debug("Batch " + str(batch))
            # Must not use the `batch_indices` function here, because it
            # repeats some examples.
            # It's acceptable to repeat during training, but not eval.
            start = batch * args.batch_size
            end = min(len(X_test), start + args.batch_size)
            # The last batch may be smaller than all others. This should not
            # affect the accuracy disproportionately.
            cur_batch_size = end - start
            X_cur[:cur_batch_size] = X_test[start:end]
            Y_cur[:cur_batch_size] = Y_test[start:end]
            Y_cur_target[:cur_batch_size] = Y_test_target[start:end]
            feed_dict = {x: X_cur, y: Y_cur, y_target: Y_cur_target}
            if feed is not None:
                feed_dict.update(feed)
            cur_corr_preds = correct_preds.eval(feed_dict=feed_dict)
            # Count only predictions for real (non-padding) examples.
            accuracy += cur_corr_preds[:cur_batch_size].sum()
        assert end >= len(X_test)
        # Divide by number of examples to get final value
        accuracy /= len(X_test)
    return accuracy
def model_eval_for_SPSA_targetacc(sess, x, y, y_index, y_target, predictions, X_test=None, Y_test_index=None, Y_test=None, Y_test_target=None,
                                  feed=None, args=None):
    """
    Compute the accuracy of a TF model on some data for an SPSA-style attack
    graph that also consumes a label index and a target label.
    :param sess: TF session to use
    :param x: input placeholder
    :param y: output placeholder (for labels)
    :param y_index: placeholder fed with a single label index per batch
    :param y_target: placeholder fed with a single target label per batch
    :param predictions: model output predictions
    :param X_test: numpy array with test inputs
    :param Y_test_index: numpy array of label indices
    :param Y_test: numpy array with test labels
    :param Y_test_target: numpy array with target labels
    :param feed: An optional dictionary that is appended to the feeding
             dictionary before the session runs. Can be used to feed
             the learning phase of a Keras model for instance.
    :param args: dict or argparse `Namespace` object.
                 Should contain `batch_size`
    :return: a float with the accuracy value
    """
    args = _ArgsWrapper(args or {})
    assert args.batch_size, "Batch size was not given in args dict"
    if X_test is None or Y_test is None or Y_test_index is None:
        raise ValueError("X_test argument and Y_test and Y_test_index argument "
                         "must be supplied.")
    # Define accuracy symbolically
    if LooseVersion(tf.__version__) >= LooseVersion('1.0.0'):
        correct_preds = tf.equal(tf.argmax(y, axis=-1),
                                 tf.argmax(predictions, axis=-1))
    else:
        correct_preds = tf.equal(tf.argmax(y, axis=tf.rank(y) - 1),
                                 tf.argmax(predictions,
                                           axis=tf.rank(predictions) - 1))
    # Init result var
    accuracy = 0.0
    with sess.as_default():
        # Compute number of batches
        nb_batches = int(math.ceil(float(len(X_test)) / args.batch_size))
        assert nb_batches * args.batch_size >= len(X_test)
        # Fixed-size batch buffers, reused every iteration (last batch zero-padded).
        X_cur = np.zeros((args.batch_size,) + X_test.shape[1:],
                         dtype=X_test.dtype)
        Y_cur = np.zeros((args.batch_size,) + Y_test.shape[1:],
                         dtype=Y_test.dtype)
        Y_cur_target = np.zeros((args.batch_size,) + Y_test_target.shape[1:],
                                dtype=Y_test_target.dtype)
        for batch in range(nb_batches):
            print('Sample %d finished'%batch)
            # Must not use the `batch_indices` function here, because it
            # repeats some examples.
            # It's acceptable to repeat during training, but not eval.
            start = batch * args.batch_size
            end = min(len(X_test), start + args.batch_size)
            # The last batch may be smaller than all others. This should not
            # affect the accuracy disproportionately.
            cur_batch_size = end - start
            X_cur[:cur_batch_size] = X_test[start:end]
            Y_cur[:cur_batch_size] = Y_test[start:end]
            #Y_cur_target[:cur_batch_size] = Y_test_target[start:end]
            # NOTE(review): only the first index/target of each batch is fed —
            # presumably SPSA processes one example per step; confirm.
            feed_dict = {x: X_cur, y: Y_cur, y_index: Y_test_index[start], y_target: Y_test_target[start]}
            if feed is not None:
                feed_dict.update(feed)
            cur_corr_preds = correct_preds.eval(feed_dict=feed_dict)
            accuracy += cur_corr_preds[:cur_batch_size].sum()
        assert end >= len(X_test)
        # Divide by number of examples to get final value
        accuracy /= len(X_test)
    return accuracy
def model_eval_for_SPSA(sess, x, y, y_index, predictions, X_test=None, Y_test_index=None, Y_test=None,
                        feed=None, args=None):
    """
    Compute the accuracy of a TF model on some data for an SPSA-style attack
    graph that also consumes a label index.
    :param sess: TF session to use
    :param x: input placeholder
    :param y: output placeholder (for labels)
    :param y_index: placeholder fed with a single label index per batch
    :param predictions: model output predictions
    :param X_test: numpy array with test inputs
    :param Y_test_index: numpy array of label indices
    :param Y_test: numpy array with test labels
    :param feed: An optional dictionary that is appended to the feeding
             dictionary before the session runs. Can be used to feed
             the learning phase of a Keras model for instance.
    :param args: dict or argparse `Namespace` object.
                 Should contain `batch_size`
    :return: a float with the accuracy value
    """
    args = _ArgsWrapper(args or {})
    assert args.batch_size, "Batch size was not given in args dict"
    if X_test is None or Y_test is None or Y_test_index is None:
        raise ValueError("X_test argument and Y_test and Y_test_index argument "
                         "must be supplied.")
    # Define accuracy symbolically
    if LooseVersion(tf.__version__) >= LooseVersion('1.0.0'):
        correct_preds = tf.equal(tf.argmax(y, axis=-1),
                                 tf.argmax(predictions, axis=-1))
    else:
        correct_preds = tf.equal(tf.argmax(y, axis=tf.rank(y) - 1),
                                 tf.argmax(predictions,
                                           axis=tf.rank(predictions) - 1))
    # Init result var
    accuracy = 0.0
    with sess.as_default():
        # Compute number of batches
        nb_batches = int(math.ceil(float(len(X_test)) / args.batch_size))
        assert nb_batches * args.batch_size >= len(X_test)
        # Fixed-size batch buffers, reused every iteration (last batch zero-padded).
        X_cur = np.zeros((args.batch_size,) + X_test.shape[1:],
                         dtype=X_test.dtype)
        Y_cur = np.zeros((args.batch_size,) + Y_test.shape[1:],
                         dtype=Y_test.dtype)
        for batch in range(nb_batches):
            print('Sample %d finished'%batch)
            # Must not use the `batch_indices` function here, because it
            # repeats some examples.
            # It's acceptable to repeat during training, but not eval.
            start = batch * args.batch_size
            end = min(len(X_test), start + args.batch_size)
            # The last batch may be smaller than all others. This should not
            # affect the accuracy disproportionately.
            cur_batch_size = end - start
            X_cur[:cur_batch_size] = X_test[start:end]
            Y_cur[:cur_batch_size] = Y_test[start:end]
            #Y_cur_target[:cur_batch_size] = Y_test_target[start:end]
            # NOTE(review): only the first label index of each batch is fed —
            # presumably SPSA processes one example per step; confirm.
            feed_dict = {x: X_cur, y: Y_cur, y_index: Y_test_index[start]}
            if feed is not None:
                feed_dict.update(feed)
            cur_corr_preds = correct_preds.eval(feed_dict=feed_dict)
            accuracy += cur_corr_preds[:cur_batch_size].sum()
        assert end >= len(X_test)
        # Divide by number of examples to get final value
        accuracy /= len(X_test)
    return accuracy
def get_ensemble_diversity_values(sess, x, y, predictions, number_model, X_test=None, Y_test=None,
                                  feed=None, args=None):
    """
    Evaluate the per-example ensemble diversity (log-det) values over a
    dataset. (Docstring corrected: this returns diversity values, not accuracy.)
    :param sess: TF session to use
    :param x: input placeholder
    :param y: output placeholder (for labels)
    :param predictions: model output predictions
    :param number_model: number of ensemble members
    :param X_test: numpy array with test inputs
    :param Y_test: numpy array with test labels
    :param feed: An optional dictionary that is appended to the feeding
             dictionary before the session runs. Can be used to feed
             the learning phase of a Keras model for instance.
    :param args: dict or argparse `Namespace` object.
                 Should contain `batch_size`
    :return: 1-D numpy array of ensemble-diversity values
    """
    args = _ArgsWrapper(args or {})
    assert args.batch_size, "Batch size was not given in args dict"
    if X_test is None or Y_test is None:
        raise ValueError("X_test argument and Y_test argument"
                         "must be supplied.")
    ensemble_diversity_records = np.array([])
    get_batch_ensemble_diversity = ensemble_diversity(y, predictions, number_model)
    with sess.as_default():
        # Compute number of batches
        nb_batches = int(math.ceil(float(len(X_test)) / args.batch_size))
        assert nb_batches * args.batch_size >= len(X_test)
        # Fixed-size batch buffers, reused every iteration (last batch zero-padded).
        X_cur = np.zeros((args.batch_size,) + X_test.shape[1:],
                         dtype=X_test.dtype)
        Y_cur = np.zeros((args.batch_size,) + Y_test.shape[1:],
                         dtype=Y_test.dtype)
        for batch in range(nb_batches):
            if batch % 100 == 0 and batch > 0:
                _logger.debug("Batch " + str(batch))
            # Must not use the `batch_indices` function here, because it
            # repeats some examples.
            # It's acceptable to repeat during training, but not eval.
            start = batch * args.batch_size
            end = min(len(X_test), start + args.batch_size)
            # The last batch may be smaller than all others.
            cur_batch_size = end - start
            X_cur[:cur_batch_size] = X_test[start:end]
            Y_cur[:cur_batch_size] = Y_test[start:end]
            feed_dict = {x: X_cur, y: Y_cur}
            if feed is not None:
                feed_dict.update(feed)
            ensemble_diversity_records_batch = get_batch_ensemble_diversity.eval(feed_dict=feed_dict)
            # NOTE(review): unlike the accuracy helpers above, the full padded
            # batch is concatenated here (no [:cur_batch_size] trim), so the
            # last batch can contribute values for zero-padding rows and the
            # result may be longer than len(X_test) — confirm callers truncate.
            ensemble_diversity_records = np.concatenate((ensemble_diversity_records, ensemble_diversity_records_batch), axis=0)
        assert end >= len(X_test)
    return ensemble_diversity_records  #len(X_test) X 1
| 41.81875
| 143
| 0.657824
| 1,990
| 13,382
| 4.20603
| 0.104523
| 0.066667
| 0.046595
| 0.021744
| 0.86368
| 0.842533
| 0.838351
| 0.838351
| 0.8319
| 0.827957
| 0
| 0.007595
| 0.252204
| 13,382
| 320
| 144
| 41.81875
| 0.82882
| 0.311912
| 0
| 0.755814
| 0
| 0
| 0.057533
| 0
| 0
| 0
| 0
| 0
| 0.069767
| 1
| 0.02907
| false
| 0
| 0.05814
| 0
| 0.116279
| 0.017442
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
b400df83514894d0d10c59440a5c34f2e19109dd
| 11,220
|
py
|
Python
|
petl/util/lookups.py
|
a-musing-moose/petl
|
719cea43117543eaccadb53d255cbbe1177b3cc5
|
[
"MIT"
] | null | null | null |
petl/util/lookups.py
|
a-musing-moose/petl
|
719cea43117543eaccadb53d255cbbe1177b3cc5
|
[
"MIT"
] | null | null | null |
petl/util/lookups.py
|
a-musing-moose/petl
|
719cea43117543eaccadb53d255cbbe1177b3cc5
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import, print_function, division
import operator
from petl.compat import text_type
from petl.errors import DuplicateKeyError
from petl.util.base import Table, asindices, asdict, Record
def lookup(table, key, value=None, dictionary=None):
    """Load a dictionary with data from the given table, mapping each key to
    the *list* of values seen for it. E.g.::

        >>> import petl as etl
        >>> table1 = [['foo', 'bar'],
        ...           ['a', 1],
        ...           ['b', 2],
        ...           ['b', 3]]
        >>> lkp = etl.lookup(table1, 'foo', 'bar')
        >>> lkp['a']
        [1]
        >>> lkp['b']
        [2, 3]

    If no `value` argument is given, the default is the whole row (as a
    tuple). Compound keys are supported by passing a tuple of field names as
    `key` (and/or `value`). Data can be loaded into an existing
    dictionary-like object — including persistent dictionaries created via
    the `shelve` module — by passing it as `dictionary`.
    """
    if dictionary is None:
        dictionary = dict()
    rows = iter(table)
    hdr = next(rows)
    flds = list(map(text_type, hdr))
    if value is None:
        value = flds  # default value is the complete row
    keyindices = asindices(hdr, key)
    assert len(keyindices) > 0, 'no key selected'
    valueindices = asindices(hdr, value)
    assert len(valueindices) > 0, 'no value selected'
    getkey = operator.itemgetter(*keyindices)
    getvalue = operator.itemgetter(*valueindices)
    for row in rows:
        k = getkey(row)
        v = getvalue(row)
        # read-modify-write so persistent mappings (shelve) see the update
        entries = dictionary[k] if k in dictionary else []
        entries.append(v)
        dictionary[k] = entries
    return dictionary


Table.lookup = lookup
def lookupone(table, key, value=None, dictionary=None, strict=False):
    """Load a dictionary with data from the given table, assuming there is at
    most one value for each key. E.g.::

        >>> import petl as etl
        >>> table1 = [['foo', 'bar'],
        ...           ['a', 1],
        ...           ['b', 2],
        ...           ['b', 3]]
        >>> lkp = etl.lookupone(table1, 'foo', 'bar')
        >>> lkp['a']
        1
        >>> lkp['b']
        2

    If the specified key is not unique and `strict` is False (the default),
    the first value wins; with `strict=True` a `DuplicateKeyError` is raised
    instead. If no `value` argument is given, the default is the whole row
    (as a tuple). Compound keys are supported by passing a tuple of field
    names. Data can be loaded into an existing dictionary-like object —
    including persistent dictionaries created via the `shelve` module — by
    passing it as `dictionary`.
    """
    if dictionary is None:
        dictionary = dict()
    rows = iter(table)
    hdr = next(rows)
    flds = list(map(text_type, hdr))
    if value is None:
        value = flds  # default value is the complete row
    keyindices = asindices(hdr, key)
    assert len(keyindices) > 0, 'no key selected'
    valueindices = asindices(hdr, value)
    assert len(valueindices) > 0, 'no value selected'
    getkey = operator.itemgetter(*keyindices)
    getvalue = operator.itemgetter(*valueindices)
    for row in rows:
        k = getkey(row)
        if k not in dictionary:
            dictionary[k] = getvalue(row)
        elif strict:
            raise DuplicateKeyError(k)
    return dictionary


Table.lookupone = lookupone
def dictlookup(table, key, dictionary=None):
    """
    Load a dictionary with data from the given table, mapping each key to
    a list of row dictionaries (one dict per matching row, keyed by field
    name). `key` may be a single field name or a tuple of field names
    (compound key). Data can be loaded into an existing mapping (e.g. a
    persistent dictionary created via the shelve module) passed as
    `dictionary`.
    """
    if dictionary is None:
        dictionary = dict()
    rows = iter(table)
    hdr = next(rows)
    fields = list(map(text_type, hdr))
    key_indices = asindices(hdr, key)
    assert len(key_indices) > 0, 'no key selected'
    extract_key = operator.itemgetter(*key_indices)
    for row in rows:
        k = extract_key(row)
        rec = asdict(fields, row)
        # read-modify-write so shelve-backed mappings persist the update
        bucket = dictionary[k] if k in dictionary else []
        bucket.append(rec)
        dictionary[k] = bucket
    return dictionary


Table.dictlookup = dictlookup
def dictlookupone(table, key, dictionary=None, strict=False):
    """
    Load a dictionary with data from the given table, mapping each key to
    a single row dictionary, assuming there is at most one row per key.

    When the key is not unique the first row wins, unless `strict` is
    True, in which case a DuplicateKeyError is raised. `key` may be a
    single field name or a tuple of field names (compound key). Data can
    be loaded into an existing mapping (e.g. a persistent dictionary
    created via the shelve module) passed as `dictionary`.
    """
    if dictionary is None:
        dictionary = dict()
    rows = iter(table)
    hdr = next(rows)
    fields = list(map(text_type, hdr))
    key_indices = asindices(hdr, key)
    assert len(key_indices) > 0, 'no key selected'
    extract_key = operator.itemgetter(*key_indices)
    for row in rows:
        k = extract_key(row)
        if k in dictionary:
            # duplicate key: first row wins unless strict mode
            if strict:
                raise DuplicateKeyError(k)
        else:
            dictionary[k] = asdict(fields, row)
    return dictionary


Table.dictlookupone = dictlookupone
def recordlookup(table, key, dictionary=None):
    """
    Load a dictionary with data from the given table, mapping each key to
    a list of Record objects. `key` may be a single field name or a tuple
    of field names (compound key). Data can be loaded into an existing
    mapping (e.g. a persistent dictionary created via the shelve module)
    passed as `dictionary`.
    """
    if dictionary is None:
        dictionary = dict()
    rows = iter(table)
    hdr = next(rows)
    fields = list(map(text_type, hdr))
    key_indices = asindices(hdr, key)
    assert len(key_indices) > 0, 'no key selected'
    extract_key = operator.itemgetter(*key_indices)
    for row in rows:
        k = extract_key(row)
        rec = Record(row, fields)
        # read-modify-write so shelve-backed mappings persist the update
        bucket = dictionary[k] if k in dictionary else []
        bucket.append(rec)
        dictionary[k] = bucket
    return dictionary


Table.recordlookup = recordlookup
def recordlookupone(table, key, dictionary=None, strict=False):
    """
    Load a dictionary with data from the given table, mapping each key to
    a single Record object, assuming there is at most one row per key.

    When the key is not unique the first row wins, unless `strict` is
    True, in which case a DuplicateKeyError is raised.
    """
    if dictionary is None:
        dictionary = dict()
    rows = iter(table)
    hdr = next(rows)
    fields = list(map(text_type, hdr))
    key_indices = asindices(hdr, key)
    assert len(key_indices) > 0, 'no key selected'
    extract_key = operator.itemgetter(*key_indices)
    for row in rows:
        k = extract_key(row)
        if k in dictionary:
            # duplicate key: first row wins unless strict mode
            if strict:
                raise DuplicateKeyError(k)
        else:
            dictionary[k] = Record(row, fields)
    return dictionary


Table.recordlookupone = recordlookupone
| 30.242588
| 83
| 0.487344
| 1,275
| 11,220
| 4.278431
| 0.110588
| 0.006233
| 0.010082
| 0.029331
| 0.890192
| 0.88011
| 0.833914
| 0.828414
| 0.827864
| 0.819982
| 0
| 0.013717
| 0.343761
| 11,220
| 370
| 84
| 30.324324
| 0.727149
| 0.546257
| 0
| 0.813953
| 0
| 0
| 0.028764
| 0
| 0
| 0
| 0
| 0
| 0.062016
| 1
| 0.046512
| false
| 0
| 0.03876
| 0
| 0.131783
| 0.007752
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
b40ce3743d816cd595271bb6defeeed4e8c1dc67
| 4,691
|
py
|
Python
|
code/11_ui/intents/functions/password/intent_password.py
|
padmalcom/AISpeechAssistant
|
b7501a23a8f513acb5043f3c7bb06df129bdc2cc
|
[
"Apache-2.0"
] | 1
|
2021-09-08T09:21:16.000Z
|
2021-09-08T09:21:16.000Z
|
code/10_i_password/intents/functions/password/intent_password.py
|
padmalcom/AISpeechAssistant
|
b7501a23a8f513acb5043f3c7bb06df129bdc2cc
|
[
"Apache-2.0"
] | null | null | null |
code/10_i_password/intents/functions/password/intent_password.py
|
padmalcom/AISpeechAssistant
|
b7501a23a8f513acb5043f3c7bb06df129bdc2cc
|
[
"Apache-2.0"
] | 2
|
2022-02-06T09:54:40.000Z
|
2022-03-01T07:52:51.000Z
|
from loguru import logger
from chatbot import register_call
import global_variables
import yaml
import random
import os
from pykeepass import PyKeePass
from pynput.keyboard import Key, Listener, Controller as keyboard_controller
from fuzzywuzzy import fuzz
import json
import numpy as np
@register_call("getPassword")
def getPassword(session_id="general", entry="none"):
    """Find the KeePass entry best matching *entry* and type its password.

    Verifies the current speaker's voice fingerprint (when a
    '_fingerprint' entry exists in the database) before typing anything
    via the virtual keyboard. Returns a localized status phrase, or None
    when the intent configuration could not be loaded.
    """
    # Load the intent's own configuration file
    config_path = os.path.join('intents', 'functions', 'password', 'config_password.yml')
    with open(config_path, "r", encoding='utf-8') as ymlfile:
        cfg = yaml.load(ymlfile, Loader=yaml.FullLoader)

    # Fail fast. The original code dereferenced cfg first and only then
    # checked "if cfg:", so the error branch was unreachable.
    if not cfg:
        logger.error("Konnte Konfigurationsdatei für Intent 'password' nicht laden.")
        return

    # Language from the global configuration
    LANGUAGE = global_variables.voice_assistant.cfg['assistant']['language']
    pw_cfg = cfg['intent']['password']
    db_file = os.path.join('intents', 'functions', 'password', pw_cfg['db_file'])
    key_file = os.path.join('intents', 'functions', 'password', pw_cfg['key_file'])
    typed_pw = pw_cfg[LANGUAGE]['typed_pw']

    if not os.path.exists(db_file):
        return pw_cfg[LANGUAGE]['db_not_found']
    if not os.path.exists(key_file):
        return pw_cfg[LANGUAGE]['key_not_found']

    UNKNOWN_ENTRY = random.choice(pw_cfg[LANGUAGE]['unknown_entry']).format(entry)
    NO_VOICE_MATCH = pw_cfg[LANGUAGE]['no_voice_match']

    try:
        kp = PyKeePass(os.path.abspath(db_file), keyfile=os.path.abspath(key_file))
    except Exception:
        return pw_cfg[LANGUAGE]['could_not_access_keystore']

    # Verify the speaker's voice. NOTE(review): if no '_fingerprint'
    # entry exists, the check is skipped entirely -- confirm intended.
    fp_entry = kp.find_entries(title='_fingerprint', first=True)
    if fp_entry:
        stored = np.array(json.loads(fp_entry.notes))
        current = np.array(global_variables.voice_assistant.current_speaker_fingerprint)
        # cosine distance between stored and current fingerprint vectors
        cos_dist = 1 - np.dot(stored, current) / np.linalg.norm(stored) / np.linalg.norm(current)
        if cos_dist >= 0.3:
            return NO_VOICE_MATCH

    # Fuzzy-match the requested entry title against all database entries;
    # the first entry exceeding 70% similarity wins.
    for kp_entry in kp.entries:
        ratio = fuzz.ratio(kp_entry.title.lower(), entry.lower())
        logger.info("Übereinstimmung von {} und {} ist {}%", kp_entry.title, entry, ratio)
        if ratio > 70:
            keyboard = keyboard_controller()
            keyboard.type(kp_entry.password)
            return typed_pw.format(kp_entry.title)
    return UNKNOWN_ENTRY
@register_call("getUsername")
def getUsername(session_id="general", entry="none"):
    """Return the username of the KeePass entry best matching *entry*.

    Verifies the current speaker's voice fingerprint (when a
    '_fingerprint' entry exists in the database) before revealing
    anything. Returns the username or a localized status phrase; returns
    "" when the intent configuration could not be loaded.
    """
    # Load the intent's own configuration file
    config_path = os.path.join('intents', 'functions', 'password', 'config_password.yml')
    with open(config_path, "r", encoding='utf-8') as ymlfile:
        cfg = yaml.load(ymlfile, Loader=yaml.FullLoader)

    # Fail fast. The original code dereferenced cfg first and only then
    # checked "if cfg:", so the error branch was unreachable.
    if not cfg:
        logger.error("Konnte Konfigurationsdatei für Intent 'password' nicht laden.")
        return ""

    # Language from the global configuration
    LANGUAGE = global_variables.voice_assistant.cfg['assistant']['language']
    pw_cfg = cfg['intent']['password']
    db_file = os.path.join('intents', 'functions', 'password', pw_cfg['db_file'])
    key_file = os.path.join('intents', 'functions', 'password', pw_cfg['key_file'])

    if not os.path.exists(db_file):
        return pw_cfg[LANGUAGE]['db_not_found']
    if not os.path.exists(key_file):
        return pw_cfg[LANGUAGE]['key_not_found']

    UNKNOWN_ENTRY = random.choice(pw_cfg[LANGUAGE]['unknown_entry']).format(entry)
    NO_VOICE_MATCH = pw_cfg[LANGUAGE]['no_voice_match']

    try:
        kp = PyKeePass(os.path.abspath(db_file), keyfile=os.path.abspath(key_file))
    except Exception:
        return pw_cfg[LANGUAGE]['could_not_access_keystore']

    # Verify the speaker's voice. NOTE(review): if no '_fingerprint'
    # entry exists, the check is skipped entirely -- confirm intended.
    fp_entry = kp.find_entries(title='_fingerprint', first=True)
    if fp_entry:
        stored = np.array(json.loads(fp_entry.notes))
        current = np.array(global_variables.voice_assistant.current_speaker_fingerprint)
        # cosine distance between stored and current fingerprint vectors
        cos_dist = 1 - np.dot(stored, current) / np.linalg.norm(stored) / np.linalg.norm(current)
        if cos_dist >= 0.3:
            return NO_VOICE_MATCH

    # Fuzzy-match the requested entry title against all database entries;
    # the first entry exceeding 70% similarity wins.
    for kp_entry in kp.entries:
        ratio = fuzz.ratio(kp_entry.title.lower(), entry.lower())
        logger.info("Übereinstimmung von {} und {} ist {}%", kp_entry.title, entry, ratio)
        if ratio > 70:
            return kp_entry.username
    return UNKNOWN_ENTRY
| 33.507143
| 84
| 0.718184
| 642
| 4,691
| 5.093458
| 0.205607
| 0.072783
| 0.077982
| 0.084098
| 0.86055
| 0.86055
| 0.86055
| 0.86055
| 0.86055
| 0.86055
| 0
| 0.00297
| 0.138563
| 4,691
| 140
| 85
| 33.507143
| 0.806236
| 0.075677
| 0
| 0.78
| 0
| 0
| 0.206562
| 0.011553
| 0
| 0
| 0
| 0
| 0
| 1
| 0.02
| false
| 0.29
| 0.11
| 0
| 0.27
| 0.04
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 8
|
81f1a5917288e72c6de2d1beae375f51314dd7be
| 6,394
|
py
|
Python
|
Examples/preamplifier/sweep_tube_filter.py
|
apohl79/AudioTK
|
05ac241b0bc6a8f841d93257b4d81e5961b1f627
|
[
"BSD-3-Clause"
] | 10
|
2018-05-17T15:29:05.000Z
|
2021-12-19T22:26:08.000Z
|
Examples/preamplifier/sweep_tube_filter.py
|
apohl79/AudioTK
|
05ac241b0bc6a8f841d93257b4d81e5961b1f627
|
[
"BSD-3-Clause"
] | null | null | null |
Examples/preamplifier/sweep_tube_filter.py
|
apohl79/AudioTK
|
05ac241b0bc6a8f841d93257b4d81e5961b1f627
|
[
"BSD-3-Clause"
] | 2
|
2020-04-21T13:43:57.000Z
|
2020-04-28T19:10:14.000Z
|
#!/usr/bin/env python
from ATK.Core import DoubleInPointerFilter, DoubleOutPointerFilter
from ATK.Tools import DoubleOversampling6points5order_32Filter, DoubleOversampling6points5order_16Filter, DoubleOversampling6points5order_8Filter, DoubleOversampling6points5order_4Filter, DoubleDecimationFilter
from ATK.EQ import DoubleButterworthLowPassFilter
from ATK.Preamplifier import DoubleKorenTriodeFilter
import numpy as np
import scipy.signal as signal
import matplotlib.pyplot as plt
sample_rate = 96000
import sys
import os
sys.path.append(os.getcwd()+"/..")
from display.compare_spec import plot_me
def filter_32(input):
    """Run *input* through a Koren triode preamp stage at 32x oversampling.

    Pipeline: 32x oversampler -> triode waveshaper -> 10th-order
    Butterworth low-pass at half the base sample rate -> decimation back
    to `sample_rate`. Returns a new float64 array shaped like *input*.
    (Parameter keeps its historical name `input`, shadowing the builtin,
    to preserve the public keyword interface.)
    """
    # Removed a redundant function-local `import numpy as np`; the module
    # already imports numpy at top level.
    output = np.zeros(input.shape, dtype=np.float64)

    infilter = DoubleInPointerFilter(input, False)
    infilter.set_input_sampling_rate(sample_rate)

    overfilter = DoubleOversampling6points5order_32Filter()
    overfilter.set_input_sampling_rate(sample_rate)
    overfilter.set_output_sampling_rate(sample_rate * 32)
    overfilter.set_input_port(0, infilter, 0)

    overdrivefilter = DoubleKorenTriodeFilter.build_standard_filter()
    overdrivefilter.set_input_sampling_rate(sample_rate * 32)
    overdrivefilter.set_input_port(0, overfilter, 0)

    # anti-aliasing low-pass ahead of the decimation stage
    lowpassfilter = DoubleButterworthLowPassFilter()
    lowpassfilter.set_input_sampling_rate(sample_rate * 32)
    lowpassfilter.set_cut_frequency(sample_rate / 2)
    lowpassfilter.set_order(10)
    lowpassfilter.set_input_port(0, overdrivefilter, 0)

    decimationfilter = DoubleDecimationFilter(1)
    decimationfilter.set_input_sampling_rate(sample_rate * 32)
    decimationfilter.set_output_sampling_rate(sample_rate)
    decimationfilter.set_input_port(0, lowpassfilter, 0)

    outfilter = DoubleOutPointerFilter(output, False)
    outfilter.set_input_sampling_rate(sample_rate)
    outfilter.set_input_port(0, decimationfilter, 0)
    # pull the whole pipeline once per sample column
    outfilter.process(input.shape[1])
    return output
def filter_16(input):
    """Run *input* through a Koren triode preamp stage at 16x oversampling.

    Pipeline: 16x oversampler -> triode waveshaper -> 10th-order
    Butterworth low-pass at half the base sample rate -> decimation back
    to `sample_rate`. Returns a new float64 array shaped like *input*.
    """
    # Removed a redundant function-local `import numpy as np`; the module
    # already imports numpy at top level.
    output = np.zeros(input.shape, dtype=np.float64)

    infilter = DoubleInPointerFilter(input, False)
    infilter.set_input_sampling_rate(sample_rate)

    overfilter = DoubleOversampling6points5order_16Filter()
    overfilter.set_input_sampling_rate(sample_rate)
    overfilter.set_output_sampling_rate(sample_rate * 16)
    overfilter.set_input_port(0, infilter, 0)

    overdrivefilter = DoubleKorenTriodeFilter.build_standard_filter()
    overdrivefilter.set_input_sampling_rate(sample_rate * 16)
    overdrivefilter.set_input_port(0, overfilter, 0)

    # anti-aliasing low-pass ahead of the decimation stage
    lowpassfilter = DoubleButterworthLowPassFilter()
    lowpassfilter.set_input_sampling_rate(sample_rate * 16)
    lowpassfilter.set_cut_frequency(sample_rate / 2)
    lowpassfilter.set_order(10)
    lowpassfilter.set_input_port(0, overdrivefilter, 0)

    decimationfilter = DoubleDecimationFilter(1)
    decimationfilter.set_input_sampling_rate(sample_rate * 16)
    decimationfilter.set_output_sampling_rate(sample_rate)
    decimationfilter.set_input_port(0, lowpassfilter, 0)

    outfilter = DoubleOutPointerFilter(output, False)
    outfilter.set_input_sampling_rate(sample_rate)
    outfilter.set_input_port(0, decimationfilter, 0)
    outfilter.process(input.shape[1])
    return output
def filter_8(input):
    """Run *input* through a Koren triode preamp stage at 8x oversampling.

    Pipeline: 8x oversampler -> triode waveshaper -> 10th-order
    Butterworth low-pass at 20 kHz -> decimation back to `sample_rate`.
    Returns a new float64 array shaped like *input*.
    """
    # Removed a redundant function-local `import numpy as np`; the module
    # already imports numpy at top level.
    output = np.zeros(input.shape, dtype=np.float64)

    infilter = DoubleInPointerFilter(input, False)
    infilter.set_input_sampling_rate(sample_rate)

    overfilter = DoubleOversampling6points5order_8Filter()
    overfilter.set_input_sampling_rate(sample_rate)
    overfilter.set_output_sampling_rate(sample_rate * 8)
    overfilter.set_input_port(0, infilter, 0)

    overdrivefilter = DoubleKorenTriodeFilter.build_standard_filter()
    overdrivefilter.set_input_sampling_rate(sample_rate * 8)
    overdrivefilter.set_input_port(0, overfilter, 0)

    # anti-aliasing low-pass; cut at 20 kHz here (the 32x/16x variants
    # cut at Nyquist instead) -- presumably intentional, verify.
    lowpassfilter = DoubleButterworthLowPassFilter()
    lowpassfilter.set_input_sampling_rate(sample_rate * 8)
    lowpassfilter.set_cut_frequency(20000)
    lowpassfilter.set_order(10)
    lowpassfilter.set_input_port(0, overdrivefilter, 0)

    decimationfilter = DoubleDecimationFilter(1)
    decimationfilter.set_input_sampling_rate(sample_rate * 8)
    decimationfilter.set_output_sampling_rate(sample_rate)
    decimationfilter.set_input_port(0, lowpassfilter, 0)

    outfilter = DoubleOutPointerFilter(output, False)
    outfilter.set_input_sampling_rate(sample_rate)
    outfilter.set_input_port(0, decimationfilter, 0)
    outfilter.process(input.shape[1])
    return output
def filter_4(input):
    """Run *input* through a Koren triode preamp stage at 4x oversampling.

    Pipeline: 4x oversampler -> triode waveshaper -> 10th-order
    Butterworth low-pass at 20 kHz -> decimation back to `sample_rate`.
    Returns a new float64 array shaped like *input*.
    """
    # Removed a redundant function-local `import numpy as np`; the module
    # already imports numpy at top level.
    output = np.zeros(input.shape, dtype=np.float64)

    infilter = DoubleInPointerFilter(input, False)
    infilter.set_input_sampling_rate(sample_rate)

    overfilter = DoubleOversampling6points5order_4Filter()
    overfilter.set_input_sampling_rate(sample_rate)
    overfilter.set_output_sampling_rate(sample_rate * 4)
    overfilter.set_input_port(0, infilter, 0)

    overdrivefilter = DoubleKorenTriodeFilter.build_standard_filter()
    overdrivefilter.set_input_sampling_rate(sample_rate * 4)
    overdrivefilter.set_input_port(0, overfilter, 0)

    # anti-aliasing low-pass; cut at 20 kHz here (the 32x/16x variants
    # cut at Nyquist instead) -- presumably intentional, verify.
    lowpassfilter = DoubleButterworthLowPassFilter()
    lowpassfilter.set_input_sampling_rate(sample_rate * 4)
    lowpassfilter.set_cut_frequency(20000)
    lowpassfilter.set_order(10)
    lowpassfilter.set_input_port(0, overdrivefilter, 0)

    decimationfilter = DoubleDecimationFilter(1)
    decimationfilter.set_input_sampling_rate(sample_rate * 4)
    decimationfilter.set_output_sampling_rate(sample_rate)
    decimationfilter.set_input_port(0, lowpassfilter, 0)

    outfilter = DoubleOutPointerFilter(output, False)
    outfilter.set_input_sampling_rate(sample_rate)
    outfilter.set_input_port(0, decimationfilter, 0)
    outfilter.process(input.shape[1])
    return output
# Script entry point: generate a sine sweep, run it through each
# oversampling variant of the preamp, save input/outputs to text files
# and plot the results for comparison.
if __name__ == "__main__":
    import numpy as np
    samples = 2000000
    freq_max = 20000
    # time axis as a (1, samples) row vector, in seconds
    t = np.arange(samples, dtype=np.float64).reshape(1, -1) / sample_rate
    # sine sweep rising towards freq_max (quadratic phase in t)
    d = np.sin(np.pi * (sample_rate * freq_max / samples * (t + .1)) * t)
    np.savetxt("input.txt", d)
    out = filter_32(d)
    plt.figure()
    plt.title("Oversampling 32")
    plot_me((d[0], out[0]), sample_rate)
    np.savetxt("output32.txt", out)
    out = filter_16(d)
    plt.figure()
    plt.title("Oversampling 16")
    plot_me((d[0], out[0]), sample_rate)
    np.savetxt("output16.txt", out)
    out = filter_8(d)
    plt.figure()
    plt.title("Oversampling 8")
    plot_me((d[0], out[0]), sample_rate)
    np.savetxt("output8.txt", out)
    out = filter_4(d)
    plt.figure()
    plt.title("Oversampling 4")
    plot_me((d[0], out[0]), sample_rate)
    np.savetxt("output4.txt", out)
    # render all four comparison figures
    plt.show()
| 39.469136
| 210
| 0.805286
| 798
| 6,394
| 6.165414
| 0.125313
| 0.071545
| 0.117073
| 0.143089
| 0.832317
| 0.831098
| 0.806707
| 0.803049
| 0.803049
| 0.803049
| 0
| 0.031774
| 0.10416
| 6,394
| 161
| 211
| 39.714286
| 0.827165
| 0.003128
| 0
| 0.625
| 0
| 0
| 0.019457
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.027778
| false
| 0.173611
| 0.104167
| 0
| 0.159722
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
c31f7c2a7f618e6e3182bb9a83dda3accf1552c9
| 6,620
|
py
|
Python
|
pyaz/backup/recoverypoint/__init__.py
|
py-az-cli/py-az-cli
|
9a7dc44e360c096a5a2f15595353e9dad88a9792
|
[
"MIT"
] | null | null | null |
pyaz/backup/recoverypoint/__init__.py
|
py-az-cli/py-az-cli
|
9a7dc44e360c096a5a2f15595353e9dad88a9792
|
[
"MIT"
] | null | null | null |
pyaz/backup/recoverypoint/__init__.py
|
py-az-cli/py-az-cli
|
9a7dc44e360c096a5a2f15595353e9dad88a9792
|
[
"MIT"
] | 1
|
2022-02-03T09:12:01.000Z
|
2022-02-03T09:12:01.000Z
|
'''
A snapshot of data at that point-of-time, stored in Recovery Services Vault, from which you can restore information.
'''
from ... pyaz_utils import _call_az
def show(container_name, item_name, name, resource_group, vault_name, backup_management_type=None, use_secondary_region=None, workload_type=None):
    '''
    Shows details of a particular recovery point.

    Required: container_name (backup container name or friendly name),
    item_name (backed up item), name (recovery point name),
    resource_group, vault_name (Recovery Services vault).
    Optional: backup_management_type (required when a friendly container
    name is used), use_secondary_region, workload_type.
    '''
    # equivalent to passing locals(): only the parameters are in scope
    arguments = {
        'container_name': container_name,
        'item_name': item_name,
        'name': name,
        'resource_group': resource_group,
        'vault_name': vault_name,
        'backup_management_type': backup_management_type,
        'use_secondary_region': use_secondary_region,
        'workload_type': workload_type,
    }
    return _call_az("az backup recoverypoint show", arguments)
def list(container_name, item_name, resource_group, vault_name, backup_management_type=None, end_date=None, is_ready_for_move=None, recommended_for_archive=None, start_date=None, target_tier=None, tier=None, use_secondary_region=None, workload_type=None):
    '''
    List all recovery points of a backed up item.

    Required: container_name, item_name, resource_group, vault_name.
    Optional: backup_management_type (required when a friendly container
    name is used), start_date / end_date (UTC, d-m-Y), is_ready_for_move,
    recommended_for_archive, target_tier, tier, use_secondary_region,
    workload_type.
    '''
    # equivalent to passing locals(): only the parameters are in scope
    arguments = {
        'container_name': container_name,
        'item_name': item_name,
        'resource_group': resource_group,
        'vault_name': vault_name,
        'backup_management_type': backup_management_type,
        'end_date': end_date,
        'is_ready_for_move': is_ready_for_move,
        'recommended_for_archive': recommended_for_archive,
        'start_date': start_date,
        'target_tier': target_tier,
        'tier': tier,
        'use_secondary_region': use_secondary_region,
        'workload_type': workload_type,
    }
    return _call_az("az backup recoverypoint list", arguments)
def move(container_name, destination_tier, item_name, name, resource_group, source_tier, vault_name, backup_management_type=None, workload_type=None):
    '''
    Move a particular recovery point of a backed up item from one tier to
    another tier.

    Required: container_name, destination_tier, item_name, name
    (recovery point name), resource_group, source_tier, vault_name.
    Optional: backup_management_type (required when a friendly container
    name is used), workload_type.
    '''
    # equivalent to passing locals(): only the parameters are in scope
    arguments = {
        'container_name': container_name,
        'destination_tier': destination_tier,
        'item_name': item_name,
        'name': name,
        'resource_group': resource_group,
        'source_tier': source_tier,
        'vault_name': vault_name,
        'backup_management_type': backup_management_type,
        'workload_type': workload_type,
    }
    return _call_az("az backup recoverypoint move", arguments)
def show_log_chain(container_name, item_name, resource_group, vault_name, backup_management_type=None, end_date=None, start_date=None, use_secondary_region=None, workload_type=None):
    '''
    List the start and end points of the unbroken log chain(s) of the
    given backup item.

    Required: container_name, item_name, resource_group, vault_name.
    Optional: backup_management_type (required when a friendly container
    name is used), start_date / end_date (UTC, d-m-Y),
    use_secondary_region, workload_type.
    '''
    # equivalent to passing locals(): only the parameters are in scope
    arguments = {
        'container_name': container_name,
        'item_name': item_name,
        'resource_group': resource_group,
        'vault_name': vault_name,
        'backup_management_type': backup_management_type,
        'end_date': end_date,
        'start_date': start_date,
        'use_secondary_region': use_secondary_region,
        'workload_type': workload_type,
    }
    return _call_az("az backup recoverypoint show-log-chain", arguments)
| 75.227273
| 332
| 0.761027
| 959
| 6,620
| 5.1439
| 0.138686
| 0.020272
| 0.02838
| 0.036894
| 0.841273
| 0.820191
| 0.813501
| 0.807217
| 0.799513
| 0.762416
| 0
| 0
| 0.177644
| 6,620
| 87
| 333
| 76.091954
| 0.906135
| 0.801208
| 0
| 0
| 0
| 0
| 0.115203
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.444444
| false
| 0
| 0.111111
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 9
|
c37617079d246b12bf70e16bee59de601064ff72
| 14,477
|
py
|
Python
|
api/tests/test_views.py
|
Verbozeteam/web
|
2aecd67ec823e9d6ac243d6f8a71849dd0f9ed9d
|
[
"MIT"
] | 1
|
2018-12-17T15:31:03.000Z
|
2018-12-17T15:31:03.000Z
|
api/tests/test_views.py
|
Verbozeteam/web
|
2aecd67ec823e9d6ac243d6f8a71849dd0f9ed9d
|
[
"MIT"
] | null | null | null |
api/tests/test_views.py
|
Verbozeteam/web
|
2aecd67ec823e9d6ac243d6f8a71849dd0f9ed9d
|
[
"MIT"
] | null | null | null |
from rest_framework.test import APITestCase
import json
from api.models import *
class TestTokenApi(APITestCase):
def setUp(self):
    """Create a plain (non-privileged) user shared by all token tests."""
    self.user = User.objects.create_user(username='testuser', password='12345')
def test_request_for_token_with_no_type(self):
    """A request omitting 'requested_token_type' is rejected with 400."""
    payload = {'username': 'testuser', 'password': '12345'}
    response = self.client.post('/api/tokens/', payload)
    self.assertEqual(response.status_code, 400)
    self.assertEqual(response.data,
                     {'error': 'No \'requested_token_type\' provided'})
def test_request_for_token_with_invalid_type(self):
    """A request with an unknown token type is rejected with 400."""
    payload = {'username': 'testuser', 'password': '12345',
               'requested_token_type': 'invalid_type'}
    response = self.client.post('/api/tokens/', payload)
    self.assertEqual(response.status_code, 400)
    self.assertEqual(response.data,
                     {'error': 'Invalid requested_token_type provided'})
def test_request_for_anonymous_user_token(self):
    """An anonymous_user token is issued and is bound to no owner."""
    payload = {'username': 'testuser', 'password': '12345',
               'requested_token_type': 'anonymous_user'}
    response = self.client.post('/api/tokens/', payload)
    self.assertEqual(response.status_code, 200)
    self.assertEqual(Token.objects.count(), 1)
    self.assertEqual(Token.objects.last().content_object, None)
def test_request_for_token_requiring_credentials_with_wrong_credentials(self):
    """Bad credentials yield 401 with an explanatory error message."""
    payload = {'username': 'testuser', 'password': 'wrongpassword',
               'requested_token_type': 'admin_user'}
    response = self.client.post('/api/tokens/', payload)
    self.assertEqual(response.status_code, 401)
    self.assertEqual(response.data,
                     {'error': 'Incorrect username or password'})
def test_request_for_admin_user_token_as_non_admin(self):
    """Plain users may not request an admin_user token."""
    payload = {'username': 'testuser', 'password': '12345',
               'requested_token_type': 'admin_user'}
    response = self.client.post('/api/tokens/', payload)
    self.assertEqual(response.status_code, 400)
    self.assertEqual(response.data,
                     {'error': 'You do not have permissions to request such token'})
def test_request_for_admin_user_token_as_admin(self):
    """Admin users receive an admin token bound to their AdminUser."""
    self.admin_user = AdminUser(user=self.user)
    self.admin_user.save()
    payload = {'username': 'testuser', 'password': '12345',
               'requested_token_type': 'admin_user'}
    response = self.client.post('/api/tokens/', payload)
    self.assertEqual(response.status_code, 200)
    self.assertEqual(Token.objects.count(), 1)
    self.assertEqual(Token.objects.last().content_object, self.admin_user)
def test_request_for_guest_user_token_as_non_guest(self):
    """Plain users may not request a guest_user token."""
    payload = {'username': 'testuser', 'password': '12345',
               'requested_token_type': 'guest_user'}
    response = self.client.post('/api/tokens/', payload)
    self.assertEqual(response.status_code, 400)
    self.assertEqual(response.data,
                     {'error': 'You do not have permissions to request such token'})
def test_request_for_guest_user_token_as_guest(self):
    """Guest users receive a guest token bound to their GuestUser."""
    self.guest_user = GuestUser(user=self.user)
    self.guest_user.save()
    payload = {'username': 'testuser', 'password': '12345',
               'requested_token_type': 'guest_user'}
    response = self.client.post('/api/tokens/', payload)
    self.assertEqual(response.status_code, 200)
    self.assertEqual(Token.objects.count(), 1)
    self.assertEqual(Token.objects.last().content_object, self.guest_user)
def test_request_for_hotel_user_token_as_non_hotel(self):
    """Plain users may not request a hotel_user token."""
    payload = {'username': 'testuser', 'password': '12345',
               'requested_token_type': 'hotel_user'}
    response = self.client.post('/api/tokens/', payload)
    self.assertEqual(response.status_code, 400)
    self.assertEqual(response.data,
                     {'error': 'You do not have permissions to request such token'})
def test_request_for_hotel_user_token_as_hotel(self):
    """Hotel users receive a hotel token bound to their HotelUser."""
    self.hotel = Hotel(name='Test Hotel')
    self.hotel.save()
    self.hotel_user = HotelUser(user=self.user, hotel=self.hotel)
    self.hotel_user.save()
    payload = {'username': 'testuser', 'password': '12345',
               'requested_token_type': 'hotel_user'}
    response = self.client.post('/api/tokens/', payload)
    self.assertEqual(response.status_code, 200)
    self.assertEqual(Token.objects.count(), 1)
    self.assertEqual(Token.objects.last().content_object, self.hotel_user)
def test_request_for_hub_user_token_as_non_hub(self):
    """Plain users may not request a hub_user token."""
    payload = {'username': 'testuser', 'password': '12345',
               'requested_token_type': 'hub_user'}
    response = self.client.post('/api/tokens/', payload)
    self.assertEqual(response.status_code, 400)
    self.assertEqual(response.data,
                     {'error': 'You do not have permissions to request such token'})
def test_request_for_hub_user_token_as_hub(self):
    """Hub users are currently refused a token (400 with the API's
    placeholder error message)."""
    self.hotel = Hotel(name='Test Hotel')
    self.hotel.save()
    self.hub = Hub(hotel=self.hotel)
    self.hub.save()
    self.hub_user = HubUser(user=self.user, hub=self.hub)
    self.hub_user.save()
    payload = {'username': 'testuser', 'password': '12345',
               'requested_token_type': 'hub_user'}
    response = self.client.post('/api/tokens/', payload)
    self.assertEqual(response.status_code, 400)
    self.assertEqual(response.data, {'error': 'Testa3bat yabni?'})
def test_request_for_anonymous_token_from_logged_out_session(self):
    """Anonymous tokens need neither credentials nor a session."""
    response = self.client.post('/api/tokens/',
                                {'requested_token_type': 'anonymous_user'})
    self.assertEqual(response.status_code, 200)
    self.assertEqual(Token.objects.count(), 1)
    self.assertEqual(Token.objects.last().content_object, None)
def test_request_for_anonymous_token_from_logged_in_session(self):
    """A logged-in session may still request an anonymous token."""
    self.client.login(username='testuser', password='12345')
    response = self.client.post(
        '/api/tokens/', {'requested_token_type': 'anonymous_user'}
    )
    # the request succeeds
    self.assertEqual(response.status_code, 200)
    # exactly one token now exists
    self.assertEqual(Token.objects.count(), 1)
    # anonymous tokens are bound to no user object
    self.assertIsNone(Token.objects.last().content_object)
def test_request_for_admin_token_from_logged_out_session(self):
    """An admin token request with no credentials and no session fails."""
    response = self.client.post(
        '/api/tokens/', {'requested_token_type': 'admin_user'}
    )
    # rejected with a bad-request status
    self.assertEqual(response.status_code, 400)
    # and the missing-credentials error message
    self.assertEqual(
        response.data,
        {'error': 'No user credentials and session does not exist'},
    )
def test_request_for_admin_token_from_logged_in_non_admin_session(self):
    """A logged-in session without an admin profile may not get an admin token."""
    self.client.login(username='testuser', password='12345')
    response = self.client.post(
        '/api/tokens/', {'requested_token_type': 'admin_user'}
    )
    # rejected with a bad-request status
    self.assertEqual(response.status_code, 400)
    # and the permissions error message
    self.assertEqual(
        response.data,
        {'error': 'You do not have permissions to request such token'},
    )
def test_request_for_admin_token_from_logged_in_admin_session(self):
    """A logged-in admin session receives an admin token bound to its profile."""
    self.admin_user = AdminUser(user=self.user)
    self.admin_user.save()
    self.client.login(username='testuser', password='12345')
    response = self.client.post(
        '/api/tokens/', {'requested_token_type': 'admin_user'}
    )
    # the request succeeds
    self.assertEqual(response.status_code, 200)
    # exactly one token now exists
    self.assertEqual(Token.objects.count(), 1)
    # the new token points back at the admin profile
    self.assertEqual(Token.objects.last().content_object, self.admin_user)
def test_request_for_guest_token_from_logged_out_session(self):
    """A guest token request with no credentials and no session fails."""
    response = self.client.post(
        '/api/tokens/', {'requested_token_type': 'guest_user'}
    )
    # rejected with a bad-request status
    self.assertEqual(response.status_code, 400)
    # and the missing-credentials error message
    self.assertEqual(
        response.data,
        {'error': 'No user credentials and session does not exist'},
    )
def test_request_for_guest_token_from_logged_in_non_guest_session(self):
    """A logged-in session without a guest profile may not get a guest token."""
    self.client.login(username='testuser', password='12345')
    response = self.client.post(
        '/api/tokens/', {'requested_token_type': 'guest_user'}
    )
    # rejected with a bad-request status
    self.assertEqual(response.status_code, 400)
    # and the permissions error message
    self.assertEqual(
        response.data,
        {'error': 'You do not have permissions to request such token'},
    )
def test_request_for_guest_token_from_logged_in_guest_session(self):
    """A logged-in guest session receives a guest token bound to its profile."""
    self.guest_user = GuestUser(user=self.user)
    self.guest_user.save()
    self.client.login(username='testuser', password='12345')
    response = self.client.post('/api/tokens/', {
        'requested_token_type': 'guest_user'
    })
    # assert an OK status code
    self.assertEqual(response.status_code, 200)
    # assert a token was created
    self.assertEqual(Token.objects.count(), 1)
    # assert the token is a guest token for this user
    # (original comment said "admin token" — copy-paste error)
    self.assertEqual(Token.objects.last().content_object, self.guest_user)
def test_request_for_hotel_token_from_logged_out_session(self):
    """A hotel token request with no credentials and no session fails."""
    response = self.client.post(
        '/api/tokens/', {'requested_token_type': 'hotel_user'}
    )
    # rejected with a bad-request status
    self.assertEqual(response.status_code, 400)
    # and the missing-credentials error message
    self.assertEqual(
        response.data,
        {'error': 'No user credentials and session does not exist'},
    )
def test_request_for_hotel_token_from_logged_in_non_hotel_session(self):
    """A logged-in session without a hotel profile may not get a hotel token."""
    self.client.login(username='testuser', password='12345')
    response = self.client.post(
        '/api/tokens/', {'requested_token_type': 'hotel_user'}
    )
    # rejected with a bad-request status
    self.assertEqual(response.status_code, 400)
    # and the permissions error message
    self.assertEqual(
        response.data,
        {'error': 'You do not have permissions to request such token'},
    )
def test_request_for_hotel_token_from_logged_in_hotel_session(self):
    """A logged-in hotel session receives a hotel token bound to its profile."""
    self.hotel = Hotel(name='Test Hotel')
    self.hotel.save()
    self.hotel_user = HotelUser(user=self.user, hotel=self.hotel)
    self.hotel_user.save()
    self.client.login(username='testuser', password='12345')
    response = self.client.post('/api/tokens/', {
        'requested_token_type': 'hotel_user'
    })
    # assert an OK status code
    self.assertEqual(response.status_code, 200)
    # assert a token was created
    self.assertEqual(Token.objects.count(), 1)
    # assert the token is a hotel token for this user
    # (original comment said "admin token" — copy-paste error)
    self.assertEqual(Token.objects.last().content_object, self.hotel_user)
def test_request_for_hub_token_from_logged_out_session(self):
    """A hub token request with no credentials and no session fails."""
    response = self.client.post(
        '/api/tokens/', {'requested_token_type': 'hub_user'}
    )
    # rejected with a bad-request status
    self.assertEqual(response.status_code, 400)
    # and the missing-credentials error message
    self.assertEqual(
        response.data,
        {'error': 'No user credentials and session does not exist'},
    )
def test_request_for_hub_token_from_logged_in_non_hub_session(self):
    """A logged-in session without a hub profile may not get a hub token."""
    self.client.login(username='testuser', password='12345')
    response = self.client.post(
        '/api/tokens/', {'requested_token_type': 'hub_user'}
    )
    # rejected with a bad-request status
    self.assertEqual(response.status_code, 400)
    # and the permissions error message
    self.assertEqual(
        response.data,
        {'error': 'You do not have permissions to request such token'},
    )
def test_request_for_hub_token_from_logged_in_hub_session(self):
    """Session-based hub token requests are rejected even for hub users."""
    self.hotel = Hotel(name='Test Hotel')
    self.hotel.save()
    self.hub = Hub(hotel=self.hotel)
    self.hub.save()
    self.hub_user = HubUser(user=self.user, hub=self.hub)
    self.hub_user.save()
    self.client.login(username='testuser', password='12345')
    response = self.client.post('/api/tokens/', {
        'requested_token_type': 'hub_user'
    })
    # assert a bad-request status code
    self.assertEqual(response.status_code, 400)
    # assert the error message matches the view's response
    # NOTE(review): 'Testa3bat yabni?' looks like a placeholder/joke message
    # in the view under test — confirm it is the intended user-facing error.
    self.assertEqual(response.data, {'error': 'Testa3bat yabni?'})
| 40.780282
| 103
| 0.652967
| 1,733
| 14,477
| 5.252741
| 0.05828
| 0.100516
| 0.108646
| 0.048555
| 0.95485
| 0.951994
| 0.944304
| 0.944304
| 0.91003
| 0.889926
| 0
| 0.017678
| 0.24197
| 14,477
| 354
| 104
| 40.89548
| 0.811828
| 0.155902
| 0
| 0.831169
| 0
| 0
| 0.194522
| 0
| 0
| 0
| 0
| 0
| 0.264069
| 1
| 0.116883
| false
| 0.099567
| 0.012987
| 0
| 0.134199
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 8
|
6f05ad6d1fe139b6f653eba757eb8847818fff5e
| 1,492
|
py
|
Python
|
src/hashtags/models.py
|
littleprodigy/Twitter
|
25ef96a291d295bb91f824f331fd6a648dc79117
|
[
"MIT"
] | null | null | null |
src/hashtags/models.py
|
littleprodigy/Twitter
|
25ef96a291d295bb91f824f331fd6a648dc79117
|
[
"MIT"
] | null | null | null |
src/hashtags/models.py
|
littleprodigy/Twitter
|
25ef96a291d295bb91f824f331fd6a648dc79117
|
[
"MIT"
] | null | null | null |
from django.db import models
from django.urls import reverse_lazy
# Create your models here.
from .signals import parsed_hashtags
from tweets.models import Tweet
# Create your models here.
class HashTag(models.Model):
    """A single hashtag extracted from tweet content."""

    tag = models.CharField(max_length=120)
    timestamp = models.DateTimeField(auto_now_add=True)

    def __str__(self):  # __unicode__
        """Display the raw tag text."""
        return self.tag

    def get_absolute_url(self):
        """URL of the page listing tweets for this hashtag."""
        return reverse_lazy("hashtag", kwargs={"hashtag": self.tag})

    def get_tweets(self):
        """All tweets whose content mentions this tag (case-insensitive)."""
        needle = "#" + self.tag
        return Tweet.objects.filter(content__icontains=needle)
def parsed_hashtags_receiver(sender, hashtag_list, *args, **kwargs):
    """Signal receiver: persist every hashtag found in a parsed tweet.

    Invoked via the ``parsed_hashtags`` signal with ``hashtag_list``, the
    list of tag strings extracted from a tweet's content.  ``get_or_create``
    keeps tags unique, so re-parsing a tweet never duplicates rows.
    """
    # Iterating directly handles the empty list; the original's explicit
    # ``if len(hashtag_list) > 0`` guard was redundant, and the unpacked
    # ``new_tag, create`` names were never used.
    for tag_text in hashtag_list:
        HashTag.objects.get_or_create(tag=tag_text)

# Register the receiver so hashtags are saved whenever a tweet is parsed.
parsed_hashtags.connect(parsed_hashtags_receiver)
# from django.db import models
# from django.urls import reverse_lazy
# # Create your models here.
# from tweets.models import Tweet
# from .signals import parsed_hashtags
# class HashTag(models.Model):
# tag = models.CharField(max_length=120)
# timestamp = models.DateTimeField(auto_now_add=True)
# def __str__(self): # __unicode__
# return self.tag
# def parsed_hashtags_receiver(sender, hashtag_list, *args, **kwargs):
# if len(hashtag_list) > 0:
# for tag_var in hashtag_list:
# new_tag, create = HashTag.objects.get_or_create(tag=tag_var)
# parsed_hashtags.connect(parsed_hashtags_receiver)
| 25.288136
| 74
| 0.725201
| 201
| 1,492
| 5.094527
| 0.278607
| 0.109375
| 0.039063
| 0.058594
| 0.865234
| 0.759766
| 0.759766
| 0.759766
| 0.759766
| 0.759766
| 0
| 0.006531
| 0.178954
| 1,492
| 58
| 75
| 25.724138
| 0.829388
| 0.448391
| 0
| 0
| 0
| 0
| 0.018703
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.222222
| false
| 0
| 0.222222
| 0.166667
| 0.777778
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 9
|
48b18a88b4dbec6bfc7bb924eba529efb26299d1
| 9,444
|
py
|
Python
|
ooiservices/tests/common_tools.py
|
asascience-open/ooi-ui-services
|
a3254b612b5831e5e34beaf93000228826c1ed5a
|
[
"Apache-2.0"
] | 2
|
2015-02-28T00:20:30.000Z
|
2015-04-30T12:40:31.000Z
|
ooiservices/tests/common_tools.py
|
asascience-open/ooi-ui-services
|
a3254b612b5831e5e34beaf93000228826c1ed5a
|
[
"Apache-2.0"
] | 266
|
2015-01-02T21:29:25.000Z
|
2020-01-23T16:00:11.000Z
|
ooiservices/tests/common_tools.py
|
oceanobservatories/ooi-ui-services
|
a3254b612b5831e5e34beaf93000228826c1ed5a
|
[
"Apache-2.0"
] | 13
|
2015-02-04T21:13:34.000Z
|
2016-10-18T14:39:36.000Z
|
#!/usr/bin/env python
"""
Asset Management - Common functions for TestCases.
"""
__author__ = 'Edna Donoughe'
import json
def request_headers():
    """Return the HTTP headers used for uframe PUT and POST requests."""
    headers = {}
    headers["Content-Type"] = "application/json"
    return headers
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Convert input to present all values as string, leaves nulls.
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def get_event_input_as_string(data, debug=False):
    """ Take input from UI and present all values as string type. Leaves nulls.
    Handles one dict level down. Used to simulate UI data from jgrid submit.

    NOTE(review): ``data.copy()`` is a shallow copy, so the nested-dict branch
    (``string_data[key][k] = str(v)``) also mutates the caller's ``data`` —
    confirm this side effect is intended.
    """
    try:
        if debug: print '\n debug -- get_event_input_as_string'
        # Shallow copy: top-level keys are rebound, nested dicts are shared.
        string_data = data.copy()
        keys = data.keys()
        for key in keys:
            if data[key] is not None:
                if not isinstance(data[key], dict):
                    # Non-dict value: stringify in the copy only.
                    string_data[key] = str(data[key])
                else:
                    if debug: print '\n Field is dict: ', key
                    # Iterate a copy so the dict is not modified mid-iteration.
                    tmp_dict = data[key].copy()
                    for k,v in tmp_dict.iteritems():
                        if v is not None:
                            if not isinstance(v, dict):
                                # One level deep only; deeper dicts untouched.
                                string_data[key][k] = str(v)
        return string_data
    except Exception as err:
        if debug: print '\n exception: ', str(err)
        raise
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Convert input to present all values as unicode, leaves nulls.
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def get_event_input_as_unicode(data, debug=False):
    """ Take input from UI and present all values as unicode type. Leaves nulls.
    Handles one dict level down. Used to simulate UI data from jgrid submit.

    Identical to get_event_input_as_string except values are converted with
    ``unicode`` (Python 2 only).
    NOTE(review): ``data.copy()`` is a shallow copy, so the nested-dict branch
    also mutates the caller's ``data`` — confirm this side effect is intended.
    """
    try:
        # Shallow copy: top-level keys are rebound, nested dicts are shared.
        string_data = data.copy()
        keys = data.keys()
        for key in keys:
            if data[key] is not None:
                if not isinstance(data[key], dict):
                    # Non-dict value: convert in the copy only.
                    string_data[key] = unicode(data[key])
                else:
                    if debug: print '\n Field is dict: ', key
                    # Iterate a copy so the dict is not modified mid-iteration.
                    tmp_dict = data[key].copy()
                    for k,v in tmp_dict.iteritems():
                        if v is not None:
                            if not isinstance(v, dict):
                                # One level deep only; deeper dicts untouched.
                                string_data[key][k] = unicode(v)
        return string_data
    except Exception as err:
        if debug: print '\n exception: ', str(err)
        raise
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Dump dictionary provided if debug is enabled.
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def dump_dict(dict, debug=False):
    """ Print dict as pretty-printed, key-sorted JSON if debug enabled.

    NOTE(review): the parameter name shadows the builtin ``dict``; renaming
    it would break keyword callers, so it is left as-is and flagged here.
    """
    if debug:
        print '\n --------------\n dictionary: %s' % json.dumps(dict, indent=4, sort_keys=True)
'''
def get_asset_input_as_string(asset, debug=False):
""" Take input from UI and present all values as string type. Leaves nulls.
Handles one dict level down. Used to simulate UI data from jgrid submit.
"""
try:
if debug: print '\n debug -- get_asset_input_as_string'
string_asset = asset.copy()
keys = asset.keys()
for key in keys:
if asset[key] is not None:
if not isinstance(asset[key], dict):
if not isinstance(asset[key], list):
string_asset[key] = str(asset[key])
else:
# Have a list to process...
list_value = asset[key]
if not list_value:
string_asset[key] = str(asset[key])
else:
if len(list_value) > 0:
if not isinstance(list_value[0], dict):
string_asset[key] = str(asset[key])
else:
#process list of dicts - stringize dict contents...
#print '\n debug -- Have a list of dictionaries, field: ', key
converted_list_value = []
#print '\n debug -- len(converted_list_value): ', len(list_value)
for remote in list_value:
if debug: print '\n debug -- remote: ', remote
tmp_dict = remote.copy()
for k,v in tmp_dict.iteritems():
#print '\n remote convert k: ', k
if v is not None:
if not isinstance(v, dict):
remote[k] = str(v)
if debug: print '\n debug -- converted remote: ', remote
converted_list_value.append(remote)
string_asset[key] = str(converted_list_value)
else:
if debug: print '\n Field is dict: ', key
tmp_dict = asset[key].copy()
for k,v in tmp_dict.iteritems():
if v is not None:
if not isinstance(v, dict):
string_asset[key][k] = str(v)
if debug:
print '\n debug ********get_asset_input_as_string ***********'
print '\n string_asset(%d): ' % len(string_asset)
dump_dict(string_asset, debug)
return string_asset
except Exception as err:
if debug: print '\n exception: ', str(err)
raise
'''
def get_asset_input_as_string(asset, debug=False):
    """ Take input from UI and present all values as string type. Leaves nulls.
    Handles one dict level down. Used to simulate UI data from jgrid submit.

    NOTE(review): the ``debug`` parameter is immediately overridden to False
    below, so callers cannot enable the debug output — confirm whether the
    override is a leftover from debugging.
    NOTE(review): ``asset.copy()`` is shallow; the dict and list-of-dict
    branches (``remote[k] = ...``, ``string_asset[key][k] = ...``) also
    mutate the caller's ``asset`` — confirm this side effect is intended.
    """
    # Forces debug off regardless of the argument (see NOTE above).
    debug = False
    try:
        if debug: print '\n debug -- get_asset_input_as_string'
        # Shallow copy: top-level keys are rebound, nested objects shared.
        string_asset = asset.copy()
        keys = asset.keys()
        for key in keys:
            if asset[key] is not None:
                if not isinstance(asset[key], dict):
                    if not isinstance(asset[key], list):
                        # Plain scalar value: stringify directly.
                        string_asset[key] = str(asset[key])
                    else:
                        # Have a list to process...
                        list_value = asset[key]
                        if not list_value:
                            # Empty list: stringified representation ('[]').
                            string_asset[key] = str(asset[key])
                        else:
                            if len(list_value) > 0:
                                if not isinstance(list_value[0], dict):
                                    # List of scalars: stringify whole list.
                                    string_asset[key] = str(asset[key])
                                else:
                                    #process list of dicts - stringize dict contents...
                                    converted_list_value = []
                                    for remote in list_value:
                                        if debug: print '\n debug -- remote: ', remote
                                        # Iterate a copy; writes go to 'remote' itself.
                                        tmp_dict = remote.copy()
                                        for k,v in tmp_dict.iteritems():
                                            if v is not None:
                                                if not isinstance(v, dict):
                                                    remote[k] = str(v)
                                        if debug: print '\n debug -- converted remote: ', remote
                                        converted_list_value.append(remote)
                                    # Final value is the str() of the converted list.
                                    string_asset[key] = str(converted_list_value)
                else:
                    if debug: print '\n Field is dict: ', key
                    # Nested dict: stringify its non-dict values one level deep.
                    tmp_dict = asset[key].copy()
                    for k,v in tmp_dict.iteritems():
                        if v is not None:
                            if not isinstance(v, dict):
                                string_asset[key][k] = str(v)
        if debug:
            print '\n debug ********get_asset_input_as_string ***********'
            print '\n string_asset(%d): %s' % (len(string_asset),
                                               json.dumps(string_asset, indent=4, sort_keys=True))
        return string_asset
    except Exception as err:
        if debug: print '\n exception: ', str(err)
        raise
| 47.939086
| 110
| 0.414867
| 922
| 9,444
| 4.126898
| 0.113883
| 0.040999
| 0.056767
| 0.061498
| 0.881735
| 0.863863
| 0.863863
| 0.8318
| 0.8318
| 0.8318
| 0
| 0.001396
| 0.469081
| 9,444
| 197
| 111
| 47.939086
| 0.757479
| 0.120606
| 0
| 0.576087
| 0
| 0
| 0.078764
| 0.017574
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.01087
| null | null | 0.141304
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.